15f37396dSChristoph Hellwig // SPDX-License-Identifier: GPL-2.0 257dacad5SJay Sternberg /* 357dacad5SJay Sternberg * NVM Express device driver 457dacad5SJay Sternberg * Copyright (c) 2011-2014, Intel Corporation. 557dacad5SJay Sternberg */ 657dacad5SJay Sternberg 7df4f9bc4SDavid E. Box #include <linux/acpi.h> 8a0a3408eSKeith Busch #include <linux/aer.h> 918119775SKeith Busch #include <linux/async.h> 1057dacad5SJay Sternberg #include <linux/blkdev.h> 1157dacad5SJay Sternberg #include <linux/blk-mq.h> 12dca51e78SChristoph Hellwig #include <linux/blk-mq-pci.h> 13fe45e630SChristoph Hellwig #include <linux/blk-integrity.h> 14ff5350a8SAndy Lutomirski #include <linux/dmi.h> 1557dacad5SJay Sternberg #include <linux/init.h> 1657dacad5SJay Sternberg #include <linux/interrupt.h> 1757dacad5SJay Sternberg #include <linux/io.h> 1899722c8aSChristophe JAILLET #include <linux/kstrtox.h> 19dc90f084SChristoph Hellwig #include <linux/memremap.h> 2057dacad5SJay Sternberg #include <linux/mm.h> 2157dacad5SJay Sternberg #include <linux/module.h> 2277bf25eaSKeith Busch #include <linux/mutex.h> 23d0877473SKeith Busch #include <linux/once.h> 2457dacad5SJay Sternberg #include <linux/pci.h> 25d916b1beSKeith Busch #include <linux/suspend.h> 2657dacad5SJay Sternberg #include <linux/t10-pi.h> 2757dacad5SJay Sternberg #include <linux/types.h> 289cf5c095SLinus Torvalds #include <linux/io-64-nonatomic-lo-hi.h> 2920d3bb92SKlaus Jensen #include <linux/io-64-nonatomic-hi-lo.h> 30a98e58e5SScott Bauer #include <linux/sed-opal.h> 310f238ff5SLogan Gunthorpe #include <linux/pci-p2pdma.h> 3257dacad5SJay Sternberg 33604c01d5Syupeng #include "trace.h" 3457dacad5SJay Sternberg #include "nvme.h" 3557dacad5SJay Sternberg 36c1e0cc7eSBenjamin Herrenschmidt #define SQ_SIZE(q) ((q)->q_depth << (q)->sqes) 378a1d09a6SBenjamin Herrenschmidt #define CQ_SIZE(q) ((q)->q_depth * sizeof(struct nvme_completion)) 3857dacad5SJay Sternberg 3984173423SKeith Busch #define SGES_PER_PAGE (NVME_CTRL_PAGE_SIZE / sizeof(struct nvme_sgl_desc)) 40adf68f21SChristoph Hellwig 41943e942eSJens Axboe /* 42943e942eSJens Axboe * These can be higher, but we need to ensure that any command doesn't 43943e942eSJens Axboe * require an sg allocation that needs more than a page of data. 44943e942eSJens Axboe */ 45943e942eSJens Axboe #define NVME_MAX_KB_SZ 4096 46943e942eSJens Axboe #define NVME_MAX_SEGS 127 47943e942eSJens Axboe 4857dacad5SJay Sternberg static int use_threaded_interrupts; 492e21e445SXin Hao module_param(use_threaded_interrupts, int, 0444); 5057dacad5SJay Sternberg 5157dacad5SJay Sternberg static bool use_cmb_sqes = true; 5269f4eb9fSKeith Busch module_param(use_cmb_sqes, bool, 0444); 5357dacad5SJay Sternberg MODULE_PARM_DESC(use_cmb_sqes, "use controller's memory buffer for I/O SQes"); 5457dacad5SJay Sternberg 5587ad72a5SChristoph Hellwig static unsigned int max_host_mem_size_mb = 128; 5687ad72a5SChristoph Hellwig module_param(max_host_mem_size_mb, uint, 0444); 5787ad72a5SChristoph Hellwig MODULE_PARM_DESC(max_host_mem_size_mb, 5887ad72a5SChristoph Hellwig "Maximum Host Memory Buffer (HMB) size per controller (in MiB)"); 5957dacad5SJay Sternberg 60a7a7cbe3SChaitanya Kulkarni static unsigned int sgl_threshold = SZ_32K; 61a7a7cbe3SChaitanya Kulkarni module_param(sgl_threshold, uint, 0644); 62a7a7cbe3SChaitanya Kulkarni MODULE_PARM_DESC(sgl_threshold, 63a7a7cbe3SChaitanya Kulkarni "Use SGLs when average request segment size is larger or equal to " 64a7a7cbe3SChaitanya Kulkarni "this size. 
Use 0 to disable SGLs."); 65a7a7cbe3SChaitanya Kulkarni 6627453b45SSagi Grimberg #define NVME_PCI_MIN_QUEUE_SIZE 2 6727453b45SSagi Grimberg #define NVME_PCI_MAX_QUEUE_SIZE 4095 68b27c1e68Sweiping zhang static int io_queue_depth_set(const char *val, const struct kernel_param *kp); 69b27c1e68Sweiping zhang static const struct kernel_param_ops io_queue_depth_ops = { 70b27c1e68Sweiping zhang .set = io_queue_depth_set, 7161f3b896SChaitanya Kulkarni .get = param_get_uint, 72b27c1e68Sweiping zhang }; 73b27c1e68Sweiping zhang 7461f3b896SChaitanya Kulkarni static unsigned int io_queue_depth = 1024; 75b27c1e68Sweiping zhang module_param_cb(io_queue_depth, &io_queue_depth_ops, &io_queue_depth, 0644); 7627453b45SSagi Grimberg MODULE_PARM_DESC(io_queue_depth, "set io queue depth, should >= 2 and < 4096"); 77b27c1e68Sweiping zhang 789c9e76d5SWeiping Zhang static int io_queue_count_set(const char *val, const struct kernel_param *kp) 799c9e76d5SWeiping Zhang { 809c9e76d5SWeiping Zhang unsigned int n; 819c9e76d5SWeiping Zhang int ret; 829c9e76d5SWeiping Zhang 839c9e76d5SWeiping Zhang ret = kstrtouint(val, 10, &n); 849c9e76d5SWeiping Zhang if (ret != 0 || n > num_possible_cpus()) 859c9e76d5SWeiping Zhang return -EINVAL; 869c9e76d5SWeiping Zhang return param_set_uint(val, kp); 879c9e76d5SWeiping Zhang } 889c9e76d5SWeiping Zhang 899c9e76d5SWeiping Zhang static const struct kernel_param_ops io_queue_count_ops = { 909c9e76d5SWeiping Zhang .set = io_queue_count_set, 919c9e76d5SWeiping Zhang .get = param_get_uint, 929c9e76d5SWeiping Zhang }; 939c9e76d5SWeiping Zhang 943f68baf7SKeith Busch static unsigned int write_queues; 959c9e76d5SWeiping Zhang module_param_cb(write_queues, &io_queue_count_ops, &write_queues, 0644); 963b6592f7SJens Axboe MODULE_PARM_DESC(write_queues, 973b6592f7SJens Axboe "Number of queues to use for writes. If not set, reads and writes " 983b6592f7SJens Axboe "will share a queue set."); 993b6592f7SJens Axboe 1003f68baf7SKeith Busch static unsigned int poll_queues; 1019c9e76d5SWeiping Zhang module_param_cb(poll_queues, &io_queue_count_ops, &poll_queues, 0644); 1024b04cc6aSJens Axboe MODULE_PARM_DESC(poll_queues, "Number of queues to use for polled IO."); 1034b04cc6aSJens Axboe 104df4f9bc4SDavid E. Box static bool noacpi; 105df4f9bc4SDavid E. Box module_param(noacpi, bool, 0444); 106df4f9bc4SDavid E. Box MODULE_PARM_DESC(noacpi, "disable acpi bios quirks"); 107df4f9bc4SDavid E. Box 1081c63dc66SChristoph Hellwig struct nvme_dev; 1091c63dc66SChristoph Hellwig struct nvme_queue; 11057dacad5SJay Sternberg 111a5cdb68cSKeith Busch static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown); 1127d879c90SChristoph Hellwig static void nvme_delete_io_queues(struct nvme_dev *dev); 11357dacad5SJay Sternberg 11457dacad5SJay Sternberg /* 1151c63dc66SChristoph Hellwig * Represents an NVM Express device. Each nvme_dev is a PCI function. 
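 * It owns the admin and I/O queues in ->queues, the blk-mq tag sets used
 * to allocate requests for them, the mapped doorbell registers behind
 * ->dbs, and (when supported) the controller memory buffer and host
 * memory buffer resources tracked below.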
1161c63dc66SChristoph Hellwig */ 1171c63dc66SChristoph Hellwig struct nvme_dev { 118147b27e4SSagi Grimberg struct nvme_queue *queues; 1191c63dc66SChristoph Hellwig struct blk_mq_tag_set tagset; 1201c63dc66SChristoph Hellwig struct blk_mq_tag_set admin_tagset; 1211c63dc66SChristoph Hellwig u32 __iomem *dbs; 1221c63dc66SChristoph Hellwig struct device *dev; 1231c63dc66SChristoph Hellwig struct dma_pool *prp_page_pool; 1241c63dc66SChristoph Hellwig struct dma_pool *prp_small_pool; 1251c63dc66SChristoph Hellwig unsigned online_queues; 1261c63dc66SChristoph Hellwig unsigned max_qid; 127e20ba6e1SChristoph Hellwig unsigned io_queues[HCTX_MAX_TYPES]; 12822b55601SKeith Busch unsigned int num_vecs; 1297442ddceSJohn Garry u32 q_depth; 130c1e0cc7eSBenjamin Herrenschmidt int io_sqes; 1311c63dc66SChristoph Hellwig u32 db_stride; 1321c63dc66SChristoph Hellwig void __iomem *bar; 13397f6ef64SXu Yu unsigned long bar_mapped_size; 13477bf25eaSKeith Busch struct mutex shutdown_lock; 1351c63dc66SChristoph Hellwig bool subsystem; 1361c63dc66SChristoph Hellwig u64 cmb_size; 1370f238ff5SLogan Gunthorpe bool cmb_use_sqes; 1381c63dc66SChristoph Hellwig u32 cmbsz; 139202021c1SStephen Bates u32 cmbloc; 1401c63dc66SChristoph Hellwig struct nvme_ctrl ctrl; 141d916b1beSKeith Busch u32 last_ps; 142a5df5e79SKeith Busch bool hmb; 14387ad72a5SChristoph Hellwig 144943e942eSJens Axboe mempool_t *iod_mempool; 145943e942eSJens Axboe 14687ad72a5SChristoph Hellwig /* shadow doorbell buffer support: */ 147b5f96cb7SKlaus Jensen __le32 *dbbuf_dbs; 148f9f38e33SHelen Koike dma_addr_t dbbuf_dbs_dma_addr; 149b5f96cb7SKlaus Jensen __le32 *dbbuf_eis; 150f9f38e33SHelen Koike dma_addr_t dbbuf_eis_dma_addr; 15187ad72a5SChristoph Hellwig 15287ad72a5SChristoph Hellwig /* host memory buffer support: */ 15387ad72a5SChristoph Hellwig u64 host_mem_size; 15487ad72a5SChristoph Hellwig u32 nr_host_mem_descs; 1554033f35dSChristoph Hellwig dma_addr_t host_mem_descs_dma; 15687ad72a5SChristoph Hellwig struct nvme_host_mem_buf_desc *host_mem_descs; 15787ad72a5SChristoph Hellwig void **host_mem_desc_bufs; 1582a5bcfddSWeiping Zhang unsigned int nr_allocated_queues; 1592a5bcfddSWeiping Zhang unsigned int nr_write_queues; 1602a5bcfddSWeiping Zhang unsigned int nr_poll_queues; 16157dacad5SJay Sternberg }; 16257dacad5SJay Sternberg 163b27c1e68Sweiping zhang static int io_queue_depth_set(const char *val, const struct kernel_param *kp) 164b27c1e68Sweiping zhang { 16527453b45SSagi Grimberg return param_set_uint_minmax(val, kp, NVME_PCI_MIN_QUEUE_SIZE, 16627453b45SSagi Grimberg NVME_PCI_MAX_QUEUE_SIZE); 167b27c1e68Sweiping zhang } 168b27c1e68Sweiping zhang 169f9f38e33SHelen Koike static inline unsigned int sq_idx(unsigned int qid, u32 stride) 170f9f38e33SHelen Koike { 171f9f38e33SHelen Koike return qid * 2 * stride; 172f9f38e33SHelen Koike } 173f9f38e33SHelen Koike 174f9f38e33SHelen Koike static inline unsigned int cq_idx(unsigned int qid, u32 stride) 175f9f38e33SHelen Koike { 176f9f38e33SHelen Koike return (qid * 2 + 1) * stride; 177f9f38e33SHelen Koike } 178f9f38e33SHelen Koike 1791c63dc66SChristoph Hellwig static inline struct nvme_dev *to_nvme_dev(struct nvme_ctrl *ctrl) 1801c63dc66SChristoph Hellwig { 1811c63dc66SChristoph Hellwig return container_of(ctrl, struct nvme_dev, ctrl); 1821c63dc66SChristoph Hellwig } 1831c63dc66SChristoph Hellwig 18457dacad5SJay Sternberg /* 18557dacad5SJay Sternberg * An NVM Express queue. Each device has at least two (one for admin 18657dacad5SJay Sternberg * commands and one for I/O commands). 
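 * Queue 0 is always the admin queue; I/O queues are numbered from 1.
 * Submission queue entries are sized by the ->sqes shift and completion
 * queue entries are fixed-size struct nvme_completion, which is what the
 * SQ_SIZE() and CQ_SIZE() helpers above encode.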
18757dacad5SJay Sternberg */ 18857dacad5SJay Sternberg struct nvme_queue { 18957dacad5SJay Sternberg struct nvme_dev *dev; 1901ab0cd69SJens Axboe spinlock_t sq_lock; 191c1e0cc7eSBenjamin Herrenschmidt void *sq_cmds; 1923a7afd8eSChristoph Hellwig /* only used for poll queues: */ 1933a7afd8eSChristoph Hellwig spinlock_t cq_poll_lock ____cacheline_aligned_in_smp; 19474943d45SKeith Busch struct nvme_completion *cqes; 19557dacad5SJay Sternberg dma_addr_t sq_dma_addr; 19657dacad5SJay Sternberg dma_addr_t cq_dma_addr; 19757dacad5SJay Sternberg u32 __iomem *q_db; 1987442ddceSJohn Garry u32 q_depth; 1997c349ddeSKeith Busch u16 cq_vector; 20057dacad5SJay Sternberg u16 sq_tail; 20138210800SKeith Busch u16 last_sq_tail; 20257dacad5SJay Sternberg u16 cq_head; 20357dacad5SJay Sternberg u16 qid; 20457dacad5SJay Sternberg u8 cq_phase; 205c1e0cc7eSBenjamin Herrenschmidt u8 sqes; 2064e224106SChristoph Hellwig unsigned long flags; 2074e224106SChristoph Hellwig #define NVMEQ_ENABLED 0 20863223078SChristoph Hellwig #define NVMEQ_SQ_CMB 1 209d1ed6aa1SChristoph Hellwig #define NVMEQ_DELETE_ERROR 2 2107c349ddeSKeith Busch #define NVMEQ_POLLED 3 211b5f96cb7SKlaus Jensen __le32 *dbbuf_sq_db; 212b5f96cb7SKlaus Jensen __le32 *dbbuf_cq_db; 213b5f96cb7SKlaus Jensen __le32 *dbbuf_sq_ei; 214b5f96cb7SKlaus Jensen __le32 *dbbuf_cq_ei; 215d1ed6aa1SChristoph Hellwig struct completion delete_done; 21657dacad5SJay Sternberg }; 21757dacad5SJay Sternberg 21857dacad5SJay Sternberg /* 2199b048119SChristoph Hellwig * The nvme_iod describes the data in an I/O. 2209b048119SChristoph Hellwig * 2219b048119SChristoph Hellwig * The sg pointer contains the list of PRP/SGL chunk allocations in addition 2229b048119SChristoph Hellwig * to the actual struct scatterlist. 22371bd150cSChristoph Hellwig */ 22471bd150cSChristoph Hellwig struct nvme_iod { 225d49187e9SChristoph Hellwig struct nvme_request req; 226af7fae85SKeith Busch struct nvme_command cmd; 227a7a7cbe3SChaitanya Kulkarni bool use_sgl; 22852da4f3fSKeith Busch bool aborted; 229c372cdd1SKeith Busch s8 nr_allocations; /* PRP list pool allocations. 0 means small 230c372cdd1SKeith Busch pool in use */ 231dff824b2SChristoph Hellwig unsigned int dma_len; /* length of single DMA segment mapping */ 232c4c22c52SKeith Busch dma_addr_t first_dma; 233783b94bdSChristoph Hellwig dma_addr_t meta_dma; 23491fb2b60SLogan Gunthorpe struct sg_table sgt; 23557dacad5SJay Sternberg }; 23657dacad5SJay Sternberg 2372a5bcfddSWeiping Zhang static inline unsigned int nvme_dbbuf_size(struct nvme_dev *dev) 2383b6592f7SJens Axboe { 2392a5bcfddSWeiping Zhang return dev->nr_allocated_queues * 8 * dev->db_stride; 240f9f38e33SHelen Koike } 241f9f38e33SHelen Koike 24265a54646SChristoph Hellwig static void nvme_dbbuf_dma_alloc(struct nvme_dev *dev) 243f9f38e33SHelen Koike { 2442a5bcfddSWeiping Zhang unsigned int mem_size = nvme_dbbuf_size(dev); 245f9f38e33SHelen Koike 24665a54646SChristoph Hellwig if (!(dev->ctrl.oacs & NVME_CTRL_OACS_DBBUF_SUPP)) 24765a54646SChristoph Hellwig return; 24865a54646SChristoph Hellwig 24958847f12SKeith Busch if (dev->dbbuf_dbs) { 25058847f12SKeith Busch /* 25158847f12SKeith Busch * Clear the dbbuf memory so the driver doesn't observe stale 25258847f12SKeith Busch * values from the previous instantiation. 
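 * (This branch runs when the buffers were already allocated by an
 * earlier reset of the same controller and are being reused.)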
25358847f12SKeith Busch */ 25458847f12SKeith Busch memset(dev->dbbuf_dbs, 0, mem_size); 25558847f12SKeith Busch memset(dev->dbbuf_eis, 0, mem_size); 25665a54646SChristoph Hellwig return; 25758847f12SKeith Busch } 258f9f38e33SHelen Koike 259f9f38e33SHelen Koike dev->dbbuf_dbs = dma_alloc_coherent(dev->dev, mem_size, 260f9f38e33SHelen Koike &dev->dbbuf_dbs_dma_addr, 261f9f38e33SHelen Koike GFP_KERNEL); 262f9f38e33SHelen Koike if (!dev->dbbuf_dbs) 26365a54646SChristoph Hellwig goto fail; 264f9f38e33SHelen Koike dev->dbbuf_eis = dma_alloc_coherent(dev->dev, mem_size, 265f9f38e33SHelen Koike &dev->dbbuf_eis_dma_addr, 266f9f38e33SHelen Koike GFP_KERNEL); 26765a54646SChristoph Hellwig if (!dev->dbbuf_eis) 26865a54646SChristoph Hellwig goto fail_free_dbbuf_dbs; 26965a54646SChristoph Hellwig return; 270f9f38e33SHelen Koike 27165a54646SChristoph Hellwig fail_free_dbbuf_dbs: 27265a54646SChristoph Hellwig dma_free_coherent(dev->dev, mem_size, dev->dbbuf_dbs, 27365a54646SChristoph Hellwig dev->dbbuf_dbs_dma_addr); 27465a54646SChristoph Hellwig dev->dbbuf_dbs = NULL; 27565a54646SChristoph Hellwig fail: 27665a54646SChristoph Hellwig dev_warn(dev->dev, "unable to allocate dma for dbbuf\n"); 277f9f38e33SHelen Koike } 278f9f38e33SHelen Koike 279f9f38e33SHelen Koike static void nvme_dbbuf_dma_free(struct nvme_dev *dev) 280f9f38e33SHelen Koike { 2812a5bcfddSWeiping Zhang unsigned int mem_size = nvme_dbbuf_size(dev); 282f9f38e33SHelen Koike 283f9f38e33SHelen Koike if (dev->dbbuf_dbs) { 284f9f38e33SHelen Koike dma_free_coherent(dev->dev, mem_size, 285f9f38e33SHelen Koike dev->dbbuf_dbs, dev->dbbuf_dbs_dma_addr); 286f9f38e33SHelen Koike dev->dbbuf_dbs = NULL; 287f9f38e33SHelen Koike } 288f9f38e33SHelen Koike if (dev->dbbuf_eis) { 289f9f38e33SHelen Koike dma_free_coherent(dev->dev, mem_size, 290f9f38e33SHelen Koike dev->dbbuf_eis, dev->dbbuf_eis_dma_addr); 291f9f38e33SHelen Koike dev->dbbuf_eis = NULL; 292f9f38e33SHelen Koike } 293f9f38e33SHelen Koike } 294f9f38e33SHelen Koike 295f9f38e33SHelen Koike static void nvme_dbbuf_init(struct nvme_dev *dev, 296f9f38e33SHelen Koike struct nvme_queue *nvmeq, int qid) 297f9f38e33SHelen Koike { 298f9f38e33SHelen Koike if (!dev->dbbuf_dbs || !qid) 299f9f38e33SHelen Koike return; 300f9f38e33SHelen Koike 301f9f38e33SHelen Koike nvmeq->dbbuf_sq_db = &dev->dbbuf_dbs[sq_idx(qid, dev->db_stride)]; 302f9f38e33SHelen Koike nvmeq->dbbuf_cq_db = &dev->dbbuf_dbs[cq_idx(qid, dev->db_stride)]; 303f9f38e33SHelen Koike nvmeq->dbbuf_sq_ei = &dev->dbbuf_eis[sq_idx(qid, dev->db_stride)]; 304f9f38e33SHelen Koike nvmeq->dbbuf_cq_ei = &dev->dbbuf_eis[cq_idx(qid, dev->db_stride)]; 305f9f38e33SHelen Koike } 306f9f38e33SHelen Koike 3070f0d2c87SMinwoo Im static void nvme_dbbuf_free(struct nvme_queue *nvmeq) 3080f0d2c87SMinwoo Im { 3090f0d2c87SMinwoo Im if (!nvmeq->qid) 3100f0d2c87SMinwoo Im return; 3110f0d2c87SMinwoo Im 3120f0d2c87SMinwoo Im nvmeq->dbbuf_sq_db = NULL; 3130f0d2c87SMinwoo Im nvmeq->dbbuf_cq_db = NULL; 3140f0d2c87SMinwoo Im nvmeq->dbbuf_sq_ei = NULL; 3150f0d2c87SMinwoo Im nvmeq->dbbuf_cq_ei = NULL; 3160f0d2c87SMinwoo Im } 3170f0d2c87SMinwoo Im 318f9f38e33SHelen Koike static void nvme_dbbuf_set(struct nvme_dev *dev) 319f9f38e33SHelen Koike { 320f66e2804SChaitanya Kulkarni struct nvme_command c = { }; 3210f0d2c87SMinwoo Im unsigned int i; 322f9f38e33SHelen Koike 323f9f38e33SHelen Koike if (!dev->dbbuf_dbs) 324f9f38e33SHelen Koike return; 325f9f38e33SHelen Koike 326f9f38e33SHelen Koike c.dbbuf.opcode = nvme_admin_dbbuf; 327f9f38e33SHelen Koike c.dbbuf.prp1 = 
cpu_to_le64(dev->dbbuf_dbs_dma_addr); 328f9f38e33SHelen Koike c.dbbuf.prp2 = cpu_to_le64(dev->dbbuf_eis_dma_addr); 329f9f38e33SHelen Koike 330f9f38e33SHelen Koike if (nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0)) { 3319bdcfb10SChristoph Hellwig dev_warn(dev->ctrl.device, "unable to set dbbuf\n"); 332f9f38e33SHelen Koike /* Free memory and continue on */ 333f9f38e33SHelen Koike nvme_dbbuf_dma_free(dev); 3340f0d2c87SMinwoo Im 3350f0d2c87SMinwoo Im for (i = 1; i <= dev->online_queues; i++) 3360f0d2c87SMinwoo Im nvme_dbbuf_free(&dev->queues[i]); 337f9f38e33SHelen Koike } 338f9f38e33SHelen Koike } 339f9f38e33SHelen Koike 340f9f38e33SHelen Koike static inline int nvme_dbbuf_need_event(u16 event_idx, u16 new_idx, u16 old) 341f9f38e33SHelen Koike { 342f9f38e33SHelen Koike return (u16)(new_idx - event_idx - 1) < (u16)(new_idx - old); 343f9f38e33SHelen Koike } 344f9f38e33SHelen Koike 345f9f38e33SHelen Koike /* Update dbbuf and return true if an MMIO is required */ 346b5f96cb7SKlaus Jensen static bool nvme_dbbuf_update_and_check_event(u16 value, __le32 *dbbuf_db, 347b5f96cb7SKlaus Jensen volatile __le32 *dbbuf_ei) 348f9f38e33SHelen Koike { 349f9f38e33SHelen Koike if (dbbuf_db) { 350b5f96cb7SKlaus Jensen u16 old_value, event_idx; 351f9f38e33SHelen Koike 352f9f38e33SHelen Koike /* 353f9f38e33SHelen Koike * Ensure that the queue is written before updating 354f9f38e33SHelen Koike * the doorbell in memory 355f9f38e33SHelen Koike */ 356f9f38e33SHelen Koike wmb(); 357f9f38e33SHelen Koike 358b5f96cb7SKlaus Jensen old_value = le32_to_cpu(*dbbuf_db); 359b5f96cb7SKlaus Jensen *dbbuf_db = cpu_to_le32(value); 360f9f38e33SHelen Koike 361f1ed3df2SMichal Wnukowski /* 362f1ed3df2SMichal Wnukowski * Ensure that the doorbell is updated before reading the event 363f1ed3df2SMichal Wnukowski * index from memory. The controller needs to provide similar 364f1ed3df2SMichal Wnukowski * ordering to ensure the event index is updated before reading 365f1ed3df2SMichal Wnukowski * the doorbell. 366f1ed3df2SMichal Wnukowski */ 367f1ed3df2SMichal Wnukowski mb(); 368f1ed3df2SMichal Wnukowski 369b5f96cb7SKlaus Jensen event_idx = le32_to_cpu(*dbbuf_ei); 370b5f96cb7SKlaus Jensen if (!nvme_dbbuf_need_event(event_idx, value, old_value)) 371f9f38e33SHelen Koike return false; 372f9f38e33SHelen Koike } 373f9f38e33SHelen Koike 374f9f38e33SHelen Koike return true; 37557dacad5SJay Sternberg } 37657dacad5SJay Sternberg 37757dacad5SJay Sternberg /* 37857dacad5SJay Sternberg * Will slightly overestimate the number of pages needed. This is OK 37957dacad5SJay Sternberg * as it only leads to a small amount of wasted memory for the lifetime of 38057dacad5SJay Sternberg * the I/O. 38157dacad5SJay Sternberg */ 382b13c6393SChaitanya Kulkarni static int nvme_pci_npages_prp(void) 38357dacad5SJay Sternberg { 384c89a529eSKeith Busch unsigned max_bytes = (NVME_MAX_KB_SZ * 1024) + NVME_CTRL_PAGE_SIZE; 385c89a529eSKeith Busch unsigned nprps = DIV_ROUND_UP(max_bytes, NVME_CTRL_PAGE_SIZE); 38684173423SKeith Busch return DIV_ROUND_UP(8 * nprps, NVME_CTRL_PAGE_SIZE - 8); 38757dacad5SJay Sternberg } 38857dacad5SJay Sternberg 389a7a7cbe3SChaitanya Kulkarni /* 390a7a7cbe3SChaitanya Kulkarni * Calculates the number of pages needed for the SGL segments. For example a 4k 391a7a7cbe3SChaitanya Kulkarni * page can accommodate 256 SGL descriptors.
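 * As a worked example with the constants above: each descriptor is 16
 * bytes, so the worst case of NVME_MAX_SEGS (127) descriptors needs
 * DIV_ROUND_UP(127 * 16, NVME_CTRL_PAGE_SIZE) == 1 page.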
392a7a7cbe3SChaitanya Kulkarni */ 393b13c6393SChaitanya Kulkarni static int nvme_pci_npages_sgl(void) 394f4800d6dSChristoph Hellwig { 395b13c6393SChaitanya Kulkarni return DIV_ROUND_UP(NVME_MAX_SEGS * sizeof(struct nvme_sgl_desc), 39684173423SKeith Busch NVME_CTRL_PAGE_SIZE); 397f4800d6dSChristoph Hellwig } 398f4800d6dSChristoph Hellwig 39957dacad5SJay Sternberg static int nvme_admin_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, 40057dacad5SJay Sternberg unsigned int hctx_idx) 40157dacad5SJay Sternberg { 4020da7feaaSChristoph Hellwig struct nvme_dev *dev = to_nvme_dev(data); 403147b27e4SSagi Grimberg struct nvme_queue *nvmeq = &dev->queues[0]; 40457dacad5SJay Sternberg 40557dacad5SJay Sternberg WARN_ON(hctx_idx != 0); 40657dacad5SJay Sternberg WARN_ON(dev->admin_tagset.tags[0] != hctx->tags); 40757dacad5SJay Sternberg 40857dacad5SJay Sternberg hctx->driver_data = nvmeq; 40957dacad5SJay Sternberg return 0; 41057dacad5SJay Sternberg } 41157dacad5SJay Sternberg 41257dacad5SJay Sternberg static int nvme_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, 41357dacad5SJay Sternberg unsigned int hctx_idx) 41457dacad5SJay Sternberg { 4150da7feaaSChristoph Hellwig struct nvme_dev *dev = to_nvme_dev(data); 416147b27e4SSagi Grimberg struct nvme_queue *nvmeq = &dev->queues[hctx_idx + 1]; 41757dacad5SJay Sternberg 41857dacad5SJay Sternberg WARN_ON(dev->tagset.tags[hctx_idx] != hctx->tags); 41957dacad5SJay Sternberg hctx->driver_data = nvmeq; 42057dacad5SJay Sternberg return 0; 42157dacad5SJay Sternberg } 42257dacad5SJay Sternberg 423e559398fSChristoph Hellwig static int nvme_pci_init_request(struct blk_mq_tag_set *set, 424e559398fSChristoph Hellwig struct request *req, unsigned int hctx_idx, 425e559398fSChristoph Hellwig unsigned int numa_node) 42657dacad5SJay Sternberg { 4270da7feaaSChristoph Hellwig struct nvme_dev *dev = to_nvme_dev(set->driver_data); 428f4800d6dSChristoph Hellwig struct nvme_iod *iod = blk_mq_rq_to_pdu(req); 42959e29ce6SSagi Grimberg 43059e29ce6SSagi Grimberg nvme_req(req)->ctrl = &dev->ctrl; 431f4b9e6c9SKeith Busch nvme_req(req)->cmd = &iod->cmd; 43257dacad5SJay Sternberg return 0; 43357dacad5SJay Sternberg } 43457dacad5SJay Sternberg 4353b6592f7SJens Axboe static int queue_irq_offset(struct nvme_dev *dev) 4363b6592f7SJens Axboe { 4373b6592f7SJens Axboe /* if we have more than 1 vec, admin queue offsets us by 1 */ 4383b6592f7SJens Axboe if (dev->num_vecs > 1) 4393b6592f7SJens Axboe return 1; 4403b6592f7SJens Axboe 4413b6592f7SJens Axboe return 0; 4423b6592f7SJens Axboe } 4433b6592f7SJens Axboe 444a4e1d0b7SBart Van Assche static void nvme_pci_map_queues(struct blk_mq_tag_set *set) 445dca51e78SChristoph Hellwig { 4460da7feaaSChristoph Hellwig struct nvme_dev *dev = to_nvme_dev(set->driver_data); 4473b6592f7SJens Axboe int i, qoff, offset; 448dca51e78SChristoph Hellwig 4493b6592f7SJens Axboe offset = queue_irq_offset(dev); 4503b6592f7SJens Axboe for (i = 0, qoff = 0; i < set->nr_maps; i++) { 4513b6592f7SJens Axboe struct blk_mq_queue_map *map = &set->map[i]; 4523b6592f7SJens Axboe 4533b6592f7SJens Axboe map->nr_queues = dev->io_queues[i]; 4543b6592f7SJens Axboe if (!map->nr_queues) { 455e20ba6e1SChristoph Hellwig BUG_ON(i == HCTX_TYPE_DEFAULT); 4567e849dd9SChristoph Hellwig continue; 4573b6592f7SJens Axboe } 4583b6592f7SJens Axboe 4594b04cc6aSJens Axboe /* 4604b04cc6aSJens Axboe * The poll queue(s) doesn't have an IRQ (and hence IRQ 4614b04cc6aSJens Axboe * affinity), so use the regular blk-mq cpu mapping 4624b04cc6aSJens Axboe */ 4633b6592f7SJens Axboe map->queue_offset = qoff; 
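		/*
		 * Illustrative example with hypothetical counts: given one
		 * admin vector and io_queues = { 4 default, 2 read, 2 poll },
		 * the default map gets queue_offset 0 and IRQ vectors 1..4,
		 * the read map gets queue_offset 4 and vectors 5..6, and the
		 * poll map gets queue_offset 6 with no vectors, falling back
		 * to blk_mq_map_queues() below.
		 */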
464cb9e0e50SKeith Busch if (i != HCTX_TYPE_POLL && offset) 4653b6592f7SJens Axboe blk_mq_pci_map_queues(map, to_pci_dev(dev->dev), offset); 4664b04cc6aSJens Axboe else 4674b04cc6aSJens Axboe blk_mq_map_queues(map); 4683b6592f7SJens Axboe qoff += map->nr_queues; 4693b6592f7SJens Axboe offset += map->nr_queues; 4703b6592f7SJens Axboe } 471dca51e78SChristoph Hellwig } 472dca51e78SChristoph Hellwig 47338210800SKeith Busch /* 47438210800SKeith Busch * Write sq tail if we are asked to, or if the next command would wrap. 47538210800SKeith Busch */ 47638210800SKeith Busch static inline void nvme_write_sq_db(struct nvme_queue *nvmeq, bool write_sq) 47704f3eafdSJens Axboe { 47838210800SKeith Busch if (!write_sq) { 47938210800SKeith Busch u16 next_tail = nvmeq->sq_tail + 1; 48038210800SKeith Busch 48138210800SKeith Busch if (next_tail == nvmeq->q_depth) 48238210800SKeith Busch next_tail = 0; 48338210800SKeith Busch if (next_tail != nvmeq->last_sq_tail) 48438210800SKeith Busch return; 48538210800SKeith Busch } 48638210800SKeith Busch 48704f3eafdSJens Axboe if (nvme_dbbuf_update_and_check_event(nvmeq->sq_tail, 48804f3eafdSJens Axboe nvmeq->dbbuf_sq_db, nvmeq->dbbuf_sq_ei)) 48904f3eafdSJens Axboe writel(nvmeq->sq_tail, nvmeq->q_db); 49038210800SKeith Busch nvmeq->last_sq_tail = nvmeq->sq_tail; 49104f3eafdSJens Axboe } 49204f3eafdSJens Axboe 4933233b94cSJens Axboe static inline void nvme_sq_copy_cmd(struct nvme_queue *nvmeq, 4943233b94cSJens Axboe struct nvme_command *cmd) 49557dacad5SJay Sternberg { 496c1e0cc7eSBenjamin Herrenschmidt memcpy(nvmeq->sq_cmds + (nvmeq->sq_tail << nvmeq->sqes), 4973233b94cSJens Axboe absolute_pointer(cmd), sizeof(*cmd)); 49890ea5ca4SChristoph Hellwig if (++nvmeq->sq_tail == nvmeq->q_depth) 49990ea5ca4SChristoph Hellwig nvmeq->sq_tail = 0; 50004f3eafdSJens Axboe } 50104f3eafdSJens Axboe 50204f3eafdSJens Axboe static void nvme_commit_rqs(struct blk_mq_hw_ctx *hctx) 50304f3eafdSJens Axboe { 50404f3eafdSJens Axboe struct nvme_queue *nvmeq = hctx->driver_data; 50504f3eafdSJens Axboe 50604f3eafdSJens Axboe spin_lock(&nvmeq->sq_lock); 50738210800SKeith Busch if (nvmeq->sq_tail != nvmeq->last_sq_tail) 50838210800SKeith Busch nvme_write_sq_db(nvmeq, true); 50990ea5ca4SChristoph Hellwig spin_unlock(&nvmeq->sq_lock); 51057dacad5SJay Sternberg } 51157dacad5SJay Sternberg 512a7a7cbe3SChaitanya Kulkarni static void **nvme_pci_iod_list(struct request *req) 51357dacad5SJay Sternberg { 514f4800d6dSChristoph Hellwig struct nvme_iod *iod = blk_mq_rq_to_pdu(req); 51591fb2b60SLogan Gunthorpe return (void **)(iod->sgt.sgl + blk_rq_nr_phys_segments(req)); 51657dacad5SJay Sternberg } 51757dacad5SJay Sternberg 518955b1b5aSMinwoo Im static inline bool nvme_pci_use_sgls(struct nvme_dev *dev, struct request *req) 519955b1b5aSMinwoo Im { 520a53232cbSKeith Busch struct nvme_queue *nvmeq = req->mq_hctx->driver_data; 52120469a37SKeith Busch int nseg = blk_rq_nr_phys_segments(req); 522955b1b5aSMinwoo Im unsigned int avg_seg_size; 523955b1b5aSMinwoo Im 52420469a37SKeith Busch avg_seg_size = DIV_ROUND_UP(blk_rq_payload_bytes(req), nseg); 525955b1b5aSMinwoo Im 526253a0b76SChaitanya Kulkarni if (!nvme_ctrl_sgl_supported(&dev->ctrl)) 527955b1b5aSMinwoo Im return false; 528a53232cbSKeith Busch if (!nvmeq->qid) 529955b1b5aSMinwoo Im return false; 530955b1b5aSMinwoo Im if (!sgl_threshold || avg_seg_size < sgl_threshold) 531955b1b5aSMinwoo Im return false; 532955b1b5aSMinwoo Im return true; 533955b1b5aSMinwoo Im } 534955b1b5aSMinwoo Im 5359275c206SChristoph Hellwig static void nvme_free_prps(struct nvme_dev *dev, 
struct request *req) 53657dacad5SJay Sternberg { 5376c3c05b0SChaitanya Kulkarni const int last_prp = NVME_CTRL_PAGE_SIZE / sizeof(__le64) - 1; 5389275c206SChristoph Hellwig struct nvme_iod *iod = blk_mq_rq_to_pdu(req); 5399275c206SChristoph Hellwig dma_addr_t dma_addr = iod->first_dma; 54057dacad5SJay Sternberg int i; 54157dacad5SJay Sternberg 542c372cdd1SKeith Busch for (i = 0; i < iod->nr_allocations; i++) { 5439275c206SChristoph Hellwig __le64 *prp_list = nvme_pci_iod_list(req)[i]; 5449275c206SChristoph Hellwig dma_addr_t next_dma_addr = le64_to_cpu(prp_list[last_prp]); 5459275c206SChristoph Hellwig 5469275c206SChristoph Hellwig dma_pool_free(dev->prp_page_pool, prp_list, dma_addr); 5479275c206SChristoph Hellwig dma_addr = next_dma_addr; 548dff824b2SChristoph Hellwig } 5499275c206SChristoph Hellwig } 5509275c206SChristoph Hellwig 5519275c206SChristoph Hellwig static void nvme_free_sgls(struct nvme_dev *dev, struct request *req) 5529275c206SChristoph Hellwig { 5539275c206SChristoph Hellwig const int last_sg = SGES_PER_PAGE - 1; 5549275c206SChristoph Hellwig struct nvme_iod *iod = blk_mq_rq_to_pdu(req); 5559275c206SChristoph Hellwig dma_addr_t dma_addr = iod->first_dma; 5569275c206SChristoph Hellwig int i; 5579275c206SChristoph Hellwig 558c372cdd1SKeith Busch for (i = 0; i < iod->nr_allocations; i++) { 5599275c206SChristoph Hellwig struct nvme_sgl_desc *sg_list = nvme_pci_iod_list(req)[i]; 5609275c206SChristoph Hellwig dma_addr_t next_dma_addr = le64_to_cpu((sg_list[last_sg]).addr); 5619275c206SChristoph Hellwig 5629275c206SChristoph Hellwig dma_pool_free(dev->prp_page_pool, sg_list, dma_addr); 5639275c206SChristoph Hellwig dma_addr = next_dma_addr; 5649275c206SChristoph Hellwig } 5659275c206SChristoph Hellwig } 5669275c206SChristoph Hellwig 5679275c206SChristoph Hellwig static void nvme_unmap_data(struct nvme_dev *dev, struct request *req) 5689275c206SChristoph Hellwig { 5699275c206SChristoph Hellwig struct nvme_iod *iod = blk_mq_rq_to_pdu(req); 5707fe07d14SChristoph Hellwig 5719275c206SChristoph Hellwig if (iod->dma_len) { 5729275c206SChristoph Hellwig dma_unmap_page(dev->dev, iod->first_dma, iod->dma_len, 5739275c206SChristoph Hellwig rq_dma_dir(req)); 5749275c206SChristoph Hellwig return; 5759275c206SChristoph Hellwig } 5769275c206SChristoph Hellwig 57791fb2b60SLogan Gunthorpe WARN_ON_ONCE(!iod->sgt.nents); 5789275c206SChristoph Hellwig 57991fb2b60SLogan Gunthorpe dma_unmap_sgtable(dev->dev, &iod->sgt, rq_dma_dir(req), 0); 58091fb2b60SLogan Gunthorpe 581c372cdd1SKeith Busch if (iod->nr_allocations == 0) 582a7a7cbe3SChaitanya Kulkarni dma_pool_free(dev->prp_small_pool, nvme_pci_iod_list(req)[0], 5839275c206SChristoph Hellwig iod->first_dma); 5849275c206SChristoph Hellwig else if (iod->use_sgl) 5859275c206SChristoph Hellwig nvme_free_sgls(dev, req); 5869275c206SChristoph Hellwig else 5879275c206SChristoph Hellwig nvme_free_prps(dev, req); 58891fb2b60SLogan Gunthorpe mempool_free(iod->sgt.sgl, dev->iod_mempool); 58957dacad5SJay Sternberg } 59057dacad5SJay Sternberg 591d0877473SKeith Busch static void nvme_print_sgl(struct scatterlist *sgl, int nents) 592d0877473SKeith Busch { 593d0877473SKeith Busch int i; 594d0877473SKeith Busch struct scatterlist *sg; 595d0877473SKeith Busch 596d0877473SKeith Busch for_each_sg(sgl, sg, nents, i) { 597d0877473SKeith Busch dma_addr_t phys = sg_phys(sg); 598d0877473SKeith Busch pr_warn("sg[%d] phys_addr:%pad offset:%d length:%d " 599d0877473SKeith Busch "dma_address:%pad dma_length:%d\n", 600d0877473SKeith Busch i, &phys, sg->offset, sg->length, 
&sg_dma_address(sg), 601d0877473SKeith Busch sg_dma_len(sg)); 602d0877473SKeith Busch } 603d0877473SKeith Busch } 604d0877473SKeith Busch 605a7a7cbe3SChaitanya Kulkarni static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev, 606a7a7cbe3SChaitanya Kulkarni struct request *req, struct nvme_rw_command *cmnd) 60757dacad5SJay Sternberg { 608f4800d6dSChristoph Hellwig struct nvme_iod *iod = blk_mq_rq_to_pdu(req); 60957dacad5SJay Sternberg struct dma_pool *pool; 610b131c61dSChristoph Hellwig int length = blk_rq_payload_bytes(req); 61191fb2b60SLogan Gunthorpe struct scatterlist *sg = iod->sgt.sgl; 61257dacad5SJay Sternberg int dma_len = sg_dma_len(sg); 61357dacad5SJay Sternberg u64 dma_addr = sg_dma_address(sg); 6146c3c05b0SChaitanya Kulkarni int offset = dma_addr & (NVME_CTRL_PAGE_SIZE - 1); 61557dacad5SJay Sternberg __le64 *prp_list; 616a7a7cbe3SChaitanya Kulkarni void **list = nvme_pci_iod_list(req); 61757dacad5SJay Sternberg dma_addr_t prp_dma; 61857dacad5SJay Sternberg int nprps, i; 61957dacad5SJay Sternberg 6206c3c05b0SChaitanya Kulkarni length -= (NVME_CTRL_PAGE_SIZE - offset); 6215228b328SJan H. Schönherr if (length <= 0) { 6225228b328SJan H. Schönherr iod->first_dma = 0; 623a7a7cbe3SChaitanya Kulkarni goto done; 6245228b328SJan H. Schönherr } 62557dacad5SJay Sternberg 6266c3c05b0SChaitanya Kulkarni dma_len -= (NVME_CTRL_PAGE_SIZE - offset); 62757dacad5SJay Sternberg if (dma_len) { 6286c3c05b0SChaitanya Kulkarni dma_addr += (NVME_CTRL_PAGE_SIZE - offset); 62957dacad5SJay Sternberg } else { 63057dacad5SJay Sternberg sg = sg_next(sg); 63157dacad5SJay Sternberg dma_addr = sg_dma_address(sg); 63257dacad5SJay Sternberg dma_len = sg_dma_len(sg); 63357dacad5SJay Sternberg } 63457dacad5SJay Sternberg 6356c3c05b0SChaitanya Kulkarni if (length <= NVME_CTRL_PAGE_SIZE) { 63657dacad5SJay Sternberg iod->first_dma = dma_addr; 637a7a7cbe3SChaitanya Kulkarni goto done; 63857dacad5SJay Sternberg } 63957dacad5SJay Sternberg 6406c3c05b0SChaitanya Kulkarni nprps = DIV_ROUND_UP(length, NVME_CTRL_PAGE_SIZE); 64157dacad5SJay Sternberg if (nprps <= (256 / 8)) { 64257dacad5SJay Sternberg pool = dev->prp_small_pool; 643c372cdd1SKeith Busch iod->nr_allocations = 0; 64457dacad5SJay Sternberg } else { 64557dacad5SJay Sternberg pool = dev->prp_page_pool; 646c372cdd1SKeith Busch iod->nr_allocations = 1; 64757dacad5SJay Sternberg } 64857dacad5SJay Sternberg 64969d2b571SChristoph Hellwig prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma); 65057dacad5SJay Sternberg if (!prp_list) { 651c372cdd1SKeith Busch iod->nr_allocations = -1; 65286eea289SKeith Busch return BLK_STS_RESOURCE; 65357dacad5SJay Sternberg } 65457dacad5SJay Sternberg list[0] = prp_list; 65557dacad5SJay Sternberg iod->first_dma = prp_dma; 65657dacad5SJay Sternberg i = 0; 65757dacad5SJay Sternberg for (;;) { 6586c3c05b0SChaitanya Kulkarni if (i == NVME_CTRL_PAGE_SIZE >> 3) { 65957dacad5SJay Sternberg __le64 *old_prp_list = prp_list; 66069d2b571SChristoph Hellwig prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma); 66157dacad5SJay Sternberg if (!prp_list) 662fa073216SChristoph Hellwig goto free_prps; 663c372cdd1SKeith Busch list[iod->nr_allocations++] = prp_list; 66457dacad5SJay Sternberg prp_list[0] = old_prp_list[i - 1]; 66557dacad5SJay Sternberg old_prp_list[i - 1] = cpu_to_le64(prp_dma); 66657dacad5SJay Sternberg i = 1; 66757dacad5SJay Sternberg } 66857dacad5SJay Sternberg prp_list[i++] = cpu_to_le64(dma_addr); 6696c3c05b0SChaitanya Kulkarni dma_len -= NVME_CTRL_PAGE_SIZE; 6706c3c05b0SChaitanya Kulkarni dma_addr += NVME_CTRL_PAGE_SIZE; 
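		/*
		 * dma_addr/dma_len now describe the unconsumed tail of the
		 * current scatterlist segment; once dma_len reaches zero the
		 * next segment is pulled in further down the loop.
		 */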
6716c3c05b0SChaitanya Kulkarni length -= NVME_CTRL_PAGE_SIZE; 67257dacad5SJay Sternberg if (length <= 0) 67357dacad5SJay Sternberg break; 67457dacad5SJay Sternberg if (dma_len > 0) 67557dacad5SJay Sternberg continue; 67686eea289SKeith Busch if (unlikely(dma_len < 0)) 67786eea289SKeith Busch goto bad_sgl; 67857dacad5SJay Sternberg sg = sg_next(sg); 67957dacad5SJay Sternberg dma_addr = sg_dma_address(sg); 68057dacad5SJay Sternberg dma_len = sg_dma_len(sg); 68157dacad5SJay Sternberg } 682a7a7cbe3SChaitanya Kulkarni done: 68391fb2b60SLogan Gunthorpe cmnd->dptr.prp1 = cpu_to_le64(sg_dma_address(iod->sgt.sgl)); 684a7a7cbe3SChaitanya Kulkarni cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma); 68586eea289SKeith Busch return BLK_STS_OK; 686fa073216SChristoph Hellwig free_prps: 687fa073216SChristoph Hellwig nvme_free_prps(dev, req); 688fa073216SChristoph Hellwig return BLK_STS_RESOURCE; 68986eea289SKeith Busch bad_sgl: 69091fb2b60SLogan Gunthorpe WARN(DO_ONCE(nvme_print_sgl, iod->sgt.sgl, iod->sgt.nents), 691d0877473SKeith Busch "Invalid SGL for payload:%d nents:%d\n", 69291fb2b60SLogan Gunthorpe blk_rq_payload_bytes(req), iod->sgt.nents); 69386eea289SKeith Busch return BLK_STS_IOERR; 69457dacad5SJay Sternberg } 69557dacad5SJay Sternberg 696a7a7cbe3SChaitanya Kulkarni static void nvme_pci_sgl_set_data(struct nvme_sgl_desc *sge, 697a7a7cbe3SChaitanya Kulkarni struct scatterlist *sg) 698a7a7cbe3SChaitanya Kulkarni { 699a7a7cbe3SChaitanya Kulkarni sge->addr = cpu_to_le64(sg_dma_address(sg)); 700a7a7cbe3SChaitanya Kulkarni sge->length = cpu_to_le32(sg_dma_len(sg)); 701a7a7cbe3SChaitanya Kulkarni sge->type = NVME_SGL_FMT_DATA_DESC << 4; 702a7a7cbe3SChaitanya Kulkarni } 703a7a7cbe3SChaitanya Kulkarni 704a7a7cbe3SChaitanya Kulkarni static void nvme_pci_sgl_set_seg(struct nvme_sgl_desc *sge, 705a7a7cbe3SChaitanya Kulkarni dma_addr_t dma_addr, int entries) 706a7a7cbe3SChaitanya Kulkarni { 707a7a7cbe3SChaitanya Kulkarni sge->addr = cpu_to_le64(dma_addr); 708a7a7cbe3SChaitanya Kulkarni if (entries < SGES_PER_PAGE) { 709a7a7cbe3SChaitanya Kulkarni sge->length = cpu_to_le32(entries * sizeof(*sge)); 710a7a7cbe3SChaitanya Kulkarni sge->type = NVME_SGL_FMT_LAST_SEG_DESC << 4; 711a7a7cbe3SChaitanya Kulkarni } else { 71284173423SKeith Busch sge->length = cpu_to_le32(NVME_CTRL_PAGE_SIZE); 713a7a7cbe3SChaitanya Kulkarni sge->type = NVME_SGL_FMT_SEG_DESC << 4; 714a7a7cbe3SChaitanya Kulkarni } 715a7a7cbe3SChaitanya Kulkarni } 716a7a7cbe3SChaitanya Kulkarni 717a7a7cbe3SChaitanya Kulkarni static blk_status_t nvme_pci_setup_sgls(struct nvme_dev *dev, 71891fb2b60SLogan Gunthorpe struct request *req, struct nvme_rw_command *cmd) 719a7a7cbe3SChaitanya Kulkarni { 720a7a7cbe3SChaitanya Kulkarni struct nvme_iod *iod = blk_mq_rq_to_pdu(req); 721a7a7cbe3SChaitanya Kulkarni struct dma_pool *pool; 722a7a7cbe3SChaitanya Kulkarni struct nvme_sgl_desc *sg_list; 72391fb2b60SLogan Gunthorpe struct scatterlist *sg = iod->sgt.sgl; 72491fb2b60SLogan Gunthorpe unsigned int entries = iod->sgt.nents; 725a7a7cbe3SChaitanya Kulkarni dma_addr_t sgl_dma; 726b0f2853bSChristoph Hellwig int i = 0; 727a7a7cbe3SChaitanya Kulkarni 728a7a7cbe3SChaitanya Kulkarni /* setting the transfer type as SGL */ 729a7a7cbe3SChaitanya Kulkarni cmd->flags = NVME_CMD_SGL_METABUF; 730a7a7cbe3SChaitanya Kulkarni 731b0f2853bSChristoph Hellwig if (entries == 1) { 732a7a7cbe3SChaitanya Kulkarni nvme_pci_sgl_set_data(&cmd->dptr.sgl, sg); 733a7a7cbe3SChaitanya Kulkarni return BLK_STS_OK; 734a7a7cbe3SChaitanya Kulkarni } 735a7a7cbe3SChaitanya Kulkarni 736a7a7cbe3SChaitanya Kulkarni 
if (entries <= (256 / sizeof(struct nvme_sgl_desc))) { 737a7a7cbe3SChaitanya Kulkarni pool = dev->prp_small_pool; 738c372cdd1SKeith Busch iod->nr_allocations = 0; 739a7a7cbe3SChaitanya Kulkarni } else { 740a7a7cbe3SChaitanya Kulkarni pool = dev->prp_page_pool; 741c372cdd1SKeith Busch iod->nr_allocations = 1; 742a7a7cbe3SChaitanya Kulkarni } 743a7a7cbe3SChaitanya Kulkarni 744a7a7cbe3SChaitanya Kulkarni sg_list = dma_pool_alloc(pool, GFP_ATOMIC, &sgl_dma); 745a7a7cbe3SChaitanya Kulkarni if (!sg_list) { 746c372cdd1SKeith Busch iod->nr_allocations = -1; 747a7a7cbe3SChaitanya Kulkarni return BLK_STS_RESOURCE; 748a7a7cbe3SChaitanya Kulkarni } 749a7a7cbe3SChaitanya Kulkarni 750a7a7cbe3SChaitanya Kulkarni nvme_pci_iod_list(req)[0] = sg_list; 751a7a7cbe3SChaitanya Kulkarni iod->first_dma = sgl_dma; 752a7a7cbe3SChaitanya Kulkarni 753a7a7cbe3SChaitanya Kulkarni nvme_pci_sgl_set_seg(&cmd->dptr.sgl, sgl_dma, entries); 754a7a7cbe3SChaitanya Kulkarni 755a7a7cbe3SChaitanya Kulkarni do { 756a7a7cbe3SChaitanya Kulkarni if (i == SGES_PER_PAGE) { 757a7a7cbe3SChaitanya Kulkarni struct nvme_sgl_desc *old_sg_desc = sg_list; 758a7a7cbe3SChaitanya Kulkarni struct nvme_sgl_desc *link = &old_sg_desc[i - 1]; 759a7a7cbe3SChaitanya Kulkarni 760a7a7cbe3SChaitanya Kulkarni sg_list = dma_pool_alloc(pool, GFP_ATOMIC, &sgl_dma); 761a7a7cbe3SChaitanya Kulkarni if (!sg_list) 762fa073216SChristoph Hellwig goto free_sgls; 763a7a7cbe3SChaitanya Kulkarni 764a7a7cbe3SChaitanya Kulkarni i = 0; 765c372cdd1SKeith Busch nvme_pci_iod_list(req)[iod->nr_allocations++] = sg_list; 766a7a7cbe3SChaitanya Kulkarni sg_list[i++] = *link; 767a7a7cbe3SChaitanya Kulkarni nvme_pci_sgl_set_seg(link, sgl_dma, entries); 768a7a7cbe3SChaitanya Kulkarni } 769a7a7cbe3SChaitanya Kulkarni 770a7a7cbe3SChaitanya Kulkarni nvme_pci_sgl_set_data(&sg_list[i++], sg); 771a7a7cbe3SChaitanya Kulkarni sg = sg_next(sg); 772b0f2853bSChristoph Hellwig } while (--entries > 0); 773a7a7cbe3SChaitanya Kulkarni 774a7a7cbe3SChaitanya Kulkarni return BLK_STS_OK; 775fa073216SChristoph Hellwig free_sgls: 776fa073216SChristoph Hellwig nvme_free_sgls(dev, req); 777fa073216SChristoph Hellwig return BLK_STS_RESOURCE; 778a7a7cbe3SChaitanya Kulkarni } 779a7a7cbe3SChaitanya Kulkarni 780dff824b2SChristoph Hellwig static blk_status_t nvme_setup_prp_simple(struct nvme_dev *dev, 781dff824b2SChristoph Hellwig struct request *req, struct nvme_rw_command *cmnd, 782dff824b2SChristoph Hellwig struct bio_vec *bv) 783dff824b2SChristoph Hellwig { 784dff824b2SChristoph Hellwig struct nvme_iod *iod = blk_mq_rq_to_pdu(req); 7856c3c05b0SChaitanya Kulkarni unsigned int offset = bv->bv_offset & (NVME_CTRL_PAGE_SIZE - 1); 7866c3c05b0SChaitanya Kulkarni unsigned int first_prp_len = NVME_CTRL_PAGE_SIZE - offset; 787dff824b2SChristoph Hellwig 788dff824b2SChristoph Hellwig iod->first_dma = dma_map_bvec(dev->dev, bv, rq_dma_dir(req), 0); 789dff824b2SChristoph Hellwig if (dma_mapping_error(dev->dev, iod->first_dma)) 790dff824b2SChristoph Hellwig return BLK_STS_RESOURCE; 791dff824b2SChristoph Hellwig iod->dma_len = bv->bv_len; 792dff824b2SChristoph Hellwig 793dff824b2SChristoph Hellwig cmnd->dptr.prp1 = cpu_to_le64(iod->first_dma); 794dff824b2SChristoph Hellwig if (bv->bv_len > first_prp_len) 795dff824b2SChristoph Hellwig cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma + first_prp_len); 796a56ea614SLei Rao else 797a56ea614SLei Rao cmnd->dptr.prp2 = 0; 798359c1f88SBaolin Wang return BLK_STS_OK; 799dff824b2SChristoph Hellwig } 800dff824b2SChristoph Hellwig 80129791057SChristoph Hellwig static blk_status_t 
nvme_setup_sgl_simple(struct nvme_dev *dev, 80229791057SChristoph Hellwig struct request *req, struct nvme_rw_command *cmnd, 80329791057SChristoph Hellwig struct bio_vec *bv) 80429791057SChristoph Hellwig { 80529791057SChristoph Hellwig struct nvme_iod *iod = blk_mq_rq_to_pdu(req); 80629791057SChristoph Hellwig 80729791057SChristoph Hellwig iod->first_dma = dma_map_bvec(dev->dev, bv, rq_dma_dir(req), 0); 80829791057SChristoph Hellwig if (dma_mapping_error(dev->dev, iod->first_dma)) 80929791057SChristoph Hellwig return BLK_STS_RESOURCE; 81029791057SChristoph Hellwig iod->dma_len = bv->bv_len; 81129791057SChristoph Hellwig 812049bf372SKlaus Birkelund Jensen cmnd->flags = NVME_CMD_SGL_METABUF; 81329791057SChristoph Hellwig cmnd->dptr.sgl.addr = cpu_to_le64(iod->first_dma); 81429791057SChristoph Hellwig cmnd->dptr.sgl.length = cpu_to_le32(iod->dma_len); 81529791057SChristoph Hellwig cmnd->dptr.sgl.type = NVME_SGL_FMT_DATA_DESC << 4; 816359c1f88SBaolin Wang return BLK_STS_OK; 81729791057SChristoph Hellwig } 81829791057SChristoph Hellwig 819fc17b653SChristoph Hellwig static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req, 820b131c61dSChristoph Hellwig struct nvme_command *cmnd) 82157dacad5SJay Sternberg { 822f4800d6dSChristoph Hellwig struct nvme_iod *iod = blk_mq_rq_to_pdu(req); 82370479b71SChristoph Hellwig blk_status_t ret = BLK_STS_RESOURCE; 82491fb2b60SLogan Gunthorpe int rc; 82557dacad5SJay Sternberg 826dff824b2SChristoph Hellwig if (blk_rq_nr_phys_segments(req) == 1) { 827a53232cbSKeith Busch struct nvme_queue *nvmeq = req->mq_hctx->driver_data; 828dff824b2SChristoph Hellwig struct bio_vec bv = req_bvec(req); 829dff824b2SChristoph Hellwig 830dff824b2SChristoph Hellwig if (!is_pci_p2pdma_page(bv.bv_page)) { 8316c3c05b0SChaitanya Kulkarni if (bv.bv_offset + bv.bv_len <= NVME_CTRL_PAGE_SIZE * 2) 832dff824b2SChristoph Hellwig return nvme_setup_prp_simple(dev, req, 833dff824b2SChristoph Hellwig &cmnd->rw, &bv); 83429791057SChristoph Hellwig 835a53232cbSKeith Busch if (nvmeq->qid && sgl_threshold && 836253a0b76SChaitanya Kulkarni nvme_ctrl_sgl_supported(&dev->ctrl)) 83729791057SChristoph Hellwig return nvme_setup_sgl_simple(dev, req, 83829791057SChristoph Hellwig &cmnd->rw, &bv); 839dff824b2SChristoph Hellwig } 840dff824b2SChristoph Hellwig } 841dff824b2SChristoph Hellwig 842dff824b2SChristoph Hellwig iod->dma_len = 0; 84391fb2b60SLogan Gunthorpe iod->sgt.sgl = mempool_alloc(dev->iod_mempool, GFP_ATOMIC); 84491fb2b60SLogan Gunthorpe if (!iod->sgt.sgl) 8459b048119SChristoph Hellwig return BLK_STS_RESOURCE; 84691fb2b60SLogan Gunthorpe sg_init_table(iod->sgt.sgl, blk_rq_nr_phys_segments(req)); 84791fb2b60SLogan Gunthorpe iod->sgt.orig_nents = blk_rq_map_sg(req->q, req, iod->sgt.sgl); 84891fb2b60SLogan Gunthorpe if (!iod->sgt.orig_nents) 849fa073216SChristoph Hellwig goto out_free_sg; 850ba1ca37eSChristoph Hellwig 85191fb2b60SLogan Gunthorpe rc = dma_map_sgtable(dev->dev, &iod->sgt, rq_dma_dir(req), 85291fb2b60SLogan Gunthorpe DMA_ATTR_NO_WARN); 85391fb2b60SLogan Gunthorpe if (rc) { 85491fb2b60SLogan Gunthorpe if (rc == -EREMOTEIO) 85591fb2b60SLogan Gunthorpe ret = BLK_STS_TARGET; 856fa073216SChristoph Hellwig goto out_free_sg; 85791fb2b60SLogan Gunthorpe } 858ba1ca37eSChristoph Hellwig 85970479b71SChristoph Hellwig iod->use_sgl = nvme_pci_use_sgls(dev, req); 860955b1b5aSMinwoo Im if (iod->use_sgl) 86191fb2b60SLogan Gunthorpe ret = nvme_pci_setup_sgls(dev, req, &cmnd->rw); 862a7a7cbe3SChaitanya Kulkarni else 863a7a7cbe3SChaitanya Kulkarni ret = nvme_pci_setup_prps(dev, req, 
&cmnd->rw); 8644aedb705SChristoph Hellwig if (ret != BLK_STS_OK) 865fa073216SChristoph Hellwig goto out_unmap_sg; 866fa073216SChristoph Hellwig return BLK_STS_OK; 867fa073216SChristoph Hellwig 868fa073216SChristoph Hellwig out_unmap_sg: 86991fb2b60SLogan Gunthorpe dma_unmap_sgtable(dev->dev, &iod->sgt, rq_dma_dir(req), 0); 870fa073216SChristoph Hellwig out_free_sg: 87191fb2b60SLogan Gunthorpe mempool_free(iod->sgt.sgl, dev->iod_mempool); 872ba1ca37eSChristoph Hellwig return ret; 87357dacad5SJay Sternberg } 87457dacad5SJay Sternberg 8754aedb705SChristoph Hellwig static blk_status_t nvme_map_metadata(struct nvme_dev *dev, struct request *req, 8764aedb705SChristoph Hellwig struct nvme_command *cmnd) 8774aedb705SChristoph Hellwig { 8784aedb705SChristoph Hellwig struct nvme_iod *iod = blk_mq_rq_to_pdu(req); 8794aedb705SChristoph Hellwig 8804aedb705SChristoph Hellwig iod->meta_dma = dma_map_bvec(dev->dev, rq_integrity_vec(req), 8814aedb705SChristoph Hellwig rq_dma_dir(req), 0); 8824aedb705SChristoph Hellwig if (dma_mapping_error(dev->dev, iod->meta_dma)) 8834aedb705SChristoph Hellwig return BLK_STS_IOERR; 8844aedb705SChristoph Hellwig cmnd->rw.metadata = cpu_to_le64(iod->meta_dma); 885359c1f88SBaolin Wang return BLK_STS_OK; 8864aedb705SChristoph Hellwig } 8874aedb705SChristoph Hellwig 88862451a2bSJens Axboe static blk_status_t nvme_prep_rq(struct nvme_dev *dev, struct request *req) 88962451a2bSJens Axboe { 89062451a2bSJens Axboe struct nvme_iod *iod = blk_mq_rq_to_pdu(req); 89162451a2bSJens Axboe blk_status_t ret; 89262451a2bSJens Axboe 89352da4f3fSKeith Busch iod->aborted = false; 894c372cdd1SKeith Busch iod->nr_allocations = -1; 89591fb2b60SLogan Gunthorpe iod->sgt.nents = 0; 89662451a2bSJens Axboe 89762451a2bSJens Axboe ret = nvme_setup_cmd(req->q->queuedata, req); 89862451a2bSJens Axboe if (ret) 89962451a2bSJens Axboe return ret; 90062451a2bSJens Axboe 90162451a2bSJens Axboe if (blk_rq_nr_phys_segments(req)) { 90262451a2bSJens Axboe ret = nvme_map_data(dev, req, &iod->cmd); 90362451a2bSJens Axboe if (ret) 90462451a2bSJens Axboe goto out_free_cmd; 90562451a2bSJens Axboe } 90662451a2bSJens Axboe 90762451a2bSJens Axboe if (blk_integrity_rq(req)) { 90862451a2bSJens Axboe ret = nvme_map_metadata(dev, req, &iod->cmd); 90962451a2bSJens Axboe if (ret) 91062451a2bSJens Axboe goto out_unmap_data; 91162451a2bSJens Axboe } 91262451a2bSJens Axboe 9136887fc64SSagi Grimberg nvme_start_request(req); 91462451a2bSJens Axboe return BLK_STS_OK; 91562451a2bSJens Axboe out_unmap_data: 91662451a2bSJens Axboe nvme_unmap_data(dev, req); 91762451a2bSJens Axboe out_free_cmd: 91862451a2bSJens Axboe nvme_cleanup_cmd(req); 91962451a2bSJens Axboe return ret; 92062451a2bSJens Axboe } 92162451a2bSJens Axboe 92257dacad5SJay Sternberg /* 92357dacad5SJay Sternberg * NOTE: ns is NULL when called on the admin queue. 
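 * Admin commands arrive here through dev->admin_tagset, whose request
 * queue has no namespace attached, hence the NULL ns.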
92457dacad5SJay Sternberg */ 925fc17b653SChristoph Hellwig static blk_status_t nvme_queue_rq(struct blk_mq_hw_ctx *hctx, 92657dacad5SJay Sternberg const struct blk_mq_queue_data *bd) 92757dacad5SJay Sternberg { 92857dacad5SJay Sternberg struct nvme_queue *nvmeq = hctx->driver_data; 92957dacad5SJay Sternberg struct nvme_dev *dev = nvmeq->dev; 93057dacad5SJay Sternberg struct request *req = bd->rq; 9319b048119SChristoph Hellwig struct nvme_iod *iod = blk_mq_rq_to_pdu(req); 932ebe6d874SChristoph Hellwig blk_status_t ret; 93357dacad5SJay Sternberg 934d1f06f4aSJens Axboe /* 935d1f06f4aSJens Axboe * We should not need to do this, but we're still using this to 936d1f06f4aSJens Axboe * ensure we can drain requests on a dying queue. 937d1f06f4aSJens Axboe */ 9384e224106SChristoph Hellwig if (unlikely(!test_bit(NVMEQ_ENABLED, &nvmeq->flags))) 939d1f06f4aSJens Axboe return BLK_STS_IOERR; 940d1f06f4aSJens Axboe 94162451a2bSJens Axboe if (unlikely(!nvme_check_ready(&dev->ctrl, req, true))) 942d4060d2bSTao Chiu return nvme_fail_nonready_command(&dev->ctrl, req); 943d4060d2bSTao Chiu 94462451a2bSJens Axboe ret = nvme_prep_rq(dev, req); 94562451a2bSJens Axboe if (unlikely(ret)) 946f4800d6dSChristoph Hellwig return ret; 9473233b94cSJens Axboe spin_lock(&nvmeq->sq_lock); 9483233b94cSJens Axboe nvme_sq_copy_cmd(nvmeq, &iod->cmd); 9493233b94cSJens Axboe nvme_write_sq_db(nvmeq, bd->last); 9503233b94cSJens Axboe spin_unlock(&nvmeq->sq_lock); 951fc17b653SChristoph Hellwig return BLK_STS_OK; 95257dacad5SJay Sternberg } 95357dacad5SJay Sternberg 954d62cbcf6SJens Axboe static void nvme_submit_cmds(struct nvme_queue *nvmeq, struct request **rqlist) 955d62cbcf6SJens Axboe { 956d62cbcf6SJens Axboe spin_lock(&nvmeq->sq_lock); 957d62cbcf6SJens Axboe while (!rq_list_empty(*rqlist)) { 958d62cbcf6SJens Axboe struct request *req = rq_list_pop(rqlist); 959d62cbcf6SJens Axboe struct nvme_iod *iod = blk_mq_rq_to_pdu(req); 960d62cbcf6SJens Axboe 961d62cbcf6SJens Axboe nvme_sq_copy_cmd(nvmeq, &iod->cmd); 962d62cbcf6SJens Axboe } 963d62cbcf6SJens Axboe nvme_write_sq_db(nvmeq, true); 964d62cbcf6SJens Axboe spin_unlock(&nvmeq->sq_lock); 965d62cbcf6SJens Axboe } 966d62cbcf6SJens Axboe 967d62cbcf6SJens Axboe static bool nvme_prep_rq_batch(struct nvme_queue *nvmeq, struct request *req) 968d62cbcf6SJens Axboe { 969d62cbcf6SJens Axboe /* 970d62cbcf6SJens Axboe * We should not need to do this, but we're still using this to 971d62cbcf6SJens Axboe * ensure we can drain requests on a dying queue. 
972d62cbcf6SJens Axboe */ 973d62cbcf6SJens Axboe if (unlikely(!test_bit(NVMEQ_ENABLED, &nvmeq->flags))) 974d62cbcf6SJens Axboe return false; 975d62cbcf6SJens Axboe if (unlikely(!nvme_check_ready(&nvmeq->dev->ctrl, req, true))) 976d62cbcf6SJens Axboe return false; 977d62cbcf6SJens Axboe 978d62cbcf6SJens Axboe req->mq_hctx->tags->rqs[req->tag] = req; 979d62cbcf6SJens Axboe return nvme_prep_rq(nvmeq->dev, req) == BLK_STS_OK; 980d62cbcf6SJens Axboe } 981d62cbcf6SJens Axboe 982d62cbcf6SJens Axboe static void nvme_queue_rqs(struct request **rqlist) 983d62cbcf6SJens Axboe { 9846bfec799SKeith Busch struct request *req, *next, *prev = NULL; 985d62cbcf6SJens Axboe struct request *requeue_list = NULL; 986d62cbcf6SJens Axboe 9876bfec799SKeith Busch rq_list_for_each_safe(rqlist, req, next) { 988d62cbcf6SJens Axboe struct nvme_queue *nvmeq = req->mq_hctx->driver_data; 989d62cbcf6SJens Axboe 990d62cbcf6SJens Axboe if (!nvme_prep_rq_batch(nvmeq, req)) { 991d62cbcf6SJens Axboe /* detach 'req' and add to remainder list */ 9926bfec799SKeith Busch rq_list_move(rqlist, &requeue_list, req, prev); 9936bfec799SKeith Busch 9946bfec799SKeith Busch req = prev; 9956bfec799SKeith Busch if (!req) 9966bfec799SKeith Busch continue; 997d62cbcf6SJens Axboe } 998d62cbcf6SJens Axboe 9996bfec799SKeith Busch if (!next || req->mq_hctx != next->mq_hctx) { 1000d62cbcf6SJens Axboe /* detach rest of list, and submit */ 10016bfec799SKeith Busch req->rq_next = NULL; 1002d62cbcf6SJens Axboe nvme_submit_cmds(nvmeq, rqlist); 10036bfec799SKeith Busch *rqlist = next; 10046bfec799SKeith Busch prev = NULL; 10056bfec799SKeith Busch } else 10066bfec799SKeith Busch prev = req; 1007d62cbcf6SJens Axboe } 1008d62cbcf6SJens Axboe 1009d62cbcf6SJens Axboe *rqlist = requeue_list; 1010d62cbcf6SJens Axboe } 1011d62cbcf6SJens Axboe 1012c234a653SJens Axboe static __always_inline void nvme_pci_unmap_rq(struct request *req) 1013eee417b0SChristoph Hellwig { 1014a53232cbSKeith Busch struct nvme_queue *nvmeq = req->mq_hctx->driver_data; 1015a53232cbSKeith Busch struct nvme_dev *dev = nvmeq->dev; 1016eee417b0SChristoph Hellwig 1017a53232cbSKeith Busch if (blk_integrity_rq(req)) { 1018a53232cbSKeith Busch struct nvme_iod *iod = blk_mq_rq_to_pdu(req); 1019a53232cbSKeith Busch 10204aedb705SChristoph Hellwig dma_unmap_page(dev->dev, iod->meta_dma, 10214aedb705SChristoph Hellwig rq_integrity_vec(req)->bv_len, rq_data_dir(req)); 1022a53232cbSKeith Busch } 1023a53232cbSKeith Busch 1024b15c592dSChristoph Hellwig if (blk_rq_nr_phys_segments(req)) 10254aedb705SChristoph Hellwig nvme_unmap_data(dev, req); 1026c234a653SJens Axboe } 1027c234a653SJens Axboe 1028c234a653SJens Axboe static void nvme_pci_complete_rq(struct request *req) 1029c234a653SJens Axboe { 1030c234a653SJens Axboe nvme_pci_unmap_rq(req); 103177f02a7aSChristoph Hellwig nvme_complete_rq(req); 103257dacad5SJay Sternberg } 103357dacad5SJay Sternberg 1034c234a653SJens Axboe static void nvme_pci_complete_batch(struct io_comp_batch *iob) 1035c234a653SJens Axboe { 1036c234a653SJens Axboe nvme_complete_batch(iob, nvme_pci_unmap_rq); 1037c234a653SJens Axboe } 1038c234a653SJens Axboe 1039d783e0bdSMarta Rybczynska /* We read the CQE phase first to check if the rest of the entry is valid */ 1040750dde44SChristoph Hellwig static inline bool nvme_cqe_pending(struct nvme_queue *nvmeq) 1041d783e0bdSMarta Rybczynska { 104274943d45SKeith Busch struct nvme_completion *hcqe = &nvmeq->cqes[nvmeq->cq_head]; 104374943d45SKeith Busch 104474943d45SKeith Busch return (le16_to_cpu(READ_ONCE(hcqe->status)) & 1) == nvmeq->cq_phase; 
1045d783e0bdSMarta Rybczynska } 1046d783e0bdSMarta Rybczynska 1047eb281c82SSagi Grimberg static inline void nvme_ring_cq_doorbell(struct nvme_queue *nvmeq) 104857dacad5SJay Sternberg { 1049eb281c82SSagi Grimberg u16 head = nvmeq->cq_head; 105057dacad5SJay Sternberg 1051eb281c82SSagi Grimberg if (nvme_dbbuf_update_and_check_event(head, nvmeq->dbbuf_cq_db, 1052eb281c82SSagi Grimberg nvmeq->dbbuf_cq_ei)) 1053eb281c82SSagi Grimberg writel(head, nvmeq->q_db + nvmeq->dev->db_stride); 1054eb281c82SSagi Grimberg } 1055adf68f21SChristoph Hellwig 1056cfa27356SChristoph Hellwig static inline struct blk_mq_tags *nvme_queue_tagset(struct nvme_queue *nvmeq) 1057cfa27356SChristoph Hellwig { 1058cfa27356SChristoph Hellwig if (!nvmeq->qid) 1059cfa27356SChristoph Hellwig return nvmeq->dev->admin_tagset.tags[0]; 1060cfa27356SChristoph Hellwig return nvmeq->dev->tagset.tags[nvmeq->qid - 1]; 1061cfa27356SChristoph Hellwig } 1062cfa27356SChristoph Hellwig 1063c234a653SJens Axboe static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, 1064c234a653SJens Axboe struct io_comp_batch *iob, u16 idx) 106557dacad5SJay Sternberg { 106674943d45SKeith Busch struct nvme_completion *cqe = &nvmeq->cqes[idx]; 106762df8016SLalithambika Krishnakumar __u16 command_id = READ_ONCE(cqe->command_id); 106857dacad5SJay Sternberg struct request *req; 1069adf68f21SChristoph Hellwig 1070adf68f21SChristoph Hellwig /* 1071adf68f21SChristoph Hellwig * AEN requests are special as they don't time out and can 1072adf68f21SChristoph Hellwig * survive any kind of queue freeze and often don't respond to 1073adf68f21SChristoph Hellwig * aborts. We don't even bother to allocate a struct request 1074adf68f21SChristoph Hellwig * for them but rather special case them here. 1075adf68f21SChristoph Hellwig */ 107662df8016SLalithambika Krishnakumar if (unlikely(nvme_is_aen_req(nvmeq->qid, command_id))) { 10777bf58533SChristoph Hellwig nvme_complete_async_event(&nvmeq->dev->ctrl, 107883a12fb7SSagi Grimberg cqe->status, &cqe->result); 1079a0fa9647SJens Axboe return; 108057dacad5SJay Sternberg } 108157dacad5SJay Sternberg 1082e7006de6SSagi Grimberg req = nvme_find_rq(nvme_queue_tagset(nvmeq), command_id); 108350b7c243SXianting Tian if (unlikely(!req)) { 108450b7c243SXianting Tian dev_warn(nvmeq->dev->ctrl.device, 108550b7c243SXianting Tian "invalid id %d completed on queue %d\n", 108662df8016SLalithambika Krishnakumar command_id, le16_to_cpu(cqe->sq_id)); 108750b7c243SXianting Tian return; 108850b7c243SXianting Tian } 108950b7c243SXianting Tian 1090604c01d5Syupeng trace_nvme_sq(req, cqe->sq_head, nvmeq->sq_tail); 1091c234a653SJens Axboe if (!nvme_try_complete_req(req, cqe->status, cqe->result) && 1092c234a653SJens Axboe !blk_mq_add_to_batch(req, iob, nvme_req(req)->status, 1093c234a653SJens Axboe nvme_pci_complete_batch)) 1094ff029451SChristoph Hellwig nvme_pci_complete_rq(req); 109583a12fb7SSagi Grimberg } 109657dacad5SJay Sternberg 10975cb525c8SJens Axboe static inline void nvme_update_cq_head(struct nvme_queue *nvmeq) 10985cb525c8SJens Axboe { 1099a0aac973SJK Kim u32 tmp = nvmeq->cq_head + 1; 1100a8de6639SAlexey Dobriyan 1101a8de6639SAlexey Dobriyan if (tmp == nvmeq->q_depth) { 1102920d13a8SSagi Grimberg nvmeq->cq_head = 0; 1103e2a366a4SAlexey Dobriyan nvmeq->cq_phase ^= 1; 1104a8de6639SAlexey Dobriyan } else { 1105a8de6639SAlexey Dobriyan nvmeq->cq_head = tmp; 1106920d13a8SSagi Grimberg } 1107a0fa9647SJens Axboe } 1108a0fa9647SJens Axboe 1109c234a653SJens Axboe static inline int nvme_poll_cq(struct nvme_queue *nvmeq, 1110c234a653SJens Axboe struct 
io_comp_batch *iob) 1111a0fa9647SJens Axboe { 11121052b8acSJens Axboe int found = 0; 111383a12fb7SSagi Grimberg 11141052b8acSJens Axboe while (nvme_cqe_pending(nvmeq)) { 11151052b8acSJens Axboe found++; 1116b69e2ef2SKeith Busch /* 1117b69e2ef2SKeith Busch * load-load control dependency between phase and the rest of 1118b69e2ef2SKeith Busch * the cqe requires a full read memory barrier 1119b69e2ef2SKeith Busch */ 1120b69e2ef2SKeith Busch dma_rmb(); 1121c234a653SJens Axboe nvme_handle_cqe(nvmeq, iob, nvmeq->cq_head); 11225cb525c8SJens Axboe nvme_update_cq_head(nvmeq); 112357dacad5SJay Sternberg } 112457dacad5SJay Sternberg 1125324b494cSKeith Busch if (found) 1126eb281c82SSagi Grimberg nvme_ring_cq_doorbell(nvmeq); 11275cb525c8SJens Axboe return found; 112857dacad5SJay Sternberg } 112957dacad5SJay Sternberg 113057dacad5SJay Sternberg static irqreturn_t nvme_irq(int irq, void *data) 113157dacad5SJay Sternberg { 113257dacad5SJay Sternberg struct nvme_queue *nvmeq = data; 11334f502245SJens Axboe DEFINE_IO_COMP_BATCH(iob); 11345cb525c8SJens Axboe 11354f502245SJens Axboe if (nvme_poll_cq(nvmeq, &iob)) { 11364f502245SJens Axboe if (!rq_list_empty(iob.req_list)) 11374f502245SJens Axboe nvme_pci_complete_batch(&iob); 113805fae499SChaitanya Kulkarni return IRQ_HANDLED; 11394f502245SJens Axboe } 114005fae499SChaitanya Kulkarni return IRQ_NONE; 114157dacad5SJay Sternberg } 114257dacad5SJay Sternberg 114357dacad5SJay Sternberg static irqreturn_t nvme_irq_check(int irq, void *data) 114457dacad5SJay Sternberg { 114557dacad5SJay Sternberg struct nvme_queue *nvmeq = data; 11464e523547SBaolin Wang 1147750dde44SChristoph Hellwig if (nvme_cqe_pending(nvmeq)) 114857dacad5SJay Sternberg return IRQ_WAKE_THREAD; 1149d783e0bdSMarta Rybczynska return IRQ_NONE; 115057dacad5SJay Sternberg } 115157dacad5SJay Sternberg 11520b2a8a9fSChristoph Hellwig /* 1153fa059b85SKeith Busch * Poll for completions for any interrupt driven queue 11540b2a8a9fSChristoph Hellwig * Can be called from any context. 
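 * (Editor's note, illustrative: the queue's interrupt line is masked with
 * disable_irq() around the poll below, so this cannot race with nvme_irq()
 * advancing cq_head on the same queue.)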
11550b2a8a9fSChristoph Hellwig */ 1156fa059b85SKeith Busch static void nvme_poll_irqdisable(struct nvme_queue *nvmeq) 1157a0fa9647SJens Axboe { 11583a7afd8eSChristoph Hellwig struct pci_dev *pdev = to_pci_dev(nvmeq->dev->dev); 1159a0fa9647SJens Axboe 1160fa059b85SKeith Busch WARN_ON_ONCE(test_bit(NVMEQ_POLLED, &nvmeq->flags)); 1161fa059b85SKeith Busch 11623a7afd8eSChristoph Hellwig disable_irq(pci_irq_vector(pdev, nvmeq->cq_vector)); 1163c234a653SJens Axboe nvme_poll_cq(nvmeq, NULL); 11643a7afd8eSChristoph Hellwig enable_irq(pci_irq_vector(pdev, nvmeq->cq_vector)); 116591a509f8SChristoph Hellwig } 1166442e19b7SSagi Grimberg 11675a72e899SJens Axboe static int nvme_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob) 11687776db1cSKeith Busch { 11697776db1cSKeith Busch struct nvme_queue *nvmeq = hctx->driver_data; 1170dabcefabSJens Axboe bool found; 1171dabcefabSJens Axboe 1172dabcefabSJens Axboe if (!nvme_cqe_pending(nvmeq)) 1173dabcefabSJens Axboe return 0; 1174dabcefabSJens Axboe 11753a7afd8eSChristoph Hellwig spin_lock(&nvmeq->cq_poll_lock); 1176c234a653SJens Axboe found = nvme_poll_cq(nvmeq, iob); 11773a7afd8eSChristoph Hellwig spin_unlock(&nvmeq->cq_poll_lock); 1178dabcefabSJens Axboe 1179dabcefabSJens Axboe return found; 1180dabcefabSJens Axboe } 1181dabcefabSJens Axboe 1182ad22c355SKeith Busch static void nvme_pci_submit_async_event(struct nvme_ctrl *ctrl) 118357dacad5SJay Sternberg { 1184f866fc42SChristoph Hellwig struct nvme_dev *dev = to_nvme_dev(ctrl); 1185147b27e4SSagi Grimberg struct nvme_queue *nvmeq = &dev->queues[0]; 1186f66e2804SChaitanya Kulkarni struct nvme_command c = { }; 118757dacad5SJay Sternberg 118857dacad5SJay Sternberg c.common.opcode = nvme_admin_async_event; 1189ad22c355SKeith Busch c.common.command_id = NVME_AQ_BLK_MQ_DEPTH; 11903233b94cSJens Axboe 11913233b94cSJens Axboe spin_lock(&nvmeq->sq_lock); 11923233b94cSJens Axboe nvme_sq_copy_cmd(nvmeq, &c); 11933233b94cSJens Axboe nvme_write_sq_db(nvmeq, true); 11943233b94cSJens Axboe spin_unlock(&nvmeq->sq_lock); 119557dacad5SJay Sternberg } 119657dacad5SJay Sternberg 119757dacad5SJay Sternberg static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id) 119857dacad5SJay Sternberg { 1199f66e2804SChaitanya Kulkarni struct nvme_command c = { }; 120057dacad5SJay Sternberg 120157dacad5SJay Sternberg c.delete_queue.opcode = opcode; 120257dacad5SJay Sternberg c.delete_queue.qid = cpu_to_le16(id); 120357dacad5SJay Sternberg 12041c63dc66SChristoph Hellwig return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0); 120557dacad5SJay Sternberg } 120657dacad5SJay Sternberg 120757dacad5SJay Sternberg static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid, 1208a8e3e0bbSJianchao Wang struct nvme_queue *nvmeq, s16 vector) 120957dacad5SJay Sternberg { 1210f66e2804SChaitanya Kulkarni struct nvme_command c = { }; 12114b04cc6aSJens Axboe int flags = NVME_QUEUE_PHYS_CONTIG; 12124b04cc6aSJens Axboe 12137c349ddeSKeith Busch if (!test_bit(NVMEQ_POLLED, &nvmeq->flags)) 12144b04cc6aSJens Axboe flags |= NVME_CQ_IRQ_ENABLED; 121557dacad5SJay Sternberg 121657dacad5SJay Sternberg /* 121716772ae6SMinwoo Im * Note: we (ab)use the fact that the prp fields survive if no data 121857dacad5SJay Sternberg * is attached to the request. 
121957dacad5SJay Sternberg */ 122057dacad5SJay Sternberg c.create_cq.opcode = nvme_admin_create_cq; 122157dacad5SJay Sternberg c.create_cq.prp1 = cpu_to_le64(nvmeq->cq_dma_addr); 122257dacad5SJay Sternberg c.create_cq.cqid = cpu_to_le16(qid); 122357dacad5SJay Sternberg c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1); 122457dacad5SJay Sternberg c.create_cq.cq_flags = cpu_to_le16(flags); 1225a8e3e0bbSJianchao Wang c.create_cq.irq_vector = cpu_to_le16(vector); 122657dacad5SJay Sternberg 12271c63dc66SChristoph Hellwig return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0); 122857dacad5SJay Sternberg } 122957dacad5SJay Sternberg 123057dacad5SJay Sternberg static int adapter_alloc_sq(struct nvme_dev *dev, u16 qid, 123157dacad5SJay Sternberg struct nvme_queue *nvmeq) 123257dacad5SJay Sternberg { 12339abd68efSJens Axboe struct nvme_ctrl *ctrl = &dev->ctrl; 1234f66e2804SChaitanya Kulkarni struct nvme_command c = { }; 123581c1cd98SKeith Busch int flags = NVME_QUEUE_PHYS_CONTIG; 123657dacad5SJay Sternberg 123757dacad5SJay Sternberg /* 12389abd68efSJens Axboe * Some drives have a bug that auto-enables WRRU if MEDIUM isn't 12399abd68efSJens Axboe * set. Since URGENT priority is zeroes, it makes all queues 12409abd68efSJens Axboe * URGENT. 12419abd68efSJens Axboe */ 12429abd68efSJens Axboe if (ctrl->quirks & NVME_QUIRK_MEDIUM_PRIO_SQ) 12439abd68efSJens Axboe flags |= NVME_SQ_PRIO_MEDIUM; 12449abd68efSJens Axboe 12459abd68efSJens Axboe /* 124616772ae6SMinwoo Im * Note: we (ab)use the fact that the prp fields survive if no data 124757dacad5SJay Sternberg * is attached to the request. 124857dacad5SJay Sternberg */ 124957dacad5SJay Sternberg c.create_sq.opcode = nvme_admin_create_sq; 125057dacad5SJay Sternberg c.create_sq.prp1 = cpu_to_le64(nvmeq->sq_dma_addr); 125157dacad5SJay Sternberg c.create_sq.sqid = cpu_to_le16(qid); 125257dacad5SJay Sternberg c.create_sq.qsize = cpu_to_le16(nvmeq->q_depth - 1); 125357dacad5SJay Sternberg c.create_sq.sq_flags = cpu_to_le16(flags); 125457dacad5SJay Sternberg c.create_sq.cqid = cpu_to_le16(qid); 125557dacad5SJay Sternberg 12561c63dc66SChristoph Hellwig return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0); 125757dacad5SJay Sternberg } 125857dacad5SJay Sternberg 125957dacad5SJay Sternberg static int adapter_delete_cq(struct nvme_dev *dev, u16 cqid) 126057dacad5SJay Sternberg { 126157dacad5SJay Sternberg return adapter_delete_queue(dev, nvme_admin_delete_cq, cqid); 126257dacad5SJay Sternberg } 126357dacad5SJay Sternberg 126457dacad5SJay Sternberg static int adapter_delete_sq(struct nvme_dev *dev, u16 sqid) 126557dacad5SJay Sternberg { 126657dacad5SJay Sternberg return adapter_delete_queue(dev, nvme_admin_delete_sq, sqid); 126757dacad5SJay Sternberg } 126857dacad5SJay Sternberg 1269de671d61SJens Axboe static enum rq_end_io_ret abort_endio(struct request *req, blk_status_t error) 127057dacad5SJay Sternberg { 1271a53232cbSKeith Busch struct nvme_queue *nvmeq = req->mq_hctx->driver_data; 127257dacad5SJay Sternberg 127327fa9bc5SChristoph Hellwig dev_warn(nvmeq->dev->ctrl.device, 127427fa9bc5SChristoph Hellwig "Abort status: 0x%x", nvme_req(req)->status); 1275e7a2a87dSChristoph Hellwig atomic_inc(&nvmeq->dev->ctrl.abort_limit); 1276e7a2a87dSChristoph Hellwig blk_mq_free_request(req); 1277de671d61SJens Axboe return RQ_END_IO_NONE; 127857dacad5SJay Sternberg } 127957dacad5SJay Sternberg 1280b2a0eb1aSKeith Busch static bool nvme_should_reset(struct nvme_dev *dev, u32 csts) 1281b2a0eb1aSKeith Busch { 1282b2a0eb1aSKeith Busch /* If true, indicates loss of adapter 
communication, possibly by a 1283b2a0eb1aSKeith Busch * NVMe Subsystem reset. 1284b2a0eb1aSKeith Busch */ 1285b2a0eb1aSKeith Busch bool nssro = dev->subsystem && (csts & NVME_CSTS_NSSRO); 1286b2a0eb1aSKeith Busch 1287ad70062cSJianchao Wang /* If there is a reset/reinit ongoing, we shouldn't reset again. */ 1288ad70062cSJianchao Wang switch (dev->ctrl.state) { 1289ad70062cSJianchao Wang case NVME_CTRL_RESETTING: 1290ad6a0a52SMax Gurtovoy case NVME_CTRL_CONNECTING: 1291b2a0eb1aSKeith Busch return false; 1292ad70062cSJianchao Wang default: 1293ad70062cSJianchao Wang break; 1294ad70062cSJianchao Wang } 1295b2a0eb1aSKeith Busch 1296b2a0eb1aSKeith Busch /* We shouldn't reset unless the controller is on fatal error state 1297b2a0eb1aSKeith Busch * _or_ if we lost the communication with it. 1298b2a0eb1aSKeith Busch */ 1299b2a0eb1aSKeith Busch if (!(csts & NVME_CSTS_CFS) && !nssro) 1300b2a0eb1aSKeith Busch return false; 1301b2a0eb1aSKeith Busch 1302b2a0eb1aSKeith Busch return true; 1303b2a0eb1aSKeith Busch } 1304b2a0eb1aSKeith Busch 1305b2a0eb1aSKeith Busch static void nvme_warn_reset(struct nvme_dev *dev, u32 csts) 1306b2a0eb1aSKeith Busch { 1307b2a0eb1aSKeith Busch /* Read a config register to help see what died. */ 1308b2a0eb1aSKeith Busch u16 pci_status; 1309b2a0eb1aSKeith Busch int result; 1310b2a0eb1aSKeith Busch 1311b2a0eb1aSKeith Busch result = pci_read_config_word(to_pci_dev(dev->dev), PCI_STATUS, 1312b2a0eb1aSKeith Busch &pci_status); 1313b2a0eb1aSKeith Busch if (result == PCIBIOS_SUCCESSFUL) 1314b2a0eb1aSKeith Busch dev_warn(dev->ctrl.device, 1315b2a0eb1aSKeith Busch "controller is down; will reset: CSTS=0x%x, PCI_STATUS=0x%hx\n", 1316b2a0eb1aSKeith Busch csts, pci_status); 1317b2a0eb1aSKeith Busch else 1318b2a0eb1aSKeith Busch dev_warn(dev->ctrl.device, 1319b2a0eb1aSKeith Busch "controller is down; will reset: CSTS=0x%x, PCI_STATUS read failed (%d)\n", 1320b2a0eb1aSKeith Busch csts, result); 13214641a8e6SKeith Busch 13224641a8e6SKeith Busch if (csts != ~0) 13234641a8e6SKeith Busch return; 13244641a8e6SKeith Busch 13254641a8e6SKeith Busch dev_warn(dev->ctrl.device, 13264641a8e6SKeith Busch "Does your device have a faulty power saving mode enabled?\n"); 13274641a8e6SKeith Busch dev_warn(dev->ctrl.device, 13284641a8e6SKeith Busch "Try \"nvme_core.default_ps_max_latency_us=0 pcie_aspm=off\" and report a bug\n"); 1329b2a0eb1aSKeith Busch } 1330b2a0eb1aSKeith Busch 13319bdb4833SJohn Garry static enum blk_eh_timer_return nvme_timeout(struct request *req) 133257dacad5SJay Sternberg { 1333f4800d6dSChristoph Hellwig struct nvme_iod *iod = blk_mq_rq_to_pdu(req); 1334a53232cbSKeith Busch struct nvme_queue *nvmeq = req->mq_hctx->driver_data; 133557dacad5SJay Sternberg struct nvme_dev *dev = nvmeq->dev; 133657dacad5SJay Sternberg struct request *abort_req; 1337f66e2804SChaitanya Kulkarni struct nvme_command cmd = { }; 1338b2a0eb1aSKeith Busch u32 csts = readl(dev->bar + NVME_REG_CSTS); 1339b2a0eb1aSKeith Busch 1340651438bbSWen Xiong /* If PCI error recovery process is happening, we cannot reset or 1341651438bbSWen Xiong * the recovery mechanism will surely fail. 
1342651438bbSWen Xiong */ 1343651438bbSWen Xiong mb(); 1344651438bbSWen Xiong if (pci_channel_offline(to_pci_dev(dev->dev))) 1345651438bbSWen Xiong return BLK_EH_RESET_TIMER; 1346651438bbSWen Xiong 1347b2a0eb1aSKeith Busch /* 1348b2a0eb1aSKeith Busch * Reset immediately if the controller is failed 1349b2a0eb1aSKeith Busch */ 1350b2a0eb1aSKeith Busch if (nvme_should_reset(dev, csts)) { 1351b2a0eb1aSKeith Busch nvme_warn_reset(dev, csts); 1352b2a0eb1aSKeith Busch nvme_dev_disable(dev, false); 1353d86c4d8eSChristoph Hellwig nvme_reset_ctrl(&dev->ctrl); 1354db8c48e4SChristoph Hellwig return BLK_EH_DONE; 1355b2a0eb1aSKeith Busch } 135657dacad5SJay Sternberg 135731c7c7d2SChristoph Hellwig /* 13587776db1cSKeith Busch * Did we miss an interrupt? 13597776db1cSKeith Busch */ 1360fa059b85SKeith Busch if (test_bit(NVMEQ_POLLED, &nvmeq->flags)) 13615a72e899SJens Axboe nvme_poll(req->mq_hctx, NULL); 1362fa059b85SKeith Busch else 1363bf392a5dSKeith Busch nvme_poll_irqdisable(nvmeq); 1364fa059b85SKeith Busch 13651c584208SKeith Busch if (blk_mq_rq_state(req) != MQ_RQ_IN_FLIGHT) { 13667776db1cSKeith Busch dev_warn(dev->ctrl.device, 13677776db1cSKeith Busch "I/O %d QID %d timeout, completion polled\n", 13687776db1cSKeith Busch req->tag, nvmeq->qid); 1369db8c48e4SChristoph Hellwig return BLK_EH_DONE; 13707776db1cSKeith Busch } 13717776db1cSKeith Busch 13727776db1cSKeith Busch /* 1373fd634f41SChristoph Hellwig * Shutdown immediately if controller times out while starting. The 1374fd634f41SChristoph Hellwig * reset work will see the pci device disabled when it gets the forced 1375fd634f41SChristoph Hellwig * cancellation error. All outstanding requests are completed on 1376db8c48e4SChristoph Hellwig * shutdown, so we return BLK_EH_DONE. 1377fd634f41SChristoph Hellwig */ 13784244140dSKeith Busch switch (dev->ctrl.state) { 13794244140dSKeith Busch case NVME_CTRL_CONNECTING: 13802036f726SKeith Busch nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING); 1381df561f66SGustavo A. R. Silva fallthrough; 13822036f726SKeith Busch case NVME_CTRL_DELETING: 1383b9cac43cSKeith Busch dev_warn_ratelimited(dev->ctrl.device, 1384fd634f41SChristoph Hellwig "I/O %d QID %d timeout, disable controller\n", 1385fd634f41SChristoph Hellwig req->tag, nvmeq->qid); 138627fa9bc5SChristoph Hellwig nvme_req(req)->flags |= NVME_REQ_CANCELLED; 13877ad92f65STong Zhang nvme_dev_disable(dev, true); 1388db8c48e4SChristoph Hellwig return BLK_EH_DONE; 138939a9dd81SKeith Busch case NVME_CTRL_RESETTING: 139039a9dd81SKeith Busch return BLK_EH_RESET_TIMER; 13914244140dSKeith Busch default: 13924244140dSKeith Busch break; 1393fd634f41SChristoph Hellwig } 1394fd634f41SChristoph Hellwig 1395fd634f41SChristoph Hellwig /* 1396e1569a16SKeith Busch * Shutdown the controller immediately and schedule a reset if the 1397e1569a16SKeith Busch * command was already aborted once before and still hasn't been 1398e1569a16SKeith Busch * returned to the driver, or if this is the admin queue. 
139931c7c7d2SChristoph Hellwig */ 1400f4800d6dSChristoph Hellwig if (!nvmeq->qid || iod->aborted) { 14011b3c47c1SSagi Grimberg dev_warn(dev->ctrl.device, 140257dacad5SJay Sternberg "I/O %d QID %d timeout, reset controller\n", 140357dacad5SJay Sternberg req->tag, nvmeq->qid); 14047ad92f65STong Zhang nvme_req(req)->flags |= NVME_REQ_CANCELLED; 1405a5cdb68cSKeith Busch nvme_dev_disable(dev, false); 1406d86c4d8eSChristoph Hellwig nvme_reset_ctrl(&dev->ctrl); 1407e1569a16SKeith Busch 1408db8c48e4SChristoph Hellwig return BLK_EH_DONE; 140957dacad5SJay Sternberg } 141057dacad5SJay Sternberg 1411e7a2a87dSChristoph Hellwig if (atomic_dec_return(&dev->ctrl.abort_limit) < 0) { 1412e7a2a87dSChristoph Hellwig atomic_inc(&dev->ctrl.abort_limit); 1413e7a2a87dSChristoph Hellwig return BLK_EH_RESET_TIMER; 1414e7a2a87dSChristoph Hellwig } 141552da4f3fSKeith Busch iod->aborted = true; 141657dacad5SJay Sternberg 141757dacad5SJay Sternberg cmd.abort.opcode = nvme_admin_abort_cmd; 141885f74acfSKeith Busch cmd.abort.cid = nvme_cid(req); 141957dacad5SJay Sternberg cmd.abort.sqid = cpu_to_le16(nvmeq->qid); 142057dacad5SJay Sternberg 14211b3c47c1SSagi Grimberg dev_warn(nvmeq->dev->ctrl.device, 142286141440SChristoph Hellwig "I/O %d (%s) QID %d timeout, aborting\n", 142386141440SChristoph Hellwig req->tag, 142486141440SChristoph Hellwig nvme_get_opcode_str(nvme_req(req)->cmd->common.opcode), 142586141440SChristoph Hellwig nvmeq->qid); 1426e7a2a87dSChristoph Hellwig 1427e559398fSChristoph Hellwig abort_req = blk_mq_alloc_request(dev->ctrl.admin_q, nvme_req_op(&cmd), 142839dfe844SChaitanya Kulkarni BLK_MQ_REQ_NOWAIT); 14296bf25d16SChristoph Hellwig if (IS_ERR(abort_req)) { 14306bf25d16SChristoph Hellwig atomic_inc(&dev->ctrl.abort_limit); 143131c7c7d2SChristoph Hellwig return BLK_EH_RESET_TIMER; 143257dacad5SJay Sternberg } 1433e559398fSChristoph Hellwig nvme_init_request(abort_req, &cmd); 143457dacad5SJay Sternberg 1435e2e53086SChristoph Hellwig abort_req->end_io = abort_endio; 1436e7a2a87dSChristoph Hellwig abort_req->end_io_data = NULL; 1437e2e53086SChristoph Hellwig blk_execute_rq_nowait(abort_req, false); 143857dacad5SJay Sternberg 143957dacad5SJay Sternberg /* 144057dacad5SJay Sternberg * The aborted req will be completed on receiving the abort req. 144157dacad5SJay Sternberg * We enable the timer again. If hit twice, it'll cause a device reset, 144257dacad5SJay Sternberg * as the device then is in a faulty state. 
144357dacad5SJay Sternberg */ 144457dacad5SJay Sternberg return BLK_EH_RESET_TIMER; 144557dacad5SJay Sternberg } 144657dacad5SJay Sternberg 144757dacad5SJay Sternberg static void nvme_free_queue(struct nvme_queue *nvmeq) 144857dacad5SJay Sternberg { 14498a1d09a6SBenjamin Herrenschmidt dma_free_coherent(nvmeq->dev->dev, CQ_SIZE(nvmeq), 145057dacad5SJay Sternberg (void *)nvmeq->cqes, nvmeq->cq_dma_addr); 145163223078SChristoph Hellwig if (!nvmeq->sq_cmds) 145263223078SChristoph Hellwig return; 14530f238ff5SLogan Gunthorpe 145463223078SChristoph Hellwig if (test_and_clear_bit(NVMEQ_SQ_CMB, &nvmeq->flags)) { 145588a041f4SKeith Busch pci_free_p2pmem(to_pci_dev(nvmeq->dev->dev), 14568a1d09a6SBenjamin Herrenschmidt nvmeq->sq_cmds, SQ_SIZE(nvmeq)); 145763223078SChristoph Hellwig } else { 14588a1d09a6SBenjamin Herrenschmidt dma_free_coherent(nvmeq->dev->dev, SQ_SIZE(nvmeq), 145963223078SChristoph Hellwig nvmeq->sq_cmds, nvmeq->sq_dma_addr); 14600f238ff5SLogan Gunthorpe } 146157dacad5SJay Sternberg } 146257dacad5SJay Sternberg 146357dacad5SJay Sternberg static void nvme_free_queues(struct nvme_dev *dev, int lowest) 146457dacad5SJay Sternberg { 146557dacad5SJay Sternberg int i; 146657dacad5SJay Sternberg 1467d858e5f0SSagi Grimberg for (i = dev->ctrl.queue_count - 1; i >= lowest; i--) { 1468d858e5f0SSagi Grimberg dev->ctrl.queue_count--; 1469147b27e4SSagi Grimberg nvme_free_queue(&dev->queues[i]); 147057dacad5SJay Sternberg } 147157dacad5SJay Sternberg } 147257dacad5SJay Sternberg 147310981f23SChristoph Hellwig static void nvme_suspend_queue(struct nvme_dev *dev, unsigned int qid) 147457dacad5SJay Sternberg { 147510981f23SChristoph Hellwig struct nvme_queue *nvmeq = &dev->queues[qid]; 147610981f23SChristoph Hellwig 14774e224106SChristoph Hellwig if (!test_and_clear_bit(NVMEQ_ENABLED, &nvmeq->flags)) 147810981f23SChristoph Hellwig return; 147957dacad5SJay Sternberg 14804e224106SChristoph Hellwig /* ensure that nvme_queue_rq() sees NVMEQ_ENABLED cleared */ 1481d1f06f4aSJens Axboe mb(); 148257dacad5SJay Sternberg 14834e224106SChristoph Hellwig nvmeq->dev->online_queues--; 14841c63dc66SChristoph Hellwig if (!nvmeq->qid && nvmeq->dev->ctrl.admin_q) 14859f27bd70SChristoph Hellwig nvme_quiesce_admin_queue(&nvmeq->dev->ctrl); 14867c349ddeSKeith Busch if (!test_and_clear_bit(NVMEQ_POLLED, &nvmeq->flags)) 148710981f23SChristoph Hellwig pci_free_irq(to_pci_dev(dev->dev), nvmeq->cq_vector, nvmeq); 148857dacad5SJay Sternberg } 148957dacad5SJay Sternberg 14908fae268bSKeith Busch static void nvme_suspend_io_queues(struct nvme_dev *dev) 14918fae268bSKeith Busch { 14928fae268bSKeith Busch int i; 14938fae268bSKeith Busch 14948fae268bSKeith Busch for (i = dev->ctrl.queue_count - 1; i > 0; i--) 149510981f23SChristoph Hellwig nvme_suspend_queue(dev, i); 149657dacad5SJay Sternberg } 149757dacad5SJay Sternberg 1498fa46c6fbSKeith Busch /* 1499fa46c6fbSKeith Busch * Called only on a device that has been disabled and after all other threads 15009210c075SDongli Zhang * that can check this device's completion queues have synced, except 15019210c075SDongli Zhang * nvme_poll(). This is the last chance for the driver to see a natural 15029210c075SDongli Zhang * completion before nvme_cancel_request() terminates all incomplete requests. 
1503fa46c6fbSKeith Busch */ 1504fa46c6fbSKeith Busch static void nvme_reap_pending_cqes(struct nvme_dev *dev) 1505fa46c6fbSKeith Busch { 1506fa46c6fbSKeith Busch int i; 1507fa46c6fbSKeith Busch 15089210c075SDongli Zhang for (i = dev->ctrl.queue_count - 1; i > 0; i--) { 15099210c075SDongli Zhang spin_lock(&dev->queues[i].cq_poll_lock); 1510c234a653SJens Axboe nvme_poll_cq(&dev->queues[i], NULL); 15119210c075SDongli Zhang spin_unlock(&dev->queues[i].cq_poll_lock); 15129210c075SDongli Zhang } 1513fa46c6fbSKeith Busch } 1514fa46c6fbSKeith Busch 151557dacad5SJay Sternberg static int nvme_cmb_qdepth(struct nvme_dev *dev, int nr_io_queues, 151657dacad5SJay Sternberg int entry_size) 151757dacad5SJay Sternberg { 151857dacad5SJay Sternberg int q_depth = dev->q_depth; 15195fd4ce1bSChristoph Hellwig unsigned q_size_aligned = roundup(q_depth * entry_size, 15206c3c05b0SChaitanya Kulkarni NVME_CTRL_PAGE_SIZE); 152157dacad5SJay Sternberg 152257dacad5SJay Sternberg if (q_size_aligned * nr_io_queues > dev->cmb_size) { 152357dacad5SJay Sternberg u64 mem_per_q = div_u64(dev->cmb_size, nr_io_queues); 15244e523547SBaolin Wang 15256c3c05b0SChaitanya Kulkarni mem_per_q = round_down(mem_per_q, NVME_CTRL_PAGE_SIZE); 152657dacad5SJay Sternberg q_depth = div_u64(mem_per_q, entry_size); 152757dacad5SJay Sternberg 152857dacad5SJay Sternberg /* 152957dacad5SJay Sternberg * Ensure the reduced q_depth is above some threshold where it 153057dacad5SJay Sternberg * would be better to map queues in system memory with the 153157dacad5SJay Sternberg * original depth 153257dacad5SJay Sternberg */ 153357dacad5SJay Sternberg if (q_depth < 64) 153457dacad5SJay Sternberg return -ENOMEM; 153557dacad5SJay Sternberg } 153657dacad5SJay Sternberg 153757dacad5SJay Sternberg return q_depth; 153857dacad5SJay Sternberg } 153957dacad5SJay Sternberg 154057dacad5SJay Sternberg static int nvme_alloc_sq_cmds(struct nvme_dev *dev, struct nvme_queue *nvmeq, 15418a1d09a6SBenjamin Herrenschmidt int qid) 154257dacad5SJay Sternberg { 15430f238ff5SLogan Gunthorpe struct pci_dev *pdev = to_pci_dev(dev->dev); 1544815c6704SKeith Busch 15450f238ff5SLogan Gunthorpe if (qid && dev->cmb_use_sqes && (dev->cmbsz & NVME_CMBSZ_SQS)) { 15468a1d09a6SBenjamin Herrenschmidt nvmeq->sq_cmds = pci_alloc_p2pmem(pdev, SQ_SIZE(nvmeq)); 1547bfac8e9fSAlan Mikhak if (nvmeq->sq_cmds) { 15480f238ff5SLogan Gunthorpe nvmeq->sq_dma_addr = pci_p2pmem_virt_to_bus(pdev, 15490f238ff5SLogan Gunthorpe nvmeq->sq_cmds); 155063223078SChristoph Hellwig if (nvmeq->sq_dma_addr) { 155163223078SChristoph Hellwig set_bit(NVMEQ_SQ_CMB, &nvmeq->flags); 155263223078SChristoph Hellwig return 0; 155363223078SChristoph Hellwig } 1554bfac8e9fSAlan Mikhak 15558a1d09a6SBenjamin Herrenschmidt pci_free_p2pmem(pdev, nvmeq->sq_cmds, SQ_SIZE(nvmeq)); 1556bfac8e9fSAlan Mikhak } 15570f238ff5SLogan Gunthorpe } 15580f238ff5SLogan Gunthorpe 15598a1d09a6SBenjamin Herrenschmidt nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(nvmeq), 156057dacad5SJay Sternberg &nvmeq->sq_dma_addr, GFP_KERNEL); 156157dacad5SJay Sternberg if (!nvmeq->sq_cmds) 156257dacad5SJay Sternberg return -ENOMEM; 156357dacad5SJay Sternberg return 0; 156457dacad5SJay Sternberg } 156557dacad5SJay Sternberg 1566a6ff7262SKeith Busch static int nvme_alloc_queue(struct nvme_dev *dev, int qid, int depth) 156757dacad5SJay Sternberg { 1568147b27e4SSagi Grimberg struct nvme_queue *nvmeq = &dev->queues[qid]; 156957dacad5SJay Sternberg 157062314e40SKeith Busch if (dev->ctrl.queue_count > qid) 157162314e40SKeith Busch return 0; 157257dacad5SJay Sternberg 
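	/*
	 * Editor's note (illustrative, assuming the standard 64-byte admin
	 * submission queue entries): sqes below is log2 of the SQ entry
	 * size, so the admin queue uses NVME_ADM_SQES while I/O queues use
	 * the controller-specific dev->io_sqes.
	 */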
1573c1e0cc7eSBenjamin Herrenschmidt nvmeq->sqes = qid ? dev->io_sqes : NVME_ADM_SQES; 15748a1d09a6SBenjamin Herrenschmidt nvmeq->q_depth = depth; 15758a1d09a6SBenjamin Herrenschmidt nvmeq->cqes = dma_alloc_coherent(dev->dev, CQ_SIZE(nvmeq), 157657dacad5SJay Sternberg &nvmeq->cq_dma_addr, GFP_KERNEL); 157757dacad5SJay Sternberg if (!nvmeq->cqes) 157857dacad5SJay Sternberg goto free_nvmeq; 157957dacad5SJay Sternberg 15808a1d09a6SBenjamin Herrenschmidt if (nvme_alloc_sq_cmds(dev, nvmeq, qid)) 158157dacad5SJay Sternberg goto free_cqdma; 158257dacad5SJay Sternberg 158357dacad5SJay Sternberg nvmeq->dev = dev; 15841ab0cd69SJens Axboe spin_lock_init(&nvmeq->sq_lock); 15853a7afd8eSChristoph Hellwig spin_lock_init(&nvmeq->cq_poll_lock); 158657dacad5SJay Sternberg nvmeq->cq_head = 0; 158757dacad5SJay Sternberg nvmeq->cq_phase = 1; 158857dacad5SJay Sternberg nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride]; 158957dacad5SJay Sternberg nvmeq->qid = qid; 1590d858e5f0SSagi Grimberg dev->ctrl.queue_count++; 159157dacad5SJay Sternberg 1592147b27e4SSagi Grimberg return 0; 159357dacad5SJay Sternberg 159457dacad5SJay Sternberg free_cqdma: 15958a1d09a6SBenjamin Herrenschmidt dma_free_coherent(dev->dev, CQ_SIZE(nvmeq), (void *)nvmeq->cqes, 159657dacad5SJay Sternberg nvmeq->cq_dma_addr); 159757dacad5SJay Sternberg free_nvmeq: 1598147b27e4SSagi Grimberg return -ENOMEM; 159957dacad5SJay Sternberg } 160057dacad5SJay Sternberg 1601dca51e78SChristoph Hellwig static int queue_request_irq(struct nvme_queue *nvmeq) 160257dacad5SJay Sternberg { 16030ff199cbSChristoph Hellwig struct pci_dev *pdev = to_pci_dev(nvmeq->dev->dev); 16040ff199cbSChristoph Hellwig int nr = nvmeq->dev->ctrl.instance; 16050ff199cbSChristoph Hellwig 16060ff199cbSChristoph Hellwig if (use_threaded_interrupts) { 16070ff199cbSChristoph Hellwig return pci_request_irq(pdev, nvmeq->cq_vector, nvme_irq_check, 16080ff199cbSChristoph Hellwig nvme_irq, nvmeq, "nvme%dq%d", nr, nvmeq->qid); 16090ff199cbSChristoph Hellwig } else { 16100ff199cbSChristoph Hellwig return pci_request_irq(pdev, nvmeq->cq_vector, nvme_irq, 16110ff199cbSChristoph Hellwig NULL, nvmeq, "nvme%dq%d", nr, nvmeq->qid); 16120ff199cbSChristoph Hellwig } 161357dacad5SJay Sternberg } 161457dacad5SJay Sternberg 161557dacad5SJay Sternberg static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid) 161657dacad5SJay Sternberg { 161757dacad5SJay Sternberg struct nvme_dev *dev = nvmeq->dev; 161857dacad5SJay Sternberg 161957dacad5SJay Sternberg nvmeq->sq_tail = 0; 162038210800SKeith Busch nvmeq->last_sq_tail = 0; 162157dacad5SJay Sternberg nvmeq->cq_head = 0; 162257dacad5SJay Sternberg nvmeq->cq_phase = 1; 162357dacad5SJay Sternberg nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride]; 16248a1d09a6SBenjamin Herrenschmidt memset((void *)nvmeq->cqes, 0, CQ_SIZE(nvmeq)); 1625f9f38e33SHelen Koike nvme_dbbuf_init(dev, nvmeq, qid); 162657dacad5SJay Sternberg dev->online_queues++; 16273a7afd8eSChristoph Hellwig wmb(); /* ensure the first interrupt sees the initialization */ 162857dacad5SJay Sternberg } 162957dacad5SJay Sternberg 1630e4b9852aSCasey Chen /* 1631e4b9852aSCasey Chen * Try getting shutdown_lock while setting up IO queues. 1632e4b9852aSCasey Chen */ 1633e4b9852aSCasey Chen static int nvme_setup_io_queues_trylock(struct nvme_dev *dev) 1634e4b9852aSCasey Chen { 1635e4b9852aSCasey Chen /* 1636e4b9852aSCasey Chen * Give up if the lock is being held by nvme_dev_disable. 
1637e4b9852aSCasey Chen */ 1638e4b9852aSCasey Chen if (!mutex_trylock(&dev->shutdown_lock)) 1639e4b9852aSCasey Chen return -ENODEV; 1640e4b9852aSCasey Chen 1641e4b9852aSCasey Chen /* 1642e4b9852aSCasey Chen * Controller is in wrong state, fail early. 1643e4b9852aSCasey Chen */ 1644e4b9852aSCasey Chen if (dev->ctrl.state != NVME_CTRL_CONNECTING) { 1645e4b9852aSCasey Chen mutex_unlock(&dev->shutdown_lock); 1646e4b9852aSCasey Chen return -ENODEV; 1647e4b9852aSCasey Chen } 1648e4b9852aSCasey Chen 1649e4b9852aSCasey Chen return 0; 1650e4b9852aSCasey Chen } 1651e4b9852aSCasey Chen 16524b04cc6aSJens Axboe static int nvme_create_queue(struct nvme_queue *nvmeq, int qid, bool polled) 165357dacad5SJay Sternberg { 165457dacad5SJay Sternberg struct nvme_dev *dev = nvmeq->dev; 165557dacad5SJay Sternberg int result; 16567c349ddeSKeith Busch u16 vector = 0; 165757dacad5SJay Sternberg 1658d1ed6aa1SChristoph Hellwig clear_bit(NVMEQ_DELETE_ERROR, &nvmeq->flags); 1659d1ed6aa1SChristoph Hellwig 166022b55601SKeith Busch /* 166122b55601SKeith Busch * A queue's vector matches the queue identifier unless the controller 166222b55601SKeith Busch * has only one vector available. 166322b55601SKeith Busch */ 16644b04cc6aSJens Axboe if (!polled) 1665a8e3e0bbSJianchao Wang vector = dev->num_vecs == 1 ? 0 : qid; 16664b04cc6aSJens Axboe else 16677c349ddeSKeith Busch set_bit(NVMEQ_POLLED, &nvmeq->flags); 16684b04cc6aSJens Axboe 1669a8e3e0bbSJianchao Wang result = adapter_alloc_cq(dev, qid, nvmeq, vector); 1670ded45505SKeith Busch if (result) 1671ded45505SKeith Busch return result; 167257dacad5SJay Sternberg 167357dacad5SJay Sternberg result = adapter_alloc_sq(dev, qid, nvmeq); 167457dacad5SJay Sternberg if (result < 0) 1675ded45505SKeith Busch return result; 1676c80b36cdSEdmund Nadolski if (result) 167757dacad5SJay Sternberg goto release_cq; 167857dacad5SJay Sternberg 1679a8e3e0bbSJianchao Wang nvmeq->cq_vector = vector; 16804b04cc6aSJens Axboe 1681e4b9852aSCasey Chen result = nvme_setup_io_queues_trylock(dev); 1682e4b9852aSCasey Chen if (result) 1683e4b9852aSCasey Chen return result; 1684e4b9852aSCasey Chen nvme_init_queue(nvmeq, qid); 16857c349ddeSKeith Busch if (!polled) { 1686dca51e78SChristoph Hellwig result = queue_request_irq(nvmeq); 168757dacad5SJay Sternberg if (result < 0) 168857dacad5SJay Sternberg goto release_sq; 16894b04cc6aSJens Axboe } 169057dacad5SJay Sternberg 16914e224106SChristoph Hellwig set_bit(NVMEQ_ENABLED, &nvmeq->flags); 1692e4b9852aSCasey Chen mutex_unlock(&dev->shutdown_lock); 169357dacad5SJay Sternberg return result; 169457dacad5SJay Sternberg 169557dacad5SJay Sternberg release_sq: 1696f25a2dfcSJianchao Wang dev->online_queues--; 1697e4b9852aSCasey Chen mutex_unlock(&dev->shutdown_lock); 169857dacad5SJay Sternberg adapter_delete_sq(dev, qid); 169957dacad5SJay Sternberg release_cq: 170057dacad5SJay Sternberg adapter_delete_cq(dev, qid); 170157dacad5SJay Sternberg return result; 170257dacad5SJay Sternberg } 170357dacad5SJay Sternberg 1704f363b089SEric Biggers static const struct blk_mq_ops nvme_mq_admin_ops = { 170557dacad5SJay Sternberg .queue_rq = nvme_queue_rq, 170677f02a7aSChristoph Hellwig .complete = nvme_pci_complete_rq, 170757dacad5SJay Sternberg .init_hctx = nvme_admin_init_hctx, 1708e559398fSChristoph Hellwig .init_request = nvme_pci_init_request, 170957dacad5SJay Sternberg .timeout = nvme_timeout, 171057dacad5SJay Sternberg }; 171157dacad5SJay Sternberg 1712f363b089SEric Biggers static const struct blk_mq_ops nvme_mq_ops = { 1713376f7ef8SChristoph Hellwig .queue_rq = nvme_queue_rq, 
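	/*
	 * Editor's note (illustrative): .queue_rqs hands the driver a whole
	 * list of prepared requests at once; nvme_queue_rqs() above splits
	 * the list per hardware queue, copies the commands into the SQ and
	 * rings the doorbell once per batch instead of once per request.
	 */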
1714d62cbcf6SJens Axboe .queue_rqs = nvme_queue_rqs, 1715376f7ef8SChristoph Hellwig .complete = nvme_pci_complete_rq, 1716376f7ef8SChristoph Hellwig .commit_rqs = nvme_commit_rqs, 1717376f7ef8SChristoph Hellwig .init_hctx = nvme_init_hctx, 1718e559398fSChristoph Hellwig .init_request = nvme_pci_init_request, 1719376f7ef8SChristoph Hellwig .map_queues = nvme_pci_map_queues, 1720376f7ef8SChristoph Hellwig .timeout = nvme_timeout, 1721c6d962aeSChristoph Hellwig .poll = nvme_poll, 1722dabcefabSJens Axboe }; 1723dabcefabSJens Axboe 172457dacad5SJay Sternberg static void nvme_dev_remove_admin(struct nvme_dev *dev) 172557dacad5SJay Sternberg { 17261c63dc66SChristoph Hellwig if (dev->ctrl.admin_q && !blk_queue_dying(dev->ctrl.admin_q)) { 172769d9a99cSKeith Busch /* 172869d9a99cSKeith Busch * If the controller was reset during removal, it's possible 172969d9a99cSKeith Busch * user requests may be waiting on a stopped queue. Start the 173069d9a99cSKeith Busch * queue to flush these to completion. 173169d9a99cSKeith Busch */ 17329f27bd70SChristoph Hellwig nvme_unquiesce_admin_queue(&dev->ctrl); 17330da7feaaSChristoph Hellwig nvme_remove_admin_tag_set(&dev->ctrl); 173457dacad5SJay Sternberg } 173557dacad5SJay Sternberg } 173657dacad5SJay Sternberg 173797f6ef64SXu Yu static unsigned long db_bar_size(struct nvme_dev *dev, unsigned nr_io_queues) 173897f6ef64SXu Yu { 173997f6ef64SXu Yu return NVME_REG_DBS + ((nr_io_queues + 1) * 8 * dev->db_stride); 174097f6ef64SXu Yu } 174197f6ef64SXu Yu 174297f6ef64SXu Yu static int nvme_remap_bar(struct nvme_dev *dev, unsigned long size) 174397f6ef64SXu Yu { 174497f6ef64SXu Yu struct pci_dev *pdev = to_pci_dev(dev->dev); 174597f6ef64SXu Yu 174697f6ef64SXu Yu if (size <= dev->bar_mapped_size) 174797f6ef64SXu Yu return 0; 174897f6ef64SXu Yu if (size > pci_resource_len(pdev, 0)) 174997f6ef64SXu Yu return -ENOMEM; 175097f6ef64SXu Yu if (dev->bar) 175197f6ef64SXu Yu iounmap(dev->bar); 175297f6ef64SXu Yu dev->bar = ioremap(pci_resource_start(pdev, 0), size); 175397f6ef64SXu Yu if (!dev->bar) { 175497f6ef64SXu Yu dev->bar_mapped_size = 0; 175597f6ef64SXu Yu return -ENOMEM; 175697f6ef64SXu Yu } 175797f6ef64SXu Yu dev->bar_mapped_size = size; 175897f6ef64SXu Yu dev->dbs = dev->bar + NVME_REG_DBS; 175997f6ef64SXu Yu 176097f6ef64SXu Yu return 0; 176197f6ef64SXu Yu } 176297f6ef64SXu Yu 176301ad0990SSagi Grimberg static int nvme_pci_configure_admin_queue(struct nvme_dev *dev) 176457dacad5SJay Sternberg { 176557dacad5SJay Sternberg int result; 176657dacad5SJay Sternberg u32 aqa; 176757dacad5SJay Sternberg struct nvme_queue *nvmeq; 176857dacad5SJay Sternberg 176997f6ef64SXu Yu result = nvme_remap_bar(dev, db_bar_size(dev, 0)); 177097f6ef64SXu Yu if (result < 0) 177197f6ef64SXu Yu return result; 177297f6ef64SXu Yu 17738ef2074dSGabriel Krisman Bertazi dev->subsystem = readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 1, 0) ? 177420d0dfe6SSagi Grimberg NVME_CAP_NSSRC(dev->ctrl.cap) : 0; 177557dacad5SJay Sternberg 17767a67cbeaSChristoph Hellwig if (dev->subsystem && 17777a67cbeaSChristoph Hellwig (readl(dev->bar + NVME_REG_CSTS) & NVME_CSTS_NSSRO)) 17787a67cbeaSChristoph Hellwig writel(NVME_CSTS_NSSRO, dev->bar + NVME_REG_CSTS); 177957dacad5SJay Sternberg 1780285b6e9bSChristoph Hellwig /* 1781285b6e9bSChristoph Hellwig * If the device has been passed off to us in an enabled state, just 1782285b6e9bSChristoph Hellwig * clear the enabled bit. 
The spec says we should set the 'shutdown 1783285b6e9bSChristoph Hellwig * notification bits', but doing so may cause the device to complete 1784285b6e9bSChristoph Hellwig * commands to the admin queue ... and we don't know what memory that 1785285b6e9bSChristoph Hellwig * might be pointing at! 1786285b6e9bSChristoph Hellwig */ 1787285b6e9bSChristoph Hellwig result = nvme_disable_ctrl(&dev->ctrl, false); 178857dacad5SJay Sternberg if (result < 0) 178957dacad5SJay Sternberg return result; 179057dacad5SJay Sternberg 1791a6ff7262SKeith Busch result = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH); 1792147b27e4SSagi Grimberg if (result) 1793147b27e4SSagi Grimberg return result; 179457dacad5SJay Sternberg 1795635333e4SMax Gurtovoy dev->ctrl.numa_node = dev_to_node(dev->dev); 1796635333e4SMax Gurtovoy 1797147b27e4SSagi Grimberg nvmeq = &dev->queues[0]; 179857dacad5SJay Sternberg aqa = nvmeq->q_depth - 1; 179957dacad5SJay Sternberg aqa |= aqa << 16; 180057dacad5SJay Sternberg 18017a67cbeaSChristoph Hellwig writel(aqa, dev->bar + NVME_REG_AQA); 18027a67cbeaSChristoph Hellwig lo_hi_writeq(nvmeq->sq_dma_addr, dev->bar + NVME_REG_ASQ); 18037a67cbeaSChristoph Hellwig lo_hi_writeq(nvmeq->cq_dma_addr, dev->bar + NVME_REG_ACQ); 180457dacad5SJay Sternberg 1805c0f2f45bSSagi Grimberg result = nvme_enable_ctrl(&dev->ctrl); 180657dacad5SJay Sternberg if (result) 1807d4875622SKeith Busch return result; 180857dacad5SJay Sternberg 180957dacad5SJay Sternberg nvmeq->cq_vector = 0; 1810161b8be2SKeith Busch nvme_init_queue(nvmeq, 0); 1811dca51e78SChristoph Hellwig result = queue_request_irq(nvmeq); 181257dacad5SJay Sternberg if (result) { 18137c349ddeSKeith Busch dev->online_queues--; 1814d4875622SKeith Busch return result; 181557dacad5SJay Sternberg } 181657dacad5SJay Sternberg 18174e224106SChristoph Hellwig set_bit(NVMEQ_ENABLED, &nvmeq->flags); 181857dacad5SJay Sternberg return result; 181957dacad5SJay Sternberg } 182057dacad5SJay Sternberg 1821749941f2SChristoph Hellwig static int nvme_create_io_queues(struct nvme_dev *dev) 182257dacad5SJay Sternberg { 18234b04cc6aSJens Axboe unsigned i, max, rw_queues; 1824749941f2SChristoph Hellwig int ret = 0; 182557dacad5SJay Sternberg 1826d858e5f0SSagi Grimberg for (i = dev->ctrl.queue_count; i <= dev->max_qid; i++) { 1827a6ff7262SKeith Busch if (nvme_alloc_queue(dev, i, dev->q_depth)) { 1828749941f2SChristoph Hellwig ret = -ENOMEM; 182957dacad5SJay Sternberg break; 1830749941f2SChristoph Hellwig } 1831749941f2SChristoph Hellwig } 183257dacad5SJay Sternberg 1833d858e5f0SSagi Grimberg max = min(dev->max_qid, dev->ctrl.queue_count - 1); 1834e20ba6e1SChristoph Hellwig if (max != 1 && dev->io_queues[HCTX_TYPE_POLL]) { 1835e20ba6e1SChristoph Hellwig rw_queues = dev->io_queues[HCTX_TYPE_DEFAULT] + 1836e20ba6e1SChristoph Hellwig dev->io_queues[HCTX_TYPE_READ]; 18374b04cc6aSJens Axboe } else { 18384b04cc6aSJens Axboe rw_queues = max; 18394b04cc6aSJens Axboe } 18404b04cc6aSJens Axboe 1841949928c1SKeith Busch for (i = dev->online_queues; i <= max; i++) { 18424b04cc6aSJens Axboe bool polled = i > rw_queues; 18434b04cc6aSJens Axboe 18444b04cc6aSJens Axboe ret = nvme_create_queue(&dev->queues[i], i, polled); 1845d4875622SKeith Busch if (ret) 184657dacad5SJay Sternberg break; 184757dacad5SJay Sternberg } 184857dacad5SJay Sternberg 1849749941f2SChristoph Hellwig /* 1850749941f2SChristoph Hellwig * Ignore failing Create SQ/CQ commands, we can continue with less 18518adb8c14SMinwoo Im * than the desired amount of queues, and even a controller without 18528adb8c14SMinwoo Im * I/O queues can still 
be used to issue admin commands. This might 1853749941f2SChristoph Hellwig * be useful to upgrade a buggy firmware for example. 1854749941f2SChristoph Hellwig */ 1855749941f2SChristoph Hellwig return ret >= 0 ? 0 : ret; 185657dacad5SJay Sternberg } 185757dacad5SJay Sternberg 185888de4598SChristoph Hellwig static u64 nvme_cmb_size_unit(struct nvme_dev *dev) 185957dacad5SJay Sternberg { 186088de4598SChristoph Hellwig u8 szu = (dev->cmbsz >> NVME_CMBSZ_SZU_SHIFT) & NVME_CMBSZ_SZU_MASK; 186188de4598SChristoph Hellwig 186288de4598SChristoph Hellwig return 1ULL << (12 + 4 * szu); 186388de4598SChristoph Hellwig } 186488de4598SChristoph Hellwig 186588de4598SChristoph Hellwig static u32 nvme_cmb_size(struct nvme_dev *dev) 186688de4598SChristoph Hellwig { 186788de4598SChristoph Hellwig return (dev->cmbsz >> NVME_CMBSZ_SZ_SHIFT) & NVME_CMBSZ_SZ_MASK; 186888de4598SChristoph Hellwig } 186988de4598SChristoph Hellwig 1870f65efd6dSChristoph Hellwig static void nvme_map_cmb(struct nvme_dev *dev) 187157dacad5SJay Sternberg { 187288de4598SChristoph Hellwig u64 size, offset; 187357dacad5SJay Sternberg resource_size_t bar_size; 187457dacad5SJay Sternberg struct pci_dev *pdev = to_pci_dev(dev->dev); 18758969f1f8SChristoph Hellwig int bar; 187657dacad5SJay Sternberg 18779fe5c59fSKeith Busch if (dev->cmb_size) 18789fe5c59fSKeith Busch return; 18799fe5c59fSKeith Busch 188020d3bb92SKlaus Jensen if (NVME_CAP_CMBS(dev->ctrl.cap)) 188120d3bb92SKlaus Jensen writel(NVME_CMBMSC_CRE, dev->bar + NVME_REG_CMBMSC); 188220d3bb92SKlaus Jensen 18837a67cbeaSChristoph Hellwig dev->cmbsz = readl(dev->bar + NVME_REG_CMBSZ); 1884f65efd6dSChristoph Hellwig if (!dev->cmbsz) 1885f65efd6dSChristoph Hellwig return; 1886202021c1SStephen Bates dev->cmbloc = readl(dev->bar + NVME_REG_CMBLOC); 188757dacad5SJay Sternberg 188888de4598SChristoph Hellwig size = nvme_cmb_size_unit(dev) * nvme_cmb_size(dev); 188988de4598SChristoph Hellwig offset = nvme_cmb_size_unit(dev) * NVME_CMB_OFST(dev->cmbloc); 18908969f1f8SChristoph Hellwig bar = NVME_CMB_BIR(dev->cmbloc); 18918969f1f8SChristoph Hellwig bar_size = pci_resource_len(pdev, bar); 189257dacad5SJay Sternberg 189357dacad5SJay Sternberg if (offset > bar_size) 1894f65efd6dSChristoph Hellwig return; 189557dacad5SJay Sternberg 189657dacad5SJay Sternberg /* 189720d3bb92SKlaus Jensen * Tell the controller about the host side address mapping the CMB, 189820d3bb92SKlaus Jensen * and enable CMB decoding for the NVMe 1.4+ scheme: 189920d3bb92SKlaus Jensen */ 190020d3bb92SKlaus Jensen if (NVME_CAP_CMBS(dev->ctrl.cap)) { 190120d3bb92SKlaus Jensen hi_lo_writeq(NVME_CMBMSC_CRE | NVME_CMBMSC_CMSE | 190220d3bb92SKlaus Jensen (pci_bus_address(pdev, bar) + offset), 190320d3bb92SKlaus Jensen dev->bar + NVME_REG_CMBMSC); 190420d3bb92SKlaus Jensen } 190520d3bb92SKlaus Jensen 190620d3bb92SKlaus Jensen /* 190757dacad5SJay Sternberg * Controllers may support a CMB size larger than their BAR, 190857dacad5SJay Sternberg * for example, due to being behind a bridge. 
Reduce the CMB to 190957dacad5SJay Sternberg * the reported size of the BAR 191057dacad5SJay Sternberg */ 191157dacad5SJay Sternberg if (size > bar_size - offset) 191257dacad5SJay Sternberg size = bar_size - offset; 191357dacad5SJay Sternberg 19140f238ff5SLogan Gunthorpe if (pci_p2pdma_add_resource(pdev, bar, size, offset)) { 19150f238ff5SLogan Gunthorpe dev_warn(dev->ctrl.device, 19160f238ff5SLogan Gunthorpe "failed to register the CMB\n"); 1917f65efd6dSChristoph Hellwig return; 19180f238ff5SLogan Gunthorpe } 19190f238ff5SLogan Gunthorpe 192057dacad5SJay Sternberg dev->cmb_size = size; 19210f238ff5SLogan Gunthorpe dev->cmb_use_sqes = use_cmb_sqes && (dev->cmbsz & NVME_CMBSZ_SQS); 19220f238ff5SLogan Gunthorpe 19230f238ff5SLogan Gunthorpe if ((dev->cmbsz & (NVME_CMBSZ_WDS | NVME_CMBSZ_RDS)) == 19240f238ff5SLogan Gunthorpe (NVME_CMBSZ_WDS | NVME_CMBSZ_RDS)) 19250f238ff5SLogan Gunthorpe pci_p2pmem_publish(pdev, true); 192657dacad5SJay Sternberg } 192757dacad5SJay Sternberg 192887ad72a5SChristoph Hellwig static int nvme_set_host_mem(struct nvme_dev *dev, u32 bits) 192957dacad5SJay Sternberg { 19306c3c05b0SChaitanya Kulkarni u32 host_mem_size = dev->host_mem_size >> NVME_CTRL_PAGE_SHIFT; 19314033f35dSChristoph Hellwig u64 dma_addr = dev->host_mem_descs_dma; 1932f66e2804SChaitanya Kulkarni struct nvme_command c = { }; 193387ad72a5SChristoph Hellwig int ret; 193487ad72a5SChristoph Hellwig 193587ad72a5SChristoph Hellwig c.features.opcode = nvme_admin_set_features; 193687ad72a5SChristoph Hellwig c.features.fid = cpu_to_le32(NVME_FEAT_HOST_MEM_BUF); 193787ad72a5SChristoph Hellwig c.features.dword11 = cpu_to_le32(bits); 19386c3c05b0SChaitanya Kulkarni c.features.dword12 = cpu_to_le32(host_mem_size); 193987ad72a5SChristoph Hellwig c.features.dword13 = cpu_to_le32(lower_32_bits(dma_addr)); 194087ad72a5SChristoph Hellwig c.features.dword14 = cpu_to_le32(upper_32_bits(dma_addr)); 194187ad72a5SChristoph Hellwig c.features.dword15 = cpu_to_le32(dev->nr_host_mem_descs); 194287ad72a5SChristoph Hellwig 194387ad72a5SChristoph Hellwig ret = nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0); 194487ad72a5SChristoph Hellwig if (ret) { 194587ad72a5SChristoph Hellwig dev_warn(dev->ctrl.device, 194687ad72a5SChristoph Hellwig "failed to set host mem (err %d, flags %#x).\n", 194787ad72a5SChristoph Hellwig ret, bits); 1948a5df5e79SKeith Busch } else 1949a5df5e79SKeith Busch dev->hmb = bits & NVME_HOST_MEM_ENABLE; 1950a5df5e79SKeith Busch 195187ad72a5SChristoph Hellwig return ret; 195287ad72a5SChristoph Hellwig } 195387ad72a5SChristoph Hellwig 195487ad72a5SChristoph Hellwig static void nvme_free_host_mem(struct nvme_dev *dev) 195587ad72a5SChristoph Hellwig { 195687ad72a5SChristoph Hellwig int i; 195787ad72a5SChristoph Hellwig 195887ad72a5SChristoph Hellwig for (i = 0; i < dev->nr_host_mem_descs; i++) { 195987ad72a5SChristoph Hellwig struct nvme_host_mem_buf_desc *desc = &dev->host_mem_descs[i]; 19606c3c05b0SChaitanya Kulkarni size_t size = le32_to_cpu(desc->size) * NVME_CTRL_PAGE_SIZE; 196187ad72a5SChristoph Hellwig 1962cc667f6dSLiviu Dudau dma_free_attrs(dev->dev, size, dev->host_mem_desc_bufs[i], 1963cc667f6dSLiviu Dudau le64_to_cpu(desc->addr), 1964cc667f6dSLiviu Dudau DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN); 196587ad72a5SChristoph Hellwig } 196687ad72a5SChristoph Hellwig 196787ad72a5SChristoph Hellwig kfree(dev->host_mem_desc_bufs); 196887ad72a5SChristoph Hellwig dev->host_mem_desc_bufs = NULL; 19694033f35dSChristoph Hellwig dma_free_coherent(dev->dev, 19704033f35dSChristoph Hellwig dev->nr_host_mem_descs * 
sizeof(*dev->host_mem_descs), 19714033f35dSChristoph Hellwig dev->host_mem_descs, dev->host_mem_descs_dma); 197287ad72a5SChristoph Hellwig dev->host_mem_descs = NULL; 19737e5dd57eSMinwoo Im dev->nr_host_mem_descs = 0; 197487ad72a5SChristoph Hellwig } 197587ad72a5SChristoph Hellwig 197692dc6895SChristoph Hellwig static int __nvme_alloc_host_mem(struct nvme_dev *dev, u64 preferred, 197792dc6895SChristoph Hellwig u32 chunk_size) 197887ad72a5SChristoph Hellwig { 197987ad72a5SChristoph Hellwig struct nvme_host_mem_buf_desc *descs; 198092dc6895SChristoph Hellwig u32 max_entries, len; 19814033f35dSChristoph Hellwig dma_addr_t descs_dma; 19822ee0e4edSDan Carpenter int i = 0; 198387ad72a5SChristoph Hellwig void **bufs; 19846fbcde66SMinwoo Im u64 size, tmp; 198587ad72a5SChristoph Hellwig 198687ad72a5SChristoph Hellwig tmp = (preferred + chunk_size - 1); 198787ad72a5SChristoph Hellwig do_div(tmp, chunk_size); 198887ad72a5SChristoph Hellwig max_entries = tmp; 1989044a9df1SChristoph Hellwig 1990044a9df1SChristoph Hellwig if (dev->ctrl.hmmaxd && dev->ctrl.hmmaxd < max_entries) 1991044a9df1SChristoph Hellwig max_entries = dev->ctrl.hmmaxd; 1992044a9df1SChristoph Hellwig 1993750afb08SLuis Chamberlain descs = dma_alloc_coherent(dev->dev, max_entries * sizeof(*descs), 19944033f35dSChristoph Hellwig &descs_dma, GFP_KERNEL); 199587ad72a5SChristoph Hellwig if (!descs) 199687ad72a5SChristoph Hellwig goto out; 199787ad72a5SChristoph Hellwig 199887ad72a5SChristoph Hellwig bufs = kcalloc(max_entries, sizeof(*bufs), GFP_KERNEL); 199987ad72a5SChristoph Hellwig if (!bufs) 200087ad72a5SChristoph Hellwig goto out_free_descs; 200187ad72a5SChristoph Hellwig 2002244a8fe4SMinwoo Im for (size = 0; size < preferred && i < max_entries; size += len) { 200387ad72a5SChristoph Hellwig dma_addr_t dma_addr; 200487ad72a5SChristoph Hellwig 200550cdb7c6SChristoph Hellwig len = min_t(u64, chunk_size, preferred - size); 200687ad72a5SChristoph Hellwig bufs[i] = dma_alloc_attrs(dev->dev, len, &dma_addr, GFP_KERNEL, 200787ad72a5SChristoph Hellwig DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN); 200887ad72a5SChristoph Hellwig if (!bufs[i]) 200987ad72a5SChristoph Hellwig break; 201087ad72a5SChristoph Hellwig 201187ad72a5SChristoph Hellwig descs[i].addr = cpu_to_le64(dma_addr); 20126c3c05b0SChaitanya Kulkarni descs[i].size = cpu_to_le32(len / NVME_CTRL_PAGE_SIZE); 201387ad72a5SChristoph Hellwig i++; 201487ad72a5SChristoph Hellwig } 201587ad72a5SChristoph Hellwig 201692dc6895SChristoph Hellwig if (!size) 201787ad72a5SChristoph Hellwig goto out_free_bufs; 201887ad72a5SChristoph Hellwig 201987ad72a5SChristoph Hellwig dev->nr_host_mem_descs = i; 202087ad72a5SChristoph Hellwig dev->host_mem_size = size; 202187ad72a5SChristoph Hellwig dev->host_mem_descs = descs; 20224033f35dSChristoph Hellwig dev->host_mem_descs_dma = descs_dma; 202387ad72a5SChristoph Hellwig dev->host_mem_desc_bufs = bufs; 202487ad72a5SChristoph Hellwig return 0; 202587ad72a5SChristoph Hellwig 202687ad72a5SChristoph Hellwig out_free_bufs: 202787ad72a5SChristoph Hellwig while (--i >= 0) { 20286c3c05b0SChaitanya Kulkarni size_t size = le32_to_cpu(descs[i].size) * NVME_CTRL_PAGE_SIZE; 202987ad72a5SChristoph Hellwig 2030cc667f6dSLiviu Dudau dma_free_attrs(dev->dev, size, bufs[i], 2031cc667f6dSLiviu Dudau le64_to_cpu(descs[i].addr), 2032cc667f6dSLiviu Dudau DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN); 203387ad72a5SChristoph Hellwig } 203487ad72a5SChristoph Hellwig 203587ad72a5SChristoph Hellwig kfree(bufs); 203687ad72a5SChristoph Hellwig out_free_descs: 20374033f35dSChristoph 
Hellwig dma_free_coherent(dev->dev, max_entries * sizeof(*descs), descs, 20384033f35dSChristoph Hellwig descs_dma); 203987ad72a5SChristoph Hellwig out: 204087ad72a5SChristoph Hellwig dev->host_mem_descs = NULL; 204187ad72a5SChristoph Hellwig return -ENOMEM; 204287ad72a5SChristoph Hellwig } 204387ad72a5SChristoph Hellwig 204492dc6895SChristoph Hellwig static int nvme_alloc_host_mem(struct nvme_dev *dev, u64 min, u64 preferred) 204592dc6895SChristoph Hellwig { 20469dc54a0dSChaitanya Kulkarni u64 min_chunk = min_t(u64, preferred, PAGE_SIZE * MAX_ORDER_NR_PAGES); 20479dc54a0dSChaitanya Kulkarni u64 hmminds = max_t(u32, dev->ctrl.hmminds * 4096, PAGE_SIZE * 2); 20489dc54a0dSChaitanya Kulkarni u64 chunk_size; 204992dc6895SChristoph Hellwig 205092dc6895SChristoph Hellwig /* start big and work our way down */ 20519dc54a0dSChaitanya Kulkarni for (chunk_size = min_chunk; chunk_size >= hmminds; chunk_size /= 2) { 205292dc6895SChristoph Hellwig if (!__nvme_alloc_host_mem(dev, preferred, chunk_size)) { 205392dc6895SChristoph Hellwig if (!min || dev->host_mem_size >= min) 205492dc6895SChristoph Hellwig return 0; 205592dc6895SChristoph Hellwig nvme_free_host_mem(dev); 205692dc6895SChristoph Hellwig } 205792dc6895SChristoph Hellwig } 205892dc6895SChristoph Hellwig 205992dc6895SChristoph Hellwig return -ENOMEM; 206092dc6895SChristoph Hellwig } 206192dc6895SChristoph Hellwig 20629620cfbaSChristoph Hellwig static int nvme_setup_host_mem(struct nvme_dev *dev) 206387ad72a5SChristoph Hellwig { 206487ad72a5SChristoph Hellwig u64 max = (u64)max_host_mem_size_mb * SZ_1M; 206587ad72a5SChristoph Hellwig u64 preferred = (u64)dev->ctrl.hmpre * 4096; 206687ad72a5SChristoph Hellwig u64 min = (u64)dev->ctrl.hmmin * 4096; 206787ad72a5SChristoph Hellwig u32 enable_bits = NVME_HOST_MEM_ENABLE; 20686fbcde66SMinwoo Im int ret; 206987ad72a5SChristoph Hellwig 2070acb71e53SChristoph Hellwig if (!dev->ctrl.hmpre) 2071acb71e53SChristoph Hellwig return 0; 2072acb71e53SChristoph Hellwig 207387ad72a5SChristoph Hellwig preferred = min(preferred, max); 207487ad72a5SChristoph Hellwig if (min > max) { 207587ad72a5SChristoph Hellwig dev_warn(dev->ctrl.device, 207687ad72a5SChristoph Hellwig "min host memory (%lld MiB) above limit (%d MiB).\n", 207787ad72a5SChristoph Hellwig min >> ilog2(SZ_1M), max_host_mem_size_mb); 207887ad72a5SChristoph Hellwig nvme_free_host_mem(dev); 20799620cfbaSChristoph Hellwig return 0; 208087ad72a5SChristoph Hellwig } 208187ad72a5SChristoph Hellwig 208287ad72a5SChristoph Hellwig /* 208387ad72a5SChristoph Hellwig * If we already have a buffer allocated check if we can reuse it. 
208487ad72a5SChristoph Hellwig */ 208587ad72a5SChristoph Hellwig if (dev->host_mem_descs) { 208687ad72a5SChristoph Hellwig if (dev->host_mem_size >= min) 208787ad72a5SChristoph Hellwig enable_bits |= NVME_HOST_MEM_RETURN; 208887ad72a5SChristoph Hellwig else 208987ad72a5SChristoph Hellwig nvme_free_host_mem(dev); 209087ad72a5SChristoph Hellwig } 209187ad72a5SChristoph Hellwig 209287ad72a5SChristoph Hellwig if (!dev->host_mem_descs) { 209392dc6895SChristoph Hellwig if (nvme_alloc_host_mem(dev, min, preferred)) { 209492dc6895SChristoph Hellwig dev_warn(dev->ctrl.device, 209592dc6895SChristoph Hellwig "failed to allocate host memory buffer.\n"); 20969620cfbaSChristoph Hellwig return 0; /* controller must work without HMB */ 209787ad72a5SChristoph Hellwig } 209887ad72a5SChristoph Hellwig 209992dc6895SChristoph Hellwig dev_info(dev->ctrl.device, 210092dc6895SChristoph Hellwig "allocated %lld MiB host memory buffer.\n", 210192dc6895SChristoph Hellwig dev->host_mem_size >> ilog2(SZ_1M)); 210292dc6895SChristoph Hellwig } 210392dc6895SChristoph Hellwig 21049620cfbaSChristoph Hellwig ret = nvme_set_host_mem(dev, enable_bits); 21059620cfbaSChristoph Hellwig if (ret) 210687ad72a5SChristoph Hellwig nvme_free_host_mem(dev); 21079620cfbaSChristoph Hellwig return ret; 210857dacad5SJay Sternberg } 210957dacad5SJay Sternberg 21100521905eSKeith Busch static ssize_t cmb_show(struct device *dev, struct device_attribute *attr, 21110521905eSKeith Busch char *buf) 21120521905eSKeith Busch { 21130521905eSKeith Busch struct nvme_dev *ndev = to_nvme_dev(dev_get_drvdata(dev)); 21140521905eSKeith Busch 21150521905eSKeith Busch return sysfs_emit(buf, "cmbloc : x%08x\ncmbsz : x%08x\n", 21160521905eSKeith Busch ndev->cmbloc, ndev->cmbsz); 21170521905eSKeith Busch } 21180521905eSKeith Busch static DEVICE_ATTR_RO(cmb); 21190521905eSKeith Busch 21201751e97aSKeith Busch static ssize_t cmbloc_show(struct device *dev, struct device_attribute *attr, 21211751e97aSKeith Busch char *buf) 21221751e97aSKeith Busch { 21231751e97aSKeith Busch struct nvme_dev *ndev = to_nvme_dev(dev_get_drvdata(dev)); 21241751e97aSKeith Busch 21251751e97aSKeith Busch return sysfs_emit(buf, "%u\n", ndev->cmbloc); 21261751e97aSKeith Busch } 21271751e97aSKeith Busch static DEVICE_ATTR_RO(cmbloc); 21281751e97aSKeith Busch 21291751e97aSKeith Busch static ssize_t cmbsz_show(struct device *dev, struct device_attribute *attr, 21301751e97aSKeith Busch char *buf) 21311751e97aSKeith Busch { 21321751e97aSKeith Busch struct nvme_dev *ndev = to_nvme_dev(dev_get_drvdata(dev)); 21331751e97aSKeith Busch 21341751e97aSKeith Busch return sysfs_emit(buf, "%u\n", ndev->cmbsz); 21351751e97aSKeith Busch } 21361751e97aSKeith Busch static DEVICE_ATTR_RO(cmbsz); 21371751e97aSKeith Busch 2138a5df5e79SKeith Busch static ssize_t hmb_show(struct device *dev, struct device_attribute *attr, 2139a5df5e79SKeith Busch char *buf) 2140a5df5e79SKeith Busch { 2141a5df5e79SKeith Busch struct nvme_dev *ndev = to_nvme_dev(dev_get_drvdata(dev)); 2142a5df5e79SKeith Busch 2143a5df5e79SKeith Busch return sysfs_emit(buf, "%d\n", ndev->hmb); 2144a5df5e79SKeith Busch } 2145a5df5e79SKeith Busch 2146a5df5e79SKeith Busch static ssize_t hmb_store(struct device *dev, struct device_attribute *attr, 2147a5df5e79SKeith Busch const char *buf, size_t count) 2148a5df5e79SKeith Busch { 2149a5df5e79SKeith Busch struct nvme_dev *ndev = to_nvme_dev(dev_get_drvdata(dev)); 2150a5df5e79SKeith Busch bool new; 2151a5df5e79SKeith Busch int ret; 2152a5df5e79SKeith Busch 215399722c8aSChristophe JAILLET if (kstrtobool(buf, 
&new) < 0) 2154a5df5e79SKeith Busch return -EINVAL; 2155a5df5e79SKeith Busch 2156a5df5e79SKeith Busch if (new == ndev->hmb) 2157a5df5e79SKeith Busch return count; 2158a5df5e79SKeith Busch 2159a5df5e79SKeith Busch if (new) { 2160a5df5e79SKeith Busch ret = nvme_setup_host_mem(ndev); 2161a5df5e79SKeith Busch } else { 2162a5df5e79SKeith Busch ret = nvme_set_host_mem(ndev, 0); 2163a5df5e79SKeith Busch if (!ret) 2164a5df5e79SKeith Busch nvme_free_host_mem(ndev); 2165a5df5e79SKeith Busch } 2166a5df5e79SKeith Busch 2167a5df5e79SKeith Busch if (ret < 0) 2168a5df5e79SKeith Busch return ret; 2169a5df5e79SKeith Busch 2170a5df5e79SKeith Busch return count; 2171a5df5e79SKeith Busch } 2172a5df5e79SKeith Busch static DEVICE_ATTR_RW(hmb); 2173a5df5e79SKeith Busch 21740521905eSKeith Busch static umode_t nvme_pci_attrs_are_visible(struct kobject *kobj, 21750521905eSKeith Busch struct attribute *a, int n) 21760521905eSKeith Busch { 21770521905eSKeith Busch struct nvme_ctrl *ctrl = 21780521905eSKeith Busch dev_get_drvdata(container_of(kobj, struct device, kobj)); 21790521905eSKeith Busch struct nvme_dev *dev = to_nvme_dev(ctrl); 21800521905eSKeith Busch 21811751e97aSKeith Busch if (a == &dev_attr_cmb.attr || 21821751e97aSKeith Busch a == &dev_attr_cmbloc.attr || 21831751e97aSKeith Busch a == &dev_attr_cmbsz.attr) { 21841751e97aSKeith Busch if (!dev->cmbsz) 21850521905eSKeith Busch return 0; 21861751e97aSKeith Busch } 2187a5df5e79SKeith Busch if (a == &dev_attr_hmb.attr && !ctrl->hmpre) 2188a5df5e79SKeith Busch return 0; 2189a5df5e79SKeith Busch 21900521905eSKeith Busch return a->mode; 21910521905eSKeith Busch } 21920521905eSKeith Busch 21930521905eSKeith Busch static struct attribute *nvme_pci_attrs[] = { 21940521905eSKeith Busch &dev_attr_cmb.attr, 21951751e97aSKeith Busch &dev_attr_cmbloc.attr, 21961751e97aSKeith Busch &dev_attr_cmbsz.attr, 2197a5df5e79SKeith Busch &dev_attr_hmb.attr, 21980521905eSKeith Busch NULL, 21990521905eSKeith Busch }; 22000521905eSKeith Busch 220186adbf0cSChristoph Hellwig static const struct attribute_group nvme_pci_dev_attrs_group = { 22020521905eSKeith Busch .attrs = nvme_pci_attrs, 22030521905eSKeith Busch .is_visible = nvme_pci_attrs_are_visible, 22040521905eSKeith Busch }; 22050521905eSKeith Busch 220686adbf0cSChristoph Hellwig static const struct attribute_group *nvme_pci_dev_attr_groups[] = { 220786adbf0cSChristoph Hellwig &nvme_dev_attrs_group, 220886adbf0cSChristoph Hellwig &nvme_pci_dev_attrs_group, 220986adbf0cSChristoph Hellwig NULL, 221086adbf0cSChristoph Hellwig }; 221186adbf0cSChristoph Hellwig 2212612b7286SMing Lei /* 2213612b7286SMing Lei * nirqs is the number of interrupts available for write and read 2214612b7286SMing Lei * queues. The core already reserved an interrupt for the admin queue. 2215612b7286SMing Lei */ 2216612b7286SMing Lei static void nvme_calc_irq_sets(struct irq_affinity *affd, unsigned int nrirqs) 22173b6592f7SJens Axboe { 2218612b7286SMing Lei struct nvme_dev *dev = affd->priv; 22192a5bcfddSWeiping Zhang unsigned int nr_read_queues, nr_write_queues = dev->nr_write_queues; 2220c45b1fa2SMing Lei 22213b6592f7SJens Axboe /* 2222ee0d96d3SBaolin Wang * If there is no interrupt available for queues, ensure that 2223612b7286SMing Lei * the default queue is set to 1. The affinity set size is 2224612b7286SMing Lei * also set to one, but the irq core ignores it for this case. 2225612b7286SMing Lei * 2226612b7286SMing Lei * If only one interrupt is available or 'write_queue' == 0, combine 2227612b7286SMing Lei * write and read queues. 
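	 *
	 * (Editor's illustration, not part of the original comment: with
	 * nrirqs == 8 and nr_write_queues == 3 this yields 3 default and
	 * 5 read queues; with nr_write_queues == 0 all 8 vectors serve
	 * the default set.)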
2228612b7286SMing Lei * 2229612b7286SMing Lei * If 'write_queues' > 0, ensure it leaves room for at least one read 2230612b7286SMing Lei * queue. 22313b6592f7SJens Axboe */ 2232612b7286SMing Lei if (!nrirqs) { 2233612b7286SMing Lei nrirqs = 1; 2234612b7286SMing Lei nr_read_queues = 0; 22352a5bcfddSWeiping Zhang } else if (nrirqs == 1 || !nr_write_queues) { 2236612b7286SMing Lei nr_read_queues = 0; 22372a5bcfddSWeiping Zhang } else if (nr_write_queues >= nrirqs) { 2238612b7286SMing Lei nr_read_queues = 1; 22393b6592f7SJens Axboe } else { 22402a5bcfddSWeiping Zhang nr_read_queues = nrirqs - nr_write_queues; 22413b6592f7SJens Axboe } 2242612b7286SMing Lei 2243612b7286SMing Lei dev->io_queues[HCTX_TYPE_DEFAULT] = nrirqs - nr_read_queues; 2244612b7286SMing Lei affd->set_size[HCTX_TYPE_DEFAULT] = nrirqs - nr_read_queues; 2245612b7286SMing Lei dev->io_queues[HCTX_TYPE_READ] = nr_read_queues; 2246612b7286SMing Lei affd->set_size[HCTX_TYPE_READ] = nr_read_queues; 2247612b7286SMing Lei affd->nr_sets = nr_read_queues ? 2 : 1; 22483b6592f7SJens Axboe } 22493b6592f7SJens Axboe 22506451fe73SJens Axboe static int nvme_setup_irqs(struct nvme_dev *dev, unsigned int nr_io_queues) 22513b6592f7SJens Axboe { 22523b6592f7SJens Axboe struct pci_dev *pdev = to_pci_dev(dev->dev); 22533b6592f7SJens Axboe struct irq_affinity affd = { 22543b6592f7SJens Axboe .pre_vectors = 1, 2255612b7286SMing Lei .calc_sets = nvme_calc_irq_sets, 2256612b7286SMing Lei .priv = dev, 22573b6592f7SJens Axboe }; 225821cc2f3fSJeffle Xu unsigned int irq_queues, poll_queues; 22596451fe73SJens Axboe 22606451fe73SJens Axboe /* 226121cc2f3fSJeffle Xu * Poll queues don't need interrupts, but we need at least one I/O queue 226221cc2f3fSJeffle Xu * left over for non-polled I/O. 22636451fe73SJens Axboe */ 226421cc2f3fSJeffle Xu poll_queues = min(dev->nr_poll_queues, nr_io_queues - 1); 226521cc2f3fSJeffle Xu dev->io_queues[HCTX_TYPE_POLL] = poll_queues; 22663b6592f7SJens Axboe 226721cc2f3fSJeffle Xu /* 226821cc2f3fSJeffle Xu * Initialize for the single interrupt case, will be updated in 226921cc2f3fSJeffle Xu * nvme_calc_irq_sets(). 227021cc2f3fSJeffle Xu */ 2271612b7286SMing Lei dev->io_queues[HCTX_TYPE_DEFAULT] = 1; 2272612b7286SMing Lei dev->io_queues[HCTX_TYPE_READ] = 0; 22733b6592f7SJens Axboe 227466341331SBenjamin Herrenschmidt /* 227521cc2f3fSJeffle Xu * We need interrupts for the admin queue and each non-polled I/O queue, 227621cc2f3fSJeffle Xu * but some Apple controllers require all queues to use the first 227721cc2f3fSJeffle Xu * vector. 227866341331SBenjamin Herrenschmidt */ 227966341331SBenjamin Herrenschmidt irq_queues = 1; 228021cc2f3fSJeffle Xu if (!(dev->ctrl.quirks & NVME_QUIRK_SINGLE_VECTOR)) 228121cc2f3fSJeffle Xu irq_queues += (nr_io_queues - poll_queues); 2282612b7286SMing Lei return pci_alloc_irq_vectors_affinity(pdev, 1, irq_queues, 22833b6592f7SJens Axboe PCI_IRQ_ALL_TYPES | PCI_IRQ_AFFINITY, &affd); 22843b6592f7SJens Axboe } 22853b6592f7SJens Axboe 22862a5bcfddSWeiping Zhang static unsigned int nvme_max_io_queues(struct nvme_dev *dev) 22872a5bcfddSWeiping Zhang { 2288e3aef095SNiklas Schnelle /* 2289e3aef095SNiklas Schnelle * If tags are shared with admin queue (Apple bug), then 2290e3aef095SNiklas Schnelle * make sure we only use one IO queue. 
2291e3aef095SNiklas Schnelle */ 2292e3aef095SNiklas Schnelle if (dev->ctrl.quirks & NVME_QUIRK_SHARED_TAGS) 2293e3aef095SNiklas Schnelle return 1; 22942a5bcfddSWeiping Zhang return num_possible_cpus() + dev->nr_write_queues + dev->nr_poll_queues; 22952a5bcfddSWeiping Zhang } 22962a5bcfddSWeiping Zhang 229757dacad5SJay Sternberg static int nvme_setup_io_queues(struct nvme_dev *dev) 229857dacad5SJay Sternberg { 2299147b27e4SSagi Grimberg struct nvme_queue *adminq = &dev->queues[0]; 230057dacad5SJay Sternberg struct pci_dev *pdev = to_pci_dev(dev->dev); 23012a5bcfddSWeiping Zhang unsigned int nr_io_queues; 230297f6ef64SXu Yu unsigned long size; 23032a5bcfddSWeiping Zhang int result; 230457dacad5SJay Sternberg 23052a5bcfddSWeiping Zhang /* 23062a5bcfddSWeiping Zhang * Sample the module parameters once at reset time so that we have 23072a5bcfddSWeiping Zhang * stable values to work with. 23082a5bcfddSWeiping Zhang */ 23092a5bcfddSWeiping Zhang dev->nr_write_queues = write_queues; 23102a5bcfddSWeiping Zhang dev->nr_poll_queues = poll_queues; 2311d38e9f04SBenjamin Herrenschmidt 2312ff4e5fbaSNiklas Schnelle nr_io_queues = dev->nr_allocated_queues - 1; 23139a0be7abSChristoph Hellwig result = nvme_set_queue_count(&dev->ctrl, &nr_io_queues); 23149a0be7abSChristoph Hellwig if (result < 0) 231557dacad5SJay Sternberg return result; 23169a0be7abSChristoph Hellwig 2317f5fa90dcSChristoph Hellwig if (nr_io_queues == 0) 2318a5229050SKeith Busch return 0; 231957dacad5SJay Sternberg 2320e4b9852aSCasey Chen /* 2321e4b9852aSCasey Chen * Free IRQ resources as soon as the NVMEQ_ENABLED bit transitions 2322e4b9852aSCasey Chen * from set to unset. If there is a window before it is truly freed, 2323e4b9852aSCasey Chen * pci_free_irq_vectors() jumping into this window will crash. 2324e4b9852aSCasey Chen * Take the lock to avoid racing with pci_free_irq_vectors() in 2325e4b9852aSCasey Chen * the nvme_dev_disable() path. 
2326e4b9852aSCasey Chen */ 2327e4b9852aSCasey Chen result = nvme_setup_io_queues_trylock(dev); 2328e4b9852aSCasey Chen if (result) 2329e4b9852aSCasey Chen return result; 2330e4b9852aSCasey Chen if (test_and_clear_bit(NVMEQ_ENABLED, &adminq->flags)) 2331e4b9852aSCasey Chen pci_free_irq(pdev, 0, adminq); 23324e224106SChristoph Hellwig 23330f238ff5SLogan Gunthorpe if (dev->cmb_use_sqes) { 233457dacad5SJay Sternberg result = nvme_cmb_qdepth(dev, nr_io_queues, 233557dacad5SJay Sternberg sizeof(struct nvme_command)); 233688d356caSChristoph Hellwig if (result > 0) { 233757dacad5SJay Sternberg dev->q_depth = result; 233888d356caSChristoph Hellwig dev->ctrl.sqsize = result - 1; 233988d356caSChristoph Hellwig } else { 23400f238ff5SLogan Gunthorpe dev->cmb_use_sqes = false; 234157dacad5SJay Sternberg } 234288d356caSChristoph Hellwig } 234357dacad5SJay Sternberg 234457dacad5SJay Sternberg do { 234597f6ef64SXu Yu size = db_bar_size(dev, nr_io_queues); 234697f6ef64SXu Yu result = nvme_remap_bar(dev, size); 234797f6ef64SXu Yu if (!result) 234857dacad5SJay Sternberg break; 2349e4b9852aSCasey Chen if (!--nr_io_queues) { 2350e4b9852aSCasey Chen result = -ENOMEM; 2351e4b9852aSCasey Chen goto out_unlock; 2352e4b9852aSCasey Chen } 235357dacad5SJay Sternberg } while (1); 235457dacad5SJay Sternberg adminq->q_db = dev->dbs; 235557dacad5SJay Sternberg 23568fae268bSKeith Busch retry: 235757dacad5SJay Sternberg /* Deregister the admin queue's interrupt */ 2358e4b9852aSCasey Chen if (test_and_clear_bit(NVMEQ_ENABLED, &adminq->flags)) 23590ff199cbSChristoph Hellwig pci_free_irq(pdev, 0, adminq); 236057dacad5SJay Sternberg 236157dacad5SJay Sternberg /* 236257dacad5SJay Sternberg * If we enabled MSI-X early because INTx is not available, disable it 236357dacad5SJay Sternberg * again before setting up the full range we need. 236457dacad5SJay Sternberg */ 2365dca51e78SChristoph Hellwig pci_free_irq_vectors(pdev); 23663b6592f7SJens Axboe 23673b6592f7SJens Axboe result = nvme_setup_irqs(dev, nr_io_queues); 2368e4b9852aSCasey Chen if (result <= 0) { 2369e4b9852aSCasey Chen result = -EIO; 2370e4b9852aSCasey Chen goto out_unlock; 2371e4b9852aSCasey Chen } 23723b6592f7SJens Axboe 237322b55601SKeith Busch dev->num_vecs = result; 23744b04cc6aSJens Axboe result = max(result - 1, 1); 2375e20ba6e1SChristoph Hellwig dev->max_qid = result + dev->io_queues[HCTX_TYPE_POLL]; 237657dacad5SJay Sternberg 237757dacad5SJay Sternberg /* 237857dacad5SJay Sternberg * Should investigate if there's a performance win from allocating 237957dacad5SJay Sternberg * more queues than interrupt vectors; it might allow the submission 238057dacad5SJay Sternberg * path to scale better, even if the receive path is limited by the 238157dacad5SJay Sternberg * number of interrupts. 
238257dacad5SJay Sternberg */ 2383dca51e78SChristoph Hellwig result = queue_request_irq(adminq); 23847c349ddeSKeith Busch if (result) 2385e4b9852aSCasey Chen goto out_unlock; 23864e224106SChristoph Hellwig set_bit(NVMEQ_ENABLED, &adminq->flags); 2387e4b9852aSCasey Chen mutex_unlock(&dev->shutdown_lock); 23888fae268bSKeith Busch 23898fae268bSKeith Busch result = nvme_create_io_queues(dev); 23908fae268bSKeith Busch if (result || dev->online_queues < 2) 23918fae268bSKeith Busch return result; 23928fae268bSKeith Busch 23938fae268bSKeith Busch if (dev->online_queues - 1 < dev->max_qid) { 23948fae268bSKeith Busch nr_io_queues = dev->online_queues - 1; 23957d879c90SChristoph Hellwig nvme_delete_io_queues(dev); 2396e4b9852aSCasey Chen result = nvme_setup_io_queues_trylock(dev); 2397e4b9852aSCasey Chen if (result) 2398e4b9852aSCasey Chen return result; 23998fae268bSKeith Busch nvme_suspend_io_queues(dev); 24008fae268bSKeith Busch goto retry; 24018fae268bSKeith Busch } 24028fae268bSKeith Busch dev_info(dev->ctrl.device, "%d/%d/%d default/read/poll queues\n", 24038fae268bSKeith Busch dev->io_queues[HCTX_TYPE_DEFAULT], 24048fae268bSKeith Busch dev->io_queues[HCTX_TYPE_READ], 24058fae268bSKeith Busch dev->io_queues[HCTX_TYPE_POLL]); 24068fae268bSKeith Busch return 0; 2407e4b9852aSCasey Chen out_unlock: 2408e4b9852aSCasey Chen mutex_unlock(&dev->shutdown_lock); 2409e4b9852aSCasey Chen return result; 241057dacad5SJay Sternberg } 241157dacad5SJay Sternberg 2412de671d61SJens Axboe static enum rq_end_io_ret nvme_del_queue_end(struct request *req, 2413de671d61SJens Axboe blk_status_t error) 2414db3cbfffSKeith Busch { 2415db3cbfffSKeith Busch struct nvme_queue *nvmeq = req->end_io_data; 2416db3cbfffSKeith Busch 2417db3cbfffSKeith Busch blk_mq_free_request(req); 2418d1ed6aa1SChristoph Hellwig complete(&nvmeq->delete_done); 2419de671d61SJens Axboe return RQ_END_IO_NONE; 2420db3cbfffSKeith Busch } 2421db3cbfffSKeith Busch 2422de671d61SJens Axboe static enum rq_end_io_ret nvme_del_cq_end(struct request *req, 2423de671d61SJens Axboe blk_status_t error) 2424db3cbfffSKeith Busch { 2425db3cbfffSKeith Busch struct nvme_queue *nvmeq = req->end_io_data; 2426db3cbfffSKeith Busch 2427d1ed6aa1SChristoph Hellwig if (error) 2428d1ed6aa1SChristoph Hellwig set_bit(NVMEQ_DELETE_ERROR, &nvmeq->flags); 2429db3cbfffSKeith Busch 2430de671d61SJens Axboe return nvme_del_queue_end(req, error); 2431db3cbfffSKeith Busch } 2432db3cbfffSKeith Busch 2433db3cbfffSKeith Busch static int nvme_delete_queue(struct nvme_queue *nvmeq, u8 opcode) 2434db3cbfffSKeith Busch { 2435db3cbfffSKeith Busch struct request_queue *q = nvmeq->dev->ctrl.admin_q; 2436db3cbfffSKeith Busch struct request *req; 2437f66e2804SChaitanya Kulkarni struct nvme_command cmd = { }; 2438db3cbfffSKeith Busch 2439db3cbfffSKeith Busch cmd.delete_queue.opcode = opcode; 2440db3cbfffSKeith Busch cmd.delete_queue.qid = cpu_to_le16(nvmeq->qid); 2441db3cbfffSKeith Busch 2442e559398fSChristoph Hellwig req = blk_mq_alloc_request(q, nvme_req_op(&cmd), BLK_MQ_REQ_NOWAIT); 2443db3cbfffSKeith Busch if (IS_ERR(req)) 2444db3cbfffSKeith Busch return PTR_ERR(req); 2445e559398fSChristoph Hellwig nvme_init_request(req, &cmd); 2446db3cbfffSKeith Busch 2447e2e53086SChristoph Hellwig if (opcode == nvme_admin_delete_cq) 2448e2e53086SChristoph Hellwig req->end_io = nvme_del_cq_end; 2449e2e53086SChristoph Hellwig else 2450e2e53086SChristoph Hellwig req->end_io = nvme_del_queue_end; 2451db3cbfffSKeith Busch req->end_io_data = nvmeq; 2452db3cbfffSKeith Busch 2453d1ed6aa1SChristoph Hellwig 
init_completion(&nvmeq->delete_done); 2454e2e53086SChristoph Hellwig blk_execute_rq_nowait(req, false); 2455db3cbfffSKeith Busch return 0; 2456db3cbfffSKeith Busch } 2457db3cbfffSKeith Busch 24587d879c90SChristoph Hellwig static bool __nvme_delete_io_queues(struct nvme_dev *dev, u8 opcode) 2459db3cbfffSKeith Busch { 24605271edd4SChristoph Hellwig int nr_queues = dev->online_queues - 1, sent = 0; 2461db3cbfffSKeith Busch unsigned long timeout; 2462db3cbfffSKeith Busch 2463db3cbfffSKeith Busch retry: 2464dc96f938SChaitanya Kulkarni timeout = NVME_ADMIN_TIMEOUT; 24655271edd4SChristoph Hellwig while (nr_queues > 0) { 24665271edd4SChristoph Hellwig if (nvme_delete_queue(&dev->queues[nr_queues], opcode)) 2467db3cbfffSKeith Busch break; 24685271edd4SChristoph Hellwig nr_queues--; 24695271edd4SChristoph Hellwig sent++; 24705271edd4SChristoph Hellwig } 2471d1ed6aa1SChristoph Hellwig while (sent) { 2472d1ed6aa1SChristoph Hellwig struct nvme_queue *nvmeq = &dev->queues[nr_queues + sent]; 2473d1ed6aa1SChristoph Hellwig 2474d1ed6aa1SChristoph Hellwig timeout = wait_for_completion_io_timeout(&nvmeq->delete_done, 24755271edd4SChristoph Hellwig timeout); 2476db3cbfffSKeith Busch if (timeout == 0) 24775271edd4SChristoph Hellwig return false; 2478d1ed6aa1SChristoph Hellwig 2479d1ed6aa1SChristoph Hellwig sent--; 24805271edd4SChristoph Hellwig if (nr_queues) 2481db3cbfffSKeith Busch goto retry; 2482db3cbfffSKeith Busch } 24835271edd4SChristoph Hellwig return true; 2484db3cbfffSKeith Busch } 2485db3cbfffSKeith Busch 24867d879c90SChristoph Hellwig static void nvme_delete_io_queues(struct nvme_dev *dev) 248757dacad5SJay Sternberg { 24887d879c90SChristoph Hellwig if (__nvme_delete_io_queues(dev, nvme_admin_delete_sq)) 24897d879c90SChristoph Hellwig __nvme_delete_io_queues(dev, nvme_admin_delete_cq); 24902b1b7e78SJianchao Wang } 24917d879c90SChristoph Hellwig 24920da7feaaSChristoph Hellwig static unsigned int nvme_pci_nr_maps(struct nvme_dev *dev) 249357dacad5SJay Sternberg { 249457dacad5SJay Sternberg if (dev->io_queues[HCTX_TYPE_POLL]) 24950da7feaaSChristoph Hellwig return 3; 24960da7feaaSChristoph Hellwig if (dev->io_queues[HCTX_TYPE_READ]) 24970da7feaaSChristoph Hellwig return 2; 24980da7feaaSChristoph Hellwig return 1; 249957dacad5SJay Sternberg } 2500949928c1SKeith Busch 25012455a4b7SChristoph Hellwig static void nvme_pci_update_nr_queues(struct nvme_dev *dev) 25022455a4b7SChristoph Hellwig { 25032455a4b7SChristoph Hellwig blk_mq_update_nr_hw_queues(&dev->tagset, dev->online_queues - 1); 25042455a4b7SChristoph Hellwig /* free previously allocated queues that are no longer usable */ 25052455a4b7SChristoph Hellwig nvme_free_queues(dev, dev->online_queues); 250657dacad5SJay Sternberg } 250757dacad5SJay Sternberg 2508b00a726aSKeith Busch static int nvme_pci_enable(struct nvme_dev *dev) 250957dacad5SJay Sternberg { 2510b00a726aSKeith Busch int result = -ENOMEM; 251157dacad5SJay Sternberg struct pci_dev *pdev = to_pci_dev(dev->dev); 25124bdf2603SFilippo Sironi int dma_address_bits = 64; 251357dacad5SJay Sternberg 251457dacad5SJay Sternberg if (pci_enable_device_mem(pdev)) 251557dacad5SJay Sternberg return result; 251657dacad5SJay Sternberg 251757dacad5SJay Sternberg pci_set_master(pdev); 251857dacad5SJay Sternberg 25194bdf2603SFilippo Sironi if (dev->ctrl.quirks & NVME_QUIRK_DMA_ADDRESS_BITS_48) 25204bdf2603SFilippo Sironi dma_address_bits = 48; 25214bdf2603SFilippo Sironi if (dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(dma_address_bits))) 252257dacad5SJay Sternberg goto disable; 252357dacad5SJay Sternberg 
25247a67cbeaSChristoph Hellwig if (readl(dev->bar + NVME_REG_CSTS) == -1) { 252557dacad5SJay Sternberg result = -ENODEV; 2526b00a726aSKeith Busch goto disable; 252757dacad5SJay Sternberg } 252857dacad5SJay Sternberg 252957dacad5SJay Sternberg /* 2530a5229050SKeith Busch * Some devices and/or platforms don't advertise or work with INTx 2531a5229050SKeith Busch * interrupts. Pre-enable a single MSIX or MSI vec for setup. We'll 2532a5229050SKeith Busch * adjust this later. 253357dacad5SJay Sternberg */ 2534dca51e78SChristoph Hellwig result = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES); 2535dca51e78SChristoph Hellwig if (result < 0) 253609113abfSTong Zhang goto disable; 253757dacad5SJay Sternberg 253820d0dfe6SSagi Grimberg dev->ctrl.cap = lo_hi_readq(dev->bar + NVME_REG_CAP); 25397a67cbeaSChristoph Hellwig 25407442ddceSJohn Garry dev->q_depth = min_t(u32, NVME_CAP_MQES(dev->ctrl.cap) + 1, 2541b27c1e68Sweiping zhang io_queue_depth); 254220d0dfe6SSagi Grimberg dev->db_stride = 1 << NVME_CAP_STRIDE(dev->ctrl.cap); 25437a67cbeaSChristoph Hellwig dev->dbs = dev->bar + 4096; 25441f390c1fSStephan Günther 25451f390c1fSStephan Günther /* 254666341331SBenjamin Herrenschmidt * Some Apple controllers require a non-standard SQE size. 254766341331SBenjamin Herrenschmidt * Interestingly they also seem to ignore the CC:IOSQES register 254866341331SBenjamin Herrenschmidt * so we don't bother updating it here. 254966341331SBenjamin Herrenschmidt */ 255066341331SBenjamin Herrenschmidt if (dev->ctrl.quirks & NVME_QUIRK_128_BYTES_SQES) 255166341331SBenjamin Herrenschmidt dev->io_sqes = 7; 255266341331SBenjamin Herrenschmidt else 2553c1e0cc7eSBenjamin Herrenschmidt dev->io_sqes = NVME_NVM_IOSQES; 25541f390c1fSStephan Günther 25551f390c1fSStephan Günther /* 25561f390c1fSStephan Günther * Temporary fix for the Apple controller found in the MacBook8,1 and 25571f390c1fSStephan Günther * some MacBook7,1 to avoid controller resets and data loss. 25581f390c1fSStephan Günther */ 25591f390c1fSStephan Günther if (pdev->vendor == PCI_VENDOR_ID_APPLE && pdev->device == 0x2001) { 25601f390c1fSStephan Günther dev->q_depth = 2; 25619bdcfb10SChristoph Hellwig dev_warn(dev->ctrl.device, "detected Apple NVMe controller, " 25629bdcfb10SChristoph Hellwig "set queue depth=%u to work around controller resets\n", 25631f390c1fSStephan Günther dev->q_depth); 2564d554b5e1SMartin K. Petersen } else if (pdev->vendor == PCI_VENDOR_ID_SAMSUNG && 2565d554b5e1SMartin K. Petersen (pdev->device == 0xa821 || pdev->device == 0xa822) && 256620d0dfe6SSagi Grimberg NVME_CAP_MQES(dev->ctrl.cap) == 0) { 2567d554b5e1SMartin K. Petersen dev->q_depth = 64; 2568d554b5e1SMartin K. Petersen dev_err(dev->ctrl.device, "detected PM1725 NVMe controller, " 2569d554b5e1SMartin K. 
Petersen "set queue depth=%u\n", dev->q_depth); 25701f390c1fSStephan Günther } 25711f390c1fSStephan Günther 2572d38e9f04SBenjamin Herrenschmidt /* 2573d38e9f04SBenjamin Herrenschmidt * Controllers with the shared tags quirk need the IO queue to be 2574d38e9f04SBenjamin Herrenschmidt * big enough so that we get 32 tags for the admin queue 2575d38e9f04SBenjamin Herrenschmidt */ 2576d38e9f04SBenjamin Herrenschmidt if ((dev->ctrl.quirks & NVME_QUIRK_SHARED_TAGS) && 2577d38e9f04SBenjamin Herrenschmidt (dev->q_depth < (NVME_AQ_DEPTH + 2))) { 2578d38e9f04SBenjamin Herrenschmidt dev->q_depth = NVME_AQ_DEPTH + 2; 2579d38e9f04SBenjamin Herrenschmidt dev_warn(dev->ctrl.device, "IO queue depth clamped to %d\n", 2580d38e9f04SBenjamin Herrenschmidt dev->q_depth); 2581d38e9f04SBenjamin Herrenschmidt } 258288d356caSChristoph Hellwig dev->ctrl.sqsize = dev->q_depth - 1; /* 0's based queue depth */ 2583d38e9f04SBenjamin Herrenschmidt 2584f65efd6dSChristoph Hellwig nvme_map_cmb(dev); 2585202021c1SStephen Bates 2586a0a3408eSKeith Busch pci_enable_pcie_error_reporting(pdev); 2587a0a3408eSKeith Busch pci_save_state(pdev); 2588a6ee7f19SChristoph Hellwig 258909113abfSTong Zhang result = nvme_pci_configure_admin_queue(dev); 259009113abfSTong Zhang if (result) 259109113abfSTong Zhang goto free_irq; 259209113abfSTong Zhang return result; 259357dacad5SJay Sternberg 259409113abfSTong Zhang free_irq: 259509113abfSTong Zhang pci_free_irq_vectors(pdev); 259657dacad5SJay Sternberg disable: 259757dacad5SJay Sternberg pci_disable_device(pdev); 259857dacad5SJay Sternberg return result; 259957dacad5SJay Sternberg } 260057dacad5SJay Sternberg 260157dacad5SJay Sternberg static void nvme_dev_unmap(struct nvme_dev *dev) 260257dacad5SJay Sternberg { 2603b00a726aSKeith Busch if (dev->bar) 2604b00a726aSKeith Busch iounmap(dev->bar); 2605a1f447b3SJohannes Thumshirn pci_release_mem_regions(to_pci_dev(dev->dev)); 2606b00a726aSKeith Busch } 2607b00a726aSKeith Busch 260868e81ebaSChristoph Hellwig static bool nvme_pci_ctrl_is_dead(struct nvme_dev *dev) 2609b00a726aSKeith Busch { 261057dacad5SJay Sternberg struct pci_dev *pdev = to_pci_dev(dev->dev); 2611081f5e75SKeith Busch u32 csts; 261257dacad5SJay Sternberg 261368e81ebaSChristoph Hellwig if (!pci_is_enabled(pdev) || !pci_device_is_present(pdev)) 261468e81ebaSChristoph Hellwig return true; 261568e81ebaSChristoph Hellwig if (pdev->error_state != pci_channel_io_normal) 261668e81ebaSChristoph Hellwig return true; 261757dacad5SJay Sternberg 261868e81ebaSChristoph Hellwig csts = readl(dev->bar + NVME_REG_CSTS); 261968e81ebaSChristoph Hellwig return (csts & NVME_CSTS_CFS) || !(csts & NVME_CSTS_RDY); 2620a0a3408eSKeith Busch } 262157dacad5SJay Sternberg 2622a5cdb68cSKeith Busch static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown) 262357dacad5SJay Sternberg { 2624302ad8ccSKeith Busch struct pci_dev *pdev = to_pci_dev(dev->dev); 262568e81ebaSChristoph Hellwig bool dead; 262657dacad5SJay Sternberg 262777bf25eaSKeith Busch mutex_lock(&dev->shutdown_lock); 262868e81ebaSChristoph Hellwig dead = nvme_pci_ctrl_is_dead(dev); 2629ebef7368SKeith Busch if (dev->ctrl.state == NVME_CTRL_LIVE || 2630e43269e6SKeith Busch dev->ctrl.state == NVME_CTRL_RESETTING) { 263168e81ebaSChristoph Hellwig if (pci_is_enabled(pdev)) 2632302ad8ccSKeith Busch nvme_start_freeze(&dev->ctrl); 2633302ad8ccSKeith Busch /* 263468e81ebaSChristoph Hellwig * Give the controller a chance to complete all entered requests 263568e81ebaSChristoph Hellwig * if doing a safe shutdown. 
2636302ad8ccSKeith Busch */ 263768e81ebaSChristoph Hellwig if (!dead && shutdown) 2638302ad8ccSKeith Busch nvme_wait_freeze_timeout(&dev->ctrl, NVME_IO_TIMEOUT); 263968e81ebaSChristoph Hellwig } 264087ad72a5SChristoph Hellwig 26419f27bd70SChristoph Hellwig nvme_quiesce_io_queues(&dev->ctrl); 26429a915a5bSJianchao Wang 264364ee0ac0SKeith Busch if (!dead && dev->ctrl.queue_count > 0) { 26447d879c90SChristoph Hellwig nvme_delete_io_queues(dev); 264547d42d22SChristoph Hellwig nvme_disable_ctrl(&dev->ctrl, shutdown); 264647d42d22SChristoph Hellwig nvme_poll_irqdisable(&dev->queues[0]); 264757dacad5SJay Sternberg } 26488fae268bSKeith Busch nvme_suspend_io_queues(dev); 264910981f23SChristoph Hellwig nvme_suspend_queue(dev, 0); 2650c80767f7SChristoph Hellwig pci_free_irq_vectors(pdev); 2651c80767f7SChristoph Hellwig if (pci_is_enabled(pdev)) { 2652c80767f7SChristoph Hellwig pci_disable_pcie_error_reporting(pdev); 2653c80767f7SChristoph Hellwig pci_disable_device(pdev); 2654c80767f7SChristoph Hellwig } 2655fa46c6fbSKeith Busch nvme_reap_pending_cqes(dev); 265657dacad5SJay Sternberg 26571fcfca78SGuixin Liu nvme_cancel_tagset(&dev->ctrl); 26581fcfca78SGuixin Liu nvme_cancel_admin_tagset(&dev->ctrl); 2659302ad8ccSKeith Busch 2660302ad8ccSKeith Busch /* 2661302ad8ccSKeith Busch * The driver will not be starting up queues again if shutting down so 2662302ad8ccSKeith Busch * must flush all entered requests to their failed completion to avoid 2663302ad8ccSKeith Busch * deadlocking blk-mq hot-cpu notifier. 2664302ad8ccSKeith Busch */ 2665c8e9e9b7SKeith Busch if (shutdown) { 26669f27bd70SChristoph Hellwig nvme_unquiesce_io_queues(&dev->ctrl); 2667c8e9e9b7SKeith Busch if (dev->ctrl.admin_q && !blk_queue_dying(dev->ctrl.admin_q)) 26689f27bd70SChristoph Hellwig nvme_unquiesce_admin_queue(&dev->ctrl); 2669c8e9e9b7SKeith Busch } 267077bf25eaSKeith Busch mutex_unlock(&dev->shutdown_lock); 267157dacad5SJay Sternberg } 267257dacad5SJay Sternberg 2673c1ac9a4bSKeith Busch static int nvme_disable_prepare_reset(struct nvme_dev *dev, bool shutdown) 2674c1ac9a4bSKeith Busch { 2675c1ac9a4bSKeith Busch if (!nvme_wait_reset(&dev->ctrl)) 2676c1ac9a4bSKeith Busch return -EBUSY; 2677c1ac9a4bSKeith Busch nvme_dev_disable(dev, shutdown); 2678c1ac9a4bSKeith Busch return 0; 2679c1ac9a4bSKeith Busch } 2680c1ac9a4bSKeith Busch 268157dacad5SJay Sternberg static int nvme_setup_prp_pools(struct nvme_dev *dev) 268257dacad5SJay Sternberg { 268357dacad5SJay Sternberg dev->prp_page_pool = dma_pool_create("prp list page", dev->dev, 2684c61b82c7SChristoph Hellwig NVME_CTRL_PAGE_SIZE, 2685c61b82c7SChristoph Hellwig NVME_CTRL_PAGE_SIZE, 0); 268657dacad5SJay Sternberg if (!dev->prp_page_pool) 268757dacad5SJay Sternberg return -ENOMEM; 268857dacad5SJay Sternberg 268957dacad5SJay Sternberg /* Optimisation for I/Os between 4k and 128k */ 269057dacad5SJay Sternberg dev->prp_small_pool = dma_pool_create("prp list 256", dev->dev, 269157dacad5SJay Sternberg 256, 256, 0); 269257dacad5SJay Sternberg if (!dev->prp_small_pool) { 269357dacad5SJay Sternberg dma_pool_destroy(dev->prp_page_pool); 269457dacad5SJay Sternberg return -ENOMEM; 269557dacad5SJay Sternberg } 269657dacad5SJay Sternberg return 0; 269757dacad5SJay Sternberg } 269857dacad5SJay Sternberg 269957dacad5SJay Sternberg static void nvme_release_prp_pools(struct nvme_dev *dev) 270057dacad5SJay Sternberg { 270157dacad5SJay Sternberg dma_pool_destroy(dev->prp_page_pool); 270257dacad5SJay Sternberg dma_pool_destroy(dev->prp_small_pool); 270357dacad5SJay Sternberg } 270457dacad5SJay Sternberg 
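/*
 * Editor's note: illustrative sketch only, not part of the driver and kept
 * under #if 0 so it can never compile.  It shows how the two DMA pools
 * created in nvme_setup_prp_pools() above would typically be used: PRP
 * descriptor lists small enough for the 256-byte pool come from
 * prp_small_pool, anything larger comes from the page-sized prp_page_pool.
 * The helper names and the GFP flag are assumptions made for illustration.
 */
#if 0
static __le64 *nvme_example_alloc_prp_list(struct nvme_dev *dev,
		unsigned int nr_entries, dma_addr_t *dma_addr,
		struct dma_pool **pool)
{
	/* a 256-byte pool element holds 256 / sizeof(__le64) == 32 entries */
	if (nr_entries <= 256 / sizeof(__le64))
		*pool = dev->prp_small_pool;
	else
		*pool = dev->prp_page_pool;

	return dma_pool_alloc(*pool, GFP_ATOMIC, dma_addr);
}

/* the matching release returns the element to the pool it came from */
static void nvme_example_free_prp_list(struct dma_pool *pool, __le64 *list,
		dma_addr_t dma_addr)
{
	dma_pool_free(pool, list, dma_addr);
}
#endif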
2705081a7d95SChristoph Hellwig static int nvme_pci_alloc_iod_mempool(struct nvme_dev *dev) 2706081a7d95SChristoph Hellwig { 2707081a7d95SChristoph Hellwig size_t npages = max(nvme_pci_npages_prp(), nvme_pci_npages_sgl()); 2708081a7d95SChristoph Hellwig size_t alloc_size = sizeof(__le64 *) * npages + 2709081a7d95SChristoph Hellwig sizeof(struct scatterlist) * NVME_MAX_SEGS; 2710081a7d95SChristoph Hellwig 2711081a7d95SChristoph Hellwig WARN_ON_ONCE(alloc_size > PAGE_SIZE); 2712081a7d95SChristoph Hellwig dev->iod_mempool = mempool_create_node(1, 2713081a7d95SChristoph Hellwig mempool_kmalloc, mempool_kfree, 2714081a7d95SChristoph Hellwig (void *)alloc_size, GFP_KERNEL, 2715081a7d95SChristoph Hellwig dev_to_node(dev->dev)); 2716081a7d95SChristoph Hellwig if (!dev->iod_mempool) 2717081a7d95SChristoph Hellwig return -ENOMEM; 2718081a7d95SChristoph Hellwig return 0; 2719081a7d95SChristoph Hellwig } 2720081a7d95SChristoph Hellwig 2721770597ecSKeith Busch static void nvme_free_tagset(struct nvme_dev *dev) 2722770597ecSKeith Busch { 2723770597ecSKeith Busch if (dev->tagset.tags) 27240da7feaaSChristoph Hellwig nvme_remove_io_tag_set(&dev->ctrl); 2725770597ecSKeith Busch dev->ctrl.tagset = NULL; 2726770597ecSKeith Busch } 2727770597ecSKeith Busch 27282e87570bSChristoph Hellwig /* pairs with nvme_pci_alloc_dev */ 27291673f1f0SChristoph Hellwig static void nvme_pci_free_ctrl(struct nvme_ctrl *ctrl) 273057dacad5SJay Sternberg { 27311673f1f0SChristoph Hellwig struct nvme_dev *dev = to_nvme_dev(ctrl); 273257dacad5SJay Sternberg 2733770597ecSKeith Busch nvme_free_tagset(dev); 2734253fd4acSIsrael Rukshin put_device(dev->dev); 2735253fd4acSIsrael Rukshin kfree(dev->queues); 273657dacad5SJay Sternberg kfree(dev); 273757dacad5SJay Sternberg } 273857dacad5SJay Sternberg 2739fd634f41SChristoph Hellwig static void nvme_reset_work(struct work_struct *work) 274057dacad5SJay Sternberg { 2741d86c4d8eSChristoph Hellwig struct nvme_dev *dev = 2742d86c4d8eSChristoph Hellwig container_of(work, struct nvme_dev, ctrl.reset_work); 2743a98e58e5SScott Bauer bool was_suspend = !!(dev->ctrl.ctrl_config & NVME_CC_SHN_NORMAL); 2744e71afda4SChaitanya Kulkarni int result; 274557dacad5SJay Sternberg 27467764656bSZhihao Cheng if (dev->ctrl.state != NVME_CTRL_RESETTING) { 27477764656bSZhihao Cheng dev_warn(dev->ctrl.device, "ctrl state %d is not RESETTING\n", 27487764656bSZhihao Cheng dev->ctrl.state); 27498cb9f10bSChristoph Hellwig return; 2750e71afda4SChaitanya Kulkarni } 2751fd634f41SChristoph Hellwig 2752fd634f41SChristoph Hellwig /* 2753fd634f41SChristoph Hellwig * If we're called to reset a live controller first shut it down before 2754fd634f41SChristoph Hellwig * moving on. 2755fd634f41SChristoph Hellwig */ 2756b00a726aSKeith Busch if (dev->ctrl.ctrl_config & NVME_CC_ENABLE) 2757a5cdb68cSKeith Busch nvme_dev_disable(dev, false); 2758d6135c3aSKeith Busch nvme_sync_queues(&dev->ctrl); 2759fd634f41SChristoph Hellwig 27605c959d73SKeith Busch mutex_lock(&dev->shutdown_lock); 2761b00a726aSKeith Busch result = nvme_pci_enable(dev); 276257dacad5SJay Sternberg if (result) 27634726bcf3SKeith Busch goto out_unlock; 27649f27bd70SChristoph Hellwig nvme_unquiesce_admin_queue(&dev->ctrl); 27655c959d73SKeith Busch mutex_unlock(&dev->shutdown_lock); 27665c959d73SKeith Busch 27675c959d73SKeith Busch /* 27685c959d73SKeith Busch * Introduce CONNECTING state from nvme-fc/rdma transports to mark the 27695c959d73SKeith Busch * initializing procedure here. 
27705c959d73SKeith Busch */ 27715c959d73SKeith Busch if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_CONNECTING)) { 27725c959d73SKeith Busch dev_warn(dev->ctrl.device, 27735c959d73SKeith Busch "failed to mark controller CONNECTING\n"); 2774cee6c269SMinwoo Im result = -EBUSY; 27755c959d73SKeith Busch goto out; 27765c959d73SKeith Busch } 2777943e942eSJens Axboe 277894cc781fSChristoph Hellwig result = nvme_init_ctrl_finish(&dev->ctrl, was_suspend); 2779ce4541f4SChristoph Hellwig if (result) 2780f58944e2SKeith Busch goto out; 2781ce4541f4SChristoph Hellwig 278265a54646SChristoph Hellwig nvme_dbbuf_dma_alloc(dev); 2783a98e58e5SScott Bauer 27849620cfbaSChristoph Hellwig result = nvme_setup_host_mem(dev); 27859620cfbaSChristoph Hellwig if (result < 0) 27869620cfbaSChristoph Hellwig goto out; 278787ad72a5SChristoph Hellwig 278857dacad5SJay Sternberg result = nvme_setup_io_queues(dev); 278957dacad5SJay Sternberg if (result) 2790f58944e2SKeith Busch goto out; 279157dacad5SJay Sternberg 279221f033f7SKeith Busch /* 27930ffc7e98SChristoph Hellwig * Freeze and update the number of I/O queues as those might have 2794eac3ef26SChristoph Hellwig * changed. If there are no I/O queues left after this reset, keep the 2795eac3ef26SChristoph Hellwig * controller around but remove all namespaces. 279657dacad5SJay Sternberg */ 27970ffc7e98SChristoph Hellwig if (dev->online_queues > 1) { 27989f27bd70SChristoph Hellwig nvme_unquiesce_io_queues(&dev->ctrl); 2799302ad8ccSKeith Busch nvme_wait_freeze(&dev->ctrl); 28002455a4b7SChristoph Hellwig nvme_pci_update_nr_queues(dev); 28012455a4b7SChristoph Hellwig nvme_dbbuf_set(dev); 2802302ad8ccSKeith Busch nvme_unfreeze(&dev->ctrl); 28030ffc7e98SChristoph Hellwig } else { 28040ffc7e98SChristoph Hellwig dev_warn(dev->ctrl.device, "IO queues lost\n"); 2805cd50f9b2SChristoph Hellwig nvme_mark_namespaces_dead(&dev->ctrl); 28069f27bd70SChristoph Hellwig nvme_unquiesce_io_queues(&dev->ctrl); 28070ffc7e98SChristoph Hellwig nvme_remove_namespaces(&dev->ctrl); 28080ffc7e98SChristoph Hellwig nvme_free_tagset(dev); 280957dacad5SJay Sternberg } 281057dacad5SJay Sternberg 28112b1b7e78SJianchao Wang /* 28122b1b7e78SJianchao Wang * If only the admin queue is live, keep it for further investigation or 28132b1b7e78SJianchao Wang * recovery. 28142b1b7e78SJianchao Wang */ 28155d02a5c1SKeith Busch if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_LIVE)) { 28162b1b7e78SJianchao Wang dev_warn(dev->ctrl.device, 28175d02a5c1SKeith Busch "failed to mark controller live state\n"); 2818e71afda4SChaitanya Kulkarni result = -ENODEV; 2819bb8d261eSChristoph Hellwig goto out; 2820bb8d261eSChristoph Hellwig } 282192911a55SChristoph Hellwig 2822d09f2b45SSagi Grimberg nvme_start_ctrl(&dev->ctrl); 282357dacad5SJay Sternberg return; 282457dacad5SJay Sternberg 28254726bcf3SKeith Busch out_unlock: 28264726bcf3SKeith Busch mutex_unlock(&dev->shutdown_lock); 282757dacad5SJay Sternberg out: 2828c7c16c5bSChristoph Hellwig /* 2829c7c16c5bSChristoph Hellwig * Set state to deleting now to avoid blocking nvme_wait_reset(), which 2830c7c16c5bSChristoph Hellwig * may be holding this pci_dev's device lock. 
2831c7c16c5bSChristoph Hellwig */ 2832c7c16c5bSChristoph Hellwig dev_warn(dev->ctrl.device, "Disabling device after reset failure: %d\n", 2833c7c16c5bSChristoph Hellwig result); 2834c7c16c5bSChristoph Hellwig nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING); 2835c7c16c5bSChristoph Hellwig nvme_dev_disable(dev, true); 2836c7c16c5bSChristoph Hellwig nvme_mark_namespaces_dead(&dev->ctrl); 2837c7c16c5bSChristoph Hellwig nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DEAD); 283857dacad5SJay Sternberg } 283957dacad5SJay Sternberg 28401c63dc66SChristoph Hellwig static int nvme_pci_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val) 284157dacad5SJay Sternberg { 28421c63dc66SChristoph Hellwig *val = readl(to_nvme_dev(ctrl)->bar + off); 28431c63dc66SChristoph Hellwig return 0; 284457dacad5SJay Sternberg } 28451c63dc66SChristoph Hellwig 28465fd4ce1bSChristoph Hellwig static int nvme_pci_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val) 28475fd4ce1bSChristoph Hellwig { 28485fd4ce1bSChristoph Hellwig writel(val, to_nvme_dev(ctrl)->bar + off); 28495fd4ce1bSChristoph Hellwig return 0; 28505fd4ce1bSChristoph Hellwig } 28515fd4ce1bSChristoph Hellwig 28527fd8930fSChristoph Hellwig static int nvme_pci_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val) 28537fd8930fSChristoph Hellwig { 28543a8ecc93SArd Biesheuvel *val = lo_hi_readq(to_nvme_dev(ctrl)->bar + off); 28557fd8930fSChristoph Hellwig return 0; 28567fd8930fSChristoph Hellwig } 28577fd8930fSChristoph Hellwig 285897c12223SKeith Busch static int nvme_pci_get_address(struct nvme_ctrl *ctrl, char *buf, int size) 285997c12223SKeith Busch { 286097c12223SKeith Busch struct pci_dev *pdev = to_pci_dev(to_nvme_dev(ctrl)->dev); 286197c12223SKeith Busch 28622db24e4aSMax Gurtovoy return snprintf(buf, size, "%s\n", dev_name(&pdev->dev)); 286397c12223SKeith Busch } 286497c12223SKeith Busch 28652f0dad17SKeith Busch static void nvme_pci_print_device_info(struct nvme_ctrl *ctrl) 28662f0dad17SKeith Busch { 28672f0dad17SKeith Busch struct pci_dev *pdev = to_pci_dev(to_nvme_dev(ctrl)->dev); 28682f0dad17SKeith Busch struct nvme_subsystem *subsys = ctrl->subsys; 28692f0dad17SKeith Busch 28702f0dad17SKeith Busch dev_err(ctrl->device, 28712f0dad17SKeith Busch "VID:DID %04x:%04x model:%.*s firmware:%.*s\n", 28722f0dad17SKeith Busch pdev->vendor, pdev->device, 28732f0dad17SKeith Busch nvme_strlen(subsys->model, sizeof(subsys->model)), 28742f0dad17SKeith Busch subsys->model, nvme_strlen(subsys->firmware_rev, 28752f0dad17SKeith Busch sizeof(subsys->firmware_rev)), 28762f0dad17SKeith Busch subsys->firmware_rev); 28772f0dad17SKeith Busch } 28782f0dad17SKeith Busch 28792f859441SLogan Gunthorpe static bool nvme_pci_supports_pci_p2pdma(struct nvme_ctrl *ctrl) 28802f859441SLogan Gunthorpe { 28812f859441SLogan Gunthorpe struct nvme_dev *dev = to_nvme_dev(ctrl); 28822f859441SLogan Gunthorpe 28832f859441SLogan Gunthorpe return dma_pci_p2pdma_supported(dev->dev); 28842f859441SLogan Gunthorpe } 28852f859441SLogan Gunthorpe 28861c63dc66SChristoph Hellwig static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = { 28871a353d85SMing Lin .name = "pcie", 2888e439bb12SSagi Grimberg .module = THIS_MODULE, 28892f859441SLogan Gunthorpe .flags = NVME_F_METADATA_SUPPORTED, 289086adbf0cSChristoph Hellwig .dev_attr_groups = nvme_pci_dev_attr_groups, 28911c63dc66SChristoph Hellwig .reg_read32 = nvme_pci_reg_read32, 28925fd4ce1bSChristoph Hellwig .reg_write32 = nvme_pci_reg_write32, 28937fd8930fSChristoph Hellwig .reg_read64 = nvme_pci_reg_read64, 28941673f1f0SChristoph Hellwig .free_ctrl = 
nvme_pci_free_ctrl, 2895f866fc42SChristoph Hellwig .submit_async_event = nvme_pci_submit_async_event, 289697c12223SKeith Busch .get_address = nvme_pci_get_address, 28972f0dad17SKeith Busch .print_device_info = nvme_pci_print_device_info, 28982f859441SLogan Gunthorpe .supports_pci_p2pdma = nvme_pci_supports_pci_p2pdma, 28991c63dc66SChristoph Hellwig }; 290057dacad5SJay Sternberg 2901b00a726aSKeith Busch static int nvme_dev_map(struct nvme_dev *dev) 2902b00a726aSKeith Busch { 2903b00a726aSKeith Busch struct pci_dev *pdev = to_pci_dev(dev->dev); 2904b00a726aSKeith Busch 2905a1f447b3SJohannes Thumshirn if (pci_request_mem_regions(pdev, "nvme")) 2906b00a726aSKeith Busch return -ENODEV; 2907b00a726aSKeith Busch 290897f6ef64SXu Yu if (nvme_remap_bar(dev, NVME_REG_DBS + 4096)) 2909b00a726aSKeith Busch goto release; 2910b00a726aSKeith Busch 2911b00a726aSKeith Busch return 0; 2912b00a726aSKeith Busch release: 2913a1f447b3SJohannes Thumshirn pci_release_mem_regions(pdev); 2914b00a726aSKeith Busch return -ENODEV; 2915b00a726aSKeith Busch } 2916b00a726aSKeith Busch 29178427bbc2SKai-Heng Feng static unsigned long check_vendor_combination_bug(struct pci_dev *pdev) 2918ff5350a8SAndy Lutomirski { 2919ff5350a8SAndy Lutomirski if (pdev->vendor == 0x144d && pdev->device == 0xa802) { 2920ff5350a8SAndy Lutomirski /* 2921ff5350a8SAndy Lutomirski * Several Samsung devices seem to drop off the PCIe bus 2922ff5350a8SAndy Lutomirski * randomly when APST is on and uses the deepest sleep state. 2923ff5350a8SAndy Lutomirski * This has been observed on a Samsung "SM951 NVMe SAMSUNG 2924ff5350a8SAndy Lutomirski * 256GB", a "PM951 NVMe SAMSUNG 512GB", and a "Samsung SSD 2925ff5350a8SAndy Lutomirski * 950 PRO 256GB", but it seems to be restricted to two Dell 2926ff5350a8SAndy Lutomirski * laptops. 
2927ff5350a8SAndy Lutomirski */ 2928ff5350a8SAndy Lutomirski if (dmi_match(DMI_SYS_VENDOR, "Dell Inc.") && 2929ff5350a8SAndy Lutomirski (dmi_match(DMI_PRODUCT_NAME, "XPS 15 9550") || 2930ff5350a8SAndy Lutomirski dmi_match(DMI_PRODUCT_NAME, "Precision 5510"))) 2931ff5350a8SAndy Lutomirski return NVME_QUIRK_NO_DEEPEST_PS; 29328427bbc2SKai-Heng Feng } else if (pdev->vendor == 0x144d && pdev->device == 0xa804) { 29338427bbc2SKai-Heng Feng /* 29348427bbc2SKai-Heng Feng * Samsung SSD 960 EVO drops off the PCIe bus after system 2935467c77d4SJarosław Janik * suspend on a Ryzen board, ASUS PRIME B350M-A, as well as 2936467c77d4SJarosław Janik * within few minutes after bootup on a Coffee Lake board - 2937467c77d4SJarosław Janik * ASUS PRIME Z370-A 29388427bbc2SKai-Heng Feng */ 29398427bbc2SKai-Heng Feng if (dmi_match(DMI_BOARD_VENDOR, "ASUSTeK COMPUTER INC.") && 2940467c77d4SJarosław Janik (dmi_match(DMI_BOARD_NAME, "PRIME B350M-A") || 2941467c77d4SJarosław Janik dmi_match(DMI_BOARD_NAME, "PRIME Z370-A"))) 29428427bbc2SKai-Heng Feng return NVME_QUIRK_NO_APST; 29431fae37acSShyjumon N } else if ((pdev->vendor == 0x144d && (pdev->device == 0xa801 || 29441fae37acSShyjumon N pdev->device == 0xa808 || pdev->device == 0xa809)) || 29451fae37acSShyjumon N (pdev->vendor == 0x1e0f && pdev->device == 0x0001)) { 29461fae37acSShyjumon N /* 29471fae37acSShyjumon N * Forcing to use host managed nvme power settings for 29481fae37acSShyjumon N * lowest idle power with quick resume latency on 29491fae37acSShyjumon N * Samsung and Toshiba SSDs based on suspend behavior 29501fae37acSShyjumon N * on Coffee Lake board for LENOVO C640 29511fae37acSShyjumon N */ 29521fae37acSShyjumon N if ((dmi_match(DMI_BOARD_VENDOR, "LENOVO")) && 29531fae37acSShyjumon N dmi_match(DMI_BOARD_NAME, "LNVNB161216")) 29541fae37acSShyjumon N return NVME_QUIRK_SIMPLE_SUSPEND; 2955ff5350a8SAndy Lutomirski } 2956ff5350a8SAndy Lutomirski 2957ff5350a8SAndy Lutomirski return 0; 2958ff5350a8SAndy Lutomirski } 2959ff5350a8SAndy Lutomirski 29602e87570bSChristoph Hellwig static struct nvme_dev *nvme_pci_alloc_dev(struct pci_dev *pdev, 29612e87570bSChristoph Hellwig const struct pci_device_id *id) 296218119775SKeith Busch { 2963ff5350a8SAndy Lutomirski unsigned long quirks = id->driver_data; 29642e87570bSChristoph Hellwig int node = dev_to_node(&pdev->dev); 29652e87570bSChristoph Hellwig struct nvme_dev *dev; 29662e87570bSChristoph Hellwig int ret = -ENOMEM; 296757dacad5SJay Sternberg 296857dacad5SJay Sternberg if (node == NUMA_NO_NODE) 29692fa84351SMasayoshi Mizuma set_dev_node(&pdev->dev, first_memory_node); 297057dacad5SJay Sternberg 297157dacad5SJay Sternberg dev = kzalloc_node(sizeof(*dev), GFP_KERNEL, node); 297257dacad5SJay Sternberg if (!dev) 29732e87570bSChristoph Hellwig return NULL; 29742e87570bSChristoph Hellwig INIT_WORK(&dev->ctrl.reset_work, nvme_reset_work); 29752e87570bSChristoph Hellwig mutex_init(&dev->shutdown_lock); 2976147b27e4SSagi Grimberg 29772a5bcfddSWeiping Zhang dev->nr_write_queues = write_queues; 29782a5bcfddSWeiping Zhang dev->nr_poll_queues = poll_queues; 29792a5bcfddSWeiping Zhang dev->nr_allocated_queues = nvme_max_io_queues(dev) + 1; 29802a5bcfddSWeiping Zhang dev->queues = kcalloc_node(dev->nr_allocated_queues, 29812a5bcfddSWeiping Zhang sizeof(struct nvme_queue), GFP_KERNEL, node); 298257dacad5SJay Sternberg if (!dev->queues) 29832e87570bSChristoph Hellwig goto out_free_dev; 298457dacad5SJay Sternberg 298557dacad5SJay Sternberg dev->dev = get_device(&pdev->dev); 2986f3ca80fcSChristoph Hellwig 29878427bbc2SKai-Heng Feng 
quirks |= check_vendor_combination_bug(pdev); 29882744d7a0SMario Limonciello if (!noacpi && acpi_storage_d3(&pdev->dev)) { 2989df4f9bc4SDavid E. Box /* 2990df4f9bc4SDavid E. Box * Some systems use a bios work around to ask for D3 on 2991df4f9bc4SDavid E. Box * platforms that support kernel managed suspend. 2992df4f9bc4SDavid E. Box */ 2993df4f9bc4SDavid E. Box dev_info(&pdev->dev, 2994df4f9bc4SDavid E. Box "platform quirk: setting simple suspend\n"); 2995df4f9bc4SDavid E. Box quirks |= NVME_QUIRK_SIMPLE_SUSPEND; 2996df4f9bc4SDavid E. Box } 29972e87570bSChristoph Hellwig ret = nvme_init_ctrl(&dev->ctrl, &pdev->dev, &nvme_pci_ctrl_ops, 29982e87570bSChristoph Hellwig quirks); 29992e87570bSChristoph Hellwig if (ret) 30002e87570bSChristoph Hellwig goto out_put_device; 30013f30a79cSChristoph Hellwig 30023f30a79cSChristoph Hellwig dma_set_min_align_mask(&pdev->dev, NVME_CTRL_PAGE_SIZE - 1); 30033f30a79cSChristoph Hellwig dma_set_max_seg_size(&pdev->dev, 0xffffffff); 3004df4f9bc4SDavid E. Box 3005943e942eSJens Axboe /* 30063f30a79cSChristoph Hellwig * Limit the max command size to prevent iod->sg allocations going 30073f30a79cSChristoph Hellwig * over a single page. 3008943e942eSJens Axboe */ 30093f30a79cSChristoph Hellwig dev->ctrl.max_hw_sectors = min_t(u32, 30103f30a79cSChristoph Hellwig NVME_MAX_KB_SZ << 1, dma_max_mapping_size(&pdev->dev) >> 9); 30113f30a79cSChristoph Hellwig dev->ctrl.max_segments = NVME_MAX_SEGS; 3012943e942eSJens Axboe 30133f30a79cSChristoph Hellwig /* 30143f30a79cSChristoph Hellwig * There is no support for SGLs for metadata (yet), so we are limited to 30153f30a79cSChristoph Hellwig * a single integrity segment for the separate metadata pointer. 30163f30a79cSChristoph Hellwig */ 30173f30a79cSChristoph Hellwig dev->ctrl.max_integrity_segments = 1; 30182e87570bSChristoph Hellwig return dev; 30192e87570bSChristoph Hellwig 30202e87570bSChristoph Hellwig out_put_device: 30212e87570bSChristoph Hellwig put_device(dev->dev); 30222e87570bSChristoph Hellwig kfree(dev->queues); 30232e87570bSChristoph Hellwig out_free_dev: 30242e87570bSChristoph Hellwig kfree(dev); 30252e87570bSChristoph Hellwig return ERR_PTR(ret); 3026943e942eSJens Axboe } 3027943e942eSJens Axboe 30282e87570bSChristoph Hellwig static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id) 30292e87570bSChristoph Hellwig { 30302e87570bSChristoph Hellwig struct nvme_dev *dev; 30312e87570bSChristoph Hellwig int result = -ENOMEM; 30322e87570bSChristoph Hellwig 30332e87570bSChristoph Hellwig dev = nvme_pci_alloc_dev(pdev, id); 30342e87570bSChristoph Hellwig if (!dev) 30352e87570bSChristoph Hellwig return -ENOMEM; 30362e87570bSChristoph Hellwig 30372e87570bSChristoph Hellwig result = nvme_dev_map(dev); 3038b6e44b4cSKeith Busch if (result) 30392e87570bSChristoph Hellwig goto out_uninit_ctrl; 30402e87570bSChristoph Hellwig 30412e87570bSChristoph Hellwig result = nvme_setup_prp_pools(dev); 30422e87570bSChristoph Hellwig if (result) 30432e87570bSChristoph Hellwig goto out_dev_unmap; 304457dacad5SJay Sternberg 3045081a7d95SChristoph Hellwig result = nvme_pci_alloc_iod_mempool(dev); 3046081a7d95SChristoph Hellwig if (result) 30472e87570bSChristoph Hellwig goto out_release_prp_pools; 3048b6e44b4cSKeith Busch 304957dacad5SJay Sternberg dev_info(dev->ctrl.device, "pci function %s\n", dev_name(&pdev->dev)); 305057dacad5SJay Sternberg 3051eac3ef26SChristoph Hellwig result = nvme_pci_enable(dev); 3052eac3ef26SChristoph Hellwig if (result) 3053eac3ef26SChristoph Hellwig goto out_release_iod_mempool; 305457dacad5SJay 
Sternberg 30550da7feaaSChristoph Hellwig result = nvme_alloc_admin_tag_set(&dev->ctrl, &dev->admin_tagset, 30560da7feaaSChristoph Hellwig &nvme_mq_admin_ops, sizeof(struct nvme_iod)); 3057eac3ef26SChristoph Hellwig if (result) 3058eac3ef26SChristoph Hellwig goto out_disable; 3059eac3ef26SChristoph Hellwig 3060eac3ef26SChristoph Hellwig /* 3061eac3ef26SChristoph Hellwig * Mark the controller as connecting before sending admin commands to 3062eac3ef26SChristoph Hellwig * allow the timeout handler to do the right thing. 3063eac3ef26SChristoph Hellwig */ 3064eac3ef26SChristoph Hellwig if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_CONNECTING)) { 3065eac3ef26SChristoph Hellwig dev_warn(dev->ctrl.device, 3066eac3ef26SChristoph Hellwig "failed to mark controller CONNECTING\n"); 3067eac3ef26SChristoph Hellwig result = -EBUSY; 3068eac3ef26SChristoph Hellwig goto out_disable; 3069eac3ef26SChristoph Hellwig } 3070eac3ef26SChristoph Hellwig 3071eac3ef26SChristoph Hellwig result = nvme_init_ctrl_finish(&dev->ctrl, false); 3072eac3ef26SChristoph Hellwig if (result) 3073eac3ef26SChristoph Hellwig goto out_disable; 3074eac3ef26SChristoph Hellwig 3075eac3ef26SChristoph Hellwig nvme_dbbuf_dma_alloc(dev); 3076eac3ef26SChristoph Hellwig 3077eac3ef26SChristoph Hellwig result = nvme_setup_host_mem(dev); 3078eac3ef26SChristoph Hellwig if (result < 0) 3079eac3ef26SChristoph Hellwig goto out_disable; 3080eac3ef26SChristoph Hellwig 3081eac3ef26SChristoph Hellwig result = nvme_setup_io_queues(dev); 3082eac3ef26SChristoph Hellwig if (result) 3083eac3ef26SChristoph Hellwig goto out_disable; 3084eac3ef26SChristoph Hellwig 3085eac3ef26SChristoph Hellwig if (dev->online_queues > 1) { 30860da7feaaSChristoph Hellwig nvme_alloc_io_tag_set(&dev->ctrl, &dev->tagset, &nvme_mq_ops, 30870da7feaaSChristoph Hellwig nvme_pci_nr_maps(dev), sizeof(struct nvme_iod)); 3088eac3ef26SChristoph Hellwig nvme_dbbuf_set(dev); 3089eac3ef26SChristoph Hellwig } 3090eac3ef26SChristoph Hellwig 30910da7feaaSChristoph Hellwig if (!dev->ctrl.tagset) 30920da7feaaSChristoph Hellwig dev_warn(dev->ctrl.device, "IO queues not created\n"); 30930da7feaaSChristoph Hellwig 3094eac3ef26SChristoph Hellwig if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_LIVE)) { 3095eac3ef26SChristoph Hellwig dev_warn(dev->ctrl.device, 3096eac3ef26SChristoph Hellwig "failed to mark controller live state\n"); 3097eac3ef26SChristoph Hellwig result = -ENODEV; 3098eac3ef26SChristoph Hellwig goto out_disable; 3099eac3ef26SChristoph Hellwig } 3100eac3ef26SChristoph Hellwig 31012e87570bSChristoph Hellwig pci_set_drvdata(pdev, dev); 310257dacad5SJay Sternberg 3103eac3ef26SChristoph Hellwig nvme_start_ctrl(&dev->ctrl); 3104eac3ef26SChristoph Hellwig nvme_put_ctrl(&dev->ctrl); 3105*5a5754a4SKeith Busch flush_work(&dev->ctrl.scan_work); 310657dacad5SJay Sternberg return 0; 310757dacad5SJay Sternberg 3108eac3ef26SChristoph Hellwig out_disable: 3109eac3ef26SChristoph Hellwig nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING); 3110eac3ef26SChristoph Hellwig nvme_dev_disable(dev, true); 3111eac3ef26SChristoph Hellwig nvme_free_host_mem(dev); 3112eac3ef26SChristoph Hellwig nvme_dev_remove_admin(dev); 3113eac3ef26SChristoph Hellwig nvme_dbbuf_dma_free(dev); 3114eac3ef26SChristoph Hellwig nvme_free_queues(dev, 0); 3115eac3ef26SChristoph Hellwig out_release_iod_mempool: 3116b6e44b4cSKeith Busch mempool_destroy(dev->iod_mempool); 31172e87570bSChristoph Hellwig out_release_prp_pools: 311857dacad5SJay Sternberg nvme_release_prp_pools(dev); 31192e87570bSChristoph Hellwig out_dev_unmap: 
312057dacad5SJay Sternberg nvme_dev_unmap(dev); 31212e87570bSChristoph Hellwig out_uninit_ctrl: 31222e87570bSChristoph Hellwig nvme_uninit_ctrl(&dev->ctrl); 312357dacad5SJay Sternberg return result; 312457dacad5SJay Sternberg } 312557dacad5SJay Sternberg 3126775755edSChristoph Hellwig static void nvme_reset_prepare(struct pci_dev *pdev) 312757dacad5SJay Sternberg { 312857dacad5SJay Sternberg struct nvme_dev *dev = pci_get_drvdata(pdev); 3129c1ac9a4bSKeith Busch 3130c1ac9a4bSKeith Busch /* 3131c1ac9a4bSKeith Busch * We don't need to check the return value from waiting for the reset 3132c1ac9a4bSKeith Busch * state as pci_dev device lock is held, making it impossible to race 3133c1ac9a4bSKeith Busch * with ->remove(). 3134c1ac9a4bSKeith Busch */ 3135c1ac9a4bSKeith Busch nvme_disable_prepare_reset(dev, false); 3136c1ac9a4bSKeith Busch nvme_sync_queues(&dev->ctrl); 3137775755edSChristoph Hellwig } 313857dacad5SJay Sternberg 3139775755edSChristoph Hellwig static void nvme_reset_done(struct pci_dev *pdev) 3140775755edSChristoph Hellwig { 3141f263fbb8SLinus Torvalds struct nvme_dev *dev = pci_get_drvdata(pdev); 3142c1ac9a4bSKeith Busch 3143c1ac9a4bSKeith Busch if (!nvme_try_sched_reset(&dev->ctrl)) 3144c1ac9a4bSKeith Busch flush_work(&dev->ctrl.reset_work); 314557dacad5SJay Sternberg } 314657dacad5SJay Sternberg 314757dacad5SJay Sternberg static void nvme_shutdown(struct pci_dev *pdev) 314857dacad5SJay Sternberg { 314957dacad5SJay Sternberg struct nvme_dev *dev = pci_get_drvdata(pdev); 31504e523547SBaolin Wang 3151c1ac9a4bSKeith Busch nvme_disable_prepare_reset(dev, true); 315257dacad5SJay Sternberg } 315357dacad5SJay Sternberg 3154f58944e2SKeith Busch /* 3155f58944e2SKeith Busch * The driver's remove may be called on a device in a partially initialized 3156f58944e2SKeith Busch * state. This function must not have any dependencies on the device state in 3157f58944e2SKeith Busch * order to proceed. 
3158f58944e2SKeith Busch */ 315957dacad5SJay Sternberg static void nvme_remove(struct pci_dev *pdev) 316057dacad5SJay Sternberg { 316157dacad5SJay Sternberg struct nvme_dev *dev = pci_get_drvdata(pdev); 316257dacad5SJay Sternberg 3163bb8d261eSChristoph Hellwig nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING); 316457dacad5SJay Sternberg pci_set_drvdata(pdev, NULL); 31650ff9d4e1SKeith Busch 31666db28edaSKeith Busch if (!pci_device_is_present(pdev)) { 31670ff9d4e1SKeith Busch nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DEAD); 31681d39e692SKeith Busch nvme_dev_disable(dev, true); 31696db28edaSKeith Busch } 31700ff9d4e1SKeith Busch 3171d86c4d8eSChristoph Hellwig flush_work(&dev->ctrl.reset_work); 3172d09f2b45SSagi Grimberg nvme_stop_ctrl(&dev->ctrl); 3173d09f2b45SSagi Grimberg nvme_remove_namespaces(&dev->ctrl); 3174a5cdb68cSKeith Busch nvme_dev_disable(dev, true); 317587ad72a5SChristoph Hellwig nvme_free_host_mem(dev); 317657dacad5SJay Sternberg nvme_dev_remove_admin(dev); 3177c11b7716SChristoph Hellwig nvme_dbbuf_dma_free(dev); 317857dacad5SJay Sternberg nvme_free_queues(dev, 0); 3179c11b7716SChristoph Hellwig mempool_destroy(dev->iod_mempool); 318057dacad5SJay Sternberg nvme_release_prp_pools(dev); 3181b00a726aSKeith Busch nvme_dev_unmap(dev); 3182726612b6SIsrael Rukshin nvme_uninit_ctrl(&dev->ctrl); 318357dacad5SJay Sternberg } 318457dacad5SJay Sternberg 318557dacad5SJay Sternberg #ifdef CONFIG_PM_SLEEP 3186d916b1beSKeith Busch static int nvme_get_power_state(struct nvme_ctrl *ctrl, u32 *ps) 3187d916b1beSKeith Busch { 3188d916b1beSKeith Busch return nvme_get_features(ctrl, NVME_FEAT_POWER_MGMT, 0, NULL, 0, ps); 3189d916b1beSKeith Busch } 3190d916b1beSKeith Busch 3191d916b1beSKeith Busch static int nvme_set_power_state(struct nvme_ctrl *ctrl, u32 ps) 3192d916b1beSKeith Busch { 3193d916b1beSKeith Busch return nvme_set_features(ctrl, NVME_FEAT_POWER_MGMT, ps, NULL, 0, NULL); 3194d916b1beSKeith Busch } 3195d916b1beSKeith Busch 3196d916b1beSKeith Busch static int nvme_resume(struct device *dev) 3197d916b1beSKeith Busch { 3198d916b1beSKeith Busch struct nvme_dev *ndev = pci_get_drvdata(to_pci_dev(dev)); 3199d916b1beSKeith Busch struct nvme_ctrl *ctrl = &ndev->ctrl; 3200d916b1beSKeith Busch 32014eaefe8cSRafael J. Wysocki if (ndev->last_ps == U32_MAX || 3202d916b1beSKeith Busch nvme_set_power_state(ctrl, ndev->last_ps) != 0) 3203e5ad96f3SKeith Busch goto reset; 3204e5ad96f3SKeith Busch if (ctrl->hmpre && nvme_setup_host_mem(ndev)) 3205e5ad96f3SKeith Busch goto reset; 3206e5ad96f3SKeith Busch 3207d916b1beSKeith Busch return 0; 3208e5ad96f3SKeith Busch reset: 3209e5ad96f3SKeith Busch return nvme_try_sched_reset(ctrl); 3210d916b1beSKeith Busch } 3211d916b1beSKeith Busch 321257dacad5SJay Sternberg static int nvme_suspend(struct device *dev) 321357dacad5SJay Sternberg { 321457dacad5SJay Sternberg struct pci_dev *pdev = to_pci_dev(dev); 321557dacad5SJay Sternberg struct nvme_dev *ndev = pci_get_drvdata(pdev); 3216d916b1beSKeith Busch struct nvme_ctrl *ctrl = &ndev->ctrl; 3217d916b1beSKeith Busch int ret = -EBUSY; 3218d916b1beSKeith Busch 32194eaefe8cSRafael J. Wysocki ndev->last_ps = U32_MAX; 32204eaefe8cSRafael J. Wysocki 3221d916b1beSKeith Busch /* 3222d916b1beSKeith Busch * The platform does not remove power for a kernel managed suspend so 3223d916b1beSKeith Busch * use host managed nvme power settings for lowest idle power if 3224d916b1beSKeith Busch * possible. This should have quicker resume latency than a full device 3225d916b1beSKeith Busch * shutdown. 
But if the firmware is involved after the suspend or the 3226d916b1beSKeith Busch * device does not support any non-default power states, shut down the 3227d916b1beSKeith Busch * device fully. 32284eaefe8cSRafael J. Wysocki * 32294eaefe8cSRafael J. Wysocki * If ASPM is not enabled for the device, shut down the device and allow 32304eaefe8cSRafael J. Wysocki * the PCI bus layer to put it into D3 in order to take the PCIe link 32314eaefe8cSRafael J. Wysocki * down, so as to allow the platform to achieve its minimum low-power 32324eaefe8cSRafael J. Wysocki * state (which may not be possible if the link is up). 3233d916b1beSKeith Busch */ 32344eaefe8cSRafael J. Wysocki if (pm_suspend_via_firmware() || !ctrl->npss || 3235cb32de1bSMario Limonciello !pcie_aspm_enabled(pdev) || 3236c1ac9a4bSKeith Busch (ndev->ctrl.quirks & NVME_QUIRK_SIMPLE_SUSPEND)) 3237c1ac9a4bSKeith Busch return nvme_disable_prepare_reset(ndev, true); 3238d916b1beSKeith Busch 3239d916b1beSKeith Busch nvme_start_freeze(ctrl); 3240d916b1beSKeith Busch nvme_wait_freeze(ctrl); 3241d916b1beSKeith Busch nvme_sync_queues(ctrl); 3242d916b1beSKeith Busch 32435d02a5c1SKeith Busch if (ctrl->state != NVME_CTRL_LIVE) 3244d916b1beSKeith Busch goto unfreeze; 3245d916b1beSKeith Busch 3246e5ad96f3SKeith Busch /* 3247e5ad96f3SKeith Busch * Host memory access may not be successful in a system suspend state, 3248e5ad96f3SKeith Busch * but the specification allows the controller to access memory in a 3249e5ad96f3SKeith Busch * non-operational power state. 3250e5ad96f3SKeith Busch */ 3251e5ad96f3SKeith Busch if (ndev->hmb) { 3252e5ad96f3SKeith Busch ret = nvme_set_host_mem(ndev, 0); 3253e5ad96f3SKeith Busch if (ret < 0) 3254e5ad96f3SKeith Busch goto unfreeze; 3255e5ad96f3SKeith Busch } 3256e5ad96f3SKeith Busch 3257d916b1beSKeith Busch ret = nvme_get_power_state(ctrl, &ndev->last_ps); 3258d916b1beSKeith Busch if (ret < 0) 3259d916b1beSKeith Busch goto unfreeze; 3260d916b1beSKeith Busch 32617cbb5c6fSMario Limonciello /* 32627cbb5c6fSMario Limonciello * A saved state prevents pci pm from generically controlling the 32637cbb5c6fSMario Limonciello * device's power. If we're using protocol specific settings, we don't 32647cbb5c6fSMario Limonciello * want pci interfering. 32657cbb5c6fSMario Limonciello */ 32667cbb5c6fSMario Limonciello pci_save_state(pdev); 32677cbb5c6fSMario Limonciello 3268d916b1beSKeith Busch ret = nvme_set_power_state(ctrl, ctrl->npss); 3269d916b1beSKeith Busch if (ret < 0) 3270d916b1beSKeith Busch goto unfreeze; 3271d916b1beSKeith Busch 3272d916b1beSKeith Busch if (ret) { 32737cbb5c6fSMario Limonciello /* discard the saved state */ 32747cbb5c6fSMario Limonciello pci_load_saved_state(pdev, NULL); 32757cbb5c6fSMario Limonciello 3276d916b1beSKeith Busch /* 3277d916b1beSKeith Busch * Clearing npss forces a controller reset on resume. The 327805d3046fSGeert Uytterhoeven * correct value will be rediscovered then. 
3279d916b1beSKeith Busch */ 3280c1ac9a4bSKeith Busch ret = nvme_disable_prepare_reset(ndev, true); 3281d916b1beSKeith Busch ctrl->npss = 0; 3282d916b1beSKeith Busch } 3283d916b1beSKeith Busch unfreeze: 3284d916b1beSKeith Busch nvme_unfreeze(ctrl); 3285d916b1beSKeith Busch return ret; 3286d916b1beSKeith Busch } 3287d916b1beSKeith Busch 3288d916b1beSKeith Busch static int nvme_simple_suspend(struct device *dev) 3289d916b1beSKeith Busch { 3290d916b1beSKeith Busch struct nvme_dev *ndev = pci_get_drvdata(to_pci_dev(dev)); 32914e523547SBaolin Wang 3292c1ac9a4bSKeith Busch return nvme_disable_prepare_reset(ndev, true); 329357dacad5SJay Sternberg } 329457dacad5SJay Sternberg 3295d916b1beSKeith Busch static int nvme_simple_resume(struct device *dev) 329657dacad5SJay Sternberg { 329757dacad5SJay Sternberg struct pci_dev *pdev = to_pci_dev(dev); 329857dacad5SJay Sternberg struct nvme_dev *ndev = pci_get_drvdata(pdev); 329957dacad5SJay Sternberg 3300c1ac9a4bSKeith Busch return nvme_try_sched_reset(&ndev->ctrl); 330157dacad5SJay Sternberg } 330257dacad5SJay Sternberg 330321774222SYueHaibing static const struct dev_pm_ops nvme_dev_pm_ops = { 3304d916b1beSKeith Busch .suspend = nvme_suspend, 3305d916b1beSKeith Busch .resume = nvme_resume, 3306d916b1beSKeith Busch .freeze = nvme_simple_suspend, 3307d916b1beSKeith Busch .thaw = nvme_simple_resume, 3308d916b1beSKeith Busch .poweroff = nvme_simple_suspend, 3309d916b1beSKeith Busch .restore = nvme_simple_resume, 3310d916b1beSKeith Busch }; 3311d916b1beSKeith Busch #endif /* CONFIG_PM_SLEEP */ 331257dacad5SJay Sternberg 3313a0a3408eSKeith Busch static pci_ers_result_t nvme_error_detected(struct pci_dev *pdev, 3314a0a3408eSKeith Busch pci_channel_state_t state) 3315a0a3408eSKeith Busch { 3316a0a3408eSKeith Busch struct nvme_dev *dev = pci_get_drvdata(pdev); 3317a0a3408eSKeith Busch 3318a0a3408eSKeith Busch /* 3319a0a3408eSKeith Busch * A frozen channel requires a reset. When detected, this method will 3320a0a3408eSKeith Busch * shutdown the controller to quiesce. The controller will be restarted 3321a0a3408eSKeith Busch * after the slot reset through driver's slot_reset callback. 
3322a0a3408eSKeith Busch */
3323a0a3408eSKeith Busch switch (state) {
3324a0a3408eSKeith Busch case pci_channel_io_normal:
3325a0a3408eSKeith Busch return PCI_ERS_RESULT_CAN_RECOVER;
3326a0a3408eSKeith Busch case pci_channel_io_frozen:
3327d011fb31SKeith Busch dev_warn(dev->ctrl.device,
3328d011fb31SKeith Busch "frozen state error detected, reset controller\n");
3329a5cdb68cSKeith Busch nvme_dev_disable(dev, false);
3330a0a3408eSKeith Busch return PCI_ERS_RESULT_NEED_RESET;
3331a0a3408eSKeith Busch case pci_channel_io_perm_failure:
3332d011fb31SKeith Busch dev_warn(dev->ctrl.device,
3333d011fb31SKeith Busch "failure state error detected, request disconnect\n");
3334a0a3408eSKeith Busch return PCI_ERS_RESULT_DISCONNECT;
3335a0a3408eSKeith Busch }
3336a0a3408eSKeith Busch return PCI_ERS_RESULT_NEED_RESET;
3337a0a3408eSKeith Busch }
3338a0a3408eSKeith Busch
3339a0a3408eSKeith Busch static pci_ers_result_t nvme_slot_reset(struct pci_dev *pdev)
3340a0a3408eSKeith Busch {
3341a0a3408eSKeith Busch struct nvme_dev *dev = pci_get_drvdata(pdev);
3342a0a3408eSKeith Busch
33431b3c47c1SSagi Grimberg dev_info(dev->ctrl.device, "restart after slot reset\n");
3344a0a3408eSKeith Busch pci_restore_state(pdev);
3345d86c4d8eSChristoph Hellwig nvme_reset_ctrl(&dev->ctrl);
3346a0a3408eSKeith Busch return PCI_ERS_RESULT_RECOVERED;
3347a0a3408eSKeith Busch }
3348a0a3408eSKeith Busch
3349a0a3408eSKeith Busch static void nvme_error_resume(struct pci_dev *pdev)
3350a0a3408eSKeith Busch {
335172cd4cc2SKeith Busch struct nvme_dev *dev = pci_get_drvdata(pdev);
335272cd4cc2SKeith Busch
335372cd4cc2SKeith Busch flush_work(&dev->ctrl.reset_work);
3354a0a3408eSKeith Busch }
3355a0a3408eSKeith Busch
335657dacad5SJay Sternberg static const struct pci_error_handlers nvme_err_handler = {
335757dacad5SJay Sternberg .error_detected = nvme_error_detected,
335857dacad5SJay Sternberg .slot_reset = nvme_slot_reset,
335957dacad5SJay Sternberg .resume = nvme_error_resume,
3360775755edSChristoph Hellwig .reset_prepare = nvme_reset_prepare,
3361775755edSChristoph Hellwig .reset_done = nvme_reset_done,
336257dacad5SJay Sternberg };
336357dacad5SJay Sternberg
336457dacad5SJay Sternberg static const struct pci_device_id nvme_id_table[] = {
3365972b13e2SDavid Fugate { PCI_VDEVICE(INTEL, 0x0953), /* Intel 750/P3500/P3600/P3700 */
336608095e70SKeith Busch .driver_data = NVME_QUIRK_STRIPE_SIZE |
3367e850fd16SChristoph Hellwig NVME_QUIRK_DEALLOCATE_ZEROES, },
3368972b13e2SDavid Fugate { PCI_VDEVICE(INTEL, 0x0a53), /* Intel P3520 */
336999466e70SKeith Busch .driver_data = NVME_QUIRK_STRIPE_SIZE |
3370e850fd16SChristoph Hellwig NVME_QUIRK_DEALLOCATE_ZEROES, },
3371972b13e2SDavid Fugate { PCI_VDEVICE(INTEL, 0x0a54), /* Intel P4500/P4600 */
337299466e70SKeith Busch .driver_data = NVME_QUIRK_STRIPE_SIZE |
337325e58af4SWu Zheng NVME_QUIRK_DEALLOCATE_ZEROES |
337425e58af4SWu Zheng NVME_QUIRK_IGNORE_DEV_SUBNQN, },
3375972b13e2SDavid Fugate { PCI_VDEVICE(INTEL, 0x0a55), /* Dell Express Flash P4600 */
3376f99cb7afSDavid Wayne Fugate .driver_data = NVME_QUIRK_STRIPE_SIZE |
3377f99cb7afSDavid Wayne Fugate NVME_QUIRK_DEALLOCATE_ZEROES, },
337850af47d0SAndy Lutomirski { PCI_VDEVICE(INTEL, 0xf1a5), /* Intel 600P/P3100 */
33799abd68efSJens Axboe .driver_data = NVME_QUIRK_NO_DEEPEST_PS |
33806c6aa2f2SAkinobu Mita NVME_QUIRK_MEDIUM_PRIO_SQ |
3381ce4cc313SDavid Milburn NVME_QUIRK_NO_TEMP_THRESH_CHANGE |
3382ce4cc313SDavid Milburn NVME_QUIRK_DISABLE_WRITE_ZEROES, },
33836299358dSJames Dingwall { PCI_VDEVICE(INTEL, 0xf1a6), /* Intel 760p/Pro 7600p */
33846299358dSJames Dingwall .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, },
3385540c801cSKeith Busch { PCI_VDEVICE(INTEL, 0x5845), /* Qemu emulated controller */
33867b210e4eSChristoph Hellwig .driver_data = NVME_QUIRK_IDENTIFY_CNS |
338766dd346bSChristoph Hellwig NVME_QUIRK_DISABLE_WRITE_ZEROES |
338866dd346bSChristoph Hellwig NVME_QUIRK_BOGUS_NID, },
338966dd346bSChristoph Hellwig { PCI_VDEVICE(REDHAT, 0x0010), /* Qemu emulated controller */
339066dd346bSChristoph Hellwig .driver_data = NVME_QUIRK_BOGUS_NID, },
33915bedd3afSChristoph Hellwig { PCI_DEVICE(0x126f, 0x2263), /* Silicon Motion unidentified */
3392c98a8793SKeith Busch .driver_data = NVME_QUIRK_NO_NS_DESC_LIST |
3393c98a8793SKeith Busch NVME_QUIRK_BOGUS_NID, },
33940302ae60SMicah Parrish { PCI_DEVICE(0x1bb1, 0x0100), /* Seagate Nytro Flash Storage */
33955e112d3fSJulian Einwag .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY |
33965e112d3fSJulian Einwag NVME_QUIRK_NO_NS_DESC_LIST, },
339754adc010SGuilherme G. Piccoli { PCI_DEVICE(0x1c58, 0x0003), /* HGST adapter */
339854adc010SGuilherme G. Piccoli .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
33998c97eeccSJeff Lien { PCI_DEVICE(0x1c58, 0x0023), /* WDC SN200 adapter */
34008c97eeccSJeff Lien .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
3401015282c9SWenbo Wang { PCI_DEVICE(0x1c5f, 0x0540), /* Memblaze Pblaze4 adapter */
3402015282c9SWenbo Wang .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
3403d554b5e1SMartin K. Petersen { PCI_DEVICE(0x144d, 0xa821), /* Samsung PM1725 */
3404d554b5e1SMartin K. Petersen .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
3405d554b5e1SMartin K. Petersen { PCI_DEVICE(0x144d, 0xa822), /* Samsung PM1725a */
34067ee5c78cSGopal Tiwari .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY |
3407abbb5f59SDmitry Monakhov NVME_QUIRK_DISABLE_WRITE_ZEROES|
34087ee5c78cSGopal Tiwari NVME_QUIRK_IGNORE_DEV_SUBNQN, },
34092cf7a77eSKeith Busch { PCI_DEVICE(0x1987, 0x5012), /* Phison E12 */
34102cf7a77eSKeith Busch .driver_data = NVME_QUIRK_BOGUS_NID, },
3411c9e95c39SClaus Stovgaard { PCI_DEVICE(0x1987, 0x5016), /* Phison E16 */
341273029c9bSKeith Busch .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN |
341373029c9bSKeith Busch NVME_QUIRK_BOGUS_NID, },
3414d14c2731STina Hsu { PCI_DEVICE(0x1987, 0x5019), /* phison E19 */
3415d14c2731STina Hsu .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
3416d14c2731STina Hsu { PCI_DEVICE(0x1987, 0x5021), /* Phison E21 */
3417d14c2731STina Hsu .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
34186e6a6828SPascal Terjan { PCI_DEVICE(0x1b4b, 0x1092), /* Lexar 256 GB SSD */
34196e6a6828SPascal Terjan .driver_data = NVME_QUIRK_NO_NS_DESC_LIST |
34206e6a6828SPascal Terjan NVME_QUIRK_IGNORE_DEV_SUBNQN, },
3421e1c70d79SLamarque Vieira Souza { PCI_DEVICE(0x1cc1, 0x33f8), /* ADATA IM2P33F8ABR1 1 TB */
3422e1c70d79SLamarque Vieira Souza .driver_data = NVME_QUIRK_BOGUS_NID, },
342308b903b5SMisha Nasledov { PCI_DEVICE(0x10ec, 0x5762), /* ADATA SX6000LNP */
34241629de0eSPablo Greco .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN |
34251629de0eSPablo Greco NVME_QUIRK_BOGUS_NID, },
3426f03e42c6SGabriel Craciunescu { PCI_DEVICE(0x1cc1, 0x8201), /* ADATA SX8200PNP 512GB */
3427f03e42c6SGabriel Craciunescu .driver_data = NVME_QUIRK_NO_DEEPEST_PS |
3428f03e42c6SGabriel Craciunescu NVME_QUIRK_IGNORE_DEV_SUBNQN, },
342941f38043SLeo Savernik { PCI_DEVICE(0x1344, 0x5407), /* Micron Technology Inc NVMe SSD */
343041f38043SLeo Savernik .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN },
3431d5ceb4d1SBean Huo { PCI_DEVICE(0x1344, 0x6001), /* Micron Nitro NVMe */
3432d5ceb4d1SBean Huo .driver_data = NVME_QUIRK_BOGUS_NID, },
34335611ec2bSKai-Heng Feng { PCI_DEVICE(0x1c5c, 0x1504), /* SK Hynix PC400 */
34345611ec2bSKai-Heng Feng .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
3435c4f01a77SKeith Busch { PCI_DEVICE(0x1c5c, 0x174a), /* SK Hynix P31 SSD */
3436c4f01a77SKeith Busch .driver_data = NVME_QUIRK_BOGUS_NID, },
343702ca079cSKai-Heng Feng { PCI_DEVICE(0x15b7, 0x2001), /* Sandisk Skyhawk */
343802ca079cSKai-Heng Feng .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
343989919929SChaitanya Kulkarni { PCI_DEVICE(0x1d97, 0x2263), /* SPCC */
344089919929SChaitanya Kulkarni .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
344143047e08Srasheed.hsueh { PCI_DEVICE(0x144d, 0xa80b), /* Samsung PM9B1 256G and 512G */
344243047e08Srasheed.hsueh .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
344343047e08Srasheed.hsueh { PCI_DEVICE(0x144d, 0xa809), /* Samsung MZALQ256HBJD 256G */
344443047e08Srasheed.hsueh .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
344543047e08Srasheed.hsueh { PCI_DEVICE(0x1cc4, 0x6303), /* UMIS RPJTJ512MGE1QDY 512G */
344643047e08Srasheed.hsueh .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
344743047e08Srasheed.hsueh { PCI_DEVICE(0x1cc4, 0x6302), /* UMIS RPJTJ256MGE1QDY 256G */
344843047e08Srasheed.hsueh .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
3449dc22c1c0SZoltán Böszörményi { PCI_DEVICE(0x2646, 0x2262), /* KINGSTON SKC2000 NVMe SSD */
3450dc22c1c0SZoltán Böszörményi .driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
3451538e4a8cSThorsten Leemhuis { PCI_DEVICE(0x2646, 0x2263), /* KINGSTON A2000 NVMe SSD */
3452538e4a8cSThorsten Leemhuis .driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
3453ac9b57d4SXander Li { PCI_DEVICE(0x2646, 0x5018), /* KINGSTON OM8SFP4xxxxP OS21012 NVMe SSD */
3454ac9b57d4SXander Li .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
3455ac9b57d4SXander Li { PCI_DEVICE(0x2646, 0x5016), /* KINGSTON OM3PGP4xxxxP OS21011 NVMe SSD */
3456ac9b57d4SXander Li .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
3457ac9b57d4SXander Li { PCI_DEVICE(0x2646, 0x501A), /* KINGSTON OM8PGP4xxxxP OS21005 NVMe SSD */
3458ac9b57d4SXander Li .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
3459ac9b57d4SXander Li { PCI_DEVICE(0x2646, 0x501B), /* KINGSTON OM8PGP4xxxxQ OS21005 NVMe SSD */
3460ac9b57d4SXander Li .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
3461ac9b57d4SXander Li { PCI_DEVICE(0x2646, 0x501E), /* KINGSTON OM3PGP4xxxxQ OS21011 NVMe SSD */
3462ac9b57d4SXander Li .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
34638d6e38f6STiago Dias Ferreira { PCI_DEVICE(0x1f40, 0x5236), /* Netac Technologies Co. NV7000 NVMe SSD */
34648d6e38f6STiago Dias Ferreira .driver_data = NVME_QUIRK_BOGUS_NID, },
346570ce3455SChristoph Hellwig { PCI_DEVICE(0x1e4B, 0x1001), /* MAXIO MAP1001 */
346670ce3455SChristoph Hellwig .driver_data = NVME_QUIRK_BOGUS_NID, },
3467a98a945bSChristoph Hellwig { PCI_DEVICE(0x1e4B, 0x1002), /* MAXIO MAP1002 */
3468a98a945bSChristoph Hellwig .driver_data = NVME_QUIRK_BOGUS_NID, },
3469a98a945bSChristoph Hellwig { PCI_DEVICE(0x1e4B, 0x1202), /* MAXIO MAP1202 */
3470a98a945bSChristoph Hellwig .driver_data = NVME_QUIRK_BOGUS_NID, },
34713765fad5SStefan Reiter { PCI_DEVICE(0x1cc1, 0x5350), /* ADATA XPG GAMMIX S50 */
34723765fad5SStefan Reiter .driver_data = NVME_QUIRK_BOGUS_NID, },
3473f37527a0SDennis P. Kliem { PCI_DEVICE(0x1dbe, 0x5236), /* ADATA XPG GAMMIX S70 */
3474f37527a0SDennis P. Kliem .driver_data = NVME_QUIRK_BOGUS_NID, },
3475d5d3c100SXi Ruoyao { PCI_DEVICE(0x1e49, 0x0021), /* ZHITAI TiPro5000 NVMe SSD */
3476d5d3c100SXi Ruoyao .driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
34776b961bceSNing Wang { PCI_DEVICE(0x1e49, 0x0041), /* ZHITAI TiPro7000 NVMe SSD */
34786b961bceSNing Wang .driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
3479d6c52fa3STobias Gruetzmacher { PCI_DEVICE(0xc0a9, 0x540a), /* Crucial P2 */
3480d6c52fa3STobias Gruetzmacher .driver_data = NVME_QUIRK_BOGUS_NID, },
3481200dccd0SShyamin Ayesh { PCI_DEVICE(0x1d97, 0x2263), /* Lexar NM610 */
3482200dccd0SShyamin Ayesh .driver_data = NVME_QUIRK_BOGUS_NID, },
348380b26240SAbhijit { PCI_DEVICE(0x1d97, 0x2269), /* Lexar NM760 */
348480b26240SAbhijit .driver_data = NVME_QUIRK_BOGUS_NID, },
34854bdf2603SFilippo Sironi { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0061),
34864bdf2603SFilippo Sironi .driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, },
34874bdf2603SFilippo Sironi { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0065),
34884bdf2603SFilippo Sironi .driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, },
34894bdf2603SFilippo Sironi { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x8061),
34904bdf2603SFilippo Sironi .driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, },
34914bdf2603SFilippo Sironi { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0xcd00),
34924bdf2603SFilippo Sironi .driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, },
34934bdf2603SFilippo Sironi { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0xcd01),
34944bdf2603SFilippo Sironi .driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, },
34954bdf2603SFilippo Sironi { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0xcd02),
34964bdf2603SFilippo Sironi .driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, },
349798f7b86aSAndy Shevchenko { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001),
349898f7b86aSAndy Shevchenko .driver_data = NVME_QUIRK_SINGLE_VECTOR },
3499124298bdSDaniel Roschka { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2003) },
350066341331SBenjamin Herrenschmidt { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2005),
350166341331SBenjamin Herrenschmidt .driver_data = NVME_QUIRK_SINGLE_VECTOR |
3502d38e9f04SBenjamin Herrenschmidt NVME_QUIRK_128_BYTES_SQES |
3503a2941f6aSKeith Busch NVME_QUIRK_SHARED_TAGS |
3504453116a4SHector Martin NVME_QUIRK_SKIP_CID_GEN |
3505453116a4SHector Martin NVME_QUIRK_IDENTIFY_CNS },
35060b85f59dSAndy Shevchenko { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
350757dacad5SJay Sternberg { 0, }
350857dacad5SJay Sternberg };
350957dacad5SJay Sternberg MODULE_DEVICE_TABLE(pci, nvme_id_table);
351057dacad5SJay Sternberg
351157dacad5SJay Sternberg static struct pci_driver nvme_driver = {
351257dacad5SJay Sternberg .name = "nvme",
351357dacad5SJay Sternberg .id_table = nvme_id_table,
351457dacad5SJay Sternberg .probe = nvme_probe,
351557dacad5SJay Sternberg .remove = nvme_remove,
351657dacad5SJay Sternberg .shutdown = nvme_shutdown,
351757dacad5SJay Sternberg .driver = {
3518eac3ef26SChristoph Hellwig .probe_type = PROBE_PREFER_ASYNCHRONOUS,
3519eac3ef26SChristoph Hellwig #ifdef CONFIG_PM_SLEEP
352057dacad5SJay Sternberg .pm = &nvme_dev_pm_ops,
3521d916b1beSKeith Busch #endif
3522eac3ef26SChristoph Hellwig },
352374d986abSAlexander Duyck .sriov_configure = pci_sriov_configure_simple,
352457dacad5SJay Sternberg .err_handler = &nvme_err_handler,
352557dacad5SJay Sternberg };
352657dacad5SJay Sternberg
352757dacad5SJay Sternberg static int __init nvme_init(void)
352857dacad5SJay Sternberg {
352981101540SChristoph Hellwig BUILD_BUG_ON(sizeof(struct nvme_create_cq) != 64);
353081101540SChristoph Hellwig BUILD_BUG_ON(sizeof(struct nvme_create_sq) != 64);
353181101540SChristoph Hellwig BUILD_BUG_ON(sizeof(struct nvme_delete_queue) != 64);
3532612b7286SMing Lei BUILD_BUG_ON(IRQ_AFFINITY_MAX_SETS < 2);
3533c372cdd1SKeith Busch BUILD_BUG_ON(DIV_ROUND_UP(nvme_pci_npages_prp(), NVME_CTRL_PAGE_SIZE) >
3534c372cdd1SKeith Busch S8_MAX);
353517c33167SKeith Busch
35369a6327d2SSagi Grimberg return pci_register_driver(&nvme_driver);
353757dacad5SJay Sternberg }
353857dacad5SJay Sternberg
353957dacad5SJay Sternberg static void __exit nvme_exit(void)
354057dacad5SJay Sternberg {
354157dacad5SJay Sternberg pci_unregister_driver(&nvme_driver);
354203e0f3a6SMing Lei flush_workqueue(nvme_wq);
354357dacad5SJay Sternberg }
354457dacad5SJay Sternberg
354557dacad5SJay Sternberg MODULE_AUTHOR("Matthew Wilcox <willy@linux.intel.com>");
354657dacad5SJay Sternberg MODULE_LICENSE("GPL");
354757dacad5SJay Sternberg MODULE_VERSION("1.0");
354857dacad5SJay Sternberg module_init(nvme_init);
354957dacad5SJay Sternberg module_exit(nvme_exit);
3550