157dacad5SJay Sternberg /* 257dacad5SJay Sternberg * NVM Express device driver 357dacad5SJay Sternberg * Copyright (c) 2011-2014, Intel Corporation. 457dacad5SJay Sternberg * 557dacad5SJay Sternberg * This program is free software; you can redistribute it and/or modify it 657dacad5SJay Sternberg * under the terms and conditions of the GNU General Public License, 757dacad5SJay Sternberg * version 2, as published by the Free Software Foundation. 857dacad5SJay Sternberg * 957dacad5SJay Sternberg * This program is distributed in the hope it will be useful, but WITHOUT 1057dacad5SJay Sternberg * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 1157dacad5SJay Sternberg * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 1257dacad5SJay Sternberg * more details. 1357dacad5SJay Sternberg */ 1457dacad5SJay Sternberg 15a0a3408eSKeith Busch #include <linux/aer.h> 1657dacad5SJay Sternberg #include <linux/bitops.h> 1757dacad5SJay Sternberg #include <linux/blkdev.h> 1857dacad5SJay Sternberg #include <linux/blk-mq.h> 19dca51e78SChristoph Hellwig #include <linux/blk-mq-pci.h> 20ff5350a8SAndy Lutomirski #include <linux/dmi.h> 2157dacad5SJay Sternberg #include <linux/init.h> 2257dacad5SJay Sternberg #include <linux/interrupt.h> 2357dacad5SJay Sternberg #include <linux/io.h> 2457dacad5SJay Sternberg #include <linux/mm.h> 2557dacad5SJay Sternberg #include <linux/module.h> 2677bf25eaSKeith Busch #include <linux/mutex.h> 2757dacad5SJay Sternberg #include <linux/pci.h> 2857dacad5SJay Sternberg #include <linux/poison.h> 2957dacad5SJay Sternberg #include <linux/t10-pi.h> 302d55cd5fSChristoph Hellwig #include <linux/timer.h> 3157dacad5SJay Sternberg #include <linux/types.h> 329cf5c095SLinus Torvalds #include <linux/io-64-nonatomic-lo-hi.h> 331d277a63SKeith Busch #include <asm/unaligned.h> 34a98e58e5SScott Bauer #include <linux/sed-opal.h> 3557dacad5SJay Sternberg 3657dacad5SJay Sternberg #include "nvme.h" 3757dacad5SJay Sternberg 3857dacad5SJay Sternberg #define NVME_Q_DEPTH 1024 3957dacad5SJay Sternberg #define SQ_SIZE(depth) (depth * sizeof(struct nvme_command)) 4057dacad5SJay Sternberg #define CQ_SIZE(depth) (depth * sizeof(struct nvme_completion)) 4157dacad5SJay Sternberg 42adf68f21SChristoph Hellwig /* 43adf68f21SChristoph Hellwig * We handle AEN commands ourselves and don't even let the 44adf68f21SChristoph Hellwig * block layer know about them. 
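 * Command IDs at or above NVME_AQ_BLKMQ_DEPTH (defined below) are reserved
 * for AENs: nvme_pci_submit_async_event() issues them with command_id set to
 * NVME_AQ_BLKMQ_DEPTH + aer_idx, and nvme_handle_cqe() routes any admin-queue
 * completion in that range to nvme_complete_async_event() instead of looking
 * up a block-layer request.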
45adf68f21SChristoph Hellwig */ 46f866fc42SChristoph Hellwig #define NVME_AQ_BLKMQ_DEPTH (NVME_AQ_DEPTH - NVME_NR_AERS) 47adf68f21SChristoph Hellwig 4857dacad5SJay Sternberg static int use_threaded_interrupts; 4957dacad5SJay Sternberg module_param(use_threaded_interrupts, int, 0); 5057dacad5SJay Sternberg 5157dacad5SJay Sternberg static bool use_cmb_sqes = true; 5257dacad5SJay Sternberg module_param(use_cmb_sqes, bool, 0644); 5357dacad5SJay Sternberg MODULE_PARM_DESC(use_cmb_sqes, "use controller's memory buffer for I/O SQes"); 5457dacad5SJay Sternberg 5587ad72a5SChristoph Hellwig static unsigned int max_host_mem_size_mb = 128; 5687ad72a5SChristoph Hellwig module_param(max_host_mem_size_mb, uint, 0444); 5787ad72a5SChristoph Hellwig MODULE_PARM_DESC(max_host_mem_size_mb, 5887ad72a5SChristoph Hellwig "Maximum Host Memory Buffer (HMB) size per controller (in MiB)"); 5987ad72a5SChristoph Hellwig 601c63dc66SChristoph Hellwig struct nvme_dev; 611c63dc66SChristoph Hellwig struct nvme_queue; 6257dacad5SJay Sternberg 63a0fa9647SJens Axboe static void nvme_process_cq(struct nvme_queue *nvmeq); 64a5cdb68cSKeith Busch static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown); 6557dacad5SJay Sternberg 6657dacad5SJay Sternberg /* 671c63dc66SChristoph Hellwig * Represents an NVM Express device. Each nvme_dev is a PCI function. 681c63dc66SChristoph Hellwig */ 691c63dc66SChristoph Hellwig struct nvme_dev { 701c63dc66SChristoph Hellwig struct nvme_queue **queues; 711c63dc66SChristoph Hellwig struct blk_mq_tag_set tagset; 721c63dc66SChristoph Hellwig struct blk_mq_tag_set admin_tagset; 731c63dc66SChristoph Hellwig u32 __iomem *dbs; 741c63dc66SChristoph Hellwig struct device *dev; 751c63dc66SChristoph Hellwig struct dma_pool *prp_page_pool; 761c63dc66SChristoph Hellwig struct dma_pool *prp_small_pool; 771c63dc66SChristoph Hellwig unsigned online_queues; 781c63dc66SChristoph Hellwig unsigned max_qid; 791c63dc66SChristoph Hellwig int q_depth; 801c63dc66SChristoph Hellwig u32 db_stride; 811c63dc66SChristoph Hellwig void __iomem *bar; 8297f6ef64SXu Yu unsigned long bar_mapped_size; 835c8809e6SChristoph Hellwig struct work_struct remove_work; 8477bf25eaSKeith Busch struct mutex shutdown_lock; 851c63dc66SChristoph Hellwig bool subsystem; 861c63dc66SChristoph Hellwig void __iomem *cmb; 871c63dc66SChristoph Hellwig dma_addr_t cmb_dma_addr; 881c63dc66SChristoph Hellwig u64 cmb_size; 891c63dc66SChristoph Hellwig u32 cmbsz; 90202021c1SStephen Bates u32 cmbloc; 911c63dc66SChristoph Hellwig struct nvme_ctrl ctrl; 92db3cbfffSKeith Busch struct completion ioq_wait; 9387ad72a5SChristoph Hellwig 9487ad72a5SChristoph Hellwig /* shadow doorbell buffer support: */ 95f9f38e33SHelen Koike u32 *dbbuf_dbs; 96f9f38e33SHelen Koike dma_addr_t dbbuf_dbs_dma_addr; 97f9f38e33SHelen Koike u32 *dbbuf_eis; 98f9f38e33SHelen Koike dma_addr_t dbbuf_eis_dma_addr; 9987ad72a5SChristoph Hellwig 10087ad72a5SChristoph Hellwig /* host memory buffer support: */ 10187ad72a5SChristoph Hellwig u64 host_mem_size; 10287ad72a5SChristoph Hellwig u32 nr_host_mem_descs; 10387ad72a5SChristoph Hellwig struct nvme_host_mem_buf_desc *host_mem_descs; 10487ad72a5SChristoph Hellwig void **host_mem_desc_bufs; 10557dacad5SJay Sternberg }; 10657dacad5SJay Sternberg 107f9f38e33SHelen Koike static inline unsigned int sq_idx(unsigned int qid, u32 stride) 108f9f38e33SHelen Koike { 109f9f38e33SHelen Koike return qid * 2 * stride; 110f9f38e33SHelen Koike } 111f9f38e33SHelen Koike 112f9f38e33SHelen Koike static inline unsigned int cq_idx(unsigned int qid, u32 stride) 
113f9f38e33SHelen Koike { 114f9f38e33SHelen Koike return (qid * 2 + 1) * stride; 115f9f38e33SHelen Koike } 116f9f38e33SHelen Koike 1171c63dc66SChristoph Hellwig static inline struct nvme_dev *to_nvme_dev(struct nvme_ctrl *ctrl) 1181c63dc66SChristoph Hellwig { 1191c63dc66SChristoph Hellwig return container_of(ctrl, struct nvme_dev, ctrl); 1201c63dc66SChristoph Hellwig } 1211c63dc66SChristoph Hellwig 12257dacad5SJay Sternberg /* 12357dacad5SJay Sternberg * An NVM Express queue. Each device has at least two (one for admin 12457dacad5SJay Sternberg * commands and one for I/O commands). 12557dacad5SJay Sternberg */ 12657dacad5SJay Sternberg struct nvme_queue { 12757dacad5SJay Sternberg struct device *q_dmadev; 12857dacad5SJay Sternberg struct nvme_dev *dev; 12957dacad5SJay Sternberg spinlock_t q_lock; 13057dacad5SJay Sternberg struct nvme_command *sq_cmds; 13157dacad5SJay Sternberg struct nvme_command __iomem *sq_cmds_io; 13257dacad5SJay Sternberg volatile struct nvme_completion *cqes; 13357dacad5SJay Sternberg struct blk_mq_tags **tags; 13457dacad5SJay Sternberg dma_addr_t sq_dma_addr; 13557dacad5SJay Sternberg dma_addr_t cq_dma_addr; 13657dacad5SJay Sternberg u32 __iomem *q_db; 13757dacad5SJay Sternberg u16 q_depth; 13857dacad5SJay Sternberg s16 cq_vector; 13957dacad5SJay Sternberg u16 sq_tail; 14057dacad5SJay Sternberg u16 cq_head; 14157dacad5SJay Sternberg u16 qid; 14257dacad5SJay Sternberg u8 cq_phase; 14357dacad5SJay Sternberg u8 cqe_seen; 144f9f38e33SHelen Koike u32 *dbbuf_sq_db; 145f9f38e33SHelen Koike u32 *dbbuf_cq_db; 146f9f38e33SHelen Koike u32 *dbbuf_sq_ei; 147f9f38e33SHelen Koike u32 *dbbuf_cq_ei; 14857dacad5SJay Sternberg }; 14957dacad5SJay Sternberg 15057dacad5SJay Sternberg /* 15171bd150cSChristoph Hellwig * The nvme_iod describes the data in an I/O, including the list of PRP 15271bd150cSChristoph Hellwig * entries. You can't see it in this data structure because C doesn't let 153f4800d6dSChristoph Hellwig * me express that. Use nvme_init_iod to ensure there's enough space 15471bd150cSChristoph Hellwig * allocated to store the PRP list. 15571bd150cSChristoph Hellwig */ 15671bd150cSChristoph Hellwig struct nvme_iod { 157d49187e9SChristoph Hellwig struct nvme_request req; 158f4800d6dSChristoph Hellwig struct nvme_queue *nvmeq; 159f4800d6dSChristoph Hellwig int aborted; 16071bd150cSChristoph Hellwig int npages; /* In the PRP list. 
0 means small pool in use */ 16171bd150cSChristoph Hellwig int nents; /* Used in scatterlist */ 16271bd150cSChristoph Hellwig int length; /* Of data, in bytes */ 16371bd150cSChristoph Hellwig dma_addr_t first_dma; 164bf684057SChristoph Hellwig struct scatterlist meta_sg; /* metadata requires single contiguous buffer */ 165f4800d6dSChristoph Hellwig struct scatterlist *sg; 166f4800d6dSChristoph Hellwig struct scatterlist inline_sg[0]; 16757dacad5SJay Sternberg }; 16857dacad5SJay Sternberg 16957dacad5SJay Sternberg /* 17057dacad5SJay Sternberg * Check we didn't inadvertently grow the command struct 17157dacad5SJay Sternberg */ 17257dacad5SJay Sternberg static inline void _nvme_check_size(void) 17357dacad5SJay Sternberg { 17457dacad5SJay Sternberg BUILD_BUG_ON(sizeof(struct nvme_rw_command) != 64); 17557dacad5SJay Sternberg BUILD_BUG_ON(sizeof(struct nvme_create_cq) != 64); 17657dacad5SJay Sternberg BUILD_BUG_ON(sizeof(struct nvme_create_sq) != 64); 17757dacad5SJay Sternberg BUILD_BUG_ON(sizeof(struct nvme_delete_queue) != 64); 17857dacad5SJay Sternberg BUILD_BUG_ON(sizeof(struct nvme_features) != 64); 17957dacad5SJay Sternberg BUILD_BUG_ON(sizeof(struct nvme_format_cmd) != 64); 18057dacad5SJay Sternberg BUILD_BUG_ON(sizeof(struct nvme_abort_cmd) != 64); 18157dacad5SJay Sternberg BUILD_BUG_ON(sizeof(struct nvme_command) != 64); 1820add5e8eSJohannes Thumshirn BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != NVME_IDENTIFY_DATA_SIZE); 1830add5e8eSJohannes Thumshirn BUILD_BUG_ON(sizeof(struct nvme_id_ns) != NVME_IDENTIFY_DATA_SIZE); 18457dacad5SJay Sternberg BUILD_BUG_ON(sizeof(struct nvme_lba_range_type) != 64); 18557dacad5SJay Sternberg BUILD_BUG_ON(sizeof(struct nvme_smart_log) != 512); 186f9f38e33SHelen Koike BUILD_BUG_ON(sizeof(struct nvme_dbbuf) != 64); 187f9f38e33SHelen Koike } 188f9f38e33SHelen Koike 189f9f38e33SHelen Koike static inline unsigned int nvme_dbbuf_size(u32 stride) 190f9f38e33SHelen Koike { 191f9f38e33SHelen Koike return ((num_possible_cpus() + 1) * 8 * stride); 192f9f38e33SHelen Koike } 193f9f38e33SHelen Koike 194f9f38e33SHelen Koike static int nvme_dbbuf_dma_alloc(struct nvme_dev *dev) 195f9f38e33SHelen Koike { 196f9f38e33SHelen Koike unsigned int mem_size = nvme_dbbuf_size(dev->db_stride); 197f9f38e33SHelen Koike 198f9f38e33SHelen Koike if (dev->dbbuf_dbs) 199f9f38e33SHelen Koike return 0; 200f9f38e33SHelen Koike 201f9f38e33SHelen Koike dev->dbbuf_dbs = dma_alloc_coherent(dev->dev, mem_size, 202f9f38e33SHelen Koike &dev->dbbuf_dbs_dma_addr, 203f9f38e33SHelen Koike GFP_KERNEL); 204f9f38e33SHelen Koike if (!dev->dbbuf_dbs) 205f9f38e33SHelen Koike return -ENOMEM; 206f9f38e33SHelen Koike dev->dbbuf_eis = dma_alloc_coherent(dev->dev, mem_size, 207f9f38e33SHelen Koike &dev->dbbuf_eis_dma_addr, 208f9f38e33SHelen Koike GFP_KERNEL); 209f9f38e33SHelen Koike if (!dev->dbbuf_eis) { 210f9f38e33SHelen Koike dma_free_coherent(dev->dev, mem_size, 211f9f38e33SHelen Koike dev->dbbuf_dbs, dev->dbbuf_dbs_dma_addr); 212f9f38e33SHelen Koike dev->dbbuf_dbs = NULL; 213f9f38e33SHelen Koike return -ENOMEM; 214f9f38e33SHelen Koike } 215f9f38e33SHelen Koike 216f9f38e33SHelen Koike return 0; 217f9f38e33SHelen Koike } 218f9f38e33SHelen Koike 219f9f38e33SHelen Koike static void nvme_dbbuf_dma_free(struct nvme_dev *dev) 220f9f38e33SHelen Koike { 221f9f38e33SHelen Koike unsigned int mem_size = nvme_dbbuf_size(dev->db_stride); 222f9f38e33SHelen Koike 223f9f38e33SHelen Koike if (dev->dbbuf_dbs) { 224f9f38e33SHelen Koike dma_free_coherent(dev->dev, mem_size, 225f9f38e33SHelen Koike dev->dbbuf_dbs,
dev->dbbuf_dbs_dma_addr); 226f9f38e33SHelen Koike dev->dbbuf_dbs = NULL; 227f9f38e33SHelen Koike } 228f9f38e33SHelen Koike if (dev->dbbuf_eis) { 229f9f38e33SHelen Koike dma_free_coherent(dev->dev, mem_size, 230f9f38e33SHelen Koike dev->dbbuf_eis, dev->dbbuf_eis_dma_addr); 231f9f38e33SHelen Koike dev->dbbuf_eis = NULL; 232f9f38e33SHelen Koike } 233f9f38e33SHelen Koike } 234f9f38e33SHelen Koike 235f9f38e33SHelen Koike static void nvme_dbbuf_init(struct nvme_dev *dev, 236f9f38e33SHelen Koike struct nvme_queue *nvmeq, int qid) 237f9f38e33SHelen Koike { 238f9f38e33SHelen Koike if (!dev->dbbuf_dbs || !qid) 239f9f38e33SHelen Koike return; 240f9f38e33SHelen Koike 241f9f38e33SHelen Koike nvmeq->dbbuf_sq_db = &dev->dbbuf_dbs[sq_idx(qid, dev->db_stride)]; 242f9f38e33SHelen Koike nvmeq->dbbuf_cq_db = &dev->dbbuf_dbs[cq_idx(qid, dev->db_stride)]; 243f9f38e33SHelen Koike nvmeq->dbbuf_sq_ei = &dev->dbbuf_eis[sq_idx(qid, dev->db_stride)]; 244f9f38e33SHelen Koike nvmeq->dbbuf_cq_ei = &dev->dbbuf_eis[cq_idx(qid, dev->db_stride)]; 245f9f38e33SHelen Koike } 246f9f38e33SHelen Koike 247f9f38e33SHelen Koike static void nvme_dbbuf_set(struct nvme_dev *dev) 248f9f38e33SHelen Koike { 249f9f38e33SHelen Koike struct nvme_command c; 250f9f38e33SHelen Koike 251f9f38e33SHelen Koike if (!dev->dbbuf_dbs) 252f9f38e33SHelen Koike return; 253f9f38e33SHelen Koike 254f9f38e33SHelen Koike memset(&c, 0, sizeof(c)); 255f9f38e33SHelen Koike c.dbbuf.opcode = nvme_admin_dbbuf; 256f9f38e33SHelen Koike c.dbbuf.prp1 = cpu_to_le64(dev->dbbuf_dbs_dma_addr); 257f9f38e33SHelen Koike c.dbbuf.prp2 = cpu_to_le64(dev->dbbuf_eis_dma_addr); 258f9f38e33SHelen Koike 259f9f38e33SHelen Koike if (nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0)) { 2609bdcfb10SChristoph Hellwig dev_warn(dev->ctrl.device, "unable to set dbbuf\n"); 261f9f38e33SHelen Koike /* Free memory and continue on */ 262f9f38e33SHelen Koike nvme_dbbuf_dma_free(dev); 263f9f38e33SHelen Koike } 264f9f38e33SHelen Koike } 265f9f38e33SHelen Koike 266f9f38e33SHelen Koike static inline int nvme_dbbuf_need_event(u16 event_idx, u16 new_idx, u16 old) 267f9f38e33SHelen Koike { 268f9f38e33SHelen Koike return (u16)(new_idx - event_idx - 1) < (u16)(new_idx - old); 269f9f38e33SHelen Koike } 270f9f38e33SHelen Koike 271f9f38e33SHelen Koike /* Update dbbuf and return true if an MMIO is required */ 272f9f38e33SHelen Koike static bool nvme_dbbuf_update_and_check_event(u16 value, u32 *dbbuf_db, 273f9f38e33SHelen Koike volatile u32 *dbbuf_ei) 274f9f38e33SHelen Koike { 275f9f38e33SHelen Koike if (dbbuf_db) { 276f9f38e33SHelen Koike u16 old_value; 277f9f38e33SHelen Koike 278f9f38e33SHelen Koike /* 279f9f38e33SHelen Koike * Ensure that the queue is written before updating 280f9f38e33SHelen Koike * the doorbell in memory 281f9f38e33SHelen Koike */ 282f9f38e33SHelen Koike wmb(); 283f9f38e33SHelen Koike 284f9f38e33SHelen Koike old_value = *dbbuf_db; 285f9f38e33SHelen Koike *dbbuf_db = value; 286f9f38e33SHelen Koike 287f9f38e33SHelen Koike if (!nvme_dbbuf_need_event(*dbbuf_ei, value, old_value)) 288f9f38e33SHelen Koike return false; 289f9f38e33SHelen Koike } 290f9f38e33SHelen Koike 291f9f38e33SHelen Koike return true; 29257dacad5SJay Sternberg } 29357dacad5SJay Sternberg 29457dacad5SJay Sternberg /* 29557dacad5SJay Sternberg * Max size of iod being embedded in the request payload 29657dacad5SJay Sternberg */ 29757dacad5SJay Sternberg #define NVME_INT_PAGES 2 2985fd4ce1bSChristoph Hellwig #define NVME_INT_BYTES(dev) (NVME_INT_PAGES * (dev)->ctrl.page_size) 29957dacad5SJay Sternberg 30057dacad5SJay Sternberg 
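/*
 * Sizing note: the per-request PDU reserves room for an inline scatterlist
 * and PRP lists covering NVME_INT_PAGES pages, i.e. NVME_INT_BYTES(dev)
 * bytes of data. For example, assuming a 4 KiB controller page size, a
 * request of up to 8 KiB spanning at most two segments fits entirely in the
 * space sized by nvme_cmd_size(); anything larger falls back to the
 * GFP_ATOMIC allocation in nvme_init_iod().
 */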
/* 30157dacad5SJay Sternberg * Will slightly overestimate the number of pages needed. This is OK 30257dacad5SJay Sternberg * as it only leads to a small amount of wasted memory for the lifetime of 30357dacad5SJay Sternberg * the I/O. 30457dacad5SJay Sternberg */ 30557dacad5SJay Sternberg static int nvme_npages(unsigned size, struct nvme_dev *dev) 30657dacad5SJay Sternberg { 3075fd4ce1bSChristoph Hellwig unsigned nprps = DIV_ROUND_UP(size + dev->ctrl.page_size, 3085fd4ce1bSChristoph Hellwig dev->ctrl.page_size); 30957dacad5SJay Sternberg return DIV_ROUND_UP(8 * nprps, PAGE_SIZE - 8); 31057dacad5SJay Sternberg } 31157dacad5SJay Sternberg 312f4800d6dSChristoph Hellwig static unsigned int nvme_iod_alloc_size(struct nvme_dev *dev, 313f4800d6dSChristoph Hellwig unsigned int size, unsigned int nseg) 314f4800d6dSChristoph Hellwig { 315f4800d6dSChristoph Hellwig return sizeof(__le64 *) * nvme_npages(size, dev) + 316f4800d6dSChristoph Hellwig sizeof(struct scatterlist) * nseg; 317f4800d6dSChristoph Hellwig } 318f4800d6dSChristoph Hellwig 31957dacad5SJay Sternberg static unsigned int nvme_cmd_size(struct nvme_dev *dev) 32057dacad5SJay Sternberg { 321f4800d6dSChristoph Hellwig return sizeof(struct nvme_iod) + 322f4800d6dSChristoph Hellwig nvme_iod_alloc_size(dev, NVME_INT_BYTES(dev), NVME_INT_PAGES); 32357dacad5SJay Sternberg } 32457dacad5SJay Sternberg 32557dacad5SJay Sternberg static int nvme_admin_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, 32657dacad5SJay Sternberg unsigned int hctx_idx) 32757dacad5SJay Sternberg { 32857dacad5SJay Sternberg struct nvme_dev *dev = data; 32957dacad5SJay Sternberg struct nvme_queue *nvmeq = dev->queues[0]; 33057dacad5SJay Sternberg 33157dacad5SJay Sternberg WARN_ON(hctx_idx != 0); 33257dacad5SJay Sternberg WARN_ON(dev->admin_tagset.tags[0] != hctx->tags); 33357dacad5SJay Sternberg WARN_ON(nvmeq->tags); 33457dacad5SJay Sternberg 33557dacad5SJay Sternberg hctx->driver_data = nvmeq; 33657dacad5SJay Sternberg nvmeq->tags = &dev->admin_tagset.tags[0]; 33757dacad5SJay Sternberg return 0; 33857dacad5SJay Sternberg } 33957dacad5SJay Sternberg 34057dacad5SJay Sternberg static void nvme_admin_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx) 34157dacad5SJay Sternberg { 34257dacad5SJay Sternberg struct nvme_queue *nvmeq = hctx->driver_data; 34357dacad5SJay Sternberg 34457dacad5SJay Sternberg nvmeq->tags = NULL; 34557dacad5SJay Sternberg } 34657dacad5SJay Sternberg 34757dacad5SJay Sternberg static int nvme_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, 34857dacad5SJay Sternberg unsigned int hctx_idx) 34957dacad5SJay Sternberg { 35057dacad5SJay Sternberg struct nvme_dev *dev = data; 35157dacad5SJay Sternberg struct nvme_queue *nvmeq = dev->queues[hctx_idx + 1]; 35257dacad5SJay Sternberg 35357dacad5SJay Sternberg if (!nvmeq->tags) 35457dacad5SJay Sternberg nvmeq->tags = &dev->tagset.tags[hctx_idx]; 35557dacad5SJay Sternberg 35657dacad5SJay Sternberg WARN_ON(dev->tagset.tags[hctx_idx] != hctx->tags); 35757dacad5SJay Sternberg hctx->driver_data = nvmeq; 35857dacad5SJay Sternberg return 0; 35957dacad5SJay Sternberg } 36057dacad5SJay Sternberg 361d6296d39SChristoph Hellwig static int nvme_init_request(struct blk_mq_tag_set *set, struct request *req, 362d6296d39SChristoph Hellwig unsigned int hctx_idx, unsigned int numa_node) 36357dacad5SJay Sternberg { 364d6296d39SChristoph Hellwig struct nvme_dev *dev = set->driver_data; 365f4800d6dSChristoph Hellwig struct nvme_iod *iod = blk_mq_rq_to_pdu(req); 3660350815aSChristoph Hellwig int queue_idx = (set == &dev->tagset) ? 
hctx_idx + 1 : 0; 3670350815aSChristoph Hellwig struct nvme_queue *nvmeq = dev->queues[queue_idx]; 36857dacad5SJay Sternberg 36957dacad5SJay Sternberg BUG_ON(!nvmeq); 370f4800d6dSChristoph Hellwig iod->nvmeq = nvmeq; 37157dacad5SJay Sternberg return 0; 37257dacad5SJay Sternberg } 37357dacad5SJay Sternberg 374dca51e78SChristoph Hellwig static int nvme_pci_map_queues(struct blk_mq_tag_set *set) 375dca51e78SChristoph Hellwig { 376dca51e78SChristoph Hellwig struct nvme_dev *dev = set->driver_data; 377dca51e78SChristoph Hellwig 378dca51e78SChristoph Hellwig return blk_mq_pci_map_queues(set, to_pci_dev(dev->dev)); 379dca51e78SChristoph Hellwig } 380dca51e78SChristoph Hellwig 38157dacad5SJay Sternberg /** 382adf68f21SChristoph Hellwig * __nvme_submit_cmd() - Copy a command into a queue and ring the doorbell 38357dacad5SJay Sternberg * @nvmeq: The queue to use 38457dacad5SJay Sternberg * @cmd: The command to send 38557dacad5SJay Sternberg * 38657dacad5SJay Sternberg * Safe to use from interrupt context 38757dacad5SJay Sternberg */ 38857dacad5SJay Sternberg static void __nvme_submit_cmd(struct nvme_queue *nvmeq, 38957dacad5SJay Sternberg struct nvme_command *cmd) 39057dacad5SJay Sternberg { 39157dacad5SJay Sternberg u16 tail = nvmeq->sq_tail; 39257dacad5SJay Sternberg 39357dacad5SJay Sternberg if (nvmeq->sq_cmds_io) 39457dacad5SJay Sternberg memcpy_toio(&nvmeq->sq_cmds_io[tail], cmd, sizeof(*cmd)); 39557dacad5SJay Sternberg else 39657dacad5SJay Sternberg memcpy(&nvmeq->sq_cmds[tail], cmd, sizeof(*cmd)); 39757dacad5SJay Sternberg 39857dacad5SJay Sternberg if (++tail == nvmeq->q_depth) 39957dacad5SJay Sternberg tail = 0; 400f9f38e33SHelen Koike if (nvme_dbbuf_update_and_check_event(tail, nvmeq->dbbuf_sq_db, 401f9f38e33SHelen Koike nvmeq->dbbuf_sq_ei)) 40257dacad5SJay Sternberg writel(tail, nvmeq->q_db); 40357dacad5SJay Sternberg nvmeq->sq_tail = tail; 40457dacad5SJay Sternberg } 40557dacad5SJay Sternberg 406f4800d6dSChristoph Hellwig static __le64 **iod_list(struct request *req) 40757dacad5SJay Sternberg { 408f4800d6dSChristoph Hellwig struct nvme_iod *iod = blk_mq_rq_to_pdu(req); 409f9d03f96SChristoph Hellwig return (__le64 **)(iod->sg + blk_rq_nr_phys_segments(req)); 41057dacad5SJay Sternberg } 41157dacad5SJay Sternberg 412fc17b653SChristoph Hellwig static blk_status_t nvme_init_iod(struct request *rq, struct nvme_dev *dev) 41357dacad5SJay Sternberg { 414f4800d6dSChristoph Hellwig struct nvme_iod *iod = blk_mq_rq_to_pdu(rq); 415f9d03f96SChristoph Hellwig int nseg = blk_rq_nr_phys_segments(rq); 416b131c61dSChristoph Hellwig unsigned int size = blk_rq_payload_bytes(rq); 417f4800d6dSChristoph Hellwig 418f4800d6dSChristoph Hellwig if (nseg > NVME_INT_PAGES || size > NVME_INT_BYTES(dev)) { 419f4800d6dSChristoph Hellwig iod->sg = kmalloc(nvme_iod_alloc_size(dev, size, nseg), GFP_ATOMIC); 420f4800d6dSChristoph Hellwig if (!iod->sg) 421fc17b653SChristoph Hellwig return BLK_STS_RESOURCE; 422f4800d6dSChristoph Hellwig } else { 423f4800d6dSChristoph Hellwig iod->sg = iod->inline_sg; 42457dacad5SJay Sternberg } 42557dacad5SJay Sternberg 426f4800d6dSChristoph Hellwig iod->aborted = 0; 42757dacad5SJay Sternberg iod->npages = -1; 42857dacad5SJay Sternberg iod->nents = 0; 429f4800d6dSChristoph Hellwig iod->length = size; 430f80ec966SKeith Busch 431fc17b653SChristoph Hellwig return BLK_STS_OK; 43257dacad5SJay Sternberg } 43357dacad5SJay Sternberg 434f4800d6dSChristoph Hellwig static void nvme_free_iod(struct nvme_dev *dev, struct request *req) 43557dacad5SJay Sternberg { 436f4800d6dSChristoph Hellwig struct 
nvme_iod *iod = blk_mq_rq_to_pdu(req); 4375fd4ce1bSChristoph Hellwig const int last_prp = dev->ctrl.page_size / 8 - 1; 43857dacad5SJay Sternberg int i; 439f4800d6dSChristoph Hellwig __le64 **list = iod_list(req); 44057dacad5SJay Sternberg dma_addr_t prp_dma = iod->first_dma; 44157dacad5SJay Sternberg 44257dacad5SJay Sternberg if (iod->npages == 0) 44357dacad5SJay Sternberg dma_pool_free(dev->prp_small_pool, list[0], prp_dma); 44457dacad5SJay Sternberg for (i = 0; i < iod->npages; i++) { 44557dacad5SJay Sternberg __le64 *prp_list = list[i]; 44657dacad5SJay Sternberg dma_addr_t next_prp_dma = le64_to_cpu(prp_list[last_prp]); 44757dacad5SJay Sternberg dma_pool_free(dev->prp_page_pool, prp_list, prp_dma); 44857dacad5SJay Sternberg prp_dma = next_prp_dma; 44957dacad5SJay Sternberg } 45057dacad5SJay Sternberg 451f4800d6dSChristoph Hellwig if (iod->sg != iod->inline_sg) 452f4800d6dSChristoph Hellwig kfree(iod->sg); 45357dacad5SJay Sternberg } 45457dacad5SJay Sternberg 45557dacad5SJay Sternberg #ifdef CONFIG_BLK_DEV_INTEGRITY 45657dacad5SJay Sternberg static void nvme_dif_prep(u32 p, u32 v, struct t10_pi_tuple *pi) 45757dacad5SJay Sternberg { 45857dacad5SJay Sternberg if (be32_to_cpu(pi->ref_tag) == v) 45957dacad5SJay Sternberg pi->ref_tag = cpu_to_be32(p); 46057dacad5SJay Sternberg } 46157dacad5SJay Sternberg 46257dacad5SJay Sternberg static void nvme_dif_complete(u32 p, u32 v, struct t10_pi_tuple *pi) 46357dacad5SJay Sternberg { 46457dacad5SJay Sternberg if (be32_to_cpu(pi->ref_tag) == p) 46557dacad5SJay Sternberg pi->ref_tag = cpu_to_be32(v); 46657dacad5SJay Sternberg } 46757dacad5SJay Sternberg 46857dacad5SJay Sternberg /** 46957dacad5SJay Sternberg * nvme_dif_remap - remaps ref tags to bip seed and physical lba 47057dacad5SJay Sternberg * 47157dacad5SJay Sternberg * The virtual start sector is the one that was originally submitted by the 47257dacad5SJay Sternberg * block layer. Due to partitioning, MD/DM cloning, etc. the actual physical 47357dacad5SJay Sternberg * start sector may be different. Remap protection information to match the 47457dacad5SJay Sternberg * physical LBA on writes, and back to the original seed on reads. 47557dacad5SJay Sternberg * 47657dacad5SJay Sternberg * Type 0 and 3 do not have a ref tag, so no remapping required. 
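 *
 * For example, if the bio's integrity seed (the virtual start) is 0 and the
 * request now begins at physical LBA 2048, the write path rewrites the
 * per-sector ref tags 0, 1, 2, ... to 2048, 2049, 2050, ...; the read
 * completion path runs the same loop to map them back to the original seed.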
47757dacad5SJay Sternberg */ 47857dacad5SJay Sternberg static void nvme_dif_remap(struct request *req, 47957dacad5SJay Sternberg void (*dif_swap)(u32 p, u32 v, struct t10_pi_tuple *pi)) 48057dacad5SJay Sternberg { 48157dacad5SJay Sternberg struct nvme_ns *ns = req->rq_disk->private_data; 48257dacad5SJay Sternberg struct bio_integrity_payload *bip; 48357dacad5SJay Sternberg struct t10_pi_tuple *pi; 48457dacad5SJay Sternberg void *p, *pmap; 48557dacad5SJay Sternberg u32 i, nlb, ts, phys, virt; 48657dacad5SJay Sternberg 48757dacad5SJay Sternberg if (!ns->pi_type || ns->pi_type == NVME_NS_DPS_PI_TYPE3) 48857dacad5SJay Sternberg return; 48957dacad5SJay Sternberg 49057dacad5SJay Sternberg bip = bio_integrity(req->bio); 49157dacad5SJay Sternberg if (!bip) 49257dacad5SJay Sternberg return; 49357dacad5SJay Sternberg 49457dacad5SJay Sternberg pmap = kmap_atomic(bip->bip_vec->bv_page) + bip->bip_vec->bv_offset; 49557dacad5SJay Sternberg 49657dacad5SJay Sternberg p = pmap; 49757dacad5SJay Sternberg virt = bip_get_seed(bip); 49857dacad5SJay Sternberg phys = nvme_block_nr(ns, blk_rq_pos(req)); 49957dacad5SJay Sternberg nlb = (blk_rq_bytes(req) >> ns->lba_shift); 500ac6fc48cSDan Williams ts = ns->disk->queue->integrity.tuple_size; 50157dacad5SJay Sternberg 50257dacad5SJay Sternberg for (i = 0; i < nlb; i++, virt++, phys++) { 50357dacad5SJay Sternberg pi = (struct t10_pi_tuple *)p; 50457dacad5SJay Sternberg dif_swap(phys, virt, pi); 50557dacad5SJay Sternberg p += ts; 50657dacad5SJay Sternberg } 50757dacad5SJay Sternberg kunmap_atomic(pmap); 50857dacad5SJay Sternberg } 50957dacad5SJay Sternberg #else /* CONFIG_BLK_DEV_INTEGRITY */ 51057dacad5SJay Sternberg static void nvme_dif_remap(struct request *req, 51157dacad5SJay Sternberg void (*dif_swap)(u32 p, u32 v, struct t10_pi_tuple *pi)) 51257dacad5SJay Sternberg { 51357dacad5SJay Sternberg } 51457dacad5SJay Sternberg static void nvme_dif_prep(u32 p, u32 v, struct t10_pi_tuple *pi) 51557dacad5SJay Sternberg { 51657dacad5SJay Sternberg } 51757dacad5SJay Sternberg static void nvme_dif_complete(u32 p, u32 v, struct t10_pi_tuple *pi) 51857dacad5SJay Sternberg { 51957dacad5SJay Sternberg } 52057dacad5SJay Sternberg #endif 52157dacad5SJay Sternberg 522b131c61dSChristoph Hellwig static bool nvme_setup_prps(struct nvme_dev *dev, struct request *req) 52357dacad5SJay Sternberg { 524f4800d6dSChristoph Hellwig struct nvme_iod *iod = blk_mq_rq_to_pdu(req); 52557dacad5SJay Sternberg struct dma_pool *pool; 526b131c61dSChristoph Hellwig int length = blk_rq_payload_bytes(req); 52757dacad5SJay Sternberg struct scatterlist *sg = iod->sg; 52857dacad5SJay Sternberg int dma_len = sg_dma_len(sg); 52957dacad5SJay Sternberg u64 dma_addr = sg_dma_address(sg); 5305fd4ce1bSChristoph Hellwig u32 page_size = dev->ctrl.page_size; 53157dacad5SJay Sternberg int offset = dma_addr & (page_size - 1); 53257dacad5SJay Sternberg __le64 *prp_list; 533f4800d6dSChristoph Hellwig __le64 **list = iod_list(req); 53457dacad5SJay Sternberg dma_addr_t prp_dma; 53557dacad5SJay Sternberg int nprps, i; 53657dacad5SJay Sternberg 53757dacad5SJay Sternberg length -= (page_size - offset); 53857dacad5SJay Sternberg if (length <= 0) 53969d2b571SChristoph Hellwig return true; 54057dacad5SJay Sternberg 54157dacad5SJay Sternberg dma_len -= (page_size - offset); 54257dacad5SJay Sternberg if (dma_len) { 54357dacad5SJay Sternberg dma_addr += (page_size - offset); 54457dacad5SJay Sternberg } else { 54557dacad5SJay Sternberg sg = sg_next(sg); 54657dacad5SJay Sternberg dma_addr = sg_dma_address(sg); 54757dacad5SJay 
Sternberg dma_len = sg_dma_len(sg); 54857dacad5SJay Sternberg } 54957dacad5SJay Sternberg 55057dacad5SJay Sternberg if (length <= page_size) { 55157dacad5SJay Sternberg iod->first_dma = dma_addr; 55269d2b571SChristoph Hellwig return true; 55357dacad5SJay Sternberg } 55457dacad5SJay Sternberg 55557dacad5SJay Sternberg nprps = DIV_ROUND_UP(length, page_size); 55657dacad5SJay Sternberg if (nprps <= (256 / 8)) { 55757dacad5SJay Sternberg pool = dev->prp_small_pool; 55857dacad5SJay Sternberg iod->npages = 0; 55957dacad5SJay Sternberg } else { 56057dacad5SJay Sternberg pool = dev->prp_page_pool; 56157dacad5SJay Sternberg iod->npages = 1; 56257dacad5SJay Sternberg } 56357dacad5SJay Sternberg 56469d2b571SChristoph Hellwig prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma); 56557dacad5SJay Sternberg if (!prp_list) { 56657dacad5SJay Sternberg iod->first_dma = dma_addr; 56757dacad5SJay Sternberg iod->npages = -1; 56869d2b571SChristoph Hellwig return false; 56957dacad5SJay Sternberg } 57057dacad5SJay Sternberg list[0] = prp_list; 57157dacad5SJay Sternberg iod->first_dma = prp_dma; 57257dacad5SJay Sternberg i = 0; 57357dacad5SJay Sternberg for (;;) { 57457dacad5SJay Sternberg if (i == page_size >> 3) { 57557dacad5SJay Sternberg __le64 *old_prp_list = prp_list; 57669d2b571SChristoph Hellwig prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma); 57757dacad5SJay Sternberg if (!prp_list) 57869d2b571SChristoph Hellwig return false; 57957dacad5SJay Sternberg list[iod->npages++] = prp_list; 58057dacad5SJay Sternberg prp_list[0] = old_prp_list[i - 1]; 58157dacad5SJay Sternberg old_prp_list[i - 1] = cpu_to_le64(prp_dma); 58257dacad5SJay Sternberg i = 1; 58357dacad5SJay Sternberg } 58457dacad5SJay Sternberg prp_list[i++] = cpu_to_le64(dma_addr); 58557dacad5SJay Sternberg dma_len -= page_size; 58657dacad5SJay Sternberg dma_addr += page_size; 58757dacad5SJay Sternberg length -= page_size; 58857dacad5SJay Sternberg if (length <= 0) 58957dacad5SJay Sternberg break; 59057dacad5SJay Sternberg if (dma_len > 0) 59157dacad5SJay Sternberg continue; 59257dacad5SJay Sternberg BUG_ON(dma_len < 0); 59357dacad5SJay Sternberg sg = sg_next(sg); 59457dacad5SJay Sternberg dma_addr = sg_dma_address(sg); 59557dacad5SJay Sternberg dma_len = sg_dma_len(sg); 59657dacad5SJay Sternberg } 59757dacad5SJay Sternberg 59869d2b571SChristoph Hellwig return true; 59957dacad5SJay Sternberg } 60057dacad5SJay Sternberg 601fc17b653SChristoph Hellwig static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req, 602b131c61dSChristoph Hellwig struct nvme_command *cmnd) 60357dacad5SJay Sternberg { 604f4800d6dSChristoph Hellwig struct nvme_iod *iod = blk_mq_rq_to_pdu(req); 605ba1ca37eSChristoph Hellwig struct request_queue *q = req->q; 606ba1ca37eSChristoph Hellwig enum dma_data_direction dma_dir = rq_data_dir(req) ? 
607ba1ca37eSChristoph Hellwig DMA_TO_DEVICE : DMA_FROM_DEVICE; 608fc17b653SChristoph Hellwig blk_status_t ret = BLK_STS_IOERR; 60957dacad5SJay Sternberg 610f9d03f96SChristoph Hellwig sg_init_table(iod->sg, blk_rq_nr_phys_segments(req)); 611ba1ca37eSChristoph Hellwig iod->nents = blk_rq_map_sg(q, req, iod->sg); 612ba1ca37eSChristoph Hellwig if (!iod->nents) 613ba1ca37eSChristoph Hellwig goto out; 614ba1ca37eSChristoph Hellwig 615fc17b653SChristoph Hellwig ret = BLK_STS_RESOURCE; 6162b6b535dSMauricio Faria de Oliveira if (!dma_map_sg_attrs(dev->dev, iod->sg, iod->nents, dma_dir, 6172b6b535dSMauricio Faria de Oliveira DMA_ATTR_NO_WARN)) 618ba1ca37eSChristoph Hellwig goto out; 619ba1ca37eSChristoph Hellwig 620b131c61dSChristoph Hellwig if (!nvme_setup_prps(dev, req)) 621ba1ca37eSChristoph Hellwig goto out_unmap; 622ba1ca37eSChristoph Hellwig 623fc17b653SChristoph Hellwig ret = BLK_STS_IOERR; 624ba1ca37eSChristoph Hellwig if (blk_integrity_rq(req)) { 625ba1ca37eSChristoph Hellwig if (blk_rq_count_integrity_sg(q, req->bio) != 1) 626ba1ca37eSChristoph Hellwig goto out_unmap; 627ba1ca37eSChristoph Hellwig 628bf684057SChristoph Hellwig sg_init_table(&iod->meta_sg, 1); 629bf684057SChristoph Hellwig if (blk_rq_map_integrity_sg(q, req->bio, &iod->meta_sg) != 1) 630ba1ca37eSChristoph Hellwig goto out_unmap; 631ba1ca37eSChristoph Hellwig 632ba1ca37eSChristoph Hellwig if (rq_data_dir(req)) 633ba1ca37eSChristoph Hellwig nvme_dif_remap(req, nvme_dif_prep); 634ba1ca37eSChristoph Hellwig 635bf684057SChristoph Hellwig if (!dma_map_sg(dev->dev, &iod->meta_sg, 1, dma_dir)) 636ba1ca37eSChristoph Hellwig goto out_unmap; 63757dacad5SJay Sternberg } 63857dacad5SJay Sternberg 639eb793e2cSChristoph Hellwig cmnd->rw.dptr.prp1 = cpu_to_le64(sg_dma_address(iod->sg)); 640eb793e2cSChristoph Hellwig cmnd->rw.dptr.prp2 = cpu_to_le64(iod->first_dma); 641ba1ca37eSChristoph Hellwig if (blk_integrity_rq(req)) 642bf684057SChristoph Hellwig cmnd->rw.metadata = cpu_to_le64(sg_dma_address(&iod->meta_sg)); 643fc17b653SChristoph Hellwig return BLK_STS_OK; 644ba1ca37eSChristoph Hellwig 645ba1ca37eSChristoph Hellwig out_unmap: 646ba1ca37eSChristoph Hellwig dma_unmap_sg(dev->dev, iod->sg, iod->nents, dma_dir); 647ba1ca37eSChristoph Hellwig out: 648ba1ca37eSChristoph Hellwig return ret; 64957dacad5SJay Sternberg } 65057dacad5SJay Sternberg 651f4800d6dSChristoph Hellwig static void nvme_unmap_data(struct nvme_dev *dev, struct request *req) 652d4f6c3abSChristoph Hellwig { 653f4800d6dSChristoph Hellwig struct nvme_iod *iod = blk_mq_rq_to_pdu(req); 654d4f6c3abSChristoph Hellwig enum dma_data_direction dma_dir = rq_data_dir(req) ? 655d4f6c3abSChristoph Hellwig DMA_TO_DEVICE : DMA_FROM_DEVICE; 656d4f6c3abSChristoph Hellwig 657d4f6c3abSChristoph Hellwig if (iod->nents) { 658d4f6c3abSChristoph Hellwig dma_unmap_sg(dev->dev, iod->sg, iod->nents, dma_dir); 659d4f6c3abSChristoph Hellwig if (blk_integrity_rq(req)) { 660d4f6c3abSChristoph Hellwig if (!rq_data_dir(req)) 661d4f6c3abSChristoph Hellwig nvme_dif_remap(req, nvme_dif_complete); 662bf684057SChristoph Hellwig dma_unmap_sg(dev->dev, &iod->meta_sg, 1, dma_dir); 663d4f6c3abSChristoph Hellwig } 664d4f6c3abSChristoph Hellwig } 665d4f6c3abSChristoph Hellwig 666f9d03f96SChristoph Hellwig nvme_cleanup_cmd(req); 667f4800d6dSChristoph Hellwig nvme_free_iod(dev, req); 66857dacad5SJay Sternberg } 66957dacad5SJay Sternberg 67057dacad5SJay Sternberg /* 67157dacad5SJay Sternberg * NOTE: ns is NULL when called on the admin queue. 
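 *
 * Submission path, in order: nvme_setup_cmd() builds the NVMe command from
 * the request, nvme_init_iod() sets up the per-request iod, nvme_map_data()
 * DMA-maps the payload and builds the PRP list when the request carries
 * data, and then, under q_lock and after checking that the queue has not
 * been suspended (cq_vector < 0), __nvme_submit_cmd() copies the command
 * into the SQ and rings the doorbell before nvme_process_cq() reaps any
 * completions already pending.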
67257dacad5SJay Sternberg */ 673fc17b653SChristoph Hellwig static blk_status_t nvme_queue_rq(struct blk_mq_hw_ctx *hctx, 67457dacad5SJay Sternberg const struct blk_mq_queue_data *bd) 67557dacad5SJay Sternberg { 67657dacad5SJay Sternberg struct nvme_ns *ns = hctx->queue->queuedata; 67757dacad5SJay Sternberg struct nvme_queue *nvmeq = hctx->driver_data; 67857dacad5SJay Sternberg struct nvme_dev *dev = nvmeq->dev; 67957dacad5SJay Sternberg struct request *req = bd->rq; 680ba1ca37eSChristoph Hellwig struct nvme_command cmnd; 681ebe6d874SChristoph Hellwig blk_status_t ret; 68257dacad5SJay Sternberg 683f9d03f96SChristoph Hellwig ret = nvme_setup_cmd(ns, req, &cmnd); 684fc17b653SChristoph Hellwig if (ret) 685f4800d6dSChristoph Hellwig return ret; 68657dacad5SJay Sternberg 687b131c61dSChristoph Hellwig ret = nvme_init_iod(req, dev); 688fc17b653SChristoph Hellwig if (ret) 689f9d03f96SChristoph Hellwig goto out_free_cmd; 69057dacad5SJay Sternberg 691fc17b653SChristoph Hellwig if (blk_rq_nr_phys_segments(req)) { 692b131c61dSChristoph Hellwig ret = nvme_map_data(dev, req, &cmnd); 693fc17b653SChristoph Hellwig if (ret) 694f9d03f96SChristoph Hellwig goto out_cleanup_iod; 695fc17b653SChristoph Hellwig } 696ba1ca37eSChristoph Hellwig 697aae239e1SChristoph Hellwig blk_mq_start_request(req); 698ba1ca37eSChristoph Hellwig 699ba1ca37eSChristoph Hellwig spin_lock_irq(&nvmeq->q_lock); 700ae1fba20SKeith Busch if (unlikely(nvmeq->cq_vector < 0)) { 701fc17b653SChristoph Hellwig ret = BLK_STS_IOERR; 702ae1fba20SKeith Busch spin_unlock_irq(&nvmeq->q_lock); 703f9d03f96SChristoph Hellwig goto out_cleanup_iod; 704ae1fba20SKeith Busch } 705ba1ca37eSChristoph Hellwig __nvme_submit_cmd(nvmeq, &cmnd); 70657dacad5SJay Sternberg nvme_process_cq(nvmeq); 70757dacad5SJay Sternberg spin_unlock_irq(&nvmeq->q_lock); 708fc17b653SChristoph Hellwig return BLK_STS_OK; 709f9d03f96SChristoph Hellwig out_cleanup_iod: 710f4800d6dSChristoph Hellwig nvme_free_iod(dev, req); 711f9d03f96SChristoph Hellwig out_free_cmd: 712f9d03f96SChristoph Hellwig nvme_cleanup_cmd(req); 713ba1ca37eSChristoph Hellwig return ret; 71457dacad5SJay Sternberg } 71557dacad5SJay Sternberg 71677f02a7aSChristoph Hellwig static void nvme_pci_complete_rq(struct request *req) 717eee417b0SChristoph Hellwig { 718f4800d6dSChristoph Hellwig struct nvme_iod *iod = blk_mq_rq_to_pdu(req); 719eee417b0SChristoph Hellwig 72077f02a7aSChristoph Hellwig nvme_unmap_data(iod->nvmeq->dev, req); 72177f02a7aSChristoph Hellwig nvme_complete_rq(req); 72257dacad5SJay Sternberg } 72357dacad5SJay Sternberg 724d783e0bdSMarta Rybczynska /* We read the CQE phase first to check if the rest of the entry is valid */ 725d783e0bdSMarta Rybczynska static inline bool nvme_cqe_valid(struct nvme_queue *nvmeq, u16 head, 726d783e0bdSMarta Rybczynska u16 phase) 727d783e0bdSMarta Rybczynska { 728d783e0bdSMarta Rybczynska return (le16_to_cpu(nvmeq->cqes[head].status) & 1) == phase; 729d783e0bdSMarta Rybczynska } 730d783e0bdSMarta Rybczynska 731eb281c82SSagi Grimberg static inline void nvme_ring_cq_doorbell(struct nvme_queue *nvmeq) 732eb281c82SSagi Grimberg { 733eb281c82SSagi Grimberg u16 head = nvmeq->cq_head; 734eb281c82SSagi Grimberg 735eb281c82SSagi Grimberg if (likely(nvmeq->cq_vector >= 0)) { 736eb281c82SSagi Grimberg if (nvme_dbbuf_update_and_check_event(head, nvmeq->dbbuf_cq_db, 737eb281c82SSagi Grimberg nvmeq->dbbuf_cq_ei)) 738eb281c82SSagi Grimberg writel(head, nvmeq->q_db + nvmeq->dev->db_stride); 739eb281c82SSagi Grimberg } 740eb281c82SSagi Grimberg } 741eb281c82SSagi Grimberg 74283a12fb7SSagi 
Grimberg static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, 74383a12fb7SSagi Grimberg struct nvme_completion *cqe) 74457dacad5SJay Sternberg { 745eee417b0SChristoph Hellwig struct request *req; 746adf68f21SChristoph Hellwig 74783a12fb7SSagi Grimberg if (unlikely(cqe->command_id >= nvmeq->q_depth)) { 7481b3c47c1SSagi Grimberg dev_warn(nvmeq->dev->ctrl.device, 749aae239e1SChristoph Hellwig "invalid id %d completed on queue %d\n", 75083a12fb7SSagi Grimberg cqe->command_id, le16_to_cpu(cqe->sq_id)); 75183a12fb7SSagi Grimberg return; 752aae239e1SChristoph Hellwig } 753aae239e1SChristoph Hellwig 754adf68f21SChristoph Hellwig /* 755adf68f21SChristoph Hellwig * AEN requests are special as they don't time out and can 756adf68f21SChristoph Hellwig * survive any kind of queue freeze and often don't respond to 757adf68f21SChristoph Hellwig * aborts. We don't even bother to allocate a struct request 758adf68f21SChristoph Hellwig * for them but rather special case them here. 759adf68f21SChristoph Hellwig */ 760adf68f21SChristoph Hellwig if (unlikely(nvmeq->qid == 0 && 76183a12fb7SSagi Grimberg cqe->command_id >= NVME_AQ_BLKMQ_DEPTH)) { 7627bf58533SChristoph Hellwig nvme_complete_async_event(&nvmeq->dev->ctrl, 76383a12fb7SSagi Grimberg cqe->status, &cqe->result); 76483a12fb7SSagi Grimberg return; 765adf68f21SChristoph Hellwig } 766adf68f21SChristoph Hellwig 76783a12fb7SSagi Grimberg req = blk_mq_tag_to_rq(*nvmeq->tags, cqe->command_id); 76883a12fb7SSagi Grimberg nvme_end_request(req, cqe->status, cqe->result); 76983a12fb7SSagi Grimberg } 77083a12fb7SSagi Grimberg 771920d13a8SSagi Grimberg static inline bool nvme_read_cqe(struct nvme_queue *nvmeq, 772920d13a8SSagi Grimberg struct nvme_completion *cqe) 77383a12fb7SSagi Grimberg { 774920d13a8SSagi Grimberg if (nvme_cqe_valid(nvmeq, nvmeq->cq_head, nvmeq->cq_phase)) { 775920d13a8SSagi Grimberg *cqe = nvmeq->cqes[nvmeq->cq_head]; 77683a12fb7SSagi Grimberg 777920d13a8SSagi Grimberg if (++nvmeq->cq_head == nvmeq->q_depth) { 778920d13a8SSagi Grimberg nvmeq->cq_head = 0; 779920d13a8SSagi Grimberg nvmeq->cq_phase = !nvmeq->cq_phase; 780920d13a8SSagi Grimberg } 781920d13a8SSagi Grimberg return true; 782920d13a8SSagi Grimberg } 783920d13a8SSagi Grimberg return false; 78483a12fb7SSagi Grimberg } 78583a12fb7SSagi Grimberg 786442e19b7SSagi Grimberg static void nvme_process_cq(struct nvme_queue *nvmeq) 787920d13a8SSagi Grimberg { 788920d13a8SSagi Grimberg struct nvme_completion cqe; 789920d13a8SSagi Grimberg int consumed = 0; 79083a12fb7SSagi Grimberg 791920d13a8SSagi Grimberg while (nvme_read_cqe(nvmeq, &cqe)) { 79283a12fb7SSagi Grimberg nvme_handle_cqe(nvmeq, &cqe); 793920d13a8SSagi Grimberg consumed++; 79457dacad5SJay Sternberg } 79557dacad5SJay Sternberg 796920d13a8SSagi Grimberg if (consumed) { 797eb281c82SSagi Grimberg nvme_ring_cq_doorbell(nvmeq); 79857dacad5SJay Sternberg nvmeq->cqe_seen = 1; 799a0fa9647SJens Axboe } 800920d13a8SSagi Grimberg } 801a0fa9647SJens Axboe 80257dacad5SJay Sternberg static irqreturn_t nvme_irq(int irq, void *data) 80357dacad5SJay Sternberg { 80457dacad5SJay Sternberg irqreturn_t result; 80557dacad5SJay Sternberg struct nvme_queue *nvmeq = data; 80657dacad5SJay Sternberg spin_lock(&nvmeq->q_lock); 80757dacad5SJay Sternberg nvme_process_cq(nvmeq); 80857dacad5SJay Sternberg result = nvmeq->cqe_seen ? 
IRQ_HANDLED : IRQ_NONE; 80957dacad5SJay Sternberg nvmeq->cqe_seen = 0; 81057dacad5SJay Sternberg spin_unlock(&nvmeq->q_lock); 81157dacad5SJay Sternberg return result; 81257dacad5SJay Sternberg } 81357dacad5SJay Sternberg 81457dacad5SJay Sternberg static irqreturn_t nvme_irq_check(int irq, void *data) 81557dacad5SJay Sternberg { 81657dacad5SJay Sternberg struct nvme_queue *nvmeq = data; 817d783e0bdSMarta Rybczynska if (nvme_cqe_valid(nvmeq, nvmeq->cq_head, nvmeq->cq_phase)) 81857dacad5SJay Sternberg return IRQ_WAKE_THREAD; 819d783e0bdSMarta Rybczynska return IRQ_NONE; 82057dacad5SJay Sternberg } 82157dacad5SJay Sternberg 8227776db1cSKeith Busch static int __nvme_poll(struct nvme_queue *nvmeq, unsigned int tag) 823a0fa9647SJens Axboe { 824442e19b7SSagi Grimberg struct nvme_completion cqe; 825442e19b7SSagi Grimberg int found = 0, consumed = 0; 826a0fa9647SJens Axboe 827442e19b7SSagi Grimberg if (!nvme_cqe_valid(nvmeq, nvmeq->cq_head, nvmeq->cq_phase)) 828442e19b7SSagi Grimberg return 0; 829442e19b7SSagi Grimberg 830442e19b7SSagi Grimberg spin_lock_irq(&nvmeq->q_lock); 831442e19b7SSagi Grimberg while (nvme_read_cqe(nvmeq, &cqe)) { 832442e19b7SSagi Grimberg nvme_handle_cqe(nvmeq, &cqe); 833442e19b7SSagi Grimberg consumed++; 834442e19b7SSagi Grimberg 835442e19b7SSagi Grimberg if (tag == cqe.command_id) { 836442e19b7SSagi Grimberg found = 1; 837442e19b7SSagi Grimberg break; 838442e19b7SSagi Grimberg } 839a0fa9647SJens Axboe } 840a0fa9647SJens Axboe 841442e19b7SSagi Grimberg if (consumed) 842442e19b7SSagi Grimberg nvme_ring_cq_doorbell(nvmeq); 843442e19b7SSagi Grimberg spin_unlock_irq(&nvmeq->q_lock); 844442e19b7SSagi Grimberg 845442e19b7SSagi Grimberg return found; 846a0fa9647SJens Axboe } 847a0fa9647SJens Axboe 8487776db1cSKeith Busch static int nvme_poll(struct blk_mq_hw_ctx *hctx, unsigned int tag) 8497776db1cSKeith Busch { 8507776db1cSKeith Busch struct nvme_queue *nvmeq = hctx->driver_data; 8517776db1cSKeith Busch 8527776db1cSKeith Busch return __nvme_poll(nvmeq, tag); 8537776db1cSKeith Busch } 8547776db1cSKeith Busch 855f866fc42SChristoph Hellwig static void nvme_pci_submit_async_event(struct nvme_ctrl *ctrl, int aer_idx) 85657dacad5SJay Sternberg { 857f866fc42SChristoph Hellwig struct nvme_dev *dev = to_nvme_dev(ctrl); 8589396dec9SChristoph Hellwig struct nvme_queue *nvmeq = dev->queues[0]; 85957dacad5SJay Sternberg struct nvme_command c; 86057dacad5SJay Sternberg 86157dacad5SJay Sternberg memset(&c, 0, sizeof(c)); 86257dacad5SJay Sternberg c.common.opcode = nvme_admin_async_event; 863f866fc42SChristoph Hellwig c.common.command_id = NVME_AQ_BLKMQ_DEPTH + aer_idx; 86457dacad5SJay Sternberg 8659396dec9SChristoph Hellwig spin_lock_irq(&nvmeq->q_lock); 8669396dec9SChristoph Hellwig __nvme_submit_cmd(nvmeq, &c); 8679396dec9SChristoph Hellwig spin_unlock_irq(&nvmeq->q_lock); 86857dacad5SJay Sternberg } 86957dacad5SJay Sternberg 87057dacad5SJay Sternberg static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id) 87157dacad5SJay Sternberg { 87257dacad5SJay Sternberg struct nvme_command c; 87357dacad5SJay Sternberg 87457dacad5SJay Sternberg memset(&c, 0, sizeof(c)); 87557dacad5SJay Sternberg c.delete_queue.opcode = opcode; 87657dacad5SJay Sternberg c.delete_queue.qid = cpu_to_le16(id); 87757dacad5SJay Sternberg 8781c63dc66SChristoph Hellwig return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0); 87957dacad5SJay Sternberg } 88057dacad5SJay Sternberg 88157dacad5SJay Sternberg static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid, 88257dacad5SJay Sternberg struct nvme_queue 
*nvmeq) 88357dacad5SJay Sternberg { 88457dacad5SJay Sternberg struct nvme_command c; 88557dacad5SJay Sternberg int flags = NVME_QUEUE_PHYS_CONTIG | NVME_CQ_IRQ_ENABLED; 88657dacad5SJay Sternberg 88757dacad5SJay Sternberg /* 88857dacad5SJay Sternberg * Note: we (ab)use the fact that the prp fields survive if no data 88957dacad5SJay Sternberg * is attached to the request. 89057dacad5SJay Sternberg */ 89157dacad5SJay Sternberg memset(&c, 0, sizeof(c)); 89257dacad5SJay Sternberg c.create_cq.opcode = nvme_admin_create_cq; 89357dacad5SJay Sternberg c.create_cq.prp1 = cpu_to_le64(nvmeq->cq_dma_addr); 89457dacad5SJay Sternberg c.create_cq.cqid = cpu_to_le16(qid); 89557dacad5SJay Sternberg c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1); 89657dacad5SJay Sternberg c.create_cq.cq_flags = cpu_to_le16(flags); 89757dacad5SJay Sternberg c.create_cq.irq_vector = cpu_to_le16(nvmeq->cq_vector); 89857dacad5SJay Sternberg 8991c63dc66SChristoph Hellwig return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0); 90057dacad5SJay Sternberg } 90157dacad5SJay Sternberg 90257dacad5SJay Sternberg static int adapter_alloc_sq(struct nvme_dev *dev, u16 qid, 90357dacad5SJay Sternberg struct nvme_queue *nvmeq) 90457dacad5SJay Sternberg { 90557dacad5SJay Sternberg struct nvme_command c; 90681c1cd98SKeith Busch int flags = NVME_QUEUE_PHYS_CONTIG; 90757dacad5SJay Sternberg 90857dacad5SJay Sternberg /* 90957dacad5SJay Sternberg * Note: we (ab)use the fact that the prp fields survive if no data 91057dacad5SJay Sternberg * is attached to the request. 91157dacad5SJay Sternberg */ 91257dacad5SJay Sternberg memset(&c, 0, sizeof(c)); 91357dacad5SJay Sternberg c.create_sq.opcode = nvme_admin_create_sq; 91457dacad5SJay Sternberg c.create_sq.prp1 = cpu_to_le64(nvmeq->sq_dma_addr); 91557dacad5SJay Sternberg c.create_sq.sqid = cpu_to_le16(qid); 91657dacad5SJay Sternberg c.create_sq.qsize = cpu_to_le16(nvmeq->q_depth - 1); 91757dacad5SJay Sternberg c.create_sq.sq_flags = cpu_to_le16(flags); 91857dacad5SJay Sternberg c.create_sq.cqid = cpu_to_le16(qid); 91957dacad5SJay Sternberg 9201c63dc66SChristoph Hellwig return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0); 92157dacad5SJay Sternberg } 92257dacad5SJay Sternberg 92357dacad5SJay Sternberg static int adapter_delete_cq(struct nvme_dev *dev, u16 cqid) 92457dacad5SJay Sternberg { 92557dacad5SJay Sternberg return adapter_delete_queue(dev, nvme_admin_delete_cq, cqid); 92657dacad5SJay Sternberg } 92757dacad5SJay Sternberg 92857dacad5SJay Sternberg static int adapter_delete_sq(struct nvme_dev *dev, u16 sqid) 92957dacad5SJay Sternberg { 93057dacad5SJay Sternberg return adapter_delete_queue(dev, nvme_admin_delete_sq, sqid); 93157dacad5SJay Sternberg } 93257dacad5SJay Sternberg 9332a842acaSChristoph Hellwig static void abort_endio(struct request *req, blk_status_t error) 93457dacad5SJay Sternberg { 935f4800d6dSChristoph Hellwig struct nvme_iod *iod = blk_mq_rq_to_pdu(req); 936f4800d6dSChristoph Hellwig struct nvme_queue *nvmeq = iod->nvmeq; 93757dacad5SJay Sternberg 93827fa9bc5SChristoph Hellwig dev_warn(nvmeq->dev->ctrl.device, 93927fa9bc5SChristoph Hellwig "Abort status: 0x%x", nvme_req(req)->status); 940e7a2a87dSChristoph Hellwig atomic_inc(&nvmeq->dev->ctrl.abort_limit); 941e7a2a87dSChristoph Hellwig blk_mq_free_request(req); 94257dacad5SJay Sternberg } 94357dacad5SJay Sternberg 944b2a0eb1aSKeith Busch static bool nvme_should_reset(struct nvme_dev *dev, u32 csts) 945b2a0eb1aSKeith Busch { 946b2a0eb1aSKeith Busch 947b2a0eb1aSKeith Busch /* If true, indicates loss of adapter
communication, possibly by a 948b2a0eb1aSKeith Busch * NVMe Subsystem reset. 949b2a0eb1aSKeith Busch */ 950b2a0eb1aSKeith Busch bool nssro = dev->subsystem && (csts & NVME_CSTS_NSSRO); 951b2a0eb1aSKeith Busch 952b2a0eb1aSKeith Busch /* If there is a reset ongoing, we shouldn't reset again. */ 953b2a0eb1aSKeith Busch if (dev->ctrl.state == NVME_CTRL_RESETTING) 954b2a0eb1aSKeith Busch return false; 955b2a0eb1aSKeith Busch 956b2a0eb1aSKeith Busch /* We shouldn't reset unless the controller is on fatal error state 957b2a0eb1aSKeith Busch * _or_ if we lost the communication with it. 958b2a0eb1aSKeith Busch */ 959b2a0eb1aSKeith Busch if (!(csts & NVME_CSTS_CFS) && !nssro) 960b2a0eb1aSKeith Busch return false; 961b2a0eb1aSKeith Busch 962b2a0eb1aSKeith Busch /* If PCI error recovery process is happening, we cannot reset or 963b2a0eb1aSKeith Busch * the recovery mechanism will surely fail. 964b2a0eb1aSKeith Busch */ 965b2a0eb1aSKeith Busch if (pci_channel_offline(to_pci_dev(dev->dev))) 966b2a0eb1aSKeith Busch return false; 967b2a0eb1aSKeith Busch 968b2a0eb1aSKeith Busch return true; 969b2a0eb1aSKeith Busch } 970b2a0eb1aSKeith Busch 971b2a0eb1aSKeith Busch static void nvme_warn_reset(struct nvme_dev *dev, u32 csts) 972b2a0eb1aSKeith Busch { 973b2a0eb1aSKeith Busch /* Read a config register to help see what died. */ 974b2a0eb1aSKeith Busch u16 pci_status; 975b2a0eb1aSKeith Busch int result; 976b2a0eb1aSKeith Busch 977b2a0eb1aSKeith Busch result = pci_read_config_word(to_pci_dev(dev->dev), PCI_STATUS, 978b2a0eb1aSKeith Busch &pci_status); 979b2a0eb1aSKeith Busch if (result == PCIBIOS_SUCCESSFUL) 980b2a0eb1aSKeith Busch dev_warn(dev->ctrl.device, 981b2a0eb1aSKeith Busch "controller is down; will reset: CSTS=0x%x, PCI_STATUS=0x%hx\n", 982b2a0eb1aSKeith Busch csts, pci_status); 983b2a0eb1aSKeith Busch else 984b2a0eb1aSKeith Busch dev_warn(dev->ctrl.device, 985b2a0eb1aSKeith Busch "controller is down; will reset: CSTS=0x%x, PCI_STATUS read failed (%d)\n", 986b2a0eb1aSKeith Busch csts, result); 987b2a0eb1aSKeith Busch } 988b2a0eb1aSKeith Busch 98931c7c7d2SChristoph Hellwig static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved) 99057dacad5SJay Sternberg { 991f4800d6dSChristoph Hellwig struct nvme_iod *iod = blk_mq_rq_to_pdu(req); 992f4800d6dSChristoph Hellwig struct nvme_queue *nvmeq = iod->nvmeq; 99357dacad5SJay Sternberg struct nvme_dev *dev = nvmeq->dev; 99457dacad5SJay Sternberg struct request *abort_req; 99557dacad5SJay Sternberg struct nvme_command cmd; 996b2a0eb1aSKeith Busch u32 csts = readl(dev->bar + NVME_REG_CSTS); 997b2a0eb1aSKeith Busch 998b2a0eb1aSKeith Busch /* 999b2a0eb1aSKeith Busch * Reset immediately if the controller is failed 1000b2a0eb1aSKeith Busch */ 1001b2a0eb1aSKeith Busch if (nvme_should_reset(dev, csts)) { 1002b2a0eb1aSKeith Busch nvme_warn_reset(dev, csts); 1003b2a0eb1aSKeith Busch nvme_dev_disable(dev, false); 1004d86c4d8eSChristoph Hellwig nvme_reset_ctrl(&dev->ctrl); 1005b2a0eb1aSKeith Busch return BLK_EH_HANDLED; 1006b2a0eb1aSKeith Busch } 100757dacad5SJay Sternberg 100831c7c7d2SChristoph Hellwig /* 10097776db1cSKeith Busch * Did we miss an interrupt? 
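 * If polling the completion queue finds this tag, the command has in fact
 * completed and the timeout can be resolved without resetting the
 * controller.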
10107776db1cSKeith Busch */ 10117776db1cSKeith Busch if (__nvme_poll(nvmeq, req->tag)) { 10127776db1cSKeith Busch dev_warn(dev->ctrl.device, 10137776db1cSKeith Busch "I/O %d QID %d timeout, completion polled\n", 10147776db1cSKeith Busch req->tag, nvmeq->qid); 10157776db1cSKeith Busch return BLK_EH_HANDLED; 10167776db1cSKeith Busch } 10177776db1cSKeith Busch 10187776db1cSKeith Busch /* 1019fd634f41SChristoph Hellwig * Shutdown immediately if controller times out while starting. The 1020fd634f41SChristoph Hellwig * reset work will see the pci device disabled when it gets the forced 1021fd634f41SChristoph Hellwig * cancellation error. All outstanding requests are completed on 1022fd634f41SChristoph Hellwig * shutdown, so we return BLK_EH_HANDLED. 1023fd634f41SChristoph Hellwig */ 1024bb8d261eSChristoph Hellwig if (dev->ctrl.state == NVME_CTRL_RESETTING) { 10251b3c47c1SSagi Grimberg dev_warn(dev->ctrl.device, 1026fd634f41SChristoph Hellwig "I/O %d QID %d timeout, disable controller\n", 1027fd634f41SChristoph Hellwig req->tag, nvmeq->qid); 1028a5cdb68cSKeith Busch nvme_dev_disable(dev, false); 102927fa9bc5SChristoph Hellwig nvme_req(req)->flags |= NVME_REQ_CANCELLED; 1030fd634f41SChristoph Hellwig return BLK_EH_HANDLED; 1031fd634f41SChristoph Hellwig } 1032fd634f41SChristoph Hellwig 1033fd634f41SChristoph Hellwig /* 1034e1569a16SKeith Busch * Shutdown the controller immediately and schedule a reset if the 1035e1569a16SKeith Busch * command was already aborted once before and still hasn't been 1036e1569a16SKeith Busch * returned to the driver, or if this is the admin queue. 103731c7c7d2SChristoph Hellwig */ 1038f4800d6dSChristoph Hellwig if (!nvmeq->qid || iod->aborted) { 10391b3c47c1SSagi Grimberg dev_warn(dev->ctrl.device, 104057dacad5SJay Sternberg "I/O %d QID %d timeout, reset controller\n", 104157dacad5SJay Sternberg req->tag, nvmeq->qid); 1042a5cdb68cSKeith Busch nvme_dev_disable(dev, false); 1043d86c4d8eSChristoph Hellwig nvme_reset_ctrl(&dev->ctrl); 1044e1569a16SKeith Busch 1045e1569a16SKeith Busch /* 1046e1569a16SKeith Busch * Mark the request as handled, since the inline shutdown 1047e1569a16SKeith Busch * forces all outstanding requests to complete. 
1048e1569a16SKeith Busch */ 104927fa9bc5SChristoph Hellwig nvme_req(req)->flags |= NVME_REQ_CANCELLED; 1050e1569a16SKeith Busch return BLK_EH_HANDLED; 105157dacad5SJay Sternberg } 105257dacad5SJay Sternberg 1053e7a2a87dSChristoph Hellwig if (atomic_dec_return(&dev->ctrl.abort_limit) < 0) { 1054e7a2a87dSChristoph Hellwig atomic_inc(&dev->ctrl.abort_limit); 1055e7a2a87dSChristoph Hellwig return BLK_EH_RESET_TIMER; 1056e7a2a87dSChristoph Hellwig } 10577bf7d778SKeith Busch iod->aborted = 1; 105857dacad5SJay Sternberg 105957dacad5SJay Sternberg memset(&cmd, 0, sizeof(cmd)); 106057dacad5SJay Sternberg cmd.abort.opcode = nvme_admin_abort_cmd; 106157dacad5SJay Sternberg cmd.abort.cid = req->tag; 106257dacad5SJay Sternberg cmd.abort.sqid = cpu_to_le16(nvmeq->qid); 106357dacad5SJay Sternberg 10641b3c47c1SSagi Grimberg dev_warn(nvmeq->dev->ctrl.device, 10651b3c47c1SSagi Grimberg "I/O %d QID %d timeout, aborting\n", 106657dacad5SJay Sternberg req->tag, nvmeq->qid); 1067e7a2a87dSChristoph Hellwig 1068e7a2a87dSChristoph Hellwig abort_req = nvme_alloc_request(dev->ctrl.admin_q, &cmd, 1069eb71f435SChristoph Hellwig BLK_MQ_REQ_NOWAIT, NVME_QID_ANY); 10706bf25d16SChristoph Hellwig if (IS_ERR(abort_req)) { 10716bf25d16SChristoph Hellwig atomic_inc(&dev->ctrl.abort_limit); 107231c7c7d2SChristoph Hellwig return BLK_EH_RESET_TIMER; 107357dacad5SJay Sternberg } 107457dacad5SJay Sternberg 1075e7a2a87dSChristoph Hellwig abort_req->timeout = ADMIN_TIMEOUT; 1076e7a2a87dSChristoph Hellwig abort_req->end_io_data = NULL; 1077e7a2a87dSChristoph Hellwig blk_execute_rq_nowait(abort_req->q, NULL, abort_req, 0, abort_endio); 107857dacad5SJay Sternberg 107957dacad5SJay Sternberg /* 108057dacad5SJay Sternberg * The aborted req will be completed on receiving the abort req. 108157dacad5SJay Sternberg * We enable the timer again. If hit twice, it'll cause a device reset, 108257dacad5SJay Sternberg * as the device then is in a faulty state. 
108357dacad5SJay Sternberg */ 108457dacad5SJay Sternberg return BLK_EH_RESET_TIMER; 108557dacad5SJay Sternberg } 108657dacad5SJay Sternberg 108757dacad5SJay Sternberg static void nvme_free_queue(struct nvme_queue *nvmeq) 108857dacad5SJay Sternberg { 108957dacad5SJay Sternberg dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth), 109057dacad5SJay Sternberg (void *)nvmeq->cqes, nvmeq->cq_dma_addr); 109157dacad5SJay Sternberg if (nvmeq->sq_cmds) 109257dacad5SJay Sternberg dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth), 109357dacad5SJay Sternberg nvmeq->sq_cmds, nvmeq->sq_dma_addr); 109457dacad5SJay Sternberg kfree(nvmeq); 109557dacad5SJay Sternberg } 109657dacad5SJay Sternberg 109757dacad5SJay Sternberg static void nvme_free_queues(struct nvme_dev *dev, int lowest) 109857dacad5SJay Sternberg { 109957dacad5SJay Sternberg int i; 110057dacad5SJay Sternberg 1101d858e5f0SSagi Grimberg for (i = dev->ctrl.queue_count - 1; i >= lowest; i--) { 110257dacad5SJay Sternberg struct nvme_queue *nvmeq = dev->queues[i]; 1103d858e5f0SSagi Grimberg dev->ctrl.queue_count--; 110457dacad5SJay Sternberg dev->queues[i] = NULL; 110557dacad5SJay Sternberg nvme_free_queue(nvmeq); 110657dacad5SJay Sternberg } 110757dacad5SJay Sternberg } 110857dacad5SJay Sternberg 110957dacad5SJay Sternberg /** 111057dacad5SJay Sternberg * nvme_suspend_queue - put queue into suspended state 111157dacad5SJay Sternberg * @nvmeq - queue to suspend 111257dacad5SJay Sternberg */ 111357dacad5SJay Sternberg static int nvme_suspend_queue(struct nvme_queue *nvmeq) 111457dacad5SJay Sternberg { 111557dacad5SJay Sternberg int vector; 111657dacad5SJay Sternberg 111757dacad5SJay Sternberg spin_lock_irq(&nvmeq->q_lock); 111857dacad5SJay Sternberg if (nvmeq->cq_vector == -1) { 111957dacad5SJay Sternberg spin_unlock_irq(&nvmeq->q_lock); 112057dacad5SJay Sternberg return 1; 112157dacad5SJay Sternberg } 11220ff199cbSChristoph Hellwig vector = nvmeq->cq_vector; 112357dacad5SJay Sternberg nvmeq->dev->online_queues--; 112457dacad5SJay Sternberg nvmeq->cq_vector = -1; 112557dacad5SJay Sternberg spin_unlock_irq(&nvmeq->q_lock); 112657dacad5SJay Sternberg 11271c63dc66SChristoph Hellwig if (!nvmeq->qid && nvmeq->dev->ctrl.admin_q) 1128c81545f9SSagi Grimberg blk_mq_quiesce_queue(nvmeq->dev->ctrl.admin_q); 112957dacad5SJay Sternberg 11300ff199cbSChristoph Hellwig pci_free_irq(to_pci_dev(nvmeq->dev->dev), vector, nvmeq); 113157dacad5SJay Sternberg 113257dacad5SJay Sternberg return 0; 113357dacad5SJay Sternberg } 113457dacad5SJay Sternberg 1135a5cdb68cSKeith Busch static void nvme_disable_admin_queue(struct nvme_dev *dev, bool shutdown) 113657dacad5SJay Sternberg { 1137a5cdb68cSKeith Busch struct nvme_queue *nvmeq = dev->queues[0]; 113857dacad5SJay Sternberg 113957dacad5SJay Sternberg if (!nvmeq) 114057dacad5SJay Sternberg return; 114157dacad5SJay Sternberg if (nvme_suspend_queue(nvmeq)) 114257dacad5SJay Sternberg return; 114357dacad5SJay Sternberg 1144a5cdb68cSKeith Busch if (shutdown) 1145a5cdb68cSKeith Busch nvme_shutdown_ctrl(&dev->ctrl); 1146a5cdb68cSKeith Busch else 114720d0dfe6SSagi Grimberg nvme_disable_ctrl(&dev->ctrl, dev->ctrl.cap); 114857dacad5SJay Sternberg 114957dacad5SJay Sternberg spin_lock_irq(&nvmeq->q_lock); 115057dacad5SJay Sternberg nvme_process_cq(nvmeq); 115157dacad5SJay Sternberg spin_unlock_irq(&nvmeq->q_lock); 115257dacad5SJay Sternberg } 115357dacad5SJay Sternberg 115457dacad5SJay Sternberg static int nvme_cmb_qdepth(struct nvme_dev *dev, int nr_io_queues, 115557dacad5SJay Sternberg int entry_size) 115657dacad5SJay 
Sternberg { 115757dacad5SJay Sternberg int q_depth = dev->q_depth; 11585fd4ce1bSChristoph Hellwig unsigned q_size_aligned = roundup(q_depth * entry_size, 11595fd4ce1bSChristoph Hellwig dev->ctrl.page_size); 116057dacad5SJay Sternberg 116157dacad5SJay Sternberg if (q_size_aligned * nr_io_queues > dev->cmb_size) { 116257dacad5SJay Sternberg u64 mem_per_q = div_u64(dev->cmb_size, nr_io_queues); 11635fd4ce1bSChristoph Hellwig mem_per_q = round_down(mem_per_q, dev->ctrl.page_size); 116457dacad5SJay Sternberg q_depth = div_u64(mem_per_q, entry_size); 116557dacad5SJay Sternberg 116657dacad5SJay Sternberg /* 116757dacad5SJay Sternberg * Ensure the reduced q_depth is above some threshold where it 116857dacad5SJay Sternberg * would be better to map queues in system memory with the 116957dacad5SJay Sternberg * original depth 117057dacad5SJay Sternberg */ 117157dacad5SJay Sternberg if (q_depth < 64) 117257dacad5SJay Sternberg return -ENOMEM; 117357dacad5SJay Sternberg } 117457dacad5SJay Sternberg 117557dacad5SJay Sternberg return q_depth; 117657dacad5SJay Sternberg } 117757dacad5SJay Sternberg 117857dacad5SJay Sternberg static int nvme_alloc_sq_cmds(struct nvme_dev *dev, struct nvme_queue *nvmeq, 117957dacad5SJay Sternberg int qid, int depth) 118057dacad5SJay Sternberg { 118157dacad5SJay Sternberg if (qid && dev->cmb && use_cmb_sqes && NVME_CMB_SQS(dev->cmbsz)) { 11825fd4ce1bSChristoph Hellwig unsigned offset = (qid - 1) * roundup(SQ_SIZE(depth), 11835fd4ce1bSChristoph Hellwig dev->ctrl.page_size); 118457dacad5SJay Sternberg nvmeq->sq_dma_addr = dev->cmb_dma_addr + offset; 118557dacad5SJay Sternberg nvmeq->sq_cmds_io = dev->cmb + offset; 118657dacad5SJay Sternberg } else { 118757dacad5SJay Sternberg nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(depth), 118857dacad5SJay Sternberg &nvmeq->sq_dma_addr, GFP_KERNEL); 118957dacad5SJay Sternberg if (!nvmeq->sq_cmds) 119057dacad5SJay Sternberg return -ENOMEM; 119157dacad5SJay Sternberg } 119257dacad5SJay Sternberg 119357dacad5SJay Sternberg return 0; 119457dacad5SJay Sternberg } 119557dacad5SJay Sternberg 119657dacad5SJay Sternberg static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid, 1197d3af3ecdSShaohua Li int depth, int node) 119857dacad5SJay Sternberg { 1199d3af3ecdSShaohua Li struct nvme_queue *nvmeq = kzalloc_node(sizeof(*nvmeq), GFP_KERNEL, 1200d3af3ecdSShaohua Li node); 120157dacad5SJay Sternberg if (!nvmeq) 120257dacad5SJay Sternberg return NULL; 120357dacad5SJay Sternberg 120457dacad5SJay Sternberg nvmeq->cqes = dma_zalloc_coherent(dev->dev, CQ_SIZE(depth), 120557dacad5SJay Sternberg &nvmeq->cq_dma_addr, GFP_KERNEL); 120657dacad5SJay Sternberg if (!nvmeq->cqes) 120757dacad5SJay Sternberg goto free_nvmeq; 120857dacad5SJay Sternberg 120957dacad5SJay Sternberg if (nvme_alloc_sq_cmds(dev, nvmeq, qid, depth)) 121057dacad5SJay Sternberg goto free_cqdma; 121157dacad5SJay Sternberg 121257dacad5SJay Sternberg nvmeq->q_dmadev = dev->dev; 121357dacad5SJay Sternberg nvmeq->dev = dev; 121457dacad5SJay Sternberg spin_lock_init(&nvmeq->q_lock); 121557dacad5SJay Sternberg nvmeq->cq_head = 0; 121657dacad5SJay Sternberg nvmeq->cq_phase = 1; 121757dacad5SJay Sternberg nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride]; 121857dacad5SJay Sternberg nvmeq->q_depth = depth; 121957dacad5SJay Sternberg nvmeq->qid = qid; 122057dacad5SJay Sternberg nvmeq->cq_vector = -1; 122157dacad5SJay Sternberg dev->queues[qid] = nvmeq; 1222d858e5f0SSagi Grimberg dev->ctrl.queue_count++; 122357dacad5SJay Sternberg 122457dacad5SJay Sternberg return nvmeq; 
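	/* error unwind: free the CQ DMA ring first, then the queue structure itself */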
122557dacad5SJay Sternberg 122657dacad5SJay Sternberg free_cqdma: 122757dacad5SJay Sternberg dma_free_coherent(dev->dev, CQ_SIZE(depth), (void *)nvmeq->cqes, 122857dacad5SJay Sternberg nvmeq->cq_dma_addr); 122957dacad5SJay Sternberg free_nvmeq: 123057dacad5SJay Sternberg kfree(nvmeq); 123157dacad5SJay Sternberg return NULL; 123257dacad5SJay Sternberg } 123357dacad5SJay Sternberg 1234dca51e78SChristoph Hellwig static int queue_request_irq(struct nvme_queue *nvmeq) 123557dacad5SJay Sternberg { 12360ff199cbSChristoph Hellwig struct pci_dev *pdev = to_pci_dev(nvmeq->dev->dev); 12370ff199cbSChristoph Hellwig int nr = nvmeq->dev->ctrl.instance; 12380ff199cbSChristoph Hellwig 12390ff199cbSChristoph Hellwig if (use_threaded_interrupts) { 12400ff199cbSChristoph Hellwig return pci_request_irq(pdev, nvmeq->cq_vector, nvme_irq_check, 12410ff199cbSChristoph Hellwig nvme_irq, nvmeq, "nvme%dq%d", nr, nvmeq->qid); 12420ff199cbSChristoph Hellwig } else { 12430ff199cbSChristoph Hellwig return pci_request_irq(pdev, nvmeq->cq_vector, nvme_irq, 12440ff199cbSChristoph Hellwig NULL, nvmeq, "nvme%dq%d", nr, nvmeq->qid); 12450ff199cbSChristoph Hellwig } 124657dacad5SJay Sternberg } 124757dacad5SJay Sternberg 124857dacad5SJay Sternberg static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid) 124957dacad5SJay Sternberg { 125057dacad5SJay Sternberg struct nvme_dev *dev = nvmeq->dev; 125157dacad5SJay Sternberg 125257dacad5SJay Sternberg spin_lock_irq(&nvmeq->q_lock); 125357dacad5SJay Sternberg nvmeq->sq_tail = 0; 125457dacad5SJay Sternberg nvmeq->cq_head = 0; 125557dacad5SJay Sternberg nvmeq->cq_phase = 1; 125657dacad5SJay Sternberg nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride]; 125757dacad5SJay Sternberg memset((void *)nvmeq->cqes, 0, CQ_SIZE(nvmeq->q_depth)); 1258f9f38e33SHelen Koike nvme_dbbuf_init(dev, nvmeq, qid); 125957dacad5SJay Sternberg dev->online_queues++; 126057dacad5SJay Sternberg spin_unlock_irq(&nvmeq->q_lock); 126157dacad5SJay Sternberg } 126257dacad5SJay Sternberg 126357dacad5SJay Sternberg static int nvme_create_queue(struct nvme_queue *nvmeq, int qid) 126457dacad5SJay Sternberg { 126557dacad5SJay Sternberg struct nvme_dev *dev = nvmeq->dev; 126657dacad5SJay Sternberg int result; 126757dacad5SJay Sternberg 126857dacad5SJay Sternberg nvmeq->cq_vector = qid - 1; 126957dacad5SJay Sternberg result = adapter_alloc_cq(dev, qid, nvmeq); 127057dacad5SJay Sternberg if (result < 0) 127157dacad5SJay Sternberg return result; 127257dacad5SJay Sternberg 127357dacad5SJay Sternberg result = adapter_alloc_sq(dev, qid, nvmeq); 127457dacad5SJay Sternberg if (result < 0) 127557dacad5SJay Sternberg goto release_cq; 127657dacad5SJay Sternberg 1277dca51e78SChristoph Hellwig result = queue_request_irq(nvmeq); 127857dacad5SJay Sternberg if (result < 0) 127957dacad5SJay Sternberg goto release_sq; 128057dacad5SJay Sternberg 128157dacad5SJay Sternberg nvme_init_queue(nvmeq, qid); 128257dacad5SJay Sternberg return result; 128357dacad5SJay Sternberg 128457dacad5SJay Sternberg release_sq: 128557dacad5SJay Sternberg adapter_delete_sq(dev, qid); 128657dacad5SJay Sternberg release_cq: 128757dacad5SJay Sternberg adapter_delete_cq(dev, qid); 128857dacad5SJay Sternberg return result; 128957dacad5SJay Sternberg } 129057dacad5SJay Sternberg 1291f363b089SEric Biggers static const struct blk_mq_ops nvme_mq_admin_ops = { 129257dacad5SJay Sternberg .queue_rq = nvme_queue_rq, 129377f02a7aSChristoph Hellwig .complete = nvme_pci_complete_rq, 129457dacad5SJay Sternberg .init_hctx = nvme_admin_init_hctx, 129557dacad5SJay Sternberg 
.exit_hctx = nvme_admin_exit_hctx, 12960350815aSChristoph Hellwig .init_request = nvme_init_request, 129757dacad5SJay Sternberg .timeout = nvme_timeout, 129857dacad5SJay Sternberg }; 129957dacad5SJay Sternberg 1300f363b089SEric Biggers static const struct blk_mq_ops nvme_mq_ops = { 130157dacad5SJay Sternberg .queue_rq = nvme_queue_rq, 130277f02a7aSChristoph Hellwig .complete = nvme_pci_complete_rq, 130357dacad5SJay Sternberg .init_hctx = nvme_init_hctx, 130457dacad5SJay Sternberg .init_request = nvme_init_request, 1305dca51e78SChristoph Hellwig .map_queues = nvme_pci_map_queues, 130657dacad5SJay Sternberg .timeout = nvme_timeout, 1307a0fa9647SJens Axboe .poll = nvme_poll, 130857dacad5SJay Sternberg }; 130957dacad5SJay Sternberg 131057dacad5SJay Sternberg static void nvme_dev_remove_admin(struct nvme_dev *dev) 131157dacad5SJay Sternberg { 13121c63dc66SChristoph Hellwig if (dev->ctrl.admin_q && !blk_queue_dying(dev->ctrl.admin_q)) { 131369d9a99cSKeith Busch /* 131469d9a99cSKeith Busch * If the controller was reset during removal, it's possible 131569d9a99cSKeith Busch * user requests may be waiting on a stopped queue. Start the 131669d9a99cSKeith Busch * queue to flush these to completion. 131769d9a99cSKeith Busch */ 1318c81545f9SSagi Grimberg blk_mq_unquiesce_queue(dev->ctrl.admin_q); 13191c63dc66SChristoph Hellwig blk_cleanup_queue(dev->ctrl.admin_q); 132057dacad5SJay Sternberg blk_mq_free_tag_set(&dev->admin_tagset); 132157dacad5SJay Sternberg } 132257dacad5SJay Sternberg } 132357dacad5SJay Sternberg 132457dacad5SJay Sternberg static int nvme_alloc_admin_tags(struct nvme_dev *dev) 132557dacad5SJay Sternberg { 13261c63dc66SChristoph Hellwig if (!dev->ctrl.admin_q) { 132757dacad5SJay Sternberg dev->admin_tagset.ops = &nvme_mq_admin_ops; 132857dacad5SJay Sternberg dev->admin_tagset.nr_hw_queues = 1; 1329e3e9d50cSKeith Busch 1330e3e9d50cSKeith Busch /* 1331e3e9d50cSKeith Busch * Subtract one to leave an empty queue entry for 'Full Queue' 1332e3e9d50cSKeith Busch * condition. See NVM-Express 1.2 specification, section 4.1.2. 
1333e3e9d50cSKeith Busch */ 1334e3e9d50cSKeith Busch dev->admin_tagset.queue_depth = NVME_AQ_BLKMQ_DEPTH - 1; 133557dacad5SJay Sternberg dev->admin_tagset.timeout = ADMIN_TIMEOUT; 133657dacad5SJay Sternberg dev->admin_tagset.numa_node = dev_to_node(dev->dev); 133757dacad5SJay Sternberg dev->admin_tagset.cmd_size = nvme_cmd_size(dev); 1338d3484991SJens Axboe dev->admin_tagset.flags = BLK_MQ_F_NO_SCHED; 133957dacad5SJay Sternberg dev->admin_tagset.driver_data = dev; 134057dacad5SJay Sternberg 134157dacad5SJay Sternberg if (blk_mq_alloc_tag_set(&dev->admin_tagset)) 134257dacad5SJay Sternberg return -ENOMEM; 134357dacad5SJay Sternberg 13441c63dc66SChristoph Hellwig dev->ctrl.admin_q = blk_mq_init_queue(&dev->admin_tagset); 13451c63dc66SChristoph Hellwig if (IS_ERR(dev->ctrl.admin_q)) { 134657dacad5SJay Sternberg blk_mq_free_tag_set(&dev->admin_tagset); 134757dacad5SJay Sternberg return -ENOMEM; 134857dacad5SJay Sternberg } 13491c63dc66SChristoph Hellwig if (!blk_get_queue(dev->ctrl.admin_q)) { 135057dacad5SJay Sternberg nvme_dev_remove_admin(dev); 13511c63dc66SChristoph Hellwig dev->ctrl.admin_q = NULL; 135257dacad5SJay Sternberg return -ENODEV; 135357dacad5SJay Sternberg } 135457dacad5SJay Sternberg } else 1355c81545f9SSagi Grimberg blk_mq_unquiesce_queue(dev->ctrl.admin_q); 135657dacad5SJay Sternberg 135757dacad5SJay Sternberg return 0; 135857dacad5SJay Sternberg } 135957dacad5SJay Sternberg 136097f6ef64SXu Yu static unsigned long db_bar_size(struct nvme_dev *dev, unsigned nr_io_queues) 136197f6ef64SXu Yu { 136297f6ef64SXu Yu return NVME_REG_DBS + ((nr_io_queues + 1) * 8 * dev->db_stride); 136397f6ef64SXu Yu } 136497f6ef64SXu Yu 136597f6ef64SXu Yu static int nvme_remap_bar(struct nvme_dev *dev, unsigned long size) 136697f6ef64SXu Yu { 136797f6ef64SXu Yu struct pci_dev *pdev = to_pci_dev(dev->dev); 136897f6ef64SXu Yu 136997f6ef64SXu Yu if (size <= dev->bar_mapped_size) 137097f6ef64SXu Yu return 0; 137197f6ef64SXu Yu if (size > pci_resource_len(pdev, 0)) 137297f6ef64SXu Yu return -ENOMEM; 137397f6ef64SXu Yu if (dev->bar) 137497f6ef64SXu Yu iounmap(dev->bar); 137597f6ef64SXu Yu dev->bar = ioremap(pci_resource_start(pdev, 0), size); 137697f6ef64SXu Yu if (!dev->bar) { 137797f6ef64SXu Yu dev->bar_mapped_size = 0; 137897f6ef64SXu Yu return -ENOMEM; 137997f6ef64SXu Yu } 138097f6ef64SXu Yu dev->bar_mapped_size = size; 138197f6ef64SXu Yu dev->dbs = dev->bar + NVME_REG_DBS; 138297f6ef64SXu Yu 138397f6ef64SXu Yu return 0; 138497f6ef64SXu Yu } 138597f6ef64SXu Yu 138601ad0990SSagi Grimberg static int nvme_pci_configure_admin_queue(struct nvme_dev *dev) 138757dacad5SJay Sternberg { 138857dacad5SJay Sternberg int result; 138957dacad5SJay Sternberg u32 aqa; 139057dacad5SJay Sternberg struct nvme_queue *nvmeq; 139157dacad5SJay Sternberg 139297f6ef64SXu Yu result = nvme_remap_bar(dev, db_bar_size(dev, 0)); 139397f6ef64SXu Yu if (result < 0) 139497f6ef64SXu Yu return result; 139597f6ef64SXu Yu 13968ef2074dSGabriel Krisman Bertazi dev->subsystem = readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 1, 0) ? 
139720d0dfe6SSagi Grimberg NVME_CAP_NSSRC(dev->ctrl.cap) : 0; 139857dacad5SJay Sternberg 13997a67cbeaSChristoph Hellwig if (dev->subsystem && 14007a67cbeaSChristoph Hellwig (readl(dev->bar + NVME_REG_CSTS) & NVME_CSTS_NSSRO)) 14017a67cbeaSChristoph Hellwig writel(NVME_CSTS_NSSRO, dev->bar + NVME_REG_CSTS); 140257dacad5SJay Sternberg 140320d0dfe6SSagi Grimberg result = nvme_disable_ctrl(&dev->ctrl, dev->ctrl.cap); 140457dacad5SJay Sternberg if (result < 0) 140557dacad5SJay Sternberg return result; 140657dacad5SJay Sternberg 140757dacad5SJay Sternberg nvmeq = dev->queues[0]; 140857dacad5SJay Sternberg if (!nvmeq) { 1409d3af3ecdSShaohua Li nvmeq = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH, 1410d3af3ecdSShaohua Li dev_to_node(dev->dev)); 141157dacad5SJay Sternberg if (!nvmeq) 141257dacad5SJay Sternberg return -ENOMEM; 141357dacad5SJay Sternberg } 141457dacad5SJay Sternberg 141557dacad5SJay Sternberg aqa = nvmeq->q_depth - 1; 141657dacad5SJay Sternberg aqa |= aqa << 16; 141757dacad5SJay Sternberg 14187a67cbeaSChristoph Hellwig writel(aqa, dev->bar + NVME_REG_AQA); 14197a67cbeaSChristoph Hellwig lo_hi_writeq(nvmeq->sq_dma_addr, dev->bar + NVME_REG_ASQ); 14207a67cbeaSChristoph Hellwig lo_hi_writeq(nvmeq->cq_dma_addr, dev->bar + NVME_REG_ACQ); 142157dacad5SJay Sternberg 142220d0dfe6SSagi Grimberg result = nvme_enable_ctrl(&dev->ctrl, dev->ctrl.cap); 142357dacad5SJay Sternberg if (result) 1424d4875622SKeith Busch return result; 142557dacad5SJay Sternberg 142657dacad5SJay Sternberg nvmeq->cq_vector = 0; 1427dca51e78SChristoph Hellwig result = queue_request_irq(nvmeq); 142857dacad5SJay Sternberg if (result) { 142957dacad5SJay Sternberg nvmeq->cq_vector = -1; 1430d4875622SKeith Busch return result; 143157dacad5SJay Sternberg } 143257dacad5SJay Sternberg 143357dacad5SJay Sternberg return result; 143457dacad5SJay Sternberg } 143557dacad5SJay Sternberg 1436749941f2SChristoph Hellwig static int nvme_create_io_queues(struct nvme_dev *dev) 143757dacad5SJay Sternberg { 1438949928c1SKeith Busch unsigned i, max; 1439749941f2SChristoph Hellwig int ret = 0; 144057dacad5SJay Sternberg 1441d858e5f0SSagi Grimberg for (i = dev->ctrl.queue_count; i <= dev->max_qid; i++) { 1442d3af3ecdSShaohua Li /* vector == qid - 1, match nvme_create_queue */ 1443d3af3ecdSShaohua Li if (!nvme_alloc_queue(dev, i, dev->q_depth, 1444d3af3ecdSShaohua Li pci_irq_get_node(to_pci_dev(dev->dev), i - 1))) { 1445749941f2SChristoph Hellwig ret = -ENOMEM; 144657dacad5SJay Sternberg break; 1447749941f2SChristoph Hellwig } 1448749941f2SChristoph Hellwig } 144957dacad5SJay Sternberg 1450d858e5f0SSagi Grimberg max = min(dev->max_qid, dev->ctrl.queue_count - 1); 1451949928c1SKeith Busch for (i = dev->online_queues; i <= max; i++) { 1452749941f2SChristoph Hellwig ret = nvme_create_queue(dev->queues[i], i); 1453d4875622SKeith Busch if (ret) 145457dacad5SJay Sternberg break; 145557dacad5SJay Sternberg } 145657dacad5SJay Sternberg 1457749941f2SChristoph Hellwig /* 1458749941f2SChristoph Hellwig * Ignore failing Create SQ/CQ commands, we can continue with less 1459749941f2SChristoph Hellwig * than the desired amount of queues, and even a controller without 1460749941f2SChristoph Hellwig * I/O queues can still be used to issue admin commands. This might 1461749941f2SChristoph Hellwig * be useful to upgrade a buggy firmware for example. 1462749941f2SChristoph Hellwig */ 1463749941f2SChristoph Hellwig return ret >= 0 ?
0 : ret; 146457dacad5SJay Sternberg } 146557dacad5SJay Sternberg 1466202021c1SStephen Bates static ssize_t nvme_cmb_show(struct device *dev, 1467202021c1SStephen Bates struct device_attribute *attr, 1468202021c1SStephen Bates char *buf) 1469202021c1SStephen Bates { 1470202021c1SStephen Bates struct nvme_dev *ndev = to_nvme_dev(dev_get_drvdata(dev)); 1471202021c1SStephen Bates 1472c965809cSStephen Bates return scnprintf(buf, PAGE_SIZE, "cmbloc : x%08x\ncmbsz : x%08x\n", 1473202021c1SStephen Bates ndev->cmbloc, ndev->cmbsz); 1474202021c1SStephen Bates } 1475202021c1SStephen Bates static DEVICE_ATTR(cmb, S_IRUGO, nvme_cmb_show, NULL); 1476202021c1SStephen Bates 147757dacad5SJay Sternberg static void __iomem *nvme_map_cmb(struct nvme_dev *dev) 147857dacad5SJay Sternberg { 147957dacad5SJay Sternberg u64 szu, size, offset; 148057dacad5SJay Sternberg resource_size_t bar_size; 148157dacad5SJay Sternberg struct pci_dev *pdev = to_pci_dev(dev->dev); 148257dacad5SJay Sternberg void __iomem *cmb; 148357dacad5SJay Sternberg dma_addr_t dma_addr; 148457dacad5SJay Sternberg 14857a67cbeaSChristoph Hellwig dev->cmbsz = readl(dev->bar + NVME_REG_CMBSZ); 148657dacad5SJay Sternberg if (!(NVME_CMB_SZ(dev->cmbsz))) 148757dacad5SJay Sternberg return NULL; 1488202021c1SStephen Bates dev->cmbloc = readl(dev->bar + NVME_REG_CMBLOC); 148957dacad5SJay Sternberg 1490202021c1SStephen Bates if (!use_cmb_sqes) 1491202021c1SStephen Bates return NULL; 149257dacad5SJay Sternberg 149357dacad5SJay Sternberg szu = (u64)1 << (12 + 4 * NVME_CMB_SZU(dev->cmbsz)); 149457dacad5SJay Sternberg size = szu * NVME_CMB_SZ(dev->cmbsz); 1495202021c1SStephen Bates offset = szu * NVME_CMB_OFST(dev->cmbloc); 1496202021c1SStephen Bates bar_size = pci_resource_len(pdev, NVME_CMB_BIR(dev->cmbloc)); 149757dacad5SJay Sternberg 149857dacad5SJay Sternberg if (offset > bar_size) 149957dacad5SJay Sternberg return NULL; 150057dacad5SJay Sternberg 150157dacad5SJay Sternberg /* 150257dacad5SJay Sternberg * Controllers may support a CMB size larger than their BAR, 150357dacad5SJay Sternberg * for example, due to being behind a bridge. 
Reduce the CMB to 150457dacad5SJay Sternberg * the reported size of the BAR 150557dacad5SJay Sternberg */ 150657dacad5SJay Sternberg if (size > bar_size - offset) 150757dacad5SJay Sternberg size = bar_size - offset; 150857dacad5SJay Sternberg 1509202021c1SStephen Bates dma_addr = pci_resource_start(pdev, NVME_CMB_BIR(dev->cmbloc)) + offset; 151057dacad5SJay Sternberg cmb = ioremap_wc(dma_addr, size); 151157dacad5SJay Sternberg if (!cmb) 151257dacad5SJay Sternberg return NULL; 151357dacad5SJay Sternberg 151457dacad5SJay Sternberg dev->cmb_dma_addr = dma_addr; 151557dacad5SJay Sternberg dev->cmb_size = size; 151657dacad5SJay Sternberg return cmb; 151757dacad5SJay Sternberg } 151857dacad5SJay Sternberg 151957dacad5SJay Sternberg static inline void nvme_release_cmb(struct nvme_dev *dev) 152057dacad5SJay Sternberg { 152157dacad5SJay Sternberg if (dev->cmb) { 152257dacad5SJay Sternberg iounmap(dev->cmb); 152357dacad5SJay Sternberg dev->cmb = NULL; 1524f63572dfSJon Derrick if (dev->cmbsz) { 1525f63572dfSJon Derrick sysfs_remove_file_from_group(&dev->ctrl.device->kobj, 1526f63572dfSJon Derrick &dev_attr_cmb.attr, NULL); 1527f63572dfSJon Derrick dev->cmbsz = 0; 1528f63572dfSJon Derrick } 152957dacad5SJay Sternberg } 153057dacad5SJay Sternberg } 153157dacad5SJay Sternberg 153287ad72a5SChristoph Hellwig static int nvme_set_host_mem(struct nvme_dev *dev, u32 bits) 153387ad72a5SChristoph Hellwig { 153487ad72a5SChristoph Hellwig size_t len = dev->nr_host_mem_descs * sizeof(*dev->host_mem_descs); 153587ad72a5SChristoph Hellwig struct nvme_command c; 153687ad72a5SChristoph Hellwig u64 dma_addr; 153787ad72a5SChristoph Hellwig int ret; 153887ad72a5SChristoph Hellwig 153987ad72a5SChristoph Hellwig dma_addr = dma_map_single(dev->dev, dev->host_mem_descs, len, 154087ad72a5SChristoph Hellwig DMA_TO_DEVICE); 154187ad72a5SChristoph Hellwig if (dma_mapping_error(dev->dev, dma_addr)) 154287ad72a5SChristoph Hellwig return -ENOMEM; 154387ad72a5SChristoph Hellwig 154487ad72a5SChristoph Hellwig memset(&c, 0, sizeof(c)); 154587ad72a5SChristoph Hellwig c.features.opcode = nvme_admin_set_features; 154687ad72a5SChristoph Hellwig c.features.fid = cpu_to_le32(NVME_FEAT_HOST_MEM_BUF); 154787ad72a5SChristoph Hellwig c.features.dword11 = cpu_to_le32(bits); 154887ad72a5SChristoph Hellwig c.features.dword12 = cpu_to_le32(dev->host_mem_size >> 154987ad72a5SChristoph Hellwig ilog2(dev->ctrl.page_size)); 155087ad72a5SChristoph Hellwig c.features.dword13 = cpu_to_le32(lower_32_bits(dma_addr)); 155187ad72a5SChristoph Hellwig c.features.dword14 = cpu_to_le32(upper_32_bits(dma_addr)); 155287ad72a5SChristoph Hellwig c.features.dword15 = cpu_to_le32(dev->nr_host_mem_descs); 155387ad72a5SChristoph Hellwig 155487ad72a5SChristoph Hellwig ret = nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0); 155587ad72a5SChristoph Hellwig if (ret) { 155687ad72a5SChristoph Hellwig dev_warn(dev->ctrl.device, 155787ad72a5SChristoph Hellwig "failed to set host mem (err %d, flags %#x).\n", 155887ad72a5SChristoph Hellwig ret, bits); 155987ad72a5SChristoph Hellwig } 156087ad72a5SChristoph Hellwig dma_unmap_single(dev->dev, dma_addr, len, DMA_TO_DEVICE); 156187ad72a5SChristoph Hellwig return ret; 156287ad72a5SChristoph Hellwig } 156387ad72a5SChristoph Hellwig 156487ad72a5SChristoph Hellwig static void nvme_free_host_mem(struct nvme_dev *dev) 156587ad72a5SChristoph Hellwig { 156687ad72a5SChristoph Hellwig int i; 156787ad72a5SChristoph Hellwig 156887ad72a5SChristoph Hellwig for (i = 0; i < dev->nr_host_mem_descs; i++) { 156987ad72a5SChristoph Hellwig struct 
nvme_host_mem_buf_desc *desc = &dev->host_mem_descs[i]; 157087ad72a5SChristoph Hellwig size_t size = le32_to_cpu(desc->size) * dev->ctrl.page_size; 157187ad72a5SChristoph Hellwig 157287ad72a5SChristoph Hellwig dma_free_coherent(dev->dev, size, dev->host_mem_desc_bufs[i], 157387ad72a5SChristoph Hellwig le64_to_cpu(desc->addr)); 157487ad72a5SChristoph Hellwig } 157587ad72a5SChristoph Hellwig 157687ad72a5SChristoph Hellwig kfree(dev->host_mem_desc_bufs); 157787ad72a5SChristoph Hellwig dev->host_mem_desc_bufs = NULL; 157887ad72a5SChristoph Hellwig kfree(dev->host_mem_descs); 157987ad72a5SChristoph Hellwig dev->host_mem_descs = NULL; 158087ad72a5SChristoph Hellwig } 158187ad72a5SChristoph Hellwig 158287ad72a5SChristoph Hellwig static int nvme_alloc_host_mem(struct nvme_dev *dev, u64 min, u64 preferred) 158387ad72a5SChristoph Hellwig { 158487ad72a5SChristoph Hellwig struct nvme_host_mem_buf_desc *descs; 158587ad72a5SChristoph Hellwig u32 chunk_size, max_entries, i = 0; 158687ad72a5SChristoph Hellwig void **bufs; 158787ad72a5SChristoph Hellwig u64 size, tmp; 158887ad72a5SChristoph Hellwig 158987ad72a5SChristoph Hellwig /* start big and work our way down */ 159087ad72a5SChristoph Hellwig chunk_size = min(preferred, (u64)PAGE_SIZE << MAX_ORDER); 159187ad72a5SChristoph Hellwig retry: 159287ad72a5SChristoph Hellwig tmp = (preferred + chunk_size - 1); 159387ad72a5SChristoph Hellwig do_div(tmp, chunk_size); 159487ad72a5SChristoph Hellwig max_entries = tmp; 159587ad72a5SChristoph Hellwig descs = kcalloc(max_entries, sizeof(*descs), GFP_KERNEL); 159687ad72a5SChristoph Hellwig if (!descs) 159787ad72a5SChristoph Hellwig goto out; 159887ad72a5SChristoph Hellwig 159987ad72a5SChristoph Hellwig bufs = kcalloc(max_entries, sizeof(*bufs), GFP_KERNEL); 160087ad72a5SChristoph Hellwig if (!bufs) 160187ad72a5SChristoph Hellwig goto out_free_descs; 160287ad72a5SChristoph Hellwig 160387ad72a5SChristoph Hellwig for (size = 0; size < preferred; size += chunk_size) { 160487ad72a5SChristoph Hellwig u32 len = min_t(u64, chunk_size, preferred - size); 160587ad72a5SChristoph Hellwig dma_addr_t dma_addr; 160687ad72a5SChristoph Hellwig 160787ad72a5SChristoph Hellwig bufs[i] = dma_alloc_attrs(dev->dev, len, &dma_addr, GFP_KERNEL, 160887ad72a5SChristoph Hellwig DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN); 160987ad72a5SChristoph Hellwig if (!bufs[i]) 161087ad72a5SChristoph Hellwig break; 161187ad72a5SChristoph Hellwig 161287ad72a5SChristoph Hellwig descs[i].addr = cpu_to_le64(dma_addr); 161387ad72a5SChristoph Hellwig descs[i].size = cpu_to_le32(len / dev->ctrl.page_size); 161487ad72a5SChristoph Hellwig i++; 161587ad72a5SChristoph Hellwig } 161687ad72a5SChristoph Hellwig 161787ad72a5SChristoph Hellwig if (!size || (min && size < min)) { 161887ad72a5SChristoph Hellwig dev_warn(dev->ctrl.device, 161987ad72a5SChristoph Hellwig "failed to allocate host memory buffer.\n"); 162087ad72a5SChristoph Hellwig goto out_free_bufs; 162187ad72a5SChristoph Hellwig } 162287ad72a5SChristoph Hellwig 162387ad72a5SChristoph Hellwig dev_info(dev->ctrl.device, 162487ad72a5SChristoph Hellwig "allocated %lld MiB host memory buffer.\n", 162587ad72a5SChristoph Hellwig size >> ilog2(SZ_1M)); 162687ad72a5SChristoph Hellwig dev->nr_host_mem_descs = i; 162787ad72a5SChristoph Hellwig dev->host_mem_size = size; 162887ad72a5SChristoph Hellwig dev->host_mem_descs = descs; 162987ad72a5SChristoph Hellwig dev->host_mem_desc_bufs = bufs; 163087ad72a5SChristoph Hellwig return 0; 163187ad72a5SChristoph Hellwig 163287ad72a5SChristoph Hellwig out_free_bufs: 
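	/* unwind: release the HMB chunks allocated so far before retrying with a smaller chunk size or giving up */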
163387ad72a5SChristoph Hellwig while (--i >= 0) { 163487ad72a5SChristoph Hellwig size_t size = le32_to_cpu(descs[i].size) * dev->ctrl.page_size; 163587ad72a5SChristoph Hellwig 163687ad72a5SChristoph Hellwig dma_free_coherent(dev->dev, size, bufs[i], 163787ad72a5SChristoph Hellwig le64_to_cpu(descs[i].addr)); 163887ad72a5SChristoph Hellwig } 163987ad72a5SChristoph Hellwig 164087ad72a5SChristoph Hellwig kfree(bufs); 164187ad72a5SChristoph Hellwig out_free_descs: 164287ad72a5SChristoph Hellwig kfree(descs); 164387ad72a5SChristoph Hellwig out: 164487ad72a5SChristoph Hellwig /* try a smaller chunk size if we failed early */ 164587ad72a5SChristoph Hellwig if (chunk_size >= PAGE_SIZE * 2 && (i == 0 || size < min)) { 164687ad72a5SChristoph Hellwig chunk_size /= 2; 164787ad72a5SChristoph Hellwig goto retry; 164887ad72a5SChristoph Hellwig } 164987ad72a5SChristoph Hellwig dev->host_mem_descs = NULL; 165087ad72a5SChristoph Hellwig return -ENOMEM; 165187ad72a5SChristoph Hellwig } 165287ad72a5SChristoph Hellwig 165387ad72a5SChristoph Hellwig static void nvme_setup_host_mem(struct nvme_dev *dev) 165487ad72a5SChristoph Hellwig { 165587ad72a5SChristoph Hellwig u64 max = (u64)max_host_mem_size_mb * SZ_1M; 165687ad72a5SChristoph Hellwig u64 preferred = (u64)dev->ctrl.hmpre * 4096; 165787ad72a5SChristoph Hellwig u64 min = (u64)dev->ctrl.hmmin * 4096; 165887ad72a5SChristoph Hellwig u32 enable_bits = NVME_HOST_MEM_ENABLE; 165987ad72a5SChristoph Hellwig 166087ad72a5SChristoph Hellwig preferred = min(preferred, max); 166187ad72a5SChristoph Hellwig if (min > max) { 166287ad72a5SChristoph Hellwig dev_warn(dev->ctrl.device, 166387ad72a5SChristoph Hellwig "min host memory (%lld MiB) above limit (%d MiB).\n", 166487ad72a5SChristoph Hellwig min >> ilog2(SZ_1M), max_host_mem_size_mb); 166587ad72a5SChristoph Hellwig nvme_free_host_mem(dev); 166687ad72a5SChristoph Hellwig return; 166787ad72a5SChristoph Hellwig } 166887ad72a5SChristoph Hellwig 166987ad72a5SChristoph Hellwig /* 167087ad72a5SChristoph Hellwig * If we already have a buffer allocated check if we can reuse it. 
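 * A buffer that still satisfies the HMMIN-derived minimum is re-enabled with NVME_HOST_MEM_RETURN set; a smaller one is freed and reallocated below.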
167187ad72a5SChristoph Hellwig */ 167287ad72a5SChristoph Hellwig if (dev->host_mem_descs) { 167387ad72a5SChristoph Hellwig if (dev->host_mem_size >= min) 167487ad72a5SChristoph Hellwig enable_bits |= NVME_HOST_MEM_RETURN; 167587ad72a5SChristoph Hellwig else 167687ad72a5SChristoph Hellwig nvme_free_host_mem(dev); 167787ad72a5SChristoph Hellwig } 167887ad72a5SChristoph Hellwig 167987ad72a5SChristoph Hellwig if (!dev->host_mem_descs) { 168087ad72a5SChristoph Hellwig if (nvme_alloc_host_mem(dev, min, preferred)) 168187ad72a5SChristoph Hellwig return; 168287ad72a5SChristoph Hellwig } 168387ad72a5SChristoph Hellwig 168487ad72a5SChristoph Hellwig if (nvme_set_host_mem(dev, enable_bits)) 168587ad72a5SChristoph Hellwig nvme_free_host_mem(dev); 168687ad72a5SChristoph Hellwig } 168787ad72a5SChristoph Hellwig 168857dacad5SJay Sternberg static int nvme_setup_io_queues(struct nvme_dev *dev) 168957dacad5SJay Sternberg { 169057dacad5SJay Sternberg struct nvme_queue *adminq = dev->queues[0]; 169157dacad5SJay Sternberg struct pci_dev *pdev = to_pci_dev(dev->dev); 169297f6ef64SXu Yu int result, nr_io_queues; 169397f6ef64SXu Yu unsigned long size; 169457dacad5SJay Sternberg 16952800b8e7SKeith Busch nr_io_queues = num_online_cpus(); 16969a0be7abSChristoph Hellwig result = nvme_set_queue_count(&dev->ctrl, &nr_io_queues); 16979a0be7abSChristoph Hellwig if (result < 0) 169857dacad5SJay Sternberg return result; 16999a0be7abSChristoph Hellwig 1700f5fa90dcSChristoph Hellwig if (nr_io_queues == 0) 1701a5229050SKeith Busch return 0; 170257dacad5SJay Sternberg 170357dacad5SJay Sternberg if (dev->cmb && NVME_CMB_SQS(dev->cmbsz)) { 170457dacad5SJay Sternberg result = nvme_cmb_qdepth(dev, nr_io_queues, 170557dacad5SJay Sternberg sizeof(struct nvme_command)); 170657dacad5SJay Sternberg if (result > 0) 170757dacad5SJay Sternberg dev->q_depth = result; 170857dacad5SJay Sternberg else 170957dacad5SJay Sternberg nvme_release_cmb(dev); 171057dacad5SJay Sternberg } 171157dacad5SJay Sternberg 171257dacad5SJay Sternberg do { 171397f6ef64SXu Yu size = db_bar_size(dev, nr_io_queues); 171497f6ef64SXu Yu result = nvme_remap_bar(dev, size); 171597f6ef64SXu Yu if (!result) 171657dacad5SJay Sternberg break; 171757dacad5SJay Sternberg if (!--nr_io_queues) 171857dacad5SJay Sternberg return -ENOMEM; 171957dacad5SJay Sternberg } while (1); 172057dacad5SJay Sternberg adminq->q_db = dev->dbs; 172157dacad5SJay Sternberg 172257dacad5SJay Sternberg /* Deregister the admin queue's interrupt */ 17230ff199cbSChristoph Hellwig pci_free_irq(pdev, 0, adminq); 172457dacad5SJay Sternberg 172557dacad5SJay Sternberg /* 172657dacad5SJay Sternberg * If we enable msix early due to not intx, disable it again before 172757dacad5SJay Sternberg * setting up the full range we need. 172857dacad5SJay Sternberg */ 1729dca51e78SChristoph Hellwig pci_free_irq_vectors(pdev); 1730dca51e78SChristoph Hellwig nr_io_queues = pci_alloc_irq_vectors(pdev, 1, nr_io_queues, 1731dca51e78SChristoph Hellwig PCI_IRQ_ALL_TYPES | PCI_IRQ_AFFINITY); 1732dca51e78SChristoph Hellwig if (nr_io_queues <= 0) 1733dca51e78SChristoph Hellwig return -EIO; 1734dca51e78SChristoph Hellwig dev->max_qid = nr_io_queues; 173557dacad5SJay Sternberg 173657dacad5SJay Sternberg /* 173757dacad5SJay Sternberg * Should investigate if there's a performance win from allocating 173857dacad5SJay Sternberg * more queues than interrupt vectors; it might allow the submission 173957dacad5SJay Sternberg * path to scale better, even if the receive path is limited by the 174057dacad5SJay Sternberg * number of interrupts. 
174157dacad5SJay Sternberg */ 174257dacad5SJay Sternberg 1743dca51e78SChristoph Hellwig result = queue_request_irq(adminq); 174457dacad5SJay Sternberg if (result) { 174557dacad5SJay Sternberg adminq->cq_vector = -1; 1746d4875622SKeith Busch return result; 174757dacad5SJay Sternberg } 1748749941f2SChristoph Hellwig return nvme_create_io_queues(dev); 174957dacad5SJay Sternberg } 175057dacad5SJay Sternberg 17512a842acaSChristoph Hellwig static void nvme_del_queue_end(struct request *req, blk_status_t error) 1752db3cbfffSKeith Busch { 1753db3cbfffSKeith Busch struct nvme_queue *nvmeq = req->end_io_data; 1754db3cbfffSKeith Busch 1755db3cbfffSKeith Busch blk_mq_free_request(req); 1756db3cbfffSKeith Busch complete(&nvmeq->dev->ioq_wait); 1757db3cbfffSKeith Busch } 1758db3cbfffSKeith Busch 17592a842acaSChristoph Hellwig static void nvme_del_cq_end(struct request *req, blk_status_t error) 1760db3cbfffSKeith Busch { 1761db3cbfffSKeith Busch struct nvme_queue *nvmeq = req->end_io_data; 1762db3cbfffSKeith Busch 1763db3cbfffSKeith Busch if (!error) { 1764db3cbfffSKeith Busch unsigned long flags; 1765db3cbfffSKeith Busch 17662e39e0f6SMing Lin /* 17672e39e0f6SMing Lin * We might be called with the AQ q_lock held 17682e39e0f6SMing Lin * and the I/O queue q_lock should always 17692e39e0f6SMing Lin * nest inside the AQ one. 17702e39e0f6SMing Lin */ 17712e39e0f6SMing Lin spin_lock_irqsave_nested(&nvmeq->q_lock, flags, 17722e39e0f6SMing Lin SINGLE_DEPTH_NESTING); 1773db3cbfffSKeith Busch nvme_process_cq(nvmeq); 1774db3cbfffSKeith Busch spin_unlock_irqrestore(&nvmeq->q_lock, flags); 1775db3cbfffSKeith Busch } 1776db3cbfffSKeith Busch 1777db3cbfffSKeith Busch nvme_del_queue_end(req, error); 1778db3cbfffSKeith Busch } 1779db3cbfffSKeith Busch 1780db3cbfffSKeith Busch static int nvme_delete_queue(struct nvme_queue *nvmeq, u8 opcode) 1781db3cbfffSKeith Busch { 1782db3cbfffSKeith Busch struct request_queue *q = nvmeq->dev->ctrl.admin_q; 1783db3cbfffSKeith Busch struct request *req; 1784db3cbfffSKeith Busch struct nvme_command cmd; 1785db3cbfffSKeith Busch 1786db3cbfffSKeith Busch memset(&cmd, 0, sizeof(cmd)); 1787db3cbfffSKeith Busch cmd.delete_queue.opcode = opcode; 1788db3cbfffSKeith Busch cmd.delete_queue.qid = cpu_to_le16(nvmeq->qid); 1789db3cbfffSKeith Busch 1790eb71f435SChristoph Hellwig req = nvme_alloc_request(q, &cmd, BLK_MQ_REQ_NOWAIT, NVME_QID_ANY); 1791db3cbfffSKeith Busch if (IS_ERR(req)) 1792db3cbfffSKeith Busch return PTR_ERR(req); 1793db3cbfffSKeith Busch 1794db3cbfffSKeith Busch req->timeout = ADMIN_TIMEOUT; 1795db3cbfffSKeith Busch req->end_io_data = nvmeq; 1796db3cbfffSKeith Busch 1797db3cbfffSKeith Busch blk_execute_rq_nowait(q, NULL, req, false, 1798db3cbfffSKeith Busch opcode == nvme_admin_delete_cq ? 
1799db3cbfffSKeith Busch nvme_del_cq_end : nvme_del_queue_end); 1800db3cbfffSKeith Busch return 0; 1801db3cbfffSKeith Busch } 1802db3cbfffSKeith Busch 180370659060SKeith Busch static void nvme_disable_io_queues(struct nvme_dev *dev, int queues) 1804db3cbfffSKeith Busch { 180570659060SKeith Busch int pass; 1806db3cbfffSKeith Busch unsigned long timeout; 1807db3cbfffSKeith Busch u8 opcode = nvme_admin_delete_sq; 1808db3cbfffSKeith Busch 1809db3cbfffSKeith Busch for (pass = 0; pass < 2; pass++) { 1810014a0d60SKeith Busch int sent = 0, i = queues; 1811db3cbfffSKeith Busch 1812db3cbfffSKeith Busch reinit_completion(&dev->ioq_wait); 1813db3cbfffSKeith Busch retry: 1814db3cbfffSKeith Busch timeout = ADMIN_TIMEOUT; 1815c21377f8SGabriel Krisman Bertazi for (; i > 0; i--, sent++) 1816c21377f8SGabriel Krisman Bertazi if (nvme_delete_queue(dev->queues[i], opcode)) 1817db3cbfffSKeith Busch break; 1818c21377f8SGabriel Krisman Bertazi 1819db3cbfffSKeith Busch while (sent--) { 1820db3cbfffSKeith Busch timeout = wait_for_completion_io_timeout(&dev->ioq_wait, timeout); 1821db3cbfffSKeith Busch if (timeout == 0) 1822db3cbfffSKeith Busch return; 1823db3cbfffSKeith Busch if (i) 1824db3cbfffSKeith Busch goto retry; 1825db3cbfffSKeith Busch } 1826db3cbfffSKeith Busch opcode = nvme_admin_delete_cq; 1827db3cbfffSKeith Busch } 1828db3cbfffSKeith Busch } 1829db3cbfffSKeith Busch 183057dacad5SJay Sternberg /* 183157dacad5SJay Sternberg * Return: error value if an error occurred setting up the queues or calling 183257dacad5SJay Sternberg * Identify Device. 0 if these succeeded, even if adding some of the 183357dacad5SJay Sternberg * namespaces failed. At the moment, these failures are silent. TBD which 183457dacad5SJay Sternberg * failures should be reported. 183557dacad5SJay Sternberg */ 183657dacad5SJay Sternberg static int nvme_dev_add(struct nvme_dev *dev) 183757dacad5SJay Sternberg { 18385bae7f73SChristoph Hellwig if (!dev->ctrl.tagset) { 183957dacad5SJay Sternberg dev->tagset.ops = &nvme_mq_ops; 184057dacad5SJay Sternberg dev->tagset.nr_hw_queues = dev->online_queues - 1; 184157dacad5SJay Sternberg dev->tagset.timeout = NVME_IO_TIMEOUT; 184257dacad5SJay Sternberg dev->tagset.numa_node = dev_to_node(dev->dev); 184357dacad5SJay Sternberg dev->tagset.queue_depth = 184457dacad5SJay Sternberg min_t(int, dev->q_depth, BLK_MQ_MAX_DEPTH) - 1; 184557dacad5SJay Sternberg dev->tagset.cmd_size = nvme_cmd_size(dev); 184657dacad5SJay Sternberg dev->tagset.flags = BLK_MQ_F_SHOULD_MERGE; 184757dacad5SJay Sternberg dev->tagset.driver_data = dev; 184857dacad5SJay Sternberg 184957dacad5SJay Sternberg if (blk_mq_alloc_tag_set(&dev->tagset)) 185057dacad5SJay Sternberg return 0; 18515bae7f73SChristoph Hellwig dev->ctrl.tagset = &dev->tagset; 1852f9f38e33SHelen Koike 1853f9f38e33SHelen Koike nvme_dbbuf_set(dev); 1854949928c1SKeith Busch } else { 1855949928c1SKeith Busch blk_mq_update_nr_hw_queues(&dev->tagset, dev->online_queues - 1); 1856949928c1SKeith Busch 1857949928c1SKeith Busch /* Free previously allocated queues that are no longer usable */ 1858949928c1SKeith Busch nvme_free_queues(dev, dev->online_queues); 185957dacad5SJay Sternberg } 1860949928c1SKeith Busch 186157dacad5SJay Sternberg return 0; 186257dacad5SJay Sternberg } 186357dacad5SJay Sternberg 1864b00a726aSKeith Busch static int nvme_pci_enable(struct nvme_dev *dev) 186557dacad5SJay Sternberg { 1866b00a726aSKeith Busch int result = -ENOMEM; 186757dacad5SJay Sternberg struct pci_dev *pdev = to_pci_dev(dev->dev); 186857dacad5SJay Sternberg 186957dacad5SJay Sternberg if 
(pci_enable_device_mem(pdev)) 187057dacad5SJay Sternberg return result; 187157dacad5SJay Sternberg 187257dacad5SJay Sternberg pci_set_master(pdev); 187357dacad5SJay Sternberg 187457dacad5SJay Sternberg if (dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(64)) && 187557dacad5SJay Sternberg dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(32))) 187657dacad5SJay Sternberg goto disable; 187757dacad5SJay Sternberg 18787a67cbeaSChristoph Hellwig if (readl(dev->bar + NVME_REG_CSTS) == -1) { 187957dacad5SJay Sternberg result = -ENODEV; 1880b00a726aSKeith Busch goto disable; 188157dacad5SJay Sternberg } 188257dacad5SJay Sternberg 188357dacad5SJay Sternberg /* 1884a5229050SKeith Busch * Some devices and/or platforms don't advertise or work with INTx 1885a5229050SKeith Busch * interrupts. Pre-enable a single MSIX or MSI vec for setup. We'll 1886a5229050SKeith Busch * adjust this later. 188757dacad5SJay Sternberg */ 1888dca51e78SChristoph Hellwig result = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES); 1889dca51e78SChristoph Hellwig if (result < 0) 1890dca51e78SChristoph Hellwig return result; 189157dacad5SJay Sternberg 189220d0dfe6SSagi Grimberg dev->ctrl.cap = lo_hi_readq(dev->bar + NVME_REG_CAP); 18937a67cbeaSChristoph Hellwig 189420d0dfe6SSagi Grimberg dev->q_depth = min_t(int, NVME_CAP_MQES(dev->ctrl.cap) + 1, 189520d0dfe6SSagi Grimberg NVME_Q_DEPTH); 189620d0dfe6SSagi Grimberg dev->db_stride = 1 << NVME_CAP_STRIDE(dev->ctrl.cap); 18977a67cbeaSChristoph Hellwig dev->dbs = dev->bar + 4096; 18981f390c1fSStephan Günther 18991f390c1fSStephan Günther /* 19001f390c1fSStephan Günther * Temporary fix for the Apple controller found in the MacBook8,1 and 19011f390c1fSStephan Günther * some MacBook7,1 to avoid controller resets and data loss. 19021f390c1fSStephan Günther */ 19031f390c1fSStephan Günther if (pdev->vendor == PCI_VENDOR_ID_APPLE && pdev->device == 0x2001) { 19041f390c1fSStephan Günther dev->q_depth = 2; 19059bdcfb10SChristoph Hellwig dev_warn(dev->ctrl.device, "detected Apple NVMe controller, " 19069bdcfb10SChristoph Hellwig "set queue depth=%u to work around controller resets\n", 19071f390c1fSStephan Günther dev->q_depth); 1908d554b5e1SMartin K. Petersen } else if (pdev->vendor == PCI_VENDOR_ID_SAMSUNG && 1909d554b5e1SMartin K. Petersen (pdev->device == 0xa821 || pdev->device == 0xa822) && 191020d0dfe6SSagi Grimberg NVME_CAP_MQES(dev->ctrl.cap) == 0) { 1911d554b5e1SMartin K. Petersen dev->q_depth = 64; 1912d554b5e1SMartin K. Petersen dev_err(dev->ctrl.device, "detected PM1725 NVMe controller, " 1913d554b5e1SMartin K. Petersen "set queue depth=%u\n", dev->q_depth); 19141f390c1fSStephan Günther } 19151f390c1fSStephan Günther 1916202021c1SStephen Bates /* 1917202021c1SStephen Bates * CMBs can currently only exist on >=1.2 PCIe devices. We only 1918202021c1SStephen Bates * populate sysfs if a CMB is implemented. Note that we add the 1919202021c1SStephen Bates * CMB attribute to the nvme_ctrl kobj which removes the need to remove 1920202021c1SStephen Bates * it on exit. Since nvme_dev_attrs_group has no name we can pass 1921202021c1SStephen Bates * NULL as final argument to sysfs_add_file_to_group. 
1922202021c1SStephen Bates */ 1923202021c1SStephen Bates 19248ef2074dSGabriel Krisman Bertazi if (readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 2, 0)) { 192557dacad5SJay Sternberg dev->cmb = nvme_map_cmb(dev); 192657dacad5SJay Sternberg 1927202021c1SStephen Bates if (dev->cmbsz) { 1928202021c1SStephen Bates if (sysfs_add_file_to_group(&dev->ctrl.device->kobj, 1929202021c1SStephen Bates &dev_attr_cmb.attr, NULL)) 19309bdcfb10SChristoph Hellwig dev_warn(dev->ctrl.device, 1931202021c1SStephen Bates "failed to add sysfs attribute for CMB\n"); 1932202021c1SStephen Bates } 1933202021c1SStephen Bates } 1934202021c1SStephen Bates 1935a0a3408eSKeith Busch pci_enable_pcie_error_reporting(pdev); 1936a0a3408eSKeith Busch pci_save_state(pdev); 193757dacad5SJay Sternberg return 0; 193857dacad5SJay Sternberg 193957dacad5SJay Sternberg disable: 194057dacad5SJay Sternberg pci_disable_device(pdev); 194157dacad5SJay Sternberg return result; 194257dacad5SJay Sternberg } 194357dacad5SJay Sternberg 194457dacad5SJay Sternberg static void nvme_dev_unmap(struct nvme_dev *dev) 194557dacad5SJay Sternberg { 1946b00a726aSKeith Busch if (dev->bar) 1947b00a726aSKeith Busch iounmap(dev->bar); 1948a1f447b3SJohannes Thumshirn pci_release_mem_regions(to_pci_dev(dev->dev)); 1949b00a726aSKeith Busch } 1950b00a726aSKeith Busch 1951b00a726aSKeith Busch static void nvme_pci_disable(struct nvme_dev *dev) 1952b00a726aSKeith Busch { 195357dacad5SJay Sternberg struct pci_dev *pdev = to_pci_dev(dev->dev); 195457dacad5SJay Sternberg 1955f63572dfSJon Derrick nvme_release_cmb(dev); 1956dca51e78SChristoph Hellwig pci_free_irq_vectors(pdev); 195757dacad5SJay Sternberg 1958a0a3408eSKeith Busch if (pci_is_enabled(pdev)) { 1959a0a3408eSKeith Busch pci_disable_pcie_error_reporting(pdev); 196057dacad5SJay Sternberg pci_disable_device(pdev); 196157dacad5SJay Sternberg } 1962a0a3408eSKeith Busch } 196357dacad5SJay Sternberg 1964a5cdb68cSKeith Busch static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown) 196557dacad5SJay Sternberg { 196670659060SKeith Busch int i, queues; 1967302ad8ccSKeith Busch bool dead = true; 1968302ad8ccSKeith Busch struct pci_dev *pdev = to_pci_dev(dev->dev); 196957dacad5SJay Sternberg 197077bf25eaSKeith Busch mutex_lock(&dev->shutdown_lock); 1971302ad8ccSKeith Busch if (pci_is_enabled(pdev)) { 1972302ad8ccSKeith Busch u32 csts = readl(dev->bar + NVME_REG_CSTS); 1973302ad8ccSKeith Busch 1974302ad8ccSKeith Busch if (dev->ctrl.state == NVME_CTRL_LIVE) 1975302ad8ccSKeith Busch nvme_start_freeze(&dev->ctrl); 1976302ad8ccSKeith Busch dead = !!((csts & NVME_CSTS_CFS) || !(csts & NVME_CSTS_RDY) || 1977302ad8ccSKeith Busch pdev->error_state != pci_channel_io_normal); 197857dacad5SJay Sternberg } 1979c21377f8SGabriel Krisman Bertazi 1980302ad8ccSKeith Busch /* 1981302ad8ccSKeith Busch * Give the controller a chance to complete all entered requests if 1982302ad8ccSKeith Busch * doing a safe shutdown. 1983302ad8ccSKeith Busch */ 198487ad72a5SChristoph Hellwig if (!dead) { 198587ad72a5SChristoph Hellwig if (shutdown) 1986302ad8ccSKeith Busch nvme_wait_freeze_timeout(&dev->ctrl, NVME_IO_TIMEOUT); 198787ad72a5SChristoph Hellwig 198887ad72a5SChristoph Hellwig /* 198987ad72a5SChristoph Hellwig * If the controller is still alive tell it to stop using the 199087ad72a5SChristoph Hellwig * host memory buffer. In theory the shutdown / reset should 199187ad72a5SChristoph Hellwig * make sure that it doesn't access the host memory anymore, 199287ad72a5SChristoph Hellwig * but I'd rather be safe than sorry.
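 * Passing 0 to nvme_set_host_mem() sends a Set Features command with the enable bit cleared, which is how the controller is told to stop using the buffer.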
199387ad72a5SChristoph Hellwig */ 199487ad72a5SChristoph Hellwig if (dev->host_mem_descs) 199587ad72a5SChristoph Hellwig nvme_set_host_mem(dev, 0); 199687ad72a5SChristoph Hellwig 199787ad72a5SChristoph Hellwig } 1998302ad8ccSKeith Busch nvme_stop_queues(&dev->ctrl); 1999302ad8ccSKeith Busch 200070659060SKeith Busch queues = dev->online_queues - 1; 2001d858e5f0SSagi Grimberg for (i = dev->ctrl.queue_count - 1; i > 0; i--) 2002c21377f8SGabriel Krisman Bertazi nvme_suspend_queue(dev->queues[i]); 2003c21377f8SGabriel Krisman Bertazi 2004302ad8ccSKeith Busch if (dead) { 200582469c59SGabriel Krisman Bertazi /* A device might become IO incapable very soon during 200682469c59SGabriel Krisman Bertazi * probe, before the admin queue is configured. Thus, 200782469c59SGabriel Krisman Bertazi * queue_count can be 0 here. 200882469c59SGabriel Krisman Bertazi */ 2009d858e5f0SSagi Grimberg if (dev->ctrl.queue_count) 2010c21377f8SGabriel Krisman Bertazi nvme_suspend_queue(dev->queues[0]); 201157dacad5SJay Sternberg } else { 201270659060SKeith Busch nvme_disable_io_queues(dev, queues); 2013a5cdb68cSKeith Busch nvme_disable_admin_queue(dev, shutdown); 201457dacad5SJay Sternberg } 2015b00a726aSKeith Busch nvme_pci_disable(dev); 201657dacad5SJay Sternberg 2017e1958e65SMing Lin blk_mq_tagset_busy_iter(&dev->tagset, nvme_cancel_request, &dev->ctrl); 2018e1958e65SMing Lin blk_mq_tagset_busy_iter(&dev->admin_tagset, nvme_cancel_request, &dev->ctrl); 2019302ad8ccSKeith Busch 2020302ad8ccSKeith Busch /* 2021302ad8ccSKeith Busch * The driver will not be starting up queues again if shutting down so 2022302ad8ccSKeith Busch * must flush all entered requests to their failed completion to avoid 2023302ad8ccSKeith Busch * deadlocking blk-mq hot-cpu notifier. 2024302ad8ccSKeith Busch */ 2025302ad8ccSKeith Busch if (shutdown) 2026302ad8ccSKeith Busch nvme_start_queues(&dev->ctrl); 202777bf25eaSKeith Busch mutex_unlock(&dev->shutdown_lock); 202857dacad5SJay Sternberg } 202957dacad5SJay Sternberg 203057dacad5SJay Sternberg static int nvme_setup_prp_pools(struct nvme_dev *dev) 203157dacad5SJay Sternberg { 203257dacad5SJay Sternberg dev->prp_page_pool = dma_pool_create("prp list page", dev->dev, 203357dacad5SJay Sternberg PAGE_SIZE, PAGE_SIZE, 0); 203457dacad5SJay Sternberg if (!dev->prp_page_pool) 203557dacad5SJay Sternberg return -ENOMEM; 203657dacad5SJay Sternberg 203757dacad5SJay Sternberg /* Optimisation for I/Os between 4k and 128k */ 203857dacad5SJay Sternberg dev->prp_small_pool = dma_pool_create("prp list 256", dev->dev, 203957dacad5SJay Sternberg 256, 256, 0); 204057dacad5SJay Sternberg if (!dev->prp_small_pool) { 204157dacad5SJay Sternberg dma_pool_destroy(dev->prp_page_pool); 204257dacad5SJay Sternberg return -ENOMEM; 204357dacad5SJay Sternberg } 204457dacad5SJay Sternberg return 0; 204557dacad5SJay Sternberg } 204657dacad5SJay Sternberg 204757dacad5SJay Sternberg static void nvme_release_prp_pools(struct nvme_dev *dev) 204857dacad5SJay Sternberg { 204957dacad5SJay Sternberg dma_pool_destroy(dev->prp_page_pool); 205057dacad5SJay Sternberg dma_pool_destroy(dev->prp_small_pool); 205157dacad5SJay Sternberg } 205257dacad5SJay Sternberg 20531673f1f0SChristoph Hellwig static void nvme_pci_free_ctrl(struct nvme_ctrl *ctrl) 205457dacad5SJay Sternberg { 20551673f1f0SChristoph Hellwig struct nvme_dev *dev = to_nvme_dev(ctrl); 205657dacad5SJay Sternberg 2057f9f38e33SHelen Koike nvme_dbbuf_dma_free(dev); 205857dacad5SJay Sternberg put_device(dev->dev); 205957dacad5SJay Sternberg if (dev->tagset.tags) 206057dacad5SJay Sternberg 
blk_mq_free_tag_set(&dev->tagset); 20611c63dc66SChristoph Hellwig if (dev->ctrl.admin_q) 20621c63dc66SChristoph Hellwig blk_put_queue(dev->ctrl.admin_q); 206357dacad5SJay Sternberg kfree(dev->queues); 2064e286bcfcSScott Bauer free_opal_dev(dev->ctrl.opal_dev); 206557dacad5SJay Sternberg kfree(dev); 206657dacad5SJay Sternberg } 206757dacad5SJay Sternberg 2068f58944e2SKeith Busch static void nvme_remove_dead_ctrl(struct nvme_dev *dev, int status) 2069f58944e2SKeith Busch { 2070237045fcSLinus Torvalds dev_warn(dev->ctrl.device, "Removing after probe failure status: %d\n", status); 2071f58944e2SKeith Busch 2072f58944e2SKeith Busch kref_get(&dev->ctrl.kref); 207369d9a99cSKeith Busch nvme_dev_disable(dev, false); 2074f58944e2SKeith Busch if (!schedule_work(&dev->remove_work)) 2075f58944e2SKeith Busch nvme_put_ctrl(&dev->ctrl); 2076f58944e2SKeith Busch } 2077f58944e2SKeith Busch 2078fd634f41SChristoph Hellwig static void nvme_reset_work(struct work_struct *work) 207957dacad5SJay Sternberg { 2080d86c4d8eSChristoph Hellwig struct nvme_dev *dev = 2081d86c4d8eSChristoph Hellwig container_of(work, struct nvme_dev, ctrl.reset_work); 2082a98e58e5SScott Bauer bool was_suspend = !!(dev->ctrl.ctrl_config & NVME_CC_SHN_NORMAL); 2083f58944e2SKeith Busch int result = -ENODEV; 208457dacad5SJay Sternberg 208582b057caSRakesh Pandit if (WARN_ON(dev->ctrl.state != NVME_CTRL_RESETTING)) 2086fd634f41SChristoph Hellwig goto out; 2087fd634f41SChristoph Hellwig 2088fd634f41SChristoph Hellwig /* 2089fd634f41SChristoph Hellwig * If we're called to reset a live controller first shut it down before 2090fd634f41SChristoph Hellwig * moving on. 2091fd634f41SChristoph Hellwig */ 2092b00a726aSKeith Busch if (dev->ctrl.ctrl_config & NVME_CC_ENABLE) 2093a5cdb68cSKeith Busch nvme_dev_disable(dev, false); 2094fd634f41SChristoph Hellwig 2095b00a726aSKeith Busch result = nvme_pci_enable(dev); 209657dacad5SJay Sternberg if (result) 209757dacad5SJay Sternberg goto out; 209857dacad5SJay Sternberg 209901ad0990SSagi Grimberg result = nvme_pci_configure_admin_queue(dev); 210057dacad5SJay Sternberg if (result) 2101f58944e2SKeith Busch goto out; 210257dacad5SJay Sternberg 210357dacad5SJay Sternberg nvme_init_queue(dev->queues[0], 0); 210457dacad5SJay Sternberg result = nvme_alloc_admin_tags(dev); 210557dacad5SJay Sternberg if (result) 2106f58944e2SKeith Busch goto out; 210757dacad5SJay Sternberg 2108ce4541f4SChristoph Hellwig result = nvme_init_identify(&dev->ctrl); 2109ce4541f4SChristoph Hellwig if (result) 2110f58944e2SKeith Busch goto out; 2111ce4541f4SChristoph Hellwig 2112e286bcfcSScott Bauer if (dev->ctrl.oacs & NVME_CTRL_OACS_SEC_SUPP) { 2113e286bcfcSScott Bauer if (!dev->ctrl.opal_dev) 21144f1244c8SChristoph Hellwig dev->ctrl.opal_dev = 21154f1244c8SChristoph Hellwig init_opal_dev(&dev->ctrl, &nvme_sec_submit); 2116e286bcfcSScott Bauer else if (was_suspend) 21174f1244c8SChristoph Hellwig opal_unlock_from_suspend(dev->ctrl.opal_dev); 2118e286bcfcSScott Bauer } else { 2119e286bcfcSScott Bauer free_opal_dev(dev->ctrl.opal_dev); 2120e286bcfcSScott Bauer dev->ctrl.opal_dev = NULL; 2121e286bcfcSScott Bauer } 2122a98e58e5SScott Bauer 2123f9f38e33SHelen Koike if (dev->ctrl.oacs & NVME_CTRL_OACS_DBBUF_SUPP) { 2124f9f38e33SHelen Koike result = nvme_dbbuf_dma_alloc(dev); 2125f9f38e33SHelen Koike if (result) 2126f9f38e33SHelen Koike dev_warn(dev->dev, 2127f9f38e33SHelen Koike "unable to allocate dma for dbbuf\n"); 2128f9f38e33SHelen Koike } 2129f9f38e33SHelen Koike 213087ad72a5SChristoph Hellwig if (dev->ctrl.hmpre) 213187ad72a5SChristoph 
Hellwig nvme_setup_host_mem(dev); 213287ad72a5SChristoph Hellwig 213357dacad5SJay Sternberg result = nvme_setup_io_queues(dev); 213457dacad5SJay Sternberg if (result) 2135f58944e2SKeith Busch goto out; 213657dacad5SJay Sternberg 213721f033f7SKeith Busch /* 213857dacad5SJay Sternberg * Keep the controller around but remove all namespaces if we don't have 213957dacad5SJay Sternberg * any working I/O queue. 214057dacad5SJay Sternberg */ 214157dacad5SJay Sternberg if (dev->online_queues < 2) { 21421b3c47c1SSagi Grimberg dev_warn(dev->ctrl.device, "IO queues not created\n"); 21433b24774eSKeith Busch nvme_kill_queues(&dev->ctrl); 21445bae7f73SChristoph Hellwig nvme_remove_namespaces(&dev->ctrl); 214557dacad5SJay Sternberg } else { 214625646264SKeith Busch nvme_start_queues(&dev->ctrl); 2147302ad8ccSKeith Busch nvme_wait_freeze(&dev->ctrl); 214857dacad5SJay Sternberg nvme_dev_add(dev); 2149302ad8ccSKeith Busch nvme_unfreeze(&dev->ctrl); 215057dacad5SJay Sternberg } 215157dacad5SJay Sternberg 2152bb8d261eSChristoph Hellwig if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_LIVE)) { 2153bb8d261eSChristoph Hellwig dev_warn(dev->ctrl.device, "failed to mark controller live\n"); 2154bb8d261eSChristoph Hellwig goto out; 2155bb8d261eSChristoph Hellwig } 215692911a55SChristoph Hellwig 2157d09f2b45SSagi Grimberg nvme_start_ctrl(&dev->ctrl); 215857dacad5SJay Sternberg return; 215957dacad5SJay Sternberg 216057dacad5SJay Sternberg out: 2161f58944e2SKeith Busch nvme_remove_dead_ctrl(dev, result); 216257dacad5SJay Sternberg } 216357dacad5SJay Sternberg 21645c8809e6SChristoph Hellwig static void nvme_remove_dead_ctrl_work(struct work_struct *work) 216557dacad5SJay Sternberg { 21665c8809e6SChristoph Hellwig struct nvme_dev *dev = container_of(work, struct nvme_dev, remove_work); 216757dacad5SJay Sternberg struct pci_dev *pdev = to_pci_dev(dev->dev); 216857dacad5SJay Sternberg 216969d9a99cSKeith Busch nvme_kill_queues(&dev->ctrl); 217057dacad5SJay Sternberg if (pci_get_drvdata(pdev)) 2171921920abSKeith Busch device_release_driver(&pdev->dev); 21721673f1f0SChristoph Hellwig nvme_put_ctrl(&dev->ctrl); 217357dacad5SJay Sternberg } 217457dacad5SJay Sternberg 21751c63dc66SChristoph Hellwig static int nvme_pci_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val) 217657dacad5SJay Sternberg { 21771c63dc66SChristoph Hellwig *val = readl(to_nvme_dev(ctrl)->bar + off); 21781c63dc66SChristoph Hellwig return 0; 217957dacad5SJay Sternberg } 21801c63dc66SChristoph Hellwig 21815fd4ce1bSChristoph Hellwig static int nvme_pci_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val) 21825fd4ce1bSChristoph Hellwig { 21835fd4ce1bSChristoph Hellwig writel(val, to_nvme_dev(ctrl)->bar + off); 21845fd4ce1bSChristoph Hellwig return 0; 21855fd4ce1bSChristoph Hellwig } 21865fd4ce1bSChristoph Hellwig 21877fd8930fSChristoph Hellwig static int nvme_pci_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val) 21887fd8930fSChristoph Hellwig { 21897fd8930fSChristoph Hellwig *val = readq(to_nvme_dev(ctrl)->bar + off); 21907fd8930fSChristoph Hellwig return 0; 21917fd8930fSChristoph Hellwig } 21927fd8930fSChristoph Hellwig 21931c63dc66SChristoph Hellwig static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = { 21941a353d85SMing Lin .name = "pcie", 2195e439bb12SSagi Grimberg .module = THIS_MODULE, 2196c81bfba9SChristoph Hellwig .flags = NVME_F_METADATA_SUPPORTED, 21971c63dc66SChristoph Hellwig .reg_read32 = nvme_pci_reg_read32, 21985fd4ce1bSChristoph Hellwig .reg_write32 = nvme_pci_reg_write32, 21997fd8930fSChristoph Hellwig .reg_read64 = nvme_pci_reg_read64, 
22001673f1f0SChristoph Hellwig .free_ctrl = nvme_pci_free_ctrl, 2201f866fc42SChristoph Hellwig .submit_async_event = nvme_pci_submit_async_event, 22021c63dc66SChristoph Hellwig }; 220357dacad5SJay Sternberg 2204b00a726aSKeith Busch static int nvme_dev_map(struct nvme_dev *dev) 2205b00a726aSKeith Busch { 2206b00a726aSKeith Busch struct pci_dev *pdev = to_pci_dev(dev->dev); 2207b00a726aSKeith Busch 2208a1f447b3SJohannes Thumshirn if (pci_request_mem_regions(pdev, "nvme")) 2209b00a726aSKeith Busch return -ENODEV; 2210b00a726aSKeith Busch 221197f6ef64SXu Yu if (nvme_remap_bar(dev, NVME_REG_DBS + 4096)) 2212b00a726aSKeith Busch goto release; 2213b00a726aSKeith Busch 2214b00a726aSKeith Busch return 0; 2215b00a726aSKeith Busch release: 2216a1f447b3SJohannes Thumshirn pci_release_mem_regions(pdev); 2217b00a726aSKeith Busch return -ENODEV; 2218b00a726aSKeith Busch } 2219b00a726aSKeith Busch 2220ff5350a8SAndy Lutomirski static unsigned long check_dell_samsung_bug(struct pci_dev *pdev) 2221ff5350a8SAndy Lutomirski { 2222ff5350a8SAndy Lutomirski if (pdev->vendor == 0x144d && pdev->device == 0xa802) { 2223ff5350a8SAndy Lutomirski /* 2224ff5350a8SAndy Lutomirski * Several Samsung devices seem to drop off the PCIe bus 2225ff5350a8SAndy Lutomirski * randomly when APST is on and uses the deepest sleep state. 2226ff5350a8SAndy Lutomirski * This has been observed on a Samsung "SM951 NVMe SAMSUNG 2227ff5350a8SAndy Lutomirski * 256GB", a "PM951 NVMe SAMSUNG 512GB", and a "Samsung SSD 2228ff5350a8SAndy Lutomirski * 950 PRO 256GB", but it seems to be restricted to two Dell 2229ff5350a8SAndy Lutomirski * laptops. 2230ff5350a8SAndy Lutomirski */ 2231ff5350a8SAndy Lutomirski if (dmi_match(DMI_SYS_VENDOR, "Dell Inc.") && 2232ff5350a8SAndy Lutomirski (dmi_match(DMI_PRODUCT_NAME, "XPS 15 9550") || 2233ff5350a8SAndy Lutomirski dmi_match(DMI_PRODUCT_NAME, "Precision 5510"))) 2234ff5350a8SAndy Lutomirski return NVME_QUIRK_NO_DEEPEST_PS; 2235ff5350a8SAndy Lutomirski } 2236ff5350a8SAndy Lutomirski 2237ff5350a8SAndy Lutomirski return 0; 2238ff5350a8SAndy Lutomirski } 2239ff5350a8SAndy Lutomirski 224057dacad5SJay Sternberg static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id) 224157dacad5SJay Sternberg { 224257dacad5SJay Sternberg int node, result = -ENOMEM; 224357dacad5SJay Sternberg struct nvme_dev *dev; 2244ff5350a8SAndy Lutomirski unsigned long quirks = id->driver_data; 224557dacad5SJay Sternberg 224657dacad5SJay Sternberg node = dev_to_node(&pdev->dev); 224757dacad5SJay Sternberg if (node == NUMA_NO_NODE) 22482fa84351SMasayoshi Mizuma set_dev_node(&pdev->dev, first_memory_node); 224957dacad5SJay Sternberg 225057dacad5SJay Sternberg dev = kzalloc_node(sizeof(*dev), GFP_KERNEL, node); 225157dacad5SJay Sternberg if (!dev) 225257dacad5SJay Sternberg return -ENOMEM; 225357dacad5SJay Sternberg dev->queues = kzalloc_node((num_possible_cpus() + 1) * sizeof(void *), 225457dacad5SJay Sternberg GFP_KERNEL, node); 225557dacad5SJay Sternberg if (!dev->queues) 225657dacad5SJay Sternberg goto free; 225757dacad5SJay Sternberg 225857dacad5SJay Sternberg dev->dev = get_device(&pdev->dev); 225957dacad5SJay Sternberg pci_set_drvdata(pdev, dev); 226057dacad5SJay Sternberg 2261b00a726aSKeith Busch result = nvme_dev_map(dev); 2262b00a726aSKeith Busch if (result) 2263b00a726aSKeith Busch goto free; 2264b00a726aSKeith Busch 2265d86c4d8eSChristoph Hellwig INIT_WORK(&dev->ctrl.reset_work, nvme_reset_work); 22665c8809e6SChristoph Hellwig INIT_WORK(&dev->remove_work, nvme_remove_dead_ctrl_work); 226777bf25eaSKeith Busch 
2268db3cbfffSKeith Busch init_completion(&dev->ioq_wait);
2269f3ca80fcSChristoph Hellwig
2270f3ca80fcSChristoph Hellwig result = nvme_setup_prp_pools(dev);
2271f3ca80fcSChristoph Hellwig if (result)
2272f3ca80fcSChristoph Hellwig goto put_pci;
2273f3ca80fcSChristoph Hellwig
2274ff5350a8SAndy Lutomirski quirks |= check_dell_samsung_bug(pdev);
2275ff5350a8SAndy Lutomirski
2276f3ca80fcSChristoph Hellwig result = nvme_init_ctrl(&dev->ctrl, &pdev->dev, &nvme_pci_ctrl_ops,
2277ff5350a8SAndy Lutomirski quirks);
2278f3ca80fcSChristoph Hellwig if (result)
2279f3ca80fcSChristoph Hellwig goto release_pools;
2280f3ca80fcSChristoph Hellwig
228182b057caSRakesh Pandit nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_RESETTING);
22821b3c47c1SSagi Grimberg dev_info(dev->ctrl.device, "pci function %s\n", dev_name(&pdev->dev));
22831b3c47c1SSagi Grimberg
2284d86c4d8eSChristoph Hellwig queue_work(nvme_wq, &dev->ctrl.reset_work);
228557dacad5SJay Sternberg return 0;
228657dacad5SJay Sternberg
228757dacad5SJay Sternberg release_pools:
228857dacad5SJay Sternberg nvme_release_prp_pools(dev);
228957dacad5SJay Sternberg put_pci:
229057dacad5SJay Sternberg put_device(dev->dev);
2291b00a726aSKeith Busch nvme_dev_unmap(dev);
229257dacad5SJay Sternberg free:
229357dacad5SJay Sternberg kfree(dev->queues);
229457dacad5SJay Sternberg kfree(dev);
229557dacad5SJay Sternberg return result;
229657dacad5SJay Sternberg }
229757dacad5SJay Sternberg
229857dacad5SJay Sternberg static void nvme_reset_notify(struct pci_dev *pdev, bool prepare)
229957dacad5SJay Sternberg {
230057dacad5SJay Sternberg struct nvme_dev *dev = pci_get_drvdata(pdev);
230157dacad5SJay Sternberg
230257dacad5SJay Sternberg if (prepare)
2303a5cdb68cSKeith Busch nvme_dev_disable(dev, false);
230457dacad5SJay Sternberg else
2305d86c4d8eSChristoph Hellwig nvme_reset_ctrl(&dev->ctrl);
230657dacad5SJay Sternberg }
230757dacad5SJay Sternberg
230857dacad5SJay Sternberg static void nvme_shutdown(struct pci_dev *pdev)
230957dacad5SJay Sternberg {
231057dacad5SJay Sternberg struct nvme_dev *dev = pci_get_drvdata(pdev);
2311a5cdb68cSKeith Busch nvme_dev_disable(dev, true);
231257dacad5SJay Sternberg }
231357dacad5SJay Sternberg
2314f58944e2SKeith Busch /*
2315f58944e2SKeith Busch * The driver's remove may be called on a device in a partially initialized
2316f58944e2SKeith Busch * state. This function must not depend on the device state in order to
2317f58944e2SKeith Busch * proceed.
2318f58944e2SKeith Busch */
231957dacad5SJay Sternberg static void nvme_remove(struct pci_dev *pdev)
232057dacad5SJay Sternberg {
232157dacad5SJay Sternberg struct nvme_dev *dev = pci_get_drvdata(pdev);
232257dacad5SJay Sternberg
2323bb8d261eSChristoph Hellwig nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING);
2324bb8d261eSChristoph Hellwig
2325d86c4d8eSChristoph Hellwig cancel_work_sync(&dev->ctrl.reset_work);
232657dacad5SJay Sternberg pci_set_drvdata(pdev, NULL);
23270ff9d4e1SKeith Busch
23286db28edaSKeith Busch if (!pci_device_is_present(pdev)) {
23290ff9d4e1SKeith Busch nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DEAD);
23306db28edaSKeith Busch nvme_dev_disable(dev, false);
23316db28edaSKeith Busch }
23320ff9d4e1SKeith Busch
2333d86c4d8eSChristoph Hellwig flush_work(&dev->ctrl.reset_work);
2334d09f2b45SSagi Grimberg nvme_stop_ctrl(&dev->ctrl);
2335d09f2b45SSagi Grimberg nvme_remove_namespaces(&dev->ctrl);
2336a5cdb68cSKeith Busch nvme_dev_disable(dev, true);
233787ad72a5SChristoph Hellwig nvme_free_host_mem(dev);
233857dacad5SJay Sternberg nvme_dev_remove_admin(dev);
233957dacad5SJay Sternberg nvme_free_queues(dev, 0);
2340d09f2b45SSagi Grimberg nvme_uninit_ctrl(&dev->ctrl);
234157dacad5SJay Sternberg nvme_release_prp_pools(dev);
2342b00a726aSKeith Busch nvme_dev_unmap(dev);
23431673f1f0SChristoph Hellwig nvme_put_ctrl(&dev->ctrl);
234457dacad5SJay Sternberg }
234557dacad5SJay Sternberg
234613880f5bSKeith Busch static int nvme_pci_sriov_configure(struct pci_dev *pdev, int numvfs)
234713880f5bSKeith Busch {
234813880f5bSKeith Busch int ret = 0;
234913880f5bSKeith Busch
235013880f5bSKeith Busch if (numvfs == 0) {
235113880f5bSKeith Busch if (pci_vfs_assigned(pdev)) {
235213880f5bSKeith Busch dev_warn(&pdev->dev,
235313880f5bSKeith Busch "Cannot disable SR-IOV VFs while assigned\n");
235413880f5bSKeith Busch return -EPERM;
235513880f5bSKeith Busch }
235613880f5bSKeith Busch pci_disable_sriov(pdev);
235713880f5bSKeith Busch return 0;
235813880f5bSKeith Busch }
235913880f5bSKeith Busch
236013880f5bSKeith Busch ret = pci_enable_sriov(pdev, numvfs);
236113880f5bSKeith Busch return ret ? ret : numvfs;
236213880f5bSKeith Busch }
236313880f5bSKeith Busch
236457dacad5SJay Sternberg #ifdef CONFIG_PM_SLEEP
236557dacad5SJay Sternberg static int nvme_suspend(struct device *dev)
236657dacad5SJay Sternberg {
236757dacad5SJay Sternberg struct pci_dev *pdev = to_pci_dev(dev);
236857dacad5SJay Sternberg struct nvme_dev *ndev = pci_get_drvdata(pdev);
236957dacad5SJay Sternberg
2370a5cdb68cSKeith Busch nvme_dev_disable(ndev, true);
237157dacad5SJay Sternberg return 0;
237257dacad5SJay Sternberg }
237357dacad5SJay Sternberg
237457dacad5SJay Sternberg static int nvme_resume(struct device *dev)
237557dacad5SJay Sternberg {
237657dacad5SJay Sternberg struct pci_dev *pdev = to_pci_dev(dev);
237757dacad5SJay Sternberg struct nvme_dev *ndev = pci_get_drvdata(pdev);
237857dacad5SJay Sternberg
2379d86c4d8eSChristoph Hellwig nvme_reset_ctrl(&ndev->ctrl);
238057dacad5SJay Sternberg return 0;
238157dacad5SJay Sternberg }
238257dacad5SJay Sternberg #endif
238357dacad5SJay Sternberg
238457dacad5SJay Sternberg static SIMPLE_DEV_PM_OPS(nvme_dev_pm_ops, nvme_suspend, nvme_resume);
238557dacad5SJay Sternberg
2386a0a3408eSKeith Busch static pci_ers_result_t nvme_error_detected(struct pci_dev *pdev,
2387a0a3408eSKeith Busch pci_channel_state_t state)
2388a0a3408eSKeith Busch {
2389a0a3408eSKeith Busch struct nvme_dev *dev = pci_get_drvdata(pdev);
2390a0a3408eSKeith Busch
2391a0a3408eSKeith Busch /*
2392a0a3408eSKeith Busch * A frozen channel requires a reset. When detected, this method will
2393a0a3408eSKeith Busch * shut down the controller to quiesce it. The controller will be
2394a0a3408eSKeith Busch * restarted after the slot reset through the driver's slot_reset callback.
2395a0a3408eSKeith Busch */
2396a0a3408eSKeith Busch switch (state) {
2397a0a3408eSKeith Busch case pci_channel_io_normal:
2398a0a3408eSKeith Busch return PCI_ERS_RESULT_CAN_RECOVER;
2399a0a3408eSKeith Busch case pci_channel_io_frozen:
2400d011fb31SKeith Busch dev_warn(dev->ctrl.device,
2401d011fb31SKeith Busch "frozen state error detected, reset controller\n");
2402a5cdb68cSKeith Busch nvme_dev_disable(dev, false);
2403a0a3408eSKeith Busch return PCI_ERS_RESULT_NEED_RESET;
2404a0a3408eSKeith Busch case pci_channel_io_perm_failure:
2405d011fb31SKeith Busch dev_warn(dev->ctrl.device,
2406d011fb31SKeith Busch "failure state error detected, request disconnect\n");
2407a0a3408eSKeith Busch return PCI_ERS_RESULT_DISCONNECT;
2408a0a3408eSKeith Busch }
2409a0a3408eSKeith Busch return PCI_ERS_RESULT_NEED_RESET;
2410a0a3408eSKeith Busch }
2411a0a3408eSKeith Busch
2412a0a3408eSKeith Busch static pci_ers_result_t nvme_slot_reset(struct pci_dev *pdev)
2413a0a3408eSKeith Busch {
2414a0a3408eSKeith Busch struct nvme_dev *dev = pci_get_drvdata(pdev);
2415a0a3408eSKeith Busch
24161b3c47c1SSagi Grimberg dev_info(dev->ctrl.device, "restart after slot reset\n");
2417a0a3408eSKeith Busch pci_restore_state(pdev);
2418d86c4d8eSChristoph Hellwig nvme_reset_ctrl(&dev->ctrl);
2419a0a3408eSKeith Busch return PCI_ERS_RESULT_RECOVERED;
2420a0a3408eSKeith Busch }
2421a0a3408eSKeith Busch
2422a0a3408eSKeith Busch static void nvme_error_resume(struct pci_dev *pdev)
2423a0a3408eSKeith Busch {
2424a0a3408eSKeith Busch pci_cleanup_aer_uncorrect_error_status(pdev);
2425a0a3408eSKeith Busch }
2426a0a3408eSKeith Busch
242757dacad5SJay Sternberg static const struct pci_error_handlers nvme_err_handler = {
242857dacad5SJay Sternberg .error_detected = nvme_error_detected,
242957dacad5SJay Sternberg .slot_reset = nvme_slot_reset,
243057dacad5SJay Sternberg .resume = nvme_error_resume,
243157dacad5SJay Sternberg .reset_notify = nvme_reset_notify,
243257dacad5SJay Sternberg };
243357dacad5SJay Sternberg
243457dacad5SJay Sternberg static const struct pci_device_id nvme_id_table[] = {
2435106198edSChristoph Hellwig { PCI_VDEVICE(INTEL, 0x0953),
243608095e70SKeith Busch .driver_data = NVME_QUIRK_STRIPE_SIZE |
2437e850fd16SChristoph Hellwig NVME_QUIRK_DEALLOCATE_ZEROES, },
243899466e70SKeith Busch { PCI_VDEVICE(INTEL, 0x0a53),
243999466e70SKeith Busch .driver_data = NVME_QUIRK_STRIPE_SIZE |
2440e850fd16SChristoph Hellwig NVME_QUIRK_DEALLOCATE_ZEROES, },
244199466e70SKeith Busch { PCI_VDEVICE(INTEL, 0x0a54),
244299466e70SKeith Busch .driver_data = NVME_QUIRK_STRIPE_SIZE |
2443e850fd16SChristoph Hellwig NVME_QUIRK_DEALLOCATE_ZEROES, },
244450af47d0SAndy Lutomirski { PCI_VDEVICE(INTEL, 0xf1a5), /* Intel 600P/P3100 */
244550af47d0SAndy Lutomirski .driver_data = NVME_QUIRK_NO_DEEPEST_PS },
2446540c801cSKeith Busch { PCI_VDEVICE(INTEL, 0x5845), /* Qemu emulated controller */
2447540c801cSKeith Busch .driver_data = NVME_QUIRK_IDENTIFY_CNS, },
244854adc010SGuilherme G. Piccoli { PCI_DEVICE(0x1c58, 0x0003), /* HGST adapter */
244954adc010SGuilherme G. Piccoli .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
2450015282c9SWenbo Wang { PCI_DEVICE(0x1c5f, 0x0540), /* Memblaze Pblaze4 adapter */
2451015282c9SWenbo Wang .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
2452d554b5e1SMartin K. Petersen { PCI_DEVICE(0x144d, 0xa821), /* Samsung PM1725 */
2453d554b5e1SMartin K. Petersen .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
2454d554b5e1SMartin K. Petersen { PCI_DEVICE(0x144d, 0xa822), /* Samsung PM1725a */
2455d554b5e1SMartin K. Petersen .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
245657dacad5SJay Sternberg { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
2457c74dc780SStephan Günther { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001) },
2458124298bdSDaniel Roschka { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2003) },
245957dacad5SJay Sternberg { 0, }
246057dacad5SJay Sternberg };
246157dacad5SJay Sternberg MODULE_DEVICE_TABLE(pci, nvme_id_table);
246257dacad5SJay Sternberg
246357dacad5SJay Sternberg static struct pci_driver nvme_driver = {
246457dacad5SJay Sternberg .name = "nvme",
246557dacad5SJay Sternberg .id_table = nvme_id_table,
246657dacad5SJay Sternberg .probe = nvme_probe,
246757dacad5SJay Sternberg .remove = nvme_remove,
246857dacad5SJay Sternberg .shutdown = nvme_shutdown,
246957dacad5SJay Sternberg .driver = {
247057dacad5SJay Sternberg .pm = &nvme_dev_pm_ops,
247157dacad5SJay Sternberg },
247213880f5bSKeith Busch .sriov_configure = nvme_pci_sriov_configure,
247357dacad5SJay Sternberg .err_handler = &nvme_err_handler,
247457dacad5SJay Sternberg };
247557dacad5SJay Sternberg
247657dacad5SJay Sternberg static int __init nvme_init(void)
247757dacad5SJay Sternberg {
24789a6327d2SSagi Grimberg return pci_register_driver(&nvme_driver);
247957dacad5SJay Sternberg }
248057dacad5SJay Sternberg
248157dacad5SJay Sternberg static void __exit nvme_exit(void)
248257dacad5SJay Sternberg {
248357dacad5SJay Sternberg pci_unregister_driver(&nvme_driver);
248457dacad5SJay Sternberg _nvme_check_size();
248557dacad5SJay Sternberg }
248657dacad5SJay Sternberg
248757dacad5SJay Sternberg MODULE_AUTHOR("Matthew Wilcox <willy@linux.intel.com>");
248857dacad5SJay Sternberg MODULE_LICENSE("GPL");
248957dacad5SJay Sternberg MODULE_VERSION("1.0");
249057dacad5SJay Sternberg module_init(nvme_init);
249157dacad5SJay Sternberg module_exit(nvme_exit);
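/*
 * Editorial sketch, not part of the driver source above: it illustrates how
 * the quirk mechanism in this file fits together. Per-device quirks are
 * declared as driver_data in nvme_id_table[] and picked up in nvme_probe()
 * through id->driver_data; additional quirks can be ORed in at probe time,
 * as check_dell_samsung_bug() does for the DMI-matched Dell laptops. The
 * entry below is hypothetical: the vendor and device IDs are placeholders
 * chosen for illustration only, while NVME_QUIRK_DELAY_BEFORE_CHK_RDY is a
 * flag already used by several entries in the table above. A real entry
 * would be added inside nvme_id_table[], before the terminating { 0, }.
 */
#if 0	/* illustration only, never compiled */
	{ PCI_DEVICE(0x1b4b, 0x1234),	/* hypothetical adapter, IDs made up */
		.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
#endif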