/*
 * NVM Express device driver
 * Copyright (c) 2011-2014, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <linux/aer.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/blk-mq-pci.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/hdreg.h>
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kdev_t.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/poison.h>
#include <linux/ptrace.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/t10-pi.h>
#include <linux/timer.h>
#include <linux/types.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <asm/unaligned.h>
#include <linux/sed-opal.h>

#include "nvme.h"

#define NVME_Q_DEPTH		1024
#define NVME_AQ_DEPTH		256
#define SQ_SIZE(depth)		(depth * sizeof(struct nvme_command))
#define CQ_SIZE(depth)		(depth * sizeof(struct nvme_completion))

/*
 * We handle AEN commands ourselves and don't even let the
 * block layer know about them.
 */
#define NVME_AQ_BLKMQ_DEPTH	(NVME_AQ_DEPTH - NVME_NR_AERS)

static int use_threaded_interrupts;
module_param(use_threaded_interrupts, int, 0);

static bool use_cmb_sqes = true;
module_param(use_cmb_sqes, bool, 0644);
MODULE_PARM_DESC(use_cmb_sqes, "use controller's memory buffer for I/O SQes");

static struct workqueue_struct *nvme_workq;

struct nvme_dev;
struct nvme_queue;

static int nvme_reset(struct nvme_dev *dev);
static void nvme_process_cq(struct nvme_queue *nvmeq);
static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown);

/*
 * Represents an NVM Express device.  Each nvme_dev is a PCI function.
 */
struct nvme_dev {
        struct nvme_queue **queues;
        struct blk_mq_tag_set tagset;
        struct blk_mq_tag_set admin_tagset;
        u32 __iomem *dbs;
        struct device *dev;
        struct dma_pool *prp_page_pool;
        struct dma_pool *prp_small_pool;
        unsigned queue_count;
        unsigned online_queues;
        unsigned max_qid;
        int q_depth;
        u32 db_stride;
        void __iomem *bar;
        struct work_struct reset_work;
        struct work_struct remove_work;
        struct timer_list watchdog_timer;
        struct mutex shutdown_lock;
        bool subsystem;
        void __iomem *cmb;
        dma_addr_t cmb_dma_addr;
        u64 cmb_size;
        u32 cmbsz;
        u32 cmbloc;
        struct nvme_ctrl ctrl;
        struct completion ioq_wait;
        u32 *dbbuf_dbs;
        dma_addr_t dbbuf_dbs_dma_addr;
        u32 *dbbuf_eis;
        dma_addr_t dbbuf_eis_dma_addr;
};

static inline unsigned int sq_idx(unsigned int qid, u32 stride)
{
        return qid * 2 * stride;
}

static inline unsigned int cq_idx(unsigned int qid, u32 stride)
{
        return (qid * 2 + 1) * stride;
}
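
/*
 * Illustrative note (derived from the two helpers above, not from the
 * spec text): the shadow doorbell buffer is an array of u32 slots indexed
 * as above.  With a doorbell stride of 1, queue 3 uses slot 6 for its SQ
 * doorbell and slot 7 for its CQ doorbell; with a stride of 4 the same
 * queue would use slots 24 and 28.
 */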

static inline struct nvme_dev *to_nvme_dev(struct nvme_ctrl *ctrl)
{
        return container_of(ctrl, struct nvme_dev, ctrl);
}

/*
 * An NVM Express queue.  Each device has at least two (one for admin
 * commands and one for I/O commands).
 */
struct nvme_queue {
        struct device *q_dmadev;
        struct nvme_dev *dev;
        char irqname[24];	/* nvme4294967295-65535\0 */
        spinlock_t q_lock;
        struct nvme_command *sq_cmds;
        struct nvme_command __iomem *sq_cmds_io;
        volatile struct nvme_completion *cqes;
        struct blk_mq_tags **tags;
        dma_addr_t sq_dma_addr;
        dma_addr_t cq_dma_addr;
        u32 __iomem *q_db;
        u16 q_depth;
        s16 cq_vector;
        u16 sq_tail;
        u16 cq_head;
        u16 qid;
        u8 cq_phase;
        u8 cqe_seen;
        u32 *dbbuf_sq_db;
        u32 *dbbuf_cq_db;
        u32 *dbbuf_sq_ei;
        u32 *dbbuf_cq_ei;
};

/*
 * The nvme_iod describes the data in an I/O, including the list of PRP
 * entries.  You can't see it in this data structure because C doesn't let
 * me express that.  Use nvme_init_iod to ensure there's enough space
 * allocated to store the PRP list.
 */
struct nvme_iod {
        struct nvme_request req;
        struct nvme_queue *nvmeq;
        int aborted;
        int npages;		/* In the PRP list. 0 means small pool in use */
        int nents;		/* Used in scatterlist */
        int length;		/* Of data, in bytes */
        dma_addr_t first_dma;
        struct scatterlist meta_sg; /* metadata requires single contiguous buffer */
        struct scatterlist *sg;
        struct scatterlist inline_sg[0];
};
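
/*
 * Sketch of the resulting per-request PDU layout (an illustration of the
 * comment above, for nseg scatterlist entries and npages PRP-list pages):
 *
 *	[struct nvme_iod][struct scatterlist x nseg][__le64 * x npages]
 *
 * iod->sg points either at inline_sg above or at a kmalloc'ed buffer with
 * the same trailing layout; iod_list() finds the PRP-list pointers by
 * stepping past the scatterlist entries.
 */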

/*
 * Check we didn't inadvertently grow the command struct
 */
static inline void _nvme_check_size(void)
{
        BUILD_BUG_ON(sizeof(struct nvme_rw_command) != 64);
        BUILD_BUG_ON(sizeof(struct nvme_create_cq) != 64);
        BUILD_BUG_ON(sizeof(struct nvme_create_sq) != 64);
        BUILD_BUG_ON(sizeof(struct nvme_delete_queue) != 64);
        BUILD_BUG_ON(sizeof(struct nvme_features) != 64);
        BUILD_BUG_ON(sizeof(struct nvme_format_cmd) != 64);
        BUILD_BUG_ON(sizeof(struct nvme_abort_cmd) != 64);
        BUILD_BUG_ON(sizeof(struct nvme_command) != 64);
        BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != 4096);
        BUILD_BUG_ON(sizeof(struct nvme_id_ns) != 4096);
        BUILD_BUG_ON(sizeof(struct nvme_lba_range_type) != 64);
        BUILD_BUG_ON(sizeof(struct nvme_smart_log) != 512);
        BUILD_BUG_ON(sizeof(struct nvme_dbbuf) != 64);
}

static inline unsigned int nvme_dbbuf_size(u32 stride)
{
        return ((num_possible_cpus() + 1) * 8 * stride);
}

static int nvme_dbbuf_dma_alloc(struct nvme_dev *dev)
{
        unsigned int mem_size = nvme_dbbuf_size(dev->db_stride);

        if (dev->dbbuf_dbs)
                return 0;

        dev->dbbuf_dbs = dma_alloc_coherent(dev->dev, mem_size,
                                            &dev->dbbuf_dbs_dma_addr,
                                            GFP_KERNEL);
        if (!dev->dbbuf_dbs)
                return -ENOMEM;
        dev->dbbuf_eis = dma_alloc_coherent(dev->dev, mem_size,
                                            &dev->dbbuf_eis_dma_addr,
                                            GFP_KERNEL);
        if (!dev->dbbuf_eis) {
                dma_free_coherent(dev->dev, mem_size,
                                  dev->dbbuf_dbs, dev->dbbuf_dbs_dma_addr);
                dev->dbbuf_dbs = NULL;
                return -ENOMEM;
        }

        return 0;
}

static void nvme_dbbuf_dma_free(struct nvme_dev *dev)
{
        unsigned int mem_size = nvme_dbbuf_size(dev->db_stride);

        if (dev->dbbuf_dbs) {
                dma_free_coherent(dev->dev, mem_size,
                                  dev->dbbuf_dbs, dev->dbbuf_dbs_dma_addr);
                dev->dbbuf_dbs = NULL;
        }
        if (dev->dbbuf_eis) {
                dma_free_coherent(dev->dev, mem_size,
                                  dev->dbbuf_eis, dev->dbbuf_eis_dma_addr);
                dev->dbbuf_eis = NULL;
        }
}

static void nvme_dbbuf_init(struct nvme_dev *dev,
                            struct nvme_queue *nvmeq, int qid)
{
        if (!dev->dbbuf_dbs || !qid)
                return;

        nvmeq->dbbuf_sq_db = &dev->dbbuf_dbs[sq_idx(qid, dev->db_stride)];
        nvmeq->dbbuf_cq_db = &dev->dbbuf_dbs[cq_idx(qid, dev->db_stride)];
        nvmeq->dbbuf_sq_ei = &dev->dbbuf_eis[sq_idx(qid, dev->db_stride)];
        nvmeq->dbbuf_cq_ei = &dev->dbbuf_eis[cq_idx(qid, dev->db_stride)];
}

static void nvme_dbbuf_set(struct nvme_dev *dev)
{
        struct nvme_command c;

        if (!dev->dbbuf_dbs)
                return;

        memset(&c, 0, sizeof(c));
        c.dbbuf.opcode = nvme_admin_dbbuf;
        c.dbbuf.prp1 = cpu_to_le64(dev->dbbuf_dbs_dma_addr);
        c.dbbuf.prp2 = cpu_to_le64(dev->dbbuf_eis_dma_addr);

        if (nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0)) {
                dev_warn(dev->dev, "unable to set dbbuf\n");
                /* Free memory and continue on */
                nvme_dbbuf_dma_free(dev);
        }
}

static inline int nvme_dbbuf_need_event(u16 event_idx, u16 new_idx, u16 old)
{
        return (u16)(new_idx - event_idx - 1) < (u16)(new_idx - old);
}
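
/*
 * Worked example (illustrative): the comparison above asks, in mod-2^16
 * arithmetic, whether event_idx lies in the half-open window (old, new_idx].
 * With old = 10, new_idx = 15, event_idx = 12: (15 - 12 - 1) = 2 is less
 * than (15 - 10) = 5, so the controller asked to be notified and an MMIO
 * doorbell write is required.  With event_idx = 20 the left side wraps to
 * 65530, the check fails, and the MMIO write can be skipped.
 */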

/* Update dbbuf and return true if an MMIO is required */
static bool nvme_dbbuf_update_and_check_event(u16 value, u32 *dbbuf_db,
                                              volatile u32 *dbbuf_ei)
{
        if (dbbuf_db) {
                u16 old_value;

                /*
                 * Ensure that the queue is written before updating
                 * the doorbell in memory
                 */
                wmb();

                old_value = *dbbuf_db;
                *dbbuf_db = value;

                if (!nvme_dbbuf_need_event(*dbbuf_ei, value, old_value))
                        return false;
        }

        return true;
}

/*
 * Max size of iod being embedded in the request payload
 */
#define NVME_INT_PAGES		2
#define NVME_INT_BYTES(dev)	(NVME_INT_PAGES * (dev)->ctrl.page_size)

/*
 * Will slightly overestimate the number of pages needed.  This is OK
 * as it only leads to a small amount of wasted memory for the lifetime of
 * the I/O.
 */
static int nvme_npages(unsigned size, struct nvme_dev *dev)
{
        unsigned nprps = DIV_ROUND_UP(size + dev->ctrl.page_size,
                                      dev->ctrl.page_size);
        return DIV_ROUND_UP(8 * nprps, PAGE_SIZE - 8);
}

static unsigned int nvme_iod_alloc_size(struct nvme_dev *dev,
                unsigned int size, unsigned int nseg)
{
        return sizeof(__le64 *) * nvme_npages(size, dev) +
                        sizeof(struct scatterlist) * nseg;
}

static unsigned int nvme_cmd_size(struct nvme_dev *dev)
{
        return sizeof(struct nvme_iod) +
                nvme_iod_alloc_size(dev, NVME_INT_BYTES(dev), NVME_INT_PAGES);
}
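
/*
 * Sizing example (illustrative, assuming a 4 KiB controller page size and
 * a 4 KiB PAGE_SIZE): the inline case covers NVME_INT_PAGES = 2 pages,
 * i.e. NVME_INT_BYTES = 8 KiB.  nvme_npages(8192) computes nprps =
 * DIV_ROUND_UP(8192 + 4096, 4096) = 3 and returns DIV_ROUND_UP(24, 4088)
 * = 1, so each request PDU reserves room for one PRP-list pointer plus
 * two inline scatterlist entries.
 */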

static int nvmeq_irq(struct nvme_queue *nvmeq)
{
        return pci_irq_vector(to_pci_dev(nvmeq->dev->dev), nvmeq->cq_vector);
}

static int nvme_admin_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
                                unsigned int hctx_idx)
{
        struct nvme_dev *dev = data;
        struct nvme_queue *nvmeq = dev->queues[0];

        WARN_ON(hctx_idx != 0);
        WARN_ON(dev->admin_tagset.tags[0] != hctx->tags);
        WARN_ON(nvmeq->tags);

        hctx->driver_data = nvmeq;
        nvmeq->tags = &dev->admin_tagset.tags[0];
        return 0;
}

static void nvme_admin_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
{
        struct nvme_queue *nvmeq = hctx->driver_data;

        nvmeq->tags = NULL;
}

static int nvme_admin_init_request(void *data, struct request *req,
                                unsigned int hctx_idx, unsigned int rq_idx,
                                unsigned int numa_node)
{
        struct nvme_dev *dev = data;
        struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
        struct nvme_queue *nvmeq = dev->queues[0];

        BUG_ON(!nvmeq);
        iod->nvmeq = nvmeq;
        return 0;
}

static int nvme_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
                          unsigned int hctx_idx)
{
        struct nvme_dev *dev = data;
        struct nvme_queue *nvmeq = dev->queues[hctx_idx + 1];

        if (!nvmeq->tags)
                nvmeq->tags = &dev->tagset.tags[hctx_idx];

        WARN_ON(dev->tagset.tags[hctx_idx] != hctx->tags);
        hctx->driver_data = nvmeq;
        return 0;
}

static int nvme_init_request(void *data, struct request *req,
                                unsigned int hctx_idx, unsigned int rq_idx,
                                unsigned int numa_node)
{
        struct nvme_dev *dev = data;
        struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
        struct nvme_queue *nvmeq = dev->queues[hctx_idx + 1];

        BUG_ON(!nvmeq);
        iod->nvmeq = nvmeq;
        return 0;
}

static int nvme_pci_map_queues(struct blk_mq_tag_set *set)
{
        struct nvme_dev *dev = set->driver_data;

        return blk_mq_pci_map_queues(set, to_pci_dev(dev->dev));
}

/**
 * __nvme_submit_cmd() - Copy a command into a queue and ring the doorbell
 * @nvmeq: The queue to use
 * @cmd: The command to send
 *
 * Safe to use from interrupt context
 */
static void __nvme_submit_cmd(struct nvme_queue *nvmeq,
                                                struct nvme_command *cmd)
{
        u16 tail = nvmeq->sq_tail;

        if (nvmeq->sq_cmds_io)
                memcpy_toio(&nvmeq->sq_cmds_io[tail], cmd, sizeof(*cmd));
        else
                memcpy(&nvmeq->sq_cmds[tail], cmd, sizeof(*cmd));

        if (++tail == nvmeq->q_depth)
                tail = 0;
        if (nvme_dbbuf_update_and_check_event(tail, nvmeq->dbbuf_sq_db,
                                              nvmeq->dbbuf_sq_ei))
                writel(tail, nvmeq->q_db);
        nvmeq->sq_tail = tail;
}
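
/*
 * Locking note (derived from the callers in this file): __nvme_submit_cmd
 * itself takes no lock; every caller, e.g. nvme_queue_rq() and
 * nvme_pci_submit_async_event(), already holds nvmeq->q_lock via
 * spin_lock_irq(), which is what makes it safe in interrupt context.
 */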

static __le64 **iod_list(struct request *req)
{
        struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
        return (__le64 **)(iod->sg + blk_rq_nr_phys_segments(req));
}

static int nvme_init_iod(struct request *rq, struct nvme_dev *dev)
{
        struct nvme_iod *iod = blk_mq_rq_to_pdu(rq);
        int nseg = blk_rq_nr_phys_segments(rq);
        unsigned int size = blk_rq_payload_bytes(rq);

        if (nseg > NVME_INT_PAGES || size > NVME_INT_BYTES(dev)) {
                iod->sg = kmalloc(nvme_iod_alloc_size(dev, size, nseg), GFP_ATOMIC);
                if (!iod->sg)
                        return BLK_MQ_RQ_QUEUE_BUSY;
        } else {
                iod->sg = iod->inline_sg;
        }

        iod->aborted = 0;
        iod->npages = -1;
        iod->nents = 0;
        iod->length = size;

        return BLK_MQ_RQ_QUEUE_OK;
}

static void nvme_free_iod(struct nvme_dev *dev, struct request *req)
{
        struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
        const int last_prp = dev->ctrl.page_size / 8 - 1;
        int i;
        __le64 **list = iod_list(req);
        dma_addr_t prp_dma = iod->first_dma;

        if (iod->npages == 0)
                dma_pool_free(dev->prp_small_pool, list[0], prp_dma);
        for (i = 0; i < iod->npages; i++) {
                __le64 *prp_list = list[i];
                dma_addr_t next_prp_dma = le64_to_cpu(prp_list[last_prp]);
                dma_pool_free(dev->prp_page_pool, prp_list, prp_dma);
                prp_dma = next_prp_dma;
        }

        if (iod->sg != iod->inline_sg)
                kfree(iod->sg);
}

#ifdef CONFIG_BLK_DEV_INTEGRITY
static void nvme_dif_prep(u32 p, u32 v, struct t10_pi_tuple *pi)
{
        if (be32_to_cpu(pi->ref_tag) == v)
                pi->ref_tag = cpu_to_be32(p);
}

static void nvme_dif_complete(u32 p, u32 v, struct t10_pi_tuple *pi)
{
        if (be32_to_cpu(pi->ref_tag) == p)
                pi->ref_tag = cpu_to_be32(v);
}

/**
 * nvme_dif_remap - remaps ref tags to bip seed and physical lba
 *
 * The virtual start sector is the one that was originally submitted by the
 * block layer.	Due to partitioning, MD/DM cloning, etc. the actual physical
 * start sector may be different. Remap protection information to match the
 * physical LBA on writes, and back to the original seed on reads.
 *
 * Type 0 and 3 do not have a ref tag, so no remapping required.
 */
static void nvme_dif_remap(struct request *req,
                        void (*dif_swap)(u32 p, u32 v, struct t10_pi_tuple *pi))
{
        struct nvme_ns *ns = req->rq_disk->private_data;
        struct bio_integrity_payload *bip;
        struct t10_pi_tuple *pi;
        void *p, *pmap;
        u32 i, nlb, ts, phys, virt;

        if (!ns->pi_type || ns->pi_type == NVME_NS_DPS_PI_TYPE3)
                return;

        bip = bio_integrity(req->bio);
        if (!bip)
                return;

        pmap = kmap_atomic(bip->bip_vec->bv_page) + bip->bip_vec->bv_offset;

        p = pmap;
        virt = bip_get_seed(bip);
        phys = nvme_block_nr(ns, blk_rq_pos(req));
        nlb = (blk_rq_bytes(req) >> ns->lba_shift);
        ts = ns->disk->queue->integrity.tuple_size;

        for (i = 0; i < nlb; i++, virt++, phys++) {
                pi = (struct t10_pi_tuple *)p;
                dif_swap(phys, virt, pi);
                p += ts;
        }
        kunmap_atomic(pmap);
}
#else /* CONFIG_BLK_DEV_INTEGRITY */
static void nvme_dif_remap(struct request *req,
                        void (*dif_swap)(u32 p, u32 v, struct t10_pi_tuple *pi))
{
}
static void nvme_dif_prep(u32 p, u32 v, struct t10_pi_tuple *pi)
{
}
static void nvme_dif_complete(u32 p, u32 v, struct t10_pi_tuple *pi)
{
}
#endif
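
/*
 * Worked example (illustrative; the 2048-block offset is hypothetical):
 * on a Type 1 namespace with a partition starting 2048 blocks into the
 * device, phys = virt + 2048 for every sector of a request.  On write,
 * nvme_dif_remap(req, nvme_dif_prep) rewrites each tuple's ref_tag from
 * the seed (virt) to the physical LBA; when a read completes,
 * nvme_dif_remap(req, nvme_dif_complete) maps it back so the block layer
 * sees the seed it generated.
 */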

static bool nvme_setup_prps(struct nvme_dev *dev, struct request *req)
{
        struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
        struct dma_pool *pool;
        int length = blk_rq_payload_bytes(req);
        struct scatterlist *sg = iod->sg;
        int dma_len = sg_dma_len(sg);
        u64 dma_addr = sg_dma_address(sg);
        u32 page_size = dev->ctrl.page_size;
        int offset = dma_addr & (page_size - 1);
        __le64 *prp_list;
        __le64 **list = iod_list(req);
        dma_addr_t prp_dma;
        int nprps, i;

        length -= (page_size - offset);
        if (length <= 0)
                return true;

        dma_len -= (page_size - offset);
        if (dma_len) {
                dma_addr += (page_size - offset);
        } else {
                sg = sg_next(sg);
                dma_addr = sg_dma_address(sg);
                dma_len = sg_dma_len(sg);
        }

        if (length <= page_size) {
                iod->first_dma = dma_addr;
                return true;
        }

        nprps = DIV_ROUND_UP(length, page_size);
        if (nprps <= (256 / 8)) {
                pool = dev->prp_small_pool;
                iod->npages = 0;
        } else {
                pool = dev->prp_page_pool;
                iod->npages = 1;
        }

        prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
        if (!prp_list) {
                iod->first_dma = dma_addr;
                iod->npages = -1;
                return false;
        }
        list[0] = prp_list;
        iod->first_dma = prp_dma;
        i = 0;
        for (;;) {
                if (i == page_size >> 3) {
                        __le64 *old_prp_list = prp_list;
                        prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
                        if (!prp_list)
                                return false;
                        list[iod->npages++] = prp_list;
                        prp_list[0] = old_prp_list[i - 1];
                        old_prp_list[i - 1] = cpu_to_le64(prp_dma);
                        i = 1;
                }
                prp_list[i++] = cpu_to_le64(dma_addr);
                dma_len -= page_size;
                dma_addr += page_size;
                length -= page_size;
                if (length <= 0)
                        break;
                if (dma_len > 0)
                        continue;
                BUG_ON(dma_len < 0);
                sg = sg_next(sg);
                dma_addr = sg_dma_address(sg);
                dma_len = sg_dma_len(sg);
        }

        return true;
}
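
/*
 * Worked example (illustrative, assuming a 4 KiB controller page size): a
 * page-aligned 16 KiB transfer needs four PRPs.  PRP1 in the command
 * covers the first page; the remaining 12 KiB is more than one page, so
 * nprps = 3, which fits the 256-byte small pool (npages stays 0), and
 * PRP2 (iod->first_dma) points at a PRP list holding the last three page
 * addresses.  Had the transfer been 8 KiB, the 4 KiB left after PRP1
 * would fit a single page and PRP2 would carry that address directly,
 * with no list at all.
 */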

static int nvme_map_data(struct nvme_dev *dev, struct request *req,
                struct nvme_command *cmnd)
{
        struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
        struct request_queue *q = req->q;
        enum dma_data_direction dma_dir = rq_data_dir(req) ?
                        DMA_TO_DEVICE : DMA_FROM_DEVICE;
        int ret = BLK_MQ_RQ_QUEUE_ERROR;

        sg_init_table(iod->sg, blk_rq_nr_phys_segments(req));
        iod->nents = blk_rq_map_sg(q, req, iod->sg);
        if (!iod->nents)
                goto out;

        ret = BLK_MQ_RQ_QUEUE_BUSY;
        if (!dma_map_sg_attrs(dev->dev, iod->sg, iod->nents, dma_dir,
                                DMA_ATTR_NO_WARN))
                goto out;

        if (!nvme_setup_prps(dev, req))
                goto out_unmap;

        ret = BLK_MQ_RQ_QUEUE_ERROR;
        if (blk_integrity_rq(req)) {
                if (blk_rq_count_integrity_sg(q, req->bio) != 1)
                        goto out_unmap;

                sg_init_table(&iod->meta_sg, 1);
                if (blk_rq_map_integrity_sg(q, req->bio, &iod->meta_sg) != 1)
                        goto out_unmap;

                if (rq_data_dir(req))
                        nvme_dif_remap(req, nvme_dif_prep);

                if (!dma_map_sg(dev->dev, &iod->meta_sg, 1, dma_dir))
                        goto out_unmap;
        }

        cmnd->rw.dptr.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
        cmnd->rw.dptr.prp2 = cpu_to_le64(iod->first_dma);
        if (blk_integrity_rq(req))
                cmnd->rw.metadata = cpu_to_le64(sg_dma_address(&iod->meta_sg));
        return BLK_MQ_RQ_QUEUE_OK;

out_unmap:
        dma_unmap_sg(dev->dev, iod->sg, iod->nents, dma_dir);
out:
        return ret;
}

static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
{
        struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
        enum dma_data_direction dma_dir = rq_data_dir(req) ?
                        DMA_TO_DEVICE : DMA_FROM_DEVICE;

        if (iod->nents) {
                dma_unmap_sg(dev->dev, iod->sg, iod->nents, dma_dir);
                if (blk_integrity_rq(req)) {
                        if (!rq_data_dir(req))
                                nvme_dif_remap(req, nvme_dif_complete);
                        dma_unmap_sg(dev->dev, &iod->meta_sg, 1, dma_dir);
                }
        }

        nvme_cleanup_cmd(req);
        nvme_free_iod(dev, req);
}

/*
 * NOTE: ns is NULL when called on the admin queue.
 */
static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
                         const struct blk_mq_queue_data *bd)
{
        struct nvme_ns *ns = hctx->queue->queuedata;
        struct nvme_queue *nvmeq = hctx->driver_data;
        struct nvme_dev *dev = nvmeq->dev;
        struct request *req = bd->rq;
        struct nvme_command cmnd;
        int ret = BLK_MQ_RQ_QUEUE_OK;

        /*
         * If formatted with metadata, require the block layer to provide a
         * buffer unless this namespace is formatted such that the metadata
         * can be stripped/generated by the controller with PRACT=1.
         */
        if (ns && ns->ms && !blk_integrity_rq(req)) {
                if (!(ns->pi_type && ns->ms == 8) &&
                    !blk_rq_is_passthrough(req)) {
                        blk_mq_end_request(req, -EFAULT);
                        return BLK_MQ_RQ_QUEUE_OK;
                }
        }

        ret = nvme_setup_cmd(ns, req, &cmnd);
        if (ret != BLK_MQ_RQ_QUEUE_OK)
                return ret;

        ret = nvme_init_iod(req, dev);
        if (ret != BLK_MQ_RQ_QUEUE_OK)
                goto out_free_cmd;

        if (blk_rq_nr_phys_segments(req))
                ret = nvme_map_data(dev, req, &cmnd);

        if (ret != BLK_MQ_RQ_QUEUE_OK)
                goto out_cleanup_iod;

        blk_mq_start_request(req);

        spin_lock_irq(&nvmeq->q_lock);
        if (unlikely(nvmeq->cq_vector < 0)) {
                ret = BLK_MQ_RQ_QUEUE_ERROR;
                spin_unlock_irq(&nvmeq->q_lock);
                goto out_cleanup_iod;
        }
        __nvme_submit_cmd(nvmeq, &cmnd);
        nvme_process_cq(nvmeq);
        spin_unlock_irq(&nvmeq->q_lock);
        return BLK_MQ_RQ_QUEUE_OK;
out_cleanup_iod:
        nvme_free_iod(dev, req);
out_free_cmd:
        nvme_cleanup_cmd(req);
        return ret;
}

static void nvme_pci_complete_rq(struct request *req)
{
        struct nvme_iod *iod = blk_mq_rq_to_pdu(req);

        nvme_unmap_data(iod->nvmeq->dev, req);
        nvme_complete_rq(req);
}

/* We read the CQE phase first to check if the rest of the entry is valid */
static inline bool nvme_cqe_valid(struct nvme_queue *nvmeq, u16 head,
                u16 phase)
{
        return (le16_to_cpu(nvmeq->cqes[head].status) & 1) == phase;
}
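
/*
 * Illustrative walk-through of the NVMe phase-bit scheme: the CQ memory
 * starts zeroed and cq_phase starts at 1, so a slot is "new" once the
 * controller writes a status word with the phase bit set.  When head
 * wraps past q_depth the consumer flips cq_phase (see __nvme_process_cq
 * below), matching the controller, which inverts the bit it writes on
 * each pass through the queue.  E.g. with q_depth = 4, completions 0-3
 * arrive with phase 1 and completions 4-7 with phase 0.
 */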

static void __nvme_process_cq(struct nvme_queue *nvmeq, unsigned int *tag)
{
        u16 head, phase;

        head = nvmeq->cq_head;
        phase = nvmeq->cq_phase;

        while (nvme_cqe_valid(nvmeq, head, phase)) {
                struct nvme_completion cqe = nvmeq->cqes[head];
                struct request *req;

                if (++head == nvmeq->q_depth) {
                        head = 0;
                        phase = !phase;
                }

                if (tag && *tag == cqe.command_id)
                        *tag = -1;

                if (unlikely(cqe.command_id >= nvmeq->q_depth)) {
                        dev_warn(nvmeq->dev->ctrl.device,
                                "invalid id %d completed on queue %d\n",
                                cqe.command_id, le16_to_cpu(cqe.sq_id));
                        continue;
                }

                /*
                 * AEN requests are special as they don't time out and can
                 * survive any kind of queue freeze and often don't respond to
                 * aborts.  We don't even bother to allocate a struct request
                 * for them but rather special case them here.
                 */
                if (unlikely(nvmeq->qid == 0 &&
                                cqe.command_id >= NVME_AQ_BLKMQ_DEPTH)) {
                        nvme_complete_async_event(&nvmeq->dev->ctrl,
                                        cqe.status, &cqe.result);
                        continue;
                }

                req = blk_mq_tag_to_rq(*nvmeq->tags, cqe.command_id);
                nvme_end_request(req, cqe.status, cqe.result);
        }

        if (head == nvmeq->cq_head && phase == nvmeq->cq_phase)
                return;

        if (likely(nvmeq->cq_vector >= 0))
                if (nvme_dbbuf_update_and_check_event(head, nvmeq->dbbuf_cq_db,
                                                      nvmeq->dbbuf_cq_ei))
                        writel(head, nvmeq->q_db + nvmeq->dev->db_stride);
        nvmeq->cq_head = head;
        nvmeq->cq_phase = phase;

        nvmeq->cqe_seen = 1;
}

static void nvme_process_cq(struct nvme_queue *nvmeq)
{
        __nvme_process_cq(nvmeq, NULL);
}

static irqreturn_t nvme_irq(int irq, void *data)
{
        irqreturn_t result;
        struct nvme_queue *nvmeq = data;
        spin_lock(&nvmeq->q_lock);
        nvme_process_cq(nvmeq);
        result = nvmeq->cqe_seen ? IRQ_HANDLED : IRQ_NONE;
        nvmeq->cqe_seen = 0;
        spin_unlock(&nvmeq->q_lock);
        return result;
}

static irqreturn_t nvme_irq_check(int irq, void *data)
{
        struct nvme_queue *nvmeq = data;
        if (nvme_cqe_valid(nvmeq, nvmeq->cq_head, nvmeq->cq_phase))
                return IRQ_WAKE_THREAD;
        return IRQ_NONE;
}

static int nvme_poll(struct blk_mq_hw_ctx *hctx, unsigned int tag)
{
        struct nvme_queue *nvmeq = hctx->driver_data;

        if (nvme_cqe_valid(nvmeq, nvmeq->cq_head, nvmeq->cq_phase)) {
                spin_lock_irq(&nvmeq->q_lock);
                __nvme_process_cq(nvmeq, &tag);
                spin_unlock_irq(&nvmeq->q_lock);

                if (tag == -1)
                        return 1;
        }

        return 0;
}
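
/*
 * Note (derived from the NVME_AQ_BLKMQ_DEPTH define and the code below):
 * the admin queue's tag space is split in two.  blk-mq only knows about
 * tags 0..NVME_AQ_BLKMQ_DEPTH-1; command ids of NVME_AQ_BLKMQ_DEPTH and
 * above identify async event requests, which is how the submission below
 * and the AEN special case in __nvme_process_cq() find each other.
 */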

static void nvme_pci_submit_async_event(struct nvme_ctrl *ctrl, int aer_idx)
{
        struct nvme_dev *dev = to_nvme_dev(ctrl);
        struct nvme_queue *nvmeq = dev->queues[0];
        struct nvme_command c;

        memset(&c, 0, sizeof(c));
        c.common.opcode = nvme_admin_async_event;
        c.common.command_id = NVME_AQ_BLKMQ_DEPTH + aer_idx;

        spin_lock_irq(&nvmeq->q_lock);
        __nvme_submit_cmd(nvmeq, &c);
        spin_unlock_irq(&nvmeq->q_lock);
}

static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id)
{
        struct nvme_command c;

        memset(&c, 0, sizeof(c));
        c.delete_queue.opcode = opcode;
        c.delete_queue.qid = cpu_to_le16(id);

        return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0);
}

static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid,
                                                struct nvme_queue *nvmeq)
{
        struct nvme_command c;
        int flags = NVME_QUEUE_PHYS_CONTIG | NVME_CQ_IRQ_ENABLED;

        /*
         * Note: we (ab)use the fact that the prp fields survive if no data
         * is attached to the request.
         */
        memset(&c, 0, sizeof(c));
        c.create_cq.opcode = nvme_admin_create_cq;
        c.create_cq.prp1 = cpu_to_le64(nvmeq->cq_dma_addr);
        c.create_cq.cqid = cpu_to_le16(qid);
        c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
        c.create_cq.cq_flags = cpu_to_le16(flags);
        c.create_cq.irq_vector = cpu_to_le16(nvmeq->cq_vector);

        return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0);
}

static int adapter_alloc_sq(struct nvme_dev *dev, u16 qid,
                                                struct nvme_queue *nvmeq)
{
        struct nvme_command c;
        int flags = NVME_QUEUE_PHYS_CONTIG;

        /*
         * Note: we (ab)use the fact that the prp fields survive if no data
         * is attached to the request.
         */
        memset(&c, 0, sizeof(c));
        c.create_sq.opcode = nvme_admin_create_sq;
        c.create_sq.prp1 = cpu_to_le64(nvmeq->sq_dma_addr);
        c.create_sq.sqid = cpu_to_le16(qid);
        c.create_sq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
        c.create_sq.sq_flags = cpu_to_le16(flags);
        c.create_sq.cqid = cpu_to_le16(qid);

        return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0);
}

static int adapter_delete_cq(struct nvme_dev *dev, u16 cqid)
{
        return adapter_delete_queue(dev, nvme_admin_delete_cq, cqid);
}

static int adapter_delete_sq(struct nvme_dev *dev, u16 sqid)
{
        return adapter_delete_queue(dev, nvme_admin_delete_sq, sqid);
}

static void abort_endio(struct request *req, int error)
{
        struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
        struct nvme_queue *nvmeq = iod->nvmeq;

        dev_warn(nvmeq->dev->ctrl.device,
                 "Abort status: 0x%x", nvme_req(req)->status);
        atomic_inc(&nvmeq->dev->ctrl.abort_limit);
        blk_mq_free_request(req);
}

static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
{
        struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
        struct nvme_queue *nvmeq = iod->nvmeq;
        struct nvme_dev *dev = nvmeq->dev;
        struct request *abort_req;
        struct nvme_command cmd;

        /*
         * Shut down immediately if the controller times out while starting.
         * The reset work will see the pci device disabled when it gets the
         * forced cancellation error.  All outstanding requests are completed
         * on shutdown, so we return BLK_EH_HANDLED.
         */
        if (dev->ctrl.state == NVME_CTRL_RESETTING) {
                dev_warn(dev->ctrl.device,
                         "I/O %d QID %d timeout, disable controller\n",
                         req->tag, nvmeq->qid);
                nvme_dev_disable(dev, false);
                nvme_req(req)->flags |= NVME_REQ_CANCELLED;
                return BLK_EH_HANDLED;
        }

        /*
         * Shut down the controller immediately and schedule a reset if the
         * command was already aborted once before and still hasn't been
         * returned to the driver, or if this is the admin queue.
         */
        if (!nvmeq->qid || iod->aborted) {
                dev_warn(dev->ctrl.device,
                         "I/O %d QID %d timeout, reset controller\n",
                         req->tag, nvmeq->qid);
                nvme_dev_disable(dev, false);
                nvme_reset(dev);

                /*
                 * Mark the request as handled, since the inline shutdown
                 * forces all outstanding requests to complete.
                 */
                nvme_req(req)->flags |= NVME_REQ_CANCELLED;
                return BLK_EH_HANDLED;
        }

        if (atomic_dec_return(&dev->ctrl.abort_limit) < 0) {
                atomic_inc(&dev->ctrl.abort_limit);
                return BLK_EH_RESET_TIMER;
        }
        iod->aborted = 1;

        memset(&cmd, 0, sizeof(cmd));
        cmd.abort.opcode = nvme_admin_abort_cmd;
        cmd.abort.cid = req->tag;
        cmd.abort.sqid = cpu_to_le16(nvmeq->qid);

        dev_warn(nvmeq->dev->ctrl.device,
                "I/O %d QID %d timeout, aborting\n",
                 req->tag, nvmeq->qid);

        abort_req = nvme_alloc_request(dev->ctrl.admin_q, &cmd,
                        BLK_MQ_REQ_NOWAIT, NVME_QID_ANY);
        if (IS_ERR(abort_req)) {
                atomic_inc(&dev->ctrl.abort_limit);
                return BLK_EH_RESET_TIMER;
        }

        abort_req->timeout = ADMIN_TIMEOUT;
        abort_req->end_io_data = NULL;
        blk_execute_rq_nowait(abort_req->q, NULL, abort_req, 0, abort_endio);

        /*
         * The aborted req will be completed on receiving the abort req.
         * We enable the timer again.  If hit twice, it'll cause a device
         * reset, as the device then is in a faulty state.
         */
        return BLK_EH_RESET_TIMER;
}

static void nvme_free_queue(struct nvme_queue *nvmeq)
{
        dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth),
                                (void *)nvmeq->cqes, nvmeq->cq_dma_addr);
        if (nvmeq->sq_cmds)
                dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth),
                                        nvmeq->sq_cmds, nvmeq->sq_dma_addr);
        kfree(nvmeq);
}

static void nvme_free_queues(struct nvme_dev *dev, int lowest)
{
        int i;

        for (i = dev->queue_count - 1; i >= lowest; i--) {
                struct nvme_queue *nvmeq = dev->queues[i];
                dev->queue_count--;
                dev->queues[i] = NULL;
                nvme_free_queue(nvmeq);
        }
}

/**
 * nvme_suspend_queue - put queue into suspended state
 * @nvmeq - queue to suspend
 */
static int nvme_suspend_queue(struct nvme_queue *nvmeq)
{
        int vector;

        spin_lock_irq(&nvmeq->q_lock);
        if (nvmeq->cq_vector == -1) {
                spin_unlock_irq(&nvmeq->q_lock);
                return 1;
        }
        vector = nvmeq_irq(nvmeq);
        nvmeq->dev->online_queues--;
        nvmeq->cq_vector = -1;
        spin_unlock_irq(&nvmeq->q_lock);

        if (!nvmeq->qid && nvmeq->dev->ctrl.admin_q)
                blk_mq_stop_hw_queues(nvmeq->dev->ctrl.admin_q);

        free_irq(vector, nvmeq);

        return 0;
}

static void nvme_disable_admin_queue(struct nvme_dev *dev, bool shutdown)
{
        struct nvme_queue *nvmeq = dev->queues[0];

        if (!nvmeq)
                return;
        if (nvme_suspend_queue(nvmeq))
                return;

        if (shutdown)
                nvme_shutdown_ctrl(&dev->ctrl);
        else
                nvme_disable_ctrl(&dev->ctrl, lo_hi_readq(
                                                dev->bar + NVME_REG_CAP));

        spin_lock_irq(&nvmeq->q_lock);
        nvme_process_cq(nvmeq);
        spin_unlock_irq(&nvmeq->q_lock);
}

static int nvme_cmb_qdepth(struct nvme_dev *dev, int nr_io_queues,
                                int entry_size)
{
        int q_depth = dev->q_depth;
        unsigned q_size_aligned = roundup(q_depth * entry_size,
                                          dev->ctrl.page_size);

        if (q_size_aligned * nr_io_queues > dev->cmb_size) {
                u64 mem_per_q = div_u64(dev->cmb_size, nr_io_queues);
                mem_per_q = round_down(mem_per_q, dev->ctrl.page_size);
                q_depth = div_u64(mem_per_q, entry_size);

                /*
                 * Ensure the reduced q_depth is above some threshold where it
                 * would be better to map queues in system memory with the
                 * original depth
                 */
                if (q_depth < 64)
                        return -ENOMEM;
        }

        return q_depth;
}
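
/*
 * Worked example (illustrative): with q_depth = 1024 and 64-byte SQ
 * entries, each queue wants 64 KiB of CMB.  Eight I/O queues do not fit
 * in a 256 KiB CMB, so each queue gets 256 KiB / 8 = 32 KiB (already
 * page aligned), i.e. q_depth = 32768 / 64 = 512, which clears the
 * threshold of 64 above.
 */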
116657dacad5SJay Sternberg nvmeq->qid = qid; 116757dacad5SJay Sternberg nvmeq->cq_vector = -1; 116857dacad5SJay Sternberg dev->queues[qid] = nvmeq; 116957dacad5SJay Sternberg dev->queue_count++; 117057dacad5SJay Sternberg 117157dacad5SJay Sternberg return nvmeq; 117257dacad5SJay Sternberg 117357dacad5SJay Sternberg free_cqdma: 117457dacad5SJay Sternberg dma_free_coherent(dev->dev, CQ_SIZE(depth), (void *)nvmeq->cqes, 117557dacad5SJay Sternberg nvmeq->cq_dma_addr); 117657dacad5SJay Sternberg free_nvmeq: 117757dacad5SJay Sternberg kfree(nvmeq); 117857dacad5SJay Sternberg return NULL; 117957dacad5SJay Sternberg } 118057dacad5SJay Sternberg 1181dca51e78SChristoph Hellwig static int queue_request_irq(struct nvme_queue *nvmeq) 118257dacad5SJay Sternberg { 118357dacad5SJay Sternberg if (use_threaded_interrupts) 1184dca51e78SChristoph Hellwig return request_threaded_irq(nvmeq_irq(nvmeq), nvme_irq_check, 1185dca51e78SChristoph Hellwig nvme_irq, IRQF_SHARED, nvmeq->irqname, nvmeq); 1186dca51e78SChristoph Hellwig else 1187dca51e78SChristoph Hellwig return request_irq(nvmeq_irq(nvmeq), nvme_irq, IRQF_SHARED, 1188dca51e78SChristoph Hellwig nvmeq->irqname, nvmeq); 118957dacad5SJay Sternberg } 119057dacad5SJay Sternberg 119157dacad5SJay Sternberg static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid) 119257dacad5SJay Sternberg { 119357dacad5SJay Sternberg struct nvme_dev *dev = nvmeq->dev; 119457dacad5SJay Sternberg 119557dacad5SJay Sternberg spin_lock_irq(&nvmeq->q_lock); 119657dacad5SJay Sternberg nvmeq->sq_tail = 0; 119757dacad5SJay Sternberg nvmeq->cq_head = 0; 119857dacad5SJay Sternberg nvmeq->cq_phase = 1; 119957dacad5SJay Sternberg nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride]; 120057dacad5SJay Sternberg memset((void *)nvmeq->cqes, 0, CQ_SIZE(nvmeq->q_depth)); 1201f9f38e33SHelen Koike nvme_dbbuf_init(dev, nvmeq, qid); 120257dacad5SJay Sternberg dev->online_queues++; 120357dacad5SJay Sternberg spin_unlock_irq(&nvmeq->q_lock); 120457dacad5SJay Sternberg } 120557dacad5SJay Sternberg 120657dacad5SJay Sternberg static int nvme_create_queue(struct nvme_queue *nvmeq, int qid) 120757dacad5SJay Sternberg { 120857dacad5SJay Sternberg struct nvme_dev *dev = nvmeq->dev; 120957dacad5SJay Sternberg int result; 121057dacad5SJay Sternberg 121157dacad5SJay Sternberg nvmeq->cq_vector = qid - 1; 121257dacad5SJay Sternberg result = adapter_alloc_cq(dev, qid, nvmeq); 121357dacad5SJay Sternberg if (result < 0) 121457dacad5SJay Sternberg return result; 121557dacad5SJay Sternberg 121657dacad5SJay Sternberg result = adapter_alloc_sq(dev, qid, nvmeq); 121757dacad5SJay Sternberg if (result < 0) 121857dacad5SJay Sternberg goto release_cq; 121957dacad5SJay Sternberg 1220dca51e78SChristoph Hellwig result = queue_request_irq(nvmeq); 122157dacad5SJay Sternberg if (result < 0) 122257dacad5SJay Sternberg goto release_sq; 122357dacad5SJay Sternberg 122457dacad5SJay Sternberg nvme_init_queue(nvmeq, qid); 122557dacad5SJay Sternberg return result; 122657dacad5SJay Sternberg 122757dacad5SJay Sternberg release_sq: 122857dacad5SJay Sternberg adapter_delete_sq(dev, qid); 122957dacad5SJay Sternberg release_cq: 123057dacad5SJay Sternberg adapter_delete_cq(dev, qid); 123157dacad5SJay Sternberg return result; 123257dacad5SJay Sternberg } 123357dacad5SJay Sternberg 1234f363b089SEric Biggers static const struct blk_mq_ops nvme_mq_admin_ops = { 123557dacad5SJay Sternberg .queue_rq = nvme_queue_rq, 123677f02a7aSChristoph Hellwig .complete = nvme_pci_complete_rq, 123757dacad5SJay Sternberg .init_hctx = nvme_admin_init_hctx, 
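	/*
	 * Added commentary: the admin tag set shares .queue_rq, .complete and
	 * .timeout with the I/O ops below, but carries its own hctx init/exit
	 * hooks and omits .map_queues and .poll, since the admin queue always
	 * uses vector 0 and is never polled.
	 */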
123857dacad5SJay Sternberg .exit_hctx = nvme_admin_exit_hctx, 123957dacad5SJay Sternberg .init_request = nvme_admin_init_request, 124057dacad5SJay Sternberg .timeout = nvme_timeout, 124157dacad5SJay Sternberg }; 124257dacad5SJay Sternberg 1243f363b089SEric Biggers static const struct blk_mq_ops nvme_mq_ops = { 124457dacad5SJay Sternberg .queue_rq = nvme_queue_rq, 124577f02a7aSChristoph Hellwig .complete = nvme_pci_complete_rq, 124657dacad5SJay Sternberg .init_hctx = nvme_init_hctx, 124757dacad5SJay Sternberg .init_request = nvme_init_request, 1248dca51e78SChristoph Hellwig .map_queues = nvme_pci_map_queues, 124957dacad5SJay Sternberg .timeout = nvme_timeout, 1250a0fa9647SJens Axboe .poll = nvme_poll, 125157dacad5SJay Sternberg }; 125257dacad5SJay Sternberg 125357dacad5SJay Sternberg static void nvme_dev_remove_admin(struct nvme_dev *dev) 125457dacad5SJay Sternberg { 12551c63dc66SChristoph Hellwig if (dev->ctrl.admin_q && !blk_queue_dying(dev->ctrl.admin_q)) { 125669d9a99cSKeith Busch /* 125769d9a99cSKeith Busch * If the controller was reset during removal, it's possible 125869d9a99cSKeith Busch * user requests may be waiting on a stopped queue. Start the 125969d9a99cSKeith Busch * queue to flush these to completion. 126069d9a99cSKeith Busch */ 126169d9a99cSKeith Busch blk_mq_start_stopped_hw_queues(dev->ctrl.admin_q, true); 12621c63dc66SChristoph Hellwig blk_cleanup_queue(dev->ctrl.admin_q); 126357dacad5SJay Sternberg blk_mq_free_tag_set(&dev->admin_tagset); 126457dacad5SJay Sternberg } 126557dacad5SJay Sternberg } 126657dacad5SJay Sternberg 126757dacad5SJay Sternberg static int nvme_alloc_admin_tags(struct nvme_dev *dev) 126857dacad5SJay Sternberg { 12691c63dc66SChristoph Hellwig if (!dev->ctrl.admin_q) { 127057dacad5SJay Sternberg dev->admin_tagset.ops = &nvme_mq_admin_ops; 127157dacad5SJay Sternberg dev->admin_tagset.nr_hw_queues = 1; 1272e3e9d50cSKeith Busch 1273e3e9d50cSKeith Busch /* 1274e3e9d50cSKeith Busch * Subtract one to leave an empty queue entry for 'Full Queue' 1275e3e9d50cSKeith Busch * condition. See NVM-Express 1.2 specification, section 4.1.2. 
1276e3e9d50cSKeith Busch */ 1277e3e9d50cSKeith Busch dev->admin_tagset.queue_depth = NVME_AQ_BLKMQ_DEPTH - 1; 127857dacad5SJay Sternberg dev->admin_tagset.timeout = ADMIN_TIMEOUT; 127957dacad5SJay Sternberg dev->admin_tagset.numa_node = dev_to_node(dev->dev); 128057dacad5SJay Sternberg dev->admin_tagset.cmd_size = nvme_cmd_size(dev); 1281d3484991SJens Axboe dev->admin_tagset.flags = BLK_MQ_F_NO_SCHED; 128257dacad5SJay Sternberg dev->admin_tagset.driver_data = dev; 128357dacad5SJay Sternberg 128457dacad5SJay Sternberg if (blk_mq_alloc_tag_set(&dev->admin_tagset)) 128557dacad5SJay Sternberg return -ENOMEM; 128657dacad5SJay Sternberg 12871c63dc66SChristoph Hellwig dev->ctrl.admin_q = blk_mq_init_queue(&dev->admin_tagset); 12881c63dc66SChristoph Hellwig if (IS_ERR(dev->ctrl.admin_q)) { 128957dacad5SJay Sternberg blk_mq_free_tag_set(&dev->admin_tagset); 129057dacad5SJay Sternberg return -ENOMEM; 129157dacad5SJay Sternberg } 12921c63dc66SChristoph Hellwig if (!blk_get_queue(dev->ctrl.admin_q)) { 129357dacad5SJay Sternberg nvme_dev_remove_admin(dev); 12941c63dc66SChristoph Hellwig dev->ctrl.admin_q = NULL; 129557dacad5SJay Sternberg return -ENODEV; 129657dacad5SJay Sternberg } 129757dacad5SJay Sternberg } else 129825646264SKeith Busch blk_mq_start_stopped_hw_queues(dev->ctrl.admin_q, true); 129957dacad5SJay Sternberg 130057dacad5SJay Sternberg return 0; 130157dacad5SJay Sternberg } 130257dacad5SJay Sternberg 130357dacad5SJay Sternberg static int nvme_configure_admin_queue(struct nvme_dev *dev) 130457dacad5SJay Sternberg { 130557dacad5SJay Sternberg int result; 130657dacad5SJay Sternberg u32 aqa; 13077a67cbeaSChristoph Hellwig u64 cap = lo_hi_readq(dev->bar + NVME_REG_CAP); 130857dacad5SJay Sternberg struct nvme_queue *nvmeq; 130957dacad5SJay Sternberg 13108ef2074dSGabriel Krisman Bertazi dev->subsystem = readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 1, 0) ? 
131157dacad5SJay Sternberg NVME_CAP_NSSRC(cap) : 0; 131257dacad5SJay Sternberg 13137a67cbeaSChristoph Hellwig if (dev->subsystem && 13147a67cbeaSChristoph Hellwig (readl(dev->bar + NVME_REG_CSTS) & NVME_CSTS_NSSRO)) 13157a67cbeaSChristoph Hellwig writel(NVME_CSTS_NSSRO, dev->bar + NVME_REG_CSTS); 131657dacad5SJay Sternberg 13175fd4ce1bSChristoph Hellwig result = nvme_disable_ctrl(&dev->ctrl, cap); 131857dacad5SJay Sternberg if (result < 0) 131957dacad5SJay Sternberg return result; 132057dacad5SJay Sternberg 132157dacad5SJay Sternberg nvmeq = dev->queues[0]; 132257dacad5SJay Sternberg if (!nvmeq) { 1323d3af3ecdSShaohua Li nvmeq = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH, 1324d3af3ecdSShaohua Li dev_to_node(dev->dev)); 132557dacad5SJay Sternberg if (!nvmeq) 132657dacad5SJay Sternberg return -ENOMEM; 132757dacad5SJay Sternberg } 132857dacad5SJay Sternberg 132957dacad5SJay Sternberg aqa = nvmeq->q_depth - 1; 133057dacad5SJay Sternberg aqa |= aqa << 16; 133157dacad5SJay Sternberg 13327a67cbeaSChristoph Hellwig writel(aqa, dev->bar + NVME_REG_AQA); 13337a67cbeaSChristoph Hellwig lo_hi_writeq(nvmeq->sq_dma_addr, dev->bar + NVME_REG_ASQ); 13347a67cbeaSChristoph Hellwig lo_hi_writeq(nvmeq->cq_dma_addr, dev->bar + NVME_REG_ACQ); 133557dacad5SJay Sternberg 13365fd4ce1bSChristoph Hellwig result = nvme_enable_ctrl(&dev->ctrl, cap); 133757dacad5SJay Sternberg if (result) 1338d4875622SKeith Busch return result; 133957dacad5SJay Sternberg 134057dacad5SJay Sternberg nvmeq->cq_vector = 0; 1341dca51e78SChristoph Hellwig result = queue_request_irq(nvmeq); 134257dacad5SJay Sternberg if (result) { 134357dacad5SJay Sternberg nvmeq->cq_vector = -1; 1344d4875622SKeith Busch return result; 134557dacad5SJay Sternberg } 134657dacad5SJay Sternberg 134757dacad5SJay Sternberg return result; 134857dacad5SJay Sternberg } 134957dacad5SJay Sternberg 1350c875a709SGuilherme G. Piccoli static bool nvme_should_reset(struct nvme_dev *dev, u32 csts) 1351c875a709SGuilherme G. Piccoli { 1352c875a709SGuilherme G. Piccoli 1353c875a709SGuilherme G. Piccoli /* If true, indicates loss of adapter communication, possibly by an 1354c875a709SGuilherme G. Piccoli * NVMe Subsystem reset. 1355c875a709SGuilherme G. Piccoli */ 1356c875a709SGuilherme G. Piccoli bool nssro = dev->subsystem && (csts & NVME_CSTS_NSSRO); 1357c875a709SGuilherme G. Piccoli 1358c875a709SGuilherme G. Piccoli /* If there is a reset ongoing, we shouldn't reset again. */ 1359c875a709SGuilherme G. Piccoli if (work_busy(&dev->reset_work)) 1360c875a709SGuilherme G. Piccoli return false; 1361c875a709SGuilherme G. Piccoli 1362c875a709SGuilherme G. Piccoli /* We shouldn't reset unless the controller is in a fatal error state 1363c875a709SGuilherme G. Piccoli * _or_ we have lost communication with it. 1364c875a709SGuilherme G. Piccoli */ 1365c875a709SGuilherme G. Piccoli if (!(csts & NVME_CSTS_CFS) && !nssro) 1366c875a709SGuilherme G. Piccoli return false; 1367c875a709SGuilherme G. Piccoli 1368c875a709SGuilherme G. Piccoli /* If the PCI error recovery process is happening, we cannot reset, or 1369c875a709SGuilherme G. Piccoli * the recovery mechanism will surely fail. 1370c875a709SGuilherme G. Piccoli */ 1371c875a709SGuilherme G. Piccoli if (pci_channel_offline(to_pci_dev(dev->dev))) 1372c875a709SGuilherme G. Piccoli return false; 1373c875a709SGuilherme G. Piccoli 1374c875a709SGuilherme G. Piccoli return true; 1375c875a709SGuilherme G. Piccoli } 1376c875a709SGuilherme G.
Piccoli 1377d2a61918SAndy Lutomirski static void nvme_warn_reset(struct nvme_dev *dev, u32 csts) 1378d2a61918SAndy Lutomirski { 1379d2a61918SAndy Lutomirski /* Read a config register to help see what died. */ 1380d2a61918SAndy Lutomirski u16 pci_status; 1381d2a61918SAndy Lutomirski int result; 1382d2a61918SAndy Lutomirski 1383d2a61918SAndy Lutomirski result = pci_read_config_word(to_pci_dev(dev->dev), PCI_STATUS, 1384d2a61918SAndy Lutomirski &pci_status); 1385d2a61918SAndy Lutomirski if (result == PCIBIOS_SUCCESSFUL) 1386d2a61918SAndy Lutomirski dev_warn(dev->dev, 1387d2a61918SAndy Lutomirski "controller is down; will reset: CSTS=0x%x, PCI_STATUS=0x%hx\n", 1388d2a61918SAndy Lutomirski csts, pci_status); 1389d2a61918SAndy Lutomirski else 1390d2a61918SAndy Lutomirski dev_warn(dev->dev, 1391d2a61918SAndy Lutomirski "controller is down; will reset: CSTS=0x%x, PCI_STATUS read failed (%d)\n", 1392d2a61918SAndy Lutomirski csts, result); 1393d2a61918SAndy Lutomirski } 1394d2a61918SAndy Lutomirski 13952d55cd5fSChristoph Hellwig static void nvme_watchdog_timer(unsigned long data) 139657dacad5SJay Sternberg { 13972d55cd5fSChristoph Hellwig struct nvme_dev *dev = (struct nvme_dev *)data; 13987a67cbeaSChristoph Hellwig u32 csts = readl(dev->bar + NVME_REG_CSTS); 139957dacad5SJay Sternberg 1400c875a709SGuilherme G. Piccoli /* Skip controllers under certain specific conditions. */ 1401c875a709SGuilherme G. Piccoli if (nvme_should_reset(dev, csts)) { 1402c5f6ce97SKeith Busch if (!nvme_reset(dev)) 1403d2a61918SAndy Lutomirski nvme_warn_reset(dev, csts); 14042d55cd5fSChristoph Hellwig return; 140557dacad5SJay Sternberg } 140657dacad5SJay Sternberg 14072d55cd5fSChristoph Hellwig mod_timer(&dev->watchdog_timer, round_jiffies(jiffies + HZ)); 140857dacad5SJay Sternberg } 140957dacad5SJay Sternberg 1410749941f2SChristoph Hellwig static int nvme_create_io_queues(struct nvme_dev *dev) 141157dacad5SJay Sternberg { 1412949928c1SKeith Busch unsigned i, max; 1413749941f2SChristoph Hellwig int ret = 0; 141457dacad5SJay Sternberg 1415749941f2SChristoph Hellwig for (i = dev->queue_count; i <= dev->max_qid; i++) { 1416d3af3ecdSShaohua Li /* vector == qid - 1, match nvme_create_queue */ 1417d3af3ecdSShaohua Li if (!nvme_alloc_queue(dev, i, dev->q_depth, 1418d3af3ecdSShaohua Li pci_irq_get_node(to_pci_dev(dev->dev), i - 1))) { 1419749941f2SChristoph Hellwig ret = -ENOMEM; 142057dacad5SJay Sternberg break; 1421749941f2SChristoph Hellwig } 1422749941f2SChristoph Hellwig } 142357dacad5SJay Sternberg 1424949928c1SKeith Busch max = min(dev->max_qid, dev->queue_count - 1); 1425949928c1SKeith Busch for (i = dev->online_queues; i <= max; i++) { 1426749941f2SChristoph Hellwig ret = nvme_create_queue(dev->queues[i], i); 1427d4875622SKeith Busch if (ret) 142857dacad5SJay Sternberg break; 142957dacad5SJay Sternberg } 143057dacad5SJay Sternberg 1431749941f2SChristoph Hellwig /* 1432749941f2SChristoph Hellwig * Ignore failing Create SQ/CQ commands; we can continue with fewer 1433749941f2SChristoph Hellwig * than the desired number of queues, and even a controller without 1434749941f2SChristoph Hellwig * I/O queues can still be used to issue admin commands. This might 1435749941f2SChristoph Hellwig * be useful for upgrading buggy firmware, for example. 1436749941f2SChristoph Hellwig */ 1437749941f2SChristoph Hellwig return ret >= 0 ?
0 : ret; 143857dacad5SJay Sternberg } 143957dacad5SJay Sternberg 1440202021c1SStephen Bates static ssize_t nvme_cmb_show(struct device *dev, 1441202021c1SStephen Bates struct device_attribute *attr, 1442202021c1SStephen Bates char *buf) 1443202021c1SStephen Bates { 1444202021c1SStephen Bates struct nvme_dev *ndev = to_nvme_dev(dev_get_drvdata(dev)); 1445202021c1SStephen Bates 1446c965809cSStephen Bates return scnprintf(buf, PAGE_SIZE, "cmbloc : x%08x\ncmbsz : x%08x\n", 1447202021c1SStephen Bates ndev->cmbloc, ndev->cmbsz); 1448202021c1SStephen Bates } 1449202021c1SStephen Bates static DEVICE_ATTR(cmb, S_IRUGO, nvme_cmb_show, NULL); 1450202021c1SStephen Bates 145157dacad5SJay Sternberg static void __iomem *nvme_map_cmb(struct nvme_dev *dev) 145257dacad5SJay Sternberg { 145357dacad5SJay Sternberg u64 szu, size, offset; 145457dacad5SJay Sternberg resource_size_t bar_size; 145557dacad5SJay Sternberg struct pci_dev *pdev = to_pci_dev(dev->dev); 145657dacad5SJay Sternberg void __iomem *cmb; 145757dacad5SJay Sternberg dma_addr_t dma_addr; 145857dacad5SJay Sternberg 14597a67cbeaSChristoph Hellwig dev->cmbsz = readl(dev->bar + NVME_REG_CMBSZ); 146057dacad5SJay Sternberg if (!(NVME_CMB_SZ(dev->cmbsz))) 146157dacad5SJay Sternberg return NULL; 1462202021c1SStephen Bates dev->cmbloc = readl(dev->bar + NVME_REG_CMBLOC); 146357dacad5SJay Sternberg 1464202021c1SStephen Bates if (!use_cmb_sqes) 1465202021c1SStephen Bates return NULL; 146657dacad5SJay Sternberg 146757dacad5SJay Sternberg szu = (u64)1 << (12 + 4 * NVME_CMB_SZU(dev->cmbsz)); 146857dacad5SJay Sternberg size = szu * NVME_CMB_SZ(dev->cmbsz); 1469202021c1SStephen Bates offset = szu * NVME_CMB_OFST(dev->cmbloc); 1470202021c1SStephen Bates bar_size = pci_resource_len(pdev, NVME_CMB_BIR(dev->cmbloc)); 147157dacad5SJay Sternberg 147257dacad5SJay Sternberg if (offset > bar_size) 147357dacad5SJay Sternberg return NULL; 147457dacad5SJay Sternberg 147557dacad5SJay Sternberg /* 147657dacad5SJay Sternberg * Controllers may support a CMB size larger than their BAR, 147757dacad5SJay Sternberg * for example, due to being behind a bridge. 
Reduce the CMB to 147857dacad5SJay Sternberg * the reported size of the BAR. 147957dacad5SJay Sternberg */ 148057dacad5SJay Sternberg if (size > bar_size - offset) 148157dacad5SJay Sternberg size = bar_size - offset; 148257dacad5SJay Sternberg 1483202021c1SStephen Bates dma_addr = pci_resource_start(pdev, NVME_CMB_BIR(dev->cmbloc)) + offset; 148457dacad5SJay Sternberg cmb = ioremap_wc(dma_addr, size); 148557dacad5SJay Sternberg if (!cmb) 148657dacad5SJay Sternberg return NULL; 148757dacad5SJay Sternberg 148857dacad5SJay Sternberg dev->cmb_dma_addr = dma_addr; 148957dacad5SJay Sternberg dev->cmb_size = size; 149057dacad5SJay Sternberg return cmb; 149157dacad5SJay Sternberg } 149257dacad5SJay Sternberg 149357dacad5SJay Sternberg static inline void nvme_release_cmb(struct nvme_dev *dev) 149457dacad5SJay Sternberg { 149557dacad5SJay Sternberg if (dev->cmb) { 149657dacad5SJay Sternberg iounmap(dev->cmb); 149757dacad5SJay Sternberg dev->cmb = NULL; 149857dacad5SJay Sternberg } 149957dacad5SJay Sternberg } 150057dacad5SJay Sternberg 150157dacad5SJay Sternberg static size_t db_bar_size(struct nvme_dev *dev, unsigned nr_io_queues) 150257dacad5SJay Sternberg { 150357dacad5SJay Sternberg return 4096 + ((nr_io_queues + 1) * 8 * dev->db_stride); 150457dacad5SJay Sternberg } 150557dacad5SJay Sternberg 150657dacad5SJay Sternberg static int nvme_setup_io_queues(struct nvme_dev *dev) 150757dacad5SJay Sternberg { 150857dacad5SJay Sternberg struct nvme_queue *adminq = dev->queues[0]; 150957dacad5SJay Sternberg struct pci_dev *pdev = to_pci_dev(dev->dev); 1510dca51e78SChristoph Hellwig int result, nr_io_queues, size; 151157dacad5SJay Sternberg 15122800b8e7SKeith Busch nr_io_queues = num_online_cpus(); 15139a0be7abSChristoph Hellwig result = nvme_set_queue_count(&dev->ctrl, &nr_io_queues); 15149a0be7abSChristoph Hellwig if (result < 0) 151557dacad5SJay Sternberg return result; 15169a0be7abSChristoph Hellwig 1517f5fa90dcSChristoph Hellwig if (nr_io_queues == 0) 1518a5229050SKeith Busch return 0; 151957dacad5SJay Sternberg 152057dacad5SJay Sternberg if (dev->cmb && NVME_CMB_SQS(dev->cmbsz)) { 152157dacad5SJay Sternberg result = nvme_cmb_qdepth(dev, nr_io_queues, 152257dacad5SJay Sternberg sizeof(struct nvme_command)); 152357dacad5SJay Sternberg if (result > 0) 152457dacad5SJay Sternberg dev->q_depth = result; 152557dacad5SJay Sternberg else 152657dacad5SJay Sternberg nvme_release_cmb(dev); 152757dacad5SJay Sternberg } 152857dacad5SJay Sternberg 152957dacad5SJay Sternberg size = db_bar_size(dev, nr_io_queues); 153057dacad5SJay Sternberg if (size > 8192) { 153157dacad5SJay Sternberg iounmap(dev->bar); 153257dacad5SJay Sternberg do { 153357dacad5SJay Sternberg dev->bar = ioremap(pci_resource_start(pdev, 0), size); 153457dacad5SJay Sternberg if (dev->bar) 153557dacad5SJay Sternberg break; 153657dacad5SJay Sternberg if (!--nr_io_queues) 153757dacad5SJay Sternberg return -ENOMEM; 153857dacad5SJay Sternberg size = db_bar_size(dev, nr_io_queues); 153957dacad5SJay Sternberg } while (1); 15407a67cbeaSChristoph Hellwig dev->dbs = dev->bar + 4096; 154157dacad5SJay Sternberg adminq->q_db = dev->dbs; 154257dacad5SJay Sternberg } 154357dacad5SJay Sternberg 154457dacad5SJay Sternberg /* Deregister the admin queue's interrupt */ 1545dca51e78SChristoph Hellwig free_irq(pci_irq_vector(pdev, 0), adminq); 154657dacad5SJay Sternberg 154757dacad5SJay Sternberg /* 154857dacad5SJay Sternberg * If we enabled MSI-X early because the device doesn't support INTx, 154957dacad5SJay Sternberg * disable it again before setting up the full range we need.
155057dacad5SJay Sternberg */ 1551dca51e78SChristoph Hellwig pci_free_irq_vectors(pdev); 1552dca51e78SChristoph Hellwig nr_io_queues = pci_alloc_irq_vectors(pdev, 1, nr_io_queues, 1553dca51e78SChristoph Hellwig PCI_IRQ_ALL_TYPES | PCI_IRQ_AFFINITY); 1554dca51e78SChristoph Hellwig if (nr_io_queues <= 0) 1555dca51e78SChristoph Hellwig return -EIO; 1556dca51e78SChristoph Hellwig dev->max_qid = nr_io_queues; 155757dacad5SJay Sternberg 155857dacad5SJay Sternberg /* 155957dacad5SJay Sternberg * Should investigate if there's a performance win from allocating 156057dacad5SJay Sternberg * more queues than interrupt vectors; it might allow the submission 156157dacad5SJay Sternberg * path to scale better, even if the receive path is limited by the 156257dacad5SJay Sternberg * number of interrupts. 156357dacad5SJay Sternberg */ 156457dacad5SJay Sternberg 1565dca51e78SChristoph Hellwig result = queue_request_irq(adminq); 156657dacad5SJay Sternberg if (result) { 156757dacad5SJay Sternberg adminq->cq_vector = -1; 1568d4875622SKeith Busch return result; 156957dacad5SJay Sternberg } 1570749941f2SChristoph Hellwig return nvme_create_io_queues(dev); 157157dacad5SJay Sternberg } 157257dacad5SJay Sternberg 1573db3cbfffSKeith Busch static void nvme_del_queue_end(struct request *req, int error) 1574db3cbfffSKeith Busch { 1575db3cbfffSKeith Busch struct nvme_queue *nvmeq = req->end_io_data; 1576db3cbfffSKeith Busch 1577db3cbfffSKeith Busch blk_mq_free_request(req); 1578db3cbfffSKeith Busch complete(&nvmeq->dev->ioq_wait); 1579db3cbfffSKeith Busch } 1580db3cbfffSKeith Busch 1581db3cbfffSKeith Busch static void nvme_del_cq_end(struct request *req, int error) 1582db3cbfffSKeith Busch { 1583db3cbfffSKeith Busch struct nvme_queue *nvmeq = req->end_io_data; 1584db3cbfffSKeith Busch 1585db3cbfffSKeith Busch if (!error) { 1586db3cbfffSKeith Busch unsigned long flags; 1587db3cbfffSKeith Busch 15882e39e0f6SMing Lin /* 15892e39e0f6SMing Lin * We might be called with the AQ q_lock held 15902e39e0f6SMing Lin * and the I/O queue q_lock should always 15912e39e0f6SMing Lin * nest inside the AQ one. 15922e39e0f6SMing Lin */ 15932e39e0f6SMing Lin spin_lock_irqsave_nested(&nvmeq->q_lock, flags, 15942e39e0f6SMing Lin SINGLE_DEPTH_NESTING); 1595db3cbfffSKeith Busch nvme_process_cq(nvmeq); 1596db3cbfffSKeith Busch spin_unlock_irqrestore(&nvmeq->q_lock, flags); 1597db3cbfffSKeith Busch } 1598db3cbfffSKeith Busch 1599db3cbfffSKeith Busch nvme_del_queue_end(req, error); 1600db3cbfffSKeith Busch } 1601db3cbfffSKeith Busch 1602db3cbfffSKeith Busch static int nvme_delete_queue(struct nvme_queue *nvmeq, u8 opcode) 1603db3cbfffSKeith Busch { 1604db3cbfffSKeith Busch struct request_queue *q = nvmeq->dev->ctrl.admin_q; 1605db3cbfffSKeith Busch struct request *req; 1606db3cbfffSKeith Busch struct nvme_command cmd; 1607db3cbfffSKeith Busch 1608db3cbfffSKeith Busch memset(&cmd, 0, sizeof(cmd)); 1609db3cbfffSKeith Busch cmd.delete_queue.opcode = opcode; 1610db3cbfffSKeith Busch cmd.delete_queue.qid = cpu_to_le16(nvmeq->qid); 1611db3cbfffSKeith Busch 1612eb71f435SChristoph Hellwig req = nvme_alloc_request(q, &cmd, BLK_MQ_REQ_NOWAIT, NVME_QID_ANY); 1613db3cbfffSKeith Busch if (IS_ERR(req)) 1614db3cbfffSKeith Busch return PTR_ERR(req); 1615db3cbfffSKeith Busch 1616db3cbfffSKeith Busch req->timeout = ADMIN_TIMEOUT; 1617db3cbfffSKeith Busch req->end_io_data = nvmeq; 1618db3cbfffSKeith Busch 1619db3cbfffSKeith Busch blk_execute_rq_nowait(q, NULL, req, false, 1620db3cbfffSKeith Busch opcode == nvme_admin_delete_cq ? 
1621db3cbfffSKeith Busch nvme_del_cq_end : nvme_del_queue_end); 1622db3cbfffSKeith Busch return 0; 1623db3cbfffSKeith Busch } 1624db3cbfffSKeith Busch 162570659060SKeith Busch static void nvme_disable_io_queues(struct nvme_dev *dev, int queues) 1626db3cbfffSKeith Busch { 162770659060SKeith Busch int pass; 1628db3cbfffSKeith Busch unsigned long timeout; 1629db3cbfffSKeith Busch u8 opcode = nvme_admin_delete_sq; 1630db3cbfffSKeith Busch 1631db3cbfffSKeith Busch for (pass = 0; pass < 2; pass++) { 1632014a0d60SKeith Busch int sent = 0, i = queues; 1633db3cbfffSKeith Busch 1634db3cbfffSKeith Busch reinit_completion(&dev->ioq_wait); 1635db3cbfffSKeith Busch retry: 1636db3cbfffSKeith Busch timeout = ADMIN_TIMEOUT; 1637c21377f8SGabriel Krisman Bertazi for (; i > 0; i--, sent++) 1638c21377f8SGabriel Krisman Bertazi if (nvme_delete_queue(dev->queues[i], opcode)) 1639db3cbfffSKeith Busch break; 1640c21377f8SGabriel Krisman Bertazi 1641db3cbfffSKeith Busch while (sent--) { 1642db3cbfffSKeith Busch timeout = wait_for_completion_io_timeout(&dev->ioq_wait, timeout); 1643db3cbfffSKeith Busch if (timeout == 0) 1644db3cbfffSKeith Busch return; 1645db3cbfffSKeith Busch if (i) 1646db3cbfffSKeith Busch goto retry; 1647db3cbfffSKeith Busch } 1648db3cbfffSKeith Busch opcode = nvme_admin_delete_cq; 1649db3cbfffSKeith Busch } 1650db3cbfffSKeith Busch } 1651db3cbfffSKeith Busch 165257dacad5SJay Sternberg /* 165357dacad5SJay Sternberg * Return: error value if an error occurred setting up the queues or calling 165457dacad5SJay Sternberg * Identify Device. 0 if these succeeded, even if adding some of the 165557dacad5SJay Sternberg * namespaces failed. At the moment, these failures are silent. TBD which 165657dacad5SJay Sternberg * failures should be reported. 165757dacad5SJay Sternberg */ 165857dacad5SJay Sternberg static int nvme_dev_add(struct nvme_dev *dev) 165957dacad5SJay Sternberg { 16605bae7f73SChristoph Hellwig if (!dev->ctrl.tagset) { 166157dacad5SJay Sternberg dev->tagset.ops = &nvme_mq_ops; 166257dacad5SJay Sternberg dev->tagset.nr_hw_queues = dev->online_queues - 1; 166357dacad5SJay Sternberg dev->tagset.timeout = NVME_IO_TIMEOUT; 166457dacad5SJay Sternberg dev->tagset.numa_node = dev_to_node(dev->dev); 166557dacad5SJay Sternberg dev->tagset.queue_depth = 166657dacad5SJay Sternberg min_t(int, dev->q_depth, BLK_MQ_MAX_DEPTH) - 1; 166757dacad5SJay Sternberg dev->tagset.cmd_size = nvme_cmd_size(dev); 166857dacad5SJay Sternberg dev->tagset.flags = BLK_MQ_F_SHOULD_MERGE; 166957dacad5SJay Sternberg dev->tagset.driver_data = dev; 167057dacad5SJay Sternberg 167157dacad5SJay Sternberg if (blk_mq_alloc_tag_set(&dev->tagset)) 167257dacad5SJay Sternberg return 0; 16735bae7f73SChristoph Hellwig dev->ctrl.tagset = &dev->tagset; 1674f9f38e33SHelen Koike 1675f9f38e33SHelen Koike nvme_dbbuf_set(dev); 1676949928c1SKeith Busch } else { 1677949928c1SKeith Busch blk_mq_update_nr_hw_queues(&dev->tagset, dev->online_queues - 1); 1678949928c1SKeith Busch 1679949928c1SKeith Busch /* Free previously allocated queues that are no longer usable */ 1680949928c1SKeith Busch nvme_free_queues(dev, dev->online_queues); 168157dacad5SJay Sternberg } 1682949928c1SKeith Busch 168357dacad5SJay Sternberg return 0; 168457dacad5SJay Sternberg } 168557dacad5SJay Sternberg 1686b00a726aSKeith Busch static int nvme_pci_enable(struct nvme_dev *dev) 168757dacad5SJay Sternberg { 168857dacad5SJay Sternberg u64 cap; 1689b00a726aSKeith Busch int result = -ENOMEM; 169057dacad5SJay Sternberg struct pci_dev *pdev = to_pci_dev(dev->dev); 169157dacad5SJay 
Sternberg 169257dacad5SJay Sternberg if (pci_enable_device_mem(pdev)) 169357dacad5SJay Sternberg return result; 169457dacad5SJay Sternberg 169557dacad5SJay Sternberg pci_set_master(pdev); 169657dacad5SJay Sternberg 169757dacad5SJay Sternberg if (dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(64)) && 169857dacad5SJay Sternberg dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(32))) 169957dacad5SJay Sternberg goto disable; 170057dacad5SJay Sternberg 17017a67cbeaSChristoph Hellwig if (readl(dev->bar + NVME_REG_CSTS) == -1) { 170257dacad5SJay Sternberg result = -ENODEV; 1703b00a726aSKeith Busch goto disable; 170457dacad5SJay Sternberg } 170557dacad5SJay Sternberg 170657dacad5SJay Sternberg /* 1707a5229050SKeith Busch * Some devices and/or platforms don't advertise or work with INTx 1708a5229050SKeith Busch * interrupts. Pre-enable a single MSIX or MSI vec for setup. We'll 1709a5229050SKeith Busch * adjust this later. 171057dacad5SJay Sternberg */ 1711dca51e78SChristoph Hellwig result = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES); 1712dca51e78SChristoph Hellwig if (result < 0) 1713dca51e78SChristoph Hellwig return result; 171457dacad5SJay Sternberg 17157a67cbeaSChristoph Hellwig cap = lo_hi_readq(dev->bar + NVME_REG_CAP); 17167a67cbeaSChristoph Hellwig 171757dacad5SJay Sternberg dev->q_depth = min_t(int, NVME_CAP_MQES(cap) + 1, NVME_Q_DEPTH); 171857dacad5SJay Sternberg dev->db_stride = 1 << NVME_CAP_STRIDE(cap); 17197a67cbeaSChristoph Hellwig dev->dbs = dev->bar + 4096; 17201f390c1fSStephan Günther 17211f390c1fSStephan Günther /* 17221f390c1fSStephan Günther * Temporary fix for the Apple controller found in the MacBook8,1 and 17231f390c1fSStephan Günther * some MacBook7,1 to avoid controller resets and data loss. 17241f390c1fSStephan Günther */ 17251f390c1fSStephan Günther if (pdev->vendor == PCI_VENDOR_ID_APPLE && pdev->device == 0x2001) { 17261f390c1fSStephan Günther dev->q_depth = 2; 17271f390c1fSStephan Günther dev_warn(dev->dev, "detected Apple NVMe controller, set " 17281f390c1fSStephan Günther "queue depth=%u to work around controller resets\n", 17291f390c1fSStephan Günther dev->q_depth); 17301f390c1fSStephan Günther } 17311f390c1fSStephan Günther 1732202021c1SStephen Bates /* 1733202021c1SStephen Bates * CMBs can currently only exist on >=1.2 PCIe devices. We only 1734202021c1SStephen Bates * populate sysfs if a CMB is implemented. Note that we add the 1735202021c1SStephen Bates * CMB attribute to the nvme_ctrl kobj which removes the need to remove 1736202021c1SStephen Bates * it on exit. Since nvme_dev_attrs_group has no name we can pass 1737202021c1SStephen Bates * NULL as final argument to sysfs_add_file_to_group. 
1738202021c1SStephen Bates */ 1739202021c1SStephen Bates 17408ef2074dSGabriel Krisman Bertazi if (readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 2, 0)) { 174157dacad5SJay Sternberg dev->cmb = nvme_map_cmb(dev); 174257dacad5SJay Sternberg 1743202021c1SStephen Bates if (dev->cmbsz) { 1744202021c1SStephen Bates if (sysfs_add_file_to_group(&dev->ctrl.device->kobj, 1745202021c1SStephen Bates &dev_attr_cmb.attr, NULL)) 1746202021c1SStephen Bates dev_warn(dev->dev, 1747202021c1SStephen Bates "failed to add sysfs attribute for CMB\n"); 1748202021c1SStephen Bates } 1749202021c1SStephen Bates } 1750202021c1SStephen Bates 1751a0a3408eSKeith Busch pci_enable_pcie_error_reporting(pdev); 1752a0a3408eSKeith Busch pci_save_state(pdev); 175357dacad5SJay Sternberg return 0; 175457dacad5SJay Sternberg 175557dacad5SJay Sternberg disable: 175657dacad5SJay Sternberg pci_disable_device(pdev); 175757dacad5SJay Sternberg return result; 175857dacad5SJay Sternberg } 175957dacad5SJay Sternberg 176057dacad5SJay Sternberg static void nvme_dev_unmap(struct nvme_dev *dev) 176157dacad5SJay Sternberg { 1762b00a726aSKeith Busch if (dev->bar) 1763b00a726aSKeith Busch iounmap(dev->bar); 1764a1f447b3SJohannes Thumshirn pci_release_mem_regions(to_pci_dev(dev->dev)); 1765b00a726aSKeith Busch } 1766b00a726aSKeith Busch 1767b00a726aSKeith Busch static void nvme_pci_disable(struct nvme_dev *dev) 1768b00a726aSKeith Busch { 176957dacad5SJay Sternberg struct pci_dev *pdev = to_pci_dev(dev->dev); 177057dacad5SJay Sternberg 1771dca51e78SChristoph Hellwig pci_free_irq_vectors(pdev); 177257dacad5SJay Sternberg 1773a0a3408eSKeith Busch if (pci_is_enabled(pdev)) { 1774a0a3408eSKeith Busch pci_disable_pcie_error_reporting(pdev); 177557dacad5SJay Sternberg pci_disable_device(pdev); 177657dacad5SJay Sternberg } 1777a0a3408eSKeith Busch } 177857dacad5SJay Sternberg 1779a5cdb68cSKeith Busch static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown) 178057dacad5SJay Sternberg { 178170659060SKeith Busch int i, queues; 1782302ad8ccSKeith Busch bool dead = true; 1783302ad8ccSKeith Busch struct pci_dev *pdev = to_pci_dev(dev->dev); 178457dacad5SJay Sternberg 17852d55cd5fSChristoph Hellwig del_timer_sync(&dev->watchdog_timer); 178657dacad5SJay Sternberg 178777bf25eaSKeith Busch mutex_lock(&dev->shutdown_lock); 1788302ad8ccSKeith Busch if (pci_is_enabled(pdev)) { 1789302ad8ccSKeith Busch u32 csts = readl(dev->bar + NVME_REG_CSTS); 1790302ad8ccSKeith Busch 1791302ad8ccSKeith Busch if (dev->ctrl.state == NVME_CTRL_LIVE) 1792302ad8ccSKeith Busch nvme_start_freeze(&dev->ctrl); 1793302ad8ccSKeith Busch dead = !!((csts & NVME_CSTS_CFS) || !(csts & NVME_CSTS_RDY) || 1794302ad8ccSKeith Busch pdev->error_state != pci_channel_io_normal); 179557dacad5SJay Sternberg } 1796c21377f8SGabriel Krisman Bertazi 1797302ad8ccSKeith Busch /* 1798302ad8ccSKeith Busch * Give the controller a chance to complete all entered requests if 1799302ad8ccSKeith Busch * doing a safe shutdown. 
1800302ad8ccSKeith Busch */ 1801302ad8ccSKeith Busch if (!dead && shutdown) 1802302ad8ccSKeith Busch nvme_wait_freeze_timeout(&dev->ctrl, NVME_IO_TIMEOUT); 1803302ad8ccSKeith Busch nvme_stop_queues(&dev->ctrl); 1804302ad8ccSKeith Busch 180570659060SKeith Busch queues = dev->online_queues - 1; 1806c21377f8SGabriel Krisman Bertazi for (i = dev->queue_count - 1; i > 0; i--) 1807c21377f8SGabriel Krisman Bertazi nvme_suspend_queue(dev->queues[i]); 1808c21377f8SGabriel Krisman Bertazi 1809302ad8ccSKeith Busch if (dead) { 181082469c59SGabriel Krisman Bertazi /* A device might become IO incapable very soon during 181182469c59SGabriel Krisman Bertazi * probe, before the admin queue is configured. Thus, 181282469c59SGabriel Krisman Bertazi * queue_count can be 0 here. 181382469c59SGabriel Krisman Bertazi */ 181482469c59SGabriel Krisman Bertazi if (dev->queue_count) 1815c21377f8SGabriel Krisman Bertazi nvme_suspend_queue(dev->queues[0]); 181657dacad5SJay Sternberg } else { 181770659060SKeith Busch nvme_disable_io_queues(dev, queues); 1818a5cdb68cSKeith Busch nvme_disable_admin_queue(dev, shutdown); 181957dacad5SJay Sternberg } 1820b00a726aSKeith Busch nvme_pci_disable(dev); 182157dacad5SJay Sternberg 1822e1958e65SMing Lin blk_mq_tagset_busy_iter(&dev->tagset, nvme_cancel_request, &dev->ctrl); 1823e1958e65SMing Lin blk_mq_tagset_busy_iter(&dev->admin_tagset, nvme_cancel_request, &dev->ctrl); 1824302ad8ccSKeith Busch 1825302ad8ccSKeith Busch /* 1826302ad8ccSKeith Busch * The driver will not start the queues up again if shutting down, so 1827302ad8ccSKeith Busch * it must flush all entered requests to their failed completion to 1828302ad8ccSKeith Busch * avoid deadlocking the blk-mq hot-cpu notifier. 1829302ad8ccSKeith Busch */ 1830302ad8ccSKeith Busch if (shutdown) 1831302ad8ccSKeith Busch nvme_start_queues(&dev->ctrl); 183277bf25eaSKeith Busch mutex_unlock(&dev->shutdown_lock); 183357dacad5SJay Sternberg } 183457dacad5SJay Sternberg 183557dacad5SJay Sternberg static int nvme_setup_prp_pools(struct nvme_dev *dev) 183657dacad5SJay Sternberg { 183757dacad5SJay Sternberg dev->prp_page_pool = dma_pool_create("prp list page", dev->dev, 183857dacad5SJay Sternberg PAGE_SIZE, PAGE_SIZE, 0); 183957dacad5SJay Sternberg if (!dev->prp_page_pool) 184057dacad5SJay Sternberg return -ENOMEM; 184157dacad5SJay Sternberg 184257dacad5SJay Sternberg /* Optimisation for I/Os between 4k and 128k */ 184357dacad5SJay Sternberg dev->prp_small_pool = dma_pool_create("prp list 256", dev->dev, 184457dacad5SJay Sternberg 256, 256, 0); 184557dacad5SJay Sternberg if (!dev->prp_small_pool) { 184657dacad5SJay Sternberg dma_pool_destroy(dev->prp_page_pool); 184757dacad5SJay Sternberg return -ENOMEM; 184857dacad5SJay Sternberg } 184957dacad5SJay Sternberg return 0; 185057dacad5SJay Sternberg } 185157dacad5SJay Sternberg 185257dacad5SJay Sternberg static void nvme_release_prp_pools(struct nvme_dev *dev) 185357dacad5SJay Sternberg { 185457dacad5SJay Sternberg dma_pool_destroy(dev->prp_page_pool); 185557dacad5SJay Sternberg dma_pool_destroy(dev->prp_small_pool); 185657dacad5SJay Sternberg } 185757dacad5SJay Sternberg 18581673f1f0SChristoph Hellwig static void nvme_pci_free_ctrl(struct nvme_ctrl *ctrl) 185957dacad5SJay Sternberg { 18601673f1f0SChristoph Hellwig struct nvme_dev *dev = to_nvme_dev(ctrl); 186157dacad5SJay Sternberg 1862f9f38e33SHelen Koike nvme_dbbuf_dma_free(dev); 186357dacad5SJay Sternberg put_device(dev->dev); 186457dacad5SJay Sternberg if (dev->tagset.tags) 186557dacad5SJay Sternberg blk_mq_free_tag_set(&dev->tagset);
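	/*
	 * Added commentary: nvme_pci_free_ctrl() is wired up as ->free_ctrl
	 * in nvme_pci_ctrl_ops below, so this teardown only runs once the
	 * last reference taken via kref_get()/nvme_put_ctrl() is dropped.
	 */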
18661c63dc66SChristoph Hellwig if (dev->ctrl.admin_q) 18671c63dc66SChristoph Hellwig blk_put_queue(dev->ctrl.admin_q); 186857dacad5SJay Sternberg kfree(dev->queues); 1869e286bcfcSScott Bauer free_opal_dev(dev->ctrl.opal_dev); 187057dacad5SJay Sternberg kfree(dev); 187157dacad5SJay Sternberg } 187257dacad5SJay Sternberg 1873f58944e2SKeith Busch static void nvme_remove_dead_ctrl(struct nvme_dev *dev, int status) 1874f58944e2SKeith Busch { 1875237045fcSLinus Torvalds dev_warn(dev->ctrl.device, "Removing after probe failure status: %d\n", status); 1876f58944e2SKeith Busch 1877f58944e2SKeith Busch kref_get(&dev->ctrl.kref); 187869d9a99cSKeith Busch nvme_dev_disable(dev, false); 1879f58944e2SKeith Busch if (!schedule_work(&dev->remove_work)) 1880f58944e2SKeith Busch nvme_put_ctrl(&dev->ctrl); 1881f58944e2SKeith Busch } 1882f58944e2SKeith Busch 1883fd634f41SChristoph Hellwig static void nvme_reset_work(struct work_struct *work) 188457dacad5SJay Sternberg { 1885fd634f41SChristoph Hellwig struct nvme_dev *dev = container_of(work, struct nvme_dev, reset_work); 1886a98e58e5SScott Bauer bool was_suspend = !!(dev->ctrl.ctrl_config & NVME_CC_SHN_NORMAL); 1887f58944e2SKeith Busch int result = -ENODEV; 188857dacad5SJay Sternberg 1889bb8d261eSChristoph Hellwig if (WARN_ON(dev->ctrl.state == NVME_CTRL_RESETTING)) 1890fd634f41SChristoph Hellwig goto out; 1891fd634f41SChristoph Hellwig 1892fd634f41SChristoph Hellwig /* 1893fd634f41SChristoph Hellwig * If we're called to reset a live controller first shut it down before 1894fd634f41SChristoph Hellwig * moving on. 1895fd634f41SChristoph Hellwig */ 1896b00a726aSKeith Busch if (dev->ctrl.ctrl_config & NVME_CC_ENABLE) 1897a5cdb68cSKeith Busch nvme_dev_disable(dev, false); 1898fd634f41SChristoph Hellwig 1899bb8d261eSChristoph Hellwig if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_RESETTING)) 19009bf2b972SKeith Busch goto out; 19019bf2b972SKeith Busch 1902b00a726aSKeith Busch result = nvme_pci_enable(dev); 190357dacad5SJay Sternberg if (result) 190457dacad5SJay Sternberg goto out; 190557dacad5SJay Sternberg 190657dacad5SJay Sternberg result = nvme_configure_admin_queue(dev); 190757dacad5SJay Sternberg if (result) 1908f58944e2SKeith Busch goto out; 190957dacad5SJay Sternberg 191057dacad5SJay Sternberg nvme_init_queue(dev->queues[0], 0); 191157dacad5SJay Sternberg result = nvme_alloc_admin_tags(dev); 191257dacad5SJay Sternberg if (result) 1913f58944e2SKeith Busch goto out; 191457dacad5SJay Sternberg 1915ce4541f4SChristoph Hellwig result = nvme_init_identify(&dev->ctrl); 1916ce4541f4SChristoph Hellwig if (result) 1917f58944e2SKeith Busch goto out; 1918ce4541f4SChristoph Hellwig 1919e286bcfcSScott Bauer if (dev->ctrl.oacs & NVME_CTRL_OACS_SEC_SUPP) { 1920e286bcfcSScott Bauer if (!dev->ctrl.opal_dev) 19214f1244c8SChristoph Hellwig dev->ctrl.opal_dev = 19224f1244c8SChristoph Hellwig init_opal_dev(&dev->ctrl, &nvme_sec_submit); 1923e286bcfcSScott Bauer else if (was_suspend) 19244f1244c8SChristoph Hellwig opal_unlock_from_suspend(dev->ctrl.opal_dev); 1925e286bcfcSScott Bauer } else { 1926e286bcfcSScott Bauer free_opal_dev(dev->ctrl.opal_dev); 1927e286bcfcSScott Bauer dev->ctrl.opal_dev = NULL; 1928e286bcfcSScott Bauer } 1929a98e58e5SScott Bauer 1930f9f38e33SHelen Koike if (dev->ctrl.oacs & NVME_CTRL_OACS_DBBUF_SUPP) { 1931f9f38e33SHelen Koike result = nvme_dbbuf_dma_alloc(dev); 1932f9f38e33SHelen Koike if (result) 1933f9f38e33SHelen Koike dev_warn(dev->dev, 1934f9f38e33SHelen Koike "unable to allocate dma for dbbuf\n"); 1935f9f38e33SHelen Koike } 1936f9f38e33SHelen Koike 
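	/*
	 * Added commentary: a dbbuf allocation failure above is deliberately
	 * non-fatal; without the shadow doorbell buffers the controller still
	 * works, the reset simply continues with plain MMIO doorbell writes.
	 */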
193757dacad5SJay Sternberg result = nvme_setup_io_queues(dev); 193857dacad5SJay Sternberg if (result) 1939f58944e2SKeith Busch goto out; 194057dacad5SJay Sternberg 194121f033f7SKeith Busch /* 194221f033f7SKeith Busch * A controller that can not execute IO typically requires user 194321f033f7SKeith Busch * intervention to correct. For such degraded controllers, the driver 194421f033f7SKeith Busch * should not submit commands the user did not request, so skip 194521f033f7SKeith Busch * registering for asynchronous event notification on this condition. 194621f033f7SKeith Busch */ 1947f866fc42SChristoph Hellwig if (dev->online_queues > 1) 1948f866fc42SChristoph Hellwig nvme_queue_async_events(&dev->ctrl); 194957dacad5SJay Sternberg 19502d55cd5fSChristoph Hellwig mod_timer(&dev->watchdog_timer, round_jiffies(jiffies + HZ)); 195157dacad5SJay Sternberg 195257dacad5SJay Sternberg /* 195357dacad5SJay Sternberg * Keep the controller around but remove all namespaces if we don't have 195457dacad5SJay Sternberg * any working I/O queue. 195557dacad5SJay Sternberg */ 195657dacad5SJay Sternberg if (dev->online_queues < 2) { 19571b3c47c1SSagi Grimberg dev_warn(dev->ctrl.device, "IO queues not created\n"); 19583b24774eSKeith Busch nvme_kill_queues(&dev->ctrl); 19595bae7f73SChristoph Hellwig nvme_remove_namespaces(&dev->ctrl); 196057dacad5SJay Sternberg } else { 196125646264SKeith Busch nvme_start_queues(&dev->ctrl); 1962302ad8ccSKeith Busch nvme_wait_freeze(&dev->ctrl); 196357dacad5SJay Sternberg nvme_dev_add(dev); 1964302ad8ccSKeith Busch nvme_unfreeze(&dev->ctrl); 196557dacad5SJay Sternberg } 196657dacad5SJay Sternberg 1967bb8d261eSChristoph Hellwig if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_LIVE)) { 1968bb8d261eSChristoph Hellwig dev_warn(dev->ctrl.device, "failed to mark controller live\n"); 1969bb8d261eSChristoph Hellwig goto out; 1970bb8d261eSChristoph Hellwig } 197192911a55SChristoph Hellwig 197292911a55SChristoph Hellwig if (dev->online_queues > 1) 19735955be21SChristoph Hellwig nvme_queue_scan(&dev->ctrl); 197457dacad5SJay Sternberg return; 197557dacad5SJay Sternberg 197657dacad5SJay Sternberg out: 1977f58944e2SKeith Busch nvme_remove_dead_ctrl(dev, result); 197857dacad5SJay Sternberg } 197957dacad5SJay Sternberg 19805c8809e6SChristoph Hellwig static void nvme_remove_dead_ctrl_work(struct work_struct *work) 198157dacad5SJay Sternberg { 19825c8809e6SChristoph Hellwig struct nvme_dev *dev = container_of(work, struct nvme_dev, remove_work); 198357dacad5SJay Sternberg struct pci_dev *pdev = to_pci_dev(dev->dev); 198457dacad5SJay Sternberg 198569d9a99cSKeith Busch nvme_kill_queues(&dev->ctrl); 198657dacad5SJay Sternberg if (pci_get_drvdata(pdev)) 1987921920abSKeith Busch device_release_driver(&pdev->dev); 19881673f1f0SChristoph Hellwig nvme_put_ctrl(&dev->ctrl); 198957dacad5SJay Sternberg } 199057dacad5SJay Sternberg 199157dacad5SJay Sternberg static int nvme_reset(struct nvme_dev *dev) 199257dacad5SJay Sternberg { 19931c63dc66SChristoph Hellwig if (!dev->ctrl.admin_q || blk_queue_dying(dev->ctrl.admin_q)) 199457dacad5SJay Sternberg return -ENODEV; 1995c5f6ce97SKeith Busch if (work_busy(&dev->reset_work)) 1996c5f6ce97SKeith Busch return -ENODEV; 1997846cc05fSChristoph Hellwig if (!queue_work(nvme_workq, &dev->reset_work)) 1998846cc05fSChristoph Hellwig return -EBUSY; 199957dacad5SJay Sternberg return 0; 200057dacad5SJay Sternberg } 200157dacad5SJay Sternberg 20021c63dc66SChristoph Hellwig static int nvme_pci_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val) 200357dacad5SJay Sternberg { 
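	/*
	 * Added commentary: this and the accessors below are thin MMIO
	 * wrappers backing nvme_pci_ctrl_ops, letting the transport-agnostic
	 * NVMe core touch controller registers without knowing they live in
	 * a PCI BAR.
	 */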
20041c63dc66SChristoph Hellwig *val = readl(to_nvme_dev(ctrl)->bar + off); 20051c63dc66SChristoph Hellwig return 0; 200657dacad5SJay Sternberg } 20071c63dc66SChristoph Hellwig 20085fd4ce1bSChristoph Hellwig static int nvme_pci_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val) 20095fd4ce1bSChristoph Hellwig { 20105fd4ce1bSChristoph Hellwig writel(val, to_nvme_dev(ctrl)->bar + off); 20115fd4ce1bSChristoph Hellwig return 0; 20125fd4ce1bSChristoph Hellwig } 20135fd4ce1bSChristoph Hellwig 20147fd8930fSChristoph Hellwig static int nvme_pci_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val) 20157fd8930fSChristoph Hellwig { 20167fd8930fSChristoph Hellwig *val = readq(to_nvme_dev(ctrl)->bar + off); 20177fd8930fSChristoph Hellwig return 0; 20187fd8930fSChristoph Hellwig } 20197fd8930fSChristoph Hellwig 2020f3ca80fcSChristoph Hellwig static int nvme_pci_reset_ctrl(struct nvme_ctrl *ctrl) 2021f3ca80fcSChristoph Hellwig { 2022c5f6ce97SKeith Busch struct nvme_dev *dev = to_nvme_dev(ctrl); 2023c5f6ce97SKeith Busch int ret = nvme_reset(dev); 2024c5f6ce97SKeith Busch 2025c5f6ce97SKeith Busch if (!ret) 2026c5f6ce97SKeith Busch flush_work(&dev->reset_work); 2027c5f6ce97SKeith Busch return ret; 2028f3ca80fcSChristoph Hellwig } 2029f3ca80fcSChristoph Hellwig 20301c63dc66SChristoph Hellwig static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = { 20311a353d85SMing Lin .name = "pcie", 2032e439bb12SSagi Grimberg .module = THIS_MODULE, 20331c63dc66SChristoph Hellwig .reg_read32 = nvme_pci_reg_read32, 20345fd4ce1bSChristoph Hellwig .reg_write32 = nvme_pci_reg_write32, 20357fd8930fSChristoph Hellwig .reg_read64 = nvme_pci_reg_read64, 2036f3ca80fcSChristoph Hellwig .reset_ctrl = nvme_pci_reset_ctrl, 20371673f1f0SChristoph Hellwig .free_ctrl = nvme_pci_free_ctrl, 2038f866fc42SChristoph Hellwig .submit_async_event = nvme_pci_submit_async_event, 20391c63dc66SChristoph Hellwig }; 204057dacad5SJay Sternberg 2041b00a726aSKeith Busch static int nvme_dev_map(struct nvme_dev *dev) 2042b00a726aSKeith Busch { 2043b00a726aSKeith Busch struct pci_dev *pdev = to_pci_dev(dev->dev); 2044b00a726aSKeith Busch 2045a1f447b3SJohannes Thumshirn if (pci_request_mem_regions(pdev, "nvme")) 2046b00a726aSKeith Busch return -ENODEV; 2047b00a726aSKeith Busch 2048b00a726aSKeith Busch dev->bar = ioremap(pci_resource_start(pdev, 0), 8192); 2049b00a726aSKeith Busch if (!dev->bar) 2050b00a726aSKeith Busch goto release; 2051b00a726aSKeith Busch 2052b00a726aSKeith Busch return 0; 2053b00a726aSKeith Busch release: 2054a1f447b3SJohannes Thumshirn pci_release_mem_regions(pdev); 2055b00a726aSKeith Busch return -ENODEV; 2056b00a726aSKeith Busch } 2057b00a726aSKeith Busch 205857dacad5SJay Sternberg static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id) 205957dacad5SJay Sternberg { 206057dacad5SJay Sternberg int node, result = -ENOMEM; 206157dacad5SJay Sternberg struct nvme_dev *dev; 206257dacad5SJay Sternberg 206357dacad5SJay Sternberg node = dev_to_node(&pdev->dev); 206457dacad5SJay Sternberg if (node == NUMA_NO_NODE) 20652fa84351SMasayoshi Mizuma set_dev_node(&pdev->dev, first_memory_node); 206657dacad5SJay Sternberg 206757dacad5SJay Sternberg dev = kzalloc_node(sizeof(*dev), GFP_KERNEL, node); 206857dacad5SJay Sternberg if (!dev) 206957dacad5SJay Sternberg return -ENOMEM; 207057dacad5SJay Sternberg dev->queues = kzalloc_node((num_possible_cpus() + 1) * sizeof(void *), 207157dacad5SJay Sternberg GFP_KERNEL, node); 207257dacad5SJay Sternberg if (!dev->queues) 207357dacad5SJay Sternberg goto free; 207457dacad5SJay Sternberg 
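	/*
	 * Added commentary: the queues array allocated above reserves
	 * num_possible_cpus() + 1 slots because slot 0 is the admin queue and
	 * each possible CPU may receive its own I/O queue.
	 */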
207557dacad5SJay Sternberg dev->dev = get_device(&pdev->dev); 207657dacad5SJay Sternberg pci_set_drvdata(pdev, dev); 207757dacad5SJay Sternberg 2078b00a726aSKeith Busch result = nvme_dev_map(dev); 2079b00a726aSKeith Busch if (result) 2080b00a726aSKeith Busch goto free; 2081b00a726aSKeith Busch 2082f3ca80fcSChristoph Hellwig INIT_WORK(&dev->reset_work, nvme_reset_work); 20835c8809e6SChristoph Hellwig INIT_WORK(&dev->remove_work, nvme_remove_dead_ctrl_work); 20842d55cd5fSChristoph Hellwig setup_timer(&dev->watchdog_timer, nvme_watchdog_timer, 20852d55cd5fSChristoph Hellwig (unsigned long)dev); 208677bf25eaSKeith Busch mutex_init(&dev->shutdown_lock); 2087db3cbfffSKeith Busch init_completion(&dev->ioq_wait); 2088f3ca80fcSChristoph Hellwig 2089f3ca80fcSChristoph Hellwig result = nvme_setup_prp_pools(dev); 2090f3ca80fcSChristoph Hellwig if (result) 2091f3ca80fcSChristoph Hellwig goto put_pci; 2092f3ca80fcSChristoph Hellwig 2093f3ca80fcSChristoph Hellwig result = nvme_init_ctrl(&dev->ctrl, &pdev->dev, &nvme_pci_ctrl_ops, 2094f3ca80fcSChristoph Hellwig id->driver_data); 2095f3ca80fcSChristoph Hellwig if (result) 2096f3ca80fcSChristoph Hellwig goto release_pools; 2097f3ca80fcSChristoph Hellwig 20981b3c47c1SSagi Grimberg dev_info(dev->ctrl.device, "pci function %s\n", dev_name(&pdev->dev)); 20991b3c47c1SSagi Grimberg 210092f7a162SKeith Busch queue_work(nvme_workq, &dev->reset_work); 210157dacad5SJay Sternberg return 0; 210257dacad5SJay Sternberg 210357dacad5SJay Sternberg release_pools: 210457dacad5SJay Sternberg nvme_release_prp_pools(dev); 210557dacad5SJay Sternberg put_pci: 210657dacad5SJay Sternberg put_device(dev->dev); 2107b00a726aSKeith Busch nvme_dev_unmap(dev); 210857dacad5SJay Sternberg free: 210957dacad5SJay Sternberg kfree(dev->queues); 211057dacad5SJay Sternberg kfree(dev); 211157dacad5SJay Sternberg return result; 211257dacad5SJay Sternberg } 211357dacad5SJay Sternberg 211457dacad5SJay Sternberg static void nvme_reset_notify(struct pci_dev *pdev, bool prepare) 211557dacad5SJay Sternberg { 211657dacad5SJay Sternberg struct nvme_dev *dev = pci_get_drvdata(pdev); 211757dacad5SJay Sternberg 211857dacad5SJay Sternberg if (prepare) 2119a5cdb68cSKeith Busch nvme_dev_disable(dev, false); 212057dacad5SJay Sternberg else 2121c5f6ce97SKeith Busch nvme_reset(dev); 212257dacad5SJay Sternberg } 212357dacad5SJay Sternberg 212457dacad5SJay Sternberg static void nvme_shutdown(struct pci_dev *pdev) 212557dacad5SJay Sternberg { 212657dacad5SJay Sternberg struct nvme_dev *dev = pci_get_drvdata(pdev); 2127a5cdb68cSKeith Busch nvme_dev_disable(dev, true); 212857dacad5SJay Sternberg } 212957dacad5SJay Sternberg 2130f58944e2SKeith Busch /* 2131f58944e2SKeith Busch * The driver's remove may be called on a device in a partially initialized 2132f58944e2SKeith Busch * state. This function must not have any dependencies on the device state in 2133f58944e2SKeith Busch * order to proceed. 
2134f58944e2SKeith Busch */ 213557dacad5SJay Sternberg static void nvme_remove(struct pci_dev *pdev) 213657dacad5SJay Sternberg { 213757dacad5SJay Sternberg struct nvme_dev *dev = pci_get_drvdata(pdev); 213857dacad5SJay Sternberg 2139bb8d261eSChristoph Hellwig nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING); 2140bb8d261eSChristoph Hellwig 214157dacad5SJay Sternberg pci_set_drvdata(pdev, NULL); 21420ff9d4e1SKeith Busch 21436db28edaSKeith Busch if (!pci_device_is_present(pdev)) { 21440ff9d4e1SKeith Busch nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DEAD); 21456db28edaSKeith Busch nvme_dev_disable(dev, false); 21466db28edaSKeith Busch } 21470ff9d4e1SKeith Busch 21489bf2b972SKeith Busch flush_work(&dev->reset_work); 214953029b04SKeith Busch nvme_uninit_ctrl(&dev->ctrl); 2150a5cdb68cSKeith Busch nvme_dev_disable(dev, true); 215157dacad5SJay Sternberg nvme_dev_remove_admin(dev); 215257dacad5SJay Sternberg nvme_free_queues(dev, 0); 215357dacad5SJay Sternberg nvme_release_cmb(dev); 215457dacad5SJay Sternberg nvme_release_prp_pools(dev); 2155b00a726aSKeith Busch nvme_dev_unmap(dev); 21561673f1f0SChristoph Hellwig nvme_put_ctrl(&dev->ctrl); 215757dacad5SJay Sternberg } 215857dacad5SJay Sternberg 215913880f5bSKeith Busch static int nvme_pci_sriov_configure(struct pci_dev *pdev, int numvfs) 216013880f5bSKeith Busch { 216113880f5bSKeith Busch int ret = 0; 216213880f5bSKeith Busch 216313880f5bSKeith Busch if (numvfs == 0) { 216413880f5bSKeith Busch if (pci_vfs_assigned(pdev)) { 216513880f5bSKeith Busch dev_warn(&pdev->dev, 216613880f5bSKeith Busch "Cannot disable SR-IOV VFs while assigned\n"); 216713880f5bSKeith Busch return -EPERM; 216813880f5bSKeith Busch } 216913880f5bSKeith Busch pci_disable_sriov(pdev); 217013880f5bSKeith Busch return 0; 217113880f5bSKeith Busch } 217213880f5bSKeith Busch 217313880f5bSKeith Busch ret = pci_enable_sriov(pdev, numvfs); 217413880f5bSKeith Busch return ret ? ret : numvfs; 217513880f5bSKeith Busch } 217613880f5bSKeith Busch 217757dacad5SJay Sternberg #ifdef CONFIG_PM_SLEEP 217857dacad5SJay Sternberg static int nvme_suspend(struct device *dev) 217957dacad5SJay Sternberg { 218057dacad5SJay Sternberg struct pci_dev *pdev = to_pci_dev(dev); 218157dacad5SJay Sternberg struct nvme_dev *ndev = pci_get_drvdata(pdev); 218257dacad5SJay Sternberg 2183a5cdb68cSKeith Busch nvme_dev_disable(ndev, true); 218457dacad5SJay Sternberg return 0; 218557dacad5SJay Sternberg } 218657dacad5SJay Sternberg 218757dacad5SJay Sternberg static int nvme_resume(struct device *dev) 218857dacad5SJay Sternberg { 218957dacad5SJay Sternberg struct pci_dev *pdev = to_pci_dev(dev); 219057dacad5SJay Sternberg struct nvme_dev *ndev = pci_get_drvdata(pdev); 219157dacad5SJay Sternberg 2192c5f6ce97SKeith Busch nvme_reset(ndev); 219357dacad5SJay Sternberg return 0; 219457dacad5SJay Sternberg } 219557dacad5SJay Sternberg #endif 219657dacad5SJay Sternberg 219757dacad5SJay Sternberg static SIMPLE_DEV_PM_OPS(nvme_dev_pm_ops, nvme_suspend, nvme_resume); 219857dacad5SJay Sternberg 2199a0a3408eSKeith Busch static pci_ers_result_t nvme_error_detected(struct pci_dev *pdev, 2200a0a3408eSKeith Busch pci_channel_state_t state) 2201a0a3408eSKeith Busch { 2202a0a3408eSKeith Busch struct nvme_dev *dev = pci_get_drvdata(pdev); 2203a0a3408eSKeith Busch 2204a0a3408eSKeith Busch /* 2205a0a3408eSKeith Busch * A frozen channel requires a reset. When detected, this method will 2206a0a3408eSKeith Busch * shutdown the controller to quiesce. 
The controller will be restarted 2207a0a3408eSKeith Busch * after the slot reset through driver's slot_reset callback. 2208a0a3408eSKeith Busch */ 2209a0a3408eSKeith Busch switch (state) { 2210a0a3408eSKeith Busch case pci_channel_io_normal: 2211a0a3408eSKeith Busch return PCI_ERS_RESULT_CAN_RECOVER; 2212a0a3408eSKeith Busch case pci_channel_io_frozen: 2213d011fb31SKeith Busch dev_warn(dev->ctrl.device, 2214d011fb31SKeith Busch "frozen state error detected, reset controller\n"); 2215a5cdb68cSKeith Busch nvme_dev_disable(dev, false); 2216a0a3408eSKeith Busch return PCI_ERS_RESULT_NEED_RESET; 2217a0a3408eSKeith Busch case pci_channel_io_perm_failure: 2218d011fb31SKeith Busch dev_warn(dev->ctrl.device, 2219d011fb31SKeith Busch "failure state error detected, request disconnect\n"); 2220a0a3408eSKeith Busch return PCI_ERS_RESULT_DISCONNECT; 2221a0a3408eSKeith Busch } 2222a0a3408eSKeith Busch return PCI_ERS_RESULT_NEED_RESET; 2223a0a3408eSKeith Busch } 2224a0a3408eSKeith Busch 2225a0a3408eSKeith Busch static pci_ers_result_t nvme_slot_reset(struct pci_dev *pdev) 2226a0a3408eSKeith Busch { 2227a0a3408eSKeith Busch struct nvme_dev *dev = pci_get_drvdata(pdev); 2228a0a3408eSKeith Busch 22291b3c47c1SSagi Grimberg dev_info(dev->ctrl.device, "restart after slot reset\n"); 2230a0a3408eSKeith Busch pci_restore_state(pdev); 2231c5f6ce97SKeith Busch nvme_reset(dev); 2232a0a3408eSKeith Busch return PCI_ERS_RESULT_RECOVERED; 2233a0a3408eSKeith Busch } 2234a0a3408eSKeith Busch 2235a0a3408eSKeith Busch static void nvme_error_resume(struct pci_dev *pdev) 2236a0a3408eSKeith Busch { 2237a0a3408eSKeith Busch pci_cleanup_aer_uncorrect_error_status(pdev); 2238a0a3408eSKeith Busch } 2239a0a3408eSKeith Busch 224057dacad5SJay Sternberg static const struct pci_error_handlers nvme_err_handler = { 224157dacad5SJay Sternberg .error_detected = nvme_error_detected, 224257dacad5SJay Sternberg .slot_reset = nvme_slot_reset, 224357dacad5SJay Sternberg .resume = nvme_error_resume, 224457dacad5SJay Sternberg .reset_notify = nvme_reset_notify, 224557dacad5SJay Sternberg }; 224657dacad5SJay Sternberg 224757dacad5SJay Sternberg static const struct pci_device_id nvme_id_table[] = { 2248106198edSChristoph Hellwig { PCI_VDEVICE(INTEL, 0x0953), 224908095e70SKeith Busch .driver_data = NVME_QUIRK_STRIPE_SIZE | 2250e850fd16SChristoph Hellwig NVME_QUIRK_DEALLOCATE_ZEROES, }, 225199466e70SKeith Busch { PCI_VDEVICE(INTEL, 0x0a53), 225299466e70SKeith Busch .driver_data = NVME_QUIRK_STRIPE_SIZE | 2253e850fd16SChristoph Hellwig NVME_QUIRK_DEALLOCATE_ZEROES, }, 225499466e70SKeith Busch { PCI_VDEVICE(INTEL, 0x0a54), 225599466e70SKeith Busch .driver_data = NVME_QUIRK_STRIPE_SIZE | 2256e850fd16SChristoph Hellwig NVME_QUIRK_DEALLOCATE_ZEROES, }, 2257540c801cSKeith Busch { PCI_VDEVICE(INTEL, 0x5845), /* Qemu emulated controller */ 2258540c801cSKeith Busch .driver_data = NVME_QUIRK_IDENTIFY_CNS, }, 225954adc010SGuilherme G. Piccoli { PCI_DEVICE(0x1c58, 0x0003), /* HGST adapter */ 226054adc010SGuilherme G. 
Piccoli .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, }, 2261015282c9SWenbo Wang { PCI_DEVICE(0x1c5f, 0x0540), /* Memblaze Pblaze4 adapter */ 2262015282c9SWenbo Wang .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, }, 226357dacad5SJay Sternberg { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) }, 2264c74dc780SStephan Günther { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001) }, 2265124298bdSDaniel Roschka { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2003) }, 226657dacad5SJay Sternberg { 0, } 226757dacad5SJay Sternberg }; 226857dacad5SJay Sternberg MODULE_DEVICE_TABLE(pci, nvme_id_table); 226957dacad5SJay Sternberg 227057dacad5SJay Sternberg static struct pci_driver nvme_driver = { 227157dacad5SJay Sternberg .name = "nvme", 227257dacad5SJay Sternberg .id_table = nvme_id_table, 227357dacad5SJay Sternberg .probe = nvme_probe, 227457dacad5SJay Sternberg .remove = nvme_remove, 227557dacad5SJay Sternberg .shutdown = nvme_shutdown, 227657dacad5SJay Sternberg .driver = { 227757dacad5SJay Sternberg .pm = &nvme_dev_pm_ops, 227857dacad5SJay Sternberg }, 227913880f5bSKeith Busch .sriov_configure = nvme_pci_sriov_configure, 228057dacad5SJay Sternberg .err_handler = &nvme_err_handler, 228157dacad5SJay Sternberg }; 228257dacad5SJay Sternberg 228357dacad5SJay Sternberg static int __init nvme_init(void) 228457dacad5SJay Sternberg { 228557dacad5SJay Sternberg int result; 228657dacad5SJay Sternberg 228792f7a162SKeith Busch nvme_workq = alloc_workqueue("nvme", WQ_UNBOUND | WQ_MEM_RECLAIM, 0); 228857dacad5SJay Sternberg if (!nvme_workq) 228957dacad5SJay Sternberg return -ENOMEM; 229057dacad5SJay Sternberg 229157dacad5SJay Sternberg result = pci_register_driver(&nvme_driver); 229257dacad5SJay Sternberg if (result) 229357dacad5SJay Sternberg destroy_workqueue(nvme_workq); 229457dacad5SJay Sternberg return result; 229557dacad5SJay Sternberg } 229657dacad5SJay Sternberg 229757dacad5SJay Sternberg static void __exit nvme_exit(void) 229857dacad5SJay Sternberg { 229957dacad5SJay Sternberg pci_unregister_driver(&nvme_driver); 230057dacad5SJay Sternberg destroy_workqueue(nvme_workq); 230157dacad5SJay Sternberg _nvme_check_size(); 230257dacad5SJay Sternberg } 230357dacad5SJay Sternberg 230457dacad5SJay Sternberg MODULE_AUTHOR("Matthew Wilcox <willy@linux.intel.com>"); 230557dacad5SJay Sternberg MODULE_LICENSE("GPL"); 230657dacad5SJay Sternberg MODULE_VERSION("1.0"); 230757dacad5SJay Sternberg module_init(nvme_init); 230857dacad5SJay Sternberg module_exit(nvme_exit); 2309
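/*
 * Added illustrative sketch (editorial, not part of the driver): a worked
 * example of the nvme_cmb_qdepth() trimming, assuming a hypothetical 1 MiB
 * CMB, 4 KiB controller pages, the default q_depth of 1024, and 64-byte
 * submission queue entries:
 *
 *   q_size_aligned = roundup(1024 * 64, 4096) = 64 KiB per queue
 *
 *   8 I/O queues:  8 * 64 KiB = 512 KiB <= 1 MiB, so q_depth stays 1024
 *   32 I/O queues: 32 * 64 KiB = 2 MiB > 1 MiB, so
 *                  mem_per_q = round_down(1 MiB / 32, 4 KiB) = 32 KiB
 *                  q_depth   = 32768 / 64 = 512 (>= 64, so the CMB is used)
 *
 * Only if the trimmed depth fell below 64 entries would the function return
 * -ENOMEM, after which nvme_setup_io_queues() releases the CMB and the
 * queues fall back to host memory at the original depth.
 */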