157dacad5SJay Sternberg /* 257dacad5SJay Sternberg * NVM Express device driver 357dacad5SJay Sternberg * Copyright (c) 2011-2014, Intel Corporation. 457dacad5SJay Sternberg * 557dacad5SJay Sternberg * This program is free software; you can redistribute it and/or modify it 657dacad5SJay Sternberg * under the terms and conditions of the GNU General Public License, 757dacad5SJay Sternberg * version 2, as published by the Free Software Foundation. 857dacad5SJay Sternberg * 957dacad5SJay Sternberg * This program is distributed in the hope it will be useful, but WITHOUT 1057dacad5SJay Sternberg * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 1157dacad5SJay Sternberg * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 1257dacad5SJay Sternberg * more details. 1357dacad5SJay Sternberg */ 1457dacad5SJay Sternberg 15a0a3408eSKeith Busch #include <linux/aer.h> 1657dacad5SJay Sternberg #include <linux/bitops.h> 1757dacad5SJay Sternberg #include <linux/blkdev.h> 1857dacad5SJay Sternberg #include <linux/blk-mq.h> 19dca51e78SChristoph Hellwig #include <linux/blk-mq-pci.h> 2057dacad5SJay Sternberg #include <linux/cpu.h> 2157dacad5SJay Sternberg #include <linux/delay.h> 22ff5350a8SAndy Lutomirski #include <linux/dmi.h> 2357dacad5SJay Sternberg #include <linux/errno.h> 2457dacad5SJay Sternberg #include <linux/fs.h> 2557dacad5SJay Sternberg #include <linux/genhd.h> 2657dacad5SJay Sternberg #include <linux/hdreg.h> 2757dacad5SJay Sternberg #include <linux/idr.h> 2857dacad5SJay Sternberg #include <linux/init.h> 2957dacad5SJay Sternberg #include <linux/interrupt.h> 3057dacad5SJay Sternberg #include <linux/io.h> 3157dacad5SJay Sternberg #include <linux/kdev_t.h> 3257dacad5SJay Sternberg #include <linux/kernel.h> 3357dacad5SJay Sternberg #include <linux/mm.h> 3457dacad5SJay Sternberg #include <linux/module.h> 3557dacad5SJay Sternberg #include <linux/moduleparam.h> 3677bf25eaSKeith Busch #include <linux/mutex.h> 3757dacad5SJay Sternberg #include <linux/pci.h> 3857dacad5SJay Sternberg #include <linux/poison.h> 3957dacad5SJay Sternberg #include <linux/ptrace.h> 4057dacad5SJay Sternberg #include <linux/sched.h> 4157dacad5SJay Sternberg #include <linux/slab.h> 4257dacad5SJay Sternberg #include <linux/t10-pi.h> 432d55cd5fSChristoph Hellwig #include <linux/timer.h> 4457dacad5SJay Sternberg #include <linux/types.h> 459cf5c095SLinus Torvalds #include <linux/io-64-nonatomic-lo-hi.h> 461d277a63SKeith Busch #include <asm/unaligned.h> 47a98e58e5SScott Bauer #include <linux/sed-opal.h> 4857dacad5SJay Sternberg 4957dacad5SJay Sternberg #include "nvme.h" 5057dacad5SJay Sternberg 5157dacad5SJay Sternberg #define NVME_Q_DEPTH 1024 5257dacad5SJay Sternberg #define NVME_AQ_DEPTH 256 5357dacad5SJay Sternberg #define SQ_SIZE(depth) (depth * sizeof(struct nvme_command)) 5457dacad5SJay Sternberg #define CQ_SIZE(depth) (depth * sizeof(struct nvme_completion)) 5557dacad5SJay Sternberg 56adf68f21SChristoph Hellwig /* 57adf68f21SChristoph Hellwig * We handle AEN commands ourselves and don't even let the 58adf68f21SChristoph Hellwig * block layer know about them. 
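 * (Added note, grounded in this file: command IDs at or above NVME_AQ_BLKMQ_DEPTH are reserved for AER commands that the driver submits directly via nvme_pci_submit_async_event(), so they can never collide with a block-layer tag.)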
59adf68f21SChristoph Hellwig */ 60f866fc42SChristoph Hellwig #define NVME_AQ_BLKMQ_DEPTH (NVME_AQ_DEPTH - NVME_NR_AERS) 61adf68f21SChristoph Hellwig 6257dacad5SJay Sternberg static int use_threaded_interrupts; 6357dacad5SJay Sternberg module_param(use_threaded_interrupts, int, 0); 6457dacad5SJay Sternberg 6557dacad5SJay Sternberg static bool use_cmb_sqes = true; 6657dacad5SJay Sternberg module_param(use_cmb_sqes, bool, 0644); 6757dacad5SJay Sternberg MODULE_PARM_DESC(use_cmb_sqes, "use controller's memory buffer for I/O SQes"); 6857dacad5SJay Sternberg 6957dacad5SJay Sternberg static struct workqueue_struct *nvme_workq; 7057dacad5SJay Sternberg 711c63dc66SChristoph Hellwig struct nvme_dev; 721c63dc66SChristoph Hellwig struct nvme_queue; 7357dacad5SJay Sternberg 7457dacad5SJay Sternberg static int nvme_reset(struct nvme_dev *dev); 75a0fa9647SJens Axboe static void nvme_process_cq(struct nvme_queue *nvmeq); 76a5cdb68cSKeith Busch static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown); 7757dacad5SJay Sternberg 7857dacad5SJay Sternberg /* 791c63dc66SChristoph Hellwig * Represents an NVM Express device. Each nvme_dev is a PCI function. 801c63dc66SChristoph Hellwig */ 811c63dc66SChristoph Hellwig struct nvme_dev { 821c63dc66SChristoph Hellwig struct nvme_queue **queues; 831c63dc66SChristoph Hellwig struct blk_mq_tag_set tagset; 841c63dc66SChristoph Hellwig struct blk_mq_tag_set admin_tagset; 851c63dc66SChristoph Hellwig u32 __iomem *dbs; 861c63dc66SChristoph Hellwig struct device *dev; 871c63dc66SChristoph Hellwig struct dma_pool *prp_page_pool; 881c63dc66SChristoph Hellwig struct dma_pool *prp_small_pool; 891c63dc66SChristoph Hellwig unsigned queue_count; 901c63dc66SChristoph Hellwig unsigned online_queues; 911c63dc66SChristoph Hellwig unsigned max_qid; 921c63dc66SChristoph Hellwig int q_depth; 931c63dc66SChristoph Hellwig u32 db_stride; 941c63dc66SChristoph Hellwig void __iomem *bar; 951c63dc66SChristoph Hellwig struct work_struct reset_work; 965c8809e6SChristoph Hellwig struct work_struct remove_work; 972d55cd5fSChristoph Hellwig struct timer_list watchdog_timer; 9877bf25eaSKeith Busch struct mutex shutdown_lock; 991c63dc66SChristoph Hellwig bool subsystem; 1001c63dc66SChristoph Hellwig void __iomem *cmb; 1011c63dc66SChristoph Hellwig dma_addr_t cmb_dma_addr; 1021c63dc66SChristoph Hellwig u64 cmb_size; 1031c63dc66SChristoph Hellwig u32 cmbsz; 104202021c1SStephen Bates u32 cmbloc; 1051c63dc66SChristoph Hellwig struct nvme_ctrl ctrl; 106db3cbfffSKeith Busch struct completion ioq_wait; 107f9f38e33SHelen Koike u32 *dbbuf_dbs; 108f9f38e33SHelen Koike dma_addr_t dbbuf_dbs_dma_addr; 109f9f38e33SHelen Koike u32 *dbbuf_eis; 110f9f38e33SHelen Koike dma_addr_t dbbuf_eis_dma_addr; 11157dacad5SJay Sternberg }; 11257dacad5SJay Sternberg 113f9f38e33SHelen Koike static inline unsigned int sq_idx(unsigned int qid, u32 stride) 114f9f38e33SHelen Koike { 115f9f38e33SHelen Koike return qid * 2 * stride; 116f9f38e33SHelen Koike } 117f9f38e33SHelen Koike 118f9f38e33SHelen Koike static inline unsigned int cq_idx(unsigned int qid, u32 stride) 119f9f38e33SHelen Koike { 120f9f38e33SHelen Koike return (qid * 2 + 1) * stride; 121f9f38e33SHelen Koike } 122f9f38e33SHelen Koike 1231c63dc66SChristoph Hellwig static inline struct nvme_dev *to_nvme_dev(struct nvme_ctrl *ctrl) 1241c63dc66SChristoph Hellwig { 1251c63dc66SChristoph Hellwig return container_of(ctrl, struct nvme_dev, ctrl); 1261c63dc66SChristoph Hellwig } 1271c63dc66SChristoph Hellwig 12857dacad5SJay Sternberg /* 12957dacad5SJay Sternberg * An NVM 
Express queue. Each device has at least two (one for admin 13057dacad5SJay Sternberg * commands and one for I/O commands). 13157dacad5SJay Sternberg */ 13257dacad5SJay Sternberg struct nvme_queue { 13357dacad5SJay Sternberg struct device *q_dmadev; 13457dacad5SJay Sternberg struct nvme_dev *dev; 13557dacad5SJay Sternberg spinlock_t q_lock; 13657dacad5SJay Sternberg struct nvme_command *sq_cmds; 13757dacad5SJay Sternberg struct nvme_command __iomem *sq_cmds_io; 13857dacad5SJay Sternberg volatile struct nvme_completion *cqes; 13957dacad5SJay Sternberg struct blk_mq_tags **tags; 14057dacad5SJay Sternberg dma_addr_t sq_dma_addr; 14157dacad5SJay Sternberg dma_addr_t cq_dma_addr; 14257dacad5SJay Sternberg u32 __iomem *q_db; 14357dacad5SJay Sternberg u16 q_depth; 14457dacad5SJay Sternberg s16 cq_vector; 14557dacad5SJay Sternberg u16 sq_tail; 14657dacad5SJay Sternberg u16 cq_head; 14757dacad5SJay Sternberg u16 qid; 14857dacad5SJay Sternberg u8 cq_phase; 14957dacad5SJay Sternberg u8 cqe_seen; 150f9f38e33SHelen Koike u32 *dbbuf_sq_db; 151f9f38e33SHelen Koike u32 *dbbuf_cq_db; 152f9f38e33SHelen Koike u32 *dbbuf_sq_ei; 153f9f38e33SHelen Koike u32 *dbbuf_cq_ei; 15457dacad5SJay Sternberg }; 15557dacad5SJay Sternberg 15657dacad5SJay Sternberg /* 15771bd150cSChristoph Hellwig * The nvme_iod describes the data in an I/O, including the list of PRP 15871bd150cSChristoph Hellwig * entries. You can't see it in this data structure because C doesn't let 159f4800d6dSChristoph Hellwig * me express that. Use nvme_init_iod to ensure there's enough space 16071bd150cSChristoph Hellwig * allocated to store the PRP list. 16171bd150cSChristoph Hellwig */ 16271bd150cSChristoph Hellwig struct nvme_iod { 163d49187e9SChristoph Hellwig struct nvme_request req; 164f4800d6dSChristoph Hellwig struct nvme_queue *nvmeq; 165f4800d6dSChristoph Hellwig int aborted; 16671bd150cSChristoph Hellwig int npages; /* In the PRP list. 
0 means small pool in use */ 16771bd150cSChristoph Hellwig int nents; /* Used in scatterlist */ 16871bd150cSChristoph Hellwig int length; /* Of data, in bytes */ 16971bd150cSChristoph Hellwig dma_addr_t first_dma; 170bf684057SChristoph Hellwig struct scatterlist meta_sg; /* metadata requires single contiguous buffer */ 171f4800d6dSChristoph Hellwig struct scatterlist *sg; 172f4800d6dSChristoph Hellwig struct scatterlist inline_sg[0]; 17357dacad5SJay Sternberg }; 17457dacad5SJay Sternberg 17557dacad5SJay Sternberg /* 17657dacad5SJay Sternberg * Check we didn't inadvertently grow the command struct 17757dacad5SJay Sternberg */ 17857dacad5SJay Sternberg static inline void _nvme_check_size(void) 17957dacad5SJay Sternberg { 18057dacad5SJay Sternberg BUILD_BUG_ON(sizeof(struct nvme_rw_command) != 64); 18157dacad5SJay Sternberg BUILD_BUG_ON(sizeof(struct nvme_create_cq) != 64); 18257dacad5SJay Sternberg BUILD_BUG_ON(sizeof(struct nvme_create_sq) != 64); 18357dacad5SJay Sternberg BUILD_BUG_ON(sizeof(struct nvme_delete_queue) != 64); 18457dacad5SJay Sternberg BUILD_BUG_ON(sizeof(struct nvme_features) != 64); 18557dacad5SJay Sternberg BUILD_BUG_ON(sizeof(struct nvme_format_cmd) != 64); 18657dacad5SJay Sternberg BUILD_BUG_ON(sizeof(struct nvme_abort_cmd) != 64); 18757dacad5SJay Sternberg BUILD_BUG_ON(sizeof(struct nvme_command) != 64); 18857dacad5SJay Sternberg BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != 4096); 18957dacad5SJay Sternberg BUILD_BUG_ON(sizeof(struct nvme_id_ns) != 4096); 19057dacad5SJay Sternberg BUILD_BUG_ON(sizeof(struct nvme_lba_range_type) != 64); 19157dacad5SJay Sternberg BUILD_BUG_ON(sizeof(struct nvme_smart_log) != 512); 192f9f38e33SHelen Koike BUILD_BUG_ON(sizeof(struct nvme_dbbuf) != 64); 193f9f38e33SHelen Koike } 194f9f38e33SHelen Koike 195f9f38e33SHelen Koike static inline unsigned int nvme_dbbuf_size(u32 stride) 196f9f38e33SHelen Koike { 197f9f38e33SHelen Koike return ((num_possible_cpus() + 1) * 8 * stride); 198f9f38e33SHelen Koike } 199f9f38e33SHelen Koike 200f9f38e33SHelen Koike static int nvme_dbbuf_dma_alloc(struct nvme_dev *dev) 201f9f38e33SHelen Koike { 202f9f38e33SHelen Koike unsigned int mem_size = nvme_dbbuf_size(dev->db_stride); 203f9f38e33SHelen Koike 204f9f38e33SHelen Koike if (dev->dbbuf_dbs) 205f9f38e33SHelen Koike return 0; 206f9f38e33SHelen Koike 207f9f38e33SHelen Koike dev->dbbuf_dbs = dma_alloc_coherent(dev->dev, mem_size, 208f9f38e33SHelen Koike &dev->dbbuf_dbs_dma_addr, 209f9f38e33SHelen Koike GFP_KERNEL); 210f9f38e33SHelen Koike if (!dev->dbbuf_dbs) 211f9f38e33SHelen Koike return -ENOMEM; 212f9f38e33SHelen Koike dev->dbbuf_eis = dma_alloc_coherent(dev->dev, mem_size, 213f9f38e33SHelen Koike &dev->dbbuf_eis_dma_addr, 214f9f38e33SHelen Koike GFP_KERNEL); 215f9f38e33SHelen Koike if (!dev->dbbuf_eis) { 216f9f38e33SHelen Koike dma_free_coherent(dev->dev, mem_size, 217f9f38e33SHelen Koike dev->dbbuf_dbs, dev->dbbuf_dbs_dma_addr); 218f9f38e33SHelen Koike dev->dbbuf_dbs = NULL; 219f9f38e33SHelen Koike return -ENOMEM; 220f9f38e33SHelen Koike } 221f9f38e33SHelen Koike 222f9f38e33SHelen Koike return 0; 223f9f38e33SHelen Koike } 224f9f38e33SHelen Koike 225f9f38e33SHelen Koike static void nvme_dbbuf_dma_free(struct nvme_dev *dev) 226f9f38e33SHelen Koike { 227f9f38e33SHelen Koike unsigned int mem_size = nvme_dbbuf_size(dev->db_stride); 228f9f38e33SHelen Koike 229f9f38e33SHelen Koike if (dev->dbbuf_dbs) { 230f9f38e33SHelen Koike dma_free_coherent(dev->dev, mem_size, 231f9f38e33SHelen Koike dev->dbbuf_dbs, dev->dbbuf_dbs_dma_addr); 232f9f38e33SHelen Koike
dev->dbbuf_dbs = NULL; 233f9f38e33SHelen Koike } 234f9f38e33SHelen Koike if (dev->dbbuf_eis) { 235f9f38e33SHelen Koike dma_free_coherent(dev->dev, mem_size, 236f9f38e33SHelen Koike dev->dbbuf_eis, dev->dbbuf_eis_dma_addr); 237f9f38e33SHelen Koike dev->dbbuf_eis = NULL; 238f9f38e33SHelen Koike } 239f9f38e33SHelen Koike } 240f9f38e33SHelen Koike 241f9f38e33SHelen Koike static void nvme_dbbuf_init(struct nvme_dev *dev, 242f9f38e33SHelen Koike struct nvme_queue *nvmeq, int qid) 243f9f38e33SHelen Koike { 244f9f38e33SHelen Koike if (!dev->dbbuf_dbs || !qid) 245f9f38e33SHelen Koike return; 246f9f38e33SHelen Koike 247f9f38e33SHelen Koike nvmeq->dbbuf_sq_db = &dev->dbbuf_dbs[sq_idx(qid, dev->db_stride)]; 248f9f38e33SHelen Koike nvmeq->dbbuf_cq_db = &dev->dbbuf_dbs[cq_idx(qid, dev->db_stride)]; 249f9f38e33SHelen Koike nvmeq->dbbuf_sq_ei = &dev->dbbuf_eis[sq_idx(qid, dev->db_stride)]; 250f9f38e33SHelen Koike nvmeq->dbbuf_cq_ei = &dev->dbbuf_eis[cq_idx(qid, dev->db_stride)]; 251f9f38e33SHelen Koike } 252f9f38e33SHelen Koike 253f9f38e33SHelen Koike static void nvme_dbbuf_set(struct nvme_dev *dev) 254f9f38e33SHelen Koike { 255f9f38e33SHelen Koike struct nvme_command c; 256f9f38e33SHelen Koike 257f9f38e33SHelen Koike if (!dev->dbbuf_dbs) 258f9f38e33SHelen Koike return; 259f9f38e33SHelen Koike 260f9f38e33SHelen Koike memset(&c, 0, sizeof(c)); 261f9f38e33SHelen Koike c.dbbuf.opcode = nvme_admin_dbbuf; 262f9f38e33SHelen Koike c.dbbuf.prp1 = cpu_to_le64(dev->dbbuf_dbs_dma_addr); 263f9f38e33SHelen Koike c.dbbuf.prp2 = cpu_to_le64(dev->dbbuf_eis_dma_addr); 264f9f38e33SHelen Koike 265f9f38e33SHelen Koike if (nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0)) { 266f9f38e33SHelen Koike dev_warn(dev->dev, "unable to set dbbuf\n"); 267f9f38e33SHelen Koike /* Free memory and continue on */ 268f9f38e33SHelen Koike nvme_dbbuf_dma_free(dev); 269f9f38e33SHelen Koike } 270f9f38e33SHelen Koike } 271f9f38e33SHelen Koike 272f9f38e33SHelen Koike static inline int nvme_dbbuf_need_event(u16 event_idx, u16 new_idx, u16 old) 273f9f38e33SHelen Koike { 274f9f38e33SHelen Koike return (u16)(new_idx - event_idx - 1) < (u16)(new_idx - old); 275f9f38e33SHelen Koike } 276f9f38e33SHelen Koike 277f9f38e33SHelen Koike /* Update dbbuf and return true if an MMIO is required */ 278f9f38e33SHelen Koike static bool nvme_dbbuf_update_and_check_event(u16 value, u32 *dbbuf_db, 279f9f38e33SHelen Koike volatile u32 *dbbuf_ei) 280f9f38e33SHelen Koike { 281f9f38e33SHelen Koike if (dbbuf_db) { 282f9f38e33SHelen Koike u16 old_value; 283f9f38e33SHelen Koike 284f9f38e33SHelen Koike /* 285f9f38e33SHelen Koike * Ensure that the queue is written before updating 286f9f38e33SHelen Koike * the doorbell in memory 287f9f38e33SHelen Koike */ 288f9f38e33SHelen Koike wmb(); 289f9f38e33SHelen Koike 290f9f38e33SHelen Koike old_value = *dbbuf_db; 291f9f38e33SHelen Koike *dbbuf_db = value; 292f9f38e33SHelen Koike 293f9f38e33SHelen Koike if (!nvme_dbbuf_need_event(*dbbuf_ei, value, old_value)) 294f9f38e33SHelen Koike return false; 295f9f38e33SHelen Koike } 296f9f38e33SHelen Koike 297f9f38e33SHelen Koike return true; 29857dacad5SJay Sternberg } 29957dacad5SJay Sternberg 30057dacad5SJay Sternberg /* 30157dacad5SJay Sternberg * Max size of iod being embedded in the request payload 30257dacad5SJay Sternberg */ 30357dacad5SJay Sternberg #define NVME_INT_PAGES 2 3045fd4ce1bSChristoph Hellwig #define NVME_INT_BYTES(dev) (NVME_INT_PAGES * (dev)->ctrl.page_size) 30557dacad5SJay Sternberg 30657dacad5SJay Sternberg /* 30757dacad5SJay Sternberg * Will slightly overestimate the 
number of pages needed. This is OK 30857dacad5SJay Sternberg * as it only leads to a small amount of wasted memory for the lifetime of 30957dacad5SJay Sternberg * the I/O. 31057dacad5SJay Sternberg */ 31157dacad5SJay Sternberg static int nvme_npages(unsigned size, struct nvme_dev *dev) 31257dacad5SJay Sternberg { 3135fd4ce1bSChristoph Hellwig unsigned nprps = DIV_ROUND_UP(size + dev->ctrl.page_size, 3145fd4ce1bSChristoph Hellwig dev->ctrl.page_size); 31557dacad5SJay Sternberg return DIV_ROUND_UP(8 * nprps, PAGE_SIZE - 8); 31657dacad5SJay Sternberg } 31757dacad5SJay Sternberg 318f4800d6dSChristoph Hellwig static unsigned int nvme_iod_alloc_size(struct nvme_dev *dev, 319f4800d6dSChristoph Hellwig unsigned int size, unsigned int nseg) 320f4800d6dSChristoph Hellwig { 321f4800d6dSChristoph Hellwig return sizeof(__le64 *) * nvme_npages(size, dev) + 322f4800d6dSChristoph Hellwig sizeof(struct scatterlist) * nseg; 323f4800d6dSChristoph Hellwig } 324f4800d6dSChristoph Hellwig 32557dacad5SJay Sternberg static unsigned int nvme_cmd_size(struct nvme_dev *dev) 32657dacad5SJay Sternberg { 327f4800d6dSChristoph Hellwig return sizeof(struct nvme_iod) + 328f4800d6dSChristoph Hellwig nvme_iod_alloc_size(dev, NVME_INT_BYTES(dev), NVME_INT_PAGES); 32957dacad5SJay Sternberg } 33057dacad5SJay Sternberg 33157dacad5SJay Sternberg static int nvme_admin_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, 33257dacad5SJay Sternberg unsigned int hctx_idx) 33357dacad5SJay Sternberg { 33457dacad5SJay Sternberg struct nvme_dev *dev = data; 33557dacad5SJay Sternberg struct nvme_queue *nvmeq = dev->queues[0]; 33657dacad5SJay Sternberg 33757dacad5SJay Sternberg WARN_ON(hctx_idx != 0); 33857dacad5SJay Sternberg WARN_ON(dev->admin_tagset.tags[0] != hctx->tags); 33957dacad5SJay Sternberg WARN_ON(nvmeq->tags); 34057dacad5SJay Sternberg 34157dacad5SJay Sternberg hctx->driver_data = nvmeq; 34257dacad5SJay Sternberg nvmeq->tags = &dev->admin_tagset.tags[0]; 34357dacad5SJay Sternberg return 0; 34457dacad5SJay Sternberg } 34557dacad5SJay Sternberg 34657dacad5SJay Sternberg static void nvme_admin_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx) 34757dacad5SJay Sternberg { 34857dacad5SJay Sternberg struct nvme_queue *nvmeq = hctx->driver_data; 34957dacad5SJay Sternberg 35057dacad5SJay Sternberg nvmeq->tags = NULL; 35157dacad5SJay Sternberg } 35257dacad5SJay Sternberg 353d6296d39SChristoph Hellwig static int nvme_admin_init_request(struct blk_mq_tag_set *set, 354d6296d39SChristoph Hellwig struct request *req, unsigned int hctx_idx, 35557dacad5SJay Sternberg unsigned int numa_node) 35657dacad5SJay Sternberg { 357d6296d39SChristoph Hellwig struct nvme_dev *dev = set->driver_data; 358f4800d6dSChristoph Hellwig struct nvme_iod *iod = blk_mq_rq_to_pdu(req); 35957dacad5SJay Sternberg struct nvme_queue *nvmeq = dev->queues[0]; 36057dacad5SJay Sternberg 36157dacad5SJay Sternberg BUG_ON(!nvmeq); 362f4800d6dSChristoph Hellwig iod->nvmeq = nvmeq; 36357dacad5SJay Sternberg return 0; 36457dacad5SJay Sternberg } 36557dacad5SJay Sternberg 36657dacad5SJay Sternberg static int nvme_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, 36757dacad5SJay Sternberg unsigned int hctx_idx) 36857dacad5SJay Sternberg { 36957dacad5SJay Sternberg struct nvme_dev *dev = data; 37057dacad5SJay Sternberg struct nvme_queue *nvmeq = dev->queues[hctx_idx + 1]; 37157dacad5SJay Sternberg 37257dacad5SJay Sternberg if (!nvmeq->tags) 37357dacad5SJay Sternberg nvmeq->tags = &dev->tagset.tags[hctx_idx]; 37457dacad5SJay Sternberg 37557dacad5SJay Sternberg 
WARN_ON(dev->tagset.tags[hctx_idx] != hctx->tags); 37657dacad5SJay Sternberg hctx->driver_data = nvmeq; 37757dacad5SJay Sternberg return 0; 37857dacad5SJay Sternberg } 37957dacad5SJay Sternberg 380d6296d39SChristoph Hellwig static int nvme_init_request(struct blk_mq_tag_set *set, struct request *req, 381d6296d39SChristoph Hellwig unsigned int hctx_idx, unsigned int numa_node) 38257dacad5SJay Sternberg { 383d6296d39SChristoph Hellwig struct nvme_dev *dev = set->driver_data; 384f4800d6dSChristoph Hellwig struct nvme_iod *iod = blk_mq_rq_to_pdu(req); 38557dacad5SJay Sternberg struct nvme_queue *nvmeq = dev->queues[hctx_idx + 1]; 38657dacad5SJay Sternberg 38757dacad5SJay Sternberg BUG_ON(!nvmeq); 388f4800d6dSChristoph Hellwig iod->nvmeq = nvmeq; 38957dacad5SJay Sternberg return 0; 39057dacad5SJay Sternberg } 39157dacad5SJay Sternberg 392dca51e78SChristoph Hellwig static int nvme_pci_map_queues(struct blk_mq_tag_set *set) 393dca51e78SChristoph Hellwig { 394dca51e78SChristoph Hellwig struct nvme_dev *dev = set->driver_data; 395dca51e78SChristoph Hellwig 396dca51e78SChristoph Hellwig return blk_mq_pci_map_queues(set, to_pci_dev(dev->dev)); 397dca51e78SChristoph Hellwig } 398dca51e78SChristoph Hellwig 39957dacad5SJay Sternberg /** 400adf68f21SChristoph Hellwig * __nvme_submit_cmd() - Copy a command into a queue and ring the doorbell 40157dacad5SJay Sternberg * @nvmeq: The queue to use 40257dacad5SJay Sternberg * @cmd: The command to send 40357dacad5SJay Sternberg * 40457dacad5SJay Sternberg * Safe to use from interrupt context 40557dacad5SJay Sternberg */ 40657dacad5SJay Sternberg static void __nvme_submit_cmd(struct nvme_queue *nvmeq, 40757dacad5SJay Sternberg struct nvme_command *cmd) 40857dacad5SJay Sternberg { 40957dacad5SJay Sternberg u16 tail = nvmeq->sq_tail; 41057dacad5SJay Sternberg 41157dacad5SJay Sternberg if (nvmeq->sq_cmds_io) 41257dacad5SJay Sternberg memcpy_toio(&nvmeq->sq_cmds_io[tail], cmd, sizeof(*cmd)); 41357dacad5SJay Sternberg else 41457dacad5SJay Sternberg memcpy(&nvmeq->sq_cmds[tail], cmd, sizeof(*cmd)); 41557dacad5SJay Sternberg 41657dacad5SJay Sternberg if (++tail == nvmeq->q_depth) 41757dacad5SJay Sternberg tail = 0; 418f9f38e33SHelen Koike if (nvme_dbbuf_update_and_check_event(tail, nvmeq->dbbuf_sq_db, 419f9f38e33SHelen Koike nvmeq->dbbuf_sq_ei)) 42057dacad5SJay Sternberg writel(tail, nvmeq->q_db); 42157dacad5SJay Sternberg nvmeq->sq_tail = tail; 42257dacad5SJay Sternberg } 42357dacad5SJay Sternberg 424f4800d6dSChristoph Hellwig static __le64 **iod_list(struct request *req) 42557dacad5SJay Sternberg { 426f4800d6dSChristoph Hellwig struct nvme_iod *iod = blk_mq_rq_to_pdu(req); 427f9d03f96SChristoph Hellwig return (__le64 **)(iod->sg + blk_rq_nr_phys_segments(req)); 42857dacad5SJay Sternberg } 42957dacad5SJay Sternberg 430b131c61dSChristoph Hellwig static int nvme_init_iod(struct request *rq, struct nvme_dev *dev) 43157dacad5SJay Sternberg { 432f4800d6dSChristoph Hellwig struct nvme_iod *iod = blk_mq_rq_to_pdu(rq); 433f9d03f96SChristoph Hellwig int nseg = blk_rq_nr_phys_segments(rq); 434b131c61dSChristoph Hellwig unsigned int size = blk_rq_payload_bytes(rq); 435f4800d6dSChristoph Hellwig 436f4800d6dSChristoph Hellwig if (nseg > NVME_INT_PAGES || size > NVME_INT_BYTES(dev)) { 437f4800d6dSChristoph Hellwig iod->sg = kmalloc(nvme_iod_alloc_size(dev, size, nseg), GFP_ATOMIC); 438f4800d6dSChristoph Hellwig if (!iod->sg) 439f4800d6dSChristoph Hellwig return BLK_MQ_RQ_QUEUE_BUSY; 440f4800d6dSChristoph Hellwig } else { 441f4800d6dSChristoph Hellwig iod->sg = iod->inline_sg; 
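/* Added note: small requests reuse the scatterlist space reserved inline in the request PDU (see nvme_cmd_size), so no extra allocation is needed in the I/O path. */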
44257dacad5SJay Sternberg } 44357dacad5SJay Sternberg 444f4800d6dSChristoph Hellwig iod->aborted = 0; 44557dacad5SJay Sternberg iod->npages = -1; 44657dacad5SJay Sternberg iod->nents = 0; 447f4800d6dSChristoph Hellwig iod->length = size; 448f80ec966SKeith Busch 449bac0000aSOmar Sandoval return BLK_MQ_RQ_QUEUE_OK; 45057dacad5SJay Sternberg } 45157dacad5SJay Sternberg 452f4800d6dSChristoph Hellwig static void nvme_free_iod(struct nvme_dev *dev, struct request *req) 45357dacad5SJay Sternberg { 454f4800d6dSChristoph Hellwig struct nvme_iod *iod = blk_mq_rq_to_pdu(req); 4555fd4ce1bSChristoph Hellwig const int last_prp = dev->ctrl.page_size / 8 - 1; 45657dacad5SJay Sternberg int i; 457f4800d6dSChristoph Hellwig __le64 **list = iod_list(req); 45857dacad5SJay Sternberg dma_addr_t prp_dma = iod->first_dma; 45957dacad5SJay Sternberg 46057dacad5SJay Sternberg if (iod->npages == 0) 46157dacad5SJay Sternberg dma_pool_free(dev->prp_small_pool, list[0], prp_dma); 46257dacad5SJay Sternberg for (i = 0; i < iod->npages; i++) { 46357dacad5SJay Sternberg __le64 *prp_list = list[i]; 46457dacad5SJay Sternberg dma_addr_t next_prp_dma = le64_to_cpu(prp_list[last_prp]); 46557dacad5SJay Sternberg dma_pool_free(dev->prp_page_pool, prp_list, prp_dma); 46657dacad5SJay Sternberg prp_dma = next_prp_dma; 46757dacad5SJay Sternberg } 46857dacad5SJay Sternberg 469f4800d6dSChristoph Hellwig if (iod->sg != iod->inline_sg) 470f4800d6dSChristoph Hellwig kfree(iod->sg); 47157dacad5SJay Sternberg } 47257dacad5SJay Sternberg 47357dacad5SJay Sternberg #ifdef CONFIG_BLK_DEV_INTEGRITY 47457dacad5SJay Sternberg static void nvme_dif_prep(u32 p, u32 v, struct t10_pi_tuple *pi) 47557dacad5SJay Sternberg { 47657dacad5SJay Sternberg if (be32_to_cpu(pi->ref_tag) == v) 47757dacad5SJay Sternberg pi->ref_tag = cpu_to_be32(p); 47857dacad5SJay Sternberg } 47957dacad5SJay Sternberg 48057dacad5SJay Sternberg static void nvme_dif_complete(u32 p, u32 v, struct t10_pi_tuple *pi) 48157dacad5SJay Sternberg { 48257dacad5SJay Sternberg if (be32_to_cpu(pi->ref_tag) == p) 48357dacad5SJay Sternberg pi->ref_tag = cpu_to_be32(v); 48457dacad5SJay Sternberg } 48557dacad5SJay Sternberg 48657dacad5SJay Sternberg /** 48757dacad5SJay Sternberg * nvme_dif_remap - remaps ref tags to bip seed and physical lba 48857dacad5SJay Sternberg * 48957dacad5SJay Sternberg * The virtual start sector is the one that was originally submitted by the 49057dacad5SJay Sternberg * block layer. Due to partitioning, MD/DM cloning, etc. the actual physical 49157dacad5SJay Sternberg * start sector may be different. Remap protection information to match the 49257dacad5SJay Sternberg * physical LBA on writes, and back to the original seed on reads. 49357dacad5SJay Sternberg * 49457dacad5SJay Sternberg * Type 0 and 3 do not have a ref tag, so no remapping required. 
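 * (Added note: types 1 and 2 carry a 32-bit reference tag that the controller checks, so it is rewritten sector by sector below whenever the virtual and physical start sectors differ.)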
49557dacad5SJay Sternberg */ 49657dacad5SJay Sternberg static void nvme_dif_remap(struct request *req, 49757dacad5SJay Sternberg void (*dif_swap)(u32 p, u32 v, struct t10_pi_tuple *pi)) 49857dacad5SJay Sternberg { 49957dacad5SJay Sternberg struct nvme_ns *ns = req->rq_disk->private_data; 50057dacad5SJay Sternberg struct bio_integrity_payload *bip; 50157dacad5SJay Sternberg struct t10_pi_tuple *pi; 50257dacad5SJay Sternberg void *p, *pmap; 50357dacad5SJay Sternberg u32 i, nlb, ts, phys, virt; 50457dacad5SJay Sternberg 50557dacad5SJay Sternberg if (!ns->pi_type || ns->pi_type == NVME_NS_DPS_PI_TYPE3) 50657dacad5SJay Sternberg return; 50757dacad5SJay Sternberg 50857dacad5SJay Sternberg bip = bio_integrity(req->bio); 50957dacad5SJay Sternberg if (!bip) 51057dacad5SJay Sternberg return; 51157dacad5SJay Sternberg 51257dacad5SJay Sternberg pmap = kmap_atomic(bip->bip_vec->bv_page) + bip->bip_vec->bv_offset; 51357dacad5SJay Sternberg 51457dacad5SJay Sternberg p = pmap; 51557dacad5SJay Sternberg virt = bip_get_seed(bip); 51657dacad5SJay Sternberg phys = nvme_block_nr(ns, blk_rq_pos(req)); 51757dacad5SJay Sternberg nlb = (blk_rq_bytes(req) >> ns->lba_shift); 518ac6fc48cSDan Williams ts = ns->disk->queue->integrity.tuple_size; 51957dacad5SJay Sternberg 52057dacad5SJay Sternberg for (i = 0; i < nlb; i++, virt++, phys++) { 52157dacad5SJay Sternberg pi = (struct t10_pi_tuple *)p; 52257dacad5SJay Sternberg dif_swap(phys, virt, pi); 52357dacad5SJay Sternberg p += ts; 52457dacad5SJay Sternberg } 52557dacad5SJay Sternberg kunmap_atomic(pmap); 52657dacad5SJay Sternberg } 52757dacad5SJay Sternberg #else /* CONFIG_BLK_DEV_INTEGRITY */ 52857dacad5SJay Sternberg static void nvme_dif_remap(struct request *req, 52957dacad5SJay Sternberg void (*dif_swap)(u32 p, u32 v, struct t10_pi_tuple *pi)) 53057dacad5SJay Sternberg { 53157dacad5SJay Sternberg } 53257dacad5SJay Sternberg static void nvme_dif_prep(u32 p, u32 v, struct t10_pi_tuple *pi) 53357dacad5SJay Sternberg { 53457dacad5SJay Sternberg } 53557dacad5SJay Sternberg static void nvme_dif_complete(u32 p, u32 v, struct t10_pi_tuple *pi) 53657dacad5SJay Sternberg { 53757dacad5SJay Sternberg } 53857dacad5SJay Sternberg #endif 53957dacad5SJay Sternberg 540b131c61dSChristoph Hellwig static bool nvme_setup_prps(struct nvme_dev *dev, struct request *req) 54157dacad5SJay Sternberg { 542f4800d6dSChristoph Hellwig struct nvme_iod *iod = blk_mq_rq_to_pdu(req); 54357dacad5SJay Sternberg struct dma_pool *pool; 544b131c61dSChristoph Hellwig int length = blk_rq_payload_bytes(req); 54557dacad5SJay Sternberg struct scatterlist *sg = iod->sg; 54657dacad5SJay Sternberg int dma_len = sg_dma_len(sg); 54757dacad5SJay Sternberg u64 dma_addr = sg_dma_address(sg); 5485fd4ce1bSChristoph Hellwig u32 page_size = dev->ctrl.page_size; 54957dacad5SJay Sternberg int offset = dma_addr & (page_size - 1); 55057dacad5SJay Sternberg __le64 *prp_list; 551f4800d6dSChristoph Hellwig __le64 **list = iod_list(req); 55257dacad5SJay Sternberg dma_addr_t prp_dma; 55357dacad5SJay Sternberg int nprps, i; 55457dacad5SJay Sternberg 55557dacad5SJay Sternberg length -= (page_size - offset); 55657dacad5SJay Sternberg if (length <= 0) 55769d2b571SChristoph Hellwig return true; 55857dacad5SJay Sternberg 55957dacad5SJay Sternberg dma_len -= (page_size - offset); 56057dacad5SJay Sternberg if (dma_len) { 56157dacad5SJay Sternberg dma_addr += (page_size - offset); 56257dacad5SJay Sternberg } else { 56357dacad5SJay Sternberg sg = sg_next(sg); 56457dacad5SJay Sternberg dma_addr = sg_dma_address(sg); 56557dacad5SJay 
Sternberg dma_len = sg_dma_len(sg); 56657dacad5SJay Sternberg } 56757dacad5SJay Sternberg 56857dacad5SJay Sternberg if (length <= page_size) { 56957dacad5SJay Sternberg iod->first_dma = dma_addr; 57069d2b571SChristoph Hellwig return true; 57157dacad5SJay Sternberg } 57257dacad5SJay Sternberg 57357dacad5SJay Sternberg nprps = DIV_ROUND_UP(length, page_size); 57457dacad5SJay Sternberg if (nprps <= (256 / 8)) { 57557dacad5SJay Sternberg pool = dev->prp_small_pool; 57657dacad5SJay Sternberg iod->npages = 0; 57757dacad5SJay Sternberg } else { 57857dacad5SJay Sternberg pool = dev->prp_page_pool; 57957dacad5SJay Sternberg iod->npages = 1; 58057dacad5SJay Sternberg } 58157dacad5SJay Sternberg 58269d2b571SChristoph Hellwig prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma); 58357dacad5SJay Sternberg if (!prp_list) { 58457dacad5SJay Sternberg iod->first_dma = dma_addr; 58557dacad5SJay Sternberg iod->npages = -1; 58669d2b571SChristoph Hellwig return false; 58757dacad5SJay Sternberg } 58857dacad5SJay Sternberg list[0] = prp_list; 58957dacad5SJay Sternberg iod->first_dma = prp_dma; 59057dacad5SJay Sternberg i = 0; 59157dacad5SJay Sternberg for (;;) { 59257dacad5SJay Sternberg if (i == page_size >> 3) { 59357dacad5SJay Sternberg __le64 *old_prp_list = prp_list; 59469d2b571SChristoph Hellwig prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma); 59557dacad5SJay Sternberg if (!prp_list) 59669d2b571SChristoph Hellwig return false; 59757dacad5SJay Sternberg list[iod->npages++] = prp_list; 59857dacad5SJay Sternberg prp_list[0] = old_prp_list[i - 1]; 59957dacad5SJay Sternberg old_prp_list[i - 1] = cpu_to_le64(prp_dma); 60057dacad5SJay Sternberg i = 1; 60157dacad5SJay Sternberg } 60257dacad5SJay Sternberg prp_list[i++] = cpu_to_le64(dma_addr); 60357dacad5SJay Sternberg dma_len -= page_size; 60457dacad5SJay Sternberg dma_addr += page_size; 60557dacad5SJay Sternberg length -= page_size; 60657dacad5SJay Sternberg if (length <= 0) 60757dacad5SJay Sternberg break; 60857dacad5SJay Sternberg if (dma_len > 0) 60957dacad5SJay Sternberg continue; 61057dacad5SJay Sternberg BUG_ON(dma_len < 0); 61157dacad5SJay Sternberg sg = sg_next(sg); 61257dacad5SJay Sternberg dma_addr = sg_dma_address(sg); 61357dacad5SJay Sternberg dma_len = sg_dma_len(sg); 61457dacad5SJay Sternberg } 61557dacad5SJay Sternberg 61669d2b571SChristoph Hellwig return true; 61757dacad5SJay Sternberg } 61857dacad5SJay Sternberg 619f4800d6dSChristoph Hellwig static int nvme_map_data(struct nvme_dev *dev, struct request *req, 620b131c61dSChristoph Hellwig struct nvme_command *cmnd) 62157dacad5SJay Sternberg { 622f4800d6dSChristoph Hellwig struct nvme_iod *iod = blk_mq_rq_to_pdu(req); 623ba1ca37eSChristoph Hellwig struct request_queue *q = req->q; 624ba1ca37eSChristoph Hellwig enum dma_data_direction dma_dir = rq_data_dir(req) ? 
625ba1ca37eSChristoph Hellwig DMA_TO_DEVICE : DMA_FROM_DEVICE; 626ba1ca37eSChristoph Hellwig int ret = BLK_MQ_RQ_QUEUE_ERROR; 62757dacad5SJay Sternberg 628f9d03f96SChristoph Hellwig sg_init_table(iod->sg, blk_rq_nr_phys_segments(req)); 629ba1ca37eSChristoph Hellwig iod->nents = blk_rq_map_sg(q, req, iod->sg); 630ba1ca37eSChristoph Hellwig if (!iod->nents) 631ba1ca37eSChristoph Hellwig goto out; 632ba1ca37eSChristoph Hellwig 633ba1ca37eSChristoph Hellwig ret = BLK_MQ_RQ_QUEUE_BUSY; 6342b6b535dSMauricio Faria de Oliveira if (!dma_map_sg_attrs(dev->dev, iod->sg, iod->nents, dma_dir, 6352b6b535dSMauricio Faria de Oliveira DMA_ATTR_NO_WARN)) 636ba1ca37eSChristoph Hellwig goto out; 637ba1ca37eSChristoph Hellwig 638b131c61dSChristoph Hellwig if (!nvme_setup_prps(dev, req)) 639ba1ca37eSChristoph Hellwig goto out_unmap; 640ba1ca37eSChristoph Hellwig 641ba1ca37eSChristoph Hellwig ret = BLK_MQ_RQ_QUEUE_ERROR; 642ba1ca37eSChristoph Hellwig if (blk_integrity_rq(req)) { 643ba1ca37eSChristoph Hellwig if (blk_rq_count_integrity_sg(q, req->bio) != 1) 644ba1ca37eSChristoph Hellwig goto out_unmap; 645ba1ca37eSChristoph Hellwig 646bf684057SChristoph Hellwig sg_init_table(&iod->meta_sg, 1); 647bf684057SChristoph Hellwig if (blk_rq_map_integrity_sg(q, req->bio, &iod->meta_sg) != 1) 648ba1ca37eSChristoph Hellwig goto out_unmap; 649ba1ca37eSChristoph Hellwig 650ba1ca37eSChristoph Hellwig if (rq_data_dir(req)) 651ba1ca37eSChristoph Hellwig nvme_dif_remap(req, nvme_dif_prep); 652ba1ca37eSChristoph Hellwig 653bf684057SChristoph Hellwig if (!dma_map_sg(dev->dev, &iod->meta_sg, 1, dma_dir)) 654ba1ca37eSChristoph Hellwig goto out_unmap; 65557dacad5SJay Sternberg } 65657dacad5SJay Sternberg 657eb793e2cSChristoph Hellwig cmnd->rw.dptr.prp1 = cpu_to_le64(sg_dma_address(iod->sg)); 658eb793e2cSChristoph Hellwig cmnd->rw.dptr.prp2 = cpu_to_le64(iod->first_dma); 659ba1ca37eSChristoph Hellwig if (blk_integrity_rq(req)) 660bf684057SChristoph Hellwig cmnd->rw.metadata = cpu_to_le64(sg_dma_address(&iod->meta_sg)); 661ba1ca37eSChristoph Hellwig return BLK_MQ_RQ_QUEUE_OK; 662ba1ca37eSChristoph Hellwig 663ba1ca37eSChristoph Hellwig out_unmap: 664ba1ca37eSChristoph Hellwig dma_unmap_sg(dev->dev, iod->sg, iod->nents, dma_dir); 665ba1ca37eSChristoph Hellwig out: 666ba1ca37eSChristoph Hellwig return ret; 66757dacad5SJay Sternberg } 66857dacad5SJay Sternberg 669f4800d6dSChristoph Hellwig static void nvme_unmap_data(struct nvme_dev *dev, struct request *req) 670d4f6c3abSChristoph Hellwig { 671f4800d6dSChristoph Hellwig struct nvme_iod *iod = blk_mq_rq_to_pdu(req); 672d4f6c3abSChristoph Hellwig enum dma_data_direction dma_dir = rq_data_dir(req) ? 673d4f6c3abSChristoph Hellwig DMA_TO_DEVICE : DMA_FROM_DEVICE; 674d4f6c3abSChristoph Hellwig 675d4f6c3abSChristoph Hellwig if (iod->nents) { 676d4f6c3abSChristoph Hellwig dma_unmap_sg(dev->dev, iod->sg, iod->nents, dma_dir); 677d4f6c3abSChristoph Hellwig if (blk_integrity_rq(req)) { 678d4f6c3abSChristoph Hellwig if (!rq_data_dir(req)) 679d4f6c3abSChristoph Hellwig nvme_dif_remap(req, nvme_dif_complete); 680bf684057SChristoph Hellwig dma_unmap_sg(dev->dev, &iod->meta_sg, 1, dma_dir); 681d4f6c3abSChristoph Hellwig } 682d4f6c3abSChristoph Hellwig } 683d4f6c3abSChristoph Hellwig 684f9d03f96SChristoph Hellwig nvme_cleanup_cmd(req); 685f4800d6dSChristoph Hellwig nvme_free_iod(dev, req); 68657dacad5SJay Sternberg } 68757dacad5SJay Sternberg 68857dacad5SJay Sternberg /* 68957dacad5SJay Sternberg * NOTE: ns is NULL when called on the admin queue. 
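 * (Added note: admin commands are issued through the admin tag set and never carry a namespace, so hctx->queue->queuedata is NULL there.)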
69057dacad5SJay Sternberg */ 69157dacad5SJay Sternberg static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx, 69257dacad5SJay Sternberg const struct blk_mq_queue_data *bd) 69357dacad5SJay Sternberg { 69457dacad5SJay Sternberg struct nvme_ns *ns = hctx->queue->queuedata; 69557dacad5SJay Sternberg struct nvme_queue *nvmeq = hctx->driver_data; 69657dacad5SJay Sternberg struct nvme_dev *dev = nvmeq->dev; 69757dacad5SJay Sternberg struct request *req = bd->rq; 698ba1ca37eSChristoph Hellwig struct nvme_command cmnd; 699ba1ca37eSChristoph Hellwig int ret = BLK_MQ_RQ_QUEUE_OK; 70057dacad5SJay Sternberg 70157dacad5SJay Sternberg /* 70257dacad5SJay Sternberg * If formatted with metadata, require that the block layer provide a buffer 70357dacad5SJay Sternberg * unless this namespace is formatted such that the metadata can be 70457dacad5SJay Sternberg * stripped/generated by the controller with PRACT=1. 70557dacad5SJay Sternberg */ 70657dacad5SJay Sternberg if (ns && ns->ms && !blk_integrity_rq(req)) { 70757dacad5SJay Sternberg if (!(ns->pi_type && ns->ms == 8) && 70857292b58SChristoph Hellwig !blk_rq_is_passthrough(req)) { 709eee417b0SChristoph Hellwig blk_mq_end_request(req, -EFAULT); 71057dacad5SJay Sternberg return BLK_MQ_RQ_QUEUE_OK; 71157dacad5SJay Sternberg } 71257dacad5SJay Sternberg } 71357dacad5SJay Sternberg 714f9d03f96SChristoph Hellwig ret = nvme_setup_cmd(ns, req, &cmnd); 715bac0000aSOmar Sandoval if (ret != BLK_MQ_RQ_QUEUE_OK) 716f4800d6dSChristoph Hellwig return ret; 71757dacad5SJay Sternberg 718b131c61dSChristoph Hellwig ret = nvme_init_iod(req, dev); 719bac0000aSOmar Sandoval if (ret != BLK_MQ_RQ_QUEUE_OK) 720f9d03f96SChristoph Hellwig goto out_free_cmd; 72157dacad5SJay Sternberg 722f9d03f96SChristoph Hellwig if (blk_rq_nr_phys_segments(req)) 723b131c61dSChristoph Hellwig ret = nvme_map_data(dev, req, &cmnd); 724ba1ca37eSChristoph Hellwig 725bac0000aSOmar Sandoval if (ret != BLK_MQ_RQ_QUEUE_OK) 726f9d03f96SChristoph Hellwig goto out_cleanup_iod; 727ba1ca37eSChristoph Hellwig 728aae239e1SChristoph Hellwig blk_mq_start_request(req); 729ba1ca37eSChristoph Hellwig 730ba1ca37eSChristoph Hellwig spin_lock_irq(&nvmeq->q_lock); 731ae1fba20SKeith Busch if (unlikely(nvmeq->cq_vector < 0)) { 73269d9a99cSKeith Busch ret = BLK_MQ_RQ_QUEUE_ERROR; 733ae1fba20SKeith Busch spin_unlock_irq(&nvmeq->q_lock); 734f9d03f96SChristoph Hellwig goto out_cleanup_iod; 735ae1fba20SKeith Busch } 736ba1ca37eSChristoph Hellwig __nvme_submit_cmd(nvmeq, &cmnd); 73757dacad5SJay Sternberg nvme_process_cq(nvmeq); 73857dacad5SJay Sternberg spin_unlock_irq(&nvmeq->q_lock); 73957dacad5SJay Sternberg return BLK_MQ_RQ_QUEUE_OK; 740f9d03f96SChristoph Hellwig out_cleanup_iod: 741f4800d6dSChristoph Hellwig nvme_free_iod(dev, req); 742f9d03f96SChristoph Hellwig out_free_cmd: 743f9d03f96SChristoph Hellwig nvme_cleanup_cmd(req); 744ba1ca37eSChristoph Hellwig return ret; 74557dacad5SJay Sternberg } 74657dacad5SJay Sternberg 74777f02a7aSChristoph Hellwig static void nvme_pci_complete_rq(struct request *req) 748eee417b0SChristoph Hellwig { 749f4800d6dSChristoph Hellwig struct nvme_iod *iod = blk_mq_rq_to_pdu(req); 750eee417b0SChristoph Hellwig 75177f02a7aSChristoph Hellwig nvme_unmap_data(iod->nvmeq->dev, req); 75277f02a7aSChristoph Hellwig nvme_complete_rq(req); 75357dacad5SJay Sternberg } 75457dacad5SJay Sternberg 755d783e0bdSMarta Rybczynska /* We read the CQE phase first to check if the rest of the entry is valid */ 756d783e0bdSMarta Rybczynska static inline bool nvme_cqe_valid(struct nvme_queue *nvmeq, u16 head, 757d783e0bdSMarta
Rybczynska u16 phase) 758d783e0bdSMarta Rybczynska { 759d783e0bdSMarta Rybczynska return (le16_to_cpu(nvmeq->cqes[head].status) & 1) == phase; 760d783e0bdSMarta Rybczynska } 761d783e0bdSMarta Rybczynska 762a0fa9647SJens Axboe static void __nvme_process_cq(struct nvme_queue *nvmeq, unsigned int *tag) 76357dacad5SJay Sternberg { 76457dacad5SJay Sternberg u16 head, phase; 76557dacad5SJay Sternberg 76657dacad5SJay Sternberg head = nvmeq->cq_head; 76757dacad5SJay Sternberg phase = nvmeq->cq_phase; 76857dacad5SJay Sternberg 769d783e0bdSMarta Rybczynska while (nvme_cqe_valid(nvmeq, head, phase)) { 77057dacad5SJay Sternberg struct nvme_completion cqe = nvmeq->cqes[head]; 771eee417b0SChristoph Hellwig struct request *req; 772adf68f21SChristoph Hellwig 77357dacad5SJay Sternberg if (++head == nvmeq->q_depth) { 77457dacad5SJay Sternberg head = 0; 77557dacad5SJay Sternberg phase = !phase; 77657dacad5SJay Sternberg } 777adf68f21SChristoph Hellwig 778a0fa9647SJens Axboe if (tag && *tag == cqe.command_id) 779a0fa9647SJens Axboe *tag = -1; 780adf68f21SChristoph Hellwig 781aae239e1SChristoph Hellwig if (unlikely(cqe.command_id >= nvmeq->q_depth)) { 7821b3c47c1SSagi Grimberg dev_warn(nvmeq->dev->ctrl.device, 783aae239e1SChristoph Hellwig "invalid id %d completed on queue %d\n", 784aae239e1SChristoph Hellwig cqe.command_id, le16_to_cpu(cqe.sq_id)); 785aae239e1SChristoph Hellwig continue; 786aae239e1SChristoph Hellwig } 787aae239e1SChristoph Hellwig 788adf68f21SChristoph Hellwig /* 789adf68f21SChristoph Hellwig * AEN requests are special as they don't time out and can 790adf68f21SChristoph Hellwig * survive any kind of queue freeze and often don't respond to 791adf68f21SChristoph Hellwig * aborts. We don't even bother to allocate a struct request 792adf68f21SChristoph Hellwig * for them but rather special case them here. 
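 * (Added note: they are recognised here purely by their command ID, which the submission path sets to NVME_AQ_BLKMQ_DEPTH + aer_idx.)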
793adf68f21SChristoph Hellwig */ 794adf68f21SChristoph Hellwig if (unlikely(nvmeq->qid == 0 && 795adf68f21SChristoph Hellwig cqe.command_id >= NVME_AQ_BLKMQ_DEPTH)) { 7967bf58533SChristoph Hellwig nvme_complete_async_event(&nvmeq->dev->ctrl, 7977bf58533SChristoph Hellwig cqe.status, &cqe.result); 798adf68f21SChristoph Hellwig continue; 799adf68f21SChristoph Hellwig } 800adf68f21SChristoph Hellwig 801eee417b0SChristoph Hellwig req = blk_mq_tag_to_rq(*nvmeq->tags, cqe.command_id); 80227fa9bc5SChristoph Hellwig nvme_end_request(req, cqe.status, cqe.result); 80357dacad5SJay Sternberg } 80457dacad5SJay Sternberg 80557dacad5SJay Sternberg if (head == nvmeq->cq_head && phase == nvmeq->cq_phase) 806a0fa9647SJens Axboe return; 80757dacad5SJay Sternberg 808604e8c8dSKeith Busch if (likely(nvmeq->cq_vector >= 0)) 809f9f38e33SHelen Koike if (nvme_dbbuf_update_and_check_event(head, nvmeq->dbbuf_cq_db, 810f9f38e33SHelen Koike nvmeq->dbbuf_cq_ei)) 81157dacad5SJay Sternberg writel(head, nvmeq->q_db + nvmeq->dev->db_stride); 81257dacad5SJay Sternberg nvmeq->cq_head = head; 81357dacad5SJay Sternberg nvmeq->cq_phase = phase; 81457dacad5SJay Sternberg 81557dacad5SJay Sternberg nvmeq->cqe_seen = 1; 816a0fa9647SJens Axboe } 817a0fa9647SJens Axboe 818a0fa9647SJens Axboe static void nvme_process_cq(struct nvme_queue *nvmeq) 819a0fa9647SJens Axboe { 820a0fa9647SJens Axboe __nvme_process_cq(nvmeq, NULL); 82157dacad5SJay Sternberg } 82257dacad5SJay Sternberg 82357dacad5SJay Sternberg static irqreturn_t nvme_irq(int irq, void *data) 82457dacad5SJay Sternberg { 82557dacad5SJay Sternberg irqreturn_t result; 82657dacad5SJay Sternberg struct nvme_queue *nvmeq = data; 82757dacad5SJay Sternberg spin_lock(&nvmeq->q_lock); 82857dacad5SJay Sternberg nvme_process_cq(nvmeq); 82957dacad5SJay Sternberg result = nvmeq->cqe_seen ? 
IRQ_HANDLED : IRQ_NONE; 83057dacad5SJay Sternberg nvmeq->cqe_seen = 0; 83157dacad5SJay Sternberg spin_unlock(&nvmeq->q_lock); 83257dacad5SJay Sternberg return result; 83357dacad5SJay Sternberg } 83457dacad5SJay Sternberg 83557dacad5SJay Sternberg static irqreturn_t nvme_irq_check(int irq, void *data) 83657dacad5SJay Sternberg { 83757dacad5SJay Sternberg struct nvme_queue *nvmeq = data; 838d783e0bdSMarta Rybczynska if (nvme_cqe_valid(nvmeq, nvmeq->cq_head, nvmeq->cq_phase)) 83957dacad5SJay Sternberg return IRQ_WAKE_THREAD; 840d783e0bdSMarta Rybczynska return IRQ_NONE; 84157dacad5SJay Sternberg } 84257dacad5SJay Sternberg 8437776db1cSKeith Busch static int __nvme_poll(struct nvme_queue *nvmeq, unsigned int tag) 844a0fa9647SJens Axboe { 845d783e0bdSMarta Rybczynska if (nvme_cqe_valid(nvmeq, nvmeq->cq_head, nvmeq->cq_phase)) { 846a0fa9647SJens Axboe spin_lock_irq(&nvmeq->q_lock); 847a0fa9647SJens Axboe __nvme_process_cq(nvmeq, &tag); 848a0fa9647SJens Axboe spin_unlock_irq(&nvmeq->q_lock); 849a0fa9647SJens Axboe 850a0fa9647SJens Axboe if (tag == -1) 851a0fa9647SJens Axboe return 1; 852a0fa9647SJens Axboe } 853a0fa9647SJens Axboe 854a0fa9647SJens Axboe return 0; 855a0fa9647SJens Axboe } 856a0fa9647SJens Axboe 8577776db1cSKeith Busch static int nvme_poll(struct blk_mq_hw_ctx *hctx, unsigned int tag) 8587776db1cSKeith Busch { 8597776db1cSKeith Busch struct nvme_queue *nvmeq = hctx->driver_data; 8607776db1cSKeith Busch 8617776db1cSKeith Busch return __nvme_poll(nvmeq, tag); 8627776db1cSKeith Busch } 8637776db1cSKeith Busch 864f866fc42SChristoph Hellwig static void nvme_pci_submit_async_event(struct nvme_ctrl *ctrl, int aer_idx) 86557dacad5SJay Sternberg { 866f866fc42SChristoph Hellwig struct nvme_dev *dev = to_nvme_dev(ctrl); 8679396dec9SChristoph Hellwig struct nvme_queue *nvmeq = dev->queues[0]; 86857dacad5SJay Sternberg struct nvme_command c; 86957dacad5SJay Sternberg 87057dacad5SJay Sternberg memset(&c, 0, sizeof(c)); 87157dacad5SJay Sternberg c.common.opcode = nvme_admin_async_event; 872f866fc42SChristoph Hellwig c.common.command_id = NVME_AQ_BLKMQ_DEPTH + aer_idx; 87357dacad5SJay Sternberg 8749396dec9SChristoph Hellwig spin_lock_irq(&nvmeq->q_lock); 8759396dec9SChristoph Hellwig __nvme_submit_cmd(nvmeq, &c); 8769396dec9SChristoph Hellwig spin_unlock_irq(&nvmeq->q_lock); 87757dacad5SJay Sternberg } 87857dacad5SJay Sternberg 87957dacad5SJay Sternberg static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id) 88057dacad5SJay Sternberg { 88157dacad5SJay Sternberg struct nvme_command c; 88257dacad5SJay Sternberg 88357dacad5SJay Sternberg memset(&c, 0, sizeof(c)); 88457dacad5SJay Sternberg c.delete_queue.opcode = opcode; 88557dacad5SJay Sternberg c.delete_queue.qid = cpu_to_le16(id); 88657dacad5SJay Sternberg 8871c63dc66SChristoph Hellwig return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0); 88857dacad5SJay Sternberg } 88957dacad5SJay Sternberg 89057dacad5SJay Sternberg static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid, 89157dacad5SJay Sternberg struct nvme_queue *nvmeq) 89257dacad5SJay Sternberg { 89357dacad5SJay Sternberg struct nvme_command c; 89457dacad5SJay Sternberg int flags = NVME_QUEUE_PHYS_CONTIG | NVME_CQ_IRQ_ENABLED; 89557dacad5SJay Sternberg 89657dacad5SJay Sternberg /* 89757dacad5SJay Sternberg * Note: we (ab)use the fact that the prp fields survive if no data 89857dacad5SJay Sternberg * is attached to the request.
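 * (Added note: here prp1 carries the physical base address of the completion queue.)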
89957dacad5SJay Sternberg */ 90057dacad5SJay Sternberg memset(&c, 0, sizeof(c)); 90157dacad5SJay Sternberg c.create_cq.opcode = nvme_admin_create_cq; 90257dacad5SJay Sternberg c.create_cq.prp1 = cpu_to_le64(nvmeq->cq_dma_addr); 90357dacad5SJay Sternberg c.create_cq.cqid = cpu_to_le16(qid); 90457dacad5SJay Sternberg c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1); 90557dacad5SJay Sternberg c.create_cq.cq_flags = cpu_to_le16(flags); 90657dacad5SJay Sternberg c.create_cq.irq_vector = cpu_to_le16(nvmeq->cq_vector); 90757dacad5SJay Sternberg 9081c63dc66SChristoph Hellwig return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0); 90957dacad5SJay Sternberg } 91057dacad5SJay Sternberg 91157dacad5SJay Sternberg static int adapter_alloc_sq(struct nvme_dev *dev, u16 qid, 91257dacad5SJay Sternberg struct nvme_queue *nvmeq) 91357dacad5SJay Sternberg { 91457dacad5SJay Sternberg struct nvme_command c; 91581c1cd98SKeith Busch int flags = NVME_QUEUE_PHYS_CONTIG; 91657dacad5SJay Sternberg 91757dacad5SJay Sternberg /* 91857dacad5SJay Sternberg * Note: we (ab)use the fact that the prp fields survive if no data 91957dacad5SJay Sternberg * is attached to the request. 92057dacad5SJay Sternberg */ 92157dacad5SJay Sternberg memset(&c, 0, sizeof(c)); 92257dacad5SJay Sternberg c.create_sq.opcode = nvme_admin_create_sq; 92357dacad5SJay Sternberg c.create_sq.prp1 = cpu_to_le64(nvmeq->sq_dma_addr); 92457dacad5SJay Sternberg c.create_sq.sqid = cpu_to_le16(qid); 92557dacad5SJay Sternberg c.create_sq.qsize = cpu_to_le16(nvmeq->q_depth - 1); 92657dacad5SJay Sternberg c.create_sq.sq_flags = cpu_to_le16(flags); 92757dacad5SJay Sternberg c.create_sq.cqid = cpu_to_le16(qid); 92857dacad5SJay Sternberg 9291c63dc66SChristoph Hellwig return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0); 93057dacad5SJay Sternberg } 93157dacad5SJay Sternberg 93257dacad5SJay Sternberg static int adapter_delete_cq(struct nvme_dev *dev, u16 cqid) 93357dacad5SJay Sternberg { 93457dacad5SJay Sternberg return adapter_delete_queue(dev, nvme_admin_delete_cq, cqid); 93557dacad5SJay Sternberg } 93657dacad5SJay Sternberg 93757dacad5SJay Sternberg static int adapter_delete_sq(struct nvme_dev *dev, u16 sqid) 93857dacad5SJay Sternberg { 93957dacad5SJay Sternberg return adapter_delete_queue(dev, nvme_admin_delete_sq, sqid); 94057dacad5SJay Sternberg } 94157dacad5SJay Sternberg 942e7a2a87dSChristoph Hellwig static void abort_endio(struct request *req, int error) 94357dacad5SJay Sternberg { 944f4800d6dSChristoph Hellwig struct nvme_iod *iod = blk_mq_rq_to_pdu(req); 945f4800d6dSChristoph Hellwig struct nvme_queue *nvmeq = iod->nvmeq; 94657dacad5SJay Sternberg 94727fa9bc5SChristoph Hellwig dev_warn(nvmeq->dev->ctrl.device, 94827fa9bc5SChristoph Hellwig "Abort status: 0x%x", nvme_req(req)->status); 949e7a2a87dSChristoph Hellwig atomic_inc(&nvmeq->dev->ctrl.abort_limit); 950e7a2a87dSChristoph Hellwig blk_mq_free_request(req); 95157dacad5SJay Sternberg } 95257dacad5SJay Sternberg 95331c7c7d2SChristoph Hellwig static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved) 95457dacad5SJay Sternberg { 955f4800d6dSChristoph Hellwig struct nvme_iod *iod = blk_mq_rq_to_pdu(req); 956f4800d6dSChristoph Hellwig struct nvme_queue *nvmeq = iod->nvmeq; 95757dacad5SJay Sternberg struct nvme_dev *dev = nvmeq->dev; 95857dacad5SJay Sternberg struct request *abort_req; 95957dacad5SJay Sternberg struct nvme_command cmd; 96057dacad5SJay Sternberg 96131c7c7d2SChristoph Hellwig /* 9627776db1cSKeith Busch * Did we miss an interrupt?
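 * (Added note: polling the completion queue here catches a request that did complete but whose interrupt was lost, so it can be finished without resetting the controller.)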
9637776db1cSKeith Busch */ 9647776db1cSKeith Busch if (__nvme_poll(nvmeq, req->tag)) { 9657776db1cSKeith Busch dev_warn(dev->ctrl.device, 9667776db1cSKeith Busch "I/O %d QID %d timeout, completion polled\n", 9677776db1cSKeith Busch req->tag, nvmeq->qid); 9687776db1cSKeith Busch return BLK_EH_HANDLED; 9697776db1cSKeith Busch } 9707776db1cSKeith Busch 9717776db1cSKeith Busch /* 972fd634f41SChristoph Hellwig * Shutdown immediately if controller times out while starting. The 973fd634f41SChristoph Hellwig * reset work will see the pci device disabled when it gets the forced 974fd634f41SChristoph Hellwig * cancellation error. All outstanding requests are completed on 975fd634f41SChristoph Hellwig * shutdown, so we return BLK_EH_HANDLED. 976fd634f41SChristoph Hellwig */ 977bb8d261eSChristoph Hellwig if (dev->ctrl.state == NVME_CTRL_RESETTING) { 9781b3c47c1SSagi Grimberg dev_warn(dev->ctrl.device, 979fd634f41SChristoph Hellwig "I/O %d QID %d timeout, disable controller\n", 980fd634f41SChristoph Hellwig req->tag, nvmeq->qid); 981a5cdb68cSKeith Busch nvme_dev_disable(dev, false); 98227fa9bc5SChristoph Hellwig nvme_req(req)->flags |= NVME_REQ_CANCELLED; 983fd634f41SChristoph Hellwig return BLK_EH_HANDLED; 984fd634f41SChristoph Hellwig } 985fd634f41SChristoph Hellwig 986fd634f41SChristoph Hellwig /* 987e1569a16SKeith Busch * Shutdown the controller immediately and schedule a reset if the 988e1569a16SKeith Busch * command was already aborted once before and still hasn't been 989e1569a16SKeith Busch * returned to the driver, or if this is the admin queue. 99031c7c7d2SChristoph Hellwig */ 991f4800d6dSChristoph Hellwig if (!nvmeq->qid || iod->aborted) { 9921b3c47c1SSagi Grimberg dev_warn(dev->ctrl.device, 99357dacad5SJay Sternberg "I/O %d QID %d timeout, reset controller\n", 99457dacad5SJay Sternberg req->tag, nvmeq->qid); 995a5cdb68cSKeith Busch nvme_dev_disable(dev, false); 996c5f6ce97SKeith Busch nvme_reset(dev); 997e1569a16SKeith Busch 998e1569a16SKeith Busch /* 999e1569a16SKeith Busch * Mark the request as handled, since the inline shutdown 1000e1569a16SKeith Busch * forces all outstanding requests to complete. 
1001e1569a16SKeith Busch */ 100227fa9bc5SChristoph Hellwig nvme_req(req)->flags |= NVME_REQ_CANCELLED; 1003e1569a16SKeith Busch return BLK_EH_HANDLED; 100457dacad5SJay Sternberg } 100557dacad5SJay Sternberg 1006e7a2a87dSChristoph Hellwig if (atomic_dec_return(&dev->ctrl.abort_limit) < 0) { 1007e7a2a87dSChristoph Hellwig atomic_inc(&dev->ctrl.abort_limit); 1008e7a2a87dSChristoph Hellwig return BLK_EH_RESET_TIMER; 1009e7a2a87dSChristoph Hellwig } 10107bf7d778SKeith Busch iod->aborted = 1; 101157dacad5SJay Sternberg 101257dacad5SJay Sternberg memset(&cmd, 0, sizeof(cmd)); 101357dacad5SJay Sternberg cmd.abort.opcode = nvme_admin_abort_cmd; 101457dacad5SJay Sternberg cmd.abort.cid = req->tag; 101557dacad5SJay Sternberg cmd.abort.sqid = cpu_to_le16(nvmeq->qid); 101657dacad5SJay Sternberg 10171b3c47c1SSagi Grimberg dev_warn(nvmeq->dev->ctrl.device, 10181b3c47c1SSagi Grimberg "I/O %d QID %d timeout, aborting\n", 101957dacad5SJay Sternberg req->tag, nvmeq->qid); 1020e7a2a87dSChristoph Hellwig 1021e7a2a87dSChristoph Hellwig abort_req = nvme_alloc_request(dev->ctrl.admin_q, &cmd, 1022eb71f435SChristoph Hellwig BLK_MQ_REQ_NOWAIT, NVME_QID_ANY); 10236bf25d16SChristoph Hellwig if (IS_ERR(abort_req)) { 10246bf25d16SChristoph Hellwig atomic_inc(&dev->ctrl.abort_limit); 102531c7c7d2SChristoph Hellwig return BLK_EH_RESET_TIMER; 102657dacad5SJay Sternberg } 102757dacad5SJay Sternberg 1028e7a2a87dSChristoph Hellwig abort_req->timeout = ADMIN_TIMEOUT; 1029e7a2a87dSChristoph Hellwig abort_req->end_io_data = NULL; 1030e7a2a87dSChristoph Hellwig blk_execute_rq_nowait(abort_req->q, NULL, abort_req, 0, abort_endio); 103157dacad5SJay Sternberg 103257dacad5SJay Sternberg /* 103357dacad5SJay Sternberg * The aborted req will be completed on receiving the abort req. 103457dacad5SJay Sternberg * We enable the timer again. If hit twice, it'll cause a device reset, 103557dacad5SJay Sternberg * as the device then is in a faulty state. 
103657dacad5SJay Sternberg */ 103757dacad5SJay Sternberg return BLK_EH_RESET_TIMER; 103857dacad5SJay Sternberg } 103957dacad5SJay Sternberg 104057dacad5SJay Sternberg static void nvme_free_queue(struct nvme_queue *nvmeq) 104157dacad5SJay Sternberg { 104257dacad5SJay Sternberg dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth), 104357dacad5SJay Sternberg (void *)nvmeq->cqes, nvmeq->cq_dma_addr); 104457dacad5SJay Sternberg if (nvmeq->sq_cmds) 104557dacad5SJay Sternberg dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth), 104657dacad5SJay Sternberg nvmeq->sq_cmds, nvmeq->sq_dma_addr); 104757dacad5SJay Sternberg kfree(nvmeq); 104857dacad5SJay Sternberg } 104957dacad5SJay Sternberg 105057dacad5SJay Sternberg static void nvme_free_queues(struct nvme_dev *dev, int lowest) 105157dacad5SJay Sternberg { 105257dacad5SJay Sternberg int i; 105357dacad5SJay Sternberg 105457dacad5SJay Sternberg for (i = dev->queue_count - 1; i >= lowest; i--) { 105557dacad5SJay Sternberg struct nvme_queue *nvmeq = dev->queues[i]; 105657dacad5SJay Sternberg dev->queue_count--; 105757dacad5SJay Sternberg dev->queues[i] = NULL; 105857dacad5SJay Sternberg nvme_free_queue(nvmeq); 105957dacad5SJay Sternberg } 106057dacad5SJay Sternberg } 106157dacad5SJay Sternberg 106257dacad5SJay Sternberg /** 106357dacad5SJay Sternberg * nvme_suspend_queue - put queue into suspended state 106457dacad5SJay Sternberg * @nvmeq - queue to suspend 106557dacad5SJay Sternberg */ 106657dacad5SJay Sternberg static int nvme_suspend_queue(struct nvme_queue *nvmeq) 106757dacad5SJay Sternberg { 106857dacad5SJay Sternberg int vector; 106957dacad5SJay Sternberg 107057dacad5SJay Sternberg spin_lock_irq(&nvmeq->q_lock); 107157dacad5SJay Sternberg if (nvmeq->cq_vector == -1) { 107257dacad5SJay Sternberg spin_unlock_irq(&nvmeq->q_lock); 107357dacad5SJay Sternberg return 1; 107457dacad5SJay Sternberg } 10750ff199cbSChristoph Hellwig vector = nvmeq->cq_vector; 107657dacad5SJay Sternberg nvmeq->dev->online_queues--; 107757dacad5SJay Sternberg nvmeq->cq_vector = -1; 107857dacad5SJay Sternberg spin_unlock_irq(&nvmeq->q_lock); 107957dacad5SJay Sternberg 10801c63dc66SChristoph Hellwig if (!nvmeq->qid && nvmeq->dev->ctrl.admin_q) 108125646264SKeith Busch blk_mq_stop_hw_queues(nvmeq->dev->ctrl.admin_q); 108257dacad5SJay Sternberg 10830ff199cbSChristoph Hellwig pci_free_irq(to_pci_dev(nvmeq->dev->dev), vector, nvmeq); 108457dacad5SJay Sternberg 108557dacad5SJay Sternberg return 0; 108657dacad5SJay Sternberg } 108757dacad5SJay Sternberg 1088a5cdb68cSKeith Busch static void nvme_disable_admin_queue(struct nvme_dev *dev, bool shutdown) 108957dacad5SJay Sternberg { 1090a5cdb68cSKeith Busch struct nvme_queue *nvmeq = dev->queues[0]; 109157dacad5SJay Sternberg 109257dacad5SJay Sternberg if (!nvmeq) 109357dacad5SJay Sternberg return; 109457dacad5SJay Sternberg if (nvme_suspend_queue(nvmeq)) 109557dacad5SJay Sternberg return; 109657dacad5SJay Sternberg 1097a5cdb68cSKeith Busch if (shutdown) 1098a5cdb68cSKeith Busch nvme_shutdown_ctrl(&dev->ctrl); 1099a5cdb68cSKeith Busch else 1100a5cdb68cSKeith Busch nvme_disable_ctrl(&dev->ctrl, lo_hi_readq( 1101a5cdb68cSKeith Busch dev->bar + NVME_REG_CAP)); 110257dacad5SJay Sternberg 110357dacad5SJay Sternberg spin_lock_irq(&nvmeq->q_lock); 110457dacad5SJay Sternberg nvme_process_cq(nvmeq); 110557dacad5SJay Sternberg spin_unlock_irq(&nvmeq->q_lock); 110657dacad5SJay Sternberg } 110757dacad5SJay Sternberg 110857dacad5SJay Sternberg static int nvme_cmb_qdepth(struct nvme_dev *dev, int nr_io_queues, 110957dacad5SJay Sternberg int 
entry_size) 111057dacad5SJay Sternberg { 111157dacad5SJay Sternberg int q_depth = dev->q_depth; 11125fd4ce1bSChristoph Hellwig unsigned q_size_aligned = roundup(q_depth * entry_size, 11135fd4ce1bSChristoph Hellwig dev->ctrl.page_size); 111457dacad5SJay Sternberg 111557dacad5SJay Sternberg if (q_size_aligned * nr_io_queues > dev->cmb_size) { 111657dacad5SJay Sternberg u64 mem_per_q = div_u64(dev->cmb_size, nr_io_queues); 11175fd4ce1bSChristoph Hellwig mem_per_q = round_down(mem_per_q, dev->ctrl.page_size); 111857dacad5SJay Sternberg q_depth = div_u64(mem_per_q, entry_size); 111957dacad5SJay Sternberg 112057dacad5SJay Sternberg /* 112157dacad5SJay Sternberg * Ensure the reduced q_depth is above some threshold where it 112257dacad5SJay Sternberg * would be better to map queues in system memory with the 112357dacad5SJay Sternberg * original depth 112457dacad5SJay Sternberg */ 112557dacad5SJay Sternberg if (q_depth < 64) 112657dacad5SJay Sternberg return -ENOMEM; 112757dacad5SJay Sternberg } 112857dacad5SJay Sternberg 112957dacad5SJay Sternberg return q_depth; 113057dacad5SJay Sternberg } 113157dacad5SJay Sternberg 113257dacad5SJay Sternberg static int nvme_alloc_sq_cmds(struct nvme_dev *dev, struct nvme_queue *nvmeq, 113357dacad5SJay Sternberg int qid, int depth) 113457dacad5SJay Sternberg { 113557dacad5SJay Sternberg if (qid && dev->cmb && use_cmb_sqes && NVME_CMB_SQS(dev->cmbsz)) { 11365fd4ce1bSChristoph Hellwig unsigned offset = (qid - 1) * roundup(SQ_SIZE(depth), 11375fd4ce1bSChristoph Hellwig dev->ctrl.page_size); 113857dacad5SJay Sternberg nvmeq->sq_dma_addr = dev->cmb_dma_addr + offset; 113957dacad5SJay Sternberg nvmeq->sq_cmds_io = dev->cmb + offset; 114057dacad5SJay Sternberg } else { 114157dacad5SJay Sternberg nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(depth), 114257dacad5SJay Sternberg &nvmeq->sq_dma_addr, GFP_KERNEL); 114357dacad5SJay Sternberg if (!nvmeq->sq_cmds) 114457dacad5SJay Sternberg return -ENOMEM; 114557dacad5SJay Sternberg } 114657dacad5SJay Sternberg 114757dacad5SJay Sternberg return 0; 114857dacad5SJay Sternberg } 114957dacad5SJay Sternberg 115057dacad5SJay Sternberg static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid, 1151d3af3ecdSShaohua Li int depth, int node) 115257dacad5SJay Sternberg { 1153d3af3ecdSShaohua Li struct nvme_queue *nvmeq = kzalloc_node(sizeof(*nvmeq), GFP_KERNEL, 1154d3af3ecdSShaohua Li node); 115557dacad5SJay Sternberg if (!nvmeq) 115657dacad5SJay Sternberg return NULL; 115757dacad5SJay Sternberg 115857dacad5SJay Sternberg nvmeq->cqes = dma_zalloc_coherent(dev->dev, CQ_SIZE(depth), 115957dacad5SJay Sternberg &nvmeq->cq_dma_addr, GFP_KERNEL); 116057dacad5SJay Sternberg if (!nvmeq->cqes) 116157dacad5SJay Sternberg goto free_nvmeq; 116257dacad5SJay Sternberg 116357dacad5SJay Sternberg if (nvme_alloc_sq_cmds(dev, nvmeq, qid, depth)) 116457dacad5SJay Sternberg goto free_cqdma; 116557dacad5SJay Sternberg 116657dacad5SJay Sternberg nvmeq->q_dmadev = dev->dev; 116757dacad5SJay Sternberg nvmeq->dev = dev; 116857dacad5SJay Sternberg spin_lock_init(&nvmeq->q_lock); 116957dacad5SJay Sternberg nvmeq->cq_head = 0; 117057dacad5SJay Sternberg nvmeq->cq_phase = 1; 117157dacad5SJay Sternberg nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride]; 117257dacad5SJay Sternberg nvmeq->q_depth = depth; 117357dacad5SJay Sternberg nvmeq->qid = qid; 117457dacad5SJay Sternberg nvmeq->cq_vector = -1; 117557dacad5SJay Sternberg dev->queues[qid] = nvmeq; 117657dacad5SJay Sternberg dev->queue_count++; 117757dacad5SJay Sternberg 117857dacad5SJay Sternberg 
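	/*
	 * The new queue is now tracked in dev->queues[], but it is not live
	 * yet: cq_vector stays -1 until nvme_create_queue() or
	 * nvme_configure_admin_queue() assigns a vector and requests its IRQ.
	 */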
return nvmeq; 117957dacad5SJay Sternberg 118057dacad5SJay Sternberg free_cqdma: 118157dacad5SJay Sternberg dma_free_coherent(dev->dev, CQ_SIZE(depth), (void *)nvmeq->cqes, 118257dacad5SJay Sternberg nvmeq->cq_dma_addr); 118357dacad5SJay Sternberg free_nvmeq: 118457dacad5SJay Sternberg kfree(nvmeq); 118557dacad5SJay Sternberg return NULL; 118657dacad5SJay Sternberg } 118757dacad5SJay Sternberg 1188dca51e78SChristoph Hellwig static int queue_request_irq(struct nvme_queue *nvmeq) 118957dacad5SJay Sternberg { 11900ff199cbSChristoph Hellwig struct pci_dev *pdev = to_pci_dev(nvmeq->dev->dev); 11910ff199cbSChristoph Hellwig int nr = nvmeq->dev->ctrl.instance; 11920ff199cbSChristoph Hellwig 11930ff199cbSChristoph Hellwig if (use_threaded_interrupts) { 11940ff199cbSChristoph Hellwig return pci_request_irq(pdev, nvmeq->cq_vector, nvme_irq_check, 11950ff199cbSChristoph Hellwig nvme_irq, nvmeq, "nvme%dq%d", nr, nvmeq->qid); 11960ff199cbSChristoph Hellwig } else { 11970ff199cbSChristoph Hellwig return pci_request_irq(pdev, nvmeq->cq_vector, nvme_irq, 11980ff199cbSChristoph Hellwig NULL, nvmeq, "nvme%dq%d", nr, nvmeq->qid); 11990ff199cbSChristoph Hellwig } 120057dacad5SJay Sternberg } 120157dacad5SJay Sternberg 120257dacad5SJay Sternberg static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid) 120357dacad5SJay Sternberg { 120457dacad5SJay Sternberg struct nvme_dev *dev = nvmeq->dev; 120557dacad5SJay Sternberg 120657dacad5SJay Sternberg spin_lock_irq(&nvmeq->q_lock); 120757dacad5SJay Sternberg nvmeq->sq_tail = 0; 120857dacad5SJay Sternberg nvmeq->cq_head = 0; 120957dacad5SJay Sternberg nvmeq->cq_phase = 1; 121057dacad5SJay Sternberg nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride]; 121157dacad5SJay Sternberg memset((void *)nvmeq->cqes, 0, CQ_SIZE(nvmeq->q_depth)); 1212f9f38e33SHelen Koike nvme_dbbuf_init(dev, nvmeq, qid); 121357dacad5SJay Sternberg dev->online_queues++; 121457dacad5SJay Sternberg spin_unlock_irq(&nvmeq->q_lock); 121557dacad5SJay Sternberg } 121657dacad5SJay Sternberg 121757dacad5SJay Sternberg static int nvme_create_queue(struct nvme_queue *nvmeq, int qid) 121857dacad5SJay Sternberg { 121957dacad5SJay Sternberg struct nvme_dev *dev = nvmeq->dev; 122057dacad5SJay Sternberg int result; 122157dacad5SJay Sternberg 122257dacad5SJay Sternberg nvmeq->cq_vector = qid - 1; 122357dacad5SJay Sternberg result = adapter_alloc_cq(dev, qid, nvmeq); 122457dacad5SJay Sternberg if (result < 0) 122557dacad5SJay Sternberg return result; 122657dacad5SJay Sternberg 122757dacad5SJay Sternberg result = adapter_alloc_sq(dev, qid, nvmeq); 122857dacad5SJay Sternberg if (result < 0) 122957dacad5SJay Sternberg goto release_cq; 123057dacad5SJay Sternberg 1231dca51e78SChristoph Hellwig result = queue_request_irq(nvmeq); 123257dacad5SJay Sternberg if (result < 0) 123357dacad5SJay Sternberg goto release_sq; 123457dacad5SJay Sternberg 123557dacad5SJay Sternberg nvme_init_queue(nvmeq, qid); 123657dacad5SJay Sternberg return result; 123757dacad5SJay Sternberg 123857dacad5SJay Sternberg release_sq: 123957dacad5SJay Sternberg adapter_delete_sq(dev, qid); 124057dacad5SJay Sternberg release_cq: 124157dacad5SJay Sternberg adapter_delete_cq(dev, qid); 124257dacad5SJay Sternberg return result; 124357dacad5SJay Sternberg } 124457dacad5SJay Sternberg 1245f363b089SEric Biggers static const struct blk_mq_ops nvme_mq_admin_ops = { 124657dacad5SJay Sternberg .queue_rq = nvme_queue_rq, 124777f02a7aSChristoph Hellwig .complete = nvme_pci_complete_rq, 124857dacad5SJay Sternberg .init_hctx = nvme_admin_init_hctx, 124957dacad5SJay 
Sternberg .exit_hctx = nvme_admin_exit_hctx, 125057dacad5SJay Sternberg .init_request = nvme_admin_init_request, 125157dacad5SJay Sternberg .timeout = nvme_timeout, 125257dacad5SJay Sternberg }; 125357dacad5SJay Sternberg 1254f363b089SEric Biggers static const struct blk_mq_ops nvme_mq_ops = { 125557dacad5SJay Sternberg .queue_rq = nvme_queue_rq, 125677f02a7aSChristoph Hellwig .complete = nvme_pci_complete_rq, 125757dacad5SJay Sternberg .init_hctx = nvme_init_hctx, 125857dacad5SJay Sternberg .init_request = nvme_init_request, 1259dca51e78SChristoph Hellwig .map_queues = nvme_pci_map_queues, 126057dacad5SJay Sternberg .timeout = nvme_timeout, 1261a0fa9647SJens Axboe .poll = nvme_poll, 126257dacad5SJay Sternberg }; 126357dacad5SJay Sternberg 126457dacad5SJay Sternberg static void nvme_dev_remove_admin(struct nvme_dev *dev) 126557dacad5SJay Sternberg { 12661c63dc66SChristoph Hellwig if (dev->ctrl.admin_q && !blk_queue_dying(dev->ctrl.admin_q)) { 126769d9a99cSKeith Busch /* 126869d9a99cSKeith Busch * If the controller was reset during removal, it's possible 126969d9a99cSKeith Busch * user requests may be waiting on a stopped queue. Start the 127069d9a99cSKeith Busch * queue to flush these to completion. 127169d9a99cSKeith Busch */ 127269d9a99cSKeith Busch blk_mq_start_stopped_hw_queues(dev->ctrl.admin_q, true); 12731c63dc66SChristoph Hellwig blk_cleanup_queue(dev->ctrl.admin_q); 127457dacad5SJay Sternberg blk_mq_free_tag_set(&dev->admin_tagset); 127557dacad5SJay Sternberg } 127657dacad5SJay Sternberg } 127757dacad5SJay Sternberg 127857dacad5SJay Sternberg static int nvme_alloc_admin_tags(struct nvme_dev *dev) 127957dacad5SJay Sternberg { 12801c63dc66SChristoph Hellwig if (!dev->ctrl.admin_q) { 128157dacad5SJay Sternberg dev->admin_tagset.ops = &nvme_mq_admin_ops; 128257dacad5SJay Sternberg dev->admin_tagset.nr_hw_queues = 1; 1283e3e9d50cSKeith Busch 1284e3e9d50cSKeith Busch /* 1285e3e9d50cSKeith Busch * Subtract one to leave an empty queue entry for 'Full Queue' 1286e3e9d50cSKeith Busch * condition. See NVM-Express 1.2 specification, section 4.1.2. 
1287e3e9d50cSKeith Busch */ 1288e3e9d50cSKeith Busch dev->admin_tagset.queue_depth = NVME_AQ_BLKMQ_DEPTH - 1; 128957dacad5SJay Sternberg dev->admin_tagset.timeout = ADMIN_TIMEOUT; 129057dacad5SJay Sternberg dev->admin_tagset.numa_node = dev_to_node(dev->dev); 129157dacad5SJay Sternberg dev->admin_tagset.cmd_size = nvme_cmd_size(dev); 1292d3484991SJens Axboe dev->admin_tagset.flags = BLK_MQ_F_NO_SCHED; 129357dacad5SJay Sternberg dev->admin_tagset.driver_data = dev; 129457dacad5SJay Sternberg 129557dacad5SJay Sternberg if (blk_mq_alloc_tag_set(&dev->admin_tagset)) 129657dacad5SJay Sternberg return -ENOMEM; 129757dacad5SJay Sternberg 12981c63dc66SChristoph Hellwig dev->ctrl.admin_q = blk_mq_init_queue(&dev->admin_tagset); 12991c63dc66SChristoph Hellwig if (IS_ERR(dev->ctrl.admin_q)) { 130057dacad5SJay Sternberg blk_mq_free_tag_set(&dev->admin_tagset); 130157dacad5SJay Sternberg return -ENOMEM; 130257dacad5SJay Sternberg } 13031c63dc66SChristoph Hellwig if (!blk_get_queue(dev->ctrl.admin_q)) { 130457dacad5SJay Sternberg nvme_dev_remove_admin(dev); 13051c63dc66SChristoph Hellwig dev->ctrl.admin_q = NULL; 130657dacad5SJay Sternberg return -ENODEV; 130757dacad5SJay Sternberg } 130857dacad5SJay Sternberg } else 130925646264SKeith Busch blk_mq_start_stopped_hw_queues(dev->ctrl.admin_q, true); 131057dacad5SJay Sternberg 131157dacad5SJay Sternberg return 0; 131257dacad5SJay Sternberg } 131357dacad5SJay Sternberg 131457dacad5SJay Sternberg static int nvme_configure_admin_queue(struct nvme_dev *dev) 131557dacad5SJay Sternberg { 131657dacad5SJay Sternberg int result; 131757dacad5SJay Sternberg u32 aqa; 13187a67cbeaSChristoph Hellwig u64 cap = lo_hi_readq(dev->bar + NVME_REG_CAP); 131957dacad5SJay Sternberg struct nvme_queue *nvmeq; 132057dacad5SJay Sternberg 13218ef2074dSGabriel Krisman Bertazi dev->subsystem = readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 1, 0) ? 
132257dacad5SJay Sternberg NVME_CAP_NSSRC(cap) : 0; 132357dacad5SJay Sternberg 13247a67cbeaSChristoph Hellwig if (dev->subsystem && 13257a67cbeaSChristoph Hellwig (readl(dev->bar + NVME_REG_CSTS) & NVME_CSTS_NSSRO)) 13267a67cbeaSChristoph Hellwig writel(NVME_CSTS_NSSRO, dev->bar + NVME_REG_CSTS); 132757dacad5SJay Sternberg 13285fd4ce1bSChristoph Hellwig result = nvme_disable_ctrl(&dev->ctrl, cap); 132957dacad5SJay Sternberg if (result < 0) 133057dacad5SJay Sternberg return result; 133157dacad5SJay Sternberg 133257dacad5SJay Sternberg nvmeq = dev->queues[0]; 133357dacad5SJay Sternberg if (!nvmeq) { 1334d3af3ecdSShaohua Li nvmeq = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH, 1335d3af3ecdSShaohua Li dev_to_node(dev->dev)); 133657dacad5SJay Sternberg if (!nvmeq) 133757dacad5SJay Sternberg return -ENOMEM; 133857dacad5SJay Sternberg } 133957dacad5SJay Sternberg 134057dacad5SJay Sternberg aqa = nvmeq->q_depth - 1; 134157dacad5SJay Sternberg aqa |= aqa << 16; 134257dacad5SJay Sternberg 13437a67cbeaSChristoph Hellwig writel(aqa, dev->bar + NVME_REG_AQA); 13447a67cbeaSChristoph Hellwig lo_hi_writeq(nvmeq->sq_dma_addr, dev->bar + NVME_REG_ASQ); 13457a67cbeaSChristoph Hellwig lo_hi_writeq(nvmeq->cq_dma_addr, dev->bar + NVME_REG_ACQ); 134657dacad5SJay Sternberg 13475fd4ce1bSChristoph Hellwig result = nvme_enable_ctrl(&dev->ctrl, cap); 134857dacad5SJay Sternberg if (result) 1349d4875622SKeith Busch return result; 135057dacad5SJay Sternberg 135157dacad5SJay Sternberg nvmeq->cq_vector = 0; 1352dca51e78SChristoph Hellwig result = queue_request_irq(nvmeq); 135357dacad5SJay Sternberg if (result) { 135457dacad5SJay Sternberg nvmeq->cq_vector = -1; 1355d4875622SKeith Busch return result; 135657dacad5SJay Sternberg } 135757dacad5SJay Sternberg 135857dacad5SJay Sternberg return result; 135957dacad5SJay Sternberg } 136057dacad5SJay Sternberg 1361c875a709SGuilherme G. Piccoli static bool nvme_should_reset(struct nvme_dev *dev, u32 csts) 1362c875a709SGuilherme G. Piccoli { 1363c875a709SGuilherme G. Piccoli 1364c875a709SGuilherme G. Piccoli /* If true, indicates loss of adapter communication, possibly by an 1365c875a709SGuilherme G. Piccoli * NVMe Subsystem reset. 1366c875a709SGuilherme G. Piccoli */ 1367c875a709SGuilherme G. Piccoli bool nssro = dev->subsystem && (csts & NVME_CSTS_NSSRO); 1368c875a709SGuilherme G. Piccoli 1369c875a709SGuilherme G. Piccoli /* If there is a reset ongoing, we shouldn't reset again. */ 1370c875a709SGuilherme G. Piccoli if (work_busy(&dev->reset_work)) 1371c875a709SGuilherme G. Piccoli return false; 1372c875a709SGuilherme G. Piccoli 1373c875a709SGuilherme G. Piccoli /* We shouldn't reset unless the controller is in a fatal error state 1374c875a709SGuilherme G. Piccoli * _or_ we have lost communication with it. 1375c875a709SGuilherme G. Piccoli */ 1376c875a709SGuilherme G. Piccoli if (!(csts & NVME_CSTS_CFS) && !nssro) 1377c875a709SGuilherme G. Piccoli return false; 1378c875a709SGuilherme G. Piccoli 1379c875a709SGuilherme G. Piccoli /* If a PCI error recovery process is in progress, we cannot reset or 1380c875a709SGuilherme G. Piccoli * the recovery mechanism will surely fail. 1381c875a709SGuilherme G. Piccoli */ 1382c875a709SGuilherme G. Piccoli if (pci_channel_offline(to_pci_dev(dev->dev))) 1383c875a709SGuilherme G. Piccoli return false; 1384c875a709SGuilherme G. Piccoli 1385c875a709SGuilherme G. Piccoli return true; 1386c875a709SGuilherme G. Piccoli } 1387c875a709SGuilherme G.
Piccoli 1388d2a61918SAndy Lutomirski static void nvme_warn_reset(struct nvme_dev *dev, u32 csts) 1389d2a61918SAndy Lutomirski { 1390d2a61918SAndy Lutomirski /* Read a config register to help see what died. */ 1391d2a61918SAndy Lutomirski u16 pci_status; 1392d2a61918SAndy Lutomirski int result; 1393d2a61918SAndy Lutomirski 1394d2a61918SAndy Lutomirski result = pci_read_config_word(to_pci_dev(dev->dev), PCI_STATUS, 1395d2a61918SAndy Lutomirski &pci_status); 1396d2a61918SAndy Lutomirski if (result == PCIBIOS_SUCCESSFUL) 1397d2a61918SAndy Lutomirski dev_warn(dev->dev, 1398d2a61918SAndy Lutomirski "controller is down; will reset: CSTS=0x%x, PCI_STATUS=0x%hx\n", 1399d2a61918SAndy Lutomirski csts, pci_status); 1400d2a61918SAndy Lutomirski else 1401d2a61918SAndy Lutomirski dev_warn(dev->dev, 1402d2a61918SAndy Lutomirski "controller is down; will reset: CSTS=0x%x, PCI_STATUS read failed (%d)\n", 1403d2a61918SAndy Lutomirski csts, result); 1404d2a61918SAndy Lutomirski } 1405d2a61918SAndy Lutomirski 14062d55cd5fSChristoph Hellwig static void nvme_watchdog_timer(unsigned long data) 140757dacad5SJay Sternberg { 14082d55cd5fSChristoph Hellwig struct nvme_dev *dev = (struct nvme_dev *)data; 14097a67cbeaSChristoph Hellwig u32 csts = readl(dev->bar + NVME_REG_CSTS); 141057dacad5SJay Sternberg 1411c875a709SGuilherme G. Piccoli /* Skip controllers under certain specific conditions. */ 1412c875a709SGuilherme G. Piccoli if (nvme_should_reset(dev, csts)) { 1413c5f6ce97SKeith Busch if (!nvme_reset(dev)) 1414d2a61918SAndy Lutomirski nvme_warn_reset(dev, csts); 14152d55cd5fSChristoph Hellwig return; 141657dacad5SJay Sternberg } 141757dacad5SJay Sternberg 14182d55cd5fSChristoph Hellwig mod_timer(&dev->watchdog_timer, round_jiffies(jiffies + HZ)); 141957dacad5SJay Sternberg } 142057dacad5SJay Sternberg 1421749941f2SChristoph Hellwig static int nvme_create_io_queues(struct nvme_dev *dev) 142257dacad5SJay Sternberg { 1423949928c1SKeith Busch unsigned i, max; 1424749941f2SChristoph Hellwig int ret = 0; 142557dacad5SJay Sternberg 1426749941f2SChristoph Hellwig for (i = dev->queue_count; i <= dev->max_qid; i++) { 1427d3af3ecdSShaohua Li /* vector == qid - 1, match nvme_create_queue */ 1428d3af3ecdSShaohua Li if (!nvme_alloc_queue(dev, i, dev->q_depth, 1429d3af3ecdSShaohua Li pci_irq_get_node(to_pci_dev(dev->dev), i - 1))) { 1430749941f2SChristoph Hellwig ret = -ENOMEM; 143157dacad5SJay Sternberg break; 1432749941f2SChristoph Hellwig } 1433749941f2SChristoph Hellwig } 143457dacad5SJay Sternberg 1435949928c1SKeith Busch max = min(dev->max_qid, dev->queue_count - 1); 1436949928c1SKeith Busch for (i = dev->online_queues; i <= max; i++) { 1437749941f2SChristoph Hellwig ret = nvme_create_queue(dev->queues[i], i); 1438d4875622SKeith Busch if (ret) 143957dacad5SJay Sternberg break; 144057dacad5SJay Sternberg } 144157dacad5SJay Sternberg 1442749941f2SChristoph Hellwig /* 1443749941f2SChristoph Hellwig * Ignore failing Create SQ/CQ commands; we can continue with fewer 1444749941f2SChristoph Hellwig * than the desired number of queues, and even a controller without 1445749941f2SChristoph Hellwig * I/O queues can still be used to issue admin commands. This might 1446749941f2SChristoph Hellwig * be useful to upgrade buggy firmware, for example. 1447749941f2SChristoph Hellwig */ 1448749941f2SChristoph Hellwig return ret >= 0 ?
0 : ret; 144957dacad5SJay Sternberg } 145057dacad5SJay Sternberg 1451202021c1SStephen Bates static ssize_t nvme_cmb_show(struct device *dev, 1452202021c1SStephen Bates struct device_attribute *attr, 1453202021c1SStephen Bates char *buf) 1454202021c1SStephen Bates { 1455202021c1SStephen Bates struct nvme_dev *ndev = to_nvme_dev(dev_get_drvdata(dev)); 1456202021c1SStephen Bates 1457c965809cSStephen Bates return scnprintf(buf, PAGE_SIZE, "cmbloc : x%08x\ncmbsz : x%08x\n", 1458202021c1SStephen Bates ndev->cmbloc, ndev->cmbsz); 1459202021c1SStephen Bates } 1460202021c1SStephen Bates static DEVICE_ATTR(cmb, S_IRUGO, nvme_cmb_show, NULL); 1461202021c1SStephen Bates 146257dacad5SJay Sternberg static void __iomem *nvme_map_cmb(struct nvme_dev *dev) 146357dacad5SJay Sternberg { 146457dacad5SJay Sternberg u64 szu, size, offset; 146557dacad5SJay Sternberg resource_size_t bar_size; 146657dacad5SJay Sternberg struct pci_dev *pdev = to_pci_dev(dev->dev); 146757dacad5SJay Sternberg void __iomem *cmb; 146857dacad5SJay Sternberg dma_addr_t dma_addr; 146957dacad5SJay Sternberg 14707a67cbeaSChristoph Hellwig dev->cmbsz = readl(dev->bar + NVME_REG_CMBSZ); 147157dacad5SJay Sternberg if (!(NVME_CMB_SZ(dev->cmbsz))) 147257dacad5SJay Sternberg return NULL; 1473202021c1SStephen Bates dev->cmbloc = readl(dev->bar + NVME_REG_CMBLOC); 147457dacad5SJay Sternberg 1475202021c1SStephen Bates if (!use_cmb_sqes) 1476202021c1SStephen Bates return NULL; 147757dacad5SJay Sternberg 147857dacad5SJay Sternberg szu = (u64)1 << (12 + 4 * NVME_CMB_SZU(dev->cmbsz)); 147957dacad5SJay Sternberg size = szu * NVME_CMB_SZ(dev->cmbsz); 1480202021c1SStephen Bates offset = szu * NVME_CMB_OFST(dev->cmbloc); 1481202021c1SStephen Bates bar_size = pci_resource_len(pdev, NVME_CMB_BIR(dev->cmbloc)); 148257dacad5SJay Sternberg 148357dacad5SJay Sternberg if (offset > bar_size) 148457dacad5SJay Sternberg return NULL; 148557dacad5SJay Sternberg 148657dacad5SJay Sternberg /* 148757dacad5SJay Sternberg * Controllers may support a CMB size larger than their BAR, 148857dacad5SJay Sternberg * for example, due to being behind a bridge. 
Reduce the CMB to 148957dacad5SJay Sternberg * the reported size of the BAR 149057dacad5SJay Sternberg */ 149157dacad5SJay Sternberg if (size > bar_size - offset) 149257dacad5SJay Sternberg size = bar_size - offset; 149357dacad5SJay Sternberg 1494202021c1SStephen Bates dma_addr = pci_resource_start(pdev, NVME_CMB_BIR(dev->cmbloc)) + offset; 149557dacad5SJay Sternberg cmb = ioremap_wc(dma_addr, size); 149657dacad5SJay Sternberg if (!cmb) 149757dacad5SJay Sternberg return NULL; 149857dacad5SJay Sternberg 149957dacad5SJay Sternberg dev->cmb_dma_addr = dma_addr; 150057dacad5SJay Sternberg dev->cmb_size = size; 150157dacad5SJay Sternberg return cmb; 150257dacad5SJay Sternberg } 150357dacad5SJay Sternberg 150457dacad5SJay Sternberg static inline void nvme_release_cmb(struct nvme_dev *dev) 150557dacad5SJay Sternberg { 150657dacad5SJay Sternberg if (dev->cmb) { 150757dacad5SJay Sternberg iounmap(dev->cmb); 150857dacad5SJay Sternberg dev->cmb = NULL; 150957dacad5SJay Sternberg } 151057dacad5SJay Sternberg } 151157dacad5SJay Sternberg 151257dacad5SJay Sternberg static size_t db_bar_size(struct nvme_dev *dev, unsigned nr_io_queues) 151357dacad5SJay Sternberg { 151457dacad5SJay Sternberg return 4096 + ((nr_io_queues + 1) * 8 * dev->db_stride); 151557dacad5SJay Sternberg } 151657dacad5SJay Sternberg 151757dacad5SJay Sternberg static int nvme_setup_io_queues(struct nvme_dev *dev) 151857dacad5SJay Sternberg { 151957dacad5SJay Sternberg struct nvme_queue *adminq = dev->queues[0]; 152057dacad5SJay Sternberg struct pci_dev *pdev = to_pci_dev(dev->dev); 1521dca51e78SChristoph Hellwig int result, nr_io_queues, size; 152257dacad5SJay Sternberg 15232800b8e7SKeith Busch nr_io_queues = num_online_cpus(); 15249a0be7abSChristoph Hellwig result = nvme_set_queue_count(&dev->ctrl, &nr_io_queues); 15259a0be7abSChristoph Hellwig if (result < 0) 152657dacad5SJay Sternberg return result; 15279a0be7abSChristoph Hellwig 1528f5fa90dcSChristoph Hellwig if (nr_io_queues == 0) 1529a5229050SKeith Busch return 0; 153057dacad5SJay Sternberg 153157dacad5SJay Sternberg if (dev->cmb && NVME_CMB_SQS(dev->cmbsz)) { 153257dacad5SJay Sternberg result = nvme_cmb_qdepth(dev, nr_io_queues, 153357dacad5SJay Sternberg sizeof(struct nvme_command)); 153457dacad5SJay Sternberg if (result > 0) 153557dacad5SJay Sternberg dev->q_depth = result; 153657dacad5SJay Sternberg else 153757dacad5SJay Sternberg nvme_release_cmb(dev); 153857dacad5SJay Sternberg } 153957dacad5SJay Sternberg 154057dacad5SJay Sternberg size = db_bar_size(dev, nr_io_queues); 154157dacad5SJay Sternberg if (size > 8192) { 154257dacad5SJay Sternberg iounmap(dev->bar); 154357dacad5SJay Sternberg do { 154457dacad5SJay Sternberg dev->bar = ioremap(pci_resource_start(pdev, 0), size); 154557dacad5SJay Sternberg if (dev->bar) 154657dacad5SJay Sternberg break; 154757dacad5SJay Sternberg if (!--nr_io_queues) 154857dacad5SJay Sternberg return -ENOMEM; 154957dacad5SJay Sternberg size = db_bar_size(dev, nr_io_queues); 155057dacad5SJay Sternberg } while (1); 15517a67cbeaSChristoph Hellwig dev->dbs = dev->bar + 4096; 155257dacad5SJay Sternberg adminq->q_db = dev->dbs; 155357dacad5SJay Sternberg } 155457dacad5SJay Sternberg 155557dacad5SJay Sternberg /* Deregister the admin queue's interrupt */ 15560ff199cbSChristoph Hellwig pci_free_irq(pdev, 0, adminq); 155757dacad5SJay Sternberg 155857dacad5SJay Sternberg /* 155957dacad5SJay Sternberg * If we enable msix early due to not intx, disable it again before 156057dacad5SJay Sternberg * setting up the full range we need. 
156157dacad5SJay Sternberg */ 1562dca51e78SChristoph Hellwig pci_free_irq_vectors(pdev); 1563dca51e78SChristoph Hellwig nr_io_queues = pci_alloc_irq_vectors(pdev, 1, nr_io_queues, 1564dca51e78SChristoph Hellwig PCI_IRQ_ALL_TYPES | PCI_IRQ_AFFINITY); 1565dca51e78SChristoph Hellwig if (nr_io_queues <= 0) 1566dca51e78SChristoph Hellwig return -EIO; 1567dca51e78SChristoph Hellwig dev->max_qid = nr_io_queues; 156857dacad5SJay Sternberg 156957dacad5SJay Sternberg /* 157057dacad5SJay Sternberg * Should investigate if there's a performance win from allocating 157157dacad5SJay Sternberg * more queues than interrupt vectors; it might allow the submission 157257dacad5SJay Sternberg * path to scale better, even if the receive path is limited by the 157357dacad5SJay Sternberg * number of interrupts. 157457dacad5SJay Sternberg */ 157557dacad5SJay Sternberg 1576dca51e78SChristoph Hellwig result = queue_request_irq(adminq); 157757dacad5SJay Sternberg if (result) { 157857dacad5SJay Sternberg adminq->cq_vector = -1; 1579d4875622SKeith Busch return result; 158057dacad5SJay Sternberg } 1581749941f2SChristoph Hellwig return nvme_create_io_queues(dev); 158257dacad5SJay Sternberg } 158357dacad5SJay Sternberg 1584db3cbfffSKeith Busch static void nvme_del_queue_end(struct request *req, int error) 1585db3cbfffSKeith Busch { 1586db3cbfffSKeith Busch struct nvme_queue *nvmeq = req->end_io_data; 1587db3cbfffSKeith Busch 1588db3cbfffSKeith Busch blk_mq_free_request(req); 1589db3cbfffSKeith Busch complete(&nvmeq->dev->ioq_wait); 1590db3cbfffSKeith Busch } 1591db3cbfffSKeith Busch 1592db3cbfffSKeith Busch static void nvme_del_cq_end(struct request *req, int error) 1593db3cbfffSKeith Busch { 1594db3cbfffSKeith Busch struct nvme_queue *nvmeq = req->end_io_data; 1595db3cbfffSKeith Busch 1596db3cbfffSKeith Busch if (!error) { 1597db3cbfffSKeith Busch unsigned long flags; 1598db3cbfffSKeith Busch 15992e39e0f6SMing Lin /* 16002e39e0f6SMing Lin * We might be called with the AQ q_lock held 16012e39e0f6SMing Lin * and the I/O queue q_lock should always 16022e39e0f6SMing Lin * nest inside the AQ one. 16032e39e0f6SMing Lin */ 16042e39e0f6SMing Lin spin_lock_irqsave_nested(&nvmeq->q_lock, flags, 16052e39e0f6SMing Lin SINGLE_DEPTH_NESTING); 1606db3cbfffSKeith Busch nvme_process_cq(nvmeq); 1607db3cbfffSKeith Busch spin_unlock_irqrestore(&nvmeq->q_lock, flags); 1608db3cbfffSKeith Busch } 1609db3cbfffSKeith Busch 1610db3cbfffSKeith Busch nvme_del_queue_end(req, error); 1611db3cbfffSKeith Busch } 1612db3cbfffSKeith Busch 1613db3cbfffSKeith Busch static int nvme_delete_queue(struct nvme_queue *nvmeq, u8 opcode) 1614db3cbfffSKeith Busch { 1615db3cbfffSKeith Busch struct request_queue *q = nvmeq->dev->ctrl.admin_q; 1616db3cbfffSKeith Busch struct request *req; 1617db3cbfffSKeith Busch struct nvme_command cmd; 1618db3cbfffSKeith Busch 1619db3cbfffSKeith Busch memset(&cmd, 0, sizeof(cmd)); 1620db3cbfffSKeith Busch cmd.delete_queue.opcode = opcode; 1621db3cbfffSKeith Busch cmd.delete_queue.qid = cpu_to_le16(nvmeq->qid); 1622db3cbfffSKeith Busch 1623eb71f435SChristoph Hellwig req = nvme_alloc_request(q, &cmd, BLK_MQ_REQ_NOWAIT, NVME_QID_ANY); 1624db3cbfffSKeith Busch if (IS_ERR(req)) 1625db3cbfffSKeith Busch return PTR_ERR(req); 1626db3cbfffSKeith Busch 1627db3cbfffSKeith Busch req->timeout = ADMIN_TIMEOUT; 1628db3cbfffSKeith Busch req->end_io_data = nvmeq; 1629db3cbfffSKeith Busch 1630db3cbfffSKeith Busch blk_execute_rq_nowait(q, NULL, req, false, 1631db3cbfffSKeith Busch opcode == nvme_admin_delete_cq ? 
1632db3cbfffSKeith Busch nvme_del_cq_end : nvme_del_queue_end); 1633db3cbfffSKeith Busch return 0; 1634db3cbfffSKeith Busch } 1635db3cbfffSKeith Busch 163670659060SKeith Busch static void nvme_disable_io_queues(struct nvme_dev *dev, int queues) 1637db3cbfffSKeith Busch { 163870659060SKeith Busch int pass; 1639db3cbfffSKeith Busch unsigned long timeout; 1640db3cbfffSKeith Busch u8 opcode = nvme_admin_delete_sq; 1641db3cbfffSKeith Busch 1642db3cbfffSKeith Busch for (pass = 0; pass < 2; pass++) { 1643014a0d60SKeith Busch int sent = 0, i = queues; 1644db3cbfffSKeith Busch 1645db3cbfffSKeith Busch reinit_completion(&dev->ioq_wait); 1646db3cbfffSKeith Busch retry: 1647db3cbfffSKeith Busch timeout = ADMIN_TIMEOUT; 1648c21377f8SGabriel Krisman Bertazi for (; i > 0; i--, sent++) 1649c21377f8SGabriel Krisman Bertazi if (nvme_delete_queue(dev->queues[i], opcode)) 1650db3cbfffSKeith Busch break; 1651c21377f8SGabriel Krisman Bertazi 1652db3cbfffSKeith Busch while (sent--) { 1653db3cbfffSKeith Busch timeout = wait_for_completion_io_timeout(&dev->ioq_wait, timeout); 1654db3cbfffSKeith Busch if (timeout == 0) 1655db3cbfffSKeith Busch return; 1656db3cbfffSKeith Busch if (i) 1657db3cbfffSKeith Busch goto retry; 1658db3cbfffSKeith Busch } 1659db3cbfffSKeith Busch opcode = nvme_admin_delete_cq; 1660db3cbfffSKeith Busch } 1661db3cbfffSKeith Busch } 1662db3cbfffSKeith Busch 166357dacad5SJay Sternberg /* 166457dacad5SJay Sternberg * Return: error value if an error occurred setting up the queues or calling 166557dacad5SJay Sternberg * Identify Device. 0 if these succeeded, even if adding some of the 166657dacad5SJay Sternberg * namespaces failed. At the moment, these failures are silent. TBD which 166757dacad5SJay Sternberg * failures should be reported. 166857dacad5SJay Sternberg */ 166957dacad5SJay Sternberg static int nvme_dev_add(struct nvme_dev *dev) 167057dacad5SJay Sternberg { 16715bae7f73SChristoph Hellwig if (!dev->ctrl.tagset) { 167257dacad5SJay Sternberg dev->tagset.ops = &nvme_mq_ops; 167357dacad5SJay Sternberg dev->tagset.nr_hw_queues = dev->online_queues - 1; 167457dacad5SJay Sternberg dev->tagset.timeout = NVME_IO_TIMEOUT; 167557dacad5SJay Sternberg dev->tagset.numa_node = dev_to_node(dev->dev); 167657dacad5SJay Sternberg dev->tagset.queue_depth = 167757dacad5SJay Sternberg min_t(int, dev->q_depth, BLK_MQ_MAX_DEPTH) - 1; 167857dacad5SJay Sternberg dev->tagset.cmd_size = nvme_cmd_size(dev); 167957dacad5SJay Sternberg dev->tagset.flags = BLK_MQ_F_SHOULD_MERGE; 168057dacad5SJay Sternberg dev->tagset.driver_data = dev; 168157dacad5SJay Sternberg 168257dacad5SJay Sternberg if (blk_mq_alloc_tag_set(&dev->tagset)) 168357dacad5SJay Sternberg return 0; 16845bae7f73SChristoph Hellwig dev->ctrl.tagset = &dev->tagset; 1685f9f38e33SHelen Koike 1686f9f38e33SHelen Koike nvme_dbbuf_set(dev); 1687949928c1SKeith Busch } else { 1688949928c1SKeith Busch blk_mq_update_nr_hw_queues(&dev->tagset, dev->online_queues - 1); 1689949928c1SKeith Busch 1690949928c1SKeith Busch /* Free previously allocated queues that are no longer usable */ 1691949928c1SKeith Busch nvme_free_queues(dev, dev->online_queues); 169257dacad5SJay Sternberg } 1693949928c1SKeith Busch 169457dacad5SJay Sternberg return 0; 169557dacad5SJay Sternberg } 169657dacad5SJay Sternberg 1697b00a726aSKeith Busch static int nvme_pci_enable(struct nvme_dev *dev) 169857dacad5SJay Sternberg { 169957dacad5SJay Sternberg u64 cap; 1700b00a726aSKeith Busch int result = -ENOMEM; 170157dacad5SJay Sternberg struct pci_dev *pdev = to_pci_dev(dev->dev); 170257dacad5SJay 
Sternberg 170357dacad5SJay Sternberg if (pci_enable_device_mem(pdev)) 170457dacad5SJay Sternberg return result; 170557dacad5SJay Sternberg 170657dacad5SJay Sternberg pci_set_master(pdev); 170757dacad5SJay Sternberg 170857dacad5SJay Sternberg if (dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(64)) && 170957dacad5SJay Sternberg dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(32))) 171057dacad5SJay Sternberg goto disable; 171157dacad5SJay Sternberg 17127a67cbeaSChristoph Hellwig if (readl(dev->bar + NVME_REG_CSTS) == -1) { 171357dacad5SJay Sternberg result = -ENODEV; 1714b00a726aSKeith Busch goto disable; 171557dacad5SJay Sternberg } 171657dacad5SJay Sternberg 171757dacad5SJay Sternberg /* 1718a5229050SKeith Busch * Some devices and/or platforms don't advertise or work with INTx 1719a5229050SKeith Busch * interrupts. Pre-enable a single MSIX or MSI vec for setup. We'll 1720a5229050SKeith Busch * adjust this later. 172157dacad5SJay Sternberg */ 1722dca51e78SChristoph Hellwig result = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES); 1723dca51e78SChristoph Hellwig if (result < 0) 1724dca51e78SChristoph Hellwig return result; 172557dacad5SJay Sternberg 17267a67cbeaSChristoph Hellwig cap = lo_hi_readq(dev->bar + NVME_REG_CAP); 17277a67cbeaSChristoph Hellwig 172857dacad5SJay Sternberg dev->q_depth = min_t(int, NVME_CAP_MQES(cap) + 1, NVME_Q_DEPTH); 172957dacad5SJay Sternberg dev->db_stride = 1 << NVME_CAP_STRIDE(cap); 17307a67cbeaSChristoph Hellwig dev->dbs = dev->bar + 4096; 17311f390c1fSStephan Günther 17321f390c1fSStephan Günther /* 17331f390c1fSStephan Günther * Temporary fix for the Apple controller found in the MacBook8,1 and 17341f390c1fSStephan Günther * some MacBook7,1 to avoid controller resets and data loss. 17351f390c1fSStephan Günther */ 17361f390c1fSStephan Günther if (pdev->vendor == PCI_VENDOR_ID_APPLE && pdev->device == 0x2001) { 17371f390c1fSStephan Günther dev->q_depth = 2; 17381f390c1fSStephan Günther dev_warn(dev->dev, "detected Apple NVMe controller, set " 17391f390c1fSStephan Günther "queue depth=%u to work around controller resets\n", 17401f390c1fSStephan Günther dev->q_depth); 17411f390c1fSStephan Günther } 17421f390c1fSStephan Günther 1743202021c1SStephen Bates /* 1744202021c1SStephen Bates * CMBs can currently only exist on >=1.2 PCIe devices. We only 1745202021c1SStephen Bates * populate sysfs if a CMB is implemented. Note that we add the 1746202021c1SStephen Bates * CMB attribute to the nvme_ctrl kobj which removes the need to remove 1747202021c1SStephen Bates * it on exit. Since nvme_dev_attrs_group has no name we can pass 1748202021c1SStephen Bates * NULL as final argument to sysfs_add_file_to_group. 
1749202021c1SStephen Bates */ 1750202021c1SStephen Bates 17518ef2074dSGabriel Krisman Bertazi if (readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 2, 0)) { 175257dacad5SJay Sternberg dev->cmb = nvme_map_cmb(dev); 175357dacad5SJay Sternberg 1754202021c1SStephen Bates if (dev->cmbsz) { 1755202021c1SStephen Bates if (sysfs_add_file_to_group(&dev->ctrl.device->kobj, 1756202021c1SStephen Bates &dev_attr_cmb.attr, NULL)) 1757202021c1SStephen Bates dev_warn(dev->dev, 1758202021c1SStephen Bates "failed to add sysfs attribute for CMB\n"); 1759202021c1SStephen Bates } 1760202021c1SStephen Bates } 1761202021c1SStephen Bates 1762a0a3408eSKeith Busch pci_enable_pcie_error_reporting(pdev); 1763a0a3408eSKeith Busch pci_save_state(pdev); 176457dacad5SJay Sternberg return 0; 176557dacad5SJay Sternberg 176657dacad5SJay Sternberg disable: 176757dacad5SJay Sternberg pci_disable_device(pdev); 176857dacad5SJay Sternberg return result; 176957dacad5SJay Sternberg } 177057dacad5SJay Sternberg 177157dacad5SJay Sternberg static void nvme_dev_unmap(struct nvme_dev *dev) 177257dacad5SJay Sternberg { 1773b00a726aSKeith Busch if (dev->bar) 1774b00a726aSKeith Busch iounmap(dev->bar); 1775a1f447b3SJohannes Thumshirn pci_release_mem_regions(to_pci_dev(dev->dev)); 1776b00a726aSKeith Busch } 1777b00a726aSKeith Busch 1778b00a726aSKeith Busch static void nvme_pci_disable(struct nvme_dev *dev) 1779b00a726aSKeith Busch { 178057dacad5SJay Sternberg struct pci_dev *pdev = to_pci_dev(dev->dev); 178157dacad5SJay Sternberg 1782dca51e78SChristoph Hellwig pci_free_irq_vectors(pdev); 178357dacad5SJay Sternberg 1784a0a3408eSKeith Busch if (pci_is_enabled(pdev)) { 1785a0a3408eSKeith Busch pci_disable_pcie_error_reporting(pdev); 178657dacad5SJay Sternberg pci_disable_device(pdev); 178757dacad5SJay Sternberg } 1788a0a3408eSKeith Busch } 178957dacad5SJay Sternberg 1790a5cdb68cSKeith Busch static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown) 179157dacad5SJay Sternberg { 179270659060SKeith Busch int i, queues; 1793302ad8ccSKeith Busch bool dead = true; 1794302ad8ccSKeith Busch struct pci_dev *pdev = to_pci_dev(dev->dev); 179557dacad5SJay Sternberg 17962d55cd5fSChristoph Hellwig del_timer_sync(&dev->watchdog_timer); 179757dacad5SJay Sternberg 179877bf25eaSKeith Busch mutex_lock(&dev->shutdown_lock); 1799302ad8ccSKeith Busch if (pci_is_enabled(pdev)) { 1800302ad8ccSKeith Busch u32 csts = readl(dev->bar + NVME_REG_CSTS); 1801302ad8ccSKeith Busch 1802302ad8ccSKeith Busch if (dev->ctrl.state == NVME_CTRL_LIVE) 1803302ad8ccSKeith Busch nvme_start_freeze(&dev->ctrl); 1804302ad8ccSKeith Busch dead = !!((csts & NVME_CSTS_CFS) || !(csts & NVME_CSTS_RDY) || 1805302ad8ccSKeith Busch pdev->error_state != pci_channel_io_normal); 180657dacad5SJay Sternberg } 1807c21377f8SGabriel Krisman Bertazi 1808302ad8ccSKeith Busch /* 1809302ad8ccSKeith Busch * Give the controller a chance to complete all entered requests if 1810302ad8ccSKeith Busch * doing a safe shutdown. 
1811302ad8ccSKeith Busch */ 1812302ad8ccSKeith Busch if (!dead && shutdown) 1813302ad8ccSKeith Busch nvme_wait_freeze_timeout(&dev->ctrl, NVME_IO_TIMEOUT); 1814302ad8ccSKeith Busch nvme_stop_queues(&dev->ctrl); 1815302ad8ccSKeith Busch 181670659060SKeith Busch queues = dev->online_queues - 1; 1817c21377f8SGabriel Krisman Bertazi for (i = dev->queue_count - 1; i > 0; i--) 1818c21377f8SGabriel Krisman Bertazi nvme_suspend_queue(dev->queues[i]); 1819c21377f8SGabriel Krisman Bertazi 1820302ad8ccSKeith Busch if (dead) { 182182469c59SGabriel Krisman Bertazi /* A device might become IO incapable very soon during 182282469c59SGabriel Krisman Bertazi * probe, before the admin queue is configured. Thus, 182382469c59SGabriel Krisman Bertazi * queue_count can be 0 here. 182482469c59SGabriel Krisman Bertazi */ 182582469c59SGabriel Krisman Bertazi if (dev->queue_count) 1826c21377f8SGabriel Krisman Bertazi nvme_suspend_queue(dev->queues[0]); 182757dacad5SJay Sternberg } else { 182870659060SKeith Busch nvme_disable_io_queues(dev, queues); 1829a5cdb68cSKeith Busch nvme_disable_admin_queue(dev, shutdown); 183057dacad5SJay Sternberg } 1831b00a726aSKeith Busch nvme_pci_disable(dev); 183257dacad5SJay Sternberg 1833e1958e65SMing Lin blk_mq_tagset_busy_iter(&dev->tagset, nvme_cancel_request, &dev->ctrl); 1834e1958e65SMing Lin blk_mq_tagset_busy_iter(&dev->admin_tagset, nvme_cancel_request, &dev->ctrl); 1835302ad8ccSKeith Busch 1836302ad8ccSKeith Busch /* 1837302ad8ccSKeith Busch * The driver will not be starting up queues again if shutting down so 1838302ad8ccSKeith Busch * must flush all entered requests to their failed completion to avoid 1839302ad8ccSKeith Busch * deadlocking blk-mq hot-cpu notifier. 1840302ad8ccSKeith Busch */ 1841302ad8ccSKeith Busch if (shutdown) 1842302ad8ccSKeith Busch nvme_start_queues(&dev->ctrl); 184377bf25eaSKeith Busch mutex_unlock(&dev->shutdown_lock); 184457dacad5SJay Sternberg } 184557dacad5SJay Sternberg 184657dacad5SJay Sternberg static int nvme_setup_prp_pools(struct nvme_dev *dev) 184757dacad5SJay Sternberg { 184857dacad5SJay Sternberg dev->prp_page_pool = dma_pool_create("prp list page", dev->dev, 184957dacad5SJay Sternberg PAGE_SIZE, PAGE_SIZE, 0); 185057dacad5SJay Sternberg if (!dev->prp_page_pool) 185157dacad5SJay Sternberg return -ENOMEM; 185257dacad5SJay Sternberg 185357dacad5SJay Sternberg /* Optimisation for I/Os between 4k and 128k */ 185457dacad5SJay Sternberg dev->prp_small_pool = dma_pool_create("prp list 256", dev->dev, 185557dacad5SJay Sternberg 256, 256, 0); 185657dacad5SJay Sternberg if (!dev->prp_small_pool) { 185757dacad5SJay Sternberg dma_pool_destroy(dev->prp_page_pool); 185857dacad5SJay Sternberg return -ENOMEM; 185957dacad5SJay Sternberg } 186057dacad5SJay Sternberg return 0; 186157dacad5SJay Sternberg } 186257dacad5SJay Sternberg 186357dacad5SJay Sternberg static void nvme_release_prp_pools(struct nvme_dev *dev) 186457dacad5SJay Sternberg { 186557dacad5SJay Sternberg dma_pool_destroy(dev->prp_page_pool); 186657dacad5SJay Sternberg dma_pool_destroy(dev->prp_small_pool); 186757dacad5SJay Sternberg } 186857dacad5SJay Sternberg 18691673f1f0SChristoph Hellwig static void nvme_pci_free_ctrl(struct nvme_ctrl *ctrl) 187057dacad5SJay Sternberg { 18711673f1f0SChristoph Hellwig struct nvme_dev *dev = to_nvme_dev(ctrl); 187257dacad5SJay Sternberg 1873f9f38e33SHelen Koike nvme_dbbuf_dma_free(dev); 187457dacad5SJay Sternberg put_device(dev->dev); 187557dacad5SJay Sternberg if (dev->tagset.tags) 187657dacad5SJay Sternberg blk_mq_free_tag_set(&dev->tagset); 
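	/* Drop the reference taken with blk_get_queue() in nvme_alloc_admin_tags(). */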
18771c63dc66SChristoph Hellwig if (dev->ctrl.admin_q) 18781c63dc66SChristoph Hellwig blk_put_queue(dev->ctrl.admin_q); 187957dacad5SJay Sternberg kfree(dev->queues); 1880e286bcfcSScott Bauer free_opal_dev(dev->ctrl.opal_dev); 188157dacad5SJay Sternberg kfree(dev); 188257dacad5SJay Sternberg } 188357dacad5SJay Sternberg 1884f58944e2SKeith Busch static void nvme_remove_dead_ctrl(struct nvme_dev *dev, int status) 1885f58944e2SKeith Busch { 1886237045fcSLinus Torvalds dev_warn(dev->ctrl.device, "Removing after probe failure status: %d\n", status); 1887f58944e2SKeith Busch 1888f58944e2SKeith Busch kref_get(&dev->ctrl.kref); 188969d9a99cSKeith Busch nvme_dev_disable(dev, false); 1890f58944e2SKeith Busch if (!schedule_work(&dev->remove_work)) 1891f58944e2SKeith Busch nvme_put_ctrl(&dev->ctrl); 1892f58944e2SKeith Busch } 1893f58944e2SKeith Busch 1894fd634f41SChristoph Hellwig static void nvme_reset_work(struct work_struct *work) 189557dacad5SJay Sternberg { 1896fd634f41SChristoph Hellwig struct nvme_dev *dev = container_of(work, struct nvme_dev, reset_work); 1897a98e58e5SScott Bauer bool was_suspend = !!(dev->ctrl.ctrl_config & NVME_CC_SHN_NORMAL); 1898f58944e2SKeith Busch int result = -ENODEV; 189957dacad5SJay Sternberg 1900bb8d261eSChristoph Hellwig if (WARN_ON(dev->ctrl.state == NVME_CTRL_RESETTING)) 1901fd634f41SChristoph Hellwig goto out; 1902fd634f41SChristoph Hellwig 1903fd634f41SChristoph Hellwig /* 1904fd634f41SChristoph Hellwig * If we're called to reset a live controller first shut it down before 1905fd634f41SChristoph Hellwig * moving on. 1906fd634f41SChristoph Hellwig */ 1907b00a726aSKeith Busch if (dev->ctrl.ctrl_config & NVME_CC_ENABLE) 1908a5cdb68cSKeith Busch nvme_dev_disable(dev, false); 1909fd634f41SChristoph Hellwig 1910bb8d261eSChristoph Hellwig if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_RESETTING)) 19119bf2b972SKeith Busch goto out; 19129bf2b972SKeith Busch 1913b00a726aSKeith Busch result = nvme_pci_enable(dev); 191457dacad5SJay Sternberg if (result) 191557dacad5SJay Sternberg goto out; 191657dacad5SJay Sternberg 191757dacad5SJay Sternberg result = nvme_configure_admin_queue(dev); 191857dacad5SJay Sternberg if (result) 1919f58944e2SKeith Busch goto out; 192057dacad5SJay Sternberg 192157dacad5SJay Sternberg nvme_init_queue(dev->queues[0], 0); 192257dacad5SJay Sternberg result = nvme_alloc_admin_tags(dev); 192357dacad5SJay Sternberg if (result) 1924f58944e2SKeith Busch goto out; 192557dacad5SJay Sternberg 1926ce4541f4SChristoph Hellwig result = nvme_init_identify(&dev->ctrl); 1927ce4541f4SChristoph Hellwig if (result) 1928f58944e2SKeith Busch goto out; 1929ce4541f4SChristoph Hellwig 1930e286bcfcSScott Bauer if (dev->ctrl.oacs & NVME_CTRL_OACS_SEC_SUPP) { 1931e286bcfcSScott Bauer if (!dev->ctrl.opal_dev) 19324f1244c8SChristoph Hellwig dev->ctrl.opal_dev = 19334f1244c8SChristoph Hellwig init_opal_dev(&dev->ctrl, &nvme_sec_submit); 1934e286bcfcSScott Bauer else if (was_suspend) 19354f1244c8SChristoph Hellwig opal_unlock_from_suspend(dev->ctrl.opal_dev); 1936e286bcfcSScott Bauer } else { 1937e286bcfcSScott Bauer free_opal_dev(dev->ctrl.opal_dev); 1938e286bcfcSScott Bauer dev->ctrl.opal_dev = NULL; 1939e286bcfcSScott Bauer } 1940a98e58e5SScott Bauer 1941f9f38e33SHelen Koike if (dev->ctrl.oacs & NVME_CTRL_OACS_DBBUF_SUPP) { 1942f9f38e33SHelen Koike result = nvme_dbbuf_dma_alloc(dev); 1943f9f38e33SHelen Koike if (result) 1944f9f38e33SHelen Koike dev_warn(dev->dev, 1945f9f38e33SHelen Koike "unable to allocate dma for dbbuf\n"); 1946f9f38e33SHelen Koike } 1947f9f38e33SHelen Koike 
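	/*
	 * Bring up the I/O queues next; any failure from here on jumps to
	 * the "out" label and tears the controller down via
	 * nvme_remove_dead_ctrl().
	 */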
194857dacad5SJay Sternberg result = nvme_setup_io_queues(dev); 194957dacad5SJay Sternberg if (result) 1950f58944e2SKeith Busch goto out; 195157dacad5SJay Sternberg 195221f033f7SKeith Busch /* 195321f033f7SKeith Busch * A controller that can not execute IO typically requires user 195421f033f7SKeith Busch * intervention to correct. For such degraded controllers, the driver 195521f033f7SKeith Busch * should not submit commands the user did not request, so skip 195621f033f7SKeith Busch * registering for asynchronous event notification on this condition. 195721f033f7SKeith Busch */ 1958f866fc42SChristoph Hellwig if (dev->online_queues > 1) 1959f866fc42SChristoph Hellwig nvme_queue_async_events(&dev->ctrl); 196057dacad5SJay Sternberg 19612d55cd5fSChristoph Hellwig mod_timer(&dev->watchdog_timer, round_jiffies(jiffies + HZ)); 196257dacad5SJay Sternberg 196357dacad5SJay Sternberg /* 196457dacad5SJay Sternberg * Keep the controller around but remove all namespaces if we don't have 196557dacad5SJay Sternberg * any working I/O queue. 196657dacad5SJay Sternberg */ 196757dacad5SJay Sternberg if (dev->online_queues < 2) { 19681b3c47c1SSagi Grimberg dev_warn(dev->ctrl.device, "IO queues not created\n"); 19693b24774eSKeith Busch nvme_kill_queues(&dev->ctrl); 19705bae7f73SChristoph Hellwig nvme_remove_namespaces(&dev->ctrl); 197157dacad5SJay Sternberg } else { 197225646264SKeith Busch nvme_start_queues(&dev->ctrl); 1973302ad8ccSKeith Busch nvme_wait_freeze(&dev->ctrl); 197457dacad5SJay Sternberg nvme_dev_add(dev); 1975302ad8ccSKeith Busch nvme_unfreeze(&dev->ctrl); 197657dacad5SJay Sternberg } 197757dacad5SJay Sternberg 1978bb8d261eSChristoph Hellwig if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_LIVE)) { 1979bb8d261eSChristoph Hellwig dev_warn(dev->ctrl.device, "failed to mark controller live\n"); 1980bb8d261eSChristoph Hellwig goto out; 1981bb8d261eSChristoph Hellwig } 198292911a55SChristoph Hellwig 198392911a55SChristoph Hellwig if (dev->online_queues > 1) 19845955be21SChristoph Hellwig nvme_queue_scan(&dev->ctrl); 198557dacad5SJay Sternberg return; 198657dacad5SJay Sternberg 198757dacad5SJay Sternberg out: 1988f58944e2SKeith Busch nvme_remove_dead_ctrl(dev, result); 198957dacad5SJay Sternberg } 199057dacad5SJay Sternberg 19915c8809e6SChristoph Hellwig static void nvme_remove_dead_ctrl_work(struct work_struct *work) 199257dacad5SJay Sternberg { 19935c8809e6SChristoph Hellwig struct nvme_dev *dev = container_of(work, struct nvme_dev, remove_work); 199457dacad5SJay Sternberg struct pci_dev *pdev = to_pci_dev(dev->dev); 199557dacad5SJay Sternberg 199669d9a99cSKeith Busch nvme_kill_queues(&dev->ctrl); 199757dacad5SJay Sternberg if (pci_get_drvdata(pdev)) 1998921920abSKeith Busch device_release_driver(&pdev->dev); 19991673f1f0SChristoph Hellwig nvme_put_ctrl(&dev->ctrl); 200057dacad5SJay Sternberg } 200157dacad5SJay Sternberg 200257dacad5SJay Sternberg static int nvme_reset(struct nvme_dev *dev) 200357dacad5SJay Sternberg { 20041c63dc66SChristoph Hellwig if (!dev->ctrl.admin_q || blk_queue_dying(dev->ctrl.admin_q)) 200557dacad5SJay Sternberg return -ENODEV; 2006c5f6ce97SKeith Busch if (work_busy(&dev->reset_work)) 2007c5f6ce97SKeith Busch return -ENODEV; 2008846cc05fSChristoph Hellwig if (!queue_work(nvme_workq, &dev->reset_work)) 2009846cc05fSChristoph Hellwig return -EBUSY; 201057dacad5SJay Sternberg return 0; 201157dacad5SJay Sternberg } 201257dacad5SJay Sternberg 20131c63dc66SChristoph Hellwig static int nvme_pci_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val) 201457dacad5SJay Sternberg { 
20151c63dc66SChristoph Hellwig *val = readl(to_nvme_dev(ctrl)->bar + off); 20161c63dc66SChristoph Hellwig return 0; 201757dacad5SJay Sternberg } 20181c63dc66SChristoph Hellwig 20195fd4ce1bSChristoph Hellwig static int nvme_pci_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val) 20205fd4ce1bSChristoph Hellwig { 20215fd4ce1bSChristoph Hellwig writel(val, to_nvme_dev(ctrl)->bar + off); 20225fd4ce1bSChristoph Hellwig return 0; 20235fd4ce1bSChristoph Hellwig } 20245fd4ce1bSChristoph Hellwig 20257fd8930fSChristoph Hellwig static int nvme_pci_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val) 20267fd8930fSChristoph Hellwig { 20277fd8930fSChristoph Hellwig *val = readq(to_nvme_dev(ctrl)->bar + off); 20287fd8930fSChristoph Hellwig return 0; 20297fd8930fSChristoph Hellwig } 20307fd8930fSChristoph Hellwig 2031f3ca80fcSChristoph Hellwig static int nvme_pci_reset_ctrl(struct nvme_ctrl *ctrl) 2032f3ca80fcSChristoph Hellwig { 2033c5f6ce97SKeith Busch struct nvme_dev *dev = to_nvme_dev(ctrl); 2034c5f6ce97SKeith Busch int ret = nvme_reset(dev); 2035c5f6ce97SKeith Busch 2036c5f6ce97SKeith Busch if (!ret) 2037c5f6ce97SKeith Busch flush_work(&dev->reset_work); 2038c5f6ce97SKeith Busch return ret; 2039f3ca80fcSChristoph Hellwig } 2040f3ca80fcSChristoph Hellwig 20411c63dc66SChristoph Hellwig static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = { 20421a353d85SMing Lin .name = "pcie", 2043e439bb12SSagi Grimberg .module = THIS_MODULE, 20441c63dc66SChristoph Hellwig .reg_read32 = nvme_pci_reg_read32, 20455fd4ce1bSChristoph Hellwig .reg_write32 = nvme_pci_reg_write32, 20467fd8930fSChristoph Hellwig .reg_read64 = nvme_pci_reg_read64, 2047f3ca80fcSChristoph Hellwig .reset_ctrl = nvme_pci_reset_ctrl, 20481673f1f0SChristoph Hellwig .free_ctrl = nvme_pci_free_ctrl, 2049f866fc42SChristoph Hellwig .submit_async_event = nvme_pci_submit_async_event, 20501c63dc66SChristoph Hellwig }; 205157dacad5SJay Sternberg 2052b00a726aSKeith Busch static int nvme_dev_map(struct nvme_dev *dev) 2053b00a726aSKeith Busch { 2054b00a726aSKeith Busch struct pci_dev *pdev = to_pci_dev(dev->dev); 2055b00a726aSKeith Busch 2056a1f447b3SJohannes Thumshirn if (pci_request_mem_regions(pdev, "nvme")) 2057b00a726aSKeith Busch return -ENODEV; 2058b00a726aSKeith Busch 2059b00a726aSKeith Busch dev->bar = ioremap(pci_resource_start(pdev, 0), 8192); 2060b00a726aSKeith Busch if (!dev->bar) 2061b00a726aSKeith Busch goto release; 2062b00a726aSKeith Busch 2063b00a726aSKeith Busch return 0; 2064b00a726aSKeith Busch release: 2065a1f447b3SJohannes Thumshirn pci_release_mem_regions(pdev); 2066b00a726aSKeith Busch return -ENODEV; 2067b00a726aSKeith Busch } 2068b00a726aSKeith Busch 2069ff5350a8SAndy Lutomirski static unsigned long check_dell_samsung_bug(struct pci_dev *pdev) 2070ff5350a8SAndy Lutomirski { 2071ff5350a8SAndy Lutomirski if (pdev->vendor == 0x144d && pdev->device == 0xa802) { 2072ff5350a8SAndy Lutomirski /* 2073ff5350a8SAndy Lutomirski * Several Samsung devices seem to drop off the PCIe bus 2074ff5350a8SAndy Lutomirski * randomly when APST is on and uses the deepest sleep state. 2075ff5350a8SAndy Lutomirski * This has been observed on a Samsung "SM951 NVMe SAMSUNG 2076ff5350a8SAndy Lutomirski * 256GB", a "PM951 NVMe SAMSUNG 512GB", and a "Samsung SSD 2077ff5350a8SAndy Lutomirski * 950 PRO 256GB", but it seems to be restricted to two Dell 2078ff5350a8SAndy Lutomirski * laptops. 
2079ff5350a8SAndy Lutomirski */ 2080ff5350a8SAndy Lutomirski if (dmi_match(DMI_SYS_VENDOR, "Dell Inc.") && 2081ff5350a8SAndy Lutomirski (dmi_match(DMI_PRODUCT_NAME, "XPS 15 9550") || 2082ff5350a8SAndy Lutomirski dmi_match(DMI_PRODUCT_NAME, "Precision 5510"))) 2083ff5350a8SAndy Lutomirski return NVME_QUIRK_NO_DEEPEST_PS; 2084ff5350a8SAndy Lutomirski } 2085ff5350a8SAndy Lutomirski 2086ff5350a8SAndy Lutomirski return 0; 2087ff5350a8SAndy Lutomirski } 2088ff5350a8SAndy Lutomirski 208957dacad5SJay Sternberg static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id) 209057dacad5SJay Sternberg { 209157dacad5SJay Sternberg int node, result = -ENOMEM; 209257dacad5SJay Sternberg struct nvme_dev *dev; 2093ff5350a8SAndy Lutomirski unsigned long quirks = id->driver_data; 209457dacad5SJay Sternberg 209557dacad5SJay Sternberg node = dev_to_node(&pdev->dev); 209657dacad5SJay Sternberg if (node == NUMA_NO_NODE) 20972fa84351SMasayoshi Mizuma set_dev_node(&pdev->dev, first_memory_node); 209857dacad5SJay Sternberg 209957dacad5SJay Sternberg dev = kzalloc_node(sizeof(*dev), GFP_KERNEL, node); 210057dacad5SJay Sternberg if (!dev) 210157dacad5SJay Sternberg return -ENOMEM; 210257dacad5SJay Sternberg dev->queues = kzalloc_node((num_possible_cpus() + 1) * sizeof(void *), 210357dacad5SJay Sternberg GFP_KERNEL, node); 210457dacad5SJay Sternberg if (!dev->queues) 210557dacad5SJay Sternberg goto free; 210657dacad5SJay Sternberg 210757dacad5SJay Sternberg dev->dev = get_device(&pdev->dev); 210857dacad5SJay Sternberg pci_set_drvdata(pdev, dev); 210957dacad5SJay Sternberg 2110b00a726aSKeith Busch result = nvme_dev_map(dev); 2111b00a726aSKeith Busch if (result) 2112b00a726aSKeith Busch goto free; 2113b00a726aSKeith Busch 2114f3ca80fcSChristoph Hellwig INIT_WORK(&dev->reset_work, nvme_reset_work); 21155c8809e6SChristoph Hellwig INIT_WORK(&dev->remove_work, nvme_remove_dead_ctrl_work); 21162d55cd5fSChristoph Hellwig setup_timer(&dev->watchdog_timer, nvme_watchdog_timer, 21172d55cd5fSChristoph Hellwig (unsigned long)dev); 211877bf25eaSKeith Busch mutex_init(&dev->shutdown_lock); 2119db3cbfffSKeith Busch init_completion(&dev->ioq_wait); 2120f3ca80fcSChristoph Hellwig 2121f3ca80fcSChristoph Hellwig result = nvme_setup_prp_pools(dev); 2122f3ca80fcSChristoph Hellwig if (result) 2123f3ca80fcSChristoph Hellwig goto put_pci; 2124f3ca80fcSChristoph Hellwig 2125ff5350a8SAndy Lutomirski quirks |= check_dell_samsung_bug(pdev); 2126ff5350a8SAndy Lutomirski 2127f3ca80fcSChristoph Hellwig result = nvme_init_ctrl(&dev->ctrl, &pdev->dev, &nvme_pci_ctrl_ops, 2128ff5350a8SAndy Lutomirski quirks); 2129f3ca80fcSChristoph Hellwig if (result) 2130f3ca80fcSChristoph Hellwig goto release_pools; 2131f3ca80fcSChristoph Hellwig 21321b3c47c1SSagi Grimberg dev_info(dev->ctrl.device, "pci function %s\n", dev_name(&pdev->dev)); 21331b3c47c1SSagi Grimberg 213492f7a162SKeith Busch queue_work(nvme_workq, &dev->reset_work); 213557dacad5SJay Sternberg return 0; 213657dacad5SJay Sternberg 213757dacad5SJay Sternberg release_pools: 213857dacad5SJay Sternberg nvme_release_prp_pools(dev); 213957dacad5SJay Sternberg put_pci: 214057dacad5SJay Sternberg put_device(dev->dev); 2141b00a726aSKeith Busch nvme_dev_unmap(dev); 214257dacad5SJay Sternberg free: 214357dacad5SJay Sternberg kfree(dev->queues); 214457dacad5SJay Sternberg kfree(dev); 214557dacad5SJay Sternberg return result; 214657dacad5SJay Sternberg } 214757dacad5SJay Sternberg 2148775755edSChristoph Hellwig static void nvme_reset_prepare(struct pci_dev *pdev) 214957dacad5SJay Sternberg { 
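	/*
	 * Quiesce the controller before a PCI core initiated reset; the
	 * paired nvme_reset_done() callback schedules a controller reset
	 * once the PCI reset has completed.
	 */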
2150775755edSChristoph Hellwig nvme_dev_disable(pci_get_drvdata(pdev), false); 2151775755edSChristoph Hellwig } 215257dacad5SJay Sternberg 2153775755edSChristoph Hellwig static void nvme_reset_done(struct pci_dev *pdev) 2154775755edSChristoph Hellwig { 2155775755edSChristoph Hellwig nvme_reset(pci_get_drvdata(pdev)); 215657dacad5SJay Sternberg } 215757dacad5SJay Sternberg 215857dacad5SJay Sternberg static void nvme_shutdown(struct pci_dev *pdev) 215957dacad5SJay Sternberg { 216057dacad5SJay Sternberg struct nvme_dev *dev = pci_get_drvdata(pdev); 2161a5cdb68cSKeith Busch nvme_dev_disable(dev, true); 216257dacad5SJay Sternberg } 216357dacad5SJay Sternberg 2164f58944e2SKeith Busch /* 2165f58944e2SKeith Busch * The driver's remove may be called on a device in a partially initialized 2166f58944e2SKeith Busch * state. This function must not have any dependencies on the device state in 2167f58944e2SKeith Busch * order to proceed. 2168f58944e2SKeith Busch */ 216957dacad5SJay Sternberg static void nvme_remove(struct pci_dev *pdev) 217057dacad5SJay Sternberg { 217157dacad5SJay Sternberg struct nvme_dev *dev = pci_get_drvdata(pdev); 217257dacad5SJay Sternberg 2173bb8d261eSChristoph Hellwig nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING); 2174bb8d261eSChristoph Hellwig 217557dacad5SJay Sternberg pci_set_drvdata(pdev, NULL); 21760ff9d4e1SKeith Busch 21776db28edaSKeith Busch if (!pci_device_is_present(pdev)) { 21780ff9d4e1SKeith Busch nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DEAD); 21796db28edaSKeith Busch nvme_dev_disable(dev, false); 21806db28edaSKeith Busch } 21810ff9d4e1SKeith Busch 21829bf2b972SKeith Busch flush_work(&dev->reset_work); 218353029b04SKeith Busch nvme_uninit_ctrl(&dev->ctrl); 2184a5cdb68cSKeith Busch nvme_dev_disable(dev, true); 218557dacad5SJay Sternberg nvme_dev_remove_admin(dev); 218657dacad5SJay Sternberg nvme_free_queues(dev, 0); 218757dacad5SJay Sternberg nvme_release_cmb(dev); 218857dacad5SJay Sternberg nvme_release_prp_pools(dev); 2189b00a726aSKeith Busch nvme_dev_unmap(dev); 21901673f1f0SChristoph Hellwig nvme_put_ctrl(&dev->ctrl); 219157dacad5SJay Sternberg } 219257dacad5SJay Sternberg 219313880f5bSKeith Busch static int nvme_pci_sriov_configure(struct pci_dev *pdev, int numvfs) 219413880f5bSKeith Busch { 219513880f5bSKeith Busch int ret = 0; 219613880f5bSKeith Busch 219713880f5bSKeith Busch if (numvfs == 0) { 219813880f5bSKeith Busch if (pci_vfs_assigned(pdev)) { 219913880f5bSKeith Busch dev_warn(&pdev->dev, 220013880f5bSKeith Busch "Cannot disable SR-IOV VFs while assigned\n"); 220113880f5bSKeith Busch return -EPERM; 220213880f5bSKeith Busch } 220313880f5bSKeith Busch pci_disable_sriov(pdev); 220413880f5bSKeith Busch return 0; 220513880f5bSKeith Busch } 220613880f5bSKeith Busch 220713880f5bSKeith Busch ret = pci_enable_sriov(pdev, numvfs); 220813880f5bSKeith Busch return ret ? 
ret : numvfs; 220913880f5bSKeith Busch } 221013880f5bSKeith Busch 221157dacad5SJay Sternberg #ifdef CONFIG_PM_SLEEP 221257dacad5SJay Sternberg static int nvme_suspend(struct device *dev) 221357dacad5SJay Sternberg { 221457dacad5SJay Sternberg struct pci_dev *pdev = to_pci_dev(dev); 221557dacad5SJay Sternberg struct nvme_dev *ndev = pci_get_drvdata(pdev); 221657dacad5SJay Sternberg 2217a5cdb68cSKeith Busch nvme_dev_disable(ndev, true); 221857dacad5SJay Sternberg return 0; 221957dacad5SJay Sternberg } 222057dacad5SJay Sternberg 222157dacad5SJay Sternberg static int nvme_resume(struct device *dev) 222257dacad5SJay Sternberg { 222357dacad5SJay Sternberg struct pci_dev *pdev = to_pci_dev(dev); 222457dacad5SJay Sternberg struct nvme_dev *ndev = pci_get_drvdata(pdev); 222557dacad5SJay Sternberg 2226c5f6ce97SKeith Busch nvme_reset(ndev); 222757dacad5SJay Sternberg return 0; 222857dacad5SJay Sternberg } 222957dacad5SJay Sternberg #endif 223057dacad5SJay Sternberg 223157dacad5SJay Sternberg static SIMPLE_DEV_PM_OPS(nvme_dev_pm_ops, nvme_suspend, nvme_resume); 223257dacad5SJay Sternberg 2233a0a3408eSKeith Busch static pci_ers_result_t nvme_error_detected(struct pci_dev *pdev, 2234a0a3408eSKeith Busch pci_channel_state_t state) 2235a0a3408eSKeith Busch { 2236a0a3408eSKeith Busch struct nvme_dev *dev = pci_get_drvdata(pdev); 2237a0a3408eSKeith Busch 2238a0a3408eSKeith Busch /* 2239a0a3408eSKeith Busch * A frozen channel requires a reset. When detected, this method will 2240a0a3408eSKeith Busch * shutdown the controller to quiesce. The controller will be restarted 2241a0a3408eSKeith Busch * after the slot reset through driver's slot_reset callback. 2242a0a3408eSKeith Busch */ 2243a0a3408eSKeith Busch switch (state) { 2244a0a3408eSKeith Busch case pci_channel_io_normal: 2245a0a3408eSKeith Busch return PCI_ERS_RESULT_CAN_RECOVER; 2246a0a3408eSKeith Busch case pci_channel_io_frozen: 2247d011fb31SKeith Busch dev_warn(dev->ctrl.device, 2248d011fb31SKeith Busch "frozen state error detected, reset controller\n"); 2249a5cdb68cSKeith Busch nvme_dev_disable(dev, false); 2250a0a3408eSKeith Busch return PCI_ERS_RESULT_NEED_RESET; 2251a0a3408eSKeith Busch case pci_channel_io_perm_failure: 2252d011fb31SKeith Busch dev_warn(dev->ctrl.device, 2253d011fb31SKeith Busch "failure state error detected, request disconnect\n"); 2254a0a3408eSKeith Busch return PCI_ERS_RESULT_DISCONNECT; 2255a0a3408eSKeith Busch } 2256a0a3408eSKeith Busch return PCI_ERS_RESULT_NEED_RESET; 2257a0a3408eSKeith Busch } 2258a0a3408eSKeith Busch 2259a0a3408eSKeith Busch static pci_ers_result_t nvme_slot_reset(struct pci_dev *pdev) 2260a0a3408eSKeith Busch { 2261a0a3408eSKeith Busch struct nvme_dev *dev = pci_get_drvdata(pdev); 2262a0a3408eSKeith Busch 22631b3c47c1SSagi Grimberg dev_info(dev->ctrl.device, "restart after slot reset\n"); 2264a0a3408eSKeith Busch pci_restore_state(pdev); 2265c5f6ce97SKeith Busch nvme_reset(dev); 2266a0a3408eSKeith Busch return PCI_ERS_RESULT_RECOVERED; 2267a0a3408eSKeith Busch } 2268a0a3408eSKeith Busch 2269a0a3408eSKeith Busch static void nvme_error_resume(struct pci_dev *pdev) 2270a0a3408eSKeith Busch { 2271a0a3408eSKeith Busch pci_cleanup_aer_uncorrect_error_status(pdev); 2272a0a3408eSKeith Busch } 2273a0a3408eSKeith Busch 227457dacad5SJay Sternberg static const struct pci_error_handlers nvme_err_handler = { 227557dacad5SJay Sternberg .error_detected = nvme_error_detected, 227657dacad5SJay Sternberg .slot_reset = nvme_slot_reset, 227757dacad5SJay Sternberg .resume = nvme_error_resume, 2278775755edSChristoph Hellwig 
.reset_prepare = nvme_reset_prepare, 2279775755edSChristoph Hellwig .reset_done = nvme_reset_done, 228057dacad5SJay Sternberg }; 228157dacad5SJay Sternberg 228257dacad5SJay Sternberg static const struct pci_device_id nvme_id_table[] = { 2283106198edSChristoph Hellwig { PCI_VDEVICE(INTEL, 0x0953), 228408095e70SKeith Busch .driver_data = NVME_QUIRK_STRIPE_SIZE | 2285e850fd16SChristoph Hellwig NVME_QUIRK_DEALLOCATE_ZEROES, }, 228699466e70SKeith Busch { PCI_VDEVICE(INTEL, 0x0a53), 228799466e70SKeith Busch .driver_data = NVME_QUIRK_STRIPE_SIZE | 2288e850fd16SChristoph Hellwig NVME_QUIRK_DEALLOCATE_ZEROES, }, 228999466e70SKeith Busch { PCI_VDEVICE(INTEL, 0x0a54), 229099466e70SKeith Busch .driver_data = NVME_QUIRK_STRIPE_SIZE | 2291e850fd16SChristoph Hellwig NVME_QUIRK_DEALLOCATE_ZEROES, }, 2292540c801cSKeith Busch { PCI_VDEVICE(INTEL, 0x5845), /* Qemu emulated controller */ 2293540c801cSKeith Busch .driver_data = NVME_QUIRK_IDENTIFY_CNS, }, 229454adc010SGuilherme G. Piccoli { PCI_DEVICE(0x1c58, 0x0003), /* HGST adapter */ 229554adc010SGuilherme G. Piccoli .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, }, 2296015282c9SWenbo Wang { PCI_DEVICE(0x1c5f, 0x0540), /* Memblaze Pblaze4 adapter */ 2297015282c9SWenbo Wang .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, }, 229857dacad5SJay Sternberg { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) }, 2299c74dc780SStephan Günther { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001) }, 2300124298bdSDaniel Roschka { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2003) }, 230157dacad5SJay Sternberg { 0, } 230257dacad5SJay Sternberg }; 230357dacad5SJay Sternberg MODULE_DEVICE_TABLE(pci, nvme_id_table); 230457dacad5SJay Sternberg 230557dacad5SJay Sternberg static struct pci_driver nvme_driver = { 230657dacad5SJay Sternberg .name = "nvme", 230757dacad5SJay Sternberg .id_table = nvme_id_table, 230857dacad5SJay Sternberg .probe = nvme_probe, 230957dacad5SJay Sternberg .remove = nvme_remove, 231057dacad5SJay Sternberg .shutdown = nvme_shutdown, 231157dacad5SJay Sternberg .driver = { 231257dacad5SJay Sternberg .pm = &nvme_dev_pm_ops, 231357dacad5SJay Sternberg }, 231413880f5bSKeith Busch .sriov_configure = nvme_pci_sriov_configure, 231557dacad5SJay Sternberg .err_handler = &nvme_err_handler, 231657dacad5SJay Sternberg }; 231757dacad5SJay Sternberg 231857dacad5SJay Sternberg static int __init nvme_init(void) 231957dacad5SJay Sternberg { 232057dacad5SJay Sternberg int result; 232157dacad5SJay Sternberg 232292f7a162SKeith Busch nvme_workq = alloc_workqueue("nvme", WQ_UNBOUND | WQ_MEM_RECLAIM, 0); 232357dacad5SJay Sternberg if (!nvme_workq) 232457dacad5SJay Sternberg return -ENOMEM; 232557dacad5SJay Sternberg 232657dacad5SJay Sternberg result = pci_register_driver(&nvme_driver); 232757dacad5SJay Sternberg if (result) 232857dacad5SJay Sternberg destroy_workqueue(nvme_workq); 232957dacad5SJay Sternberg return result; 233057dacad5SJay Sternberg } 233157dacad5SJay Sternberg 233257dacad5SJay Sternberg static void __exit nvme_exit(void) 233357dacad5SJay Sternberg { 233457dacad5SJay Sternberg pci_unregister_driver(&nvme_driver); 233557dacad5SJay Sternberg destroy_workqueue(nvme_workq); 233657dacad5SJay Sternberg _nvme_check_size(); 233757dacad5SJay Sternberg } 233857dacad5SJay Sternberg 233957dacad5SJay Sternberg MODULE_AUTHOR("Matthew Wilcox <willy@linux.intel.com>"); 234057dacad5SJay Sternberg MODULE_LICENSE("GPL"); 234157dacad5SJay Sternberg MODULE_VERSION("1.0"); 234257dacad5SJay Sternberg module_init(nvme_init); 234357dacad5SJay Sternberg module_exit(nvme_exit); 2344