/*
 * NVM Express device driver
 * Copyright (c) 2011-2014, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/hdreg.h>
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kdev_t.h>
#include <linux/kthread.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/poison.h>
#include <linux/ptrace.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/t10-pi.h>
#include <linux/types.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <asm/unaligned.h>

#include "nvme.h"

#define NVME_Q_DEPTH		1024
#define NVME_AQ_DEPTH		256
#define SQ_SIZE(depth)		(depth * sizeof(struct nvme_command))
#define CQ_SIZE(depth)		(depth * sizeof(struct nvme_completion))

unsigned char admin_timeout = 60;
module_param(admin_timeout, byte, 0644);
MODULE_PARM_DESC(admin_timeout, "timeout in seconds for admin commands");

unsigned char nvme_io_timeout = 30;
module_param_named(io_timeout, nvme_io_timeout, byte, 0644);
MODULE_PARM_DESC(io_timeout, "timeout in seconds for I/O");

unsigned char shutdown_timeout = 5;
module_param(shutdown_timeout, byte, 0644);
MODULE_PARM_DESC(shutdown_timeout, "timeout in seconds for controller shutdown");
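/*
 * Illustrative note (not part of the original source): because the three
 * timeout parameters above are registered with mode 0644, they can be set
 * at load time, e.g. "modprobe nvme io_timeout=60", or adjusted later
 * through /sys/module/nvme/parameters/.
 */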
static int use_threaded_interrupts;
module_param(use_threaded_interrupts, int, 0);

static bool use_cmb_sqes = true;
module_param(use_cmb_sqes, bool, 0644);
MODULE_PARM_DESC(use_cmb_sqes, "use controller's memory buffer for I/O SQes");

static LIST_HEAD(dev_list);
static struct task_struct *nvme_thread;
static struct workqueue_struct *nvme_workq;
static wait_queue_head_t nvme_kthread_wait;

struct nvme_dev;
struct nvme_queue;
struct nvme_iod;

static int __nvme_reset(struct nvme_dev *dev);
static int nvme_reset(struct nvme_dev *dev);
static void nvme_process_cq(struct nvme_queue *nvmeq);
static void nvme_unmap_data(struct nvme_dev *dev, struct nvme_iod *iod);
static void nvme_dead_ctrl(struct nvme_dev *dev);

struct async_cmd_info {
	struct kthread_work work;
	struct kthread_worker *worker;
	struct request *req;
	u32 result;
	int status;
	void *ctx;
};

/*
 * Represents an NVM Express device.  Each nvme_dev is a PCI function.
 */
struct nvme_dev {
	struct list_head node;
	struct nvme_queue **queues;
	struct blk_mq_tag_set tagset;
	struct blk_mq_tag_set admin_tagset;
	u32 __iomem *dbs;
	struct device *dev;
	struct dma_pool *prp_page_pool;
	struct dma_pool *prp_small_pool;
	unsigned queue_count;
	unsigned online_queues;
	unsigned max_qid;
	int q_depth;
	u32 db_stride;
	struct msix_entry *entry;
	void __iomem *bar;
	struct work_struct reset_work;
	struct work_struct probe_work;
	struct work_struct scan_work;
	bool subsystem;
	void __iomem *cmb;
	dma_addr_t cmb_dma_addr;
	u64 cmb_size;
	u32 cmbsz;

	struct nvme_ctrl ctrl;
};

static inline struct nvme_dev *to_nvme_dev(struct nvme_ctrl *ctrl)
{
	return container_of(ctrl, struct nvme_dev, ctrl);
}
/*
 * An NVM Express queue.  Each device has at least two (one for admin
 * commands and one for I/O commands).
 */
struct nvme_queue {
	struct device *q_dmadev;
	struct nvme_dev *dev;
	char irqname[24];	/* nvme4294967295-65535\0 */
	spinlock_t q_lock;
	struct nvme_command *sq_cmds;
	struct nvme_command __iomem *sq_cmds_io;
	volatile struct nvme_completion *cqes;
	struct blk_mq_tags **tags;
	dma_addr_t sq_dma_addr;
	dma_addr_t cq_dma_addr;
	u32 __iomem *q_db;
	u16 q_depth;
	s16 cq_vector;
	u16 sq_head;
	u16 sq_tail;
	u16 cq_head;
	u16 qid;
	u8 cq_phase;
	u8 cqe_seen;
	struct async_cmd_info cmdinfo;
};
/*
 * The nvme_iod describes the data in an I/O, including the list of PRP
 * entries.  The PRP list itself isn't visible in this data structure
 * because C can't express a variable-length trailing allocation; use
 * nvme_alloc_iod to ensure there's enough space allocated to store it.
 */
struct nvme_iod {
	unsigned long private;	/* For the use of the submitter of the I/O */
	int npages;		/* In the PRP list.  0 means small pool in use */
	int offset;		/* Of PRP list */
	int nents;		/* Used in scatterlist */
	int length;		/* Of data, in bytes */
	dma_addr_t first_dma;
	struct scatterlist meta_sg[1]; /* metadata requires single contiguous buffer */
	struct scatterlist sg[0];
};

/*
 * Check we didn't inadvertently grow the command struct
 */
static inline void _nvme_check_size(void)
{
	BUILD_BUG_ON(sizeof(struct nvme_rw_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_create_cq) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_create_sq) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_delete_queue) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_features) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_format_cmd) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_abort_cmd) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != 4096);
	BUILD_BUG_ON(sizeof(struct nvme_id_ns) != 4096);
	BUILD_BUG_ON(sizeof(struct nvme_lba_range_type) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_smart_log) != 512);
}

typedef void (*nvme_completion_fn)(struct nvme_queue *, void *,
						struct nvme_completion *);

struct nvme_cmd_info {
	nvme_completion_fn fn;
	void *ctx;
	int aborted;
	struct nvme_queue *nvmeq;
	struct nvme_iod iod[0];
};

/*
 * Max size of iod being embedded in the request payload
 */
#define NVME_INT_PAGES		2
#define NVME_INT_BYTES(dev)	(NVME_INT_PAGES * (dev)->ctrl.page_size)
#define NVME_INT_MASK		0x01
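/*
 * Illustrative example (not part of the original source): with the usual
 * 4KB controller page size, NVME_INT_BYTES works out to 8KB, so any
 * request with at most two physical segments and at most 8KB of data can
 * be served from the iod embedded in struct nvme_cmd_info (see
 * nvme_alloc_iod below) and avoids a separate kmalloc.
 */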
/*
 * Will slightly overestimate the number of pages needed.  This is OK
 * as it only leads to a small amount of wasted memory for the lifetime of
 * the I/O.
 */
static int nvme_npages(unsigned size, struct nvme_dev *dev)
{
	unsigned nprps = DIV_ROUND_UP(size + dev->ctrl.page_size,
				      dev->ctrl.page_size);
	return DIV_ROUND_UP(8 * nprps, PAGE_SIZE - 8);
}

static unsigned int nvme_cmd_size(struct nvme_dev *dev)
{
	unsigned int ret = sizeof(struct nvme_cmd_info);

	ret += sizeof(struct nvme_iod);
	ret += sizeof(__le64 *) * nvme_npages(NVME_INT_BYTES(dev), dev);
	ret += sizeof(struct scatterlist) * NVME_INT_PAGES;

	return ret;
}
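/*
 * Worked example (illustrative): assuming both PAGE_SIZE and the
 * controller page size are 4096, a 1MB transfer gives
 * nprps = DIV_ROUND_UP(1048576 + 4096, 4096) = 257 PRP entries, and
 * DIV_ROUND_UP(8 * 257, 4096 - 8) = 1 PRP-list page.  The "- 8" accounts
 * for the last slot of each list page being reserved to chain to the
 * next list page.
 */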
static int nvme_admin_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
				unsigned int hctx_idx)
{
	struct nvme_dev *dev = data;
	struct nvme_queue *nvmeq = dev->queues[0];

	WARN_ON(hctx_idx != 0);
	WARN_ON(dev->admin_tagset.tags[0] != hctx->tags);
	WARN_ON(nvmeq->tags);

	hctx->driver_data = nvmeq;
	nvmeq->tags = &dev->admin_tagset.tags[0];
	return 0;
}

static void nvme_admin_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
{
	struct nvme_queue *nvmeq = hctx->driver_data;

	nvmeq->tags = NULL;
}

static int nvme_admin_init_request(void *data, struct request *req,
				unsigned int hctx_idx, unsigned int rq_idx,
				unsigned int numa_node)
{
	struct nvme_dev *dev = data;
	struct nvme_cmd_info *cmd = blk_mq_rq_to_pdu(req);
	struct nvme_queue *nvmeq = dev->queues[0];

	BUG_ON(!nvmeq);
	cmd->nvmeq = nvmeq;
	return 0;
}

static int nvme_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
			  unsigned int hctx_idx)
{
	struct nvme_dev *dev = data;
	struct nvme_queue *nvmeq = dev->queues[hctx_idx + 1];

	if (!nvmeq->tags)
		nvmeq->tags = &dev->tagset.tags[hctx_idx];

	WARN_ON(dev->tagset.tags[hctx_idx] != hctx->tags);
	hctx->driver_data = nvmeq;
	return 0;
}

static int nvme_init_request(void *data, struct request *req,
				unsigned int hctx_idx, unsigned int rq_idx,
				unsigned int numa_node)
{
	struct nvme_dev *dev = data;
	struct nvme_cmd_info *cmd = blk_mq_rq_to_pdu(req);
	struct nvme_queue *nvmeq = dev->queues[hctx_idx + 1];

	BUG_ON(!nvmeq);
	cmd->nvmeq = nvmeq;
	return 0;
}

static void nvme_set_info(struct nvme_cmd_info *cmd, void *ctx,
				nvme_completion_fn handler)
{
	cmd->fn = handler;
	cmd->ctx = ctx;
	cmd->aborted = 0;
	blk_mq_start_request(blk_mq_rq_from_pdu(cmd));
}

static void *iod_get_private(struct nvme_iod *iod)
{
	return (void *) (iod->private & ~0x1UL);
}

/*
 * If bit 0 is set, the iod is embedded in the request payload.
 */
static bool iod_should_kfree(struct nvme_iod *iod)
{
	return (iod->private & NVME_INT_MASK) == 0;
}
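/*
 * Illustrative sketch (not part of the original source): iod->private
 * stores the request pointer with NVME_INT_MASK (bit 0) doubling as an
 * "embedded" flag, which works because the pointer is always at least
 * word-aligned:
 *
 *	iod->private = (unsigned long)rq | NVME_INT_MASK;  // embedded iod
 *	iod->private = (unsigned long)rq;                  // kmalloc'd iod
 *
 * iod_get_private() masks the flag off again before using the pointer,
 * and iod_should_kfree() keys the free path off the same bit.
 */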
/* Special values must be less than 0x1000 */
#define CMD_CTX_BASE		((void *)POISON_POINTER_DELTA)
#define CMD_CTX_CANCELLED	(0x30C + CMD_CTX_BASE)
#define CMD_CTX_COMPLETED	(0x310 + CMD_CTX_BASE)
#define CMD_CTX_INVALID		(0x314 + CMD_CTX_BASE)

static void special_completion(struct nvme_queue *nvmeq, void *ctx,
						struct nvme_completion *cqe)
{
	if (ctx == CMD_CTX_CANCELLED)
		return;
	if (ctx == CMD_CTX_COMPLETED) {
		dev_warn(nvmeq->q_dmadev,
				"completed id %d twice on queue %d\n",
				cqe->command_id, le16_to_cpup(&cqe->sq_id));
		return;
	}
	if (ctx == CMD_CTX_INVALID) {
		dev_warn(nvmeq->q_dmadev,
				"invalid id %d completed on queue %d\n",
				cqe->command_id, le16_to_cpup(&cqe->sq_id));
		return;
	}
	dev_warn(nvmeq->q_dmadev, "Unknown special completion %p\n", ctx);
}

static void *cancel_cmd_info(struct nvme_cmd_info *cmd, nvme_completion_fn *fn)
{
	void *ctx;

	if (fn)
		*fn = cmd->fn;
	ctx = cmd->ctx;
	cmd->fn = special_completion;
	cmd->ctx = CMD_CTX_CANCELLED;
	return ctx;
}

static void async_req_completion(struct nvme_queue *nvmeq, void *ctx,
						struct nvme_completion *cqe)
{
	u32 result = le32_to_cpup(&cqe->result);
	u16 status = le16_to_cpup(&cqe->status) >> 1;

	if (status == NVME_SC_SUCCESS || status == NVME_SC_ABORT_REQ)
		++nvmeq->dev->ctrl.event_limit;
	if (status != NVME_SC_SUCCESS)
		return;

	switch (result & 0xff07) {
	case NVME_AER_NOTICE_NS_CHANGED:
		dev_info(nvmeq->q_dmadev, "rescanning\n");
		schedule_work(&nvmeq->dev->scan_work);
	default:
		dev_warn(nvmeq->q_dmadev, "async event result %08x\n", result);
	}
}

static void abort_completion(struct nvme_queue *nvmeq, void *ctx,
						struct nvme_completion *cqe)
{
	struct request *req = ctx;

	u16 status = le16_to_cpup(&cqe->status) >> 1;
	u32 result = le32_to_cpup(&cqe->result);

	blk_mq_free_request(req);

	dev_warn(nvmeq->q_dmadev, "Abort status:%x result:%x", status, result);
	++nvmeq->dev->ctrl.abort_limit;
}

static void async_completion(struct nvme_queue *nvmeq, void *ctx,
						struct nvme_completion *cqe)
{
	struct async_cmd_info *cmdinfo = ctx;
	cmdinfo->result = le32_to_cpup(&cqe->result);
	cmdinfo->status = le16_to_cpup(&cqe->status) >> 1;
	queue_kthread_work(cmdinfo->worker, &cmdinfo->work);
	blk_mq_free_request(cmdinfo->req);
}

static inline struct nvme_cmd_info *get_cmd_from_tag(struct nvme_queue *nvmeq,
				  unsigned int tag)
{
	struct request *req = blk_mq_tag_to_rq(*nvmeq->tags, tag);

	return blk_mq_rq_to_pdu(req);
}
/*
 * Called with local interrupts disabled and the q_lock held.  May not sleep.
 */
static void *nvme_finish_cmd(struct nvme_queue *nvmeq, int tag,
						nvme_completion_fn *fn)
{
	struct nvme_cmd_info *cmd = get_cmd_from_tag(nvmeq, tag);
	void *ctx;
	if (tag >= nvmeq->q_depth) {
		*fn = special_completion;
		return CMD_CTX_INVALID;
	}
	if (fn)
		*fn = cmd->fn;
	ctx = cmd->ctx;
	cmd->fn = special_completion;
	cmd->ctx = CMD_CTX_COMPLETED;
	return ctx;
}

/**
 * __nvme_submit_cmd() - Copy a command into a queue and ring the doorbell
 * @nvmeq: The queue to use
 * @cmd: The command to send
 *
 * Safe to use from interrupt context
 */
static void __nvme_submit_cmd(struct nvme_queue *nvmeq,
						struct nvme_command *cmd)
{
	u16 tail = nvmeq->sq_tail;

	if (nvmeq->sq_cmds_io)
		memcpy_toio(&nvmeq->sq_cmds_io[tail], cmd, sizeof(*cmd));
	else
		memcpy(&nvmeq->sq_cmds[tail], cmd, sizeof(*cmd));

	if (++tail == nvmeq->q_depth)
		tail = 0;
	writel(tail, nvmeq->q_db);
	nvmeq->sq_tail = tail;
}

static void nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd)
{
	unsigned long flags;
	spin_lock_irqsave(&nvmeq->q_lock, flags);
	__nvme_submit_cmd(nvmeq, cmd);
	spin_unlock_irqrestore(&nvmeq->q_lock, flags);
}

static __le64 **iod_list(struct nvme_iod *iod)
{
	return ((void *)iod) + iod->offset;
}

static inline void iod_init(struct nvme_iod *iod, unsigned nbytes,
			    unsigned nseg, unsigned long private)
{
	iod->private = private;
	iod->offset = offsetof(struct nvme_iod, sg[nseg]);
	iod->npages = -1;
	iod->length = nbytes;
	iod->nents = 0;
}

static struct nvme_iod *
__nvme_alloc_iod(unsigned nseg, unsigned bytes, struct nvme_dev *dev,
		 unsigned long priv, gfp_t gfp)
{
	struct nvme_iod *iod = kmalloc(sizeof(struct nvme_iod) +
				sizeof(__le64 *) * nvme_npages(bytes, dev) +
				sizeof(struct scatterlist) * nseg, gfp);

	if (iod)
		iod_init(iod, bytes, nseg, priv);

	return iod;
}
static struct nvme_iod *nvme_alloc_iod(struct request *rq, struct nvme_dev *dev,
			               gfp_t gfp)
{
	unsigned size = !(rq->cmd_flags & REQ_DISCARD) ? blk_rq_bytes(rq) :
						sizeof(struct nvme_dsm_range);
	struct nvme_iod *iod;

	if (rq->nr_phys_segments <= NVME_INT_PAGES &&
	    size <= NVME_INT_BYTES(dev)) {
		struct nvme_cmd_info *cmd = blk_mq_rq_to_pdu(rq);

		iod = cmd->iod;
		iod_init(iod, size, rq->nr_phys_segments,
				(unsigned long) rq | NVME_INT_MASK);
		return iod;
	}

	return __nvme_alloc_iod(rq->nr_phys_segments, size, dev,
				(unsigned long) rq, gfp);
}

static void nvme_free_iod(struct nvme_dev *dev, struct nvme_iod *iod)
{
	const int last_prp = dev->ctrl.page_size / 8 - 1;
	int i;
	__le64 **list = iod_list(iod);
	dma_addr_t prp_dma = iod->first_dma;

	if (iod->npages == 0)
		dma_pool_free(dev->prp_small_pool, list[0], prp_dma);
	for (i = 0; i < iod->npages; i++) {
		__le64 *prp_list = list[i];
		dma_addr_t next_prp_dma = le64_to_cpu(prp_list[last_prp]);
		dma_pool_free(dev->prp_page_pool, prp_list, prp_dma);
		prp_dma = next_prp_dma;
	}

	if (iod_should_kfree(iod))
		kfree(iod);
}

#ifdef CONFIG_BLK_DEV_INTEGRITY
static void nvme_dif_prep(u32 p, u32 v, struct t10_pi_tuple *pi)
{
	if (be32_to_cpu(pi->ref_tag) == v)
		pi->ref_tag = cpu_to_be32(p);
}

static void nvme_dif_complete(u32 p, u32 v, struct t10_pi_tuple *pi)
{
	if (be32_to_cpu(pi->ref_tag) == p)
		pi->ref_tag = cpu_to_be32(v);
}
/**
 * nvme_dif_remap - remaps ref tags to bip seed and physical lba
 *
 * The virtual start sector is the one that was originally submitted by the
 * block layer.  Due to partitioning, MD/DM cloning, etc. the actual physical
 * start sector may be different.  Remap protection information to match the
 * physical LBA on writes, and back to the original seed on reads.
 *
 * Type 0 and 3 do not have a ref tag, so no remapping required.
 */
static void nvme_dif_remap(struct request *req,
			void (*dif_swap)(u32 p, u32 v, struct t10_pi_tuple *pi))
{
	struct nvme_ns *ns = req->rq_disk->private_data;
	struct bio_integrity_payload *bip;
	struct t10_pi_tuple *pi;
	void *p, *pmap;
	u32 i, nlb, ts, phys, virt;

	if (!ns->pi_type || ns->pi_type == NVME_NS_DPS_PI_TYPE3)
		return;

	bip = bio_integrity(req->bio);
	if (!bip)
		return;

	pmap = kmap_atomic(bip->bip_vec->bv_page) + bip->bip_vec->bv_offset;

	p = pmap;
	virt = bip_get_seed(bip);
	phys = nvme_block_nr(ns, blk_rq_pos(req));
	nlb = (blk_rq_bytes(req) >> ns->lba_shift);
	ts = ns->disk->queue->integrity.tuple_size;

	for (i = 0; i < nlb; i++, virt++, phys++) {
		pi = (struct t10_pi_tuple *)p;
		dif_swap(phys, virt, pi);
		p += ts;
	}
	kunmap_atomic(pmap);
}
#else /* CONFIG_BLK_DEV_INTEGRITY */
static void nvme_dif_remap(struct request *req,
			void (*dif_swap)(u32 p, u32 v, struct t10_pi_tuple *pi))
{
}
static void nvme_dif_prep(u32 p, u32 v, struct t10_pi_tuple *pi)
{
}
static void nvme_dif_complete(u32 p, u32 v, struct t10_pi_tuple *pi)
{
}
#endif
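/*
 * Illustrative example (not part of the original source): if a bio with
 * integrity seed 0 lands on a partition starting at LBA 2048, a write of
 * 8 sectors has virt = 0..7 but phys = 2048..2055.  nvme_dif_prep()
 * rewrites each ref tag from virt to phys before the command is issued;
 * nvme_dif_complete() reverses the mapping on read completion so the
 * block layer verifies tags against the seed it originally generated.
 */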
static void req_completion(struct nvme_queue *nvmeq, void *ctx,
						struct nvme_completion *cqe)
{
	struct nvme_iod *iod = ctx;
	struct request *req = iod_get_private(iod);
	struct nvme_cmd_info *cmd_rq = blk_mq_rq_to_pdu(req);
	u16 status = le16_to_cpup(&cqe->status) >> 1;
	int error = 0;

	if (unlikely(status)) {
		if (!(status & NVME_SC_DNR || blk_noretry_request(req))
		    && (jiffies - req->start_time) < req->timeout) {
			unsigned long flags;

			nvme_unmap_data(nvmeq->dev, iod);

			blk_mq_requeue_request(req);
			spin_lock_irqsave(req->q->queue_lock, flags);
			if (!blk_queue_stopped(req->q))
				blk_mq_kick_requeue_list(req->q);
			spin_unlock_irqrestore(req->q->queue_lock, flags);
			return;
		}

		if (req->cmd_type == REQ_TYPE_DRV_PRIV) {
			if (cmd_rq->ctx == CMD_CTX_CANCELLED)
				error = -EINTR;
			else
				error = status;
		} else {
			error = nvme_error_status(status);
		}
	}

	if (req->cmd_type == REQ_TYPE_DRV_PRIV) {
		u32 result = le32_to_cpup(&cqe->result);
		req->special = (void *)(uintptr_t)result;
	}

	if (cmd_rq->aborted)
		dev_warn(nvmeq->dev->dev,
			"completing aborted command with status:%04x\n",
			error);

	nvme_unmap_data(nvmeq->dev, iod);
	blk_mq_complete_request(req, error);
}

static bool nvme_setup_prps(struct nvme_dev *dev, struct nvme_iod *iod,
		int total_len)
{
	struct dma_pool *pool;
	int length = total_len;
	struct scatterlist *sg = iod->sg;
	int dma_len = sg_dma_len(sg);
	u64 dma_addr = sg_dma_address(sg);
	u32 page_size = dev->ctrl.page_size;
	int offset = dma_addr & (page_size - 1);
	__le64 *prp_list;
	__le64 **list = iod_list(iod);
	dma_addr_t prp_dma;
	int nprps, i;

	length -= (page_size - offset);
	if (length <= 0)
		return true;

	dma_len -= (page_size - offset);
	if (dma_len) {
		dma_addr += (page_size - offset);
	} else {
		sg = sg_next(sg);
		dma_addr = sg_dma_address(sg);
		dma_len = sg_dma_len(sg);
	}

	if (length <= page_size) {
		iod->first_dma = dma_addr;
		return true;
	}

	nprps = DIV_ROUND_UP(length, page_size);
	if (nprps <= (256 / 8)) {
		pool = dev->prp_small_pool;
		iod->npages = 0;
	} else {
		pool = dev->prp_page_pool;
		iod->npages = 1;
	}

	prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
	if (!prp_list) {
		iod->first_dma = dma_addr;
		iod->npages = -1;
		return false;
	}
	list[0] = prp_list;
	iod->first_dma = prp_dma;
	i = 0;
	for (;;) {
		if (i == page_size >> 3) {
			__le64 *old_prp_list = prp_list;
			prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
			if (!prp_list)
				return false;
			list[iod->npages++] = prp_list;
			prp_list[0] = old_prp_list[i - 1];
			old_prp_list[i - 1] = cpu_to_le64(prp_dma);
			i = 1;
		}
		prp_list[i++] = cpu_to_le64(dma_addr);
		dma_len -= page_size;
		dma_addr += page_size;
		length -= page_size;
		if (length <= 0)
			break;
		if (dma_len > 0)
			continue;
		BUG_ON(dma_len < 0);
		sg = sg_next(sg);
		dma_addr = sg_dma_address(sg);
		dma_len = sg_dma_len(sg);
	}

	return true;
}
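/*
 * Worked example (illustrative, assuming 4KB controller pages): a
 * page-aligned 8KB transfer needs exactly two PRPs, so PRP1 addresses the
 * first page and PRP2 the second directly, with no list allocated.  A
 * page-aligned 16KB transfer needs three PRPs beyond PRP1, so PRP2
 * instead points at a PRP list holding the remaining page addresses; up
 * to 32 entries (256 bytes) come from prp_small_pool, and larger lists
 * from prp_page_pool, chained through the last slot of each list page.
 */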
static int nvme_map_data(struct nvme_dev *dev, struct nvme_iod *iod,
		struct nvme_command *cmnd)
{
	struct request *req = iod_get_private(iod);
	struct request_queue *q = req->q;
	enum dma_data_direction dma_dir = rq_data_dir(req) ?
			DMA_TO_DEVICE : DMA_FROM_DEVICE;
	int ret = BLK_MQ_RQ_QUEUE_ERROR;

	sg_init_table(iod->sg, req->nr_phys_segments);
	iod->nents = blk_rq_map_sg(q, req, iod->sg);
	if (!iod->nents)
		goto out;

	ret = BLK_MQ_RQ_QUEUE_BUSY;
	if (!dma_map_sg(dev->dev, iod->sg, iod->nents, dma_dir))
		goto out;

	if (!nvme_setup_prps(dev, iod, blk_rq_bytes(req)))
		goto out_unmap;

	ret = BLK_MQ_RQ_QUEUE_ERROR;
	if (blk_integrity_rq(req)) {
		if (blk_rq_count_integrity_sg(q, req->bio) != 1)
			goto out_unmap;

		sg_init_table(iod->meta_sg, 1);
		if (blk_rq_map_integrity_sg(q, req->bio, iod->meta_sg) != 1)
			goto out_unmap;

		if (rq_data_dir(req))
			nvme_dif_remap(req, nvme_dif_prep);

		if (!dma_map_sg(dev->dev, iod->meta_sg, 1, dma_dir))
			goto out_unmap;
	}

	cmnd->rw.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
	cmnd->rw.prp2 = cpu_to_le64(iod->first_dma);
	if (blk_integrity_rq(req))
		cmnd->rw.metadata = cpu_to_le64(sg_dma_address(iod->meta_sg));
	return BLK_MQ_RQ_QUEUE_OK;

out_unmap:
	dma_unmap_sg(dev->dev, iod->sg, iod->nents, dma_dir);
out:
	return ret;
}
static void nvme_unmap_data(struct nvme_dev *dev, struct nvme_iod *iod)
{
	struct request *req = iod_get_private(iod);
	enum dma_data_direction dma_dir = rq_data_dir(req) ?
			DMA_TO_DEVICE : DMA_FROM_DEVICE;

	if (iod->nents) {
		dma_unmap_sg(dev->dev, iod->sg, iod->nents, dma_dir);
		if (blk_integrity_rq(req)) {
			if (!rq_data_dir(req))
				nvme_dif_remap(req, nvme_dif_complete);
			dma_unmap_sg(dev->dev, iod->meta_sg, 1, dma_dir);
		}
	}

	nvme_free_iod(dev, iod);
}

/*
 * We reuse the small pool to allocate the 16-byte range here as it is not
 * worth having a special pool for these or additional cases to handle freeing
 * the iod.
 */
static int nvme_setup_discard(struct nvme_queue *nvmeq, struct nvme_ns *ns,
		struct nvme_iod *iod, struct nvme_command *cmnd)
{
	struct request *req = iod_get_private(iod);
	struct nvme_dsm_range *range;

	range = dma_pool_alloc(nvmeq->dev->prp_small_pool, GFP_ATOMIC,
						&iod->first_dma);
	if (!range)
		return BLK_MQ_RQ_QUEUE_BUSY;
	iod_list(iod)[0] = (__le64 *)range;
	iod->npages = 0;

	range->cattr = cpu_to_le32(0);
	range->nlb = cpu_to_le32(blk_rq_bytes(req) >> ns->lba_shift);
	range->slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));

	memset(cmnd, 0, sizeof(*cmnd));
	cmnd->dsm.opcode = nvme_cmd_dsm;
	cmnd->dsm.nsid = cpu_to_le32(ns->ns_id);
	cmnd->dsm.prp1 = cpu_to_le64(iod->first_dma);
	cmnd->dsm.nr = 0;
	cmnd->dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);
	return BLK_MQ_RQ_QUEUE_OK;
}
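/*
 * Illustrative example (not part of the original source): a 1MB discard
 * starting at 512-byte sector 2048 on a 512-byte-LBA namespace becomes a
 * single DSM range of { cattr = 0, nlb = 2048, slba = 2048 } with the
 * deallocate attribute set; dsm.nr = 0 describes one range, the field
 * being zero-based in the NVMe specification.
 */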
/*
 * NOTE: ns is NULL when called on the admin queue.
 */
static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
			 const struct blk_mq_queue_data *bd)
{
	struct nvme_ns *ns = hctx->queue->queuedata;
	struct nvme_queue *nvmeq = hctx->driver_data;
	struct nvme_dev *dev = nvmeq->dev;
	struct request *req = bd->rq;
	struct nvme_cmd_info *cmd = blk_mq_rq_to_pdu(req);
	struct nvme_iod *iod;
	struct nvme_command cmnd;
	int ret = BLK_MQ_RQ_QUEUE_OK;

	/*
	 * If formatted with metadata, require that the block layer provide a
	 * buffer unless this namespace is formatted such that the metadata
	 * can be stripped/generated by the controller with PRACT=1.
	 */
	if (ns && ns->ms && !blk_integrity_rq(req)) {
		if (!(ns->pi_type && ns->ms == 8) &&
		    req->cmd_type != REQ_TYPE_DRV_PRIV) {
			blk_mq_complete_request(req, -EFAULT);
			return BLK_MQ_RQ_QUEUE_OK;
		}
	}

	iod = nvme_alloc_iod(req, dev, GFP_ATOMIC);
	if (!iod)
		return BLK_MQ_RQ_QUEUE_BUSY;

	if (req->cmd_flags & REQ_DISCARD) {
		ret = nvme_setup_discard(nvmeq, ns, iod, &cmnd);
	} else {
		if (req->cmd_type == REQ_TYPE_DRV_PRIV)
			memcpy(&cmnd, req->cmd, sizeof(cmnd));
		else if (req->cmd_flags & REQ_FLUSH)
			nvme_setup_flush(ns, &cmnd);
		else
			nvme_setup_rw(ns, req, &cmnd);

		if (req->nr_phys_segments)
			ret = nvme_map_data(dev, iod, &cmnd);
	}

	if (ret)
		goto out;

	cmnd.common.command_id = req->tag;
	nvme_set_info(cmd, iod, req_completion);

	spin_lock_irq(&nvmeq->q_lock);
	__nvme_submit_cmd(nvmeq, &cmnd);
	nvme_process_cq(nvmeq);
	spin_unlock_irq(&nvmeq->q_lock);
	return BLK_MQ_RQ_QUEUE_OK;
out:
	nvme_free_iod(dev, iod);
	return ret;
}
static void __nvme_process_cq(struct nvme_queue *nvmeq, unsigned int *tag)
{
	u16 head, phase;

	head = nvmeq->cq_head;
	phase = nvmeq->cq_phase;

	for (;;) {
		void *ctx;
		nvme_completion_fn fn;
		struct nvme_completion cqe = nvmeq->cqes[head];
		if ((le16_to_cpu(cqe.status) & 1) != phase)
			break;
		nvmeq->sq_head = le16_to_cpu(cqe.sq_head);
		if (++head == nvmeq->q_depth) {
			head = 0;
			phase = !phase;
		}
		if (tag && *tag == cqe.command_id)
			*tag = -1;
		ctx = nvme_finish_cmd(nvmeq, cqe.command_id, &fn);
		fn(nvmeq, ctx, &cqe);
	}

	/* If the controller ignores the cq head doorbell and continuously
	 * writes to the queue, it is theoretically possible to wrap around
	 * the queue twice and mistakenly return IRQ_NONE.  Linux only
	 * requires that 0.1% of your interrupts are handled, so this isn't
	 * a big problem.
	 */
	if (head == nvmeq->cq_head && phase == nvmeq->cq_phase)
		return;

	if (likely(nvmeq->cq_vector >= 0))
		writel(head, nvmeq->q_db + nvmeq->dev->db_stride);
	nvmeq->cq_head = head;
	nvmeq->cq_phase = phase;

	nvmeq->cqe_seen = 1;
}

static void nvme_process_cq(struct nvme_queue *nvmeq)
{
	__nvme_process_cq(nvmeq, NULL);
}
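/*
 * Illustrative example (not part of the original source, and assuming the
 * usual initialization of cq_phase to 1 over a zeroed CQ): the controller
 * writes fresh entries with phase bit 1, so "(status & 1) != phase" stops
 * the loop at the first stale slot.  Once head wraps past q_depth the
 * controller starts writing phase 0 entries, and the "phase = !phase"
 * above keeps the comparison in sync for the next pass around the ring.
 */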
static irqreturn_t nvme_irq(int irq, void *data)
{
	irqreturn_t result;
	struct nvme_queue *nvmeq = data;
	spin_lock(&nvmeq->q_lock);
	nvme_process_cq(nvmeq);
	result = nvmeq->cqe_seen ? IRQ_HANDLED : IRQ_NONE;
	nvmeq->cqe_seen = 0;
	spin_unlock(&nvmeq->q_lock);
	return result;
}

static irqreturn_t nvme_irq_check(int irq, void *data)
{
	struct nvme_queue *nvmeq = data;
	struct nvme_completion cqe = nvmeq->cqes[nvmeq->cq_head];
	if ((le16_to_cpu(cqe.status) & 1) != nvmeq->cq_phase)
		return IRQ_NONE;
	return IRQ_WAKE_THREAD;
}

static int nvme_poll(struct blk_mq_hw_ctx *hctx, unsigned int tag)
{
	struct nvme_queue *nvmeq = hctx->driver_data;

	if ((le16_to_cpu(nvmeq->cqes[nvmeq->cq_head].status) & 1) ==
	    nvmeq->cq_phase) {
		spin_lock_irq(&nvmeq->q_lock);
		__nvme_process_cq(nvmeq, &tag);
		spin_unlock_irq(&nvmeq->q_lock);

		if (tag == -1)
			return 1;
	}

	return 0;
}

static int nvme_submit_async_admin_req(struct nvme_dev *dev)
{
	struct nvme_queue *nvmeq = dev->queues[0];
	struct nvme_command c;
	struct nvme_cmd_info *cmd_info;
	struct request *req;

	req = blk_mq_alloc_request(dev->ctrl.admin_q, WRITE,
			BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_RESERVED);
	if (IS_ERR(req))
		return PTR_ERR(req);

	req->cmd_flags |= REQ_NO_TIMEOUT;
	cmd_info = blk_mq_rq_to_pdu(req);
	nvme_set_info(cmd_info, NULL, async_req_completion);

	memset(&c, 0, sizeof(c));
	c.common.opcode = nvme_admin_async_event;
	c.common.command_id = req->tag;

	blk_mq_free_request(req);
	__nvme_submit_cmd(nvmeq, &c);
	return 0;
}

static int nvme_submit_admin_async_cmd(struct nvme_dev *dev,
			struct nvme_command *cmd,
			struct async_cmd_info *cmdinfo, unsigned timeout)
{
	struct nvme_queue *nvmeq = dev->queues[0];
	struct request *req;
	struct nvme_cmd_info *cmd_rq;

	req = blk_mq_alloc_request(dev->ctrl.admin_q, WRITE, 0);
	if (IS_ERR(req))
		return PTR_ERR(req);

	req->timeout = timeout;
	cmd_rq = blk_mq_rq_to_pdu(req);
	cmdinfo->req = req;
	nvme_set_info(cmd_rq, cmdinfo, async_completion);
	cmdinfo->status = -EINTR;

	cmd->common.command_id = req->tag;

	nvme_submit_cmd(nvmeq, cmd);
	return 0;
}
static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id)
{
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	c.delete_queue.opcode = opcode;
	c.delete_queue.qid = cpu_to_le16(id);

	return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0);
}

static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid,
						struct nvme_queue *nvmeq)
{
	struct nvme_command c;
	int flags = NVME_QUEUE_PHYS_CONTIG | NVME_CQ_IRQ_ENABLED;

	/*
	 * Note: we (ab)use the fact that the prp fields survive if no data
	 * is attached to the request.
	 */
	memset(&c, 0, sizeof(c));
	c.create_cq.opcode = nvme_admin_create_cq;
	c.create_cq.prp1 = cpu_to_le64(nvmeq->cq_dma_addr);
	c.create_cq.cqid = cpu_to_le16(qid);
	c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
	c.create_cq.cq_flags = cpu_to_le16(flags);
	c.create_cq.irq_vector = cpu_to_le16(nvmeq->cq_vector);

	return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0);
}

static int adapter_alloc_sq(struct nvme_dev *dev, u16 qid,
						struct nvme_queue *nvmeq)
{
	struct nvme_command c;
	int flags = NVME_QUEUE_PHYS_CONTIG | NVME_SQ_PRIO_MEDIUM;

	/*
	 * Note: we (ab)use the fact that the prp fields survive if no data
	 * is attached to the request.
	 */
	memset(&c, 0, sizeof(c));
	c.create_sq.opcode = nvme_admin_create_sq;
	c.create_sq.prp1 = cpu_to_le64(nvmeq->sq_dma_addr);
	c.create_sq.sqid = cpu_to_le16(qid);
	c.create_sq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
	c.create_sq.sq_flags = cpu_to_le16(flags);
	c.create_sq.cqid = cpu_to_le16(qid);

	return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0);
}
static int adapter_delete_cq(struct nvme_dev *dev, u16 cqid)
{
	return adapter_delete_queue(dev, nvme_admin_delete_cq, cqid);
}

static int adapter_delete_sq(struct nvme_dev *dev, u16 sqid)
{
	return adapter_delete_queue(dev, nvme_admin_delete_sq, sqid);
}

/**
 * nvme_abort_req - Attempt aborting a request
 *
 * Schedule controller reset if the command was already aborted once before and
 * still hasn't been returned to the driver, or if this is the admin queue.
 */
static void nvme_abort_req(struct request *req)
{
	struct nvme_cmd_info *cmd_rq = blk_mq_rq_to_pdu(req);
	struct nvme_queue *nvmeq = cmd_rq->nvmeq;
	struct nvme_dev *dev = nvmeq->dev;
	struct request *abort_req;
	struct nvme_cmd_info *abort_cmd;
	struct nvme_command cmd;

	if (!nvmeq->qid || cmd_rq->aborted) {
		spin_lock(&dev_list_lock);
		if (!__nvme_reset(dev)) {
			dev_warn(dev->dev,
				 "I/O %d QID %d timeout, reset controller\n",
				 req->tag, nvmeq->qid);
		}
		spin_unlock(&dev_list_lock);
		return;
	}

	if (!dev->ctrl.abort_limit)
		return;

	abort_req = blk_mq_alloc_request(dev->ctrl.admin_q, WRITE,
			BLK_MQ_REQ_NOWAIT);
	if (IS_ERR(abort_req))
		return;

	abort_cmd = blk_mq_rq_to_pdu(abort_req);
	nvme_set_info(abort_cmd, abort_req, abort_completion);

	memset(&cmd, 0, sizeof(cmd));
	cmd.abort.opcode = nvme_admin_abort_cmd;
	cmd.abort.cid = req->tag;
	cmd.abort.sqid = cpu_to_le16(nvmeq->qid);
	cmd.abort.command_id = abort_req->tag;

	--dev->ctrl.abort_limit;
	cmd_rq->aborted = 1;

	dev_warn(nvmeq->q_dmadev, "Aborting I/O %d QID %d\n", req->tag,
							nvmeq->qid);
	nvme_submit_cmd(dev->queues[0], &cmd);
}
112357dacad5SJay Sternberg cmd_rq->aborted = 1; 112457dacad5SJay Sternberg 112557dacad5SJay Sternberg dev_warn(nvmeq->q_dmadev, "Aborting I/O %d QID %d\n", req->tag, 112657dacad5SJay Sternberg nvmeq->qid); 112757dacad5SJay Sternberg nvme_submit_cmd(dev->queues[0], &cmd); 112857dacad5SJay Sternberg } 112957dacad5SJay Sternberg 113057dacad5SJay Sternberg static void nvme_cancel_queue_ios(struct request *req, void *data, bool reserved) 113157dacad5SJay Sternberg { 113257dacad5SJay Sternberg struct nvme_queue *nvmeq = data; 113357dacad5SJay Sternberg void *ctx; 113457dacad5SJay Sternberg nvme_completion_fn fn; 113557dacad5SJay Sternberg struct nvme_cmd_info *cmd; 113657dacad5SJay Sternberg struct nvme_completion cqe; 113757dacad5SJay Sternberg 113857dacad5SJay Sternberg if (!blk_mq_request_started(req)) 113957dacad5SJay Sternberg return; 114057dacad5SJay Sternberg 114157dacad5SJay Sternberg cmd = blk_mq_rq_to_pdu(req); 114257dacad5SJay Sternberg 114357dacad5SJay Sternberg if (cmd->ctx == CMD_CTX_CANCELLED) 114457dacad5SJay Sternberg return; 114557dacad5SJay Sternberg 114657dacad5SJay Sternberg if (blk_queue_dying(req->q)) 114757dacad5SJay Sternberg cqe.status = cpu_to_le16((NVME_SC_ABORT_REQ | NVME_SC_DNR) << 1); 114857dacad5SJay Sternberg else 114957dacad5SJay Sternberg cqe.status = cpu_to_le16(NVME_SC_ABORT_REQ << 1); 115057dacad5SJay Sternberg 115157dacad5SJay Sternberg 115257dacad5SJay Sternberg dev_warn(nvmeq->q_dmadev, "Cancelling I/O %d QID %d\n", 115357dacad5SJay Sternberg req->tag, nvmeq->qid); 115457dacad5SJay Sternberg ctx = cancel_cmd_info(cmd, &fn); 115557dacad5SJay Sternberg fn(nvmeq, ctx, &cqe); 115657dacad5SJay Sternberg } 115757dacad5SJay Sternberg 115857dacad5SJay Sternberg static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved) 115957dacad5SJay Sternberg { 116057dacad5SJay Sternberg struct nvme_cmd_info *cmd = blk_mq_rq_to_pdu(req); 116157dacad5SJay Sternberg struct nvme_queue *nvmeq = cmd->nvmeq; 116257dacad5SJay Sternberg 116357dacad5SJay Sternberg dev_warn(nvmeq->q_dmadev, "Timeout I/O %d QID %d\n", req->tag, 116457dacad5SJay Sternberg nvmeq->qid); 116557dacad5SJay Sternberg spin_lock_irq(&nvmeq->q_lock); 116657dacad5SJay Sternberg nvme_abort_req(req); 116757dacad5SJay Sternberg spin_unlock_irq(&nvmeq->q_lock); 116857dacad5SJay Sternberg 116957dacad5SJay Sternberg /* 117057dacad5SJay Sternberg * The aborted req will be completed on receiving the abort req. 117157dacad5SJay Sternberg * We enable the timer again. If hit twice, it'll cause a device reset, 117257dacad5SJay Sternberg * as the device then is in a faulty state. 
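 * (nvme_abort_req() set cmd_rq->aborted above, so a second timeout of the same request escalates to a controller reset.)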
117357dacad5SJay Sternberg */ 117457dacad5SJay Sternberg return BLK_EH_RESET_TIMER; 117557dacad5SJay Sternberg } 117657dacad5SJay Sternberg 117757dacad5SJay Sternberg static void nvme_free_queue(struct nvme_queue *nvmeq) 117857dacad5SJay Sternberg { 117957dacad5SJay Sternberg dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth), 118057dacad5SJay Sternberg (void *)nvmeq->cqes, nvmeq->cq_dma_addr); 118157dacad5SJay Sternberg if (nvmeq->sq_cmds) 118257dacad5SJay Sternberg dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth), 118357dacad5SJay Sternberg nvmeq->sq_cmds, nvmeq->sq_dma_addr); 118457dacad5SJay Sternberg kfree(nvmeq); 118557dacad5SJay Sternberg } 118657dacad5SJay Sternberg 118757dacad5SJay Sternberg static void nvme_free_queues(struct nvme_dev *dev, int lowest) 118857dacad5SJay Sternberg { 118957dacad5SJay Sternberg int i; 119057dacad5SJay Sternberg 119157dacad5SJay Sternberg for (i = dev->queue_count - 1; i >= lowest; i--) { 119257dacad5SJay Sternberg struct nvme_queue *nvmeq = dev->queues[i]; 119357dacad5SJay Sternberg dev->queue_count--; 119457dacad5SJay Sternberg dev->queues[i] = NULL; 119557dacad5SJay Sternberg nvme_free_queue(nvmeq); 119657dacad5SJay Sternberg } 119757dacad5SJay Sternberg } 119857dacad5SJay Sternberg 119957dacad5SJay Sternberg /** 120057dacad5SJay Sternberg * nvme_suspend_queue - put queue into suspended state 120157dacad5SJay Sternberg * @nvmeq - queue to suspend 120257dacad5SJay Sternberg */ 120357dacad5SJay Sternberg static int nvme_suspend_queue(struct nvme_queue *nvmeq) 120457dacad5SJay Sternberg { 120557dacad5SJay Sternberg int vector; 120657dacad5SJay Sternberg 120757dacad5SJay Sternberg spin_lock_irq(&nvmeq->q_lock); 120857dacad5SJay Sternberg if (nvmeq->cq_vector == -1) { 120957dacad5SJay Sternberg spin_unlock_irq(&nvmeq->q_lock); 121057dacad5SJay Sternberg return 1; 121157dacad5SJay Sternberg } 121257dacad5SJay Sternberg vector = nvmeq->dev->entry[nvmeq->cq_vector].vector; 121357dacad5SJay Sternberg nvmeq->dev->online_queues--; 121457dacad5SJay Sternberg nvmeq->cq_vector = -1; 121557dacad5SJay Sternberg spin_unlock_irq(&nvmeq->q_lock); 121657dacad5SJay Sternberg 12171c63dc66SChristoph Hellwig if (!nvmeq->qid && nvmeq->dev->ctrl.admin_q) 12181c63dc66SChristoph Hellwig blk_mq_freeze_queue_start(nvmeq->dev->ctrl.admin_q); 121957dacad5SJay Sternberg 122057dacad5SJay Sternberg irq_set_affinity_hint(vector, NULL); 122157dacad5SJay Sternberg free_irq(vector, nvmeq); 122257dacad5SJay Sternberg 122357dacad5SJay Sternberg return 0; 122457dacad5SJay Sternberg } 122557dacad5SJay Sternberg 122657dacad5SJay Sternberg static void nvme_clear_queue(struct nvme_queue *nvmeq) 122757dacad5SJay Sternberg { 122857dacad5SJay Sternberg spin_lock_irq(&nvmeq->q_lock); 122957dacad5SJay Sternberg if (nvmeq->tags && *nvmeq->tags) 123057dacad5SJay Sternberg blk_mq_all_tag_busy_iter(*nvmeq->tags, nvme_cancel_queue_ios, nvmeq); 123157dacad5SJay Sternberg spin_unlock_irq(&nvmeq->q_lock); 123257dacad5SJay Sternberg } 123357dacad5SJay Sternberg 123457dacad5SJay Sternberg static void nvme_disable_queue(struct nvme_dev *dev, int qid) 123557dacad5SJay Sternberg { 123657dacad5SJay Sternberg struct nvme_queue *nvmeq = dev->queues[qid]; 123757dacad5SJay Sternberg 123857dacad5SJay Sternberg if (!nvmeq) 123957dacad5SJay Sternberg return; 124057dacad5SJay Sternberg if (nvme_suspend_queue(nvmeq)) 124157dacad5SJay Sternberg return; 124257dacad5SJay Sternberg 124357dacad5SJay Sternberg /* Don't tell the adapter to delete the admin queue. 
124457dacad5SJay Sternberg * Don't tell a removed adapter to delete IO queues. */ 12457a67cbeaSChristoph Hellwig if (qid && readl(dev->bar + NVME_REG_CSTS) != -1) { 124657dacad5SJay Sternberg adapter_delete_sq(dev, qid); 124757dacad5SJay Sternberg adapter_delete_cq(dev, qid); 124857dacad5SJay Sternberg } 124957dacad5SJay Sternberg 125057dacad5SJay Sternberg spin_lock_irq(&nvmeq->q_lock); 125157dacad5SJay Sternberg nvme_process_cq(nvmeq); 125257dacad5SJay Sternberg spin_unlock_irq(&nvmeq->q_lock); 125357dacad5SJay Sternberg } 125457dacad5SJay Sternberg 125557dacad5SJay Sternberg static int nvme_cmb_qdepth(struct nvme_dev *dev, int nr_io_queues, 125657dacad5SJay Sternberg int entry_size) 125757dacad5SJay Sternberg { 125857dacad5SJay Sternberg int q_depth = dev->q_depth; 12595fd4ce1bSChristoph Hellwig unsigned q_size_aligned = roundup(q_depth * entry_size, 12605fd4ce1bSChristoph Hellwig dev->ctrl.page_size); 126157dacad5SJay Sternberg 126257dacad5SJay Sternberg if (q_size_aligned * nr_io_queues > dev->cmb_size) { 126357dacad5SJay Sternberg u64 mem_per_q = div_u64(dev->cmb_size, nr_io_queues); 12645fd4ce1bSChristoph Hellwig mem_per_q = round_down(mem_per_q, dev->ctrl.page_size); 126557dacad5SJay Sternberg q_depth = div_u64(mem_per_q, entry_size); 126657dacad5SJay Sternberg 126757dacad5SJay Sternberg /* 126857dacad5SJay Sternberg * Ensure the reduced q_depth is above some threshold where it 126957dacad5SJay Sternberg * would be better to map queues in system memory with the 127057dacad5SJay Sternberg * original depth 127157dacad5SJay Sternberg */ 127257dacad5SJay Sternberg if (q_depth < 64) 127357dacad5SJay Sternberg return -ENOMEM; 127457dacad5SJay Sternberg } 127557dacad5SJay Sternberg 127657dacad5SJay Sternberg return q_depth; 127757dacad5SJay Sternberg } 127857dacad5SJay Sternberg 127957dacad5SJay Sternberg static int nvme_alloc_sq_cmds(struct nvme_dev *dev, struct nvme_queue *nvmeq, 128057dacad5SJay Sternberg int qid, int depth) 128157dacad5SJay Sternberg { 128257dacad5SJay Sternberg if (qid && dev->cmb && use_cmb_sqes && NVME_CMB_SQS(dev->cmbsz)) { 12835fd4ce1bSChristoph Hellwig unsigned offset = (qid - 1) * roundup(SQ_SIZE(depth), 12845fd4ce1bSChristoph Hellwig dev->ctrl.page_size); 128557dacad5SJay Sternberg nvmeq->sq_dma_addr = dev->cmb_dma_addr + offset; 128657dacad5SJay Sternberg nvmeq->sq_cmds_io = dev->cmb + offset; 128757dacad5SJay Sternberg } else { 128857dacad5SJay Sternberg nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(depth), 128957dacad5SJay Sternberg &nvmeq->sq_dma_addr, GFP_KERNEL); 129057dacad5SJay Sternberg if (!nvmeq->sq_cmds) 129157dacad5SJay Sternberg return -ENOMEM; 129257dacad5SJay Sternberg } 129357dacad5SJay Sternberg 129457dacad5SJay Sternberg return 0; 129557dacad5SJay Sternberg } 129657dacad5SJay Sternberg 129757dacad5SJay Sternberg static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid, 129857dacad5SJay Sternberg int depth) 129957dacad5SJay Sternberg { 130057dacad5SJay Sternberg struct nvme_queue *nvmeq = kzalloc(sizeof(*nvmeq), GFP_KERNEL); 130157dacad5SJay Sternberg if (!nvmeq) 130257dacad5SJay Sternberg return NULL; 130357dacad5SJay Sternberg 130457dacad5SJay Sternberg nvmeq->cqes = dma_zalloc_coherent(dev->dev, CQ_SIZE(depth), 130557dacad5SJay Sternberg &nvmeq->cq_dma_addr, GFP_KERNEL); 130657dacad5SJay Sternberg if (!nvmeq->cqes) 130757dacad5SJay Sternberg goto free_nvmeq; 130857dacad5SJay Sternberg 130957dacad5SJay Sternberg if (nvme_alloc_sq_cmds(dev, nvmeq, qid, depth)) 131057dacad5SJay Sternberg goto free_cqdma; 
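/* DMA memory is in place: the CQ always lives in host memory, while the SQ may have been placed in the controller memory buffer by nvme_alloc_sq_cmds(). */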
131157dacad5SJay Sternberg 131257dacad5SJay Sternberg nvmeq->q_dmadev = dev->dev; 131357dacad5SJay Sternberg nvmeq->dev = dev; 131457dacad5SJay Sternberg snprintf(nvmeq->irqname, sizeof(nvmeq->irqname), "nvme%dq%d", 13151c63dc66SChristoph Hellwig dev->ctrl.instance, qid); 131657dacad5SJay Sternberg spin_lock_init(&nvmeq->q_lock); 131757dacad5SJay Sternberg nvmeq->cq_head = 0; 131857dacad5SJay Sternberg nvmeq->cq_phase = 1; 131957dacad5SJay Sternberg nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride]; 132057dacad5SJay Sternberg nvmeq->q_depth = depth; 132157dacad5SJay Sternberg nvmeq->qid = qid; 132257dacad5SJay Sternberg nvmeq->cq_vector = -1; 132357dacad5SJay Sternberg dev->queues[qid] = nvmeq; 132457dacad5SJay Sternberg 132557dacad5SJay Sternberg /* make sure queue descriptor is set before queue count, for kthread */ 132657dacad5SJay Sternberg mb(); 132757dacad5SJay Sternberg dev->queue_count++; 132857dacad5SJay Sternberg 132957dacad5SJay Sternberg return nvmeq; 133057dacad5SJay Sternberg 133157dacad5SJay Sternberg free_cqdma: 133257dacad5SJay Sternberg dma_free_coherent(dev->dev, CQ_SIZE(depth), (void *)nvmeq->cqes, 133357dacad5SJay Sternberg nvmeq->cq_dma_addr); 133457dacad5SJay Sternberg free_nvmeq: 133557dacad5SJay Sternberg kfree(nvmeq); 133657dacad5SJay Sternberg return NULL; 133757dacad5SJay Sternberg } 133857dacad5SJay Sternberg 133957dacad5SJay Sternberg static int queue_request_irq(struct nvme_dev *dev, struct nvme_queue *nvmeq, 134057dacad5SJay Sternberg const char *name) 134157dacad5SJay Sternberg { 134257dacad5SJay Sternberg if (use_threaded_interrupts) 134357dacad5SJay Sternberg return request_threaded_irq(dev->entry[nvmeq->cq_vector].vector, 134457dacad5SJay Sternberg nvme_irq_check, nvme_irq, IRQF_SHARED, 134557dacad5SJay Sternberg name, nvmeq); 134657dacad5SJay Sternberg return request_irq(dev->entry[nvmeq->cq_vector].vector, nvme_irq, 134757dacad5SJay Sternberg IRQF_SHARED, name, nvmeq); 134857dacad5SJay Sternberg } 134957dacad5SJay Sternberg 135057dacad5SJay Sternberg static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid) 135157dacad5SJay Sternberg { 135257dacad5SJay Sternberg struct nvme_dev *dev = nvmeq->dev; 135357dacad5SJay Sternberg 135457dacad5SJay Sternberg spin_lock_irq(&nvmeq->q_lock); 135557dacad5SJay Sternberg nvmeq->sq_tail = 0; 135657dacad5SJay Sternberg nvmeq->cq_head = 0; 135757dacad5SJay Sternberg nvmeq->cq_phase = 1; 135857dacad5SJay Sternberg nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride]; 135957dacad5SJay Sternberg memset((void *)nvmeq->cqes, 0, CQ_SIZE(nvmeq->q_depth)); 136057dacad5SJay Sternberg dev->online_queues++; 136157dacad5SJay Sternberg spin_unlock_irq(&nvmeq->q_lock); 136257dacad5SJay Sternberg } 136357dacad5SJay Sternberg 136457dacad5SJay Sternberg static int nvme_create_queue(struct nvme_queue *nvmeq, int qid) 136557dacad5SJay Sternberg { 136657dacad5SJay Sternberg struct nvme_dev *dev = nvmeq->dev; 136757dacad5SJay Sternberg int result; 136857dacad5SJay Sternberg 136957dacad5SJay Sternberg nvmeq->cq_vector = qid - 1; 137057dacad5SJay Sternberg result = adapter_alloc_cq(dev, qid, nvmeq); 137157dacad5SJay Sternberg if (result < 0) 137257dacad5SJay Sternberg return result; 137357dacad5SJay Sternberg 137457dacad5SJay Sternberg result = adapter_alloc_sq(dev, qid, nvmeq); 137557dacad5SJay Sternberg if (result < 0) 137657dacad5SJay Sternberg goto release_cq; 137757dacad5SJay Sternberg 137857dacad5SJay Sternberg result = queue_request_irq(dev, nvmeq, nvmeq->irqname); 137957dacad5SJay Sternberg if (result < 0) 138057dacad5SJay Sternberg 
goto release_sq; 138157dacad5SJay Sternberg 138257dacad5SJay Sternberg nvme_init_queue(nvmeq, qid); 138357dacad5SJay Sternberg return result; 138457dacad5SJay Sternberg 138557dacad5SJay Sternberg release_sq: 138657dacad5SJay Sternberg adapter_delete_sq(dev, qid); 138757dacad5SJay Sternberg release_cq: 138857dacad5SJay Sternberg adapter_delete_cq(dev, qid); 138957dacad5SJay Sternberg return result; 139057dacad5SJay Sternberg } 139157dacad5SJay Sternberg 139257dacad5SJay Sternberg static struct blk_mq_ops nvme_mq_admin_ops = { 139357dacad5SJay Sternberg .queue_rq = nvme_queue_rq, 139457dacad5SJay Sternberg .map_queue = blk_mq_map_queue, 139557dacad5SJay Sternberg .init_hctx = nvme_admin_init_hctx, 139657dacad5SJay Sternberg .exit_hctx = nvme_admin_exit_hctx, 139757dacad5SJay Sternberg .init_request = nvme_admin_init_request, 139857dacad5SJay Sternberg .timeout = nvme_timeout, 139957dacad5SJay Sternberg }; 140057dacad5SJay Sternberg 140157dacad5SJay Sternberg static struct blk_mq_ops nvme_mq_ops = { 140257dacad5SJay Sternberg .queue_rq = nvme_queue_rq, 140357dacad5SJay Sternberg .map_queue = blk_mq_map_queue, 140457dacad5SJay Sternberg .init_hctx = nvme_init_hctx, 140557dacad5SJay Sternberg .init_request = nvme_init_request, 140657dacad5SJay Sternberg .timeout = nvme_timeout, 1407a0fa9647SJens Axboe .poll = nvme_poll, 140857dacad5SJay Sternberg }; 140957dacad5SJay Sternberg 141057dacad5SJay Sternberg static void nvme_dev_remove_admin(struct nvme_dev *dev) 141157dacad5SJay Sternberg { 14121c63dc66SChristoph Hellwig if (dev->ctrl.admin_q && !blk_queue_dying(dev->ctrl.admin_q)) { 14131c63dc66SChristoph Hellwig blk_cleanup_queue(dev->ctrl.admin_q); 141457dacad5SJay Sternberg blk_mq_free_tag_set(&dev->admin_tagset); 141557dacad5SJay Sternberg } 141657dacad5SJay Sternberg } 141757dacad5SJay Sternberg 141857dacad5SJay Sternberg static int nvme_alloc_admin_tags(struct nvme_dev *dev) 141957dacad5SJay Sternberg { 14201c63dc66SChristoph Hellwig if (!dev->ctrl.admin_q) { 142157dacad5SJay Sternberg dev->admin_tagset.ops = &nvme_mq_admin_ops; 142257dacad5SJay Sternberg dev->admin_tagset.nr_hw_queues = 1; 142357dacad5SJay Sternberg dev->admin_tagset.queue_depth = NVME_AQ_DEPTH - 1; 142457dacad5SJay Sternberg dev->admin_tagset.reserved_tags = 1; 142557dacad5SJay Sternberg dev->admin_tagset.timeout = ADMIN_TIMEOUT; 142657dacad5SJay Sternberg dev->admin_tagset.numa_node = dev_to_node(dev->dev); 142757dacad5SJay Sternberg dev->admin_tagset.cmd_size = nvme_cmd_size(dev); 142857dacad5SJay Sternberg dev->admin_tagset.driver_data = dev; 142957dacad5SJay Sternberg 143057dacad5SJay Sternberg if (blk_mq_alloc_tag_set(&dev->admin_tagset)) 143157dacad5SJay Sternberg return -ENOMEM; 143257dacad5SJay Sternberg 14331c63dc66SChristoph Hellwig dev->ctrl.admin_q = blk_mq_init_queue(&dev->admin_tagset); 14341c63dc66SChristoph Hellwig if (IS_ERR(dev->ctrl.admin_q)) { 143557dacad5SJay Sternberg blk_mq_free_tag_set(&dev->admin_tagset); 143657dacad5SJay Sternberg return -ENOMEM; 143757dacad5SJay Sternberg } 14381c63dc66SChristoph Hellwig if (!blk_get_queue(dev->ctrl.admin_q)) { 143957dacad5SJay Sternberg nvme_dev_remove_admin(dev); 14401c63dc66SChristoph Hellwig dev->ctrl.admin_q = NULL; 144157dacad5SJay Sternberg return -ENODEV; 144257dacad5SJay Sternberg } 144357dacad5SJay Sternberg } else 14441c63dc66SChristoph Hellwig blk_mq_unfreeze_queue(dev->ctrl.admin_q); 144557dacad5SJay Sternberg 144657dacad5SJay Sternberg return 0; 144757dacad5SJay Sternberg } 144857dacad5SJay Sternberg 144957dacad5SJay Sternberg static int 
nvme_configure_admin_queue(struct nvme_dev *dev) 145057dacad5SJay Sternberg { 145157dacad5SJay Sternberg int result; 145257dacad5SJay Sternberg u32 aqa; 14537a67cbeaSChristoph Hellwig u64 cap = lo_hi_readq(dev->bar + NVME_REG_CAP); 145457dacad5SJay Sternberg struct nvme_queue *nvmeq; 145557dacad5SJay Sternberg 14567a67cbeaSChristoph Hellwig dev->subsystem = readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 1) ? 145757dacad5SJay Sternberg NVME_CAP_NSSRC(cap) : 0; 145857dacad5SJay Sternberg 14597a67cbeaSChristoph Hellwig if (dev->subsystem && 14607a67cbeaSChristoph Hellwig (readl(dev->bar + NVME_REG_CSTS) & NVME_CSTS_NSSRO)) 14617a67cbeaSChristoph Hellwig writel(NVME_CSTS_NSSRO, dev->bar + NVME_REG_CSTS); 146257dacad5SJay Sternberg 14635fd4ce1bSChristoph Hellwig result = nvme_disable_ctrl(&dev->ctrl, cap); 146457dacad5SJay Sternberg if (result < 0) 146557dacad5SJay Sternberg return result; 146657dacad5SJay Sternberg 146757dacad5SJay Sternberg nvmeq = dev->queues[0]; 146857dacad5SJay Sternberg if (!nvmeq) { 146957dacad5SJay Sternberg nvmeq = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH); 147057dacad5SJay Sternberg if (!nvmeq) 147157dacad5SJay Sternberg return -ENOMEM; 147257dacad5SJay Sternberg } 147357dacad5SJay Sternberg 147457dacad5SJay Sternberg aqa = nvmeq->q_depth - 1; 147557dacad5SJay Sternberg aqa |= aqa << 16; 147657dacad5SJay Sternberg 14777a67cbeaSChristoph Hellwig writel(aqa, dev->bar + NVME_REG_AQA); 14787a67cbeaSChristoph Hellwig lo_hi_writeq(nvmeq->sq_dma_addr, dev->bar + NVME_REG_ASQ); 14797a67cbeaSChristoph Hellwig lo_hi_writeq(nvmeq->cq_dma_addr, dev->bar + NVME_REG_ACQ); 148057dacad5SJay Sternberg 14815fd4ce1bSChristoph Hellwig result = nvme_enable_ctrl(&dev->ctrl, cap); 148257dacad5SJay Sternberg if (result) 148357dacad5SJay Sternberg goto free_nvmeq; 148457dacad5SJay Sternberg 148557dacad5SJay Sternberg nvmeq->cq_vector = 0; 148657dacad5SJay Sternberg result = queue_request_irq(dev, nvmeq, nvmeq->irqname); 148757dacad5SJay Sternberg if (result) { 148857dacad5SJay Sternberg nvmeq->cq_vector = -1; 148957dacad5SJay Sternberg goto free_nvmeq; 149057dacad5SJay Sternberg } 149157dacad5SJay Sternberg 149257dacad5SJay Sternberg return result; 149357dacad5SJay Sternberg 149457dacad5SJay Sternberg free_nvmeq: 149557dacad5SJay Sternberg nvme_free_queues(dev, 0); 149657dacad5SJay Sternberg return result; 149757dacad5SJay Sternberg } 149857dacad5SJay Sternberg 149957dacad5SJay Sternberg static int nvme_kthread(void *data) 150057dacad5SJay Sternberg { 150157dacad5SJay Sternberg struct nvme_dev *dev, *next; 150257dacad5SJay Sternberg 150357dacad5SJay Sternberg while (!kthread_should_stop()) { 150457dacad5SJay Sternberg set_current_state(TASK_INTERRUPTIBLE); 150557dacad5SJay Sternberg spin_lock(&dev_list_lock); 150657dacad5SJay Sternberg list_for_each_entry_safe(dev, next, &dev_list, node) { 150757dacad5SJay Sternberg int i; 15087a67cbeaSChristoph Hellwig u32 csts = readl(dev->bar + NVME_REG_CSTS); 150957dacad5SJay Sternberg 151057dacad5SJay Sternberg if ((dev->subsystem && (csts & NVME_CSTS_NSSRO)) || 151157dacad5SJay Sternberg csts & NVME_CSTS_CFS) { 151257dacad5SJay Sternberg if (!__nvme_reset(dev)) { 151357dacad5SJay Sternberg dev_warn(dev->dev, 151457dacad5SJay Sternberg "Failed status: %x, reset controller\n", 15157a67cbeaSChristoph Hellwig readl(dev->bar + NVME_REG_CSTS)); 151657dacad5SJay Sternberg } 151757dacad5SJay Sternberg continue; 151857dacad5SJay Sternberg } 151957dacad5SJay Sternberg for (i = 0; i < dev->queue_count; i++) { 152057dacad5SJay Sternberg struct nvme_queue *nvmeq = 
dev->queues[i]; 152157dacad5SJay Sternberg if (!nvmeq) 152257dacad5SJay Sternberg continue; 152357dacad5SJay Sternberg spin_lock_irq(&nvmeq->q_lock); 152457dacad5SJay Sternberg nvme_process_cq(nvmeq); 152557dacad5SJay Sternberg 15261c63dc66SChristoph Hellwig while (i == 0 && dev->ctrl.event_limit > 0) { 152757dacad5SJay Sternberg if (nvme_submit_async_admin_req(dev)) 152857dacad5SJay Sternberg break; 15291c63dc66SChristoph Hellwig dev->ctrl.event_limit--; 153057dacad5SJay Sternberg } 153157dacad5SJay Sternberg spin_unlock_irq(&nvmeq->q_lock); 153257dacad5SJay Sternberg } 153357dacad5SJay Sternberg } 153457dacad5SJay Sternberg spin_unlock(&dev_list_lock); 153557dacad5SJay Sternberg schedule_timeout(round_jiffies_relative(HZ)); 153657dacad5SJay Sternberg } 153757dacad5SJay Sternberg return 0; 153857dacad5SJay Sternberg } 153957dacad5SJay Sternberg 154057dacad5SJay Sternberg /* 154157dacad5SJay Sternberg * Create I/O queues. Failing to create an I/O queue is not an issue, 154257dacad5SJay Sternberg * we can continue with fewer than the desired number of queues, and 154357dacad5SJay Sternberg * even a controller without I/O queues can still be used to issue 154457dacad5SJay Sternberg * admin commands. This might be useful to upgrade a buggy firmware, 154557dacad5SJay Sternberg * for example. 154657dacad5SJay Sternberg */ 154757dacad5SJay Sternberg static void nvme_create_io_queues(struct nvme_dev *dev) 154857dacad5SJay Sternberg { 154957dacad5SJay Sternberg unsigned i; 155057dacad5SJay Sternberg 155157dacad5SJay Sternberg for (i = dev->queue_count; i <= dev->max_qid; i++) 155257dacad5SJay Sternberg if (!nvme_alloc_queue(dev, i, dev->q_depth)) 155357dacad5SJay Sternberg break; 155457dacad5SJay Sternberg 155557dacad5SJay Sternberg for (i = dev->online_queues; i <= dev->queue_count - 1; i++) 155657dacad5SJay Sternberg if (nvme_create_queue(dev->queues[i], i)) { 155757dacad5SJay Sternberg nvme_free_queues(dev, i); 155857dacad5SJay Sternberg break; 155957dacad5SJay Sternberg } 156057dacad5SJay Sternberg } 156157dacad5SJay Sternberg 156257dacad5SJay Sternberg static void __iomem *nvme_map_cmb(struct nvme_dev *dev) 156357dacad5SJay Sternberg { 156457dacad5SJay Sternberg u64 szu, size, offset; 156557dacad5SJay Sternberg u32 cmbloc; 156657dacad5SJay Sternberg resource_size_t bar_size; 156757dacad5SJay Sternberg struct pci_dev *pdev = to_pci_dev(dev->dev); 156857dacad5SJay Sternberg void __iomem *cmb; 156957dacad5SJay Sternberg dma_addr_t dma_addr; 157057dacad5SJay Sternberg 157157dacad5SJay Sternberg if (!use_cmb_sqes) 157257dacad5SJay Sternberg return NULL; 157357dacad5SJay Sternberg 15747a67cbeaSChristoph Hellwig dev->cmbsz = readl(dev->bar + NVME_REG_CMBSZ); 157557dacad5SJay Sternberg if (!(NVME_CMB_SZ(dev->cmbsz))) 157657dacad5SJay Sternberg return NULL; 157757dacad5SJay Sternberg 15787a67cbeaSChristoph Hellwig cmbloc = readl(dev->bar + NVME_REG_CMBLOC); 157957dacad5SJay Sternberg 158057dacad5SJay Sternberg szu = (u64)1 << (12 + 4 * NVME_CMB_SZU(dev->cmbsz)); 158157dacad5SJay Sternberg size = szu * NVME_CMB_SZ(dev->cmbsz); 158257dacad5SJay Sternberg offset = szu * NVME_CMB_OFST(cmbloc); 158357dacad5SJay Sternberg bar_size = pci_resource_len(pdev, NVME_CMB_BIR(cmbloc)); 158457dacad5SJay Sternberg 158557dacad5SJay Sternberg if (offset > bar_size) 158657dacad5SJay Sternberg return NULL; 158757dacad5SJay Sternberg 158857dacad5SJay Sternberg /* 158957dacad5SJay Sternberg * Controllers may support a CMB size larger than their BAR, 159057dacad5SJay Sternberg * for example, due to being behind a bridge.
Reduce the CMB to 159157dacad5SJay Sternberg * the reported size of the BAR. 159257dacad5SJay Sternberg */ 159357dacad5SJay Sternberg if (size > bar_size - offset) 159457dacad5SJay Sternberg size = bar_size - offset; 159557dacad5SJay Sternberg 159657dacad5SJay Sternberg dma_addr = pci_resource_start(pdev, NVME_CMB_BIR(cmbloc)) + offset; 159757dacad5SJay Sternberg cmb = ioremap_wc(dma_addr, size); 159857dacad5SJay Sternberg if (!cmb) 159957dacad5SJay Sternberg return NULL; 160057dacad5SJay Sternberg 160157dacad5SJay Sternberg dev->cmb_dma_addr = dma_addr; 160257dacad5SJay Sternberg dev->cmb_size = size; 160357dacad5SJay Sternberg return cmb; 160457dacad5SJay Sternberg } 160557dacad5SJay Sternberg 160657dacad5SJay Sternberg static inline void nvme_release_cmb(struct nvme_dev *dev) 160757dacad5SJay Sternberg { 160857dacad5SJay Sternberg if (dev->cmb) { 160957dacad5SJay Sternberg iounmap(dev->cmb); 161057dacad5SJay Sternberg dev->cmb = NULL; 161157dacad5SJay Sternberg } 161257dacad5SJay Sternberg } 161357dacad5SJay Sternberg 161457dacad5SJay Sternberg static size_t db_bar_size(struct nvme_dev *dev, unsigned nr_io_queues) 161557dacad5SJay Sternberg { 161657dacad5SJay Sternberg return 4096 + ((nr_io_queues + 1) * 8 * dev->db_stride); 161757dacad5SJay Sternberg } 161857dacad5SJay Sternberg 161957dacad5SJay Sternberg static int nvme_setup_io_queues(struct nvme_dev *dev) 162057dacad5SJay Sternberg { 162157dacad5SJay Sternberg struct nvme_queue *adminq = dev->queues[0]; 162257dacad5SJay Sternberg struct pci_dev *pdev = to_pci_dev(dev->dev); 162357dacad5SJay Sternberg int result, i, vecs, nr_io_queues, size; 162457dacad5SJay Sternberg 162557dacad5SJay Sternberg nr_io_queues = num_possible_cpus(); 1626*9a0be7abSChristoph Hellwig result = nvme_set_queue_count(&dev->ctrl, &nr_io_queues); 1627*9a0be7abSChristoph Hellwig if (result < 0) 162857dacad5SJay Sternberg return result; 1629*9a0be7abSChristoph Hellwig 1630*9a0be7abSChristoph Hellwig /* 1631*9a0be7abSChristoph Hellwig * Degraded controllers might return an error when setting the queue 1632*9a0be7abSChristoph Hellwig * count. We still want to be able to bring them online and offer 1633*9a0be7abSChristoph Hellwig * access to the admin queue, as that might be the only way to fix them up.
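 * A positive return value here is the NVMe status code from Set Features; it is logged below and probing continues with zero I/O queues.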
1634*9a0be7abSChristoph Hellwig */ 1635*9a0be7abSChristoph Hellwig if (result > 0) { 1636*9a0be7abSChristoph Hellwig dev_err(dev->dev, "Could not set queue count (%d)\n", result); 1637*9a0be7abSChristoph Hellwig nr_io_queues = 0; 1638*9a0be7abSChristoph Hellwig result = 0; 1639*9a0be7abSChristoph Hellwig } 164057dacad5SJay Sternberg 164157dacad5SJay Sternberg if (dev->cmb && NVME_CMB_SQS(dev->cmbsz)) { 164257dacad5SJay Sternberg result = nvme_cmb_qdepth(dev, nr_io_queues, 164357dacad5SJay Sternberg sizeof(struct nvme_command)); 164457dacad5SJay Sternberg if (result > 0) 164557dacad5SJay Sternberg dev->q_depth = result; 164657dacad5SJay Sternberg else 164757dacad5SJay Sternberg nvme_release_cmb(dev); 164857dacad5SJay Sternberg } 164957dacad5SJay Sternberg 165057dacad5SJay Sternberg size = db_bar_size(dev, nr_io_queues); 165157dacad5SJay Sternberg if (size > 8192) { 165257dacad5SJay Sternberg iounmap(dev->bar); 165357dacad5SJay Sternberg do { 165457dacad5SJay Sternberg dev->bar = ioremap(pci_resource_start(pdev, 0), size); 165557dacad5SJay Sternberg if (dev->bar) 165657dacad5SJay Sternberg break; 165757dacad5SJay Sternberg if (!--nr_io_queues) 165857dacad5SJay Sternberg return -ENOMEM; 165957dacad5SJay Sternberg size = db_bar_size(dev, nr_io_queues); 166057dacad5SJay Sternberg } while (1); 16617a67cbeaSChristoph Hellwig dev->dbs = dev->bar + 4096; 166257dacad5SJay Sternberg adminq->q_db = dev->dbs; 166357dacad5SJay Sternberg } 166457dacad5SJay Sternberg 166557dacad5SJay Sternberg /* Deregister the admin queue's interrupt */ 166657dacad5SJay Sternberg free_irq(dev->entry[0].vector, adminq); 166757dacad5SJay Sternberg 166857dacad5SJay Sternberg /* 166957dacad5SJay Sternberg * If we enabled MSI-X early because the device does not support INTx, 167057dacad5SJay Sternberg * disable it again before setting up the full range we need. 167157dacad5SJay Sternberg */ 167257dacad5SJay Sternberg if (!pdev->irq) 167357dacad5SJay Sternberg pci_disable_msix(pdev); 167457dacad5SJay Sternberg 167557dacad5SJay Sternberg for (i = 0; i < nr_io_queues; i++) 167657dacad5SJay Sternberg dev->entry[i].entry = i; 167757dacad5SJay Sternberg vecs = pci_enable_msix_range(pdev, dev->entry, 1, nr_io_queues); 167857dacad5SJay Sternberg if (vecs < 0) { 167957dacad5SJay Sternberg vecs = pci_enable_msi_range(pdev, 1, min(nr_io_queues, 32)); 168057dacad5SJay Sternberg if (vecs < 0) { 168157dacad5SJay Sternberg vecs = 1; 168257dacad5SJay Sternberg } else { 168357dacad5SJay Sternberg for (i = 0; i < vecs; i++) 168457dacad5SJay Sternberg dev->entry[i].vector = i + pdev->irq; 168557dacad5SJay Sternberg } 168657dacad5SJay Sternberg } 168757dacad5SJay Sternberg 168857dacad5SJay Sternberg /* 168957dacad5SJay Sternberg * Should investigate if there's a performance win from allocating 169057dacad5SJay Sternberg * more queues than interrupt vectors; it might allow the submission 169157dacad5SJay Sternberg * path to scale better, even if the receive path is limited by the 169257dacad5SJay Sternberg * number of interrupts.
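 * For now the number of I/O queues is simply clamped to the number of vectors we managed to allocate.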
169357dacad5SJay Sternberg */ 169457dacad5SJay Sternberg nr_io_queues = vecs; 169557dacad5SJay Sternberg dev->max_qid = nr_io_queues; 169657dacad5SJay Sternberg 169757dacad5SJay Sternberg result = queue_request_irq(dev, adminq, adminq->irqname); 169857dacad5SJay Sternberg if (result) { 169957dacad5SJay Sternberg adminq->cq_vector = -1; 170057dacad5SJay Sternberg goto free_queues; 170157dacad5SJay Sternberg } 170257dacad5SJay Sternberg 170357dacad5SJay Sternberg /* Free previously allocated queues that are no longer usable */ 170457dacad5SJay Sternberg nvme_free_queues(dev, nr_io_queues + 1); 170557dacad5SJay Sternberg nvme_create_io_queues(dev); 170657dacad5SJay Sternberg 170757dacad5SJay Sternberg return 0; 170857dacad5SJay Sternberg 170957dacad5SJay Sternberg free_queues: 171057dacad5SJay Sternberg nvme_free_queues(dev, 1); 171157dacad5SJay Sternberg return result; 171257dacad5SJay Sternberg } 171357dacad5SJay Sternberg 171457dacad5SJay Sternberg static void nvme_set_irq_hints(struct nvme_dev *dev) 171557dacad5SJay Sternberg { 171657dacad5SJay Sternberg struct nvme_queue *nvmeq; 171757dacad5SJay Sternberg int i; 171857dacad5SJay Sternberg 171957dacad5SJay Sternberg for (i = 0; i < dev->online_queues; i++) { 172057dacad5SJay Sternberg nvmeq = dev->queues[i]; 172157dacad5SJay Sternberg 172257dacad5SJay Sternberg if (!nvmeq->tags || !(*nvmeq->tags)) 172357dacad5SJay Sternberg continue; 172457dacad5SJay Sternberg 172557dacad5SJay Sternberg irq_set_affinity_hint(dev->entry[nvmeq->cq_vector].vector, 172657dacad5SJay Sternberg blk_mq_tags_cpumask(*nvmeq->tags)); 172757dacad5SJay Sternberg } 172857dacad5SJay Sternberg } 172957dacad5SJay Sternberg 173057dacad5SJay Sternberg static void nvme_dev_scan(struct work_struct *work) 173157dacad5SJay Sternberg { 173257dacad5SJay Sternberg struct nvme_dev *dev = container_of(work, struct nvme_dev, scan_work); 173357dacad5SJay Sternberg 173457dacad5SJay Sternberg if (!dev->tagset.tags) 173557dacad5SJay Sternberg return; 17365bae7f73SChristoph Hellwig nvme_scan_namespaces(&dev->ctrl); 173757dacad5SJay Sternberg nvme_set_irq_hints(dev); 173857dacad5SJay Sternberg } 173957dacad5SJay Sternberg 174057dacad5SJay Sternberg /* 174157dacad5SJay Sternberg * Return: error value if an error occurred setting up the queues or calling 174257dacad5SJay Sternberg * Identify Device. 0 if these succeeded, even if adding some of the 174357dacad5SJay Sternberg * namespaces failed. At the moment, these failures are silent. TBD which 174457dacad5SJay Sternberg * failures should be reported. 
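 * Note that a blk_mq_alloc_tag_set() failure likewise returns 0, leaving the controller reachable through the admin queue only.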
174557dacad5SJay Sternberg */ 174657dacad5SJay Sternberg static int nvme_dev_add(struct nvme_dev *dev) 174757dacad5SJay Sternberg { 17485bae7f73SChristoph Hellwig if (!dev->ctrl.tagset) { 174957dacad5SJay Sternberg dev->tagset.ops = &nvme_mq_ops; 175057dacad5SJay Sternberg dev->tagset.nr_hw_queues = dev->online_queues - 1; 175157dacad5SJay Sternberg dev->tagset.timeout = NVME_IO_TIMEOUT; 175257dacad5SJay Sternberg dev->tagset.numa_node = dev_to_node(dev->dev); 175357dacad5SJay Sternberg dev->tagset.queue_depth = 175457dacad5SJay Sternberg min_t(int, dev->q_depth, BLK_MQ_MAX_DEPTH) - 1; 175557dacad5SJay Sternberg dev->tagset.cmd_size = nvme_cmd_size(dev); 175657dacad5SJay Sternberg dev->tagset.flags = BLK_MQ_F_SHOULD_MERGE; 175757dacad5SJay Sternberg dev->tagset.driver_data = dev; 175857dacad5SJay Sternberg 175957dacad5SJay Sternberg if (blk_mq_alloc_tag_set(&dev->tagset)) 176057dacad5SJay Sternberg return 0; 17615bae7f73SChristoph Hellwig dev->ctrl.tagset = &dev->tagset; 176257dacad5SJay Sternberg } 176357dacad5SJay Sternberg schedule_work(&dev->scan_work); 176457dacad5SJay Sternberg return 0; 176557dacad5SJay Sternberg } 176657dacad5SJay Sternberg 176757dacad5SJay Sternberg static int nvme_dev_map(struct nvme_dev *dev) 176857dacad5SJay Sternberg { 176957dacad5SJay Sternberg u64 cap; 177057dacad5SJay Sternberg int bars, result = -ENOMEM; 177157dacad5SJay Sternberg struct pci_dev *pdev = to_pci_dev(dev->dev); 177257dacad5SJay Sternberg 177357dacad5SJay Sternberg if (pci_enable_device_mem(pdev)) 177457dacad5SJay Sternberg return result; 177557dacad5SJay Sternberg 177657dacad5SJay Sternberg dev->entry[0].vector = pdev->irq; 177757dacad5SJay Sternberg pci_set_master(pdev); 177857dacad5SJay Sternberg bars = pci_select_bars(pdev, IORESOURCE_MEM); 177957dacad5SJay Sternberg if (!bars) 178057dacad5SJay Sternberg goto disable_pci; 178157dacad5SJay Sternberg 178257dacad5SJay Sternberg if (pci_request_selected_regions(pdev, bars, "nvme")) 178357dacad5SJay Sternberg goto disable_pci; 178457dacad5SJay Sternberg 178557dacad5SJay Sternberg if (dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(64)) && 178657dacad5SJay Sternberg dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(32))) 178757dacad5SJay Sternberg goto disable; 178857dacad5SJay Sternberg 178957dacad5SJay Sternberg dev->bar = ioremap(pci_resource_start(pdev, 0), 8192); 179057dacad5SJay Sternberg if (!dev->bar) 179157dacad5SJay Sternberg goto disable; 179257dacad5SJay Sternberg 17937a67cbeaSChristoph Hellwig if (readl(dev->bar + NVME_REG_CSTS) == -1) { 179457dacad5SJay Sternberg result = -ENODEV; 179557dacad5SJay Sternberg goto unmap; 179657dacad5SJay Sternberg } 179757dacad5SJay Sternberg 179857dacad5SJay Sternberg /* 179957dacad5SJay Sternberg * Some devices don't advertise INTx interrupts; pre-enable a single 180057dacad5SJay Sternberg * MSIX vec for setup. We'll adjust this later.
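 * (nvme_setup_io_queues() disables this vector again before enabling the full MSI-X range.)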
180157dacad5SJay Sternberg */ 180257dacad5SJay Sternberg if (!pdev->irq) { 180357dacad5SJay Sternberg result = pci_enable_msix(pdev, dev->entry, 1); 180457dacad5SJay Sternberg if (result < 0) 180557dacad5SJay Sternberg goto unmap; 180657dacad5SJay Sternberg } 180757dacad5SJay Sternberg 18087a67cbeaSChristoph Hellwig cap = lo_hi_readq(dev->bar + NVME_REG_CAP); 18097a67cbeaSChristoph Hellwig 181057dacad5SJay Sternberg dev->q_depth = min_t(int, NVME_CAP_MQES(cap) + 1, NVME_Q_DEPTH); 181157dacad5SJay Sternberg dev->db_stride = 1 << NVME_CAP_STRIDE(cap); 18127a67cbeaSChristoph Hellwig dev->dbs = dev->bar + 4096; 18137a67cbeaSChristoph Hellwig if (readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 2)) 181457dacad5SJay Sternberg dev->cmb = nvme_map_cmb(dev); 181557dacad5SJay Sternberg 181657dacad5SJay Sternberg return 0; 181757dacad5SJay Sternberg 181857dacad5SJay Sternberg unmap: 181957dacad5SJay Sternberg iounmap(dev->bar); 182057dacad5SJay Sternberg dev->bar = NULL; 182157dacad5SJay Sternberg disable: 182257dacad5SJay Sternberg pci_release_regions(pdev); 182357dacad5SJay Sternberg disable_pci: 182457dacad5SJay Sternberg pci_disable_device(pdev); 182557dacad5SJay Sternberg return result; 182657dacad5SJay Sternberg } 182757dacad5SJay Sternberg 182857dacad5SJay Sternberg static void nvme_dev_unmap(struct nvme_dev *dev) 182957dacad5SJay Sternberg { 183057dacad5SJay Sternberg struct pci_dev *pdev = to_pci_dev(dev->dev); 183157dacad5SJay Sternberg 183257dacad5SJay Sternberg if (pdev->msi_enabled) 183357dacad5SJay Sternberg pci_disable_msi(pdev); 183457dacad5SJay Sternberg else if (pdev->msix_enabled) 183557dacad5SJay Sternberg pci_disable_msix(pdev); 183657dacad5SJay Sternberg 183757dacad5SJay Sternberg if (dev->bar) { 183857dacad5SJay Sternberg iounmap(dev->bar); 183957dacad5SJay Sternberg dev->bar = NULL; 184057dacad5SJay Sternberg pci_release_regions(pdev); 184157dacad5SJay Sternberg } 184257dacad5SJay Sternberg 184357dacad5SJay Sternberg if (pci_is_enabled(pdev)) 184457dacad5SJay Sternberg pci_disable_device(pdev); 184557dacad5SJay Sternberg } 184657dacad5SJay Sternberg 184757dacad5SJay Sternberg struct nvme_delq_ctx { 184857dacad5SJay Sternberg struct task_struct *waiter; 184957dacad5SJay Sternberg struct kthread_worker *worker; 185057dacad5SJay Sternberg atomic_t refcount; 185157dacad5SJay Sternberg }; 185257dacad5SJay Sternberg 185357dacad5SJay Sternberg static void nvme_wait_dq(struct nvme_delq_ctx *dq, struct nvme_dev *dev) 185457dacad5SJay Sternberg { 185557dacad5SJay Sternberg dq->waiter = current; 185657dacad5SJay Sternberg mb(); 185757dacad5SJay Sternberg 185857dacad5SJay Sternberg for (;;) { 185957dacad5SJay Sternberg set_current_state(TASK_KILLABLE); 186057dacad5SJay Sternberg if (!atomic_read(&dq->refcount)) 186157dacad5SJay Sternberg break; 186257dacad5SJay Sternberg if (!schedule_timeout(ADMIN_TIMEOUT) || 186357dacad5SJay Sternberg fatal_signal_pending(current)) { 186457dacad5SJay Sternberg /* 186557dacad5SJay Sternberg * Disable the controller first since we can't trust it 186657dacad5SJay Sternberg * at this point, but leave the admin queue enabled 186757dacad5SJay Sternberg * until all queue deletion requests are flushed. 186857dacad5SJay Sternberg * FIXME: This may take a while if there are more h/w 186957dacad5SJay Sternberg * queues than admin tags. 
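 * Each outstanding delete command holds an admin tag until it completes, so deletions proceed in batches of at most the admin queue depth.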
187057dacad5SJay Sternberg */ 187157dacad5SJay Sternberg set_current_state(TASK_RUNNING); 18725fd4ce1bSChristoph Hellwig nvme_disable_ctrl(&dev->ctrl, 18737a67cbeaSChristoph Hellwig lo_hi_readq(dev->bar + NVME_REG_CAP)); 187457dacad5SJay Sternberg nvme_clear_queue(dev->queues[0]); 187557dacad5SJay Sternberg flush_kthread_worker(dq->worker); 187657dacad5SJay Sternberg nvme_disable_queue(dev, 0); 187757dacad5SJay Sternberg return; 187857dacad5SJay Sternberg } 187957dacad5SJay Sternberg } 188057dacad5SJay Sternberg set_current_state(TASK_RUNNING); 188157dacad5SJay Sternberg } 188257dacad5SJay Sternberg 188357dacad5SJay Sternberg static void nvme_put_dq(struct nvme_delq_ctx *dq) 188457dacad5SJay Sternberg { 188557dacad5SJay Sternberg atomic_dec(&dq->refcount); 188657dacad5SJay Sternberg if (dq->waiter) 188757dacad5SJay Sternberg wake_up_process(dq->waiter); 188857dacad5SJay Sternberg } 188957dacad5SJay Sternberg 189057dacad5SJay Sternberg static struct nvme_delq_ctx *nvme_get_dq(struct nvme_delq_ctx *dq) 189157dacad5SJay Sternberg { 189257dacad5SJay Sternberg atomic_inc(&dq->refcount); 189357dacad5SJay Sternberg return dq; 189457dacad5SJay Sternberg } 189557dacad5SJay Sternberg 189657dacad5SJay Sternberg static void nvme_del_queue_end(struct nvme_queue *nvmeq) 189757dacad5SJay Sternberg { 189857dacad5SJay Sternberg struct nvme_delq_ctx *dq = nvmeq->cmdinfo.ctx; 189957dacad5SJay Sternberg nvme_put_dq(dq); 1900604e8c8dSKeith Busch 1901604e8c8dSKeith Busch spin_lock_irq(&nvmeq->q_lock); 1902604e8c8dSKeith Busch nvme_process_cq(nvmeq); 1903604e8c8dSKeith Busch spin_unlock_irq(&nvmeq->q_lock); 190457dacad5SJay Sternberg } 190557dacad5SJay Sternberg 190657dacad5SJay Sternberg static int adapter_async_del_queue(struct nvme_queue *nvmeq, u8 opcode, 190757dacad5SJay Sternberg kthread_work_func_t fn) 190857dacad5SJay Sternberg { 190957dacad5SJay Sternberg struct nvme_command c; 191057dacad5SJay Sternberg 191157dacad5SJay Sternberg memset(&c, 0, sizeof(c)); 191257dacad5SJay Sternberg c.delete_queue.opcode = opcode; 191357dacad5SJay Sternberg c.delete_queue.qid = cpu_to_le16(nvmeq->qid); 191457dacad5SJay Sternberg 191557dacad5SJay Sternberg init_kthread_work(&nvmeq->cmdinfo.work, fn); 191657dacad5SJay Sternberg return nvme_submit_admin_async_cmd(nvmeq->dev, &c, &nvmeq->cmdinfo, 191757dacad5SJay Sternberg ADMIN_TIMEOUT); 191857dacad5SJay Sternberg } 191957dacad5SJay Sternberg 192057dacad5SJay Sternberg static void nvme_del_cq_work_handler(struct kthread_work *work) 192157dacad5SJay Sternberg { 192257dacad5SJay Sternberg struct nvme_queue *nvmeq = container_of(work, struct nvme_queue, 192357dacad5SJay Sternberg cmdinfo.work); 192457dacad5SJay Sternberg nvme_del_queue_end(nvmeq); 192557dacad5SJay Sternberg } 192657dacad5SJay Sternberg 192757dacad5SJay Sternberg static int nvme_delete_cq(struct nvme_queue *nvmeq) 192857dacad5SJay Sternberg { 192957dacad5SJay Sternberg return adapter_async_del_queue(nvmeq, nvme_admin_delete_cq, 193057dacad5SJay Sternberg nvme_del_cq_work_handler); 193157dacad5SJay Sternberg } 193257dacad5SJay Sternberg 193357dacad5SJay Sternberg static void nvme_del_sq_work_handler(struct kthread_work *work) 193457dacad5SJay Sternberg { 193557dacad5SJay Sternberg struct nvme_queue *nvmeq = container_of(work, struct nvme_queue, 193657dacad5SJay Sternberg cmdinfo.work); 193757dacad5SJay Sternberg int status = nvmeq->cmdinfo.status; 193857dacad5SJay Sternberg 193957dacad5SJay Sternberg if (!status) 194057dacad5SJay Sternberg status = nvme_delete_cq(nvmeq); 194157dacad5SJay Sternberg if (status) 
194257dacad5SJay Sternberg nvme_del_queue_end(nvmeq); 194357dacad5SJay Sternberg } 194457dacad5SJay Sternberg 194557dacad5SJay Sternberg static int nvme_delete_sq(struct nvme_queue *nvmeq) 194657dacad5SJay Sternberg { 194757dacad5SJay Sternberg return adapter_async_del_queue(nvmeq, nvme_admin_delete_sq, 194857dacad5SJay Sternberg nvme_del_sq_work_handler); 194957dacad5SJay Sternberg } 195057dacad5SJay Sternberg 195157dacad5SJay Sternberg static void nvme_del_queue_start(struct kthread_work *work) 195257dacad5SJay Sternberg { 195357dacad5SJay Sternberg struct nvme_queue *nvmeq = container_of(work, struct nvme_queue, 195457dacad5SJay Sternberg cmdinfo.work); 195557dacad5SJay Sternberg if (nvme_delete_sq(nvmeq)) 195657dacad5SJay Sternberg nvme_del_queue_end(nvmeq); 195757dacad5SJay Sternberg } 195857dacad5SJay Sternberg 195957dacad5SJay Sternberg static void nvme_disable_io_queues(struct nvme_dev *dev) 196057dacad5SJay Sternberg { 196157dacad5SJay Sternberg int i; 196257dacad5SJay Sternberg DEFINE_KTHREAD_WORKER_ONSTACK(worker); 196357dacad5SJay Sternberg struct nvme_delq_ctx dq; 196457dacad5SJay Sternberg struct task_struct *kworker_task = kthread_run(kthread_worker_fn, 19651c63dc66SChristoph Hellwig &worker, "nvme%d", dev->ctrl.instance); 196657dacad5SJay Sternberg 196757dacad5SJay Sternberg if (IS_ERR(kworker_task)) { 196857dacad5SJay Sternberg dev_err(dev->dev, 196957dacad5SJay Sternberg "Failed to create queue del task\n"); 197057dacad5SJay Sternberg for (i = dev->queue_count - 1; i > 0; i--) 197157dacad5SJay Sternberg nvme_disable_queue(dev, i); 197257dacad5SJay Sternberg return; 197357dacad5SJay Sternberg } 197457dacad5SJay Sternberg 197557dacad5SJay Sternberg dq.waiter = NULL; 197657dacad5SJay Sternberg atomic_set(&dq.refcount, 0); 197757dacad5SJay Sternberg dq.worker = &worker; 197857dacad5SJay Sternberg for (i = dev->queue_count - 1; i > 0; i--) { 197957dacad5SJay Sternberg struct nvme_queue *nvmeq = dev->queues[i]; 198057dacad5SJay Sternberg 198157dacad5SJay Sternberg if (nvme_suspend_queue(nvmeq)) 198257dacad5SJay Sternberg continue; 198357dacad5SJay Sternberg nvmeq->cmdinfo.ctx = nvme_get_dq(&dq); 198457dacad5SJay Sternberg nvmeq->cmdinfo.worker = dq.worker; 198557dacad5SJay Sternberg init_kthread_work(&nvmeq->cmdinfo.work, nvme_del_queue_start); 198657dacad5SJay Sternberg queue_kthread_work(dq.worker, &nvmeq->cmdinfo.work); 198757dacad5SJay Sternberg } 198857dacad5SJay Sternberg nvme_wait_dq(&dq, dev); 198957dacad5SJay Sternberg kthread_stop(kworker_task); 199057dacad5SJay Sternberg } 199157dacad5SJay Sternberg 199257dacad5SJay Sternberg /* 199357dacad5SJay Sternberg * Remove the node from the device list and check 199457dacad5SJay Sternberg * for whether or not we need to stop the nvme_thread. 
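 * The polling kthread is shared by all controllers; removing the last entry from dev_list stops it.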
199557dacad5SJay Sternberg */ 199657dacad5SJay Sternberg static void nvme_dev_list_remove(struct nvme_dev *dev) 199757dacad5SJay Sternberg { 199857dacad5SJay Sternberg struct task_struct *tmp = NULL; 199957dacad5SJay Sternberg 200057dacad5SJay Sternberg spin_lock(&dev_list_lock); 200157dacad5SJay Sternberg list_del_init(&dev->node); 200257dacad5SJay Sternberg if (list_empty(&dev_list) && !IS_ERR_OR_NULL(nvme_thread)) { 200357dacad5SJay Sternberg tmp = nvme_thread; 200457dacad5SJay Sternberg nvme_thread = NULL; 200557dacad5SJay Sternberg } 200657dacad5SJay Sternberg spin_unlock(&dev_list_lock); 200757dacad5SJay Sternberg 200857dacad5SJay Sternberg if (tmp) 200957dacad5SJay Sternberg kthread_stop(tmp); 201057dacad5SJay Sternberg } 201157dacad5SJay Sternberg 201257dacad5SJay Sternberg static void nvme_freeze_queues(struct nvme_dev *dev) 201357dacad5SJay Sternberg { 201457dacad5SJay Sternberg struct nvme_ns *ns; 201557dacad5SJay Sternberg 20165bae7f73SChristoph Hellwig list_for_each_entry(ns, &dev->ctrl.namespaces, list) { 201757dacad5SJay Sternberg blk_mq_freeze_queue_start(ns->queue); 201857dacad5SJay Sternberg 201957dacad5SJay Sternberg spin_lock_irq(ns->queue->queue_lock); 202057dacad5SJay Sternberg queue_flag_set(QUEUE_FLAG_STOPPED, ns->queue); 202157dacad5SJay Sternberg spin_unlock_irq(ns->queue->queue_lock); 202257dacad5SJay Sternberg 202357dacad5SJay Sternberg blk_mq_cancel_requeue_work(ns->queue); 202457dacad5SJay Sternberg blk_mq_stop_hw_queues(ns->queue); 202557dacad5SJay Sternberg } 202657dacad5SJay Sternberg } 202757dacad5SJay Sternberg 202857dacad5SJay Sternberg static void nvme_unfreeze_queues(struct nvme_dev *dev) 202957dacad5SJay Sternberg { 203057dacad5SJay Sternberg struct nvme_ns *ns; 203157dacad5SJay Sternberg 20325bae7f73SChristoph Hellwig list_for_each_entry(ns, &dev->ctrl.namespaces, list) { 203357dacad5SJay Sternberg queue_flag_clear_unlocked(QUEUE_FLAG_STOPPED, ns->queue); 203457dacad5SJay Sternberg blk_mq_unfreeze_queue(ns->queue); 203557dacad5SJay Sternberg blk_mq_start_stopped_hw_queues(ns->queue, true); 203657dacad5SJay Sternberg blk_mq_kick_requeue_list(ns->queue); 203757dacad5SJay Sternberg } 203857dacad5SJay Sternberg } 203957dacad5SJay Sternberg 204057dacad5SJay Sternberg static void nvme_dev_shutdown(struct nvme_dev *dev) 204157dacad5SJay Sternberg { 204257dacad5SJay Sternberg int i; 204357dacad5SJay Sternberg u32 csts = -1; 204457dacad5SJay Sternberg 204557dacad5SJay Sternberg nvme_dev_list_remove(dev); 204657dacad5SJay Sternberg 204757dacad5SJay Sternberg if (dev->bar) { 204857dacad5SJay Sternberg nvme_freeze_queues(dev); 20497a67cbeaSChristoph Hellwig csts = readl(dev->bar + NVME_REG_CSTS); 205057dacad5SJay Sternberg } 205157dacad5SJay Sternberg if (csts & NVME_CSTS_CFS || !(csts & NVME_CSTS_RDY)) { 205257dacad5SJay Sternberg for (i = dev->queue_count - 1; i >= 0; i--) { 205357dacad5SJay Sternberg struct nvme_queue *nvmeq = dev->queues[i]; 205457dacad5SJay Sternberg nvme_suspend_queue(nvmeq); 205557dacad5SJay Sternberg } 205657dacad5SJay Sternberg } else { 205757dacad5SJay Sternberg nvme_disable_io_queues(dev); 20585fd4ce1bSChristoph Hellwig nvme_shutdown_ctrl(&dev->ctrl); 205957dacad5SJay Sternberg nvme_disable_queue(dev, 0); 206057dacad5SJay Sternberg } 206157dacad5SJay Sternberg nvme_dev_unmap(dev); 206257dacad5SJay Sternberg 206357dacad5SJay Sternberg for (i = dev->queue_count - 1; i >= 0; i--) 206457dacad5SJay Sternberg nvme_clear_queue(dev->queues[i]); 206557dacad5SJay Sternberg } 206657dacad5SJay Sternberg 206757dacad5SJay Sternberg static int 
nvme_setup_prp_pools(struct nvme_dev *dev) 206857dacad5SJay Sternberg { 206957dacad5SJay Sternberg dev->prp_page_pool = dma_pool_create("prp list page", dev->dev, 207057dacad5SJay Sternberg PAGE_SIZE, PAGE_SIZE, 0); 207157dacad5SJay Sternberg if (!dev->prp_page_pool) 207257dacad5SJay Sternberg return -ENOMEM; 207357dacad5SJay Sternberg 207457dacad5SJay Sternberg /* Optimisation for I/Os between 4k and 128k */ 207557dacad5SJay Sternberg dev->prp_small_pool = dma_pool_create("prp list 256", dev->dev, 207657dacad5SJay Sternberg 256, 256, 0); 207757dacad5SJay Sternberg if (!dev->prp_small_pool) { 207857dacad5SJay Sternberg dma_pool_destroy(dev->prp_page_pool); 207957dacad5SJay Sternberg return -ENOMEM; 208057dacad5SJay Sternberg } 208157dacad5SJay Sternberg return 0; 208257dacad5SJay Sternberg } 208357dacad5SJay Sternberg 208457dacad5SJay Sternberg static void nvme_release_prp_pools(struct nvme_dev *dev) 208557dacad5SJay Sternberg { 208657dacad5SJay Sternberg dma_pool_destroy(dev->prp_page_pool); 208757dacad5SJay Sternberg dma_pool_destroy(dev->prp_small_pool); 208857dacad5SJay Sternberg } 208957dacad5SJay Sternberg 20901673f1f0SChristoph Hellwig static void nvme_pci_free_ctrl(struct nvme_ctrl *ctrl) 209157dacad5SJay Sternberg { 20921673f1f0SChristoph Hellwig struct nvme_dev *dev = to_nvme_dev(ctrl); 209357dacad5SJay Sternberg 209457dacad5SJay Sternberg put_device(dev->dev); 209557dacad5SJay Sternberg if (dev->tagset.tags) 209657dacad5SJay Sternberg blk_mq_free_tag_set(&dev->tagset); 20971c63dc66SChristoph Hellwig if (dev->ctrl.admin_q) 20981c63dc66SChristoph Hellwig blk_put_queue(dev->ctrl.admin_q); 209957dacad5SJay Sternberg kfree(dev->queues); 210057dacad5SJay Sternberg kfree(dev->entry); 210157dacad5SJay Sternberg kfree(dev); 210257dacad5SJay Sternberg } 210357dacad5SJay Sternberg 210457dacad5SJay Sternberg static void nvme_probe_work(struct work_struct *work) 210557dacad5SJay Sternberg { 210657dacad5SJay Sternberg struct nvme_dev *dev = container_of(work, struct nvme_dev, probe_work); 210757dacad5SJay Sternberg bool start_thread = false; 210857dacad5SJay Sternberg int result; 210957dacad5SJay Sternberg 211057dacad5SJay Sternberg result = nvme_dev_map(dev); 211157dacad5SJay Sternberg if (result) 211257dacad5SJay Sternberg goto out; 211357dacad5SJay Sternberg 211457dacad5SJay Sternberg result = nvme_configure_admin_queue(dev); 211557dacad5SJay Sternberg if (result) 211657dacad5SJay Sternberg goto unmap; 211757dacad5SJay Sternberg 211857dacad5SJay Sternberg spin_lock(&dev_list_lock); 211957dacad5SJay Sternberg if (list_empty(&dev_list) && IS_ERR_OR_NULL(nvme_thread)) { 212057dacad5SJay Sternberg start_thread = true; 212157dacad5SJay Sternberg nvme_thread = NULL; 212257dacad5SJay Sternberg } 212357dacad5SJay Sternberg list_add(&dev->node, &dev_list); 212457dacad5SJay Sternberg spin_unlock(&dev_list_lock); 212557dacad5SJay Sternberg 212657dacad5SJay Sternberg if (start_thread) { 212757dacad5SJay Sternberg nvme_thread = kthread_run(nvme_kthread, NULL, "nvme"); 212857dacad5SJay Sternberg wake_up_all(&nvme_kthread_wait); 212957dacad5SJay Sternberg } else 213057dacad5SJay Sternberg wait_event_killable(nvme_kthread_wait, nvme_thread); 213157dacad5SJay Sternberg 213257dacad5SJay Sternberg if (IS_ERR_OR_NULL(nvme_thread)) { 213357dacad5SJay Sternberg result = nvme_thread ? 
PTR_ERR(nvme_thread) : -EINTR; 213457dacad5SJay Sternberg goto disable; 213557dacad5SJay Sternberg } 213657dacad5SJay Sternberg 213757dacad5SJay Sternberg nvme_init_queue(dev->queues[0], 0); 213857dacad5SJay Sternberg result = nvme_alloc_admin_tags(dev); 213957dacad5SJay Sternberg if (result) 214057dacad5SJay Sternberg goto disable; 214157dacad5SJay Sternberg 2142ce4541f4SChristoph Hellwig result = nvme_init_identify(&dev->ctrl); 2143ce4541f4SChristoph Hellwig if (result) 2144ce4541f4SChristoph Hellwig goto free_tags; 2145ce4541f4SChristoph Hellwig 214657dacad5SJay Sternberg result = nvme_setup_io_queues(dev); 214757dacad5SJay Sternberg if (result) 214857dacad5SJay Sternberg goto free_tags; 214957dacad5SJay Sternberg 21501c63dc66SChristoph Hellwig dev->ctrl.event_limit = 1; 215157dacad5SJay Sternberg 215257dacad5SJay Sternberg /* 215357dacad5SJay Sternberg * Keep the controller around but remove all namespaces if we don't have 215457dacad5SJay Sternberg * any working I/O queue. 215557dacad5SJay Sternberg */ 215657dacad5SJay Sternberg if (dev->online_queues < 2) { 215757dacad5SJay Sternberg dev_warn(dev->dev, "IO queues not created\n"); 21585bae7f73SChristoph Hellwig nvme_remove_namespaces(&dev->ctrl); 215957dacad5SJay Sternberg } else { 216057dacad5SJay Sternberg nvme_unfreeze_queues(dev); 216157dacad5SJay Sternberg nvme_dev_add(dev); 216257dacad5SJay Sternberg } 216357dacad5SJay Sternberg 216457dacad5SJay Sternberg return; 216557dacad5SJay Sternberg 216657dacad5SJay Sternberg free_tags: 216757dacad5SJay Sternberg nvme_dev_remove_admin(dev); 21681c63dc66SChristoph Hellwig blk_put_queue(dev->ctrl.admin_q); 21691c63dc66SChristoph Hellwig dev->ctrl.admin_q = NULL; 217057dacad5SJay Sternberg dev->queues[0]->tags = NULL; 217157dacad5SJay Sternberg disable: 217257dacad5SJay Sternberg nvme_disable_queue(dev, 0); 217357dacad5SJay Sternberg nvme_dev_list_remove(dev); 217457dacad5SJay Sternberg unmap: 217557dacad5SJay Sternberg nvme_dev_unmap(dev); 217657dacad5SJay Sternberg out: 217757dacad5SJay Sternberg if (!work_busy(&dev->reset_work)) 217857dacad5SJay Sternberg nvme_dead_ctrl(dev); 217957dacad5SJay Sternberg } 218057dacad5SJay Sternberg 218157dacad5SJay Sternberg static int nvme_remove_dead_ctrl(void *arg) 218257dacad5SJay Sternberg { 218357dacad5SJay Sternberg struct nvme_dev *dev = (struct nvme_dev *)arg; 218457dacad5SJay Sternberg struct pci_dev *pdev = to_pci_dev(dev->dev); 218557dacad5SJay Sternberg 218657dacad5SJay Sternberg if (pci_get_drvdata(pdev)) 218757dacad5SJay Sternberg pci_stop_and_remove_bus_device_locked(pdev); 21881673f1f0SChristoph Hellwig nvme_put_ctrl(&dev->ctrl); 218957dacad5SJay Sternberg return 0; 219057dacad5SJay Sternberg } 219157dacad5SJay Sternberg 219257dacad5SJay Sternberg static void nvme_dead_ctrl(struct nvme_dev *dev) 219357dacad5SJay Sternberg { 219457dacad5SJay Sternberg dev_warn(dev->dev, "Device failed to resume\n"); 21951673f1f0SChristoph Hellwig kref_get(&dev->ctrl.kref); 219657dacad5SJay Sternberg if (IS_ERR(kthread_run(nvme_remove_dead_ctrl, dev, "nvme%d", 21971c63dc66SChristoph Hellwig dev->ctrl.instance))) { 219857dacad5SJay Sternberg dev_err(dev->dev, 219957dacad5SJay Sternberg "Failed to start controller remove task\n"); 22001673f1f0SChristoph Hellwig nvme_put_ctrl(&dev->ctrl); 220157dacad5SJay Sternberg } 220257dacad5SJay Sternberg } 220357dacad5SJay Sternberg 220457dacad5SJay Sternberg static void nvme_reset_work(struct work_struct *ws) 220557dacad5SJay Sternberg { 220657dacad5SJay Sternberg struct nvme_dev *dev = container_of(ws, struct nvme_dev, 
reset_work); 220757dacad5SJay Sternberg bool in_probe = work_busy(&dev->probe_work); 220857dacad5SJay Sternberg 220957dacad5SJay Sternberg nvme_dev_shutdown(dev); 221057dacad5SJay Sternberg 221157dacad5SJay Sternberg /* Synchronize with device probe so that work will see failure status 221257dacad5SJay Sternberg * and exit gracefully without trying to schedule another reset */ 221357dacad5SJay Sternberg flush_work(&dev->probe_work); 221457dacad5SJay Sternberg 221557dacad5SJay Sternberg /* Fail this device if a reset occurred during probe to avoid 221657dacad5SJay Sternberg * infinite initialization loops. */ 221757dacad5SJay Sternberg if (in_probe) { 221857dacad5SJay Sternberg nvme_dead_ctrl(dev); 221957dacad5SJay Sternberg return; 222057dacad5SJay Sternberg } 222157dacad5SJay Sternberg /* Schedule device resume asynchronously so the reset work is available 222257dacad5SJay Sternberg * to clean up errors that may occur during reinitialization */ 222357dacad5SJay Sternberg schedule_work(&dev->probe_work); 222457dacad5SJay Sternberg } 222557dacad5SJay Sternberg 222657dacad5SJay Sternberg static int __nvme_reset(struct nvme_dev *dev) 222757dacad5SJay Sternberg { 222857dacad5SJay Sternberg if (work_pending(&dev->reset_work)) 222957dacad5SJay Sternberg return -EBUSY; 223057dacad5SJay Sternberg list_del_init(&dev->node); 223157dacad5SJay Sternberg queue_work(nvme_workq, &dev->reset_work); 223257dacad5SJay Sternberg return 0; 223357dacad5SJay Sternberg } 223457dacad5SJay Sternberg 223557dacad5SJay Sternberg static int nvme_reset(struct nvme_dev *dev) 223657dacad5SJay Sternberg { 223757dacad5SJay Sternberg int ret; 223857dacad5SJay Sternberg 22391c63dc66SChristoph Hellwig if (!dev->ctrl.admin_q || blk_queue_dying(dev->ctrl.admin_q)) 224057dacad5SJay Sternberg return -ENODEV; 224157dacad5SJay Sternberg 224257dacad5SJay Sternberg spin_lock(&dev_list_lock); 224357dacad5SJay Sternberg ret = __nvme_reset(dev); 224457dacad5SJay Sternberg spin_unlock(&dev_list_lock); 224557dacad5SJay Sternberg 224657dacad5SJay Sternberg if (!ret) { 224757dacad5SJay Sternberg flush_work(&dev->reset_work); 224857dacad5SJay Sternberg flush_work(&dev->probe_work); 224957dacad5SJay Sternberg return 0; 225057dacad5SJay Sternberg } 225157dacad5SJay Sternberg 225257dacad5SJay Sternberg return ret; 225357dacad5SJay Sternberg } 225457dacad5SJay Sternberg 22551c63dc66SChristoph Hellwig static int nvme_pci_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val) 22561c63dc66SChristoph Hellwig { 22571c63dc66SChristoph Hellwig *val = readl(to_nvme_dev(ctrl)->bar + off); 22581c63dc66SChristoph Hellwig return 0; 22591c63dc66SChristoph Hellwig } 22601c63dc66SChristoph Hellwig 22615fd4ce1bSChristoph Hellwig static int nvme_pci_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val) 22625fd4ce1bSChristoph Hellwig { 22635fd4ce1bSChristoph Hellwig writel(val, to_nvme_dev(ctrl)->bar + off); 22645fd4ce1bSChristoph Hellwig return 0; 22655fd4ce1bSChristoph Hellwig } 22665fd4ce1bSChristoph Hellwig 22677fd8930fSChristoph Hellwig static int nvme_pci_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val) 22687fd8930fSChristoph Hellwig { 22697fd8930fSChristoph Hellwig *val = readq(to_nvme_dev(ctrl)->bar + off); 22707fd8930fSChristoph Hellwig return 0; 22717fd8930fSChristoph Hellwig } 22727fd8930fSChristoph Hellwig 22735bae7f73SChristoph Hellwig static bool nvme_pci_io_incapable(struct nvme_ctrl *ctrl) 22745bae7f73SChristoph Hellwig { 22755bae7f73SChristoph Hellwig struct nvme_dev *dev = to_nvme_dev(ctrl); 22765bae7f73SChristoph Hellwig 22775bae7f73SChristoph
22551c63dc66SChristoph Hellwig static int nvme_pci_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val)
22561c63dc66SChristoph Hellwig {
22571c63dc66SChristoph Hellwig 	*val = readl(to_nvme_dev(ctrl)->bar + off);
22581c63dc66SChristoph Hellwig 	return 0;
22591c63dc66SChristoph Hellwig }
22601c63dc66SChristoph Hellwig 
22615fd4ce1bSChristoph Hellwig static int nvme_pci_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val)
22625fd4ce1bSChristoph Hellwig {
22635fd4ce1bSChristoph Hellwig 	writel(val, to_nvme_dev(ctrl)->bar + off);
22645fd4ce1bSChristoph Hellwig 	return 0;
22655fd4ce1bSChristoph Hellwig }
22665fd4ce1bSChristoph Hellwig 
22677fd8930fSChristoph Hellwig static int nvme_pci_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val)
22687fd8930fSChristoph Hellwig {
22697fd8930fSChristoph Hellwig 	*val = readq(to_nvme_dev(ctrl)->bar + off);
22707fd8930fSChristoph Hellwig 	return 0;
22717fd8930fSChristoph Hellwig }
22727fd8930fSChristoph Hellwig 
22735bae7f73SChristoph Hellwig static bool nvme_pci_io_incapable(struct nvme_ctrl *ctrl)
22745bae7f73SChristoph Hellwig {
22755bae7f73SChristoph Hellwig 	struct nvme_dev *dev = to_nvme_dev(ctrl);
22765bae7f73SChristoph Hellwig 
22775bae7f73SChristoph Hellwig 	return !dev->bar || dev->online_queues < 2;
22785bae7f73SChristoph Hellwig }
22795bae7f73SChristoph Hellwig 
2280f3ca80fcSChristoph Hellwig static int nvme_pci_reset_ctrl(struct nvme_ctrl *ctrl)
2281f3ca80fcSChristoph Hellwig {
2282f3ca80fcSChristoph Hellwig 	return nvme_reset(to_nvme_dev(ctrl));
2283f3ca80fcSChristoph Hellwig }
2284f3ca80fcSChristoph Hellwig 
22851c63dc66SChristoph Hellwig static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = {
22861c63dc66SChristoph Hellwig 	.reg_read32 = nvme_pci_reg_read32,
22875fd4ce1bSChristoph Hellwig 	.reg_write32 = nvme_pci_reg_write32,
22887fd8930fSChristoph Hellwig 	.reg_read64 = nvme_pci_reg_read64,
22895bae7f73SChristoph Hellwig 	.io_incapable = nvme_pci_io_incapable,
2290f3ca80fcSChristoph Hellwig 	.reset_ctrl = nvme_pci_reset_ctrl,
22911673f1f0SChristoph Hellwig 	.free_ctrl = nvme_pci_free_ctrl,
22921c63dc66SChristoph Hellwig };
22931c63dc66SChristoph Hellwig 
229457dacad5SJay Sternberg static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
229557dacad5SJay Sternberg {
229657dacad5SJay Sternberg 	int node, result = -ENOMEM;
229757dacad5SJay Sternberg 	struct nvme_dev *dev;
229857dacad5SJay Sternberg 
229957dacad5SJay Sternberg 	node = dev_to_node(&pdev->dev);
230057dacad5SJay Sternberg 	if (node == NUMA_NO_NODE)
230157dacad5SJay Sternberg 		set_dev_node(&pdev->dev, 0);
230257dacad5SJay Sternberg 
230357dacad5SJay Sternberg 	dev = kzalloc_node(sizeof(*dev), GFP_KERNEL, node);
230457dacad5SJay Sternberg 	if (!dev)
230557dacad5SJay Sternberg 		return -ENOMEM;
230657dacad5SJay Sternberg 	dev->entry = kzalloc_node(num_possible_cpus() * sizeof(*dev->entry),
230757dacad5SJay Sternberg 							GFP_KERNEL, node);
230857dacad5SJay Sternberg 	if (!dev->entry)
230957dacad5SJay Sternberg 		goto free;
231057dacad5SJay Sternberg 	dev->queues = kzalloc_node((num_possible_cpus() + 1) * sizeof(void *),
231157dacad5SJay Sternberg 							GFP_KERNEL, node);
231257dacad5SJay Sternberg 	if (!dev->queues)
231357dacad5SJay Sternberg 		goto free;
231457dacad5SJay Sternberg 
231557dacad5SJay Sternberg 	dev->dev = get_device(&pdev->dev);
231657dacad5SJay Sternberg 	pci_set_drvdata(pdev, dev);
23171c63dc66SChristoph Hellwig 
231857dacad5SJay Sternberg 	INIT_LIST_HEAD(&dev->node);
231957dacad5SJay Sternberg 	INIT_WORK(&dev->scan_work, nvme_dev_scan);
232057dacad5SJay Sternberg 	INIT_WORK(&dev->probe_work, nvme_probe_work);
2321f3ca80fcSChristoph Hellwig 	INIT_WORK(&dev->reset_work, nvme_reset_work);
2322f3ca80fcSChristoph Hellwig 
2323f3ca80fcSChristoph Hellwig 	result = nvme_setup_prp_pools(dev);
2324f3ca80fcSChristoph Hellwig 	if (result)
2325f3ca80fcSChristoph Hellwig 		goto put_pci;
2326f3ca80fcSChristoph Hellwig 
2327f3ca80fcSChristoph Hellwig 	result = nvme_init_ctrl(&dev->ctrl, &pdev->dev, &nvme_pci_ctrl_ops,
2328f3ca80fcSChristoph Hellwig 			id->driver_data);
2329f3ca80fcSChristoph Hellwig 	if (result)
2330f3ca80fcSChristoph Hellwig 		goto release_pools;
2331f3ca80fcSChristoph Hellwig 
233257dacad5SJay Sternberg 	schedule_work(&dev->probe_work);
233357dacad5SJay Sternberg 	return 0;
233457dacad5SJay Sternberg 
233557dacad5SJay Sternberg  release_pools:
233657dacad5SJay Sternberg 	nvme_release_prp_pools(dev);
233757dacad5SJay Sternberg  put_pci:
233857dacad5SJay Sternberg 	put_device(dev->dev);
233957dacad5SJay Sternberg  free:
234057dacad5SJay Sternberg 	kfree(dev->queues);
234157dacad5SJay Sternberg 	kfree(dev->entry);
234257dacad5SJay Sternberg 	kfree(dev);
234357dacad5SJay Sternberg 	return result;
234457dacad5SJay Sternberg }
234557dacad5SJay Sternberg 
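/*
 * Called by the PCI core around a function reset: with prepare == true
 * before the reset to quiesce the controller, and with prepare == false
 * afterwards to schedule reinitialization via the probe work.
 */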
234657dacad5SJay Sternberg static void nvme_reset_notify(struct pci_dev *pdev, bool prepare)
234757dacad5SJay Sternberg {
234857dacad5SJay Sternberg 	struct nvme_dev *dev = pci_get_drvdata(pdev);
234957dacad5SJay Sternberg 
235057dacad5SJay Sternberg 	if (prepare)
235157dacad5SJay Sternberg 		nvme_dev_shutdown(dev);
235257dacad5SJay Sternberg 	else
235357dacad5SJay Sternberg 		schedule_work(&dev->probe_work);
235457dacad5SJay Sternberg }
235557dacad5SJay Sternberg 
235657dacad5SJay Sternberg static void nvme_shutdown(struct pci_dev *pdev)
235757dacad5SJay Sternberg {
235857dacad5SJay Sternberg 	struct nvme_dev *dev = pci_get_drvdata(pdev);
235957dacad5SJay Sternberg 	nvme_dev_shutdown(dev);
236057dacad5SJay Sternberg }
236157dacad5SJay Sternberg 
236257dacad5SJay Sternberg static void nvme_remove(struct pci_dev *pdev)
236357dacad5SJay Sternberg {
236457dacad5SJay Sternberg 	struct nvme_dev *dev = pci_get_drvdata(pdev);
236557dacad5SJay Sternberg 
236657dacad5SJay Sternberg 	spin_lock(&dev_list_lock);
236757dacad5SJay Sternberg 	list_del_init(&dev->node);
236857dacad5SJay Sternberg 	spin_unlock(&dev_list_lock);
236957dacad5SJay Sternberg 
237057dacad5SJay Sternberg 	pci_set_drvdata(pdev, NULL);
237157dacad5SJay Sternberg 	flush_work(&dev->probe_work);
237257dacad5SJay Sternberg 	flush_work(&dev->reset_work);
237357dacad5SJay Sternberg 	flush_work(&dev->scan_work);
23745bae7f73SChristoph Hellwig 	nvme_remove_namespaces(&dev->ctrl);
237557dacad5SJay Sternberg 	nvme_dev_shutdown(dev);
237657dacad5SJay Sternberg 	nvme_dev_remove_admin(dev);
237757dacad5SJay Sternberg 	nvme_free_queues(dev, 0);
237857dacad5SJay Sternberg 	nvme_release_cmb(dev);
237957dacad5SJay Sternberg 	nvme_release_prp_pools(dev);
23801673f1f0SChristoph Hellwig 	nvme_put_ctrl(&dev->ctrl);
238157dacad5SJay Sternberg }
238257dacad5SJay Sternberg 
238357dacad5SJay Sternberg /* These functions are yet to be implemented */
238457dacad5SJay Sternberg #define nvme_error_detected NULL
238557dacad5SJay Sternberg #define nvme_dump_registers NULL
238657dacad5SJay Sternberg #define nvme_link_reset NULL
238757dacad5SJay Sternberg #define nvme_slot_reset NULL
238857dacad5SJay Sternberg #define nvme_error_resume NULL
238957dacad5SJay Sternberg 
239057dacad5SJay Sternberg #ifdef CONFIG_PM_SLEEP
239157dacad5SJay Sternberg static int nvme_suspend(struct device *dev)
239257dacad5SJay Sternberg {
239357dacad5SJay Sternberg 	struct pci_dev *pdev = to_pci_dev(dev);
239457dacad5SJay Sternberg 	struct nvme_dev *ndev = pci_get_drvdata(pdev);
239557dacad5SJay Sternberg 
239657dacad5SJay Sternberg 	nvme_dev_shutdown(ndev);
239757dacad5SJay Sternberg 	return 0;
239857dacad5SJay Sternberg }
239957dacad5SJay Sternberg 
240057dacad5SJay Sternberg static int nvme_resume(struct device *dev)
240157dacad5SJay Sternberg {
240257dacad5SJay Sternberg 	struct pci_dev *pdev = to_pci_dev(dev);
240357dacad5SJay Sternberg 	struct nvme_dev *ndev = pci_get_drvdata(pdev);
240457dacad5SJay Sternberg 
240557dacad5SJay Sternberg 	schedule_work(&ndev->probe_work);
240657dacad5SJay Sternberg 	return 0;
240757dacad5SJay Sternberg }
240857dacad5SJay Sternberg #endif
240957dacad5SJay Sternberg 
241057dacad5SJay Sternberg static SIMPLE_DEV_PM_OPS(nvme_dev_pm_ops, nvme_suspend, nvme_resume);
241157dacad5SJay Sternberg 
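/*
 * AER error handler table: everything except reset_notify is still a NULL
 * stub (see the placeholder defines above).
 */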
241257dacad5SJay Sternberg static const struct pci_error_handlers nvme_err_handler = {
241357dacad5SJay Sternberg 	.error_detected = nvme_error_detected,
241457dacad5SJay Sternberg 	.mmio_enabled = nvme_dump_registers,
241557dacad5SJay Sternberg 	.link_reset = nvme_link_reset,
241657dacad5SJay Sternberg 	.slot_reset = nvme_slot_reset,
241757dacad5SJay Sternberg 	.resume = nvme_error_resume,
241857dacad5SJay Sternberg 	.reset_notify = nvme_reset_notify,
241957dacad5SJay Sternberg };
242057dacad5SJay Sternberg 
242157dacad5SJay Sternberg /* Move to pci_ids.h later */
242257dacad5SJay Sternberg #define PCI_CLASS_STORAGE_EXPRESS	0x010802
242357dacad5SJay Sternberg 
242457dacad5SJay Sternberg static const struct pci_device_id nvme_id_table[] = {
2425106198edSChristoph Hellwig 	{ PCI_VDEVICE(INTEL, 0x0953),
2426106198edSChristoph Hellwig 		.driver_data = NVME_QUIRK_STRIPE_SIZE, },
242757dacad5SJay Sternberg 	{ PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
2428c74dc780SStephan Günther 	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001) },
242957dacad5SJay Sternberg 	{ 0, }
243057dacad5SJay Sternberg };
243157dacad5SJay Sternberg MODULE_DEVICE_TABLE(pci, nvme_id_table);
243257dacad5SJay Sternberg 
243357dacad5SJay Sternberg static struct pci_driver nvme_driver = {
243457dacad5SJay Sternberg 	.name = "nvme",
243557dacad5SJay Sternberg 	.id_table = nvme_id_table,
243657dacad5SJay Sternberg 	.probe = nvme_probe,
243757dacad5SJay Sternberg 	.remove = nvme_remove,
243857dacad5SJay Sternberg 	.shutdown = nvme_shutdown,
243957dacad5SJay Sternberg 	.driver = {
244057dacad5SJay Sternberg 		.pm = &nvme_dev_pm_ops,
244157dacad5SJay Sternberg 	},
244257dacad5SJay Sternberg 	.err_handler = &nvme_err_handler,
244357dacad5SJay Sternberg };
244457dacad5SJay Sternberg 
244557dacad5SJay Sternberg static int __init nvme_init(void)
244657dacad5SJay Sternberg {
244757dacad5SJay Sternberg 	int result;
244857dacad5SJay Sternberg 
244957dacad5SJay Sternberg 	init_waitqueue_head(&nvme_kthread_wait);
245057dacad5SJay Sternberg 
245157dacad5SJay Sternberg 	nvme_workq = create_singlethread_workqueue("nvme");
245257dacad5SJay Sternberg 	if (!nvme_workq)
245357dacad5SJay Sternberg 		return -ENOMEM;
245457dacad5SJay Sternberg 
24555bae7f73SChristoph Hellwig 	result = nvme_core_init();
245657dacad5SJay Sternberg 	if (result < 0)
245757dacad5SJay Sternberg 		goto kill_workq;
245857dacad5SJay Sternberg 
245957dacad5SJay Sternberg 	result = pci_register_driver(&nvme_driver);
246057dacad5SJay Sternberg 	if (result)
2461f3ca80fcSChristoph Hellwig 		goto core_exit;
246257dacad5SJay Sternberg 	return 0;
246357dacad5SJay Sternberg 
2464f3ca80fcSChristoph Hellwig  core_exit:
24655bae7f73SChristoph Hellwig 	nvme_core_exit();
246657dacad5SJay Sternberg  kill_workq:
246757dacad5SJay Sternberg 	destroy_workqueue(nvme_workq);
246857dacad5SJay Sternberg 	return result;
246957dacad5SJay Sternberg }
247057dacad5SJay Sternberg 
247157dacad5SJay Sternberg static void __exit nvme_exit(void)
247257dacad5SJay Sternberg {
247357dacad5SJay Sternberg 	pci_unregister_driver(&nvme_driver);
24745bae7f73SChristoph Hellwig 	nvme_core_exit();
247557dacad5SJay Sternberg 	destroy_workqueue(nvme_workq);
247657dacad5SJay Sternberg 	BUG_ON(nvme_thread && !IS_ERR(nvme_thread));
247757dacad5SJay Sternberg 	_nvme_check_size();
247857dacad5SJay Sternberg }
247957dacad5SJay Sternberg 
248057dacad5SJay Sternberg MODULE_AUTHOR("Matthew Wilcox <willy@linux.intel.com>");
248157dacad5SJay Sternberg MODULE_LICENSE("GPL");
248257dacad5SJay Sternberg MODULE_VERSION("1.0");
248357dacad5SJay Sternberg module_init(nvme_init);
248457dacad5SJay Sternberg module_exit(nvme_exit);