xref: /openbmc/linux/drivers/nvme/host/pci.c (revision ebe6d874)
157dacad5SJay Sternberg /*
257dacad5SJay Sternberg  * NVM Express device driver
357dacad5SJay Sternberg  * Copyright (c) 2011-2014, Intel Corporation.
457dacad5SJay Sternberg  *
557dacad5SJay Sternberg  * This program is free software; you can redistribute it and/or modify it
657dacad5SJay Sternberg  * under the terms and conditions of the GNU General Public License,
757dacad5SJay Sternberg  * version 2, as published by the Free Software Foundation.
857dacad5SJay Sternberg  *
957dacad5SJay Sternberg  * This program is distributed in the hope it will be useful, but WITHOUT
1057dacad5SJay Sternberg  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
1157dacad5SJay Sternberg  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
1257dacad5SJay Sternberg  * more details.
1357dacad5SJay Sternberg  */
1457dacad5SJay Sternberg 
15a0a3408eSKeith Busch #include <linux/aer.h>
1657dacad5SJay Sternberg #include <linux/bitops.h>
1757dacad5SJay Sternberg #include <linux/blkdev.h>
1857dacad5SJay Sternberg #include <linux/blk-mq.h>
19dca51e78SChristoph Hellwig #include <linux/blk-mq-pci.h>
20ff5350a8SAndy Lutomirski #include <linux/dmi.h>
2157dacad5SJay Sternberg #include <linux/init.h>
2257dacad5SJay Sternberg #include <linux/interrupt.h>
2357dacad5SJay Sternberg #include <linux/io.h>
2457dacad5SJay Sternberg #include <linux/mm.h>
2557dacad5SJay Sternberg #include <linux/module.h>
2677bf25eaSKeith Busch #include <linux/mutex.h>
2757dacad5SJay Sternberg #include <linux/pci.h>
2857dacad5SJay Sternberg #include <linux/poison.h>
2957dacad5SJay Sternberg #include <linux/t10-pi.h>
302d55cd5fSChristoph Hellwig #include <linux/timer.h>
3157dacad5SJay Sternberg #include <linux/types.h>
329cf5c095SLinus Torvalds #include <linux/io-64-nonatomic-lo-hi.h>
331d277a63SKeith Busch #include <asm/unaligned.h>
34a98e58e5SScott Bauer #include <linux/sed-opal.h>
3557dacad5SJay Sternberg 
3657dacad5SJay Sternberg #include "nvme.h"
3757dacad5SJay Sternberg 
3857dacad5SJay Sternberg #define NVME_Q_DEPTH		1024
3957dacad5SJay Sternberg #define NVME_AQ_DEPTH		256
4057dacad5SJay Sternberg #define SQ_SIZE(depth)		((depth) * sizeof(struct nvme_command))
4157dacad5SJay Sternberg #define CQ_SIZE(depth)		((depth) * sizeof(struct nvme_completion))
4257dacad5SJay Sternberg 
43adf68f21SChristoph Hellwig /*
44adf68f21SChristoph Hellwig  * We handle AEN commands ourselves and don't even let the
45adf68f21SChristoph Hellwig  * block layer know about them.
46adf68f21SChristoph Hellwig  */
47f866fc42SChristoph Hellwig #define NVME_AQ_BLKMQ_DEPTH	(NVME_AQ_DEPTH - NVME_NR_AERS)
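
/*
 * Because the admin tagset never hands out command IDs at or above
 * NVME_AQ_BLKMQ_DEPTH, nvme_pci_submit_async_event() can use those IDs
 * (NVME_AQ_BLKMQ_DEPTH + aer_idx) for AERs, and __nvme_process_cq()
 * can recognise an AEN completion on the admin queue from the
 * command_id alone, without a struct request backing it.
 */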
48adf68f21SChristoph Hellwig 
4957dacad5SJay Sternberg static int use_threaded_interrupts;
5057dacad5SJay Sternberg module_param(use_threaded_interrupts, int, 0);
5157dacad5SJay Sternberg 
5257dacad5SJay Sternberg static bool use_cmb_sqes = true;
5357dacad5SJay Sternberg module_param(use_cmb_sqes, bool, 0644);
5457dacad5SJay Sternberg MODULE_PARM_DESC(use_cmb_sqes, "use controller's memory buffer for I/O SQes");
5557dacad5SJay Sternberg 
5687ad72a5SChristoph Hellwig static unsigned int max_host_mem_size_mb = 128;
5787ad72a5SChristoph Hellwig module_param(max_host_mem_size_mb, uint, 0444);
5887ad72a5SChristoph Hellwig MODULE_PARM_DESC(max_host_mem_size_mb,
5987ad72a5SChristoph Hellwig 	"Maximum Host Memory Buffer (HMB) size per controller (in MiB)");
6087ad72a5SChristoph Hellwig 
611c63dc66SChristoph Hellwig struct nvme_dev;
621c63dc66SChristoph Hellwig struct nvme_queue;
6357dacad5SJay Sternberg 
6457dacad5SJay Sternberg static int nvme_reset(struct nvme_dev *dev);
65a0fa9647SJens Axboe static void nvme_process_cq(struct nvme_queue *nvmeq);
66a5cdb68cSKeith Busch static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown);
6757dacad5SJay Sternberg 
6857dacad5SJay Sternberg /*
691c63dc66SChristoph Hellwig  * Represents an NVM Express device.  Each nvme_dev is a PCI function.
701c63dc66SChristoph Hellwig  */
711c63dc66SChristoph Hellwig struct nvme_dev {
721c63dc66SChristoph Hellwig 	struct nvme_queue **queues;
731c63dc66SChristoph Hellwig 	struct blk_mq_tag_set tagset;
741c63dc66SChristoph Hellwig 	struct blk_mq_tag_set admin_tagset;
751c63dc66SChristoph Hellwig 	u32 __iomem *dbs;
761c63dc66SChristoph Hellwig 	struct device *dev;
771c63dc66SChristoph Hellwig 	struct dma_pool *prp_page_pool;
781c63dc66SChristoph Hellwig 	struct dma_pool *prp_small_pool;
791c63dc66SChristoph Hellwig 	unsigned queue_count;
801c63dc66SChristoph Hellwig 	unsigned online_queues;
811c63dc66SChristoph Hellwig 	unsigned max_qid;
821c63dc66SChristoph Hellwig 	int q_depth;
831c63dc66SChristoph Hellwig 	u32 db_stride;
841c63dc66SChristoph Hellwig 	void __iomem *bar;
8597f6ef64SXu Yu 	unsigned long bar_mapped_size;
861c63dc66SChristoph Hellwig 	struct work_struct reset_work;
875c8809e6SChristoph Hellwig 	struct work_struct remove_work;
8877bf25eaSKeith Busch 	struct mutex shutdown_lock;
891c63dc66SChristoph Hellwig 	bool subsystem;
901c63dc66SChristoph Hellwig 	void __iomem *cmb;
911c63dc66SChristoph Hellwig 	dma_addr_t cmb_dma_addr;
921c63dc66SChristoph Hellwig 	u64 cmb_size;
931c63dc66SChristoph Hellwig 	u32 cmbsz;
94202021c1SStephen Bates 	u32 cmbloc;
951c63dc66SChristoph Hellwig 	struct nvme_ctrl ctrl;
96db3cbfffSKeith Busch 	struct completion ioq_wait;
9787ad72a5SChristoph Hellwig 
9887ad72a5SChristoph Hellwig 	/* shadow doorbell buffer support: */
99f9f38e33SHelen Koike 	u32 *dbbuf_dbs;
100f9f38e33SHelen Koike 	dma_addr_t dbbuf_dbs_dma_addr;
101f9f38e33SHelen Koike 	u32 *dbbuf_eis;
102f9f38e33SHelen Koike 	dma_addr_t dbbuf_eis_dma_addr;
10387ad72a5SChristoph Hellwig 
10487ad72a5SChristoph Hellwig 	/* host memory buffer support: */
10587ad72a5SChristoph Hellwig 	u64 host_mem_size;
10687ad72a5SChristoph Hellwig 	u32 nr_host_mem_descs;
10787ad72a5SChristoph Hellwig 	struct nvme_host_mem_buf_desc *host_mem_descs;
10887ad72a5SChristoph Hellwig 	void **host_mem_desc_bufs;
10957dacad5SJay Sternberg };
11057dacad5SJay Sternberg 
111f9f38e33SHelen Koike static inline unsigned int sq_idx(unsigned int qid, u32 stride)
112f9f38e33SHelen Koike {
113f9f38e33SHelen Koike 	return qid * 2 * stride;
114f9f38e33SHelen Koike }
115f9f38e33SHelen Koike 
116f9f38e33SHelen Koike static inline unsigned int cq_idx(unsigned int qid, u32 stride)
117f9f38e33SHelen Koike {
118f9f38e33SHelen Koike 	return (qid * 2 + 1) * stride;
119f9f38e33SHelen Koike }
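
/*
 * Shadow doorbell layout: each queue owns two u32 slots in dbbuf_dbs
 * and dbbuf_eis, the SQ tail at qid * 2 * stride and the CQ head right
 * after it, mirroring the BAR doorbell register layout.  With the
 * usual stride of 1, queue 3 uses slots 6 (SQ) and 7 (CQ).
 */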
120f9f38e33SHelen Koike 
1211c63dc66SChristoph Hellwig static inline struct nvme_dev *to_nvme_dev(struct nvme_ctrl *ctrl)
1221c63dc66SChristoph Hellwig {
1231c63dc66SChristoph Hellwig 	return container_of(ctrl, struct nvme_dev, ctrl);
1241c63dc66SChristoph Hellwig }
1251c63dc66SChristoph Hellwig 
12657dacad5SJay Sternberg /*
12757dacad5SJay Sternberg  * An NVM Express queue.  Each device has at least two (one for admin
12857dacad5SJay Sternberg  * commands and one for I/O commands).
12957dacad5SJay Sternberg  */
13057dacad5SJay Sternberg struct nvme_queue {
13157dacad5SJay Sternberg 	struct device *q_dmadev;
13257dacad5SJay Sternberg 	struct nvme_dev *dev;
13357dacad5SJay Sternberg 	spinlock_t q_lock;
13457dacad5SJay Sternberg 	struct nvme_command *sq_cmds;
13557dacad5SJay Sternberg 	struct nvme_command __iomem *sq_cmds_io;
13657dacad5SJay Sternberg 	volatile struct nvme_completion *cqes;
13757dacad5SJay Sternberg 	struct blk_mq_tags **tags;
13857dacad5SJay Sternberg 	dma_addr_t sq_dma_addr;
13957dacad5SJay Sternberg 	dma_addr_t cq_dma_addr;
14057dacad5SJay Sternberg 	u32 __iomem *q_db;
14157dacad5SJay Sternberg 	u16 q_depth;
14257dacad5SJay Sternberg 	s16 cq_vector;
14357dacad5SJay Sternberg 	u16 sq_tail;
14457dacad5SJay Sternberg 	u16 cq_head;
14557dacad5SJay Sternberg 	u16 qid;
14657dacad5SJay Sternberg 	u8 cq_phase;
14757dacad5SJay Sternberg 	u8 cqe_seen;
148f9f38e33SHelen Koike 	u32 *dbbuf_sq_db;
149f9f38e33SHelen Koike 	u32 *dbbuf_cq_db;
150f9f38e33SHelen Koike 	u32 *dbbuf_sq_ei;
151f9f38e33SHelen Koike 	u32 *dbbuf_cq_ei;
15257dacad5SJay Sternberg };
15357dacad5SJay Sternberg 
15457dacad5SJay Sternberg /*
15571bd150cSChristoph Hellwig  * The nvme_iod describes the data in an I/O, including the list of PRP
15671bd150cSChristoph Hellwig  * entries.  You can't see it in this data structure because C doesn't let
157f4800d6dSChristoph Hellwig  * me express that.  Use nvme_init_iod to ensure there's enough space
15871bd150cSChristoph Hellwig  * allocated to store the PRP list.
15971bd150cSChristoph Hellwig  */
16071bd150cSChristoph Hellwig struct nvme_iod {
161d49187e9SChristoph Hellwig 	struct nvme_request req;
162f4800d6dSChristoph Hellwig 	struct nvme_queue *nvmeq;
163f4800d6dSChristoph Hellwig 	int aborted;
16471bd150cSChristoph Hellwig 	int npages;		/* In the PRP list. 0 means small pool in use */
16571bd150cSChristoph Hellwig 	int nents;		/* Used in scatterlist */
16671bd150cSChristoph Hellwig 	int length;		/* Of data, in bytes */
16771bd150cSChristoph Hellwig 	dma_addr_t first_dma;
168bf684057SChristoph Hellwig 	struct scatterlist meta_sg; /* metadata requires single contiguous buffer */
169f4800d6dSChristoph Hellwig 	struct scatterlist *sg;
170f4800d6dSChristoph Hellwig 	struct scatterlist inline_sg[0];
17157dacad5SJay Sternberg };
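
/*
 * inline_sg is a zero-length array: nvme_cmd_size() sizes the blk-mq
 * PDU so that NVME_INT_PAGES worth of scatterlist entries and PRP-list
 * pointers fit right behind the iod, letting small requests avoid a
 * separate allocation (see nvme_init_iod() and iod_list() below).
 */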
17257dacad5SJay Sternberg 
17357dacad5SJay Sternberg /*
17457dacad5SJay Sternberg  * Check we didn't inadvertently grow the command struct
17557dacad5SJay Sternberg  */
17657dacad5SJay Sternberg static inline void _nvme_check_size(void)
17757dacad5SJay Sternberg {
17857dacad5SJay Sternberg 	BUILD_BUG_ON(sizeof(struct nvme_rw_command) != 64);
17957dacad5SJay Sternberg 	BUILD_BUG_ON(sizeof(struct nvme_create_cq) != 64);
18057dacad5SJay Sternberg 	BUILD_BUG_ON(sizeof(struct nvme_create_sq) != 64);
18157dacad5SJay Sternberg 	BUILD_BUG_ON(sizeof(struct nvme_delete_queue) != 64);
18257dacad5SJay Sternberg 	BUILD_BUG_ON(sizeof(struct nvme_features) != 64);
18357dacad5SJay Sternberg 	BUILD_BUG_ON(sizeof(struct nvme_format_cmd) != 64);
18457dacad5SJay Sternberg 	BUILD_BUG_ON(sizeof(struct nvme_abort_cmd) != 64);
18557dacad5SJay Sternberg 	BUILD_BUG_ON(sizeof(struct nvme_command) != 64);
1860add5e8eSJohannes Thumshirn 	BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != NVME_IDENTIFY_DATA_SIZE);
1870add5e8eSJohannes Thumshirn 	BUILD_BUG_ON(sizeof(struct nvme_id_ns) != NVME_IDENTIFY_DATA_SIZE);
18857dacad5SJay Sternberg 	BUILD_BUG_ON(sizeof(struct nvme_lba_range_type) != 64);
18957dacad5SJay Sternberg 	BUILD_BUG_ON(sizeof(struct nvme_smart_log) != 512);
190f9f38e33SHelen Koike 	BUILD_BUG_ON(sizeof(struct nvme_dbbuf) != 64);
191f9f38e33SHelen Koike }
192f9f38e33SHelen Koike 
193f9f38e33SHelen Koike static inline unsigned int nvme_dbbuf_size(u32 stride)
194f9f38e33SHelen Koike {
195f9f38e33SHelen Koike 	return ((num_possible_cpus() + 1) * 8 * stride);
196f9f38e33SHelen Koike }
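
/*
 * One SQ and one CQ doorbell slot (2 * sizeof(u32) = 8 bytes) per
 * queue, for the admin queue plus up to one I/O queue per possible
 * CPU, scaled by the controller's doorbell stride.
 */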
197f9f38e33SHelen Koike 
198f9f38e33SHelen Koike static int nvme_dbbuf_dma_alloc(struct nvme_dev *dev)
199f9f38e33SHelen Koike {
200f9f38e33SHelen Koike 	unsigned int mem_size = nvme_dbbuf_size(dev->db_stride);
201f9f38e33SHelen Koike 
202f9f38e33SHelen Koike 	if (dev->dbbuf_dbs)
203f9f38e33SHelen Koike 		return 0;
204f9f38e33SHelen Koike 
205f9f38e33SHelen Koike 	dev->dbbuf_dbs = dma_alloc_coherent(dev->dev, mem_size,
206f9f38e33SHelen Koike 					    &dev->dbbuf_dbs_dma_addr,
207f9f38e33SHelen Koike 					    GFP_KERNEL);
208f9f38e33SHelen Koike 	if (!dev->dbbuf_dbs)
209f9f38e33SHelen Koike 		return -ENOMEM;
210f9f38e33SHelen Koike 	dev->dbbuf_eis = dma_alloc_coherent(dev->dev, mem_size,
211f9f38e33SHelen Koike 					    &dev->dbbuf_eis_dma_addr,
212f9f38e33SHelen Koike 					    GFP_KERNEL);
213f9f38e33SHelen Koike 	if (!dev->dbbuf_eis) {
214f9f38e33SHelen Koike 		dma_free_coherent(dev->dev, mem_size,
215f9f38e33SHelen Koike 				  dev->dbbuf_dbs, dev->dbbuf_dbs_dma_addr);
216f9f38e33SHelen Koike 		dev->dbbuf_dbs = NULL;
217f9f38e33SHelen Koike 		return -ENOMEM;
218f9f38e33SHelen Koike 	}
219f9f38e33SHelen Koike 
220f9f38e33SHelen Koike 	return 0;
221f9f38e33SHelen Koike }
222f9f38e33SHelen Koike 
223f9f38e33SHelen Koike static void nvme_dbbuf_dma_free(struct nvme_dev *dev)
224f9f38e33SHelen Koike {
225f9f38e33SHelen Koike 	unsigned int mem_size = nvme_dbbuf_size(dev->db_stride);
226f9f38e33SHelen Koike 
227f9f38e33SHelen Koike 	if (dev->dbbuf_dbs) {
228f9f38e33SHelen Koike 		dma_free_coherent(dev->dev, mem_size,
229f9f38e33SHelen Koike 				  dev->dbbuf_dbs, dev->dbbuf_dbs_dma_addr);
230f9f38e33SHelen Koike 		dev->dbbuf_dbs = NULL;
231f9f38e33SHelen Koike 	}
232f9f38e33SHelen Koike 	if (dev->dbbuf_eis) {
233f9f38e33SHelen Koike 		dma_free_coherent(dev->dev, mem_size,
234f9f38e33SHelen Koike 				  dev->dbbuf_eis, dev->dbbuf_eis_dma_addr);
235f9f38e33SHelen Koike 		dev->dbbuf_eis = NULL;
236f9f38e33SHelen Koike 	}
237f9f38e33SHelen Koike }
238f9f38e33SHelen Koike 
239f9f38e33SHelen Koike static void nvme_dbbuf_init(struct nvme_dev *dev,
240f9f38e33SHelen Koike 			    struct nvme_queue *nvmeq, int qid)
241f9f38e33SHelen Koike {
242f9f38e33SHelen Koike 	if (!dev->dbbuf_dbs || !qid)
243f9f38e33SHelen Koike 		return;
244f9f38e33SHelen Koike 
245f9f38e33SHelen Koike 	nvmeq->dbbuf_sq_db = &dev->dbbuf_dbs[sq_idx(qid, dev->db_stride)];
246f9f38e33SHelen Koike 	nvmeq->dbbuf_cq_db = &dev->dbbuf_dbs[cq_idx(qid, dev->db_stride)];
247f9f38e33SHelen Koike 	nvmeq->dbbuf_sq_ei = &dev->dbbuf_eis[sq_idx(qid, dev->db_stride)];
248f9f38e33SHelen Koike 	nvmeq->dbbuf_cq_ei = &dev->dbbuf_eis[cq_idx(qid, dev->db_stride)];
249f9f38e33SHelen Koike }
250f9f38e33SHelen Koike 
251f9f38e33SHelen Koike static void nvme_dbbuf_set(struct nvme_dev *dev)
252f9f38e33SHelen Koike {
253f9f38e33SHelen Koike 	struct nvme_command c;
254f9f38e33SHelen Koike 
255f9f38e33SHelen Koike 	if (!dev->dbbuf_dbs)
256f9f38e33SHelen Koike 		return;
257f9f38e33SHelen Koike 
258f9f38e33SHelen Koike 	memset(&c, 0, sizeof(c));
259f9f38e33SHelen Koike 	c.dbbuf.opcode = nvme_admin_dbbuf;
260f9f38e33SHelen Koike 	c.dbbuf.prp1 = cpu_to_le64(dev->dbbuf_dbs_dma_addr);
261f9f38e33SHelen Koike 	c.dbbuf.prp2 = cpu_to_le64(dev->dbbuf_eis_dma_addr);
262f9f38e33SHelen Koike 
263f9f38e33SHelen Koike 	if (nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0)) {
2649bdcfb10SChristoph Hellwig 		dev_warn(dev->ctrl.device, "unable to set dbbuf\n");
265f9f38e33SHelen Koike 		/* Free memory and continue on */
266f9f38e33SHelen Koike 		nvme_dbbuf_dma_free(dev);
267f9f38e33SHelen Koike 	}
268f9f38e33SHelen Koike }
269f9f38e33SHelen Koike 
270f9f38e33SHelen Koike static inline int nvme_dbbuf_need_event(u16 event_idx, u16 new_idx, u16 old)
271f9f38e33SHelen Koike {
272f9f38e33SHelen Koike 	return (u16)(new_idx - event_idx - 1) < (u16)(new_idx - old);
273f9f38e33SHelen Koike }
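
/*
 * Worked example of the wrap-safe comparison above: with event_idx 5,
 * old 3 and new_idx 7, (u16)(7 - 5 - 1) = 1 < (u16)(7 - 3) = 4, so the
 * host just stepped past the controller's event index and must ring
 * the real doorbell.  With event_idx 10 the left side wraps to 65532
 * and the check fails, so the MMIO write can be skipped.
 */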
274f9f38e33SHelen Koike 
275f9f38e33SHelen Koike /* Update dbbuf and return true if an MMIO is required */
276f9f38e33SHelen Koike static bool nvme_dbbuf_update_and_check_event(u16 value, u32 *dbbuf_db,
277f9f38e33SHelen Koike 					      volatile u32 *dbbuf_ei)
278f9f38e33SHelen Koike {
279f9f38e33SHelen Koike 	if (dbbuf_db) {
280f9f38e33SHelen Koike 		u16 old_value;
281f9f38e33SHelen Koike 
282f9f38e33SHelen Koike 		/*
283f9f38e33SHelen Koike 		 * Ensure that the queue is written before updating
284f9f38e33SHelen Koike 		 * the doorbell in memory
285f9f38e33SHelen Koike 		 */
286f9f38e33SHelen Koike 		wmb();
287f9f38e33SHelen Koike 
288f9f38e33SHelen Koike 		old_value = *dbbuf_db;
289f9f38e33SHelen Koike 		*dbbuf_db = value;
290f9f38e33SHelen Koike 
291f9f38e33SHelen Koike 		if (!nvme_dbbuf_need_event(*dbbuf_ei, value, old_value))
292f9f38e33SHelen Koike 			return false;
293f9f38e33SHelen Koike 	}
294f9f38e33SHelen Koike 
295f9f38e33SHelen Koike 	return true;
29657dacad5SJay Sternberg }
29757dacad5SJay Sternberg 
29857dacad5SJay Sternberg /*
29957dacad5SJay Sternberg  * Max size of iod being embedded in the request payload
30057dacad5SJay Sternberg  */
30157dacad5SJay Sternberg #define NVME_INT_PAGES		2
3025fd4ce1bSChristoph Hellwig #define NVME_INT_BYTES(dev)	(NVME_INT_PAGES * (dev)->ctrl.page_size)
30357dacad5SJay Sternberg 
30457dacad5SJay Sternberg /*
30557dacad5SJay Sternberg  * Will slightly overestimate the number of pages needed.  This is OK
30657dacad5SJay Sternberg  * as it only leads to a small amount of wasted memory for the lifetime of
30757dacad5SJay Sternberg  * the I/O.
30857dacad5SJay Sternberg  */
30957dacad5SJay Sternberg static int nvme_npages(unsigned size, struct nvme_dev *dev)
31057dacad5SJay Sternberg {
3115fd4ce1bSChristoph Hellwig 	unsigned nprps = DIV_ROUND_UP(size + dev->ctrl.page_size,
3125fd4ce1bSChristoph Hellwig 				      dev->ctrl.page_size);
31357dacad5SJay Sternberg 	return DIV_ROUND_UP(8 * nprps, PAGE_SIZE - 8);
31457dacad5SJay Sternberg }
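
/*
 * Example with 4KB pages: a 128KB transfer needs at most
 * DIV_ROUND_UP(128K + 4K, 4K) = 33 PRP entries (the extra page covers
 * an unaligned start), i.e. 264 bytes of PRP list, which fits in a
 * single list page since each page chains through its final 8-byte
 * slot.
 */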
31557dacad5SJay Sternberg 
316f4800d6dSChristoph Hellwig static unsigned int nvme_iod_alloc_size(struct nvme_dev *dev,
317f4800d6dSChristoph Hellwig 		unsigned int size, unsigned int nseg)
318f4800d6dSChristoph Hellwig {
319f4800d6dSChristoph Hellwig 	return sizeof(__le64 *) * nvme_npages(size, dev) +
320f4800d6dSChristoph Hellwig 			sizeof(struct scatterlist) * nseg;
321f4800d6dSChristoph Hellwig }
322f4800d6dSChristoph Hellwig 
32357dacad5SJay Sternberg static unsigned int nvme_cmd_size(struct nvme_dev *dev)
32457dacad5SJay Sternberg {
325f4800d6dSChristoph Hellwig 	return sizeof(struct nvme_iod) +
326f4800d6dSChristoph Hellwig 		nvme_iod_alloc_size(dev, NVME_INT_BYTES(dev), NVME_INT_PAGES);
32757dacad5SJay Sternberg }
32857dacad5SJay Sternberg 
32957dacad5SJay Sternberg static int nvme_admin_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
33057dacad5SJay Sternberg 				unsigned int hctx_idx)
33157dacad5SJay Sternberg {
33257dacad5SJay Sternberg 	struct nvme_dev *dev = data;
33357dacad5SJay Sternberg 	struct nvme_queue *nvmeq = dev->queues[0];
33457dacad5SJay Sternberg 
33557dacad5SJay Sternberg 	WARN_ON(hctx_idx != 0);
33657dacad5SJay Sternberg 	WARN_ON(dev->admin_tagset.tags[0] != hctx->tags);
33757dacad5SJay Sternberg 	WARN_ON(nvmeq->tags);
33857dacad5SJay Sternberg 
33957dacad5SJay Sternberg 	hctx->driver_data = nvmeq;
34057dacad5SJay Sternberg 	nvmeq->tags = &dev->admin_tagset.tags[0];
34157dacad5SJay Sternberg 	return 0;
34257dacad5SJay Sternberg }
34357dacad5SJay Sternberg 
34457dacad5SJay Sternberg static void nvme_admin_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
34557dacad5SJay Sternberg {
34657dacad5SJay Sternberg 	struct nvme_queue *nvmeq = hctx->driver_data;
34757dacad5SJay Sternberg 
34857dacad5SJay Sternberg 	nvmeq->tags = NULL;
34957dacad5SJay Sternberg }
35057dacad5SJay Sternberg 
351d6296d39SChristoph Hellwig static int nvme_admin_init_request(struct blk_mq_tag_set *set,
352d6296d39SChristoph Hellwig 		struct request *req, unsigned int hctx_idx,
35357dacad5SJay Sternberg 		unsigned int numa_node)
35457dacad5SJay Sternberg {
355d6296d39SChristoph Hellwig 	struct nvme_dev *dev = set->driver_data;
356f4800d6dSChristoph Hellwig 	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
35757dacad5SJay Sternberg 	struct nvme_queue *nvmeq = dev->queues[0];
35857dacad5SJay Sternberg 
35957dacad5SJay Sternberg 	BUG_ON(!nvmeq);
360f4800d6dSChristoph Hellwig 	iod->nvmeq = nvmeq;
36157dacad5SJay Sternberg 	return 0;
36257dacad5SJay Sternberg }
36357dacad5SJay Sternberg 
36457dacad5SJay Sternberg static int nvme_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
36557dacad5SJay Sternberg 			  unsigned int hctx_idx)
36657dacad5SJay Sternberg {
36757dacad5SJay Sternberg 	struct nvme_dev *dev = data;
36857dacad5SJay Sternberg 	struct nvme_queue *nvmeq = dev->queues[hctx_idx + 1];
36957dacad5SJay Sternberg 
37057dacad5SJay Sternberg 	if (!nvmeq->tags)
37157dacad5SJay Sternberg 		nvmeq->tags = &dev->tagset.tags[hctx_idx];
37257dacad5SJay Sternberg 
37357dacad5SJay Sternberg 	WARN_ON(dev->tagset.tags[hctx_idx] != hctx->tags);
37457dacad5SJay Sternberg 	hctx->driver_data = nvmeq;
37557dacad5SJay Sternberg 	return 0;
37657dacad5SJay Sternberg }
37757dacad5SJay Sternberg 
378d6296d39SChristoph Hellwig static int nvme_init_request(struct blk_mq_tag_set *set, struct request *req,
379d6296d39SChristoph Hellwig 		unsigned int hctx_idx, unsigned int numa_node)
38057dacad5SJay Sternberg {
381d6296d39SChristoph Hellwig 	struct nvme_dev *dev = set->driver_data;
382f4800d6dSChristoph Hellwig 	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
38357dacad5SJay Sternberg 	struct nvme_queue *nvmeq = dev->queues[hctx_idx + 1];
38457dacad5SJay Sternberg 
38557dacad5SJay Sternberg 	BUG_ON(!nvmeq);
386f4800d6dSChristoph Hellwig 	iod->nvmeq = nvmeq;
38757dacad5SJay Sternberg 	return 0;
38857dacad5SJay Sternberg }
38957dacad5SJay Sternberg 
390dca51e78SChristoph Hellwig static int nvme_pci_map_queues(struct blk_mq_tag_set *set)
391dca51e78SChristoph Hellwig {
392dca51e78SChristoph Hellwig 	struct nvme_dev *dev = set->driver_data;
393dca51e78SChristoph Hellwig 
394dca51e78SChristoph Hellwig 	return blk_mq_pci_map_queues(set, to_pci_dev(dev->dev));
395dca51e78SChristoph Hellwig }
396dca51e78SChristoph Hellwig 
39757dacad5SJay Sternberg /**
398adf68f21SChristoph Hellwig  * __nvme_submit_cmd() - Copy a command into a queue and ring the doorbell
39957dacad5SJay Sternberg  * @nvmeq: The queue to use
40057dacad5SJay Sternberg  * @cmd: The command to send
40157dacad5SJay Sternberg  *
40257dacad5SJay Sternberg  * Safe to use from interrupt context
40357dacad5SJay Sternberg  */
40457dacad5SJay Sternberg static void __nvme_submit_cmd(struct nvme_queue *nvmeq,
40557dacad5SJay Sternberg 						struct nvme_command *cmd)
40657dacad5SJay Sternberg {
40757dacad5SJay Sternberg 	u16 tail = nvmeq->sq_tail;
40857dacad5SJay Sternberg 
40957dacad5SJay Sternberg 	if (nvmeq->sq_cmds_io)
41057dacad5SJay Sternberg 		memcpy_toio(&nvmeq->sq_cmds_io[tail], cmd, sizeof(*cmd));
41157dacad5SJay Sternberg 	else
41257dacad5SJay Sternberg 		memcpy(&nvmeq->sq_cmds[tail], cmd, sizeof(*cmd));
41357dacad5SJay Sternberg 
41457dacad5SJay Sternberg 	if (++tail == nvmeq->q_depth)
41557dacad5SJay Sternberg 		tail = 0;
416f9f38e33SHelen Koike 	if (nvme_dbbuf_update_and_check_event(tail, nvmeq->dbbuf_sq_db,
417f9f38e33SHelen Koike 					      nvmeq->dbbuf_sq_ei))
41857dacad5SJay Sternberg 		writel(tail, nvmeq->q_db);
41957dacad5SJay Sternberg 	nvmeq->sq_tail = tail;
42057dacad5SJay Sternberg }
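
/*
 * The command lands either in the controller memory buffer (the
 * memcpy_toio path) or in ordinary host memory, depending on where
 * this SQ was allocated.  With shadow doorbells enabled,
 * nvme_dbbuf_update_and_check_event() may report that no MMIO is
 * needed, in which case the writel() of the new tail is skipped.
 */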
42157dacad5SJay Sternberg 
422f4800d6dSChristoph Hellwig static __le64 **iod_list(struct request *req)
42357dacad5SJay Sternberg {
424f4800d6dSChristoph Hellwig 	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
425f9d03f96SChristoph Hellwig 	return (__le64 **)(iod->sg + blk_rq_nr_phys_segments(req));
42657dacad5SJay Sternberg }
42757dacad5SJay Sternberg 
428fc17b653SChristoph Hellwig static blk_status_t nvme_init_iod(struct request *rq, struct nvme_dev *dev)
42957dacad5SJay Sternberg {
430f4800d6dSChristoph Hellwig 	struct nvme_iod *iod = blk_mq_rq_to_pdu(rq);
431f9d03f96SChristoph Hellwig 	int nseg = blk_rq_nr_phys_segments(rq);
432b131c61dSChristoph Hellwig 	unsigned int size = blk_rq_payload_bytes(rq);
433f4800d6dSChristoph Hellwig 
434f4800d6dSChristoph Hellwig 	if (nseg > NVME_INT_PAGES || size > NVME_INT_BYTES(dev)) {
435f4800d6dSChristoph Hellwig 		iod->sg = kmalloc(nvme_iod_alloc_size(dev, size, nseg), GFP_ATOMIC);
436f4800d6dSChristoph Hellwig 		if (!iod->sg)
437fc17b653SChristoph Hellwig 			return BLK_STS_RESOURCE;
438f4800d6dSChristoph Hellwig 	} else {
439f4800d6dSChristoph Hellwig 		iod->sg = iod->inline_sg;
44057dacad5SJay Sternberg 	}
44157dacad5SJay Sternberg 
442f4800d6dSChristoph Hellwig 	iod->aborted = 0;
44357dacad5SJay Sternberg 	iod->npages = -1;
44457dacad5SJay Sternberg 	iod->nents = 0;
445f4800d6dSChristoph Hellwig 	iod->length = size;
446f80ec966SKeith Busch 
447fc17b653SChristoph Hellwig 	return BLK_STS_OK;
44857dacad5SJay Sternberg }
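
/*
 * Requests spanning more than NVME_INT_PAGES pages (or NVME_INT_BYTES
 * of data) get a GFP_ATOMIC scatterlist allocation sized by
 * nvme_iod_alloc_size(); everything smaller reuses the inline_sg
 * storage embedded in the request PDU.  BLK_STS_RESOURCE tells blk-mq
 * to back off and retry the request later.
 */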
44957dacad5SJay Sternberg 
450f4800d6dSChristoph Hellwig static void nvme_free_iod(struct nvme_dev *dev, struct request *req)
45157dacad5SJay Sternberg {
452f4800d6dSChristoph Hellwig 	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
4535fd4ce1bSChristoph Hellwig 	const int last_prp = dev->ctrl.page_size / 8 - 1;
45457dacad5SJay Sternberg 	int i;
455f4800d6dSChristoph Hellwig 	__le64 **list = iod_list(req);
45657dacad5SJay Sternberg 	dma_addr_t prp_dma = iod->first_dma;
45757dacad5SJay Sternberg 
45857dacad5SJay Sternberg 	if (iod->npages == 0)
45957dacad5SJay Sternberg 		dma_pool_free(dev->prp_small_pool, list[0], prp_dma);
46057dacad5SJay Sternberg 	for (i = 0; i < iod->npages; i++) {
46157dacad5SJay Sternberg 		__le64 *prp_list = list[i];
46257dacad5SJay Sternberg 		dma_addr_t next_prp_dma = le64_to_cpu(prp_list[last_prp]);
46357dacad5SJay Sternberg 		dma_pool_free(dev->prp_page_pool, prp_list, prp_dma);
46457dacad5SJay Sternberg 		prp_dma = next_prp_dma;
46557dacad5SJay Sternberg 	}
46657dacad5SJay Sternberg 
467f4800d6dSChristoph Hellwig 	if (iod->sg != iod->inline_sg)
468f4800d6dSChristoph Hellwig 		kfree(iod->sg);
46957dacad5SJay Sternberg }
47057dacad5SJay Sternberg 
47157dacad5SJay Sternberg #ifdef CONFIG_BLK_DEV_INTEGRITY
47257dacad5SJay Sternberg static void nvme_dif_prep(u32 p, u32 v, struct t10_pi_tuple *pi)
47357dacad5SJay Sternberg {
47457dacad5SJay Sternberg 	if (be32_to_cpu(pi->ref_tag) == v)
47557dacad5SJay Sternberg 		pi->ref_tag = cpu_to_be32(p);
47657dacad5SJay Sternberg }
47757dacad5SJay Sternberg 
47857dacad5SJay Sternberg static void nvme_dif_complete(u32 p, u32 v, struct t10_pi_tuple *pi)
47957dacad5SJay Sternberg {
48057dacad5SJay Sternberg 	if (be32_to_cpu(pi->ref_tag) == p)
48157dacad5SJay Sternberg 		pi->ref_tag = cpu_to_be32(v);
48257dacad5SJay Sternberg }
48357dacad5SJay Sternberg 
48457dacad5SJay Sternberg /**
48557dacad5SJay Sternberg  * nvme_dif_remap - remaps ref tags to bip seed and physical lba
48657dacad5SJay Sternberg  *
48757dacad5SJay Sternberg  * The virtual start sector is the one that was originally submitted by the
48857dacad5SJay Sternberg  * block layer. Due to partitioning, MD/DM cloning, etc. the actual physical
48957dacad5SJay Sternberg  * start sector may be different. Remap protection information to match the
49057dacad5SJay Sternberg  * physical LBA on writes, and back to the original seed on reads.
49157dacad5SJay Sternberg  *
49257dacad5SJay Sternberg  * Type 0 and 3 do not have a ref tag, so no remapping required.
49357dacad5SJay Sternberg  */
49457dacad5SJay Sternberg static void nvme_dif_remap(struct request *req,
49557dacad5SJay Sternberg 			void (*dif_swap)(u32 p, u32 v, struct t10_pi_tuple *pi))
49657dacad5SJay Sternberg {
49757dacad5SJay Sternberg 	struct nvme_ns *ns = req->rq_disk->private_data;
49857dacad5SJay Sternberg 	struct bio_integrity_payload *bip;
49957dacad5SJay Sternberg 	struct t10_pi_tuple *pi;
50057dacad5SJay Sternberg 	void *p, *pmap;
50157dacad5SJay Sternberg 	u32 i, nlb, ts, phys, virt;
50257dacad5SJay Sternberg 
50357dacad5SJay Sternberg 	if (!ns->pi_type || ns->pi_type == NVME_NS_DPS_PI_TYPE3)
50457dacad5SJay Sternberg 		return;
50557dacad5SJay Sternberg 
50657dacad5SJay Sternberg 	bip = bio_integrity(req->bio);
50757dacad5SJay Sternberg 	if (!bip)
50857dacad5SJay Sternberg 		return;
50957dacad5SJay Sternberg 
51057dacad5SJay Sternberg 	pmap = kmap_atomic(bip->bip_vec->bv_page) + bip->bip_vec->bv_offset;
51157dacad5SJay Sternberg 
51257dacad5SJay Sternberg 	p = pmap;
51357dacad5SJay Sternberg 	virt = bip_get_seed(bip);
51457dacad5SJay Sternberg 	phys = nvme_block_nr(ns, blk_rq_pos(req));
51557dacad5SJay Sternberg 	nlb = (blk_rq_bytes(req) >> ns->lba_shift);
516ac6fc48cSDan Williams 	ts = ns->disk->queue->integrity.tuple_size;
51757dacad5SJay Sternberg 
51857dacad5SJay Sternberg 	for (i = 0; i < nlb; i++, virt++, phys++) {
51957dacad5SJay Sternberg 		pi = (struct t10_pi_tuple *)p;
52057dacad5SJay Sternberg 		dif_swap(phys, virt, pi);
52157dacad5SJay Sternberg 		p += ts;
52257dacad5SJay Sternberg 	}
52357dacad5SJay Sternberg 	kunmap_atomic(pmap);
52457dacad5SJay Sternberg }
52557dacad5SJay Sternberg #else /* CONFIG_BLK_DEV_INTEGRITY */
52657dacad5SJay Sternberg static void nvme_dif_remap(struct request *req,
52757dacad5SJay Sternberg 			void (*dif_swap)(u32 p, u32 v, struct t10_pi_tuple *pi))
52857dacad5SJay Sternberg {
52957dacad5SJay Sternberg }
53057dacad5SJay Sternberg static void nvme_dif_prep(u32 p, u32 v, struct t10_pi_tuple *pi)
53157dacad5SJay Sternberg {
53257dacad5SJay Sternberg }
53357dacad5SJay Sternberg static void nvme_dif_complete(u32 p, u32 v, struct t10_pi_tuple *pi)
53457dacad5SJay Sternberg {
53557dacad5SJay Sternberg }
53657dacad5SJay Sternberg #endif
53757dacad5SJay Sternberg 
538b131c61dSChristoph Hellwig static bool nvme_setup_prps(struct nvme_dev *dev, struct request *req)
53957dacad5SJay Sternberg {
540f4800d6dSChristoph Hellwig 	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
54157dacad5SJay Sternberg 	struct dma_pool *pool;
542b131c61dSChristoph Hellwig 	int length = blk_rq_payload_bytes(req);
54357dacad5SJay Sternberg 	struct scatterlist *sg = iod->sg;
54457dacad5SJay Sternberg 	int dma_len = sg_dma_len(sg);
54557dacad5SJay Sternberg 	u64 dma_addr = sg_dma_address(sg);
5465fd4ce1bSChristoph Hellwig 	u32 page_size = dev->ctrl.page_size;
54757dacad5SJay Sternberg 	int offset = dma_addr & (page_size - 1);
54857dacad5SJay Sternberg 	__le64 *prp_list;
549f4800d6dSChristoph Hellwig 	__le64 **list = iod_list(req);
55057dacad5SJay Sternberg 	dma_addr_t prp_dma;
55157dacad5SJay Sternberg 	int nprps, i;
55257dacad5SJay Sternberg 
55357dacad5SJay Sternberg 	length -= (page_size - offset);
55457dacad5SJay Sternberg 	if (length <= 0)
55569d2b571SChristoph Hellwig 		return true;
55657dacad5SJay Sternberg 
55757dacad5SJay Sternberg 	dma_len -= (page_size - offset);
55857dacad5SJay Sternberg 	if (dma_len) {
55957dacad5SJay Sternberg 		dma_addr += (page_size - offset);
56057dacad5SJay Sternberg 	} else {
56157dacad5SJay Sternberg 		sg = sg_next(sg);
56257dacad5SJay Sternberg 		dma_addr = sg_dma_address(sg);
56357dacad5SJay Sternberg 		dma_len = sg_dma_len(sg);
56457dacad5SJay Sternberg 	}
56557dacad5SJay Sternberg 
56657dacad5SJay Sternberg 	if (length <= page_size) {
56757dacad5SJay Sternberg 		iod->first_dma = dma_addr;
56869d2b571SChristoph Hellwig 		return true;
56957dacad5SJay Sternberg 	}
57057dacad5SJay Sternberg 
57157dacad5SJay Sternberg 	nprps = DIV_ROUND_UP(length, page_size);
57257dacad5SJay Sternberg 	if (nprps <= (256 / 8)) {
57357dacad5SJay Sternberg 		pool = dev->prp_small_pool;
57457dacad5SJay Sternberg 		iod->npages = 0;
57557dacad5SJay Sternberg 	} else {
57657dacad5SJay Sternberg 		pool = dev->prp_page_pool;
57757dacad5SJay Sternberg 		iod->npages = 1;
57857dacad5SJay Sternberg 	}
57957dacad5SJay Sternberg 
58069d2b571SChristoph Hellwig 	prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
58157dacad5SJay Sternberg 	if (!prp_list) {
58257dacad5SJay Sternberg 		iod->first_dma = dma_addr;
58357dacad5SJay Sternberg 		iod->npages = -1;
58469d2b571SChristoph Hellwig 		return false;
58557dacad5SJay Sternberg 	}
58657dacad5SJay Sternberg 	list[0] = prp_list;
58757dacad5SJay Sternberg 	iod->first_dma = prp_dma;
58857dacad5SJay Sternberg 	i = 0;
58957dacad5SJay Sternberg 	for (;;) {
59057dacad5SJay Sternberg 		if (i == page_size >> 3) {
59157dacad5SJay Sternberg 			__le64 *old_prp_list = prp_list;
59269d2b571SChristoph Hellwig 			prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
59357dacad5SJay Sternberg 			if (!prp_list)
59469d2b571SChristoph Hellwig 				return false;
59557dacad5SJay Sternberg 			list[iod->npages++] = prp_list;
59657dacad5SJay Sternberg 			prp_list[0] = old_prp_list[i - 1];
59757dacad5SJay Sternberg 			old_prp_list[i - 1] = cpu_to_le64(prp_dma);
59857dacad5SJay Sternberg 			i = 1;
59957dacad5SJay Sternberg 		}
60057dacad5SJay Sternberg 		prp_list[i++] = cpu_to_le64(dma_addr);
60157dacad5SJay Sternberg 		dma_len -= page_size;
60257dacad5SJay Sternberg 		dma_addr += page_size;
60357dacad5SJay Sternberg 		length -= page_size;
60457dacad5SJay Sternberg 		if (length <= 0)
60557dacad5SJay Sternberg 			break;
60657dacad5SJay Sternberg 		if (dma_len > 0)
60757dacad5SJay Sternberg 			continue;
60857dacad5SJay Sternberg 		BUG_ON(dma_len < 0);
60957dacad5SJay Sternberg 		sg = sg_next(sg);
61057dacad5SJay Sternberg 		dma_addr = sg_dma_address(sg);
61157dacad5SJay Sternberg 		dma_len = sg_dma_len(sg);
61257dacad5SJay Sternberg 	}
61357dacad5SJay Sternberg 
61469d2b571SChristoph Hellwig 	return true;
61557dacad5SJay Sternberg }
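
/*
 * PRP construction in short: PRP1 always covers the first data page
 * (possibly at an offset); if the remainder fits in one more page,
 * iod->first_dma is that page and becomes PRP2 in nvme_map_data().
 * Otherwise first_dma points at a PRP list, allocated from the small
 * pool when 32 or fewer entries are needed and from the page pool
 * otherwise, and each full list page chains to the next through its
 * last slot.
 */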
61657dacad5SJay Sternberg 
617fc17b653SChristoph Hellwig static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
618b131c61dSChristoph Hellwig 		struct nvme_command *cmnd)
61957dacad5SJay Sternberg {
620f4800d6dSChristoph Hellwig 	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
621ba1ca37eSChristoph Hellwig 	struct request_queue *q = req->q;
622ba1ca37eSChristoph Hellwig 	enum dma_data_direction dma_dir = rq_data_dir(req) ?
623ba1ca37eSChristoph Hellwig 			DMA_TO_DEVICE : DMA_FROM_DEVICE;
624fc17b653SChristoph Hellwig 	blk_status_t ret = BLK_STS_IOERR;
62557dacad5SJay Sternberg 
626f9d03f96SChristoph Hellwig 	sg_init_table(iod->sg, blk_rq_nr_phys_segments(req));
627ba1ca37eSChristoph Hellwig 	iod->nents = blk_rq_map_sg(q, req, iod->sg);
628ba1ca37eSChristoph Hellwig 	if (!iod->nents)
629ba1ca37eSChristoph Hellwig 		goto out;
630ba1ca37eSChristoph Hellwig 
631fc17b653SChristoph Hellwig 	ret = BLK_STS_RESOURCE;
6322b6b535dSMauricio Faria de Oliveira 	if (!dma_map_sg_attrs(dev->dev, iod->sg, iod->nents, dma_dir,
6332b6b535dSMauricio Faria de Oliveira 				DMA_ATTR_NO_WARN))
634ba1ca37eSChristoph Hellwig 		goto out;
635ba1ca37eSChristoph Hellwig 
636b131c61dSChristoph Hellwig 	if (!nvme_setup_prps(dev, req))
637ba1ca37eSChristoph Hellwig 		goto out_unmap;
638ba1ca37eSChristoph Hellwig 
639fc17b653SChristoph Hellwig 	ret = BLK_STS_IOERR;
640ba1ca37eSChristoph Hellwig 	if (blk_integrity_rq(req)) {
641ba1ca37eSChristoph Hellwig 		if (blk_rq_count_integrity_sg(q, req->bio) != 1)
642ba1ca37eSChristoph Hellwig 			goto out_unmap;
643ba1ca37eSChristoph Hellwig 
644bf684057SChristoph Hellwig 		sg_init_table(&iod->meta_sg, 1);
645bf684057SChristoph Hellwig 		if (blk_rq_map_integrity_sg(q, req->bio, &iod->meta_sg) != 1)
646ba1ca37eSChristoph Hellwig 			goto out_unmap;
647ba1ca37eSChristoph Hellwig 
648ba1ca37eSChristoph Hellwig 		if (rq_data_dir(req))
649ba1ca37eSChristoph Hellwig 			nvme_dif_remap(req, nvme_dif_prep);
650ba1ca37eSChristoph Hellwig 
651bf684057SChristoph Hellwig 		if (!dma_map_sg(dev->dev, &iod->meta_sg, 1, dma_dir))
652ba1ca37eSChristoph Hellwig 			goto out_unmap;
65357dacad5SJay Sternberg 	}
65457dacad5SJay Sternberg 
655eb793e2cSChristoph Hellwig 	cmnd->rw.dptr.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
656eb793e2cSChristoph Hellwig 	cmnd->rw.dptr.prp2 = cpu_to_le64(iod->first_dma);
657ba1ca37eSChristoph Hellwig 	if (blk_integrity_rq(req))
658bf684057SChristoph Hellwig 		cmnd->rw.metadata = cpu_to_le64(sg_dma_address(&iod->meta_sg));
659fc17b653SChristoph Hellwig 	return BLK_STS_OK;
660ba1ca37eSChristoph Hellwig 
661ba1ca37eSChristoph Hellwig out_unmap:
662ba1ca37eSChristoph Hellwig 	dma_unmap_sg(dev->dev, iod->sg, iod->nents, dma_dir);
663ba1ca37eSChristoph Hellwig out:
664ba1ca37eSChristoph Hellwig 	return ret;
66557dacad5SJay Sternberg }
66657dacad5SJay Sternberg 
667f4800d6dSChristoph Hellwig static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
668d4f6c3abSChristoph Hellwig {
669f4800d6dSChristoph Hellwig 	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
670d4f6c3abSChristoph Hellwig 	enum dma_data_direction dma_dir = rq_data_dir(req) ?
671d4f6c3abSChristoph Hellwig 			DMA_TO_DEVICE : DMA_FROM_DEVICE;
672d4f6c3abSChristoph Hellwig 
673d4f6c3abSChristoph Hellwig 	if (iod->nents) {
674d4f6c3abSChristoph Hellwig 		dma_unmap_sg(dev->dev, iod->sg, iod->nents, dma_dir);
675d4f6c3abSChristoph Hellwig 		if (blk_integrity_rq(req)) {
676d4f6c3abSChristoph Hellwig 			if (!rq_data_dir(req))
677d4f6c3abSChristoph Hellwig 				nvme_dif_remap(req, nvme_dif_complete);
678bf684057SChristoph Hellwig 			dma_unmap_sg(dev->dev, &iod->meta_sg, 1, dma_dir);
679d4f6c3abSChristoph Hellwig 		}
680d4f6c3abSChristoph Hellwig 	}
681d4f6c3abSChristoph Hellwig 
682f9d03f96SChristoph Hellwig 	nvme_cleanup_cmd(req);
683f4800d6dSChristoph Hellwig 	nvme_free_iod(dev, req);
68457dacad5SJay Sternberg }
68557dacad5SJay Sternberg 
68657dacad5SJay Sternberg /*
68757dacad5SJay Sternberg  * NOTE: ns is NULL when called on the admin queue.
68857dacad5SJay Sternberg  */
689fc17b653SChristoph Hellwig static blk_status_t nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
69057dacad5SJay Sternberg 			 const struct blk_mq_queue_data *bd)
69157dacad5SJay Sternberg {
69257dacad5SJay Sternberg 	struct nvme_ns *ns = hctx->queue->queuedata;
69357dacad5SJay Sternberg 	struct nvme_queue *nvmeq = hctx->driver_data;
69457dacad5SJay Sternberg 	struct nvme_dev *dev = nvmeq->dev;
69557dacad5SJay Sternberg 	struct request *req = bd->rq;
696ba1ca37eSChristoph Hellwig 	struct nvme_command cmnd;
697*ebe6d874SChristoph Hellwig 	blk_status_t ret;
69857dacad5SJay Sternberg 
699f9d03f96SChristoph Hellwig 	ret = nvme_setup_cmd(ns, req, &cmnd);
700fc17b653SChristoph Hellwig 	if (ret)
701f4800d6dSChristoph Hellwig 		return ret;
70257dacad5SJay Sternberg 
703b131c61dSChristoph Hellwig 	ret = nvme_init_iod(req, dev);
704fc17b653SChristoph Hellwig 	if (ret)
705f9d03f96SChristoph Hellwig 		goto out_free_cmd;
70657dacad5SJay Sternberg 
707fc17b653SChristoph Hellwig 	if (blk_rq_nr_phys_segments(req)) {
708b131c61dSChristoph Hellwig 		ret = nvme_map_data(dev, req, &cmnd);
709fc17b653SChristoph Hellwig 		if (ret)
710f9d03f96SChristoph Hellwig 			goto out_cleanup_iod;
711fc17b653SChristoph Hellwig 	}
712ba1ca37eSChristoph Hellwig 
713aae239e1SChristoph Hellwig 	blk_mq_start_request(req);
714ba1ca37eSChristoph Hellwig 
715ba1ca37eSChristoph Hellwig 	spin_lock_irq(&nvmeq->q_lock);
716ae1fba20SKeith Busch 	if (unlikely(nvmeq->cq_vector < 0)) {
717fc17b653SChristoph Hellwig 		ret = BLK_STS_IOERR;
718ae1fba20SKeith Busch 		spin_unlock_irq(&nvmeq->q_lock);
719f9d03f96SChristoph Hellwig 		goto out_cleanup_iod;
720ae1fba20SKeith Busch 	}
721ba1ca37eSChristoph Hellwig 	__nvme_submit_cmd(nvmeq, &cmnd);
72257dacad5SJay Sternberg 	nvme_process_cq(nvmeq);
72357dacad5SJay Sternberg 	spin_unlock_irq(&nvmeq->q_lock);
724fc17b653SChristoph Hellwig 	return BLK_STS_OK;
725f9d03f96SChristoph Hellwig out_cleanup_iod:
726f4800d6dSChristoph Hellwig 	nvme_free_iod(dev, req);
727f9d03f96SChristoph Hellwig out_free_cmd:
728f9d03f96SChristoph Hellwig 	nvme_cleanup_cmd(req);
729ba1ca37eSChristoph Hellwig 	return ret;
73057dacad5SJay Sternberg }
73157dacad5SJay Sternberg 
73277f02a7aSChristoph Hellwig static void nvme_pci_complete_rq(struct request *req)
733eee417b0SChristoph Hellwig {
734f4800d6dSChristoph Hellwig 	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
735eee417b0SChristoph Hellwig 
73677f02a7aSChristoph Hellwig 	nvme_unmap_data(iod->nvmeq->dev, req);
73777f02a7aSChristoph Hellwig 	nvme_complete_rq(req);
73857dacad5SJay Sternberg }
73957dacad5SJay Sternberg 
740d783e0bdSMarta Rybczynska /* We read the CQE phase first to check if the rest of the entry is valid */
741d783e0bdSMarta Rybczynska static inline bool nvme_cqe_valid(struct nvme_queue *nvmeq, u16 head,
742d783e0bdSMarta Rybczynska 		u16 phase)
743d783e0bdSMarta Rybczynska {
744d783e0bdSMarta Rybczynska 	return (le16_to_cpu(nvmeq->cqes[head].status) & 1) == phase;
745d783e0bdSMarta Rybczynska }
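
/*
 * The phase tag (bit 0 of the CQE status) is flipped by the controller
 * each time it wraps around the completion queue, so an entry whose
 * phase still matches nvmeq->cq_phase is newly posted.
 * __nvme_process_cq() toggles cq_phase whenever its head wraps,
 * keeping host and controller in step.
 */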
746d783e0bdSMarta Rybczynska 
747a0fa9647SJens Axboe static void __nvme_process_cq(struct nvme_queue *nvmeq, unsigned int *tag)
74857dacad5SJay Sternberg {
74957dacad5SJay Sternberg 	u16 head, phase;
75057dacad5SJay Sternberg 
75157dacad5SJay Sternberg 	head = nvmeq->cq_head;
75257dacad5SJay Sternberg 	phase = nvmeq->cq_phase;
75357dacad5SJay Sternberg 
754d783e0bdSMarta Rybczynska 	while (nvme_cqe_valid(nvmeq, head, phase)) {
75557dacad5SJay Sternberg 		struct nvme_completion cqe = nvmeq->cqes[head];
756eee417b0SChristoph Hellwig 		struct request *req;
757adf68f21SChristoph Hellwig 
75857dacad5SJay Sternberg 		if (++head == nvmeq->q_depth) {
75957dacad5SJay Sternberg 			head = 0;
76057dacad5SJay Sternberg 			phase = !phase;
76157dacad5SJay Sternberg 		}
762adf68f21SChristoph Hellwig 
763a0fa9647SJens Axboe 		if (tag && *tag == cqe.command_id)
764a0fa9647SJens Axboe 			*tag = -1;
765adf68f21SChristoph Hellwig 
766aae239e1SChristoph Hellwig 		if (unlikely(cqe.command_id >= nvmeq->q_depth)) {
7671b3c47c1SSagi Grimberg 			dev_warn(nvmeq->dev->ctrl.device,
768aae239e1SChristoph Hellwig 				"invalid id %d completed on queue %d\n",
769aae239e1SChristoph Hellwig 				cqe.command_id, le16_to_cpu(cqe.sq_id));
770aae239e1SChristoph Hellwig 			continue;
771aae239e1SChristoph Hellwig 		}
772aae239e1SChristoph Hellwig 
773adf68f21SChristoph Hellwig 		/*
774adf68f21SChristoph Hellwig 		 * AEN requests are special as they don't time out and can
775adf68f21SChristoph Hellwig 		 * survive any kind of queue freeze and often don't respond to
776adf68f21SChristoph Hellwig 		 * aborts.  We don't even bother to allocate a struct request
777adf68f21SChristoph Hellwig 		 * for them but rather special case them here.
778adf68f21SChristoph Hellwig 		 */
779adf68f21SChristoph Hellwig 		if (unlikely(nvmeq->qid == 0 &&
780adf68f21SChristoph Hellwig 				cqe.command_id >= NVME_AQ_BLKMQ_DEPTH)) {
7817bf58533SChristoph Hellwig 			nvme_complete_async_event(&nvmeq->dev->ctrl,
7827bf58533SChristoph Hellwig 					cqe.status, &cqe.result);
783adf68f21SChristoph Hellwig 			continue;
784adf68f21SChristoph Hellwig 		}
785adf68f21SChristoph Hellwig 
786eee417b0SChristoph Hellwig 		req = blk_mq_tag_to_rq(*nvmeq->tags, cqe.command_id);
78727fa9bc5SChristoph Hellwig 		nvme_end_request(req, cqe.status, cqe.result);
78857dacad5SJay Sternberg 	}
78957dacad5SJay Sternberg 
79057dacad5SJay Sternberg 	if (head == nvmeq->cq_head && phase == nvmeq->cq_phase)
791a0fa9647SJens Axboe 		return;
79257dacad5SJay Sternberg 
793604e8c8dSKeith Busch 	if (likely(nvmeq->cq_vector >= 0))
794f9f38e33SHelen Koike 		if (nvme_dbbuf_update_and_check_event(head, nvmeq->dbbuf_cq_db,
795f9f38e33SHelen Koike 						      nvmeq->dbbuf_cq_ei))
79657dacad5SJay Sternberg 			writel(head, nvmeq->q_db + nvmeq->dev->db_stride);
79757dacad5SJay Sternberg 	nvmeq->cq_head = head;
79857dacad5SJay Sternberg 	nvmeq->cq_phase = phase;
79957dacad5SJay Sternberg 
80057dacad5SJay Sternberg 	nvmeq->cqe_seen = 1;
801a0fa9647SJens Axboe }
802a0fa9647SJens Axboe 
803a0fa9647SJens Axboe static void nvme_process_cq(struct nvme_queue *nvmeq)
804a0fa9647SJens Axboe {
805a0fa9647SJens Axboe 	__nvme_process_cq(nvmeq, NULL);
80657dacad5SJay Sternberg }
80757dacad5SJay Sternberg 
80857dacad5SJay Sternberg static irqreturn_t nvme_irq(int irq, void *data)
80957dacad5SJay Sternberg {
81057dacad5SJay Sternberg 	irqreturn_t result;
81157dacad5SJay Sternberg 	struct nvme_queue *nvmeq = data;
81257dacad5SJay Sternberg 	spin_lock(&nvmeq->q_lock);
81357dacad5SJay Sternberg 	nvme_process_cq(nvmeq);
81457dacad5SJay Sternberg 	result = nvmeq->cqe_seen ? IRQ_HANDLED : IRQ_NONE;
81557dacad5SJay Sternberg 	nvmeq->cqe_seen = 0;
81657dacad5SJay Sternberg 	spin_unlock(&nvmeq->q_lock);
81757dacad5SJay Sternberg 	return result;
81857dacad5SJay Sternberg }
81957dacad5SJay Sternberg 
82057dacad5SJay Sternberg static irqreturn_t nvme_irq_check(int irq, void *data)
82157dacad5SJay Sternberg {
82257dacad5SJay Sternberg 	struct nvme_queue *nvmeq = data;
823d783e0bdSMarta Rybczynska 	if (nvme_cqe_valid(nvmeq, nvmeq->cq_head, nvmeq->cq_phase))
82457dacad5SJay Sternberg 		return IRQ_WAKE_THREAD;
825d783e0bdSMarta Rybczynska 	return IRQ_NONE;
82657dacad5SJay Sternberg }
82757dacad5SJay Sternberg 
8287776db1cSKeith Busch static int __nvme_poll(struct nvme_queue *nvmeq, unsigned int tag)
829a0fa9647SJens Axboe {
830d783e0bdSMarta Rybczynska 	if (nvme_cqe_valid(nvmeq, nvmeq->cq_head, nvmeq->cq_phase)) {
831a0fa9647SJens Axboe 		spin_lock_irq(&nvmeq->q_lock);
832a0fa9647SJens Axboe 		__nvme_process_cq(nvmeq, &tag);
833a0fa9647SJens Axboe 		spin_unlock_irq(&nvmeq->q_lock);
834a0fa9647SJens Axboe 
835a0fa9647SJens Axboe 		if (tag == -1)
836a0fa9647SJens Axboe 			return 1;
837a0fa9647SJens Axboe 	}
838a0fa9647SJens Axboe 
839a0fa9647SJens Axboe 	return 0;
840a0fa9647SJens Axboe }
841a0fa9647SJens Axboe 
8427776db1cSKeith Busch static int nvme_poll(struct blk_mq_hw_ctx *hctx, unsigned int tag)
8437776db1cSKeith Busch {
8447776db1cSKeith Busch 	struct nvme_queue *nvmeq = hctx->driver_data;
8457776db1cSKeith Busch 
8467776db1cSKeith Busch 	return __nvme_poll(nvmeq, tag);
8477776db1cSKeith Busch }
8487776db1cSKeith Busch 
849f866fc42SChristoph Hellwig static void nvme_pci_submit_async_event(struct nvme_ctrl *ctrl, int aer_idx)
85057dacad5SJay Sternberg {
851f866fc42SChristoph Hellwig 	struct nvme_dev *dev = to_nvme_dev(ctrl);
8529396dec9SChristoph Hellwig 	struct nvme_queue *nvmeq = dev->queues[0];
85357dacad5SJay Sternberg 	struct nvme_command c;
85457dacad5SJay Sternberg 
85557dacad5SJay Sternberg 	memset(&c, 0, sizeof(c));
85657dacad5SJay Sternberg 	c.common.opcode = nvme_admin_async_event;
857f866fc42SChristoph Hellwig 	c.common.command_id = NVME_AQ_BLKMQ_DEPTH + aer_idx;
85857dacad5SJay Sternberg 
8599396dec9SChristoph Hellwig 	spin_lock_irq(&nvmeq->q_lock);
8609396dec9SChristoph Hellwig 	__nvme_submit_cmd(nvmeq, &c);
8619396dec9SChristoph Hellwig 	spin_unlock_irq(&nvmeq->q_lock);
86257dacad5SJay Sternberg }
86357dacad5SJay Sternberg 
86457dacad5SJay Sternberg static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id)
86557dacad5SJay Sternberg {
86657dacad5SJay Sternberg 	struct nvme_command c;
86757dacad5SJay Sternberg 
86857dacad5SJay Sternberg 	memset(&c, 0, sizeof(c));
86957dacad5SJay Sternberg 	c.delete_queue.opcode = opcode;
87057dacad5SJay Sternberg 	c.delete_queue.qid = cpu_to_le16(id);
87157dacad5SJay Sternberg 
8721c63dc66SChristoph Hellwig 	return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0);
87357dacad5SJay Sternberg }
87457dacad5SJay Sternberg 
87557dacad5SJay Sternberg static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid,
87657dacad5SJay Sternberg 						struct nvme_queue *nvmeq)
87757dacad5SJay Sternberg {
87857dacad5SJay Sternberg 	struct nvme_command c;
87957dacad5SJay Sternberg 	int flags = NVME_QUEUE_PHYS_CONTIG | NVME_CQ_IRQ_ENABLED;
88057dacad5SJay Sternberg 
88157dacad5SJay Sternberg 	/*
88257dacad5SJay Sternberg 	 * Note: we (ab)use the fact that the prp fields survive if no data
88357dacad5SJay Sternberg 	 * is attached to the request.
88457dacad5SJay Sternberg 	 */
88557dacad5SJay Sternberg 	memset(&c, 0, sizeof(c));
88657dacad5SJay Sternberg 	c.create_cq.opcode = nvme_admin_create_cq;
88757dacad5SJay Sternberg 	c.create_cq.prp1 = cpu_to_le64(nvmeq->cq_dma_addr);
88857dacad5SJay Sternberg 	c.create_cq.cqid = cpu_to_le16(qid);
88957dacad5SJay Sternberg 	c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
89057dacad5SJay Sternberg 	c.create_cq.cq_flags = cpu_to_le16(flags);
89157dacad5SJay Sternberg 	c.create_cq.irq_vector = cpu_to_le16(nvmeq->cq_vector);
89257dacad5SJay Sternberg 
8931c63dc66SChristoph Hellwig 	return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0);
89457dacad5SJay Sternberg }
89557dacad5SJay Sternberg 
89657dacad5SJay Sternberg static int adapter_alloc_sq(struct nvme_dev *dev, u16 qid,
89757dacad5SJay Sternberg 						struct nvme_queue *nvmeq)
89857dacad5SJay Sternberg {
89957dacad5SJay Sternberg 	struct nvme_command c;
90081c1cd98SKeith Busch 	int flags = NVME_QUEUE_PHYS_CONTIG;
90157dacad5SJay Sternberg 
90257dacad5SJay Sternberg 	/*
90357dacad5SJay Sternberg 	 * Note: we (ab)use the fact that the prp fields survive if no data
90457dacad5SJay Sternberg 	 * is attached to the request.
90557dacad5SJay Sternberg 	 */
90657dacad5SJay Sternberg 	memset(&c, 0, sizeof(c));
90757dacad5SJay Sternberg 	c.create_sq.opcode = nvme_admin_create_sq;
90857dacad5SJay Sternberg 	c.create_sq.prp1 = cpu_to_le64(nvmeq->sq_dma_addr);
90957dacad5SJay Sternberg 	c.create_sq.sqid = cpu_to_le16(qid);
91057dacad5SJay Sternberg 	c.create_sq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
91157dacad5SJay Sternberg 	c.create_sq.sq_flags = cpu_to_le16(flags);
91257dacad5SJay Sternberg 	c.create_sq.cqid = cpu_to_le16(qid);
91357dacad5SJay Sternberg 
9141c63dc66SChristoph Hellwig 	return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0);
91557dacad5SJay Sternberg }
91657dacad5SJay Sternberg 
91757dacad5SJay Sternberg static int adapter_delete_cq(struct nvme_dev *dev, u16 cqid)
91857dacad5SJay Sternberg {
91957dacad5SJay Sternberg 	return adapter_delete_queue(dev, nvme_admin_delete_cq, cqid);
92057dacad5SJay Sternberg }
92157dacad5SJay Sternberg 
92257dacad5SJay Sternberg static int adapter_delete_sq(struct nvme_dev *dev, u16 sqid)
92357dacad5SJay Sternberg {
92457dacad5SJay Sternberg 	return adapter_delete_queue(dev, nvme_admin_delete_sq, sqid);
92557dacad5SJay Sternberg }
92657dacad5SJay Sternberg 
9272a842acaSChristoph Hellwig static void abort_endio(struct request *req, blk_status_t error)
92857dacad5SJay Sternberg {
929f4800d6dSChristoph Hellwig 	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
930f4800d6dSChristoph Hellwig 	struct nvme_queue *nvmeq = iod->nvmeq;
93157dacad5SJay Sternberg 
93227fa9bc5SChristoph Hellwig 	dev_warn(nvmeq->dev->ctrl.device,
93327fa9bc5SChristoph Hellwig 		 "Abort status: 0x%x", nvme_req(req)->status);
934e7a2a87dSChristoph Hellwig 	atomic_inc(&nvmeq->dev->ctrl.abort_limit);
935e7a2a87dSChristoph Hellwig 	blk_mq_free_request(req);
93657dacad5SJay Sternberg }
93757dacad5SJay Sternberg 
938b2a0eb1aSKeith Busch static bool nvme_should_reset(struct nvme_dev *dev, u32 csts)
939b2a0eb1aSKeith Busch {
940b2a0eb1aSKeith Busch 
941b2a0eb1aSKeith Busch 	/* If true, indicates loss of adapter communication, possibly by a
942b2a0eb1aSKeith Busch 	 * NVMe Subsystem reset.
943b2a0eb1aSKeith Busch 	 */
944b2a0eb1aSKeith Busch 	bool nssro = dev->subsystem && (csts & NVME_CSTS_NSSRO);
945b2a0eb1aSKeith Busch 
946b2a0eb1aSKeith Busch 	/* If there is a reset ongoing, we shouldn't reset again. */
947b2a0eb1aSKeith Busch 	if (dev->ctrl.state == NVME_CTRL_RESETTING)
948b2a0eb1aSKeith Busch 		return false;
949b2a0eb1aSKeith Busch 
950b2a0eb1aSKeith Busch 	/* We shouldn't reset unless the controller is in a fatal error state
951b2a0eb1aSKeith Busch 	 * _or_ if we lost the communication with it.
952b2a0eb1aSKeith Busch 	 */
953b2a0eb1aSKeith Busch 	if (!(csts & NVME_CSTS_CFS) && !nssro)
954b2a0eb1aSKeith Busch 		return false;
955b2a0eb1aSKeith Busch 
956b2a0eb1aSKeith Busch 	/* If PCI error recovery process is happening, we cannot reset or
957b2a0eb1aSKeith Busch 	 * the recovery mechanism will surely fail.
958b2a0eb1aSKeith Busch 	 */
959b2a0eb1aSKeith Busch 	if (pci_channel_offline(to_pci_dev(dev->dev)))
960b2a0eb1aSKeith Busch 		return false;
961b2a0eb1aSKeith Busch 
962b2a0eb1aSKeith Busch 	return true;
963b2a0eb1aSKeith Busch }
964b2a0eb1aSKeith Busch 
965b2a0eb1aSKeith Busch static void nvme_warn_reset(struct nvme_dev *dev, u32 csts)
966b2a0eb1aSKeith Busch {
967b2a0eb1aSKeith Busch 	/* Read a config register to help see what died. */
968b2a0eb1aSKeith Busch 	u16 pci_status;
969b2a0eb1aSKeith Busch 	int result;
970b2a0eb1aSKeith Busch 
971b2a0eb1aSKeith Busch 	result = pci_read_config_word(to_pci_dev(dev->dev), PCI_STATUS,
972b2a0eb1aSKeith Busch 				      &pci_status);
973b2a0eb1aSKeith Busch 	if (result == PCIBIOS_SUCCESSFUL)
974b2a0eb1aSKeith Busch 		dev_warn(dev->ctrl.device,
975b2a0eb1aSKeith Busch 			 "controller is down; will reset: CSTS=0x%x, PCI_STATUS=0x%hx\n",
976b2a0eb1aSKeith Busch 			 csts, pci_status);
977b2a0eb1aSKeith Busch 	else
978b2a0eb1aSKeith Busch 		dev_warn(dev->ctrl.device,
979b2a0eb1aSKeith Busch 			 "controller is down; will reset: CSTS=0x%x, PCI_STATUS read failed (%d)\n",
980b2a0eb1aSKeith Busch 			 csts, result);
981b2a0eb1aSKeith Busch }
982b2a0eb1aSKeith Busch 
98331c7c7d2SChristoph Hellwig static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
98457dacad5SJay Sternberg {
985f4800d6dSChristoph Hellwig 	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
986f4800d6dSChristoph Hellwig 	struct nvme_queue *nvmeq = iod->nvmeq;
98757dacad5SJay Sternberg 	struct nvme_dev *dev = nvmeq->dev;
98857dacad5SJay Sternberg 	struct request *abort_req;
98957dacad5SJay Sternberg 	struct nvme_command cmd;
990b2a0eb1aSKeith Busch 	u32 csts = readl(dev->bar + NVME_REG_CSTS);
991b2a0eb1aSKeith Busch 
992b2a0eb1aSKeith Busch 	/*
993b2a0eb1aSKeith Busch 	 * Reset immediately if the controller is failed
994b2a0eb1aSKeith Busch 	 */
995b2a0eb1aSKeith Busch 	if (nvme_should_reset(dev, csts)) {
996b2a0eb1aSKeith Busch 		nvme_warn_reset(dev, csts);
997b2a0eb1aSKeith Busch 		nvme_dev_disable(dev, false);
998b2a0eb1aSKeith Busch 		nvme_reset(dev);
999b2a0eb1aSKeith Busch 		return BLK_EH_HANDLED;
1000b2a0eb1aSKeith Busch 	}
100157dacad5SJay Sternberg 
100231c7c7d2SChristoph Hellwig 	/*
10037776db1cSKeith Busch 	 * Did we miss an interrupt?
10047776db1cSKeith Busch 	 */
10057776db1cSKeith Busch 	if (__nvme_poll(nvmeq, req->tag)) {
10067776db1cSKeith Busch 		dev_warn(dev->ctrl.device,
10077776db1cSKeith Busch 			 "I/O %d QID %d timeout, completion polled\n",
10087776db1cSKeith Busch 			 req->tag, nvmeq->qid);
10097776db1cSKeith Busch 		return BLK_EH_HANDLED;
10107776db1cSKeith Busch 	}
10117776db1cSKeith Busch 
10127776db1cSKeith Busch 	/*
1013fd634f41SChristoph Hellwig 	 * Shutdown immediately if controller times out while starting. The
1014fd634f41SChristoph Hellwig 	 * reset work will see the pci device disabled when it gets the forced
1015fd634f41SChristoph Hellwig 	 * cancellation error. All outstanding requests are completed on
1016fd634f41SChristoph Hellwig 	 * shutdown, so we return BLK_EH_HANDLED.
1017fd634f41SChristoph Hellwig 	 */
1018bb8d261eSChristoph Hellwig 	if (dev->ctrl.state == NVME_CTRL_RESETTING) {
10191b3c47c1SSagi Grimberg 		dev_warn(dev->ctrl.device,
1020fd634f41SChristoph Hellwig 			 "I/O %d QID %d timeout, disable controller\n",
1021fd634f41SChristoph Hellwig 			 req->tag, nvmeq->qid);
1022a5cdb68cSKeith Busch 		nvme_dev_disable(dev, false);
102327fa9bc5SChristoph Hellwig 		nvme_req(req)->flags |= NVME_REQ_CANCELLED;
1024fd634f41SChristoph Hellwig 		return BLK_EH_HANDLED;
1025fd634f41SChristoph Hellwig 	}
1026fd634f41SChristoph Hellwig 
1027fd634f41SChristoph Hellwig 	/*
1028e1569a16SKeith Busch 	 * Shutdown the controller immediately and schedule a reset if the
1029e1569a16SKeith Busch 	 * command was already aborted once before and still hasn't been
1030e1569a16SKeith Busch 	 * returned to the driver, or if this is the admin queue.
103131c7c7d2SChristoph Hellwig 	 */
1032f4800d6dSChristoph Hellwig 	if (!nvmeq->qid || iod->aborted) {
10331b3c47c1SSagi Grimberg 		dev_warn(dev->ctrl.device,
103457dacad5SJay Sternberg 			 "I/O %d QID %d timeout, reset controller\n",
103557dacad5SJay Sternberg 			 req->tag, nvmeq->qid);
1036a5cdb68cSKeith Busch 		nvme_dev_disable(dev, false);
1037c5f6ce97SKeith Busch 		nvme_reset(dev);
1038e1569a16SKeith Busch 
1039e1569a16SKeith Busch 		/*
1040e1569a16SKeith Busch 		 * Mark the request as handled, since the inline shutdown
1041e1569a16SKeith Busch 		 * forces all outstanding requests to complete.
1042e1569a16SKeith Busch 		 */
104327fa9bc5SChristoph Hellwig 		nvme_req(req)->flags |= NVME_REQ_CANCELLED;
1044e1569a16SKeith Busch 		return BLK_EH_HANDLED;
104557dacad5SJay Sternberg 	}
104657dacad5SJay Sternberg 
1047e7a2a87dSChristoph Hellwig 	if (atomic_dec_return(&dev->ctrl.abort_limit) < 0) {
1048e7a2a87dSChristoph Hellwig 		atomic_inc(&dev->ctrl.abort_limit);
1049e7a2a87dSChristoph Hellwig 		return BLK_EH_RESET_TIMER;
1050e7a2a87dSChristoph Hellwig 	}
10517bf7d778SKeith Busch 	iod->aborted = 1;
105257dacad5SJay Sternberg 
105357dacad5SJay Sternberg 	memset(&cmd, 0, sizeof(cmd));
105457dacad5SJay Sternberg 	cmd.abort.opcode = nvme_admin_abort_cmd;
105557dacad5SJay Sternberg 	cmd.abort.cid = req->tag;
105657dacad5SJay Sternberg 	cmd.abort.sqid = cpu_to_le16(nvmeq->qid);
105757dacad5SJay Sternberg 
10581b3c47c1SSagi Grimberg 	dev_warn(nvmeq->dev->ctrl.device,
10591b3c47c1SSagi Grimberg 		"I/O %d QID %d timeout, aborting\n",
106057dacad5SJay Sternberg 		 req->tag, nvmeq->qid);
1061e7a2a87dSChristoph Hellwig 
1062e7a2a87dSChristoph Hellwig 	abort_req = nvme_alloc_request(dev->ctrl.admin_q, &cmd,
1063eb71f435SChristoph Hellwig 			BLK_MQ_REQ_NOWAIT, NVME_QID_ANY);
10646bf25d16SChristoph Hellwig 	if (IS_ERR(abort_req)) {
10656bf25d16SChristoph Hellwig 		atomic_inc(&dev->ctrl.abort_limit);
106631c7c7d2SChristoph Hellwig 		return BLK_EH_RESET_TIMER;
106757dacad5SJay Sternberg 	}
106857dacad5SJay Sternberg 
1069e7a2a87dSChristoph Hellwig 	abort_req->timeout = ADMIN_TIMEOUT;
1070e7a2a87dSChristoph Hellwig 	abort_req->end_io_data = NULL;
1071e7a2a87dSChristoph Hellwig 	blk_execute_rq_nowait(abort_req->q, NULL, abort_req, 0, abort_endio);
107257dacad5SJay Sternberg 
107357dacad5SJay Sternberg 	/*
107457dacad5SJay Sternberg 	 * The aborted request will be completed once the controller handles
107557dacad5SJay Sternberg 	 * the abort, so just re-arm the timer.  If the command times out a
107657dacad5SJay Sternberg 	 * second time, the controller is treated as faulty and gets reset.
107757dacad5SJay Sternberg 	 */
107857dacad5SJay Sternberg 	return BLK_EH_RESET_TIMER;
107957dacad5SJay Sternberg }
108057dacad5SJay Sternberg 
108157dacad5SJay Sternberg static void nvme_free_queue(struct nvme_queue *nvmeq)
108257dacad5SJay Sternberg {
108357dacad5SJay Sternberg 	dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth),
108457dacad5SJay Sternberg 				(void *)nvmeq->cqes, nvmeq->cq_dma_addr);
108557dacad5SJay Sternberg 	if (nvmeq->sq_cmds)
108657dacad5SJay Sternberg 		dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth),
108757dacad5SJay Sternberg 					nvmeq->sq_cmds, nvmeq->sq_dma_addr);
108857dacad5SJay Sternberg 	kfree(nvmeq);
108957dacad5SJay Sternberg }
109057dacad5SJay Sternberg 
109157dacad5SJay Sternberg static void nvme_free_queues(struct nvme_dev *dev, int lowest)
109257dacad5SJay Sternberg {
109357dacad5SJay Sternberg 	int i;
109457dacad5SJay Sternberg 
109557dacad5SJay Sternberg 	for (i = dev->queue_count - 1; i >= lowest; i--) {
109657dacad5SJay Sternberg 		struct nvme_queue *nvmeq = dev->queues[i];
109757dacad5SJay Sternberg 		dev->queue_count--;
109857dacad5SJay Sternberg 		dev->queues[i] = NULL;
109957dacad5SJay Sternberg 		nvme_free_queue(nvmeq);
110057dacad5SJay Sternberg 	}
110157dacad5SJay Sternberg }
110257dacad5SJay Sternberg 
110357dacad5SJay Sternberg /**
110457dacad5SJay Sternberg  * nvme_suspend_queue - put queue into suspended state
110557dacad5SJay Sternberg  * @nvmeq: queue to suspend
110657dacad5SJay Sternberg  */
110757dacad5SJay Sternberg static int nvme_suspend_queue(struct nvme_queue *nvmeq)
110857dacad5SJay Sternberg {
110957dacad5SJay Sternberg 	int vector;
111057dacad5SJay Sternberg 
111157dacad5SJay Sternberg 	spin_lock_irq(&nvmeq->q_lock);
111257dacad5SJay Sternberg 	if (nvmeq->cq_vector == -1) {
111357dacad5SJay Sternberg 		spin_unlock_irq(&nvmeq->q_lock);
111457dacad5SJay Sternberg 		return 1;
111557dacad5SJay Sternberg 	}
11160ff199cbSChristoph Hellwig 	vector = nvmeq->cq_vector;
111757dacad5SJay Sternberg 	nvmeq->dev->online_queues--;
111857dacad5SJay Sternberg 	nvmeq->cq_vector = -1;
111957dacad5SJay Sternberg 	spin_unlock_irq(&nvmeq->q_lock);
112057dacad5SJay Sternberg 
11211c63dc66SChristoph Hellwig 	if (!nvmeq->qid && nvmeq->dev->ctrl.admin_q)
112225646264SKeith Busch 		blk_mq_stop_hw_queues(nvmeq->dev->ctrl.admin_q);
112357dacad5SJay Sternberg 
11240ff199cbSChristoph Hellwig 	pci_free_irq(to_pci_dev(nvmeq->dev->dev), vector, nvmeq);
112557dacad5SJay Sternberg 
112657dacad5SJay Sternberg 	return 0;
112757dacad5SJay Sternberg }
112857dacad5SJay Sternberg 
1129a5cdb68cSKeith Busch static void nvme_disable_admin_queue(struct nvme_dev *dev, bool shutdown)
113057dacad5SJay Sternberg {
1131a5cdb68cSKeith Busch 	struct nvme_queue *nvmeq = dev->queues[0];
113257dacad5SJay Sternberg 
113357dacad5SJay Sternberg 	if (!nvmeq)
113457dacad5SJay Sternberg 		return;
113557dacad5SJay Sternberg 	if (nvme_suspend_queue(nvmeq))
113657dacad5SJay Sternberg 		return;
113757dacad5SJay Sternberg 
1138a5cdb68cSKeith Busch 	if (shutdown)
1139a5cdb68cSKeith Busch 		nvme_shutdown_ctrl(&dev->ctrl);
1140a5cdb68cSKeith Busch 	else
1141a5cdb68cSKeith Busch 		nvme_disable_ctrl(&dev->ctrl, lo_hi_readq(
1142a5cdb68cSKeith Busch 						dev->bar + NVME_REG_CAP));
114357dacad5SJay Sternberg 
114457dacad5SJay Sternberg 	spin_lock_irq(&nvmeq->q_lock);
114557dacad5SJay Sternberg 	nvme_process_cq(nvmeq);
114657dacad5SJay Sternberg 	spin_unlock_irq(&nvmeq->q_lock);
114757dacad5SJay Sternberg }
114857dacad5SJay Sternberg 
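/*
 * When submission queues live in the CMB, every queue's SQ must fit in the
 * controller buffer.  As an illustrative example (numbers assumed, not taken
 * from this code): with a 256 KiB CMB, 8 I/O queues and 64-byte commands, a
 * depth of 1024 would need 8 * 64 KiB = 512 KiB, so the depth is cut to
 * (256 KiB / 8) / 64 = 512 entries, with each queue's share page-aligned.
 */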
114957dacad5SJay Sternberg static int nvme_cmb_qdepth(struct nvme_dev *dev, int nr_io_queues,
115057dacad5SJay Sternberg 				int entry_size)
115157dacad5SJay Sternberg {
115257dacad5SJay Sternberg 	int q_depth = dev->q_depth;
11535fd4ce1bSChristoph Hellwig 	unsigned q_size_aligned = roundup(q_depth * entry_size,
11545fd4ce1bSChristoph Hellwig 					  dev->ctrl.page_size);
115557dacad5SJay Sternberg 
115657dacad5SJay Sternberg 	if (q_size_aligned * nr_io_queues > dev->cmb_size) {
115757dacad5SJay Sternberg 		u64 mem_per_q = div_u64(dev->cmb_size, nr_io_queues);
11585fd4ce1bSChristoph Hellwig 		mem_per_q = round_down(mem_per_q, dev->ctrl.page_size);
115957dacad5SJay Sternberg 		q_depth = div_u64(mem_per_q, entry_size);
116057dacad5SJay Sternberg 
116157dacad5SJay Sternberg 		/*
116257dacad5SJay Sternberg 		 * Ensure the reduced q_depth is above some threshold where it
116357dacad5SJay Sternberg 		 * would be better to map queues in system memory with the
116457dacad5SJay Sternberg 		 * original depth
116557dacad5SJay Sternberg 		 */
116657dacad5SJay Sternberg 		if (q_depth < 64)
116757dacad5SJay Sternberg 			return -ENOMEM;
116857dacad5SJay Sternberg 	}
116957dacad5SJay Sternberg 
117057dacad5SJay Sternberg 	return q_depth;
117157dacad5SJay Sternberg }
117257dacad5SJay Sternberg 
117357dacad5SJay Sternberg static int nvme_alloc_sq_cmds(struct nvme_dev *dev, struct nvme_queue *nvmeq,
117457dacad5SJay Sternberg 				int qid, int depth)
117557dacad5SJay Sternberg {
117657dacad5SJay Sternberg 	if (qid && dev->cmb && use_cmb_sqes && NVME_CMB_SQS(dev->cmbsz)) {
11775fd4ce1bSChristoph Hellwig 		unsigned offset = (qid - 1) * roundup(SQ_SIZE(depth),
11785fd4ce1bSChristoph Hellwig 						      dev->ctrl.page_size);
117957dacad5SJay Sternberg 		nvmeq->sq_dma_addr = dev->cmb_dma_addr + offset;
118057dacad5SJay Sternberg 		nvmeq->sq_cmds_io = dev->cmb + offset;
118157dacad5SJay Sternberg 	} else {
118257dacad5SJay Sternberg 		nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(depth),
118357dacad5SJay Sternberg 					&nvmeq->sq_dma_addr, GFP_KERNEL);
118457dacad5SJay Sternberg 		if (!nvmeq->sq_cmds)
118557dacad5SJay Sternberg 			return -ENOMEM;
118657dacad5SJay Sternberg 	}
118757dacad5SJay Sternberg 
118857dacad5SJay Sternberg 	return 0;
118957dacad5SJay Sternberg }
119057dacad5SJay Sternberg 
119157dacad5SJay Sternberg static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
1192d3af3ecdSShaohua Li 							int depth, int node)
119357dacad5SJay Sternberg {
1194d3af3ecdSShaohua Li 	struct nvme_queue *nvmeq = kzalloc_node(sizeof(*nvmeq), GFP_KERNEL,
1195d3af3ecdSShaohua Li 							node);
119657dacad5SJay Sternberg 	if (!nvmeq)
119757dacad5SJay Sternberg 		return NULL;
119857dacad5SJay Sternberg 
119957dacad5SJay Sternberg 	nvmeq->cqes = dma_zalloc_coherent(dev->dev, CQ_SIZE(depth),
120057dacad5SJay Sternberg 					  &nvmeq->cq_dma_addr, GFP_KERNEL);
120157dacad5SJay Sternberg 	if (!nvmeq->cqes)
120257dacad5SJay Sternberg 		goto free_nvmeq;
120357dacad5SJay Sternberg 
120457dacad5SJay Sternberg 	if (nvme_alloc_sq_cmds(dev, nvmeq, qid, depth))
120557dacad5SJay Sternberg 		goto free_cqdma;
120657dacad5SJay Sternberg 
120757dacad5SJay Sternberg 	nvmeq->q_dmadev = dev->dev;
120857dacad5SJay Sternberg 	nvmeq->dev = dev;
120957dacad5SJay Sternberg 	spin_lock_init(&nvmeq->q_lock);
121057dacad5SJay Sternberg 	nvmeq->cq_head = 0;
121157dacad5SJay Sternberg 	nvmeq->cq_phase = 1;
121257dacad5SJay Sternberg 	nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
121357dacad5SJay Sternberg 	nvmeq->q_depth = depth;
121457dacad5SJay Sternberg 	nvmeq->qid = qid;
121557dacad5SJay Sternberg 	nvmeq->cq_vector = -1;
121657dacad5SJay Sternberg 	dev->queues[qid] = nvmeq;
121757dacad5SJay Sternberg 	dev->queue_count++;
121857dacad5SJay Sternberg 
121957dacad5SJay Sternberg 	return nvmeq;
122057dacad5SJay Sternberg 
122157dacad5SJay Sternberg  free_cqdma:
122257dacad5SJay Sternberg 	dma_free_coherent(dev->dev, CQ_SIZE(depth), (void *)nvmeq->cqes,
122357dacad5SJay Sternberg 							nvmeq->cq_dma_addr);
122457dacad5SJay Sternberg  free_nvmeq:
122557dacad5SJay Sternberg 	kfree(nvmeq);
122657dacad5SJay Sternberg 	return NULL;
122757dacad5SJay Sternberg }
122857dacad5SJay Sternberg 
1229dca51e78SChristoph Hellwig static int queue_request_irq(struct nvme_queue *nvmeq)
123057dacad5SJay Sternberg {
12310ff199cbSChristoph Hellwig 	struct pci_dev *pdev = to_pci_dev(nvmeq->dev->dev);
12320ff199cbSChristoph Hellwig 	int nr = nvmeq->dev->ctrl.instance;
12330ff199cbSChristoph Hellwig 
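	/*
	 * With threaded interrupts the hard handler (nvme_irq_check) only
	 * decides whether there is work and wakes the thread that runs
	 * nvme_irq; otherwise nvme_irq processes completions directly in
	 * hard interrupt context.
	 */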
12340ff199cbSChristoph Hellwig 	if (use_threaded_interrupts) {
12350ff199cbSChristoph Hellwig 		return pci_request_irq(pdev, nvmeq->cq_vector, nvme_irq_check,
12360ff199cbSChristoph Hellwig 				nvme_irq, nvmeq, "nvme%dq%d", nr, nvmeq->qid);
12370ff199cbSChristoph Hellwig 	} else {
12380ff199cbSChristoph Hellwig 		return pci_request_irq(pdev, nvmeq->cq_vector, nvme_irq,
12390ff199cbSChristoph Hellwig 				NULL, nvmeq, "nvme%dq%d", nr, nvmeq->qid);
12400ff199cbSChristoph Hellwig 	}
124157dacad5SJay Sternberg }
124257dacad5SJay Sternberg 
124357dacad5SJay Sternberg static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid)
124457dacad5SJay Sternberg {
124557dacad5SJay Sternberg 	struct nvme_dev *dev = nvmeq->dev;
124657dacad5SJay Sternberg 
124757dacad5SJay Sternberg 	spin_lock_irq(&nvmeq->q_lock);
124857dacad5SJay Sternberg 	nvmeq->sq_tail = 0;
124957dacad5SJay Sternberg 	nvmeq->cq_head = 0;
125057dacad5SJay Sternberg 	nvmeq->cq_phase = 1;
125157dacad5SJay Sternberg 	nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
125257dacad5SJay Sternberg 	memset((void *)nvmeq->cqes, 0, CQ_SIZE(nvmeq->q_depth));
1253f9f38e33SHelen Koike 	nvme_dbbuf_init(dev, nvmeq, qid);
125457dacad5SJay Sternberg 	dev->online_queues++;
125557dacad5SJay Sternberg 	spin_unlock_irq(&nvmeq->q_lock);
125657dacad5SJay Sternberg }
125757dacad5SJay Sternberg 
125857dacad5SJay Sternberg static int nvme_create_queue(struct nvme_queue *nvmeq, int qid)
125957dacad5SJay Sternberg {
126057dacad5SJay Sternberg 	struct nvme_dev *dev = nvmeq->dev;
126157dacad5SJay Sternberg 	int result;
126257dacad5SJay Sternberg 
126357dacad5SJay Sternberg 	nvmeq->cq_vector = qid - 1;
126457dacad5SJay Sternberg 	result = adapter_alloc_cq(dev, qid, nvmeq);
126557dacad5SJay Sternberg 	if (result < 0)
126657dacad5SJay Sternberg 		return result;
126757dacad5SJay Sternberg 
126857dacad5SJay Sternberg 	result = adapter_alloc_sq(dev, qid, nvmeq);
126957dacad5SJay Sternberg 	if (result < 0)
127057dacad5SJay Sternberg 		goto release_cq;
127157dacad5SJay Sternberg 
1272dca51e78SChristoph Hellwig 	result = queue_request_irq(nvmeq);
127357dacad5SJay Sternberg 	if (result < 0)
127457dacad5SJay Sternberg 		goto release_sq;
127557dacad5SJay Sternberg 
127657dacad5SJay Sternberg 	nvme_init_queue(nvmeq, qid);
127757dacad5SJay Sternberg 	return result;
127857dacad5SJay Sternberg 
127957dacad5SJay Sternberg  release_sq:
128057dacad5SJay Sternberg 	adapter_delete_sq(dev, qid);
128157dacad5SJay Sternberg  release_cq:
128257dacad5SJay Sternberg 	adapter_delete_cq(dev, qid);
128357dacad5SJay Sternberg 	return result;
128457dacad5SJay Sternberg }
128557dacad5SJay Sternberg 
1286f363b089SEric Biggers static const struct blk_mq_ops nvme_mq_admin_ops = {
128757dacad5SJay Sternberg 	.queue_rq	= nvme_queue_rq,
128877f02a7aSChristoph Hellwig 	.complete	= nvme_pci_complete_rq,
128957dacad5SJay Sternberg 	.init_hctx	= nvme_admin_init_hctx,
129057dacad5SJay Sternberg 	.exit_hctx      = nvme_admin_exit_hctx,
129157dacad5SJay Sternberg 	.init_request	= nvme_admin_init_request,
129257dacad5SJay Sternberg 	.timeout	= nvme_timeout,
129357dacad5SJay Sternberg };
129457dacad5SJay Sternberg 
1295f363b089SEric Biggers static const struct blk_mq_ops nvme_mq_ops = {
129657dacad5SJay Sternberg 	.queue_rq	= nvme_queue_rq,
129777f02a7aSChristoph Hellwig 	.complete	= nvme_pci_complete_rq,
129857dacad5SJay Sternberg 	.init_hctx	= nvme_init_hctx,
129957dacad5SJay Sternberg 	.init_request	= nvme_init_request,
1300dca51e78SChristoph Hellwig 	.map_queues	= nvme_pci_map_queues,
130157dacad5SJay Sternberg 	.timeout	= nvme_timeout,
1302a0fa9647SJens Axboe 	.poll		= nvme_poll,
130357dacad5SJay Sternberg };
130457dacad5SJay Sternberg 
130557dacad5SJay Sternberg static void nvme_dev_remove_admin(struct nvme_dev *dev)
130657dacad5SJay Sternberg {
13071c63dc66SChristoph Hellwig 	if (dev->ctrl.admin_q && !blk_queue_dying(dev->ctrl.admin_q)) {
130869d9a99cSKeith Busch 		/*
130969d9a99cSKeith Busch 		 * If the controller was reset during removal, it's possible
131069d9a99cSKeith Busch 		 * user requests may be waiting on a stopped queue. Start the
131169d9a99cSKeith Busch 		 * queue to flush these to completion.
131269d9a99cSKeith Busch 		 */
131369d9a99cSKeith Busch 		blk_mq_start_stopped_hw_queues(dev->ctrl.admin_q, true);
13141c63dc66SChristoph Hellwig 		blk_cleanup_queue(dev->ctrl.admin_q);
131557dacad5SJay Sternberg 		blk_mq_free_tag_set(&dev->admin_tagset);
131657dacad5SJay Sternberg 	}
131757dacad5SJay Sternberg }
131857dacad5SJay Sternberg 
131957dacad5SJay Sternberg static int nvme_alloc_admin_tags(struct nvme_dev *dev)
132057dacad5SJay Sternberg {
13211c63dc66SChristoph Hellwig 	if (!dev->ctrl.admin_q) {
132257dacad5SJay Sternberg 		dev->admin_tagset.ops = &nvme_mq_admin_ops;
132357dacad5SJay Sternberg 		dev->admin_tagset.nr_hw_queues = 1;
1324e3e9d50cSKeith Busch 
1325e3e9d50cSKeith Busch 		/*
1326e3e9d50cSKeith Busch 		 * Subtract one to leave an empty queue entry for 'Full Queue'
1327e3e9d50cSKeith Busch 		 * condition. See NVM-Express 1.2 specification, section 4.1.2.
1328e3e9d50cSKeith Busch 		 */
1329e3e9d50cSKeith Busch 		dev->admin_tagset.queue_depth = NVME_AQ_BLKMQ_DEPTH - 1;
133057dacad5SJay Sternberg 		dev->admin_tagset.timeout = ADMIN_TIMEOUT;
133157dacad5SJay Sternberg 		dev->admin_tagset.numa_node = dev_to_node(dev->dev);
133257dacad5SJay Sternberg 		dev->admin_tagset.cmd_size = nvme_cmd_size(dev);
1333d3484991SJens Axboe 		dev->admin_tagset.flags = BLK_MQ_F_NO_SCHED;
133457dacad5SJay Sternberg 		dev->admin_tagset.driver_data = dev;
133557dacad5SJay Sternberg 
133657dacad5SJay Sternberg 		if (blk_mq_alloc_tag_set(&dev->admin_tagset))
133757dacad5SJay Sternberg 			return -ENOMEM;
133857dacad5SJay Sternberg 
13391c63dc66SChristoph Hellwig 		dev->ctrl.admin_q = blk_mq_init_queue(&dev->admin_tagset);
13401c63dc66SChristoph Hellwig 		if (IS_ERR(dev->ctrl.admin_q)) {
134157dacad5SJay Sternberg 			blk_mq_free_tag_set(&dev->admin_tagset);
134257dacad5SJay Sternberg 			return -ENOMEM;
134357dacad5SJay Sternberg 		}
13441c63dc66SChristoph Hellwig 		if (!blk_get_queue(dev->ctrl.admin_q)) {
134557dacad5SJay Sternberg 			nvme_dev_remove_admin(dev);
13461c63dc66SChristoph Hellwig 			dev->ctrl.admin_q = NULL;
134757dacad5SJay Sternberg 			return -ENODEV;
134857dacad5SJay Sternberg 		}
134957dacad5SJay Sternberg 	} else
135025646264SKeith Busch 		blk_mq_start_stopped_hw_queues(dev->ctrl.admin_q, true);
135157dacad5SJay Sternberg 
135257dacad5SJay Sternberg 	return 0;
135357dacad5SJay Sternberg }
135457dacad5SJay Sternberg 
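/*
 * The doorbell region starts at NVME_REG_DBS (0x1000) and each queue uses
 * one submission and one completion doorbell of (4 << CAP.DSTRD) bytes
 * each, i.e. 8 * db_stride bytes per queue including the admin queue.  For
 * example, 31 I/O queues with a stride of 1 need 0x1000 + 32 * 8 = 0x1100
 * bytes of BAR space.
 */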
135597f6ef64SXu Yu static unsigned long db_bar_size(struct nvme_dev *dev, unsigned nr_io_queues)
135697f6ef64SXu Yu {
135797f6ef64SXu Yu 	return NVME_REG_DBS + ((nr_io_queues + 1) * 8 * dev->db_stride);
135897f6ef64SXu Yu }
135997f6ef64SXu Yu 
136097f6ef64SXu Yu static int nvme_remap_bar(struct nvme_dev *dev, unsigned long size)
136197f6ef64SXu Yu {
136297f6ef64SXu Yu 	struct pci_dev *pdev = to_pci_dev(dev->dev);
136397f6ef64SXu Yu 
136497f6ef64SXu Yu 	if (size <= dev->bar_mapped_size)
136597f6ef64SXu Yu 		return 0;
136697f6ef64SXu Yu 	if (size > pci_resource_len(pdev, 0))
136797f6ef64SXu Yu 		return -ENOMEM;
136897f6ef64SXu Yu 	if (dev->bar)
136997f6ef64SXu Yu 		iounmap(dev->bar);
137097f6ef64SXu Yu 	dev->bar = ioremap(pci_resource_start(pdev, 0), size);
137197f6ef64SXu Yu 	if (!dev->bar) {
137297f6ef64SXu Yu 		dev->bar_mapped_size = 0;
137397f6ef64SXu Yu 		return -ENOMEM;
137497f6ef64SXu Yu 	}
137597f6ef64SXu Yu 	dev->bar_mapped_size = size;
137697f6ef64SXu Yu 	dev->dbs = dev->bar + NVME_REG_DBS;
137797f6ef64SXu Yu 
137897f6ef64SXu Yu 	return 0;
137997f6ef64SXu Yu }
138097f6ef64SXu Yu 
138157dacad5SJay Sternberg static int nvme_configure_admin_queue(struct nvme_dev *dev)
138257dacad5SJay Sternberg {
138357dacad5SJay Sternberg 	int result;
138457dacad5SJay Sternberg 	u32 aqa;
13857a67cbeaSChristoph Hellwig 	u64 cap = lo_hi_readq(dev->bar + NVME_REG_CAP);
138657dacad5SJay Sternberg 	struct nvme_queue *nvmeq;
138757dacad5SJay Sternberg 
138897f6ef64SXu Yu 	result = nvme_remap_bar(dev, db_bar_size(dev, 0));
138997f6ef64SXu Yu 	if (result < 0)
139097f6ef64SXu Yu 		return result;
139197f6ef64SXu Yu 
13928ef2074dSGabriel Krisman Bertazi 	dev->subsystem = readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 1, 0) ?
139357dacad5SJay Sternberg 						NVME_CAP_NSSRC(cap) : 0;
139457dacad5SJay Sternberg 
13957a67cbeaSChristoph Hellwig 	if (dev->subsystem &&
13967a67cbeaSChristoph Hellwig 	    (readl(dev->bar + NVME_REG_CSTS) & NVME_CSTS_NSSRO))
13977a67cbeaSChristoph Hellwig 		writel(NVME_CSTS_NSSRO, dev->bar + NVME_REG_CSTS);
139857dacad5SJay Sternberg 
13995fd4ce1bSChristoph Hellwig 	result = nvme_disable_ctrl(&dev->ctrl, cap);
140057dacad5SJay Sternberg 	if (result < 0)
140157dacad5SJay Sternberg 		return result;
140257dacad5SJay Sternberg 
140357dacad5SJay Sternberg 	nvmeq = dev->queues[0];
140457dacad5SJay Sternberg 	if (!nvmeq) {
1405d3af3ecdSShaohua Li 		nvmeq = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH,
1406d3af3ecdSShaohua Li 					dev_to_node(dev->dev));
140757dacad5SJay Sternberg 		if (!nvmeq)
140857dacad5SJay Sternberg 			return -ENOMEM;
140957dacad5SJay Sternberg 	}
141057dacad5SJay Sternberg 
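	/*
	 * AQA holds both admin queue sizes as 0's based values: the
	 * submission queue size in bits 11:0 and the completion queue size
	 * in bits 27:16, hence the shift-and-or below.
	 */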
141157dacad5SJay Sternberg 	aqa = nvmeq->q_depth - 1;
141257dacad5SJay Sternberg 	aqa |= aqa << 16;
141357dacad5SJay Sternberg 
14147a67cbeaSChristoph Hellwig 	writel(aqa, dev->bar + NVME_REG_AQA);
14157a67cbeaSChristoph Hellwig 	lo_hi_writeq(nvmeq->sq_dma_addr, dev->bar + NVME_REG_ASQ);
14167a67cbeaSChristoph Hellwig 	lo_hi_writeq(nvmeq->cq_dma_addr, dev->bar + NVME_REG_ACQ);
141757dacad5SJay Sternberg 
14185fd4ce1bSChristoph Hellwig 	result = nvme_enable_ctrl(&dev->ctrl, cap);
141957dacad5SJay Sternberg 	if (result)
1420d4875622SKeith Busch 		return result;
142157dacad5SJay Sternberg 
142257dacad5SJay Sternberg 	nvmeq->cq_vector = 0;
1423dca51e78SChristoph Hellwig 	result = queue_request_irq(nvmeq);
142457dacad5SJay Sternberg 	if (result) {
142557dacad5SJay Sternberg 		nvmeq->cq_vector = -1;
1426d4875622SKeith Busch 		return result;
142757dacad5SJay Sternberg 	}
142857dacad5SJay Sternberg 
142957dacad5SJay Sternberg 	return result;
143057dacad5SJay Sternberg }
143157dacad5SJay Sternberg 
1432749941f2SChristoph Hellwig static int nvme_create_io_queues(struct nvme_dev *dev)
143357dacad5SJay Sternberg {
1434949928c1SKeith Busch 	unsigned i, max;
1435749941f2SChristoph Hellwig 	int ret = 0;
143657dacad5SJay Sternberg 
1437749941f2SChristoph Hellwig 	for (i = dev->queue_count; i <= dev->max_qid; i++) {
1438d3af3ecdSShaohua Li 		/* vector == qid - 1, match nvme_create_queue */
1439d3af3ecdSShaohua Li 		if (!nvme_alloc_queue(dev, i, dev->q_depth,
1440d3af3ecdSShaohua Li 		     pci_irq_get_node(to_pci_dev(dev->dev), i - 1))) {
1441749941f2SChristoph Hellwig 			ret = -ENOMEM;
144257dacad5SJay Sternberg 			break;
1443749941f2SChristoph Hellwig 		}
1444749941f2SChristoph Hellwig 	}
144557dacad5SJay Sternberg 
1446949928c1SKeith Busch 	max = min(dev->max_qid, dev->queue_count - 1);
1447949928c1SKeith Busch 	for (i = dev->online_queues; i <= max; i++) {
1448749941f2SChristoph Hellwig 		ret = nvme_create_queue(dev->queues[i], i);
1449d4875622SKeith Busch 		if (ret)
145057dacad5SJay Sternberg 			break;
145157dacad5SJay Sternberg 	}
145257dacad5SJay Sternberg 
1453749941f2SChristoph Hellwig 	/*
1454749941f2SChristoph Hellwig 	 * Ignore failing Create SQ/CQ commands, we can continue with less
1455749941f2SChristoph Hellwig 	 * than the desired amount of queues, and even a controller without
1456749941f2SChristoph Hellwig 	 * I/O queues can still be used to issue admin commands.  This might
1457749941f2SChristoph Hellwig 	 * be useful to upgrade a buggy firmware for example.
1458749941f2SChristoph Hellwig 	 */
1459749941f2SChristoph Hellwig 	return ret >= 0 ? 0 : ret;
146057dacad5SJay Sternberg }
146157dacad5SJay Sternberg 
1462202021c1SStephen Bates static ssize_t nvme_cmb_show(struct device *dev,
1463202021c1SStephen Bates 			     struct device_attribute *attr,
1464202021c1SStephen Bates 			     char *buf)
1465202021c1SStephen Bates {
1466202021c1SStephen Bates 	struct nvme_dev *ndev = to_nvme_dev(dev_get_drvdata(dev));
1467202021c1SStephen Bates 
1468c965809cSStephen Bates 	return scnprintf(buf, PAGE_SIZE, "cmbloc : x%08x\ncmbsz  : x%08x\n",
1469202021c1SStephen Bates 		       ndev->cmbloc, ndev->cmbsz);
1470202021c1SStephen Bates }
1471202021c1SStephen Bates static DEVICE_ATTR(cmb, S_IRUGO, nvme_cmb_show, NULL);
1472202021c1SStephen Bates 
147357dacad5SJay Sternberg static void __iomem *nvme_map_cmb(struct nvme_dev *dev)
147457dacad5SJay Sternberg {
147557dacad5SJay Sternberg 	u64 szu, size, offset;
147657dacad5SJay Sternberg 	resource_size_t bar_size;
147757dacad5SJay Sternberg 	struct pci_dev *pdev = to_pci_dev(dev->dev);
147857dacad5SJay Sternberg 	void __iomem *cmb;
147957dacad5SJay Sternberg 	dma_addr_t dma_addr;
148057dacad5SJay Sternberg 
14817a67cbeaSChristoph Hellwig 	dev->cmbsz = readl(dev->bar + NVME_REG_CMBSZ);
148257dacad5SJay Sternberg 	if (!(NVME_CMB_SZ(dev->cmbsz)))
148357dacad5SJay Sternberg 		return NULL;
1484202021c1SStephen Bates 	dev->cmbloc = readl(dev->bar + NVME_REG_CMBLOC);
148557dacad5SJay Sternberg 
1486202021c1SStephen Bates 	if (!use_cmb_sqes)
1487202021c1SStephen Bates 		return NULL;
148857dacad5SJay Sternberg 
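	/*
	 * CMBSZ.SZU encodes the size granularity as 4 KiB << (4 * SZU), i.e.
	 * 4 KiB, 64 KiB, 1 MiB, ...; both the CMB size (SZ) and the CMBLOC
	 * offset (OFST) are given in multiples of that unit.
	 */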
148957dacad5SJay Sternberg 	szu = (u64)1 << (12 + 4 * NVME_CMB_SZU(dev->cmbsz));
149057dacad5SJay Sternberg 	size = szu * NVME_CMB_SZ(dev->cmbsz);
1491202021c1SStephen Bates 	offset = szu * NVME_CMB_OFST(dev->cmbloc);
1492202021c1SStephen Bates 	bar_size = pci_resource_len(pdev, NVME_CMB_BIR(dev->cmbloc));
149357dacad5SJay Sternberg 
149457dacad5SJay Sternberg 	if (offset > bar_size)
149557dacad5SJay Sternberg 		return NULL;
149657dacad5SJay Sternberg 
149757dacad5SJay Sternberg 	/*
149857dacad5SJay Sternberg 	 * Controllers may support a CMB size larger than their BAR,
149957dacad5SJay Sternberg 	 * for example, due to being behind a bridge. Reduce the CMB to
150057dacad5SJay Sternberg 	 * the reported size of the BAR
150157dacad5SJay Sternberg 	 */
150257dacad5SJay Sternberg 	if (size > bar_size - offset)
150357dacad5SJay Sternberg 		size = bar_size - offset;
150457dacad5SJay Sternberg 
1505202021c1SStephen Bates 	dma_addr = pci_resource_start(pdev, NVME_CMB_BIR(dev->cmbloc)) + offset;
150657dacad5SJay Sternberg 	cmb = ioremap_wc(dma_addr, size);
150757dacad5SJay Sternberg 	if (!cmb)
150857dacad5SJay Sternberg 		return NULL;
150957dacad5SJay Sternberg 
151057dacad5SJay Sternberg 	dev->cmb_dma_addr = dma_addr;
151157dacad5SJay Sternberg 	dev->cmb_size = size;
151257dacad5SJay Sternberg 	return cmb;
151357dacad5SJay Sternberg }
151457dacad5SJay Sternberg 
151557dacad5SJay Sternberg static inline void nvme_release_cmb(struct nvme_dev *dev)
151657dacad5SJay Sternberg {
151757dacad5SJay Sternberg 	if (dev->cmb) {
151857dacad5SJay Sternberg 		iounmap(dev->cmb);
151957dacad5SJay Sternberg 		dev->cmb = NULL;
1520f63572dfSJon Derrick 		if (dev->cmbsz) {
1521f63572dfSJon Derrick 			sysfs_remove_file_from_group(&dev->ctrl.device->kobj,
1522f63572dfSJon Derrick 						     &dev_attr_cmb.attr, NULL);
1523f63572dfSJon Derrick 			dev->cmbsz = 0;
1524f63572dfSJon Derrick 		}
152557dacad5SJay Sternberg 	}
152657dacad5SJay Sternberg }
152757dacad5SJay Sternberg 
152887ad72a5SChristoph Hellwig static int nvme_set_host_mem(struct nvme_dev *dev, u32 bits)
152987ad72a5SChristoph Hellwig {
153087ad72a5SChristoph Hellwig 	size_t len = dev->nr_host_mem_descs * sizeof(*dev->host_mem_descs);
153187ad72a5SChristoph Hellwig 	struct nvme_command c;
153287ad72a5SChristoph Hellwig 	u64 dma_addr;
153387ad72a5SChristoph Hellwig 	int ret;
153487ad72a5SChristoph Hellwig 
153587ad72a5SChristoph Hellwig 	dma_addr = dma_map_single(dev->dev, dev->host_mem_descs, len,
153687ad72a5SChristoph Hellwig 			DMA_TO_DEVICE);
153787ad72a5SChristoph Hellwig 	if (dma_mapping_error(dev->dev, dma_addr))
153887ad72a5SChristoph Hellwig 		return -ENOMEM;
153987ad72a5SChristoph Hellwig 
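	/*
	 * The Host Memory Buffer feature takes its arguments in the command
	 * dwords: dword11 carries the enable/return flags, dword12 the buffer
	 * size in controller pages, dword13/14 the 64-bit address of the
	 * descriptor list and dword15 the number of descriptors in it.
	 */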
154087ad72a5SChristoph Hellwig 	memset(&c, 0, sizeof(c));
154187ad72a5SChristoph Hellwig 	c.features.opcode	= nvme_admin_set_features;
154287ad72a5SChristoph Hellwig 	c.features.fid		= cpu_to_le32(NVME_FEAT_HOST_MEM_BUF);
154387ad72a5SChristoph Hellwig 	c.features.dword11	= cpu_to_le32(bits);
154487ad72a5SChristoph Hellwig 	c.features.dword12	= cpu_to_le32(dev->host_mem_size >>
154587ad72a5SChristoph Hellwig 					      ilog2(dev->ctrl.page_size));
154687ad72a5SChristoph Hellwig 	c.features.dword13	= cpu_to_le32(lower_32_bits(dma_addr));
154787ad72a5SChristoph Hellwig 	c.features.dword14	= cpu_to_le32(upper_32_bits(dma_addr));
154887ad72a5SChristoph Hellwig 	c.features.dword15	= cpu_to_le32(dev->nr_host_mem_descs);
154987ad72a5SChristoph Hellwig 
155087ad72a5SChristoph Hellwig 	ret = nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0);
155187ad72a5SChristoph Hellwig 	if (ret) {
155287ad72a5SChristoph Hellwig 		dev_warn(dev->ctrl.device,
155387ad72a5SChristoph Hellwig 			 "failed to set host mem (err %d, flags %#x).\n",
155487ad72a5SChristoph Hellwig 			 ret, bits);
155587ad72a5SChristoph Hellwig 	}
155687ad72a5SChristoph Hellwig 	dma_unmap_single(dev->dev, dma_addr, len, DMA_TO_DEVICE);
155787ad72a5SChristoph Hellwig 	return ret;
155887ad72a5SChristoph Hellwig }
155987ad72a5SChristoph Hellwig 
156087ad72a5SChristoph Hellwig static void nvme_free_host_mem(struct nvme_dev *dev)
156187ad72a5SChristoph Hellwig {
156287ad72a5SChristoph Hellwig 	int i;
156387ad72a5SChristoph Hellwig 
156487ad72a5SChristoph Hellwig 	for (i = 0; i < dev->nr_host_mem_descs; i++) {
156587ad72a5SChristoph Hellwig 		struct nvme_host_mem_buf_desc *desc = &dev->host_mem_descs[i];
156687ad72a5SChristoph Hellwig 		size_t size = le32_to_cpu(desc->size) * dev->ctrl.page_size;
156787ad72a5SChristoph Hellwig 
156887ad72a5SChristoph Hellwig 		dma_free_coherent(dev->dev, size, dev->host_mem_desc_bufs[i],
156987ad72a5SChristoph Hellwig 				le64_to_cpu(desc->addr));
157087ad72a5SChristoph Hellwig 	}
157187ad72a5SChristoph Hellwig 
157287ad72a5SChristoph Hellwig 	kfree(dev->host_mem_desc_bufs);
157387ad72a5SChristoph Hellwig 	dev->host_mem_desc_bufs = NULL;
157487ad72a5SChristoph Hellwig 	kfree(dev->host_mem_descs);
157587ad72a5SChristoph Hellwig 	dev->host_mem_descs = NULL;
157687ad72a5SChristoph Hellwig }
157787ad72a5SChristoph Hellwig 
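/*
 * Build the host memory buffer from DMA chunks.  Allocation starts with the
 * largest chunk the page allocator can hand out (PAGE_SIZE << MAX_ORDER)
 * and, if the minimum size can't be reached, halves the chunk size and
 * retries until the chunk size would drop below two pages.
 */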
157887ad72a5SChristoph Hellwig static int nvme_alloc_host_mem(struct nvme_dev *dev, u64 min, u64 preferred)
157987ad72a5SChristoph Hellwig {
158087ad72a5SChristoph Hellwig 	struct nvme_host_mem_buf_desc *descs;
158187ad72a5SChristoph Hellwig 	u32 chunk_size, max_entries, i = 0;
158287ad72a5SChristoph Hellwig 	void **bufs;
158387ad72a5SChristoph Hellwig 	u64 size, tmp;
158487ad72a5SChristoph Hellwig 
158587ad72a5SChristoph Hellwig 	/* start big and work our way down */
158687ad72a5SChristoph Hellwig 	chunk_size = min(preferred, (u64)PAGE_SIZE << MAX_ORDER);
158787ad72a5SChristoph Hellwig retry:
158887ad72a5SChristoph Hellwig 	tmp = (preferred + chunk_size - 1);
158987ad72a5SChristoph Hellwig 	do_div(tmp, chunk_size);
159087ad72a5SChristoph Hellwig 	max_entries = tmp;
159187ad72a5SChristoph Hellwig 	descs = kcalloc(max_entries, sizeof(*descs), GFP_KERNEL);
159287ad72a5SChristoph Hellwig 	if (!descs)
159387ad72a5SChristoph Hellwig 		goto out;
159487ad72a5SChristoph Hellwig 
159587ad72a5SChristoph Hellwig 	bufs = kcalloc(max_entries, sizeof(*bufs), GFP_KERNEL);
159687ad72a5SChristoph Hellwig 	if (!bufs)
159787ad72a5SChristoph Hellwig 		goto out_free_descs;
159887ad72a5SChristoph Hellwig 
159987ad72a5SChristoph Hellwig 	for (size = 0; size < preferred; size += chunk_size) {
160087ad72a5SChristoph Hellwig 		u32 len = min_t(u64, chunk_size, preferred - size);
160187ad72a5SChristoph Hellwig 		dma_addr_t dma_addr;
160287ad72a5SChristoph Hellwig 
160387ad72a5SChristoph Hellwig 		bufs[i] = dma_alloc_attrs(dev->dev, len, &dma_addr, GFP_KERNEL,
160487ad72a5SChristoph Hellwig 				DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN);
160587ad72a5SChristoph Hellwig 		if (!bufs[i])
160687ad72a5SChristoph Hellwig 			break;
160787ad72a5SChristoph Hellwig 
160887ad72a5SChristoph Hellwig 		descs[i].addr = cpu_to_le64(dma_addr);
160987ad72a5SChristoph Hellwig 		descs[i].size = cpu_to_le32(len / dev->ctrl.page_size);
161087ad72a5SChristoph Hellwig 		i++;
161187ad72a5SChristoph Hellwig 	}
161287ad72a5SChristoph Hellwig 
161387ad72a5SChristoph Hellwig 	if (!size || (min && size < min)) {
161487ad72a5SChristoph Hellwig 		dev_warn(dev->ctrl.device,
161587ad72a5SChristoph Hellwig 			"failed to allocate host memory buffer.\n");
161687ad72a5SChristoph Hellwig 		goto out_free_bufs;
161787ad72a5SChristoph Hellwig 	}
161887ad72a5SChristoph Hellwig 
161987ad72a5SChristoph Hellwig 	dev_info(dev->ctrl.device,
162087ad72a5SChristoph Hellwig 		"allocated %lld MiB host memory buffer.\n",
162187ad72a5SChristoph Hellwig 		size >> ilog2(SZ_1M));
162287ad72a5SChristoph Hellwig 	dev->nr_host_mem_descs = i;
162387ad72a5SChristoph Hellwig 	dev->host_mem_size = size;
162487ad72a5SChristoph Hellwig 	dev->host_mem_descs = descs;
162587ad72a5SChristoph Hellwig 	dev->host_mem_desc_bufs = bufs;
162687ad72a5SChristoph Hellwig 	return 0;
162787ad72a5SChristoph Hellwig 
162887ad72a5SChristoph Hellwig out_free_bufs:
162987ad72a5SChristoph Hellwig 	while (--i >= 0) {
163087ad72a5SChristoph Hellwig 		size_t size = le32_to_cpu(descs[i].size) * dev->ctrl.page_size;
163187ad72a5SChristoph Hellwig 
163287ad72a5SChristoph Hellwig 		dma_free_coherent(dev->dev, size, bufs[i],
163387ad72a5SChristoph Hellwig 				le64_to_cpu(descs[i].addr));
163487ad72a5SChristoph Hellwig 	}
163587ad72a5SChristoph Hellwig 
163687ad72a5SChristoph Hellwig 	kfree(bufs);
163787ad72a5SChristoph Hellwig out_free_descs:
163887ad72a5SChristoph Hellwig 	kfree(descs);
163987ad72a5SChristoph Hellwig out:
164087ad72a5SChristoph Hellwig 	/* try a smaller chunk size if we failed early */
164187ad72a5SChristoph Hellwig 	if (chunk_size >= PAGE_SIZE * 2 && (i == 0 || size < min)) {
164287ad72a5SChristoph Hellwig 		chunk_size /= 2;
164387ad72a5SChristoph Hellwig 		goto retry;
164487ad72a5SChristoph Hellwig 	}
164587ad72a5SChristoph Hellwig 	dev->host_mem_descs = NULL;
164687ad72a5SChristoph Hellwig 	return -ENOMEM;
164787ad72a5SChristoph Hellwig }
164887ad72a5SChristoph Hellwig 
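/*
 * HMPRE (preferred) and HMMIN (minimum) from Identify Controller are
 * expressed in 4 KiB units, hence the fixed 4096 multiplier below, while
 * the max_host_mem_size_mb module parameter caps the result in MiB.
 */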
164987ad72a5SChristoph Hellwig static void nvme_setup_host_mem(struct nvme_dev *dev)
165087ad72a5SChristoph Hellwig {
165187ad72a5SChristoph Hellwig 	u64 max = (u64)max_host_mem_size_mb * SZ_1M;
165287ad72a5SChristoph Hellwig 	u64 preferred = (u64)dev->ctrl.hmpre * 4096;
165387ad72a5SChristoph Hellwig 	u64 min = (u64)dev->ctrl.hmmin * 4096;
165487ad72a5SChristoph Hellwig 	u32 enable_bits = NVME_HOST_MEM_ENABLE;
165587ad72a5SChristoph Hellwig 
165687ad72a5SChristoph Hellwig 	preferred = min(preferred, max);
165787ad72a5SChristoph Hellwig 	if (min > max) {
165887ad72a5SChristoph Hellwig 		dev_warn(dev->ctrl.device,
165987ad72a5SChristoph Hellwig 			"min host memory (%lld MiB) above limit (%d MiB).\n",
166087ad72a5SChristoph Hellwig 			min >> ilog2(SZ_1M), max_host_mem_size_mb);
166187ad72a5SChristoph Hellwig 		nvme_free_host_mem(dev);
166287ad72a5SChristoph Hellwig 		return;
166387ad72a5SChristoph Hellwig 	}
166487ad72a5SChristoph Hellwig 
166587ad72a5SChristoph Hellwig 	/*
166687ad72a5SChristoph Hellwig 	 * If we already have a buffer allocated, check if we can reuse it.
166787ad72a5SChristoph Hellwig 	 */
166887ad72a5SChristoph Hellwig 	if (dev->host_mem_descs) {
166987ad72a5SChristoph Hellwig 		if (dev->host_mem_size >= min)
167087ad72a5SChristoph Hellwig 			enable_bits |= NVME_HOST_MEM_RETURN;
167187ad72a5SChristoph Hellwig 		else
167287ad72a5SChristoph Hellwig 			nvme_free_host_mem(dev);
167387ad72a5SChristoph Hellwig 	}
167487ad72a5SChristoph Hellwig 
167587ad72a5SChristoph Hellwig 	if (!dev->host_mem_descs) {
167687ad72a5SChristoph Hellwig 		if (nvme_alloc_host_mem(dev, min, preferred))
167787ad72a5SChristoph Hellwig 			return;
167887ad72a5SChristoph Hellwig 	}
167987ad72a5SChristoph Hellwig 
168087ad72a5SChristoph Hellwig 	if (nvme_set_host_mem(dev, enable_bits))
168187ad72a5SChristoph Hellwig 		nvme_free_host_mem(dev);
168287ad72a5SChristoph Hellwig }
168387ad72a5SChristoph Hellwig 
168457dacad5SJay Sternberg static int nvme_setup_io_queues(struct nvme_dev *dev)
168557dacad5SJay Sternberg {
168657dacad5SJay Sternberg 	struct nvme_queue *adminq = dev->queues[0];
168757dacad5SJay Sternberg 	struct pci_dev *pdev = to_pci_dev(dev->dev);
168897f6ef64SXu Yu 	int result, nr_io_queues;
168997f6ef64SXu Yu 	unsigned long size;
169057dacad5SJay Sternberg 
16912800b8e7SKeith Busch 	nr_io_queues = num_online_cpus();
16929a0be7abSChristoph Hellwig 	result = nvme_set_queue_count(&dev->ctrl, &nr_io_queues);
16939a0be7abSChristoph Hellwig 	if (result < 0)
169457dacad5SJay Sternberg 		return result;
16959a0be7abSChristoph Hellwig 
1696f5fa90dcSChristoph Hellwig 	if (nr_io_queues == 0)
1697a5229050SKeith Busch 		return 0;
169857dacad5SJay Sternberg 
169957dacad5SJay Sternberg 	if (dev->cmb && NVME_CMB_SQS(dev->cmbsz)) {
170057dacad5SJay Sternberg 		result = nvme_cmb_qdepth(dev, nr_io_queues,
170157dacad5SJay Sternberg 				sizeof(struct nvme_command));
170257dacad5SJay Sternberg 		if (result > 0)
170357dacad5SJay Sternberg 			dev->q_depth = result;
170457dacad5SJay Sternberg 		else
170557dacad5SJay Sternberg 			nvme_release_cmb(dev);
170657dacad5SJay Sternberg 	}
170757dacad5SJay Sternberg 
170857dacad5SJay Sternberg 	do {
170997f6ef64SXu Yu 		size = db_bar_size(dev, nr_io_queues);
171097f6ef64SXu Yu 		result = nvme_remap_bar(dev, size);
171197f6ef64SXu Yu 		if (!result)
171257dacad5SJay Sternberg 			break;
171357dacad5SJay Sternberg 		if (!--nr_io_queues)
171457dacad5SJay Sternberg 			return -ENOMEM;
171557dacad5SJay Sternberg 	} while (1);
171657dacad5SJay Sternberg 	adminq->q_db = dev->dbs;
171757dacad5SJay Sternberg 
171857dacad5SJay Sternberg 	/* Deregister the admin queue's interrupt */
17190ff199cbSChristoph Hellwig 	pci_free_irq(pdev, 0, adminq);
172057dacad5SJay Sternberg 
172157dacad5SJay Sternberg 	/*
172257dacad5SJay Sternberg 	 * If we enabled MSI-X early because the device does not support INTx,
172357dacad5SJay Sternberg 	 * disable it again before setting up the full range we need.
172457dacad5SJay Sternberg 	 */
1725dca51e78SChristoph Hellwig 	pci_free_irq_vectors(pdev);
1726dca51e78SChristoph Hellwig 	nr_io_queues = pci_alloc_irq_vectors(pdev, 1, nr_io_queues,
1727dca51e78SChristoph Hellwig 			PCI_IRQ_ALL_TYPES | PCI_IRQ_AFFINITY);
1728dca51e78SChristoph Hellwig 	if (nr_io_queues <= 0)
1729dca51e78SChristoph Hellwig 		return -EIO;
1730dca51e78SChristoph Hellwig 	dev->max_qid = nr_io_queues;
173157dacad5SJay Sternberg 
173257dacad5SJay Sternberg 	/*
173357dacad5SJay Sternberg 	 * Should investigate if there's a performance win from allocating
173457dacad5SJay Sternberg 	 * more queues than interrupt vectors; it might allow the submission
173557dacad5SJay Sternberg 	 * path to scale better, even if the receive path is limited by the
173657dacad5SJay Sternberg 	 * number of interrupts.
173757dacad5SJay Sternberg 	 */
173857dacad5SJay Sternberg 
1739dca51e78SChristoph Hellwig 	result = queue_request_irq(adminq);
174057dacad5SJay Sternberg 	if (result) {
174157dacad5SJay Sternberg 		adminq->cq_vector = -1;
1742d4875622SKeith Busch 		return result;
174357dacad5SJay Sternberg 	}
1744749941f2SChristoph Hellwig 	return nvme_create_io_queues(dev);
174557dacad5SJay Sternberg }
174657dacad5SJay Sternberg 
17472a842acaSChristoph Hellwig static void nvme_del_queue_end(struct request *req, blk_status_t error)
1748db3cbfffSKeith Busch {
1749db3cbfffSKeith Busch 	struct nvme_queue *nvmeq = req->end_io_data;
1750db3cbfffSKeith Busch 
1751db3cbfffSKeith Busch 	blk_mq_free_request(req);
1752db3cbfffSKeith Busch 	complete(&nvmeq->dev->ioq_wait);
1753db3cbfffSKeith Busch }
1754db3cbfffSKeith Busch 
17552a842acaSChristoph Hellwig static void nvme_del_cq_end(struct request *req, blk_status_t error)
1756db3cbfffSKeith Busch {
1757db3cbfffSKeith Busch 	struct nvme_queue *nvmeq = req->end_io_data;
1758db3cbfffSKeith Busch 
1759db3cbfffSKeith Busch 	if (!error) {
1760db3cbfffSKeith Busch 		unsigned long flags;
1761db3cbfffSKeith Busch 
17622e39e0f6SMing Lin 		/*
17632e39e0f6SMing Lin 		 * We might be called with the AQ q_lock held
17642e39e0f6SMing Lin 		 * and the I/O queue q_lock should always
17652e39e0f6SMing Lin 		 * nest inside the AQ one.
17662e39e0f6SMing Lin 		 */
17672e39e0f6SMing Lin 		spin_lock_irqsave_nested(&nvmeq->q_lock, flags,
17682e39e0f6SMing Lin 					SINGLE_DEPTH_NESTING);
1769db3cbfffSKeith Busch 		nvme_process_cq(nvmeq);
1770db3cbfffSKeith Busch 		spin_unlock_irqrestore(&nvmeq->q_lock, flags);
1771db3cbfffSKeith Busch 	}
1772db3cbfffSKeith Busch 
1773db3cbfffSKeith Busch 	nvme_del_queue_end(req, error);
1774db3cbfffSKeith Busch }
1775db3cbfffSKeith Busch 
1776db3cbfffSKeith Busch static int nvme_delete_queue(struct nvme_queue *nvmeq, u8 opcode)
1777db3cbfffSKeith Busch {
1778db3cbfffSKeith Busch 	struct request_queue *q = nvmeq->dev->ctrl.admin_q;
1779db3cbfffSKeith Busch 	struct request *req;
1780db3cbfffSKeith Busch 	struct nvme_command cmd;
1781db3cbfffSKeith Busch 
1782db3cbfffSKeith Busch 	memset(&cmd, 0, sizeof(cmd));
1783db3cbfffSKeith Busch 	cmd.delete_queue.opcode = opcode;
1784db3cbfffSKeith Busch 	cmd.delete_queue.qid = cpu_to_le16(nvmeq->qid);
1785db3cbfffSKeith Busch 
1786eb71f435SChristoph Hellwig 	req = nvme_alloc_request(q, &cmd, BLK_MQ_REQ_NOWAIT, NVME_QID_ANY);
1787db3cbfffSKeith Busch 	if (IS_ERR(req))
1788db3cbfffSKeith Busch 		return PTR_ERR(req);
1789db3cbfffSKeith Busch 
1790db3cbfffSKeith Busch 	req->timeout = ADMIN_TIMEOUT;
1791db3cbfffSKeith Busch 	req->end_io_data = nvmeq;
1792db3cbfffSKeith Busch 
1793db3cbfffSKeith Busch 	blk_execute_rq_nowait(q, NULL, req, false,
1794db3cbfffSKeith Busch 			opcode == nvme_admin_delete_cq ?
1795db3cbfffSKeith Busch 				nvme_del_cq_end : nvme_del_queue_end);
1796db3cbfffSKeith Busch 	return 0;
1797db3cbfffSKeith Busch }
1798db3cbfffSKeith Busch 
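/*
 * Delete the I/O queues in two passes: the first pass issues Delete I/O
 * Submission Queue for every remaining queue, the second pass issues
 * Delete I/O Completion Queue, waiting up to ADMIN_TIMEOUT for each batch.
 */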
179970659060SKeith Busch static void nvme_disable_io_queues(struct nvme_dev *dev, int queues)
1800db3cbfffSKeith Busch {
180170659060SKeith Busch 	int pass;
1802db3cbfffSKeith Busch 	unsigned long timeout;
1803db3cbfffSKeith Busch 	u8 opcode = nvme_admin_delete_sq;
1804db3cbfffSKeith Busch 
1805db3cbfffSKeith Busch 	for (pass = 0; pass < 2; pass++) {
1806014a0d60SKeith Busch 		int sent = 0, i = queues;
1807db3cbfffSKeith Busch 
1808db3cbfffSKeith Busch 		reinit_completion(&dev->ioq_wait);
1809db3cbfffSKeith Busch  retry:
1810db3cbfffSKeith Busch 		timeout = ADMIN_TIMEOUT;
1811c21377f8SGabriel Krisman Bertazi 		for (; i > 0; i--, sent++)
1812c21377f8SGabriel Krisman Bertazi 			if (nvme_delete_queue(dev->queues[i], opcode))
1813db3cbfffSKeith Busch 				break;
1814c21377f8SGabriel Krisman Bertazi 
1815db3cbfffSKeith Busch 		while (sent--) {
1816db3cbfffSKeith Busch 			timeout = wait_for_completion_io_timeout(&dev->ioq_wait, timeout);
1817db3cbfffSKeith Busch 			if (timeout == 0)
1818db3cbfffSKeith Busch 				return;
1819db3cbfffSKeith Busch 			if (i)
1820db3cbfffSKeith Busch 				goto retry;
1821db3cbfffSKeith Busch 		}
1822db3cbfffSKeith Busch 		opcode = nvme_admin_delete_cq;
1823db3cbfffSKeith Busch 	}
1824db3cbfffSKeith Busch }
1825db3cbfffSKeith Busch 
182657dacad5SJay Sternberg /*
182757dacad5SJay Sternberg  * Return: error value if an error occurred setting up the queues or calling
182857dacad5SJay Sternberg  * Identify Device.  0 if these succeeded, even if adding some of the
182957dacad5SJay Sternberg  * namespaces failed.  At the moment, these failures are silent.  TBD which
183057dacad5SJay Sternberg  * failures should be reported.
183157dacad5SJay Sternberg  */
183257dacad5SJay Sternberg static int nvme_dev_add(struct nvme_dev *dev)
183357dacad5SJay Sternberg {
18345bae7f73SChristoph Hellwig 	if (!dev->ctrl.tagset) {
183557dacad5SJay Sternberg 		dev->tagset.ops = &nvme_mq_ops;
183657dacad5SJay Sternberg 		dev->tagset.nr_hw_queues = dev->online_queues - 1;
183757dacad5SJay Sternberg 		dev->tagset.timeout = NVME_IO_TIMEOUT;
183857dacad5SJay Sternberg 		dev->tagset.numa_node = dev_to_node(dev->dev);
183957dacad5SJay Sternberg 		dev->tagset.queue_depth =
184057dacad5SJay Sternberg 				min_t(int, dev->q_depth, BLK_MQ_MAX_DEPTH) - 1;
184157dacad5SJay Sternberg 		dev->tagset.cmd_size = nvme_cmd_size(dev);
184257dacad5SJay Sternberg 		dev->tagset.flags = BLK_MQ_F_SHOULD_MERGE;
184357dacad5SJay Sternberg 		dev->tagset.driver_data = dev;
184457dacad5SJay Sternberg 
184557dacad5SJay Sternberg 		if (blk_mq_alloc_tag_set(&dev->tagset))
184657dacad5SJay Sternberg 			return 0;
18475bae7f73SChristoph Hellwig 		dev->ctrl.tagset = &dev->tagset;
1848f9f38e33SHelen Koike 
1849f9f38e33SHelen Koike 		nvme_dbbuf_set(dev);
1850949928c1SKeith Busch 	} else {
1851949928c1SKeith Busch 		blk_mq_update_nr_hw_queues(&dev->tagset, dev->online_queues - 1);
1852949928c1SKeith Busch 
1853949928c1SKeith Busch 		/* Free previously allocated queues that are no longer usable */
1854949928c1SKeith Busch 		nvme_free_queues(dev, dev->online_queues);
185557dacad5SJay Sternberg 	}
1856949928c1SKeith Busch 
185757dacad5SJay Sternberg 	return 0;
185857dacad5SJay Sternberg }
185957dacad5SJay Sternberg 
1860b00a726aSKeith Busch static int nvme_pci_enable(struct nvme_dev *dev)
186157dacad5SJay Sternberg {
186257dacad5SJay Sternberg 	u64 cap;
1863b00a726aSKeith Busch 	int result = -ENOMEM;
186457dacad5SJay Sternberg 	struct pci_dev *pdev = to_pci_dev(dev->dev);
186557dacad5SJay Sternberg 
186657dacad5SJay Sternberg 	if (pci_enable_device_mem(pdev))
186757dacad5SJay Sternberg 		return result;
186857dacad5SJay Sternberg 
186957dacad5SJay Sternberg 	pci_set_master(pdev);
187057dacad5SJay Sternberg 
187157dacad5SJay Sternberg 	if (dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(64)) &&
187257dacad5SJay Sternberg 	    dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(32)))
187357dacad5SJay Sternberg 		goto disable;
187457dacad5SJay Sternberg 
18757a67cbeaSChristoph Hellwig 	if (readl(dev->bar + NVME_REG_CSTS) == -1) {
187657dacad5SJay Sternberg 		result = -ENODEV;
1877b00a726aSKeith Busch 		goto disable;
187857dacad5SJay Sternberg 	}
187957dacad5SJay Sternberg 
188057dacad5SJay Sternberg 	/*
1881a5229050SKeith Busch 	 * Some devices and/or platforms don't advertise or work with INTx
1882a5229050SKeith Busch 	 * interrupts. Pre-enable a single MSI-X or MSI vector for setup; we'll
1883a5229050SKeith Busch 	 * adjust this later.
188457dacad5SJay Sternberg 	 */
1885dca51e78SChristoph Hellwig 	result = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
1886dca51e78SChristoph Hellwig 	if (result < 0)
1887dca51e78SChristoph Hellwig 		return result;
188857dacad5SJay Sternberg 
18897a67cbeaSChristoph Hellwig 	cap = lo_hi_readq(dev->bar + NVME_REG_CAP);
18907a67cbeaSChristoph Hellwig 
189157dacad5SJay Sternberg 	dev->q_depth = min_t(int, NVME_CAP_MQES(cap) + 1, NVME_Q_DEPTH);
189257dacad5SJay Sternberg 	dev->db_stride = 1 << NVME_CAP_STRIDE(cap);
18937a67cbeaSChristoph Hellwig 	dev->dbs = dev->bar + 4096;
18941f390c1fSStephan Günther 
18951f390c1fSStephan Günther 	/*
18961f390c1fSStephan Günther 	 * Temporary fix for the Apple controller found in the MacBook8,1 and
18971f390c1fSStephan Günther 	 * some MacBook7,1 to avoid controller resets and data loss.
18981f390c1fSStephan Günther 	 */
18991f390c1fSStephan Günther 	if (pdev->vendor == PCI_VENDOR_ID_APPLE && pdev->device == 0x2001) {
19001f390c1fSStephan Günther 		dev->q_depth = 2;
19019bdcfb10SChristoph Hellwig 		dev_warn(dev->ctrl.device, "detected Apple NVMe controller, "
19029bdcfb10SChristoph Hellwig 			"set queue depth=%u to work around controller resets\n",
19031f390c1fSStephan Günther 			dev->q_depth);
19041f390c1fSStephan Günther 	}
19051f390c1fSStephan Günther 
1906202021c1SStephen Bates 	/*
1907202021c1SStephen Bates 	 * CMBs can currently only exist on >=1.2 PCIe devices. We only
1908202021c1SStephen Bates 	 * populate sysfs if a CMB is implemented. Note that we add the
1909202021c1SStephen Bates 	 * CMB attribute to the nvme_ctrl kobj which removes the need to remove
1910202021c1SStephen Bates 	 * it on exit. Since nvme_dev_attrs_group has no name we can pass
1911202021c1SStephen Bates 	 * NULL as final argument to sysfs_add_file_to_group.
1912202021c1SStephen Bates 	 */
1913202021c1SStephen Bates 
19148ef2074dSGabriel Krisman Bertazi 	if (readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 2, 0)) {
191557dacad5SJay Sternberg 		dev->cmb = nvme_map_cmb(dev);
191657dacad5SJay Sternberg 
1917202021c1SStephen Bates 		if (dev->cmbsz) {
1918202021c1SStephen Bates 			if (sysfs_add_file_to_group(&dev->ctrl.device->kobj,
1919202021c1SStephen Bates 						    &dev_attr_cmb.attr, NULL))
19209bdcfb10SChristoph Hellwig 				dev_warn(dev->ctrl.device,
1921202021c1SStephen Bates 					 "failed to add sysfs attribute for CMB\n");
1922202021c1SStephen Bates 		}
1923202021c1SStephen Bates 	}
1924202021c1SStephen Bates 
1925a0a3408eSKeith Busch 	pci_enable_pcie_error_reporting(pdev);
1926a0a3408eSKeith Busch 	pci_save_state(pdev);
192757dacad5SJay Sternberg 	return 0;
192857dacad5SJay Sternberg 
192957dacad5SJay Sternberg  disable:
193057dacad5SJay Sternberg 	pci_disable_device(pdev);
193157dacad5SJay Sternberg 	return result;
193257dacad5SJay Sternberg }
193357dacad5SJay Sternberg 
193457dacad5SJay Sternberg static void nvme_dev_unmap(struct nvme_dev *dev)
193557dacad5SJay Sternberg {
1936b00a726aSKeith Busch 	if (dev->bar)
1937b00a726aSKeith Busch 		iounmap(dev->bar);
1938a1f447b3SJohannes Thumshirn 	pci_release_mem_regions(to_pci_dev(dev->dev));
1939b00a726aSKeith Busch }
1940b00a726aSKeith Busch 
1941b00a726aSKeith Busch static void nvme_pci_disable(struct nvme_dev *dev)
1942b00a726aSKeith Busch {
194357dacad5SJay Sternberg 	struct pci_dev *pdev = to_pci_dev(dev->dev);
194457dacad5SJay Sternberg 
1945f63572dfSJon Derrick 	nvme_release_cmb(dev);
1946dca51e78SChristoph Hellwig 	pci_free_irq_vectors(pdev);
194757dacad5SJay Sternberg 
1948a0a3408eSKeith Busch 	if (pci_is_enabled(pdev)) {
1949a0a3408eSKeith Busch 		pci_disable_pcie_error_reporting(pdev);
195057dacad5SJay Sternberg 		pci_disable_device(pdev);
195157dacad5SJay Sternberg 	}
1952a0a3408eSKeith Busch }
195357dacad5SJay Sternberg 
1954a5cdb68cSKeith Busch static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
195557dacad5SJay Sternberg {
195670659060SKeith Busch 	int i, queues;
1957302ad8ccSKeith Busch 	bool dead = true;
1958302ad8ccSKeith Busch 	struct pci_dev *pdev = to_pci_dev(dev->dev);
195957dacad5SJay Sternberg 
196077bf25eaSKeith Busch 	mutex_lock(&dev->shutdown_lock);
1961302ad8ccSKeith Busch 	if (pci_is_enabled(pdev)) {
1962302ad8ccSKeith Busch 		u32 csts = readl(dev->bar + NVME_REG_CSTS);
1963302ad8ccSKeith Busch 
1964302ad8ccSKeith Busch 		if (dev->ctrl.state == NVME_CTRL_LIVE)
1965302ad8ccSKeith Busch 			nvme_start_freeze(&dev->ctrl);
1966302ad8ccSKeith Busch 		dead = !!((csts & NVME_CSTS_CFS) || !(csts & NVME_CSTS_RDY) ||
1967302ad8ccSKeith Busch 			pdev->error_state  != pci_channel_io_normal);
196857dacad5SJay Sternberg 	}
1969c21377f8SGabriel Krisman Bertazi 
1970302ad8ccSKeith Busch 	/*
1971302ad8ccSKeith Busch 	 * Give the controller a chance to complete all entered requests if
1972302ad8ccSKeith Busch 	 * doing a safe shutdown.
1973302ad8ccSKeith Busch 	 */
197487ad72a5SChristoph Hellwig 	if (!dead) {
197587ad72a5SChristoph Hellwig 		if (shutdown)
1976302ad8ccSKeith Busch 			nvme_wait_freeze_timeout(&dev->ctrl, NVME_IO_TIMEOUT);
197787ad72a5SChristoph Hellwig 
197887ad72a5SChristoph Hellwig 		/*
197987ad72a5SChristoph Hellwig 		 * If the controller is still alive tell it to stop using the
198087ad72a5SChristoph Hellwig 		 * host memory buffer.  In theory the shutdown / reset should
198187ad72a5SChristoph Hellwig 		 * make sure that it doesn't access the host memory anymore,
198287ad72a5SChristoph Hellwig 		 * but I'd rather be safe than sorry.
198387ad72a5SChristoph Hellwig 		 */
198487ad72a5SChristoph Hellwig 		if (dev->host_mem_descs)
198587ad72a5SChristoph Hellwig 			nvme_set_host_mem(dev, 0);
198687ad72a5SChristoph Hellwig 
198787ad72a5SChristoph Hellwig 	}
1988302ad8ccSKeith Busch 	nvme_stop_queues(&dev->ctrl);
1989302ad8ccSKeith Busch 
199070659060SKeith Busch 	queues = dev->online_queues - 1;
1991c21377f8SGabriel Krisman Bertazi 	for (i = dev->queue_count - 1; i > 0; i--)
1992c21377f8SGabriel Krisman Bertazi 		nvme_suspend_queue(dev->queues[i]);
1993c21377f8SGabriel Krisman Bertazi 
1994302ad8ccSKeith Busch 	if (dead) {
199582469c59SGabriel Krisman Bertazi 		/* A device might become IO incapable very soon during
199682469c59SGabriel Krisman Bertazi 		 * probe, before the admin queue is configured. Thus,
199782469c59SGabriel Krisman Bertazi 		 * queue_count can be 0 here.
199882469c59SGabriel Krisman Bertazi 		 */
199982469c59SGabriel Krisman Bertazi 		if (dev->queue_count)
2000c21377f8SGabriel Krisman Bertazi 			nvme_suspend_queue(dev->queues[0]);
200157dacad5SJay Sternberg 	} else {
200270659060SKeith Busch 		nvme_disable_io_queues(dev, queues);
2003a5cdb68cSKeith Busch 		nvme_disable_admin_queue(dev, shutdown);
200457dacad5SJay Sternberg 	}
2005b00a726aSKeith Busch 	nvme_pci_disable(dev);
200657dacad5SJay Sternberg 
2007e1958e65SMing Lin 	blk_mq_tagset_busy_iter(&dev->tagset, nvme_cancel_request, &dev->ctrl);
2008e1958e65SMing Lin 	blk_mq_tagset_busy_iter(&dev->admin_tagset, nvme_cancel_request, &dev->ctrl);
2009302ad8ccSKeith Busch 
2010302ad8ccSKeith Busch 	/*
2011302ad8ccSKeith Busch 	 * The driver will not be starting up queues again if shutting down so
2012302ad8ccSKeith Busch 	 * must flush all entered requests to their failed completion to avoid
2013302ad8ccSKeith Busch 	 * deadlocking blk-mq hot-cpu notifier.
2014302ad8ccSKeith Busch 	 */
2015302ad8ccSKeith Busch 	if (shutdown)
2016302ad8ccSKeith Busch 		nvme_start_queues(&dev->ctrl);
201777bf25eaSKeith Busch 	mutex_unlock(&dev->shutdown_lock);
201857dacad5SJay Sternberg }
201957dacad5SJay Sternberg 
202057dacad5SJay Sternberg static int nvme_setup_prp_pools(struct nvme_dev *dev)
202157dacad5SJay Sternberg {
202257dacad5SJay Sternberg 	dev->prp_page_pool = dma_pool_create("prp list page", dev->dev,
202357dacad5SJay Sternberg 						PAGE_SIZE, PAGE_SIZE, 0);
202457dacad5SJay Sternberg 	if (!dev->prp_page_pool)
202557dacad5SJay Sternberg 		return -ENOMEM;
202657dacad5SJay Sternberg 
202757dacad5SJay Sternberg 	/* Optimisation for I/Os between 4k and 128k */
202857dacad5SJay Sternberg 	dev->prp_small_pool = dma_pool_create("prp list 256", dev->dev,
202957dacad5SJay Sternberg 						256, 256, 0);
203057dacad5SJay Sternberg 	if (!dev->prp_small_pool) {
203157dacad5SJay Sternberg 		dma_pool_destroy(dev->prp_page_pool);
203257dacad5SJay Sternberg 		return -ENOMEM;
203357dacad5SJay Sternberg 	}
203457dacad5SJay Sternberg 	return 0;
203557dacad5SJay Sternberg }
203657dacad5SJay Sternberg 
203757dacad5SJay Sternberg static void nvme_release_prp_pools(struct nvme_dev *dev)
203857dacad5SJay Sternberg {
203957dacad5SJay Sternberg 	dma_pool_destroy(dev->prp_page_pool);
204057dacad5SJay Sternberg 	dma_pool_destroy(dev->prp_small_pool);
204157dacad5SJay Sternberg }
204257dacad5SJay Sternberg 
20431673f1f0SChristoph Hellwig static void nvme_pci_free_ctrl(struct nvme_ctrl *ctrl)
204457dacad5SJay Sternberg {
20451673f1f0SChristoph Hellwig 	struct nvme_dev *dev = to_nvme_dev(ctrl);
204657dacad5SJay Sternberg 
2047f9f38e33SHelen Koike 	nvme_dbbuf_dma_free(dev);
204857dacad5SJay Sternberg 	put_device(dev->dev);
204957dacad5SJay Sternberg 	if (dev->tagset.tags)
205057dacad5SJay Sternberg 		blk_mq_free_tag_set(&dev->tagset);
20511c63dc66SChristoph Hellwig 	if (dev->ctrl.admin_q)
20521c63dc66SChristoph Hellwig 		blk_put_queue(dev->ctrl.admin_q);
205357dacad5SJay Sternberg 	kfree(dev->queues);
2054e286bcfcSScott Bauer 	free_opal_dev(dev->ctrl.opal_dev);
205557dacad5SJay Sternberg 	kfree(dev);
205657dacad5SJay Sternberg }
205757dacad5SJay Sternberg 
2058f58944e2SKeith Busch static void nvme_remove_dead_ctrl(struct nvme_dev *dev, int status)
2059f58944e2SKeith Busch {
2060237045fcSLinus Torvalds 	dev_warn(dev->ctrl.device, "Removing after probe failure status: %d\n", status);
2061f58944e2SKeith Busch 
2062f58944e2SKeith Busch 	kref_get(&dev->ctrl.kref);
206369d9a99cSKeith Busch 	nvme_dev_disable(dev, false);
2064f58944e2SKeith Busch 	if (!schedule_work(&dev->remove_work))
2065f58944e2SKeith Busch 		nvme_put_ctrl(&dev->ctrl);
2066f58944e2SKeith Busch }
2067f58944e2SKeith Busch 
2068fd634f41SChristoph Hellwig static void nvme_reset_work(struct work_struct *work)
206957dacad5SJay Sternberg {
2070fd634f41SChristoph Hellwig 	struct nvme_dev *dev = container_of(work, struct nvme_dev, reset_work);
2071a98e58e5SScott Bauer 	bool was_suspend = !!(dev->ctrl.ctrl_config & NVME_CC_SHN_NORMAL);
2072f58944e2SKeith Busch 	int result = -ENODEV;
207357dacad5SJay Sternberg 
207482b057caSRakesh Pandit 	if (WARN_ON(dev->ctrl.state != NVME_CTRL_RESETTING))
2075fd634f41SChristoph Hellwig 		goto out;
2076fd634f41SChristoph Hellwig 
2077fd634f41SChristoph Hellwig 	/*
2078fd634f41SChristoph Hellwig 	 * If we're called to reset a live controller first shut it down before
2079fd634f41SChristoph Hellwig 	 * moving on.
2080fd634f41SChristoph Hellwig 	 */
2081b00a726aSKeith Busch 	if (dev->ctrl.ctrl_config & NVME_CC_ENABLE)
2082a5cdb68cSKeith Busch 		nvme_dev_disable(dev, false);
2083fd634f41SChristoph Hellwig 
2084b00a726aSKeith Busch 	result = nvme_pci_enable(dev);
208557dacad5SJay Sternberg 	if (result)
208657dacad5SJay Sternberg 		goto out;
208757dacad5SJay Sternberg 
208857dacad5SJay Sternberg 	result = nvme_configure_admin_queue(dev);
208957dacad5SJay Sternberg 	if (result)
2090f58944e2SKeith Busch 		goto out;
209157dacad5SJay Sternberg 
209257dacad5SJay Sternberg 	nvme_init_queue(dev->queues[0], 0);
209357dacad5SJay Sternberg 	result = nvme_alloc_admin_tags(dev);
209457dacad5SJay Sternberg 	if (result)
2095f58944e2SKeith Busch 		goto out;
209657dacad5SJay Sternberg 
2097ce4541f4SChristoph Hellwig 	result = nvme_init_identify(&dev->ctrl);
2098ce4541f4SChristoph Hellwig 	if (result)
2099f58944e2SKeith Busch 		goto out;
2100ce4541f4SChristoph Hellwig 
2101e286bcfcSScott Bauer 	if (dev->ctrl.oacs & NVME_CTRL_OACS_SEC_SUPP) {
2102e286bcfcSScott Bauer 		if (!dev->ctrl.opal_dev)
21034f1244c8SChristoph Hellwig 			dev->ctrl.opal_dev =
21044f1244c8SChristoph Hellwig 				init_opal_dev(&dev->ctrl, &nvme_sec_submit);
2105e286bcfcSScott Bauer 		else if (was_suspend)
21064f1244c8SChristoph Hellwig 			opal_unlock_from_suspend(dev->ctrl.opal_dev);
2107e286bcfcSScott Bauer 	} else {
2108e286bcfcSScott Bauer 		free_opal_dev(dev->ctrl.opal_dev);
2109e286bcfcSScott Bauer 		dev->ctrl.opal_dev = NULL;
2110e286bcfcSScott Bauer 	}
2111a98e58e5SScott Bauer 
2112f9f38e33SHelen Koike 	if (dev->ctrl.oacs & NVME_CTRL_OACS_DBBUF_SUPP) {
2113f9f38e33SHelen Koike 		result = nvme_dbbuf_dma_alloc(dev);
2114f9f38e33SHelen Koike 		if (result)
2115f9f38e33SHelen Koike 			dev_warn(dev->dev,
2116f9f38e33SHelen Koike 				 "unable to allocate dma for dbbuf\n");
2117f9f38e33SHelen Koike 	}
2118f9f38e33SHelen Koike 
211987ad72a5SChristoph Hellwig 	if (dev->ctrl.hmpre)
212087ad72a5SChristoph Hellwig 		nvme_setup_host_mem(dev);
212187ad72a5SChristoph Hellwig 
212257dacad5SJay Sternberg 	result = nvme_setup_io_queues(dev);
212357dacad5SJay Sternberg 	if (result)
2124f58944e2SKeith Busch 		goto out;
212557dacad5SJay Sternberg 
212621f033f7SKeith Busch 	/*
212721f033f7SKeith Busch 	 * A controller that cannot execute I/O typically requires user
212821f033f7SKeith Busch 	 * intervention to correct. For such degraded controllers, the driver
212921f033f7SKeith Busch 	 * should not submit commands the user did not request, so skip
213021f033f7SKeith Busch 	 * registering for asynchronous event notification on this condition.
213121f033f7SKeith Busch 	 */
2132f866fc42SChristoph Hellwig 	if (dev->online_queues > 1)
2133f866fc42SChristoph Hellwig 		nvme_queue_async_events(&dev->ctrl);
213457dacad5SJay Sternberg 
213557dacad5SJay Sternberg 	/*
213657dacad5SJay Sternberg 	 * Keep the controller around but remove all namespaces if we don't have
213757dacad5SJay Sternberg 	 * any working I/O queue.
213857dacad5SJay Sternberg 	 */
213957dacad5SJay Sternberg 	if (dev->online_queues < 2) {
21401b3c47c1SSagi Grimberg 		dev_warn(dev->ctrl.device, "IO queues not created\n");
21413b24774eSKeith Busch 		nvme_kill_queues(&dev->ctrl);
21425bae7f73SChristoph Hellwig 		nvme_remove_namespaces(&dev->ctrl);
214357dacad5SJay Sternberg 	} else {
214425646264SKeith Busch 		nvme_start_queues(&dev->ctrl);
2145302ad8ccSKeith Busch 		nvme_wait_freeze(&dev->ctrl);
214657dacad5SJay Sternberg 		nvme_dev_add(dev);
2147302ad8ccSKeith Busch 		nvme_unfreeze(&dev->ctrl);
214857dacad5SJay Sternberg 	}
214957dacad5SJay Sternberg 
2150bb8d261eSChristoph Hellwig 	if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_LIVE)) {
2151bb8d261eSChristoph Hellwig 		dev_warn(dev->ctrl.device, "failed to mark controller live\n");
2152bb8d261eSChristoph Hellwig 		goto out;
2153bb8d261eSChristoph Hellwig 	}
215492911a55SChristoph Hellwig 
215592911a55SChristoph Hellwig 	if (dev->online_queues > 1)
21565955be21SChristoph Hellwig 		nvme_queue_scan(&dev->ctrl);
215757dacad5SJay Sternberg 	return;
215857dacad5SJay Sternberg 
215957dacad5SJay Sternberg  out:
2160f58944e2SKeith Busch 	nvme_remove_dead_ctrl(dev, result);
216157dacad5SJay Sternberg }
216257dacad5SJay Sternberg 
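/*
 * Runs from the remove_work item once a reset has failed beyond recovery:
 * mark the namespace queues dead so outstanding I/O completes with an error,
 * unbind the PCI driver from the device if it is still bound, and drop a
 * reference to the controller.
 */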
21635c8809e6SChristoph Hellwig static void nvme_remove_dead_ctrl_work(struct work_struct *work)
216457dacad5SJay Sternberg {
21655c8809e6SChristoph Hellwig 	struct nvme_dev *dev = container_of(work, struct nvme_dev, remove_work);
216657dacad5SJay Sternberg 	struct pci_dev *pdev = to_pci_dev(dev->dev);
216757dacad5SJay Sternberg 
216869d9a99cSKeith Busch 	nvme_kill_queues(&dev->ctrl);
216957dacad5SJay Sternberg 	if (pci_get_drvdata(pdev))
2170921920abSKeith Busch 		device_release_driver(&pdev->dev);
21711673f1f0SChristoph Hellwig 	nvme_put_ctrl(&dev->ctrl);
217257dacad5SJay Sternberg }
217357dacad5SJay Sternberg 
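/*
 * Request a controller reset: bail out if the admin queue is missing or
 * dying, move the controller into the RESETTING state, and queue reset_work
 * on the nvme workqueue.  Returns 0 when the reset was queued, -ENODEV or
 * -EBUSY otherwise.
 */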
217457dacad5SJay Sternberg static int nvme_reset(struct nvme_dev *dev)
217557dacad5SJay Sternberg {
21761c63dc66SChristoph Hellwig 	if (!dev->ctrl.admin_q || blk_queue_dying(dev->ctrl.admin_q))
217757dacad5SJay Sternberg 		return -ENODEV;
217882b057caSRakesh Pandit 	if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_RESETTING))
217982b057caSRakesh Pandit 		return -EBUSY;
21809a6327d2SSagi Grimberg 	if (!queue_work(nvme_wq, &dev->reset_work))
2181846cc05fSChristoph Hellwig 		return -EBUSY;
218257dacad5SJay Sternberg 	return 0;
218357dacad5SJay Sternberg }
218457dacad5SJay Sternberg 
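/*
 * MMIO register accessors exposed to the common NVMe core through
 * nvme_ctrl_ops; the core reaches the BAR only through these callbacks.
 * A rough sketch of the usual calling pattern on the core side (not a
 * quote of core code):
 *
 *	u32 csts;
 *
 *	if (ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts))
 *		return -ENODEV;
 */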
21851c63dc66SChristoph Hellwig static int nvme_pci_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val)
218657dacad5SJay Sternberg {
21871c63dc66SChristoph Hellwig 	*val = readl(to_nvme_dev(ctrl)->bar + off);
21881c63dc66SChristoph Hellwig 	return 0;
218957dacad5SJay Sternberg }
21901c63dc66SChristoph Hellwig 
21915fd4ce1bSChristoph Hellwig static int nvme_pci_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val)
21925fd4ce1bSChristoph Hellwig {
21935fd4ce1bSChristoph Hellwig 	writel(val, to_nvme_dev(ctrl)->bar + off);
21945fd4ce1bSChristoph Hellwig 	return 0;
21955fd4ce1bSChristoph Hellwig }
21965fd4ce1bSChristoph Hellwig 
21977fd8930fSChristoph Hellwig static int nvme_pci_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val)
21987fd8930fSChristoph Hellwig {
21997fd8930fSChristoph Hellwig 	*val = readq(to_nvme_dev(ctrl)->bar + off);
22007fd8930fSChristoph Hellwig 	return 0;
22017fd8930fSChristoph Hellwig }
22027fd8930fSChristoph Hellwig 
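/*
 * Synchronous reset entry point used through the reset_ctrl op (e.g. the
 * core's sysfs reset handling): queue the reset and, if that succeeded,
 * wait for reset_work to finish before returning.
 */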
2203f3ca80fcSChristoph Hellwig static int nvme_pci_reset_ctrl(struct nvme_ctrl *ctrl)
2204f3ca80fcSChristoph Hellwig {
2205c5f6ce97SKeith Busch 	struct nvme_dev *dev = to_nvme_dev(ctrl);
2206c5f6ce97SKeith Busch 	int ret = nvme_reset(dev);
2207c5f6ce97SKeith Busch 
2208c5f6ce97SKeith Busch 	if (!ret)
2209c5f6ce97SKeith Busch 		flush_work(&dev->reset_work);
2210c5f6ce97SKeith Busch 	return ret;
2211f3ca80fcSChristoph Hellwig }
2212f3ca80fcSChristoph Hellwig 
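/*
 * Glue between this PCIe transport and the common NVMe core: register
 * access, controller reset, teardown and async event submission all go
 * through this ops table.
 */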
22131c63dc66SChristoph Hellwig static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = {
22141a353d85SMing Lin 	.name			= "pcie",
2215e439bb12SSagi Grimberg 	.module			= THIS_MODULE,
2216c81bfba9SChristoph Hellwig 	.flags			= NVME_F_METADATA_SUPPORTED,
22171c63dc66SChristoph Hellwig 	.reg_read32		= nvme_pci_reg_read32,
22185fd4ce1bSChristoph Hellwig 	.reg_write32		= nvme_pci_reg_write32,
22197fd8930fSChristoph Hellwig 	.reg_read64		= nvme_pci_reg_read64,
2220f3ca80fcSChristoph Hellwig 	.reset_ctrl		= nvme_pci_reset_ctrl,
22211673f1f0SChristoph Hellwig 	.free_ctrl		= nvme_pci_free_ctrl,
2222f866fc42SChristoph Hellwig 	.submit_async_event	= nvme_pci_submit_async_event,
22231c63dc66SChristoph Hellwig };
222457dacad5SJay Sternberg 
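/*
 * Claim the PCI memory regions and map the controller registers.  The
 * initial mapping only needs to reach one page past NVME_REG_DBS, enough
 * for the admin queue doorbells; a larger window can be remapped later
 * when more doorbells are needed.
 */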
2225b00a726aSKeith Busch static int nvme_dev_map(struct nvme_dev *dev)
2226b00a726aSKeith Busch {
2227b00a726aSKeith Busch 	struct pci_dev *pdev = to_pci_dev(dev->dev);
2228b00a726aSKeith Busch 
2229a1f447b3SJohannes Thumshirn 	if (pci_request_mem_regions(pdev, "nvme"))
2230b00a726aSKeith Busch 		return -ENODEV;
2231b00a726aSKeith Busch 
223297f6ef64SXu Yu 	if (nvme_remap_bar(dev, NVME_REG_DBS + 4096))
2233b00a726aSKeith Busch 		goto release;
2234b00a726aSKeith Busch 
2235b00a726aSKeith Busch 	return 0;
2236b00a726aSKeith Busch   release:
2237a1f447b3SJohannes Thumshirn 	pci_release_mem_regions(pdev);
2238b00a726aSKeith Busch 	return -ENODEV;
2239b00a726aSKeith Busch }
2240b00a726aSKeith Busch 
2241ff5350a8SAndy Lutomirski static unsigned long check_dell_samsung_bug(struct pci_dev *pdev)
2242ff5350a8SAndy Lutomirski {
2243ff5350a8SAndy Lutomirski 	if (pdev->vendor == 0x144d && pdev->device == 0xa802) {
2244ff5350a8SAndy Lutomirski 		/*
2245ff5350a8SAndy Lutomirski 		 * Several Samsung devices seem to drop off the PCIe bus
2246ff5350a8SAndy Lutomirski 		 * randomly when APST is enabled and the deepest sleep state is used.
2247ff5350a8SAndy Lutomirski 		 * This has been observed on a Samsung "SM951 NVMe SAMSUNG
2248ff5350a8SAndy Lutomirski 		 * 256GB", a "PM951 NVMe SAMSUNG 512GB", and a "Samsung SSD
2249ff5350a8SAndy Lutomirski 		 * 950 PRO 256GB", but it seems to be restricted to two Dell
2250ff5350a8SAndy Lutomirski 		 * laptop models.
2251ff5350a8SAndy Lutomirski 		 */
2252ff5350a8SAndy Lutomirski 		if (dmi_match(DMI_SYS_VENDOR, "Dell Inc.") &&
2253ff5350a8SAndy Lutomirski 		    (dmi_match(DMI_PRODUCT_NAME, "XPS 15 9550") ||
2254ff5350a8SAndy Lutomirski 		     dmi_match(DMI_PRODUCT_NAME, "Precision 5510")))
2255ff5350a8SAndy Lutomirski 			return NVME_QUIRK_NO_DEEPEST_PS;
2256ff5350a8SAndy Lutomirski 	}
2257ff5350a8SAndy Lutomirski 
2258ff5350a8SAndy Lutomirski 	return 0;
2259ff5350a8SAndy Lutomirski }
2260ff5350a8SAndy Lutomirski 
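/*
 * PCI probe: choose a NUMA node for allocations, allocate the per-queue
 * pointer array, map the BAR, initialize the reset/remove work items,
 * shutdown lock and PRP pools, register the controller with the core
 * (applying any quirks for this device), and finally schedule reset_work
 * to bring the controller up asynchronously.
 */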
226157dacad5SJay Sternberg static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
226257dacad5SJay Sternberg {
226357dacad5SJay Sternberg 	int node, result = -ENOMEM;
226457dacad5SJay Sternberg 	struct nvme_dev *dev;
2265ff5350a8SAndy Lutomirski 	unsigned long quirks = id->driver_data;
226657dacad5SJay Sternberg 
226757dacad5SJay Sternberg 	node = dev_to_node(&pdev->dev);
226857dacad5SJay Sternberg 	if (node == NUMA_NO_NODE)
22692fa84351SMasayoshi Mizuma 		set_dev_node(&pdev->dev, first_memory_node);
227057dacad5SJay Sternberg 
227157dacad5SJay Sternberg 	dev = kzalloc_node(sizeof(*dev), GFP_KERNEL, node);
227257dacad5SJay Sternberg 	if (!dev)
227357dacad5SJay Sternberg 		return -ENOMEM;
227457dacad5SJay Sternberg 	dev->queues = kzalloc_node((num_possible_cpus() + 1) * sizeof(void *),
227557dacad5SJay Sternberg 							GFP_KERNEL, node);
227657dacad5SJay Sternberg 	if (!dev->queues)
227757dacad5SJay Sternberg 		goto free;
227857dacad5SJay Sternberg 
227957dacad5SJay Sternberg 	dev->dev = get_device(&pdev->dev);
228057dacad5SJay Sternberg 	pci_set_drvdata(pdev, dev);
228157dacad5SJay Sternberg 
2282b00a726aSKeith Busch 	result = nvme_dev_map(dev);
2283b00a726aSKeith Busch 	if (result)
2284b00a726aSKeith Busch 		goto free;
2285b00a726aSKeith Busch 
2286f3ca80fcSChristoph Hellwig 	INIT_WORK(&dev->reset_work, nvme_reset_work);
22875c8809e6SChristoph Hellwig 	INIT_WORK(&dev->remove_work, nvme_remove_dead_ctrl_work);
228877bf25eaSKeith Busch 	mutex_init(&dev->shutdown_lock);
2289db3cbfffSKeith Busch 	init_completion(&dev->ioq_wait);
2290f3ca80fcSChristoph Hellwig 
2291f3ca80fcSChristoph Hellwig 	result = nvme_setup_prp_pools(dev);
2292f3ca80fcSChristoph Hellwig 	if (result)
2293f3ca80fcSChristoph Hellwig 		goto put_pci;
2294f3ca80fcSChristoph Hellwig 
2295ff5350a8SAndy Lutomirski 	quirks |= check_dell_samsung_bug(pdev);
2296ff5350a8SAndy Lutomirski 
2297f3ca80fcSChristoph Hellwig 	result = nvme_init_ctrl(&dev->ctrl, &pdev->dev, &nvme_pci_ctrl_ops,
2298ff5350a8SAndy Lutomirski 			quirks);
2299f3ca80fcSChristoph Hellwig 	if (result)
2300f3ca80fcSChristoph Hellwig 		goto release_pools;
2301f3ca80fcSChristoph Hellwig 
230282b057caSRakesh Pandit 	nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_RESETTING);
23031b3c47c1SSagi Grimberg 	dev_info(dev->ctrl.device, "pci function %s\n", dev_name(&pdev->dev));
23041b3c47c1SSagi Grimberg 
23059a6327d2SSagi Grimberg 	queue_work(nvme_wq, &dev->reset_work);
230657dacad5SJay Sternberg 	return 0;
230757dacad5SJay Sternberg 
230857dacad5SJay Sternberg  release_pools:
230957dacad5SJay Sternberg 	nvme_release_prp_pools(dev);
231057dacad5SJay Sternberg  put_pci:
231157dacad5SJay Sternberg 	put_device(dev->dev);
2312b00a726aSKeith Busch 	nvme_dev_unmap(dev);
231357dacad5SJay Sternberg  free:
231457dacad5SJay Sternberg 	kfree(dev->queues);
231557dacad5SJay Sternberg 	kfree(dev);
231657dacad5SJay Sternberg 	return result;
231757dacad5SJay Sternberg }
231857dacad5SJay Sternberg 
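/*
 * PCI reset notification hook (wired up via nvme_err_handler): disable the
 * controller before an impending PCI function reset and schedule a
 * controller reset to bring it back up afterwards.
 */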
231957dacad5SJay Sternberg static void nvme_reset_notify(struct pci_dev *pdev, bool prepare)
232057dacad5SJay Sternberg {
232157dacad5SJay Sternberg 	struct nvme_dev *dev = pci_get_drvdata(pdev);
232257dacad5SJay Sternberg 
232357dacad5SJay Sternberg 	if (prepare)
2324a5cdb68cSKeith Busch 		nvme_dev_disable(dev, false);
232557dacad5SJay Sternberg 	else
2326c5f6ce97SKeith Busch 		nvme_reset(dev);
232757dacad5SJay Sternberg }
232857dacad5SJay Sternberg 
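/*
 * System shutdown/reboot: disable the controller with a full NVMe shutdown
 * (second argument true) so the device gets a chance to flush volatile
 * state before power goes away.
 */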
232957dacad5SJay Sternberg static void nvme_shutdown(struct pci_dev *pdev)
233057dacad5SJay Sternberg {
233157dacad5SJay Sternberg 	struct nvme_dev *dev = pci_get_drvdata(pdev);
2332a5cdb68cSKeith Busch 	nvme_dev_disable(dev, true);
233357dacad5SJay Sternberg }
233457dacad5SJay Sternberg 
2335f58944e2SKeith Busch /*
2336f58944e2SKeith Busch  * The driver's remove may be called on a device in a partially initialized
2337f58944e2SKeith Busch  * state. This function must not have any dependencies on the device state in
2338f58944e2SKeith Busch  * state. This function must not depend on the device state in order to
2339f58944e2SKeith Busch  * proceed.
234057dacad5SJay Sternberg static void nvme_remove(struct pci_dev *pdev)
234157dacad5SJay Sternberg {
234257dacad5SJay Sternberg 	struct nvme_dev *dev = pci_get_drvdata(pdev);
234357dacad5SJay Sternberg 
2344bb8d261eSChristoph Hellwig 	nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING);
2345bb8d261eSChristoph Hellwig 
234682b057caSRakesh Pandit 	cancel_work_sync(&dev->reset_work);
234757dacad5SJay Sternberg 	pci_set_drvdata(pdev, NULL);
23480ff9d4e1SKeith Busch 
23496db28edaSKeith Busch 	if (!pci_device_is_present(pdev)) {
23500ff9d4e1SKeith Busch 		nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DEAD);
23516db28edaSKeith Busch 		nvme_dev_disable(dev, false);
23526db28edaSKeith Busch 	}
23530ff9d4e1SKeith Busch 
23549bf2b972SKeith Busch 	flush_work(&dev->reset_work);
235553029b04SKeith Busch 	nvme_uninit_ctrl(&dev->ctrl);
2356a5cdb68cSKeith Busch 	nvme_dev_disable(dev, true);
235787ad72a5SChristoph Hellwig 	nvme_free_host_mem(dev);
235857dacad5SJay Sternberg 	nvme_dev_remove_admin(dev);
235957dacad5SJay Sternberg 	nvme_free_queues(dev, 0);
236057dacad5SJay Sternberg 	nvme_release_prp_pools(dev);
2361b00a726aSKeith Busch 	nvme_dev_unmap(dev);
23621673f1f0SChristoph Hellwig 	nvme_put_ctrl(&dev->ctrl);
236357dacad5SJay Sternberg }
236457dacad5SJay Sternberg 
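/*
 * SR-IOV control, reached when user space writes the device's sriov_numvfs
 * sysfs attribute, for example something like:
 *
 *	echo 2 > /sys/bus/pci/devices/<bdf>/sriov_numvfs
 *
 * A count of zero disables the VFs (refused while any VF is still assigned
 * to a guest); any other count enables that many VFs.
 */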
236513880f5bSKeith Busch static int nvme_pci_sriov_configure(struct pci_dev *pdev, int numvfs)
236613880f5bSKeith Busch {
236713880f5bSKeith Busch 	int ret = 0;
236813880f5bSKeith Busch 
236913880f5bSKeith Busch 	if (numvfs == 0) {
237013880f5bSKeith Busch 		if (pci_vfs_assigned(pdev)) {
237113880f5bSKeith Busch 			dev_warn(&pdev->dev,
237213880f5bSKeith Busch 				"Cannot disable SR-IOV VFs while assigned\n");
237313880f5bSKeith Busch 			return -EPERM;
237413880f5bSKeith Busch 		}
237513880f5bSKeith Busch 		pci_disable_sriov(pdev);
237613880f5bSKeith Busch 		return 0;
237713880f5bSKeith Busch 	}
237813880f5bSKeith Busch 
237913880f5bSKeith Busch 	ret = pci_enable_sriov(pdev, numvfs);
238013880f5bSKeith Busch 	return ret ? ret : numvfs;
238113880f5bSKeith Busch }
238213880f5bSKeith Busch 
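/*
 * Power management: suspend performs a full controller shutdown; resume
 * just schedules a reset so the normal bring-up path reinitializes the
 * device.
 */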
238357dacad5SJay Sternberg #ifdef CONFIG_PM_SLEEP
238457dacad5SJay Sternberg static int nvme_suspend(struct device *dev)
238557dacad5SJay Sternberg {
238657dacad5SJay Sternberg 	struct pci_dev *pdev = to_pci_dev(dev);
238757dacad5SJay Sternberg 	struct nvme_dev *ndev = pci_get_drvdata(pdev);
238857dacad5SJay Sternberg 
2389a5cdb68cSKeith Busch 	nvme_dev_disable(ndev, true);
239057dacad5SJay Sternberg 	return 0;
239157dacad5SJay Sternberg }
239257dacad5SJay Sternberg 
239357dacad5SJay Sternberg static int nvme_resume(struct device *dev)
239457dacad5SJay Sternberg {
239557dacad5SJay Sternberg 	struct pci_dev *pdev = to_pci_dev(dev);
239657dacad5SJay Sternberg 	struct nvme_dev *ndev = pci_get_drvdata(pdev);
239757dacad5SJay Sternberg 
2398c5f6ce97SKeith Busch 	nvme_reset(ndev);
239957dacad5SJay Sternberg 	return 0;
240057dacad5SJay Sternberg }
240157dacad5SJay Sternberg #endif
240257dacad5SJay Sternberg 
240357dacad5SJay Sternberg static SIMPLE_DEV_PM_OPS(nvme_dev_pm_ops, nvme_suspend, nvme_resume);
240457dacad5SJay Sternberg 
2405a0a3408eSKeith Busch static pci_ers_result_t nvme_error_detected(struct pci_dev *pdev,
2406a0a3408eSKeith Busch 						pci_channel_state_t state)
2407a0a3408eSKeith Busch {
2408a0a3408eSKeith Busch 	struct nvme_dev *dev = pci_get_drvdata(pdev);
2409a0a3408eSKeith Busch 
2410a0a3408eSKeith Busch 	/*
2411a0a3408eSKeith Busch 	 * A frozen channel requires a reset. When detected, this method will
2412a0a3408eSKeith Busch 	 * shut down the controller to quiesce it. The controller will be restarted
2413a0a3408eSKeith Busch 	 * after the slot reset through the driver's slot_reset callback.
2414a0a3408eSKeith Busch 	 */
2415a0a3408eSKeith Busch 	switch (state) {
2416a0a3408eSKeith Busch 	case pci_channel_io_normal:
2417a0a3408eSKeith Busch 		return PCI_ERS_RESULT_CAN_RECOVER;
2418a0a3408eSKeith Busch 	case pci_channel_io_frozen:
2419d011fb31SKeith Busch 		dev_warn(dev->ctrl.device,
2420d011fb31SKeith Busch 			"frozen state error detected, reset controller\n");
2421a5cdb68cSKeith Busch 		nvme_dev_disable(dev, false);
2422a0a3408eSKeith Busch 		return PCI_ERS_RESULT_NEED_RESET;
2423a0a3408eSKeith Busch 	case pci_channel_io_perm_failure:
2424d011fb31SKeith Busch 		dev_warn(dev->ctrl.device,
2425d011fb31SKeith Busch 			"failure state error detected, request disconnect\n");
2426a0a3408eSKeith Busch 		return PCI_ERS_RESULT_DISCONNECT;
2427a0a3408eSKeith Busch 	}
2428a0a3408eSKeith Busch 	return PCI_ERS_RESULT_NEED_RESET;
2429a0a3408eSKeith Busch }
2430a0a3408eSKeith Busch 
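/*
 * Called after the PCI core has reset the slot: restore the saved config
 * space and schedule a controller reset to reinitialize the device.
 */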
2431a0a3408eSKeith Busch static pci_ers_result_t nvme_slot_reset(struct pci_dev *pdev)
2432a0a3408eSKeith Busch {
2433a0a3408eSKeith Busch 	struct nvme_dev *dev = pci_get_drvdata(pdev);
2434a0a3408eSKeith Busch 
24351b3c47c1SSagi Grimberg 	dev_info(dev->ctrl.device, "restart after slot reset\n");
2436a0a3408eSKeith Busch 	pci_restore_state(pdev);
2437c5f6ce97SKeith Busch 	nvme_reset(dev);
2438a0a3408eSKeith Busch 	return PCI_ERS_RESULT_RECOVERED;
2439a0a3408eSKeith Busch }
2440a0a3408eSKeith Busch 
2441a0a3408eSKeith Busch static void nvme_error_resume(struct pci_dev *pdev)
2442a0a3408eSKeith Busch {
2443a0a3408eSKeith Busch 	pci_cleanup_aer_uncorrect_error_status(pdev);
2444a0a3408eSKeith Busch }
2445a0a3408eSKeith Busch 
244657dacad5SJay Sternberg static const struct pci_error_handlers nvme_err_handler = {
244757dacad5SJay Sternberg 	.error_detected	= nvme_error_detected,
244857dacad5SJay Sternberg 	.slot_reset	= nvme_slot_reset,
244957dacad5SJay Sternberg 	.resume		= nvme_error_resume,
245057dacad5SJay Sternberg 	.reset_notify	= nvme_reset_notify,
245157dacad5SJay Sternberg };
245257dacad5SJay Sternberg 
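/*
 * Device match table.  Per-device quirk flags are carried in driver_data;
 * nvme_probe() picks them up and passes them to nvme_init_ctrl().  The
 * PCI_DEVICE_CLASS entry matches any controller advertising the standard
 * NVMe class code, so unlisted devices still bind to this driver.
 */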
245357dacad5SJay Sternberg static const struct pci_device_id nvme_id_table[] = {
2454106198edSChristoph Hellwig 	{ PCI_VDEVICE(INTEL, 0x0953),
245508095e70SKeith Busch 		.driver_data = NVME_QUIRK_STRIPE_SIZE |
2456e850fd16SChristoph Hellwig 				NVME_QUIRK_DEALLOCATE_ZEROES, },
245799466e70SKeith Busch 	{ PCI_VDEVICE(INTEL, 0x0a53),
245899466e70SKeith Busch 		.driver_data = NVME_QUIRK_STRIPE_SIZE |
2459e850fd16SChristoph Hellwig 				NVME_QUIRK_DEALLOCATE_ZEROES, },
246099466e70SKeith Busch 	{ PCI_VDEVICE(INTEL, 0x0a54),
246199466e70SKeith Busch 		.driver_data = NVME_QUIRK_STRIPE_SIZE |
2462e850fd16SChristoph Hellwig 				NVME_QUIRK_DEALLOCATE_ZEROES, },
246350af47d0SAndy Lutomirski 	{ PCI_VDEVICE(INTEL, 0xf1a5),	/* Intel 600P/P3100 */
246450af47d0SAndy Lutomirski 		.driver_data = NVME_QUIRK_NO_DEEPEST_PS },
2465540c801cSKeith Busch 	{ PCI_VDEVICE(INTEL, 0x5845),	/* Qemu emulated controller */
2466540c801cSKeith Busch 		.driver_data = NVME_QUIRK_IDENTIFY_CNS, },
246754adc010SGuilherme G. Piccoli 	{ PCI_DEVICE(0x1c58, 0x0003),	/* HGST adapter */
246854adc010SGuilherme G. Piccoli 		.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
2469015282c9SWenbo Wang 	{ PCI_DEVICE(0x1c5f, 0x0540),	/* Memblaze Pblaze4 adapter */
2470015282c9SWenbo Wang 		.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
247157dacad5SJay Sternberg 	{ PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
2472c74dc780SStephan Günther 	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001) },
2473124298bdSDaniel Roschka 	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2003) },
247457dacad5SJay Sternberg 	{ 0, }
247557dacad5SJay Sternberg };
247657dacad5SJay Sternberg MODULE_DEVICE_TABLE(pci, nvme_id_table);
247757dacad5SJay Sternberg 
247857dacad5SJay Sternberg static struct pci_driver nvme_driver = {
247957dacad5SJay Sternberg 	.name		= "nvme",
248057dacad5SJay Sternberg 	.id_table	= nvme_id_table,
248157dacad5SJay Sternberg 	.probe		= nvme_probe,
248257dacad5SJay Sternberg 	.remove		= nvme_remove,
248357dacad5SJay Sternberg 	.shutdown	= nvme_shutdown,
248457dacad5SJay Sternberg 	.driver		= {
248557dacad5SJay Sternberg 		.pm	= &nvme_dev_pm_ops,
248657dacad5SJay Sternberg 	},
248713880f5bSKeith Busch 	.sriov_configure = nvme_pci_sriov_configure,
248857dacad5SJay Sternberg 	.err_handler	= &nvme_err_handler,
248957dacad5SJay Sternberg };
249057dacad5SJay Sternberg 
249157dacad5SJay Sternberg static int __init nvme_init(void)
249257dacad5SJay Sternberg {
24939a6327d2SSagi Grimberg 	return pci_register_driver(&nvme_driver);
249457dacad5SJay Sternberg }
249557dacad5SJay Sternberg 
249657dacad5SJay Sternberg static void __exit nvme_exit(void)
249757dacad5SJay Sternberg {
249857dacad5SJay Sternberg 	pci_unregister_driver(&nvme_driver);
249957dacad5SJay Sternberg 	_nvme_check_size();
250057dacad5SJay Sternberg }
250157dacad5SJay Sternberg 
250257dacad5SJay Sternberg MODULE_AUTHOR("Matthew Wilcox <willy@linux.intel.com>");
250357dacad5SJay Sternberg MODULE_LICENSE("GPL");
250457dacad5SJay Sternberg MODULE_VERSION("1.0");
250557dacad5SJay Sternberg module_init(nvme_init);
250657dacad5SJay Sternberg module_exit(nvme_exit);
2507