xref: /openbmc/linux/drivers/nvme/host/apple.c (revision ecc23d0a422a3118fcf6e4f0a46e17a6c2047b02)
1  // SPDX-License-Identifier: GPL-2.0
2  /*
3   * Apple ANS NVM Express device driver
4   * Copyright The Asahi Linux Contributors
5   *
6   * Based on the pci.c NVM Express device driver
7   * Copyright (c) 2011-2014, Intel Corporation.
8   * and on the rdma.c NVMe over Fabrics RDMA host code.
9   * Copyright (c) 2015-2016 HGST, a Western Digital Company.
10   */
11  
12  #include <linux/async.h>
13  #include <linux/blkdev.h>
14  #include <linux/blk-mq.h>
15  #include <linux/device.h>
16  #include <linux/dma-mapping.h>
17  #include <linux/dmapool.h>
18  #include <linux/interrupt.h>
19  #include <linux/io-64-nonatomic-lo-hi.h>
20  #include <linux/io.h>
21  #include <linux/iopoll.h>
22  #include <linux/jiffies.h>
23  #include <linux/mempool.h>
24  #include <linux/module.h>
25  #include <linux/of.h>
26  #include <linux/of_platform.h>
27  #include <linux/once.h>
28  #include <linux/platform_device.h>
29  #include <linux/pm_domain.h>
30  #include <linux/soc/apple/rtkit.h>
31  #include <linux/soc/apple/sart.h>
32  #include <linux/reset.h>
33  #include <linux/time64.h>
34  
35  #include "nvme.h"
36  
37  #define APPLE_ANS_BOOT_TIMEOUT	  USEC_PER_SEC
38  #define APPLE_ANS_MAX_QUEUE_DEPTH 64
39  
40  #define APPLE_ANS_COPROC_CPU_CONTROL	 0x44
41  #define APPLE_ANS_COPROC_CPU_CONTROL_RUN BIT(4)
42  
43  #define APPLE_ANS_ACQ_DB  0x1004
44  #define APPLE_ANS_IOCQ_DB 0x100c
45  
46  #define APPLE_ANS_MAX_PEND_CMDS_CTRL 0x1210
47  
48  #define APPLE_ANS_BOOT_STATUS	 0x1300
49  #define APPLE_ANS_BOOT_STATUS_OK 0xde71ce55
50  
51  #define APPLE_ANS_UNKNOWN_CTRL	 0x24008
52  #define APPLE_ANS_PRP_NULL_CHECK BIT(11)
53  
54  #define APPLE_ANS_LINEAR_SQ_CTRL 0x24908
55  #define APPLE_ANS_LINEAR_SQ_EN	 BIT(0)
56  
57  #define APPLE_ANS_LINEAR_ASQ_DB	 0x2490c
58  #define APPLE_ANS_LINEAR_IOSQ_DB 0x24910
59  
60  #define APPLE_NVMMU_NUM_TCBS	  0x28100
61  #define APPLE_NVMMU_ASQ_TCB_BASE  0x28108
62  #define APPLE_NVMMU_IOSQ_TCB_BASE 0x28110
63  #define APPLE_NVMMU_TCB_INVAL	  0x28118
64  #define APPLE_NVMMU_TCB_STAT	  0x28120
65  
66  /*
67   * This controller is a bit weird in the way command tags work: both the
68   * admin and the IO queue share the same tag space. Additionally, tags
69   * cannot be higher than 0x40, which effectively limits the combined
70   * queue depth to 0x40. Instead of wasting half of that on the admin queue,
71   * which gets much less traffic, we reduce its size here.
72   * The controller also doesn't support async events, so no space needs to
73   * be reserved for NVME_NR_AEN_COMMANDS.
74   */
75  #define APPLE_NVME_AQ_DEPTH	   2
76  #define APPLE_NVME_AQ_MQ_TAG_DEPTH (APPLE_NVME_AQ_DEPTH - 1)
77  
78  /*
79   * These can be higher, but we need to ensure that no command requires
80   * an sg allocation that needs more than a page of data.
81   */
82  #define NVME_MAX_KB_SZ 4096
83  #define NVME_MAX_SEGS  127
84  
85  /*
86   * This controller comes with an embedded IOMMU known as NVMMU.
87   * The NVMMU is pointed at an array of TCBs indexed by the command tag.
88   * Each command must be configured inside this structure before it's allowed
89   * to execute, including commands that don't require DMA transfers.
90   *
91   * An exception to this is Apple's vendor-specific commands (opcode 0xD8 on the
92   * admin queue): those commands must still be added to the NVMMU, but their DMA
93   * buffers cannot be represented as PRPs and must instead be allow-listed via SART.
94   *
95   * Programming the PRPs to the same values as those in the submission queue
96   * looks rather silly at first. This hardware is however designed for a kernel
97   * that runs the NVMMU code in a higher exception level than the NVMe driver.
98   * In that setting the NVMe driver first programs the submission queue entry
99   * and then executes a hypercall to the code that is allowed to program the
100   * NVMMU. The NVMMU driver then creates a shadow copy of the PRPs while
101   * verifying that they don't point to kernel text, data, pagetables, or similar
102   * protected areas before programming the TCB to point to this shadow copy.
103   * Since Linux doesn't do any of that we may as well just point both the queue
104   * and the TCB PRP pointer to the same memory.
105   */
106  struct apple_nvmmu_tcb {
107  	u8 opcode;
108  
109  #define APPLE_ANS_TCB_DMA_FROM_DEVICE BIT(0)
110  #define APPLE_ANS_TCB_DMA_TO_DEVICE   BIT(1)
111  	u8 dma_flags;
112  
113  	u8 command_id;
114  	u8 _unk0;
115  	__le16 length;
116  	u8 _unk1[18];
117  	__le64 prp1;
118  	__le64 prp2;
119  	u8 _unk2[16];
120  	u8 aes_iv[8];
121  	u8 _aes_unk[64];
122  };
123  
124  /*
125   * The Apple NVMe controller only supports a single admin and a single IO queue
126   * which are both limited to 64 entries and share a single interrupt.
127   *
128   * The completion queue works as usual. The submission "queue" instead is
129   * an array indexed by the command tag on this hardware. Commands must also be
130   * present in the NVMMU's TCB array. They are triggered by writing their tag to
131   * an MMIO register.
132   */
133  struct apple_nvme_queue {
134  	struct nvme_command *sqes;
135  	struct nvme_completion *cqes;
136  	struct apple_nvmmu_tcb *tcbs;
137  
138  	dma_addr_t sq_dma_addr;
139  	dma_addr_t cq_dma_addr;
140  	dma_addr_t tcb_dma_addr;
141  
142  	u32 __iomem *sq_db;
143  	u32 __iomem *cq_db;
144  
145  	u16 cq_head;
146  	u8 cq_phase;
147  
148  	bool is_adminq;
149  	bool enabled;
150  };
151  
152  /*
153   * The apple_nvme_iod describes the data in an I/O.
154   *
155   * The sg pointer contains the list of PRP chunk allocations in addition
156   * to the actual struct scatterlist.
157   */
158  struct apple_nvme_iod {
159  	struct nvme_request req;
160  	struct nvme_command cmd;
161  	struct apple_nvme_queue *q;
162  	int npages; /* In the PRP list. 0 means small pool in use */
163  	int nents; /* Used in scatterlist */
164  	dma_addr_t first_dma;
165  	unsigned int dma_len; /* length of single DMA segment mapping */
166  	struct scatterlist *sg;
167  };
168  
169  struct apple_nvme {
170  	struct device *dev;
171  
172  	void __iomem *mmio_coproc;
173  	void __iomem *mmio_nvme;
174  
175  	struct device **pd_dev;
176  	struct device_link **pd_link;
177  	int pd_count;
178  
179  	struct apple_sart *sart;
180  	struct apple_rtkit *rtk;
181  	struct reset_control *reset;
182  
183  	struct dma_pool *prp_page_pool;
184  	struct dma_pool *prp_small_pool;
185  	mempool_t *iod_mempool;
186  
187  	struct nvme_ctrl ctrl;
188  	struct work_struct remove_work;
189  
190  	struct apple_nvme_queue adminq;
191  	struct apple_nvme_queue ioq;
192  
193  	struct blk_mq_tag_set admin_tagset;
194  	struct blk_mq_tag_set tagset;
195  
196  	int irq;
197  	spinlock_t lock;
198  };
199  
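/* The tag-indexed SQE and TCB array layout depends on these exact sizes. */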
200  static_assert(sizeof(struct nvme_command) == 64);
201  static_assert(sizeof(struct apple_nvmmu_tcb) == 128);
202  
203  static inline struct apple_nvme *ctrl_to_apple_nvme(struct nvme_ctrl *ctrl)
204  {
205  	return container_of(ctrl, struct apple_nvme, ctrl);
206  }
207  
208  static inline struct apple_nvme *queue_to_apple_nvme(struct apple_nvme_queue *q)
209  {
210  	if (q->is_adminq)
211  		return container_of(q, struct apple_nvme, adminq);
212  
213  	return container_of(q, struct apple_nvme, ioq);
214  }
215  
216  static unsigned int apple_nvme_queue_depth(struct apple_nvme_queue *q)
217  {
218  	if (q->is_adminq)
219  		return APPLE_NVME_AQ_DEPTH;
220  
221  	return APPLE_ANS_MAX_QUEUE_DEPTH;
222  }
223  
224  static void apple_nvme_rtkit_crashed(void *cookie)
225  {
226  	struct apple_nvme *anv = cookie;
227  
228  	dev_warn(anv->dev, "RTKit crashed; unable to recover without a reboot");
229  	nvme_reset_ctrl(&anv->ctrl);
230  }
231  
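/*
 * Shared memory buffers requested by the RTKit firmware are allocated here
 * and must also be added to the SART allow-list so that the ANS co-processor
 * is permitted to DMA into them.
 */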
232  static int apple_nvme_sart_dma_setup(void *cookie,
233  				     struct apple_rtkit_shmem *bfr)
234  {
235  	struct apple_nvme *anv = cookie;
236  	int ret;
237  
238  	if (bfr->iova)
239  		return -EINVAL;
240  	if (!bfr->size)
241  		return -EINVAL;
242  
243  	bfr->buffer =
244  		dma_alloc_coherent(anv->dev, bfr->size, &bfr->iova, GFP_KERNEL);
245  	if (!bfr->buffer)
246  		return -ENOMEM;
247  
248  	ret = apple_sart_add_allowed_region(anv->sart, bfr->iova, bfr->size);
249  	if (ret) {
250  		dma_free_coherent(anv->dev, bfr->size, bfr->buffer, bfr->iova);
251  		bfr->buffer = NULL;
252  		return -ENOMEM;
253  	}
254  
255  	return 0;
256  }
257  
258  static void apple_nvme_sart_dma_destroy(void *cookie,
259  					struct apple_rtkit_shmem *bfr)
260  {
261  	struct apple_nvme *anv = cookie;
262  
263  	apple_sart_remove_allowed_region(anv->sart, bfr->iova, bfr->size);
264  	dma_free_coherent(anv->dev, bfr->size, bfr->buffer, bfr->iova);
265  }
266  
267  static const struct apple_rtkit_ops apple_nvme_rtkit_ops = {
268  	.crashed = apple_nvme_rtkit_crashed,
269  	.shmem_setup = apple_nvme_sart_dma_setup,
270  	.shmem_destroy = apple_nvme_sart_dma_destroy,
271  };
272  
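/*
 * A command's TCB has to be invalidated in the NVMMU after it completes;
 * only then can its tag (and thus its TCB slot) safely be reused.
 */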
273  static void apple_nvmmu_inval(struct apple_nvme_queue *q, unsigned int tag)
274  {
275  	struct apple_nvme *anv = queue_to_apple_nvme(q);
276  
277  	writel(tag, anv->mmio_nvme + APPLE_NVMMU_TCB_INVAL);
278  	if (readl(anv->mmio_nvme + APPLE_NVMMU_TCB_STAT))
279  		dev_warn_ratelimited(anv->dev,
280  				     "NVMMU TCB invalidation failed\n");
281  }
282  
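/*
 * Submission: fill in the tag-indexed TCB (so the NVMMU will allow the DMA),
 * copy the SQE into the tag-indexed slot of the linear submission queue and
 * finally ring the doorbell by writing the tag.
 */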
283  static void apple_nvme_submit_cmd(struct apple_nvme_queue *q,
284  				  struct nvme_command *cmd)
285  {
286  	struct apple_nvme *anv = queue_to_apple_nvme(q);
287  	u32 tag = nvme_tag_from_cid(cmd->common.command_id);
288  	struct apple_nvmmu_tcb *tcb = &q->tcbs[tag];
289  
290  	tcb->opcode = cmd->common.opcode;
291  	tcb->prp1 = cmd->common.dptr.prp1;
292  	tcb->prp2 = cmd->common.dptr.prp2;
293  	tcb->length = cmd->rw.length;
294  	tcb->command_id = tag;
295  
296  	if (nvme_is_write(cmd))
297  		tcb->dma_flags = APPLE_ANS_TCB_DMA_TO_DEVICE;
298  	else
299  		tcb->dma_flags = APPLE_ANS_TCB_DMA_FROM_DEVICE;
300  
301  	memcpy(&q->sqes[tag], cmd, sizeof(*cmd));
302  
303  	/*
304  	 * This lock here doesn't make much sense at first glance, but
305  	 * removing it will result in occasional missed completion
306  	 * interrupts even though the commands still appear on the CQ.
307  	 * It's unclear why this happens but our best guess is that
308  	 * there is a bug in the firmware triggered when a new command
309  	 * is issued while we're inside the irq handler between the
310  	 * NVMMU invalidation (and making the tag available again)
311  	 * and the final CQ update.
312  	 */
313  	spin_lock_irq(&anv->lock);
314  	writel(tag, q->sq_db);
315  	spin_unlock_irq(&anv->lock);
316  }
317  
318  /*
319   * From pci.c:
320   * Will slightly overestimate the number of pages needed.  This is OK
321   * as it only leads to a small amount of wasted memory for the lifetime of
322   * the I/O.
323   */
324  static inline size_t apple_nvme_iod_alloc_size(void)
325  {
326  	const unsigned int nprps = DIV_ROUND_UP(
327  		NVME_MAX_KB_SZ + NVME_CTRL_PAGE_SIZE, NVME_CTRL_PAGE_SIZE);
328  	const int npages = DIV_ROUND_UP(8 * nprps, PAGE_SIZE - 8);
329  	const size_t alloc_size = sizeof(__le64 *) * npages +
330  				  sizeof(struct scatterlist) * NVME_MAX_SEGS;
331  
332  	return alloc_size;
333  }
334  
335  static void **apple_nvme_iod_list(struct request *req)
336  {
337  	struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req);
338  
339  	return (void **)(iod->sg + blk_rq_nr_phys_segments(req));
340  }
341  
342  static void apple_nvme_free_prps(struct apple_nvme *anv, struct request *req)
343  {
344  	const int last_prp = NVME_CTRL_PAGE_SIZE / sizeof(__le64) - 1;
345  	struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req);
346  	dma_addr_t dma_addr = iod->first_dma;
347  	int i;
348  
349  	for (i = 0; i < iod->npages; i++) {
350  		__le64 *prp_list = apple_nvme_iod_list(req)[i];
351  		dma_addr_t next_dma_addr = le64_to_cpu(prp_list[last_prp]);
352  
353  		dma_pool_free(anv->prp_page_pool, prp_list, dma_addr);
354  		dma_addr = next_dma_addr;
355  	}
356  }
357  
358  static void apple_nvme_unmap_data(struct apple_nvme *anv, struct request *req)
359  {
360  	struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req);
361  
362  	if (iod->dma_len) {
363  		dma_unmap_page(anv->dev, iod->first_dma, iod->dma_len,
364  			       rq_dma_dir(req));
365  		return;
366  	}
367  
368  	WARN_ON_ONCE(!iod->nents);
369  
370  	dma_unmap_sg(anv->dev, iod->sg, iod->nents, rq_dma_dir(req));
371  	if (iod->npages == 0)
372  		dma_pool_free(anv->prp_small_pool, apple_nvme_iod_list(req)[0],
373  			      iod->first_dma);
374  	else
375  		apple_nvme_free_prps(anv, req);
376  	mempool_free(iod->sg, anv->iod_mempool);
377  }
378  
379  static void apple_nvme_print_sgl(struct scatterlist *sgl, int nents)
380  {
381  	int i;
382  	struct scatterlist *sg;
383  
384  	for_each_sg(sgl, sg, nents, i) {
385  		dma_addr_t phys = sg_phys(sg);
386  
387  		pr_warn("sg[%d] phys_addr:%pad offset:%d length:%d dma_address:%pad dma_length:%d\n",
388  			i, &phys, sg->offset, sg->length, &sg_dma_address(sg),
389  			sg_dma_len(sg));
390  	}
391  }
392  
393  static blk_status_t apple_nvme_setup_prps(struct apple_nvme *anv,
394  					  struct request *req,
395  					  struct nvme_rw_command *cmnd)
396  {
397  	struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req);
398  	struct dma_pool *pool;
399  	int length = blk_rq_payload_bytes(req);
400  	struct scatterlist *sg = iod->sg;
401  	int dma_len = sg_dma_len(sg);
402  	u64 dma_addr = sg_dma_address(sg);
403  	int offset = dma_addr & (NVME_CTRL_PAGE_SIZE - 1);
404  	__le64 *prp_list;
405  	void **list = apple_nvme_iod_list(req);
406  	dma_addr_t prp_dma;
407  	int nprps, i;
408  
409  	length -= (NVME_CTRL_PAGE_SIZE - offset);
410  	if (length <= 0) {
411  		iod->first_dma = 0;
412  		goto done;
413  	}
414  
415  	dma_len -= (NVME_CTRL_PAGE_SIZE - offset);
416  	if (dma_len) {
417  		dma_addr += (NVME_CTRL_PAGE_SIZE - offset);
418  	} else {
419  		sg = sg_next(sg);
420  		dma_addr = sg_dma_address(sg);
421  		dma_len = sg_dma_len(sg);
422  	}
423  
424  	if (length <= NVME_CTRL_PAGE_SIZE) {
425  		iod->first_dma = dma_addr;
426  		goto done;
427  	}
428  
429  	nprps = DIV_ROUND_UP(length, NVME_CTRL_PAGE_SIZE);
430  	if (nprps <= (256 / 8)) {
431  		pool = anv->prp_small_pool;
432  		iod->npages = 0;
433  	} else {
434  		pool = anv->prp_page_pool;
435  		iod->npages = 1;
436  	}
437  
438  	prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
439  	if (!prp_list) {
440  		iod->first_dma = dma_addr;
441  		iod->npages = -1;
442  		return BLK_STS_RESOURCE;
443  	}
444  	list[0] = prp_list;
445  	iod->first_dma = prp_dma;
446  	i = 0;
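	/*
	 * Fill the PRP list; whenever a list page fills up, its last entry is
	 * replaced with the DMA address of the next list page (PRP chaining).
	 */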
447  	for (;;) {
448  		if (i == NVME_CTRL_PAGE_SIZE >> 3) {
449  			__le64 *old_prp_list = prp_list;
450  
451  			prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
452  			if (!prp_list)
453  				goto free_prps;
454  			list[iod->npages++] = prp_list;
455  			prp_list[0] = old_prp_list[i - 1];
456  			old_prp_list[i - 1] = cpu_to_le64(prp_dma);
457  			i = 1;
458  		}
459  		prp_list[i++] = cpu_to_le64(dma_addr);
460  		dma_len -= NVME_CTRL_PAGE_SIZE;
461  		dma_addr += NVME_CTRL_PAGE_SIZE;
462  		length -= NVME_CTRL_PAGE_SIZE;
463  		if (length <= 0)
464  			break;
465  		if (dma_len > 0)
466  			continue;
467  		if (unlikely(dma_len < 0))
468  			goto bad_sgl;
469  		sg = sg_next(sg);
470  		dma_addr = sg_dma_address(sg);
471  		dma_len = sg_dma_len(sg);
472  	}
473  done:
474  	cmnd->dptr.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
475  	cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma);
476  	return BLK_STS_OK;
477  free_prps:
478  	apple_nvme_free_prps(anv, req);
479  	return BLK_STS_RESOURCE;
480  bad_sgl:
481  	WARN(DO_ONCE(apple_nvme_print_sgl, iod->sg, iod->nents),
482  	     "Invalid SGL for payload:%d nents:%d\n", blk_rq_payload_bytes(req),
483  	     iod->nents);
484  	return BLK_STS_IOERR;
485  }
486  
487  static blk_status_t apple_nvme_setup_prp_simple(struct apple_nvme *anv,
488  						struct request *req,
489  						struct nvme_rw_command *cmnd,
490  						struct bio_vec *bv)
491  {
492  	struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req);
493  	unsigned int offset = bv->bv_offset & (NVME_CTRL_PAGE_SIZE - 1);
494  	unsigned int first_prp_len = NVME_CTRL_PAGE_SIZE - offset;
495  
496  	iod->first_dma = dma_map_bvec(anv->dev, bv, rq_dma_dir(req), 0);
497  	if (dma_mapping_error(anv->dev, iod->first_dma))
498  		return BLK_STS_RESOURCE;
499  	iod->dma_len = bv->bv_len;
500  
501  	cmnd->dptr.prp1 = cpu_to_le64(iod->first_dma);
502  	if (bv->bv_len > first_prp_len)
503  		cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma + first_prp_len);
504  	return BLK_STS_OK;
505  }
506  
507  static blk_status_t apple_nvme_map_data(struct apple_nvme *anv,
508  					struct request *req,
509  					struct nvme_command *cmnd)
510  {
511  	struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req);
512  	blk_status_t ret = BLK_STS_RESOURCE;
513  	int nr_mapped;
514  
515  	if (blk_rq_nr_phys_segments(req) == 1) {
516  		struct bio_vec bv = req_bvec(req);
517  
518  		if (bv.bv_offset + bv.bv_len <= NVME_CTRL_PAGE_SIZE * 2)
519  			return apple_nvme_setup_prp_simple(anv, req, &cmnd->rw,
520  							   &bv);
521  	}
522  
523  	iod->dma_len = 0;
524  	iod->sg = mempool_alloc(anv->iod_mempool, GFP_ATOMIC);
525  	if (!iod->sg)
526  		return BLK_STS_RESOURCE;
527  	sg_init_table(iod->sg, blk_rq_nr_phys_segments(req));
528  	iod->nents = blk_rq_map_sg(req->q, req, iod->sg);
529  	if (!iod->nents)
530  		goto out_free_sg;
531  
532  	nr_mapped = dma_map_sg_attrs(anv->dev, iod->sg, iod->nents,
533  				     rq_dma_dir(req), DMA_ATTR_NO_WARN);
534  	if (!nr_mapped)
535  		goto out_free_sg;
536  
537  	ret = apple_nvme_setup_prps(anv, req, &cmnd->rw);
538  	if (ret != BLK_STS_OK)
539  		goto out_unmap_sg;
540  	return BLK_STS_OK;
541  
542  out_unmap_sg:
543  	dma_unmap_sg(anv->dev, iod->sg, iod->nents, rq_dma_dir(req));
544  out_free_sg:
545  	mempool_free(iod->sg, anv->iod_mempool);
546  	return ret;
547  }
548  
549  static __always_inline void apple_nvme_unmap_rq(struct request *req)
550  {
551  	struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req);
552  	struct apple_nvme *anv = queue_to_apple_nvme(iod->q);
553  
554  	if (blk_rq_nr_phys_segments(req))
555  		apple_nvme_unmap_data(anv, req);
556  }
557  
558  static void apple_nvme_complete_rq(struct request *req)
559  {
560  	apple_nvme_unmap_rq(req);
561  	nvme_complete_rq(req);
562  }
563  
564  static void apple_nvme_complete_batch(struct io_comp_batch *iob)
565  {
566  	nvme_complete_batch(iob, apple_nvme_unmap_rq);
567  }
568  
569  static inline bool apple_nvme_cqe_pending(struct apple_nvme_queue *q)
570  {
571  	struct nvme_completion *hcqe = &q->cqes[q->cq_head];
572  
573  	return (le16_to_cpu(READ_ONCE(hcqe->status)) & 1) == q->cq_phase;
574  }
575  
576  static inline struct blk_mq_tags *
577  apple_nvme_queue_tagset(struct apple_nvme *anv, struct apple_nvme_queue *q)
578  {
579  	if (q->is_adminq)
580  		return anv->admin_tagset.tags[0];
581  	else
582  		return anv->tagset.tags[0];
583  }
584  
585  static inline void apple_nvme_handle_cqe(struct apple_nvme_queue *q,
586  					 struct io_comp_batch *iob, u16 idx)
587  {
588  	struct apple_nvme *anv = queue_to_apple_nvme(q);
589  	struct nvme_completion *cqe = &q->cqes[idx];
590  	__u16 command_id = READ_ONCE(cqe->command_id);
591  	struct request *req;
592  
593  	apple_nvmmu_inval(q, command_id);
594  
595  	req = nvme_find_rq(apple_nvme_queue_tagset(anv, q), command_id);
596  	if (unlikely(!req)) {
597  		dev_warn(anv->dev, "invalid id %d completed", command_id);
598  		return;
599  	}
600  
601  	if (!nvme_try_complete_req(req, cqe->status, cqe->result) &&
602  	    !blk_mq_add_to_batch(req, iob, nvme_req(req)->status,
603  				 apple_nvme_complete_batch))
604  		apple_nvme_complete_rq(req);
605  }
606  
607  static inline void apple_nvme_update_cq_head(struct apple_nvme_queue *q)
608  {
609  	u32 tmp = q->cq_head + 1;
610  
611  	if (tmp == apple_nvme_queue_depth(q)) {
612  		q->cq_head = 0;
613  		q->cq_phase ^= 1;
614  	} else {
615  		q->cq_head = tmp;
616  	}
617  }
618  
619  static bool apple_nvme_poll_cq(struct apple_nvme_queue *q,
620  			       struct io_comp_batch *iob)
621  {
622  	bool found = false;
623  
624  	while (apple_nvme_cqe_pending(q)) {
625  		found = true;
626  
627  		/*
628  		 * load-load control dependency between phase and the rest of
629  		 * the cqe requires a full read memory barrier
630  		 */
631  		dma_rmb();
632  		apple_nvme_handle_cqe(q, iob, q->cq_head);
633  		apple_nvme_update_cq_head(q);
634  	}
635  
636  	if (found)
637  		writel(q->cq_head, q->cq_db);
638  
639  	return found;
640  }
641  
642  static bool apple_nvme_handle_cq(struct apple_nvme_queue *q, bool force)
643  {
644  	bool found;
645  	DEFINE_IO_COMP_BATCH(iob);
646  
647  	if (!READ_ONCE(q->enabled) && !force)
648  		return false;
649  
650  	found = apple_nvme_poll_cq(q, &iob);
651  
652  	if (!rq_list_empty(iob.req_list))
653  		apple_nvme_complete_batch(&iob);
654  
655  	return found;
656  }
657  
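/* Both queues share one interrupt line, so poll both CQs under the lock. */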
658  static irqreturn_t apple_nvme_irq(int irq, void *data)
659  {
660  	struct apple_nvme *anv = data;
661  	bool handled = false;
662  	unsigned long flags;
663  
664  	spin_lock_irqsave(&anv->lock, flags);
665  	if (apple_nvme_handle_cq(&anv->ioq, false))
666  		handled = true;
667  	if (apple_nvme_handle_cq(&anv->adminq, false))
668  		handled = true;
669  	spin_unlock_irqrestore(&anv->lock, flags);
670  
671  	if (handled)
672  		return IRQ_HANDLED;
673  	return IRQ_NONE;
674  }
675  
676  static int apple_nvme_create_cq(struct apple_nvme *anv)
677  {
678  	struct nvme_command c = {};
679  
680  	/*
681  	 * Note: we (ab)use the fact that the prp fields survive if no data
682  	 * is attached to the request.
683  	 */
684  	c.create_cq.opcode = nvme_admin_create_cq;
685  	c.create_cq.prp1 = cpu_to_le64(anv->ioq.cq_dma_addr);
686  	c.create_cq.cqid = cpu_to_le16(1);
687  	c.create_cq.qsize = cpu_to_le16(APPLE_ANS_MAX_QUEUE_DEPTH - 1);
688  	c.create_cq.cq_flags = cpu_to_le16(NVME_QUEUE_PHYS_CONTIG | NVME_CQ_IRQ_ENABLED);
689  	c.create_cq.irq_vector = cpu_to_le16(0);
690  
691  	return nvme_submit_sync_cmd(anv->ctrl.admin_q, &c, NULL, 0);
692  }
693  
694  static int apple_nvme_remove_cq(struct apple_nvme *anv)
695  {
696  	struct nvme_command c = {};
697  
698  	c.delete_queue.opcode = nvme_admin_delete_cq;
699  	c.delete_queue.qid = cpu_to_le16(1);
700  
701  	return nvme_submit_sync_cmd(anv->ctrl.admin_q, &c, NULL, 0);
702  }
703  
704  static int apple_nvme_create_sq(struct apple_nvme *anv)
705  {
706  	struct nvme_command c = {};
707  
708  	/*
709  	 * Note: we (ab)use the fact that the prp fields survive if no data
710  	 * is attached to the request.
711  	 */
712  	c.create_sq.opcode = nvme_admin_create_sq;
713  	c.create_sq.prp1 = cpu_to_le64(anv->ioq.sq_dma_addr);
714  	c.create_sq.sqid = cpu_to_le16(1);
715  	c.create_sq.qsize = cpu_to_le16(APPLE_ANS_MAX_QUEUE_DEPTH - 1);
716  	c.create_sq.sq_flags = cpu_to_le16(NVME_QUEUE_PHYS_CONTIG);
717  	c.create_sq.cqid = cpu_to_le16(1);
718  
719  	return nvme_submit_sync_cmd(anv->ctrl.admin_q, &c, NULL, 0);
720  }
721  
722  static int apple_nvme_remove_sq(struct apple_nvme *anv)
723  {
724  	struct nvme_command c = {};
725  
726  	c.delete_queue.opcode = nvme_admin_delete_sq;
727  	c.delete_queue.qid = cpu_to_le16(1);
728  
729  	return nvme_submit_sync_cmd(anv->ctrl.admin_q, &c, NULL, 0);
730  }
731  
732  static blk_status_t apple_nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
733  					const struct blk_mq_queue_data *bd)
734  {
735  	struct nvme_ns *ns = hctx->queue->queuedata;
736  	struct apple_nvme_queue *q = hctx->driver_data;
737  	struct apple_nvme *anv = queue_to_apple_nvme(q);
738  	struct request *req = bd->rq;
739  	struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req);
740  	struct nvme_command *cmnd = &iod->cmd;
741  	blk_status_t ret;
742  
743  	iod->npages = -1;
744  	iod->nents = 0;
745  
746  	/*
747  	 * We should not need to do this, but we're still using this to
748  	 * ensure we can drain requests on a dying queue.
749  	 */
750  	if (unlikely(!READ_ONCE(q->enabled)))
751  		return BLK_STS_IOERR;
752  
753  	if (!nvme_check_ready(&anv->ctrl, req, true))
754  		return nvme_fail_nonready_command(&anv->ctrl, req);
755  
756  	ret = nvme_setup_cmd(ns, req);
757  	if (ret)
758  		return ret;
759  
760  	if (blk_rq_nr_phys_segments(req)) {
761  		ret = apple_nvme_map_data(anv, req, cmnd);
762  		if (ret)
763  			goto out_free_cmd;
764  	}
765  
766  	nvme_start_request(req);
767  	apple_nvme_submit_cmd(q, cmnd);
768  	return BLK_STS_OK;
769  
770  out_free_cmd:
771  	nvme_cleanup_cmd(req);
772  	return ret;
773  }
774  
775  static int apple_nvme_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
776  				unsigned int hctx_idx)
777  {
778  	hctx->driver_data = data;
779  	return 0;
780  }
781  
782  static int apple_nvme_init_request(struct blk_mq_tag_set *set,
783  				   struct request *req, unsigned int hctx_idx,
784  				   unsigned int numa_node)
785  {
786  	struct apple_nvme_queue *q = set->driver_data;
787  	struct apple_nvme *anv = queue_to_apple_nvme(q);
788  	struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req);
789  	struct nvme_request *nreq = nvme_req(req);
790  
791  	iod->q = q;
792  	nreq->ctrl = &anv->ctrl;
793  	nreq->cmd = &iod->cmd;
794  
795  	return 0;
796  }
797  
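/*
 * Tear the controller down: optionally let in-flight I/O drain, delete the IO
 * queues, disable the NVMe controller and cancel whatever is still pending.
 * Used on shutdown, suspend, timeouts and in the reset error path.
 */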
798  static void apple_nvme_disable(struct apple_nvme *anv, bool shutdown)
799  {
800  	u32 csts = readl(anv->mmio_nvme + NVME_REG_CSTS);
801  	bool dead = false, freeze = false;
802  	unsigned long flags;
803  
804  	if (apple_rtkit_is_crashed(anv->rtk))
805  		dead = true;
806  	if (!(csts & NVME_CSTS_RDY))
807  		dead = true;
808  	if (csts & NVME_CSTS_CFS)
809  		dead = true;
810  
811  	if (anv->ctrl.state == NVME_CTRL_LIVE ||
812  	    anv->ctrl.state == NVME_CTRL_RESETTING) {
813  		freeze = true;
814  		nvme_start_freeze(&anv->ctrl);
815  	}
816  
817  	/*
818  	 * Give the controller a chance to complete all entered requests if
819  	 * doing a safe shutdown.
820  	 */
821  	if (!dead && shutdown && freeze)
822  		nvme_wait_freeze_timeout(&anv->ctrl, NVME_IO_TIMEOUT);
823  
824  	nvme_quiesce_io_queues(&anv->ctrl);
825  
826  	if (!dead) {
827  		if (READ_ONCE(anv->ioq.enabled)) {
828  			apple_nvme_remove_sq(anv);
829  			apple_nvme_remove_cq(anv);
830  		}
831  
832  		/*
833  		 * Always disable the NVMe controller after shutdown.
834  		 * We need to do this to bring it back up later anyway, and we
835  		 * can't do it while the firmware is not running (e.g. in the
836  		 * resume reset path before RTKit is initialized), so for Apple
837  		 * controllers it makes sense to unconditionally do it here.
838  		 * Additionally, this sequence of events is reliable, while
839  		 * others (like disabling after bringing back the firmware on
840  		 * resume) seem to run into trouble under some circumstances.
841  		 *
842  		 * Both U-Boot and m1n1 also use this convention (i.e. an ANS
843  		 * NVMe controller is handed off with firmware shut down, in an
844  		 * NVMe disabled state, after a clean shutdown).
845  		 */
846  		if (shutdown)
847  			nvme_disable_ctrl(&anv->ctrl, shutdown);
848  		nvme_disable_ctrl(&anv->ctrl, false);
849  	}
850  
851  	WRITE_ONCE(anv->ioq.enabled, false);
852  	WRITE_ONCE(anv->adminq.enabled, false);
853  	mb(); /* ensure that nvme_queue_rq() sees that enabled is cleared */
854  	nvme_quiesce_admin_queue(&anv->ctrl);
855  
856  	/* last chance to complete any requests before nvme_cancel_request */
857  	spin_lock_irqsave(&anv->lock, flags);
858  	apple_nvme_handle_cq(&anv->ioq, true);
859  	apple_nvme_handle_cq(&anv->adminq, true);
860  	spin_unlock_irqrestore(&anv->lock, flags);
861  
862  	nvme_cancel_tagset(&anv->ctrl);
863  	nvme_cancel_admin_tagset(&anv->ctrl);
864  
865  	/*
866  	 * The driver will not be starting up queues again if shutting down so
867  	 * must flush all entered requests to their failed completion to avoid
868  	 * deadlocking blk-mq hot-cpu notifier.
869  	 */
870  	if (shutdown) {
871  		nvme_unquiesce_io_queues(&anv->ctrl);
872  		nvme_unquiesce_admin_queue(&anv->ctrl);
873  	}
874  }
875  
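/*
 * The controller has no command abort support, so a timeout can only be
 * handled by polling for a missed completion or by resetting the controller.
 */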
876  static enum blk_eh_timer_return apple_nvme_timeout(struct request *req)
877  {
878  	struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req);
879  	struct apple_nvme_queue *q = iod->q;
880  	struct apple_nvme *anv = queue_to_apple_nvme(q);
881  	unsigned long flags;
882  	u32 csts = readl(anv->mmio_nvme + NVME_REG_CSTS);
883  
884  	if (anv->ctrl.state != NVME_CTRL_LIVE) {
885  		/*
886  		 * From rdma.c:
887  		 * If we are resetting, connecting or deleting we should
888  		 * complete immediately because we may block controller
889  		 * teardown or setup sequence
890  		 * - ctrl disable/shutdown fabrics requests
891  		 * - connect requests
892  		 * - initialization admin requests
893  		 * - I/O requests that entered after unquiescing and
894  		 *   the controller stopped responding
895  		 *
896  		 * All other requests should be cancelled by the error
897  		 * recovery work, so it's fine that we fail it here.
898  		 */
899  		dev_warn(anv->dev,
900  			 "I/O %d(aq:%d) timeout while not in live state\n",
901  			 req->tag, q->is_adminq);
902  		if (blk_mq_request_started(req) &&
903  		    !blk_mq_request_completed(req)) {
904  			nvme_req(req)->status = NVME_SC_HOST_ABORTED_CMD;
905  			nvme_req(req)->flags |= NVME_REQ_CANCELLED;
906  			blk_mq_complete_request(req);
907  		}
908  		return BLK_EH_DONE;
909  	}
910  
911  	/* if the controller is still alive, check whether we just missed an interrupt */
912  	if (!apple_rtkit_is_crashed(anv->rtk) && !(csts & NVME_CSTS_CFS)) {
913  		spin_lock_irqsave(&anv->lock, flags);
914  		apple_nvme_handle_cq(q, false);
915  		spin_unlock_irqrestore(&anv->lock, flags);
916  		if (blk_mq_request_completed(req)) {
917  			dev_warn(anv->dev,
918  				 "I/O %d(aq:%d) timeout: completion polled\n",
919  				 req->tag, q->is_adminq);
920  			return BLK_EH_DONE;
921  		}
922  	}
923  
924  	/*
925  	 * aborting commands isn't supported which leaves a full reset as our
926  	 * only option here
927  	 */
928  	dev_warn(anv->dev, "I/O %d(aq:%d) timeout: resetting controller\n",
929  		 req->tag, q->is_adminq);
930  	nvme_req(req)->flags |= NVME_REQ_CANCELLED;
931  	apple_nvme_disable(anv, false);
932  	nvme_reset_ctrl(&anv->ctrl);
933  	return BLK_EH_DONE;
934  }
935  
936  static int apple_nvme_poll(struct blk_mq_hw_ctx *hctx,
937  			   struct io_comp_batch *iob)
938  {
939  	struct apple_nvme_queue *q = hctx->driver_data;
940  	struct apple_nvme *anv = queue_to_apple_nvme(q);
941  	bool found;
942  	unsigned long flags;
943  
944  	spin_lock_irqsave(&anv->lock, flags);
945  	found = apple_nvme_poll_cq(q, iob);
946  	spin_unlock_irqrestore(&anv->lock, flags);
947  
948  	return found;
949  }
950  
951  static const struct blk_mq_ops apple_nvme_mq_admin_ops = {
952  	.queue_rq = apple_nvme_queue_rq,
953  	.complete = apple_nvme_complete_rq,
954  	.init_hctx = apple_nvme_init_hctx,
955  	.init_request = apple_nvme_init_request,
956  	.timeout = apple_nvme_timeout,
957  };
958  
959  static const struct blk_mq_ops apple_nvme_mq_ops = {
960  	.queue_rq = apple_nvme_queue_rq,
961  	.complete = apple_nvme_complete_rq,
962  	.init_hctx = apple_nvme_init_hctx,
963  	.init_request = apple_nvme_init_request,
964  	.timeout = apple_nvme_timeout,
965  	.poll = apple_nvme_poll,
966  };
967  
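/* Reset the driver-side CQ state and clear the TCB/CQE memory before (re)enabling a queue. */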
968  static void apple_nvme_init_queue(struct apple_nvme_queue *q)
969  {
970  	unsigned int depth = apple_nvme_queue_depth(q);
971  
972  	q->cq_head = 0;
973  	q->cq_phase = 1;
974  	memset(q->tcbs, 0,
975  	       APPLE_ANS_MAX_QUEUE_DEPTH * sizeof(struct apple_nvmmu_tcb));
976  	memset(q->cqes, 0, depth * sizeof(struct nvme_completion));
977  	WRITE_ONCE(q->enabled, true);
978  	wmb(); /* ensure the first interrupt sees the initialization */
979  }
980  
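/*
 * Controller (re)initialization: cleanly shut down and reboot the ANS
 * co-processor via RTKit, program the linear SQ/NVMMU registers, bring up the
 * admin queue and then recreate the single IO queue pair.
 */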
981  static void apple_nvme_reset_work(struct work_struct *work)
982  {
983  	unsigned int nr_io_queues = 1;
984  	int ret;
985  	u32 boot_status, aqa;
986  	struct apple_nvme *anv =
987  		container_of(work, struct apple_nvme, ctrl.reset_work);
988  
989  	if (anv->ctrl.state != NVME_CTRL_RESETTING) {
990  		dev_warn(anv->dev, "ctrl state %d is not RESETTING\n",
991  			 anv->ctrl.state);
992  		ret = -ENODEV;
993  		goto out;
994  	}
995  
996  	/* there's unfortunately no known way to recover if RTKit crashed :( */
997  	if (apple_rtkit_is_crashed(anv->rtk)) {
998  		dev_err(anv->dev,
999  			"RTKit has crashed without any way to recover.");
1000  		ret = -EIO;
1001  		goto out;
1002  	}
1003  
1004  	/* RTKit must be shut down cleanly for the (soft)-reset to work */
1005  	if (apple_rtkit_is_running(anv->rtk)) {
1006  		/* reset the controller if it is enabled */
1007  		if (anv->ctrl.ctrl_config & NVME_CC_ENABLE)
1008  			apple_nvme_disable(anv, false);
1009  		dev_dbg(anv->dev, "Trying to shut down RTKit before reset.");
1010  		ret = apple_rtkit_shutdown(anv->rtk);
1011  		if (ret)
1012  			goto out;
1013  	}
1014  
1015  	writel(0, anv->mmio_coproc + APPLE_ANS_COPROC_CPU_CONTROL);
1016  
1017  	ret = reset_control_assert(anv->reset);
1018  	if (ret)
1019  		goto out;
1020  
1021  	ret = apple_rtkit_reinit(anv->rtk);
1022  	if (ret)
1023  		goto out;
1024  
1025  	ret = reset_control_deassert(anv->reset);
1026  	if (ret)
1027  		goto out;
1028  
1029  	writel(APPLE_ANS_COPROC_CPU_CONTROL_RUN,
1030  	       anv->mmio_coproc + APPLE_ANS_COPROC_CPU_CONTROL);
1031  	ret = apple_rtkit_boot(anv->rtk);
1032  	if (ret) {
1033  		dev_err(anv->dev, "ANS did not boot");
1034  		goto out;
1035  	}
1036  
1037  	ret = readl_poll_timeout(anv->mmio_nvme + APPLE_ANS_BOOT_STATUS,
1038  				 boot_status,
1039  				 boot_status == APPLE_ANS_BOOT_STATUS_OK,
1040  				 USEC_PER_MSEC, APPLE_ANS_BOOT_TIMEOUT);
1041  	if (ret) {
1042  		dev_err(anv->dev, "ANS did not initialize");
1043  		goto out;
1044  	}
1045  
1046  	dev_dbg(anv->dev, "ANS booted successfully.");
1047  
1048  	/*
1049  	 * Limit the max command size to prevent iod->sg allocations going
1050  	 * over a single page.
1051  	 */
1052  	anv->ctrl.max_hw_sectors = min_t(u32, NVME_MAX_KB_SZ << 1,
1053  					 dma_max_mapping_size(anv->dev) >> 9);
1054  	anv->ctrl.max_segments = NVME_MAX_SEGS;
1055  
1056  	dma_set_max_seg_size(anv->dev, 0xffffffff);
1057  
1058  	/*
1059  	 * Enable NVMMU and linear submission queues.
1060  	 * While we could keep those disabled and pretend this is slightly
1061  	 * While we could keep those disabled and pretend this is a slightly
1062  	 * more common NVMe controller, we'd still need some quirks (e.g.
1063  	 * that mode in the future.
1064  	 */
1065  	writel(APPLE_ANS_LINEAR_SQ_EN,
1066  	       anv->mmio_nvme + APPLE_ANS_LINEAR_SQ_CTRL);
1067  
1068  	/* Allow as many pending commands as possible for both queues */
1069  	writel(APPLE_ANS_MAX_QUEUE_DEPTH | (APPLE_ANS_MAX_QUEUE_DEPTH << 16),
1070  	       anv->mmio_nvme + APPLE_ANS_MAX_PEND_CMDS_CTRL);
1071  
1072  	/* Setup the NVMMU for the maximum admin and IO queue depth */
1073  	writel(APPLE_ANS_MAX_QUEUE_DEPTH - 1,
1074  	       anv->mmio_nvme + APPLE_NVMMU_NUM_TCBS);
1075  
1076  	/*
1077  	 * This is probably a chicken bit: without it, all commands where any PRP
1078  	 * is set to zero (including those that don't use that field) fail and
1079  	 * the co-processor complains about "completed with err BAD_CMD-" or
1080  	 * a "NULL_PRP_PTR_ERR" in the syslog.
1081  	 */
1082  	writel(readl(anv->mmio_nvme + APPLE_ANS_UNKNOWN_CTRL) &
1083  		       ~APPLE_ANS_PRP_NULL_CHECK,
1084  	       anv->mmio_nvme + APPLE_ANS_UNKNOWN_CTRL);
1085  
1086  	/* Setup the admin queue */
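	/*
	 * NVME_REG_AQA takes the 0-based admin SQ size in its low half and the
	 * 0-based admin CQ size in its high half; with APPLE_NVME_AQ_DEPTH == 2
	 * this works out to aqa == 0x00010001.
	 */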
1087  	aqa = APPLE_NVME_AQ_DEPTH - 1;
1088  	aqa |= aqa << 16;
1089  	writel(aqa, anv->mmio_nvme + NVME_REG_AQA);
1090  	writeq(anv->adminq.sq_dma_addr, anv->mmio_nvme + NVME_REG_ASQ);
1091  	writeq(anv->adminq.cq_dma_addr, anv->mmio_nvme + NVME_REG_ACQ);
1092  
1093  	/* Setup NVMMU for both queues */
1094  	writeq(anv->adminq.tcb_dma_addr,
1095  	       anv->mmio_nvme + APPLE_NVMMU_ASQ_TCB_BASE);
1096  	writeq(anv->ioq.tcb_dma_addr,
1097  	       anv->mmio_nvme + APPLE_NVMMU_IOSQ_TCB_BASE);
1098  
1099  	anv->ctrl.sqsize =
1100  		APPLE_ANS_MAX_QUEUE_DEPTH - 1; /* 0's based queue depth */
1101  	anv->ctrl.cap = readq(anv->mmio_nvme + NVME_REG_CAP);
1102  
1103  	dev_dbg(anv->dev, "Enabling controller now");
1104  	ret = nvme_enable_ctrl(&anv->ctrl);
1105  	if (ret)
1106  		goto out;
1107  
1108  	dev_dbg(anv->dev, "Starting admin queue");
1109  	apple_nvme_init_queue(&anv->adminq);
1110  	nvme_unquiesce_admin_queue(&anv->ctrl);
1111  
1112  	if (!nvme_change_ctrl_state(&anv->ctrl, NVME_CTRL_CONNECTING)) {
1113  		dev_warn(anv->ctrl.device,
1114  			 "failed to mark controller CONNECTING\n");
1115  		ret = -ENODEV;
1116  		goto out;
1117  	}
1118  
1119  	ret = nvme_init_ctrl_finish(&anv->ctrl, false);
1120  	if (ret)
1121  		goto out;
1122  
1123  	dev_dbg(anv->dev, "Creating IOCQ");
1124  	ret = apple_nvme_create_cq(anv);
1125  	if (ret)
1126  		goto out;
1127  	dev_dbg(anv->dev, "Creating IOSQ");
1128  	ret = apple_nvme_create_sq(anv);
1129  	if (ret)
1130  		goto out_remove_cq;
1131  
1132  	apple_nvme_init_queue(&anv->ioq);
1133  	nr_io_queues = 1;
1134  	ret = nvme_set_queue_count(&anv->ctrl, &nr_io_queues);
1135  	if (ret)
1136  		goto out_remove_sq;
1137  	if (nr_io_queues != 1) {
1138  		ret = -ENXIO;
1139  		goto out_remove_sq;
1140  	}
1141  
1142  	anv->ctrl.queue_count = nr_io_queues + 1;
1143  
1144  	nvme_unquiesce_io_queues(&anv->ctrl);
1145  	nvme_wait_freeze(&anv->ctrl);
1146  	blk_mq_update_nr_hw_queues(&anv->tagset, 1);
1147  	nvme_unfreeze(&anv->ctrl);
1148  
1149  	if (!nvme_change_ctrl_state(&anv->ctrl, NVME_CTRL_LIVE)) {
1150  		dev_warn(anv->ctrl.device,
1151  			 "failed to mark controller live state\n");
1152  		ret = -ENODEV;
1153  		goto out_remove_sq;
1154  	}
1155  
1156  	nvme_start_ctrl(&anv->ctrl);
1157  
1158  	dev_dbg(anv->dev, "ANS boot and NVMe init completed.");
1159  	return;
1160  
1161  out_remove_sq:
1162  	apple_nvme_remove_sq(anv);
1163  out_remove_cq:
1164  	apple_nvme_remove_cq(anv);
1165  out:
1166  	dev_warn(anv->ctrl.device, "Reset failure status: %d\n", ret);
1167  	nvme_change_ctrl_state(&anv->ctrl, NVME_CTRL_DELETING);
1168  	nvme_get_ctrl(&anv->ctrl);
1169  	apple_nvme_disable(anv, false);
1170  	nvme_mark_namespaces_dead(&anv->ctrl);
1171  	if (!queue_work(nvme_wq, &anv->remove_work))
1172  		nvme_put_ctrl(&anv->ctrl);
1173  }
1174  
1175  static void apple_nvme_remove_dead_ctrl_work(struct work_struct *work)
1176  {
1177  	struct apple_nvme *anv =
1178  		container_of(work, struct apple_nvme, remove_work);
1179  
1180  	nvme_put_ctrl(&anv->ctrl);
1181  	device_release_driver(anv->dev);
1182  }
1183  
1184  static int apple_nvme_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val)
1185  {
1186  	*val = readl(ctrl_to_apple_nvme(ctrl)->mmio_nvme + off);
1187  	return 0;
1188  }
1189  
1190  static int apple_nvme_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val)
1191  {
1192  	writel(val, ctrl_to_apple_nvme(ctrl)->mmio_nvme + off);
1193  	return 0;
1194  }
1195  
1196  static int apple_nvme_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val)
1197  {
1198  	*val = readq(ctrl_to_apple_nvme(ctrl)->mmio_nvme + off);
1199  	return 0;
1200  }
1201  
1202  static int apple_nvme_get_address(struct nvme_ctrl *ctrl, char *buf, int size)
1203  {
1204  	struct device *dev = ctrl_to_apple_nvme(ctrl)->dev;
1205  
1206  	return snprintf(buf, size, "%s\n", dev_name(dev));
1207  }
1208  
1209  static void apple_nvme_free_ctrl(struct nvme_ctrl *ctrl)
1210  {
1211  	struct apple_nvme *anv = ctrl_to_apple_nvme(ctrl);
1212  
1213  	if (anv->ctrl.admin_q)
1214  		blk_put_queue(anv->ctrl.admin_q);
1215  	put_device(anv->dev);
1216  }
1217  
1218  static const struct nvme_ctrl_ops nvme_ctrl_ops = {
1219  	.name = "apple-nvme",
1220  	.module = THIS_MODULE,
1221  	.flags = 0,
1222  	.reg_read32 = apple_nvme_reg_read32,
1223  	.reg_write32 = apple_nvme_reg_write32,
1224  	.reg_read64 = apple_nvme_reg_read64,
1225  	.free_ctrl = apple_nvme_free_ctrl,
1226  	.get_address = apple_nvme_get_address,
1227  };
1228  
1229  static void apple_nvme_async_probe(void *data, async_cookie_t cookie)
1230  {
1231  	struct apple_nvme *anv = data;
1232  
1233  	flush_work(&anv->ctrl.reset_work);
1234  	flush_work(&anv->ctrl.scan_work);
1235  	nvme_put_ctrl(&anv->ctrl);
1236  }
1237  
1238  static void devm_apple_nvme_put_tag_set(void *data)
1239  {
1240  	blk_mq_free_tag_set(data);
1241  }
1242  
1243  static int apple_nvme_alloc_tagsets(struct apple_nvme *anv)
1244  {
1245  	int ret;
1246  
1247  	anv->admin_tagset.ops = &apple_nvme_mq_admin_ops;
1248  	anv->admin_tagset.nr_hw_queues = 1;
1249  	anv->admin_tagset.queue_depth = APPLE_NVME_AQ_MQ_TAG_DEPTH;
1250  	anv->admin_tagset.timeout = NVME_ADMIN_TIMEOUT;
1251  	anv->admin_tagset.numa_node = NUMA_NO_NODE;
1252  	anv->admin_tagset.cmd_size = sizeof(struct apple_nvme_iod);
1253  	anv->admin_tagset.flags = BLK_MQ_F_NO_SCHED;
1254  	anv->admin_tagset.driver_data = &anv->adminq;
1255  
1256  	ret = blk_mq_alloc_tag_set(&anv->admin_tagset);
1257  	if (ret)
1258  		return ret;
1259  	ret = devm_add_action_or_reset(anv->dev, devm_apple_nvme_put_tag_set,
1260  				       &anv->admin_tagset);
1261  	if (ret)
1262  		return ret;
1263  
1264  	anv->tagset.ops = &apple_nvme_mq_ops;
1265  	anv->tagset.nr_hw_queues = 1;
1266  	anv->tagset.nr_maps = 1;
1267  	/*
1268  	 * Tags are used as an index to the NVMMU and must be unique across
1269  	 * both queues. The admin queue gets the first APPLE_NVME_AQ_DEPTH which
1270  	 * must be marked as reserved in the IO queue.
1271  	 */
1272  	anv->tagset.reserved_tags = APPLE_NVME_AQ_DEPTH;
1273  	anv->tagset.queue_depth = APPLE_ANS_MAX_QUEUE_DEPTH - 1;
1274  	anv->tagset.timeout = NVME_IO_TIMEOUT;
1275  	anv->tagset.numa_node = NUMA_NO_NODE;
1276  	anv->tagset.cmd_size = sizeof(struct apple_nvme_iod);
1277  	anv->tagset.flags = BLK_MQ_F_SHOULD_MERGE;
1278  	anv->tagset.driver_data = &anv->ioq;
1279  
1280  	ret = blk_mq_alloc_tag_set(&anv->tagset);
1281  	if (ret)
1282  		return ret;
1283  	ret = devm_add_action_or_reset(anv->dev, devm_apple_nvme_put_tag_set,
1284  					&anv->tagset);
1285  	if (ret)
1286  		return ret;
1287  
1288  	anv->ctrl.admin_tagset = &anv->admin_tagset;
1289  	anv->ctrl.tagset = &anv->tagset;
1290  
1291  	return 0;
1292  }
1293  
1294  static int apple_nvme_queue_alloc(struct apple_nvme *anv,
1295  				  struct apple_nvme_queue *q)
1296  {
1297  	unsigned int depth = apple_nvme_queue_depth(q);
1298  
1299  	q->cqes = dmam_alloc_coherent(anv->dev,
1300  				      depth * sizeof(struct nvme_completion),
1301  				      &q->cq_dma_addr, GFP_KERNEL);
1302  	if (!q->cqes)
1303  		return -ENOMEM;
1304  
1305  	q->sqes = dmam_alloc_coherent(anv->dev,
1306  				      depth * sizeof(struct nvme_command),
1307  				      &q->sq_dma_addr, GFP_KERNEL);
1308  	if (!q->sqes)
1309  		return -ENOMEM;
1310  
1311  	/*
1312  	 * We need the maximum queue depth here because the NVMMU only has a
1313  	 * single depth configuration shared between both queues.
1314  	 */
1315  	q->tcbs = dmam_alloc_coherent(anv->dev,
1316  				      APPLE_ANS_MAX_QUEUE_DEPTH *
1317  					      sizeof(struct apple_nvmmu_tcb),
1318  				      &q->tcb_dma_addr, GFP_KERNEL);
1319  	if (!q->tcbs)
1320  		return -ENOMEM;
1321  
1322  	/*
1323  	 * initialize phase to make sure the allocated and empty memory
1324  	 * doesn't look like a full cq already.
1325  	 */
1326  	q->cq_phase = 1;
1327  	return 0;
1328  }
1329  
1330  static void apple_nvme_detach_genpd(struct apple_nvme *anv)
1331  {
1332  	int i;
1333  
1334  	if (anv->pd_count <= 1)
1335  		return;
1336  
1337  	for (i = anv->pd_count - 1; i >= 0; i--) {
1338  		if (anv->pd_link[i])
1339  			device_link_del(anv->pd_link[i]);
1340  		if (!IS_ERR_OR_NULL(anv->pd_dev[i]))
1341  			dev_pm_domain_detach(anv->pd_dev[i], true);
1342  	}
1343  }
1344  
1345  static int apple_nvme_attach_genpd(struct apple_nvme *anv)
1346  {
1347  	struct device *dev = anv->dev;
1348  	int i;
1349  
1350  	anv->pd_count = of_count_phandle_with_args(
1351  		dev->of_node, "power-domains", "#power-domain-cells");
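	/*
	 * A single power domain is attached by the driver core automatically;
	 * only multiple domains have to be attached and linked by hand here.
	 */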
1352  	if (anv->pd_count <= 1)
1353  		return 0;
1354  
1355  	anv->pd_dev = devm_kcalloc(dev, anv->pd_count, sizeof(*anv->pd_dev),
1356  				   GFP_KERNEL);
1357  	if (!anv->pd_dev)
1358  		return -ENOMEM;
1359  
1360  	anv->pd_link = devm_kcalloc(dev, anv->pd_count, sizeof(*anv->pd_link),
1361  				    GFP_KERNEL);
1362  	if (!anv->pd_link)
1363  		return -ENOMEM;
1364  
1365  	for (i = 0; i < anv->pd_count; i++) {
1366  		anv->pd_dev[i] = dev_pm_domain_attach_by_id(dev, i);
1367  		if (IS_ERR(anv->pd_dev[i])) {
1368  			apple_nvme_detach_genpd(anv);
1369  			return PTR_ERR(anv->pd_dev[i]);
1370  		}
1371  
1372  		anv->pd_link[i] = device_link_add(dev, anv->pd_dev[i],
1373  						  DL_FLAG_STATELESS |
1374  						  DL_FLAG_PM_RUNTIME |
1375  						  DL_FLAG_RPM_ACTIVE);
1376  		if (!anv->pd_link[i]) {
1377  			apple_nvme_detach_genpd(anv);
1378  			return -EINVAL;
1379  		}
1380  	}
1381  
1382  	return 0;
1383  }
1384  
1385  static void devm_apple_nvme_mempool_destroy(void *data)
1386  {
1387  	mempool_destroy(data);
1388  }
1389  
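/*
 * Allocate the controller and set up everything that does not need the ANS
 * firmware to be running: MMIO ranges, power domains, SART, reset control,
 * queue/TCB memory, DMA pools, tagsets, the shared IRQ, RTKit and nvme_ctrl.
 */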
1390  static struct apple_nvme *apple_nvme_alloc(struct platform_device *pdev)
1391  {
1392  	struct device *dev = &pdev->dev;
1393  	struct apple_nvme *anv;
1394  	int ret;
1395  
1396  	anv = devm_kzalloc(dev, sizeof(*anv), GFP_KERNEL);
1397  	if (!anv)
1398  		return ERR_PTR(-ENOMEM);
1399  
1400  	anv->dev = get_device(dev);
1401  	anv->adminq.is_adminq = true;
1402  	platform_set_drvdata(pdev, anv);
1403  
1404  	ret = apple_nvme_attach_genpd(anv);
1405  	if (ret < 0) {
1406  		dev_err_probe(dev, ret, "Failed to attach power domains");
1407  		goto put_dev;
1408  	}
1409  	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64))) {
1410  		ret = -ENXIO;
1411  		goto put_dev;
1412  	}
1413  
1414  	anv->irq = platform_get_irq(pdev, 0);
1415  	if (anv->irq < 0) {
1416  		ret = anv->irq;
1417  		goto put_dev;
1418  	}
1419  	if (!anv->irq) {
1420  		ret = -ENXIO;
1421  		goto put_dev;
1422  	}
1423  
1424  	anv->mmio_coproc = devm_platform_ioremap_resource_byname(pdev, "ans");
1425  	if (IS_ERR(anv->mmio_coproc)) {
1426  		ret = PTR_ERR(anv->mmio_coproc);
1427  		goto put_dev;
1428  	}
1429  	anv->mmio_nvme = devm_platform_ioremap_resource_byname(pdev, "nvme");
1430  	if (IS_ERR(anv->mmio_nvme)) {
1431  		ret = PTR_ERR(anv->mmio_nvme);
1432  		goto put_dev;
1433  	}
1434  
1435  	anv->adminq.sq_db = anv->mmio_nvme + APPLE_ANS_LINEAR_ASQ_DB;
1436  	anv->adminq.cq_db = anv->mmio_nvme + APPLE_ANS_ACQ_DB;
1437  	anv->ioq.sq_db = anv->mmio_nvme + APPLE_ANS_LINEAR_IOSQ_DB;
1438  	anv->ioq.cq_db = anv->mmio_nvme + APPLE_ANS_IOCQ_DB;
1439  
1440  	anv->sart = devm_apple_sart_get(dev);
1441  	if (IS_ERR(anv->sart)) {
1442  		ret = dev_err_probe(dev, PTR_ERR(anv->sart),
1443  				    "Failed to initialize SART");
1444  		goto put_dev;
1445  	}
1446  
1447  	anv->reset = devm_reset_control_array_get_exclusive(anv->dev);
1448  	if (IS_ERR(anv->reset)) {
1449  		ret = dev_err_probe(dev, PTR_ERR(anv->reset),
1450  				    "Failed to get reset control");
1451  		goto put_dev;
1452  	}
1453  
1454  	INIT_WORK(&anv->ctrl.reset_work, apple_nvme_reset_work);
1455  	INIT_WORK(&anv->remove_work, apple_nvme_remove_dead_ctrl_work);
1456  	spin_lock_init(&anv->lock);
1457  
1458  	ret = apple_nvme_queue_alloc(anv, &anv->adminq);
1459  	if (ret)
1460  		goto put_dev;
1461  	ret = apple_nvme_queue_alloc(anv, &anv->ioq);
1462  	if (ret)
1463  		goto put_dev;
1464  
1465  	anv->prp_page_pool = dmam_pool_create("prp list page", anv->dev,
1466  					      NVME_CTRL_PAGE_SIZE,
1467  					      NVME_CTRL_PAGE_SIZE, 0);
1468  	if (!anv->prp_page_pool) {
1469  		ret = -ENOMEM;
1470  		goto put_dev;
1471  	}
1472  
1473  	anv->prp_small_pool =
1474  		dmam_pool_create("prp list 256", anv->dev, 256, 256, 0);
1475  	if (!anv->prp_small_pool) {
1476  		ret = -ENOMEM;
1477  		goto put_dev;
1478  	}
1479  
1480  	WARN_ON_ONCE(apple_nvme_iod_alloc_size() > PAGE_SIZE);
1481  	anv->iod_mempool =
1482  		mempool_create_kmalloc_pool(1, apple_nvme_iod_alloc_size());
1483  	if (!anv->iod_mempool) {
1484  		ret = -ENOMEM;
1485  		goto put_dev;
1486  	}
1487  	ret = devm_add_action_or_reset(anv->dev,
1488  			devm_apple_nvme_mempool_destroy, anv->iod_mempool);
1489  	if (ret)
1490  		goto put_dev;
1491  
1492  	ret = apple_nvme_alloc_tagsets(anv);
1493  	if (ret)
1494  		goto put_dev;
1495  
1496  	ret = devm_request_irq(anv->dev, anv->irq, apple_nvme_irq, 0,
1497  			       "nvme-apple", anv);
1498  	if (ret) {
1499  		dev_err_probe(dev, ret, "Failed to request IRQ");
1500  		goto put_dev;
1501  	}
1502  
1503  	anv->rtk =
1504  		devm_apple_rtkit_init(dev, anv, NULL, 0, &apple_nvme_rtkit_ops);
1505  	if (IS_ERR(anv->rtk)) {
1506  		ret = dev_err_probe(dev, PTR_ERR(anv->rtk),
1507  				    "Failed to initialize RTKit");
1508  		goto put_dev;
1509  	}
1510  
1511  	ret = nvme_init_ctrl(&anv->ctrl, anv->dev, &nvme_ctrl_ops,
1512  			     NVME_QUIRK_SKIP_CID_GEN | NVME_QUIRK_IDENTIFY_CNS);
1513  	if (ret) {
1514  		dev_err_probe(dev, ret, "Failed to initialize nvme_ctrl");
1515  		goto put_dev;
1516  	}
1517  
1518  	return anv;
1519  put_dev:
1520  	put_device(anv->dev);
1521  	return ERR_PTR(ret);
1522  }
1523  
1524  static int apple_nvme_probe(struct platform_device *pdev)
1525  {
1526  	struct apple_nvme *anv;
1527  	int ret;
1528  
1529  	anv = apple_nvme_alloc(pdev);
1530  	if (IS_ERR(anv))
1531  		return PTR_ERR(anv);
1532  
1533  	anv->ctrl.admin_q = blk_mq_init_queue(&anv->admin_tagset);
1534  	if (IS_ERR(anv->ctrl.admin_q)) {
1535  		ret = -ENOMEM;
1536  		anv->ctrl.admin_q = NULL;
1537  		goto out_uninit_ctrl;
1538  	}
1539  
1540  	nvme_reset_ctrl(&anv->ctrl);
1541  	async_schedule(apple_nvme_async_probe, anv);
1542  
1543  	return 0;
1544  
1545  out_uninit_ctrl:
1546  	nvme_uninit_ctrl(&anv->ctrl);
1547  	nvme_put_ctrl(&anv->ctrl);
1548  	return ret;
1549  }
1550  
1551  static int apple_nvme_remove(struct platform_device *pdev)
1552  {
1553  	struct apple_nvme *anv = platform_get_drvdata(pdev);
1554  
1555  	nvme_change_ctrl_state(&anv->ctrl, NVME_CTRL_DELETING);
1556  	flush_work(&anv->ctrl.reset_work);
1557  	nvme_stop_ctrl(&anv->ctrl);
1558  	nvme_remove_namespaces(&anv->ctrl);
1559  	apple_nvme_disable(anv, true);
1560  	nvme_uninit_ctrl(&anv->ctrl);
1561  
1562  	if (apple_rtkit_is_running(anv->rtk))
1563  		apple_rtkit_shutdown(anv->rtk);
1564  
1565  	apple_nvme_detach_genpd(anv);
1566  
1567  	return 0;
1568  }
1569  
1570  static void apple_nvme_shutdown(struct platform_device *pdev)
1571  {
1572  	struct apple_nvme *anv = platform_get_drvdata(pdev);
1573  
1574  	apple_nvme_disable(anv, true);
1575  	if (apple_rtkit_is_running(anv->rtk))
1576  		apple_rtkit_shutdown(anv->rtk);
1577  }
1578  
1579  static int apple_nvme_resume(struct device *dev)
1580  {
1581  	struct apple_nvme *anv = dev_get_drvdata(dev);
1582  
1583  	return nvme_reset_ctrl(&anv->ctrl);
1584  }
1585  
1586  static int apple_nvme_suspend(struct device *dev)
1587  {
1588  	struct apple_nvme *anv = dev_get_drvdata(dev);
1589  	int ret = 0;
1590  
1591  	apple_nvme_disable(anv, true);
1592  
1593  	if (apple_rtkit_is_running(anv->rtk))
1594  		ret = apple_rtkit_shutdown(anv->rtk);
1595  
1596  	writel(0, anv->mmio_coproc + APPLE_ANS_COPROC_CPU_CONTROL);
1597  
1598  	return ret;
1599  }
1600  
1601  static DEFINE_SIMPLE_DEV_PM_OPS(apple_nvme_pm_ops, apple_nvme_suspend,
1602  				apple_nvme_resume);
1603  
1604  static const struct of_device_id apple_nvme_of_match[] = {
1605  	{ .compatible = "apple,nvme-ans2" },
1606  	{},
1607  };
1608  MODULE_DEVICE_TABLE(of, apple_nvme_of_match);
1609  
1610  static struct platform_driver apple_nvme_driver = {
1611  	.driver = {
1612  		.name = "nvme-apple",
1613  		.of_match_table = apple_nvme_of_match,
1614  		.pm = pm_sleep_ptr(&apple_nvme_pm_ops),
1615  	},
1616  	.probe = apple_nvme_probe,
1617  	.remove = apple_nvme_remove,
1618  	.shutdown = apple_nvme_shutdown,
1619  };
1620  module_platform_driver(apple_nvme_driver);
1621  
1622  MODULE_AUTHOR("Sven Peter <sven@svenpeter.dev>");
1623  MODULE_LICENSE("GPL");
1624