Lines matching refs:dev (NVMe PCI driver, drivers/nvme/host/pci.c)

111 static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown);
112 static void nvme_delete_io_queues(struct nvme_dev *dev);
113 static void nvme_update_attrs(struct nvme_dev *dev);
123 struct device *dev; member
190 struct nvme_dev *dev; member
243 static inline unsigned int nvme_dbbuf_size(struct nvme_dev *dev) in nvme_dbbuf_size() argument
245 return dev->nr_allocated_queues * 8 * dev->db_stride; in nvme_dbbuf_size()
248 static void nvme_dbbuf_dma_alloc(struct nvme_dev *dev) in nvme_dbbuf_dma_alloc() argument
250 unsigned int mem_size = nvme_dbbuf_size(dev); in nvme_dbbuf_dma_alloc()
252 if (!(dev->ctrl.oacs & NVME_CTRL_OACS_DBBUF_SUPP)) in nvme_dbbuf_dma_alloc()
255 if (dev->dbbuf_dbs) { in nvme_dbbuf_dma_alloc()
260 memset(dev->dbbuf_dbs, 0, mem_size); in nvme_dbbuf_dma_alloc()
261 memset(dev->dbbuf_eis, 0, mem_size); in nvme_dbbuf_dma_alloc()
265 dev->dbbuf_dbs = dma_alloc_coherent(dev->dev, mem_size, in nvme_dbbuf_dma_alloc()
266 &dev->dbbuf_dbs_dma_addr, in nvme_dbbuf_dma_alloc()
268 if (!dev->dbbuf_dbs) in nvme_dbbuf_dma_alloc()
270 dev->dbbuf_eis = dma_alloc_coherent(dev->dev, mem_size, in nvme_dbbuf_dma_alloc()
271 &dev->dbbuf_eis_dma_addr, in nvme_dbbuf_dma_alloc()
273 if (!dev->dbbuf_eis) in nvme_dbbuf_dma_alloc()
278 dma_free_coherent(dev->dev, mem_size, dev->dbbuf_dbs, in nvme_dbbuf_dma_alloc()
279 dev->dbbuf_dbs_dma_addr); in nvme_dbbuf_dma_alloc()
280 dev->dbbuf_dbs = NULL; in nvme_dbbuf_dma_alloc()
282 dev_warn(dev->dev, "unable to allocate dma for dbbuf\n"); in nvme_dbbuf_dma_alloc()
285 static void nvme_dbbuf_dma_free(struct nvme_dev *dev) in nvme_dbbuf_dma_free() argument
287 unsigned int mem_size = nvme_dbbuf_size(dev); in nvme_dbbuf_dma_free()
289 if (dev->dbbuf_dbs) { in nvme_dbbuf_dma_free()
290 dma_free_coherent(dev->dev, mem_size, in nvme_dbbuf_dma_free()
291 dev->dbbuf_dbs, dev->dbbuf_dbs_dma_addr); in nvme_dbbuf_dma_free()
292 dev->dbbuf_dbs = NULL; in nvme_dbbuf_dma_free()
294 if (dev->dbbuf_eis) { in nvme_dbbuf_dma_free()
295 dma_free_coherent(dev->dev, mem_size, in nvme_dbbuf_dma_free()
296 dev->dbbuf_eis, dev->dbbuf_eis_dma_addr); in nvme_dbbuf_dma_free()
297 dev->dbbuf_eis = NULL; in nvme_dbbuf_dma_free()
301 static void nvme_dbbuf_init(struct nvme_dev *dev, in nvme_dbbuf_init() argument
304 if (!dev->dbbuf_dbs || !qid) in nvme_dbbuf_init()
307 nvmeq->dbbuf_sq_db = &dev->dbbuf_dbs[sq_idx(qid, dev->db_stride)]; in nvme_dbbuf_init()
308 nvmeq->dbbuf_cq_db = &dev->dbbuf_dbs[cq_idx(qid, dev->db_stride)]; in nvme_dbbuf_init()
309 nvmeq->dbbuf_sq_ei = &dev->dbbuf_eis[sq_idx(qid, dev->db_stride)]; in nvme_dbbuf_init()
310 nvmeq->dbbuf_cq_ei = &dev->dbbuf_eis[cq_idx(qid, dev->db_stride)]; in nvme_dbbuf_init()
324 static void nvme_dbbuf_set(struct nvme_dev *dev) in nvme_dbbuf_set() argument
329 if (!dev->dbbuf_dbs) in nvme_dbbuf_set()
333 c.dbbuf.prp1 = cpu_to_le64(dev->dbbuf_dbs_dma_addr); in nvme_dbbuf_set()
334 c.dbbuf.prp2 = cpu_to_le64(dev->dbbuf_eis_dma_addr); in nvme_dbbuf_set()
336 if (nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0)) { in nvme_dbbuf_set()
337 dev_warn(dev->ctrl.device, "unable to set dbbuf\n"); in nvme_dbbuf_set()
339 nvme_dbbuf_dma_free(dev); in nvme_dbbuf_set()
341 for (i = 1; i <= dev->online_queues; i++) in nvme_dbbuf_set()
342 nvme_dbbuf_free(&dev->queues[i]); in nvme_dbbuf_set()
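The dbbuf lines above all revolve around one small piece of arithmetic: the shadow-doorbell region holds two u32 slots per queue, scaled by the doorbell stride. Below is a minimal user-space sketch of that sizing and indexing; the sq_idx()/cq_idx() helper names are taken from the listing, but their bodies here are an assumption based on the usual doorbell layout, not a verbatim copy of pci.c.

```c
/* Sketch of the shadow-doorbell (DBBUF) layout behind nvme_dbbuf_size()
 * and nvme_dbbuf_init(): 2 u32 slots per queue, scaled by db_stride.
 */
#include <stdio.h>
#include <stdint.h>

/* index of a queue's SQ-tail / CQ-head slot in the u32 dbbuf array */
static inline unsigned int sq_idx(unsigned int qid, uint32_t stride)
{
	return qid * 2 * stride;
}

static inline unsigned int cq_idx(unsigned int qid, uint32_t stride)
{
	return (qid * 2 + 1) * stride;
}

/* bytes needed for one dbbuf region: 2 slots * 4 bytes, scaled by stride */
static inline unsigned int dbbuf_size(unsigned int nr_allocated_queues,
				      uint32_t db_stride)
{
	return nr_allocated_queues * 8 * db_stride;
}

int main(void)
{
	uint32_t stride = 1;		/* CAP.DSTRD == 0 -> stride 1 */
	unsigned int nr_queues = 5;	/* admin queue + 4 I/O queues */

	printf("dbbuf region: %u bytes\n", dbbuf_size(nr_queues, stride));
	for (unsigned int qid = 0; qid < nr_queues; qid++)
		printf("qid %u: sq slot %u, cq slot %u\n",
		       qid, sq_idx(qid, stride), cq_idx(qid, stride));
	return 0;
}
```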
398 struct nvme_dev *dev = to_nvme_dev(data); in nvme_admin_init_hctx() local
399 struct nvme_queue *nvmeq = &dev->queues[0]; in nvme_admin_init_hctx()
402 WARN_ON(dev->admin_tagset.tags[0] != hctx->tags); in nvme_admin_init_hctx()
411 struct nvme_dev *dev = to_nvme_dev(data); in nvme_init_hctx() local
412 struct nvme_queue *nvmeq = &dev->queues[hctx_idx + 1]; in nvme_init_hctx()
414 WARN_ON(dev->tagset.tags[hctx_idx] != hctx->tags); in nvme_init_hctx()
430 static int queue_irq_offset(struct nvme_dev *dev) in queue_irq_offset() argument
433 if (dev->num_vecs > 1) in queue_irq_offset()
441 struct nvme_dev *dev = to_nvme_dev(set->driver_data); in nvme_pci_map_queues() local
444 offset = queue_irq_offset(dev); in nvme_pci_map_queues()
448 map->nr_queues = dev->io_queues[i]; in nvme_pci_map_queues()
460 blk_mq_pci_map_queues(map, to_pci_dev(dev->dev), offset); in nvme_pci_map_queues()
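The queue_irq_offset() check listed above encodes one simple rule. A tiny sketch of that rule, assuming the usual interpretation of dev->num_vecs (total MSI/MSI-X vectors granted):

```c
/* Sketch of queue_irq_offset(): when more than one vector was granted,
 * vector 0 stays with the admin queue, so the I/O-queue IRQ mapping
 * starts at offset 1; with a single shared vector the offset is 0.
 */
#include <stdio.h>

static int queue_irq_offset(unsigned int num_vecs)
{
	/* admin queue keeps vector 0 to itself when it can */
	return num_vecs > 1 ? 1 : 0;
}

int main(void)
{
	printf("9 vectors -> offset %d, 1 vector -> offset %d\n",
	       queue_irq_offset(9), queue_irq_offset(1));
	return 0;
}
```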
507 static inline bool nvme_pci_use_sgls(struct nvme_dev *dev, struct request *req, in nvme_pci_use_sgls() argument
515 if (!nvme_ctrl_sgl_supported(&dev->ctrl)) in nvme_pci_use_sgls()
524 static void nvme_free_prps(struct nvme_dev *dev, struct request *req) in nvme_free_prps() argument
535 dma_pool_free(dev->prp_page_pool, prp_list, dma_addr); in nvme_free_prps()
540 static void nvme_unmap_data(struct nvme_dev *dev, struct request *req) in nvme_unmap_data() argument
545 dma_unmap_page(dev->dev, iod->first_dma, iod->dma_len, in nvme_unmap_data()
552 dma_unmap_sgtable(dev->dev, &iod->sgt, rq_dma_dir(req), 0); in nvme_unmap_data()
555 dma_pool_free(dev->prp_small_pool, iod->list[0].sg_list, in nvme_unmap_data()
558 dma_pool_free(dev->prp_page_pool, iod->list[0].sg_list, in nvme_unmap_data()
561 nvme_free_prps(dev, req); in nvme_unmap_data()
562 mempool_free(iod->sgt.sgl, dev->iod_mempool); in nvme_unmap_data()
579 static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev, in nvme_pci_setup_prps() argument
615 pool = dev->prp_small_pool; in nvme_pci_setup_prps()
618 pool = dev->prp_page_pool; in nvme_pci_setup_prps()
660 nvme_free_prps(dev, req); in nvme_pci_setup_prps()
685 static blk_status_t nvme_pci_setup_sgls(struct nvme_dev *dev, in nvme_pci_setup_sgls() argument
705 pool = dev->prp_small_pool; in nvme_pci_setup_sgls()
708 pool = dev->prp_page_pool; in nvme_pci_setup_sgls()
730 static blk_status_t nvme_setup_prp_simple(struct nvme_dev *dev, in nvme_setup_prp_simple() argument
738 iod->first_dma = dma_map_bvec(dev->dev, bv, rq_dma_dir(req), 0); in nvme_setup_prp_simple()
739 if (dma_mapping_error(dev->dev, iod->first_dma)) in nvme_setup_prp_simple()
751 static blk_status_t nvme_setup_sgl_simple(struct nvme_dev *dev, in nvme_setup_sgl_simple() argument
757 iod->first_dma = dma_map_bvec(dev->dev, bv, rq_dma_dir(req), 0); in nvme_setup_sgl_simple()
758 if (dma_mapping_error(dev->dev, iod->first_dma)) in nvme_setup_sgl_simple()
769 static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req, in nvme_map_data() argument
783 return nvme_setup_prp_simple(dev, req, in nvme_map_data()
787 nvme_ctrl_sgl_supported(&dev->ctrl)) in nvme_map_data()
788 return nvme_setup_sgl_simple(dev, req, in nvme_map_data()
794 iod->sgt.sgl = mempool_alloc(dev->iod_mempool, GFP_ATOMIC); in nvme_map_data()
802 rc = dma_map_sgtable(dev->dev, &iod->sgt, rq_dma_dir(req), in nvme_map_data()
810 if (nvme_pci_use_sgls(dev, req, iod->sgt.nents)) in nvme_map_data()
811 ret = nvme_pci_setup_sgls(dev, req, &cmnd->rw); in nvme_map_data()
813 ret = nvme_pci_setup_prps(dev, req, &cmnd->rw); in nvme_map_data()
819 dma_unmap_sgtable(dev->dev, &iod->sgt, rq_dma_dir(req), 0); in nvme_map_data()
821 mempool_free(iod->sgt.sgl, dev->iod_mempool); in nvme_map_data()
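The prp_small_pool/prp_page_pool references in nvme_pci_setup_prps() come down to how many 8-byte PRP entries a transfer needs once the first controller page is covered by PRP1. The sketch below reproduces that bookkeeping in user space; the 4 KiB controller page size and the 256/8 small-list threshold are assumptions based on the upstream driver, and transfers that fit in one or two controller pages skip the PRP list entirely.

```c
/* Rough sketch of the PRP accounting behind nvme_pci_setup_prps(). */
#include <stdio.h>
#include <stdint.h>

#define CTRL_PAGE_SIZE	4096u
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	uint64_t dma_addr = 0x1000200;	/* first segment, not page aligned */
	uint32_t length = 64 * 1024;	/* 64 KiB transfer */

	/* PRP1 covers from dma_addr to the end of its controller page ... */
	uint32_t first = CTRL_PAGE_SIZE - (dma_addr & (CTRL_PAGE_SIZE - 1));
	uint32_t remaining = length > first ? length - first : 0;

	/* ... every further page needs one 8-byte PRP entry */
	uint32_t nprps = DIV_ROUND_UP(remaining, CTRL_PAGE_SIZE);

	/* small PRP lists fit in a 256-byte pool entry, big ones need a page */
	const char *pool = (nprps <= 256 / 8) ? "prp_small_pool" : "prp_page_pool";

	printf("transfer %u bytes: %u PRP list entries, %s\n",
	       length, nprps, pool);
	return 0;
}
```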
825 static blk_status_t nvme_map_metadata(struct nvme_dev *dev, struct request *req, in nvme_map_metadata() argument
831 iod->meta_dma = dma_map_bvec(dev->dev, &bv, rq_dma_dir(req), 0); in nvme_map_metadata()
832 if (dma_mapping_error(dev->dev, iod->meta_dma)) in nvme_map_metadata()
838 static blk_status_t nvme_prep_rq(struct nvme_dev *dev, struct request *req) in nvme_prep_rq() argument
852 ret = nvme_map_data(dev, req, &iod->cmd); in nvme_prep_rq()
858 ret = nvme_map_metadata(dev, req, &iod->cmd); in nvme_prep_rq()
867 nvme_unmap_data(dev, req); in nvme_prep_rq()
880 struct nvme_dev *dev = nvmeq->dev; in nvme_queue_rq() local
892 if (unlikely(!nvme_check_ready(&dev->ctrl, req, true))) in nvme_queue_rq()
893 return nvme_fail_nonready_command(&dev->ctrl, req); in nvme_queue_rq()
895 ret = nvme_prep_rq(dev, req); in nvme_queue_rq()
926 if (unlikely(!nvme_check_ready(&nvmeq->dev->ctrl, req, true))) in nvme_prep_rq_batch()
930 return nvme_prep_rq(nvmeq->dev, req) == BLK_STS_OK; in nvme_prep_rq_batch()
966 struct nvme_dev *dev = nvmeq->dev; in nvme_pci_unmap_rq() local
971 dma_unmap_page(dev->dev, iod->meta_dma, in nvme_pci_unmap_rq()
976 nvme_unmap_data(dev, req); in nvme_pci_unmap_rq()
1004 writel(head, nvmeq->q_db + nvmeq->dev->db_stride); in nvme_ring_cq_doorbell()
1010 return nvmeq->dev->admin_tagset.tags[0]; in nvme_queue_tagset()
1011 return nvmeq->dev->tagset.tags[nvmeq->qid - 1]; in nvme_queue_tagset()
1028 nvme_complete_async_event(&nvmeq->dev->ctrl, in nvme_handle_cqe()
1035 dev_warn(nvmeq->dev->ctrl.device, in nvme_handle_cqe()
1109 struct pci_dev *pdev = to_pci_dev(nvmeq->dev->dev); in nvme_poll_irqdisable()
1135 struct nvme_dev *dev = to_nvme_dev(ctrl); in nvme_pci_submit_async_event() local
1136 struct nvme_queue *nvmeq = &dev->queues[0]; in nvme_pci_submit_async_event()
1148 static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id) in adapter_delete_queue() argument
1155 return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0); in adapter_delete_queue()
1158 static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid, in adapter_alloc_cq() argument
1178 return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0); in adapter_alloc_cq()
1181 static int adapter_alloc_sq(struct nvme_dev *dev, u16 qid, in adapter_alloc_sq() argument
1184 struct nvme_ctrl *ctrl = &dev->ctrl; in adapter_alloc_sq()
1207 return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0); in adapter_alloc_sq()
1210 static int adapter_delete_cq(struct nvme_dev *dev, u16 cqid) in adapter_delete_cq() argument
1212 return adapter_delete_queue(dev, nvme_admin_delete_cq, cqid); in adapter_delete_cq()
1215 static int adapter_delete_sq(struct nvme_dev *dev, u16 sqid) in adapter_delete_sq() argument
1217 return adapter_delete_queue(dev, nvme_admin_delete_sq, sqid); in adapter_delete_sq()
1224 dev_warn(nvmeq->dev->ctrl.device, in abort_endio()
1226 atomic_inc(&nvmeq->dev->ctrl.abort_limit); in abort_endio()
1231 static bool nvme_should_reset(struct nvme_dev *dev, u32 csts) in nvme_should_reset() argument
1236 bool nssro = dev->subsystem && (csts & NVME_CSTS_NSSRO); in nvme_should_reset()
1239 switch (nvme_ctrl_state(&dev->ctrl)) { in nvme_should_reset()
1256 static void nvme_warn_reset(struct nvme_dev *dev, u32 csts) in nvme_warn_reset() argument
1262 result = pci_read_config_word(to_pci_dev(dev->dev), PCI_STATUS, in nvme_warn_reset()
1265 dev_warn(dev->ctrl.device, in nvme_warn_reset()
1269 dev_warn(dev->ctrl.device, in nvme_warn_reset()
1276 dev_warn(dev->ctrl.device, in nvme_warn_reset()
1278 dev_warn(dev->ctrl.device, in nvme_warn_reset()
1286 struct nvme_dev *dev = nvmeq->dev; in nvme_timeout() local
1289 u32 csts = readl(dev->bar + NVME_REG_CSTS); in nvme_timeout()
1291 if (nvme_state_terminal(&dev->ctrl)) in nvme_timeout()
1298 if (pci_channel_offline(to_pci_dev(dev->dev))) in nvme_timeout()
1304 if (nvme_should_reset(dev, csts)) { in nvme_timeout()
1305 nvme_warn_reset(dev, csts); in nvme_timeout()
1318 dev_warn(dev->ctrl.device, in nvme_timeout()
1330 switch (nvme_ctrl_state(&dev->ctrl)) { in nvme_timeout()
1332 nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING); in nvme_timeout()
1335 dev_warn_ratelimited(dev->ctrl.device, in nvme_timeout()
1339 nvme_dev_disable(dev, true); in nvme_timeout()
1353 dev_warn(dev->ctrl.device, in nvme_timeout()
1360 if (atomic_dec_return(&dev->ctrl.abort_limit) < 0) { in nvme_timeout()
1361 atomic_inc(&dev->ctrl.abort_limit); in nvme_timeout()
1370 dev_warn(nvmeq->dev->ctrl.device, in nvme_timeout()
1376 abort_req = blk_mq_alloc_request(dev->ctrl.admin_q, nvme_req_op(&cmd), in nvme_timeout()
1379 atomic_inc(&dev->ctrl.abort_limit); in nvme_timeout()
1396 if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_RESETTING)) { in nvme_timeout()
1397 if (nvme_state_terminal(&dev->ctrl)) in nvme_timeout()
1398 nvme_dev_disable(dev, true); in nvme_timeout()
1402 nvme_dev_disable(dev, false); in nvme_timeout()
1403 if (nvme_try_sched_reset(&dev->ctrl)) in nvme_timeout()
1404 nvme_unquiesce_io_queues(&dev->ctrl); in nvme_timeout()
1410 dma_free_coherent(nvmeq->dev->dev, CQ_SIZE(nvmeq), in nvme_free_queue()
1416 pci_free_p2pmem(to_pci_dev(nvmeq->dev->dev), in nvme_free_queue()
1419 dma_free_coherent(nvmeq->dev->dev, SQ_SIZE(nvmeq), in nvme_free_queue()
1424 static void nvme_free_queues(struct nvme_dev *dev, int lowest) in nvme_free_queues() argument
1428 for (i = dev->ctrl.queue_count - 1; i >= lowest; i--) { in nvme_free_queues()
1429 dev->ctrl.queue_count--; in nvme_free_queues()
1430 nvme_free_queue(&dev->queues[i]); in nvme_free_queues()
1434 static void nvme_suspend_queue(struct nvme_dev *dev, unsigned int qid) in nvme_suspend_queue() argument
1436 struct nvme_queue *nvmeq = &dev->queues[qid]; in nvme_suspend_queue()
1444 nvmeq->dev->online_queues--; in nvme_suspend_queue()
1445 if (!nvmeq->qid && nvmeq->dev->ctrl.admin_q) in nvme_suspend_queue()
1446 nvme_quiesce_admin_queue(&nvmeq->dev->ctrl); in nvme_suspend_queue()
1448 pci_free_irq(to_pci_dev(dev->dev), nvmeq->cq_vector, nvmeq); in nvme_suspend_queue()
1451 static void nvme_suspend_io_queues(struct nvme_dev *dev) in nvme_suspend_io_queues() argument
1455 for (i = dev->ctrl.queue_count - 1; i > 0; i--) in nvme_suspend_io_queues()
1456 nvme_suspend_queue(dev, i); in nvme_suspend_io_queues()
1465 static void nvme_reap_pending_cqes(struct nvme_dev *dev) in nvme_reap_pending_cqes() argument
1469 for (i = dev->ctrl.queue_count - 1; i > 0; i--) { in nvme_reap_pending_cqes()
1470 spin_lock(&dev->queues[i].cq_poll_lock); in nvme_reap_pending_cqes()
1471 nvme_poll_cq(&dev->queues[i], NULL); in nvme_reap_pending_cqes()
1472 spin_unlock(&dev->queues[i].cq_poll_lock); in nvme_reap_pending_cqes()
1476 static int nvme_cmb_qdepth(struct nvme_dev *dev, int nr_io_queues, in nvme_cmb_qdepth() argument
1479 int q_depth = dev->q_depth; in nvme_cmb_qdepth()
1483 if (q_size_aligned * nr_io_queues > dev->cmb_size) { in nvme_cmb_qdepth()
1484 u64 mem_per_q = div_u64(dev->cmb_size, nr_io_queues); in nvme_cmb_qdepth()
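nvme_cmb_qdepth() shrinks the per-queue depth when the requested queues would not fit in the controller memory buffer. A user-space sketch of that fitting check follows; the 4 KiB page size and the concrete queue/entry sizes are illustrative assumptions.

```c
/* Sketch of the sizing check in nvme_cmb_qdepth(): give every queue an
 * equal, page-aligned slice of the CMB and derive the depth from it.
 */
#include <stdio.h>
#include <stdint.h>

#define PAGE 4096ull

static unsigned int cmb_qdepth(unsigned int q_depth, unsigned int nr_io_queues,
			       unsigned int entry_size, uint64_t cmb_size)
{
	/* round each queue up to a whole number of pages */
	uint64_t q_size_aligned =
		((uint64_t)q_depth * entry_size + PAGE - 1) & ~(PAGE - 1);

	if (q_size_aligned * nr_io_queues > cmb_size) {
		uint64_t mem_per_q = cmb_size / nr_io_queues;

		mem_per_q &= ~(PAGE - 1);	/* page-align downwards */
		q_depth = mem_per_q / entry_size;
	}
	return q_depth;
}

int main(void)
{
	/* 8 queues of 1024 64-byte SQEs need 512 KiB; the CMB has 256 KiB */
	printf("fitted q_depth = %u\n", cmb_qdepth(1024, 8, 64, 256 * 1024));
	return 0;
}
```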
1501 static int nvme_alloc_sq_cmds(struct nvme_dev *dev, struct nvme_queue *nvmeq, in nvme_alloc_sq_cmds() argument
1504 struct pci_dev *pdev = to_pci_dev(dev->dev); in nvme_alloc_sq_cmds()
1506 if (qid && dev->cmb_use_sqes && (dev->cmbsz & NVME_CMBSZ_SQS)) { in nvme_alloc_sq_cmds()
1520 nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(nvmeq), in nvme_alloc_sq_cmds()
1527 static int nvme_alloc_queue(struct nvme_dev *dev, int qid, int depth) in nvme_alloc_queue() argument
1529 struct nvme_queue *nvmeq = &dev->queues[qid]; in nvme_alloc_queue()
1531 if (dev->ctrl.queue_count > qid) in nvme_alloc_queue()
1534 nvmeq->sqes = qid ? dev->io_sqes : NVME_ADM_SQES; in nvme_alloc_queue()
1536 nvmeq->cqes = dma_alloc_coherent(dev->dev, CQ_SIZE(nvmeq), in nvme_alloc_queue()
1541 if (nvme_alloc_sq_cmds(dev, nvmeq, qid)) in nvme_alloc_queue()
1544 nvmeq->dev = dev; in nvme_alloc_queue()
1549 nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride]; in nvme_alloc_queue()
1551 dev->ctrl.queue_count++; in nvme_alloc_queue()
1556 dma_free_coherent(dev->dev, CQ_SIZE(nvmeq), (void *)nvmeq->cqes, in nvme_alloc_queue()
1564 struct pci_dev *pdev = to_pci_dev(nvmeq->dev->dev); in queue_request_irq()
1565 int nr = nvmeq->dev->ctrl.instance; in queue_request_irq()
1578 struct nvme_dev *dev = nvmeq->dev; in nvme_init_queue() local
1584 nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride]; in nvme_init_queue()
1586 nvme_dbbuf_init(dev, nvmeq, qid); in nvme_init_queue()
1587 dev->online_queues++; in nvme_init_queue()
1594 static int nvme_setup_io_queues_trylock(struct nvme_dev *dev) in nvme_setup_io_queues_trylock() argument
1599 if (!mutex_trylock(&dev->shutdown_lock)) in nvme_setup_io_queues_trylock()
1605 if (nvme_ctrl_state(&dev->ctrl) != NVME_CTRL_CONNECTING) { in nvme_setup_io_queues_trylock()
1606 mutex_unlock(&dev->shutdown_lock); in nvme_setup_io_queues_trylock()
1615 struct nvme_dev *dev = nvmeq->dev; in nvme_create_queue() local
1626 vector = dev->num_vecs == 1 ? 0 : qid; in nvme_create_queue()
1630 result = adapter_alloc_cq(dev, qid, nvmeq, vector); in nvme_create_queue()
1634 result = adapter_alloc_sq(dev, qid, nvmeq); in nvme_create_queue()
1642 result = nvme_setup_io_queues_trylock(dev); in nvme_create_queue()
1653 mutex_unlock(&dev->shutdown_lock); in nvme_create_queue()
1657 dev->online_queues--; in nvme_create_queue()
1658 mutex_unlock(&dev->shutdown_lock); in nvme_create_queue()
1659 adapter_delete_sq(dev, qid); in nvme_create_queue()
1661 adapter_delete_cq(dev, qid); in nvme_create_queue()
1685 static void nvme_dev_remove_admin(struct nvme_dev *dev) in nvme_dev_remove_admin() argument
1687 if (dev->ctrl.admin_q && !blk_queue_dying(dev->ctrl.admin_q)) { in nvme_dev_remove_admin()
1693 nvme_unquiesce_admin_queue(&dev->ctrl); in nvme_dev_remove_admin()
1694 nvme_remove_admin_tag_set(&dev->ctrl); in nvme_dev_remove_admin()
1698 static unsigned long db_bar_size(struct nvme_dev *dev, unsigned nr_io_queues) in db_bar_size() argument
1700 return NVME_REG_DBS + ((nr_io_queues + 1) * 8 * dev->db_stride); in db_bar_size()
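db_bar_size() above is plain arithmetic over the doorbell layout: registers start at BAR0 offset 0x1000 (NVME_REG_DBS) and each queue pair consumes 8 * db_stride bytes, with the "+ 1" accounting for the admin queue. A standalone sketch:

```c
/* Sketch of db_bar_size(): how many BAR bytes must be mapped to reach the
 * last doorbell register for a given number of I/O queues.
 */
#include <stdio.h>

#define NVME_REG_DBS	0x1000u

static unsigned long db_bar_size(unsigned int nr_io_queues, unsigned int db_stride)
{
	return NVME_REG_DBS + (nr_io_queues + 1) * 8 * db_stride;
}

int main(void)
{
	/* 64 I/O queues with stride 1 -> map at least 0x1000 + 65*8 bytes */
	printf("BAR mapping needed: %lu bytes\n", db_bar_size(64, 1));
	return 0;
}
```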
1703 static int nvme_remap_bar(struct nvme_dev *dev, unsigned long size) in nvme_remap_bar() argument
1705 struct pci_dev *pdev = to_pci_dev(dev->dev); in nvme_remap_bar()
1707 if (size <= dev->bar_mapped_size) in nvme_remap_bar()
1711 if (dev->bar) in nvme_remap_bar()
1712 iounmap(dev->bar); in nvme_remap_bar()
1713 dev->bar = ioremap(pci_resource_start(pdev, 0), size); in nvme_remap_bar()
1714 if (!dev->bar) { in nvme_remap_bar()
1715 dev->bar_mapped_size = 0; in nvme_remap_bar()
1718 dev->bar_mapped_size = size; in nvme_remap_bar()
1719 dev->dbs = dev->bar + NVME_REG_DBS; in nvme_remap_bar()
1724 static int nvme_pci_configure_admin_queue(struct nvme_dev *dev) in nvme_pci_configure_admin_queue() argument
1730 result = nvme_remap_bar(dev, db_bar_size(dev, 0)); in nvme_pci_configure_admin_queue()
1734 dev->subsystem = readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 1, 0) ? in nvme_pci_configure_admin_queue()
1735 NVME_CAP_NSSRC(dev->ctrl.cap) : 0; in nvme_pci_configure_admin_queue()
1737 if (dev->subsystem && in nvme_pci_configure_admin_queue()
1738 (readl(dev->bar + NVME_REG_CSTS) & NVME_CSTS_NSSRO)) in nvme_pci_configure_admin_queue()
1739 writel(NVME_CSTS_NSSRO, dev->bar + NVME_REG_CSTS); in nvme_pci_configure_admin_queue()
1748 result = nvme_disable_ctrl(&dev->ctrl, false); in nvme_pci_configure_admin_queue()
1752 result = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH); in nvme_pci_configure_admin_queue()
1756 dev->ctrl.numa_node = dev_to_node(dev->dev); in nvme_pci_configure_admin_queue()
1758 nvmeq = &dev->queues[0]; in nvme_pci_configure_admin_queue()
1762 writel(aqa, dev->bar + NVME_REG_AQA); in nvme_pci_configure_admin_queue()
1763 lo_hi_writeq(nvmeq->sq_dma_addr, dev->bar + NVME_REG_ASQ); in nvme_pci_configure_admin_queue()
1764 lo_hi_writeq(nvmeq->cq_dma_addr, dev->bar + NVME_REG_ACQ); in nvme_pci_configure_admin_queue()
1766 result = nvme_enable_ctrl(&dev->ctrl); in nvme_pci_configure_admin_queue()
1774 dev->online_queues--; in nvme_pci_configure_admin_queue()
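The writel(aqa, ...) and ASQ/ACQ writes listed above program the admin queue into the controller. The sketch below shows only the AQA packing, assuming the NVMe-spec field layout (ASQS in bits 11:0, ACQS in bits 27:16) and the driver's usual 32-entry admin depth; it is not copied from pci.c.

```c
/* Sketch of the AQA value written by nvme_pci_configure_admin_queue():
 * the 0's-based admin SQ and CQ sizes packed into one 32-bit register.
 */
#include <stdio.h>
#include <stdint.h>

#define NVME_AQ_DEPTH 32u

int main(void)
{
	uint32_t aqa = NVME_AQ_DEPTH - 1;	/* 0's based submission size */

	aqa |= aqa << 16;			/* same 0's based completion size */
	printf("AQA register value: 0x%08x\n", aqa);	/* 0x001f001f */
	return 0;
}
```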
1782 static int nvme_create_io_queues(struct nvme_dev *dev) in nvme_create_io_queues() argument
1787 for (i = dev->ctrl.queue_count; i <= dev->max_qid; i++) { in nvme_create_io_queues()
1788 if (nvme_alloc_queue(dev, i, dev->q_depth)) { in nvme_create_io_queues()
1794 max = min(dev->max_qid, dev->ctrl.queue_count - 1); in nvme_create_io_queues()
1795 if (max != 1 && dev->io_queues[HCTX_TYPE_POLL]) { in nvme_create_io_queues()
1796 rw_queues = dev->io_queues[HCTX_TYPE_DEFAULT] + in nvme_create_io_queues()
1797 dev->io_queues[HCTX_TYPE_READ]; in nvme_create_io_queues()
1802 for (i = dev->online_queues; i <= max; i++) { in nvme_create_io_queues()
1805 ret = nvme_create_queue(&dev->queues[i], i, polled); in nvme_create_io_queues()
1819 static u64 nvme_cmb_size_unit(struct nvme_dev *dev) in nvme_cmb_size_unit() argument
1821 u8 szu = (dev->cmbsz >> NVME_CMBSZ_SZU_SHIFT) & NVME_CMBSZ_SZU_MASK; in nvme_cmb_size_unit()
1826 static u32 nvme_cmb_size(struct nvme_dev *dev) in nvme_cmb_size() argument
1828 return (dev->cmbsz >> NVME_CMBSZ_SZ_SHIFT) & NVME_CMBSZ_SZ_MASK; in nvme_cmb_size()
1831 static void nvme_map_cmb(struct nvme_dev *dev) in nvme_map_cmb() argument
1835 struct pci_dev *pdev = to_pci_dev(dev->dev); in nvme_map_cmb()
1838 if (dev->cmb_size) in nvme_map_cmb()
1841 if (NVME_CAP_CMBS(dev->ctrl.cap)) in nvme_map_cmb()
1842 writel(NVME_CMBMSC_CRE, dev->bar + NVME_REG_CMBMSC); in nvme_map_cmb()
1844 dev->cmbsz = readl(dev->bar + NVME_REG_CMBSZ); in nvme_map_cmb()
1845 if (!dev->cmbsz) in nvme_map_cmb()
1847 dev->cmbloc = readl(dev->bar + NVME_REG_CMBLOC); in nvme_map_cmb()
1849 size = nvme_cmb_size_unit(dev) * nvme_cmb_size(dev); in nvme_map_cmb()
1850 offset = nvme_cmb_size_unit(dev) * NVME_CMB_OFST(dev->cmbloc); in nvme_map_cmb()
1851 bar = NVME_CMB_BIR(dev->cmbloc); in nvme_map_cmb()
1861 if (NVME_CAP_CMBS(dev->ctrl.cap)) { in nvme_map_cmb()
1864 dev->bar + NVME_REG_CMBMSC); in nvme_map_cmb()
1876 dev_warn(dev->ctrl.device, in nvme_map_cmb()
1881 dev->cmb_size = size; in nvme_map_cmb()
1882 dev->cmb_use_sqes = use_cmb_sqes && (dev->cmbsz & NVME_CMBSZ_SQS); in nvme_map_cmb()
1884 if ((dev->cmbsz & (NVME_CMBSZ_WDS | NVME_CMBSZ_RDS)) == in nvme_map_cmb()
1888 nvme_update_attrs(dev); in nvme_map_cmb()
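nvme_cmb_size_unit() and nvme_cmb_size() decode the CMBSZ register read in nvme_map_cmb(). The sketch below assumes the NVMe-spec bit positions (SZU in bits 11:8, SZ in bits 31:12); the register value is made up.

```c
/* Sketch of the CMBSZ decoding: total CMB size = SZ units of
 * 4 KiB << (4 * SZU) bytes each.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t cmbsz = 0x00100001;		/* example readout: SZ=256, SZU=0 */
	uint8_t szu = (cmbsz >> 8) & 0xf;
	uint32_t sz = (cmbsz >> 12) & 0xfffff;
	uint64_t unit = 1ull << (12 + 4 * szu);	/* 4 KiB, 64 KiB, 1 MiB, ... */

	printf("CMB size = %u units of %llu bytes = %llu bytes\n",
	       sz, (unsigned long long)unit,
	       (unsigned long long)(sz * unit));
	return 0;
}
```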
1891 static int nvme_set_host_mem(struct nvme_dev *dev, u32 bits) in nvme_set_host_mem() argument
1893 u32 host_mem_size = dev->host_mem_size >> NVME_CTRL_PAGE_SHIFT; in nvme_set_host_mem()
1894 u64 dma_addr = dev->host_mem_descs_dma; in nvme_set_host_mem()
1904 c.features.dword15 = cpu_to_le32(dev->nr_host_mem_descs); in nvme_set_host_mem()
1906 ret = nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0); in nvme_set_host_mem()
1908 dev_warn(dev->ctrl.device, in nvme_set_host_mem()
1912 dev->hmb = bits & NVME_HOST_MEM_ENABLE; in nvme_set_host_mem()
1917 static void nvme_free_host_mem(struct nvme_dev *dev) in nvme_free_host_mem() argument
1921 for (i = 0; i < dev->nr_host_mem_descs; i++) { in nvme_free_host_mem()
1922 struct nvme_host_mem_buf_desc *desc = &dev->host_mem_descs[i]; in nvme_free_host_mem()
1925 dma_free_attrs(dev->dev, size, dev->host_mem_desc_bufs[i], in nvme_free_host_mem()
1930 kfree(dev->host_mem_desc_bufs); in nvme_free_host_mem()
1931 dev->host_mem_desc_bufs = NULL; in nvme_free_host_mem()
1932 dma_free_coherent(dev->dev, in nvme_free_host_mem()
1933 dev->nr_host_mem_descs * sizeof(*dev->host_mem_descs), in nvme_free_host_mem()
1934 dev->host_mem_descs, dev->host_mem_descs_dma); in nvme_free_host_mem()
1935 dev->host_mem_descs = NULL; in nvme_free_host_mem()
1936 dev->nr_host_mem_descs = 0; in nvme_free_host_mem()
1939 static int __nvme_alloc_host_mem(struct nvme_dev *dev, u64 preferred, in __nvme_alloc_host_mem() argument
1953 if (dev->ctrl.hmmaxd && dev->ctrl.hmmaxd < max_entries) in __nvme_alloc_host_mem()
1954 max_entries = dev->ctrl.hmmaxd; in __nvme_alloc_host_mem()
1956 descs = dma_alloc_coherent(dev->dev, max_entries * sizeof(*descs), in __nvme_alloc_host_mem()
1969 bufs[i] = dma_alloc_attrs(dev->dev, len, &dma_addr, GFP_KERNEL, in __nvme_alloc_host_mem()
1982 dev->nr_host_mem_descs = i; in __nvme_alloc_host_mem()
1983 dev->host_mem_size = size; in __nvme_alloc_host_mem()
1984 dev->host_mem_descs = descs; in __nvme_alloc_host_mem()
1985 dev->host_mem_descs_dma = descs_dma; in __nvme_alloc_host_mem()
1986 dev->host_mem_desc_bufs = bufs; in __nvme_alloc_host_mem()
1993 dma_free_attrs(dev->dev, size, bufs[i], in __nvme_alloc_host_mem()
2000 dma_free_coherent(dev->dev, max_entries * sizeof(*descs), descs, in __nvme_alloc_host_mem()
2003 dev->host_mem_descs = NULL; in __nvme_alloc_host_mem()
2007 static int nvme_alloc_host_mem(struct nvme_dev *dev, u64 min, u64 preferred) in nvme_alloc_host_mem() argument
2010 u64 hmminds = max_t(u32, dev->ctrl.hmminds * 4096, PAGE_SIZE * 2); in nvme_alloc_host_mem()
2015 if (!__nvme_alloc_host_mem(dev, preferred, chunk_size)) { in nvme_alloc_host_mem()
2016 if (!min || dev->host_mem_size >= min) in nvme_alloc_host_mem()
2018 nvme_free_host_mem(dev); in nvme_alloc_host_mem()
2025 static int nvme_setup_host_mem(struct nvme_dev *dev) in nvme_setup_host_mem() argument
2028 u64 preferred = (u64)dev->ctrl.hmpre * 4096; in nvme_setup_host_mem()
2029 u64 min = (u64)dev->ctrl.hmmin * 4096; in nvme_setup_host_mem()
2033 if (!dev->ctrl.hmpre) in nvme_setup_host_mem()
2038 dev_warn(dev->ctrl.device, in nvme_setup_host_mem()
2041 nvme_free_host_mem(dev); in nvme_setup_host_mem()
2048 if (dev->host_mem_descs) { in nvme_setup_host_mem()
2049 if (dev->host_mem_size >= min) in nvme_setup_host_mem()
2052 nvme_free_host_mem(dev); in nvme_setup_host_mem()
2055 if (!dev->host_mem_descs) { in nvme_setup_host_mem()
2056 if (nvme_alloc_host_mem(dev, min, preferred)) { in nvme_setup_host_mem()
2057 dev_warn(dev->ctrl.device, in nvme_setup_host_mem()
2062 dev_info(dev->ctrl.device, in nvme_setup_host_mem()
2064 dev->host_mem_size >> ilog2(SZ_1M)); in nvme_setup_host_mem()
2067 ret = nvme_set_host_mem(dev, enable_bits); in nvme_setup_host_mem()
2069 nvme_free_host_mem(dev); in nvme_setup_host_mem()
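The host memory buffer lines above use HMPRE/HMMIN (reported in 4 KiB units) to derive the preferred and minimum buffer sizes, and the allocator retries with progressively smaller chunks. The sketch below illustrates that sizing math only; the concrete values, the 8 KiB floor, and the halving loop are assumptions about the driver's strategy, not a copy of it.

```c
/* Sketch of the HMB sizing visible in nvme_setup_host_mem() /
 * nvme_alloc_host_mem(): convert HMPRE/HMMIN to bytes and walk down
 * through candidate chunk sizes.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t hmpre = 8192;		/* preferred: 8192 * 4 KiB = 32 MiB */
	uint32_t hmmin = 1024;		/* minimum:   1024 * 4 KiB =  4 MiB */
	uint64_t preferred = (uint64_t)hmpre * 4096;
	uint64_t min = (uint64_t)hmmin * 4096;
	uint64_t hmminds = 2 * 4096;	/* smallest chunk worth trying */

	printf("preferred %llu MiB, minimum %llu MiB\n",
	       (unsigned long long)(preferred >> 20),
	       (unsigned long long)(min >> 20));

	/* try progressively smaller chunks until the floor is reached */
	for (uint64_t chunk = preferred; chunk >= hmminds; chunk /= 2)
		printf("would try %llu chunks of %llu KiB\n",
		       (unsigned long long)((preferred + chunk - 1) / chunk),
		       (unsigned long long)(chunk >> 10));
	return 0;
}
```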
2073 static ssize_t cmb_show(struct device *dev, struct device_attribute *attr, in cmb_show() argument
2076 struct nvme_dev *ndev = to_nvme_dev(dev_get_drvdata(dev)); in cmb_show()
2083 static ssize_t cmbloc_show(struct device *dev, struct device_attribute *attr, in cmbloc_show() argument
2086 struct nvme_dev *ndev = to_nvme_dev(dev_get_drvdata(dev)); in cmbloc_show()
2092 static ssize_t cmbsz_show(struct device *dev, struct device_attribute *attr, in cmbsz_show() argument
2095 struct nvme_dev *ndev = to_nvme_dev(dev_get_drvdata(dev)); in cmbsz_show()
2101 static ssize_t hmb_show(struct device *dev, struct device_attribute *attr, in hmb_show() argument
2104 struct nvme_dev *ndev = to_nvme_dev(dev_get_drvdata(dev)); in hmb_show()
2109 static ssize_t hmb_store(struct device *dev, struct device_attribute *attr, in hmb_store() argument
2112 struct nvme_dev *ndev = to_nvme_dev(dev_get_drvdata(dev)); in hmb_store()
2142 struct nvme_dev *dev = to_nvme_dev(ctrl); in nvme_pci_attrs_are_visible() local
2147 if (!dev->cmbsz) in nvme_pci_attrs_are_visible()
2175 static void nvme_update_attrs(struct nvme_dev *dev) in nvme_update_attrs() argument
2177 sysfs_update_group(&dev->ctrl.device->kobj, &nvme_pci_dev_attrs_group); in nvme_update_attrs()
2186 struct nvme_dev *dev = affd->priv; in nvme_calc_irq_sets() local
2187 unsigned int nr_read_queues, nr_write_queues = dev->nr_write_queues; in nvme_calc_irq_sets()
2211 dev->io_queues[HCTX_TYPE_DEFAULT] = nrirqs - nr_read_queues; in nvme_calc_irq_sets()
2213 dev->io_queues[HCTX_TYPE_READ] = nr_read_queues; in nvme_calc_irq_sets()
2218 static int nvme_setup_irqs(struct nvme_dev *dev, unsigned int nr_io_queues) in nvme_setup_irqs() argument
2220 struct pci_dev *pdev = to_pci_dev(dev->dev); in nvme_setup_irqs()
2224 .priv = dev, in nvme_setup_irqs()
2233 poll_queues = min(dev->nr_poll_queues, nr_io_queues - 1); in nvme_setup_irqs()
2234 dev->io_queues[HCTX_TYPE_POLL] = poll_queues; in nvme_setup_irqs()
2240 dev->io_queues[HCTX_TYPE_DEFAULT] = 1; in nvme_setup_irqs()
2241 dev->io_queues[HCTX_TYPE_READ] = 0; in nvme_setup_irqs()
2249 if (!(dev->ctrl.quirks & NVME_QUIRK_SINGLE_VECTOR)) in nvme_setup_irqs()
2251 if (dev->ctrl.quirks & NVME_QUIRK_BROKEN_MSI) in nvme_setup_irqs()
2257 static unsigned int nvme_max_io_queues(struct nvme_dev *dev) in nvme_max_io_queues() argument
2263 if (dev->ctrl.quirks & NVME_QUIRK_SHARED_TAGS) in nvme_max_io_queues()
2265 return num_possible_cpus() + dev->nr_write_queues + dev->nr_poll_queues; in nvme_max_io_queues()
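nvme_max_io_queues() caps how many queue structures the driver will ever allocate: one per possible CPU plus any extra write and poll queues, or just one on controllers with the shared-tags quirk. A trivial standalone version of that formula:

```c
/* Sketch of nvme_max_io_queues(): upper bound on allocatable I/O queues. */
#include <stdio.h>

static unsigned int max_io_queues(unsigned int possible_cpus,
				  unsigned int write_queues,
				  unsigned int poll_queues,
				  int shared_tags_quirk)
{
	if (shared_tags_quirk)
		return 1;
	return possible_cpus + write_queues + poll_queues;
}

int main(void)
{
	printf("max I/O queues: %u\n", max_io_queues(16, 2, 2, 0));
	return 0;
}
```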
2268 static int nvme_setup_io_queues(struct nvme_dev *dev) in nvme_setup_io_queues() argument
2270 struct nvme_queue *adminq = &dev->queues[0]; in nvme_setup_io_queues()
2271 struct pci_dev *pdev = to_pci_dev(dev->dev); in nvme_setup_io_queues()
2280 dev->nr_write_queues = write_queues; in nvme_setup_io_queues()
2281 dev->nr_poll_queues = poll_queues; in nvme_setup_io_queues()
2283 nr_io_queues = dev->nr_allocated_queues - 1; in nvme_setup_io_queues()
2284 result = nvme_set_queue_count(&dev->ctrl, &nr_io_queues); in nvme_setup_io_queues()
2298 result = nvme_setup_io_queues_trylock(dev); in nvme_setup_io_queues()
2304 if (dev->cmb_use_sqes) { in nvme_setup_io_queues()
2305 result = nvme_cmb_qdepth(dev, nr_io_queues, in nvme_setup_io_queues()
2308 dev->q_depth = result; in nvme_setup_io_queues()
2309 dev->ctrl.sqsize = result - 1; in nvme_setup_io_queues()
2311 dev->cmb_use_sqes = false; in nvme_setup_io_queues()
2316 size = db_bar_size(dev, nr_io_queues); in nvme_setup_io_queues()
2317 result = nvme_remap_bar(dev, size); in nvme_setup_io_queues()
2325 adminq->q_db = dev->dbs; in nvme_setup_io_queues()
2338 result = nvme_setup_irqs(dev, nr_io_queues); in nvme_setup_io_queues()
2344 dev->num_vecs = result; in nvme_setup_io_queues()
2346 dev->max_qid = result + dev->io_queues[HCTX_TYPE_POLL]; in nvme_setup_io_queues()
2358 mutex_unlock(&dev->shutdown_lock); in nvme_setup_io_queues()
2360 result = nvme_create_io_queues(dev); in nvme_setup_io_queues()
2361 if (result || dev->online_queues < 2) in nvme_setup_io_queues()
2364 if (dev->online_queues - 1 < dev->max_qid) { in nvme_setup_io_queues()
2365 nr_io_queues = dev->online_queues - 1; in nvme_setup_io_queues()
2366 nvme_delete_io_queues(dev); in nvme_setup_io_queues()
2367 result = nvme_setup_io_queues_trylock(dev); in nvme_setup_io_queues()
2370 nvme_suspend_io_queues(dev); in nvme_setup_io_queues()
2373 dev_info(dev->ctrl.device, "%d/%d/%d default/read/poll queues\n", in nvme_setup_io_queues()
2374 dev->io_queues[HCTX_TYPE_DEFAULT], in nvme_setup_io_queues()
2375 dev->io_queues[HCTX_TYPE_READ], in nvme_setup_io_queues()
2376 dev->io_queues[HCTX_TYPE_POLL]); in nvme_setup_io_queues()
2379 mutex_unlock(&dev->shutdown_lock); in nvme_setup_io_queues()
2406 struct request_queue *q = nvmeq->dev->ctrl.admin_q; in nvme_delete_queue()
2429 static bool __nvme_delete_io_queues(struct nvme_dev *dev, u8 opcode) in __nvme_delete_io_queues() argument
2431 int nr_queues = dev->online_queues - 1, sent = 0; in __nvme_delete_io_queues()
2437 if (nvme_delete_queue(&dev->queues[nr_queues], opcode)) in __nvme_delete_io_queues()
2443 struct nvme_queue *nvmeq = &dev->queues[nr_queues + sent]; in __nvme_delete_io_queues()
2457 static void nvme_delete_io_queues(struct nvme_dev *dev) in nvme_delete_io_queues() argument
2459 if (__nvme_delete_io_queues(dev, nvme_admin_delete_sq)) in nvme_delete_io_queues()
2460 __nvme_delete_io_queues(dev, nvme_admin_delete_cq); in nvme_delete_io_queues()
2463 static unsigned int nvme_pci_nr_maps(struct nvme_dev *dev) in nvme_pci_nr_maps() argument
2465 if (dev->io_queues[HCTX_TYPE_POLL]) in nvme_pci_nr_maps()
2467 if (dev->io_queues[HCTX_TYPE_READ]) in nvme_pci_nr_maps()
2472 static void nvme_pci_update_nr_queues(struct nvme_dev *dev) in nvme_pci_update_nr_queues() argument
2474 if (!dev->ctrl.tagset) { in nvme_pci_update_nr_queues()
2475 nvme_alloc_io_tag_set(&dev->ctrl, &dev->tagset, &nvme_mq_ops, in nvme_pci_update_nr_queues()
2476 nvme_pci_nr_maps(dev), sizeof(struct nvme_iod)); in nvme_pci_update_nr_queues()
2480 blk_mq_update_nr_hw_queues(&dev->tagset, dev->online_queues - 1); in nvme_pci_update_nr_queues()
2482 nvme_free_queues(dev, dev->online_queues); in nvme_pci_update_nr_queues()
2485 static int nvme_pci_enable(struct nvme_dev *dev) in nvme_pci_enable() argument
2488 struct pci_dev *pdev = to_pci_dev(dev->dev); in nvme_pci_enable()
2496 if (readl(dev->bar + NVME_REG_CSTS) == -1) { in nvme_pci_enable()
2506 if (dev->ctrl.quirks & NVME_QUIRK_BROKEN_MSI) in nvme_pci_enable()
2512 dev->ctrl.cap = lo_hi_readq(dev->bar + NVME_REG_CAP); in nvme_pci_enable()
2514 dev->q_depth = min_t(u32, NVME_CAP_MQES(dev->ctrl.cap) + 1, in nvme_pci_enable()
2516 dev->db_stride = 1 << NVME_CAP_STRIDE(dev->ctrl.cap); in nvme_pci_enable()
2517 dev->dbs = dev->bar + 4096; in nvme_pci_enable()
2524 if (dev->ctrl.quirks & NVME_QUIRK_128_BYTES_SQES) in nvme_pci_enable()
2525 dev->io_sqes = 7; in nvme_pci_enable()
2527 dev->io_sqes = NVME_NVM_IOSQES; in nvme_pci_enable()
2534 dev->q_depth = 2; in nvme_pci_enable()
2535 dev_warn(dev->ctrl.device, "detected Apple NVMe controller, " in nvme_pci_enable()
2537 dev->q_depth); in nvme_pci_enable()
2540 NVME_CAP_MQES(dev->ctrl.cap) == 0) { in nvme_pci_enable()
2541 dev->q_depth = 64; in nvme_pci_enable()
2542 dev_err(dev->ctrl.device, "detected PM1725 NVMe controller, " in nvme_pci_enable()
2543 "set queue depth=%u\n", dev->q_depth); in nvme_pci_enable()
2550 if ((dev->ctrl.quirks & NVME_QUIRK_SHARED_TAGS) && in nvme_pci_enable()
2551 (dev->q_depth < (NVME_AQ_DEPTH + 2))) { in nvme_pci_enable()
2552 dev->q_depth = NVME_AQ_DEPTH + 2; in nvme_pci_enable()
2553 dev_warn(dev->ctrl.device, "IO queue depth clamped to %d\n", in nvme_pci_enable()
2554 dev->q_depth); in nvme_pci_enable()
2556 dev->ctrl.sqsize = dev->q_depth - 1; /* 0's based queue depth */ in nvme_pci_enable()
2558 nvme_map_cmb(dev); in nvme_pci_enable()
2562 result = nvme_pci_configure_admin_queue(dev); in nvme_pci_enable()
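The nvme_pci_enable() lines above derive q_depth, db_stride and the doorbell base from the CAP register. The sketch below decodes a made-up CAP value, assuming the spec field positions (MQES in bits 15:0, DSTRD in bits 35:32) and an io_queue_depth module-parameter cap.

```c
/* Sketch of the CAP decoding in nvme_pci_enable(): MQES is 0's based,
 * db_stride is 1 << DSTRD (in 4-byte doorbell units), and the doorbell
 * array starts at BAR offset 0x1000.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t cap = 0x00000002000003ffull;	/* MQES=0x3ff, DSTRD=2 */
	uint32_t io_queue_depth = 1024;		/* module parameter cap */

	uint32_t mqes = cap & 0xffff;
	uint32_t dstrd = (cap >> 32) & 0xf;

	uint32_t q_depth = mqes + 1 < io_queue_depth ? mqes + 1 : io_queue_depth;
	uint32_t db_stride = 1u << dstrd;

	printf("q_depth=%u (sqsize=%u), db_stride=%u, doorbells at BAR+0x1000\n",
	       q_depth, q_depth - 1, db_stride);
	return 0;
}
```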
2574 static void nvme_dev_unmap(struct nvme_dev *dev) in nvme_dev_unmap() argument
2576 if (dev->bar) in nvme_dev_unmap()
2577 iounmap(dev->bar); in nvme_dev_unmap()
2578 pci_release_mem_regions(to_pci_dev(dev->dev)); in nvme_dev_unmap()
2581 static bool nvme_pci_ctrl_is_dead(struct nvme_dev *dev) in nvme_pci_ctrl_is_dead() argument
2583 struct pci_dev *pdev = to_pci_dev(dev->dev); in nvme_pci_ctrl_is_dead()
2591 csts = readl(dev->bar + NVME_REG_CSTS); in nvme_pci_ctrl_is_dead()
2595 static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown) in nvme_dev_disable() argument
2597 enum nvme_ctrl_state state = nvme_ctrl_state(&dev->ctrl); in nvme_dev_disable()
2598 struct pci_dev *pdev = to_pci_dev(dev->dev); in nvme_dev_disable()
2601 mutex_lock(&dev->shutdown_lock); in nvme_dev_disable()
2602 dead = nvme_pci_ctrl_is_dead(dev); in nvme_dev_disable()
2605 nvme_start_freeze(&dev->ctrl); in nvme_dev_disable()
2611 nvme_wait_freeze_timeout(&dev->ctrl, NVME_IO_TIMEOUT); in nvme_dev_disable()
2614 nvme_quiesce_io_queues(&dev->ctrl); in nvme_dev_disable()
2616 if (!dead && dev->ctrl.queue_count > 0) { in nvme_dev_disable()
2617 nvme_delete_io_queues(dev); in nvme_dev_disable()
2618 nvme_disable_ctrl(&dev->ctrl, shutdown); in nvme_dev_disable()
2619 nvme_poll_irqdisable(&dev->queues[0]); in nvme_dev_disable()
2621 nvme_suspend_io_queues(dev); in nvme_dev_disable()
2622 nvme_suspend_queue(dev, 0); in nvme_dev_disable()
2626 nvme_reap_pending_cqes(dev); in nvme_dev_disable()
2628 nvme_cancel_tagset(&dev->ctrl); in nvme_dev_disable()
2629 nvme_cancel_admin_tagset(&dev->ctrl); in nvme_dev_disable()
2637 nvme_unquiesce_io_queues(&dev->ctrl); in nvme_dev_disable()
2638 if (dev->ctrl.admin_q && !blk_queue_dying(dev->ctrl.admin_q)) in nvme_dev_disable()
2639 nvme_unquiesce_admin_queue(&dev->ctrl); in nvme_dev_disable()
2641 mutex_unlock(&dev->shutdown_lock); in nvme_dev_disable()
2644 static int nvme_disable_prepare_reset(struct nvme_dev *dev, bool shutdown) in nvme_disable_prepare_reset() argument
2646 if (!nvme_wait_reset(&dev->ctrl)) in nvme_disable_prepare_reset()
2648 nvme_dev_disable(dev, shutdown); in nvme_disable_prepare_reset()
2652 static int nvme_setup_prp_pools(struct nvme_dev *dev) in nvme_setup_prp_pools() argument
2654 dev->prp_page_pool = dma_pool_create("prp list page", dev->dev, in nvme_setup_prp_pools()
2657 if (!dev->prp_page_pool) in nvme_setup_prp_pools()
2661 dev->prp_small_pool = dma_pool_create("prp list 256", dev->dev, in nvme_setup_prp_pools()
2663 if (!dev->prp_small_pool) { in nvme_setup_prp_pools()
2664 dma_pool_destroy(dev->prp_page_pool); in nvme_setup_prp_pools()
2670 static void nvme_release_prp_pools(struct nvme_dev *dev) in nvme_release_prp_pools() argument
2672 dma_pool_destroy(dev->prp_page_pool); in nvme_release_prp_pools()
2673 dma_pool_destroy(dev->prp_small_pool); in nvme_release_prp_pools()
2676 static int nvme_pci_alloc_iod_mempool(struct nvme_dev *dev) in nvme_pci_alloc_iod_mempool() argument
2680 dev->iod_mempool = mempool_create_node(1, in nvme_pci_alloc_iod_mempool()
2683 dev_to_node(dev->dev)); in nvme_pci_alloc_iod_mempool()
2684 if (!dev->iod_mempool) in nvme_pci_alloc_iod_mempool()
2689 static void nvme_free_tagset(struct nvme_dev *dev) in nvme_free_tagset() argument
2691 if (dev->tagset.tags) in nvme_free_tagset()
2692 nvme_remove_io_tag_set(&dev->ctrl); in nvme_free_tagset()
2693 dev->ctrl.tagset = NULL; in nvme_free_tagset()
2699 struct nvme_dev *dev = to_nvme_dev(ctrl); in nvme_pci_free_ctrl() local
2701 nvme_free_tagset(dev); in nvme_pci_free_ctrl()
2702 put_device(dev->dev); in nvme_pci_free_ctrl()
2703 kfree(dev->queues); in nvme_pci_free_ctrl()
2704 kfree(dev); in nvme_pci_free_ctrl()
2709 struct nvme_dev *dev = in nvme_reset_work() local
2711 bool was_suspend = !!(dev->ctrl.ctrl_config & NVME_CC_SHN_NORMAL); in nvme_reset_work()
2714 if (nvme_ctrl_state(&dev->ctrl) != NVME_CTRL_RESETTING) { in nvme_reset_work()
2715 dev_warn(dev->ctrl.device, "ctrl state %d is not RESETTING\n", in nvme_reset_work()
2716 dev->ctrl.state); in nvme_reset_work()
2725 if (dev->ctrl.ctrl_config & NVME_CC_ENABLE) in nvme_reset_work()
2726 nvme_dev_disable(dev, false); in nvme_reset_work()
2727 nvme_sync_queues(&dev->ctrl); in nvme_reset_work()
2729 mutex_lock(&dev->shutdown_lock); in nvme_reset_work()
2730 result = nvme_pci_enable(dev); in nvme_reset_work()
2733 nvme_unquiesce_admin_queue(&dev->ctrl); in nvme_reset_work()
2734 mutex_unlock(&dev->shutdown_lock); in nvme_reset_work()
2740 if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_CONNECTING)) { in nvme_reset_work()
2741 dev_warn(dev->ctrl.device, in nvme_reset_work()
2747 result = nvme_init_ctrl_finish(&dev->ctrl, was_suspend); in nvme_reset_work()
2751 nvme_dbbuf_dma_alloc(dev); in nvme_reset_work()
2753 result = nvme_setup_host_mem(dev); in nvme_reset_work()
2757 result = nvme_setup_io_queues(dev); in nvme_reset_work()
2766 if (dev->online_queues > 1) { in nvme_reset_work()
2767 nvme_unquiesce_io_queues(&dev->ctrl); in nvme_reset_work()
2768 nvme_wait_freeze(&dev->ctrl); in nvme_reset_work()
2769 nvme_pci_update_nr_queues(dev); in nvme_reset_work()
2770 nvme_dbbuf_set(dev); in nvme_reset_work()
2771 nvme_unfreeze(&dev->ctrl); in nvme_reset_work()
2773 dev_warn(dev->ctrl.device, "IO queues lost\n"); in nvme_reset_work()
2774 nvme_mark_namespaces_dead(&dev->ctrl); in nvme_reset_work()
2775 nvme_unquiesce_io_queues(&dev->ctrl); in nvme_reset_work()
2776 nvme_remove_namespaces(&dev->ctrl); in nvme_reset_work()
2777 nvme_free_tagset(dev); in nvme_reset_work()
2784 if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_LIVE)) { in nvme_reset_work()
2785 dev_warn(dev->ctrl.device, in nvme_reset_work()
2791 nvme_start_ctrl(&dev->ctrl); in nvme_reset_work()
2795 mutex_unlock(&dev->shutdown_lock); in nvme_reset_work()
2801 dev_warn(dev->ctrl.device, "Disabling device after reset failure: %d\n", in nvme_reset_work()
2803 nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING); in nvme_reset_work()
2804 nvme_dev_disable(dev, true); in nvme_reset_work()
2805 nvme_sync_queues(&dev->ctrl); in nvme_reset_work()
2806 nvme_mark_namespaces_dead(&dev->ctrl); in nvme_reset_work()
2807 nvme_unquiesce_io_queues(&dev->ctrl); in nvme_reset_work()
2808 nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DEAD); in nvme_reset_work()
2831 struct pci_dev *pdev = to_pci_dev(to_nvme_dev(ctrl)->dev); in nvme_pci_get_address()
2833 return snprintf(buf, size, "%s\n", dev_name(&pdev->dev)); in nvme_pci_get_address()
2838 struct pci_dev *pdev = to_pci_dev(to_nvme_dev(ctrl)->dev); in nvme_pci_print_device_info()
2852 struct nvme_dev *dev = to_nvme_dev(ctrl); in nvme_pci_supports_pci_p2pdma() local
2854 return dma_pci_p2pdma_supported(dev->dev); in nvme_pci_supports_pci_p2pdma()
2872 static int nvme_dev_map(struct nvme_dev *dev) in nvme_dev_map() argument
2874 struct pci_dev *pdev = to_pci_dev(dev->dev); in nvme_dev_map()
2879 if (nvme_remap_bar(dev, NVME_REG_DBS + 4096)) in nvme_dev_map()
2965 int node = dev_to_node(&pdev->dev); in nvme_pci_alloc_dev()
2966 struct nvme_dev *dev; in nvme_pci_alloc_dev() local
2969 dev = kzalloc_node(sizeof(*dev), GFP_KERNEL, node); in nvme_pci_alloc_dev()
2970 if (!dev) in nvme_pci_alloc_dev()
2972 INIT_WORK(&dev->ctrl.reset_work, nvme_reset_work); in nvme_pci_alloc_dev()
2973 mutex_init(&dev->shutdown_lock); in nvme_pci_alloc_dev()
2975 dev->nr_write_queues = write_queues; in nvme_pci_alloc_dev()
2976 dev->nr_poll_queues = poll_queues; in nvme_pci_alloc_dev()
2977 dev->nr_allocated_queues = nvme_max_io_queues(dev) + 1; in nvme_pci_alloc_dev()
2978 dev->queues = kcalloc_node(dev->nr_allocated_queues, in nvme_pci_alloc_dev()
2980 if (!dev->queues) in nvme_pci_alloc_dev()
2983 dev->dev = get_device(&pdev->dev); in nvme_pci_alloc_dev()
2988 acpi_storage_d3(&pdev->dev)) { in nvme_pci_alloc_dev()
2993 dev_info(&pdev->dev, in nvme_pci_alloc_dev()
2997 ret = nvme_init_ctrl(&dev->ctrl, &pdev->dev, &nvme_pci_ctrl_ops, in nvme_pci_alloc_dev()
3002 if (dev->ctrl.quirks & NVME_QUIRK_DMA_ADDRESS_BITS_48) in nvme_pci_alloc_dev()
3003 dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48)); in nvme_pci_alloc_dev()
3005 dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); in nvme_pci_alloc_dev()
3006 dma_set_min_align_mask(&pdev->dev, NVME_CTRL_PAGE_SIZE - 1); in nvme_pci_alloc_dev()
3007 dma_set_max_seg_size(&pdev->dev, 0xffffffff); in nvme_pci_alloc_dev()
3013 dev->ctrl.max_hw_sectors = min_t(u32, in nvme_pci_alloc_dev()
3014 NVME_MAX_KB_SZ << 1, dma_opt_mapping_size(&pdev->dev) >> 9); in nvme_pci_alloc_dev()
3015 dev->ctrl.max_segments = NVME_MAX_SEGS; in nvme_pci_alloc_dev()
3021 dev->ctrl.max_integrity_segments = 1; in nvme_pci_alloc_dev()
3022 return dev; in nvme_pci_alloc_dev()
3025 put_device(dev->dev); in nvme_pci_alloc_dev()
3026 kfree(dev->queues); in nvme_pci_alloc_dev()
3028 kfree(dev); in nvme_pci_alloc_dev()
3034 struct nvme_dev *dev; in nvme_probe() local
3037 dev = nvme_pci_alloc_dev(pdev, id); in nvme_probe()
3038 if (IS_ERR(dev)) in nvme_probe()
3039 return PTR_ERR(dev); in nvme_probe()
3041 result = nvme_dev_map(dev); in nvme_probe()
3045 result = nvme_setup_prp_pools(dev); in nvme_probe()
3049 result = nvme_pci_alloc_iod_mempool(dev); in nvme_probe()
3053 dev_info(dev->ctrl.device, "pci function %s\n", dev_name(&pdev->dev)); in nvme_probe()
3055 result = nvme_pci_enable(dev); in nvme_probe()
3059 result = nvme_alloc_admin_tag_set(&dev->ctrl, &dev->admin_tagset, in nvme_probe()
3068 if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_CONNECTING)) { in nvme_probe()
3069 dev_warn(dev->ctrl.device, in nvme_probe()
3075 result = nvme_init_ctrl_finish(&dev->ctrl, false); in nvme_probe()
3079 nvme_dbbuf_dma_alloc(dev); in nvme_probe()
3081 result = nvme_setup_host_mem(dev); in nvme_probe()
3085 result = nvme_setup_io_queues(dev); in nvme_probe()
3089 if (dev->online_queues > 1) { in nvme_probe()
3090 nvme_alloc_io_tag_set(&dev->ctrl, &dev->tagset, &nvme_mq_ops, in nvme_probe()
3091 nvme_pci_nr_maps(dev), sizeof(struct nvme_iod)); in nvme_probe()
3092 nvme_dbbuf_set(dev); in nvme_probe()
3095 if (!dev->ctrl.tagset) in nvme_probe()
3096 dev_warn(dev->ctrl.device, "IO queues not created\n"); in nvme_probe()
3098 if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_LIVE)) { in nvme_probe()
3099 dev_warn(dev->ctrl.device, in nvme_probe()
3105 pci_set_drvdata(pdev, dev); in nvme_probe()
3107 nvme_start_ctrl(&dev->ctrl); in nvme_probe()
3108 nvme_put_ctrl(&dev->ctrl); in nvme_probe()
3109 flush_work(&dev->ctrl.scan_work); in nvme_probe()
3113 nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING); in nvme_probe()
3114 nvme_dev_disable(dev, true); in nvme_probe()
3115 nvme_free_host_mem(dev); in nvme_probe()
3116 nvme_dev_remove_admin(dev); in nvme_probe()
3117 nvme_dbbuf_dma_free(dev); in nvme_probe()
3118 nvme_free_queues(dev, 0); in nvme_probe()
3120 mempool_destroy(dev->iod_mempool); in nvme_probe()
3122 nvme_release_prp_pools(dev); in nvme_probe()
3124 nvme_dev_unmap(dev); in nvme_probe()
3126 nvme_uninit_ctrl(&dev->ctrl); in nvme_probe()
3127 nvme_put_ctrl(&dev->ctrl); in nvme_probe()
3133 struct nvme_dev *dev = pci_get_drvdata(pdev); in nvme_reset_prepare() local
3140 nvme_disable_prepare_reset(dev, false); in nvme_reset_prepare()
3141 nvme_sync_queues(&dev->ctrl); in nvme_reset_prepare()
3146 struct nvme_dev *dev = pci_get_drvdata(pdev); in nvme_reset_done() local
3148 if (!nvme_try_sched_reset(&dev->ctrl)) in nvme_reset_done()
3149 flush_work(&dev->ctrl.reset_work); in nvme_reset_done()
3154 struct nvme_dev *dev = pci_get_drvdata(pdev); in nvme_shutdown() local
3156 nvme_disable_prepare_reset(dev, true); in nvme_shutdown()
3166 struct nvme_dev *dev = pci_get_drvdata(pdev); in nvme_remove() local
3168 nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING); in nvme_remove()
3172 nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DEAD); in nvme_remove()
3173 nvme_dev_disable(dev, true); in nvme_remove()
3176 flush_work(&dev->ctrl.reset_work); in nvme_remove()
3177 nvme_stop_ctrl(&dev->ctrl); in nvme_remove()
3178 nvme_remove_namespaces(&dev->ctrl); in nvme_remove()
3179 nvme_dev_disable(dev, true); in nvme_remove()
3180 nvme_free_host_mem(dev); in nvme_remove()
3181 nvme_dev_remove_admin(dev); in nvme_remove()
3182 nvme_dbbuf_dma_free(dev); in nvme_remove()
3183 nvme_free_queues(dev, 0); in nvme_remove()
3184 mempool_destroy(dev->iod_mempool); in nvme_remove()
3185 nvme_release_prp_pools(dev); in nvme_remove()
3186 nvme_dev_unmap(dev); in nvme_remove()
3187 nvme_uninit_ctrl(&dev->ctrl); in nvme_remove()
3201 static int nvme_resume(struct device *dev) in nvme_resume() argument
3203 struct nvme_dev *ndev = pci_get_drvdata(to_pci_dev(dev)); in nvme_resume()
3217 static int nvme_suspend(struct device *dev) in nvme_suspend() argument
3219 struct pci_dev *pdev = to_pci_dev(dev); in nvme_suspend()
3293 static int nvme_simple_suspend(struct device *dev) in nvme_simple_suspend() argument
3295 struct nvme_dev *ndev = pci_get_drvdata(to_pci_dev(dev)); in nvme_simple_suspend()
3300 static int nvme_simple_resume(struct device *dev) in nvme_simple_resume() argument
3302 struct pci_dev *pdev = to_pci_dev(dev); in nvme_simple_resume()
3321 struct nvme_dev *dev = pci_get_drvdata(pdev); in nvme_error_detected() local
3332 dev_warn(dev->ctrl.device, in nvme_error_detected()
3334 if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_RESETTING)) { in nvme_error_detected()
3335 nvme_dev_disable(dev, true); in nvme_error_detected()
3338 nvme_dev_disable(dev, false); in nvme_error_detected()
3341 dev_warn(dev->ctrl.device, in nvme_error_detected()
3350 struct nvme_dev *dev = pci_get_drvdata(pdev); in nvme_slot_reset() local
3352 dev_info(dev->ctrl.device, "restart after slot reset\n"); in nvme_slot_reset()
3354 if (!nvme_try_sched_reset(&dev->ctrl)) in nvme_slot_reset()
3355 nvme_unquiesce_io_queues(&dev->ctrl); in nvme_slot_reset()
3361 struct nvme_dev *dev = pci_get_drvdata(pdev); in nvme_error_resume() local
3363 flush_work(&dev->ctrl.reset_work); in nvme_error_resume()