/* xref: /openbmc/linux/drivers/nvme/host/nvme.h (revision 106198ed) */
/*
 * Copyright (c) 2011-2014, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#ifndef _NVME_H
#define _NVME_H

#include <linux/nvme.h>
#include <linux/pci.h>
#include <linux/kref.h>
#include <linux/blk-mq.h>

struct nvme_passthru_cmd;

extern unsigned char nvme_io_timeout;
#define NVME_IO_TIMEOUT	(nvme_io_timeout * HZ)

extern unsigned char admin_timeout;
#define ADMIN_TIMEOUT	(admin_timeout * HZ)

enum {
	NVME_NS_LBA		= 0,
	NVME_NS_LIGHTNVM	= 1,
};

/*
 * List of workarounds for devices that require behavior not specified in
 * the standard.
 */
enum nvme_quirks {
	/*
	 * Prefers I/O aligned to a stripe size specified in a
	 * vendor-specific Identify field.
	 */
	NVME_QUIRK_STRIPE_SIZE			= (1 << 0),
};
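
/*
 * Illustrative usage sketch (not part of this file): a transport driver
 * that recognizes an affected device is expected to set the matching bit
 * in nvme_ctrl.quirks, which core code can then test, e.g.:
 *
 *	if (ctrl->quirks & NVME_QUIRK_STRIPE_SIZE)
 *		...honour the vendor-specific stripe size...
 */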

struct nvme_ctrl {
	const struct nvme_ctrl_ops *ops;
	struct request_queue *admin_q;
	struct device *dev;
	struct kref kref;
	int instance;

	char name[12];
	char serial[20];
	char model[40];
	char firmware_rev[8];
	u16 oncs;
	u16 abort_limit;
	u8 event_limit;
	u8 vwc;
	unsigned long quirks;
};

/*
 * An NVM Express namespace is equivalent to a SCSI LUN
 */
struct nvme_ns {
	struct list_head list;

	struct nvme_ctrl *ctrl;
	struct request_queue *queue;
	struct gendisk *disk;
	struct kref kref;

	unsigned ns_id;
	int lba_shift;
	u16 ms;
	bool ext;
	u8 pi_type;
	int type;
	u64 mode_select_num_blocks;
	u32 mode_select_block_len;
};

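/*
 * Transport-specific callbacks.  reg_read32() reads a 32-bit controller
 * register (for a PCIe controller this would be an MMIO read of the
 * memory-mapped BAR); free_ctrl() releases the transport's per-controller
 * state once the last reference is dropped via nvme_put_ctrl().
 */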
struct nvme_ctrl_ops {
	int (*reg_read32)(struct nvme_ctrl *ctrl, u32 off, u32 *val);
	void (*free_ctrl)(struct nvme_ctrl *ctrl);
};

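/*
 * Read the controller status register (CSTS) through the transport's
 * reg_read32() callback and report whether CSTS.RDY is set.  A failed
 * register read is treated as "not ready".
 */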
static inline bool nvme_ctrl_ready(struct nvme_ctrl *ctrl)
{
	u32 val = 0;

	if (ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &val))
		return false;
	return val & NVME_CSTS_RDY;
}

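/*
 * Convert a 512-byte block layer sector number into a namespace-relative
 * LBA.  ns->lba_shift is log2 of the formatted LBA data size, so for a
 * namespace formatted with 4096-byte blocks (lba_shift == 12) this shifts
 * the sector number right by 3.
 */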
static inline u64 nvme_block_nr(struct nvme_ns *ns, sector_t sector)
{
	return (sector >> (ns->lba_shift - 9));
}

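/* Initialize *cmnd as a Flush command addressed to the given namespace. */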
static inline void nvme_setup_flush(struct nvme_ns *ns,
		struct nvme_command *cmnd)
{
	memset(cmnd, 0, sizeof(*cmnd));
	cmnd->common.opcode = nvme_cmd_flush;
	cmnd->common.nsid = cpu_to_le32(ns->ns_id);
}

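/*
 * Translate a block layer read/write request into an NVMe I/O command:
 * the opcode follows the data direction, the starting LBA and zero-based
 * block count come from the request, FUA/limited-retry/prefetch hints are
 * derived from the request flags, and protection information checks are
 * enabled when the namespace is formatted with metadata.
 */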
static inline void nvme_setup_rw(struct nvme_ns *ns, struct request *req,
		struct nvme_command *cmnd)
{
	u16 control = 0;
	u32 dsmgmt = 0;

	if (req->cmd_flags & REQ_FUA)
		control |= NVME_RW_FUA;
	if (req->cmd_flags & (REQ_FAILFAST_DEV | REQ_RAHEAD))
		control |= NVME_RW_LR;

	if (req->cmd_flags & REQ_RAHEAD)
		dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;

	memset(cmnd, 0, sizeof(*cmnd));
	cmnd->rw.opcode = (rq_data_dir(req) ? nvme_cmd_write : nvme_cmd_read);
	cmnd->rw.command_id = req->tag;
	cmnd->rw.nsid = cpu_to_le32(ns->ns_id);
	cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));
	cmnd->rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);

	if (ns->ms) {
		switch (ns->pi_type) {
		case NVME_NS_DPS_PI_TYPE3:
			control |= NVME_RW_PRINFO_PRCHK_GUARD;
			break;
		case NVME_NS_DPS_PI_TYPE1:
		case NVME_NS_DPS_PI_TYPE2:
			control |= NVME_RW_PRINFO_PRCHK_GUARD |
					NVME_RW_PRINFO_PRCHK_REF;
			cmnd->rw.reftag = cpu_to_le32(
					nvme_block_nr(ns, blk_rq_pos(req)));
			break;
		}
		if (!blk_integrity_rq(req))
			control |= NVME_RW_PRINFO_PRACT;
	}

	cmnd->rw.control = cpu_to_le16(control);
	cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
}

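/*
 * Map an NVMe completion status code to a Linux errno.  Masking with
 * 0x7ff keeps only the status code and status code type, discarding the
 * More and Do Not Retry bits.
 */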
static inline int nvme_error_status(u16 status)
{
	switch (status & 0x7ff) {
	case NVME_SC_SUCCESS:
		return 0;
	case NVME_SC_CAP_EXCEEDED:
		return -ENOSPC;
	default:
		return -EIO;
	}
}

void nvme_put_ctrl(struct nvme_ctrl *ctrl);
void nvme_put_ns(struct nvme_ns *ns);

struct request *nvme_alloc_request(struct request_queue *q,
		struct nvme_command *cmd, unsigned int flags);
int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		void *buf, unsigned bufflen);
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		void *buffer, unsigned bufflen, u32 *result, unsigned timeout);
int nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
		void __user *ubuffer, unsigned bufflen, u32 *result,
		unsigned timeout);
int __nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
		void __user *ubuffer, unsigned bufflen,
		void __user *meta_buffer, unsigned meta_len, u32 meta_seed,
		u32 *result, unsigned timeout);
int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id);
int nvme_identify_ns(struct nvme_ctrl *dev, unsigned nsid,
		struct nvme_id_ns **id);
int nvme_get_log_page(struct nvme_ctrl *dev, struct nvme_smart_log **log);
int nvme_get_features(struct nvme_ctrl *dev, unsigned fid, unsigned nsid,
			dma_addr_t dma_addr, u32 *result);
int nvme_set_features(struct nvme_ctrl *dev, unsigned fid, unsigned dword11,
			dma_addr_t dma_addr, u32 *result);
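
/*
 * Minimal usage sketch (illustrative only, not part of this header):
 * a caller fills in a struct nvme_command and submits it synchronously,
 * here an Identify Controller command on the admin queue, where @id is
 * assumed to point to a buffer of sizeof(struct nvme_id_ctrl) bytes:
 *
 *	struct nvme_command c = { };
 *	int error;
 *
 *	c.identify.opcode = nvme_admin_identify;
 *	c.identify.cns = cpu_to_le32(1);
 *	error = nvme_submit_sync_cmd(ctrl->admin_q, &c, id,
 *			sizeof(struct nvme_id_ctrl));
 */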

extern const struct block_device_operations nvme_fops;
extern spinlock_t dev_list_lock;

int nvme_revalidate_disk(struct gendisk *disk);
int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
			struct nvme_passthru_cmd __user *ucmd);

struct sg_io_hdr;

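/* SCSI-to-NVMe translation backing the SG_IO ioctl path. */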
int nvme_sg_io(struct nvme_ns *ns, struct sg_io_hdr __user *u_hdr);
int nvme_sg_io32(struct nvme_ns *ns, unsigned long arg);
int nvme_sg_get_version_num(int __user *ip);

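/* LightNVM (open-channel SSD) support hooks. */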
int nvme_nvm_ns_supported(struct nvme_ns *ns, struct nvme_id_ns *id);
int nvme_nvm_register(struct request_queue *q, char *disk_name);
void nvme_nvm_unregister(struct request_queue *q, char *disk_name);

#endif /* _NVME_H */