// SPDX-License-Identifier: GPL-2.0
/*
 * bsg endpoint that supports UPIUs
 *
 * Copyright (C) 2018 Western Digital Corporation
 */

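/*
 * The node registered below appears as /dev/bsg/ufs-bsg<host_no> and accepts
 * sg_io_v4 BSG requests carrying struct ufs_bsg_request / struct ufs_bsg_reply.
 * Minimal user-space sketch (illustrative only, not part of this driver; see
 * Documentation/scsi/ufs.rst for the complete description):
 *
 *	struct ufs_bsg_request req = { ... };
 *	struct ufs_bsg_reply rsp = { };
 *	struct sg_io_v4 io = {
 *		.guard			= 'Q',
 *		.protocol		= BSG_PROTOCOL_SCSI,
 *		.subprotocol		= BSG_SUB_PROTOCOL_SCSI_TRANSPORT,
 *		.request		= (__u64)(uintptr_t)&req,
 *		.request_len		= sizeof(req),
 *		.response		= (__u64)(uintptr_t)&rsp,
 *		.max_response_len	= sizeof(rsp),
 *	};
 *	int fd = open("/dev/bsg/ufs-bsg0", O_RDWR);
 *
 *	ioctl(fd, SG_IO, &io);
 */
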
#include <linux/bsg-lib.h>
#include <linux/dma-mapping.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include "ufs_bsg.h"
#include <ufs/ufshcd.h>
#include "ufshcd-priv.h"

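/**
 * ufs_bsg_get_query_desc_size - extract the descriptor length from a query UPIU
 * @hba: per adapter object
 * @desc_len: set to the requested length, clamped to QUERY_DESC_MAX_SIZE
 * @qr: query request UPIU whose big-endian length field is examined
 *
 * Return: 0 on success, -EINVAL if the requested length is zero.
 */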
static int ufs_bsg_get_query_desc_size(struct ufs_hba *hba, int *desc_len,
				       struct utp_upiu_query *qr)
{
	int desc_size = be16_to_cpu(qr->length);

	if (desc_size <= 0)
		return -EINVAL;

	*desc_len = min_t(int, QUERY_DESC_MAX_SIZE, desc_size);

	return 0;
}

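/**
 * ufs_bsg_alloc_desc_buffer - allocate a descriptor bounce buffer if needed
 * @hba: per adapter object
 * @job: bsg job carrying the request and its payload
 * @desc_buff: set to the allocated buffer; left untouched for opcodes that
 *             carry no descriptor
 * @desc_len: set to the length of the allocated buffer
 * @desc_op: query opcode; only READ_DESC and WRITE_DESC need a buffer
 *
 * For WRITE_DESC the payload scatter/gather list is copied into the buffer
 * before the query is issued.
 *
 * Return: 0 on success, -EINVAL on an illegal descriptor size, -ENOMEM on
 * allocation failure.
 */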
static int ufs_bsg_alloc_desc_buffer(struct ufs_hba *hba, struct bsg_job *job,
				     uint8_t **desc_buff, int *desc_len,
				     enum query_opcode desc_op)
{
	struct ufs_bsg_request *bsg_request = job->request;
	struct utp_upiu_query *qr;
	u8 *descp;

	if (desc_op != UPIU_QUERY_OPCODE_WRITE_DESC &&
	    desc_op != UPIU_QUERY_OPCODE_READ_DESC)
		goto out;

	qr = &bsg_request->upiu_req.qr;
	if (ufs_bsg_get_query_desc_size(hba, desc_len, qr)) {
		dev_err(hba->dev, "Illegal desc size\n");
		return -EINVAL;
	}

	if (*desc_len > job->request_payload.payload_len) {
		dev_err(hba->dev, "Illegal desc size\n");
		return -EINVAL;
	}

	descp = kzalloc(*desc_len, GFP_KERNEL);
	if (!descp)
		return -ENOMEM;

	if (desc_op == UPIU_QUERY_OPCODE_WRITE_DESC)
		sg_copy_to_buffer(job->request_payload.sg_list,
				  job->request_payload.sg_cnt, descp,
				  *desc_len);

	*desc_buff = descp;

out:
	return 0;
}

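/**
 * ufs_bsg_exec_advanced_rpmb_req - issue an Advanced RPMB request
 * @hba: per adapter object
 * @job: bsg job carrying the RPMB request, its EHS and an optional data payload
 *
 * Advanced RPMB is only available on UFSHCI 4.0+ hosts when the device reports
 * Advanced RPMB support, and the request EHS must be of type 1 with length 2.
 * For data-bearing request types the bsg payload is DMA-mapped and handed to
 * the core handler together with the request and response EHS.
 *
 * Return: 0 on success or a negative error code on failure.
 */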
static int ufs_bsg_exec_advanced_rpmb_req(struct ufs_hba *hba, struct bsg_job *job)
{
	struct ufs_rpmb_request *rpmb_request = job->request;
	struct ufs_rpmb_reply *rpmb_reply = job->reply;
	struct bsg_buffer *payload = NULL;
	enum dma_data_direction dir;
	struct scatterlist *sg_list = NULL;
	int rpmb_req_type;
	int sg_cnt = 0;
	int ret;
	int data_len;

	if (hba->ufs_version < ufshci_version(4, 0) || !hba->dev_info.b_advanced_rpmb_en)
		return -EINVAL;

	if (rpmb_request->ehs_req.length != 2 || rpmb_request->ehs_req.ehs_type != 1)
		return -EINVAL;

	rpmb_req_type = be16_to_cpu(rpmb_request->ehs_req.meta.req_resp_type);

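	/*
	 * The RPMB request type determines whether the operation moves data
	 * and in which direction: key programming, write-counter reads and
	 * purge enable carry no data payload; authenticated data and secure
	 * configuration writes move data to the device; the corresponding
	 * reads (and purge status reads) move data from the device.
	 */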
	switch (rpmb_req_type) {
	case UFS_RPMB_WRITE_KEY:
	case UFS_RPMB_READ_CNT:
	case UFS_RPMB_PURGE_ENABLE:
		dir = DMA_NONE;
		break;
	case UFS_RPMB_WRITE:
	case UFS_RPMB_SEC_CONF_WRITE:
		dir = DMA_TO_DEVICE;
		break;
	case UFS_RPMB_READ:
	case UFS_RPMB_SEC_CONF_READ:
	case UFS_RPMB_PURGE_STATUS_READ:
		dir = DMA_FROM_DEVICE;
		break;
	default:
		return -EINVAL;
	}

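	/*
	 * Data-bearing requests must supply a payload; map its scatter/gather
	 * list so the UFS core can program the data phase from it.
	 */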
	if (dir != DMA_NONE) {
		payload = &job->request_payload;
		if (!payload || !payload->payload_len || !payload->sg_cnt)
			return -EINVAL;

		sg_cnt = dma_map_sg(hba->host->dma_dev, payload->sg_list, payload->sg_cnt, dir);
		if (unlikely(!sg_cnt))
			return -ENOMEM;
		sg_list = payload->sg_list;
		data_len = payload->payload_len;
	}

	ret = ufshcd_advanced_rpmb_req_handler(hba, &rpmb_request->bsg_request.upiu_req,
				   &rpmb_reply->bsg_reply.upiu_rsp, &rpmb_request->ehs_req,
				   &rpmb_reply->ehs_rsp, sg_cnt, sg_list, dir);

	if (dir != DMA_NONE) {
		dma_unmap_sg(hba->host->dma_dev, payload->sg_list, payload->sg_cnt, dir);

		if (!ret)
			rpmb_reply->bsg_reply.reply_payload_rcv_len = data_len;
	}

	return ret;
}

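/**
 * ufs_bsg_request - handle a bsg request submitted on the ufs-bsg node
 * @job: bsg job to be processed
 *
 * Dispatches on the UPIU transaction code: query, NOP OUT and task management
 * requests go through the raw UPIU path, UIC commands are sent directly to the
 * host controller, and Advanced RPMB requests are handled by
 * ufs_bsg_exec_advanced_rpmb_req(). The job is completed here only if no
 * error occurred.
 *
 * Return: 0 on success or a negative error code on failure.
 */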
static int ufs_bsg_request(struct bsg_job *job)
{
	struct ufs_bsg_request *bsg_request = job->request;
	struct ufs_bsg_reply *bsg_reply = job->reply;
	struct ufs_hba *hba = shost_priv(dev_to_shost(job->dev->parent));
	struct uic_command uc = {};
	int msgcode;
	uint8_t *buff = NULL;
	int desc_len = 0;
	enum query_opcode desc_op = UPIU_QUERY_OPCODE_NOP;
	int ret;
	bool rpmb = false;

	bsg_reply->reply_payload_rcv_len = 0;

	ufshcd_rpm_get_sync(hba);

	msgcode = bsg_request->msgcode;
	switch (msgcode) {
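	/*
	 * Query requests may carry a descriptor: allocate (and, for writes,
	 * fill) a bounce buffer first, then fall through to the raw UPIU
	 * path shared with NOP OUT and task management requests.
	 */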
	case UPIU_TRANSACTION_QUERY_REQ:
		desc_op = bsg_request->upiu_req.qr.opcode;
		ret = ufs_bsg_alloc_desc_buffer(hba, job, &buff, &desc_len, desc_op);
		if (ret)
			goto out;
		fallthrough;
	case UPIU_TRANSACTION_NOP_OUT:
	case UPIU_TRANSACTION_TASK_REQ:
		ret = ufshcd_exec_raw_upiu_cmd(hba, &bsg_request->upiu_req,
					       &bsg_reply->upiu_rsp, msgcode,
					       buff, &desc_len, desc_op);
		if (ret)
			dev_err(hba->dev, "exe raw upiu: error code %d\n", ret);
		else if (desc_op == UPIU_QUERY_OPCODE_READ_DESC && desc_len) {
			bsg_reply->reply_payload_rcv_len =
				sg_copy_from_buffer(job->request_payload.sg_list,
						    job->request_payload.sg_cnt,
						    buff, desc_len);
		}
		break;
	case UPIU_TRANSACTION_UIC_CMD:
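		/*
		 * UIC commands are passed through as-is; the completed
		 * command, including its result arguments, is copied back
		 * into the reply UPIU area.
		 */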
		memcpy(&uc, &bsg_request->upiu_req.uc, UIC_CMD_SIZE);
		ret = ufshcd_send_uic_cmd(hba, &uc);
		if (ret)
			dev_err(hba->dev, "send uic cmd: error code %d\n", ret);

		memcpy(&bsg_reply->upiu_rsp.uc, &uc, UIC_CMD_SIZE);

		break;
	case UPIU_TRANSACTION_ARPMB_CMD:
		rpmb = true;
		ret = ufs_bsg_exec_advanced_rpmb_req(hba, job);
		if (ret)
			dev_err(hba->dev, "ARPMB OP failed: error code %d\n", ret);
		break;
	default:
		ret = -ENOTSUPP;
		dev_err(hba->dev, "unsupported msgcode 0x%x\n", msgcode);

		break;
	}

out:
	ufshcd_rpm_put_sync(hba);
	kfree(buff);
	bsg_reply->result = ret;
	job->reply_len = !rpmb ? sizeof(struct ufs_bsg_reply) : sizeof(struct ufs_rpmb_reply);
	/* complete the job here only if no error */
	if (ret == 0)
		bsg_job_done(job, ret, bsg_reply->reply_payload_rcv_len);

	return ret;
}

/**
 * ufs_bsg_remove - detach and remove the added ufs-bsg node
 * @hba: per adapter object
 *
 * Should be called when unloading the driver.
 */
void ufs_bsg_remove(struct ufs_hba *hba)
{
	struct device *bsg_dev = &hba->bsg_dev;

	if (!hba->bsg_queue)
		return;

	bsg_remove_queue(hba->bsg_queue);

	device_del(bsg_dev);
	put_device(bsg_dev);
}

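/*
 * Release callback for the bsg device node: drops the reference on the SCSI
 * host's gendev taken in ufs_bsg_probe().
 */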
static inline void ufs_bsg_node_release(struct device *dev)
{
	put_device(dev->parent);
}

/**
 * ufs_bsg_probe - Add ufs bsg device node
 * @hba: per adapter object
 *
 * Called during initial loading of the driver, and before scsi_scan_host.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int ufs_bsg_probe(struct ufs_hba *hba)
{
	struct device *bsg_dev = &hba->bsg_dev;
	struct Scsi_Host *shost = hba->host;
	struct device *parent = &shost->shost_gendev;
	struct request_queue *q;
	int ret;

	device_initialize(bsg_dev);

	bsg_dev->parent = get_device(parent);
	bsg_dev->release = ufs_bsg_node_release;

	dev_set_name(bsg_dev, "ufs-bsg%u", shost->host_no);

	ret = device_add(bsg_dev);
	if (ret)
		goto out;

	q = bsg_setup_queue(bsg_dev, dev_name(bsg_dev), ufs_bsg_request, NULL, 0);
	if (IS_ERR(q)) {
		ret = PTR_ERR(q);
		goto out;
	}

	hba->bsg_queue = q;

	return 0;

out:
	dev_err(bsg_dev, "failed to initialize bsg dev %d\n", shost->host_no);
	put_device(bsg_dev);
	return ret;
}
271