1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (C) 2020 Marvell. */
3 
4 #include "otx2_cpt_common.h"
5 #include "otx2_cptpf.h"
6 #include "rvu_reg.h"
7 
/*
 * CPT PF driver version. It will be incremented by 1 for every feature
 * addition in CPT mailbox messages.
 */
12 #define OTX2_CPT_PF_DRV_VERSION 0x1
13 
14 static int forward_to_af(struct otx2_cptpf_dev *cptpf,
15 			 struct otx2_cptvf_info *vf,
16 			 struct mbox_msghdr *req, int size)
17 {
18 	struct mbox_msghdr *msg;
19 	int ret;
20 
21 	mutex_lock(&cptpf->lock);
22 	msg = otx2_mbox_alloc_msg(&cptpf->afpf_mbox, 0, size);
23 	if (msg == NULL)
24 		return -ENOMEM;
25 
26 	memcpy((uint8_t *)msg + sizeof(struct mbox_msghdr),
27 	       (uint8_t *)req + sizeof(struct mbox_msghdr), size);
28 	msg->id = req->id;
29 	msg->pcifunc = req->pcifunc;
30 	msg->sig = req->sig;
31 	msg->ver = req->ver;
32 
33 	ret = otx2_cpt_sync_mbox_msg(&cptpf->afpf_mbox);
34 	/* Error code -EIO indicate there is a communication failure
35 	 * to the AF. Rest of the error codes indicate that AF processed
36 	 * VF messages and set the error codes in response messages
37 	 * (if any) so simply forward responses to VF.
38 	 */
39 	if (ret == -EIO) {
40 		dev_warn(&cptpf->pdev->dev,
41 			 "AF not responding to VF%d messages\n", vf->vf_id);
42 		mutex_unlock(&cptpf->lock);
43 		return ret;
44 	}
45 	mutex_unlock(&cptpf->lock);
46 	return 0;
47 }
48 
49 static int handle_msg_get_caps(struct otx2_cptpf_dev *cptpf,
50 			       struct otx2_cptvf_info *vf,
51 			       struct mbox_msghdr *req)
52 {
53 	struct otx2_cpt_caps_rsp *rsp;
54 
55 	rsp = (struct otx2_cpt_caps_rsp *)
56 	      otx2_mbox_alloc_msg(&cptpf->vfpf_mbox, vf->vf_id,
57 				  sizeof(*rsp));
58 	if (!rsp)
59 		return -ENOMEM;
60 
61 	rsp->hdr.id = MBOX_MSG_GET_CAPS;
62 	rsp->hdr.sig = OTX2_MBOX_RSP_SIG;
63 	rsp->hdr.pcifunc = req->pcifunc;
64 	rsp->cpt_pf_drv_version = OTX2_CPT_PF_DRV_VERSION;
65 	rsp->cpt_revision = cptpf->pdev->revision;
66 	memcpy(&rsp->eng_caps, &cptpf->eng_caps, sizeof(rsp->eng_caps));
67 
68 	return 0;
69 }
70 
71 static int handle_msg_get_eng_grp_num(struct otx2_cptpf_dev *cptpf,
72 				      struct otx2_cptvf_info *vf,
73 				      struct mbox_msghdr *req)
74 {
75 	struct otx2_cpt_egrp_num_msg *grp_req;
76 	struct otx2_cpt_egrp_num_rsp *rsp;
77 
78 	grp_req = (struct otx2_cpt_egrp_num_msg *)req;
79 	rsp = (struct otx2_cpt_egrp_num_rsp *)
80 	       otx2_mbox_alloc_msg(&cptpf->vfpf_mbox, vf->vf_id, sizeof(*rsp));
81 	if (!rsp)
82 		return -ENOMEM;
83 
84 	rsp->hdr.id = MBOX_MSG_GET_ENG_GRP_NUM;
85 	rsp->hdr.sig = OTX2_MBOX_RSP_SIG;
86 	rsp->hdr.pcifunc = req->pcifunc;
87 	rsp->eng_type = grp_req->eng_type;
88 	rsp->eng_grp_num = otx2_cpt_get_eng_grp(&cptpf->eng_grps,
89 						grp_req->eng_type);
90 
91 	return 0;
92 }
93 
94 static int handle_msg_kvf_limits(struct otx2_cptpf_dev *cptpf,
95 				 struct otx2_cptvf_info *vf,
96 				 struct mbox_msghdr *req)
97 {
98 	struct otx2_cpt_kvf_limits_rsp *rsp;
99 
100 	rsp = (struct otx2_cpt_kvf_limits_rsp *)
101 	      otx2_mbox_alloc_msg(&cptpf->vfpf_mbox, vf->vf_id, sizeof(*rsp));
102 	if (!rsp)
103 		return -ENOMEM;
104 
105 	rsp->hdr.id = MBOX_MSG_GET_KVF_LIMITS;
106 	rsp->hdr.sig = OTX2_MBOX_RSP_SIG;
107 	rsp->hdr.pcifunc = req->pcifunc;
108 	rsp->kvf_limits = cptpf->kvf_limits;
109 
110 	return 0;
111 }
112 
113 static int cptpf_handle_vf_req(struct otx2_cptpf_dev *cptpf,
114 			       struct otx2_cptvf_info *vf,
115 			       struct mbox_msghdr *req, int size)
116 {
117 	int err = 0;
118 
119 	/* Check if msg is valid, if not reply with an invalid msg */
120 	if (req->sig != OTX2_MBOX_REQ_SIG)
121 		goto inval_msg;
122 
123 	switch (req->id) {
124 	case MBOX_MSG_GET_ENG_GRP_NUM:
125 		err = handle_msg_get_eng_grp_num(cptpf, vf, req);
126 		break;
127 	case MBOX_MSG_GET_CAPS:
128 		err = handle_msg_get_caps(cptpf, vf, req);
129 		break;
130 	case MBOX_MSG_GET_KVF_LIMITS:
131 		err = handle_msg_kvf_limits(cptpf, vf, req);
132 		break;
133 	default:
134 		err = forward_to_af(cptpf, vf, req, size);
135 		break;
136 	}
137 	return err;
138 
139 inval_msg:
140 	otx2_reply_invalid_msg(&cptpf->vfpf_mbox, vf->vf_id, 0, req->id);
141 	otx2_mbox_msg_send(&cptpf->vfpf_mbox, vf->vf_id);
142 	return err;
143 }
144 
145 irqreturn_t otx2_cptpf_vfpf_mbox_intr(int __always_unused irq, void *arg)
146 {
147 	struct otx2_cptpf_dev *cptpf = arg;
148 	struct otx2_cptvf_info *vf;
149 	int i, vf_idx;
150 	u64 intr;
151 
152 	/*
153 	 * Check which VF has raised an interrupt and schedule
154 	 * corresponding work queue to process the messages
155 	 */
156 	for (i = 0; i < 2; i++) {
157 		/* Read the interrupt bits */
158 		intr = otx2_cpt_read64(cptpf->reg_base, BLKADDR_RVUM, 0,
159 				       RVU_PF_VFPF_MBOX_INTX(i));
160 
161 		for (vf_idx = i * 64; vf_idx < cptpf->enabled_vfs; vf_idx++) {
162 			vf = &cptpf->vf[vf_idx];
163 			if (intr & (1ULL << vf->intr_idx)) {
164 				queue_work(cptpf->vfpf_mbox_wq,
165 					   &vf->vfpf_mbox_work);
166 				/* Clear the interrupt */
167 				otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM,
168 						 0, RVU_PF_VFPF_MBOX_INTX(i),
169 						 BIT_ULL(vf->intr_idx));
170 			}
171 		}
172 	}
173 	return IRQ_HANDLED;
174 }
175 
/*
 * Workqueue handler for one VF's VF->PF mailbox: walk every request
 * queued in the shared mailbox region, stamp it with the sender's
 * pcifunc, dispatch it via cptpf_handle_vf_req() and finally push any
 * locally generated responses back to the VF.
 *
 * NOTE(review): the smp_rmb()/smp_wmb() placement and the
 * next_msgoff-based walk mirror the AF mbox protocol; statement order
 * here is significant, so this block is documented rather than
 * restructured.
 */
void otx2_cptpf_vfpf_mbox_handler(struct work_struct *work)
{
	struct otx2_cptpf_dev *cptpf;
	struct otx2_cptvf_info *vf;
	struct otx2_mbox_dev *mdev;
	struct mbox_hdr *req_hdr;
	struct mbox_msghdr *msg;
	struct otx2_mbox *mbox;
	int offset, i, err;

	vf = container_of(work, struct otx2_cptvf_info, vfpf_mbox_work);
	cptpf = vf->cptpf;
	mbox = &cptpf->vfpf_mbox;
	/* sync with mbox memory region */
	smp_rmb();
	mdev = &mbox->dev[vf->vf_id];
	/* Process received mbox messages */
	req_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
	/* First message starts right after the (aligned) batch header */
	offset = mbox->rx_start + ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);

	for (i = 0; i < req_hdr->num_msgs; i++) {
		msg = (struct mbox_msghdr *)(mdev->mbase + offset);

		/* Set which VF sent this message based on mbox IRQ */
		msg->pcifunc = ((u16)cptpf->pf_id << RVU_PFVF_PF_SHIFT) |
				((vf->vf_id + 1) & RVU_PFVF_FUNC_MASK);

		/* Payload size = distance to the next message header */
		err = cptpf_handle_vf_req(cptpf, vf, msg,
					  msg->next_msgoff - offset);
		/*
		 * Behave as the AF, drop the msg if there is
		 * no memory, timeout handling also goes here
		 */
		if (err == -ENOMEM || err == -EIO)
			break;
		offset = msg->next_msgoff;
		/* Write barrier required for VF responses which are handled by
		 * PF driver and not forwarded to AF.
		 */
		smp_wmb();
	}
	/* Send mbox responses to VF */
	if (mdev->num_msgs)
		otx2_mbox_msg_send(mbox, vf->vf_id);
}
221 
222 irqreturn_t otx2_cptpf_afpf_mbox_intr(int __always_unused irq, void *arg)
223 {
224 	struct otx2_cptpf_dev *cptpf = arg;
225 	u64 intr;
226 
227 	/* Read the interrupt bits */
228 	intr = otx2_cpt_read64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_INT);
229 
230 	if (intr & 0x1ULL) {
231 		/* Schedule work queue function to process the MBOX request */
232 		queue_work(cptpf->afpf_mbox_wq, &cptpf->afpf_mbox_work);
233 		/* Clear and ack the interrupt */
234 		otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_INT,
235 				 0x1ULL);
236 	}
237 	return IRQ_HANDLED;
238 }
239 
240 static void process_afpf_mbox_msg(struct otx2_cptpf_dev *cptpf,
241 				  struct mbox_msghdr *msg)
242 {
243 	struct device *dev = &cptpf->pdev->dev;
244 	struct cpt_rd_wr_reg_msg *rsp_rd_wr;
245 
246 	if (msg->id >= MBOX_MSG_MAX) {
247 		dev_err(dev, "MBOX msg with unknown ID %d\n", msg->id);
248 		return;
249 	}
250 	if (msg->sig != OTX2_MBOX_RSP_SIG) {
251 		dev_err(dev, "MBOX msg with wrong signature %x, ID %d\n",
252 			msg->sig, msg->id);
253 		return;
254 	}
255 
256 	switch (msg->id) {
257 	case MBOX_MSG_READY:
258 		cptpf->pf_id = (msg->pcifunc >> RVU_PFVF_PF_SHIFT) &
259 				RVU_PFVF_PF_MASK;
260 		break;
261 	case MBOX_MSG_CPT_RD_WR_REGISTER:
262 		rsp_rd_wr = (struct cpt_rd_wr_reg_msg *)msg;
263 		if (msg->rc) {
264 			dev_err(dev, "Reg %llx rd/wr(%d) failed %d\n",
265 				rsp_rd_wr->reg_offset, rsp_rd_wr->is_write,
266 				msg->rc);
267 			return;
268 		}
269 		if (!rsp_rd_wr->is_write)
270 			*rsp_rd_wr->ret_val = rsp_rd_wr->val;
271 		break;
272 	case MBOX_MSG_ATTACH_RESOURCES:
273 		if (!msg->rc)
274 			cptpf->lfs.are_lfs_attached = 1;
275 		break;
276 	case MBOX_MSG_DETACH_RESOURCES:
277 		if (!msg->rc)
278 			cptpf->lfs.are_lfs_attached = 0;
279 		break;
280 
281 	default:
282 		dev_err(dev,
283 			"Unsupported msg %d received.\n", msg->id);
284 		break;
285 	}
286 }
287 
288 static void forward_to_vf(struct otx2_cptpf_dev *cptpf, struct mbox_msghdr *msg,
289 			  int vf_id, int size)
290 {
291 	struct otx2_mbox *vfpf_mbox;
292 	struct mbox_msghdr *fwd;
293 
294 	if (msg->id >= MBOX_MSG_MAX) {
295 		dev_err(&cptpf->pdev->dev,
296 			"MBOX msg with unknown ID %d\n", msg->id);
297 		return;
298 	}
299 	if (msg->sig != OTX2_MBOX_RSP_SIG) {
300 		dev_err(&cptpf->pdev->dev,
301 			"MBOX msg with wrong signature %x, ID %d\n",
302 			msg->sig, msg->id);
303 		return;
304 	}
305 	vfpf_mbox = &cptpf->vfpf_mbox;
306 	vf_id--;
307 	if (vf_id >= cptpf->enabled_vfs) {
308 		dev_err(&cptpf->pdev->dev,
309 			"MBOX msg to unknown VF: %d >= %d\n",
310 			vf_id, cptpf->enabled_vfs);
311 		return;
312 	}
313 	if (msg->id == MBOX_MSG_VF_FLR)
314 		return;
315 
316 	fwd = otx2_mbox_alloc_msg(vfpf_mbox, vf_id, size);
317 	if (!fwd) {
318 		dev_err(&cptpf->pdev->dev,
319 			"Forwarding to VF%d failed.\n", vf_id);
320 		return;
321 	}
322 	memcpy((uint8_t *)fwd + sizeof(struct mbox_msghdr),
323 		(uint8_t *)msg + sizeof(struct mbox_msghdr), size);
324 	fwd->id = msg->id;
325 	fwd->pcifunc = msg->pcifunc;
326 	fwd->sig = msg->sig;
327 	fwd->ver = msg->ver;
328 	fwd->rc = msg->rc;
329 }
330 
/* Handle mailbox messages received from AF */
/*
 * Workqueue handler for the AF->PF mailbox: walk all responses in the
 * shared region and route each one either to the originating VF
 * (pcifunc FUNC field > 0) or to the PF's own state machine.
 *
 * NOTE(review): the barrier placement and the msgs_acked accounting
 * follow the AF mbox protocol; statement order is significant, so this
 * block is documented rather than restructured.
 */
void otx2_cptpf_afpf_mbox_handler(struct work_struct *work)
{
	struct otx2_cptpf_dev *cptpf;
	struct otx2_mbox *afpf_mbox;
	struct otx2_mbox_dev *mdev;
	struct mbox_hdr *rsp_hdr;
	struct mbox_msghdr *msg;
	int offset, vf_id, i;

	cptpf = container_of(work, struct otx2_cptpf_dev, afpf_mbox_work);
	afpf_mbox = &cptpf->afpf_mbox;
	mdev = &afpf_mbox->dev[0];
	/* Sync mbox data into memory */
	smp_wmb();

	rsp_hdr = (struct mbox_hdr *)(mdev->mbase + afpf_mbox->rx_start);
	/* First message starts right after the (aligned) batch header */
	offset = ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);

	for (i = 0; i < rsp_hdr->num_msgs; i++) {
		msg = (struct mbox_msghdr *)(mdev->mbase + afpf_mbox->rx_start +
					     offset);
		/* FUNC field of pcifunc: 0 = the PF itself, >0 = a VF */
		vf_id = (msg->pcifunc >> RVU_PFVF_FUNC_SHIFT) &
			 RVU_PFVF_FUNC_MASK;
		if (vf_id > 0)
			forward_to_vf(cptpf, msg, vf_id,
				      msg->next_msgoff - offset);
		else
			process_afpf_mbox_msg(cptpf, msg);

		offset = msg->next_msgoff;
		/* Sync VF response ready to be sent */
		smp_wmb();
		mdev->msgs_acked++;
	}
	otx2_mbox_reset(afpf_mbox, 0);
}
368