// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2020 Marvell. */

#include "otx2_cpt_common.h"
#include "otx2_cptpf.h"
#include "rvu_reg.h"

/* Fast path IPsec opcode with in-place processing */
#define CPT_INLINE_RX_OPCODE (0x26 | (1 << 6))
#define CN10K_CPT_INLINE_RX_OPCODE (0x29 | (1 << 6))

#define cpt_inline_rx_opcode(pdev)                      \
({                                                      \
	u8 opcode;                                      \
	if (is_dev_otx2(pdev))                          \
		opcode = CPT_INLINE_RX_OPCODE;          \
	else                                            \
		opcode = CN10K_CPT_INLINE_RX_OPCODE;    \
	(opcode);                                       \
})

/*
 * CPT PF driver version. It is incremented by 1 for every feature
 * addition to the CPT mailbox messages.
 */
#define OTX2_CPT_PF_DRV_VERSION 0x1

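/*
 * Forward a mailbox request received from a VF to the AF over the
 * AF<->PF mailbox and wait for the AF to process it.
 */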
static int forward_to_af(struct otx2_cptpf_dev *cptpf,
			 struct otx2_cptvf_info *vf,
			 struct mbox_msghdr *req, int size)
{
	struct mbox_msghdr *msg;
	int ret;

	mutex_lock(&cptpf->lock);
	msg = otx2_mbox_alloc_msg(&cptpf->afpf_mbox, 0, size);
	if (msg == NULL) {
		mutex_unlock(&cptpf->lock);
		return -ENOMEM;
	}

	memcpy((uint8_t *)msg + sizeof(struct mbox_msghdr),
	       (uint8_t *)req + sizeof(struct mbox_msghdr), size);
	msg->id = req->id;
	msg->pcifunc = req->pcifunc;
	msg->sig = req->sig;
	msg->ver = req->ver;

	ret = otx2_cpt_sync_mbox_msg(&cptpf->afpf_mbox);
	/* Error code -EIO indicates a communication failure with the AF.
	 * All other error codes indicate that the AF processed the VF
	 * messages and set the error codes in the response messages
	 * (if any), so simply forward the responses to the VF.
	 */
	if (ret == -EIO) {
		dev_warn(&cptpf->pdev->dev,
			 "AF not responding to VF%d messages\n", vf->vf_id);
		mutex_unlock(&cptpf->lock);
		return ret;
	}
	mutex_unlock(&cptpf->lock);
	return 0;
}

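/*
 * Reply to a MBOX_MSG_GET_CAPS request with the PF driver version,
 * CPT device revision and engine capabilities.
 */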
static int handle_msg_get_caps(struct otx2_cptpf_dev *cptpf,
			       struct otx2_cptvf_info *vf,
			       struct mbox_msghdr *req)
{
	struct otx2_cpt_caps_rsp *rsp;

	rsp = (struct otx2_cpt_caps_rsp *)
	      otx2_mbox_alloc_msg(&cptpf->vfpf_mbox, vf->vf_id,
				  sizeof(*rsp));
	if (!rsp)
		return -ENOMEM;

	rsp->hdr.id = MBOX_MSG_GET_CAPS;
	rsp->hdr.sig = OTX2_MBOX_RSP_SIG;
	rsp->hdr.pcifunc = req->pcifunc;
	rsp->cpt_pf_drv_version = OTX2_CPT_PF_DRV_VERSION;
	rsp->cpt_revision = cptpf->pdev->revision;
	memcpy(&rsp->eng_caps, &cptpf->eng_caps, sizeof(rsp->eng_caps));

	return 0;
}

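/*
 * Reply to a MBOX_MSG_GET_ENG_GRP_NUM request with the engine group
 * number configured for the requested engine type.
 */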
static int handle_msg_get_eng_grp_num(struct otx2_cptpf_dev *cptpf,
				      struct otx2_cptvf_info *vf,
				      struct mbox_msghdr *req)
{
	struct otx2_cpt_egrp_num_msg *grp_req;
	struct otx2_cpt_egrp_num_rsp *rsp;

	grp_req = (struct otx2_cpt_egrp_num_msg *)req;
	rsp = (struct otx2_cpt_egrp_num_rsp *)
	      otx2_mbox_alloc_msg(&cptpf->vfpf_mbox, vf->vf_id, sizeof(*rsp));
	if (!rsp)
		return -ENOMEM;

	rsp->hdr.id = MBOX_MSG_GET_ENG_GRP_NUM;
	rsp->hdr.sig = OTX2_MBOX_RSP_SIG;
	rsp->hdr.pcifunc = req->pcifunc;
	rsp->eng_type = grp_req->eng_type;
	rsp->eng_grp_num = otx2_cpt_get_eng_grp(&cptpf->eng_grps,
						grp_req->eng_type);

	return 0;
}

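/*
 * Reply to a MBOX_MSG_GET_KVF_LIMITS request with the kernel VF limit
 * configured for this PF.
 */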
static int handle_msg_kvf_limits(struct otx2_cptpf_dev *cptpf,
				 struct otx2_cptvf_info *vf,
				 struct mbox_msghdr *req)
{
	struct otx2_cpt_kvf_limits_rsp *rsp;

	rsp = (struct otx2_cpt_kvf_limits_rsp *)
	      otx2_mbox_alloc_msg(&cptpf->vfpf_mbox, vf->vf_id, sizeof(*rsp));
	if (!rsp)
		return -ENOMEM;

	rsp->hdr.id = MBOX_MSG_GET_KVF_LIMITS;
	rsp->hdr.sig = OTX2_MBOX_RSP_SIG;
	rsp->hdr.pcifunc = req->pcifunc;
	rsp->kvf_limits = cptpf->kvf_limits;

	return 0;
}

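/*
 * Request the AF to enable inline inbound IPsec processing on the given
 * CPT LF slot.
 */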
static int send_inline_ipsec_inbound_msg(struct otx2_cptpf_dev *cptpf,
					 int sso_pf_func, u8 slot)
{
	struct cpt_inline_ipsec_cfg_msg *req;
	struct pci_dev *pdev = cptpf->pdev;

	req = (struct cpt_inline_ipsec_cfg_msg *)
	      otx2_mbox_alloc_msg_rsp(&cptpf->afpf_mbox, 0,
				      sizeof(*req), sizeof(struct msg_rsp));
	if (req == NULL) {
		dev_err(&pdev->dev, "RVU MBOX failed to get message.\n");
		return -EFAULT;
	}
	memset(req, 0, sizeof(*req));
	req->hdr.id = MBOX_MSG_CPT_INLINE_IPSEC_CFG;
	req->hdr.sig = OTX2_MBOX_REQ_SIG;
	req->hdr.pcifunc = OTX2_CPT_RVU_PFFUNC(cptpf->pf_id, 0);
	req->dir = CPT_INLINE_INBOUND;
	req->slot = slot;
	req->sso_pf_func_ovrd = cptpf->sso_pf_func_ovrd;
	req->sso_pf_func = sso_pf_func;
	req->enable = 1;

	return otx2_cpt_send_mbox_msg(&cptpf->afpf_mbox, pdev);
}

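/*
 * Configure NIX to steer inbound IPsec traffic to the CPT LF instruction
 * queue (engine group, opcode, credits) and enable inline inbound
 * processing on the CPT LF slot(s).
 */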
static int rx_inline_ipsec_lf_cfg(struct otx2_cptpf_dev *cptpf, u8 egrp,
				  struct otx2_cpt_rx_inline_lf_cfg *req)
{
	struct nix_inline_ipsec_cfg *nix_req;
	struct pci_dev *pdev = cptpf->pdev;
	int ret;

	nix_req = (struct nix_inline_ipsec_cfg *)
		  otx2_mbox_alloc_msg_rsp(&cptpf->afpf_mbox, 0,
					  sizeof(*nix_req),
					  sizeof(struct msg_rsp));
	if (nix_req == NULL) {
		dev_err(&pdev->dev, "RVU MBOX failed to get message.\n");
		return -EFAULT;
	}
	memset(nix_req, 0, sizeof(*nix_req));
	nix_req->hdr.id = MBOX_MSG_NIX_INLINE_IPSEC_CFG;
	nix_req->hdr.sig = OTX2_MBOX_REQ_SIG;
	nix_req->enable = 1;
	if (!req->credit || req->credit > OTX2_CPT_INST_QLEN_MSGS)
		nix_req->cpt_credit = OTX2_CPT_INST_QLEN_MSGS - 1;
	else
		nix_req->cpt_credit = req->credit - 1;
	nix_req->gen_cfg.egrp = egrp;
	if (req->opcode)
		nix_req->gen_cfg.opcode = req->opcode;
	else
		nix_req->gen_cfg.opcode = cpt_inline_rx_opcode(pdev);
	nix_req->gen_cfg.param1 = req->param1;
	nix_req->gen_cfg.param2 = req->param2;
	nix_req->inst_qsel.cpt_pf_func = OTX2_CPT_RVU_PFFUNC(cptpf->pf_id, 0);
	nix_req->inst_qsel.cpt_slot = 0;
	ret = otx2_cpt_send_mbox_msg(&cptpf->afpf_mbox, pdev);
	if (ret)
		return ret;

	if (cptpf->has_cpt1) {
		ret = send_inline_ipsec_inbound_msg(cptpf, req->sso_pf_func, 1);
		if (ret)
			return ret;
	}

	return send_inline_ipsec_inbound_msg(cptpf, req->sso_pf_func, 0);
}

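/*
 * Handle a MBOX_MSG_RX_INLINE_IPSEC_LF_CFG request from a VF: initialize
 * the CPT LF(s) for RX inline IPsec and apply the requested configuration.
 */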
static int handle_msg_rx_inline_ipsec_lf_cfg(struct otx2_cptpf_dev *cptpf,
					     struct mbox_msghdr *req)
{
	struct otx2_cpt_rx_inline_lf_cfg *cfg_req;
	u8 egrp;
	int ret;

	cfg_req = (struct otx2_cpt_rx_inline_lf_cfg *)req;
	if (cptpf->lfs.lfs_num) {
		dev_err(&cptpf->pdev->dev,
			"LF is already configured for RX inline ipsec.\n");
		return -EEXIST;
	}
	/*
	 * Allow LFs to execute requests destined only for the IE_TYPES
	 * engine group and set the queue priority of each LF to high.
	 */
	egrp = otx2_cpt_get_eng_grp(&cptpf->eng_grps, OTX2_CPT_IE_TYPES);
	if (egrp == OTX2_CPT_INVALID_CRYPTO_ENG_GRP) {
		dev_err(&cptpf->pdev->dev,
			"Engine group for inline ipsec is not available\n");
		return -ENOENT;
	}

	otx2_cptlf_set_dev_info(&cptpf->lfs, cptpf->pdev, cptpf->reg_base,
				&cptpf->afpf_mbox, BLKADDR_CPT0);
	ret = otx2_cptlf_init(&cptpf->lfs, 1 << egrp, OTX2_CPT_QUEUE_HI_PRIO,
			      1);
	if (ret) {
		dev_err(&cptpf->pdev->dev,
			"LF configuration failed for RX inline ipsec.\n");
		return ret;
	}

	if (cptpf->has_cpt1) {
		cptpf->rsrc_req_blkaddr = BLKADDR_CPT1;
		otx2_cptlf_set_dev_info(&cptpf->cpt1_lfs, cptpf->pdev,
					cptpf->reg_base, &cptpf->afpf_mbox,
					BLKADDR_CPT1);
		ret = otx2_cptlf_init(&cptpf->cpt1_lfs, 1 << egrp,
				      OTX2_CPT_QUEUE_HI_PRIO, 1);
		if (ret) {
			dev_err(&cptpf->pdev->dev,
				"LF configuration failed for RX inline ipsec.\n");
			goto lf_cleanup;
		}
		cptpf->rsrc_req_blkaddr = 0;
	}

	ret = rx_inline_ipsec_lf_cfg(cptpf, egrp, cfg_req);
	if (ret)
		goto lf1_cleanup;

	return 0;

lf1_cleanup:
	otx2_cptlf_shutdown(&cptpf->cpt1_lfs);
lf_cleanup:
	otx2_cptlf_shutdown(&cptpf->lfs);
	return ret;
}

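/*
 * Dispatch a mailbox request received from a VF: handle it in the PF if
 * possible, otherwise forward it to the AF.
 */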
static int cptpf_handle_vf_req(struct otx2_cptpf_dev *cptpf,
			       struct otx2_cptvf_info *vf,
			       struct mbox_msghdr *req, int size)
{
	int err = 0;

	/* Check if the msg is valid; if not, reply with an invalid msg */
	if (req->sig != OTX2_MBOX_REQ_SIG)
		goto inval_msg;

	switch (req->id) {
	case MBOX_MSG_GET_ENG_GRP_NUM:
		err = handle_msg_get_eng_grp_num(cptpf, vf, req);
		break;
	case MBOX_MSG_GET_CAPS:
		err = handle_msg_get_caps(cptpf, vf, req);
		break;
	case MBOX_MSG_GET_KVF_LIMITS:
		err = handle_msg_kvf_limits(cptpf, vf, req);
		break;
	case MBOX_MSG_RX_INLINE_IPSEC_LF_CFG:
		err = handle_msg_rx_inline_ipsec_lf_cfg(cptpf, req);
		break;

	default:
		err = forward_to_af(cptpf, vf, req, size);
		break;
	}
	return err;

inval_msg:
	otx2_reply_invalid_msg(&cptpf->vfpf_mbox, vf->vf_id, 0, req->id);
	otx2_mbox_msg_send(&cptpf->vfpf_mbox, vf->vf_id);
	return err;
}

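/* Interrupt handler for mailbox interrupts raised by VFs towards this PF. */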
irqreturn_t otx2_cptpf_vfpf_mbox_intr(int __always_unused irq, void *arg)
{
	struct otx2_cptpf_dev *cptpf = arg;
	struct otx2_cptvf_info *vf;
	int i, vf_idx;
	u64 intr;

	/*
	 * Check which VF has raised an interrupt and schedule the
	 * corresponding work queue to process the messages.
	 */
	for (i = 0; i < 2; i++) {
		/* Read the interrupt bits */
		intr = otx2_cpt_read64(cptpf->reg_base, BLKADDR_RVUM, 0,
				       RVU_PF_VFPF_MBOX_INTX(i));

		for (vf_idx = i * 64; vf_idx < cptpf->enabled_vfs; vf_idx++) {
			vf = &cptpf->vf[vf_idx];
			if (intr & (1ULL << vf->intr_idx)) {
				queue_work(cptpf->vfpf_mbox_wq,
					   &vf->vfpf_mbox_work);
				/* Clear the interrupt */
				otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM,
						 0, RVU_PF_VFPF_MBOX_INTX(i),
						 BIT_ULL(vf->intr_idx));
			}
		}
	}
	return IRQ_HANDLED;
}

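/*
 * Work queue handler for the VF<->PF mailbox: process the messages
 * received from a VF and send the responses back to it.
 */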
void otx2_cptpf_vfpf_mbox_handler(struct work_struct *work)
{
	struct otx2_cptpf_dev *cptpf;
	struct otx2_cptvf_info *vf;
	struct otx2_mbox_dev *mdev;
	struct mbox_hdr *req_hdr;
	struct mbox_msghdr *msg;
	struct otx2_mbox *mbox;
	int offset, i, err;

	vf = container_of(work, struct otx2_cptvf_info, vfpf_mbox_work);
	cptpf = vf->cptpf;
	mbox = &cptpf->vfpf_mbox;
	/* sync with mbox memory region */
	smp_rmb();
	mdev = &mbox->dev[vf->vf_id];
	/* Process received mbox messages */
	req_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
	offset = mbox->rx_start + ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);

	for (i = 0; i < req_hdr->num_msgs; i++) {
		msg = (struct mbox_msghdr *)(mdev->mbase + offset);

		/* Set which VF sent this message based on mbox IRQ */
		msg->pcifunc = ((u16)cptpf->pf_id << RVU_PFVF_PF_SHIFT) |
				((vf->vf_id + 1) & RVU_PFVF_FUNC_MASK);

		err = cptpf_handle_vf_req(cptpf, vf, msg,
					  msg->next_msgoff - offset);
		/*
		 * Behave like the AF and drop the message if there is no
		 * memory; timeout handling is covered here as well.
		 */
		if (err == -ENOMEM || err == -EIO)
			break;
		offset = msg->next_msgoff;
		/* Write barrier required for VF responses which are handled by
		 * PF driver and not forwarded to AF.
		 */
		smp_wmb();
	}
	/* Send mbox responses to VF */
	if (mdev->num_msgs)
		otx2_mbox_msg_send(mbox, vf->vf_id);
}

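/*
 * Interrupt handler for the AF<->PF mailbox: queue work to process AF
 * responses and AF-initiated (up) messages.
 */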
irqreturn_t otx2_cptpf_afpf_mbox_intr(int __always_unused irq, void *arg)
{
	struct otx2_cptpf_dev *cptpf = arg;
	struct otx2_mbox_dev *mdev;
	struct otx2_mbox *mbox;
	struct mbox_hdr *hdr;
	u64 intr;

	/* Read the interrupt bits */
	intr = otx2_cpt_read64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_INT);

	if (intr & 0x1ULL) {
		mbox = &cptpf->afpf_mbox;
		mdev = &mbox->dev[0];
		hdr = mdev->mbase + mbox->rx_start;
		if (hdr->num_msgs)
			/* Schedule work queue function to process the MBOX request */
			queue_work(cptpf->afpf_mbox_wq, &cptpf->afpf_mbox_work);

		mbox = &cptpf->afpf_mbox_up;
		mdev = &mbox->dev[0];
		hdr = mdev->mbase + mbox->rx_start;
		if (hdr->num_msgs)
			/* Schedule work queue function to process the MBOX request */
			queue_work(cptpf->afpf_mbox_wq, &cptpf->afpf_mbox_up_work);
		/* Clear and ack the interrupt */
		otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_INT,
				 0x1ULL);
	}
	return IRQ_HANDLED;
}

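/*
 * Process a response received from the AF for a request that this PF
 * sent earlier.
 */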
static void process_afpf_mbox_msg(struct otx2_cptpf_dev *cptpf,
				  struct mbox_msghdr *msg)
{
	struct otx2_cptlfs_info *lfs = &cptpf->lfs;
	struct device *dev = &cptpf->pdev->dev;
	struct cpt_rd_wr_reg_msg *rsp_rd_wr;

	if (msg->id >= MBOX_MSG_MAX) {
		dev_err(dev, "MBOX msg with unknown ID %d\n", msg->id);
		return;
	}
	if (msg->sig != OTX2_MBOX_RSP_SIG) {
		dev_err(dev, "MBOX msg with wrong signature %x, ID %d\n",
			msg->sig, msg->id);
		return;
	}
	if (cptpf->rsrc_req_blkaddr == BLKADDR_CPT1)
		lfs = &cptpf->cpt1_lfs;

	switch (msg->id) {
	case MBOX_MSG_READY:
		cptpf->pf_id = (msg->pcifunc >> RVU_PFVF_PF_SHIFT) &
				RVU_PFVF_PF_MASK;
		break;
	case MBOX_MSG_CPT_RD_WR_REGISTER:
		rsp_rd_wr = (struct cpt_rd_wr_reg_msg *)msg;
		if (msg->rc) {
			dev_err(dev, "Reg %llx rd/wr(%d) failed %d\n",
				rsp_rd_wr->reg_offset, rsp_rd_wr->is_write,
				msg->rc);
			return;
		}
		if (!rsp_rd_wr->is_write)
			*rsp_rd_wr->ret_val = rsp_rd_wr->val;
		break;
	case MBOX_MSG_ATTACH_RESOURCES:
		if (!msg->rc)
			lfs->are_lfs_attached = 1;
		break;
	case MBOX_MSG_DETACH_RESOURCES:
		if (!msg->rc)
			lfs->are_lfs_attached = 0;
		break;
	case MBOX_MSG_CPT_INLINE_IPSEC_CFG:
	case MBOX_MSG_NIX_INLINE_IPSEC_CFG:
		break;

	default:
		dev_err(dev,
			"Unsupported msg %d received.\n", msg->id);
		break;
	}
}

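/*
 * Forward an AF response destined for a VF to that VF over the VF<->PF
 * mailbox.
 */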
static void forward_to_vf(struct otx2_cptpf_dev *cptpf, struct mbox_msghdr *msg,
			  int vf_id, int size)
{
	struct otx2_mbox *vfpf_mbox;
	struct mbox_msghdr *fwd;

	if (msg->id >= MBOX_MSG_MAX) {
		dev_err(&cptpf->pdev->dev,
			"MBOX msg with unknown ID %d\n", msg->id);
		return;
	}
	if (msg->sig != OTX2_MBOX_RSP_SIG) {
		dev_err(&cptpf->pdev->dev,
			"MBOX msg with wrong signature %x, ID %d\n",
			msg->sig, msg->id);
		return;
	}
	vfpf_mbox = &cptpf->vfpf_mbox;
	vf_id--;
	if (vf_id >= cptpf->enabled_vfs) {
		dev_err(&cptpf->pdev->dev,
			"MBOX msg to unknown VF: %d >= %d\n",
			vf_id, cptpf->enabled_vfs);
		return;
	}
	if (msg->id == MBOX_MSG_VF_FLR)
		return;

	fwd = otx2_mbox_alloc_msg(vfpf_mbox, vf_id, size);
	if (!fwd) {
		dev_err(&cptpf->pdev->dev,
			"Forwarding to VF%d failed.\n", vf_id);
		return;
	}
	memcpy((uint8_t *)fwd + sizeof(struct mbox_msghdr),
	       (uint8_t *)msg + sizeof(struct mbox_msghdr), size);
	fwd->id = msg->id;
	fwd->pcifunc = msg->pcifunc;
	fwd->sig = msg->sig;
	fwd->ver = msg->ver;
	fwd->rc = msg->rc;
}

/* Handle mailbox messages received from AF */
void otx2_cptpf_afpf_mbox_handler(struct work_struct *work)
{
	struct otx2_cptpf_dev *cptpf;
	struct otx2_mbox *afpf_mbox;
	struct otx2_mbox_dev *mdev;
	struct mbox_hdr *rsp_hdr;
	struct mbox_msghdr *msg;
	int offset, vf_id, i;

	cptpf = container_of(work, struct otx2_cptpf_dev, afpf_mbox_work);
	afpf_mbox = &cptpf->afpf_mbox;
	mdev = &afpf_mbox->dev[0];
	/* Sync mbox data into memory */
	smp_wmb();

	rsp_hdr = (struct mbox_hdr *)(mdev->mbase + afpf_mbox->rx_start);
	offset = ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);

	for (i = 0; i < rsp_hdr->num_msgs; i++) {
		msg = (struct mbox_msghdr *)(mdev->mbase + afpf_mbox->rx_start +
					     offset);
		vf_id = (msg->pcifunc >> RVU_PFVF_FUNC_SHIFT) &
			 RVU_PFVF_FUNC_MASK;
		if (vf_id > 0)
			forward_to_vf(cptpf, msg, vf_id,
				      msg->next_msgoff - offset);
		else
			process_afpf_mbox_msg(cptpf, msg);

		offset = msg->next_msgoff;
		/* Sync VF response ready to be sent */
		smp_wmb();
		mdev->msgs_acked++;
	}
	otx2_mbox_reset(afpf_mbox, 0);
}

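/*
 * Handle a MBOX_MSG_CPT_INST_LMTST up-message from the AF: submit the
 * provided CPT instruction on LF 0 and acknowledge the request.
 */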
static void handle_msg_cpt_inst_lmtst(struct otx2_cptpf_dev *cptpf,
				      struct mbox_msghdr *msg)
{
	struct cpt_inst_lmtst_req *req = (struct cpt_inst_lmtst_req *)msg;
	struct otx2_cptlfs_info *lfs = &cptpf->lfs;
	struct msg_rsp *rsp;

	if (cptpf->lfs.lfs_num)
		lfs->ops->send_cmd((union otx2_cpt_inst_s *)req->inst, 1,
				   &lfs->lf[0]);

	rsp = (struct msg_rsp *)otx2_mbox_alloc_msg(&cptpf->afpf_mbox_up, 0,
						    sizeof(*rsp));
	if (!rsp)
		return;

	rsp->hdr.id = msg->id;
	rsp->hdr.sig = OTX2_MBOX_RSP_SIG;
	rsp->hdr.pcifunc = 0;
	rsp->hdr.rc = 0;
}

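/* Process an AF-initiated (up) mailbox message. */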
static void process_afpf_mbox_up_msg(struct otx2_cptpf_dev *cptpf,
				     struct mbox_msghdr *msg)
{
	if (msg->id >= MBOX_MSG_MAX) {
		dev_err(&cptpf->pdev->dev,
			"MBOX msg with unknown ID %d\n", msg->id);
		return;
	}

	switch (msg->id) {
	case MBOX_MSG_CPT_INST_LMTST:
		handle_msg_cpt_inst_lmtst(cptpf, msg);
		break;
	default:
		otx2_reply_invalid_msg(&cptpf->afpf_mbox_up, 0, 0, msg->id);
	}
}

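/* Work queue handler for AF-initiated (up) mailbox messages. */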
void otx2_cptpf_afpf_mbox_up_handler(struct work_struct *work)
{
	struct otx2_cptpf_dev *cptpf;
	struct otx2_mbox_dev *mdev;
	struct mbox_hdr *rsp_hdr;
	struct mbox_msghdr *msg;
	struct otx2_mbox *mbox;
	int offset, i;

	cptpf = container_of(work, struct otx2_cptpf_dev, afpf_mbox_up_work);
	mbox = &cptpf->afpf_mbox_up;
	mdev = &mbox->dev[0];
	/* Sync mbox data into memory */
	smp_wmb();

	rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
	offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);

	for (i = 0; i < rsp_hdr->num_msgs; i++) {
		msg = (struct mbox_msghdr *)(mdev->mbase + offset);

		process_afpf_mbox_up_msg(cptpf, msg);

		offset = mbox->rx_start + msg->next_msgoff;
	}
	otx2_mbox_msg_send(mbox, 0);
}