// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2020 Marvell. */

#include "otx2_cpt_common.h"
#include "otx2_cptpf.h"
#include "rvu_reg.h"

/*
 * CPT PF driver version; it will be incremented by 1 for every
 * feature addition to the CPT mailbox messages.
 */
#define OTX2_CPT_PF_DRV_VERSION 0x1

static int forward_to_af(struct otx2_cptpf_dev *cptpf,
			 struct otx2_cptvf_info *vf,
			 struct mbox_msghdr *req, int size)
{
	struct mbox_msghdr *msg;
	int ret;

	msg = otx2_mbox_alloc_msg(&cptpf->afpf_mbox, 0, size);
	if (msg == NULL)
		return -ENOMEM;

	memcpy((uint8_t *)msg + sizeof(struct mbox_msghdr),
	       (uint8_t *)req + sizeof(struct mbox_msghdr), size);
	msg->id = req->id;
	msg->pcifunc = req->pcifunc;
	msg->sig = req->sig;
	msg->ver = req->ver;

	otx2_mbox_msg_send(&cptpf->afpf_mbox, 0);
	ret = otx2_mbox_wait_for_rsp(&cptpf->afpf_mbox, 0);
	if (ret == -EIO) {
		dev_err(&cptpf->pdev->dev, "RVU MBOX timeout.\n");
		return ret;
	} else if (ret) {
		dev_err(&cptpf->pdev->dev, "RVU MBOX error: %d.\n", ret);
		return -EFAULT;
	}
	return 0;
}
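/*
 * The handlers below service the VF requests that the PF can answer
 * locally; any other request takes the forward_to_af() path above.
 * Each handler only allocates and fills a response in the VF-PF
 * mailbox region; the actual transmit happens once per batch in
 * otx2_cptpf_vfpf_mbox_handler() after all messages are processed.
 */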
static int handle_msg_get_caps(struct otx2_cptpf_dev *cptpf,
			       struct otx2_cptvf_info *vf,
			       struct mbox_msghdr *req)
{
	struct otx2_cpt_caps_rsp *rsp;

	rsp = (struct otx2_cpt_caps_rsp *)
	      otx2_mbox_alloc_msg(&cptpf->vfpf_mbox, vf->vf_id,
				  sizeof(*rsp));
	if (!rsp)
		return -ENOMEM;

	rsp->hdr.id = MBOX_MSG_GET_CAPS;
	rsp->hdr.sig = OTX2_MBOX_RSP_SIG;
	rsp->hdr.pcifunc = req->pcifunc;
	rsp->cpt_pf_drv_version = OTX2_CPT_PF_DRV_VERSION;
	rsp->cpt_revision = cptpf->pdev->revision;
	memcpy(&rsp->eng_caps, &cptpf->eng_caps, sizeof(rsp->eng_caps));

	return 0;
}

static int handle_msg_get_eng_grp_num(struct otx2_cptpf_dev *cptpf,
				      struct otx2_cptvf_info *vf,
				      struct mbox_msghdr *req)
{
	struct otx2_cpt_egrp_num_msg *grp_req;
	struct otx2_cpt_egrp_num_rsp *rsp;

	grp_req = (struct otx2_cpt_egrp_num_msg *)req;
	rsp = (struct otx2_cpt_egrp_num_rsp *)
	      otx2_mbox_alloc_msg(&cptpf->vfpf_mbox, vf->vf_id, sizeof(*rsp));
	if (!rsp)
		return -ENOMEM;

	rsp->hdr.id = MBOX_MSG_GET_ENG_GRP_NUM;
	rsp->hdr.sig = OTX2_MBOX_RSP_SIG;
	rsp->hdr.pcifunc = req->pcifunc;
	rsp->eng_type = grp_req->eng_type;
	rsp->eng_grp_num = otx2_cpt_get_eng_grp(&cptpf->eng_grps,
						grp_req->eng_type);

	return 0;
}

static int handle_msg_kvf_limits(struct otx2_cptpf_dev *cptpf,
				 struct otx2_cptvf_info *vf,
				 struct mbox_msghdr *req)
{
	struct otx2_cpt_kvf_limits_rsp *rsp;

	rsp = (struct otx2_cpt_kvf_limits_rsp *)
	      otx2_mbox_alloc_msg(&cptpf->vfpf_mbox, vf->vf_id, sizeof(*rsp));
	if (!rsp)
		return -ENOMEM;

	rsp->hdr.id = MBOX_MSG_GET_KVF_LIMITS;
	rsp->hdr.sig = OTX2_MBOX_RSP_SIG;
	rsp->hdr.pcifunc = req->pcifunc;
	rsp->kvf_limits = cptpf->kvf_limits;

	return 0;
}

static int cptpf_handle_vf_req(struct otx2_cptpf_dev *cptpf,
			       struct otx2_cptvf_info *vf,
			       struct mbox_msghdr *req, int size)
{
	int err = 0;

	/* Check if the message is valid; if not, reply with an invalid msg */
	if (req->sig != OTX2_MBOX_REQ_SIG)
		goto inval_msg;

	switch (req->id) {
	case MBOX_MSG_GET_ENG_GRP_NUM:
		err = handle_msg_get_eng_grp_num(cptpf, vf, req);
		break;
	case MBOX_MSG_GET_CAPS:
		err = handle_msg_get_caps(cptpf, vf, req);
		break;
	case MBOX_MSG_GET_KVF_LIMITS:
		err = handle_msg_kvf_limits(cptpf, vf, req);
		break;
	default:
		err = forward_to_af(cptpf, vf, req, size);
		break;
	}
	return err;

inval_msg:
	otx2_reply_invalid_msg(&cptpf->vfpf_mbox, vf->vf_id, 0, req->id);
	otx2_mbox_msg_send(&cptpf->vfpf_mbox, vf->vf_id);
	return err;
}

irqreturn_t otx2_cptpf_vfpf_mbox_intr(int __always_unused irq, void *arg)
{
	struct otx2_cptpf_dev *cptpf = arg;
	struct otx2_cptvf_info *vf;
	int i, vf_idx;
	u64 intr;

	/*
	 * Check which VF has raised an interrupt and schedule the
	 * corresponding work queue to process the messages; each
	 * RVU_PF_VFPF_MBOX_INTX register covers up to 64 VFs.
	 */
	for (i = 0; i < 2; i++) {
		/* Read the interrupt bits */
		intr = otx2_cpt_read64(cptpf->reg_base, BLKADDR_RVUM, 0,
				       RVU_PF_VFPF_MBOX_INTX(i));

		for (vf_idx = i * 64; vf_idx < cptpf->enabled_vfs; vf_idx++) {
			vf = &cptpf->vf[vf_idx];
			if (intr & (1ULL << vf->intr_idx)) {
				queue_work(cptpf->vfpf_mbox_wq,
					   &vf->vfpf_mbox_work);
				/* Clear the interrupt */
				otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM,
						 0, RVU_PF_VFPF_MBOX_INTX(i),
						 BIT_ULL(vf->intr_idx));
			}
		}
	}
	return IRQ_HANDLED;
}

void otx2_cptpf_vfpf_mbox_handler(struct work_struct *work)
{
	struct otx2_cptpf_dev *cptpf;
	struct otx2_cptvf_info *vf;
	struct otx2_mbox_dev *mdev;
	struct mbox_hdr *req_hdr;
	struct mbox_msghdr *msg;
	struct otx2_mbox *mbox;
	int offset, i, err;

	vf = container_of(work, struct otx2_cptvf_info, vfpf_mbox_work);
	cptpf = vf->cptpf;
	mbox = &cptpf->vfpf_mbox;
	/* sync with mbox memory region */
	smp_rmb();
	mdev = &mbox->dev[vf->vf_id];
	/* Process received mbox messages */
	req_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
	offset = mbox->rx_start + ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);

	for (i = 0; i < req_hdr->num_msgs; i++) {
		msg = (struct mbox_msghdr *)(mdev->mbase + offset);

		/*
		 * Set which VF sent this message based on the mbox IRQ;
		 * the FUNC field is vf_id + 1 since FUNC 0 denotes the
		 * PF itself.
		 */
		msg->pcifunc = ((u16)cptpf->pf_id << RVU_PFVF_PF_SHIFT) |
				((vf->vf_id + 1) & RVU_PFVF_FUNC_MASK);

		err = cptpf_handle_vf_req(cptpf, vf, msg,
					  msg->next_msgoff - offset);
		/*
		 * Behave like the AF: drop the message if there is no
		 * memory; timeout handling also goes here.
		 */
		if (err == -ENOMEM || err == -EIO)
			break;
		offset = msg->next_msgoff;
	}
	/* Send mbox responses to VF */
	if (mdev->num_msgs)
		otx2_mbox_msg_send(mbox, vf->vf_id);
}

irqreturn_t otx2_cptpf_afpf_mbox_intr(int __always_unused irq, void *arg)
{
	struct otx2_cptpf_dev *cptpf = arg;
	u64 intr;

	/* Read the interrupt bits */
	intr = otx2_cpt_read64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_INT);

	if (intr & 0x1ULL) {
		/* Schedule work queue function to process the MBOX request */
		queue_work(cptpf->afpf_mbox_wq, &cptpf->afpf_mbox_work);
		/* Clear and ack the interrupt */
		otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_INT,
				 0x1ULL);
	}
	return IRQ_HANDLED;
}
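/*
 * process_afpf_mbox_msg() below consumes AF responses to requests that
 * this PF issued for itself (the FUNC field of pcifunc is 0); responses
 * to requests forwarded on behalf of a VF are instead relayed back to
 * that VF by forward_to_vf(). See otx2_cptpf_afpf_mbox_handler().
 */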
static void process_afpf_mbox_msg(struct otx2_cptpf_dev *cptpf,
				  struct mbox_msghdr *msg)
{
	struct device *dev = &cptpf->pdev->dev;
	struct cpt_rd_wr_reg_msg *rsp_rd_wr;

	if (msg->id >= MBOX_MSG_MAX) {
		dev_err(dev, "MBOX msg with unknown ID %d\n", msg->id);
		return;
	}
	if (msg->sig != OTX2_MBOX_RSP_SIG) {
		dev_err(dev, "MBOX msg with wrong signature %x, ID %d\n",
			msg->sig, msg->id);
		return;
	}

	switch (msg->id) {
	case MBOX_MSG_READY:
		cptpf->pf_id = (msg->pcifunc >> RVU_PFVF_PF_SHIFT) &
				RVU_PFVF_PF_MASK;
		break;
	case MBOX_MSG_CPT_RD_WR_REGISTER:
		rsp_rd_wr = (struct cpt_rd_wr_reg_msg *)msg;
		if (msg->rc) {
			dev_err(dev, "Reg %llx rd/wr(%d) failed %d\n",
				rsp_rd_wr->reg_offset, rsp_rd_wr->is_write,
				msg->rc);
			return;
		}
		if (!rsp_rd_wr->is_write)
			*rsp_rd_wr->ret_val = rsp_rd_wr->val;
		break;
	case MBOX_MSG_ATTACH_RESOURCES:
		if (!msg->rc)
			cptpf->lfs.are_lfs_attached = 1;
		break;
	case MBOX_MSG_DETACH_RESOURCES:
		if (!msg->rc)
			cptpf->lfs.are_lfs_attached = 0;
		break;
	default:
		dev_err(dev,
			"Unsupported msg %d received.\n", msg->id);
		break;
	}
}

static void forward_to_vf(struct otx2_cptpf_dev *cptpf, struct mbox_msghdr *msg,
			  int vf_id, int size)
{
	struct otx2_mbox *vfpf_mbox;
	struct mbox_msghdr *fwd;

	if (msg->id >= MBOX_MSG_MAX) {
		dev_err(&cptpf->pdev->dev,
			"MBOX msg with unknown ID %d\n", msg->id);
		return;
	}
	if (msg->sig != OTX2_MBOX_RSP_SIG) {
		dev_err(&cptpf->pdev->dev,
			"MBOX msg with wrong signature %x, ID %d\n",
			msg->sig, msg->id);
		return;
	}
	vfpf_mbox = &cptpf->vfpf_mbox;
	vf_id--;
	if (vf_id >= cptpf->enabled_vfs) {
		dev_err(&cptpf->pdev->dev,
			"MBOX msg to unknown VF: %d >= %d\n",
			vf_id, cptpf->enabled_vfs);
		return;
	}
	if (msg->id == MBOX_MSG_VF_FLR)
		return;

	fwd = otx2_mbox_alloc_msg(vfpf_mbox, vf_id, size);
	if (!fwd) {
		dev_err(&cptpf->pdev->dev,
			"Forwarding to VF%d failed.\n", vf_id);
		return;
	}
	memcpy((uint8_t *)fwd + sizeof(struct mbox_msghdr),
	       (uint8_t *)msg + sizeof(struct mbox_msghdr), size);
	fwd->id = msg->id;
	fwd->pcifunc = msg->pcifunc;
	fwd->sig = msg->sig;
	fwd->ver = msg->ver;
	fwd->rc = msg->rc;
}

/* Handle mailbox messages received from AF */
void otx2_cptpf_afpf_mbox_handler(struct work_struct *work)
{
	struct otx2_cptpf_dev *cptpf;
	struct otx2_mbox *afpf_mbox;
	struct otx2_mbox_dev *mdev;
	struct mbox_hdr *rsp_hdr;
	struct mbox_msghdr *msg;
	int offset, vf_id, i;

	cptpf = container_of(work, struct otx2_cptpf_dev, afpf_mbox_work);
	afpf_mbox = &cptpf->afpf_mbox;
	mdev = &afpf_mbox->dev[0];
	/* Sync mbox data into memory */
	smp_wmb();

	rsp_hdr = (struct mbox_hdr *)(mdev->mbase + afpf_mbox->rx_start);
	offset = ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);

	for (i = 0; i < rsp_hdr->num_msgs; i++) {
		msg = (struct mbox_msghdr *)(mdev->mbase + afpf_mbox->rx_start +
					     offset);
		vf_id = (msg->pcifunc >> RVU_PFVF_FUNC_SHIFT) &
			 RVU_PFVF_FUNC_MASK;
		if (vf_id > 0)
			forward_to_vf(cptpf, msg, vf_id,
				      msg->next_msgoff - offset);
		else
			process_afpf_mbox_msg(cptpf, msg);

		offset = msg->next_msgoff;
		mdev->msgs_acked++;
	}
	otx2_mbox_reset(afpf_mbox, 0);
}
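/*
 * Summary of the two mailbox paths implemented in this file:
 *
 *   VF request serviced locally:
 *     VF -> otx2_cptpf_vfpf_mbox_intr() -> otx2_cptpf_vfpf_mbox_handler()
 *        -> handle_msg_*() -> response sent back on the VF-PF mailbox
 *
 *   VF request the PF cannot answer itself:
 *     VF -> cptpf_handle_vf_req() -> forward_to_af() -> AF
 *     AF -> otx2_cptpf_afpf_mbox_intr() -> otx2_cptpf_afpf_mbox_handler()
 *        -> forward_to_vf() -> VF
 */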