// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTx2 RVU Admin Function driver
 *
 * Copyright (C) 2018 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/pci.h>

#include "rvu_reg.h"
#include "mbox.h"
#include "rvu_trace.h"

/* Each mailbox window begins with a struct mbox_hdr; the actual messages
 * start after it, rounded up to the mailbox message alignment.
 */
static const u16 msgs_offset = ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);

/* Reset @devid's mailbox region: clear both the TX and RX headers in the
 * shared memory and the locally tracked message/response byte counts, so
 * any messages queued but not yet sent are dropped.
 */
void otx2_mbox_reset(struct otx2_mbox *mbox, int devid)
{
	void *hw_mbase = mbox->hwbase + (devid * MBOX_SIZE);
	struct otx2_mbox_dev *mdev = &mbox->dev[devid];
	struct mbox_hdr *tx_hdr, *rx_hdr;

	tx_hdr = hw_mbase + mbox->tx_start;
	rx_hdr = hw_mbase + mbox->rx_start;

	spin_lock(&mdev->mbox_lock);
	mdev->msg_size = 0;
	mdev->rsp_size = 0;
	tx_hdr->num_msgs = 0;
	tx_hdr->msg_size = 0;
	rx_hdr->num_msgs = 0;
	rx_hdr->msg_size = 0;
	spin_unlock(&mdev->mbox_lock);
}
EXPORT_SYMBOL(otx2_mbox_reset);

/* Tear down a mailbox: drop the base pointers and free the per-device
 * state array allocated by otx2_mbox_init(). Safe on a partially
 * initialized mailbox since kfree(NULL) is a no-op.
 */
void otx2_mbox_destroy(struct otx2_mbox *mbox)
{
	mbox->reg_base = NULL;
	mbox->hwbase = NULL;

	kfree(mbox->dev);
	mbox->dev = NULL;
}
EXPORT_SYMBOL(otx2_mbox_destroy);

/* Initialize a mailbox over the shared memory at @hwbase for @ndevs peer
 * devices, each owning a MBOX_SIZE slice of it.
 *
 * @mbox:      mailbox state to fill in
 * @hwbase:    base of the shared mailbox memory
 * @pdev:      PCI device owning this mailbox (used for logging/tracing)
 * @reg_base:  base of the register region holding the doorbell register
 * @direction: one of the MBOX_DIR_* values; selects window layout and
 *             doorbell register
 * @ndevs:     number of peer devices sharing this mailbox
 *
 * Returns 0 on success, -ENODEV for an unknown @direction, -ENOMEM if the
 * per-device array cannot be allocated.
 */
int otx2_mbox_init(struct otx2_mbox *mbox, void *hwbase, struct pci_dev *pdev,
		   void *reg_base, int direction, int ndevs)
{
	struct otx2_mbox_dev *mdev;
	int devid;

	/* Pick the TX/RX window layout for this end of the channel.
	 * AFPF/PFVF use the DOWN (resp. UP) TX/RX windows as-is; the
	 * PFAF/VFPF directions are the opposite end of the same shared
	 * memory, so their TX and RX windows are swapped.
	 */
	switch (direction) {
	case MBOX_DIR_AFPF:
	case MBOX_DIR_PFVF:
		mbox->tx_start = MBOX_DOWN_TX_START;
		mbox->rx_start = MBOX_DOWN_RX_START;
		mbox->tx_size = MBOX_DOWN_TX_SIZE;
		mbox->rx_size = MBOX_DOWN_RX_SIZE;
		break;
	case MBOX_DIR_PFAF:
	case MBOX_DIR_VFPF:
		mbox->tx_start = MBOX_DOWN_RX_START;
		mbox->rx_start = MBOX_DOWN_TX_START;
		mbox->tx_size = MBOX_DOWN_RX_SIZE;
		mbox->rx_size = MBOX_DOWN_TX_SIZE;
		break;
	case MBOX_DIR_AFPF_UP:
	case MBOX_DIR_PFVF_UP:
		mbox->tx_start = MBOX_UP_TX_START;
		mbox->rx_start = MBOX_UP_RX_START;
		mbox->tx_size = MBOX_UP_TX_SIZE;
		mbox->rx_size = MBOX_UP_RX_SIZE;
		break;
	case MBOX_DIR_PFAF_UP:
	case MBOX_DIR_VFPF_UP:
		mbox->tx_start = MBOX_UP_RX_START;
		mbox->rx_start = MBOX_UP_TX_START;
		mbox->tx_size = MBOX_UP_RX_SIZE;
		mbox->rx_size = MBOX_UP_TX_SIZE;
		break;
	default:
		return -ENODEV;
	}

	/* Pick the doorbell register and the per-device shift used to
	 * address it when ringing the peer in otx2_mbox_msg_send().
	 */
	switch (direction) {
	case MBOX_DIR_AFPF:
	case MBOX_DIR_AFPF_UP:
		mbox->trigger = RVU_AF_AFPF_MBOX0;
		mbox->tr_shift = 4;
		break;
	case MBOX_DIR_PFAF:
	case MBOX_DIR_PFAF_UP:
		mbox->trigger = RVU_PF_PFAF_MBOX1;
		mbox->tr_shift = 0;
		break;
	case MBOX_DIR_PFVF:
	case MBOX_DIR_PFVF_UP:
		mbox->trigger = RVU_PF_VFX_PFVF_MBOX0;
		mbox->tr_shift = 12;
		break;
	case MBOX_DIR_VFPF:
	case MBOX_DIR_VFPF_UP:
		mbox->trigger = RVU_VF_VFPF_MBOX1;
		mbox->tr_shift = 0;
		break;
	default:
		return -ENODEV;
	}

	mbox->reg_base = reg_base;
	mbox->hwbase = hwbase;
	mbox->pdev = pdev;

	mbox->dev = kcalloc(ndevs, sizeof(struct otx2_mbox_dev), GFP_KERNEL);
	if (!mbox->dev) {
		otx2_mbox_destroy(mbox);
		return -ENOMEM;
	}

	mbox->ndevs = ndevs;
	for (devid = 0; devid < ndevs; devid++) {
		mdev = &mbox->dev[devid];
		/* Each device's mailbox base is its MBOX_SIZE slice of the
		 * shared region; here it aliases the hw memory directly
		 * (no bounce buffer).
		 */
		mdev->mbase = mbox->hwbase + (devid * MBOX_SIZE);
		spin_lock_init(&mdev->mbox_lock);
		/* Init header to reset value */
		otx2_mbox_reset(mbox, devid);
	}

	return 0;
}
EXPORT_SYMBOL(otx2_mbox_init);

/* Sleep-wait until the peer has acked all messages sent on @devid's
 * mailbox (num_msgs == msgs_acked) or MBOX_RSP_TIMEOUT ms elapse.
 * Must be called from sleepable context (uses usleep_range()).
 *
 * Returns 0 when all responses arrived, -EIO on timeout.
 */
int otx2_mbox_wait_for_rsp(struct otx2_mbox *mbox, int devid)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(MBOX_RSP_TIMEOUT);
	struct otx2_mbox_dev *mdev = &mbox->dev[devid];
	struct device *sender = &mbox->pdev->dev;

	while (!time_after(jiffies, timeout)) {
		if (mdev->num_msgs == mdev->msgs_acked)
			return 0;
		usleep_range(800, 1000);
	}
	dev_dbg(sender, "timed out while waiting for rsp\n");
	return -EIO;
}
EXPORT_SYMBOL(otx2_mbox_wait_for_rsp);

/* Busy-poll (cpu_relax(), no sleeping) for the same condition as
 * otx2_mbox_wait_for_rsp(), with a fixed 1 second timeout. Usable from
 * atomic context.
 *
 * Returns 0 when all responses arrived, -EIO on timeout.
 */
int otx2_mbox_busy_poll_for_rsp(struct otx2_mbox *mbox, int devid)
{
	struct otx2_mbox_dev *mdev = &mbox->dev[devid];
	unsigned long timeout = jiffies + 1 * HZ;

	while (!time_after(jiffies, timeout)) {
		if (mdev->num_msgs == mdev->msgs_acked)
			return 0;
		cpu_relax();
	}
	return -EIO;
}
EXPORT_SYMBOL(otx2_mbox_busy_poll_for_rsp);

/* Publish the messages queued on @devid's mailbox to the peer: copy them
 * to the hw mailbox if a bounce buffer is in use, update the TX header,
 * and ring the peer's doorbell register.
 */
void otx2_mbox_msg_send(struct otx2_mbox *mbox, int devid)
{
	void *hw_mbase = mbox->hwbase + (devid * MBOX_SIZE);
	struct otx2_mbox_dev *mdev = &mbox->dev[devid];
	struct mbox_hdr *tx_hdr, *rx_hdr;

	tx_hdr = hw_mbase + mbox->tx_start;
	rx_hdr = hw_mbase + mbox->rx_start;

	/* If bounce buffer is implemented copy mbox messages from
	 * bounce buffer to hw mbox memory.
	 */
	if (mdev->mbase != hw_mbase)
		memcpy(hw_mbase + mbox->tx_start + msgs_offset,
		       mdev->mbase + mbox->tx_start + msgs_offset,
		       mdev->msg_size);

	spin_lock(&mdev->mbox_lock);

	tx_hdr->msg_size = mdev->msg_size;

	/* Reset header for next messages */
	mdev->msg_size = 0;
	mdev->rsp_size = 0;
	mdev->msgs_acked = 0;

	/* Sync mbox data into memory */
	smp_wmb();

	/* num_msgs != 0 signals to the peer that the buffer has a number of
	 * messages. So this should be written after writing all the messages
	 * to the shared memory.
	 */
	tx_hdr->num_msgs = mdev->num_msgs;
	rx_hdr->num_msgs = 0;

	trace_otx2_msg_send(mbox->pdev, tx_hdr->num_msgs, tx_hdr->msg_size);

	spin_unlock(&mdev->mbox_lock);

	/* The interrupt should be fired after num_msgs is written
	 * to the shared memory
	 */
	writeq(1, (void __iomem *)mbox->reg_base +
	       (mbox->trigger | (devid << mbox->tr_shift)));
}
EXPORT_SYMBOL(otx2_mbox_msg_send);

/* Reserve space in @devid's TX window for a request of @size bytes and,
 * in the RX window, @size_rsp bytes for its expected response. Both sizes
 * are rounded up to MBOX_MSG_ALIGN. The returned header is zeroed and
 * stamped with OTX2_MBOX_VERSION; the caller fills in the rest.
 *
 * Returns the request's mbox_msghdr inside the TX window, or NULL when
 * either window lacks room.
 */
struct mbox_msghdr *otx2_mbox_alloc_msg_rsp(struct otx2_mbox *mbox, int devid,
					    int size, int size_rsp)
{
	struct otx2_mbox_dev *mdev = &mbox->dev[devid];
	struct mbox_msghdr *msghdr = NULL;

	spin_lock(&mdev->mbox_lock);
	size = ALIGN(size, MBOX_MSG_ALIGN);
	size_rsp = ALIGN(size_rsp, MBOX_MSG_ALIGN);
	/* Check if there is space in mailbox */
	if ((mdev->msg_size + size) > mbox->tx_size - msgs_offset)
		goto exit;
	if ((mdev->rsp_size + size_rsp) > mbox->rx_size - msgs_offset)
		goto exit;

	/* First message of a new batch restarts the message count */
	if (mdev->msg_size == 0)
		mdev->num_msgs = 0;
	mdev->num_msgs++;

	msghdr = mdev->mbase + mbox->tx_start + msgs_offset + mdev->msg_size;

	/* Clear the whole msg region */
	memset(msghdr, 0, size);
	/* Init message header with reset values */
	msghdr->ver = OTX2_MBOX_VERSION;
	mdev->msg_size += size;
	mdev->rsp_size += size_rsp;
	msghdr->next_msgoff = mdev->msg_size + msgs_offset;
exit:
	spin_unlock(&mdev->mbox_lock);

	return msghdr;
}
EXPORT_SYMBOL(otx2_mbox_alloc_msg_rsp);

/* Find the response matching the request @msg by walking the TX and RX
 * windows in lockstep (requests and responses are laid out in the same
 * order). Only valid once all responses have arrived.
 *
 * Returns the response's mbox_msghdr, or ERR_PTR(-ENODEV) when responses
 * are still outstanding, @msg is not found, or the paired response's id
 * does not match the request's.
 */
struct mbox_msghdr *otx2_mbox_get_rsp(struct otx2_mbox *mbox, int devid,
				      struct mbox_msghdr *msg)
{
	unsigned long imsg = mbox->tx_start + msgs_offset;
	unsigned long irsp = mbox->rx_start + msgs_offset;
	struct otx2_mbox_dev *mdev = &mbox->dev[devid];
	u16 msgs;

	spin_lock(&mdev->mbox_lock);

	if (mdev->num_msgs != mdev->msgs_acked)
		goto error;

	for (msgs = 0; msgs < mdev->msgs_acked; msgs++) {
		struct mbox_msghdr *pmsg = mdev->mbase + imsg;
		struct mbox_msghdr *prsp = mdev->mbase + irsp;

		if (msg == pmsg) {
			if (pmsg->id != prsp->id)
				goto error;
			spin_unlock(&mdev->mbox_lock);
			return prsp;
		}

		imsg = mbox->tx_start + pmsg->next_msgoff;
		irsp = mbox->rx_start + prsp->next_msgoff;
	}

error:
	spin_unlock(&mdev->mbox_lock);
	return ERR_PTR(-ENODEV);
}
EXPORT_SYMBOL(otx2_mbox_get_rsp);

/* Validate the whole response batch on @devid's mailbox: every response
 * id must match its request's id, and no response may carry a non-zero
 * error code. Mismatches and errors are recorded via trace_otx2_msg_check.
 *
 * Returns 0 when every response is OK, the first response's error code
 * when one failed, or -ENODEV when responses are outstanding or an id
 * mismatch is found.
 */
int otx2_mbox_check_rsp_msgs(struct otx2_mbox *mbox, int devid)
{
	unsigned long ireq = mbox->tx_start + msgs_offset;
	unsigned long irsp = mbox->rx_start + msgs_offset;
	struct otx2_mbox_dev *mdev = &mbox->dev[devid];
	int rc = -ENODEV;
	u16 msgs;

	spin_lock(&mdev->mbox_lock);

	if (mdev->num_msgs != mdev->msgs_acked)
		goto exit;

	for (msgs = 0; msgs < mdev->msgs_acked; msgs++) {
		struct mbox_msghdr *preq = mdev->mbase + ireq;
		struct mbox_msghdr *prsp = mdev->mbase + irsp;

		if (preq->id != prsp->id) {
			trace_otx2_msg_check(mbox->pdev, preq->id,
					     prsp->id, prsp->rc);
			goto exit;
		}
		if (prsp->rc) {
			rc = prsp->rc;
			trace_otx2_msg_check(mbox->pdev, preq->id,
					     prsp->id, prsp->rc);
			goto exit;
		}

		ireq = mbox->tx_start + preq->next_msgoff;
		irsp = mbox->rx_start + prsp->next_msgoff;
	}
	rc = 0;
exit:
	spin_unlock(&mdev->mbox_lock);
	return rc;
}
EXPORT_SYMBOL(otx2_mbox_check_rsp_msgs);

/* Queue a MBOX_MSG_INVALID reply for message @id from @pcifunc on
 * @devid's mailbox, used to reject a request that could not be handled.
 *
 * Returns 0 on success, -ENOMEM if the reply cannot be allocated.
 */
int
otx2_reply_invalid_msg(struct otx2_mbox *mbox, int devid, u16 pcifunc, u16 id)
{
	struct msg_rsp *rsp;

	rsp = (struct msg_rsp *)
	       otx2_mbox_alloc_msg(mbox, devid, sizeof(*rsp));
	if (!rsp)
		return -ENOMEM;
	rsp->hdr.id = id;
	rsp->hdr.sig = OTX2_MBOX_RSP_SIG;
	rsp->hdr.rc = MBOX_MSG_INVALID;
	rsp->hdr.pcifunc = pcifunc;
	return 0;
}
EXPORT_SYMBOL(otx2_reply_invalid_msg);

/* Return true if @devid's mailbox has any messages queued (num_msgs != 0). */
bool otx2_mbox_nonempty(struct otx2_mbox *mbox, int devid)
{
	struct otx2_mbox_dev *mdev = &mbox->dev[devid];
	bool ret;

	spin_lock(&mdev->mbox_lock);
	ret = mdev->num_msgs != 0;
	spin_unlock(&mdev->mbox_lock);

	return ret;
}
EXPORT_SYMBOL(otx2_mbox_nonempty);

/* Map a mailbox message id to its name, expanding the MBOX_MESSAGES
 * x-macro table into one switch case per message.
 */
const char *otx2_mbox_id2name(u16 id)
{
	switch (id) {
#define M(_name, _id, _1, _2, _3) case _id: return # _name;
	MBOX_MESSAGES
#undef M
	default:
		return "INVALID ID";
	}
}
EXPORT_SYMBOL(otx2_mbox_id2name);

MODULE_AUTHOR("Marvell International Ltd.");
MODULE_LICENSE("GPL v2");