// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTx2 RVU Admin Function driver
 *
 * Copyright (C) 2018 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/pci.h>

#include "rvu_reg.h"
#include "mbox.h"

/* Byte offset of the first message within a mailbox region: each region
 * starts with a struct mbox_hdr, rounded up to MBOX_MSG_ALIGN.
 */
static const u16 msgs_offset = ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);

/* Reset the TX and RX headers of @devid's hardware mailbox region and the
 * local size bookkeeping, discarding any queued messages.
 *
 * Lockless variant: the caller is responsible for serializing against other
 * users of this devid (e.g. by holding mdev->mbox_lock).
 */
void __otx2_mbox_reset(struct otx2_mbox *mbox, int devid)
{
	void *hw_mbase = mbox->hwbase + (devid * MBOX_SIZE);
	struct otx2_mbox_dev *mdev = &mbox->dev[devid];
	struct mbox_hdr *tx_hdr, *rx_hdr;

	tx_hdr = hw_mbase + mbox->tx_start;
	rx_hdr = hw_mbase + mbox->rx_start;

	mdev->msg_size = 0;
	mdev->rsp_size = 0;
	tx_hdr->num_msgs = 0;
	tx_hdr->msg_size = 0;
	rx_hdr->num_msgs = 0;
	rx_hdr->msg_size = 0;
}
EXPORT_SYMBOL(__otx2_mbox_reset);

/* Locked wrapper around __otx2_mbox_reset(). */
void otx2_mbox_reset(struct otx2_mbox *mbox, int devid)
{
	struct otx2_mbox_dev *mdev = &mbox->dev[devid];

	spin_lock(&mdev->mbox_lock);
	__otx2_mbox_reset(mbox, devid);
	spin_unlock(&mdev->mbox_lock);
}
EXPORT_SYMBOL(otx2_mbox_destroy) /* see below */;

/* Free the per-device array allocated by otx2_mbox_init() and drop the
 * base-address references.  The hwbase/reg_base mappings themselves are
 * owned by the caller and are only unreferenced here, not unmapped.
 */
void otx2_mbox_destroy(struct otx2_mbox *mbox)
{
	mbox->reg_base = NULL;
	mbox->hwbase = NULL;

	kfree(mbox->dev);
	mbox->dev = NULL;
}
EXPORT_SYMBOL(otx2_mbox_destroy);

/* Initialize an otx2_mbox covering @ndevs per-device regions inside @hwbase.
 *
 * @direction selects which half of the shared region is TX vs RX: the two
 * ends of each channel (e.g. AF->PF vs PF->AF) use mirrored start/size
 * values so that one side's TX is the other side's RX.  It also selects the
 * doorbell register (@trigger) and the per-devid shift used to address it.
 *
 * Returns 0 on success, -ENODEV for an unknown direction, -ENOMEM if the
 * per-device array cannot be allocated.
 */
int otx2_mbox_init(struct otx2_mbox *mbox, void *hwbase, struct pci_dev *pdev,
		   void *reg_base, int direction, int ndevs)
{
	struct otx2_mbox_dev *mdev;
	int devid;

	switch (direction) {
	case MBOX_DIR_AFPF:
	case MBOX_DIR_PFVF:
		mbox->tx_start = MBOX_DOWN_TX_START;
		mbox->rx_start = MBOX_DOWN_RX_START;
		mbox->tx_size = MBOX_DOWN_TX_SIZE;
		mbox->rx_size = MBOX_DOWN_RX_SIZE;
		break;
	case MBOX_DIR_PFAF:
	case MBOX_DIR_VFPF:
		/* Mirror image of the AFPF/PFVF layout above */
		mbox->tx_start = MBOX_DOWN_RX_START;
		mbox->rx_start = MBOX_DOWN_TX_START;
		mbox->tx_size = MBOX_DOWN_RX_SIZE;
		mbox->rx_size = MBOX_DOWN_TX_SIZE;
		break;
	case MBOX_DIR_AFPF_UP:
	case MBOX_DIR_PFVF_UP:
		mbox->tx_start = MBOX_UP_TX_START;
		mbox->rx_start = MBOX_UP_RX_START;
		mbox->tx_size = MBOX_UP_TX_SIZE;
		mbox->rx_size = MBOX_UP_RX_SIZE;
		break;
	case MBOX_DIR_PFAF_UP:
	case MBOX_DIR_VFPF_UP:
		/* Mirror image of the AFPF_UP/PFVF_UP layout above */
		mbox->tx_start = MBOX_UP_RX_START;
		mbox->rx_start = MBOX_UP_TX_START;
		mbox->tx_size = MBOX_UP_RX_SIZE;
		mbox->rx_size = MBOX_UP_TX_SIZE;
		break;
	default:
		return -ENODEV;
	}

	switch (direction) {
	case MBOX_DIR_AFPF:
	case MBOX_DIR_AFPF_UP:
		mbox->trigger = RVU_AF_AFPF_MBOX0;
		mbox->tr_shift = 4;
		break;
	case MBOX_DIR_PFAF:
	case MBOX_DIR_PFAF_UP:
		mbox->trigger = RVU_PF_PFAF_MBOX1;
		mbox->tr_shift = 0;
		break;
	case MBOX_DIR_PFVF:
	case MBOX_DIR_PFVF_UP:
		mbox->trigger = RVU_PF_VFX_PFVF_MBOX0;
		mbox->tr_shift = 12;
		break;
	case MBOX_DIR_VFPF:
	case MBOX_DIR_VFPF_UP:
		mbox->trigger = RVU_VF_VFPF_MBOX1;
		mbox->tr_shift = 0;
		break;
	default:
		return -ENODEV;
	}

	mbox->reg_base = reg_base;
	mbox->hwbase = hwbase;
	mbox->pdev = pdev;

	mbox->dev = kcalloc(ndevs, sizeof(struct otx2_mbox_dev), GFP_KERNEL);
	if (!mbox->dev) {
		otx2_mbox_destroy(mbox);
		return -ENOMEM;
	}

	mbox->ndevs = ndevs;
	for (devid = 0; devid < ndevs; devid++) {
		mdev = &mbox->dev[devid];
		/* Each device gets a fixed MBOX_SIZE slice of hwbase */
		mdev->mbase = mbox->hwbase + (devid * MBOX_SIZE);
		spin_lock_init(&mdev->mbox_lock);
		/* Init header to reset value */
		otx2_mbox_reset(mbox, devid);
	}

	return 0;
}
EXPORT_SYMBOL(otx2_mbox_init);

/* Sleep-wait (up to MBOX_RSP_TIMEOUT ms) until the peer has acked every
 * message sent on @devid's mailbox, i.e. msgs_acked catches up to num_msgs.
 * May sleep; do not call from atomic context.
 *
 * Returns 0 when all responses arrived, -EIO on timeout.
 */
int otx2_mbox_wait_for_rsp(struct otx2_mbox *mbox, int devid)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(MBOX_RSP_TIMEOUT);
	struct otx2_mbox_dev *mdev = &mbox->dev[devid];
	struct device *sender = &mbox->pdev->dev;

	while (!time_after(jiffies, timeout)) {
		if (mdev->num_msgs == mdev->msgs_acked)
			return 0;
		usleep_range(800, 1000);
	}
	dev_dbg(sender, "timed out while waiting for rsp\n");
	return -EIO;
}
EXPORT_SYMBOL(otx2_mbox_wait_for_rsp);

/* Busy-poll variant of otx2_mbox_wait_for_rsp() with a fixed 1 second
 * budget; safe where sleeping is not allowed.  Returns 0 or -EIO.
 */
int otx2_mbox_busy_poll_for_rsp(struct otx2_mbox *mbox, int devid)
{
	struct otx2_mbox_dev *mdev = &mbox->dev[devid];
	unsigned long timeout = jiffies + 1 * HZ;

	while (!time_after(jiffies, timeout)) {
		if (mdev->num_msgs == mdev->msgs_acked)
			return 0;
		cpu_relax();
	}
	return -EIO;
}
EXPORT_SYMBOL(otx2_mbox_busy_poll_for_rsp);

/* Publish the messages queued on @devid's TX region to the peer and ring
 * its doorbell.  Resets the local bookkeeping so the next
 * otx2_mbox_alloc_msg_rsp() starts a fresh batch.
 */
void otx2_mbox_msg_send(struct otx2_mbox *mbox, int devid)
{
	void *hw_mbase = mbox->hwbase + (devid * MBOX_SIZE);
	struct otx2_mbox_dev *mdev = &mbox->dev[devid];
	struct mbox_hdr *tx_hdr, *rx_hdr;

	tx_hdr = hw_mbase + mbox->tx_start;
	rx_hdr = hw_mbase + mbox->rx_start;

	/* If bounce buffer is implemented copy mbox messages from
	 * bounce buffer to hw mbox memory.
	 *
	 * NOTE(review): this copy reads mdev->msg_size before taking
	 * mbox_lock; presumably callers serialize send per devid — confirm.
	 */
	if (mdev->mbase != hw_mbase)
		memcpy(hw_mbase + mbox->tx_start + msgs_offset,
		       mdev->mbase + mbox->tx_start + msgs_offset,
		       mdev->msg_size);

	spin_lock(&mdev->mbox_lock);

	tx_hdr->msg_size = mdev->msg_size;

	/* Reset header for next messages */
	mdev->msg_size = 0;
	mdev->rsp_size = 0;
	mdev->msgs_acked = 0;

	/* Sync mbox data into memory */
	smp_wmb();

	/* num_msgs != 0 signals to the peer that the buffer has a number of
	 * messages. So this should be written after writing all the messages
	 * to the shared memory.
	 */
	tx_hdr->num_msgs = mdev->num_msgs;
	rx_hdr->num_msgs = 0;
	spin_unlock(&mdev->mbox_lock);

	/* The interrupt should be fired after num_msgs is written
	 * to the shared memory
	 */
	writeq(1, (void __iomem *)mbox->reg_base +
	       (mbox->trigger | (devid << mbox->tr_shift)));
}
EXPORT_SYMBOL(otx2_mbox_msg_send);

/* Reserve @size bytes in @devid's TX region for a request message and
 * @size_rsp bytes in the RX region for its eventual response.  Both sizes
 * are rounded up to MBOX_MSG_ALIGN.
 *
 * Returns a zeroed message header (ver preset to OTX2_MBOX_VERSION) within
 * the TX region, or NULL if either region lacks space.
 */
struct mbox_msghdr *otx2_mbox_alloc_msg_rsp(struct otx2_mbox *mbox, int devid,
					    int size, int size_rsp)
{
	struct otx2_mbox_dev *mdev = &mbox->dev[devid];
	struct mbox_msghdr *msghdr = NULL;

	spin_lock(&mdev->mbox_lock);
	size = ALIGN(size, MBOX_MSG_ALIGN);
	size_rsp = ALIGN(size_rsp, MBOX_MSG_ALIGN);
	/* Check if there is space in mailbox */
	if ((mdev->msg_size + size) > mbox->tx_size - msgs_offset)
		goto exit;
	if ((mdev->rsp_size + size_rsp) > mbox->rx_size - msgs_offset)
		goto exit;

	/* First message of a new batch restarts the count */
	if (mdev->msg_size == 0)
		mdev->num_msgs = 0;
	mdev->num_msgs++;

	msghdr = mdev->mbase + mbox->tx_start + msgs_offset + mdev->msg_size;

	/* Clear the whole msg region */
	memset(msghdr, 0, size);
	/* Init message header with reset values */
	msghdr->ver = OTX2_MBOX_VERSION;
	mdev->msg_size += size;
	mdev->rsp_size += size_rsp;
	msghdr->next_msgoff = mdev->msg_size + msgs_offset;
exit:
	spin_unlock(&mdev->mbox_lock);

	return msghdr;
}
EXPORT_SYMBOL(otx2_mbox_alloc_msg_rsp);

/* Find the response that corresponds to request @msg by walking the TX
 * (request) and RX (response) chains in lockstep; the peer writes one
 * response per request, in order.
 *
 * Returns the matching response header, or ERR_PTR(-ENODEV) when responses
 * are still outstanding, the request is not found, or the IDs of a
 * request/response pair disagree.
 */
struct mbox_msghdr *otx2_mbox_get_rsp(struct otx2_mbox *mbox, int devid,
				      struct mbox_msghdr *msg)
{
	unsigned long imsg = mbox->tx_start + msgs_offset;
	unsigned long irsp = mbox->rx_start + msgs_offset;
	struct otx2_mbox_dev *mdev = &mbox->dev[devid];
	u16 msgs;

	spin_lock(&mdev->mbox_lock);

	if (mdev->num_msgs != mdev->msgs_acked)
		goto error;

	for (msgs = 0; msgs < mdev->msgs_acked; msgs++) {
		struct mbox_msghdr *pmsg = mdev->mbase + imsg;
		struct mbox_msghdr *prsp = mdev->mbase + irsp;

		if (msg == pmsg) {
			if (pmsg->id != prsp->id)
				goto error;
			spin_unlock(&mdev->mbox_lock);
			return prsp;
		}

		imsg = mbox->tx_start + pmsg->next_msgoff;
		irsp = mbox->rx_start + prsp->next_msgoff;
	}

error:
	spin_unlock(&mdev->mbox_lock);
	return ERR_PTR(-ENODEV);
}
EXPORT_SYMBOL(otx2_mbox_get_rsp);

/* Validate every response on @devid's mailbox against its request: IDs must
 * match pairwise and no response may carry an error code.
 *
 * Returns 0 if all responses are OK, the first response's error code if one
 * failed, or -ENODEV if responses are outstanding or a pair mismatches.
 */
int otx2_mbox_check_rsp_msgs(struct otx2_mbox *mbox, int devid)
{
	unsigned long ireq = mbox->tx_start + msgs_offset;
	unsigned long irsp = mbox->rx_start + msgs_offset;
	struct otx2_mbox_dev *mdev = &mbox->dev[devid];
	int rc = -ENODEV;
	u16 msgs;

	spin_lock(&mdev->mbox_lock);

	if (mdev->num_msgs != mdev->msgs_acked)
		goto exit;

	for (msgs = 0; msgs < mdev->msgs_acked; msgs++) {
		struct mbox_msghdr *preq = mdev->mbase + ireq;
		struct mbox_msghdr *prsp = mdev->mbase + irsp;

		if (preq->id != prsp->id)
			goto exit;
		if (prsp->rc) {
			rc = prsp->rc;
			goto exit;
		}

		ireq = mbox->tx_start + preq->next_msgoff;
		irsp = mbox->rx_start + prsp->next_msgoff;
	}
	rc = 0;
exit:
	spin_unlock(&mdev->mbox_lock);
	return rc;
}
EXPORT_SYMBOL(otx2_mbox_check_rsp_msgs);

/* Queue a MBOX_MSG_INVALID response for an unrecognized request @id from
 * @pcifunc.  Returns 0 on success, -ENOMEM if the response cannot be
 * allocated in the mailbox.
 */
int
otx2_reply_invalid_msg(struct otx2_mbox *mbox, int devid, u16 pcifunc, u16 id)
{
	struct msg_rsp *rsp;

	rsp = (struct msg_rsp *)
	       otx2_mbox_alloc_msg(mbox, devid, sizeof(*rsp));
	if (!rsp)
		return -ENOMEM;
	rsp->hdr.id = id;
	rsp->hdr.sig = OTX2_MBOX_RSP_SIG;
	rsp->hdr.rc = MBOX_MSG_INVALID;
	rsp->hdr.pcifunc = pcifunc;
	return 0;
}
EXPORT_SYMBOL(otx2_reply_invalid_msg);

/* Return true if @devid's mailbox has messages queued (num_msgs != 0). */
bool otx2_mbox_nonempty(struct otx2_mbox *mbox, int devid)
{
	struct otx2_mbox_dev *mdev = &mbox->dev[devid];
	bool ret;

	spin_lock(&mdev->mbox_lock);
	ret = mdev->num_msgs != 0;
	spin_unlock(&mdev->mbox_lock);

	return ret;
}
EXPORT_SYMBOL(otx2_mbox_nonempty);

/* Map a mailbox message ID to its name, generated from the MBOX_MESSAGES
 * x-macro table in mbox.h.  Unknown IDs yield "INVALID ID".
 */
const char *otx2_mbox_id2name(u16 id)
{
	switch (id) {
#define M(_name, _id, _1, _2, _3) case _id: return # _name;
	MBOX_MESSAGES
#undef M
	default:
		return "INVALID ID";
	}
}
EXPORT_SYMBOL(otx2_mbox_id2name);

MODULE_AUTHOR("Marvell International Ltd.");
MODULE_LICENSE("GPL v2");