1 // SPDX-License-Identifier: GPL-2.0
2 /* Marvell OcteonTx2 RVU Admin Function driver
3  *
4  * Copyright (C) 2018 Marvell International Ltd.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 as
8  * published by the Free Software Foundation.
9  */
10 
11 #include <linux/module.h>
12 #include <linux/interrupt.h>
13 #include <linux/pci.h>
14 
15 #include "rvu_reg.h"
16 #include "mbox.h"
17 
/* Offset of the first message body within each mailbox region: every
 * region begins with a struct mbox_hdr, padded up to MBOX_MSG_ALIGN.
 */
static const u16 msgs_offset = ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
19 
/* Reset one device's mailbox channel to an empty state.
 *
 * Clears the bookkeeping in the per-device state (@mdev) and zeroes the
 * num_msgs/msg_size fields of both the TX and RX headers that live in
 * the shared hardware mailbox memory, so the next exchange starts clean.
 * The per-device spinlock serializes against concurrent alloc/send.
 */
void otx2_mbox_reset(struct otx2_mbox *mbox, int devid)
{
	/* Each device owns a fixed MBOX_SIZE slice of the shared region */
	void *hw_mbase = mbox->hwbase + (devid * MBOX_SIZE);
	struct otx2_mbox_dev *mdev = &mbox->dev[devid];
	struct mbox_hdr *tx_hdr, *rx_hdr;

	/* Headers sit at the start of the TX and RX halves of the slice */
	tx_hdr = hw_mbase + mbox->tx_start;
	rx_hdr = hw_mbase + mbox->rx_start;

	spin_lock(&mdev->mbox_lock);
	mdev->msg_size = 0;
	mdev->rsp_size = 0;
	tx_hdr->num_msgs = 0;
	tx_hdr->msg_size = 0;
	rx_hdr->num_msgs = 0;
	rx_hdr->msg_size = 0;
	spin_unlock(&mdev->mbox_lock);
}
EXPORT_SYMBOL(otx2_mbox_reset);
39 
40 void otx2_mbox_destroy(struct otx2_mbox *mbox)
41 {
42 	mbox->reg_base = NULL;
43 	mbox->hwbase = NULL;
44 
45 	kfree(mbox->dev);
46 	mbox->dev = NULL;
47 }
48 EXPORT_SYMBOL(otx2_mbox_destroy);
49 
50 int otx2_mbox_init(struct otx2_mbox *mbox, void *hwbase, struct pci_dev *pdev,
51 		   void *reg_base, int direction, int ndevs)
52 {
53 	struct otx2_mbox_dev *mdev;
54 	int devid;
55 
56 	switch (direction) {
57 	case MBOX_DIR_AFPF:
58 	case MBOX_DIR_PFVF:
59 		mbox->tx_start = MBOX_DOWN_TX_START;
60 		mbox->rx_start = MBOX_DOWN_RX_START;
61 		mbox->tx_size  = MBOX_DOWN_TX_SIZE;
62 		mbox->rx_size  = MBOX_DOWN_RX_SIZE;
63 		break;
64 	case MBOX_DIR_PFAF:
65 	case MBOX_DIR_VFPF:
66 		mbox->tx_start = MBOX_DOWN_RX_START;
67 		mbox->rx_start = MBOX_DOWN_TX_START;
68 		mbox->tx_size  = MBOX_DOWN_RX_SIZE;
69 		mbox->rx_size  = MBOX_DOWN_TX_SIZE;
70 		break;
71 	case MBOX_DIR_AFPF_UP:
72 	case MBOX_DIR_PFVF_UP:
73 		mbox->tx_start = MBOX_UP_TX_START;
74 		mbox->rx_start = MBOX_UP_RX_START;
75 		mbox->tx_size  = MBOX_UP_TX_SIZE;
76 		mbox->rx_size  = MBOX_UP_RX_SIZE;
77 		break;
78 	case MBOX_DIR_PFAF_UP:
79 	case MBOX_DIR_VFPF_UP:
80 		mbox->tx_start = MBOX_UP_RX_START;
81 		mbox->rx_start = MBOX_UP_TX_START;
82 		mbox->tx_size  = MBOX_UP_RX_SIZE;
83 		mbox->rx_size  = MBOX_UP_TX_SIZE;
84 		break;
85 	default:
86 		return -ENODEV;
87 	}
88 
89 	switch (direction) {
90 	case MBOX_DIR_AFPF:
91 	case MBOX_DIR_AFPF_UP:
92 		mbox->trigger = RVU_AF_AFPF_MBOX0;
93 		mbox->tr_shift = 4;
94 		break;
95 	case MBOX_DIR_PFAF:
96 	case MBOX_DIR_PFAF_UP:
97 		mbox->trigger = RVU_PF_PFAF_MBOX1;
98 		mbox->tr_shift = 0;
99 		break;
100 	case MBOX_DIR_PFVF:
101 	case MBOX_DIR_PFVF_UP:
102 		mbox->trigger = RVU_PF_VFX_PFVF_MBOX0;
103 		mbox->tr_shift = 12;
104 		break;
105 	case MBOX_DIR_VFPF:
106 	case MBOX_DIR_VFPF_UP:
107 		mbox->trigger = RVU_VF_VFPF_MBOX1;
108 		mbox->tr_shift = 0;
109 		break;
110 	default:
111 		return -ENODEV;
112 	}
113 
114 	mbox->reg_base = reg_base;
115 	mbox->hwbase = hwbase;
116 	mbox->pdev = pdev;
117 
118 	mbox->dev = kcalloc(ndevs, sizeof(struct otx2_mbox_dev), GFP_KERNEL);
119 	if (!mbox->dev) {
120 		otx2_mbox_destroy(mbox);
121 		return -ENOMEM;
122 	}
123 
124 	mbox->ndevs = ndevs;
125 	for (devid = 0; devid < ndevs; devid++) {
126 		mdev = &mbox->dev[devid];
127 		mdev->mbase = mbox->hwbase + (devid * MBOX_SIZE);
128 		spin_lock_init(&mdev->mbox_lock);
129 		/* Init header to reset value */
130 		otx2_mbox_reset(mbox, devid);
131 	}
132 
133 	return 0;
134 }
135 EXPORT_SYMBOL(otx2_mbox_init);
136 
137 int otx2_mbox_wait_for_rsp(struct otx2_mbox *mbox, int devid)
138 {
139 	unsigned long timeout = jiffies + msecs_to_jiffies(MBOX_RSP_TIMEOUT);
140 	struct otx2_mbox_dev *mdev = &mbox->dev[devid];
141 	struct device *sender = &mbox->pdev->dev;
142 
143 	while (!time_after(jiffies, timeout)) {
144 		if (mdev->num_msgs == mdev->msgs_acked)
145 			return 0;
146 		usleep_range(800, 1000);
147 	}
148 	dev_dbg(sender, "timed out while waiting for rsp\n");
149 	return -EIO;
150 }
151 EXPORT_SYMBOL(otx2_mbox_wait_for_rsp);
152 
153 int otx2_mbox_busy_poll_for_rsp(struct otx2_mbox *mbox, int devid)
154 {
155 	struct otx2_mbox_dev *mdev = &mbox->dev[devid];
156 	unsigned long timeout = jiffies + 1 * HZ;
157 
158 	while (!time_after(jiffies, timeout)) {
159 		if (mdev->num_msgs == mdev->msgs_acked)
160 			return 0;
161 		cpu_relax();
162 	}
163 	return -EIO;
164 }
165 EXPORT_SYMBOL(otx2_mbox_busy_poll_for_rsp);
166 
/* Publish the currently queued batch of messages to the peer and ring
 * the doorbell.
 *
 * The message bodies are copied into the hardware mailbox memory (when a
 * bounce buffer is in use), the TX header is filled in, and only then is
 * num_msgs made non-zero -- that field is what tells the peer the buffer
 * is valid, so the write ordering here is load-bearing.  Finally the
 * doorbell register is written to raise the peer's interrupt.
 */
void otx2_mbox_msg_send(struct otx2_mbox *mbox, int devid)
{
	void *hw_mbase = mbox->hwbase + (devid * MBOX_SIZE);
	struct otx2_mbox_dev *mdev = &mbox->dev[devid];
	struct mbox_hdr *tx_hdr, *rx_hdr;

	tx_hdr = hw_mbase + mbox->tx_start;
	rx_hdr = hw_mbase + mbox->rx_start;

	/* If bounce buffer is implemented copy mbox messages from
	 * bounce buffer to hw mbox memory.
	 */
	if (mdev->mbase != hw_mbase)
		memcpy(hw_mbase + mbox->tx_start + msgs_offset,
		       mdev->mbase + mbox->tx_start + msgs_offset,
		       mdev->msg_size);

	spin_lock(&mdev->mbox_lock);

	tx_hdr->msg_size = mdev->msg_size;

	/* Reset header for next messages */
	mdev->msg_size = 0;
	mdev->rsp_size = 0;
	mdev->msgs_acked = 0;

	/* Sync mbox data into memory */
	smp_wmb();

	/* num_msgs != 0 signals to the peer that the buffer has a number of
	 * messages.  So this should be written after writing all the messages
	 * to the shared memory.
	 */
	tx_hdr->num_msgs = mdev->num_msgs;
	rx_hdr->num_msgs = 0;
	spin_unlock(&mdev->mbox_lock);

	/* The interrupt should be fired after num_msgs is written
	 * to the shared memory
	 */
	writeq(1, (void __iomem *)mbox->reg_base +
	       (mbox->trigger | (devid << mbox->tr_shift)));
}
EXPORT_SYMBOL(otx2_mbox_msg_send);
211 
212 struct mbox_msghdr *otx2_mbox_alloc_msg_rsp(struct otx2_mbox *mbox, int devid,
213 					    int size, int size_rsp)
214 {
215 	struct otx2_mbox_dev *mdev = &mbox->dev[devid];
216 	struct mbox_msghdr *msghdr = NULL;
217 
218 	spin_lock(&mdev->mbox_lock);
219 	size = ALIGN(size, MBOX_MSG_ALIGN);
220 	size_rsp = ALIGN(size_rsp, MBOX_MSG_ALIGN);
221 	/* Check if there is space in mailbox */
222 	if ((mdev->msg_size + size) > mbox->tx_size - msgs_offset)
223 		goto exit;
224 	if ((mdev->rsp_size + size_rsp) > mbox->rx_size - msgs_offset)
225 		goto exit;
226 
227 	if (mdev->msg_size == 0)
228 		mdev->num_msgs = 0;
229 	mdev->num_msgs++;
230 
231 	msghdr = mdev->mbase + mbox->tx_start + msgs_offset + mdev->msg_size;
232 
233 	/* Clear the whole msg region */
234 	memset(msghdr, 0, size);
235 	/* Init message header with reset values */
236 	msghdr->ver = OTX2_MBOX_VERSION;
237 	mdev->msg_size += size;
238 	mdev->rsp_size += size_rsp;
239 	msghdr->next_msgoff = mdev->msg_size + msgs_offset;
240 exit:
241 	spin_unlock(&mdev->mbox_lock);
242 
243 	return msghdr;
244 }
245 EXPORT_SYMBOL(otx2_mbox_alloc_msg_rsp);
246 
247 struct mbox_msghdr *otx2_mbox_get_rsp(struct otx2_mbox *mbox, int devid,
248 				      struct mbox_msghdr *msg)
249 {
250 	unsigned long imsg = mbox->tx_start + msgs_offset;
251 	unsigned long irsp = mbox->rx_start + msgs_offset;
252 	struct otx2_mbox_dev *mdev = &mbox->dev[devid];
253 	u16 msgs;
254 
255 	spin_lock(&mdev->mbox_lock);
256 
257 	if (mdev->num_msgs != mdev->msgs_acked)
258 		goto error;
259 
260 	for (msgs = 0; msgs < mdev->msgs_acked; msgs++) {
261 		struct mbox_msghdr *pmsg = mdev->mbase + imsg;
262 		struct mbox_msghdr *prsp = mdev->mbase + irsp;
263 
264 		if (msg == pmsg) {
265 			if (pmsg->id != prsp->id)
266 				goto error;
267 			spin_unlock(&mdev->mbox_lock);
268 			return prsp;
269 		}
270 
271 		imsg = mbox->tx_start + pmsg->next_msgoff;
272 		irsp = mbox->rx_start + prsp->next_msgoff;
273 	}
274 
275 error:
276 	spin_unlock(&mdev->mbox_lock);
277 	return ERR_PTR(-ENODEV);
278 }
279 EXPORT_SYMBOL(otx2_mbox_get_rsp);
280 
281 int otx2_mbox_check_rsp_msgs(struct otx2_mbox *mbox, int devid)
282 {
283 	unsigned long ireq = mbox->tx_start + msgs_offset;
284 	unsigned long irsp = mbox->rx_start + msgs_offset;
285 	struct otx2_mbox_dev *mdev = &mbox->dev[devid];
286 	int rc = -ENODEV;
287 	u16 msgs;
288 
289 	spin_lock(&mdev->mbox_lock);
290 
291 	if (mdev->num_msgs != mdev->msgs_acked)
292 		goto exit;
293 
294 	for (msgs = 0; msgs < mdev->msgs_acked; msgs++) {
295 		struct mbox_msghdr *preq = mdev->mbase + ireq;
296 		struct mbox_msghdr *prsp = mdev->mbase + irsp;
297 
298 		if (preq->id != prsp->id)
299 			goto exit;
300 		if (prsp->rc) {
301 			rc = prsp->rc;
302 			goto exit;
303 		}
304 
305 		ireq = mbox->tx_start + preq->next_msgoff;
306 		irsp = mbox->rx_start + prsp->next_msgoff;
307 	}
308 	rc = 0;
309 exit:
310 	spin_unlock(&mdev->mbox_lock);
311 	return rc;
312 }
313 EXPORT_SYMBOL(otx2_mbox_check_rsp_msgs);
314 
315 int
316 otx2_reply_invalid_msg(struct otx2_mbox *mbox, int devid, u16 pcifunc, u16 id)
317 {
318 	struct msg_rsp *rsp;
319 
320 	rsp = (struct msg_rsp *)
321 	       otx2_mbox_alloc_msg(mbox, devid, sizeof(*rsp));
322 	if (!rsp)
323 		return -ENOMEM;
324 	rsp->hdr.id = id;
325 	rsp->hdr.sig = OTX2_MBOX_RSP_SIG;
326 	rsp->hdr.rc = MBOX_MSG_INVALID;
327 	rsp->hdr.pcifunc = pcifunc;
328 	return 0;
329 }
330 EXPORT_SYMBOL(otx2_reply_invalid_msg);
331 
332 bool otx2_mbox_nonempty(struct otx2_mbox *mbox, int devid)
333 {
334 	struct otx2_mbox_dev *mdev = &mbox->dev[devid];
335 	bool ret;
336 
337 	spin_lock(&mdev->mbox_lock);
338 	ret = mdev->num_msgs != 0;
339 	spin_unlock(&mdev->mbox_lock);
340 
341 	return ret;
342 }
343 EXPORT_SYMBOL(otx2_mbox_nonempty);
344 
/* Translate a mailbox message ID into its human-readable name (for
 * logging).  The case list is generated from the MBOX_MESSAGES X-macro
 * so it stays in sync with the message definitions automatically.
 */
const char *otx2_mbox_id2name(u16 id)
{
	switch (id) {
#define M(_name, _id, _1, _2, _3) case _id: return # _name;
	MBOX_MESSAGES
#undef M
	default:
		return "INVALID ID";
	}
}
EXPORT_SYMBOL(otx2_mbox_id2name);
356 
/* Module metadata */
MODULE_AUTHOR("Marvell International Ltd.");
MODULE_LICENSE("GPL v2");
359