/**********************************************************************
 * Author: Cavium, Inc.
 *
 * Contact: support@cavium.com
 *          Please include "LiquidIO" in the subject.
 *
 * Copyright (c) 2003-2016 Cavium, Inc.
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT.  See the GNU General Public License for more details.
 ***********************************************************************/
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
#include "cn23xx_vf_device.h"
#include "octeon_main.h"
#include "octeon_mailbox.h"

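/* Convert a time interval given in microseconds to OQ ticks (units of 1024
 * coprocessor clock cycles), using the coprocessor clock rate received from
 * the PF during the handshake (pfvf_hsword.coproc_tics_per_us).
 */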
u32 cn23xx_vf_get_oq_ticks(struct octeon_device *oct, u32 time_intr_in_us)
{
	/* This gives the SLI clock per microsec */
	u32 oqticks_per_us = (u32)oct->pfvf_hsword.coproc_tics_per_us;

	/* This gives the clock cycles per millisecond */
	oqticks_per_us *= 1000;

	/* This gives the oq ticks (1024 core clock cycles) per millisecond */
	oqticks_per_us /= 1024;

	/* time_intr is in microseconds. The next 2 steps give the oq ticks
	 * corresponding to time_intr.
	 */
	oqticks_per_us *= time_intr_in_us;
	oqticks_per_us /= 1000;

	return oqticks_per_us;
}

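/* Reset the first num_queues IO queue pairs: set the RST bit in each queue's
 * PKT_CONTROL register, wait for the ring to go idle (RST clears or QUIET is
 * set), then clear RST again. Returns 0 on success, -1 on failure.
 */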
static int cn23xx_vf_reset_io_queues(struct octeon_device *oct, u32 num_queues)
{
	u32 loop = BUSY_READING_REG_VF_LOOP_COUNT;
	int ret_val = 0;
	u32 q_no;
	u64 d64;

	for (q_no = 0; q_no < num_queues; q_no++) {
		/* set RST bit to 1. This bit applies to both IQ and OQ */
		d64 = octeon_read_csr64(oct,
					CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no));
		d64 |= CN23XX_PKT_INPUT_CTL_RST;
		octeon_write_csr64(oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no),
				   d64);
	}

	/* wait until the RST bit is clear or the QUIET bit is set */
	for (q_no = 0; q_no < num_queues; q_no++) {
		u64 reg_val = octeon_read_csr64(oct,
					CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no));
		while ((READ_ONCE(reg_val) & CN23XX_PKT_INPUT_CTL_RST) &&
		       !(READ_ONCE(reg_val) & CN23XX_PKT_INPUT_CTL_QUIET) &&
		       loop) {
			WRITE_ONCE(reg_val, octeon_read_csr64(
			    oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no)));
			loop--;
		}
		if (!loop) {
			dev_err(&oct->pci_dev->dev,
				"clearing the reset reg failed or setting the quiet reg failed for qno: %u\n",
				q_no);
			return -1;
		}
		WRITE_ONCE(reg_val, READ_ONCE(reg_val) &
			   ~CN23XX_PKT_INPUT_CTL_RST);
		octeon_write_csr64(oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no),
				   READ_ONCE(reg_val));

		WRITE_ONCE(reg_val, octeon_read_csr64(
		    oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no)));
		if (READ_ONCE(reg_val) & CN23XX_PKT_INPUT_CTL_RST) {
			dev_err(&oct->pci_dev->dev,
				"clearing the reset failed for qno: %u\n",
				q_no);
			ret_val = -1;
		}
	}

	return ret_val;
}

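/* Global input-queue setup: reset all of this VF's IO queues, then for each
 * input queue write all 1's to the doorbell register, program the default
 * PKT_CONTROL mask and set the packet-count watermark that triggers PI_INT
 * in the INSTR_COUNT register.
 */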
static int cn23xx_vf_setup_global_input_regs(struct octeon_device *oct)
{
	struct octeon_cn23xx_vf *cn23xx = (struct octeon_cn23xx_vf *)oct->chip;
	struct octeon_instr_queue *iq;
	u64 q_no, intr_threshold;
	u64 d64;

	if (cn23xx_vf_reset_io_queues(oct, oct->sriov_info.rings_per_vf))
		return -1;

	for (q_no = 0; q_no < (oct->sriov_info.rings_per_vf); q_no++) {
		void __iomem *inst_cnt_reg;

		octeon_write_csr64(oct, CN23XX_VF_SLI_IQ_DOORBELL(q_no),
				   0xFFFFFFFF);
		iq = oct->instr_queue[q_no];

		if (iq)
			inst_cnt_reg = iq->inst_cnt_reg;
		else
			inst_cnt_reg = (u8 *)oct->mmio[0].hw_addr +
				       CN23XX_VF_SLI_IQ_INSTR_COUNT64(q_no);

		d64 = octeon_read_csr64(oct,
					CN23XX_VF_SLI_IQ_INSTR_COUNT64(q_no));

		d64 &= 0xEFFFFFFFFFFFFFFFL;

		octeon_write_csr64(oct, CN23XX_VF_SLI_IQ_INSTR_COUNT64(q_no),
				   d64);

		/* Select ES, RO, NS, RDSIZE, DPTR Format#0 for
		 * the Input Queues
		 */
		octeon_write_csr64(oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no),
				   CN23XX_PKT_INPUT_CTL_MASK);

		/* set the wmark level to trigger PI_INT */
		intr_threshold = CFG_GET_IQ_INTR_PKT(cn23xx->conf) &
				 CN23XX_PKT_IN_DONE_WMARK_MASK;

		writeq((readq(inst_cnt_reg) &
			~(CN23XX_PKT_IN_DONE_WMARK_MASK <<
			  CN23XX_PKT_IN_DONE_WMARK_BIT_POS)) |
		       (intr_threshold << CN23XX_PKT_IN_DONE_WMARK_BIT_POS),
		       inst_cnt_reg);
	}
	return 0;
}

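/* Global output-queue setup: for each output queue, write all 1's to the
 * packets-credit register and program PKT_CONTROL with info/data pointer
 * mode and 64-bit byte swap (ES) enabled, and with buffer mode, relaxed
 * ordering and no-snoop cleared.
 */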
static void cn23xx_vf_setup_global_output_regs(struct octeon_device *oct)
{
	u32 reg_val;
	u32 q_no;

	for (q_no = 0; q_no < (oct->sriov_info.rings_per_vf); q_no++) {
		octeon_write_csr(oct, CN23XX_VF_SLI_OQ_PKTS_CREDIT(q_no),
				 0xFFFFFFFF);

		reg_val =
		    octeon_read_csr(oct, CN23XX_VF_SLI_OQ_PKTS_SENT(q_no));

		reg_val &= 0xEFFFFFFFFFFFFFFFL;

		reg_val =
		    octeon_read_csr(oct, CN23XX_VF_SLI_OQ_PKT_CONTROL(q_no));

		/* set IPTR & DPTR */
		reg_val |=
		    (CN23XX_PKT_OUTPUT_CTL_IPTR | CN23XX_PKT_OUTPUT_CTL_DPTR);

		/* reset BMODE */
		reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_BMODE);

		/* No Relaxed Ordering, No Snoop, 64-bit Byte swap
		 * for Output Queue ScatterList; reset ROR_P, NSR_P
		 */
		reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_ROR_P);
		reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_NSR_P);

#ifdef __LITTLE_ENDIAN_BITFIELD
		reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_ES_P);
#else
		reg_val |= (CN23XX_PKT_OUTPUT_CTL_ES_P);
#endif
		/* No Relaxed Ordering, No Snoop, 64-bit Byte swap
		 * for Output Queue Data; reset ROR, NSR
		 */
		reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_ROR);
		reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_NSR);
		/* set the ES bit */
		reg_val |= (CN23XX_PKT_OUTPUT_CTL_ES);

		/* write all the selected settings */
		octeon_write_csr(oct, CN23XX_VF_SLI_OQ_PKT_CONTROL(q_no),
				 reg_val);
	}
}

static int cn23xx_setup_vf_device_regs(struct octeon_device *oct)
{
	if (cn23xx_vf_setup_global_input_regs(oct))
		return -1;

	cn23xx_vf_setup_global_output_regs(oct);

	return 0;
}

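/* Per-IQ register setup: program the ring base address and size, cache the
 * doorbell and instruction-count register addresses in the queue structure,
 * and set CINT_ENB when MSI-X is in use so the queue can raise interrupts.
 */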
static void cn23xx_setup_vf_iq_regs(struct octeon_device *oct, u32 iq_no)
{
	struct octeon_instr_queue *iq = oct->instr_queue[iq_no];
	u64 pkt_in_done;

	/* Write the start of the input queue's ring and its size */
	octeon_write_csr64(oct, CN23XX_VF_SLI_IQ_BASE_ADDR64(iq_no),
			   iq->base_addr_dma);
	octeon_write_csr(oct, CN23XX_VF_SLI_IQ_SIZE(iq_no), iq->max_count);

	/* Remember the doorbell & instruction count register addr
	 * for this queue
	 */
	iq->doorbell_reg =
	    (u8 *)oct->mmio[0].hw_addr + CN23XX_VF_SLI_IQ_DOORBELL(iq_no);
	iq->inst_cnt_reg =
	    (u8 *)oct->mmio[0].hw_addr + CN23XX_VF_SLI_IQ_INSTR_COUNT64(iq_no);
	dev_dbg(&oct->pci_dev->dev, "InstQ[%d]:dbell reg @ 0x%p instcnt_reg @ 0x%p\n",
		iq_no, iq->doorbell_reg, iq->inst_cnt_reg);

	/* Store the current instruction counter (used in flush_iq
	 * calculation)
	 */
	pkt_in_done = readq(iq->inst_cnt_reg);

	if (oct->msix_on) {
		/* Set CINT_ENB to enable IQ interrupt */
		writeq((pkt_in_done | CN23XX_INTR_CINT_ENB),
		       iq->inst_cnt_reg);
	}
	iq->reset_instr_cnt = 0;
}

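/* Per-OQ register setup: program the descriptor ring address, ring size and
 * buffer/info sizes, and cache the pkts_sent and pkts_credit register
 * addresses in the droq structure.
 */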
static void cn23xx_setup_vf_oq_regs(struct octeon_device *oct, u32 oq_no)
{
	struct octeon_droq *droq = oct->droq[oq_no];

	octeon_write_csr64(oct, CN23XX_VF_SLI_OQ_BASE_ADDR64(oq_no),
			   droq->desc_ring_dma);
	octeon_write_csr(oct, CN23XX_VF_SLI_OQ_SIZE(oq_no), droq->max_count);

	octeon_write_csr(oct, CN23XX_VF_SLI_OQ_BUFF_INFO_SIZE(oq_no),
			 (droq->buffer_size | (OCT_RH_SIZE << 16)));

	/* Get the mapped address of the pkt_sent and pkts_credit regs */
	droq->pkts_sent_reg =
	    (u8 *)oct->mmio[0].hw_addr + CN23XX_VF_SLI_OQ_PKTS_SENT(oq_no);
	droq->pkts_credit_reg =
	    (u8 *)oct->mmio[0].hw_addr + CN23XX_VF_SLI_OQ_PKTS_CREDIT(oq_no);
}

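/* Deferred-work handler: process a pending mailbox message outside of
 * interrupt context.
 */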
static void cn23xx_vf_mbox_thread(struct work_struct *work)
{
	struct cavium_wk *wk = (struct cavium_wk *)work;
	struct octeon_mbox *mbox = (struct octeon_mbox *)wk->ctxptr;

	octeon_mbox_process_message(mbox);
}

static int cn23xx_free_vf_mbox(struct octeon_device *oct)
{
	cancel_delayed_work_sync(&oct->mbox[0]->mbox_poll_wk.work);
	vfree(oct->mbox[0]);
	return 0;
}

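/* Allocate and initialize the VF's single mailbox (queue 0): map the mailbox
 * interrupt register and the SIG0/SIG1 signal registers, set up the poll
 * work, and write the OCTEON_PFVFSIG signature into the read register.
 */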
static int cn23xx_setup_vf_mbox(struct octeon_device *oct)
{
	struct octeon_mbox *mbox = NULL;

	mbox = vmalloc(sizeof(*mbox));
	if (!mbox)
		return 1;

	memset(mbox, 0, sizeof(struct octeon_mbox));

	spin_lock_init(&mbox->lock);

	mbox->oct_dev = oct;

	mbox->q_no = 0;

	mbox->state = OCTEON_MBOX_STATE_IDLE;

	/* VF mbox interrupt reg */
	mbox->mbox_int_reg =
	    (u8 *)oct->mmio[0].hw_addr + CN23XX_VF_SLI_PKT_MBOX_INT(0);
	/* VF reads from SIG0 reg */
	mbox->mbox_read_reg =
	    (u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_PKT_PF_VF_MBOX_SIG(0, 0);
	/* VF writes into SIG1 reg */
	mbox->mbox_write_reg =
	    (u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_PKT_PF_VF_MBOX_SIG(0, 1);

	INIT_DELAYED_WORK(&mbox->mbox_poll_wk.work,
			  cn23xx_vf_mbox_thread);

	mbox->mbox_poll_wk.ctxptr = mbox;

	oct->mbox[0] = mbox;

	writeq(OCTEON_PFVFSIG, mbox->mbox_read_reg);

	return 0;
}

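/* Enable the VF IO queues: set IS_64B for instruction queues configured for
 * 64-byte commands, and set RING_ENB for every queue present in the IQ/OQ
 * masks.
 */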
static int cn23xx_enable_vf_io_queues(struct octeon_device *oct)
{
	u32 q_no;

	for (q_no = 0; q_no < oct->num_iqs; q_no++) {
		u64 reg_val;

		/* set the corresponding IQ IS_64B bit */
		if (oct->io_qmask.iq64B & BIT_ULL(q_no)) {
			reg_val = octeon_read_csr64(
			    oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no));
			reg_val |= CN23XX_PKT_INPUT_CTL_IS_64B;
			octeon_write_csr64(
			    oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no), reg_val);
		}

		/* set the corresponding IQ ENB bit */
		if (oct->io_qmask.iq & BIT_ULL(q_no)) {
			reg_val = octeon_read_csr64(
			    oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no));
			reg_val |= CN23XX_PKT_INPUT_CTL_RING_ENB;
			octeon_write_csr64(
			    oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no), reg_val);
		}
	}
	for (q_no = 0; q_no < oct->num_oqs; q_no++) {
		u32 reg_val;

		/* set the corresponding OQ ENB bit */
		if (oct->io_qmask.oq & BIT_ULL(q_no)) {
			reg_val = octeon_read_csr(
			    oct, CN23XX_VF_SLI_OQ_PKT_CONTROL(q_no));
			reg_val |= CN23XX_PKT_OUTPUT_CTL_RING_ENB;
			octeon_write_csr(
			    oct, CN23XX_VF_SLI_OQ_PKT_CONTROL(q_no), reg_val);
		}
	}

	return 0;
}

static void cn23xx_disable_vf_io_queues(struct octeon_device *oct)
{
	u32 num_queues = oct->num_iqs;

	/* per HRM, rings can only be disabled via reset operation,
	 * NOT via SLI_PKT()_INPUT/OUTPUT_CONTROL[ENB]
	 */
	if (num_queues < oct->num_oqs)
		num_queues = oct->num_oqs;

	cn23xx_vf_reset_io_queues(oct, num_queues);
}

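/* Send a mailbox request asking the PF driver to initiate a Function Level
 * Reset (FLR) on this VF's behalf; no response is expected.
 */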
void cn23xx_vf_ask_pf_to_do_flr(struct octeon_device *oct)
{
	struct octeon_mbox_cmd mbox_cmd;

	mbox_cmd.msg.u64 = 0;
	mbox_cmd.msg.s.type = OCTEON_MBOX_REQUEST;
	mbox_cmd.msg.s.resp_needed = 0;
	mbox_cmd.msg.s.cmd = OCTEON_VF_FLR_REQUEST;
	mbox_cmd.msg.s.len = 1;
	mbox_cmd.q_no = 0;
	mbox_cmd.recv_len = 0;
	mbox_cmd.recv_status = 0;
	mbox_cmd.fn = NULL;
	mbox_cmd.fn_arg = 0;

	octeon_mbox_write(oct, &mbox_cmd);
}

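/* Handshake completion callback: copy the handshake word sent by the PF,
 * extract the PF driver's major version (when provided) into the upper 16
 * bits, and post the result (with bit 0 set) to the status word that
 * cn23xx_octeon_pfvf_handshake() is polling.
 */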
static void octeon_pfvf_hs_callback(struct octeon_device *oct,
				    struct octeon_mbox_cmd *cmd,
				    void *arg)
{
	u32 major = 0;

	memcpy((uint8_t *)&oct->pfvf_hsword, cmd->msg.s.params,
	       CN23XX_MAILBOX_MSGPARAM_SIZE);
	if (cmd->recv_len > 1) {
		major = ((struct lio_version *)(cmd->data))->major;
		major = major << 16;
	}

	atomic_set((atomic_t *)arg, major | 1);
}

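/* PF/VF handshake: send an OCTEON_VF_ACTIVE request carrying the VF driver
 * version, poll for the PF's reply, verify that the PF and VF major versions
 * match, and propagate the pkind received from the PF to all instruction
 * queues. Returns 0 on success, 1 on timeout or version mismatch.
 */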
int cn23xx_octeon_pfvf_handshake(struct octeon_device *oct)
{
	struct octeon_mbox_cmd mbox_cmd;
	u32 q_no, count = 0;
	atomic_t status;
	u32 pfmajor;
	u32 vfmajor;
	u32 ret;

	/* Sending VF_ACTIVE indication to the PF driver */
	dev_dbg(&oct->pci_dev->dev, "requesting info from pf\n");

	mbox_cmd.msg.u64 = 0;
	mbox_cmd.msg.s.type = OCTEON_MBOX_REQUEST;
	mbox_cmd.msg.s.resp_needed = 1;
	mbox_cmd.msg.s.cmd = OCTEON_VF_ACTIVE;
	mbox_cmd.msg.s.len = 2;
	mbox_cmd.data[0] = 0;
	((struct lio_version *)&mbox_cmd.data[0])->major =
						LIQUIDIO_BASE_MAJOR_VERSION;
	((struct lio_version *)&mbox_cmd.data[0])->minor =
						LIQUIDIO_BASE_MINOR_VERSION;
	((struct lio_version *)&mbox_cmd.data[0])->micro =
						LIQUIDIO_BASE_MICRO_VERSION;
	mbox_cmd.q_no = 0;
	mbox_cmd.recv_len = 0;
	mbox_cmd.recv_status = 0;
	mbox_cmd.fn = (octeon_mbox_callback_t)octeon_pfvf_hs_callback;
	mbox_cmd.fn_arg = &status;

	/* Interrupts are not enabled at this point.
	 * Enable them with default oq ticks
	 */
	oct->fn_list.enable_interrupt(oct, OCTEON_ALL_INTR);

	octeon_mbox_write(oct, &mbox_cmd);

	atomic_set(&status, 0);

	do {
		schedule_timeout_uninterruptible(1);
	} while ((!atomic_read(&status)) && (count++ < 100000));

	/* Disable the interrupts so that they will be re-enabled later
	 * with the oq ticks received from the PF
	 */
	oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);

	ret = atomic_read(&status);
	if (!ret) {
		dev_err(&oct->pci_dev->dev, "octeon_pfvf_handshake timeout\n");
		return 1;
	}

	for (q_no = 0 ; q_no < oct->num_iqs ; q_no++)
		oct->instr_queue[q_no]->txpciq.s.pkind = oct->pfvf_hsword.pkind;

	vfmajor = LIQUIDIO_BASE_MAJOR_VERSION;
	pfmajor = ret >> 16;
	if (pfmajor != vfmajor) {
		dev_err(&oct->pci_dev->dev,
			"VF Liquidio driver (major version %d) is not compatible with Liquidio PF driver (major version %d)\n",
			vfmajor, pfmajor);
		return 1;
	}

	dev_dbg(&oct->pci_dev->dev,
		"VF Liquidio driver (major version %d), Liquidio PF driver (major version %d)\n",
		vfmajor, pfmajor);

	dev_dbg(&oct->pci_dev->dev, "got data from pf pkind is %d\n",
		oct->pfvf_hsword.pkind);

	return 0;
}

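/* Acknowledge the queue-0 mailbox interrupt (write 1 to clear) and, if
 * octeon_mbox_read() indicates a message needs processing, schedule the
 * mailbox poll work.
 */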
static void cn23xx_handle_vf_mbox_intr(struct octeon_ioq_vector *ioq_vector)
{
	struct octeon_device *oct = ioq_vector->oct_dev;
	u64 mbox_int_val;

	if (!ioq_vector->droq_index) {
		/* read and clear by writing 1 */
		mbox_int_val = readq(oct->mbox[0]->mbox_int_reg);
		writeq(mbox_int_val, oct->mbox[0]->mbox_int_reg);
		if (octeon_mbox_read(oct->mbox[0]))
			schedule_delayed_work(&oct->mbox[0]->mbox_poll_wk.work,
					      msecs_to_jiffies(0));
	}
}

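/* MSI-X handler for one IOq vector: read the PKTS_SENT register to determine
 * which events fired (packet output, packet input, mailbox) and return the
 * corresponding MSIX_* bits for the caller to service.
 */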
static u64 cn23xx_vf_msix_interrupt_handler(void *dev)
{
	struct octeon_ioq_vector *ioq_vector = (struct octeon_ioq_vector *)dev;
	struct octeon_device *oct = ioq_vector->oct_dev;
	struct octeon_droq *droq = oct->droq[ioq_vector->droq_index];
	u64 pkts_sent;
	u64 ret = 0;

	dev_dbg(&oct->pci_dev->dev, "In %s octeon_dev @ %p\n", __func__, oct);
	pkts_sent = readq(droq->pkts_sent_reg);

	/* If our device has interrupted, then proceed. Also check
	 * for all f's in case the interrupt was triggered on an error
	 * and the PCI read failed.
	 */
	if (!pkts_sent || (pkts_sent == 0xFFFFFFFFFFFFFFFFULL))
		return ret;

	/* Write count reg in sli_pkt_cnts to clear these int. */
	if ((pkts_sent & CN23XX_INTR_PO_INT) ||
	    (pkts_sent & CN23XX_INTR_PI_INT)) {
		if (pkts_sent & CN23XX_INTR_PO_INT)
			ret |= MSIX_PO_INT;
	}

	if (pkts_sent & CN23XX_INTR_PI_INT)
		/* We will clear the count when we update the read_index. */
		ret |= MSIX_PI_INT;

	if (pkts_sent & CN23XX_INTR_MBOX_INT) {
		cn23xx_handle_vf_mbox_intr(ioq_vector);
		ret |= MSIX_MBOX_INT;
	}

	return ret;
}

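/* Compute the new IQ read index from the hardware instruction count read via
 * inst_cnt_reg, advancing the old index by the number of instructions the
 * hardware has completed since the previous call.
 */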
static u32 cn23xx_update_read_index(struct octeon_instr_queue *iq)
{
	u32 pkt_in_done = readl(iq->inst_cnt_reg);
	u32 last_done;
	u32 new_idx;

	last_done = pkt_in_done - iq->pkt_in_done;
	iq->pkt_in_done = pkt_in_done;

	/* Add the number of instructions completed since the last call and
	 * take the result modulo the IQ size to get the new read index.
	 * The iq->reset_instr_cnt is always zero for cn23xx, so no extra
	 * adjustments are needed.
	 */
	new_idx = (iq->octeon_read_index +
		   (u32)(last_done & CN23XX_PKT_IN_DONE_CNT_MASK)) %
		  iq->max_count;

	return new_idx;
}

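/* Enable the VF interrupts selected by intr_flag: program per-OQ packet and
 * time thresholds for output interrupts, set CINT_ENB on the instruction
 * count registers for input interrupts, and set MBOX_ENB on queue 0 for
 * mailbox interrupts.
 */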
static void cn23xx_enable_vf_interrupt(struct octeon_device *oct, u8 intr_flag)
{
	struct octeon_cn23xx_vf *cn23xx = (struct octeon_cn23xx_vf *)oct->chip;
	u32 q_no, time_threshold;

	if (intr_flag & OCTEON_OUTPUT_INTR) {
		for (q_no = 0; q_no < oct->num_oqs; q_no++) {
			/* Set up interrupt packet and time thresholds
			 * for all the OQs
			 */
			time_threshold = cn23xx_vf_get_oq_ticks(
				oct, (u32)CFG_GET_OQ_INTR_TIME(cn23xx->conf));

			octeon_write_csr64(
			    oct, CN23XX_VF_SLI_OQ_PKT_INT_LEVELS(q_no),
			    (CFG_GET_OQ_INTR_PKT(cn23xx->conf) |
			     ((u64)time_threshold << 32)));
		}
	}

	if (intr_flag & OCTEON_INPUT_INTR) {
		for (q_no = 0; q_no < oct->num_oqs; q_no++) {
			/* Set CINT_ENB to enable IQ interrupt */
			octeon_write_csr64(
			    oct, CN23XX_VF_SLI_IQ_INSTR_COUNT64(q_no),
			    ((octeon_read_csr64(
				  oct, CN23XX_VF_SLI_IQ_INSTR_COUNT64(q_no)) &
			      ~CN23XX_PKT_IN_DONE_CNT_MASK) |
			     CN23XX_INTR_CINT_ENB));
		}
	}

	/* Set queue-0 MBOX_ENB to enable VF mailbox interrupt */
	if (intr_flag & OCTEON_MBOX_INTR) {
		octeon_write_csr64(
		    oct, CN23XX_VF_SLI_PKT_MBOX_INT(0),
		    (octeon_read_csr64(oct, CN23XX_VF_SLI_PKT_MBOX_INT(0)) |
		     CN23XX_INTR_MBOX_ENB));
	}
}

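/* Disable the VF interrupts selected by intr_flag, undoing the settings made
 * by cn23xx_enable_vf_interrupt().
 */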
static void cn23xx_disable_vf_interrupt(struct octeon_device *oct, u8 intr_flag)
{
	u32 q_no;

	if (intr_flag & OCTEON_OUTPUT_INTR) {
		for (q_no = 0; q_no < oct->num_oqs; q_no++) {
			/* Set the INT_LEVELS thresholds to their maximum
			 * values to effectively disable PO_INT
			 */
			octeon_write_csr64(
			    oct, CN23XX_VF_SLI_OQ_PKT_INT_LEVELS(q_no),
			    0x3fffffffffffff);
		}
	}
	if (intr_flag & OCTEON_INPUT_INTR) {
		for (q_no = 0; q_no < oct->num_oqs; q_no++) {
			octeon_write_csr64(
			    oct, CN23XX_VF_SLI_IQ_INSTR_COUNT64(q_no),
			    (octeon_read_csr64(
				 oct, CN23XX_VF_SLI_IQ_INSTR_COUNT64(q_no)) &
			     ~(CN23XX_INTR_CINT_ENB |
			       CN23XX_PKT_IN_DONE_CNT_MASK)));
		}
	}

	if (intr_flag & OCTEON_MBOX_INTR) {
		octeon_write_csr64(
		    oct, CN23XX_VF_SLI_PKT_MBOX_INT(0),
		    (octeon_read_csr64(oct, CN23XX_VF_SLI_PKT_MBOX_INT(0)) &
		     ~CN23XX_INTR_MBOX_ENB));
	}
}

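/* One-time VF device setup: map BAR0, read the PF/VF numbers and the
 * PF-provisioned ring count from INPUT_CONTROL(0), clamp the requested ring
 * count to what the PF and the number of CPUs allow, and install the CN23XX
 * VF callbacks in oct->fn_list.
 */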
int cn23xx_setup_octeon_vf_device(struct octeon_device *oct)
{
	struct octeon_cn23xx_vf *cn23xx = (struct octeon_cn23xx_vf *)oct->chip;
	u32 rings_per_vf, ring_flag;
	u64 reg_val;

	if (octeon_map_pci_barx(oct, 0, 0))
		return 1;

	/* INPUT_CONTROL[RPVF] gives the VF IOq count */
	reg_val = octeon_read_csr64(oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(0));

	oct->pf_num = (reg_val >> CN23XX_PKT_INPUT_CTL_PF_NUM_POS) &
		      CN23XX_PKT_INPUT_CTL_PF_NUM_MASK;
	oct->vf_num = (reg_val >> CN23XX_PKT_INPUT_CTL_VF_NUM_POS) &
		      CN23XX_PKT_INPUT_CTL_VF_NUM_MASK;

	reg_val = reg_val >> CN23XX_PKT_INPUT_CTL_RPVF_POS;

	rings_per_vf = reg_val & CN23XX_PKT_INPUT_CTL_RPVF_MASK;

	ring_flag = 0;

	cn23xx->conf = oct_get_config_info(oct, LIO_23XX);
	if (!cn23xx->conf) {
		dev_err(&oct->pci_dev->dev, "%s No Config found for CN23XX\n",
			__func__);
		octeon_unmap_pci_barx(oct, 0);
		return 1;
	}

	if (oct->sriov_info.rings_per_vf > rings_per_vf) {
		dev_warn(&oct->pci_dev->dev,
			 "num_queues:%d greater than PF configured rings_per_vf:%d. Reducing to %d.\n",
			 oct->sriov_info.rings_per_vf, rings_per_vf,
			 rings_per_vf);
		oct->sriov_info.rings_per_vf = rings_per_vf;
	} else {
		if (rings_per_vf > num_present_cpus()) {
			dev_warn(&oct->pci_dev->dev,
				 "PF configured rings_per_vf:%d greater than num_cpu:%d. Using rings_per_vf:%d equal to num cpus\n",
				 rings_per_vf,
				 num_present_cpus(),
				 num_present_cpus());
			oct->sriov_info.rings_per_vf =
				num_present_cpus();
		} else {
			oct->sriov_info.rings_per_vf = rings_per_vf;
		}
	}

	oct->fn_list.setup_iq_regs = cn23xx_setup_vf_iq_regs;
	oct->fn_list.setup_oq_regs = cn23xx_setup_vf_oq_regs;
	oct->fn_list.setup_mbox = cn23xx_setup_vf_mbox;
	oct->fn_list.free_mbox = cn23xx_free_vf_mbox;

	oct->fn_list.msix_interrupt_handler = cn23xx_vf_msix_interrupt_handler;

	oct->fn_list.setup_device_regs = cn23xx_setup_vf_device_regs;
	oct->fn_list.update_iq_read_idx = cn23xx_update_read_index;

	oct->fn_list.enable_interrupt = cn23xx_enable_vf_interrupt;
	oct->fn_list.disable_interrupt = cn23xx_disable_vf_interrupt;

	oct->fn_list.enable_io_queues = cn23xx_enable_vf_io_queues;
	oct->fn_list.disable_io_queues = cn23xx_disable_vf_io_queues;

	return 0;
}

void cn23xx_dump_vf_iq_regs(struct octeon_device *oct)
{
	u32 regval, q_no;

	dev_dbg(&oct->pci_dev->dev, "SLI_IQ_DOORBELL_0 [0x%x]: 0x%016llx\n",
		CN23XX_VF_SLI_IQ_DOORBELL(0),
		CVM_CAST64(octeon_read_csr64(
					oct, CN23XX_VF_SLI_IQ_DOORBELL(0))));

	dev_dbg(&oct->pci_dev->dev, "SLI_IQ_BASEADDR_0 [0x%x]: 0x%016llx\n",
		CN23XX_VF_SLI_IQ_BASE_ADDR64(0),
		CVM_CAST64(octeon_read_csr64(
			oct, CN23XX_VF_SLI_IQ_BASE_ADDR64(0))));

	dev_dbg(&oct->pci_dev->dev, "SLI_IQ_FIFO_RSIZE_0 [0x%x]: 0x%016llx\n",
		CN23XX_VF_SLI_IQ_SIZE(0),
		CVM_CAST64(octeon_read_csr64(oct, CN23XX_VF_SLI_IQ_SIZE(0))));

	for (q_no = 0; q_no < oct->sriov_info.rings_per_vf; q_no++) {
		dev_dbg(&oct->pci_dev->dev, "SLI_PKT[%d]_INPUT_CTL [0x%x]: 0x%016llx\n",
			q_no, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no),
			CVM_CAST64(octeon_read_csr64(
				oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no))));
	}

	pci_read_config_dword(oct->pci_dev, CN23XX_CONFIG_PCIE_DEVCTL, &regval);
	dev_dbg(&oct->pci_dev->dev, "Config DevCtl [0x%x]: 0x%08x\n",
		CN23XX_CONFIG_PCIE_DEVCTL, regval);
}