/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright 2008-2010 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 */

#ifndef _VNIC_CQ_H_
#define _VNIC_CQ_H_

#include <linux/ktime.h>

#include "cq_desc.h"
#include "vnic_dev.h"

/* Completion queue control; mirrors the device's memory-mapped register
 * layout, with byte offsets noted on each field
 */
struct vnic_cq_ctrl {
	u64 ring_base;			/* 0x00 */
	u32 ring_size;			/* 0x08 */
	u32 pad0;
	u32 flow_control_enable;	/* 0x10 */
	u32 pad1;
	u32 color_enable;		/* 0x18 */
	u32 pad2;
	u32 cq_head;			/* 0x20 */
	u32 pad3;
	u32 cq_tail;			/* 0x28 */
	u32 pad4;
	u32 cq_tail_color;		/* 0x30 */
	u32 pad5;
	u32 interrupt_enable;		/* 0x38 */
	u32 pad6;
	u32 cq_entry_enable;		/* 0x40 */
	u32 pad7;
	u32 cq_message_enable;		/* 0x48 */
	u32 pad8;
	u32 interrupt_offset;		/* 0x50 */
	u32 pad9;
	u64 cq_message_addr;		/* 0x58 */
	u32 pad10;
};

/* RX byte counters kept per completion queue, used to drive adaptive
 * interrupt coalescing
 */
struct vnic_rx_bytes_counter {
	unsigned int small_pkt_bytes_cnt;
	unsigned int large_pkt_bytes_cnt;
};

struct vnic_cq {
	unsigned int index;
	struct vnic_dev *vdev;
	struct vnic_cq_ctrl __iomem *ctrl;	/* memory-mapped */
	struct vnic_dev_ring ring;
	unsigned int to_clean;			/* next descriptor to service */
	unsigned int last_color;		/* color bit of the previous pass over the ring */
	unsigned int interrupt_offset;
	struct vnic_rx_bytes_counter pkt_size_counter;
	unsigned int cur_rx_coal_timeval;	/* RX coalescing interval in use */
	unsigned int tobe_rx_coal_timeval;	/* RX coalescing interval to apply next */
	ktime_t prev_ts;			/* last coalescing adjustment time */
};

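/**
 * vnic_cq_service - service completed descriptors on a completion queue
 * @cq: completion queue to service
 * @work_to_do: maximum number of descriptors to service
 * @q_service: callback invoked for each completed descriptor; a nonzero
 *	return value stops servicing early
 * @opaque: context pointer passed through to @q_service
 *
 * Walks the ring starting at @cq->to_clean, handing every descriptor
 * whose color bit differs from @cq->last_color to @q_service.  Because
 * the color written by the device alternates on each pass over the
 * ring, new entries are detected without reading a hardware index
 * register.
 *
 * Returns the number of descriptors serviced.
 */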
static inline unsigned int vnic_cq_service(struct vnic_cq *cq,
	unsigned int work_to_do,
	int (*q_service)(struct vnic_dev *vdev, struct cq_desc *cq_desc,
	u8 type, u16 q_number, u16 completed_index, void *opaque),
	void *opaque)
{
	struct cq_desc *cq_desc;
	unsigned int work_done = 0;
	u16 q_number, completed_index;
	u8 type, color;

	cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs +
		cq->ring.desc_size * cq->to_clean);
	cq_desc_dec(cq_desc, &type, &color,
		&q_number, &completed_index);

	/* A descriptor is new only while its color bit differs from the
	 * color of the previous pass over the ring.
	 */
	while (color != cq->last_color) {

		if ((*q_service)(cq->vdev, cq_desc, type,
			q_number, completed_index, opaque))
			break;

		cq->to_clean++;
		if (cq->to_clean == cq->ring.desc_count) {
			/* Wrapped: the device writes the opposite color
			 * on its next pass, so flip the expected color.
			 */
			cq->to_clean = 0;
			cq->last_color = cq->last_color ? 0 : 1;
		}

		cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs +
			cq->ring.desc_size * cq->to_clean);
		cq_desc_dec(cq_desc, &type, &color,
			&q_number, &completed_index);

		work_done++;
		if (work_done >= work_to_do)
			break;
	}

	return work_done;
}
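
/*
 * Usage sketch (illustrative only; my_rq_service() and "budget" are
 * hypothetical names, not part of this header): a driver's NAPI poll
 * routine might drain a receive CQ like this.  The callback should
 * complete the buffer identified by completed_index and return 0 to
 * keep servicing, or nonzero to stop early:
 *
 *	static int my_rq_service(struct vnic_dev *vdev,
 *		struct cq_desc *cq_desc, u8 type, u16 q_number,
 *		u16 completed_index, void *opaque)
 *	{
 *		return 0;
 *	}
 *
 *	work_done = vnic_cq_service(cq, budget, my_rq_service, NULL);
 */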

void vnic_cq_free(struct vnic_cq *cq);
int vnic_cq_alloc(struct vnic_dev *vdev, struct vnic_cq *cq, unsigned int index,
	unsigned int desc_count, unsigned int desc_size);
void vnic_cq_init(struct vnic_cq *cq, unsigned int flow_control_enable,
	unsigned int color_enable, unsigned int cq_head, unsigned int cq_tail,
	unsigned int cq_tail_color, unsigned int interrupt_enable,
	unsigned int cq_entry_enable, unsigned int message_enable,
	unsigned int interrupt_offset, u64 message_addr);
void vnic_cq_clean(struct vnic_cq *cq);
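
/*
 * Lifecycle sketch (illustrative; the argument values are placeholders,
 * not recommendations): a driver allocates the ring, programs the
 * control registers, services the queue from its poll loop, and tears
 * it down on stop.  The ten values after cq follow the parameter order
 * of the vnic_cq_init() prototype above:
 *
 *	err = vnic_cq_alloc(vdev, cq, index, desc_count, desc_size);
 *	if (!err)
 *		vnic_cq_init(cq, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0);
 *	...
 *	vnic_cq_clean(cq);
 *	vnic_cq_free(cq);
 */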

#endif /* _VNIC_CQ_H_ */