/* SPDX-License-Identifier: GPL-2.0 */
/* Marvell OcteonTx2 RVU Admin Function driver
 *
 * Copyright (C) 2018 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef COMMON_H
#define COMMON_H

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/log2.h>
#include <linux/spinlock.h>

#include "rvu_struct.h"

#define OTX2_ALIGN			128  /* Align to cacheline */

#define Q_SIZE_16		0ULL /* 16 entries */
#define Q_SIZE_64		1ULL /* 64 entries */
#define Q_SIZE_256		2ULL
#define Q_SIZE_1K		3ULL
#define Q_SIZE_4K		4ULL
#define Q_SIZE_16K		5ULL
#define Q_SIZE_64K		6ULL
#define Q_SIZE_256K		7ULL
#define Q_SIZE_1M		8ULL /* Million entries */
#define Q_SIZE_MIN		Q_SIZE_16
#define Q_SIZE_MAX		Q_SIZE_1M

#define Q_COUNT(x)		(16ULL << (2 * (x)))
#define Q_SIZE(x, n)		((ilog2(x) - (n)) / 2)
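
/* Worked example (illustrative only): Q_COUNT() expands the size encoding
 * above into a number of entries, and Q_SIZE() is its inverse when 'n' is
 * 4, i.e. ilog2() of the minimum 16-entry queue:
 *
 *	Q_COUNT(Q_SIZE_256) = 16 << (2 * 2)	   = 256
 *	Q_SIZE(256, 4)	    = (ilog2(256) - 4) / 2 = 2 = Q_SIZE_256
 */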

/* Admin queue info */

/* Since we intend to add only one instruction at a time,
 * keep queue size to its minimum.
 */
#define AQ_SIZE			Q_SIZE_16
/* HW head & tail pointer mask */
#define AQ_PTR_MASK		0xFFFFF

struct qmem {
	void		*base;		/* Aligned CPU virtual address */
	dma_addr_t	iova;		/* Aligned DMA (IOVA) address */
	int		alloc_sz;	/* Allocated bytes, incl. align pad */
	u16		entry_sz;	/* Size of one queue entry */
	u8		align;		/* Bytes skipped for alignment */
	u32		qsize;		/* Number of entries */
};

static inline int qmem_alloc(struct device *dev, struct qmem **q,
			     int qsize, int entry_sz)
{
	struct qmem *qmem;
	u64 aligned_addr;

	if (!qsize)
		return -EINVAL;

	*q = devm_kzalloc(dev, sizeof(*qmem), GFP_KERNEL);
	if (!*q)
		return -ENOMEM;
	qmem = *q;

	qmem->entry_sz = entry_sz;
	/* Over-allocate so the buffer can be aligned to OTX2_ALIGN */
	qmem->alloc_sz = (qsize * entry_sz) + OTX2_ALIGN;
	qmem->base = dma_alloc_coherent(dev, qmem->alloc_sz,
					&qmem->iova, GFP_KERNEL);
	if (!qmem->base)
		return -ENOMEM;

	qmem->qsize = qsize;

	/* Advance both CPU and DMA addresses by the same alignment offset */
	aligned_addr = ALIGN((u64)qmem->iova, OTX2_ALIGN);
	qmem->align = (aligned_addr - qmem->iova);
	qmem->base += qmem->align;
	qmem->iova += qmem->align;
	return 0;
}

static inline void qmem_free(struct device *dev, struct qmem *qmem)
{
	if (!qmem)
		return;

	if (qmem->base)
		dma_free_coherent(dev, qmem->alloc_sz,
				  qmem->base - qmem->align,
				  qmem->iova - qmem->align);
	devm_kfree(dev, qmem);
}
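
/* Usage sketch (illustrative only): allocating an aligned coherent DMA
 * ring of Q_COUNT(AQ_SIZE) entries and freeing it again. 'dev' and
 * 'inst_size' are placeholders for the RVU device and the HW-defined
 * instruction size; neither is defined in this header.
 *
 *	struct qmem *ring;
 *	int err;
 *
 *	err = qmem_alloc(dev, &ring, Q_COUNT(AQ_SIZE), inst_size);
 *	if (err)
 *		return err;
 *	... use ring->base (CPU) / ring->iova (DMA), both OTX2_ALIGN
 *	... aligned, holding Q_COUNT(AQ_SIZE) entries of inst_size bytes
 *	qmem_free(dev, ring);
 */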

struct admin_queue {
	struct qmem	*inst;	/* Instruction queue memory */
	struct qmem	*res;	/* Result/response memory */
	spinlock_t	lock;	/* Serialize inst enqueue from PFs */
};
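
/* Enqueue sketch (illustrative only, not the driver's actual code): the
 * spinlock serializes instruction submission from multiple PFs; the head
 * read and doorbell write below are placeholders for the real AQ register
 * accesses.
 *
 *	spin_lock(&aq->lock);
 *	head = <read AQ head from HW> & AQ_PTR_MASK;
 *	memcpy(aq->inst->base + head * aq->inst->entry_sz,
 *	       inst, aq->inst->entry_sz);
 *	<ring AQ doorbell, then poll aq->res for the result>
 *	spin_unlock(&aq->lock);
 */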

/* NPA aura count */
enum npa_aura_sz {
	NPA_AURA_SZ_0,
	NPA_AURA_SZ_128,
	NPA_AURA_SZ_256,
	NPA_AURA_SZ_512,
	NPA_AURA_SZ_1K,
	NPA_AURA_SZ_2K,
	NPA_AURA_SZ_4K,
	NPA_AURA_SZ_8K,
	NPA_AURA_SZ_16K,
	NPA_AURA_SZ_32K,
	NPA_AURA_SZ_64K,
	NPA_AURA_SZ_128K,
	NPA_AURA_SZ_256K,
	NPA_AURA_SZ_512K,
	NPA_AURA_SZ_1M,
	NPA_AURA_SZ_MAX,
};

#define NPA_AURA_COUNT(x)	(1ULL << ((x) + 6))
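
/* Worked example (illustrative only): the aura count is 1 << (encoding + 6),
 * so each step in the enum doubles the count:
 *
 *	NPA_AURA_COUNT(NPA_AURA_SZ_128) = 1ULL << (1 + 6)  = 128
 *	NPA_AURA_COUNT(NPA_AURA_SZ_1M)	= 1ULL << (14 + 6) = 1M
 */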

/* NPA AQ result structure for init/read/write of aura HW contexts */
struct npa_aq_aura_res {
	struct	npa_aq_res_s	res;
	struct	npa_aura_s	aura_ctx;
	struct	npa_aura_s	ctx_mask;
};

/* NPA AQ result structure for init/read/write of pool HW contexts */
struct npa_aq_pool_res {
	struct	npa_aq_res_s	res;
	struct	npa_pool_s	pool_ctx;
	struct	npa_pool_s	ctx_mask;
};

/* NIX Transmit schedulers */
enum nix_scheduler {
	NIX_TXSCH_LVL_SMQ = 0x0,
	NIX_TXSCH_LVL_MDQ = 0x0,
	NIX_TXSCH_LVL_TL4 = 0x1,
	NIX_TXSCH_LVL_TL3 = 0x2,
	NIX_TXSCH_LVL_TL2 = 0x3,
	NIX_TXSCH_LVL_TL1 = 0x4,
	NIX_TXSCH_LVL_CNT = 0x5,
};

#define TXSCH_RR_QTM_MAX		((1 << 24) - 1)
#define TXSCH_TL1_DFLT_RR_QTM		TXSCH_RR_QTM_MAX
#define TXSCH_TL1_DFLT_RR_PRIO		(0x1ull)
#define MAX_SCHED_WEIGHT		0xFF
#define DFLT_RR_WEIGHT			71
#define DFLT_RR_QTM	((DFLT_RR_WEIGHT * TXSCH_RR_QTM_MAX) \
			 / MAX_SCHED_WEIGHT)
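
/* Worked example (illustrative only): the default quantum scales the RR
 * weight into the 24-bit quantum range, i.e. 71/255 (~28%) of the maximum:
 *
 *	DFLT_RR_QTM = (71 * 0xFFFFFF) / 0xFF = 0x47 * 0x010101 = 0x474747
 */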

/* Min/Max packet sizes, excluding FCS */
#define	NIC_HW_MIN_FRS			40
#define	NIC_HW_MAX_FRS			9212
#define	SDP_HW_MAX_FRS			65535

/* NIX RX action operation */
#define NIX_RX_ACTIONOP_DROP		(0x0ull)
#define NIX_RX_ACTIONOP_UCAST		(0x1ull)
#define NIX_RX_ACTIONOP_UCAST_IPSEC	(0x2ull)
#define NIX_RX_ACTIONOP_MCAST		(0x3ull)
#define NIX_RX_ACTIONOP_RSS		(0x4ull)
/* Use the RX action set in the default unicast entry */
#define NIX_RX_ACTION_DEFAULT		(0xfull)

/* NIX TX action operation */
#define NIX_TX_ACTIONOP_DROP		(0x0ull)
#define NIX_TX_ACTIONOP_UCAST_DEFAULT	(0x1ull)
#define NIX_TX_ACTIONOP_UCAST_CHAN	(0x2ull)
#define NIX_TX_ACTIONOP_MCAST		(0x3ull)
#define NIX_TX_ACTIONOP_DROP_VIOL	(0x5ull)

#define NPC_MCAM_KEY_X1			0
#define NPC_MCAM_KEY_X2			1
#define NPC_MCAM_KEY_X4			2

#define NIX_INTFX_RX(a)			(0x0ull | ((a) << 1))
#define NIX_INTFX_TX(a)			(0x1ull | ((a) << 1))

/* Default interfaces are NIX0_RX and NIX0_TX */
#define NIX_INTF_RX			NIX_INTFX_RX(0)
#define NIX_INTF_TX			NIX_INTFX_TX(0)
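
/* Worked example (illustrative only): bit 0 selects RX (0) or TX (1) and
 * the NIX block number is shifted into bit 1 and above:
 *
 *	NIX_INTFX_RX(0) = 0, NIX_INTFX_TX(0) = 1
 *	NIX_INTFX_RX(1) = 2, NIX_INTFX_TX(1) = 3
 */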

#define NIX_INTF_TYPE_CGX		0
#define NIX_INTF_TYPE_LBK		1

#define MAX_LMAC_PKIND			12
#define NIX_LINK_CGX_LMAC(a, b)		(0 + 4 * (a) + (b))
#define NIX_LINK_LBK(a)			(12 + (a))
#define NIX_CHAN_CGX_LMAC_CHX(a, b, c)	(0x800 + 0x100 * (a) + 0x10 * (b) + (c))
#define NIX_CHAN_LBK_CHX(a, b)		(0 + 0x100 * (a) + (b))
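
/* Worked example (illustrative only): link and channel numbers for CGX1,
 * LMAC2, channel 0:
 *
 *	NIX_LINK_CGX_LMAC(1, 2)		= 4 * 1 + 2		     = 6
 *	NIX_CHAN_CGX_LMAC_CHX(1, 2, 0)	= 0x800 + 0x100 + 0x20 + 0x0 = 0x920
 *	NIX_CHAN_LBK_CHX(0, 0)		= 0
 */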

/* NIX LSO format indices.
 * TSO is the only user for now, so indices are assigned statically.
 */
#define NIX_LSO_FORMAT_IDX_TSOV4	0
#define NIX_LSO_FORMAT_IDX_TSOV6	1

/* RSS info */
#define MAX_RSS_GROUPS			8
/* Group 0 has to be used in default pkt forwarding MCAM entries
 * reserved for NIXLFs. Groups 1-7 can be used for RSS of ntuple
 * filters.
 */
#define DEFAULT_RSS_CONTEXT_GROUP	0
#define MAX_RSS_INDIR_TBL_SIZE		256 /* 1 << Max adder bits */

/* NDC info */
enum ndc_idx_e {
	NIX0_RX = 0x0,
	NIX0_TX = 0x1,
	NPA0_U  = 0x2,
	NIX1_RX = 0x4,
	NIX1_TX = 0x5,
};

enum ndc_ctype_e {
	CACHING = 0x0,
	BYPASS = 0x1,
};

#define NDC_MAX_PORT 6
#define NDC_READ_TRANS 0
#define NDC_WRITE_TRANS 1

#endif /* COMMON_H */