/* SPDX-License-Identifier: GPL-2.0
 * Marvell OcteonTx2 RVU Admin Function driver
 *
 * Copyright (C) 2018 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef COMMON_H
#define COMMON_H

#include "rvu_struct.h"

#define OTX2_ALIGN			128  /* Align to cacheline */

#define Q_SIZE_16		0ULL /* 16 entries */
#define Q_SIZE_64		1ULL /* 64 entries */
#define Q_SIZE_256		2ULL
#define Q_SIZE_1K		3ULL
#define Q_SIZE_4K		4ULL
#define Q_SIZE_16K		5ULL
#define Q_SIZE_64K		6ULL
#define Q_SIZE_256K		7ULL
#define Q_SIZE_1M		8ULL /* Million entries */
#define Q_SIZE_MIN		Q_SIZE_16
#define Q_SIZE_MAX		Q_SIZE_1M

#define Q_COUNT(x)		(16ULL << (2 * (x)))
#define Q_SIZE(x, n)		((ilog2(x) - (n)) / 2)
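
/* Worked example (illustrative only): the size codes above encode an entry
 * count of 16 << (2 * code), so Q_COUNT() decodes a code into an entry count
 * and Q_SIZE() is its inverse when 'n' is log2 of the 16-entry base (i.e. 4):
 *
 *   Q_COUNT(Q_SIZE_1K) == 16 << (2 * 3) == 1024
 *   Q_SIZE(1024, 4)    == (ilog2(1024) - 4) / 2 == 3 == Q_SIZE_1K
 */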

/* Admin queue info */

/* Since we intend to add only one instruction at a time,
 * keep the queue size at its minimum.
 */
#define AQ_SIZE			Q_SIZE_16
/* HW head & tail pointer mask */
#define AQ_PTR_MASK		0xFFFFF

struct qmem {
	void            *base;
	dma_addr_t	iova;
	int		alloc_sz;
	u8		entry_sz;
	u8		align;
	u32		qsize;
};

static inline int qmem_alloc(struct device *dev, struct qmem **q,
			     int qsize, int entry_sz)
{
	struct qmem *qmem;
	int aligned_addr;

	if (!qsize)
		return -EINVAL;

	*q = devm_kzalloc(dev, sizeof(*qmem), GFP_KERNEL);
	if (!*q)
		return -ENOMEM;
	qmem = *q;

	qmem->entry_sz = entry_sz;
	qmem->alloc_sz = (qsize * entry_sz) + OTX2_ALIGN;
	qmem->base = dma_zalloc_coherent(dev, qmem->alloc_sz,
					 &qmem->iova, GFP_KERNEL);
	if (!qmem->base)
		return -ENOMEM;

	qmem->qsize = qsize;

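	/* OTX2_ALIGN extra bytes were reserved in alloc_sz above; round iova
	 * up to the next cacheline boundary and remember the applied offset
	 * in 'align' so that qmem_free() can undo it before freeing.
	 */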
	aligned_addr = ALIGN((u64)qmem->iova, OTX2_ALIGN);
	qmem->align = (aligned_addr - qmem->iova);
	qmem->base += qmem->align;
	qmem->iova += qmem->align;
	return 0;
}

static inline void qmem_free(struct device *dev, struct qmem *qmem)
{
	if (!qmem)
		return;

	if (qmem->base)
		dma_free_coherent(dev, qmem->alloc_sz,
				  qmem->base - qmem->align,
				  qmem->iova - qmem->align);
	devm_kfree(dev, qmem);
}
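
/* Usage sketch (illustrative only; the 128-byte entry size is hypothetical):
 *
 *	struct qmem *inst;
 *	int err;
 *
 *	err = qmem_alloc(dev, &inst, Q_COUNT(AQ_SIZE), 128);
 *	if (err)
 *		return err;
 *	...
 *	qmem_free(dev, inst);
 *
 * qmem_alloc() over-allocates by OTX2_ALIGN bytes and shifts base/iova to a
 * cacheline boundary; qmem_free() subtracts the same offset before handing
 * the buffer back to the DMA API.
 */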

struct admin_queue {
	struct qmem	*inst;
	struct qmem	*res;
	spinlock_t	lock; /* Serialize inst enqueue from PFs */
};

/* NPA aura count */
enum npa_aura_sz {
	NPA_AURA_SZ_0,
	NPA_AURA_SZ_128,
	NPA_AURA_SZ_256,
	NPA_AURA_SZ_512,
	NPA_AURA_SZ_1K,
	NPA_AURA_SZ_2K,
	NPA_AURA_SZ_4K,
	NPA_AURA_SZ_8K,
	NPA_AURA_SZ_16K,
	NPA_AURA_SZ_32K,
	NPA_AURA_SZ_64K,
	NPA_AURA_SZ_128K,
	NPA_AURA_SZ_256K,
	NPA_AURA_SZ_512K,
	NPA_AURA_SZ_1M,
	NPA_AURA_SZ_MAX,
};

#define NPA_AURA_COUNT(x)	(1ULL << ((x) + 6))
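
/* Worked example (illustrative only): each npa_aura_sz step doubles the
 * aura count, starting from 128 at NPA_AURA_SZ_128:
 *
 *   NPA_AURA_COUNT(NPA_AURA_SZ_128) == 1ULL << 7  == 128
 *   NPA_AURA_COUNT(NPA_AURA_SZ_1M)  == 1ULL << 20 == 1M auras
 */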

/* NPA AQ result structure for init/read/write of aura HW contexts */
struct npa_aq_aura_res {
	struct	npa_aq_res_s	res;
	struct	npa_aura_s	aura_ctx;
	struct	npa_aura_s	ctx_mask;
};

/* NPA AQ result structure for init/read/write of pool HW contexts */
struct npa_aq_pool_res {
	struct	npa_aq_res_s	res;
	struct	npa_pool_s	pool_ctx;
	struct	npa_pool_s	ctx_mask;
};

/* NIX Transmit schedulers */
enum nix_scheduler {
	NIX_TXSCH_LVL_SMQ = 0x0,
	NIX_TXSCH_LVL_MDQ = 0x0,
	NIX_TXSCH_LVL_TL4 = 0x1,
	NIX_TXSCH_LVL_TL3 = 0x2,
	NIX_TXSCH_LVL_TL2 = 0x3,
	NIX_TXSCH_LVL_TL1 = 0x4,
	NIX_TXSCH_LVL_CNT = 0x5,
};
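
/* Note (explanatory): the TX scheduler levels form a hierarchy from the send
 * queues up to the link. SMQ and MDQ name the same bottom level (hence the
 * shared 0x0 encoding), which feeds TL4, then TL3, TL2 and finally TL1 at
 * the top.
 */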

/* NIX RX action operation */
#define NIX_RX_ACTIONOP_DROP		(0x0ull)
#define NIX_RX_ACTIONOP_UCAST		(0x1ull)
#define NIX_RX_ACTIONOP_UCAST_IPSEC	(0x2ull)
#define NIX_RX_ACTIONOP_MCAST		(0x3ull)
#define NIX_RX_ACTIONOP_RSS		(0x4ull)

/* NIX TX action operation */
#define NIX_TX_ACTIONOP_DROP		(0x0ull)
#define NIX_TX_ACTIONOP_UCAST_DEFAULT	(0x1ull)
#define NIX_TX_ACTIONOP_UCAST_CHAN	(0x2ull)
#define NIX_TX_ACTIONOP_MCAST		(0x3ull)
#define NIX_TX_ACTIONOP_DROP_VIOL	(0x5ull)

#define NPC_MCAM_KEY_X1			0
#define NPC_MCAM_KEY_X2			1
#define NPC_MCAM_KEY_X4			2

#define NIX_INTF_RX			0
#define NIX_INTF_TX			1

#define NIX_INTF_TYPE_CGX		0
#define NIX_INTF_TYPE_LBK		1

#define MAX_LMAC_PKIND			12
#define NIX_LINK_CGX_LMAC(a, b)		(0 + 4 * (a) + (b))
#define NIX_CHAN_CGX_LMAC_CHX(a, b, c)	(0x800 + 0x100 * (a) + 0x10 * (b) + (c))
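
/* Worked example (illustrative only): links and channels are laid out per
 * CGX 'a', LMAC 'b' and channel 'c', so CGX1/LMAC2, channel 0 maps to
 *
 *   NIX_LINK_CGX_LMAC(1, 2)        == 4 * 1 + 2             == 6
 *   NIX_CHAN_CGX_LMAC_CHX(1, 2, 0) == 0x800 + 0x100 + 0x20  == 0x920
 */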

/* NIX LSO format indices.
 * As of now TSO is the only user, so indices are assigned statically.
 */
#define NIX_LSO_FORMAT_IDX_TSOV4	0
#define NIX_LSO_FORMAT_IDX_TSOV6	1

/* RSS info */
#define MAX_RSS_GROUPS			8
/* Group 0 has to be used in default pkt forwarding MCAM entries
 * reserved for NIXLFs. Groups 1-7 can be used for RSS for ntuple
 * filters.
 */
#define DEFAULT_RSS_CONTEXT_GROUP	0
#define MAX_RSS_INDIR_TBL_SIZE		256 /* 1 << Max adder bits */

/* NIX flow tag, key type flags */
#define FLOW_KEY_TYPE_PORT	BIT(0)
#define FLOW_KEY_TYPE_IPV4	BIT(1)
#define FLOW_KEY_TYPE_IPV6	BIT(2)
#define FLOW_KEY_TYPE_TCP	BIT(3)
#define FLOW_KEY_TYPE_UDP	BIT(4)
#define FLOW_KEY_TYPE_SCTP	BIT(5)
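
/* Example (illustrative only): these flags are bitwise-ORable, so hashing on
 * an IPv4/TCP 4-tuple would presumably be described as
 *
 *   FLOW_KEY_TYPE_IPV4 | FLOW_KEY_TYPE_TCP
 *
 * with the algorithm indices below selecting such pre-combined cases.
 */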

/* NIX flow tag algorithm indices, max is 31 */
enum {
	FLOW_KEY_ALG_PORT,
	FLOW_KEY_ALG_IP,
	FLOW_KEY_ALG_TCP,
	FLOW_KEY_ALG_UDP,
	FLOW_KEY_ALG_SCTP,
	FLOW_KEY_ALG_TCP_UDP,
	FLOW_KEY_ALG_TCP_SCTP,
	FLOW_KEY_ALG_UDP_SCTP,
	FLOW_KEY_ALG_TCP_UDP_SCTP,
	FLOW_KEY_ALG_MAX,
};

#endif /* COMMON_H */