// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2018 Netronome Systems, Inc. */

#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/log2.h>

#include "../nfpcore/nfp_cpp.h"
#include "../nfpcore/nfp_nffw.h"
#include "../nfp_app.h"
#include "../nfp_abi.h"
#include "../nfp_main.h"
#include "../nfp_net.h"
#include "main.h"

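/* Names of firmware run-time symbols holding ABM capabilities;
 * the "%u" in each name is filled in with the PF id.
 */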
#define NFP_NUM_PRIOS_SYM_NAME	"_abi_pci_dscp_num_prio_%u"
#define NFP_NUM_BANDS_SYM_NAME	"_abi_pci_dscp_num_band_%u"
#define NFP_ACT_MASK_SYM_NAME	"_abi_nfd_out_q_actions_%u"

#define NFP_RED_SUPPORT_SYM_NAME	"_abi_nfd_out_red_offload_%u"

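/* Per-subqueue level table: NFP_QLVL_STRIDE bytes per entry, holding the
 * current backlog (bytes and packets), the RED threshold and the action.
 */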
#define NFP_QLVL_SYM_NAME	"_abi_nfd_out_q_lvls_%u%s"
#define NFP_QLVL_STRIDE		16
#define NFP_QLVL_BLOG_BYTES	0
#define NFP_QLVL_BLOG_PKTS	4
#define NFP_QLVL_THRS		8
#define NFP_QLVL_ACT		12

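/* Per-subqueue queue-manager statistics: 64-bit counters for non-stored
 * (NON_STO) and stored (STO) packets, drops and ECN marks.
 */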
#define NFP_QMSTAT_SYM_NAME	"_abi_nfdqm%u_stats%s"
#define NFP_QMSTAT_STRIDE	32
#define NFP_QMSTAT_NON_STO	0
#define NFP_QMSTAT_STO		8
#define NFP_QMSTAT_DROP		16
#define NFP_QMSTAT_ECN		24

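/* Per-band RX queue statistics (packets and bytes), used when the
 * firmware supports priority bands.
 */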
#define NFP_Q_STAT_SYM_NAME	"_abi_nfd_rxq_stats%u%s"
#define NFP_Q_STAT_STRIDE	16
#define NFP_Q_STAT_PKTS		0
#define NFP_Q_STAT_BYTES	8

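/* Layout of the vNIC mailbox used for priority map updates: command,
 * return code, data length, a reserved word, then the packed map itself.
 */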
#define NFP_NET_ABM_MBOX_CMD		NFP_NET_CFG_MBOX_SIMPLE_CMD
#define NFP_NET_ABM_MBOX_RET		NFP_NET_CFG_MBOX_SIMPLE_RET
#define NFP_NET_ABM_MBOX_DATALEN	NFP_NET_CFG_MBOX_SIMPLE_VAL
#define NFP_NET_ABM_MBOX_RESERVED	(NFP_NET_CFG_MBOX_SIMPLE_VAL + 4)
#define NFP_NET_ABM_MBOX_DATA		(NFP_NET_CFG_MBOX_SIMPLE_VAL + 8)

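/* Read one counter from a per-subqueue firmware symbol.  The (band, queue)
 * pair is flattened into a subqueue id relative to the vNIC's queue base.
 */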
static int
nfp_abm_ctrl_stat(struct nfp_abm_link *alink, const struct nfp_rtsym *sym,
		  unsigned int stride, unsigned int offset, unsigned int band,
		  unsigned int queue, bool is_u64, u64 *res)
{
	struct nfp_cpp *cpp = alink->abm->app->cpp;
	u64 val, sym_offset;
	unsigned int qid;
	u32 val32;
	int err;

	qid = band * NFP_NET_MAX_RX_RINGS + alink->queue_base + queue;

	sym_offset = qid * stride + offset;
	if (is_u64)
		err = __nfp_rtsym_readq(cpp, sym, 3, 0, sym_offset, &val);
	else
		err = __nfp_rtsym_readl(cpp, sym, 3, 0, sym_offset, &val32);
	if (err) {
		nfp_err(cpp, "RED offload reading stat failed on vNIC %d band %d queue %d (+ %d)\n",
			alink->id, band, queue, alink->queue_base);
		return err;
	}

	*res = is_u64 ? val : val32;
	return 0;
}

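/* Write a RED threshold for one subqueue, skipping the firmware write if
 * the cached value is already up to date.
 */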
int __nfp_abm_ctrl_set_q_lvl(struct nfp_abm *abm, unsigned int id, u32 val)
{
	struct nfp_cpp *cpp = abm->app->cpp;
	u64 sym_offset;
	int err;

	__clear_bit(id, abm->threshold_undef);
	if (abm->thresholds[id] == val)
		return 0;

	sym_offset = id * NFP_QLVL_STRIDE + NFP_QLVL_THRS;
	err = __nfp_rtsym_writel(cpp, abm->q_lvls, 4, 0, sym_offset, val);
	if (err) {
		nfp_err(cpp,
			"RED offload setting level failed on subqueue %d\n",
			id);
		return err;
	}

	abm->thresholds[id] = val;
	return 0;
}

int nfp_abm_ctrl_set_q_lvl(struct nfp_abm_link *alink, unsigned int band,
			   unsigned int queue, u32 val)
{
	unsigned int threshold;

	threshold = band * NFP_NET_MAX_RX_RINGS + alink->queue_base + queue;

	return __nfp_abm_ctrl_set_q_lvl(alink->abm, threshold, val);
}

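/* Program the queuing action for one subqueue, again using the cached
 * value to avoid redundant writes.
 */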
int __nfp_abm_ctrl_set_q_act(struct nfp_abm *abm, unsigned int id,
			     enum nfp_abm_q_action act)
{
	struct nfp_cpp *cpp = abm->app->cpp;
	u64 sym_offset;
	int err;

	if (abm->actions[id] == act)
		return 0;

	sym_offset = id * NFP_QLVL_STRIDE + NFP_QLVL_ACT;
	err = __nfp_rtsym_writel(cpp, abm->q_lvls, 4, 0, sym_offset, act);
	if (err) {
		nfp_err(cpp,
			"RED offload setting action failed on subqueue %d\n",
			id);
		return err;
	}

	abm->actions[id] = act;
	return 0;
}

int nfp_abm_ctrl_set_q_act(struct nfp_abm_link *alink, unsigned int band,
			   unsigned int queue, enum nfp_abm_q_action act)
{
	unsigned int qid;

	qid = band * NFP_NET_MAX_RX_RINGS + alink->queue_base + queue;

	return __nfp_abm_ctrl_set_q_act(alink->abm, qid, act);
}

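/* Sum the NON_STO / STO queue-manager counters for a queue across all
 * priority bands.
 */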
u64 nfp_abm_ctrl_stat_non_sto(struct nfp_abm_link *alink, unsigned int queue)
{
	unsigned int band;
	u64 val, sum = 0;

	for (band = 0; band < alink->abm->num_bands; band++) {
		if (nfp_abm_ctrl_stat(alink, alink->abm->qm_stats,
				      NFP_QMSTAT_STRIDE, NFP_QMSTAT_NON_STO,
				      band, queue, true, &val))
			return 0;
		sum += val;
	}

	return sum;
}

u64 nfp_abm_ctrl_stat_sto(struct nfp_abm_link *alink, unsigned int queue)
{
	unsigned int band;
	u64 val, sum = 0;

	for (band = 0; band < alink->abm->num_bands; band++) {
		if (nfp_abm_ctrl_stat(alink, alink->abm->qm_stats,
				      NFP_QMSTAT_STRIDE, NFP_QMSTAT_STO,
				      band, queue, true, &val))
			return 0;
		sum += val;
	}

	return sum;
}

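/* Basic packet/byte counters for a queue: without priority band support
 * they come from the vNIC's ring stats (band 0 only), otherwise from the
 * per-band RX queue statistics symbol.
 */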
static int
nfp_abm_ctrl_stat_basic(struct nfp_abm_link *alink, unsigned int band,
			unsigned int queue, unsigned int off, u64 *val)
{
	if (!nfp_abm_has_prio(alink->abm)) {
		if (!band) {
			unsigned int id = alink->queue_base + queue;

			*val = nn_readq(alink->vnic,
					NFP_NET_CFG_RXR_STATS(id) + off);
		} else {
			*val = 0;
		}

		return 0;
	} else {
		return nfp_abm_ctrl_stat(alink, alink->abm->q_stats,
					 NFP_Q_STAT_STRIDE, off, band, queue,
					 true, val);
	}
}

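/* Gather all Qdisc statistics for one band/queue: packets, bytes, backlog,
 * drops and ECN overlimits.
 */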
int nfp_abm_ctrl_read_q_stats(struct nfp_abm_link *alink, unsigned int band,
			      unsigned int queue, struct nfp_alink_stats *stats)
{
	int err;

	err = nfp_abm_ctrl_stat_basic(alink, band, queue, NFP_Q_STAT_PKTS,
				      &stats->tx_pkts);
	if (err)
		return err;

	err = nfp_abm_ctrl_stat_basic(alink, band, queue, NFP_Q_STAT_BYTES,
				      &stats->tx_bytes);
	if (err)
		return err;

	err = nfp_abm_ctrl_stat(alink, alink->abm->q_lvls, NFP_QLVL_STRIDE,
				NFP_QLVL_BLOG_BYTES, band, queue, false,
				&stats->backlog_bytes);
	if (err)
		return err;

	err = nfp_abm_ctrl_stat(alink, alink->abm->q_lvls,
				NFP_QLVL_STRIDE, NFP_QLVL_BLOG_PKTS,
				band, queue, false, &stats->backlog_pkts);
	if (err)
		return err;

	err = nfp_abm_ctrl_stat(alink, alink->abm->qm_stats,
				NFP_QMSTAT_STRIDE, NFP_QMSTAT_DROP,
				band, queue, true, &stats->drops);
	if (err)
		return err;

	return nfp_abm_ctrl_stat(alink, alink->abm->qm_stats,
				 NFP_QMSTAT_STRIDE, NFP_QMSTAT_ECN,
				 band, queue, true, &stats->overlimits);
}

int nfp_abm_ctrl_read_q_xstats(struct nfp_abm_link *alink,
			       unsigned int band, unsigned int queue,
			       struct nfp_alink_xstats *xstats)
{
	int err;

	err = nfp_abm_ctrl_stat(alink, alink->abm->qm_stats,
				NFP_QMSTAT_STRIDE, NFP_QMSTAT_DROP,
				band, queue, true, &xstats->pdrop);
	if (err)
		return err;

	return nfp_abm_ctrl_stat(alink, alink->abm->qm_stats,
				 NFP_QMSTAT_STRIDE, NFP_QMSTAT_ECN,
				 band, queue, true, &xstats->ecn_marked);
}

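/* Enable/disable the queue manager offload via the PF mailbox. */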
int nfp_abm_ctrl_qm_enable(struct nfp_abm *abm)
{
	return nfp_mbox_cmd(abm->app->pf, NFP_MBOX_PCIE_ABM_ENABLE,
			    NULL, 0, NULL, 0);
}

int nfp_abm_ctrl_qm_disable(struct nfp_abm *abm)
{
	return nfp_mbox_cmd(abm->app->pf, NFP_MBOX_PCIE_ABM_DISABLE,
			    NULL, 0, NULL, 0);
}

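/* Push a packed DSCP -> virtual queue map to the firmware through the
 * vNIC mailbox.
 */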
int nfp_abm_ctrl_prio_map_update(struct nfp_abm_link *alink, u32 *packed)
{
	struct nfp_net *nn = alink->vnic;
	unsigned int i;
	int err;

	/* Write data_len and wipe reserved */
	nn_writeq(nn, nn->tlv_caps.mbox_off + NFP_NET_ABM_MBOX_DATALEN,
		  alink->abm->prio_map_len);

	for (i = 0; i < alink->abm->prio_map_len; i += sizeof(u32))
		nn_writel(nn, nn->tlv_caps.mbox_off + NFP_NET_ABM_MBOX_DATA + i,
			  packed[i / sizeof(u32)]);

	err = nfp_net_reconfig_mbox(nn,
				    NFP_NET_CFG_MBOX_CMD_PCI_DSCP_PRIOMAP_SET);
	if (err)
		nfp_err(alink->abm->app->cpp,
			"setting DSCP -> VQ map failed with error %d\n", err);
	return err;
}

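/* Ensure the vNIC mailbox is large enough to hold a priority map update. */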
static int nfp_abm_ctrl_prio_check_params(struct nfp_abm_link *alink)
{
	struct nfp_abm *abm = alink->abm;
	struct nfp_net *nn = alink->vnic;
	unsigned int min_mbox_sz;

	if (!nfp_abm_has_prio(alink->abm))
		return 0;

	min_mbox_sz = NFP_NET_ABM_MBOX_DATA + alink->abm->prio_map_len;
	if (nn->tlv_caps.mbox_len < min_mbox_sz) {
		nfp_err(abm->app->pf->cpp, "vNIC mailbox too small for prio offload: %u, need: %u\n",
			nn->tlv_caps.mbox_len, min_mbox_sz);
		return -EINVAL;
	}

	return 0;
}

int nfp_abm_ctrl_read_params(struct nfp_abm_link *alink)
{
	alink->queue_base = nn_readl(alink->vnic, NFP_NET_CFG_START_RXQ);
	alink->queue_base /= alink->vnic->stride_rx;

	return nfp_abm_ctrl_prio_check_params(alink);
}

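/* Size of the packed priority map: enough bits per priority to hold a band
 * index, rounded up to a power of two, then to whole 32-bit words.
 */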
static unsigned int nfp_abm_ctrl_prio_map_size(struct nfp_abm *abm)
{
	unsigned int size;

	size = roundup_pow_of_two(order_base_2(abm->num_bands));
	size = DIV_ROUND_UP(size * abm->num_prios, BITS_PER_BYTE);
	size = round_up(size, sizeof(u32));

	return size;
}

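/* Look up a firmware run-time symbol and validate its size. */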
static const struct nfp_rtsym *
nfp_abm_ctrl_find_rtsym(struct nfp_pf *pf, const char *name, unsigned int size)
{
	const struct nfp_rtsym *sym;

	sym = nfp_rtsym_lookup(pf->rtbl, name);
	if (!sym) {
		nfp_err(pf->cpp, "Symbol '%s' not found\n", name);
		return ERR_PTR(-ENOENT);
	}
	if (nfp_rtsym_size(sym) != size) {
		nfp_err(pf->cpp,
			"Symbol '%s' wrong size: expected %u got %llu\n",
			name, size, nfp_rtsym_size(sym));
		return ERR_PTR(-EINVAL);
	}

	return sym;
}

static const struct nfp_rtsym *
nfp_abm_ctrl_find_q_rtsym(struct nfp_abm *abm, const char *name_fmt,
			  size_t size)
{
	char pf_symbol[64];

	size = array3_size(size, abm->num_bands, NFP_NET_MAX_RX_RINGS);
	snprintf(pf_symbol, sizeof(pf_symbol), name_fmt,
		 abm->pf_id, nfp_abm_has_prio(abm) ? "_per_band" : "");

	return nfp_abm_ctrl_find_rtsym(abm->app->pf, pf_symbol, size);
}

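/* Read ABM capabilities and locate the per-queue firmware symbols used by
 * the rest of this file.
 */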
int nfp_abm_ctrl_find_addrs(struct nfp_abm *abm)
{
	struct nfp_pf *pf = abm->app->pf;
	const struct nfp_rtsym *sym;
	int res;

	abm->pf_id = nfp_cppcore_pcie_unit(pf->cpp);

	/* Check if Qdisc offloads are supported */
	res = nfp_pf_rtsym_read_optional(pf, NFP_RED_SUPPORT_SYM_NAME, 1);
	if (res < 0)
		return res;
	abm->red_support = res;

	/* Read count of prios and prio bands */
	res = nfp_pf_rtsym_read_optional(pf, NFP_NUM_BANDS_SYM_NAME, 1);
	if (res < 0)
		return res;
	abm->num_bands = res;

	res = nfp_pf_rtsym_read_optional(pf, NFP_NUM_PRIOS_SYM_NAME, 1);
	if (res < 0)
		return res;
	abm->num_prios = res;

	/* Read available actions */
	res = nfp_pf_rtsym_read_optional(pf, NFP_ACT_MASK_SYM_NAME,
					 BIT(NFP_ABM_ACT_MARK_DROP));
	if (res < 0)
		return res;
	abm->action_mask = res;

	abm->prio_map_len = nfp_abm_ctrl_prio_map_size(abm);
	abm->dscp_mask = GENMASK(7, 8 - order_base_2(abm->num_prios));

	/* Check values are sane, U16_MAX is arbitrarily chosen as max */
	if (!is_power_of_2(abm->num_bands) || !is_power_of_2(abm->num_prios) ||
	    abm->num_bands > U16_MAX || abm->num_prios > U16_MAX ||
	    (abm->num_bands == 1) != (abm->num_prios == 1)) {
		nfp_err(pf->cpp,
			"invalid priomap description num bands: %u and num prios: %u\n",
			abm->num_bands, abm->num_prios);
		return -EINVAL;
	}

	/* Find level and stat symbols */
	if (!abm->red_support)
		return 0;

	sym = nfp_abm_ctrl_find_q_rtsym(abm, NFP_QLVL_SYM_NAME,
					NFP_QLVL_STRIDE);
	if (IS_ERR(sym))
		return PTR_ERR(sym);
	abm->q_lvls = sym;

	sym = nfp_abm_ctrl_find_q_rtsym(abm, NFP_QMSTAT_SYM_NAME,
					NFP_QMSTAT_STRIDE);
	if (IS_ERR(sym))
		return PTR_ERR(sym);
	abm->qm_stats = sym;

	if (nfp_abm_has_prio(abm)) {
		sym = nfp_abm_ctrl_find_q_rtsym(abm, NFP_Q_STAT_SYM_NAME,
						NFP_Q_STAT_STRIDE);
		if (IS_ERR(sym))
			return PTR_ERR(sym);
		abm->q_stats = sym;
	}

	return 0;
}