// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTx2 RVU Physical Function ethernet driver
 *
 * Copyright (C) 2020 Marvell.
 */

#include "cn10k.h"
#include "otx2_reg.h"
#include "otx2_struct.h"

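/* Datapath hooks that differ between OcteonTx2 and CN10K silicon.
 * cn10k_lmtst_init() selects the appropriate table based on whether
 * the LMTST capability is present.
 */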
static struct dev_hw_ops otx2_hw_ops = {
	.sq_aq_init = otx2_sq_aq_init,
	.sqe_flush = otx2_sqe_flush,
	.aura_freeptr = otx2_aura_freeptr,
	.refill_pool_ptrs = otx2_refill_pool_ptrs,
};

static struct dev_hw_ops cn10k_hw_ops = {
	.sq_aq_init = cn10k_sq_aq_init,
	.sqe_flush = cn10k_sqe_flush,
	.aura_freeptr = cn10k_aura_freeptr,
	.refill_pool_ptrs = cn10k_refill_pool_ptrs,
};

int cn10k_lmtst_init(struct otx2_nic *pfvf)
{
	struct lmtst_tbl_setup_req *req;
	int qcount, err;

	if (!test_bit(CN10K_LMTST, &pfvf->hw.cap_flag)) {
		pfvf->hw_ops = &otx2_hw_ops;
		return 0;
	}

	pfvf->hw_ops = &cn10k_hw_ops;
	qcount = pfvf->hw.max_queues;
	/* LMTST lines allocation:
	 * qcount is the number of queues (typically num_online_cpus()).
	 * NPA needs TX + RX + XDP lines, i.e. 3 per queue.
	 * NIX needs TX * 32 lines (for burst SQE flush).
	 */
	pfvf->tot_lmt_lines = (qcount * 3) + (qcount * 32);
	pfvf->npa_lmt_lines = qcount * 3;
	pfvf->nix_lmt_size = LMT_BURST_SIZE * LMT_LINE_SIZE;
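	/* For example, with 8 queues (and assuming LMT_LINE_SIZE is 128 and
	 * LMT_BURST_SIZE is 32): npa_lmt_lines = 24, tot_lmt_lines =
	 * 24 + 256 = 280 lines, and each SQ gets a 32 * 128 = 4096 byte
	 * burst region.
	 */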

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_lmtst_tbl_setup(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	req->use_local_lmt_region = true;

	err = qmem_alloc(pfvf->dev, &pfvf->dync_lmt, pfvf->tot_lmt_lines,
			 LMT_LINE_SIZE);
	if (err) {
		mutex_unlock(&pfvf->mbox.lock);
		return err;
	}
	pfvf->hw.lmt_base = (u64 *)pfvf->dync_lmt->base;
	req->lmt_iova = (u64)pfvf->dync_lmt->iova;

	err = otx2_sync_mbox_msg(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);

	return err;
}
EXPORT_SYMBOL(cn10k_lmtst_init);

int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura)
{
	struct nix_cn10k_aq_enq_req *aq;
	struct otx2_nic *pfvf = dev;
	struct otx2_snd_queue *sq;

	sq = &pfvf->qset.sq[qidx];
	sq->lmt_addr = (u64 *)((u64)pfvf->hw.nix_lmt_base +
			       (qidx * pfvf->nix_lmt_size));

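	/* Each SQ owns LMT_BURST_SIZE consecutive LMT lines carved out right
	 * after the NPA lines reserved in cn10k_lmtst_init(). For example,
	 * assuming LMT_BURST_SIZE is 32 and npa_lmt_lines is 24, SQ0 uses
	 * LMT IDs 24-55 and SQ1 uses 56-87.
	 */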
	sq->lmt_id = pfvf->npa_lmt_lines + (qidx * LMT_BURST_SIZE);

	/* Get memory to put this msg */
	aq = otx2_mbox_alloc_msg_nix_cn10k_aq_enq(&pfvf->mbox);
	if (!aq)
		return -ENOMEM;

	aq->sq.cq = pfvf->hw.rx_queues + qidx;
	aq->sq.max_sqe_size = NIX_MAXSQESZ_W16; /* 128 byte */
	aq->sq.cq_ena = 1;
	aq->sq.ena = 1;
	/* Only one SMQ is allocated, map all SQs to that SMQ */
	aq->sq.smq = pfvf->hw.txschq_list[NIX_TXSCH_LVL_SMQ][0];
	/* FIXME: set based on NIX_AF_DWRR_RPM_MTU */
	aq->sq.smq_rr_weight = pfvf->netdev->mtu;
	aq->sq.default_chan = pfvf->hw.tx_chan_base;
	aq->sq.sqe_stype = NIX_STYPE_STF; /* Cache SQB */
	aq->sq.sqb_aura = sqb_aura;
	aq->sq.sq_int_ena = NIX_SQINT_BITS;
	aq->sq.qint_idx = 0;
	/* Due to the pipelining impact, a minimum of 2000 unused CQEs must
	 * be maintained to avoid CQ overflow.
	 */
	aq->sq.cq_limit = ((SEND_CQ_SKID * 256) / (pfvf->qset.sqe_cnt));
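	/* The limit field appears to be expressed in 1/256ths of the queue
	 * size: e.g. with sqe_cnt = 4096 this programs 2000 * 256 / 4096 =
	 * 125, reserving roughly the 2000-entry skid described above.
	 */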

	/* Fill AQ info */
	aq->qidx = qidx;
	aq->ctype = NIX_AQ_CTYPE_SQ;
	aq->op = NIX_AQ_INSTOP_INIT;

	return otx2_sync_mbox_msg(&pfvf->mbox);
}

#define NPA_MAX_BURST 16
void cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq)
{
	struct otx2_nic *pfvf = dev;
	u64 ptrs[NPA_MAX_BURST];
	int num_ptrs = 1;
	dma_addr_t bufptr;
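	/* ptrs[0] is left for __cn10k_aura_freeptr() to fill with the aura
	 * descriptor word before issuing the LMTST, so collected buffer
	 * pointers start at index 1 and num_ptrs counts the whole payload
	 * including that slot.
	 */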

	/* Refill pool with new buffers */
	while (cq->pool_ptrs) {
		if (otx2_alloc_buffer(pfvf, cq, &bufptr)) {
			if (num_ptrs--)
				__cn10k_aura_freeptr(pfvf, cq->cq_idx, ptrs,
						     num_ptrs,
						     cq->rbpool->lmt_addr);
			break;
		}
		cq->pool_ptrs--;
		ptrs[num_ptrs] = (u64)bufptr + OTX2_HEAD_ROOM;
		num_ptrs++;
		if (num_ptrs == NPA_MAX_BURST || cq->pool_ptrs == 0) {
			__cn10k_aura_freeptr(pfvf, cq->cq_idx, ptrs,
					     num_ptrs,
					     cq->rbpool->lmt_addr);
			num_ptrs = 1;
		}
	}
}

void cn10k_sqe_flush(void *dev, struct otx2_snd_queue *sq, int size, int qidx)
{
	u64 val = 0, tar_addr = 0;

	/* FIXME: val[0:10] LMT_ID.
	 * [12:15] number of LMTSTs - 1 in the burst.
	 * [19:63] data size of each LMTST in the burst except the first.
	 */
	val = (sq->lmt_id & 0x7FF);
	/* The target address for the LMTST flush tells HW how many 128bit
	 * words are present.
	 * tar_addr[6:4] is the size of the first LMTST - 1, in units of 128b.
	 */
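	/* For example, a single 128-byte SQE gives (128 / 16) - 1 = 7,
	 * i.e. eight 128-bit words.
	 */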
	tar_addr |= sq->io_addr | (((size / 16) - 1) & 0x7) << 4;
	dma_wmb();
	memcpy(sq->lmt_addr, sq->sqe_base, size);
	cn10k_lmt_flush(val, tar_addr);

	sq->head++;
	sq->head &= (sq->sqe_cnt - 1);
}

int cn10k_free_all_ipolicers(struct otx2_nic *pfvf)
{
	struct nix_bandprof_free_req *req;
	int rc;

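	/* Ingress bandwidth profiles (policers) only exist on CN10K, so on
	 * OcteonTx2 silicon there is nothing to free.
	 */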
	if (is_dev_otx2(pfvf->pdev))
		return 0;

	mutex_lock(&pfvf->mbox.lock);

	req = otx2_mbox_alloc_msg_nix_bandprof_free(&pfvf->mbox);
	if (!req) {
		rc = -ENOMEM;
		goto out;
	}

	/* Free all bandwidth profiles allocated */
	req->free_all = true;

	rc = otx2_sync_mbox_msg(&pfvf->mbox);
out:
	mutex_unlock(&pfvf->mbox.lock);
	return rc;
}

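/* NIX ingress bandwidth profiles are organized in layers; only the leaf
 * layer, which is what an RQ can be mapped to, is used by this driver.
 * The mbox lock is expected to be held by the caller, as
 * cn10k_alloc_matchall_ipolicer() below does.
 */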
int cn10k_alloc_leaf_profile(struct otx2_nic *pfvf, u16 *leaf)
{
	struct nix_bandprof_alloc_req *req;
	struct nix_bandprof_alloc_rsp *rsp;
	int rc;

	req = otx2_mbox_alloc_msg_nix_bandprof_alloc(&pfvf->mbox);
	if (!req)
		return -ENOMEM;

	req->prof_count[BAND_PROF_LEAF_LAYER] = 1;

	rc = otx2_sync_mbox_msg(&pfvf->mbox);
	if (rc)
		goto out;

	rsp = (struct nix_bandprof_alloc_rsp *)
	       otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);
	if (IS_ERR(rsp)) {
		rc = PTR_ERR(rsp);
		goto out;
	}

	if (!rsp->prof_count[BAND_PROF_LEAF_LAYER]) {
		rc = -EIO;
		goto out;
	}

	*leaf = rsp->prof_idx[BAND_PROF_LEAF_LAYER][0];
out:
	if (rc) {
		dev_warn(pfvf->dev,
			 "Failed to allocate ingress bandwidth policer\n");
	}

	return rc;
}

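/* Allocate the single leaf profile shared by all RQs when a port-wide
 * (tc matchall) ingress rate limit is configured.
 */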
int cn10k_alloc_matchall_ipolicer(struct otx2_nic *pfvf)
{
	struct otx2_hw *hw = &pfvf->hw;
	int ret;

	mutex_lock(&pfvf->mbox.lock);

	ret = cn10k_alloc_leaf_profile(pfvf, &hw->matchall_ipolicer);

	mutex_unlock(&pfvf->mbox.lock);

	return ret;
}

#define POLICER_TIMESTAMP	  1  /* 1 second */
#define MAX_RATE_EXP		  22 /* Valid rate exponent range: 0 - 22 */

static void cn10k_get_ingress_burst_cfg(u32 burst, u32 *burst_exp,
					u32 *burst_mantissa)
{
	int tmp;

	/* Burst is calculated as
	 * (1+[BURST_MANTISSA]/256)*2^[BURST_EXPONENT]
	 * This is the upper limit on the number of tokens (bytes) that
	 * can be accumulated in the bucket.
	 */
	*burst_exp = ilog2(burst);
	if (burst < 256) {
		/* No float: can't express mantissa in this case */
		*burst_mantissa = 0;
		return;
	}

	if (*burst_exp > MAX_RATE_EXP)
		*burst_exp = MAX_RATE_EXP;

	/* Calculate mantissa
	 * Find remaining bytes 'burst - 2^burst_exp'
	 * mantissa = (remaining bytes) / 2^(burst_exp - 8)
	 */
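	/* For example, burst = 3000 gives burst_exp = 11, remaining bytes =
	 * 3000 - 2048 = 952 and mantissa = 952 / 2^3 = 119; indeed
	 * (1 + 119/256) * 2^11 = 3000.
	 */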
	tmp = burst - rounddown_pow_of_two(burst);
	*burst_mantissa = tmp / (1UL << (*burst_exp - 8));
}

static void cn10k_get_ingress_rate_cfg(u64 rate, u32 *rate_exp,
				       u32 *rate_mantissa, u32 *rdiv)
{
	u32 div = 0;
	u32 exp = 0;
	u64 tmp;

	/* Figure out the mantissa, exponent and divider from the given max
	 * packet rate.
	 *
	 * To achieve the desired rate, HW adds
	 * (1+[RATE_MANTISSA]/256)*2^[RATE_EXPONENT] tokens (bytes) to the
	 * token bucket every policer timeunit * 2^rdiv, i.e. every
	 * 2 * 2^rdiv usecs. Here the policer timeunit is 2 usecs and the
	 * rate is in bits per sec.
	 * Since floating point cannot be used, the algorithm below uses a
	 * scale factor of 1000000 to support rates up to 100Gbps.
	 */
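	/* In this fixed-point form, tmp is the number of token bytes added
	 * per 2 usec window, scaled by 256 * 1000000:
	 * (rate / 8) * 2e-6 * 256 * 1000000 = rate * 32 * 2.
	 * The loops below normalize tmp into [256000000, 512000000), i.e.
	 * [1.0, 2.0): values below the range are doubled while incrementing
	 * rdiv (stretching the time unit), values above are halved while
	 * incrementing the exponent; the mantissa is then the excess over
	 * 1.0 in 1/256 steps.
	 * For example, rate = 100 Mbps gives tmp = 6.4e9, which after four
	 * halvings becomes 4e8, so exp = 4, rdiv = 0 and mantissa =
	 * (400000000 - 256000000) / 1000000 = 144; (1 + 144/256) * 2^4 = 25
	 * bytes every 2 usecs, i.e. 100 Mbps.
	 */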
	tmp = rate * 32 * 2;
	if (tmp < 256000000) {
		while (tmp < 256000000) {
			tmp = tmp * 2;
			div++;
		}
	} else {
		for (exp = 0; tmp >= 512000000 && exp <= MAX_RATE_EXP; exp++)
			tmp = tmp / 2;

		if (exp > MAX_RATE_EXP)
			exp = MAX_RATE_EXP;
	}

	*rate_mantissa = (tmp - 256000000) / 1000000;
	*rate_exp = exp;
	*rdiv = div;
}

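/* Attach or detach a bandwidth profile to/from an RQ. NIX_AQ_INSTOP_WRITE
 * only updates the context fields whose bits are set in the corresponding
 * rq_mask fields, so the rest of the RQ context is left untouched.
 */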
int cn10k_map_unmap_rq_policer(struct otx2_nic *pfvf, int rq_idx,
			       u16 policer, bool map)
{
	struct nix_cn10k_aq_enq_req *aq;

	aq = otx2_mbox_alloc_msg_nix_cn10k_aq_enq(&pfvf->mbox);
	if (!aq)
		return -ENOMEM;

	/* Enable/disable policing and set the bandwidth profile (policer) index */
	if (map)
		aq->rq.policer_ena = 1;
	else
		aq->rq.policer_ena = 0;
	aq->rq_mask.policer_ena = 1;

	aq->rq.band_prof_id = policer;
	aq->rq_mask.band_prof_id = GENMASK(9, 0);

	/* Fill AQ info */
	aq->qidx = rq_idx;
	aq->ctype = NIX_AQ_CTYPE_RQ;
	aq->op = NIX_AQ_INSTOP_WRITE;

	return otx2_sync_mbox_msg(&pfvf->mbox);
}

int cn10k_free_leaf_profile(struct otx2_nic *pfvf, u16 leaf)
{
	struct nix_bandprof_free_req *req;

	req = otx2_mbox_alloc_msg_nix_bandprof_free(&pfvf->mbox);
	if (!req)
		return -ENOMEM;

	req->prof_count[BAND_PROF_LEAF_LAYER] = 1;
	req->prof_idx[BAND_PROF_LEAF_LAYER][0] = leaf;

	return otx2_sync_mbox_msg(&pfvf->mbox);
}

int cn10k_free_matchall_ipolicer(struct otx2_nic *pfvf)
{
	struct otx2_hw *hw = &pfvf->hw;
	int qidx, rc;

	mutex_lock(&pfvf->mbox.lock);

	/* Remove the policer mapping from all RQs */
	for (qidx = 0; qidx < hw->rx_queues; qidx++)
		cn10k_map_unmap_rq_policer(pfvf, qidx,
					   hw->matchall_ipolicer, false);

	rc = cn10k_free_leaf_profile(pfvf, hw->matchall_ipolicer);

	mutex_unlock(&pfvf->mbox.lock);
	return rc;
}

int cn10k_set_ipolicer_rate(struct otx2_nic *pfvf, u16 profile,
			    u32 burst, u64 rate, bool pps)
{
	struct nix_cn10k_aq_enq_req *aq;
	u32 burst_exp, burst_mantissa;
	u32 rate_exp, rate_mantissa;
	u32 rdiv;

	/* Get exponent and mantissa values for the desired rate */
	cn10k_get_ingress_burst_cfg(burst, &burst_exp, &burst_mantissa);
	cn10k_get_ingress_rate_cfg(rate, &rate_exp, &rate_mantissa, &rdiv);

	/* Init bandwidth profile */
	aq = otx2_mbox_alloc_msg_nix_cn10k_aq_enq(&pfvf->mbox);
	if (!aq)
		return -ENOMEM;

	/* Set initial color mode to blind */
	aq->prof.icolor = 0x03;
	aq->prof_mask.icolor = 0x03;

	/* Set rate and burst values */
	aq->prof.cir_exponent = rate_exp;
	aq->prof_mask.cir_exponent = 0x1F;

	aq->prof.cir_mantissa = rate_mantissa;
	aq->prof_mask.cir_mantissa = 0xFF;

	aq->prof.cbs_exponent = burst_exp;
	aq->prof_mask.cbs_exponent = 0x1F;

	aq->prof.cbs_mantissa = burst_mantissa;
	aq->prof_mask.cbs_mantissa = 0xFF;

	aq->prof.rdiv = rdiv;
	aq->prof_mask.rdiv = 0xF;

	if (pps) {
		/* The number of tokens decremented per packet is calculated
		 * according to the following equation:
		 * max([ LMODE ? 0 : (packet_length - LXPTR)] +
		 *	     ([ADJUST_MANTISSA]/256 - 1) * 2^[ADJUST_EXPONENT],
		 *	1/256)
		 * If LMODE is 1, rate limiting is based on PPS; otherwise
		 * it is based on bps.
		 * The aim of the ADJUST value is to specify a token cost per
		 * packet, as opposed to the packet length which specifies a
		 * cost per byte. To rate limit based on PPS, the adjust
		 * mantissa is set to 384 and the exponent to 1 so that the
		 * number of tokens decremented becomes
		 * (384/256 - 1) * 2^1 = 1, i.e. 1 token per packet.
		 */
		aq->prof.adjust_exponent = 1;
		aq->prof_mask.adjust_exponent = 0x1F;

		aq->prof.adjust_mantissa = 384;
		aq->prof_mask.adjust_mantissa = 0x1FF;

		aq->prof.lmode = 0x1;
		aq->prof_mask.lmode = 0x1;
	}

	/* Two rate three color marker
	 * With PEIR/EIR set to zero, color will be either green or red
	 */
	aq->prof.meter_algo = 2;
	aq->prof_mask.meter_algo = 0x3;

	aq->prof.rc_action = NIX_RX_BAND_PROF_ACTIONRESULT_DROP;
	aq->prof_mask.rc_action = 0x3;

	aq->prof.yc_action = NIX_RX_BAND_PROF_ACTIONRESULT_PASS;
	aq->prof_mask.yc_action = 0x3;

	aq->prof.gc_action = NIX_RX_BAND_PROF_ACTIONRESULT_PASS;
	aq->prof_mask.gc_action = 0x3;

	/* Setting the exponent value to 24 and the mantissa to 0 configures
	 * the bucket with zero values, making the bucket unused. The Peak
	 * information rate and Excess information rate buckets are unused
	 * here.
	 */
	aq->prof.peir_exponent = 24;
	aq->prof_mask.peir_exponent = 0x1F;

	aq->prof.peir_mantissa = 0;
	aq->prof_mask.peir_mantissa = 0xFF;

	aq->prof.pebs_exponent = 24;
	aq->prof_mask.pebs_exponent = 0x1F;

	aq->prof.pebs_mantissa = 0;
	aq->prof_mask.pebs_mantissa = 0xFF;

	/* Fill AQ info */
	aq->qidx = profile;
	aq->ctype = NIX_AQ_CTYPE_BANDPROF;
	aq->op = NIX_AQ_INSTOP_WRITE;

	return otx2_sync_mbox_msg(&pfvf->mbox);
}

int cn10k_set_matchall_ipolicer_rate(struct otx2_nic *pfvf,
				     u32 burst, u64 rate)
{
	struct otx2_hw *hw = &pfvf->hw;
	int qidx, rc;

	mutex_lock(&pfvf->mbox.lock);

	rc = cn10k_set_ipolicer_rate(pfvf, hw->matchall_ipolicer, burst,
				     rate, false);
	if (rc)
		goto out;

	for (qidx = 0; qidx < hw->rx_queues; qidx++) {
		rc = cn10k_map_unmap_rq_policer(pfvf, qidx,
						hw->matchall_ipolicer, true);
		if (rc)
			break;
	}

out:
	mutex_unlock(&pfvf->mbox.lock);
	return rc;
}