/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2016 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/netdevice.h>

#include "cxgb4.h"
#include "sched.h"

static int t4_sched_class_fw_cmd(struct port_info *pi,
				 struct ch_sched_params *p,
				 enum sched_fw_ops op)
{
	struct adapter *adap = pi->adapter;
	struct sched_table *s = pi->sched_tbl;
	struct sched_class *e;
	int err = 0;

	e = &s->tab[p->u.params.class];
	switch (op) {
	case SCHED_FW_OP_ADD:
	case SCHED_FW_OP_DEL:
		err = t4_sched_params(adap, p->type,
				      p->u.params.level, p->u.params.mode,
				      p->u.params.rateunit,
				      p->u.params.ratemode,
				      p->u.params.channel, e->idx,
				      p->u.params.minrate, p->u.params.maxrate,
				      p->u.params.weight, p->u.params.pktsize);
		break;
	default:
		err = -ENOTSUPP;
		break;
	}

	return err;
}

static int t4_sched_bind_unbind_op(struct port_info *pi, void *arg,
				   enum sched_bind_type type, bool bind)
{
	struct adapter *adap = pi->adapter;
	u32 fw_mnem, fw_class, fw_param;
	unsigned int pf = adap->pf;
	unsigned int vf = 0;
	int err = 0;

	switch (type) {
	case SCHED_QUEUE: {
		struct sched_queue_entry *qe;

		qe = (struct sched_queue_entry *)arg;

		/* Create a template for the FW_PARAMS_CMD mnemonic and
		 * value (TX Scheduling Class in this case).
		 */
		fw_mnem = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
			   FW_PARAMS_PARAM_X_V(
				   FW_PARAMS_PARAM_DMAQ_EQ_SCHEDCLASS_ETH));
		fw_class = bind ? qe->param.class : FW_SCHED_CLS_NONE;
		fw_param = (fw_mnem | FW_PARAMS_PARAM_YZ_V(qe->cntxt_id));

		err = t4_set_params(adap, adap->mbox, pf, vf, 1,
				    &fw_param, &fw_class);
		break;
	}
	case SCHED_FLOWC: {
		struct sched_flowc_entry *fe;

		fe = (struct sched_flowc_entry *)arg;

		fw_class = bind ? fe->param.class : FW_SCHED_CLS_NONE;
		err = cxgb4_ethofld_send_flowc(adap->port[pi->port_id],
					       fe->param.tid, fw_class);
		break;
	}
	default:
		err = -ENOTSUPP;
		break;
	}

	return err;
}
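
/* Illustrative sketch (comment only, not compiled in): for SCHED_QUEUE,
 * binding boils down to writing one {param, value} pair through the PF
 * mailbox.  Binding a queue whose egress context id is, say, 5 to class
 * 2 (both ids are made-up example values) would amount to:
 *
 *	fw_param = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
 *		   FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DMAQ_EQ_SCHEDCLASS_ETH) |
 *		   FW_PARAMS_PARAM_YZ_V(5);
 *	fw_class = 2;	(unbinding writes FW_SCHED_CLS_NONE instead)
 *	err = t4_set_params(adap, adap->mbox, adap->pf, 0, 1,
 *			    &fw_param, &fw_class);
 */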

static void *t4_sched_entry_lookup(struct port_info *pi,
				   enum sched_bind_type type,
				   const u32 val)
{
	struct sched_table *s = pi->sched_tbl;
	struct sched_class *e, *end;
	void *found = NULL;

	/* Look for an entry with matching @val */
	end = &s->tab[s->sched_size];
	for (e = &s->tab[0]; e != end; ++e) {
		if (e->state == SCHED_STATE_UNUSED ||
		    e->bind_type != type)
			continue;

		switch (type) {
		case SCHED_QUEUE: {
			struct sched_queue_entry *qe;

			list_for_each_entry(qe, &e->entry_list, list) {
				if (qe->cntxt_id == val) {
					found = qe;
					break;
				}
			}
			break;
		}
		case SCHED_FLOWC: {
			struct sched_flowc_entry *fe;

			list_for_each_entry(fe, &e->entry_list, list) {
				if (fe->param.tid == val) {
					found = fe;
					break;
				}
			}
			break;
		}
		default:
			return NULL;
		}

		if (found)
			break;
	}

	return found;
}

struct sched_class *cxgb4_sched_queue_lookup(struct net_device *dev,
					     struct ch_sched_queue *p)
{
	struct port_info *pi = netdev2pinfo(dev);
	struct sched_queue_entry *qe = NULL;
	struct adapter *adap = pi->adapter;
	struct sge_eth_txq *txq;

	if (p->queue < 0 || p->queue >= pi->nqsets)
		return NULL;

	txq = &adap->sge.ethtxq[pi->first_qset + p->queue];
	qe = t4_sched_entry_lookup(pi, SCHED_QUEUE, txq->q.cntxt_id);
	return qe ? &pi->sched_tbl->tab[qe->param.class] : NULL;
}
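
/* Usage sketch (comment only, not compiled in): look up the class a
 * port's first Tx queue is bound to; the queue index is an example:
 *
 *	struct ch_sched_queue qparam = { .queue = 0 };
 *	struct sched_class *e;
 *
 *	e = cxgb4_sched_queue_lookup(dev, &qparam);
 *	if (e)
 *		netdev_info(dev, "txq 0 bound to class %u\n", e->idx);
 */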

static int t4_sched_queue_unbind(struct port_info *pi, struct ch_sched_queue *p)
{
	struct sched_queue_entry *qe = NULL;
	struct adapter *adap = pi->adapter;
	struct sge_eth_txq *txq;
	struct sched_class *e;
	int err = 0;

	if (p->queue < 0 || p->queue >= pi->nqsets)
		return -ERANGE;

	txq = &adap->sge.ethtxq[pi->first_qset + p->queue];

	/* Find the existing entry that the queue is bound to */
	qe = t4_sched_entry_lookup(pi, SCHED_QUEUE, txq->q.cntxt_id);
	if (qe) {
		err = t4_sched_bind_unbind_op(pi, (void *)qe, SCHED_QUEUE,
					      false);
		if (err)
			return err;

		e = &pi->sched_tbl->tab[qe->param.class];
		list_del(&qe->list);
		kvfree(qe);
		if (atomic_dec_and_test(&e->refcnt))
			cxgb4_sched_class_free(adap->port[pi->port_id], e->idx);
	}
	return err;
}

static int t4_sched_queue_bind(struct port_info *pi, struct ch_sched_queue *p)
{
	struct sched_table *s = pi->sched_tbl;
	struct sched_queue_entry *qe = NULL;
	struct adapter *adap = pi->adapter;
	struct sge_eth_txq *txq;
	struct sched_class *e;
	unsigned int qid;
	int err = 0;

	if (p->queue < 0 || p->queue >= pi->nqsets)
		return -ERANGE;

	qe = kvzalloc(sizeof(*qe), GFP_KERNEL);
	if (!qe)
		return -ENOMEM;

	txq = &adap->sge.ethtxq[pi->first_qset + p->queue];
	qid = txq->q.cntxt_id;

	/* Unbind queue from any existing class */
	err = t4_sched_queue_unbind(pi, p);
	if (err)
		goto out_err;

	/* Bind queue to specified class */
	qe->cntxt_id = qid;
	memcpy(&qe->param, p, sizeof(qe->param));

	e = &s->tab[qe->param.class];
	err = t4_sched_bind_unbind_op(pi, (void *)qe, SCHED_QUEUE, true);
	if (err)
		goto out_err;

	list_add_tail(&qe->list, &e->entry_list);
	e->bind_type = SCHED_QUEUE;
	atomic_inc(&e->refcnt);
	return err;

out_err:
	kvfree(qe);
	return err;
}

static int t4_sched_flowc_unbind(struct port_info *pi, struct ch_sched_flowc *p)
{
	struct sched_flowc_entry *fe = NULL;
	struct adapter *adap = pi->adapter;
	struct sched_class *e;
	int err = 0;

	if (p->tid < 0 || p->tid >= adap->tids.neotids)
		return -ERANGE;

	/* Find the existing entry that the flowc is bound to */
	fe = t4_sched_entry_lookup(pi, SCHED_FLOWC, p->tid);
	if (fe) {
		err = t4_sched_bind_unbind_op(pi, (void *)fe, SCHED_FLOWC,
					      false);
		if (err)
			return err;

		e = &pi->sched_tbl->tab[fe->param.class];
		list_del(&fe->list);
		kvfree(fe);
		if (atomic_dec_and_test(&e->refcnt))
			cxgb4_sched_class_free(adap->port[pi->port_id], e->idx);
	}
	return err;
}

static int t4_sched_flowc_bind(struct port_info *pi, struct ch_sched_flowc *p)
{
	struct sched_table *s = pi->sched_tbl;
	struct sched_flowc_entry *fe = NULL;
	struct adapter *adap = pi->adapter;
	struct sched_class *e;
	int err = 0;

	if (p->tid < 0 || p->tid >= adap->tids.neotids)
		return -ERANGE;

	fe = kvzalloc(sizeof(*fe), GFP_KERNEL);
	if (!fe)
		return -ENOMEM;

	/* Unbind flowc from any existing class */
	err = t4_sched_flowc_unbind(pi, p);
	if (err)
		goto out_err;

	/* Bind flowc to specified class */
	memcpy(&fe->param, p, sizeof(fe->param));

	e = &s->tab[fe->param.class];
	err = t4_sched_bind_unbind_op(pi, (void *)fe, SCHED_FLOWC, true);
	if (err)
		goto out_err;

	list_add_tail(&fe->list, &e->entry_list);
	e->bind_type = SCHED_FLOWC;
	atomic_inc(&e->refcnt);
	return err;

out_err:
	kvfree(fe);
	return err;
}

static void t4_sched_class_unbind_all(struct port_info *pi,
				      struct sched_class *e,
				      enum sched_bind_type type)
{
	if (!e)
		return;

	switch (type) {
	case SCHED_QUEUE: {
		struct sched_queue_entry *qe, *tmp;

		/* The unbind helpers delete and free the current entry,
		 * so use the _safe iterators to avoid reading the next
		 * pointer out of freed memory.
		 */
		list_for_each_entry_safe(qe, tmp, &e->entry_list, list)
			t4_sched_queue_unbind(pi, &qe->param);
		break;
	}
	case SCHED_FLOWC: {
		struct sched_flowc_entry *fe, *tmp;

		list_for_each_entry_safe(fe, tmp, &e->entry_list, list)
			t4_sched_flowc_unbind(pi, &fe->param);
		break;
	}
	default:
		break;
	}
}

static int t4_sched_class_bind_unbind_op(struct port_info *pi, void *arg,
					 enum sched_bind_type type, bool bind)
{
	int err = 0;

	if (!arg)
		return -EINVAL;

	switch (type) {
	case SCHED_QUEUE: {
		struct ch_sched_queue *qe = (struct ch_sched_queue *)arg;

		if (bind)
			err = t4_sched_queue_bind(pi, qe);
		else
			err = t4_sched_queue_unbind(pi, qe);
		break;
	}
	case SCHED_FLOWC: {
		struct ch_sched_flowc *fe = (struct ch_sched_flowc *)arg;

		if (bind)
			err = t4_sched_flowc_bind(pi, fe);
		else
			err = t4_sched_flowc_unbind(pi, fe);
		break;
	}
	default:
		err = -ENOTSUPP;
		break;
	}

	return err;
}

/**
 * cxgb4_sched_class_bind - Bind an entity to a scheduling class
 * @dev: net_device pointer
 * @arg: Entity opaque data
 * @type: Entity type (queue or flowc)
 *
 * Binds an entity (queue or flowc) to a scheduling class.  If the
 * entity is already bound to another class, it is first unbound from
 * that class and then bound to the class specified in @arg.
 */
int cxgb4_sched_class_bind(struct net_device *dev, void *arg,
			   enum sched_bind_type type)
{
	struct port_info *pi = netdev2pinfo(dev);
	u8 class_id;

	if (!can_sched(dev))
		return -ENOTSUPP;

	if (!arg)
		return -EINVAL;

	switch (type) {
	case SCHED_QUEUE: {
		struct ch_sched_queue *qe = (struct ch_sched_queue *)arg;

		class_id = qe->class;
		break;
	}
	case SCHED_FLOWC: {
		struct ch_sched_flowc *fe = (struct ch_sched_flowc *)arg;

		class_id = fe->class;
		break;
	}
	default:
		return -ENOTSUPP;
	}

	if (!valid_class_id(dev, class_id))
		return -EINVAL;

	if (class_id == SCHED_CLS_NONE)
		return -ENOTSUPP;

	return t4_sched_class_bind_unbind_op(pi, arg, type, true);
}

/**
 * cxgb4_sched_class_unbind - Unbind an entity from a scheduling class
 * @dev: net_device pointer
 * @arg: Entity opaque data
 * @type: Entity type (queue or flowc)
 *
 * Unbinds an entity (queue or flowc) from a scheduling class.
 */
int cxgb4_sched_class_unbind(struct net_device *dev, void *arg,
			     enum sched_bind_type type)
{
	struct port_info *pi = netdev2pinfo(dev);
	u8 class_id;

	if (!can_sched(dev))
		return -ENOTSUPP;

	if (!arg)
		return -EINVAL;

	switch (type) {
	case SCHED_QUEUE: {
		struct ch_sched_queue *qe = (struct ch_sched_queue *)arg;

		class_id = qe->class;
		break;
	}
	case SCHED_FLOWC: {
		struct ch_sched_flowc *fe = (struct ch_sched_flowc *)arg;

		class_id = fe->class;
		break;
	}
	default:
		return -ENOTSUPP;
	}

	if (!valid_class_id(dev, class_id))
		return -EINVAL;

	return t4_sched_class_bind_unbind_op(pi, arg, type, false);
}
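
/* Usage sketch (comment only, not compiled in): bind Tx queue 0 of a
 * port to class 2, then drop the binding later.  The queue and class
 * indices are example values; a queue is bound to at most one class,
 * and re-binding implicitly unbinds first (see t4_sched_queue_bind()).
 *
 *	struct ch_sched_queue qe = {
 *		.queue = 0,
 *		.class = 2,
 *	};
 *	int ret;
 *
 *	ret = cxgb4_sched_class_bind(dev, &qe, SCHED_QUEUE);
 *	if (!ret)
 *		ret = cxgb4_sched_class_unbind(dev, &qe, SCHED_QUEUE);
 */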

/* If @p is NULL, fetch any available unused class */
static struct sched_class *t4_sched_class_lookup(struct port_info *pi,
						const struct ch_sched_params *p)
{
	struct sched_table *s = pi->sched_tbl;
	struct sched_class *found = NULL;
	struct sched_class *e, *end;

	if (!p) {
		/* Get any available unused class */
		end = &s->tab[s->sched_size];
		for (e = &s->tab[0]; e != end; ++e) {
			if (e->state == SCHED_STATE_UNUSED) {
				found = e;
				break;
			}
		}
	} else {
		/* Look for a class with matching scheduling parameters */
		struct ch_sched_params info;
		struct ch_sched_params tp;

		memcpy(&tp, p, sizeof(tp));
		/* Don't try to match class parameter */
		tp.u.params.class = SCHED_CLS_NONE;

		end = &s->tab[s->sched_size];
		for (e = &s->tab[0]; e != end; ++e) {
			if (e->state == SCHED_STATE_UNUSED)
				continue;

			memcpy(&info, &e->info, sizeof(info));
			/* Don't try to match class parameter */
			info.u.params.class = SCHED_CLS_NONE;

			if ((info.type == tp.type) &&
			    (!memcmp(&info.u.params, &tp.u.params,
				     sizeof(info.u.params)))) {
				found = e;
				break;
			}
		}
	}

	return found;
}

static struct sched_class *t4_sched_class_alloc(struct port_info *pi,
						struct ch_sched_params *p)
{
	struct sched_class *e = NULL;
	u8 class_id;
	int err;

	if (!p)
		return NULL;

	class_id = p->u.params.class;

	/* Only accept search for existing class with matching params
	 * or allocation of new class with specified params
	 */
	if (class_id != SCHED_CLS_NONE)
		return NULL;

	/* See if there's an existing class with the same requested sched
	 * params. Classes can only be shared among FLOWC types. For
	 * other types, always request a new class.
	 */
	if (p->u.params.mode == SCHED_CLASS_MODE_FLOW)
		e = t4_sched_class_lookup(pi, p);

	if (!e) {
		struct ch_sched_params np;

		/* Fetch any available unused class */
		e = t4_sched_class_lookup(pi, NULL);
		if (!e)
			return NULL;

		memcpy(&np, p, sizeof(np));
		np.u.params.class = e->idx;
		/* New class */
		err = t4_sched_class_fw_cmd(pi, &np, SCHED_FW_OP_ADD);
		if (err)
			return NULL;
		memcpy(&e->info, &np, sizeof(e->info));
		atomic_set(&e->refcnt, 0);
		e->state = SCHED_STATE_ACTIVE;
	}

	return e;
}

/**
 * cxgb4_sched_class_alloc - allocate a scheduling class
 * @dev: net_device pointer
 * @p: new scheduling class to create.
 *
 * Returns a pointer to the scheduling class allocated, or NULL on
 * failure.  @p must specify SCHED_CLS_NONE as the class id, in which
 * case a free class is picked from the port's table.  If a class with
 * matching scheduling parameters already exists (flow mode only), that
 * class is shared and returned instead.
 */
struct sched_class *cxgb4_sched_class_alloc(struct net_device *dev,
					    struct ch_sched_params *p)
{
	struct port_info *pi = netdev2pinfo(dev);
	u8 class_id;

	if (!p || !can_sched(dev))
		return NULL;

	class_id = p->u.params.class;
	if (!valid_class_id(dev, class_id))
		return NULL;

	return t4_sched_class_alloc(pi, p);
}
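
/* Usage sketch (comment only, not compiled in): request a class-level
 * rate limit of 100 Mbps and let the driver pick a free class.  All
 * field values are illustrative, and a port_info *pi is assumed to be
 * in scope for the channel:
 *
 *	struct ch_sched_params cp = {
 *		.type = SCHED_CLASS_TYPE_PACKET,
 *		.u.params.level    = SCHED_CLASS_LEVEL_CL_RL,
 *		.u.params.mode     = SCHED_CLASS_MODE_CLASS,
 *		.u.params.rateunit = SCHED_CLASS_RATEUNIT_BITS,
 *		.u.params.ratemode = SCHED_CLASS_RATEMODE_ABS,
 *		.u.params.channel  = pi->tx_chan,
 *		.u.params.class    = SCHED_CLS_NONE,
 *		.u.params.maxrate  = 100000,	(Kbps)
 *	};
 *	struct sched_class *e = cxgb4_sched_class_alloc(dev, &cp);
 */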

/**
 * cxgb4_sched_class_free - free a scheduling class
 * @dev: net_device pointer
 * @classid: scheduling class id
 *
 * Frees a scheduling class if there are no users.
 */
void cxgb4_sched_class_free(struct net_device *dev, u8 classid)
{
	struct port_info *pi = netdev2pinfo(dev);
	struct sched_table *s = pi->sched_tbl;
	struct ch_sched_params p;
	struct sched_class *e;
	u32 speed;
	int ret;

	e = &s->tab[classid];
	if (!atomic_read(&e->refcnt) && e->state != SCHED_STATE_UNUSED) {
		/* Port based rate limiting needs explicit reset back
		 * to max rate. But, we'll do explicit reset for all
		 * types, instead of just port based type, to be on
		 * the safer side.
		 */
		memcpy(&p, &e->info, sizeof(p));
		/* Always reset mode to 0. Otherwise, FLOWC mode will
		 * still be enabled even after resetting the traffic
		 * class.
		 */
		p.u.params.mode = 0;
		p.u.params.minrate = 0;
		p.u.params.pktsize = 0;

		ret = t4_get_link_params(pi, NULL, &speed, NULL);
		if (!ret)
			p.u.params.maxrate = speed * 1000; /* Mbps to Kbps */
		else
			p.u.params.maxrate = SCHED_MAX_RATE_KBPS;

		t4_sched_class_fw_cmd(pi, &p, SCHED_FW_OP_DEL);

		e->state = SCHED_STATE_UNUSED;
		memset(&e->info, 0, sizeof(e->info));
	}
}

static void t4_sched_class_free(struct net_device *dev, struct sched_class *e)
{
	struct port_info *pi = netdev2pinfo(dev);

	t4_sched_class_unbind_all(pi, e, e->bind_type);
	cxgb4_sched_class_free(dev, e->idx);
}

struct sched_table *t4_init_sched(unsigned int sched_size)
{
	struct sched_table *s;
	unsigned int i;

	s = kvzalloc(struct_size(s, tab, sched_size), GFP_KERNEL);
	if (!s)
		return NULL;

	s->sched_size = sched_size;

	for (i = 0; i < s->sched_size; i++) {
		/* Entries are already zeroed by kvzalloc() */
		s->tab[i].idx = i;
		s->tab[i].state = SCHED_STATE_UNUSED;
		INIT_LIST_HEAD(&s->tab[i].entry_list);
		atomic_set(&s->tab[i].refcnt, 0);
	}
	return s;
}
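
/* Lifecycle sketch (comment only, not compiled in): a port's scheduler
 * table is typically sized from the firmware's traffic-class count and
 * attached to the port at probe time, then torn down per adapter; the
 * nsched_cls parameter name reflects the driver's adapter params and
 * the exact call sites may differ:
 *
 *	pi->sched_tbl = t4_init_sched(adap->params.nsched_cls);
 *	if (!pi->sched_tbl)
 *		dev_warn(adap->pdev_dev,
 *			 "could not allocate TX scheduler table\n");
 *	...
 *	t4_cleanup_sched(adap);	(at adapter teardown)
 */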

void t4_cleanup_sched(struct adapter *adap)
{
	struct sched_table *s;
	unsigned int j, i;

	for_each_port(adap, j) {
		struct port_info *pi = netdev2pinfo(adap->port[j]);

		s = pi->sched_tbl;
		if (!s)
			continue;

		for (i = 0; i < s->sched_size; i++) {
			struct sched_class *e;

			e = &s->tab[i];
			if (e->state == SCHED_STATE_ACTIVE)
				t4_sched_class_free(adap->port[j], e);
		}
		kvfree(s);
	}
}