14ec4762dSRahul Lakkireddy // SPDX-License-Identifier: GPL-2.0-only
24ec4762dSRahul Lakkireddy /* Copyright (C) 2019 Chelsio Communications. All rights reserved. */
34ec4762dSRahul Lakkireddy
44ec4762dSRahul Lakkireddy #include "cxgb4.h"
54ec4762dSRahul Lakkireddy #include "cxgb4_tc_matchall.h"
64ec4762dSRahul Lakkireddy #include "sched.h"
721c4c60bSRahul Lakkireddy #include "cxgb4_uld.h"
821c4c60bSRahul Lakkireddy #include "cxgb4_filter.h"
921c4c60bSRahul Lakkireddy #include "cxgb4_tc_flower.h"
104ec4762dSRahul Lakkireddy
cxgb4_policer_validate(const struct flow_action * action,const struct flow_action_entry * act,struct netlink_ext_ack * extack)11*d97b4b10SJianbo Liu static int cxgb4_policer_validate(const struct flow_action *action,
12*d97b4b10SJianbo Liu const struct flow_action_entry *act,
13*d97b4b10SJianbo Liu struct netlink_ext_ack *extack)
14*d97b4b10SJianbo Liu {
15*d97b4b10SJianbo Liu if (act->police.exceed.act_id != FLOW_ACTION_DROP) {
16*d97b4b10SJianbo Liu NL_SET_ERR_MSG_MOD(extack,
17*d97b4b10SJianbo Liu "Offload not supported when exceed action is not drop");
18*d97b4b10SJianbo Liu return -EOPNOTSUPP;
19*d97b4b10SJianbo Liu }
20*d97b4b10SJianbo Liu
21*d97b4b10SJianbo Liu if (act->police.notexceed.act_id != FLOW_ACTION_PIPE &&
22*d97b4b10SJianbo Liu act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
23*d97b4b10SJianbo Liu NL_SET_ERR_MSG_MOD(extack,
24*d97b4b10SJianbo Liu "Offload not supported when conform action is not pipe or ok");
25*d97b4b10SJianbo Liu return -EOPNOTSUPP;
26*d97b4b10SJianbo Liu }
27*d97b4b10SJianbo Liu
28*d97b4b10SJianbo Liu if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT &&
29*d97b4b10SJianbo Liu !flow_action_is_last_entry(action, act)) {
30*d97b4b10SJianbo Liu NL_SET_ERR_MSG_MOD(extack,
31*d97b4b10SJianbo Liu "Offload not supported when conform action is ok, but action is not last");
32*d97b4b10SJianbo Liu return -EOPNOTSUPP;
33*d97b4b10SJianbo Liu }
34*d97b4b10SJianbo Liu
35*d97b4b10SJianbo Liu if (act->police.peakrate_bytes_ps ||
36*d97b4b10SJianbo Liu act->police.avrate || act->police.overhead) {
37*d97b4b10SJianbo Liu NL_SET_ERR_MSG_MOD(extack,
38*d97b4b10SJianbo Liu "Offload not supported when peakrate/avrate/overhead is configured");
39*d97b4b10SJianbo Liu return -EOPNOTSUPP;
40*d97b4b10SJianbo Liu }
41*d97b4b10SJianbo Liu
42*d97b4b10SJianbo Liu if (act->police.rate_pkt_ps) {
43*d97b4b10SJianbo Liu NL_SET_ERR_MSG_MOD(extack,
44*d97b4b10SJianbo Liu "QoS offload not support packets per second");
45*d97b4b10SJianbo Liu return -EOPNOTSUPP;
46*d97b4b10SJianbo Liu }
47*d97b4b10SJianbo Liu
48*d97b4b10SJianbo Liu return 0;
49*d97b4b10SJianbo Liu }
50*d97b4b10SJianbo Liu
cxgb4_matchall_egress_validate(struct net_device * dev,struct tc_cls_matchall_offload * cls)514ec4762dSRahul Lakkireddy static int cxgb4_matchall_egress_validate(struct net_device *dev,
524ec4762dSRahul Lakkireddy struct tc_cls_matchall_offload *cls)
534ec4762dSRahul Lakkireddy {
544ec4762dSRahul Lakkireddy struct netlink_ext_ack *extack = cls->common.extack;
554ec4762dSRahul Lakkireddy struct flow_action *actions = &cls->rule->action;
564ec4762dSRahul Lakkireddy struct port_info *pi = netdev2pinfo(dev);
574ec4762dSRahul Lakkireddy struct flow_action_entry *entry;
58c856e2b6SRahul Lakkireddy struct ch_sched_queue qe;
59c856e2b6SRahul Lakkireddy struct sched_class *e;
604ec4762dSRahul Lakkireddy u64 max_link_rate;
614ec4762dSRahul Lakkireddy u32 i, speed;
624ec4762dSRahul Lakkireddy int ret;
634ec4762dSRahul Lakkireddy
644ec4762dSRahul Lakkireddy if (!flow_action_has_entries(actions)) {
654ec4762dSRahul Lakkireddy NL_SET_ERR_MSG_MOD(extack,
664ec4762dSRahul Lakkireddy "Egress MATCHALL offload needs at least 1 policing action");
674ec4762dSRahul Lakkireddy return -EINVAL;
684ec4762dSRahul Lakkireddy } else if (!flow_offload_has_one_action(actions)) {
694ec4762dSRahul Lakkireddy NL_SET_ERR_MSG_MOD(extack,
704ec4762dSRahul Lakkireddy "Egress MATCHALL offload only supports 1 policing action");
714ec4762dSRahul Lakkireddy return -EINVAL;
724ec4762dSRahul Lakkireddy } else if (pi->tc_block_shared) {
734ec4762dSRahul Lakkireddy NL_SET_ERR_MSG_MOD(extack,
744ec4762dSRahul Lakkireddy "Egress MATCHALL offload not supported with shared blocks");
754ec4762dSRahul Lakkireddy return -EINVAL;
764ec4762dSRahul Lakkireddy }
774ec4762dSRahul Lakkireddy
784ec4762dSRahul Lakkireddy ret = t4_get_link_params(pi, NULL, &speed, NULL);
794ec4762dSRahul Lakkireddy if (ret) {
804ec4762dSRahul Lakkireddy NL_SET_ERR_MSG_MOD(extack,
814ec4762dSRahul Lakkireddy "Failed to get max speed supported by the link");
824ec4762dSRahul Lakkireddy return -EINVAL;
834ec4762dSRahul Lakkireddy }
844ec4762dSRahul Lakkireddy
854ec4762dSRahul Lakkireddy /* Convert from Mbps to bps */
864ec4762dSRahul Lakkireddy max_link_rate = (u64)speed * 1000 * 1000;
874ec4762dSRahul Lakkireddy
884ec4762dSRahul Lakkireddy flow_action_for_each(i, entry, actions) {
894ec4762dSRahul Lakkireddy switch (entry->id) {
904ec4762dSRahul Lakkireddy case FLOW_ACTION_POLICE:
91*d97b4b10SJianbo Liu ret = cxgb4_policer_validate(actions, entry, extack);
92*d97b4b10SJianbo Liu if (ret)
93*d97b4b10SJianbo Liu return ret;
94*d97b4b10SJianbo Liu
954ec4762dSRahul Lakkireddy /* Convert bytes per second to bits per second */
964ec4762dSRahul Lakkireddy if (entry->police.rate_bytes_ps * 8 > max_link_rate) {
974ec4762dSRahul Lakkireddy NL_SET_ERR_MSG_MOD(extack,
984ec4762dSRahul Lakkireddy "Specified policing max rate is larger than underlying link speed");
994ec4762dSRahul Lakkireddy return -ERANGE;
1004ec4762dSRahul Lakkireddy }
1014ec4762dSRahul Lakkireddy break;
1024ec4762dSRahul Lakkireddy default:
1034ec4762dSRahul Lakkireddy NL_SET_ERR_MSG_MOD(extack,
1044ec4762dSRahul Lakkireddy "Only policing action supported with Egress MATCHALL offload");
1054ec4762dSRahul Lakkireddy return -EOPNOTSUPP;
1064ec4762dSRahul Lakkireddy }
1074ec4762dSRahul Lakkireddy }
1084ec4762dSRahul Lakkireddy
109c856e2b6SRahul Lakkireddy for (i = 0; i < pi->nqsets; i++) {
110c856e2b6SRahul Lakkireddy memset(&qe, 0, sizeof(qe));
111c856e2b6SRahul Lakkireddy qe.queue = i;
112c856e2b6SRahul Lakkireddy
113c856e2b6SRahul Lakkireddy e = cxgb4_sched_queue_lookup(dev, &qe);
114c856e2b6SRahul Lakkireddy if (e && e->info.u.params.level != SCHED_CLASS_LEVEL_CH_RL) {
115c856e2b6SRahul Lakkireddy NL_SET_ERR_MSG_MOD(extack,
116c856e2b6SRahul Lakkireddy "Some queues are already bound to different class");
117c856e2b6SRahul Lakkireddy return -EBUSY;
118c856e2b6SRahul Lakkireddy }
119c856e2b6SRahul Lakkireddy }
120c856e2b6SRahul Lakkireddy
1214ec4762dSRahul Lakkireddy return 0;
1224ec4762dSRahul Lakkireddy }
1234ec4762dSRahul Lakkireddy
/* Bind all of the port's TX queues to scheduling class @tc.
 * On failure, unbind every queue bound so far before returning.
 */
static int cxgb4_matchall_tc_bind_queues(struct net_device *dev, u32 tc)
{
	struct port_info *pi = netdev2pinfo(dev);
	struct ch_sched_queue qe;
	int ret;
	u32 i;

	for (i = 0; i < pi->nqsets; i++) {
		qe.queue = i;
		qe.class = tc;
		ret = cxgb4_sched_class_bind(dev, &qe, SCHED_QUEUE);
		if (ret)
			goto out_free;
	}

	return 0;

out_free:
	/* Roll back queues [0, i) in reverse order. */
	while (i--) {
		qe.queue = i;
		qe.class = SCHED_CLS_NONE;
		cxgb4_sched_class_unbind(dev, &qe, SCHED_QUEUE);
	}

	return ret;
}
150c856e2b6SRahul Lakkireddy
cxgb4_matchall_tc_unbind_queues(struct net_device * dev)151c856e2b6SRahul Lakkireddy static void cxgb4_matchall_tc_unbind_queues(struct net_device *dev)
152c856e2b6SRahul Lakkireddy {
153c856e2b6SRahul Lakkireddy struct port_info *pi = netdev2pinfo(dev);
154c856e2b6SRahul Lakkireddy struct ch_sched_queue qe;
155c856e2b6SRahul Lakkireddy u32 i;
156c856e2b6SRahul Lakkireddy
157c856e2b6SRahul Lakkireddy for (i = 0; i < pi->nqsets; i++) {
158c856e2b6SRahul Lakkireddy qe.queue = i;
159c856e2b6SRahul Lakkireddy qe.class = SCHED_CLS_NONE;
160c856e2b6SRahul Lakkireddy cxgb4_sched_class_unbind(dev, &qe, SCHED_QUEUE);
161c856e2b6SRahul Lakkireddy }
162c856e2b6SRahul Lakkireddy }
163c856e2b6SRahul Lakkireddy
cxgb4_matchall_alloc_tc(struct net_device * dev,struct tc_cls_matchall_offload * cls)1644ec4762dSRahul Lakkireddy static int cxgb4_matchall_alloc_tc(struct net_device *dev,
1654ec4762dSRahul Lakkireddy struct tc_cls_matchall_offload *cls)
1664ec4762dSRahul Lakkireddy {
1674ec4762dSRahul Lakkireddy struct ch_sched_params p = {
1684ec4762dSRahul Lakkireddy .type = SCHED_CLASS_TYPE_PACKET,
1694ec4762dSRahul Lakkireddy .u.params.level = SCHED_CLASS_LEVEL_CH_RL,
1704ec4762dSRahul Lakkireddy .u.params.mode = SCHED_CLASS_MODE_CLASS,
1714ec4762dSRahul Lakkireddy .u.params.rateunit = SCHED_CLASS_RATEUNIT_BITS,
1724ec4762dSRahul Lakkireddy .u.params.ratemode = SCHED_CLASS_RATEMODE_ABS,
1734ec4762dSRahul Lakkireddy .u.params.class = SCHED_CLS_NONE,
1744ec4762dSRahul Lakkireddy .u.params.minrate = 0,
1754ec4762dSRahul Lakkireddy .u.params.weight = 0,
1764ec4762dSRahul Lakkireddy .u.params.pktsize = dev->mtu,
1774ec4762dSRahul Lakkireddy };
1784ec4762dSRahul Lakkireddy struct netlink_ext_ack *extack = cls->common.extack;
1794ec4762dSRahul Lakkireddy struct cxgb4_tc_port_matchall *tc_port_matchall;
1804ec4762dSRahul Lakkireddy struct port_info *pi = netdev2pinfo(dev);
1814ec4762dSRahul Lakkireddy struct adapter *adap = netdev2adap(dev);
1824ec4762dSRahul Lakkireddy struct flow_action_entry *entry;
1834ec4762dSRahul Lakkireddy struct sched_class *e;
184c856e2b6SRahul Lakkireddy int ret;
1854ec4762dSRahul Lakkireddy u32 i;
1864ec4762dSRahul Lakkireddy
1874ec4762dSRahul Lakkireddy tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
1884ec4762dSRahul Lakkireddy
1894ec4762dSRahul Lakkireddy flow_action_for_each(i, entry, &cls->rule->action)
1904ec4762dSRahul Lakkireddy if (entry->id == FLOW_ACTION_POLICE)
1914ec4762dSRahul Lakkireddy break;
192*d97b4b10SJianbo Liu
193*d97b4b10SJianbo Liu ret = cxgb4_policer_validate(&cls->rule->action, entry, extack);
194*d97b4b10SJianbo Liu if (ret)
195*d97b4b10SJianbo Liu return ret;
196*d97b4b10SJianbo Liu
1974ec4762dSRahul Lakkireddy /* Convert from bytes per second to Kbps */
1984ec4762dSRahul Lakkireddy p.u.params.maxrate = div_u64(entry->police.rate_bytes_ps * 8, 1000);
1994ec4762dSRahul Lakkireddy p.u.params.channel = pi->tx_chan;
2004ec4762dSRahul Lakkireddy e = cxgb4_sched_class_alloc(dev, &p);
2014ec4762dSRahul Lakkireddy if (!e) {
2024ec4762dSRahul Lakkireddy NL_SET_ERR_MSG_MOD(extack,
2034ec4762dSRahul Lakkireddy "No free traffic class available for policing action");
2044ec4762dSRahul Lakkireddy return -ENOMEM;
2054ec4762dSRahul Lakkireddy }
2064ec4762dSRahul Lakkireddy
207c856e2b6SRahul Lakkireddy ret = cxgb4_matchall_tc_bind_queues(dev, e->idx);
208c856e2b6SRahul Lakkireddy if (ret) {
209c856e2b6SRahul Lakkireddy NL_SET_ERR_MSG_MOD(extack,
210c856e2b6SRahul Lakkireddy "Could not bind queues to traffic class");
211c856e2b6SRahul Lakkireddy goto out_free;
212c856e2b6SRahul Lakkireddy }
213c856e2b6SRahul Lakkireddy
2144ec4762dSRahul Lakkireddy tc_port_matchall->egress.hwtc = e->idx;
2154ec4762dSRahul Lakkireddy tc_port_matchall->egress.cookie = cls->cookie;
2164ec4762dSRahul Lakkireddy tc_port_matchall->egress.state = CXGB4_MATCHALL_STATE_ENABLED;
2174ec4762dSRahul Lakkireddy return 0;
218c856e2b6SRahul Lakkireddy
219c856e2b6SRahul Lakkireddy out_free:
220c856e2b6SRahul Lakkireddy cxgb4_sched_class_free(dev, e->idx);
221c856e2b6SRahul Lakkireddy return ret;
2224ec4762dSRahul Lakkireddy }
2234ec4762dSRahul Lakkireddy
cxgb4_matchall_free_tc(struct net_device * dev)2244ec4762dSRahul Lakkireddy static void cxgb4_matchall_free_tc(struct net_device *dev)
2254ec4762dSRahul Lakkireddy {
2264ec4762dSRahul Lakkireddy struct cxgb4_tc_port_matchall *tc_port_matchall;
2274ec4762dSRahul Lakkireddy struct port_info *pi = netdev2pinfo(dev);
2284ec4762dSRahul Lakkireddy struct adapter *adap = netdev2adap(dev);
2294ec4762dSRahul Lakkireddy
2304ec4762dSRahul Lakkireddy tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
231c856e2b6SRahul Lakkireddy cxgb4_matchall_tc_unbind_queues(dev);
2324ec4762dSRahul Lakkireddy cxgb4_sched_class_free(dev, tc_port_matchall->egress.hwtc);
2334ec4762dSRahul Lakkireddy
2344ec4762dSRahul Lakkireddy tc_port_matchall->egress.hwtc = SCHED_CLS_NONE;
2354ec4762dSRahul Lakkireddy tc_port_matchall->egress.cookie = 0;
2364ec4762dSRahul Lakkireddy tc_port_matchall->egress.state = CXGB4_MATCHALL_STATE_DISABLED;
2374ec4762dSRahul Lakkireddy }
2384ec4762dSRahul Lakkireddy
cxgb4_matchall_mirror_alloc(struct net_device * dev,struct tc_cls_matchall_offload * cls)239fd2261d8SRahul Lakkireddy static int cxgb4_matchall_mirror_alloc(struct net_device *dev,
240fd2261d8SRahul Lakkireddy struct tc_cls_matchall_offload *cls)
241fd2261d8SRahul Lakkireddy {
242fd2261d8SRahul Lakkireddy struct netlink_ext_ack *extack = cls->common.extack;
243fd2261d8SRahul Lakkireddy struct cxgb4_tc_port_matchall *tc_port_matchall;
244fd2261d8SRahul Lakkireddy struct port_info *pi = netdev2pinfo(dev);
245fd2261d8SRahul Lakkireddy struct adapter *adap = netdev2adap(dev);
246fd2261d8SRahul Lakkireddy struct flow_action_entry *act;
247fd2261d8SRahul Lakkireddy int ret;
248fd2261d8SRahul Lakkireddy u32 i;
249fd2261d8SRahul Lakkireddy
250fd2261d8SRahul Lakkireddy tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
251fd2261d8SRahul Lakkireddy flow_action_for_each(i, act, &cls->rule->action) {
252fd2261d8SRahul Lakkireddy if (act->id == FLOW_ACTION_MIRRED) {
253fd2261d8SRahul Lakkireddy ret = cxgb4_port_mirror_alloc(dev);
254fd2261d8SRahul Lakkireddy if (ret) {
255fd2261d8SRahul Lakkireddy NL_SET_ERR_MSG_MOD(extack,
256fd2261d8SRahul Lakkireddy "Couldn't allocate mirror");
257fd2261d8SRahul Lakkireddy return ret;
258fd2261d8SRahul Lakkireddy }
259fd2261d8SRahul Lakkireddy
260fd2261d8SRahul Lakkireddy tc_port_matchall->ingress.viid_mirror = pi->viid_mirror;
261fd2261d8SRahul Lakkireddy break;
262fd2261d8SRahul Lakkireddy }
263fd2261d8SRahul Lakkireddy }
264fd2261d8SRahul Lakkireddy
265fd2261d8SRahul Lakkireddy return 0;
266fd2261d8SRahul Lakkireddy }
267fd2261d8SRahul Lakkireddy
cxgb4_matchall_mirror_free(struct net_device * dev)268fd2261d8SRahul Lakkireddy static void cxgb4_matchall_mirror_free(struct net_device *dev)
269fd2261d8SRahul Lakkireddy {
270fd2261d8SRahul Lakkireddy struct cxgb4_tc_port_matchall *tc_port_matchall;
271fd2261d8SRahul Lakkireddy struct port_info *pi = netdev2pinfo(dev);
272fd2261d8SRahul Lakkireddy struct adapter *adap = netdev2adap(dev);
273fd2261d8SRahul Lakkireddy
274fd2261d8SRahul Lakkireddy tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
275fd2261d8SRahul Lakkireddy if (!tc_port_matchall->ingress.viid_mirror)
276fd2261d8SRahul Lakkireddy return;
277fd2261d8SRahul Lakkireddy
278fd2261d8SRahul Lakkireddy cxgb4_port_mirror_free(dev);
279fd2261d8SRahul Lakkireddy tc_port_matchall->ingress.viid_mirror = 0;
280fd2261d8SRahul Lakkireddy }
281fd2261d8SRahul Lakkireddy
/* Remove the hardware filter of the given type (IPv4/IPv6) that backs
 * the ingress matchall rule, and clear its cached TID on success.
 */
static int cxgb4_matchall_del_filter(struct net_device *dev, u8 filter_type)
{
	struct cxgb4_tc_port_matchall *tc_port_matchall;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);
	int ret;

	tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
	ret = cxgb4_del_filter(dev, tc_port_matchall->ingress.tid[filter_type],
			       &tc_port_matchall->ingress.fs[filter_type]);
	if (ret)
		return ret;

	tc_port_matchall->ingress.tid[filter_type] = 0;
	return 0;
}
29859b328cfSRahul Lakkireddy
/* Install one hardware filter (per address family, selected by
 * @filter_type) matching all traffic destined to this PF/VF, carrying
 * the rule's actions.
 */
static int cxgb4_matchall_add_filter(struct net_device *dev,
				     struct tc_cls_matchall_offload *cls,
				     u8 filter_type)
{
	struct netlink_ext_ack *extack = cls->common.extack;
	struct cxgb4_tc_port_matchall *tc_port_matchall;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);
	struct ch_filter_specification *fs;
	int ret, fidx;

	/* Get a free filter entry TID, where we can insert this new
	 * rule. Only insert rule if its prio doesn't conflict with
	 * existing rules.
	 */
	fidx = cxgb4_get_free_ftid(dev, filter_type ? PF_INET6 : PF_INET,
				   false, cls->common.prio);
	if (fidx < 0) {
		NL_SET_ERR_MSG_MOD(extack,
				   "No free LETCAM index available");
		return -ENOMEM;
	}

	tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
	fs = &tc_port_matchall->ingress.fs[filter_type];
	memset(fs, 0, sizeof(*fs));

	/* Indices below nhpftids live in the high-priority region. */
	if (fidx < adap->tids.nhpftids)
		fs->prio = 1;
	fs->tc_prio = cls->common.prio;
	fs->tc_cookie = cls->cookie;
	fs->type = filter_type;
	fs->hitcnts = 1;

	/* Match only traffic for this PF/VF pair. */
	fs->val.pfvf_vld = 1;
	fs->val.pf = adap->pf;
	fs->val.vf = pi->vin;

	cxgb4_process_flow_actions(dev, &cls->rule->action, fs);

	ret = cxgb4_set_filter(dev, fidx, fs);
	if (ret)
		return ret;

	tc_port_matchall->ingress.tid[filter_type] = fidx;
	return 0;
}
34659b328cfSRahul Lakkireddy
cxgb4_matchall_alloc_filter(struct net_device * dev,struct tc_cls_matchall_offload * cls)34759b328cfSRahul Lakkireddy static int cxgb4_matchall_alloc_filter(struct net_device *dev,
34859b328cfSRahul Lakkireddy struct tc_cls_matchall_offload *cls)
34959b328cfSRahul Lakkireddy {
35059b328cfSRahul Lakkireddy struct cxgb4_tc_port_matchall *tc_port_matchall;
35159b328cfSRahul Lakkireddy struct port_info *pi = netdev2pinfo(dev);
35259b328cfSRahul Lakkireddy struct adapter *adap = netdev2adap(dev);
35359b328cfSRahul Lakkireddy int ret, i;
35459b328cfSRahul Lakkireddy
35559b328cfSRahul Lakkireddy tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
35659b328cfSRahul Lakkireddy
35759b328cfSRahul Lakkireddy ret = cxgb4_matchall_mirror_alloc(dev, cls);
35859b328cfSRahul Lakkireddy if (ret)
35959b328cfSRahul Lakkireddy return ret;
36059b328cfSRahul Lakkireddy
36159b328cfSRahul Lakkireddy for (i = 0; i < CXGB4_FILTER_TYPE_MAX; i++) {
36259b328cfSRahul Lakkireddy ret = cxgb4_matchall_add_filter(dev, cls, i);
36359b328cfSRahul Lakkireddy if (ret)
36459b328cfSRahul Lakkireddy goto out_free;
36559b328cfSRahul Lakkireddy }
36659b328cfSRahul Lakkireddy
36721c4c60bSRahul Lakkireddy tc_port_matchall->ingress.state = CXGB4_MATCHALL_STATE_ENABLED;
36821c4c60bSRahul Lakkireddy return 0;
369fd2261d8SRahul Lakkireddy
370fd2261d8SRahul Lakkireddy out_free:
37159b328cfSRahul Lakkireddy while (i-- > 0)
37259b328cfSRahul Lakkireddy cxgb4_matchall_del_filter(dev, i);
37359b328cfSRahul Lakkireddy
374fd2261d8SRahul Lakkireddy cxgb4_matchall_mirror_free(dev);
375fd2261d8SRahul Lakkireddy return ret;
37621c4c60bSRahul Lakkireddy }
37721c4c60bSRahul Lakkireddy
cxgb4_matchall_free_filter(struct net_device * dev)37821c4c60bSRahul Lakkireddy static int cxgb4_matchall_free_filter(struct net_device *dev)
37921c4c60bSRahul Lakkireddy {
38021c4c60bSRahul Lakkireddy struct cxgb4_tc_port_matchall *tc_port_matchall;
38121c4c60bSRahul Lakkireddy struct port_info *pi = netdev2pinfo(dev);
38221c4c60bSRahul Lakkireddy struct adapter *adap = netdev2adap(dev);
38321c4c60bSRahul Lakkireddy int ret;
38459b328cfSRahul Lakkireddy u8 i;
38521c4c60bSRahul Lakkireddy
38621c4c60bSRahul Lakkireddy tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
38721c4c60bSRahul Lakkireddy
38859b328cfSRahul Lakkireddy for (i = 0; i < CXGB4_FILTER_TYPE_MAX; i++) {
38959b328cfSRahul Lakkireddy ret = cxgb4_matchall_del_filter(dev, i);
39021c4c60bSRahul Lakkireddy if (ret)
39121c4c60bSRahul Lakkireddy return ret;
39259b328cfSRahul Lakkireddy }
39321c4c60bSRahul Lakkireddy
394fd2261d8SRahul Lakkireddy cxgb4_matchall_mirror_free(dev);
395fd2261d8SRahul Lakkireddy
39621c4c60bSRahul Lakkireddy tc_port_matchall->ingress.packets = 0;
39721c4c60bSRahul Lakkireddy tc_port_matchall->ingress.bytes = 0;
39821c4c60bSRahul Lakkireddy tc_port_matchall->ingress.last_used = 0;
39921c4c60bSRahul Lakkireddy tc_port_matchall->ingress.state = CXGB4_MATCHALL_STATE_DISABLED;
40021c4c60bSRahul Lakkireddy return 0;
40121c4c60bSRahul Lakkireddy }
40221c4c60bSRahul Lakkireddy
/* Offload a new matchall rule. Only one rule per direction per port is
 * supported: ingress rules become LE-TCAM filters, egress rules become
 * a rate-limiting traffic class.
 */
int cxgb4_tc_matchall_replace(struct net_device *dev,
			      struct tc_cls_matchall_offload *cls_matchall,
			      bool ingress)
{
	struct netlink_ext_ack *extack = cls_matchall->common.extack;
	struct cxgb4_tc_port_matchall *tc_port_matchall;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);
	int ret;

	tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
	if (ingress) {
		if (tc_port_matchall->ingress.state ==
		    CXGB4_MATCHALL_STATE_ENABLED) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Only 1 Ingress MATCHALL can be offloaded");
			return -ENOMEM;
		}

		ret = cxgb4_validate_flow_actions(dev,
						  &cls_matchall->rule->action,
						  extack, 1);
		if (ret)
			return ret;

		return cxgb4_matchall_alloc_filter(dev, cls_matchall);
	}

	if (tc_port_matchall->egress.state == CXGB4_MATCHALL_STATE_ENABLED) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Only 1 Egress MATCHALL can be offloaded");
		return -ENOMEM;
	}

	ret = cxgb4_matchall_egress_validate(dev, cls_matchall);
	if (ret)
		return ret;

	return cxgb4_matchall_alloc_tc(dev, cls_matchall);
}
4434ec4762dSRahul Lakkireddy
/* Remove an offloaded matchall rule identified by its cookie. */
int cxgb4_tc_matchall_destroy(struct net_device *dev,
			      struct tc_cls_matchall_offload *cls_matchall,
			      bool ingress)
{
	struct cxgb4_tc_port_matchall *tc_port_matchall;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);

	tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
	if (ingress) {
		/* All the filter types of this matchall rule save the
		 * same cookie. So, checking for the first one is
		 * enough.
		 */
		if (cls_matchall->cookie !=
		    tc_port_matchall->ingress.fs[0].tc_cookie)
			return -ENOENT;

		return cxgb4_matchall_free_filter(dev);
	}

	if (cls_matchall->cookie != tc_port_matchall->egress.cookie)
		return -ENOENT;

	cxgb4_matchall_free_tc(dev);
	return 0;
}
4714ec4762dSRahul Lakkireddy
cxgb4_tc_matchall_stats(struct net_device * dev,struct tc_cls_matchall_offload * cls_matchall)47221c4c60bSRahul Lakkireddy int cxgb4_tc_matchall_stats(struct net_device *dev,
47321c4c60bSRahul Lakkireddy struct tc_cls_matchall_offload *cls_matchall)
47421c4c60bSRahul Lakkireddy {
47559b328cfSRahul Lakkireddy u64 tmp_packets, tmp_bytes, packets = 0, bytes = 0;
47621c4c60bSRahul Lakkireddy struct cxgb4_tc_port_matchall *tc_port_matchall;
47759b328cfSRahul Lakkireddy struct cxgb4_matchall_ingress_entry *ingress;
47821c4c60bSRahul Lakkireddy struct port_info *pi = netdev2pinfo(dev);
47921c4c60bSRahul Lakkireddy struct adapter *adap = netdev2adap(dev);
48021c4c60bSRahul Lakkireddy int ret;
48159b328cfSRahul Lakkireddy u8 i;
48221c4c60bSRahul Lakkireddy
48321c4c60bSRahul Lakkireddy tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
48421c4c60bSRahul Lakkireddy if (tc_port_matchall->ingress.state == CXGB4_MATCHALL_STATE_DISABLED)
48521c4c60bSRahul Lakkireddy return -ENOENT;
48621c4c60bSRahul Lakkireddy
48759b328cfSRahul Lakkireddy ingress = &tc_port_matchall->ingress;
48859b328cfSRahul Lakkireddy for (i = 0; i < CXGB4_FILTER_TYPE_MAX; i++) {
48959b328cfSRahul Lakkireddy ret = cxgb4_get_filter_counters(dev, ingress->tid[i],
49059b328cfSRahul Lakkireddy &tmp_packets, &tmp_bytes,
49159b328cfSRahul Lakkireddy ingress->fs[i].hash);
49221c4c60bSRahul Lakkireddy if (ret)
49321c4c60bSRahul Lakkireddy return ret;
49421c4c60bSRahul Lakkireddy
49559b328cfSRahul Lakkireddy packets += tmp_packets;
49659b328cfSRahul Lakkireddy bytes += tmp_bytes;
49759b328cfSRahul Lakkireddy }
49859b328cfSRahul Lakkireddy
49921c4c60bSRahul Lakkireddy if (tc_port_matchall->ingress.packets != packets) {
50021c4c60bSRahul Lakkireddy flow_stats_update(&cls_matchall->stats,
50121c4c60bSRahul Lakkireddy bytes - tc_port_matchall->ingress.bytes,
50221c4c60bSRahul Lakkireddy packets - tc_port_matchall->ingress.packets,
5034b61d3e8SPo Liu 0, tc_port_matchall->ingress.last_used,
50493a129ebSJiri Pirko FLOW_ACTION_HW_STATS_IMMEDIATE);
50521c4c60bSRahul Lakkireddy
50621c4c60bSRahul Lakkireddy tc_port_matchall->ingress.packets = packets;
50721c4c60bSRahul Lakkireddy tc_port_matchall->ingress.bytes = bytes;
50821c4c60bSRahul Lakkireddy tc_port_matchall->ingress.last_used = jiffies;
50921c4c60bSRahul Lakkireddy }
51021c4c60bSRahul Lakkireddy
51121c4c60bSRahul Lakkireddy return 0;
51221c4c60bSRahul Lakkireddy }
51321c4c60bSRahul Lakkireddy
cxgb4_matchall_disable_offload(struct net_device * dev)5144ec4762dSRahul Lakkireddy static void cxgb4_matchall_disable_offload(struct net_device *dev)
5154ec4762dSRahul Lakkireddy {
5164ec4762dSRahul Lakkireddy struct cxgb4_tc_port_matchall *tc_port_matchall;
5174ec4762dSRahul Lakkireddy struct port_info *pi = netdev2pinfo(dev);
5184ec4762dSRahul Lakkireddy struct adapter *adap = netdev2adap(dev);
5194ec4762dSRahul Lakkireddy
5204ec4762dSRahul Lakkireddy tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
5214ec4762dSRahul Lakkireddy if (tc_port_matchall->egress.state == CXGB4_MATCHALL_STATE_ENABLED)
5224ec4762dSRahul Lakkireddy cxgb4_matchall_free_tc(dev);
52321c4c60bSRahul Lakkireddy
52421c4c60bSRahul Lakkireddy if (tc_port_matchall->ingress.state == CXGB4_MATCHALL_STATE_ENABLED)
52521c4c60bSRahul Lakkireddy cxgb4_matchall_free_filter(dev);
5264ec4762dSRahul Lakkireddy }
5274ec4762dSRahul Lakkireddy
cxgb4_init_tc_matchall(struct adapter * adap)5284ec4762dSRahul Lakkireddy int cxgb4_init_tc_matchall(struct adapter *adap)
5294ec4762dSRahul Lakkireddy {
5304ec4762dSRahul Lakkireddy struct cxgb4_tc_port_matchall *tc_port_matchall;
5314ec4762dSRahul Lakkireddy struct cxgb4_tc_matchall *tc_matchall;
5324ec4762dSRahul Lakkireddy int ret;
5334ec4762dSRahul Lakkireddy
5344ec4762dSRahul Lakkireddy tc_matchall = kzalloc(sizeof(*tc_matchall), GFP_KERNEL);
5354ec4762dSRahul Lakkireddy if (!tc_matchall)
5364ec4762dSRahul Lakkireddy return -ENOMEM;
5374ec4762dSRahul Lakkireddy
5384ec4762dSRahul Lakkireddy tc_port_matchall = kcalloc(adap->params.nports,
5394ec4762dSRahul Lakkireddy sizeof(*tc_port_matchall),
5404ec4762dSRahul Lakkireddy GFP_KERNEL);
5414ec4762dSRahul Lakkireddy if (!tc_port_matchall) {
5424ec4762dSRahul Lakkireddy ret = -ENOMEM;
5434ec4762dSRahul Lakkireddy goto out_free_matchall;
5444ec4762dSRahul Lakkireddy }
5454ec4762dSRahul Lakkireddy
5464ec4762dSRahul Lakkireddy tc_matchall->port_matchall = tc_port_matchall;
5474ec4762dSRahul Lakkireddy adap->tc_matchall = tc_matchall;
5484ec4762dSRahul Lakkireddy return 0;
5494ec4762dSRahul Lakkireddy
5504ec4762dSRahul Lakkireddy out_free_matchall:
5514ec4762dSRahul Lakkireddy kfree(tc_matchall);
5524ec4762dSRahul Lakkireddy return ret;
5534ec4762dSRahul Lakkireddy }
5544ec4762dSRahul Lakkireddy
cxgb4_cleanup_tc_matchall(struct adapter * adap)5554ec4762dSRahul Lakkireddy void cxgb4_cleanup_tc_matchall(struct adapter *adap)
5564ec4762dSRahul Lakkireddy {
5574ec4762dSRahul Lakkireddy u8 i;
5584ec4762dSRahul Lakkireddy
5594ec4762dSRahul Lakkireddy if (adap->tc_matchall) {
5604ec4762dSRahul Lakkireddy if (adap->tc_matchall->port_matchall) {
5614ec4762dSRahul Lakkireddy for (i = 0; i < adap->params.nports; i++) {
5624ec4762dSRahul Lakkireddy struct net_device *dev = adap->port[i];
5634ec4762dSRahul Lakkireddy
5644ec4762dSRahul Lakkireddy if (dev)
5654ec4762dSRahul Lakkireddy cxgb4_matchall_disable_offload(dev);
5664ec4762dSRahul Lakkireddy }
5674ec4762dSRahul Lakkireddy kfree(adap->tc_matchall->port_matchall);
5684ec4762dSRahul Lakkireddy }
5694ec4762dSRahul Lakkireddy kfree(adap->tc_matchall);
5704ec4762dSRahul Lakkireddy }
5714ec4762dSRahul Lakkireddy }
572