1 /*
2  * This file is part of the Chelsio T4 Ethernet driver for Linux.
3  *
4  * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
5  *
6  * This software is available to you under a choice of one of two
7  * licenses.  You may choose to be licensed under the terms of the GNU
8  * General Public License (GPL) Version 2, available from the file
9  * COPYING in the main directory of this source tree, or the
10  * OpenIB.org BSD license below:
11  *
12  *     Redistribution and use in source and binary forms, with or
13  *     without modification, are permitted provided that the following
14  *     conditions are met:
15  *
16  *      - Redistributions of source code must retain the above
17  *        copyright notice, this list of conditions and the following
18  *        disclaimer.
19  *
20  *      - Redistributions in binary form must reproduce the above
21  *        copyright notice, this list of conditions and the following
22  *        disclaimer in the documentation and/or other materials
23  *        provided with the distribution.
24  *
25  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32  * SOFTWARE.
33  */
34 #include <net/ipv6.h>
35 
36 #include "cxgb4.h"
37 #include "t4_regs.h"
38 #include "t4_tcb.h"
39 #include "t4_values.h"
40 #include "clip_tbl.h"
41 #include "l2t.h"
42 #include "smt.h"
43 #include "t4fw_api.h"
44 #include "cxgb4_filter.h"
45 
46 static inline bool is_field_set(u32 val, u32 mask)
47 {
48 	return val || mask;
49 }
50 
51 static inline bool unsupported(u32 conf, u32 conf_mask, u32 val, u32 mask)
52 {
53 	return !(conf & conf_mask) && is_field_set(val, mask);
54 }
55 
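/* Build a CPL_SET_TCB_FIELD work request that updates TCB word @word for the
 * given TID, changing only the bits selected by @mask, and hand it to the
 * hardware on an offload queue chosen from the filter's ingress port.
 */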
56 static int set_tcb_field(struct adapter *adap, struct filter_entry *f,
57 			 unsigned int ftid,  u16 word, u64 mask, u64 val,
58 			 int no_reply)
59 {
60 	struct cpl_set_tcb_field *req;
61 	struct sk_buff *skb;
62 
63 	skb = alloc_skb(sizeof(struct cpl_set_tcb_field), GFP_ATOMIC);
64 	if (!skb)
65 		return -ENOMEM;
66 
67 	req = (struct cpl_set_tcb_field *)__skb_put_zero(skb, sizeof(*req));
68 	INIT_TP_WR_CPL(req, CPL_SET_TCB_FIELD, ftid);
69 	req->reply_ctrl = htons(REPLY_CHAN_V(0) |
70 				QUEUENO_V(adap->sge.fw_evtq.abs_id) |
71 				NO_REPLY_V(no_reply));
72 	req->word_cookie = htons(TCB_WORD_V(word) | TCB_COOKIE_V(ftid));
73 	req->mask = cpu_to_be64(mask);
74 	req->val = cpu_to_be64(val);
75 	set_wr_txq(skb, CPL_PRIORITY_CONTROL, f->fs.val.iport & 0x3);
76 	t4_ofld_send(adap, skb);
77 	return 0;
78 }
79 
80 /* Set one of the t_flags bits in the TCB.
81  */
82 static int set_tcb_tflag(struct adapter *adap, struct filter_entry *f,
83 			 unsigned int ftid, unsigned int bit_pos,
84 			 unsigned int val, int no_reply)
85 {
86 	return set_tcb_field(adap, f, ftid,  TCB_T_FLAGS_W, 1ULL << bit_pos,
87 			     (unsigned long long)val << bit_pos, no_reply);
88 }
89 
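/* Build an immediate-data ULP_TX work request carrying a CPL_ABORT_REQ
 * (no RST) for the given TID.
 */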
90 static void mk_abort_req_ulp(struct cpl_abort_req *abort_req, unsigned int tid)
91 {
92 	struct ulp_txpkt *txpkt = (struct ulp_txpkt *)abort_req;
93 	struct ulptx_idata *sc = (struct ulptx_idata *)(txpkt + 1);
94 
95 	txpkt->cmd_dest = htonl(ULPTX_CMD_V(ULP_TX_PKT) | ULP_TXPKT_DEST_V(0));
96 	txpkt->len = htonl(DIV_ROUND_UP(sizeof(*abort_req), 16));
97 	sc->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_IMM));
98 	sc->len = htonl(sizeof(*abort_req) - sizeof(struct work_request_hdr));
99 	OPCODE_TID(abort_req) = htonl(MK_OPCODE_TID(CPL_ABORT_REQ, tid));
100 	abort_req->rsvd0 = htonl(0);
101 	abort_req->rsvd1 = 0;
102 	abort_req->cmd = CPL_ABORT_NO_RST;
103 }
104 
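/* Build the matching immediate-data ULP_TX work request carrying a
 * CPL_ABORT_RPL (no RST) for the given TID.
 */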
105 static void mk_abort_rpl_ulp(struct cpl_abort_rpl *abort_rpl, unsigned int tid)
106 {
107 	struct ulp_txpkt *txpkt = (struct ulp_txpkt *)abort_rpl;
108 	struct ulptx_idata *sc = (struct ulptx_idata *)(txpkt + 1);
109 
110 	txpkt->cmd_dest = htonl(ULPTX_CMD_V(ULP_TX_PKT) | ULP_TXPKT_DEST_V(0));
111 	txpkt->len = htonl(DIV_ROUND_UP(sizeof(*abort_rpl), 16));
112 	sc->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_IMM));
113 	sc->len = htonl(sizeof(*abort_rpl) - sizeof(struct work_request_hdr));
114 	OPCODE_TID(abort_rpl) = htonl(MK_OPCODE_TID(CPL_ABORT_RPL, tid));
115 	abort_rpl->rsvd0 = htonl(0);
116 	abort_rpl->rsvd1 = 0;
117 	abort_rpl->cmd = CPL_ABORT_NO_RST;
118 }
119 
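/* Build an immediate-data ULP_TX work request carrying a CPL_SET_TCB_FIELD
 * for the filter's TID, followed by a ULP_TX_SC_NOOP trailer.
 */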
120 static void mk_set_tcb_ulp(struct filter_entry *f,
121 			   struct cpl_set_tcb_field *req,
122 			   unsigned int word, u64 mask, u64 val,
123 			   u8 cookie, int no_reply)
124 {
125 	struct ulp_txpkt *txpkt = (struct ulp_txpkt *)req;
126 	struct ulptx_idata *sc = (struct ulptx_idata *)(txpkt + 1);
127 
128 	txpkt->cmd_dest = htonl(ULPTX_CMD_V(ULP_TX_PKT) | ULP_TXPKT_DEST_V(0));
129 	txpkt->len = htonl(DIV_ROUND_UP(sizeof(*req), 16));
130 	sc->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_IMM));
131 	sc->len = htonl(sizeof(*req) - sizeof(struct work_request_hdr));
132 	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, f->tid));
133 	req->reply_ctrl = htons(NO_REPLY_V(no_reply) | REPLY_CHAN_V(0) |
134 				QUEUENO_V(0));
135 	req->word_cookie = htons(TCB_WORD_V(word) | TCB_COOKIE_V(cookie));
136 	req->mask = cpu_to_be64(mask);
137 	req->val = cpu_to_be64(val);
138 	sc = (struct ulptx_idata *)(req + 1);
139 	sc->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_NOOP));
140 	sc->len = htonl(0);
141 }
142 
143 static int configure_filter_smac(struct adapter *adap, struct filter_entry *f)
144 {
145 	int err;
146 
147 	/* do a set-tcb for smac-sel and CWR bit.. */
148 	err = set_tcb_field(adap, f, f->tid, TCB_SMAC_SEL_W,
149 			    TCB_SMAC_SEL_V(TCB_SMAC_SEL_M),
150 			    TCB_SMAC_SEL_V(f->smt->idx), 1);
151 	if (err)
152 		goto smac_err;
153 
154 	err = set_tcb_tflag(adap, f, f->tid, TF_CCTRL_CWR_S, 1, 1);
155 	if (!err)
156 		return 0;
157 
158 smac_err:
159 	dev_err(adap->pdev_dev, "filter %u smac config failed with error %u\n",
160 		f->tid, err);
161 	return err;
162 }
163 
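/* Program the filter's NAT rewrite values into its TCB: the new local and
 * foreign IP addresses (four TCB words each for IPv6, one word for IPv4)
 * and the new local/foreign ports, which share the TCB_PDU_HDR_LEN word.
 */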
164 static void set_nat_params(struct adapter *adap, struct filter_entry *f,
165 			   unsigned int tid, bool dip, bool sip, bool dp,
166 			   bool sp)
167 {
168 	u8 *nat_lp = (u8 *)&f->fs.nat_lport;
169 	u8 *nat_fp = (u8 *)&f->fs.nat_fport;
170 
171 	if (dip) {
172 		if (f->fs.type) {
173 			set_tcb_field(adap, f, tid, TCB_SND_UNA_RAW_W,
174 				      WORD_MASK, f->fs.nat_lip[15] |
175 				      f->fs.nat_lip[14] << 8 |
176 				      f->fs.nat_lip[13] << 16 |
177 				      (u64)f->fs.nat_lip[12] << 24, 1);
178 
179 			set_tcb_field(adap, f, tid, TCB_SND_UNA_RAW_W + 1,
180 				      WORD_MASK, f->fs.nat_lip[11] |
181 				      f->fs.nat_lip[10] << 8 |
182 				      f->fs.nat_lip[9] << 16 |
183 				      (u64)f->fs.nat_lip[8] << 24, 1);
184 
185 			set_tcb_field(adap, f, tid, TCB_SND_UNA_RAW_W + 2,
186 				      WORD_MASK, f->fs.nat_lip[7] |
187 				      f->fs.nat_lip[6] << 8 |
188 				      f->fs.nat_lip[5] << 16 |
189 				      (u64)f->fs.nat_lip[4] << 24, 1);
190 
191 			set_tcb_field(adap, f, tid, TCB_SND_UNA_RAW_W + 3,
192 				      WORD_MASK, f->fs.nat_lip[3] |
193 				      f->fs.nat_lip[2] << 8 |
194 				      f->fs.nat_lip[1] << 16 |
195 				      (u64)f->fs.nat_lip[0] << 24, 1);
196 		} else {
197 			set_tcb_field(adap, f, tid, TCB_RX_FRAG3_LEN_RAW_W,
198 				      WORD_MASK, f->fs.nat_lip[3] |
199 				      f->fs.nat_lip[2] << 8 |
200 				      f->fs.nat_lip[1] << 16 |
201 				      (u64)f->fs.nat_lip[0] << 24, 1);
202 		}
203 	}
204 
205 	if (sip) {
206 		if (f->fs.type) {
207 			set_tcb_field(adap, f, tid, TCB_RX_FRAG2_PTR_RAW_W,
208 				      WORD_MASK, f->fs.nat_fip[15] |
209 				      f->fs.nat_fip[14] << 8 |
210 				      f->fs.nat_fip[13] << 16 |
211 				      (u64)f->fs.nat_fip[12] << 24, 1);
212 
213 			set_tcb_field(adap, f, tid, TCB_RX_FRAG2_PTR_RAW_W + 1,
214 				      WORD_MASK, f->fs.nat_fip[11] |
215 				      f->fs.nat_fip[10] << 8 |
216 				      f->fs.nat_fip[9] << 16 |
217 				      (u64)f->fs.nat_fip[8] << 24, 1);
218 
219 			set_tcb_field(adap, f, tid, TCB_RX_FRAG2_PTR_RAW_W + 2,
220 				      WORD_MASK, f->fs.nat_fip[7] |
221 				      f->fs.nat_fip[6] << 8 |
222 				      f->fs.nat_fip[5] << 16 |
223 				      (u64)f->fs.nat_fip[4] << 24, 1);
224 
225 			set_tcb_field(adap, f, tid, TCB_RX_FRAG2_PTR_RAW_W + 3,
226 				      WORD_MASK, f->fs.nat_fip[3] |
227 				      f->fs.nat_fip[2] << 8 |
228 				      f->fs.nat_fip[1] << 16 |
229 				      (u64)f->fs.nat_fip[0] << 24, 1);
230 
231 		} else {
232 			set_tcb_field(adap, f, tid,
233 				      TCB_RX_FRAG3_START_IDX_OFFSET_RAW_W,
234 				      WORD_MASK, f->fs.nat_fip[3] |
235 				      f->fs.nat_fip[2] << 8 |
236 				      f->fs.nat_fip[1] << 16 |
237 				      (u64)f->fs.nat_fip[0] << 24, 1);
238 		}
239 	}
240 
241 	set_tcb_field(adap, f, tid, TCB_PDU_HDR_LEN_W, WORD_MASK,
242 		      (dp ? (nat_lp[1] | nat_lp[0] << 8) : 0) |
243 		      (sp ? (nat_fp[1] << 16 | (u64)nat_fp[0] << 24) : 0),
244 		      1);
245 }
246 
247 /* Validate filter spec against configuration done on the card. */
248 static int validate_filter(struct net_device *dev,
249 			   struct ch_filter_specification *fs)
250 {
251 	struct adapter *adapter = netdev2adap(dev);
252 	u32 fconf, iconf;
253 
254 	/* Check for unconfigured fields being used. */
255 	iconf = adapter->params.tp.ingress_config;
256 	fconf = fs->hash ? adapter->params.tp.filter_mask :
257 			   adapter->params.tp.vlan_pri_map;
258 
259 	if (unsupported(fconf, FCOE_F, fs->val.fcoe, fs->mask.fcoe) ||
260 	    unsupported(fconf, PORT_F, fs->val.iport, fs->mask.iport) ||
261 	    unsupported(fconf, TOS_F, fs->val.tos, fs->mask.tos) ||
262 	    unsupported(fconf, ETHERTYPE_F, fs->val.ethtype,
263 			fs->mask.ethtype) ||
264 	    unsupported(fconf, MACMATCH_F, fs->val.macidx, fs->mask.macidx) ||
265 	    unsupported(fconf, MPSHITTYPE_F, fs->val.matchtype,
266 			fs->mask.matchtype) ||
267 	    unsupported(fconf, FRAGMENTATION_F, fs->val.frag, fs->mask.frag) ||
268 	    unsupported(fconf, PROTOCOL_F, fs->val.proto, fs->mask.proto) ||
269 	    unsupported(fconf, VNIC_ID_F, fs->val.pfvf_vld,
270 			fs->mask.pfvf_vld) ||
271 	    unsupported(fconf, VNIC_ID_F, fs->val.ovlan_vld,
272 			fs->mask.ovlan_vld) ||
273 	    unsupported(fconf, VNIC_ID_F, fs->val.encap_vld,
274 			fs->mask.encap_vld) ||
275 	    unsupported(fconf, VLAN_F, fs->val.ivlan_vld, fs->mask.ivlan_vld))
276 		return -EOPNOTSUPP;
277 
278 	/* T4 inconveniently uses the same FT_VNIC_ID_W bits for both the Outer
279 	 * VLAN Tag and PF/VF/VFvld fields based on VNIC_F being set
280  * in TP_INGRESS_CONFIG.  Hence the somewhat crazy checks
281 	 * below.  Additionally, since the T4 firmware interface also
282 	 * carries that overlap, we need to translate any PF/VF
283 	 * specification into that internal format below.
284 	 */
285 	if ((is_field_set(fs->val.pfvf_vld, fs->mask.pfvf_vld) &&
286 	     is_field_set(fs->val.ovlan_vld, fs->mask.ovlan_vld)) ||
287 	    (is_field_set(fs->val.pfvf_vld, fs->mask.pfvf_vld) &&
288 	     is_field_set(fs->val.encap_vld, fs->mask.encap_vld)) ||
289 	    (is_field_set(fs->val.ovlan_vld, fs->mask.ovlan_vld) &&
290 	     is_field_set(fs->val.encap_vld, fs->mask.encap_vld)))
291 		return -EOPNOTSUPP;
292 	if (unsupported(iconf, VNIC_F, fs->val.pfvf_vld, fs->mask.pfvf_vld) ||
293 	    (is_field_set(fs->val.ovlan_vld, fs->mask.ovlan_vld) &&
294 	     (iconf & VNIC_F)))
295 		return -EOPNOTSUPP;
296 	if (fs->val.pf > 0x7 || fs->val.vf > 0x7f)
297 		return -ERANGE;
298 	fs->mask.pf &= 0x7;
299 	fs->mask.vf &= 0x7f;
300 
301 	/* If the user is requesting that the filter action loop
302 	 * matching packets back out one of our ports, make sure that
303 	 * the egress port is in range.
304 	 */
305 	if (fs->action == FILTER_SWITCH &&
306 	    fs->eport >= adapter->params.nports)
307 		return -ERANGE;
308 
309 	/* Don't allow various trivially obvious bogus out-of-range values... */
310 	if (fs->val.iport >= adapter->params.nports)
311 		return -ERANGE;
312 
313 	/* T4 doesn't support removing VLAN Tags for loop back filters. */
314 	if (is_t4(adapter->params.chip) &&
315 	    fs->action == FILTER_SWITCH &&
316 	    (fs->newvlan == VLAN_REMOVE ||
317 	     fs->newvlan == VLAN_REWRITE))
318 		return -EOPNOTSUPP;
319 
320 	if (fs->val.encap_vld &&
321 	    CHELSIO_CHIP_VERSION(adapter->params.chip) < CHELSIO_T6)
322 		return -EOPNOTSUPP;
323 	return 0;
324 }
325 
326 static int get_filter_steerq(struct net_device *dev,
327 			     struct ch_filter_specification *fs)
328 {
329 	struct adapter *adapter = netdev2adap(dev);
330 	int iq;
331 
332 	/* If the user has requested steering matching Ingress Packets
333 	 * to a specific Queue Set, we need to make sure it's in range
334 	 * for the port and map that into the Absolute Queue ID of the
335 	 * Queue Set's Response Queue.
336 	 */
337 	if (!fs->dirsteer) {
338 		if (fs->iq)
339 			return -EINVAL;
340 		iq = 0;
341 	} else {
342 		struct port_info *pi = netdev_priv(dev);
343 
344 		/* If the iq id is greater than the number of qsets,
345 		 * then assume it is an absolute qid.
346 		 */
347 		if (fs->iq < pi->nqsets)
348 			iq = adapter->sge.ethrxq[pi->first_qset +
349 						 fs->iq].rspq.abs_id;
350 		else
351 			iq = fs->iq;
352 	}
353 
354 	return iq;
355 }
356 
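/* Read a filter's hit counters directly from its TCB in adapter memory.
 * T4 only keeps a packet count; T5 and later also keep a byte count, at
 * different word offsets within the TCB.
 */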
357 static int get_filter_count(struct adapter *adapter, unsigned int fidx,
358 			    u64 *pkts, u64 *bytes, bool hash)
359 {
360 	unsigned int tcb_base, tcbaddr;
361 	unsigned int word_offset;
362 	struct filter_entry *f;
363 	__be64 be64_byte_count;
364 	int ret;
365 
366 	tcb_base = t4_read_reg(adapter, TP_CMM_TCB_BASE_A);
367 	if (is_hashfilter(adapter) && hash) {
368 		if (tid_out_of_range(&adapter->tids, fidx))
369 			return -E2BIG;
370 		f = adapter->tids.tid_tab[fidx - adapter->tids.tid_base];
371 		if (!f)
372 			return -EINVAL;
373 	} else {
374 		if ((fidx != (adapter->tids.nftids + adapter->tids.nsftids +
375 			      adapter->tids.nhpftids - 1)) &&
376 		    fidx >= (adapter->tids.nftids + adapter->tids.nhpftids))
377 			return -E2BIG;
378 
379 		if (fidx < adapter->tids.nhpftids)
380 			f = &adapter->tids.hpftid_tab[fidx];
381 		else
382 			f = &adapter->tids.ftid_tab[fidx -
383 						    adapter->tids.nhpftids];
384 		if (!f->valid)
385 			return -EINVAL;
386 	}
387 	tcbaddr = tcb_base + f->tid * TCB_SIZE;
388 
389 	spin_lock(&adapter->win0_lock);
390 	if (is_t4(adapter->params.chip)) {
391 		__be64 be64_count;
392 
393 		/* T4 doesn't maintain byte counts in hw */
394 		*bytes = 0;
395 
396 		/* Get pkts */
397 		word_offset = 4;
398 		ret = t4_memory_rw(adapter, MEMWIN_NIC, MEM_EDC0,
399 				   tcbaddr + (word_offset * sizeof(__be32)),
400 				   sizeof(be64_count),
401 				   (__be32 *)&be64_count,
402 				   T4_MEMORY_READ);
403 		if (ret < 0)
404 			goto out;
405 		*pkts = be64_to_cpu(be64_count);
406 	} else {
407 		__be32 be32_count;
408 
409 		/* Get bytes */
410 		word_offset = 4;
411 		ret = t4_memory_rw(adapter, MEMWIN_NIC, MEM_EDC0,
412 				   tcbaddr + (word_offset * sizeof(__be32)),
413 				   sizeof(be64_byte_count),
414 				   &be64_byte_count,
415 				   T4_MEMORY_READ);
416 		if (ret < 0)
417 			goto out;
418 		*bytes = be64_to_cpu(be64_byte_count);
419 
420 		/* Get pkts */
421 		word_offset = 6;
422 		ret = t4_memory_rw(adapter, MEMWIN_NIC, MEM_EDC0,
423 				   tcbaddr + (word_offset * sizeof(__be32)),
424 				   sizeof(be32_count),
425 				   &be32_count,
426 				   T4_MEMORY_READ);
427 		if (ret < 0)
428 			goto out;
429 		*pkts = (u64)be32_to_cpu(be32_count);
430 	}
431 
432 out:
433 	spin_unlock(&adapter->win0_lock);
434 	return ret;
435 }
436 
437 int cxgb4_get_filter_counters(struct net_device *dev, unsigned int fidx,
438 			      u64 *hitcnt, u64 *bytecnt, bool hash)
439 {
440 	struct adapter *adapter = netdev2adap(dev);
441 
442 	return get_filter_count(adapter, fidx, hitcnt, bytecnt, hash);
443 }
444 
445 static bool cxgb4_filter_prio_in_range(struct tid_info *t, u32 idx, u8 nslots,
446 				       u32 prio)
447 {
448 	struct filter_entry *prev_tab, *next_tab, *prev_fe, *next_fe;
449 	u32 prev_ftid, next_ftid;
450 
451 	/* Only insert the rule if both of the following conditions
452 	 * are met:
453 	 * 1. The immediate previous rule has priority <= @prio.
454 	 * 2. The immediate next rule has priority >= @prio.
455 	 */
456 
457 	/* High Priority (HPFILTER) region always has higher priority
458 	 * than normal FILTER region. So, all rules in HPFILTER region
459 	 * must have prio value <= rules in normal FILTER region.
460 	 */
461 	if (idx < t->nhpftids) {
462 		/* Don't insert if there's a rule already present at @idx
463 		 * in HPFILTER region.
464 		 */
465 		if (test_bit(idx, t->hpftid_bmap))
466 			return false;
467 
468 		next_tab = t->hpftid_tab;
469 		next_ftid = find_next_bit(t->hpftid_bmap, t->nhpftids, idx);
470 		if (next_ftid >= t->nhpftids) {
471 			/* No next entry found in HPFILTER region.
472 			 * See if there's any next entry in normal
473 			 * FILTER region.
474 			 */
475 			next_ftid = find_first_bit(t->ftid_bmap, t->nftids);
476 			if (next_ftid >= t->nftids)
477 				next_ftid = idx;
478 			else
479 				next_tab = t->ftid_tab;
480 		}
481 
482 		/* Search for the closest previous filter entry in HPFILTER
483 		 * region. No need to search in normal FILTER region because
484 		 * there can never be any entry in normal FILTER region whose
485 		 * prio value is < last entry in HPFILTER region.
486 		 */
487 		prev_ftid = find_last_bit(t->hpftid_bmap, idx);
488 		if (prev_ftid >= idx)
489 			prev_ftid = idx;
490 
491 		prev_tab = t->hpftid_tab;
492 	} else {
493 		idx -= t->nhpftids;
494 
495 		/* Don't insert if there's a rule already present at @idx
496 		 * in normal FILTER region.
497 		 */
498 		if (test_bit(idx, t->ftid_bmap))
499 			return false;
500 
501 		prev_tab = t->ftid_tab;
502 		prev_ftid = find_last_bit(t->ftid_bmap, idx);
503 		if (prev_ftid >= idx) {
504 			/* No previous entry found in normal FILTER
505 			 * region. See if there's any previous entry
506 			 * in HPFILTER region.
507 			 */
508 			prev_ftid = find_last_bit(t->hpftid_bmap, t->nhpftids);
509 			if (prev_ftid >= t->nhpftids)
510 				prev_ftid = idx;
511 			else
512 				prev_tab = t->hpftid_tab;
513 		}
514 
515 		/* Search for the closest next filter entry in normal
516 		 * FILTER region. No need to search in HPFILTER region
517 		 * because there can never be any entry in HPFILTER
518 		 * region whose prio value is > first entry in normal
519 		 * FILTER region.
520 		 */
521 		next_ftid = find_next_bit(t->ftid_bmap, t->nftids, idx);
522 		if (next_ftid >= t->nftids)
523 			next_ftid = idx;
524 
525 		next_tab = t->ftid_tab;
526 	}
527 
528 	next_fe = &next_tab[next_ftid];
529 
530 	/* See if the filter entry belongs to an IPv6 rule, which
531 	 * occupies 4 slots on T5 and 2 slots on T6. Adjust the
532 	 * reference to the previously inserted filter entry
533 	 * accordingly.
534 	 */
535 	prev_fe = &prev_tab[prev_ftid & ~(nslots - 1)];
536 	if (!prev_fe->fs.type)
537 		prev_fe = &prev_tab[prev_ftid];
538 
539 	if ((prev_fe->valid && prev_fe->fs.tc_prio > prio) ||
540 	    (next_fe->valid && next_fe->fs.tc_prio < prio))
541 		return false;
542 
543 	return true;
544 }
545 
546 int cxgb4_get_free_ftid(struct net_device *dev, u8 family, bool hash_en,
547 			u32 tc_prio)
548 {
549 	struct adapter *adap = netdev2adap(dev);
550 	struct tid_info *t = &adap->tids;
551 	u32 bmap_ftid, max_ftid;
552 	struct filter_entry *f;
553 	unsigned long *bmap;
554 	bool found = false;
555 	u8 i, cnt, n;
556 	int ftid = 0;
557 
558 	/* IPv4 filters occupy one slot. IPv6 filters occupy two slots on T6
559 	 * and four slots on pre-T6 adapters.
560 	 */
561 	n = 1;
562 	if (family == PF_INET6) {
563 		n++;
564 		if (CHELSIO_CHIP_VERSION(adap->params.chip) < CHELSIO_T6)
565 			n += 2;
566 	}
567 
568 	/* There are 3 filter regions available in hardware in
569 	 * following order of priority:
570 	 *
571 	 * 1. High Priority (HPFILTER) region (Highest Priority).
572 	 * 2. HASH region.
573 	 * 3. Normal FILTER region (Lowest Priority).
574 	 *
575 	 * Entries in HPFILTER and normal FILTER region have index
576 	 * 0 as the highest priority and the rules will be scanned
577 	 * in ascending order until either a rule hits or end of
578 	 * the region is reached.
579 	 *
580 	 * All HASH region entries have same priority. The set of
581 	 * fields to match in headers are pre-determined. The same
582 	 * set of header match fields must be specified in every rule
583 	 * that is to be inserted in the HASH region.
584 	 * Hence, HASH region is an exact-match region. A HASH is
585 	 * generated for a rule based on the values in the
586 	 * pre-determined set of header match fields. The generated
587 	 * HASH serves as an index into the HASH region. There can
588 	 * never be 2 rules having the same HASH. Hardware will
589 	 * compute a HASH for every incoming packet based on the
590 	 * values in the pre-determined set of header match fields
591 	 * and uses it as an index to check if there's a rule
592 	 * inserted in the HASH region at the specified index. If
593 	 * there's a rule inserted, then it's considered as a filter
594 	 * hit. Otherwise, it's a filter miss and normal FILTER region
595 	 * is scanned afterwards.
596 	 */
597 
598 	spin_lock_bh(&t->ftid_lock);
599 
600 	ftid = (tc_prio <= t->nhpftids) ? 0 : t->nhpftids;
601 	max_ftid = t->nftids + t->nhpftids;
602 	while (ftid < max_ftid) {
603 		if (ftid < t->nhpftids) {
604 			/* If the new rule wants to get inserted into
605 			 * HPFILTER region, but its prio is greater
606 			 * than the rule with the highest prio in HASH
607 			 * region, or if there's not enough slots
608 			 * available in HPFILTER region, then skip
609 			 * trying to insert this rule into HPFILTER
610 			 * region and directly go to the next region.
611 			 */
612 			if ((t->tc_hash_tids_max_prio &&
613 			     tc_prio > t->tc_hash_tids_max_prio) ||
614 			     (ftid + n) > t->nhpftids) {
615 				ftid = t->nhpftids;
616 				continue;
617 			}
618 
619 			bmap = t->hpftid_bmap;
620 			bmap_ftid = ftid;
621 		} else if (hash_en) {
622 			/* Ensure priority is >= last rule in HPFILTER
623 			 * region.
624 			 */
625 			ftid = find_last_bit(t->hpftid_bmap, t->nhpftids);
626 			if (ftid < t->nhpftids) {
627 				f = &t->hpftid_tab[ftid];
628 				if (f->valid && tc_prio < f->fs.tc_prio)
629 					break;
630 			}
631 
632 			/* Ensure priority is <= first rule in normal
633 			 * FILTER region.
634 			 */
635 			ftid = find_first_bit(t->ftid_bmap, t->nftids);
636 			if (ftid < t->nftids) {
637 				f = &t->ftid_tab[ftid];
638 				if (f->valid && tc_prio > f->fs.tc_prio)
639 					break;
640 			}
641 
642 			found = true;
643 			ftid = t->nhpftids;
644 			goto out_unlock;
645 		} else {
646 			/* If the new rule wants to get inserted into
647 			 * normal FILTER region, but its prio is less
648 			 * than the rule with the highest prio in HASH
649 			 * region, then reject the rule.
650 			 */
651 			if (t->tc_hash_tids_max_prio &&
652 			    tc_prio < t->tc_hash_tids_max_prio)
653 				break;
654 
655 			if (ftid + n > max_ftid)
656 				break;
657 
658 			bmap = t->ftid_bmap;
659 			bmap_ftid = ftid - t->nhpftids;
660 		}
661 
662 		cnt = 0;
663 		for (i = 0; i < n; i++) {
664 			if (test_bit(bmap_ftid + i, bmap))
665 				break;
666 			cnt++;
667 		}
668 
669 		if (cnt == n) {
670 			/* Ensure the new rule's prio doesn't conflict
671 			 * with existing rules.
672 			 */
673 			if (cxgb4_filter_prio_in_range(t, ftid, n,
674 						       tc_prio)) {
675 				ftid &= ~(n - 1);
676 				found = true;
677 				break;
678 			}
679 		}
680 
681 		ftid += n;
682 	}
683 
684 out_unlock:
685 	spin_unlock_bh(&t->ftid_lock);
686 	return found ? ftid : -ENOMEM;
687 }
688 
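/* Reserve slots in the normal FILTER region bitmap for a new filter.  An
 * IPv4 filter takes a single slot; an IPv6 filter takes a region of four
 * slots on pre-T6 adapters and two slots on T6.
 */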
689 static int cxgb4_set_ftid(struct tid_info *t, int fidx, int family,
690 			  unsigned int chip_ver)
691 {
692 	spin_lock_bh(&t->ftid_lock);
693 
694 	if (test_bit(fidx, t->ftid_bmap)) {
695 		spin_unlock_bh(&t->ftid_lock);
696 		return -EBUSY;
697 	}
698 
699 	if (family == PF_INET) {
700 		__set_bit(fidx, t->ftid_bmap);
701 	} else {
702 		if (chip_ver < CHELSIO_T6)
703 			bitmap_allocate_region(t->ftid_bmap, fidx, 2);
704 		else
705 			bitmap_allocate_region(t->ftid_bmap, fidx, 1);
706 	}
707 
708 	spin_unlock_bh(&t->ftid_lock);
709 	return 0;
710 }
711 
712 static int cxgb4_set_hpftid(struct tid_info *t, int fidx, int family)
713 {
714 	spin_lock_bh(&t->ftid_lock);
715 
716 	if (test_bit(fidx, t->hpftid_bmap)) {
717 		spin_unlock_bh(&t->ftid_lock);
718 		return -EBUSY;
719 	}
720 
721 	if (family == PF_INET)
722 		__set_bit(fidx, t->hpftid_bmap);
723 	else
724 		bitmap_allocate_region(t->hpftid_bmap, fidx, 1);
725 
726 	spin_unlock_bh(&t->ftid_lock);
727 	return 0;
728 }
729 
730 static void cxgb4_clear_ftid(struct tid_info *t, int fidx, int family,
731 			     unsigned int chip_ver)
732 {
733 	spin_lock_bh(&t->ftid_lock);
734 	if (family == PF_INET) {
735 		__clear_bit(fidx, t->ftid_bmap);
736 	} else {
737 		if (chip_ver < CHELSIO_T6)
738 			bitmap_release_region(t->ftid_bmap, fidx, 2);
739 		else
740 			bitmap_release_region(t->ftid_bmap, fidx, 1);
741 	}
742 	spin_unlock_bh(&t->ftid_lock);
743 }
744 
745 static void cxgb4_clear_hpftid(struct tid_info *t, int fidx, int family)
746 {
747 	spin_lock_bh(&t->ftid_lock);
748 
749 	if (family == PF_INET)
750 		__clear_bit(fidx, t->hpftid_bmap);
751 	else
752 		bitmap_release_region(t->hpftid_bmap, fidx, 1);
753 
754 	spin_unlock_bh(&t->ftid_lock);
755 }
756 
757 /* Delete the filter at a specified index. */
758 static int del_filter_wr(struct adapter *adapter, int fidx)
759 {
760 	struct fw_filter_wr *fwr;
761 	struct filter_entry *f;
762 	struct sk_buff *skb;
763 	unsigned int len;
764 
765 	if (fidx < adapter->tids.nhpftids)
766 		f = &adapter->tids.hpftid_tab[fidx];
767 	else
768 		f = &adapter->tids.ftid_tab[fidx - adapter->tids.nhpftids];
769 
770 	len = sizeof(*fwr);
771 
772 	skb = alloc_skb(len, GFP_KERNEL);
773 	if (!skb)
774 		return -ENOMEM;
775 
776 	fwr = __skb_put(skb, len);
777 	t4_mk_filtdelwr(f->tid, fwr, adapter->sge.fw_evtq.abs_id);
778 
779 	/* Mark the filter as "pending" and ship off the Filter Work Request.
780 	 * When we get the Work Request Reply we'll clear the pending status.
781 	 */
782 	f->pending = 1;
783 	t4_mgmt_tx(adapter, skb);
784 	return 0;
785 }
786 
787 /* Send a Work Request to write the filter at a specified index.  We construct
788  * a Firmware Filter Work Request to have the work done and put the indicated
789  * filter into "pending" mode which will prevent any further actions against
790  * it till we get a reply from the firmware on the completion status of the
791  * request.
792  */
793 int set_filter_wr(struct adapter *adapter, int fidx)
794 {
795 	struct fw_filter2_wr *fwr;
796 	struct filter_entry *f;
797 	struct sk_buff *skb;
798 
799 	if (fidx < adapter->tids.nhpftids)
800 		f = &adapter->tids.hpftid_tab[fidx];
801 	else
802 		f = &adapter->tids.ftid_tab[fidx - adapter->tids.nhpftids];
803 
804 	skb = alloc_skb(sizeof(*fwr), GFP_KERNEL);
805 	if (!skb)
806 		return -ENOMEM;
807 
808 	/* If the new filter requires loopback Destination MAC and/or VLAN
809 	 * rewriting then we need to allocate a Layer 2 Table (L2T) entry for
810 	 * the filter.
811 	 */
812 	if (f->fs.newdmac || f->fs.newvlan) {
813 		/* allocate L2T entry for new filter */
814 		f->l2t = t4_l2t_alloc_switching(adapter, f->fs.vlan,
815 						f->fs.eport, f->fs.dmac);
816 		if (!f->l2t) {
817 			kfree_skb(skb);
818 			return -ENOMEM;
819 		}
820 	}
821 
822 	/* If the new filter requires loopback Source MAC rewriting then
823 	 * we need to allocate a SMT entry for the filter.
824 	 */
825 	if (f->fs.newsmac) {
826 		f->smt = cxgb4_smt_alloc_switching(f->dev, f->fs.smac);
827 		if (!f->smt) {
828 			if (f->l2t) {
829 				cxgb4_l2t_release(f->l2t);
830 				f->l2t = NULL;
831 			}
832 			kfree_skb(skb);
833 			return -ENOMEM;
834 		}
835 	}
836 
837 	fwr = __skb_put_zero(skb, sizeof(*fwr));
838 
839 	/* It would be nice to put most of the following in t4_hw.c but most
840 	 * of the work is translating the cxgbtool ch_filter_specification
841 	 * into the Work Request and the definition of that structure is
842 	 * currently in cxgbtool.h which isn't appropriate to pull into the
843 	 * common code.  We may eventually try to come up with a more neutral
844 	 * filter specification structure but for now it's easiest to simply
845 	 * put this fairly direct code in line ...
846 	 */
847 	if (adapter->params.filter2_wr_support)
848 		fwr->op_pkd = htonl(FW_WR_OP_V(FW_FILTER2_WR));
849 	else
850 		fwr->op_pkd = htonl(FW_WR_OP_V(FW_FILTER_WR));
851 	fwr->len16_pkd = htonl(FW_WR_LEN16_V(sizeof(*fwr) / 16));
852 	fwr->tid_to_iq =
853 		htonl(FW_FILTER_WR_TID_V(f->tid) |
854 		      FW_FILTER_WR_RQTYPE_V(f->fs.type) |
855 		      FW_FILTER_WR_NOREPLY_V(0) |
856 		      FW_FILTER_WR_IQ_V(f->fs.iq));
857 	fwr->del_filter_to_l2tix =
858 		htonl(FW_FILTER_WR_RPTTID_V(f->fs.rpttid) |
859 		      FW_FILTER_WR_DROP_V(f->fs.action == FILTER_DROP) |
860 		      FW_FILTER_WR_DIRSTEER_V(f->fs.dirsteer) |
861 		      FW_FILTER_WR_MASKHASH_V(f->fs.maskhash) |
862 		      FW_FILTER_WR_DIRSTEERHASH_V(f->fs.dirsteerhash) |
863 		      FW_FILTER_WR_LPBK_V(f->fs.action == FILTER_SWITCH) |
864 		      FW_FILTER_WR_DMAC_V(f->fs.newdmac) |
865 		      FW_FILTER_WR_SMAC_V(f->fs.newsmac) |
866 		      FW_FILTER_WR_INSVLAN_V(f->fs.newvlan == VLAN_INSERT ||
867 					     f->fs.newvlan == VLAN_REWRITE) |
868 		      FW_FILTER_WR_RMVLAN_V(f->fs.newvlan == VLAN_REMOVE ||
869 					    f->fs.newvlan == VLAN_REWRITE) |
870 		      FW_FILTER_WR_HITCNTS_V(f->fs.hitcnts) |
871 		      FW_FILTER_WR_TXCHAN_V(f->fs.eport) |
872 		      FW_FILTER_WR_PRIO_V(f->fs.prio) |
873 		      FW_FILTER_WR_L2TIX_V(f->l2t ? f->l2t->idx : 0));
874 	fwr->ethtype = htons(f->fs.val.ethtype);
875 	fwr->ethtypem = htons(f->fs.mask.ethtype);
876 	fwr->frag_to_ovlan_vldm =
877 		(FW_FILTER_WR_FRAG_V(f->fs.val.frag) |
878 		 FW_FILTER_WR_FRAGM_V(f->fs.mask.frag) |
879 		 FW_FILTER_WR_IVLAN_VLD_V(f->fs.val.ivlan_vld) |
880 		 FW_FILTER_WR_OVLAN_VLD_V(f->fs.val.ovlan_vld) |
881 		 FW_FILTER_WR_IVLAN_VLDM_V(f->fs.mask.ivlan_vld) |
882 		 FW_FILTER_WR_OVLAN_VLDM_V(f->fs.mask.ovlan_vld));
883 	if (f->fs.newsmac)
884 		fwr->smac_sel = f->smt->idx;
885 	fwr->rx_chan_rx_rpl_iq =
886 		htons(FW_FILTER_WR_RX_CHAN_V(0) |
887 		      FW_FILTER_WR_RX_RPL_IQ_V(adapter->sge.fw_evtq.abs_id));
888 	fwr->maci_to_matchtypem =
889 		htonl(FW_FILTER_WR_MACI_V(f->fs.val.macidx) |
890 		      FW_FILTER_WR_MACIM_V(f->fs.mask.macidx) |
891 		      FW_FILTER_WR_FCOE_V(f->fs.val.fcoe) |
892 		      FW_FILTER_WR_FCOEM_V(f->fs.mask.fcoe) |
893 		      FW_FILTER_WR_PORT_V(f->fs.val.iport) |
894 		      FW_FILTER_WR_PORTM_V(f->fs.mask.iport) |
895 		      FW_FILTER_WR_MATCHTYPE_V(f->fs.val.matchtype) |
896 		      FW_FILTER_WR_MATCHTYPEM_V(f->fs.mask.matchtype));
897 	fwr->ptcl = f->fs.val.proto;
898 	fwr->ptclm = f->fs.mask.proto;
899 	fwr->ttyp = f->fs.val.tos;
900 	fwr->ttypm = f->fs.mask.tos;
901 	fwr->ivlan = htons(f->fs.val.ivlan);
902 	fwr->ivlanm = htons(f->fs.mask.ivlan);
903 	fwr->ovlan = htons(f->fs.val.ovlan);
904 	fwr->ovlanm = htons(f->fs.mask.ovlan);
905 	memcpy(fwr->lip, f->fs.val.lip, sizeof(fwr->lip));
906 	memcpy(fwr->lipm, f->fs.mask.lip, sizeof(fwr->lipm));
907 	memcpy(fwr->fip, f->fs.val.fip, sizeof(fwr->fip));
908 	memcpy(fwr->fipm, f->fs.mask.fip, sizeof(fwr->fipm));
909 	fwr->lp = htons(f->fs.val.lport);
910 	fwr->lpm = htons(f->fs.mask.lport);
911 	fwr->fp = htons(f->fs.val.fport);
912 	fwr->fpm = htons(f->fs.mask.fport);
913 
914 	if (adapter->params.filter2_wr_support) {
915 		u8 *nat_lp = (u8 *)&f->fs.nat_lport;
916 		u8 *nat_fp = (u8 *)&f->fs.nat_fport;
917 
918 		fwr->natmode_to_ulp_type =
919 			FW_FILTER2_WR_ULP_TYPE_V(f->fs.nat_mode ?
920 						 ULP_MODE_TCPDDP :
921 						 ULP_MODE_NONE) |
922 			FW_FILTER2_WR_NATMODE_V(f->fs.nat_mode);
923 		memcpy(fwr->newlip, f->fs.nat_lip, sizeof(fwr->newlip));
924 		memcpy(fwr->newfip, f->fs.nat_fip, sizeof(fwr->newfip));
925 		fwr->newlport = htons(nat_lp[1] | nat_lp[0] << 8);
926 		fwr->newfport = htons(nat_fp[1] | nat_fp[0] << 8);
927 	}
928 
929 	/* Mark the filter as "pending" and ship off the Filter Work Request.
930 	 * When we get the Work Request Reply we'll clear the pending status.
931 	 */
932 	f->pending = 1;
933 	set_wr_txq(skb, CPL_PRIORITY_CONTROL, f->fs.val.iport & 0x3);
934 	t4_ofld_send(adapter, skb);
935 	return 0;
936 }
937 
938 /* Return an error number if the indicated filter isn't writable ... */
939 int writable_filter(struct filter_entry *f)
940 {
941 	if (f->locked)
942 		return -EPERM;
943 	if (f->pending)
944 		return -EBUSY;
945 
946 	return 0;
947 }
948 
949 /* Delete the filter at the specified index (if valid).  This checks for all
950  * the common problems with doing this, such as the filter being locked or
951  * currently pending in another operation, before issuing the delete request.
952  */
953 int delete_filter(struct adapter *adapter, unsigned int fidx)
954 {
955 	struct filter_entry *f;
956 	int ret;
957 
958 	if (fidx >= adapter->tids.nftids + adapter->tids.nsftids +
959 		    adapter->tids.nhpftids)
960 		return -EINVAL;
961 
962 	if (fidx < adapter->tids.nhpftids)
963 		f = &adapter->tids.hpftid_tab[fidx];
964 	else
965 		f = &adapter->tids.ftid_tab[fidx - adapter->tids.nhpftids];
966 	ret = writable_filter(f);
967 	if (ret)
968 		return ret;
969 	if (f->valid)
970 		return del_filter_wr(adapter, fidx);
971 
972 	return 0;
973 }
974 
975 /* Clear a filter and release any of its resources that we own.  This also
976  * clears the filter's "pending" status.
977  */
978 void clear_filter(struct adapter *adap, struct filter_entry *f)
979 {
980 	struct port_info *pi = netdev_priv(f->dev);
981 
982 	/* If the filter has any loopback rewriting rules then we'll need
983 	 * to free any existing L2T, SMT or CLIP entries owned by the rule.
984 	 */
986 	if (f->l2t)
987 		cxgb4_l2t_release(f->l2t);
988 
989 	if (f->smt)
990 		cxgb4_smt_release(f->smt);
991 
992 	if (f->fs.val.encap_vld && f->fs.val.ovlan_vld)
993 		t4_free_encap_mac_filt(adap, pi->viid,
994 				       f->fs.val.ovlan & 0x1ff, 0);
995 
996 	if ((f->fs.hash || is_t6(adap->params.chip)) && f->fs.type)
997 		cxgb4_clip_release(f->dev, (const u32 *)&f->fs.val.lip, 1);
998 
999 	/* The zeroing of the filter rule below clears the filter valid,
1000 	 * pending, locked flags, l2t pointer, etc. so it's all we need for
1001 	 * this operation.
1002 	 */
1003 	memset(f, 0, sizeof(*f));
1004 }
1005 
1006 void clear_all_filters(struct adapter *adapter)
1007 {
1008 	struct net_device *dev = adapter->port[0];
1009 	unsigned int i;
1010 
1011 	if (adapter->tids.hpftid_tab) {
1012 		struct filter_entry *f = &adapter->tids.hpftid_tab[0];
1013 
1014 		for (i = 0; i < adapter->tids.nhpftids; i++, f++)
1015 			if (f->valid || f->pending)
1016 				cxgb4_del_filter(dev, i, &f->fs);
1017 	}
1018 
1019 	if (adapter->tids.ftid_tab) {
1020 		struct filter_entry *f = &adapter->tids.ftid_tab[0];
1021 		unsigned int max_ftid = adapter->tids.nftids +
1022 					adapter->tids.nsftids +
1023 					adapter->tids.nhpftids;
1024 
1025 		/* Clear all TCAM filters */
1026 		for (i = adapter->tids.nhpftids; i < max_ftid; i++, f++)
1027 			if (f->valid || f->pending)
1028 				cxgb4_del_filter(dev, i, &f->fs);
1029 	}
1030 
1031 	/* Clear all hash filters */
1032 	if (is_hashfilter(adapter) && adapter->tids.tid_tab) {
1033 		struct filter_entry *f;
1034 		unsigned int sb;
1035 
1036 		for (i = adapter->tids.hash_base;
1037 		     i <= adapter->tids.ntids; i++) {
1038 			f = (struct filter_entry *)
1039 				adapter->tids.tid_tab[i];
1040 
1041 			if (f && (f->valid || f->pending))
1042 				cxgb4_del_filter(dev, f->tid, &f->fs);
1043 		}
1044 
1045 		sb = adapter->tids.stid_base;
1046 		for (i = 0; i < sb; i++) {
1047 			f = (struct filter_entry *)adapter->tids.tid_tab[i];
1048 
1049 			if (f && (f->valid || f->pending))
1050 				cxgb4_del_filter(dev, f->tid, &f->fs);
1051 		}
1052 	}
1053 }
1054 
1055 /* Fill up default masks for set match fields. */
1056 static void fill_default_mask(struct ch_filter_specification *fs)
1057 {
1058 	unsigned int lip = 0, lip_mask = 0;
1059 	unsigned int fip = 0, fip_mask = 0;
1060 	unsigned int i;
1061 
1062 	if (fs->val.iport && !fs->mask.iport)
1063 		fs->mask.iport |= ~0;
1064 	if (fs->val.fcoe && !fs->mask.fcoe)
1065 		fs->mask.fcoe |= ~0;
1066 	if (fs->val.matchtype && !fs->mask.matchtype)
1067 		fs->mask.matchtype |= ~0;
1068 	if (fs->val.macidx && !fs->mask.macidx)
1069 		fs->mask.macidx |= ~0;
1070 	if (fs->val.ethtype && !fs->mask.ethtype)
1071 		fs->mask.ethtype |= ~0;
1072 	if (fs->val.ivlan && !fs->mask.ivlan)
1073 		fs->mask.ivlan |= ~0;
1074 	if (fs->val.ovlan && !fs->mask.ovlan)
1075 		fs->mask.ovlan |= ~0;
1076 	if (fs->val.frag && !fs->mask.frag)
1077 		fs->mask.frag |= ~0;
1078 	if (fs->val.tos && !fs->mask.tos)
1079 		fs->mask.tos |= ~0;
1080 	if (fs->val.proto && !fs->mask.proto)
1081 		fs->mask.proto |= ~0;
1082 	if (fs->val.pfvf_vld && !fs->mask.pfvf_vld)
1083 		fs->mask.pfvf_vld |= ~0;
1084 	if (fs->val.pf && !fs->mask.pf)
1085 		fs->mask.pf |= ~0;
1086 	if (fs->val.vf && !fs->mask.vf)
1087 		fs->mask.vf |= ~0;
1088 
1089 	for (i = 0; i < ARRAY_SIZE(fs->val.lip); i++) {
1090 		lip |= fs->val.lip[i];
1091 		lip_mask |= fs->mask.lip[i];
1092 		fip |= fs->val.fip[i];
1093 		fip_mask |= fs->mask.fip[i];
1094 	}
1095 
1096 	if (lip && !lip_mask)
1097 		memset(fs->mask.lip, ~0, sizeof(fs->mask.lip));
1098 
1099 	if (fip && !fip_mask)
1100 		memset(fs->mask.fip, ~0, sizeof(fs->mask.lip));
1101 
1102 	if (fs->val.lport && !fs->mask.lport)
1103 		fs->mask.lport = ~0;
1104 	if (fs->val.fport && !fs->mask.fport)
1105 		fs->mask.fport = ~0;
1106 }
1107 
1108 static bool is_addr_all_mask(u8 *ipmask, int family)
1109 {
1110 	if (family == AF_INET) {
1111 		struct in_addr *addr;
1112 
1113 		addr = (struct in_addr *)ipmask;
1114 		if (addr->s_addr == htonl(0xffffffff))
1115 			return true;
1116 	} else if (family == AF_INET6) {
1117 		struct in6_addr *addr6;
1118 
1119 		addr6 = (struct in6_addr *)ipmask;
1120 		if (addr6->s6_addr32[0] == htonl(0xffffffff) &&
1121 		    addr6->s6_addr32[1] == htonl(0xffffffff) &&
1122 		    addr6->s6_addr32[2] == htonl(0xffffffff) &&
1123 		    addr6->s6_addr32[3] == htonl(0xffffffff))
1124 			return true;
1125 	}
1126 	return false;
1127 }
1128 
1129 static bool is_inaddr_any(u8 *ip, int family)
1130 {
1131 	int addr_type;
1132 
1133 	if (family == AF_INET) {
1134 		struct in_addr *addr;
1135 
1136 		addr = (struct in_addr *)ip;
1137 		if (addr->s_addr == htonl(INADDR_ANY))
1138 			return true;
1139 	} else if (family == AF_INET6) {
1140 		struct in6_addr *addr6;
1141 
1142 		addr6 = (struct in6_addr *)ip;
1143 		addr_type = ipv6_addr_type((const struct in6_addr *)
1144 					   addr6);
1145 		if (addr_type == IPV6_ADDR_ANY)
1146 			return true;
1147 	}
1148 	return false;
1149 }
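/* Decide whether a filter spec can be installed in the exact-match (HASH)
 * region: there must be free hash TIDs, the addresses and ports must be
 * fully specified, and the set of masked tuple fields must match the
 * hash-filter mask configured in hardware.
 */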
1150 
1151 bool is_filter_exact_match(struct adapter *adap,
1152 			   struct ch_filter_specification *fs)
1153 {
1154 	struct tp_params *tp = &adap->params.tp;
1155 	u64 hash_filter_mask = tp->hash_filter_mask;
1156 	u64 ntuple_mask = 0;
1157 
1158 	if (!is_hashfilter(adap))
1159 		return false;
1160 
1161 	if ((atomic_read(&adap->tids.hash_tids_in_use) +
1162 	     atomic_read(&adap->tids.tids_in_use)) >=
1163 	    (adap->tids.nhash + (adap->tids.stid_base - adap->tids.tid_base)))
1164 		return false;
1165 
1166 	 /* Keep tunnel VNI match disabled for hash-filters for now */
1167 	if (fs->mask.encap_vld)
1168 		return false;
1169 
1170 	if (fs->type) {
1171 		if (is_inaddr_any(fs->val.fip, AF_INET6) ||
1172 		    !is_addr_all_mask(fs->mask.fip, AF_INET6))
1173 			return false;
1174 
1175 		if (is_inaddr_any(fs->val.lip, AF_INET6) ||
1176 		    !is_addr_all_mask(fs->mask.lip, AF_INET6))
1177 			return false;
1178 	} else {
1179 		if (is_inaddr_any(fs->val.fip, AF_INET) ||
1180 		    !is_addr_all_mask(fs->mask.fip, AF_INET))
1181 			return false;
1182 
1183 		if (is_inaddr_any(fs->val.lip, AF_INET) ||
1184 		    !is_addr_all_mask(fs->mask.lip, AF_INET))
1185 			return false;
1186 	}
1187 
1188 	if (!fs->val.lport || fs->mask.lport != 0xffff)
1189 		return false;
1190 
1191 	if (!fs->val.fport || fs->mask.fport != 0xffff)
1192 		return false;
1193 
1194 	/* calculate tuple mask and compare with mask configured in hw */
1195 	if (tp->fcoe_shift >= 0)
1196 		ntuple_mask |= (u64)fs->mask.fcoe << tp->fcoe_shift;
1197 
1198 	if (tp->port_shift >= 0)
1199 		ntuple_mask |= (u64)fs->mask.iport << tp->port_shift;
1200 
1201 	if (tp->vnic_shift >= 0) {
1202 		if ((adap->params.tp.ingress_config & VNIC_F))
1203 			ntuple_mask |= (u64)fs->mask.pfvf_vld << tp->vnic_shift;
1204 		else
1205 			ntuple_mask |= (u64)fs->mask.ovlan_vld <<
1206 				tp->vnic_shift;
1207 	}
1208 
1209 	if (tp->vlan_shift >= 0)
1210 		ntuple_mask |= (u64)fs->mask.ivlan << tp->vlan_shift;
1211 
1212 	if (tp->tos_shift >= 0)
1213 		ntuple_mask |= (u64)fs->mask.tos << tp->tos_shift;
1214 
1215 	if (tp->protocol_shift >= 0)
1216 		ntuple_mask |= (u64)fs->mask.proto << tp->protocol_shift;
1217 
1218 	if (tp->ethertype_shift >= 0)
1219 		ntuple_mask |= (u64)fs->mask.ethtype << tp->ethertype_shift;
1220 
1221 	if (tp->macmatch_shift >= 0)
1222 		ntuple_mask |= (u64)fs->mask.macidx << tp->macmatch_shift;
1223 
1224 	if (tp->matchtype_shift >= 0)
1225 		ntuple_mask |= (u64)fs->mask.matchtype << tp->matchtype_shift;
1226 
1227 	if (tp->frag_shift >= 0)
1228 		ntuple_mask |= (u64)fs->mask.frag << tp->frag_shift;
1229 
1230 	if (ntuple_mask != hash_filter_mask)
1231 		return false;
1232 
1233 	return true;
1234 }
1235 
1236 static u64 hash_filter_ntuple(struct ch_filter_specification *fs,
1237 			      struct net_device *dev)
1238 {
1239 	struct adapter *adap = netdev2adap(dev);
1240 	struct tp_params *tp = &adap->params.tp;
1241 	u64 ntuple = 0;
1242 
1243 	/* Initialize each of the fields which we care about which are present
1244 	 * in the Compressed Filter Tuple.
1245 	 */
1246 	if (tp->vlan_shift >= 0 && fs->mask.ivlan)
1247 		ntuple |= (u64)(FT_VLAN_VLD_F |
1248 				fs->val.ivlan) << tp->vlan_shift;
1249 
1250 	if (tp->port_shift >= 0 && fs->mask.iport)
1251 		ntuple |= (u64)fs->val.iport << tp->port_shift;
1252 
1253 	if (tp->protocol_shift >= 0) {
1254 		if (!fs->val.proto)
1255 			ntuple |= (u64)IPPROTO_TCP << tp->protocol_shift;
1256 		else
1257 			ntuple |= (u64)fs->val.proto << tp->protocol_shift;
1258 	}
1259 
1260 	if (tp->tos_shift >= 0 && fs->mask.tos)
1261 		ntuple |= (u64)(fs->val.tos) << tp->tos_shift;
1262 
1263 	if (tp->vnic_shift >= 0) {
1264 		if ((adap->params.tp.ingress_config & USE_ENC_IDX_F) &&
1265 		    fs->mask.encap_vld)
1266 			ntuple |= (u64)((fs->val.encap_vld << 16) |
1267 					(fs->val.ovlan)) << tp->vnic_shift;
1268 		else if ((adap->params.tp.ingress_config & VNIC_F) &&
1269 			 fs->mask.pfvf_vld)
1270 			ntuple |= (u64)((fs->val.pfvf_vld << 16) |
1271 					(fs->val.pf << 13) |
1272 					(fs->val.vf)) << tp->vnic_shift;
1273 		else
1274 			ntuple |= (u64)((fs->val.ovlan_vld << 16) |
1275 					(fs->val.ovlan)) << tp->vnic_shift;
1276 	}
1277 
1278 	if (tp->macmatch_shift >= 0 && fs->mask.macidx)
1279 		ntuple |= (u64)(fs->val.macidx) << tp->macmatch_shift;
1280 
1281 	if (tp->ethertype_shift >= 0 && fs->mask.ethtype)
1282 		ntuple |= (u64)(fs->val.ethtype) << tp->ethertype_shift;
1283 
1284 	if (tp->matchtype_shift >= 0 && fs->mask.matchtype)
1285 		ntuple |= (u64)(fs->val.matchtype) << tp->matchtype_shift;
1286 
1287 	if (tp->frag_shift >= 0 && fs->mask.frag)
1288 		ntuple |= (u64)(fs->val.frag) << tp->frag_shift;
1289 
1290 	if (tp->fcoe_shift >= 0 && fs->mask.fcoe)
1291 		ntuple |= (u64)(fs->val.fcoe) << tp->fcoe_shift;
1292 	return ntuple;
1293 }
1294 
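/* Build the CPL_ACT_OPEN_REQ6 used to install an IPv6 hash (exact-match)
 * filter, carrying the local/peer addresses and ports, the compressed
 * filter tuple, and the filter's rewrite/steering options in opt0/opt2.
 */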
1295 static void mk_act_open_req6(struct filter_entry *f, struct sk_buff *skb,
1296 			     unsigned int qid_filterid, struct adapter *adap)
1297 {
1298 	struct cpl_t6_act_open_req6 *t6req = NULL;
1299 	struct cpl_act_open_req6 *req = NULL;
1300 
1301 	t6req = (struct cpl_t6_act_open_req6 *)__skb_put(skb, sizeof(*t6req));
1302 	INIT_TP_WR(t6req, 0);
1303 	req = (struct cpl_act_open_req6 *)t6req;
1304 	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6, qid_filterid));
1305 	req->local_port = cpu_to_be16(f->fs.val.lport);
1306 	req->peer_port = cpu_to_be16(f->fs.val.fport);
1307 	req->local_ip_hi = *(__be64 *)(&f->fs.val.lip);
1308 	req->local_ip_lo = *(((__be64 *)&f->fs.val.lip) + 1);
1309 	req->peer_ip_hi = *(__be64 *)(&f->fs.val.fip);
1310 	req->peer_ip_lo = *(((__be64 *)&f->fs.val.fip) + 1);
1311 	req->opt0 = cpu_to_be64(NAGLE_V(f->fs.newvlan == VLAN_REMOVE ||
1312 					f->fs.newvlan == VLAN_REWRITE) |
1313 				DELACK_V(f->fs.hitcnts) |
1314 				L2T_IDX_V(f->l2t ? f->l2t->idx : 0) |
1315 				SMAC_SEL_V((cxgb4_port_viid(f->dev) &
1316 					    0x7F) << 1) |
1317 				TX_CHAN_V(f->fs.eport) |
1318 				NO_CONG_V(f->fs.rpttid) |
1319 				ULP_MODE_V(f->fs.nat_mode ?
1320 					   ULP_MODE_TCPDDP : ULP_MODE_NONE) |
1321 				TCAM_BYPASS_F | NON_OFFLOAD_F);
1322 	t6req->params = cpu_to_be64(FILTER_TUPLE_V(hash_filter_ntuple(&f->fs,
1323 								      f->dev)));
1324 	t6req->opt2 = htonl(RSS_QUEUE_VALID_F |
1325 			    RSS_QUEUE_V(f->fs.iq) |
1326 			    TX_QUEUE_V(f->fs.nat_mode) |
1327 			    T5_OPT_2_VALID_F |
1328 			    RX_CHANNEL_V(cxgb4_port_e2cchan(f->dev)) |
1329 			    PACE_V((f->fs.maskhash) |
1330 				   ((f->fs.dirsteerhash) << 1)));
1331 }
1332 
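/* IPv4 counterpart of mk_act_open_req6(): build the CPL_ACT_OPEN_REQ used
 * to install an IPv4 hash filter.
 */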
1333 static void mk_act_open_req(struct filter_entry *f, struct sk_buff *skb,
1334 			    unsigned int qid_filterid, struct adapter *adap)
1335 {
1336 	struct cpl_t6_act_open_req *t6req = NULL;
1337 	struct cpl_act_open_req *req = NULL;
1338 
1339 	t6req = (struct cpl_t6_act_open_req *)__skb_put(skb, sizeof(*t6req));
1340 	INIT_TP_WR(t6req, 0);
1341 	req = (struct cpl_act_open_req *)t6req;
1342 	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ACT_OPEN_REQ, qid_filterid));
1343 	req->local_port = cpu_to_be16(f->fs.val.lport);
1344 	req->peer_port = cpu_to_be16(f->fs.val.fport);
1345 	memcpy(&req->local_ip, f->fs.val.lip, 4);
1346 	memcpy(&req->peer_ip, f->fs.val.fip, 4);
1347 	req->opt0 = cpu_to_be64(NAGLE_V(f->fs.newvlan == VLAN_REMOVE ||
1348 					f->fs.newvlan == VLAN_REWRITE) |
1349 				DELACK_V(f->fs.hitcnts) |
1350 				L2T_IDX_V(f->l2t ? f->l2t->idx : 0) |
1351 				SMAC_SEL_V((cxgb4_port_viid(f->dev) &
1352 					    0x7F) << 1) |
1353 				TX_CHAN_V(f->fs.eport) |
1354 				NO_CONG_V(f->fs.rpttid) |
1355 				ULP_MODE_V(f->fs.nat_mode ?
1356 					   ULP_MODE_TCPDDP : ULP_MODE_NONE) |
1357 				TCAM_BYPASS_F | NON_OFFLOAD_F);
1358 
1359 	t6req->params = cpu_to_be64(FILTER_TUPLE_V(hash_filter_ntuple(&f->fs,
1360 								      f->dev)));
1361 	t6req->opt2 = htonl(RSS_QUEUE_VALID_F |
1362 			    RSS_QUEUE_V(f->fs.iq) |
1363 			    TX_QUEUE_V(f->fs.nat_mode) |
1364 			    T5_OPT_2_VALID_F |
1365 			    RX_CHANNEL_V(cxgb4_port_e2cchan(f->dev)) |
1366 			    PACE_V((f->fs.maskhash) |
1367 				   ((f->fs.dirsteerhash) << 1)));
1368 }
1369 
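/* Install an exact-match (HASH region) filter: validate the spec, allocate
 * any L2T/SMT/MPS/CLIP resources it needs, then send an ACT_OPEN request.
 * Completion is reported asynchronously through the filter context.
 */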
1370 static int cxgb4_set_hash_filter(struct net_device *dev,
1371 				 struct ch_filter_specification *fs,
1372 				 struct filter_ctx *ctx)
1373 {
1374 	struct adapter *adapter = netdev2adap(dev);
1375 	struct port_info *pi = netdev_priv(dev);
1376 	struct tid_info *t = &adapter->tids;
1377 	struct filter_entry *f;
1378 	struct sk_buff *skb;
1379 	int iq, atid, size;
1380 	int ret = 0;
1381 	u32 iconf;
1382 
1383 	fill_default_mask(fs);
1384 	ret = validate_filter(dev, fs);
1385 	if (ret)
1386 		return ret;
1387 
1388 	iq = get_filter_steerq(dev, fs);
1389 	if (iq < 0)
1390 		return iq;
1391 
1392 	f = kzalloc(sizeof(*f), GFP_KERNEL);
1393 	if (!f)
1394 		return -ENOMEM;
1395 
1396 	f->fs = *fs;
1397 	f->ctx = ctx;
1398 	f->dev = dev;
1399 	f->fs.iq = iq;
1400 
1401 	/* If the new filter requires loopback Destination MAC and/or VLAN
1402 	 * rewriting then we need to allocate a Layer 2 Table (L2T) entry for
1403 	 * the filter.
1404 	 */
1405 	if (f->fs.newdmac || f->fs.newvlan) {
1406 		/* allocate L2T entry for new filter */
1407 		f->l2t = t4_l2t_alloc_switching(adapter, f->fs.vlan,
1408 						f->fs.eport, f->fs.dmac);
1409 		if (!f->l2t) {
1410 			ret = -ENOMEM;
1411 			goto out_err;
1412 		}
1413 	}
1414 
1415 	/* If the new filter requires loopback Source MAC rewriting then
1416 	 * we need to allocate a SMT entry for the filter.
1417 	 */
1418 	if (f->fs.newsmac) {
1419 		f->smt = cxgb4_smt_alloc_switching(f->dev, f->fs.smac);
1420 		if (!f->smt) {
1421 			if (f->l2t) {
1422 				cxgb4_l2t_release(f->l2t);
1423 				f->l2t = NULL;
1424 			}
1425 			ret = -ENOMEM;
1426 			goto free_l2t;
1427 		}
1428 	}
1429 
1430 	atid = cxgb4_alloc_atid(t, f);
1431 	if (atid < 0) {
1432 		ret = atid;
1433 		goto free_smt;
1434 	}
1435 
1436 	iconf = adapter->params.tp.ingress_config;
1437 	if (iconf & VNIC_F) {
1438 		f->fs.val.ovlan = (fs->val.pf << 13) | fs->val.vf;
1439 		f->fs.mask.ovlan = (fs->mask.pf << 13) | fs->mask.vf;
1440 		f->fs.val.ovlan_vld = fs->val.pfvf_vld;
1441 		f->fs.mask.ovlan_vld = fs->mask.pfvf_vld;
1442 	} else if (iconf & USE_ENC_IDX_F) {
1443 		if (f->fs.val.encap_vld) {
1444 			struct port_info *pi = netdev_priv(f->dev);
1445 			static const u8 match_all_mac[] = { 0, 0, 0, 0, 0, 0 };
1446 
1447 			/* allocate MPS TCAM entry */
1448 			ret = t4_alloc_encap_mac_filt(adapter, pi->viid,
1449 						      match_all_mac,
1450 						      match_all_mac,
1451 						      f->fs.val.vni,
1452 						      f->fs.mask.vni,
1453 						      0, 1, 1);
1454 			if (ret < 0)
1455 				goto free_atid;
1456 
1457 			f->fs.val.ovlan = ret;
1458 			f->fs.mask.ovlan = 0xffff;
1459 			f->fs.val.ovlan_vld = 1;
1460 			f->fs.mask.ovlan_vld = 1;
1461 		}
1462 	}
1463 
1464 	size = sizeof(struct cpl_t6_act_open_req);
1465 	if (f->fs.type) {
1466 		ret = cxgb4_clip_get(f->dev, (const u32 *)&f->fs.val.lip, 1);
1467 		if (ret)
1468 			goto free_mps;
1469 
1470 		skb = alloc_skb(size, GFP_KERNEL);
1471 		if (!skb) {
1472 			ret = -ENOMEM;
1473 			goto free_clip;
1474 		}
1475 
1476 		mk_act_open_req6(f, skb,
1477 				 ((adapter->sge.fw_evtq.abs_id << 14) | atid),
1478 				 adapter);
1479 	} else {
1480 		skb = alloc_skb(size, GFP_KERNEL);
1481 		if (!skb) {
1482 			ret = -ENOMEM;
1483 			goto free_mps;
1484 		}
1485 
1486 		mk_act_open_req(f, skb,
1487 				((adapter->sge.fw_evtq.abs_id << 14) | atid),
1488 				adapter);
1489 	}
1490 
1491 	f->pending = 1;
1492 	set_wr_txq(skb, CPL_PRIORITY_SETUP, f->fs.val.iport & 0x3);
1493 	t4_ofld_send(adapter, skb);
1494 	return 0;
1495 
1496 free_clip:
1497 	cxgb4_clip_release(f->dev, (const u32 *)&f->fs.val.lip, 1);
1498 
1499 free_mps:
1500 	if (f->fs.val.encap_vld && f->fs.val.ovlan_vld)
1501 		t4_free_encap_mac_filt(adapter, pi->viid, f->fs.val.ovlan, 1);
1502 
1503 free_atid:
1504 	cxgb4_free_atid(t, atid);
1505 
1506 free_smt:
1507 	if (f->smt) {
1508 		cxgb4_smt_release(f->smt);
1509 		f->smt = NULL;
1510 	}
1511 
1512 free_l2t:
1513 	if (f->l2t) {
1514 		cxgb4_l2t_release(f->l2t);
1515 		f->l2t = NULL;
1516 	}
1517 
1518 out_err:
1519 	kfree(f);
1520 	return ret;
1521 }
1522 
1523 /* Check a Chelsio Filter Request for validity, convert it into our internal
1524  * format and send it to the hardware.  Return 0 on success, an error number
1525  * otherwise.  We attach any provided filter operation context to the internal
1526  * filter specification in order to facilitate signaling completion of the
1527  * operation.
1528  */
1529 int __cxgb4_set_filter(struct net_device *dev, int ftid,
1530 		       struct ch_filter_specification *fs,
1531 		       struct filter_ctx *ctx)
1532 {
1533 	struct adapter *adapter = netdev2adap(dev);
1534 	unsigned int max_fidx, fidx, chip_ver;
1535 	int iq, ret, filter_id = ftid;
1536 	struct filter_entry *f, *tab;
1537 	u32 iconf;
1538 
1539 	chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);
1540 	if (fs->hash) {
1541 		if (is_hashfilter(adapter))
1542 			return cxgb4_set_hash_filter(dev, fs, ctx);
1543 		netdev_err(dev, "%s: Exact-match filters only supported with Hash Filter configuration\n",
1544 			   __func__);
1545 		return -EINVAL;
1546 	}
1547 
1548 	max_fidx = adapter->tids.nftids + adapter->tids.nhpftids;
1549 	if (filter_id != (max_fidx + adapter->tids.nsftids - 1) &&
1550 	    filter_id >= max_fidx)
1551 		return -E2BIG;
1552 
1553 	fill_default_mask(fs);
1554 
1555 	ret = validate_filter(dev, fs);
1556 	if (ret)
1557 		return ret;
1558 
1559 	iq = get_filter_steerq(dev, fs);
1560 	if (iq < 0)
1561 		return iq;
1562 
1563 	if (fs->prio) {
1564 		tab = &adapter->tids.hpftid_tab[0];
1565 	} else {
1566 		tab = &adapter->tids.ftid_tab[0];
1567 		filter_id = ftid - adapter->tids.nhpftids;
1568 	}
1569 
1570 	/* IPv6 filters occupy four slots and must be aligned on
1571 	 * four-slot boundaries.  IPv4 filters only occupy a single
1572 	 * slot and have no alignment requirements but writing a new
1573 	 * IPv4 filter into the middle of an existing IPv6 filter
1574 	 * requires clearing the old IPv6 filter and hence we prevent
1575 	 * insertion.
1576 	 */
1577 	if (fs->type == 0) { /* IPv4 */
1578 		/* An IPv6 filter occupies two slots on T6 and four slots on
1579 		 * earlier adapters.  If our IPv4 filter index falls inside
1580 		 * such a region and the base slot holds a valid IPv6 filter,
1581 		 * reject the insertion until that IPv6 filter is removed.
1582 		 */
1585 		if (chip_ver < CHELSIO_T6)
1586 			fidx = filter_id & ~0x3;
1587 		else
1588 			fidx = filter_id & ~0x1;
1589 
1590 		if (fidx != filter_id && tab[fidx].fs.type) {
1591 			f = &tab[fidx];
1592 			if (f->valid) {
1593 				dev_err(adapter->pdev_dev,
1594 					"Invalid location. IPv6 requires 4 slots and is occupying slots %u to %u\n",
1595 					fidx, fidx + 3);
1596 				return -EINVAL;
1597 			}
1598 		}
1599 	} else { /* IPv6 */
1600 		if (chip_ver < CHELSIO_T6) {
1601 			/* Ensure that the IPv6 filter is aligned on a
1602 			 * multiple of 4 boundary.
1603 			 */
1604 			if (filter_id & 0x3) {
1605 				dev_err(adapter->pdev_dev,
1606 					"Invalid location. IPv6 must be aligned on a 4-slot boundary\n");
1607 				return -EINVAL;
1608 			}
1609 
1610 			/* Check all except the base overlapping IPv4 filter
1611 			 * slots.
1612 			 */
1613 			for (fidx = filter_id + 1; fidx < filter_id + 4;
1614 			     fidx++) {
1615 				f = &tab[fidx];
1616 				if (f->valid) {
1617 					dev_err(adapter->pdev_dev,
1618 						"Invalid location.  IPv6 requires 4 slots and an IPv4 filter exists at %u\n",
1619 						fidx);
1620 					return -EBUSY;
1621 				}
1622 			}
1623 		} else {
1624 			/* On T6, with CLIP enabled, an IPv6 filter occupies
1625 			 * only 2 entries and must start on an even index.
1626 			 */
1627 			if (filter_id & 0x1)
1628 				return -EINVAL;
1629 			/* Check overlapping IPv4 filter slot */
1630 			fidx = filter_id + 1;
1631 			f = &tab[fidx];
1632 			if (f->valid) {
1633 				pr_err("%s: IPv6 filter requires 2 indices. IPv4 filter already present at %d. Please remove IPv4 filter first.\n",
1634 				       __func__, fidx);
1635 				return -EBUSY;
1636 			}
1637 		}
1638 	}
1639 
1640 	/* Check to make sure that the provided filter index is not
1641 	 * already in use by someone else.
1642 	 */
1643 	f = &tab[filter_id];
1644 	if (f->valid)
1645 		return -EBUSY;
1646 
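	/* Reserve the chosen slot(s) in the corresponding TID bitmap and
	 * compute the absolute hardware filter index from the region base;
	 * an IPv6 filter is expected to reserve multiple consecutive slots.
	 */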
1647 	if (fs->prio) {
1648 		fidx = filter_id + adapter->tids.hpftid_base;
1649 		ret = cxgb4_set_hpftid(&adapter->tids, filter_id,
1650 				       fs->type ? PF_INET6 : PF_INET);
1651 	} else {
1652 		fidx = filter_id + adapter->tids.ftid_base;
1653 		ret = cxgb4_set_ftid(&adapter->tids, filter_id,
1654 				     fs->type ? PF_INET6 : PF_INET,
1655 				     chip_ver);
1656 	}
1657 
1658 	if (ret)
1659 		return ret;
1660 
1661 	/* Check to make sure the requested filter is writable ... */
1662 	ret = writable_filter(f);
1663 	if (ret)
1664 		goto free_tid;
1665 
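	/* On T6, an IPv6 filter refers to its local IP through the
	 * Compressed Local IP (CLIP) table, so take a CLIP reference for
	 * any non-wildcard local address.
	 */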
1666 	if (is_t6(adapter->params.chip) && fs->type &&
1667 	    ipv6_addr_type((const struct in6_addr *)fs->val.lip) !=
1668 	    IPV6_ADDR_ANY) {
1669 		ret = cxgb4_clip_get(dev, (const u32 *)&fs->val.lip, 1);
1670 		if (ret)
1671 			goto free_tid;
1672 	}
1673 
1674 	/* Convert the filter specification into our internal format.
1675 	 * We copy the PF/VF specification into the Outer VLAN field
1676 	 * here so the rest of the code -- including the interface to
1677 	 * the firmware -- doesn't have to constantly do these checks.
1678 	 */
1679 	f->fs = *fs;
1680 	f->fs.iq = iq;
1681 	f->dev = dev;
1682 
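	/* Depending on the TP ingress configuration, the outer-VLAN match
	 * field is overloaded to carry either the PF/VF of a VNIC match or
	 * the MPS TCAM index allocated for an encapsulation (VNI) match.
	 */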
1683 	iconf = adapter->params.tp.ingress_config;
1684 	if (iconf & VNIC_F) {
1685 		f->fs.val.ovlan = (fs->val.pf << 13) | fs->val.vf;
1686 		f->fs.mask.ovlan = (fs->mask.pf << 13) | fs->mask.vf;
1687 		f->fs.val.ovlan_vld = fs->val.pfvf_vld;
1688 		f->fs.mask.ovlan_vld = fs->mask.pfvf_vld;
1689 	} else if (iconf & USE_ENC_IDX_F) {
1690 		if (f->fs.val.encap_vld) {
1691 			struct port_info *pi = netdev_priv(f->dev);
1692 			static const u8 match_all_mac[] = { 0, 0, 0, 0, 0, 0 };
1693 
1694 			/* allocate MPS TCAM entry */
1695 			ret = t4_alloc_encap_mac_filt(adapter, pi->viid,
1696 						      match_all_mac,
1697 						      match_all_mac,
1698 						      f->fs.val.vni,
1699 						      f->fs.mask.vni,
1700 						      0, 1, 1);
1701 			if (ret < 0)
1702 				goto free_tid;
1703 
1704 			f->fs.val.ovlan = ret;
1705 			f->fs.mask.ovlan = 0x1ff;
1706 			f->fs.val.ovlan_vld = 1;
1707 			f->fs.mask.ovlan_vld = 1;
1708 		}
1709 	}
1710 
1711 	/* Attempt to set the filter.  If we don't succeed, we clear
1712 	 * it and return the failure.
1713 	 */
1714 	f->ctx = ctx;
1715 	f->tid = fidx; /* Save the actual tid */
1716 	ret = set_filter_wr(adapter, ftid);
1717 	if (ret)
1718 		goto free_tid;
1719 
1720 	return ret;
1721 
1722 free_tid:
1723 	if (f->fs.prio)
1724 		cxgb4_clear_hpftid(&adapter->tids, filter_id,
1725 				   fs->type ? PF_INET6 : PF_INET);
1726 	else
1727 		cxgb4_clear_ftid(&adapter->tids, filter_id,
1728 				 fs->type ? PF_INET6 : PF_INET,
1729 				 chip_ver);
1730 
1731 	clear_filter(adapter, f);
1732 	return ret;
1733 }
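
/* Illustrative sketch (not part of the driver logic): a caller would
 * typically fill in a ch_filter_specification and use the synchronous
 * wrappers below.  The field values here are hypothetical and only show
 * the intended flow, e.g. dropping IPv4 TCP traffic to local port 80 on
 * filter slot 0:
 *
 *	struct ch_filter_specification fs = { 0 };
 *	int err;
 *
 *	fs.type = 0;                    // IPv4 filter
 *	fs.val.proto = IPPROTO_TCP;
 *	fs.mask.proto = ~0;
 *	fs.val.lport = 80;
 *	fs.mask.lport = ~0;
 *	fs.action = FILTER_DROP;
 *
 *	err = cxgb4_set_filter(netdev, 0, &fs);   // blocks for completion
 *	...
 *	err = cxgb4_del_filter(netdev, 0, &fs);
 */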
1734 
1735 static int cxgb4_del_hash_filter(struct net_device *dev, int filter_id,
1736 				 struct filter_ctx *ctx)
1737 {
1738 	struct adapter *adapter = netdev2adap(dev);
1739 	struct tid_info *t = &adapter->tids;
1740 	struct cpl_abort_req *abort_req;
1741 	struct cpl_abort_rpl *abort_rpl;
1742 	struct cpl_set_tcb_field *req;
1743 	struct ulptx_idata *aligner;
1744 	struct work_request_hdr *wr;
1745 	struct filter_entry *f;
1746 	struct sk_buff *skb;
1747 	unsigned int wrlen;
1748 	int ret;
1749 
1750 	netdev_dbg(dev, "%s: filter_id = %d ; nftids = %d\n",
1751 		   __func__, filter_id, adapter->tids.nftids);
1752 
1753 	if (tid_out_of_range(t, filter_id))
1754 		return -E2BIG;
1755 
1756 	f = lookup_tid(t, filter_id);
1757 	if (!f) {
1758 		netdev_err(dev, "%s: no filter entry for filter_id = %d\n",
1759 			   __func__, filter_id);
1760 		return -EINVAL;
1761 	}
1762 
1763 	ret = writable_filter(f);
1764 	if (ret)
1765 		return ret;
1766 
1767 	if (!f->valid)
1768 		return -EINVAL;
1769 
1770 	f->ctx = ctx;
1771 	f->pending = 1;
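	/* A hash filter is torn down with a single ULP_TX work request that
	 * carries a SET_TCB_FIELD (pointing the tid's RSS info at the
	 * firmware event queue), an alignment ULPTX idata, and an
	 * ABORT_REQ/ABORT_RPL pair for the filter's tid.
	 */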
1772 	wrlen = roundup(sizeof(*wr) + (sizeof(*req) + sizeof(*aligner))
1773 			+ sizeof(*abort_req) + sizeof(*abort_rpl), 16);
1774 	skb = alloc_skb(wrlen, GFP_KERNEL);
1775 	if (!skb) {
1776 		netdev_err(dev, "%s: could not allocate skb\n", __func__);
1777 		return -ENOMEM;
1778 	}
1779 	set_wr_txq(skb, CPL_PRIORITY_CONTROL, f->fs.val.iport & 0x3);
1780 	req = (struct cpl_set_tcb_field *)__skb_put(skb, wrlen);
1781 	INIT_ULPTX_WR(req, wrlen, 0, 0);
1782 	wr = (struct work_request_hdr *)req;
1783 	wr++;
1784 	req = (struct cpl_set_tcb_field *)wr;
1785 	mk_set_tcb_ulp(f, req, TCB_RSS_INFO_W, TCB_RSS_INFO_V(TCB_RSS_INFO_M),
1786 		       TCB_RSS_INFO_V(adapter->sge.fw_evtq.abs_id), 0, 1);
1787 	aligner = (struct ulptx_idata *)(req + 1);
1788 	abort_req = (struct cpl_abort_req *)(aligner + 1);
1789 	mk_abort_req_ulp(abort_req, f->tid);
1790 	abort_rpl = (struct cpl_abort_rpl *)(abort_req + 1);
1791 	mk_abort_rpl_ulp(abort_rpl, f->tid);
1792 	t4_ofld_send(adapter, skb);
1793 	return 0;
1794 }
1795 
1796 /* Check a delete filter request for validity and send it to the hardware.
1797  * Return 0 on success, an error number otherwise.  We attach any provided
1798  * filter operation context to the internal filter specification in order to
1799  * facilitate signaling completion of the operation.
1800  */
1801 int __cxgb4_del_filter(struct net_device *dev, int filter_id,
1802 		       struct ch_filter_specification *fs,
1803 		       struct filter_ctx *ctx)
1804 {
1805 	struct adapter *adapter = netdev2adap(dev);
1806 	unsigned int max_fidx, chip_ver;
1807 	struct filter_entry *f;
1808 	int ret;
1809 
1810 	chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);
1811 	if (fs && fs->hash) {
1812 		if (is_hashfilter(adapter))
1813 			return cxgb4_del_hash_filter(dev, filter_id, ctx);
1814 		netdev_err(dev, "%s: Exact-match filters only supported with Hash Filter configuration\n",
1815 			   __func__);
1816 		return -EINVAL;
1817 	}
1818 
1819 	max_fidx = adapter->tids.nftids + adapter->tids.nhpftids;
1820 	if (filter_id != (max_fidx + adapter->tids.nsftids - 1) &&
1821 	    filter_id >= max_fidx)
1822 		return -E2BIG;
1823 
1824 	if (filter_id < adapter->tids.nhpftids)
1825 		f = &adapter->tids.hpftid_tab[filter_id];
1826 	else
1827 		f = &adapter->tids.ftid_tab[filter_id - adapter->tids.nhpftids];
1828 
1829 	ret = writable_filter(f);
1830 	if (ret)
1831 		return ret;
1832 
1833 	if (f->valid) {
1834 		f->ctx = ctx;
1835 		if (f->fs.prio)
1836 			cxgb4_clear_hpftid(&adapter->tids,
1837 					   f->tid - adapter->tids.hpftid_base,
1838 					   f->fs.type ? PF_INET6 : PF_INET);
1839 		else
1840 			cxgb4_clear_ftid(&adapter->tids,
1841 					 f->tid - adapter->tids.ftid_base,
1842 					 f->fs.type ? PF_INET6 : PF_INET,
1843 					 chip_ver);
1844 		return del_filter_wr(adapter, filter_id);
1845 	}
1846 
1847 	/* If the caller has passed in a Completion Context then we need to
1848 	 * mark it as a successful completion so they don't stall waiting
1849 	 * for it.
1850 	 */
1851 	if (ctx) {
1852 		ctx->result = 0;
1853 		complete(&ctx->completion);
1854 	}
1855 	return ret;
1856 }
1857 
1858 int cxgb4_set_filter(struct net_device *dev, int filter_id,
1859 		     struct ch_filter_specification *fs)
1860 {
1861 	struct filter_ctx ctx;
1862 	int ret;
1863 
1864 	init_completion(&ctx.completion);
1865 
1866 	ret = __cxgb4_set_filter(dev, filter_id, fs, &ctx);
1867 	if (ret)
1868 		goto out;
1869 
1870 	/* Wait for reply */
1871 	ret = wait_for_completion_timeout(&ctx.completion, 10 * HZ);
1872 	if (!ret)
1873 		return -ETIMEDOUT;
1874 
1875 	ret = ctx.result;
1876 out:
1877 	return ret;
1878 }
1879 
1880 int cxgb4_del_filter(struct net_device *dev, int filter_id,
1881 		     struct ch_filter_specification *fs)
1882 {
1883 	struct filter_ctx ctx;
1884 	int ret;
1885 
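	/* If the adapter is being torn down, the filter state is going away
	 * with it, so treat the delete as an immediate success.
	 */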
1886 	if (netdev2adap(dev)->flags & CXGB4_SHUTTING_DOWN)
1887 		return 0;
1888 
1889 	init_completion(&ctx.completion);
1890 
1891 	ret = __cxgb4_del_filter(dev, filter_id, fs, &ctx);
1892 	if (ret)
1893 		goto out;
1894 
1895 	/* Wait for reply */
1896 	ret = wait_for_completion_timeout(&ctx.completion, 10 * HZ);
1897 	if (!ret)
1898 		return -ETIMEDOUT;
1899 
1900 	ret = ctx.result;
1901 out:
1902 	return ret;
1903 }
1904 
1905 static int configure_filter_tcb(struct adapter *adap, unsigned int tid,
1906 				struct filter_entry *f)
1907 {
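	/* When hit counters are requested, the TCB timestamp and RTT age
	 * words are reused as counters, so clear them when the filter is
	 * instantiated.
	 */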
1908 	if (f->fs.hitcnts) {
1909 		set_tcb_field(adap, f, tid, TCB_TIMESTAMP_W,
1910 			      TCB_TIMESTAMP_V(TCB_TIMESTAMP_M),
1911 			      TCB_TIMESTAMP_V(0ULL),
1912 			      1);
1913 		set_tcb_field(adap, f, tid, TCB_RTT_TS_RECENT_AGE_W,
1914 			      TCB_RTT_TS_RECENT_AGE_V(TCB_RTT_TS_RECENT_AGE_M),
1915 			      TCB_RTT_TS_RECENT_AGE_V(0ULL),
1916 			      1);
1917 	}
1918 
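	/* In filter-mode TCBs these t_flags bits are overloaded (assumed
	 * from the usage below): ECE enables destination-MAC rewrite and
	 * RFR enables VLAN insert/rewrite.
	 */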
1919 	if (f->fs.newdmac)
1920 		set_tcb_tflag(adap, f, tid, TF_CCTRL_ECE_S, 1,
1921 			      1);
1922 
1923 	if (f->fs.newvlan == VLAN_INSERT ||
1924 	    f->fs.newvlan == VLAN_REWRITE)
1925 		set_tcb_tflag(adap, f, tid, TF_CCTRL_RFR_S, 1,
1926 			      1);
1927 	if (f->fs.newsmac)
1928 		configure_filter_smac(adap, f);
1929 
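	/* set_nat_params() is assumed to take, in order, whether to rewrite
	 * the destination IP, source IP, destination port and source port,
	 * matching the NAT mode decoding below.
	 */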
1930 	if (f->fs.nat_mode) {
1931 		switch (f->fs.nat_mode) {
1932 		case NAT_MODE_DIP:
1933 			set_nat_params(adap, f, tid, true, false, false, false);
1934 			break;
1935 
1936 		case NAT_MODE_DIP_DP:
1937 			set_nat_params(adap, f, tid, true, false, true, false);
1938 			break;
1939 
1940 		case NAT_MODE_DIP_DP_SIP:
1941 			set_nat_params(adap, f, tid, true, true, true, false);
1942 			break;
1943 		case NAT_MODE_DIP_DP_SP:
1944 			set_nat_params(adap, f, tid, true, false, true, true);
1945 			break;
1946 
1947 		case NAT_MODE_SIP_SP:
1948 			set_nat_params(adap, f, tid, false, true, false, true);
1949 			break;
1950 
1951 		case NAT_MODE_DIP_SIP_SP:
1952 			set_nat_params(adap, f, tid, true, true, false, true);
1953 			break;
1954 
1955 		case NAT_MODE_ALL:
1956 			set_nat_params(adap, f, tid, true, true, true, true);
1957 			break;
1958 
1959 		default:
1960 			pr_err("%s: Invalid NAT mode: %d\n",
1961 			       __func__, f->fs.nat_mode);
1962 			return -EINVAL;
1963 		}
1964 	}
1965 	return 0;
1966 }
1967 
1968 void hash_del_filter_rpl(struct adapter *adap,
1969 			 const struct cpl_abort_rpl_rss *rpl)
1970 {
1971 	unsigned int status = rpl->status;
1972 	struct tid_info *t = &adap->tids;
1973 	unsigned int tid = GET_TID(rpl);
1974 	struct filter_ctx *ctx = NULL;
1975 	struct filter_entry *f;
1976 
1977 	dev_dbg(adap->pdev_dev, "%s: status = %u; tid = %u\n",
1978 		__func__, status, tid);
1979 
1980 	f = lookup_tid(t, tid);
1981 	if (!f) {
1982 		dev_err(adap->pdev_dev, "%s: could not find filter entry\n",
1983 			__func__);
1984 		return;
1985 	}
1986 	ctx = f->ctx;
1987 	f->ctx = NULL;
1988 	clear_filter(adap, f);
1989 	cxgb4_remove_tid(t, 0, tid, 0);
1990 	kfree(f);
1991 	if (ctx) {
1992 		ctx->result = 0;
1993 		complete(&ctx->completion);
1994 	}
1995 }
1996 
1997 void hash_filter_rpl(struct adapter *adap, const struct cpl_act_open_rpl *rpl)
1998 {
1999 	unsigned int ftid = TID_TID_G(AOPEN_ATID_G(ntohl(rpl->atid_status)));
2000 	unsigned int status  = AOPEN_STATUS_G(ntohl(rpl->atid_status));
2001 	struct tid_info *t = &adap->tids;
2002 	unsigned int tid = GET_TID(rpl);
2003 	struct filter_ctx *ctx = NULL;
2004 	struct filter_entry *f;
2005 
2006 	dev_dbg(adap->pdev_dev, "%s: tid = %u; atid = %u; status = %u\n",
2007 		__func__, tid, ftid, status);
2008 
2009 	f = lookup_atid(t, ftid);
2010 	if (!f) {
2011 		dev_err(adap->pdev_dev, "%s: could not find filter entry\n",
2012 			__func__);
2013 		return;
2014 	}
2015 	ctx = f->ctx;
2016 	f->ctx = NULL;
2017 
2018 	switch (status) {
2019 	case CPL_ERR_NONE:
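		/* The filter now belongs to the hardware tid: move it from
		 * the atid table into the tid table and finish programming
		 * the TCB.
		 */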
2020 		f->tid = tid;
2021 		f->pending = 0;
2022 		f->valid = 1;
2023 		cxgb4_insert_tid(t, f, f->tid, 0);
2024 		cxgb4_free_atid(t, ftid);
2025 		if (ctx) {
2026 			ctx->tid = f->tid;
2027 			ctx->result = 0;
2028 		}
2029 		if (configure_filter_tcb(adap, tid, f)) {
2030 			clear_filter(adap, f);
2031 			cxgb4_remove_tid(t, 0, tid, 0);
2032 			kfree(f);
2033 			if (ctx) {
2034 				ctx->result = -EINVAL;
2035 				complete(&ctx->completion);
2036 			}
2037 			return;
2038 		}
2039 		switch (f->fs.action) {
2040 		case FILTER_PASS:
2041 			if (f->fs.dirsteer)
2042 				set_tcb_tflag(adap, f, tid,
2043 					      TF_DIRECT_STEER_S, 1, 1);
2044 			break;
2045 		case FILTER_DROP:
2046 			set_tcb_tflag(adap, f, tid, TF_DROP_S, 1, 1);
2047 			break;
2048 		case FILTER_SWITCH:
2049 			set_tcb_tflag(adap, f, tid, TF_LPBK_S, 1, 1);
2050 			break;
2051 		}
2052 
2053 		break;
2054 
2055 	default:
2056 		if (status != CPL_ERR_TCAM_FULL)
2057 			dev_err(adap->pdev_dev, "%s: filter creation PROBLEM; status = %u\n",
2058 				__func__, status);
2059 
2060 		if (ctx) {
2061 			if (status == CPL_ERR_TCAM_FULL)
2062 				ctx->result = -ENOSPC;
2063 			else
2064 				ctx->result = -EINVAL;
2065 		}
2066 		clear_filter(adap, f);
2067 		cxgb4_free_atid(t, ftid);
2068 		kfree(f);
2069 	}
2070 	if (ctx)
2071 		complete(&ctx->completion);
2072 }
2073 
2074 /* Handle a filter write/deletion reply. */
2075 void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl)
2076 {
2077 	unsigned int tid = GET_TID(rpl);
2078 	struct filter_entry *f = NULL;
2079 	unsigned int max_fidx;
2080 	int idx;
2081 
2082 	max_fidx = adap->tids.nftids + adap->tids.nsftids;
2083 	/* Get the corresponding filter entry for this tid */
2084 	if (adap->tids.ftid_tab) {
2085 		idx = tid - adap->tids.hpftid_base;
2086 		if (idx < adap->tids.nhpftids) {
2087 			f = &adap->tids.hpftid_tab[idx];
2088 		} else {
2089 			/* Check this in normal filter region */
2090 			idx = tid - adap->tids.ftid_base;
2091 			if (idx >= max_fidx)
2092 				return;
2093 			f = &adap->tids.ftid_tab[idx];
2094 			idx += adap->tids.nhpftids;
2095 		}
2096 
2097 		if (f->tid != tid)
2098 			return;
2099 	}
2100 
2101 	/* We found the filter entry for this tid */
2102 	if (f) {
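		/* The TCB cookie carries the FW_FILTER_WR completion status
		 * of the original work request.
		 */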
2103 		unsigned int ret = TCB_COOKIE_G(rpl->cookie);
2104 		struct filter_ctx *ctx;
2105 
2106 		/* Pull off any filter operation context attached to the
2107 		 * filter.
2108 		 */
2109 		ctx = f->ctx;
2110 		f->ctx = NULL;
2111 
2112 		if (ret == FW_FILTER_WR_FLT_DELETED) {
2113 			/* Clear the filter when we get confirmation from the
2114 			 * hardware that the filter has been deleted.
2115 			 */
2116 			clear_filter(adap, f);
2117 			if (ctx)
2118 				ctx->result = 0;
2119 		} else if (ret == FW_FILTER_WR_FLT_ADDED) {
2120 			f->pending = 0;  /* async setup completed */
2121 			f->valid = 1;
2122 			if (ctx) {
2123 				ctx->result = 0;
2124 				ctx->tid = idx;
2125 			}
2126 		} else {
2127 			/* Something went wrong.  Issue a warning about the
2128 			 * problem and clear everything out.
2129 			 */
2130 			dev_err(adap->pdev_dev, "filter %u setup failed with error %u\n",
2131 				idx, ret);
2132 			clear_filter(adap, f);
2133 			if (ctx)
2134 				ctx->result = -EINVAL;
2135 		}
2136 		if (ctx)
2137 			complete(&ctx->completion);
2138 	}
2139 }
2140 
2141 void init_hash_filter(struct adapter *adap)
2142 {
2143 	u32 reg;
2144 
2145 	/* On T6, verify the necessary register configuration and warn the
2146 	 * user in case of an improper config.
2147 	 */
2148 	if (is_t6(adap->params.chip)) {
2149 		if (is_offload(adap)) {
2150 			if (!(t4_read_reg(adap, TP_GLOBAL_CONFIG_A)
2151 			   & ACTIVEFILTERCOUNTS_F)) {
2152 				dev_err(adap->pdev_dev, "Invalid hash filter + ofld config\n");
2153 				return;
2154 			}
2155 		} else {
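			/* Without offload, hash-filter mode relies on the LE
			 * response-code routing: both TCAM and hash active-
			 * region hits are expected to map to response code 4.
			 */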
2156 			reg = t4_read_reg(adap, LE_DB_RSP_CODE_0_A);
2157 			if (TCAM_ACTV_HIT_G(reg) != 4) {
2158 				dev_err(adap->pdev_dev, "Invalid hash filter config\n");
2159 				return;
2160 			}
2161 
2162 			reg = t4_read_reg(adap, LE_DB_RSP_CODE_1_A);
2163 			if (HASH_ACTV_HIT_G(reg) != 4) {
2164 				dev_err(adap->pdev_dev, "Invalid hash filter config\n");
2165 				return;
2166 			}
2167 		}
2168 
2169 	} else {
2170 		dev_err(adap->pdev_dev, "Hash filter supported only on T6\n");
2171 		return;
2172 	}
2173 
2174 	adap->params.hash_filter = 1;
2175 }
2176