/*
 * Copyright (c) 2016 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef __LIBCXGB_CM_H__
#define __LIBCXGB_CM_H__

#include <net/tcp.h>

#include <cxgb4.h>
#include <t4_msg.h>
#include <l2t.h>

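/*
 * Connection-setup helpers shared by the cxgb4-based upper-layer drivers:
 * cxgb_get_4tuple() extracts the local/peer IP addresses and ports from a
 * CPL_PASS_ACCEPT_REQ, while cxgb_find_route() and cxgb_find_route6()
 * look up the IPv4/IPv6 route (dst_entry) for an offloaded connection.
 */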
void
cxgb_get_4tuple(struct cpl_pass_accept_req *, enum chip_type,
		int *, __u8 *, __u8 *, __be16 *, __be16 *);
struct dst_entry *
cxgb_find_route(struct cxgb4_lld_info *,
		struct net_device *(*)(struct net_device *),
		__be32, __be32, __be16, __be16, u8);
struct dst_entry *
cxgb_find_route6(struct cxgb4_lld_info *,
		 struct net_device *(*)(struct net_device *),
		 __u8 *, __u8 *, __be16, __be16, u8, __u32);

/* Returns whether a CPL status conveys negative advice. */
static inline bool cxgb_is_neg_adv(unsigned int status)
{
	return status == CPL_ERR_RTX_NEG_ADVICE ||
	       status == CPL_ERR_PERSIST_NEG_ADVICE ||
	       status == CPL_ERR_KEEPALV_NEG_ADVICE;
}

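/*
 * Choose the best entry in the adapter's MTU table for a path MTU of @mtu:
 * subtract the IPv4/IPv6 and TCP header overhead (plus the TCP timestamp
 * option when @use_ts is set) and let cxgb4 pick an MTU whose TCP payload
 * size is 8-byte aligned.  The selected table index is returned through @idx.
 */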
static inline void
cxgb_best_mtu(const unsigned short *mtus, unsigned short mtu,
	      unsigned int *idx, int use_ts, int ipv6)
{
	unsigned short hdr_size = (ipv6 ?
				   sizeof(struct ipv6hdr) :
				   sizeof(struct iphdr)) +
				  sizeof(struct tcphdr) +
				  (use_ts ?
				   round_up(TCPOLEN_TIMESTAMP, 4) : 0);
	unsigned short data_size = mtu - hdr_size;

	cxgb4_best_aligned_mtu(mtus, hdr_size, data_size, 8, idx);
}

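/*
 * Compute the TCP window scale shift needed to advertise a receive window
 * of @win bytes: the smallest wscale (capped at 14, the RFC 7323 maximum)
 * such that 65535 << wscale covers the window.  For example, a 1 MiB
 * window needs wscale 5, since 65535 << 4 still falls short of 2^20.
 */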
static inline u32 cxgb_compute_wscale(u32 win)
{
	u32 wscale = 0;

	while (wscale < 14 && (65535 << wscale) < win)
		wscale++;
	return wscale;
}

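/*
 * The cxgb_mk_*() helpers below share one pattern: zero @len bytes of CPL
 * message space in @skb, stamp the TP work request header and opcode/TID
 * with INIT_TP_WR()/OPCODE_TID(), fill in any message-specific fields, and
 * bind the skb to the firmware tx queue for channel @chan via set_wr_txq().
 * The CPL_CLOSE_CON_REQ and CPL_ABORT_REQ variants also attach an ARP
 * failure handler (and its @handle cookie) to the skb with
 * t4_set_arp_err_handler(), since those messages are sent through the L2T.
 */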
static inline void
cxgb_mk_tid_release(struct sk_buff *skb, u32 len, u32 tid, u16 chan)
{
	struct cpl_tid_release *req;

	req = __skb_put_zero(skb, len);

	INIT_TP_WR(req, tid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
	set_wr_txq(skb, CPL_PRIORITY_SETUP, chan);
}
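
/*
 * Illustrative use of cxgb_mk_tid_release() above (not part of this header):
 * a driver releasing a hardware TID might do roughly the following,
 * assuming it already holds the offload net_device @dev, the hardware
 * TID @tid and the connection's tx channel @chan.
 *
 *	u32 len = roundup(sizeof(struct cpl_tid_release), 16);
 *	struct sk_buff *skb;
 *
 *	skb = alloc_skb(len, GFP_KERNEL);
 *	if (!skb)
 *		return -ENOMEM;
 *	cxgb_mk_tid_release(skb, len, tid, chan);
 *	cxgb4_ofld_send(dev, skb);
 */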

static inline void
cxgb_mk_close_con_req(struct sk_buff *skb, u32 len, u32 tid, u16 chan,
		      void *handle, arp_err_handler_t handler)
{
	struct cpl_close_con_req *req;

	req = __skb_put_zero(skb, len);

	INIT_TP_WR(req, tid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, tid));
	set_wr_txq(skb, CPL_PRIORITY_DATA, chan);
	t4_set_arp_err_handler(skb, handle, handler);
}

static inline void
cxgb_mk_abort_req(struct sk_buff *skb, u32 len, u32 tid, u16 chan,
		  void *handle, arp_err_handler_t handler)
{
	struct cpl_abort_req *req;

	req = __skb_put_zero(skb, len);

	INIT_TP_WR(req, tid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, tid));
	req->cmd = CPL_ABORT_SEND_RST;
	set_wr_txq(skb, CPL_PRIORITY_DATA, chan);
	t4_set_arp_err_handler(skb, handle, handler);
}

static inline void
cxgb_mk_abort_rpl(struct sk_buff *skb, u32 len, u32 tid, u16 chan)
{
	struct cpl_abort_rpl *rpl;

	rpl = __skb_put_zero(skb, len);

	INIT_TP_WR(rpl, tid);
	OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, tid));
	rpl->cmd = CPL_ABORT_NO_RST;
	set_wr_txq(skb, CPL_PRIORITY_DATA, chan);
}

static inline void
cxgb_mk_rx_data_ack(struct sk_buff *skb, u32 len, u32 tid, u16 chan,
		    u32 credit_dack)
{
	struct cpl_rx_data_ack *req;

	req = __skb_put_zero(skb, len);

	INIT_TP_WR(req, tid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK, tid));
	req->credit_dack = cpu_to_be32(credit_dack);
	set_wr_txq(skb, CPL_PRIORITY_ACK, chan);
}
#endif