1 /*
2  * Copyright (c) 2016 Chelsio Communications, Inc.
3  *
4  * This program is free software; you can redistribute it and/or modify
5  * it under the terms of the GNU General Public License version 2 as
6  * published by the Free Software Foundation.
7  */
8 
9 #ifndef __CXGBIT_H__
10 #define __CXGBIT_H__
11 
12 #include <linux/mutex.h>
13 #include <linux/list.h>
14 #include <linux/spinlock.h>
15 #include <linux/idr.h>
16 #include <linux/completion.h>
17 #include <linux/netdevice.h>
18 #include <linux/sched.h>
19 #include <linux/pci.h>
20 #include <linux/dma-mapping.h>
21 #include <linux/inet.h>
22 #include <linux/wait.h>
23 #include <linux/kref.h>
24 #include <linux/timer.h>
25 #include <linux/io.h>
26 
27 #include <asm/byteorder.h>
28 
29 #include <net/net_namespace.h>
30 
31 #include <target/iscsi/iscsi_transport.h>
32 #include <iscsi_target_parameters.h>
33 #include <iscsi_target_login.h>
34 
35 #include "t4_regs.h"
36 #include "t4_msg.h"
37 #include "cxgb4.h"
38 #include "cxgb4_uld.h"
39 #include "l2t.h"
40 #include "libcxgb_ppm.h"
41 #include "cxgbit_lro.h"
42 
43 extern struct mutex cdev_list_lock;
44 extern struct list_head cdev_list_head;
45 struct cxgbit_np;
46 
47 struct cxgbit_sock;
48 
/* Per-iSCSI-command DDP (direct data placement) state. */
struct cxgbit_cmd {
	struct scatterlist sg;			/* data buffer scatterlist */
	struct cxgbi_task_tag_info ttinfo;	/* task-tag/ppod info for DDP (libcxgb_ppm) */
	bool setup_ddp;				/* request DDP setup for this command */
	bool release;				/* ttinfo must be released on command free */
};
55 
/*
 * Upper bound on an ISO (iSCSI segmentation offload) payload handed to
 * hardware in one skb: limited by what MAX_SKB_FRAGS pages can carry and
 * by 65535 (16-bit length limit).
 */
#define CXGBIT_MAX_ISO_PAYLOAD	\
	min_t(u32, MAX_SKB_FRAGS * PAGE_SIZE, 65535)

/* Parameters describing one ISO transmit request. */
struct cxgbit_iso_info {
	u8 flags;	/* ISO CPL flag bits */
	u32 mpdu;	/* size of each PDU within the burst */
	u32 len;	/* total payload length of the burst */
	u32 burst_len;	/* burst size the hardware should emit */
};
65 
/* Flag bits stored in cxgbit_skb_cb.flags (see cxgbit_skcb_flags()). */
enum cxgbit_skcb_flags {
	SKCBF_TX_NEED_HDR	= (1 << 0), /* packet needs a header */
	SKCBF_TX_FLAG_COMPL	= (1 << 1), /* wr completion flag */
	SKCBF_TX_ISO		= (1 << 2), /* iso cpl in tx skb */
	SKCBF_RX_LRO		= (1 << 3), /* lro skb */
};
72 
/* Per-skb receive-side control block (lives inside union cxgbit_skb_cb). */
struct cxgbit_skb_rx_cb {
	u8 opcode;	/* CPL opcode of the received message */
	void *pdu_cb;	/* driver PDU descriptor (see cxgbit_rx_pdu_cb()) */
	/* deferred handler run when the skb is replayed from the backlog queue */
	void (*backlog_fn)(struct cxgbit_sock *, struct sk_buff *);
};

/* Per-skb transmit-side control block (lives inside union cxgbit_skb_cb). */
struct cxgbit_skb_tx_cb {
	u8 submode;	/* CXGBIT_SUBMODE_* digest bits for this PDU */
	u32 extra_len;	/* extra bytes beyond skb len (e.g. digests/padding) */
};
83 
/*
 * Driver-private overlay of skb->cb (accessed via CXGBIT_SKB_CB()).
 * The second anonymous struct aliases the first so the l2t code can use
 * its own cb layout on the same skb; hence l2t must stay the first member.
 */
union cxgbit_skb_cb {
	struct {
		u8 flags;
		union {
			struct cxgbit_skb_tx_cb tx;
			struct cxgbit_skb_rx_cb rx;
		};
	};

	struct {
		/* This member must be first. */
		struct l2t_skb_cb l2t;
		struct sk_buff *wr_next;	/* next skb on the pending-WR list */
	};
};
99 
/* Accessors for the cxgbit control block stored in skb->cb. */
#define CXGBIT_SKB_CB(skb)	((union cxgbit_skb_cb *)&((skb)->cb[0]))
#define cxgbit_skcb_flags(skb)		(CXGBIT_SKB_CB(skb)->flags)
#define cxgbit_skcb_submode(skb)	(CXGBIT_SKB_CB(skb)->tx.submode)
#define cxgbit_skcb_tx_wr_next(skb)	(CXGBIT_SKB_CB(skb)->wr_next)
#define cxgbit_skcb_tx_extralen(skb)	(CXGBIT_SKB_CB(skb)->tx.extra_len)
#define cxgbit_skcb_rx_opcode(skb)	(CXGBIT_SKB_CB(skb)->rx.opcode)
#define cxgbit_skcb_rx_backlog_fn(skb)	(CXGBIT_SKB_CB(skb)->rx.backlog_fn)
#define cxgbit_rx_pdu_cb(skb)		(CXGBIT_SKB_CB(skb)->rx.pdu_cb)
108 
/* Return the CPL message header at the start of the skb's data. */
static inline void *cplhdr(struct sk_buff *skb)
{
	return skb->data;
}
113 
/* Bit numbers for cxgbit_device.flags. */
enum cxgbit_cdev_flags {
	CDEV_STATE_UP = 0,	/* device is up and usable */
	CDEV_ISO_ENABLE,	/* hardware iSCSI segmentation offload available */
	CDEV_DDP_ENABLE,	/* hardware direct data placement available */
};

#define NP_INFO_HASH_SIZE 32

/* Hash-chain node mapping a server TID (stid) to its listening endpoint. */
struct np_info {
	struct np_info *next;		/* next entry on the hash chain */
	struct cxgbit_np *cnp;		/* listening endpoint for this stid */
	unsigned int stid;		/* server TID allocated by the LLD */
};

/* A list_head bundled with the spinlock that guards it. */
struct cxgbit_list_head {
	struct list_head list;
	/* device lock */
	spinlock_t lock;
};
133 
/* Per-adapter state; one instance per cxgb4 ULD device. */
struct cxgbit_device {
	struct list_head list;		/* link on global cdev_list_head */
	struct cxgb4_lld_info lldi;	/* info handed up by the cxgb4 LLD */
	struct np_info *np_hash_tab[NP_INFO_HASH_SIZE];	/* stid -> cnp lookup */
	/* np lock */
	spinlock_t np_lock;
	u8 selectq[MAX_NPORTS][2];	/* per-port queue selection table */
	struct cxgbit_list_head cskq;	/* connections owned by this device */
	u32 mdsl;			/* max data segment length supported */
	struct kref kref;		/* released via _cxgbit_free_cdev() */
	unsigned long flags;		/* enum cxgbit_cdev_flags bit numbers */
};

/* Completion plus status for synchronous waits on hardware/firmware replies. */
struct cxgbit_wr_wait {
	struct completion completion;
	int ret;	/* reply status filled in by the completer */
};
151 
/* Connection state machine for a cxgbit_sock. */
enum cxgbit_csk_state {
	CSK_STATE_IDLE = 0,
	CSK_STATE_LISTEN,
	CSK_STATE_CONNECTING,
	CSK_STATE_ESTABLISHED,
	CSK_STATE_ABORTING,
	CSK_STATE_CLOSING,
	CSK_STATE_MORIBUND,	/* close in progress, waiting for final events */
	CSK_STATE_DEAD,
};

/* Bit numbers for cxgbit_sock_common.flags. */
enum cxgbit_csk_flags {
	CSK_TX_DATA_SENT = 0,	/* first tx data (flowc) has been sent */
	CSK_LOGIN_PDU_DONE,
	CSK_LOGIN_DONE,
	CSK_DDP_ENABLE,		/* DDP negotiated/enabled on this connection */
	CSK_ABORT_RPL_WAIT,	/* waiting for ABORT_RPL from hardware */
};

/* State shared by listening (cxgbit_np) and connected (cxgbit_sock) endpoints. */
struct cxgbit_sock_common {
	struct cxgbit_device *cdev;		/* owning adapter */
	struct sockaddr_storage local_addr;
	struct sockaddr_storage remote_addr;
	struct cxgbit_wr_wait wr_wait;		/* for synchronous hw replies */
	enum cxgbit_csk_state state;
	unsigned long flags;			/* enum cxgbit_csk_flags bits */
};
179 
/* Listening endpoint backing an iSCSI network portal. */
struct cxgbit_np {
	struct cxgbit_sock_common com;
	wait_queue_head_t accept_wait;		/* woken when a connection arrives */
	struct iscsi_np *np;			/* core iSCSI network portal */
	struct completion accept_comp;
	struct list_head np_accept_list;	/* connections awaiting accept */
	/* np accept lock */
	spinlock_t np_accept_lock;
	struct kref kref;			/* released via _cxgbit_free_cnp() */
	unsigned int stid;			/* hardware server TID of this listener */
};
191 
/* Per-connection state for one offloaded iSCSI TCP connection. */
struct cxgbit_sock {
	struct cxgbit_sock_common com;
	struct cxgbit_np *cnp;		/* listener this connection arrived on */
	struct iscsi_conn *conn;	/* core iSCSI connection */
	struct l2t_entry *l2t;		/* L2T (neighbour) hardware entry */
	struct dst_entry *dst;		/* cached route to the peer */
	struct list_head list;		/* link on cdev->cskq */
	struct sk_buff_head rxq;	/* received PDUs awaiting processing */
	struct sk_buff_head txq;	/* PDUs queued for transmit */
	struct sk_buff_head ppodq;	/* pending ppod (DDP page-table) writes */
	struct sk_buff_head backlogq;	/* rx deferred while the sock lock is held */
	struct sk_buff_head skbq;	/* preallocated skbs — TODO confirm use */
	/* WRs awaiting completion, linked via cxgbit_skcb_tx_wr_next() */
	struct sk_buff *wr_pending_head;
	struct sk_buff *wr_pending_tail;
	struct sk_buff *skb;		/* skb currently being processed */
	struct sk_buff *lro_skb;	/* LRO aggregation skb */
	struct sk_buff *lro_hskb;	/* LRO header skb */
	struct list_head accept_node;	/* link on cnp->np_accept_list */
	/* socket lock */
	spinlock_t lock;
	wait_queue_head_t waitq;
	wait_queue_head_t ack_waitq;	/* woken when tx credits return */
	bool lock_owner;		/* true while a thread owns the sock lock */
	struct kref kref;		/* released via _cxgbit_free_csk() */
	u32 max_iso_npdu;	/* max PDUs per ISO burst (0 = ISO off) */
	/* hardware tx work-request credit accounting */
	u32 wr_cred;
	u32 wr_una_cred;
	u32 wr_max_cred;
	/* TCP sequence bookkeeping */
	u32 snd_una;
	u32 tid;		/* hardware connection TID */
	u32 snd_nxt;
	u32 rcv_nxt;
	u32 smac_idx;		/* source MAC index for tx */
	u32 tx_chan;		/* hardware tx channel */
	u32 mtu;
	u32 write_seq;
	u32 rx_credits;		/* rx bytes not yet returned to hw as credits */
	u32 snd_win;		/* send window */
	u32 rcv_win;		/* receive window */
	u16 mss;
	u16 emss;		/* effective MSS (after header/option overhead) */
	u16 plen;
	u16 rss_qid;		/* RSS response queue id */
	u16 txq_idx;
	u16 ctrlq_idx;
	u8 tos;			/* IP type-of-service */
	u8 port_id;
/* iSCSI digest offload submode bits (tx.submode / csk->submode) */
#define CXGBIT_SUBMODE_HCRC 0x1
#define CXGBIT_SUBMODE_DCRC 0x2
	u8 submode;
#ifdef CONFIG_CHELSIO_T4_DCB
	u8 dcb_priority;	/* data-center-bridging priority */
#endif
	u8 snd_wscale;		/* negotiated TCP window scale */
};
247 
/* kref release callbacks, invoked when the last reference is dropped. */
void _cxgbit_free_cdev(struct kref *kref);
void _cxgbit_free_csk(struct kref *kref);
void _cxgbit_free_cnp(struct kref *kref);

/* Take a reference on @cdev. */
static inline void cxgbit_get_cdev(struct cxgbit_device *cdev)
{
	kref_get(&cdev->kref);
}

/* Drop a reference on @cdev; frees it when the count hits zero. */
static inline void cxgbit_put_cdev(struct cxgbit_device *cdev)
{
	kref_put(&cdev->kref, _cxgbit_free_cdev);
}

/* Take a reference on @csk. */
static inline void cxgbit_get_csk(struct cxgbit_sock *csk)
{
	kref_get(&csk->kref);
}

/* Drop a reference on @csk; frees it when the count hits zero. */
static inline void cxgbit_put_csk(struct cxgbit_sock *csk)
{
	kref_put(&csk->kref, _cxgbit_free_csk);
}

/* Take a reference on @cnp. */
static inline void cxgbit_get_cnp(struct cxgbit_np *cnp)
{
	kref_get(&cnp->kref);
}

/* Drop a reference on @cnp; frees it when the count hits zero. */
static inline void cxgbit_put_cnp(struct cxgbit_np *cnp)
{
	kref_put(&cnp->kref, _cxgbit_free_cnp);
}
281 
282 static inline void cxgbit_sock_reset_wr_list(struct cxgbit_sock *csk)
283 {
284 	csk->wr_pending_tail = NULL;
285 	csk->wr_pending_head = NULL;
286 }
287 
/* Return the oldest pending WR skb without removing it (NULL if none). */
static inline struct sk_buff *cxgbit_sock_peek_wr(const struct cxgbit_sock *csk)
{
	return csk->wr_pending_head;
}
292 
293 static inline void
294 cxgbit_sock_enqueue_wr(struct cxgbit_sock *csk, struct sk_buff *skb)
295 {
296 	cxgbit_skcb_tx_wr_next(skb) = NULL;
297 
298 	skb_get(skb);
299 
300 	if (!csk->wr_pending_head)
301 		csk->wr_pending_head = skb;
302 	else
303 		cxgbit_skcb_tx_wr_next(csk->wr_pending_tail) = skb;
304 	csk->wr_pending_tail = skb;
305 }
306 
307 static inline struct sk_buff *cxgbit_sock_dequeue_wr(struct cxgbit_sock *csk)
308 {
309 	struct sk_buff *skb = csk->wr_pending_head;
310 
311 	if (likely(skb)) {
312 		csk->wr_pending_head = cxgbit_skcb_tx_wr_next(skb);
313 		cxgbit_skcb_tx_wr_next(skb) = NULL;
314 	}
315 	return skb;
316 }
317 
/* Handler invoked for an incoming CPL message on a given device. */
typedef void (*cxgbit_cplhandler_func)(struct cxgbit_device *,
				       struct sk_buff *);

/* Connection management */
int cxgbit_setup_np(struct iscsi_np *, struct sockaddr_storage *);
int cxgbit_setup_conn_digest(struct cxgbit_sock *);
int cxgbit_accept_np(struct iscsi_np *, struct iscsi_conn *);
void cxgbit_free_np(struct iscsi_np *);
void cxgbit_abort_conn(struct cxgbit_sock *csk);
void cxgbit_free_conn(struct iscsi_conn *);
/* CPL dispatch table, indexed by CPL opcode */
extern cxgbit_cplhandler_func cxgbit_cplhandlers[NUM_CPL_CMDS];
/* Login and data-path transport ops */
int cxgbit_get_login_rx(struct iscsi_conn *, struct iscsi_login *);
int cxgbit_rx_data_ack(struct cxgbit_sock *);
int cxgbit_l2t_send(struct cxgbit_device *, struct sk_buff *,
		    struct l2t_entry *);
void cxgbit_push_tx_frames(struct cxgbit_sock *);
int cxgbit_put_login_tx(struct iscsi_conn *, struct iscsi_login *, u32);
int cxgbit_xmit_pdu(struct iscsi_conn *, struct iscsi_cmd *,
		    struct iscsi_datain_req *, const void *, u32);
void cxgbit_get_r2t_ttt(struct iscsi_conn *, struct iscsi_cmd *,
			struct iscsi_r2t *);
u32 cxgbit_send_tx_flowc_wr(struct cxgbit_sock *);
int cxgbit_ofld_send(struct cxgbit_device *, struct sk_buff *);
void cxgbit_get_rx_pdu(struct iscsi_conn *);
int cxgbit_validate_params(struct iscsi_conn *);
struct cxgbit_device *cxgbit_find_device(struct net_device *, u8 *);

/* DDP */
int cxgbit_ddp_init(struct cxgbit_device *);
int cxgbit_setup_conn_pgidx(struct cxgbit_sock *, u32);
int cxgbit_reserve_ttt(struct cxgbit_sock *, struct iscsi_cmd *);
void cxgbit_unmap_cmd(struct iscsi_conn *, struct iscsi_cmd *);
349 
/* Return the page-pod manager shared with the cxgb4 LLD for this device. */
static inline
struct cxgbi_ppm *cdev2ppm(struct cxgbit_device *cdev)
{
	return (struct cxgbi_ppm *)(*cdev->lldi.iscsi_ppm);
}
355 #endif /* __CXGBIT_H__ */
356