/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2018 Chelsio Communications, Inc.
 */

#ifndef __CHTLS_H__
#define __CHTLS_H__

#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/hash.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <crypto/authenc.h>
#include <crypto/ctr.h>
#include <crypto/gf128mul.h>
#include <crypto/internal/aead.h>
#include <crypto/null.h>
#include <crypto/internal/skcipher.h>
#include <crypto/aead.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/hash.h>
#include <linux/tls.h>
#include <net/tls.h>
#include <net/tls_prot.h>
#include <net/tls_toe.h>

#include "t4fw_api.h"
#include "t4_msg.h"
#include "cxgb4.h"
#include "cxgb4_uld.h"
#include "l2t.h"
#include "chcr_algo.h"
#include "chcr_core.h"
#include "chcr_crypto.h"

#define CHTLS_DRV_VERSION "1.0.0.0-ko"

#define TLS_KEYCTX_RXFLIT_CNT_S 24
#define TLS_KEYCTX_RXFLIT_CNT_V(x) ((x) << TLS_KEYCTX_RXFLIT_CNT_S)

#define TLS_KEYCTX_RXPROT_VER_S 20
#define TLS_KEYCTX_RXPROT_VER_M 0xf
#define TLS_KEYCTX_RXPROT_VER_V(x) ((x) << TLS_KEYCTX_RXPROT_VER_S)

#define TLS_KEYCTX_RXCIPH_MODE_S 16
#define TLS_KEYCTX_RXCIPH_MODE_M 0xf
#define TLS_KEYCTX_RXCIPH_MODE_V(x) ((x) << TLS_KEYCTX_RXCIPH_MODE_S)

#define TLS_KEYCTX_RXAUTH_MODE_S 12
#define TLS_KEYCTX_RXAUTH_MODE_M 0xf
#define TLS_KEYCTX_RXAUTH_MODE_V(x) ((x) << TLS_KEYCTX_RXAUTH_MODE_S)

#define TLS_KEYCTX_RXCIAU_CTRL_S 11
#define TLS_KEYCTX_RXCIAU_CTRL_V(x) ((x) << TLS_KEYCTX_RXCIAU_CTRL_S)

#define TLS_KEYCTX_RX_SEQCTR_S 9
#define TLS_KEYCTX_RX_SEQCTR_M 0x3
#define TLS_KEYCTX_RX_SEQCTR_V(x) ((x) << TLS_KEYCTX_RX_SEQCTR_S)

#define TLS_KEYCTX_RX_VALID_S 8
#define TLS_KEYCTX_RX_VALID_V(x) ((x) << TLS_KEYCTX_RX_VALID_S)

#define TLS_KEYCTX_RXCK_SIZE_S 3
#define TLS_KEYCTX_RXCK_SIZE_M 0x7
#define TLS_KEYCTX_RXCK_SIZE_V(x) ((x) << TLS_KEYCTX_RXCK_SIZE_S)

#define TLS_KEYCTX_RXMK_SIZE_S 0
#define TLS_KEYCTX_RXMK_SIZE_M 0x7
#define TLS_KEYCTX_RXMK_SIZE_V(x) ((x) << TLS_KEYCTX_RXMK_SIZE_S)

#define KEYCTX_TX_WR_IV_S 55
#define KEYCTX_TX_WR_IV_M 0x1ffULL
#define KEYCTX_TX_WR_IV_V(x) ((x) << KEYCTX_TX_WR_IV_S)
#define KEYCTX_TX_WR_IV_G(x) \
	(((x) >> KEYCTX_TX_WR_IV_S) & KEYCTX_TX_WR_IV_M)

#define KEYCTX_TX_WR_AAD_S 47
#define KEYCTX_TX_WR_AAD_M 0xffULL
#define KEYCTX_TX_WR_AAD_V(x) ((x) << KEYCTX_TX_WR_AAD_S)
#define KEYCTX_TX_WR_AAD_G(x) \
	(((x) >> KEYCTX_TX_WR_AAD_S) & KEYCTX_TX_WR_AAD_M)

#define KEYCTX_TX_WR_AADST_S 39
#define KEYCTX_TX_WR_AADST_M 0xffULL
#define KEYCTX_TX_WR_AADST_V(x) ((x) << KEYCTX_TX_WR_AADST_S)
#define KEYCTX_TX_WR_AADST_G(x) \
	(((x) >> KEYCTX_TX_WR_AADST_S) & KEYCTX_TX_WR_AADST_M)

#define KEYCTX_TX_WR_CIPHER_S 30
#define KEYCTX_TX_WR_CIPHER_M 0x1ffULL
#define KEYCTX_TX_WR_CIPHER_V(x) ((x) << KEYCTX_TX_WR_CIPHER_S)
#define KEYCTX_TX_WR_CIPHER_G(x) \
	(((x) >> KEYCTX_TX_WR_CIPHER_S) & KEYCTX_TX_WR_CIPHER_M)

#define KEYCTX_TX_WR_CIPHERST_S 23
#define KEYCTX_TX_WR_CIPHERST_M 0x7f
#define KEYCTX_TX_WR_CIPHERST_V(x) ((x) << KEYCTX_TX_WR_CIPHERST_S)
#define KEYCTX_TX_WR_CIPHERST_G(x) \
	(((x) >> KEYCTX_TX_WR_CIPHERST_S) & KEYCTX_TX_WR_CIPHERST_M)

#define KEYCTX_TX_WR_AUTH_S 14
#define KEYCTX_TX_WR_AUTH_M 0x1ff
#define KEYCTX_TX_WR_AUTH_V(x) ((x) << KEYCTX_TX_WR_AUTH_S)
#define KEYCTX_TX_WR_AUTH_G(x) \
	(((x) >> KEYCTX_TX_WR_AUTH_S) & KEYCTX_TX_WR_AUTH_M)

#define KEYCTX_TX_WR_AUTHST_S 7
#define KEYCTX_TX_WR_AUTHST_M 0x7f
#define KEYCTX_TX_WR_AUTHST_V(x) ((x) << KEYCTX_TX_WR_AUTHST_S)
#define KEYCTX_TX_WR_AUTHST_G(x) \
	(((x) >> KEYCTX_TX_WR_AUTHST_S) & KEYCTX_TX_WR_AUTHST_M)

#define KEYCTX_TX_WR_AUTHIN_S 0
#define KEYCTX_TX_WR_AUTHIN_M 0x7f
#define KEYCTX_TX_WR_AUTHIN_V(x) ((x) << KEYCTX_TX_WR_AUTHIN_S)
#define KEYCTX_TX_WR_AUTHIN_G(x) \
	(((x) >> KEYCTX_TX_WR_AUTHIN_S) & KEYCTX_TX_WR_AUTHIN_M)
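/*
 * Illustrative sketch, not part of the driver: the _S/_M/_V/_G macros
 * above follow the usual cxgb4 bit-field convention -- _S is the field
 * shift, _M the mask after shifting, _V(x) places a value into the
 * field and _G(x) extracts it again. The hypothetical helper below
 * (name is ours, for illustration only) shows the round trip for the
 * TX key-context auth field.
 */
static inline u32 keyctx_tx_wr_auth_roundtrip_example(u32 val)
{
	u32 word = KEYCTX_TX_WR_AUTH_V(val & KEYCTX_TX_WR_AUTH_M);

	/* recovers val for any value that fits in the 9-bit field */
	return KEYCTX_TX_WR_AUTH_G(word);
}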
struct sge_opaque_hdr {
	void *dev;
	dma_addr_t addr[MAX_SKB_FRAGS + 1];
};

#define MAX_IVS_PAGE 256
#define TLS_KEY_CONTEXT_SZ 64
#define CIPHER_BLOCK_SIZE 16
#define GCM_TAG_SIZE 16
#define KEY_ON_MEM_SZ 16
#define AEAD_EXPLICIT_DATA_SIZE 8
#define TLS_HEADER_LENGTH 5
#define SCMD_CIPH_MODE_AES_GCM 2
/* Any MFS size should work and come from openssl */
#define TLS_MFS 16384

#define RSS_HDR sizeof(struct rss_header)
#define TLS_WR_CPL_LEN \
	(sizeof(struct fw_tlstx_data_wr) + sizeof(struct cpl_tx_tls_sfo))

enum {
	CHTLS_KEY_CONTEXT_DSGL,
	CHTLS_KEY_CONTEXT_IMM,
	CHTLS_KEY_CONTEXT_DDR,
};

enum {
	CHTLS_LISTEN_START,
	CHTLS_LISTEN_STOP,
};

/* Flags for return value of CPL message handlers */
enum {
	CPL_RET_BUF_DONE =    1,	/* buffer processing done */
	CPL_RET_BAD_MSG =     2,	/* bad CPL message */
	CPL_RET_UNKNOWN_TID = 4		/* unexpected unknown TID */
};

#define LISTEN_INFO_HASH_SIZE 32
#define RSPQ_HASH_BITS 5
struct listen_info {
	struct listen_info *next;	/* Link to next entry */
	struct sock *sk;		/* The listening socket */
	unsigned int stid;		/* The server TID */
};

enum {
	T4_LISTEN_START_PENDING,
	T4_LISTEN_STARTED
};

enum csk_flags {
	CSK_CALLBACKS_CHKD,	/* socket callbacks have been sanitized */
	CSK_ABORT_REQ_RCVD,	/* received one ABORT_REQ_RSS message */
	CSK_TX_MORE_DATA,	/* sending ULP data; don't set SHOVE bit */
	CSK_TX_WAIT_IDLE,	/* suspend Tx until in-flight data is ACKed */
	CSK_ABORT_SHUTDOWN,	/* shouldn't send more abort requests */
	CSK_ABORT_RPL_PENDING,	/* expecting an abort reply */
	CSK_CLOSE_CON_REQUESTED,/* we've sent a close_conn_req */
	CSK_TX_DATA_SENT,	/* sent a TX_DATA WR on this connection */
	CSK_TX_FAILOVER,	/* Tx traffic failing over */
	CSK_UPDATE_RCV_WND,	/* Need to update rcv window */
	CSK_RST_ABORTED,	/* outgoing RST was aborted */
	CSK_TLS_HANDSHK,	/* TLS Handshake */
	CSK_CONN_INLINE,	/* Connection on HW */
};

enum chtls_cdev_state {
	CHTLS_CDEV_STATE_UP = 1
};

struct listen_ctx {
	struct sock *lsk;
	struct chtls_dev *cdev;
	struct sk_buff_head synq;
	u32 state;
};

struct key_map {
	unsigned long *addr;
	unsigned int start;
	unsigned int available;
	unsigned int size;
	spinlock_t lock; /* lock for key id request from map */
} __packed;

struct tls_scmd {
	u32 seqno_numivs;
	u32 ivgen_hdrlen;
};
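/*
 * Illustrative sketch, not part of the driver: key_map (above) is a
 * spinlock-protected bitmap over the on-chip TLS key region. A
 * hypothetical allocator (this helper and its name are ours, for
 * illustration only; the real allocation lives in the .c files) would
 * scan for a free bit and hand it out as a key id:
 */
static inline int chtls_example_alloc_keyid(struct key_map *kmap)
{
	unsigned int keyid;
	int ret = -1;

	spin_lock_bh(&kmap->lock);
	keyid = find_first_zero_bit(kmap->addr, kmap->size);
	if (keyid < kmap->size) {
		__set_bit(keyid, kmap->addr);	/* mark the id as in use */
		ret = keyid;
	}
	spin_unlock_bh(&kmap->lock);
	return ret;
}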
struct chtls_dev {
	struct tls_toe_device tlsdev;
	struct list_head list;
	struct cxgb4_lld_info *lldi;
	struct pci_dev *pdev;
	struct listen_info *listen_hash_tab[LISTEN_INFO_HASH_SIZE];
	spinlock_t listen_lock; /* lock for listen list */
	struct net_device **ports;
	struct tid_info *tids;
	unsigned int pfvf;
	const unsigned short *mtus;

	struct idr hwtid_idr;
	struct idr stid_idr;

	spinlock_t idr_lock ____cacheline_aligned_in_smp;

	struct net_device *egr_dev[NCHAN * 2];
	struct sk_buff *rspq_skb_cache[1 << RSPQ_HASH_BITS];
	struct sk_buff *askb;

	struct sk_buff_head deferq;
	struct work_struct deferq_task;

	struct list_head list_node;
	struct list_head rcu_node;
	struct list_head na_node;
	unsigned int send_page_order;
	int max_host_sndbuf;
	u32 round_robin_cnt;
	struct key_map kmap;
	unsigned int cdev_state;
};

struct chtls_listen {
	struct chtls_dev *cdev;
	struct sock *sk;
};

struct chtls_hws {
	struct sk_buff_head sk_recv_queue;
	u8 txqid;
	u8 ofld;
	u16 type;
	u16 rstate;
	u16 keyrpl;
	u16 pldlen;
	u16 rcvpld;
	u16 compute;
	u16 expansion;
	u16 keylen;
	u16 pdus;
	u16 adjustlen;
	u16 ivsize;
	u16 txleft;
	u32 mfs;
	s32 txkey;
	s32 rxkey;
	u32 fcplenmax;
	u32 copied_seq;
	u64 tx_seq_no;
	struct tls_scmd scmd;
	union {
		struct tls12_crypto_info_aes_gcm_128 aes_gcm_128;
		struct tls12_crypto_info_aes_gcm_256 aes_gcm_256;
	} crypto_info;
};

struct chtls_sock {
	struct sock *sk;
	struct chtls_dev *cdev;
	struct l2t_entry *l2t_entry;	/* pointer to the L2T entry */
	struct net_device *egress_dev;	/* TX_CHAN for act open retry */

	struct sk_buff_head txq;
	struct sk_buff *wr_skb_head;
	struct sk_buff *wr_skb_tail;
	struct sk_buff *ctrl_skb_cache;
	struct sk_buff *txdata_skb_cache; /* abort path messages */
	struct kref kref;
	unsigned long flags;
	u32 opt2;
	u32 wr_credits;
	u32 wr_unacked;
	u32 wr_max_credits;
	u32 wr_nondata;
	u32 hwtid;		/* TCP Control Block ID */
	u32 txq_idx;
	u32 rss_qid;
	u32 tid;
	u32 idr;
	u32 mss;
	u32 ulp_mode;
	u32 tx_chan;
	u32 rx_chan;
	u32 sndbuf;
	u32 txplen_max;
	u32 mtu_idx;		/* MTU table index */
	u32 smac_idx;
	u8 port_id;
	u8 tos;
	u16 resv2;
	u32 delack_mode;
	u32 delack_seq;
	u32 snd_win;
	u32 rcv_win;

	void *passive_reap_next;	/* placeholder for passive */
	struct chtls_hws tlshws;
	struct synq {
		struct sk_buff *next;
		struct sk_buff *prev;
	} synq;
	struct listen_ctx *listen_ctx;
};

struct tls_hdr {
	u8  type;
	u16 version;
	u16 length;
} __packed;

struct tlsrx_cmp_hdr {
	u8  type;
	u16 version;
	u16 length;

	u64 tls_seq;
	u16 reserved1;
	u8  res_to_mac_error;
} __packed;

/* res_to_mac_error fields */
#define TLSRX_HDR_PKT_INT_ERROR_S 4
#define TLSRX_HDR_PKT_INT_ERROR_M 0x1
#define TLSRX_HDR_PKT_INT_ERROR_V(x) \
	((x) << TLSRX_HDR_PKT_INT_ERROR_S)
#define TLSRX_HDR_PKT_INT_ERROR_G(x) \
	(((x) >> TLSRX_HDR_PKT_INT_ERROR_S) & TLSRX_HDR_PKT_INT_ERROR_M)
#define TLSRX_HDR_PKT_INT_ERROR_F TLSRX_HDR_PKT_INT_ERROR_V(1U)

#define TLSRX_HDR_PKT_SPP_ERROR_S 3
#define TLSRX_HDR_PKT_SPP_ERROR_M 0x1
#define TLSRX_HDR_PKT_SPP_ERROR_V(x) ((x) << TLSRX_HDR_PKT_SPP_ERROR_S)
#define TLSRX_HDR_PKT_SPP_ERROR_G(x) \
	(((x) >> TLSRX_HDR_PKT_SPP_ERROR_S) & TLSRX_HDR_PKT_SPP_ERROR_M)
#define TLSRX_HDR_PKT_SPP_ERROR_F TLSRX_HDR_PKT_SPP_ERROR_V(1U)

#define TLSRX_HDR_PKT_CCDX_ERROR_S 2
#define TLSRX_HDR_PKT_CCDX_ERROR_M 0x1
#define TLSRX_HDR_PKT_CCDX_ERROR_V(x) ((x) << TLSRX_HDR_PKT_CCDX_ERROR_S)
#define TLSRX_HDR_PKT_CCDX_ERROR_G(x) \
	(((x) >> TLSRX_HDR_PKT_CCDX_ERROR_S) & TLSRX_HDR_PKT_CCDX_ERROR_M)
#define TLSRX_HDR_PKT_CCDX_ERROR_F TLSRX_HDR_PKT_CCDX_ERROR_V(1U)

#define TLSRX_HDR_PKT_PAD_ERROR_S 1
#define TLSRX_HDR_PKT_PAD_ERROR_M 0x1
#define TLSRX_HDR_PKT_PAD_ERROR_V(x) ((x) << TLSRX_HDR_PKT_PAD_ERROR_S)
#define TLSRX_HDR_PKT_PAD_ERROR_G(x) \
	(((x) >> TLSRX_HDR_PKT_PAD_ERROR_S) & TLSRX_HDR_PKT_PAD_ERROR_M)
#define TLSRX_HDR_PKT_PAD_ERROR_F TLSRX_HDR_PKT_PAD_ERROR_V(1U)

#define TLSRX_HDR_PKT_MAC_ERROR_S 0
#define TLSRX_HDR_PKT_MAC_ERROR_M 0x1
#define TLSRX_HDR_PKT_MAC_ERROR_V(x) ((x) << TLSRX_HDR_PKT_MAC_ERROR_S)
#define TLSRX_HDR_PKT_MAC_ERROR_G(x) \
	(((x) >> TLSRX_HDR_PKT_MAC_ERROR_S) & TLSRX_HDR_PKT_MAC_ERROR_M)
#define TLSRX_HDR_PKT_MAC_ERROR_F TLSRX_HDR_PKT_MAC_ERROR_V(1U)

#define TLSRX_HDR_PKT_ERROR_M 0x1F
#define CONTENT_TYPE_ERROR 0x7F
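/*
 * Illustrative sketch, not part of the driver: the hardware reports
 * per-record decrypt problems in tlsrx_cmp_hdr.res_to_mac_error; the
 * hypothetical helper below (name is ours, for illustration only)
 * shows how the error bits compose.
 */
static inline bool tlsrx_hdr_error_example(const struct tlsrx_cmp_hdr *hdr)
{
	/*
	 * TLSRX_HDR_PKT_ERROR_M covers all five error bits at once; a
	 * single condition can be tested with its _F constant instead,
	 * e.g. TLSRX_HDR_PKT_MAC_ERROR_F.
	 */
	return (hdr->res_to_mac_error & TLSRX_HDR_PKT_ERROR_M) != 0;
}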
struct ulp_mem_rw {
	__be32 cmd;
	__be32 len16;		/* command length */
	__be32 dlen;		/* data length in 32-byte units */
	__be32 lock_addr;
};

struct tls_key_wr {
	__be32 op_to_compl;
	__be32 flowid_len16;
	__be32 ftid;
	u8     reneg_to_write_rx;
	u8     protocol;
	__be16 mfs;
};

struct tls_key_req {
	struct tls_key_wr wr;
	struct ulp_mem_rw req;
	struct ulptx_idata sc_imm;
};

/*
 * This lives in skb->cb and is used to chain WRs in a linked list.
 */
struct wr_skb_cb {
	struct l2t_skb_cb l2t;		/* reserve space for l2t CB */
	struct sk_buff *next_wr;	/* next write request */
};

/* Per-skb backlog handler.  Run when a socket's backlog is processed. */
struct blog_skb_cb {
	void (*backlog_rcv)(struct sock *sk, struct sk_buff *skb);
	struct chtls_dev *cdev;
};

/*
 * Similar to tcp_skb_cb but with ULP elements added to support TLS,
 * etc.
 */
struct ulp_skb_cb {
	struct wr_skb_cb wr;	/* reserve space for write request */
	u16 flags;		/* TCP-like flags */
	u8 psh;
	u8 ulp_mode;		/* ULP mode/submode of sk_buff */
	u32 seq;		/* TCP sequence number */
	union {			/* ULP-specific fields */
		struct {
			u8 type;
			u8 ofld;
			u8 iv;
		} tls;
	} ulp;
};

#define ULP_SKB_CB(skb)  ((struct ulp_skb_cb *)&((skb)->cb[0]))
#define BLOG_SKB_CB(skb) ((struct blog_skb_cb *)(skb)->cb)
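/*
 * Illustrative sketch, not part of the driver: pending write requests
 * are chained through wr_skb_cb.next_wr, anchored at a connection's
 * wr_skb_head/wr_skb_tail. A hypothetical walker (this helper and its
 * name are ours, for illustration only) over the unacked WR list:
 */
static inline unsigned int chtls_example_count_pending_wrs(struct chtls_sock *csk)
{
	struct sk_buff *skb;
	unsigned int n = 0;

	/* wr_skb_cb is the first member of ulp_skb_cb, so ->wr aliases it */
	for (skb = csk->wr_skb_head; skb; skb = ULP_SKB_CB(skb)->wr.next_wr)
		n++;
	return n;
}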
/*
 * Flags for ulp_skb_cb.flags.
 */
enum {
	ULPCB_FLAG_NEED_HDR  = 1 << 0,	/* packet needs a TX_DATA_WR header */
	ULPCB_FLAG_NO_APPEND = 1 << 1,	/* don't grow this skb */
	ULPCB_FLAG_BARRIER   = 1 << 2,	/* set TX_WAIT_IDLE after sending */
	ULPCB_FLAG_HOLD      = 1 << 3,	/* skb not ready for Tx yet */
	ULPCB_FLAG_COMPL     = 1 << 4,	/* request WR completion */
	ULPCB_FLAG_URG       = 1 << 5,	/* urgent data */
	ULPCB_FLAG_TLS_HDR   = 1 << 6,	/* payload with tls hdr */
	ULPCB_FLAG_NO_HDR    = 1 << 7,	/* not an ofld wr */
};

/* The ULP mode/submode of an skbuff */
#define skb_ulp_mode(skb) (ULP_SKB_CB(skb)->ulp_mode)
#define TCP_PAGE(sk) ((sk)->sk_frag.page)
#define TCP_OFF(sk) ((sk)->sk_frag.offset)

static inline struct chtls_dev *to_chtls_dev(struct tls_toe_device *tlsdev)
{
	return container_of(tlsdev, struct chtls_dev, tlsdev);
}

static inline void csk_set_flag(struct chtls_sock *csk,
				enum csk_flags flag)
{
	__set_bit(flag, &csk->flags);
}

static inline void csk_reset_flag(struct chtls_sock *csk,
				  enum csk_flags flag)
{
	__clear_bit(flag, &csk->flags);
}

static inline bool csk_conn_inline(const struct chtls_sock *csk)
{
	return test_bit(CSK_CONN_INLINE, &csk->flags);
}

static inline int csk_flag(const struct sock *sk, enum csk_flags flag)
{
	struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);

	if (!csk_conn_inline(csk))
		return 0;
	return test_bit(flag, &csk->flags);
}

static inline int csk_flag_nochk(const struct chtls_sock *csk,
				 enum csk_flags flag)
{
	return test_bit(flag, &csk->flags);
}

static inline void *cplhdr(struct sk_buff *skb)
{
	return skb->data;
}

static inline int is_neg_adv(unsigned int status)
{
	return status == CPL_ERR_RTX_NEG_ADVICE ||
	       status == CPL_ERR_KEEPALV_NEG_ADVICE ||
	       status == CPL_ERR_PERSIST_NEG_ADVICE;
}

static inline void process_cpl_msg(void (*fn)(struct sock *, struct sk_buff *),
				   struct sock *sk,
				   struct sk_buff *skb)
{
	skb_reset_mac_header(skb);
	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);

	bh_lock_sock(sk);
	if (unlikely(sock_owned_by_user(sk))) {
		BLOG_SKB_CB(skb)->backlog_rcv = fn;
		__sk_add_backlog(sk, skb);
	} else {
		fn(sk, skb);
	}
	bh_unlock_sock(sk);
}

static inline void chtls_sock_free(struct kref *ref)
{
	struct chtls_sock *csk = container_of(ref, struct chtls_sock,
					      kref);
	kfree(csk);
}

static inline void __chtls_sock_put(const char *fn, struct chtls_sock *csk)
{
	kref_put(&csk->kref, chtls_sock_free);
}

static inline void __chtls_sock_get(const char *fn,
				    struct chtls_sock *csk)
{
	kref_get(&csk->kref);
}

static inline void send_or_defer(struct sock *sk, struct tcp_sock *tp,
				 struct sk_buff *skb, int through_l2t)
{
	struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);

	if (through_l2t) {
		/* send through L2T */
		cxgb4_l2t_send(csk->egress_dev, skb, csk->l2t_entry);
	} else {
		/* send directly */
		cxgb4_ofld_send(csk->egress_dev, skb);
	}
}
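/*
 * Illustrative sketch, not part of the driver: process_cpl_msg() runs a
 * CPL handler under the socket lock; if user context owns the socket,
 * the skb is queued to the backlog with the handler saved in
 * blog_skb_cb.backlog_rcv so it can be replayed later. The hypothetical
 * handler and dispatcher below (names are ours, for illustration only)
 * show the calling convention:
 */
static inline void chtls_example_rx_handler(struct sock *sk,
					    struct sk_buff *skb)
{
	kfree_skb(skb);		/* a real handler would consume the CPL here */
}

static inline void chtls_example_dispatch(struct sock *sk,
					  struct sk_buff *skb)
{
	process_cpl_msg(chtls_example_rx_handler, sk, skb);
}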
typedef int (*chtls_handler_func)(struct chtls_dev *, struct sk_buff *);
extern chtls_handler_func chtls_handlers[NUM_CPL_CMDS];
void chtls_install_cpl_ops(struct sock *sk);
int chtls_init_kmap(struct chtls_dev *cdev, struct cxgb4_lld_info *lldi);
void chtls_listen_stop(struct chtls_dev *cdev, struct sock *sk);
int chtls_listen_start(struct chtls_dev *cdev, struct sock *sk);
void chtls_close(struct sock *sk, long timeout);
int chtls_disconnect(struct sock *sk, int flags);
void chtls_shutdown(struct sock *sk, int how);
void chtls_destroy_sock(struct sock *sk);
int chtls_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
int chtls_recvmsg(struct sock *sk, struct msghdr *msg,
		  size_t len, int flags, int *addr_len);
void chtls_splice_eof(struct socket *sock);
int send_tx_flowc_wr(struct sock *sk, int compl,
		     u32 snd_nxt, u32 rcv_nxt);
void chtls_tcp_push(struct sock *sk, int flags);
int chtls_push_frames(struct chtls_sock *csk, int comp);
int chtls_set_tcb_tflag(struct sock *sk, unsigned int bit_pos, int val);
void chtls_set_tcb_field_rpl_skb(struct sock *sk, u16 word,
				 u64 mask, u64 val, u8 cookie,
				 int through_l2t);
int chtls_setkey(struct chtls_sock *csk, u32 keylen, u32 mode, int cipher_type);
void chtls_set_quiesce_ctrl(struct sock *sk, int val);
void skb_entail(struct sock *sk, struct sk_buff *skb, int flags);
unsigned int keyid_to_addr(int start_addr, int keyid);
void free_tls_keyid(struct sock *sk);
#endif /* __CHTLS_H__ */