// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2016 Chelsio Communications, Inc.
 */

#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/sched/signal.h>

#include <asm/unaligned.h>
#include <net/tcp.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include "cxgbit.h"

struct sge_opaque_hdr {
	void *dev;
	dma_addr_t addr[MAX_SKB_FRAGS + 1];
};

static const u8 cxgbit_digest_len[] = {0, 4, 4, 8};

#define TX_HDR_LEN (sizeof(struct sge_opaque_hdr) + \
		    sizeof(struct fw_ofld_tx_data_wr))

static struct sk_buff *
__cxgbit_alloc_skb(struct cxgbit_sock *csk, u32 len, bool iso)
{
	struct sk_buff *skb = NULL;
	u8 submode = 0;
	int errcode;
	static const u32 hdr_len = TX_HDR_LEN + ISCSI_HDR_LEN;

	if (len) {
		skb = alloc_skb_with_frags(hdr_len, len,
					   0, &errcode,
					   GFP_KERNEL);
		if (!skb)
			return NULL;

		skb_reserve(skb, TX_HDR_LEN);
		skb_reset_transport_header(skb);
		__skb_put(skb, ISCSI_HDR_LEN);
		skb->data_len = len;
		skb->len += len;
		submode |= (csk->submode & CXGBIT_SUBMODE_DCRC);

	} else {
		u32 iso_len = iso ? sizeof(struct cpl_tx_data_iso) : 0;

		skb = alloc_skb(hdr_len + iso_len, GFP_KERNEL);
		if (!skb)
			return NULL;

		skb_reserve(skb, TX_HDR_LEN + iso_len);
		skb_reset_transport_header(skb);
		__skb_put(skb, ISCSI_HDR_LEN);
	}

	submode |= (csk->submode & CXGBIT_SUBMODE_HCRC);
	cxgbit_skcb_submode(skb) = submode;
	cxgbit_skcb_tx_extralen(skb) = cxgbit_digest_len[submode];
	cxgbit_skcb_flags(skb) |= SKCBF_TX_NEED_HDR;
	return skb;
}

static struct sk_buff *cxgbit_alloc_skb(struct cxgbit_sock *csk, u32 len)
{
	return __cxgbit_alloc_skb(csk, len, false);
}

/*
 * cxgbit_is_ofld_imm - check whether a packet can be sent as immediate data
 * @skb: the packet
 *
 * Returns true if a packet can be sent as an offload WR with immediate
 * data. We currently use the same limit as for Ethernet packets.
 */
static int cxgbit_is_ofld_imm(const struct sk_buff *skb)
{
	int length = skb->len;

	if (likely(cxgbit_skcb_flags(skb) & SKCBF_TX_NEED_HDR))
		length += sizeof(struct fw_ofld_tx_data_wr);

	if (likely(cxgbit_skcb_flags(skb) & SKCBF_TX_ISO))
		length += sizeof(struct cpl_tx_data_iso);

#define MAX_IMM_TX_PKT_LEN 256
	return length <= MAX_IMM_TX_PKT_LEN;
}

/*
 * cxgbit_sgl_len - calculates the size of an SGL of the given capacity
 * @n: the number of SGL entries
 * Calculates the number of flits needed for a scatter/gather list that
 * can hold the given number of entries.
 */
static inline unsigned int cxgbit_sgl_len(unsigned int n)
{
	n--;
	return (3 * n) / 2 + (n & 1) + 2;
}

/*
 * cxgbit_calc_tx_flits_ofld - calculate # of flits for an offload packet
 * @skb: the packet
 *
 * Returns the number of flits needed for the given offload packet.
 * These packets are already fully constructed and no additional headers
 * will be added.
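 * For example, a fully-immediate packet of 120 bytes needs
 * DIV_ROUND_UP(120, 8) = 15 flits; non-immediate packets are sent with
 * an SGL whose flit count comes from cxgbit_sgl_len().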
 */
static unsigned int cxgbit_calc_tx_flits_ofld(const struct sk_buff *skb)
{
	unsigned int flits, cnt;

	if (cxgbit_is_ofld_imm(skb))
		return DIV_ROUND_UP(skb->len, 8);
	flits = skb_transport_offset(skb) / 8;
	cnt = skb_shinfo(skb)->nr_frags;
	if (skb_tail_pointer(skb) != skb_transport_header(skb))
		cnt++;
	return flits + cxgbit_sgl_len(cnt);
}

#define CXGBIT_ISO_FSLICE 0x1
#define CXGBIT_ISO_LSLICE 0x2
static void
cxgbit_cpl_tx_data_iso(struct sk_buff *skb, struct cxgbit_iso_info *iso_info)
{
	struct cpl_tx_data_iso *cpl;
	unsigned int submode = cxgbit_skcb_submode(skb);
	unsigned int fslice = !!(iso_info->flags & CXGBIT_ISO_FSLICE);
	unsigned int lslice = !!(iso_info->flags & CXGBIT_ISO_LSLICE);

	cpl = __skb_push(skb, sizeof(*cpl));

	cpl->op_to_scsi = htonl(CPL_TX_DATA_ISO_OP_V(CPL_TX_DATA_ISO) |
				CPL_TX_DATA_ISO_FIRST_V(fslice) |
				CPL_TX_DATA_ISO_LAST_V(lslice) |
				CPL_TX_DATA_ISO_CPLHDRLEN_V(0) |
				CPL_TX_DATA_ISO_HDRCRC_V(submode & 1) |
				CPL_TX_DATA_ISO_PLDCRC_V(((submode >> 1) & 1)) |
				CPL_TX_DATA_ISO_IMMEDIATE_V(0) |
				CPL_TX_DATA_ISO_SCSI_V(2));

	cpl->ahs_len = 0;
	cpl->mpdu = htons(DIV_ROUND_UP(iso_info->mpdu, 4));
	cpl->burst_size = htonl(DIV_ROUND_UP(iso_info->burst_len, 4));
	cpl->len = htonl(iso_info->len);
	cpl->reserved2_seglen_offset = htonl(0);
	cpl->datasn_offset = htonl(0);
	cpl->buffer_offset = htonl(0);
	cpl->reserved3 = 0;

	__skb_pull(skb, sizeof(*cpl));
}

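/*
 * Prepend the offload TX work request header. FW_ISCSI_TX_DATA_WR is used
 * when the skb also carries a cpl_tx_data_iso (see cxgbit_cpl_tx_data_iso()
 * above); the headroom for both was reserved by __cxgbit_alloc_skb().
 */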
static void
cxgbit_tx_data_wr(struct cxgbit_sock *csk, struct sk_buff *skb, u32 dlen,
		  u32 len, u32 credits, u32 compl)
{
	struct fw_ofld_tx_data_wr *req;
	const struct cxgb4_lld_info *lldi = &csk->com.cdev->lldi;
	u32 submode = cxgbit_skcb_submode(skb);
	u32 wr_ulp_mode = 0;
	u32 hdr_size = sizeof(*req);
	u32 opcode = FW_OFLD_TX_DATA_WR;
	u32 immlen = 0;
	u32 force = is_t5(lldi->adapter_type) ? TX_FORCE_V(!submode) :
		    T6_TX_FORCE_F;

	if (cxgbit_skcb_flags(skb) & SKCBF_TX_ISO) {
		opcode = FW_ISCSI_TX_DATA_WR;
		immlen += sizeof(struct cpl_tx_data_iso);
		hdr_size += sizeof(struct cpl_tx_data_iso);
		submode |= 8;
	}

	if (cxgbit_is_ofld_imm(skb))
		immlen += dlen;

	req = __skb_push(skb, hdr_size);
	req->op_to_immdlen = cpu_to_be32(FW_WR_OP_V(opcode) |
					 FW_WR_COMPL_V(compl) |
					 FW_WR_IMMDLEN_V(immlen));
	req->flowid_len16 = cpu_to_be32(FW_WR_FLOWID_V(csk->tid) |
					FW_WR_LEN16_V(credits));
	req->plen = htonl(len);
	wr_ulp_mode = FW_OFLD_TX_DATA_WR_ULPMODE_V(ULP_MODE_ISCSI) |
		      FW_OFLD_TX_DATA_WR_ULPSUBMODE_V(submode);

	req->tunnel_to_proxy = htonl((wr_ulp_mode) | force |
				     FW_OFLD_TX_DATA_WR_SHOVE_V(skb_peek(&csk->txq) ?
								0 : 1));
}

static void cxgbit_arp_failure_skb_discard(void *handle, struct sk_buff *skb)
{
	kfree_skb(skb);
}

void cxgbit_push_tx_frames(struct cxgbit_sock *csk)
{
	struct sk_buff *skb;

	while (csk->wr_cred && ((skb = skb_peek(&csk->txq)) != NULL)) {
		u32 dlen = skb->len;
		u32 len = skb->len;
		u32 credits_needed;
		u32 compl = 0;
		u32 flowclen16 = 0;
		u32 iso_cpl_len = 0;

		if (cxgbit_skcb_flags(skb) & SKCBF_TX_ISO)
			iso_cpl_len = sizeof(struct cpl_tx_data_iso);

		if (cxgbit_is_ofld_imm(skb))
			credits_needed = DIV_ROUND_UP(dlen + iso_cpl_len, 16);
		else
			credits_needed = DIV_ROUND_UP((8 *
					cxgbit_calc_tx_flits_ofld(skb)) +
					iso_cpl_len, 16);

		if (likely(cxgbit_skcb_flags(skb) & SKCBF_TX_NEED_HDR))
			credits_needed += DIV_ROUND_UP(
				sizeof(struct fw_ofld_tx_data_wr), 16);
		/*
		 * Assumes the initial credits are large enough to support
		 * the fw_flowc_wr plus the largest possible first payload.
		 */

		if (!test_and_set_bit(CSK_TX_DATA_SENT, &csk->com.flags)) {
			flowclen16 = cxgbit_send_tx_flowc_wr(csk);
			csk->wr_cred -= flowclen16;
			csk->wr_una_cred += flowclen16;
		}

		if (csk->wr_cred < credits_needed) {
			pr_debug("csk 0x%p, skb %u/%u, wr %d < %u.\n",
				 csk, skb->len, skb->data_len,
				 credits_needed, csk->wr_cred);
			break;
		}
		__skb_unlink(skb, &csk->txq);
		set_wr_txq(skb, CPL_PRIORITY_DATA, csk->txq_idx);
		skb->csum = (__force __wsum)(credits_needed + flowclen16);
		csk->wr_cred -= credits_needed;
		csk->wr_una_cred += credits_needed;

		pr_debug("csk 0x%p, skb %u/%u, wr %d, left %u, unack %u.\n",
			 csk, skb->len, skb->data_len, credits_needed,
			 csk->wr_cred, csk->wr_una_cred);

		if (likely(cxgbit_skcb_flags(skb) & SKCBF_TX_NEED_HDR)) {
			len += cxgbit_skcb_tx_extralen(skb);

			if ((csk->wr_una_cred >= (csk->wr_max_cred / 2)) ||
			    (!before(csk->write_seq,
				     csk->snd_una + csk->snd_win))) {
				compl = 1;
				csk->wr_una_cred = 0;
			}

			cxgbit_tx_data_wr(csk, skb, dlen, len, credits_needed,
					  compl);
			csk->snd_nxt += len;

		} else if ((cxgbit_skcb_flags(skb) & SKCBF_TX_FLAG_COMPL) ||
			   (csk->wr_una_cred >= (csk->wr_max_cred / 2))) {
			struct cpl_close_con_req *req =
				(struct cpl_close_con_req *)skb->data;
			req->wr.wr_hi |= htonl(FW_WR_COMPL_F);
			csk->wr_una_cred = 0;
		}

		cxgbit_sock_enqueue_wr(csk, skb);
		t4_set_arp_err_handler(skb, csk,
				       cxgbit_arp_failure_skb_discard);

		pr_debug("csk 0x%p,%u, skb 0x%p, %u.\n",
			 csk, csk->tid, skb, len);

		cxgbit_l2t_send(csk->com.cdev, skb, csk->l2t);
	}
}

static void cxgbit_unlock_sock(struct cxgbit_sock *csk)
{
	struct sk_buff_head backlogq;
	struct sk_buff *skb;
	void (*fn)(struct cxgbit_sock *, struct sk_buff *);

	skb_queue_head_init(&backlogq);

	spin_lock_bh(&csk->lock);
	while (skb_queue_len(&csk->backlogq)) {
		skb_queue_splice_init(&csk->backlogq, &backlogq);
		spin_unlock_bh(&csk->lock);

		while ((skb = __skb_dequeue(&backlogq))) {
			fn = cxgbit_skcb_rx_backlog_fn(skb);
			fn(csk, skb);
		}

		spin_lock_bh(&csk->lock);
	}

	csk->lock_owner = false;
	spin_unlock_bh(&csk->lock);
}

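/*
 * cxgbit_queue_skb() runs with csk->lock_owner set; skbs queued on
 * csk->backlogq by the rx side in the meantime are replayed through
 * their saved rx_backlog_fn handlers by cxgbit_unlock_sock() above.
 */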
static int cxgbit_queue_skb(struct cxgbit_sock *csk, struct sk_buff *skb)
{
	int ret = 0;

	spin_lock_bh(&csk->lock);
	csk->lock_owner = true;
	spin_unlock_bh(&csk->lock);

	if (unlikely((csk->com.state != CSK_STATE_ESTABLISHED) ||
		     signal_pending(current))) {
		__kfree_skb(skb);
		__skb_queue_purge(&csk->ppodq);
		ret = -1;
		goto unlock;
	}

	csk->write_seq += skb->len +
			  cxgbit_skcb_tx_extralen(skb);

	skb_queue_splice_tail_init(&csk->ppodq, &csk->txq);
	__skb_queue_tail(&csk->txq, skb);
	cxgbit_push_tx_frames(csk);

unlock:
	cxgbit_unlock_sock(csk);
	return ret;
}

static int
cxgbit_map_skb(struct iscsi_cmd *cmd, struct sk_buff *skb, u32 data_offset,
	       u32 data_length)
{
	u32 i = 0, nr_frags = MAX_SKB_FRAGS;
	u32 padding = ((-data_length) & 3);
	struct scatterlist *sg;
	struct page *page;
	unsigned int page_off;

	if (padding)
		nr_frags--;

	/*
	 * We know each entry in t_data_sg contains a page.
	 */
	sg = &cmd->se_cmd.t_data_sg[data_offset / PAGE_SIZE];
	page_off = (data_offset % PAGE_SIZE);

	while (data_length && (i < nr_frags)) {
		u32 cur_len = min_t(u32, data_length, sg->length - page_off);

		page = sg_page(sg);

		get_page(page);
		skb_fill_page_desc(skb, i, page, sg->offset + page_off,
				   cur_len);
		skb->data_len += cur_len;
		skb->len += cur_len;
		skb->truesize += cur_len;

		data_length -= cur_len;
		page_off = 0;
		sg = sg_next(sg);
		i++;
	}

	if (data_length)
		return -1;

	if (padding) {
		page = alloc_page(GFP_KERNEL | __GFP_ZERO);
		if (!page)
			return -1;
		skb_fill_page_desc(skb, i, page, 0, padding);
		skb->data_len += padding;
		skb->len += padding;
		skb->truesize += padding;
	}

	return 0;
}

static int
cxgbit_tx_datain_iso(struct cxgbit_sock *csk, struct iscsi_cmd *cmd,
		     struct iscsi_datain_req *dr)
{
	struct iscsi_conn *conn = csk->conn;
	struct sk_buff *skb;
	struct iscsi_datain datain;
	struct cxgbit_iso_info iso_info;
	u32 data_length = cmd->se_cmd.data_length;
	u32 mrdsl = conn->conn_ops->MaxRecvDataSegmentLength;
	u32 num_pdu, plen, tx_data = 0;
	bool task_sense = !!(cmd->se_cmd.se_cmd_flags &
			     SCF_TRANSPORT_TASK_SENSE);
	bool set_statsn = false;
	int ret = -1;

	while (data_length) {
		num_pdu = (data_length + mrdsl - 1) / mrdsl;
		if (num_pdu > csk->max_iso_npdu)
			num_pdu = csk->max_iso_npdu;

		plen = num_pdu * mrdsl;
		if (plen > data_length)
			plen = data_length;

		skb = __cxgbit_alloc_skb(csk, 0, true);
		if (unlikely(!skb))
			return -ENOMEM;

		memset(skb->data, 0, ISCSI_HDR_LEN);
		cxgbit_skcb_flags(skb) |= SKCBF_TX_ISO;
		cxgbit_skcb_submode(skb) |= (csk->submode &
					     CXGBIT_SUBMODE_DCRC);
		cxgbit_skcb_tx_extralen(skb) = (num_pdu *
				cxgbit_digest_len[cxgbit_skcb_submode(skb)]) +
				((num_pdu - 1) * ISCSI_HDR_LEN);

		memset(&datain, 0, sizeof(struct iscsi_datain));
		memset(&iso_info, 0, sizeof(iso_info));

		if (!tx_data)
			iso_info.flags |= CXGBIT_ISO_FSLICE;

		if (!(data_length - plen)) {
			iso_info.flags |= CXGBIT_ISO_LSLICE;
			if (!task_sense) {
				datain.flags = ISCSI_FLAG_DATA_STATUS;
				iscsit_increment_maxcmdsn(cmd, conn->sess);
				cmd->stat_sn = conn->stat_sn++;
				set_statsn = true;
			}
		}

		iso_info.burst_len = num_pdu * mrdsl;
		iso_info.mpdu = mrdsl;
		iso_info.len = ISCSI_HDR_LEN + plen;

		cxgbit_cpl_tx_data_iso(skb, &iso_info);

		datain.offset = tx_data;
		datain.data_sn = cmd->data_sn - 1;

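		/*
		 * Build the Data-In header for this slice at skb->data;
		 * the payload pages are attached below by cxgbit_map_skb().
		 */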
		iscsit_build_datain_pdu(cmd, conn, &datain,
					(struct iscsi_data_rsp *)skb->data,
					set_statsn);

		ret = cxgbit_map_skb(cmd, skb, tx_data, plen);
		if (unlikely(ret)) {
			__kfree_skb(skb);
			goto out;
		}

		ret = cxgbit_queue_skb(csk, skb);
		if (unlikely(ret))
			goto out;

		tx_data += plen;
		data_length -= plen;

		cmd->read_data_done += plen;
		cmd->data_sn += num_pdu;
	}

	dr->dr_complete = DATAIN_COMPLETE_NORMAL;

	return 0;

out:
	return ret;
}

static int
cxgbit_tx_datain(struct cxgbit_sock *csk, struct iscsi_cmd *cmd,
		 const struct iscsi_datain *datain)
{
	struct sk_buff *skb;
	int ret = 0;

	skb = cxgbit_alloc_skb(csk, 0);
	if (unlikely(!skb))
		return -ENOMEM;

	memcpy(skb->data, cmd->pdu, ISCSI_HDR_LEN);

	if (datain->length) {
		cxgbit_skcb_submode(skb) |= (csk->submode &
					     CXGBIT_SUBMODE_DCRC);
		cxgbit_skcb_tx_extralen(skb) =
			cxgbit_digest_len[cxgbit_skcb_submode(skb)];
	}

	ret = cxgbit_map_skb(cmd, skb, datain->offset, datain->length);
	if (ret < 0) {
		__kfree_skb(skb);
		return ret;
	}

	return cxgbit_queue_skb(csk, skb);
}

static int
cxgbit_xmit_datain_pdu(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
		       struct iscsi_datain_req *dr,
		       const struct iscsi_datain *datain)
{
	struct cxgbit_sock *csk = conn->context;
	u32 data_length = cmd->se_cmd.data_length;
	u32 padding = ((-data_length) & 3);
	u32 mrdsl = conn->conn_ops->MaxRecvDataSegmentLength;

	if ((data_length > mrdsl) && (!dr->recovery) &&
	    (!padding) && (!datain->offset) && csk->max_iso_npdu) {
		atomic_long_add(data_length - datain->length,
				&conn->sess->tx_data_octets);
		return cxgbit_tx_datain_iso(csk, cmd, dr);
	}

	return cxgbit_tx_datain(csk, cmd, datain);
}

static int
cxgbit_xmit_nondatain_pdu(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
			  const void *data_buf, u32 data_buf_len)
{
	struct cxgbit_sock *csk = conn->context;
	struct sk_buff *skb;
	u32 padding = ((-data_buf_len) & 3);

	skb = cxgbit_alloc_skb(csk, data_buf_len + padding);
	if (unlikely(!skb))
		return -ENOMEM;

	memcpy(skb->data, cmd->pdu, ISCSI_HDR_LEN);

	if (data_buf_len) {
		u32 pad_bytes = 0;

		skb_store_bits(skb, ISCSI_HDR_LEN, data_buf, data_buf_len);

		if (padding)
			skb_store_bits(skb, ISCSI_HDR_LEN + data_buf_len,
				       &pad_bytes, padding);
	}

	cxgbit_skcb_tx_extralen(skb) = cxgbit_digest_len[
					cxgbit_skcb_submode(skb)];

	return cxgbit_queue_skb(csk, skb);
}

int
cxgbit_xmit_pdu(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
		struct iscsi_datain_req *dr, const void *buf, u32 buf_len)
{
	if (dr)
		return cxgbit_xmit_datain_pdu(conn, cmd, dr, buf);
	else
		return cxgbit_xmit_nondatain_pdu(conn, cmd, buf, buf_len);
}

int cxgbit_validate_params(struct iscsi_conn *conn)
{
	struct cxgbit_sock *csk = conn->context;
	struct cxgbit_device *cdev = csk->com.cdev;
	struct iscsi_param *param;
	u32 max_xmitdsl;

	param = iscsi_find_param_from_key(MAXXMITDATASEGMENTLENGTH,
					  conn->param_list);
	if (!param)
		return -1;

	if (kstrtou32(param->value, 0, &max_xmitdsl) < 0)
		return -1;

	if (max_xmitdsl > cdev->mdsl) {
		if (iscsi_change_param_sprintf(
			conn, "MaxXmitDataSegmentLength=%u", cdev->mdsl))
			return -1;
	}

	return 0;
}

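/*
 * Map the negotiated HeaderDigest/DataDigest values onto the ULP
 * submode bits and program CRC offload for the connection via
 * cxgbit_setup_conn_digest().
 */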
static int cxgbit_set_digest(struct cxgbit_sock *csk)
{
	struct iscsi_conn *conn = csk->conn;
	struct iscsi_param *param;

	param = iscsi_find_param_from_key(HEADERDIGEST, conn->param_list);
	if (!param) {
		pr_err("param not found key %s\n", HEADERDIGEST);
		return -1;
	}

	if (!strcmp(param->value, CRC32C))
		csk->submode |= CXGBIT_SUBMODE_HCRC;

	param = iscsi_find_param_from_key(DATADIGEST, conn->param_list);
	if (!param) {
		csk->submode = 0;
		pr_err("param not found key %s\n", DATADIGEST);
		return -1;
	}

	if (!strcmp(param->value, CRC32C))
		csk->submode |= CXGBIT_SUBMODE_DCRC;

	if (cxgbit_setup_conn_digest(csk)) {
		csk->submode = 0;
		return -1;
	}

	return 0;
}

static int cxgbit_set_iso_npdu(struct cxgbit_sock *csk)
{
	struct iscsi_conn *conn = csk->conn;
	struct iscsi_conn_ops *conn_ops = conn->conn_ops;
	struct iscsi_param *param;
	u32 mrdsl, mbl;
	u32 max_npdu, max_iso_npdu;
	u32 max_iso_payload;

	if (conn->login->leading_connection) {
		param = iscsi_find_param_from_key(MAXBURSTLENGTH,
						  conn->param_list);
		if (!param) {
			pr_err("param not found key %s\n", MAXBURSTLENGTH);
			return -1;
		}

		if (kstrtou32(param->value, 0, &mbl) < 0)
			return -1;
	} else {
		mbl = conn->sess->sess_ops->MaxBurstLength;
	}

	mrdsl = conn_ops->MaxRecvDataSegmentLength;
	max_npdu = mbl / mrdsl;

	max_iso_payload = rounddown(CXGBIT_MAX_ISO_PAYLOAD, csk->emss);

	max_iso_npdu = max_iso_payload /
		       (ISCSI_HDR_LEN + mrdsl +
			cxgbit_digest_len[csk->submode]);

	csk->max_iso_npdu = min(max_npdu, max_iso_npdu);

	if (csk->max_iso_npdu <= 1)
		csk->max_iso_npdu = 0;

	return 0;
}

/*
 * cxgbit_seq_pdu_inorder()
 * @csk: pointer to cxgbit socket structure
 *
 * This function checks whether data sequences and data
 * PDUs are delivered in order.
 *
 * Return: -1 on error, 0 if both data sequences and data
 * PDUs are in order, 1 if either data sequences or data
 * PDUs are not in order.
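 *
 * A return of 1 is not a login failure: cxgbit_set_params() simply
 * skips the ISO setup in that case (and, on non-T5 adapters, the DDP
 * setup as well).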
 */
static int cxgbit_seq_pdu_inorder(struct cxgbit_sock *csk)
{
	struct iscsi_conn *conn = csk->conn;
	struct iscsi_param *param;

	if (conn->login->leading_connection) {
		param = iscsi_find_param_from_key(DATASEQUENCEINORDER,
						  conn->param_list);
		if (!param) {
			pr_err("param not found key %s\n", DATASEQUENCEINORDER);
			return -1;
		}

		if (strcmp(param->value, YES))
			return 1;

		param = iscsi_find_param_from_key(DATAPDUINORDER,
						  conn->param_list);
		if (!param) {
			pr_err("param not found key %s\n", DATAPDUINORDER);
			return -1;
		}

		if (strcmp(param->value, YES))
			return 1;

	} else {
		if (!conn->sess->sess_ops->DataSequenceInOrder)
			return 1;
		if (!conn->sess->sess_ops->DataPDUInOrder)
			return 1;
	}

	return 0;
}

static int cxgbit_set_params(struct iscsi_conn *conn)
{
	struct cxgbit_sock *csk = conn->context;
	struct cxgbit_device *cdev = csk->com.cdev;
	struct cxgbi_ppm *ppm = *csk->com.cdev->lldi.iscsi_ppm;
	struct iscsi_conn_ops *conn_ops = conn->conn_ops;
	struct iscsi_param *param;
	u8 erl;

	if (conn_ops->MaxRecvDataSegmentLength > cdev->mdsl)
		conn_ops->MaxRecvDataSegmentLength = cdev->mdsl;

	if (cxgbit_set_digest(csk))
		return -1;

	if (conn->login->leading_connection) {
		param = iscsi_find_param_from_key(ERRORRECOVERYLEVEL,
						  conn->param_list);
		if (!param) {
			pr_err("param not found key %s\n", ERRORRECOVERYLEVEL);
			return -1;
		}
		if (kstrtou8(param->value, 0, &erl) < 0)
			return -1;
	} else {
		erl = conn->sess->sess_ops->ErrorRecoveryLevel;
	}

	if (!erl) {
		int ret;

		ret = cxgbit_seq_pdu_inorder(csk);
		if (ret < 0) {
			return -1;
		} else if (ret > 0) {
			if (is_t5(cdev->lldi.adapter_type))
				goto enable_ddp;
			else
				return 0;
		}

		if (test_bit(CDEV_ISO_ENABLE, &cdev->flags)) {
			if (cxgbit_set_iso_npdu(csk))
				return -1;
		}

enable_ddp:
		if (test_bit(CDEV_DDP_ENABLE, &cdev->flags)) {
			if (cxgbit_setup_conn_pgidx(csk,
						    ppm->tformat.pgsz_idx_dflt))
				return -1;
			set_bit(CSK_DDP_ENABLE, &csk->com.flags);
		}
	}

	return 0;
}

int
cxgbit_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,
		    u32 length)
{
	struct cxgbit_sock *csk = conn->context;
	struct sk_buff *skb;
	u32 padding_buf = 0;
	u8 padding = ((-length) & 3);

	skb = cxgbit_alloc_skb(csk, length + padding);
	if (!skb)
		return -ENOMEM;
	skb_store_bits(skb, 0, login->rsp, ISCSI_HDR_LEN);
	skb_store_bits(skb, ISCSI_HDR_LEN, login->rsp_buf, length);

	if (padding)
		skb_store_bits(skb, ISCSI_HDR_LEN + length,
			       &padding_buf, padding);

	if (login->login_complete) {
		if (cxgbit_set_params(conn)) {
			kfree_skb(skb);
			return -1;
		}

		set_bit(CSK_LOGIN_DONE, &csk->com.flags);
	}

	if (cxgbit_queue_skb(csk, skb))
		return -1;

	if ((!login->login_complete) && (!login->login_failed))
		schedule_delayed_work(&conn->login_work, 0);

	return 0;
}

static void
cxgbit_skb_copy_to_sg(struct sk_buff *skb, struct scatterlist *sg,
		      unsigned int nents, u32 skip)
{
	struct skb_seq_state st;
	const u8 *buf;
	unsigned int consumed = 0, buf_len;
	struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(skb);

	skb_prepare_seq_read(skb, pdu_cb->doffset,
			     pdu_cb->doffset + pdu_cb->dlen,
			     &st);

	while (true) {
		buf_len = skb_seq_read(consumed, &buf, &st);
		if (!buf_len) {
			skb_abort_seq_read(&st);
			break;
		}

		consumed += sg_pcopy_from_buffer(sg, nents, (void *)buf,
						 buf_len, skip + consumed);
	}
}

static struct iscsi_cmd *cxgbit_allocate_cmd(struct cxgbit_sock *csk)
{
	struct iscsi_conn *conn = csk->conn;
	struct cxgbi_ppm *ppm = cdev2ppm(csk->com.cdev);
	struct cxgbit_cmd *ccmd;
	struct iscsi_cmd *cmd;

	cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
	if (!cmd) {
		pr_err("Unable to allocate iscsi_cmd + cxgbit_cmd\n");
		return NULL;
	}

	ccmd = iscsit_priv_cmd(cmd);
	ccmd->ttinfo.tag = ppm->tformat.no_ddp_mask;
	ccmd->setup_ddp = true;

	return cmd;
}

static int
cxgbit_handle_immediate_data(struct iscsi_cmd *cmd, struct iscsi_scsi_req *hdr,
			     u32 length)
{
	struct iscsi_conn *conn = cmd->conn;
	struct cxgbit_sock *csk = conn->context;
	struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb);

	if (pdu_cb->flags & PDUCBF_RX_DCRC_ERR) {
		pr_err("ImmediateData CRC32C DataDigest error\n");
		if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
			pr_err("Unable to recover from"
			       " Immediate Data digest failure while"
			       " in ERL=0.\n");
			iscsit_reject_cmd(cmd, ISCSI_REASON_DATA_DIGEST_ERROR,
					  (unsigned char *)hdr);
			return IMMEDIATE_DATA_CANNOT_RECOVER;
		}

		iscsit_reject_cmd(cmd, ISCSI_REASON_DATA_DIGEST_ERROR,
				  (unsigned char *)hdr);
		return IMMEDIATE_DATA_ERL1_CRC_FAILURE;
	}

	if (cmd->se_cmd.se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) {
		struct cxgbit_cmd *ccmd = iscsit_priv_cmd(cmd);
		struct skb_shared_info *ssi = skb_shinfo(csk->skb);
		skb_frag_t *dfrag = &ssi->frags[pdu_cb->dfrag_idx];

		sg_init_table(&ccmd->sg, 1);
		sg_set_page(&ccmd->sg, skb_frag_page(dfrag),
			    skb_frag_size(dfrag), skb_frag_off(dfrag));
		get_page(skb_frag_page(dfrag));

		cmd->se_cmd.t_data_sg = &ccmd->sg;
		cmd->se_cmd.t_data_nents = 1;

		ccmd->release = true;
	} else {
		struct scatterlist *sg = &cmd->se_cmd.t_data_sg[0];
		u32 sg_nents = max(1UL, DIV_ROUND_UP(pdu_cb->dlen, PAGE_SIZE));

		cxgbit_skb_copy_to_sg(csk->skb, sg, sg_nents, 0);
	}

	cmd->write_data_done += pdu_cb->dlen;

	if (cmd->write_data_done == cmd->se_cmd.data_length) {
		spin_lock_bh(&cmd->istate_lock);
		cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
		cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
		spin_unlock_bh(&cmd->istate_lock);
	}

	return IMMEDIATE_DATA_NORMAL_OPERATION;
}

static int
cxgbit_get_immediate_data(struct iscsi_cmd *cmd, struct iscsi_scsi_req *hdr,
			  bool dump_payload)
{
	struct iscsi_conn *conn = cmd->conn;
	int cmdsn_ret = 0, immed_ret = IMMEDIATE_DATA_NORMAL_OPERATION;
	/*
	 * Special case for Unsupported SAM WRITE Opcodes and ImmediateData=Yes.
	 */
	if (dump_payload)
		goto after_immediate_data;

	immed_ret = cxgbit_handle_immediate_data(cmd, hdr,
						 cmd->first_burst_len);
after_immediate_data:
	if (immed_ret == IMMEDIATE_DATA_NORMAL_OPERATION) {
		/*
		 * A PDU/CmdSN carrying Immediate Data passed
		 * DataCRC, check against ExpCmdSN/MaxCmdSN if
		 * Immediate Bit is not set.
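		 *
		 * CMDSN_ERROR_CANNOT_RECOVER is returned to the rx path as
		 * a fatal error, while CMDSN_LOWER_THAN_EXP only drops the
		 * se_cmd reference below.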
		 */
		cmdsn_ret = iscsit_sequence_cmd(conn, cmd,
						(unsigned char *)hdr,
						hdr->cmdsn);
		if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
			return -1;

		if (cmd->sense_reason || cmdsn_ret == CMDSN_LOWER_THAN_EXP) {
			target_put_sess_cmd(&cmd->se_cmd);
			return 0;
		} else if (cmd->unsolicited_data) {
			iscsit_set_unsolicited_dataout(cmd);
		}

	} else if (immed_ret == IMMEDIATE_DATA_ERL1_CRC_FAILURE) {
		/*
		 * Immediate Data failed DataCRC and ERL>=1,
		 * silently drop this PDU and let the initiator
		 * plug the CmdSN gap.
		 *
		 * FIXME: Send Unsolicited NOPIN with reserved
		 * TTT here to help the initiator figure out
		 * the missing CmdSN, although they should be
		 * intelligent enough to determine the missing
		 * CmdSN and issue a retry to plug the sequence.
		 */
		cmd->i_state = ISTATE_REMOVE;
		iscsit_add_cmd_to_immediate_queue(cmd, conn, cmd->i_state);
	} else /* immed_ret == IMMEDIATE_DATA_CANNOT_RECOVER */
		return -1;

	return 0;
}

static int
cxgbit_handle_scsi_cmd(struct cxgbit_sock *csk, struct iscsi_cmd *cmd)
{
	struct iscsi_conn *conn = csk->conn;
	struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb);
	struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)pdu_cb->hdr;
	int rc;
	bool dump_payload = false;

	rc = iscsit_setup_scsi_cmd(conn, cmd, (unsigned char *)hdr);
	if (rc < 0)
		return rc;

	if (pdu_cb->dlen && (pdu_cb->dlen == cmd->se_cmd.data_length) &&
	    (pdu_cb->nr_dfrags == 1))
		cmd->se_cmd.se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;

	rc = iscsit_process_scsi_cmd(conn, cmd, hdr);
	if (rc < 0)
		return 0;
	else if (rc > 0)
		dump_payload = true;

	if (!pdu_cb->dlen)
		return 0;

	return cxgbit_get_immediate_data(cmd, hdr, dump_payload);
}

static int cxgbit_handle_iscsi_dataout(struct cxgbit_sock *csk)
{
	struct scatterlist *sg_start;
	struct iscsi_conn *conn = csk->conn;
	struct iscsi_cmd *cmd = NULL;
	struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb);
	struct iscsi_data *hdr = (struct iscsi_data *)pdu_cb->hdr;
	u32 data_offset = be32_to_cpu(hdr->offset);
	u32 data_len = pdu_cb->dlen;
	int rc, sg_nents, sg_off;
	bool dcrc_err = false;

	if (pdu_cb->flags & PDUCBF_RX_DDP_CMP) {
		u32 offset = be32_to_cpu(hdr->offset);
		u32 ddp_data_len;
		u32 payload_length = ntoh24(hdr->dlength);
		bool success = false;

		cmd = iscsit_find_cmd_from_itt_or_dump(conn, hdr->itt, 0);
		if (!cmd)
			return 0;

		ddp_data_len = offset - cmd->write_data_done;
		atomic_long_add(ddp_data_len, &conn->sess->rx_data_octets);

		cmd->write_data_done = offset;
		cmd->next_burst_len = ddp_data_len;
		cmd->data_sn = be32_to_cpu(hdr->datasn);

		rc = __iscsit_check_dataout_hdr(conn, (unsigned char *)hdr,
						cmd, payload_length, &success);
		if (rc < 0)
			return rc;
		else if (!success)
			return 0;
	} else {
		rc = iscsit_check_dataout_hdr(conn, (unsigned char *)hdr, &cmd);
		if (rc < 0)
			return rc;
		else if (!cmd)
			return 0;
	}

	if (pdu_cb->flags & PDUCBF_RX_DCRC_ERR) {
		pr_err("ITT: 0x%08x, Offset: %u, Length: %u,"
		       " DataSN: 0x%08x\n",
		       hdr->itt, hdr->offset, data_len,
		       hdr->datasn);

		dcrc_err = true;
		goto check_payload;
	}

	pr_debug("DataOut data_len: %u, "
		 "write_data_done: %u, data_length: %u\n",
		 data_len, cmd->write_data_done,
		 cmd->se_cmd.data_length);

	if (!(pdu_cb->flags & PDUCBF_RX_DATA_DDPD)) {
		u32 skip = data_offset % PAGE_SIZE;

		sg_off = data_offset / PAGE_SIZE;
		sg_start = &cmd->se_cmd.t_data_sg[sg_off];
		sg_nents = max(1UL, DIV_ROUND_UP(skip + data_len, PAGE_SIZE));

		cxgbit_skb_copy_to_sg(csk->skb, sg_start, sg_nents, skip);
	}

check_payload:

	rc = iscsit_check_dataout_payload(cmd, hdr, dcrc_err);
	if (rc < 0)
		return rc;

	return 0;
}

static int cxgbit_handle_nop_out(struct cxgbit_sock *csk, struct iscsi_cmd *cmd)
{
	struct iscsi_conn *conn = csk->conn;
	struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb);
	struct iscsi_nopout *hdr = (struct iscsi_nopout *)pdu_cb->hdr;
	unsigned char *ping_data = NULL;
	u32 payload_length = pdu_cb->dlen;
	int ret;

	ret = iscsit_setup_nop_out(conn, cmd, hdr);
	if (ret < 0)
		return 0;

	if (pdu_cb->flags & PDUCBF_RX_DCRC_ERR) {
		if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
			pr_err("Unable to recover from"
			       " NOPOUT Ping DataCRC failure while in"
			       " ERL=0.\n");
			ret = -1;
			goto out;
		} else {
			/*
			 * drop this PDU and let the
			 * initiator plug the CmdSN gap.
			 */
			pr_info("Dropping NOPOUT"
				" Command CmdSN: 0x%08x due to"
				" DataCRC error.\n", hdr->cmdsn);
			ret = 0;
			goto out;
		}
	}

	/*
	 * Handle NOP-OUT payload for traditional iSCSI sockets
	 */
	if (payload_length && hdr->ttt == cpu_to_be32(0xFFFFFFFF)) {
		ping_data = kzalloc(payload_length + 1, GFP_KERNEL);
		if (!ping_data) {
			pr_err("Unable to allocate memory for"
			       " NOPOUT ping data.\n");
			ret = -1;
			goto out;
		}

		skb_copy_bits(csk->skb, pdu_cb->doffset,
			      ping_data, payload_length);

		ping_data[payload_length] = '\0';
		/*
		 * Attach ping data to struct iscsi_cmd->buf_ptr.
		 */
		cmd->buf_ptr = ping_data;
		cmd->buf_ptr_size = payload_length;

		pr_debug("Got %u bytes of NOPOUT ping"
			 " data.\n", payload_length);
		pr_debug("Ping Data: \"%s\"\n", ping_data);
	}

	return iscsit_process_nop_out(conn, cmd, hdr);
out:
	if (cmd)
		iscsit_free_cmd(cmd, false);
	return ret;
}

static int
cxgbit_handle_text_cmd(struct cxgbit_sock *csk, struct iscsi_cmd *cmd)
{
	struct iscsi_conn *conn = csk->conn;
	struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb);
	struct iscsi_text *hdr = (struct iscsi_text *)pdu_cb->hdr;
	u32 payload_length = pdu_cb->dlen;
	int rc;
	unsigned char *text_in = NULL;

	rc = iscsit_setup_text_cmd(conn, cmd, hdr);
	if (rc < 0)
		return rc;

	if (pdu_cb->flags & PDUCBF_RX_DCRC_ERR) {
		if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
			pr_err("Unable to recover from"
			       " Text Data digest failure while in"
			       " ERL=0.\n");
			goto reject;
		} else {
			/*
			 * drop this PDU and let the
			 * initiator plug the CmdSN gap.
			 */
			pr_info("Dropping Text"
				" Command CmdSN: 0x%08x due to"
				" DataCRC error.\n", hdr->cmdsn);
			return 0;
		}
	}

	if (payload_length) {
		text_in = kzalloc(payload_length, GFP_KERNEL);
		if (!text_in) {
			pr_err("Unable to allocate text_in of payload_length: %u\n",
			       payload_length);
			return -ENOMEM;
		}
		skb_copy_bits(csk->skb, pdu_cb->doffset,
			      text_in, payload_length);

		text_in[payload_length - 1] = '\0';

		cmd->text_in_ptr = text_in;
	}

	return iscsit_process_text_cmd(conn, cmd, hdr);

reject:
	return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR,
				 pdu_cb->hdr);
}

static int cxgbit_target_rx_opcode(struct cxgbit_sock *csk)
{
	struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb);
	struct iscsi_hdr *hdr = (struct iscsi_hdr *)pdu_cb->hdr;
	struct iscsi_conn *conn = csk->conn;
	struct iscsi_cmd *cmd = NULL;
	u8 opcode = (hdr->opcode & ISCSI_OPCODE_MASK);
	int ret = -EINVAL;

	switch (opcode) {
	case ISCSI_OP_SCSI_CMD:
		cmd = cxgbit_allocate_cmd(csk);
		if (!cmd)
			goto reject;

		ret = cxgbit_handle_scsi_cmd(csk, cmd);
		break;
	case ISCSI_OP_SCSI_DATA_OUT:
		ret = cxgbit_handle_iscsi_dataout(csk);
		break;
	case ISCSI_OP_NOOP_OUT:
		if (hdr->ttt == cpu_to_be32(0xFFFFFFFF)) {
			cmd = cxgbit_allocate_cmd(csk);
			if (!cmd)
				goto reject;
		}

		ret = cxgbit_handle_nop_out(csk, cmd);
		break;
	case ISCSI_OP_SCSI_TMFUNC:
		cmd = cxgbit_allocate_cmd(csk);
		if (!cmd)
			goto reject;

		ret = iscsit_handle_task_mgt_cmd(conn, cmd,
						 (unsigned char *)hdr);
		break;
	case ISCSI_OP_TEXT:
		if (hdr->ttt != cpu_to_be32(0xFFFFFFFF)) {
			cmd = iscsit_find_cmd_from_itt(conn, hdr->itt);
			if (!cmd)
				goto reject;
		} else {
			cmd = cxgbit_allocate_cmd(csk);
			if (!cmd)
				goto reject;
		}

		ret = cxgbit_handle_text_cmd(csk, cmd);
		break;
	case ISCSI_OP_LOGOUT:
		cmd = cxgbit_allocate_cmd(csk);
		if (!cmd)
			goto reject;

		ret = iscsit_handle_logout_cmd(conn, cmd, (unsigned char *)hdr);
		if (ret > 0)
			wait_for_completion_timeout(&conn->conn_logout_comp,
						    SECONDS_FOR_LOGOUT_COMP
						    * HZ);
		break;
	case ISCSI_OP_SNACK:
		ret = iscsit_handle_snack(conn, (unsigned char *)hdr);
		break;
	default:
		pr_err("Got unknown iSCSI OpCode: 0x%02x\n", opcode);
		dump_stack();
		break;
	}

	return ret;

reject:
	return iscsit_add_reject(conn, ISCSI_REASON_BOOKMARK_NO_RESOURCES,
				 (unsigned char *)hdr);
	return ret;
}

static int cxgbit_rx_opcode(struct cxgbit_sock *csk)
{
	struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb);
	struct iscsi_conn *conn = csk->conn;
	struct iscsi_hdr *hdr = pdu_cb->hdr;
	u8 opcode;

	if (pdu_cb->flags & PDUCBF_RX_HCRC_ERR) {
		atomic_long_inc(&conn->sess->conn_digest_errors);
		goto transport_err;
	}

	if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT)
		goto transport_err;

	opcode = hdr->opcode & ISCSI_OPCODE_MASK;

	if (conn->sess->sess_ops->SessionType &&
	    ((!(opcode & ISCSI_OP_TEXT)) ||
	     (!(opcode & ISCSI_OP_LOGOUT)))) {
		pr_err("Received illegal iSCSI Opcode: 0x%02x"
		       " while in Discovery Session, rejecting.\n", opcode);
		iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
				  (unsigned char *)hdr);
		goto transport_err;
	}

	if (cxgbit_target_rx_opcode(csk) < 0)
		goto transport_err;

	return 0;

transport_err:
	return -1;
}

static int cxgbit_rx_login_pdu(struct cxgbit_sock *csk)
{
	struct iscsi_conn *conn = csk->conn;
	struct iscsi_login *login = conn->login;
	struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb);
	struct iscsi_login_req *login_req;

	login_req = (struct iscsi_login_req *)login->req;
	memcpy(login_req, pdu_cb->hdr, sizeof(*login_req));

	pr_debug("Got Login Command, Flags 0x%02x, ITT: 0x%08x,"
		 " CmdSN: 0x%08x, ExpStatSN: 0x%08x, CID: %hu, Length: %u\n",
		 login_req->flags, login_req->itt, login_req->cmdsn,
		 login_req->exp_statsn, login_req->cid, pdu_cb->dlen);
	/*
	 * Setup the initial iscsi_login values from the leading
	 * login request PDU.
	 */
	if (login->first_request) {
		login_req = (struct iscsi_login_req *)login->req;
		login->leading_connection = (!login_req->tsih) ? 1 : 0;
		login->current_stage = ISCSI_LOGIN_CURRENT_STAGE(
				login_req->flags);
		login->version_min = login_req->min_version;
		login->version_max = login_req->max_version;
		memcpy(login->isid, login_req->isid, 6);
		login->cmd_sn = be32_to_cpu(login_req->cmdsn);
		login->init_task_tag = login_req->itt;
		login->initial_exp_statsn = be32_to_cpu(login_req->exp_statsn);
		login->cid = be16_to_cpu(login_req->cid);
		login->tsih = be16_to_cpu(login_req->tsih);
	}

	if (iscsi_target_check_login_request(conn, login) < 0)
		return -1;

	memset(login->req_buf, 0, MAX_KEY_VALUE_PAIRS);
	skb_copy_bits(csk->skb, pdu_cb->doffset, login->req_buf, pdu_cb->dlen);

	return 0;
}

static int
cxgbit_process_iscsi_pdu(struct cxgbit_sock *csk, struct sk_buff *skb, int idx)
{
	struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_skb_lro_pdu_cb(skb, idx);
	int ret;

	cxgbit_rx_pdu_cb(skb) = pdu_cb;

	csk->skb = skb;

	if (!test_bit(CSK_LOGIN_DONE, &csk->com.flags)) {
		ret = cxgbit_rx_login_pdu(csk);
		set_bit(CSK_LOGIN_PDU_DONE, &csk->com.flags);
	} else {
		ret = cxgbit_rx_opcode(csk);
	}

	return ret;
}

static void cxgbit_lro_skb_dump(struct sk_buff *skb)
{
	struct skb_shared_info *ssi = skb_shinfo(skb);
	struct cxgbit_lro_cb *lro_cb = cxgbit_skb_lro_cb(skb);
	struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_skb_lro_pdu_cb(skb, 0);
	u8 i;

	pr_info("skb 0x%p, head 0x%p, 0x%p, len %u,%u, frags %u.\n",
		skb, skb->head, skb->data, skb->len, skb->data_len,
		ssi->nr_frags);
	pr_info("skb 0x%p, lro_cb, csk 0x%p, pdu %u, %u.\n",
		skb, lro_cb->csk, lro_cb->pdu_idx, lro_cb->pdu_totallen);

	for (i = 0; i < lro_cb->pdu_idx; i++, pdu_cb++)
		pr_info("skb 0x%p, pdu %d, %u, f 0x%x, seq 0x%x, dcrc 0x%x, "
			"frags %u.\n",
			skb, i, pdu_cb->pdulen, pdu_cb->flags, pdu_cb->seq,
			pdu_cb->ddigest, pdu_cb->frags);
	for (i = 0; i < ssi->nr_frags; i++)
		pr_info("skb 0x%p, frag %d, off %u, sz %u.\n",
			skb, i, skb_frag_off(&ssi->frags[i]),
			skb_frag_size(&ssi->frags[i]));
}

static void cxgbit_lro_hskb_reset(struct cxgbit_sock *csk)
{
	struct sk_buff *skb = csk->lro_hskb;
	struct skb_shared_info *ssi = skb_shinfo(skb);
	u8 i;

	memset(skb->data, 0, LRO_SKB_MIN_HEADROOM);
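	/*
	 * Drop the page references taken by cxgbit_lro_skb_merge() when
	 * frags were copied into this header skb.
	 */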
	for (i = 0; i < ssi->nr_frags; i++)
		put_page(skb_frag_page(&ssi->frags[i]));
	ssi->nr_frags = 0;
	skb->data_len = 0;
	skb->truesize -= skb->len;
	skb->len = 0;
}

static void
cxgbit_lro_skb_merge(struct cxgbit_sock *csk, struct sk_buff *skb, u8 pdu_idx)
{
	struct sk_buff *hskb = csk->lro_hskb;
	struct cxgbit_lro_pdu_cb *hpdu_cb = cxgbit_skb_lro_pdu_cb(hskb, 0);
	struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_skb_lro_pdu_cb(skb, pdu_idx);
	struct skb_shared_info *hssi = skb_shinfo(hskb);
	struct skb_shared_info *ssi = skb_shinfo(skb);
	unsigned int len = 0;

	if (pdu_cb->flags & PDUCBF_RX_HDR) {
		u8 hfrag_idx = hssi->nr_frags;

		hpdu_cb->flags |= pdu_cb->flags;
		hpdu_cb->seq = pdu_cb->seq;
		hpdu_cb->hdr = pdu_cb->hdr;
		hpdu_cb->hlen = pdu_cb->hlen;

		memcpy(&hssi->frags[hfrag_idx], &ssi->frags[pdu_cb->hfrag_idx],
		       sizeof(skb_frag_t));

		get_page(skb_frag_page(&hssi->frags[hfrag_idx]));
		hssi->nr_frags++;
		hpdu_cb->frags++;
		hpdu_cb->hfrag_idx = hfrag_idx;

		len = skb_frag_size(&hssi->frags[hfrag_idx]);
		hskb->len += len;
		hskb->data_len += len;
		hskb->truesize += len;
	}

	if (pdu_cb->flags & PDUCBF_RX_DATA) {
		u8 dfrag_idx = hssi->nr_frags, i;

		hpdu_cb->flags |= pdu_cb->flags;
		hpdu_cb->dfrag_idx = dfrag_idx;

		len = 0;
		for (i = 0; i < pdu_cb->nr_dfrags; dfrag_idx++, i++) {
			memcpy(&hssi->frags[dfrag_idx],
			       &ssi->frags[pdu_cb->dfrag_idx + i],
			       sizeof(skb_frag_t));

			get_page(skb_frag_page(&hssi->frags[dfrag_idx]));

			len += skb_frag_size(&hssi->frags[dfrag_idx]);

			hssi->nr_frags++;
			hpdu_cb->frags++;
		}

		hpdu_cb->dlen = pdu_cb->dlen;
		hpdu_cb->doffset = hpdu_cb->hlen;
		hpdu_cb->nr_dfrags = pdu_cb->nr_dfrags;
		hskb->len += len;
		hskb->data_len += len;
		hskb->truesize += len;
	}

	if (pdu_cb->flags & PDUCBF_RX_STATUS) {
		hpdu_cb->flags |= pdu_cb->flags;

		if (hpdu_cb->flags & PDUCBF_RX_DATA)
			hpdu_cb->flags &= ~PDUCBF_RX_DATA_DDPD;

		hpdu_cb->ddigest = pdu_cb->ddigest;
		hpdu_cb->pdulen = pdu_cb->pdulen;
	}
}

static int cxgbit_process_lro_skb(struct cxgbit_sock *csk, struct sk_buff *skb)
{
	struct cxgbit_lro_cb *lro_cb = cxgbit_skb_lro_cb(skb);
	struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_skb_lro_pdu_cb(skb, 0);
	u8 pdu_idx = 0, last_idx = 0;
	int ret = 0;

	if (!pdu_cb->complete) {
		cxgbit_lro_skb_merge(csk, skb, 0);

		if (pdu_cb->flags & PDUCBF_RX_STATUS) {
			struct sk_buff *hskb = csk->lro_hskb;

			ret = cxgbit_process_iscsi_pdu(csk, hskb, 0);

			cxgbit_lro_hskb_reset(csk);

			if (ret < 0)
				goto out;
		}

		pdu_idx = 1;
	}

	if (lro_cb->pdu_idx)
		last_idx = lro_cb->pdu_idx - 1;

	for (; pdu_idx <= last_idx; pdu_idx++) {
		ret = cxgbit_process_iscsi_pdu(csk, skb, pdu_idx);
		if (ret < 0)
			goto out;
	}

	if ((!lro_cb->complete) && lro_cb->pdu_idx)
		cxgbit_lro_skb_merge(csk, skb, lro_cb->pdu_idx);

out:
	return ret;
}

static int cxgbit_rx_lro_skb(struct cxgbit_sock *csk, struct sk_buff *skb)
{
	struct cxgbit_lro_cb *lro_cb = cxgbit_skb_lro_cb(skb);
	struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_skb_lro_pdu_cb(skb, 0);
	int ret = -1;

	if ((pdu_cb->flags & PDUCBF_RX_HDR) &&
	    (pdu_cb->seq != csk->rcv_nxt)) {
		pr_info("csk 0x%p, tid 0x%x, seq 0x%x != 0x%x.\n",
			csk, csk->tid, pdu_cb->seq, csk->rcv_nxt);
		cxgbit_lro_skb_dump(skb);
		return ret;
	}

	csk->rcv_nxt += lro_cb->pdu_totallen;

	ret = cxgbit_process_lro_skb(csk, skb);

	csk->rx_credits += lro_cb->pdu_totallen;

	if (csk->rx_credits >= (csk->rcv_win / 4))
		cxgbit_rx_data_ack(csk);

	return ret;
}

static int cxgbit_rx_skb(struct cxgbit_sock *csk, struct sk_buff *skb)
{
	struct cxgb4_lld_info *lldi = &csk->com.cdev->lldi;
	int ret = -1;

	if (likely(cxgbit_skcb_flags(skb) & SKCBF_RX_LRO)) {
		if (is_t5(lldi->adapter_type))
			ret = cxgbit_rx_lro_skb(csk, skb);
		else
			ret = cxgbit_process_lro_skb(csk, skb);
	}

	__kfree_skb(skb);
	return ret;
}

static bool cxgbit_rxq_len(struct cxgbit_sock *csk, struct sk_buff_head *rxq)
{
	spin_lock_bh(&csk->rxq.lock);
	if (skb_queue_len(&csk->rxq)) {
		skb_queue_splice_init(&csk->rxq, rxq);
		spin_unlock_bh(&csk->rxq.lock);
		return true;
	}
	spin_unlock_bh(&csk->rxq.lock);
	return false;
}

static int cxgbit_wait_rxq(struct cxgbit_sock *csk)
{
	struct sk_buff *skb;
	struct sk_buff_head rxq;

	skb_queue_head_init(&rxq);

	wait_event_interruptible(csk->waitq, cxgbit_rxq_len(csk, &rxq));

	if (signal_pending(current))
		goto out;

	while ((skb = __skb_dequeue(&rxq))) {
		if (cxgbit_rx_skb(csk, skb))
			goto out;
	}

	return 0;
out:
	__skb_queue_purge(&rxq);
	return -1;
}

int cxgbit_get_login_rx(struct iscsi_conn *conn, struct iscsi_login *login)
{
	struct cxgbit_sock *csk = conn->context;
	int ret = -1;

	while (!test_and_clear_bit(CSK_LOGIN_PDU_DONE, &csk->com.flags)) {
		ret = cxgbit_wait_rxq(csk);
		if (ret) {
			clear_bit(CSK_LOGIN_PDU_DONE, &csk->com.flags);
			break;
		}
	}

	return ret;
}

void cxgbit_get_rx_pdu(struct iscsi_conn *conn)
{
	struct cxgbit_sock *csk = conn->context;

	while (!kthread_should_stop()) {
		iscsit_thread_check_cpumask(conn, current, 0);
		if (cxgbit_wait_rxq(csk))
			return;
	}
}