// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2018 Intel Corporation. */

#include <linux/prefetch.h>
#include <linux/bpf_trace.h>
#include <net/mpls.h>
#include <net/xdp.h>
#include "i40e.h"
#include "i40e_trace.h"
#include "i40e_prototype.h"
#include "i40e_txrx_common.h"
#include "i40e_xsk.h"

#define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
/**
 * i40e_fdir - Generate a Flow Director descriptor based on fdata
 * @tx_ring: Tx ring to send buffer on
 * @fdata: Flow director filter data
 * @add: Indicate if we are adding a rule or deleting one
 *
 **/
static void i40e_fdir(struct i40e_ring *tx_ring,
		      struct i40e_fdir_filter *fdata, bool add)
{
	struct i40e_filter_program_desc *fdir_desc;
	struct i40e_pf *pf = tx_ring->vsi->back;
	u32 flex_ptype, dtype_cmd;
	u16 i;

	/* grab the next descriptor */
	i = tx_ring->next_to_use;
	fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);

	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

	flex_ptype = I40E_TXD_FLTR_QW0_QINDEX_MASK &
		     (fdata->q_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT);

	flex_ptype |= I40E_TXD_FLTR_QW0_FLEXOFF_MASK &
		      (fdata->flex_off << I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT);

	flex_ptype |= I40E_TXD_FLTR_QW0_PCTYPE_MASK &
		      (fdata->pctype << I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);

	/* Use LAN VSI Id if not programmed by user */
	flex_ptype |= I40E_TXD_FLTR_QW0_DEST_VSI_MASK &
		      ((u32)(fdata->dest_vsi ? : pf->vsi[pf->lan_vsi]->id) <<
		       I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT);

	dtype_cmd = I40E_TX_DESC_DTYPE_FILTER_PROG;

	dtype_cmd |= add ?
		     I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
		     I40E_TXD_FLTR_QW1_PCMD_SHIFT :
		     I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
		     I40E_TXD_FLTR_QW1_PCMD_SHIFT;

	dtype_cmd |= I40E_TXD_FLTR_QW1_DEST_MASK &
		     (fdata->dest_ctl << I40E_TXD_FLTR_QW1_DEST_SHIFT);

	dtype_cmd |= I40E_TXD_FLTR_QW1_FD_STATUS_MASK &
		     (fdata->fd_status << I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT);

	if (fdata->cnt_index) {
		dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
		dtype_cmd |= I40E_TXD_FLTR_QW1_CNTINDEX_MASK &
			     ((u32)fdata->cnt_index <<
			      I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT);
	}

	fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
	fdir_desc->rsvd = cpu_to_le32(0);
	fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd);
	fdir_desc->fd_id = cpu_to_le32(fdata->fd_id);
}
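
/* Programming or removing a Flow Director rule consumes two descriptors on
 * the FDir Tx ring: the filter-program descriptor built by i40e_fdir() above
 * plus a dummy data descriptor carrying the raw packet that describes the
 * flow. That is why i40e_program_fdir_filter() below waits for two free
 * slots before proceeding.
 */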

#define I40E_FD_CLEAN_DELAY 10
/**
 * i40e_program_fdir_filter - Program a Flow Director filter
 * @fdir_data: the filter parameters to program
 * @raw_packet: the pre-allocated packet buffer for FDir
 * @pf: The PF pointer
 * @add: True for add/update, False for remove
 **/
static int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data,
				    u8 *raw_packet, struct i40e_pf *pf,
				    bool add)
{
	struct i40e_tx_buffer *tx_buf, *first;
	struct i40e_tx_desc *tx_desc;
	struct i40e_ring *tx_ring;
	struct i40e_vsi *vsi;
	struct device *dev;
	dma_addr_t dma;
	u32 td_cmd = 0;
	u16 i;

	/* find existing FDIR VSI */
	vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR);
	if (!vsi)
		return -ENOENT;

	tx_ring = vsi->tx_rings[0];
	dev = tx_ring->dev;

	/* we need two descriptors to add/del a filter and we can wait */
	for (i = I40E_FD_CLEAN_DELAY; I40E_DESC_UNUSED(tx_ring) < 2; i--) {
		if (!i)
			return -EAGAIN;
		msleep_interruptible(1);
	}

	dma = dma_map_single(dev, raw_packet,
			     I40E_FDIR_MAX_RAW_PACKET_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma))
		goto dma_fail;

	/* grab the next descriptor */
	i = tx_ring->next_to_use;
	first = &tx_ring->tx_bi[i];
	i40e_fdir(tx_ring, fdir_data, add);

	/* Now program a dummy descriptor */
	i = tx_ring->next_to_use;
	tx_desc = I40E_TX_DESC(tx_ring, i);
	tx_buf = &tx_ring->tx_bi[i];

	tx_ring->next_to_use = ((i + 1) < tx_ring->count) ? i + 1 : 0;

	memset(tx_buf, 0, sizeof(struct i40e_tx_buffer));

	/* record length, and DMA address */
	dma_unmap_len_set(tx_buf, len, I40E_FDIR_MAX_RAW_PACKET_SIZE);
	dma_unmap_addr_set(tx_buf, dma, dma);

	tx_desc->buffer_addr = cpu_to_le64(dma);
	td_cmd = I40E_TXD_CMD | I40E_TX_DESC_CMD_DUMMY;

	tx_buf->tx_flags = I40E_TX_FLAGS_FD_SB;
	tx_buf->raw_buf = (void *)raw_packet;

	tx_desc->cmd_type_offset_bsz =
		build_ctob(td_cmd, 0, I40E_FDIR_MAX_RAW_PACKET_SIZE, 0);

	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.
	 */
	wmb();

	/* Mark the data descriptor to be watched */
	first->next_to_watch = tx_desc;

	writel(tx_ring->next_to_use, tx_ring->tail);
	return 0;

dma_fail:
	return -1;
}
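
/* Illustrative usage (a sketch; the field values and flow are hypothetical,
 * the real callers are the i40e_add_del_fdir_*() helpers below):
 *
 *	struct i40e_fdir_filter f = {
 *		.fd_id = 1,
 *		.q_index = 2,
 *		.pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP,
 *	};
 *	u8 *pkt = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
 *
 *	if (pkt && !i40e_program_fdir_filter(&f, pkt, pf, true))
 *		return 0;	- pkt now belongs to the Tx ring
 *
 * On success the raw packet buffer is owned by the ring and is freed when
 * the descriptor is cleaned; see the ownership comment at the end of
 * i40e_add_del_fdir().
 */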

/**
 * i40e_create_dummy_packet - Constructs dummy packet for HW
 * @dummy_packet: preallocated space for dummy packet
 * @ipv4: true if the layer 3 packet is IPv4, false for IPv6
 * @l4proto: next level protocol used in data portion of l3
 * @data: filter data
 *
 * Returns address of layer 4 protocol dummy packet.
 **/
static char *i40e_create_dummy_packet(u8 *dummy_packet, bool ipv4, u8 l4proto,
				      struct i40e_fdir_filter *data)
{
	bool is_vlan = !!data->vlan_tag;
	struct vlan_hdr vlan = {};
	struct ipv6hdr ipv6 = {};
	struct ethhdr eth = {};
	struct iphdr ip = {};
	u8 *tmp;

	if (ipv4) {
		eth.h_proto = cpu_to_be16(ETH_P_IP);
		ip.protocol = l4proto;
		ip.version = 0x4;
		ip.ihl = 0x5;

		ip.daddr = data->dst_ip;
		ip.saddr = data->src_ip;
	} else {
		eth.h_proto = cpu_to_be16(ETH_P_IPV6);
		ipv6.nexthdr = l4proto;
		ipv6.version = 0x6;

		memcpy(&ipv6.saddr.in6_u.u6_addr32, data->src_ip6,
		       sizeof(__be32) * 4);
		memcpy(&ipv6.daddr.in6_u.u6_addr32, data->dst_ip6,
		       sizeof(__be32) * 4);
	}

	if (is_vlan) {
		vlan.h_vlan_TCI = data->vlan_tag;
		vlan.h_vlan_encapsulated_proto = eth.h_proto;
		eth.h_proto = data->vlan_etype;
	}

	tmp = dummy_packet;
	memcpy(tmp, &eth, sizeof(eth));
	tmp += sizeof(eth);

	if (is_vlan) {
		memcpy(tmp, &vlan, sizeof(vlan));
		tmp += sizeof(vlan);
	}

	if (ipv4) {
		memcpy(tmp, &ip, sizeof(ip));
		tmp += sizeof(ip);
	} else {
		memcpy(tmp, &ipv6, sizeof(ipv6));
		tmp += sizeof(ipv6);
	}

	return tmp;
}
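
/* Resulting layout, with sizeof(struct ethhdr) == 14, sizeof(struct vlan_hdr)
 * == 4, sizeof(struct iphdr) == 20 and sizeof(struct ipv6hdr) == 40:
 *
 *	[ eth (14) ][ vlan (4, optional) ][ ip (20) or ipv6 (40) ] <- returned
 *
 * so the returned pointer is where the L4 header (UDP/TCP/SCTP) starts; the
 * helpers below write their header fields there.
 */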

/**
 * i40e_create_dummy_udp_packet - helper function to create UDP packet
 * @raw_packet: preallocated space for dummy packet
 * @ipv4: true if the layer 3 packet is IPv4, false for IPv6
 * @l4proto: next level protocol used in data portion of l3
 * @data: filter data
 *
 * Helper function to populate udp fields.
 **/
static void i40e_create_dummy_udp_packet(u8 *raw_packet, bool ipv4, u8 l4proto,
					 struct i40e_fdir_filter *data)
{
	struct udphdr *udp;
	u8 *tmp;

	tmp = i40e_create_dummy_packet(raw_packet, ipv4, IPPROTO_UDP, data);
	udp = (struct udphdr *)(tmp);
	udp->dest = data->dst_port;
	udp->source = data->src_port;
}

/**
 * i40e_create_dummy_tcp_packet - helper function to create TCP packet
 * @raw_packet: preallocated space for dummy packet
 * @ipv4: true if the layer 3 packet is IPv4, false for IPv6
 * @l4proto: next level protocol used in data portion of l3
 * @data: filter data
 *
 * Helper function to populate tcp fields.
 **/
static void i40e_create_dummy_tcp_packet(u8 *raw_packet, bool ipv4, u8 l4proto,
					 struct i40e_fdir_filter *data)
{
	struct tcphdr *tcp;
	u8 *tmp;
	/* Dummy tcp packet */
	static const char tcp_packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		0x50, 0x11, 0x0, 0x72, 0, 0, 0, 0};

	tmp = i40e_create_dummy_packet(raw_packet, ipv4, IPPROTO_TCP, data);

	tcp = (struct tcphdr *)tmp;
	memcpy(tcp, tcp_packet, sizeof(tcp_packet));
	tcp->dest = data->dst_port;
	tcp->source = data->src_port;
}

/**
 * i40e_create_dummy_sctp_packet - helper function to create SCTP packet
 * @raw_packet: preallocated space for dummy packet
 * @ipv4: true if the layer 3 packet is IPv4, false for IPv6
 * @l4proto: next level protocol used in data portion of l3
 * @data: filter data
 *
 * Helper function to populate sctp fields.
 **/
static void i40e_create_dummy_sctp_packet(u8 *raw_packet, bool ipv4,
					  u8 l4proto,
					  struct i40e_fdir_filter *data)
{
	struct sctphdr *sctp;
	u8 *tmp;

	tmp = i40e_create_dummy_packet(raw_packet, ipv4, IPPROTO_SCTP, data);

	sctp = (struct sctphdr *)tmp;
	sctp->dest = data->dst_port;
	sctp->source = data->src_port;
}

/**
 * i40e_prepare_fdir_filter - Prepare and program fdir filter
 * @pf: physical function to attach filter to
 * @fd_data: filter data
 * @add: add or delete filter
 * @packet_addr: address of dummy packet, used in filtering
 * @payload_offset: offset from dummy packet address to user defined data
 * @pctype: Packet type for which filter is used
 *
 * Helper function to offset data of dummy packet, program it and
 * handle errors.
 **/
static int i40e_prepare_fdir_filter(struct i40e_pf *pf,
				    struct i40e_fdir_filter *fd_data,
				    bool add, char *packet_addr,
				    int payload_offset, u8 pctype)
{
	int ret;

	if (fd_data->flex_filter) {
		u8 *payload;
		__be16 pattern = fd_data->flex_word;
		u16 off = fd_data->flex_offset;

		payload = packet_addr + payload_offset;

		/* If user provided vlan, offset payload by vlan header length */
		if (!!fd_data->vlan_tag)
			payload += VLAN_HLEN;

		*((__force __be16 *)(payload + off)) = pattern;
	}

	fd_data->pctype = pctype;
	ret = i40e_program_fdir_filter(fd_data, packet_addr, pf, add);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
			 fd_data->pctype, fd_data->fd_id, ret);
		/* Free the packet buffer since it wasn't added to the ring */
		return -EOPNOTSUPP;
	} else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
		if (add)
			dev_info(&pf->pdev->dev,
				 "Filter OK for PCTYPE %d loc = %d\n",
				 fd_data->pctype, fd_data->fd_id);
		else
			dev_info(&pf->pdev->dev,
				 "Filter deleted for PCTYPE %d loc = %d\n",
				 fd_data->pctype, fd_data->fd_id);
	}

	return ret;
}

/**
 * i40e_change_filter_num - Update the per-protocol filter counters
 * @ipv4: true if the layer 3 packet is IPv4, false for IPv6
 * @add: add or delete filter
 * @ipv4_filter_num: field to update
 * @ipv6_filter_num: field to update
 *
 * Update the appropriate filter count for the PF.
 **/
static void i40e_change_filter_num(bool ipv4, bool add, u16 *ipv4_filter_num,
				   u16 *ipv6_filter_num)
{
	if (add) {
		if (ipv4)
			(*ipv4_filter_num)++;
		else
			(*ipv6_filter_num)++;
	} else {
		if (ipv4)
			(*ipv4_filter_num)--;
		else
			(*ipv6_filter_num)--;
	}
}
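
/* The *_DUMMY_PACKET_LEN constants below are the byte offset from the start
 * of the (VLAN-less) dummy frame to the end of the relevant headers:
 * 14 (eth) plus 20 (IPv4) or 40 (IPv6), plus the L4 header where one exists
 * (8 for UDP, 20 for TCP, 12 for SCTP). E.g. 42 = 14 + 20 + 8 for UDP/IPv4
 * and 62 = 14 + 40 + 8 for UDP/IPv6. A VLAN tag, when present, is accounted
 * for separately in i40e_prepare_fdir_filter().
 */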

#define I40E_UDPIP_DUMMY_PACKET_LEN 42
#define I40E_UDPIP6_DUMMY_PACKET_LEN 62
/**
 * i40e_add_del_fdir_udp - Add/Remove UDP filters
 * @vsi: pointer to the targeted VSI
 * @fd_data: the flow director data required for the FDir descriptor
 * @add: true adds a filter, false removes it
 * @ipv4: true is v4, false is v6
 *
 * Returns 0 if the filters were successfully added or removed
 **/
static int i40e_add_del_fdir_udp(struct i40e_vsi *vsi,
				 struct i40e_fdir_filter *fd_data,
				 bool add,
				 bool ipv4)
{
	struct i40e_pf *pf = vsi->back;
	u8 *raw_packet;
	int ret;

	raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
	if (!raw_packet)
		return -ENOMEM;

	i40e_create_dummy_udp_packet(raw_packet, ipv4, IPPROTO_UDP, fd_data);

	if (ipv4)
		ret = i40e_prepare_fdir_filter
			(pf, fd_data, add, raw_packet,
			 I40E_UDPIP_DUMMY_PACKET_LEN,
			 I40E_FILTER_PCTYPE_NONF_IPV4_UDP);
	else
		ret = i40e_prepare_fdir_filter
			(pf, fd_data, add, raw_packet,
			 I40E_UDPIP6_DUMMY_PACKET_LEN,
			 I40E_FILTER_PCTYPE_NONF_IPV6_UDP);

	if (ret) {
		kfree(raw_packet);
		return ret;
	}

	i40e_change_filter_num(ipv4, add, &pf->fd_udp4_filter_cnt,
			       &pf->fd_udp6_filter_cnt);

	return 0;
}

#define I40E_TCPIP_DUMMY_PACKET_LEN 54
#define I40E_TCPIP6_DUMMY_PACKET_LEN 74
/**
 * i40e_add_del_fdir_tcp - Add/Remove TCP filters
 * @vsi: pointer to the targeted VSI
 * @fd_data: the flow director data required for the FDir descriptor
 * @add: true adds a filter, false removes it
 * @ipv4: true is v4, false is v6
 *
 * Returns 0 if the filters were successfully added or removed
 **/
static int i40e_add_del_fdir_tcp(struct i40e_vsi *vsi,
				 struct i40e_fdir_filter *fd_data,
				 bool add,
				 bool ipv4)
{
	struct i40e_pf *pf = vsi->back;
	u8 *raw_packet;
	int ret;

	raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
	if (!raw_packet)
		return -ENOMEM;

	i40e_create_dummy_tcp_packet(raw_packet, ipv4, IPPROTO_TCP, fd_data);
	if (ipv4)
		ret = i40e_prepare_fdir_filter
			(pf, fd_data, add, raw_packet,
			 I40E_TCPIP_DUMMY_PACKET_LEN,
			 I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
	else
		ret = i40e_prepare_fdir_filter
			(pf, fd_data, add, raw_packet,
			 I40E_TCPIP6_DUMMY_PACKET_LEN,
			 I40E_FILTER_PCTYPE_NONF_IPV6_TCP);

	if (ret) {
		kfree(raw_packet);
		return ret;
	}

	i40e_change_filter_num(ipv4, add, &pf->fd_tcp4_filter_cnt,
			       &pf->fd_tcp6_filter_cnt);

	if (add) {
		if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
		    I40E_DEBUG_FD & pf->hw.debug_mask)
			dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n");
		set_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state);
	}
	return 0;
}
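
/* Adding any sideband TCP rule forces ATR off via
 * __I40E_FD_ATR_AUTO_DISABLED, presumably so that automatic ATR steering
 * does not override the explicit rule; the flag is cleared again outside
 * this file once the TCP filter counts drop back to zero.
 */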

#define I40E_SCTPIP_DUMMY_PACKET_LEN 46
#define I40E_SCTPIP6_DUMMY_PACKET_LEN 66
/**
 * i40e_add_del_fdir_sctp - Add/Remove SCTP Flow Director filters for
 * a specific flow spec
 * @vsi: pointer to the targeted VSI
 * @fd_data: the flow director data required for the FDir descriptor
 * @add: true adds a filter, false removes it
 * @ipv4: true is v4, false is v6
 *
 * Returns 0 if the filters were successfully added or removed
 **/
static int i40e_add_del_fdir_sctp(struct i40e_vsi *vsi,
				  struct i40e_fdir_filter *fd_data,
				  bool add,
				  bool ipv4)
{
	struct i40e_pf *pf = vsi->back;
	u8 *raw_packet;
	int ret;

	raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
	if (!raw_packet)
		return -ENOMEM;

	i40e_create_dummy_sctp_packet(raw_packet, ipv4, IPPROTO_SCTP, fd_data);

	if (ipv4)
		ret = i40e_prepare_fdir_filter
			(pf, fd_data, add, raw_packet,
			 I40E_SCTPIP_DUMMY_PACKET_LEN,
			 I40E_FILTER_PCTYPE_NONF_IPV4_SCTP);
	else
		ret = i40e_prepare_fdir_filter
			(pf, fd_data, add, raw_packet,
			 I40E_SCTPIP6_DUMMY_PACKET_LEN,
			 I40E_FILTER_PCTYPE_NONF_IPV6_SCTP);

	if (ret) {
		kfree(raw_packet);
		return ret;
	}

	i40e_change_filter_num(ipv4, add, &pf->fd_sctp4_filter_cnt,
			       &pf->fd_sctp6_filter_cnt);

	return 0;
}

#define I40E_IP_DUMMY_PACKET_LEN 34
#define I40E_IP6_DUMMY_PACKET_LEN 54
/**
 * i40e_add_del_fdir_ip - Add/Remove IPv4/IPv6 Flow Director filters for
 * a specific flow spec
 * @vsi: pointer to the targeted VSI
 * @fd_data: the flow director data required for the FDir descriptor
 * @add: true adds a filter, false removes it
 * @ipv4: true is v4, false is v6
 *
 * Returns 0 if the filters were successfully added or removed
 **/
static int i40e_add_del_fdir_ip(struct i40e_vsi *vsi,
				struct i40e_fdir_filter *fd_data,
				bool add,
				bool ipv4)
{
	struct i40e_pf *pf = vsi->back;
	int payload_offset;
	u8 *raw_packet;
	int iter_start;
	int iter_end;
	int ret;
	int i;

	if (ipv4) {
		iter_start = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
		iter_end = I40E_FILTER_PCTYPE_FRAG_IPV4;
	} else {
		iter_start = I40E_FILTER_PCTYPE_NONF_IPV6_OTHER;
		iter_end = I40E_FILTER_PCTYPE_FRAG_IPV6;
	}

	for (i = iter_start; i <= iter_end; i++) {
		raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
		if (!raw_packet)
			return -ENOMEM;

		/* IPv6 no header option differs from IPv4 */
		(void)i40e_create_dummy_packet
			(raw_packet, ipv4, (ipv4) ? IPPROTO_IP : IPPROTO_NONE,
			 fd_data);

		payload_offset = (ipv4) ? I40E_IP_DUMMY_PACKET_LEN :
			I40E_IP6_DUMMY_PACKET_LEN;
		ret = i40e_prepare_fdir_filter(pf, fd_data, add, raw_packet,
					       payload_offset, i);
		if (ret)
			goto err;
	}

	i40e_change_filter_num(ipv4, add, &pf->fd_ip4_filter_cnt,
			       &pf->fd_ip6_filter_cnt);

	return 0;
err:
	kfree(raw_packet);
	return ret;
}

/**
 * i40e_add_del_fdir - Build raw packets to add/del fdir filter
 * @vsi: pointer to the targeted VSI
 * @input: filter to add or delete
 * @add: true adds a filter, false removes it
 *
 **/
int i40e_add_del_fdir(struct i40e_vsi *vsi,
		      struct i40e_fdir_filter *input, bool add)
{
	enum ip_ver { ipv6 = 0, ipv4 = 1 };
	struct i40e_pf *pf = vsi->back;
	int ret;

	switch (input->flow_type & ~FLOW_EXT) {
	case TCP_V4_FLOW:
		ret = i40e_add_del_fdir_tcp(vsi, input, add, ipv4);
		break;
	case UDP_V4_FLOW:
		ret = i40e_add_del_fdir_udp(vsi, input, add, ipv4);
		break;
	case SCTP_V4_FLOW:
		ret = i40e_add_del_fdir_sctp(vsi, input, add, ipv4);
		break;
	case TCP_V6_FLOW:
		ret = i40e_add_del_fdir_tcp(vsi, input, add, ipv6);
		break;
	case UDP_V6_FLOW:
		ret = i40e_add_del_fdir_udp(vsi, input, add, ipv6);
		break;
	case SCTP_V6_FLOW:
		ret = i40e_add_del_fdir_sctp(vsi, input, add, ipv6);
		break;
	case IP_USER_FLOW:
		switch (input->ipl4_proto) {
		case IPPROTO_TCP:
			ret = i40e_add_del_fdir_tcp(vsi, input, add, ipv4);
			break;
		case IPPROTO_UDP:
			ret = i40e_add_del_fdir_udp(vsi, input, add, ipv4);
			break;
		case IPPROTO_SCTP:
			ret = i40e_add_del_fdir_sctp(vsi, input, add, ipv4);
			break;
		case IPPROTO_IP:
			ret = i40e_add_del_fdir_ip(vsi, input, add, ipv4);
			break;
		default:
			/* We cannot support masking based on protocol */
			dev_info(&pf->pdev->dev, "Unsupported IPv4 protocol 0x%02x\n",
				 input->ipl4_proto);
			return -EINVAL;
		}
		break;
	case IPV6_USER_FLOW:
		switch (input->ipl4_proto) {
		case IPPROTO_TCP:
			ret = i40e_add_del_fdir_tcp(vsi, input, add, ipv6);
			break;
		case IPPROTO_UDP:
			ret = i40e_add_del_fdir_udp(vsi, input, add, ipv6);
			break;
		case IPPROTO_SCTP:
			ret = i40e_add_del_fdir_sctp(vsi, input, add, ipv6);
			break;
		case IPPROTO_IP:
			ret = i40e_add_del_fdir_ip(vsi, input, add, ipv6);
			break;
		default:
			/* We cannot support masking based on protocol */
			dev_info(&pf->pdev->dev, "Unsupported IPv6 protocol 0x%02x\n",
				 input->ipl4_proto);
			return -EINVAL;
		}
		break;
	default:
		dev_info(&pf->pdev->dev, "Unsupported flow type 0x%02x\n",
			 input->flow_type);
		return -EINVAL;
	}

	/* The buffer allocated here will normally be freed by
	 * i40e_clean_fdir_tx_irq() as it reclaims resources after transmit
	 * completion. In the event of an error adding the buffer to the FDIR
	 * ring, it will immediately be freed. It may also be freed by
	 * i40e_clean_tx_ring() when closing the VSI.
	 */
	return ret;
}
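
/* These handlers are reached through ethtool's ntuple interface; e.g.
 * (illustrative, device name hypothetical):
 *
 *	ethtool -N eth0 flow-type tcp4 src-ip 192.168.0.1 dst-port 80 action 2
 *
 * arrives here as TCP_V4_FLOW with the target queue carried in @input.
 */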

/**
 * i40e_fd_handle_status - check the Programming Status for FD
 * @rx_ring: the Rx ring for this descriptor
 * @qword0_raw: qword0
 * @qword1: qword1 after le_to_cpu
 * @prog_id: the id originally used for programming
 *
 * This is used to verify if the FD programming or invalidation
 * requested by SW to the HW is successful or not and take actions accordingly.
 **/
static void i40e_fd_handle_status(struct i40e_ring *rx_ring, u64 qword0_raw,
				  u64 qword1, u8 prog_id)
{
	struct i40e_pf *pf = rx_ring->vsi->back;
	struct pci_dev *pdev = pf->pdev;
	struct i40e_16b_rx_wb_qw0 *qw0;
	u32 fcnt_prog, fcnt_avail;
	u32 error;

	qw0 = (struct i40e_16b_rx_wb_qw0 *)&qword0_raw;
	error = (qword1 & I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK) >>
		I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT;

	if (error == BIT(I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) {
		pf->fd_inv = le32_to_cpu(qw0->hi_dword.fd_id);
		if (qw0->hi_dword.fd_id != 0 ||
		    (I40E_DEBUG_FD & pf->hw.debug_mask))
			dev_warn(&pdev->dev, "ntuple filter loc = %d, could not be added\n",
				 pf->fd_inv);

		/* Check if the programming error is for ATR.
		 * If so, auto disable ATR and set a state for
		 * flush in progress. Next time we come here if flush is in
		 * progress do nothing, once flush is complete the state will
		 * be cleared.
		 */
		if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
			return;

		pf->fd_add_err++;
		/* store the current atr filter count */
		pf->fd_atr_cnt = i40e_get_current_atr_cnt(pf);

		if (qw0->hi_dword.fd_id == 0 &&
		    test_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state)) {
			/* These set_bit() calls aren't atomic with the
			 * test_bit() here, but worst case we potentially
			 * disable ATR and queue a flush right after SB
			 * support is re-enabled. That shouldn't cause an
			 * issue in practice
			 */
			set_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state);
			set_bit(__I40E_FD_FLUSH_REQUESTED, pf->state);
		}

		/* filter programming failed most likely due to table full */
		fcnt_prog = i40e_get_global_fd_count(pf);
		fcnt_avail = pf->fdir_pf_filter_count;
		/* If ATR is running fcnt_prog can quickly change,
		 * if we are very close to full, it makes sense to disable
		 * FD ATR/SB and then re-enable it when there is room.
		 */
		if (fcnt_prog >= (fcnt_avail - I40E_FDIR_BUFFER_FULL_MARGIN)) {
			if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
			    !test_and_set_bit(__I40E_FD_SB_AUTO_DISABLED,
					      pf->state))
				if (I40E_DEBUG_FD & pf->hw.debug_mask)
					dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n");
		}
	} else if (error == BIT(I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) {
		if (I40E_DEBUG_FD & pf->hw.debug_mask)
			dev_info(&pdev->dev, "ntuple filter fd_id = %d, could not be removed\n",
				 qw0->hi_dword.fd_id);
	}
}

/**
 * i40e_unmap_and_free_tx_resource - Release a Tx buffer
 * @ring: the ring that owns the buffer
 * @tx_buffer: the buffer to free
 **/
static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
					    struct i40e_tx_buffer *tx_buffer)
{
	if (tx_buffer->skb) {
		if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB)
			kfree(tx_buffer->raw_buf);
		else if (ring_is_xdp(ring))
			xdp_return_frame(tx_buffer->xdpf);
		else
			dev_kfree_skb_any(tx_buffer->skb);
		if (dma_unmap_len(tx_buffer, len))
			dma_unmap_single(ring->dev,
					 dma_unmap_addr(tx_buffer, dma),
					 dma_unmap_len(tx_buffer, len),
					 DMA_TO_DEVICE);
	} else if (dma_unmap_len(tx_buffer, len)) {
		dma_unmap_page(ring->dev,
			       dma_unmap_addr(tx_buffer, dma),
			       dma_unmap_len(tx_buffer, len),
			       DMA_TO_DEVICE);
	}

	tx_buffer->next_to_watch = NULL;
	tx_buffer->skb = NULL;
	dma_unmap_len_set(tx_buffer, len, 0);
	/* tx_buffer must be completely set up in the transmit path */
}
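
/* Note: skb, raw_buf and xdpf share storage (a union in struct
 * i40e_tx_buffer, see i40e_txrx.h), which is why the single NULL check on
 * ->skb above guards all three cases; tx_flags and the ring type then
 * select the correct free routine.
 */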

/**
 * i40e_clean_tx_ring - Free all pending Tx buffers in a ring
 * @tx_ring: ring to be cleaned
 **/
void i40e_clean_tx_ring(struct i40e_ring *tx_ring)
{
	unsigned long bi_size;
	u16 i;

	if (ring_is_xdp(tx_ring) && tx_ring->xsk_pool) {
		i40e_xsk_clean_tx_ring(tx_ring);
	} else {
		/* ring already cleared, nothing to do */
		if (!tx_ring->tx_bi)
			return;

		/* Free all the Tx ring sk_buffs */
		for (i = 0; i < tx_ring->count; i++)
			i40e_unmap_and_free_tx_resource(tx_ring,
							&tx_ring->tx_bi[i]);
	}

	bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
	memset(tx_ring->tx_bi, 0, bi_size);

	/* Zero out the descriptor ring */
	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	if (!tx_ring->netdev)
		return;

	/* cleanup Tx queue statistics */
	netdev_tx_reset_queue(txring_txq(tx_ring));
}

/**
 * i40e_free_tx_resources - Free Tx resources per queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/
void i40e_free_tx_resources(struct i40e_ring *tx_ring)
{
	i40e_clean_tx_ring(tx_ring);
	kfree(tx_ring->tx_bi);
	tx_ring->tx_bi = NULL;

	if (tx_ring->desc) {
		dma_free_coherent(tx_ring->dev, tx_ring->size,
				  tx_ring->desc, tx_ring->dma);
		tx_ring->desc = NULL;
	}
}

/**
 * i40e_get_tx_pending - how many tx descriptors not processed
 * @ring: the ring of descriptors
 * @in_sw: use SW variables
 *
 * Since there is no access to the ring head register
 * in XL710, we need to use our local copies
 **/
u32 i40e_get_tx_pending(struct i40e_ring *ring, bool in_sw)
{
	u32 head, tail;

	if (!in_sw) {
		head = i40e_get_head(ring);
		tail = readl(ring->tail);
	} else {
		head = ring->next_to_clean;
		tail = ring->next_to_use;
	}

	if (head != tail)
		return (head < tail) ?
			tail - head : (tail + ring->count - head);

	return 0;
}
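
/* Example of the wrap-around arithmetic above: with ring->count == 512,
 * head == 500 and tail == 10, the pending count is 10 + 512 - 500 == 22
 * descriptors still to be processed.
 */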

/**
 * i40e_detect_recover_hung - Function to detect and recover hung queues
 * @vsi: pointer to vsi struct with tx queues
 *
 * VSI has netdev and netdev has TX queues. This function checks each of
 * those TX queues; if a queue appears hung, it triggers recovery by issuing
 * a SW interrupt.
 **/
void i40e_detect_recover_hung(struct i40e_vsi *vsi)
{
	struct i40e_ring *tx_ring = NULL;
	struct net_device *netdev;
	unsigned int i;
	int packets;

	if (!vsi)
		return;

	if (test_bit(__I40E_VSI_DOWN, vsi->state))
		return;

	netdev = vsi->netdev;
	if (!netdev)
		return;

	if (!netif_carrier_ok(netdev))
		return;

	for (i = 0; i < vsi->num_queue_pairs; i++) {
		tx_ring = vsi->tx_rings[i];
		if (tx_ring && tx_ring->desc) {
			/* If packet counter has not changed the queue is
			 * likely stalled, so force an interrupt for this
			 * queue.
			 *
			 * prev_pkt_ctr would be negative if there was no
			 * pending work.
			 */
			packets = tx_ring->stats.packets & INT_MAX;
			if (tx_ring->tx_stats.prev_pkt_ctr == packets) {
				i40e_force_wb(vsi, tx_ring->q_vector);
				continue;
			}

			/* Memory barrier between read of packet count and call
			 * to i40e_get_tx_pending()
			 */
			smp_rmb();
			tx_ring->tx_stats.prev_pkt_ctr =
				i40e_get_tx_pending(tx_ring, true) ? packets : -1;
		}
	}
}

/**
 * i40e_clean_tx_irq - Reclaim resources after transmit completes
 * @vsi: the VSI we care about
 * @tx_ring: Tx ring to clean
 * @napi_budget: Used to determine if we are in netpoll
 * @tx_cleaned: Out parameter set to the number of packets cleaned
 *
 * Returns true if there's any budget left (e.g. the clean is finished)
 **/
static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
			      struct i40e_ring *tx_ring, int napi_budget,
			      unsigned int *tx_cleaned)
{
	int i = tx_ring->next_to_clean;
	struct i40e_tx_buffer *tx_buf;
	struct i40e_tx_desc *tx_head;
	struct i40e_tx_desc *tx_desc;
	unsigned int total_bytes = 0, total_packets = 0;
	unsigned int budget = vsi->work_limit;

	tx_buf = &tx_ring->tx_bi[i];
	tx_desc = I40E_TX_DESC(tx_ring, i);
	i -= tx_ring->count;
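	/* i is kept negative (index minus ring count) so the wrap check in
	 * the loop below is a cheap "!i" after each increment rather than a
	 * compare against tx_ring->count.
	 */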

	tx_head = I40E_TX_DESC(tx_ring, i40e_get_head(tx_ring));

	do {
		struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		/* prevent any other reads prior to eop_desc */
		smp_rmb();

		i40e_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf);
		/* we have caught up to head, no work left to do */
		if (tx_head == tx_desc)
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buf->next_to_watch = NULL;

		/* update the statistics for this packet */
		total_bytes += tx_buf->bytecount;
		total_packets += tx_buf->gso_segs;

		/* free the skb/XDP data */
		if (ring_is_xdp(tx_ring))
			xdp_return_frame(tx_buf->xdpf);
		else
			napi_consume_skb(tx_buf->skb, napi_budget);

		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buf, dma),
				 dma_unmap_len(tx_buf, len),
				 DMA_TO_DEVICE);

		/* clear tx_buffer data */
		tx_buf->skb = NULL;
		dma_unmap_len_set(tx_buf, len, 0);

		/* unmap remaining buffers */
		while (tx_desc != eop_desc) {
			i40e_trace(clean_tx_irq_unmap,
				   tx_ring, tx_desc, tx_buf);

			tx_buf++;
			tx_desc++;
			i++;
			if (unlikely(!i)) {
				i -= tx_ring->count;
				tx_buf = tx_ring->tx_bi;
				tx_desc = I40E_TX_DESC(tx_ring, 0);
			}

			/* unmap any remaining paged data */
			if (dma_unmap_len(tx_buf, len)) {
				dma_unmap_page(tx_ring->dev,
					       dma_unmap_addr(tx_buf, dma),
					       dma_unmap_len(tx_buf, len),
					       DMA_TO_DEVICE);
				dma_unmap_len_set(tx_buf, len, 0);
			}
		}

		/* move us one more past the eop_desc for start of next pkt */
		tx_buf++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buf = tx_ring->tx_bi;
			tx_desc = I40E_TX_DESC(tx_ring, 0);
		}

		prefetch(tx_desc);

		/* update budget accounting */
		budget--;
	} while (likely(budget));

	i += tx_ring->count;
	tx_ring->next_to_clean = i;
	i40e_update_tx_stats(tx_ring, total_packets, total_bytes);
	i40e_arm_wb(tx_ring, vsi, budget);

	if (ring_is_xdp(tx_ring))
		return !!budget;

	/* notify netdev of completed buffers */
	netdev_tx_completed_queue(txring_txq(tx_ring),
				  total_packets, total_bytes);
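	/* The call above feeds byte-queue limits (BQL); it pairs with the
	 * netdev_tx_sent_queue() accounting done on the transmit path.
	 */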

#define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2))
	if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
		     (I40E_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(tx_ring->netdev,
					     tx_ring->queue_index) &&
		    !test_bit(__I40E_VSI_DOWN, vsi->state)) {
			netif_wake_subqueue(tx_ring->netdev,
					    tx_ring->queue_index);
			++tx_ring->tx_stats.restart_queue;
		}
	}

	*tx_cleaned = total_packets;
	return !!budget;
}

/**
 * i40e_enable_wb_on_itr - Arm hardware to do a wb, interrupts are not enabled
 * @vsi: the VSI we care about
 * @q_vector: the vector on which to enable writeback
 *
 **/
static void i40e_enable_wb_on_itr(struct i40e_vsi *vsi,
				  struct i40e_q_vector *q_vector)
{
	u16 flags = q_vector->tx.ring[0].flags;
	u32 val;

	if (!(flags & I40E_TXR_FLAGS_WB_ON_ITR))
		return;

	if (q_vector->arm_wb_state)
		return;

	if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
		val = I40E_PFINT_DYN_CTLN_WB_ON_ITR_MASK |
		      I40E_PFINT_DYN_CTLN_ITR_INDX_MASK; /* set noitr */

		wr32(&vsi->back->hw,
		     I40E_PFINT_DYN_CTLN(q_vector->reg_idx),
		     val);
	} else {
		val = I40E_PFINT_DYN_CTL0_WB_ON_ITR_MASK |
		      I40E_PFINT_DYN_CTL0_ITR_INDX_MASK; /* set noitr */

		wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0, val);
	}
	q_vector->arm_wb_state = true;
}

/**
 * i40e_force_wb - Issue SW Interrupt so HW does a wb
 * @vsi: the VSI we care about
 * @q_vector: the vector on which to force writeback
 *
 **/
void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
{
	if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
		u32 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
			  I40E_PFINT_DYN_CTLN_ITR_INDX_MASK | /* set noitr */
			  I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK |
			  I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK;
			  /* allow 00 to be written to the index */

		wr32(&vsi->back->hw,
		     I40E_PFINT_DYN_CTLN(q_vector->reg_idx), val);
	} else {
		u32 val = I40E_PFINT_DYN_CTL0_INTENA_MASK |
			  I40E_PFINT_DYN_CTL0_ITR_INDX_MASK | /* set noitr */
			  I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK |
			  I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK;
			  /* allow 00 to be written to the index */

		wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0, val);
	}
}

static inline bool i40e_container_is_rx(struct i40e_q_vector *q_vector,
					struct i40e_ring_container *rc)
{
	return &q_vector->rx == rc;
}

static inline unsigned int i40e_itr_divisor(struct i40e_q_vector *q_vector)
{
	unsigned int divisor;

	switch (q_vector->vsi->back->hw.phy.link_info.link_speed) {
	case I40E_LINK_SPEED_40GB:
		divisor = I40E_ITR_ADAPTIVE_MIN_INC * 1024;
		break;
	case I40E_LINK_SPEED_25GB:
	case I40E_LINK_SPEED_20GB:
		divisor = I40E_ITR_ADAPTIVE_MIN_INC * 512;
		break;
	default:
	case I40E_LINK_SPEED_10GB:
		divisor = I40E_ITR_ADAPTIVE_MIN_INC * 256;
		break;
	case I40E_LINK_SPEED_1GB:
	case I40E_LINK_SPEED_100MB:
		divisor = I40E_ITR_ADAPTIVE_MIN_INC * 32;
		break;
	}

	return divisor;
}
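
/* The divisor above scales the per-byte ITR adjustment in i40e_update_itr()
 * by link speed: the same average wire size yields a proportionally smaller
 * delay increment at 40G (divisor 1024 * MIN_INC) than at 1G/100M
 * (32 * MIN_INC).
 */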

/**
 * i40e_update_itr - update the dynamic ITR value based on statistics
 * @q_vector: structure containing interrupt and ring information
 * @rc: structure containing ring performance data
 *
 * Stores a new ITR value based on packets and byte
 * counts during the last interrupt. The advantage of per interrupt
 * computation is faster updates and more accurate ITR for the current
 * traffic pattern. Constants in this function were computed
 * based on theoretical maximum wire speed and thresholds were set based
 * on testing data as well as attempting to minimize response time
 * while increasing bulk throughput.
 **/
static void i40e_update_itr(struct i40e_q_vector *q_vector,
			    struct i40e_ring_container *rc)
{
	unsigned int avg_wire_size, packets, bytes, itr;
	unsigned long next_update = jiffies;

	/* If we don't have any rings just leave ourselves set for maximum
	 * possible latency so we take ourselves out of the equation.
	 */
	if (!rc->ring || !ITR_IS_DYNAMIC(rc->ring->itr_setting))
		return;

	/* For Rx we want to push the delay up and default to low latency.
	 * for Tx we want to pull the delay down and default to high latency.
	 */
	itr = i40e_container_is_rx(q_vector, rc) ?
	      I40E_ITR_ADAPTIVE_MIN_USECS | I40E_ITR_ADAPTIVE_LATENCY :
	      I40E_ITR_ADAPTIVE_MAX_USECS | I40E_ITR_ADAPTIVE_LATENCY;

	/* If we didn't update within up to 1 - 2 jiffies we can assume
	 * that either packets are coming in so slow there hasn't been
	 * any work, or that there is so much work that NAPI is dealing
	 * with interrupt moderation and we don't need to do anything.
	 */
	if (time_after(next_update, rc->next_update))
		goto clear_counts;

	/* If itr_countdown is set it means we programmed an ITR within
	 * the last 4 interrupt cycles. This has a side effect of us
	 * potentially firing an early interrupt. In order to work around
	 * this we need to throw out any data received for a few
	 * interrupts following the update.
	 */
	if (q_vector->itr_countdown) {
		itr = rc->target_itr;
		goto clear_counts;
	}

	packets = rc->total_packets;
	bytes = rc->total_bytes;

	if (i40e_container_is_rx(q_vector, rc)) {
		/* For Rx, if there are 1 to 4 packets and fewer than 9000
		 * bytes, assume there is insufficient data to use the bulk
		 * rate limiting approach unless Tx is already in bulk rate
		 * limiting. We are likely latency driven.
		 */
		if (packets && packets < 4 && bytes < 9000 &&
		    (q_vector->tx.target_itr & I40E_ITR_ADAPTIVE_LATENCY)) {
			itr = I40E_ITR_ADAPTIVE_LATENCY;
			goto adjust_by_size;
		}
	} else if (packets < 4) {
		/* If we have Tx and Rx ITR maxed and Tx ITR is running in
		 * bulk mode and we are receiving 4 or fewer packets just
		 * reset the ITR_ADAPTIVE_LATENCY bit for latency mode so
		 * that the Rx can relax.
		 */
		if (rc->target_itr == I40E_ITR_ADAPTIVE_MAX_USECS &&
		    (q_vector->rx.target_itr & I40E_ITR_MASK) ==
		     I40E_ITR_ADAPTIVE_MAX_USECS)
			goto clear_counts;
	} else if (packets > 32) {
		/* If we have processed over 32 packets in a single interrupt
		 * for Tx assume we need to switch over to "bulk" mode.
		 */
		rc->target_itr &= ~I40E_ITR_ADAPTIVE_LATENCY;
	}

	/* We have no packets to actually measure against. This means
	 * either one of the other queues on this vector is active or
	 * we are a Tx queue doing TSO with too high of an interrupt rate.
	 *
	 * Between 4 and 56 we can assume that our current interrupt delay
	 * is only slightly too low. As such we should increase it by a small
	 * fixed amount.
	 */
	if (packets < 56) {
		itr = rc->target_itr + I40E_ITR_ADAPTIVE_MIN_INC;
		if ((itr & I40E_ITR_MASK) > I40E_ITR_ADAPTIVE_MAX_USECS) {
			itr &= I40E_ITR_ADAPTIVE_LATENCY;
			itr += I40E_ITR_ADAPTIVE_MAX_USECS;
		}
		goto clear_counts;
	}

	if (packets <= 256) {
		itr = min(q_vector->tx.current_itr, q_vector->rx.current_itr);
		itr &= I40E_ITR_MASK;

		/* Between 56 and 112 is our "goldilocks" zone where we are
		 * working out "just right". Just report that our current
		 * ITR is good for us.
		 */
		if (packets <= 112)
			goto clear_counts;

		/* If packet count is 128 or greater we are likely looking
		 * at a slight overrun of the delay we want. Try halving
		 * our delay to see if that will cut the number of packets
		 * in half per interrupt.
		 */
		itr /= 2;
		itr &= I40E_ITR_MASK;
		if (itr < I40E_ITR_ADAPTIVE_MIN_USECS)
			itr = I40E_ITR_ADAPTIVE_MIN_USECS;

		goto clear_counts;
	}

	/* The paths below assume we are dealing with a bulk ITR since
	 * number of packets is greater than 256. We are just going to have
	 * to compute a value and try to bring the count under control,
	 * though for smaller packet sizes there isn't much we can do as
	 * NAPI polling will likely be kicking in sooner rather than later.
	 */
	itr = I40E_ITR_ADAPTIVE_BULK;

adjust_by_size:
	/* If packet counts are 256 or greater we can assume we have a gross
	 * overestimation of what the rate should be. Instead of trying to fine
	 * tune it just use the formula below to try and dial in an exact value
	 * given the current packet size of the frame.
	 */
	avg_wire_size = bytes / packets;

	/* The following is a crude approximation of:
	 *  wmem_default / (size + overhead) = desired_pkts_per_int
	 *  rate / bits_per_byte / (size + ethernet overhead) = pkt_rate
	 *  (desired_pkt_rate / pkt_rate) * usecs_per_sec = ITR value
	 *
	 * Assuming wmem_default is 212992 and overhead is 640 bytes per
	 * packet, (256 skb, 64 headroom, 320 shared info), we can reduce the
	 * formula down to
	 *
	 *  (170 * (size + 24)) / (size + 640) = ITR
	 *
	 * We first do some math on the packet size and then finally bitshift
	 * by 8 after rounding up. We also have to account for link speed
	 * difference as ITR scales based on this (see i40e_itr_divisor()).
	 */
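	/* Worked example, assuming I40E_ITR_ADAPTIVE_MIN_INC == 2 (its value
	 * in i40e_txrx.h) and a 40Gb link (divisor == MIN_INC * 1024 == 2048):
	 * an avg_wire_size <= 60 maps to 4096 below, and the final adjustment
	 * becomes DIV_ROUND_UP(4096, 2048) * 2 == 4 usecs, i.e. roughly the
	 * 250K ints/sec noted in the branch comments.
	 */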
	if (avg_wire_size <= 60) {
		/* Start at 250k ints/sec */
		avg_wire_size = 4096;
	} else if (avg_wire_size <= 380) {
		/* 250K ints/sec to 60K ints/sec */
		avg_wire_size *= 40;
		avg_wire_size += 1696;
	} else if (avg_wire_size <= 1084) {
		/* 60K ints/sec to 36K ints/sec */
		avg_wire_size *= 15;
		avg_wire_size += 11452;
	} else if (avg_wire_size <= 1980) {
		/* 36K ints/sec to 30K ints/sec */
		avg_wire_size *= 5;
		avg_wire_size += 22420;
	} else {
		/* plateau at a limit of 30K ints/sec */
		avg_wire_size = 32256;
	}

	/* If we are in low latency mode halve our delay, which doubles the
	 * rate, to somewhere between 100K and 16K ints/sec
	 */
	if (itr & I40E_ITR_ADAPTIVE_LATENCY)
		avg_wire_size /= 2;

	/* Resultant value is 256 times larger than it needs to be. This
	 * gives us room to adjust the value as needed to either increase
	 * or decrease the value based on link speeds of 10G, 2.5G, 1G, etc.
	 *
	 * Use addition as we have already recorded the new latency flag
	 * for the ITR value.
	 */
	itr += DIV_ROUND_UP(avg_wire_size, i40e_itr_divisor(q_vector)) *
	       I40E_ITR_ADAPTIVE_MIN_INC;

	if ((itr & I40E_ITR_MASK) > I40E_ITR_ADAPTIVE_MAX_USECS) {
		itr &= I40E_ITR_ADAPTIVE_LATENCY;
		itr += I40E_ITR_ADAPTIVE_MAX_USECS;
	}

clear_counts:
	/* write back value */
	rc->target_itr = itr;

	/* next update should occur within next jiffy */
	rc->next_update = next_update + 1;

	rc->total_bytes = 0;
	rc->total_packets = 0;
}

static struct i40e_rx_buffer *i40e_rx_bi(struct i40e_ring *rx_ring, u32 idx)
{
	return &rx_ring->rx_bi[idx];
}

/**
 * i40e_reuse_rx_page - page flip buffer and store it back on the ring
 * @rx_ring: rx descriptor ring to store buffers on
 * @old_buff: donor buffer to have page reused
 *
 * Synchronizes page for reuse by the adapter
 **/
static void i40e_reuse_rx_page(struct i40e_ring *rx_ring,
			       struct i40e_rx_buffer *old_buff)
{
	struct i40e_rx_buffer *new_buff;
	u16 nta = rx_ring->next_to_alloc;

	new_buff = i40e_rx_bi(rx_ring, nta);

	/* update, and store next to alloc */
	nta++;
	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;

	/* transfer page from old buffer to new buffer */
	new_buff->dma = old_buff->dma;
	new_buff->page = old_buff->page;
	new_buff->page_offset = old_buff->page_offset;
	new_buff->pagecnt_bias = old_buff->pagecnt_bias;

	/* clear contents of buffer_info */
	old_buff->page = NULL;
}
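
/* next_to_alloc trails next_to_use: pages donated here are parked at
 * next_to_alloc so that i40e_alloc_mapped_page() can hand them straight
 * back out without going to the page allocator.
 */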

/**
 * i40e_clean_programming_status - clean the programming status descriptor
 * @rx_ring: the rx ring that has this descriptor
 * @qword0_raw: qword0
 * @qword1: qword1 representing status_error_len in CPU ordering
 *
 * Flow director should handle FD_FILTER_STATUS to check its filter programming
 * status being successful or not and take actions accordingly. FCoE should
 * handle its context/filter programming/invalidation status and take actions.
 **/
void i40e_clean_programming_status(struct i40e_ring *rx_ring, u64 qword0_raw,
				   u64 qword1)
{
	u8 id;

	id = (qword1 & I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK) >>
	     I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT;

	if (id == I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS)
		i40e_fd_handle_status(rx_ring, qword0_raw, qword1, id);
}

/**
 * i40e_setup_tx_descriptors - Allocate the Tx descriptors
 * @tx_ring: the tx ring to set up
 *
 * Return 0 on success, negative on error
 **/
int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring)
{
	struct device *dev = tx_ring->dev;
	int bi_size;

	if (!dev)
		return -ENOMEM;

	/* warn if we are about to overwrite the pointer */
	WARN_ON(tx_ring->tx_bi);
	bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
	tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL);
	if (!tx_ring->tx_bi)
		goto err;

	u64_stats_init(&tx_ring->syncp);

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc);
	/* add u32 for head writeback, align after this takes care of
	 * guaranteeing this is at least one cache line in size
	 */
	tx_ring->size += sizeof(u32);
	tx_ring->size = ALIGN(tx_ring->size, 4096);
	tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
					   &tx_ring->dma, GFP_KERNEL);
	if (!tx_ring->desc) {
		dev_info(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
			 tx_ring->size);
		goto err;
	}

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	tx_ring->tx_stats.prev_pkt_ctr = -1;
	return 0;

err:
	kfree(tx_ring->tx_bi);
	tx_ring->tx_bi = NULL;
	return -ENOMEM;
}
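
/* The extra u32 allocated past the descriptors above is the head write-back
 * location: the hardware writes the ring head there, and i40e_get_head()
 * (used by i40e_get_tx_pending() and i40e_clean_tx_irq()) reads it instead
 * of a head register.
 */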

static void i40e_clear_rx_bi(struct i40e_ring *rx_ring)
{
	memset(rx_ring->rx_bi, 0, sizeof(*rx_ring->rx_bi) * rx_ring->count);
}

/**
 * i40e_clean_rx_ring - Free Rx buffers
 * @rx_ring: ring to be cleaned
 **/
void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
{
	u16 i;

	/* ring already cleared, nothing to do */
	if (!rx_ring->rx_bi)
		return;

	if (rx_ring->xsk_pool) {
		i40e_xsk_clean_rx_ring(rx_ring);
		goto skip_free;
	}

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		struct i40e_rx_buffer *rx_bi = i40e_rx_bi(rx_ring, i);

		if (!rx_bi->page)
			continue;

		/* Invalidate cache lines that may have been written to by
		 * device so that we avoid corrupting memory.
		 */
		dma_sync_single_range_for_cpu(rx_ring->dev,
					      rx_bi->dma,
					      rx_bi->page_offset,
					      rx_ring->rx_buf_len,
					      DMA_FROM_DEVICE);

		/* free resources associated with mapping */
		dma_unmap_page_attrs(rx_ring->dev, rx_bi->dma,
				     i40e_rx_pg_size(rx_ring),
				     DMA_FROM_DEVICE,
				     I40E_RX_DMA_ATTR);

		__page_frag_cache_drain(rx_bi->page, rx_bi->pagecnt_bias);

		rx_bi->page = NULL;
		rx_bi->page_offset = 0;
	}

skip_free:
	if (rx_ring->xsk_pool)
		i40e_clear_rx_bi_zc(rx_ring);
	else
		i40e_clear_rx_bi(rx_ring);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_alloc = 0;
	rx_ring->next_to_clean = 0;
	rx_ring->next_to_process = 0;
	rx_ring->next_to_use = 0;
}

/**
 * i40e_free_rx_resources - Free Rx resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/
void i40e_free_rx_resources(struct i40e_ring *rx_ring)
{
	i40e_clean_rx_ring(rx_ring);
	if (rx_ring->vsi->type == I40E_VSI_MAIN)
		xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
	rx_ring->xdp_prog = NULL;
	kfree(rx_ring->rx_bi);
	rx_ring->rx_bi = NULL;

	if (rx_ring->desc) {
		dma_free_coherent(rx_ring->dev, rx_ring->size,
				  rx_ring->desc, rx_ring->dma);
		rx_ring->desc = NULL;
	}
}

/**
 * i40e_setup_rx_descriptors - Allocate Rx descriptors
 * @rx_ring: Rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/
int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;

	u64_stats_init(&rx_ring->syncp);

	/* Round up to nearest 4K */
	rx_ring->size = rx_ring->count * sizeof(union i40e_rx_desc);
	rx_ring->size = ALIGN(rx_ring->size, 4096);
	rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
					   &rx_ring->dma, GFP_KERNEL);

	if (!rx_ring->desc) {
		dev_info(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
			 rx_ring->size);
		return -ENOMEM;
	}

	rx_ring->next_to_alloc = 0;
	rx_ring->next_to_clean = 0;
	rx_ring->next_to_process = 0;
	rx_ring->next_to_use = 0;

	rx_ring->xdp_prog = rx_ring->vsi->xdp_prog;

	rx_ring->rx_bi =
		kcalloc(rx_ring->count, sizeof(*rx_ring->rx_bi), GFP_KERNEL);
	if (!rx_ring->rx_bi)
		return -ENOMEM;

	return 0;
}

/**
 * i40e_release_rx_desc - Store the new tail and head values
 * @rx_ring: ring to bump
 * @val: new next_to_use index (written to the tail register)
 **/
void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
{
	rx_ring->next_to_use = val;

	/* update next to alloc since we have filled the ring */
	rx_ring->next_to_alloc = val;

	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch. (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();
	writel(val, rx_ring->tail);
}

#if (PAGE_SIZE >= 8192)
static unsigned int i40e_rx_frame_truesize(struct i40e_ring *rx_ring,
					   unsigned int size)
{
	unsigned int truesize;

	truesize = rx_ring->rx_offset ?
		SKB_DATA_ALIGN(size + rx_ring->rx_offset) +
		SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
		SKB_DATA_ALIGN(size);
	return truesize;
}
#endif

/**
 * i40e_alloc_mapped_page - recycle or make a new page
 * @rx_ring: ring to use
 * @bi: rx_buffer struct to modify
 *
 * Returns true if the page was successfully allocated or
 * reused.
 **/
static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring,
				   struct i40e_rx_buffer *bi)
{
	struct page *page = bi->page;
	dma_addr_t dma;

	/* since we are recycling buffers we should seldom need to alloc */
	if (likely(page)) {
		rx_ring->rx_stats.page_reuse_count++;
		return true;
	}

	/* alloc new page for storage */
	page = dev_alloc_pages(i40e_rx_pg_order(rx_ring));
	if (unlikely(!page)) {
		rx_ring->rx_stats.alloc_page_failed++;
		return false;
	}

	rx_ring->rx_stats.page_alloc_count++;

	/* map page for use */
	dma = dma_map_page_attrs(rx_ring->dev, page, 0,
				 i40e_rx_pg_size(rx_ring),
				 DMA_FROM_DEVICE,
				 I40E_RX_DMA_ATTR);

	/* if mapping failed free memory back to system since
	 * there isn't much point in holding memory we can't use
	 */
	if (dma_mapping_error(rx_ring->dev, dma)) {
		__free_pages(page, i40e_rx_pg_order(rx_ring));
		rx_ring->rx_stats.alloc_page_failed++;
		return false;
	}

	bi->dma = dma;
	bi->page = page;
	bi->page_offset = rx_ring->rx_offset;
	page_ref_add(page, USHRT_MAX - 1);
	bi->pagecnt_bias = USHRT_MAX;

	return true;
}
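
/* The page_ref_add(page, USHRT_MAX - 1) above lets the hot path recycle
 * buffers by adjusting the local pagecnt_bias instead of touching the atomic
 * page refcount on every use; the bias is settled against the real refcount
 * only when the page is finally freed (see i40e_can_reuse_rx_page() and the
 * __page_frag_cache_drain() call in i40e_clean_rx_ring()).
 */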

/**
 * i40e_alloc_rx_buffers - Replace used receive buffers
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
 *
 * Returns false if all allocations were successful, true if any fail
 **/
bool i40e_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
{
	u16 ntu = rx_ring->next_to_use;
	union i40e_rx_desc *rx_desc;
	struct i40e_rx_buffer *bi;

	/* do nothing if no valid netdev defined */
	if (!rx_ring->netdev || !cleaned_count)
		return false;

	rx_desc = I40E_RX_DESC(rx_ring, ntu);
	bi = i40e_rx_bi(rx_ring, ntu);

	do {
		if (!i40e_alloc_mapped_page(rx_ring, bi))
			goto no_buffers;

		/* sync the buffer for use by the device */
		dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
						 bi->page_offset,
						 rx_ring->rx_buf_len,
						 DMA_FROM_DEVICE);

		/* Refresh the desc even if buffer_addrs didn't change
		 * because each write-back erases this info.
		 */
		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);

		rx_desc++;
		bi++;
		ntu++;
		if (unlikely(ntu == rx_ring->count)) {
			rx_desc = I40E_RX_DESC(rx_ring, 0);
			bi = i40e_rx_bi(rx_ring, 0);
			ntu = 0;
		}

		/* clear the status bits for the next_to_use descriptor */
		rx_desc->wb.qword1.status_error_len = 0;

		cleaned_count--;
	} while (cleaned_count);

	if (rx_ring->next_to_use != ntu)
		i40e_release_rx_desc(rx_ring, ntu);

	return false;

no_buffers:
	if (rx_ring->next_to_use != ntu)
		i40e_release_rx_desc(rx_ring, ntu);

	/* make sure to come back via polling to try again after
	 * allocation failure
	 */
	return true;
}

/**
 * i40e_rx_checksum - Indicate in skb if hw indicated a good cksum
 * @vsi: the VSI we care about
 * @skb: skb currently being received and modified
 * @rx_desc: the receive descriptor
 **/
static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
				    struct sk_buff *skb,
				    union i40e_rx_desc *rx_desc)
{
	struct i40e_rx_ptype_decoded decoded;
	u32 rx_error, rx_status;
	bool ipv4, ipv6;
	u8 ptype;
	u64 qword;

	qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
	ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT;
	rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
		   I40E_RXD_QW1_ERROR_SHIFT;
	rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
		    I40E_RXD_QW1_STATUS_SHIFT;
	decoded = decode_rx_desc_ptype(ptype);

	skb->ip_summed = CHECKSUM_NONE;

	skb_checksum_none_assert(skb);

	/* Rx csum enabled and ip headers found? */
	if (!(vsi->netdev->features & NETIF_F_RXCSUM))
		return;

	/* did the hardware decode the packet and checksum? */
	if (!(rx_status & BIT(I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
		return;

	/* both known and outer_ip must be set for the below code to work */
	if (!(decoded.known && decoded.outer_ip))
		return;

	ipv4 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) &&
	       (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4);
	ipv6 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) &&
	       (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6);

	if (ipv4 &&
	    (rx_error & (BIT(I40E_RX_DESC_ERROR_IPE_SHIFT) |
			 BIT(I40E_RX_DESC_ERROR_EIPE_SHIFT))))
		goto checksum_fail;

	/* likely incorrect csum if alternate IP extension headers found */
	if (ipv6 &&
	    rx_status & BIT(I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
		/* don't increment checksum err here, non-fatal err */
		return;

	/* there was some L4 error, count error and punt packet to the stack */
	if (rx_error & BIT(I40E_RX_DESC_ERROR_L4E_SHIFT))
		goto checksum_fail;

	/* handle packets that were not able to be checksummed due
	 * to arrival speed, in this case the stack can compute
	 * the csum.
	 */
	if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT))
		return;

	/* If there is an outer header present that might contain a checksum
	 * we need to bump the checksum level by 1 to reflect the fact that
	 * we are indicating we validated the inner checksum.
	 */
	if (decoded.tunnel_type >= I40E_RX_PTYPE_TUNNEL_IP_GRENAT)
		skb->csum_level = 1;

	/* Only report checksum unnecessary for TCP, UDP, or SCTP */
	switch (decoded.inner_prot) {
	case I40E_RX_PTYPE_INNER_PROT_TCP:
	case I40E_RX_PTYPE_INNER_PROT_UDP:
	case I40E_RX_PTYPE_INNER_PROT_SCTP:
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		fallthrough;
	default:
		break;
	}

	return;

checksum_fail:
	vsi->back->hw_csum_rx_error++;
}

/**
 * i40e_ptype_to_htype - get a hash type
 * @ptype: the ptype value from the descriptor
 *
 * Returns a hash type to be used by skb_set_hash
 **/
static inline int i40e_ptype_to_htype(u8 ptype)
{
	struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);

	if (!decoded.known)
		return PKT_HASH_TYPE_NONE;

	if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
	    decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4)
		return PKT_HASH_TYPE_L4;
	else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
		 decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3)
		return PKT_HASH_TYPE_L3;
	else
		return PKT_HASH_TYPE_L2;
}

/**
 * i40e_rx_hash - set the hash value in the skb
 * @ring: descriptor ring
 * @rx_desc: specific descriptor
 * @skb: skb currently being received and modified
 * @rx_ptype: Rx packet type
 **/
static inline void i40e_rx_hash(struct i40e_ring *ring,
				union i40e_rx_desc *rx_desc,
				struct sk_buff *skb,
				u8 rx_ptype)
{
	u32 hash;
	const __le64 rss_mask =
		cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
			    I40E_RX_DESC_STATUS_FLTSTAT_SHIFT);

	if (!(ring->netdev->features & NETIF_F_RXHASH))
		return;

	if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) {
		hash = le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
		skb_set_hash(skb, hash, i40e_ptype_to_htype(rx_ptype));
	}
}

/**
 * i40e_process_skb_fields - Populate skb header fields from Rx descriptor
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being populated
 *
 * This function checks the ring, descriptor, and packet information in
 * order to populate the hash, checksum, VLAN, protocol, and
 * other fields within the skb.
 **/
void i40e_process_skb_fields(struct i40e_ring *rx_ring,
			     union i40e_rx_desc *rx_desc, struct sk_buff *skb)
{
	u64 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
	u32 rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
			I40E_RXD_QW1_STATUS_SHIFT;
	u32 tsynvalid = rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK;
	u32 tsyn = (rx_status & I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
		   I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT;
	u8 rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
		      I40E_RXD_QW1_PTYPE_SHIFT;

	if (unlikely(tsynvalid))
		i40e_ptp_rx_hwtstamp(rx_ring->vsi->back, skb, tsyn);

	i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);

	i40e_rx_checksum(rx_ring->vsi, skb, rx_desc);

	skb_record_rx_queue(skb, rx_ring->queue_index);

	if (qword & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) {
		__le16 vlan_tag = rx_desc->wb.qword0.lo_dword.l2tag1;

		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
				       le16_to_cpu(vlan_tag));
	}

	/* modifies the skb - consumes the enet header */
	skb->protocol = eth_type_trans(skb, rx_ring->netdev);
}
1891 **/ 1892 void i40e_process_skb_fields(struct i40e_ring *rx_ring, 1893 union i40e_rx_desc *rx_desc, struct sk_buff *skb) 1894 { 1895 u64 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); 1896 u32 rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >> 1897 I40E_RXD_QW1_STATUS_SHIFT; 1898 u32 tsynvalid = rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK; 1899 u32 tsyn = (rx_status & I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >> 1900 I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT; 1901 u8 rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> 1902 I40E_RXD_QW1_PTYPE_SHIFT; 1903 1904 if (unlikely(tsynvalid)) 1905 i40e_ptp_rx_hwtstamp(rx_ring->vsi->back, skb, tsyn); 1906 1907 i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype); 1908 1909 i40e_rx_checksum(rx_ring->vsi, skb, rx_desc); 1910 1911 skb_record_rx_queue(skb, rx_ring->queue_index); 1912 1913 if (qword & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) { 1914 __le16 vlan_tag = rx_desc->wb.qword0.lo_dword.l2tag1; 1915 1916 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), 1917 le16_to_cpu(vlan_tag)); 1918 } 1919 1920 /* modifies the skb - consumes the enet header */ 1921 skb->protocol = eth_type_trans(skb, rx_ring->netdev); 1922 } 1923 1924 /** 1925 * i40e_cleanup_headers - Correct empty headers 1926 * @rx_ring: rx descriptor ring packet is being transacted on 1927 * @skb: pointer to current skb being fixed 1928 * @rx_desc: pointer to the EOP Rx descriptor 1929 * 1930 * In addition if skb is not at least 60 bytes we need to pad it so that 1931 * it is large enough to qualify as a valid Ethernet frame. 1932 * 1933 * Returns true if an error was encountered and skb was freed. 1934 **/ 1935 static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb, 1936 union i40e_rx_desc *rx_desc) 1937 1938 { 1939 /* ERR_MASK will only have valid bits if EOP set, and 1940 * what we are doing here is actually checking 1941 * I40E_RX_DESC_ERROR_RXE_SHIFT, since it is the zeroth bit in 1942 * the error field 1943 */ 1944 if (unlikely(i40e_test_staterr(rx_desc, 1945 BIT(I40E_RXD_QW1_ERROR_SHIFT)))) { 1946 dev_kfree_skb_any(skb); 1947 return true; 1948 } 1949 1950 /* if eth_skb_pad returns an error the skb was freed */ 1951 if (eth_skb_pad(skb)) 1952 return true; 1953 1954 return false; 1955 } 1956 1957 /** 1958 * i40e_can_reuse_rx_page - Determine if page can be reused for another Rx 1959 * @rx_buffer: buffer containing the page 1960 * @rx_stats: rx stats structure for the rx ring 1961 * 1962 * If page is reusable, we have a green light for calling i40e_reuse_rx_page, 1963 * which will assign the current buffer to the buffer that next_to_alloc is 1964 * pointing to; otherwise, the DMA mapping needs to be destroyed and 1965 * page freed. 1966 * 1967 * rx_stats will be updated to indicate whether the page was waived 1968 * or busy if it could not be reused. 1969 */ 1970 static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer, 1971 struct i40e_rx_queue_stats *rx_stats) 1972 { 1973 unsigned int pagecnt_bias = rx_buffer->pagecnt_bias; 1974 struct page *page = rx_buffer->page; 1975 1976 /* Is any reuse possible? 
*/ 1977 if (!dev_page_is_reusable(page)) { 1978 rx_stats->page_waive_count++; 1979 return false; 1980 } 1981 1982 #if (PAGE_SIZE < 8192) 1983 /* if we are only owner of page we can reuse it */ 1984 if (unlikely((rx_buffer->page_count - pagecnt_bias) > 1)) { 1985 rx_stats->page_busy_count++; 1986 return false; 1987 } 1988 #else 1989 #define I40E_LAST_OFFSET \ 1990 (SKB_WITH_OVERHEAD(PAGE_SIZE) - I40E_RXBUFFER_2048) 1991 if (rx_buffer->page_offset > I40E_LAST_OFFSET) { 1992 rx_stats->page_busy_count++; 1993 return false; 1994 } 1995 #endif 1996 1997 /* If we have drained the page fragment pool we need to update 1998 * the pagecnt_bias and page count so that we fully restock the 1999 * number of references the driver holds. 2000 */ 2001 if (unlikely(pagecnt_bias == 1)) { 2002 page_ref_add(page, USHRT_MAX - 1); 2003 rx_buffer->pagecnt_bias = USHRT_MAX; 2004 } 2005 2006 return true; 2007 } 2008 2009 /** 2010 * i40e_rx_buffer_flip - adjusted rx_buffer to point to an unused region 2011 * @rx_buffer: Rx buffer to adjust 2012 * @truesize: Size of adjustment 2013 **/ 2014 static void i40e_rx_buffer_flip(struct i40e_rx_buffer *rx_buffer, 2015 unsigned int truesize) 2016 { 2017 #if (PAGE_SIZE < 8192) 2018 rx_buffer->page_offset ^= truesize; 2019 #else 2020 rx_buffer->page_offset += truesize; 2021 #endif 2022 } 2023 2024 /** 2025 * i40e_get_rx_buffer - Fetch Rx buffer and synchronize data for use 2026 * @rx_ring: rx descriptor ring to transact packets on 2027 * @size: size of buffer to add to skb 2028 * 2029 * This function will pull an Rx buffer from the ring and synchronize it 2030 * for use by the CPU. 2031 */ 2032 static struct i40e_rx_buffer *i40e_get_rx_buffer(struct i40e_ring *rx_ring, 2033 const unsigned int size) 2034 { 2035 struct i40e_rx_buffer *rx_buffer; 2036 2037 rx_buffer = i40e_rx_bi(rx_ring, rx_ring->next_to_process); 2038 rx_buffer->page_count = 2039 #if (PAGE_SIZE < 8192) 2040 page_count(rx_buffer->page); 2041 #else 2042 0; 2043 #endif 2044 prefetch_page_address(rx_buffer->page); 2045 2046 /* we are reusing so sync this buffer for CPU use */ 2047 dma_sync_single_range_for_cpu(rx_ring->dev, 2048 rx_buffer->dma, 2049 rx_buffer->page_offset, 2050 size, 2051 DMA_FROM_DEVICE); 2052 2053 /* We have pulled a buffer for use, so decrement pagecnt_bias */ 2054 rx_buffer->pagecnt_bias--; 2055 2056 return rx_buffer; 2057 } 2058 2059 /** 2060 * i40e_put_rx_buffer - Clean up used buffer and either recycle or free 2061 * @rx_ring: rx descriptor ring to transact packets on 2062 * @rx_buffer: rx buffer to pull data from 2063 * 2064 * This function will clean up the contents of the rx_buffer. It will 2065 * either recycle the buffer or unmap it and free the associated resources. 
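 *
 * Recycling fails only for the cases i40e_can_reuse_rx_page() above
 * rejects: the page is not reusable at all (pfmemalloc or remote node),
 * another reference is still outstanding (small pages), or page_offset
 * has walked past I40E_LAST_OFFSET (large pages). Only then do we pay
 * for the unmap and the page free below.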
2066  */
2067 static void i40e_put_rx_buffer(struct i40e_ring *rx_ring,
2068 			       struct i40e_rx_buffer *rx_buffer)
2069 {
2070 	if (i40e_can_reuse_rx_page(rx_buffer, &rx_ring->rx_stats)) {
2071 		/* hand second half of page back to the ring */
2072 		i40e_reuse_rx_page(rx_ring, rx_buffer);
2073 	} else {
2074 		/* we are not reusing the buffer so unmap it */
2075 		dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
2076 				     i40e_rx_pg_size(rx_ring),
2077 				     DMA_FROM_DEVICE, I40E_RX_DMA_ATTR);
2078 		__page_frag_cache_drain(rx_buffer->page,
2079 					rx_buffer->pagecnt_bias);
2080 		/* clear contents of buffer_info */
2081 		rx_buffer->page = NULL;
2082 	}
2083 }
2084
2085 /**
2086  * i40e_process_rx_buffs - Processing of buffers post XDP prog or on error
2087  * @rx_ring: Rx descriptor ring to transact packets on
2088  * @xdp_res: Result of the XDP program
2089  * @xdp: xdp_buff pointing to the data
2090  **/
2091 static void i40e_process_rx_buffs(struct i40e_ring *rx_ring, int xdp_res,
2092 				  struct xdp_buff *xdp)
2093 {
2094 	u32 nr_frags = xdp_get_shared_info_from_buff(xdp)->nr_frags;
2095 	u32 next = rx_ring->next_to_clean, i = 0;
2096 	struct i40e_rx_buffer *rx_buffer;
2097
2098 	xdp->flags = 0;
2099
2100 	while (1) {
2101 		rx_buffer = i40e_rx_bi(rx_ring, next);
2102 		if (++next == rx_ring->count)
2103 			next = 0;
2104
2105 		if (!rx_buffer->page)
2106 			continue;
2107
2108 		if (xdp_res != I40E_XDP_CONSUMED)
2109 			i40e_rx_buffer_flip(rx_buffer, xdp->frame_sz);
2110 		else if (i++ <= nr_frags)
2111 			rx_buffer->pagecnt_bias++;
2112
2113 		/* EOP buffer will be put in i40e_clean_rx_irq() */
2114 		if (next == rx_ring->next_to_process)
2115 			return;
2116
2117 		i40e_put_rx_buffer(rx_ring, rx_buffer);
2118 	}
2119 }
2120
2121 /**
2122  * i40e_construct_skb - Allocate skb and populate it
2123  * @rx_ring: rx descriptor ring to transact packets on
2124  * @xdp: xdp_buff pointing to the data
2125  *
2126  * This function allocates an skb. It then populates it with the page
2127  * data from the current receive descriptor, taking care to set up the
2128  * skb correctly.
2129  */
2130 static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
2131 					  struct xdp_buff *xdp)
2132 {
2133 	unsigned int size = xdp->data_end - xdp->data;
2134 	struct i40e_rx_buffer *rx_buffer;
2135 	struct skb_shared_info *sinfo;
2136 	unsigned int headlen;
2137 	struct sk_buff *skb;
2138 	u32 nr_frags = 0;
2139
2140 	/* prefetch first cache line of first page */
2141 	net_prefetch(xdp->data);
2142
2143 	/* Note, we get here by enabling legacy-rx via:
2144 	 *
2145 	 *    ethtool --set-priv-flags <dev> legacy-rx on
2146 	 *
2147 	 * In this mode, we currently get 0 extra XDP headroom as
2148 	 * opposed to having legacy-rx off, where we process XDP
2149 	 * packets going to stack via i40e_build_skb(). The latter
2150 	 * provides us currently with 192 bytes of headroom.
2151 	 *
2152 	 * For i40e_construct_skb() mode it means that the
2153 	 * xdp->data_meta will always point to xdp->data, since
2154 	 * the helper cannot expand the head. Should this ever
2155 	 * change in future for legacy-rx mode on, then let's also
2156 	 * add xdp->data_meta handling here.
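	 *
	 * Rough layout sketch of the two modes (sizes as per the note
	 * above, not to scale):
	 *
	 *	legacy-rx on  (this path):      [ frame ................... ]
	 *	legacy-rx off (i40e_build_skb): [ 192B XDP headroom | frame ]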
2157 */ 2158 2159 /* allocate a skb to store the frags */ 2160 skb = __napi_alloc_skb(&rx_ring->q_vector->napi, 2161 I40E_RX_HDR_SIZE, 2162 GFP_ATOMIC | __GFP_NOWARN); 2163 if (unlikely(!skb)) 2164 return NULL; 2165 2166 /* Determine available headroom for copy */ 2167 headlen = size; 2168 if (headlen > I40E_RX_HDR_SIZE) 2169 headlen = eth_get_headlen(skb->dev, xdp->data, 2170 I40E_RX_HDR_SIZE); 2171 2172 /* align pull length to size of long to optimize memcpy performance */ 2173 memcpy(__skb_put(skb, headlen), xdp->data, 2174 ALIGN(headlen, sizeof(long))); 2175 2176 if (unlikely(xdp_buff_has_frags(xdp))) { 2177 sinfo = xdp_get_shared_info_from_buff(xdp); 2178 nr_frags = sinfo->nr_frags; 2179 } 2180 rx_buffer = i40e_rx_bi(rx_ring, rx_ring->next_to_clean); 2181 /* update all of the pointers */ 2182 size -= headlen; 2183 if (size) { 2184 if (unlikely(nr_frags >= MAX_SKB_FRAGS)) { 2185 dev_kfree_skb(skb); 2186 return NULL; 2187 } 2188 skb_add_rx_frag(skb, 0, rx_buffer->page, 2189 rx_buffer->page_offset + headlen, 2190 size, xdp->frame_sz); 2191 /* buffer is used by skb, update page_offset */ 2192 i40e_rx_buffer_flip(rx_buffer, xdp->frame_sz); 2193 } else { 2194 /* buffer is unused, reset bias back to rx_buffer */ 2195 rx_buffer->pagecnt_bias++; 2196 } 2197 2198 if (unlikely(xdp_buff_has_frags(xdp))) { 2199 struct skb_shared_info *skinfo = skb_shinfo(skb); 2200 2201 memcpy(&skinfo->frags[skinfo->nr_frags], &sinfo->frags[0], 2202 sizeof(skb_frag_t) * nr_frags); 2203 2204 xdp_update_skb_shared_info(skb, skinfo->nr_frags + nr_frags, 2205 sinfo->xdp_frags_size, 2206 nr_frags * xdp->frame_sz, 2207 xdp_buff_is_frag_pfmemalloc(xdp)); 2208 2209 /* First buffer has already been processed, so bump ntc */ 2210 if (++rx_ring->next_to_clean == rx_ring->count) 2211 rx_ring->next_to_clean = 0; 2212 2213 i40e_process_rx_buffs(rx_ring, I40E_XDP_PASS, xdp); 2214 } 2215 2216 return skb; 2217 } 2218 2219 /** 2220 * i40e_build_skb - Build skb around an existing buffer 2221 * @rx_ring: Rx descriptor ring to transact packets on 2222 * @xdp: xdp_buff pointing to the data 2223 * 2224 * This function builds an skb around an existing Rx buffer, taking care 2225 * to set up the skb correctly and avoid any memcpy overhead. 2226 */ 2227 static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring, 2228 struct xdp_buff *xdp) 2229 { 2230 unsigned int metasize = xdp->data - xdp->data_meta; 2231 struct skb_shared_info *sinfo; 2232 struct sk_buff *skb; 2233 u32 nr_frags; 2234 2235 /* Prefetch first cache line of first page. If xdp->data_meta 2236 * is unused, this points exactly as xdp->data, otherwise we 2237 * likely have a consumer accessing first few bytes of meta 2238 * data, and then actual data. 
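 *
 * Pointer layout for reference; metasize is computed above as
 * xdp->data - xdp->data_meta:
 *
 *	data_hard_start .. data_meta ...... data ...... data_end
 *	                   |<-- metasize -->|
 *
 * With no metadata written, data_meta == data, metasize is 0 and the
 * skb_metadata_set() below is skipped.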
2239 */ 2240 net_prefetch(xdp->data_meta); 2241 2242 if (unlikely(xdp_buff_has_frags(xdp))) { 2243 sinfo = xdp_get_shared_info_from_buff(xdp); 2244 nr_frags = sinfo->nr_frags; 2245 } 2246 2247 /* build an skb around the page buffer */ 2248 skb = napi_build_skb(xdp->data_hard_start, xdp->frame_sz); 2249 if (unlikely(!skb)) 2250 return NULL; 2251 2252 /* update pointers within the skb to store the data */ 2253 skb_reserve(skb, xdp->data - xdp->data_hard_start); 2254 __skb_put(skb, xdp->data_end - xdp->data); 2255 if (metasize) 2256 skb_metadata_set(skb, metasize); 2257 2258 if (unlikely(xdp_buff_has_frags(xdp))) { 2259 xdp_update_skb_shared_info(skb, nr_frags, 2260 sinfo->xdp_frags_size, 2261 nr_frags * xdp->frame_sz, 2262 xdp_buff_is_frag_pfmemalloc(xdp)); 2263 2264 i40e_process_rx_buffs(rx_ring, I40E_XDP_PASS, xdp); 2265 } else { 2266 struct i40e_rx_buffer *rx_buffer; 2267 2268 rx_buffer = i40e_rx_bi(rx_ring, rx_ring->next_to_clean); 2269 /* buffer is used by skb, update page_offset */ 2270 i40e_rx_buffer_flip(rx_buffer, xdp->frame_sz); 2271 } 2272 2273 return skb; 2274 } 2275 2276 /** 2277 * i40e_is_non_eop - process handling of non-EOP buffers 2278 * @rx_ring: Rx ring being processed 2279 * @rx_desc: Rx descriptor for current buffer 2280 * 2281 * If the buffer is an EOP buffer, this function exits returning false, 2282 * otherwise return true indicating that this is in fact a non-EOP buffer. 2283 */ 2284 bool i40e_is_non_eop(struct i40e_ring *rx_ring, 2285 union i40e_rx_desc *rx_desc) 2286 { 2287 /* if we are the last buffer then there is nothing else to do */ 2288 #define I40E_RXD_EOF BIT(I40E_RX_DESC_STATUS_EOF_SHIFT) 2289 if (likely(i40e_test_staterr(rx_desc, I40E_RXD_EOF))) 2290 return false; 2291 2292 rx_ring->rx_stats.non_eop_descs++; 2293 2294 return true; 2295 } 2296 2297 static int i40e_xmit_xdp_ring(struct xdp_frame *xdpf, 2298 struct i40e_ring *xdp_ring); 2299 2300 int i40e_xmit_xdp_tx_ring(struct xdp_buff *xdp, struct i40e_ring *xdp_ring) 2301 { 2302 struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp); 2303 2304 if (unlikely(!xdpf)) 2305 return I40E_XDP_CONSUMED; 2306 2307 return i40e_xmit_xdp_ring(xdpf, xdp_ring); 2308 } 2309 2310 /** 2311 * i40e_run_xdp - run an XDP program 2312 * @rx_ring: Rx ring being processed 2313 * @xdp: XDP buffer containing the frame 2314 * @xdp_prog: XDP program to run 2315 **/ 2316 static int i40e_run_xdp(struct i40e_ring *rx_ring, struct xdp_buff *xdp, struct bpf_prog *xdp_prog) 2317 { 2318 int err, result = I40E_XDP_PASS; 2319 struct i40e_ring *xdp_ring; 2320 u32 act; 2321 2322 if (!xdp_prog) 2323 goto xdp_out; 2324 2325 prefetchw(xdp->data_hard_start); /* xdp_frame write */ 2326 2327 act = bpf_prog_run_xdp(xdp_prog, xdp); 2328 switch (act) { 2329 case XDP_PASS: 2330 break; 2331 case XDP_TX: 2332 xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index]; 2333 result = i40e_xmit_xdp_tx_ring(xdp, xdp_ring); 2334 if (result == I40E_XDP_CONSUMED) 2335 goto out_failure; 2336 break; 2337 case XDP_REDIRECT: 2338 err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog); 2339 if (err) 2340 goto out_failure; 2341 result = I40E_XDP_REDIR; 2342 break; 2343 default: 2344 bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act); 2345 fallthrough; 2346 case XDP_ABORTED: 2347 out_failure: 2348 trace_xdp_exception(rx_ring->netdev, xdp_prog, act); 2349 fallthrough; /* handle aborts by dropping packet */ 2350 case XDP_DROP: 2351 result = I40E_XDP_CONSUMED; 2352 break; 2353 } 2354 xdp_out: 2355 return result; 2356 } 2357 2358 /** 2359 * i40e_xdp_ring_update_tail 
- Updates the XDP Tx ring tail register 2360 * @xdp_ring: XDP Tx ring 2361 * 2362 * This function updates the XDP Tx ring tail register. 2363 **/ 2364 void i40e_xdp_ring_update_tail(struct i40e_ring *xdp_ring) 2365 { 2366 /* Force memory writes to complete before letting h/w 2367 * know there are new descriptors to fetch. 2368 */ 2369 wmb(); 2370 writel_relaxed(xdp_ring->next_to_use, xdp_ring->tail); 2371 } 2372 2373 /** 2374 * i40e_update_rx_stats - Update Rx ring statistics 2375 * @rx_ring: rx descriptor ring 2376 * @total_rx_bytes: number of bytes received 2377 * @total_rx_packets: number of packets received 2378 * 2379 * This function updates the Rx ring statistics. 2380 **/ 2381 void i40e_update_rx_stats(struct i40e_ring *rx_ring, 2382 unsigned int total_rx_bytes, 2383 unsigned int total_rx_packets) 2384 { 2385 u64_stats_update_begin(&rx_ring->syncp); 2386 rx_ring->stats.packets += total_rx_packets; 2387 rx_ring->stats.bytes += total_rx_bytes; 2388 u64_stats_update_end(&rx_ring->syncp); 2389 rx_ring->q_vector->rx.total_packets += total_rx_packets; 2390 rx_ring->q_vector->rx.total_bytes += total_rx_bytes; 2391 } 2392 2393 /** 2394 * i40e_finalize_xdp_rx - Bump XDP Tx tail and/or flush redirect map 2395 * @rx_ring: Rx ring 2396 * @xdp_res: Result of the receive batch 2397 * 2398 * This function bumps XDP Tx tail and/or flush redirect map, and 2399 * should be called when a batch of packets has been processed in the 2400 * napi loop. 2401 **/ 2402 void i40e_finalize_xdp_rx(struct i40e_ring *rx_ring, unsigned int xdp_res) 2403 { 2404 if (xdp_res & I40E_XDP_REDIR) 2405 xdp_do_flush_map(); 2406 2407 if (xdp_res & I40E_XDP_TX) { 2408 struct i40e_ring *xdp_ring = 2409 rx_ring->vsi->xdp_rings[rx_ring->queue_index]; 2410 2411 i40e_xdp_ring_update_tail(xdp_ring); 2412 } 2413 } 2414 2415 /** 2416 * i40e_inc_ntp: Advance the next_to_process index 2417 * @rx_ring: Rx ring 2418 **/ 2419 static void i40e_inc_ntp(struct i40e_ring *rx_ring) 2420 { 2421 u32 ntp = rx_ring->next_to_process + 1; 2422 2423 ntp = (ntp < rx_ring->count) ? 
ntp : 0; 2424 rx_ring->next_to_process = ntp; 2425 prefetch(I40E_RX_DESC(rx_ring, ntp)); 2426 } 2427 2428 /** 2429 * i40e_add_xdp_frag: Add a frag to xdp_buff 2430 * @xdp: xdp_buff pointing to the data 2431 * @nr_frags: return number of buffers for the packet 2432 * @rx_buffer: rx_buffer holding data of the current frag 2433 * @size: size of data of current frag 2434 */ 2435 static int i40e_add_xdp_frag(struct xdp_buff *xdp, u32 *nr_frags, 2436 struct i40e_rx_buffer *rx_buffer, u32 size) 2437 { 2438 struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp); 2439 2440 if (!xdp_buff_has_frags(xdp)) { 2441 sinfo->nr_frags = 0; 2442 sinfo->xdp_frags_size = 0; 2443 xdp_buff_set_frags_flag(xdp); 2444 } else if (unlikely(sinfo->nr_frags >= MAX_SKB_FRAGS)) { 2445 /* Overflowing packet: All frags need to be dropped */ 2446 return -ENOMEM; 2447 } 2448 2449 __skb_fill_page_desc_noacc(sinfo, sinfo->nr_frags++, rx_buffer->page, 2450 rx_buffer->page_offset, size); 2451 2452 sinfo->xdp_frags_size += size; 2453 2454 if (page_is_pfmemalloc(rx_buffer->page)) 2455 xdp_buff_set_frag_pfmemalloc(xdp); 2456 *nr_frags = sinfo->nr_frags; 2457 2458 return 0; 2459 } 2460 2461 /** 2462 * i40e_consume_xdp_buff - Consume all the buffers of the packet and update ntc 2463 * @rx_ring: rx descriptor ring to transact packets on 2464 * @xdp: xdp_buff pointing to the data 2465 * @rx_buffer: rx_buffer of eop desc 2466 */ 2467 static void i40e_consume_xdp_buff(struct i40e_ring *rx_ring, 2468 struct xdp_buff *xdp, 2469 struct i40e_rx_buffer *rx_buffer) 2470 { 2471 i40e_process_rx_buffs(rx_ring, I40E_XDP_CONSUMED, xdp); 2472 i40e_put_rx_buffer(rx_ring, rx_buffer); 2473 rx_ring->next_to_clean = rx_ring->next_to_process; 2474 xdp->data = NULL; 2475 } 2476 2477 /** 2478 * i40e_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf 2479 * @rx_ring: rx descriptor ring to transact packets on 2480 * @budget: Total limit on number of packets to process 2481 * @rx_cleaned: Out parameter of the number of packets processed 2482 * 2483 * This function provides a "bounce buffer" approach to Rx interrupt 2484 * processing. The advantage to this is that on systems that have 2485 * expensive overhead for IOMMU access this provides a means of avoiding 2486 * it by maintaining the mapping of the page to the system. 
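 *
 * As a sketch, the per-buffer fast path is a cache sync rather than a
 * full map/unmap cycle:
 *
 *	dma_sync_single_range_for_cpu(dev, dma, page_offset, size,
 *				      DMA_FROM_DEVICE);
 *	... run XDP, then build/flip/recycle the half page ...
 *
 * (see i40e_get_rx_buffer() and i40e_put_rx_buffer() above).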
2487  *
2488  * Returns amount of work completed
2489  **/
2490 static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget,
2491 			     unsigned int *rx_cleaned)
2492 {
2493 	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
2494 	u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
2495 	u16 clean_threshold = rx_ring->count / 2;
2496 	unsigned int offset = rx_ring->rx_offset;
2497 	struct xdp_buff *xdp = &rx_ring->xdp;
2498 	unsigned int xdp_xmit = 0;
2499 	struct bpf_prog *xdp_prog;
2500 	bool failure = false;
2501 	int xdp_res = 0;
2502
2503 	xdp_prog = READ_ONCE(rx_ring->xdp_prog);
2504
2505 	while (likely(total_rx_packets < (unsigned int)budget)) {
2506 		u16 ntp = rx_ring->next_to_process;
2507 		struct i40e_rx_buffer *rx_buffer;
2508 		union i40e_rx_desc *rx_desc;
2509 		struct sk_buff *skb;
2510 		unsigned int size;
2511 		u32 nfrags = 0;
2512 		bool neop;
2513 		u64 qword;
2514
2515 		/* return some buffers to hardware, one at a time is too slow */
2516 		if (cleaned_count >= clean_threshold) {
2517 			failure = failure ||
2518 				  i40e_alloc_rx_buffers(rx_ring, cleaned_count);
2519 			cleaned_count = 0;
2520 		}
2521
2522 		rx_desc = I40E_RX_DESC(rx_ring, ntp);
2523
2524 		/* status_error_len will always be zero for unused descriptors
2525 		 * because it's cleared in cleanup, and overlaps with hdr_addr
2526 		 * which is always zero because packet split isn't used. If the
2527 		 * hardware wrote DD then the length will be non-zero.
2528 		 */
2529 		qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
2530
2531 		/* This memory barrier is needed to keep us from reading
2532 		 * any other fields out of the rx_desc until we have
2533 		 * verified the descriptor has been written back.
2534 		 */
2535 		dma_rmb();
2536
2537 		if (i40e_rx_is_programming_status(qword)) {
2538 			i40e_clean_programming_status(rx_ring,
2539 						      rx_desc->raw.qword[0],
2540 						      qword);
2541 			rx_buffer = i40e_rx_bi(rx_ring, ntp);
2542 			i40e_inc_ntp(rx_ring);
2543 			i40e_reuse_rx_page(rx_ring, rx_buffer);
2544 			/* Update ntc and bump cleaned count if not in the
2545 			 * middle of a multi-buffer packet.
2546 			 */
2547 			if (rx_ring->next_to_clean == ntp) {
2548 				rx_ring->next_to_clean =
2549 					rx_ring->next_to_process;
2550 				cleaned_count++;
2551 			}
2552 			continue;
2553 		}
2554
2555 		size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
2556 		       I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
2557 		if (!size)
2558 			break;
2559
2560 		i40e_trace(clean_rx_irq, rx_ring, rx_desc, xdp);
2561 		/* retrieve a buffer from the ring */
2562 		rx_buffer = i40e_get_rx_buffer(rx_ring, size);
2563
2564 		neop = i40e_is_non_eop(rx_ring, rx_desc);
2565 		i40e_inc_ntp(rx_ring);
2566
2567 		if (!xdp->data) {
2568 			unsigned char *hard_start;
2569
2570 			hard_start = page_address(rx_buffer->page) +
2571 				     rx_buffer->page_offset - offset;
2572 			xdp_prepare_buff(xdp, hard_start, offset, size, true);
2573 #if (PAGE_SIZE > 4096)
2574 			/* At larger PAGE_SIZE, frame_sz depends on the frame length */
2575 			xdp->frame_sz = i40e_rx_frame_truesize(rx_ring, size);
2576 #endif
2577 		} else if (i40e_add_xdp_frag(xdp, &nfrags, rx_buffer, size) &&
2578 			   !neop) {
2579 			/* Overflowing packet: Drop all frags on EOP */
2580 			i40e_consume_xdp_buff(rx_ring, xdp, rx_buffer);
2581 			break;
2582 		}
2583
2584 		if (neop)
2585 			continue;
2586
2587 		xdp_res = i40e_run_xdp(rx_ring, xdp, xdp_prog);
2588
2589 		if (xdp_res) {
2590 			xdp_xmit |= xdp_res & (I40E_XDP_TX | I40E_XDP_REDIR);
2591
2592 			if (unlikely(xdp_buff_has_frags(xdp))) {
2593 				i40e_process_rx_buffs(rx_ring, xdp_res, xdp);
2594 				size = xdp_get_buff_len(xdp);
2595 			} else if (xdp_res & (I40E_XDP_TX | I40E_XDP_REDIR)) {
2596 				i40e_rx_buffer_flip(rx_buffer, xdp->frame_sz);
2597 			} else {
2598 				rx_buffer->pagecnt_bias++;
2599 			}
2600 			total_rx_bytes += size;
2601 		} else {
2602 			if (ring_uses_build_skb(rx_ring))
2603 				skb = i40e_build_skb(rx_ring, xdp);
2604 			else
2605 				skb = i40e_construct_skb(rx_ring, xdp);
2606
2607 			/* drop if we failed to retrieve a buffer */
2608 			if (!skb) {
2609 				rx_ring->rx_stats.alloc_buff_failed++;
2610 				i40e_consume_xdp_buff(rx_ring, xdp, rx_buffer);
2611 				break;
2612 			}
2613
2614 			if (i40e_cleanup_headers(rx_ring, skb, rx_desc))
2615 				goto process_next;
2616
2617 			/* probably a little skewed due to removing CRC */
2618 			total_rx_bytes += skb->len;
2619
2620 			/* populate checksum, VLAN, and protocol */
2621 			i40e_process_skb_fields(rx_ring, rx_desc, skb);
2622
2623 			i40e_trace(clean_rx_irq_rx, rx_ring, rx_desc, xdp);
2624 			napi_gro_receive(&rx_ring->q_vector->napi, skb);
2625 		}
2626
2627 		/* update budget accounting */
2628 		total_rx_packets++;
2629 process_next:
2630 		cleaned_count += nfrags + 1;
2631 		i40e_put_rx_buffer(rx_ring, rx_buffer);
2632 		rx_ring->next_to_clean = rx_ring->next_to_process;
2633
2634 		xdp->data = NULL;
2635 	}
2636
2637 	i40e_finalize_xdp_rx(rx_ring, xdp_xmit);
2638
2639 	i40e_update_rx_stats(rx_ring, total_rx_bytes, total_rx_packets);
2640
2641 	*rx_cleaned = total_rx_packets;
2642
2643 	/* guarantee a trip back through this routine if there was a failure */
2644 	return failure ? budget : (int)total_rx_packets;
2645 }
2646
2647 static inline u32 i40e_buildreg_itr(const int type, u16 itr)
2648 {
2649 	u32 val;
2650
2651 	/* We don't bother with setting the CLEARPBA bit as the data sheet
2652 	 * points out doing so is "meaningless since it was already
2653 	 * auto-cleared". The auto-clearing happens when the interrupt is
2654 	 * asserted.
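	 *
	 * Worked example of the encoding described below (value
	 * illustrative): an ITR of 50 usecs must land in the interval
	 * field as 25 units of 2 usecs; computing
	 * itr << (I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT - 1) is the
	 * one-step equivalent of (itr / 2) << INTERVAL_SHIFT.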
2655 	 *
2656 	 * Hardware errata 28 also indicates that writing to a
2657 	 * xxINT_DYN_CTLx CSR with INTENA_MSK (bit 31) set to 0 will clear
2658 	 * an event in the PBA anyway so we need to rely on the automask
2659 	 * to hold pending events for us until the interrupt is re-enabled.
2660 	 *
2661 	 * The itr value is reported in microseconds, and the register
2662 	 * value is recorded in 2 microsecond units. For this reason we
2663 	 * only need to shift by the interval shift - 1 instead of the
2664 	 * full value.
2665 	 */
2666 	itr &= I40E_ITR_MASK;
2667
2668 	val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
2669 	      (type << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
2670 	      (itr << (I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT - 1));
2671
2672 	return val;
2673 }
2674
2675 /* a small macro to shorten up some long lines */
2676 #define INTREG I40E_PFINT_DYN_CTLN
2677
2678 /* The act of updating the ITR will cause it to immediately trigger. In order
2679  * to prevent this from throwing off adaptive update statistics we defer the
2680  * update so that it can only happen so often. So after either Tx or Rx are
2681  * updated we make the adaptive scheme wait until either the ITR completely
2682  * expires via the next_update expiration or we have been through at least
2683  * 3 interrupts.
2684  */
2685 #define ITR_COUNTDOWN_START 3
2686
2687 /**
2688  * i40e_update_enable_itr - Update itr and re-enable MSIX interrupt
2689  * @vsi: the VSI we care about
2690  * @q_vector: q_vector for which itr is being updated and interrupt enabled
2691  *
2692  **/
2693 static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
2694 					  struct i40e_q_vector *q_vector)
2695 {
2696 	struct i40e_hw *hw = &vsi->back->hw;
2697 	u32 intval;
2698
2699 	/* If we don't have MSIX, then we only need to re-enable icr0 */
2700 	if (!(vsi->back->flags & I40E_FLAG_MSIX_ENABLED)) {
2701 		i40e_irq_dynamic_enable_icr0(vsi->back);
2702 		return;
2703 	}
2704
2705 	/* These will do nothing if dynamic updates are not enabled */
2706 	i40e_update_itr(q_vector, &q_vector->tx);
2707 	i40e_update_itr(q_vector, &q_vector->rx);
2708
2709 	/* This block of logic allows us to get away with only updating
2710 	 * one ITR value with each interrupt. The idea is to perform a
2711 	 * pseudo-lazy update with the following criteria.
2712 	 *
2713 	 * 1. Rx is given higher priority than Tx if both are in same state
2714 	 * 2. If we must reduce an ITR, that is given highest priority.
2715 	 * 3. We then give priority to increasing ITR based on amount.
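	 *
	 * Illustrative outcome (usec values hypothetical):
	 *
	 *	rx: current 50, target 20  -> Rx ITR written now (rule 2)
	 *	tx: current 84, target 168 -> deferred to a later interrupt
	 *
	 * so at most one of the two ITRs is written back per interrupt.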
2716 */ 2717 if (q_vector->rx.target_itr < q_vector->rx.current_itr) { 2718 /* Rx ITR needs to be reduced, this is highest priority */ 2719 intval = i40e_buildreg_itr(I40E_RX_ITR, 2720 q_vector->rx.target_itr); 2721 q_vector->rx.current_itr = q_vector->rx.target_itr; 2722 q_vector->itr_countdown = ITR_COUNTDOWN_START; 2723 } else if ((q_vector->tx.target_itr < q_vector->tx.current_itr) || 2724 ((q_vector->rx.target_itr - q_vector->rx.current_itr) < 2725 (q_vector->tx.target_itr - q_vector->tx.current_itr))) { 2726 /* Tx ITR needs to be reduced, this is second priority 2727 * Tx ITR needs to be increased more than Rx, fourth priority 2728 */ 2729 intval = i40e_buildreg_itr(I40E_TX_ITR, 2730 q_vector->tx.target_itr); 2731 q_vector->tx.current_itr = q_vector->tx.target_itr; 2732 q_vector->itr_countdown = ITR_COUNTDOWN_START; 2733 } else if (q_vector->rx.current_itr != q_vector->rx.target_itr) { 2734 /* Rx ITR needs to be increased, third priority */ 2735 intval = i40e_buildreg_itr(I40E_RX_ITR, 2736 q_vector->rx.target_itr); 2737 q_vector->rx.current_itr = q_vector->rx.target_itr; 2738 q_vector->itr_countdown = ITR_COUNTDOWN_START; 2739 } else { 2740 /* No ITR update, lowest priority */ 2741 intval = i40e_buildreg_itr(I40E_ITR_NONE, 0); 2742 if (q_vector->itr_countdown) 2743 q_vector->itr_countdown--; 2744 } 2745 2746 if (!test_bit(__I40E_VSI_DOWN, vsi->state)) 2747 wr32(hw, INTREG(q_vector->reg_idx), intval); 2748 } 2749 2750 /** 2751 * i40e_napi_poll - NAPI polling Rx/Tx cleanup routine 2752 * @napi: napi struct with our devices info in it 2753 * @budget: amount of work driver is allowed to do this pass, in packets 2754 * 2755 * This function will clean all queues associated with a q_vector. 2756 * 2757 * Returns the amount of work done 2758 **/ 2759 int i40e_napi_poll(struct napi_struct *napi, int budget) 2760 { 2761 struct i40e_q_vector *q_vector = 2762 container_of(napi, struct i40e_q_vector, napi); 2763 struct i40e_vsi *vsi = q_vector->vsi; 2764 struct i40e_ring *ring; 2765 bool tx_clean_complete = true; 2766 bool rx_clean_complete = true; 2767 unsigned int tx_cleaned = 0; 2768 unsigned int rx_cleaned = 0; 2769 bool clean_complete = true; 2770 bool arm_wb = false; 2771 int budget_per_ring; 2772 int work_done = 0; 2773 2774 if (test_bit(__I40E_VSI_DOWN, vsi->state)) { 2775 napi_complete(napi); 2776 return 0; 2777 } 2778 2779 /* Since the actual Tx work is minimal, we can give the Tx a larger 2780 * budget and be more aggressive about cleaning up the Tx descriptors. 2781 */ 2782 i40e_for_each_ring(ring, q_vector->tx) { 2783 bool wd = ring->xsk_pool ? 2784 i40e_clean_xdp_tx_irq(vsi, ring) : 2785 i40e_clean_tx_irq(vsi, ring, budget, &tx_cleaned); 2786 2787 if (!wd) { 2788 clean_complete = tx_clean_complete = false; 2789 continue; 2790 } 2791 arm_wb |= ring->arm_wb; 2792 ring->arm_wb = false; 2793 } 2794 2795 /* Handle case where we are called by netpoll with a budget of 0 */ 2796 if (budget <= 0) 2797 goto tx_only; 2798 2799 /* normally we have 1 Rx ring per q_vector */ 2800 if (unlikely(q_vector->num_ringpairs > 1)) 2801 /* We attempt to distribute budget to each Rx queue fairly, but 2802 * don't allow the budget to go below 1 because that would exit 2803 * polling early. 2804 */ 2805 budget_per_ring = max_t(int, budget / q_vector->num_ringpairs, 1); 2806 else 2807 /* Max of 1 Rx ring in this q_vector so give it the budget */ 2808 budget_per_ring = budget; 2809 2810 i40e_for_each_ring(ring, q_vector->rx) { 2811 int cleaned = ring->xsk_pool ? 
2812 i40e_clean_rx_irq_zc(ring, budget_per_ring) : 2813 i40e_clean_rx_irq(ring, budget_per_ring, &rx_cleaned); 2814 2815 work_done += cleaned; 2816 /* if we clean as many as budgeted, we must not be done */ 2817 if (cleaned >= budget_per_ring) 2818 clean_complete = rx_clean_complete = false; 2819 } 2820 2821 if (!i40e_enabled_xdp_vsi(vsi)) 2822 trace_i40e_napi_poll(napi, q_vector, budget, budget_per_ring, rx_cleaned, 2823 tx_cleaned, rx_clean_complete, tx_clean_complete); 2824 2825 /* If work not completed, return budget and polling will return */ 2826 if (!clean_complete) { 2827 int cpu_id = smp_processor_id(); 2828 2829 /* It is possible that the interrupt affinity has changed but, 2830 * if the cpu is pegged at 100%, polling will never exit while 2831 * traffic continues and the interrupt will be stuck on this 2832 * cpu. We check to make sure affinity is correct before we 2833 * continue to poll, otherwise we must stop polling so the 2834 * interrupt can move to the correct cpu. 2835 */ 2836 if (!cpumask_test_cpu(cpu_id, &q_vector->affinity_mask)) { 2837 /* Tell napi that we are done polling */ 2838 napi_complete_done(napi, work_done); 2839 2840 /* Force an interrupt */ 2841 i40e_force_wb(vsi, q_vector); 2842 2843 /* Return budget-1 so that polling stops */ 2844 return budget - 1; 2845 } 2846 tx_only: 2847 if (arm_wb) { 2848 q_vector->tx.ring[0].tx_stats.tx_force_wb++; 2849 i40e_enable_wb_on_itr(vsi, q_vector); 2850 } 2851 return budget; 2852 } 2853 2854 if (q_vector->tx.ring[0].flags & I40E_TXR_FLAGS_WB_ON_ITR) 2855 q_vector->arm_wb_state = false; 2856 2857 /* Exit the polling mode, but don't re-enable interrupts if stack might 2858 * poll us due to busy-polling 2859 */ 2860 if (likely(napi_complete_done(napi, work_done))) 2861 i40e_update_enable_itr(vsi, q_vector); 2862 2863 return min(work_done, budget - 1); 2864 } 2865 2866 /** 2867 * i40e_atr - Add a Flow Director ATR filter 2868 * @tx_ring: ring to add programming descriptor to 2869 * @skb: send buffer 2870 * @tx_flags: send tx flags 2871 **/ 2872 static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb, 2873 u32 tx_flags) 2874 { 2875 struct i40e_filter_program_desc *fdir_desc; 2876 struct i40e_pf *pf = tx_ring->vsi->back; 2877 union { 2878 unsigned char *network; 2879 struct iphdr *ipv4; 2880 struct ipv6hdr *ipv6; 2881 } hdr; 2882 struct tcphdr *th; 2883 unsigned int hlen; 2884 u32 flex_ptype, dtype_cmd; 2885 int l4_proto; 2886 u16 i; 2887 2888 /* make sure ATR is enabled */ 2889 if (!(pf->flags & I40E_FLAG_FD_ATR_ENABLED)) 2890 return; 2891 2892 if (test_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state)) 2893 return; 2894 2895 /* if sampling is disabled do nothing */ 2896 if (!tx_ring->atr_sample_rate) 2897 return; 2898 2899 /* Currently only IPv4/IPv6 with TCP is supported */ 2900 if (!(tx_flags & (I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6))) 2901 return; 2902 2903 /* snag network header to get L4 type and address */ 2904 hdr.network = (tx_flags & I40E_TX_FLAGS_UDP_TUNNEL) ? 2905 skb_inner_network_header(skb) : skb_network_header(skb); 2906 2907 /* Note: tx_flags gets modified to reflect inner protocols in 2908 * tx_enable_csum function if encap is enabled. 
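 *
 * To give a feel for the sampling check further below: with an assumed
 * atr_sample_rate of 20, every SYN/FIN/RST segment is considered, plus
 * roughly one in every 20 other TCP segments sent on this ring.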
2909 	 */
2910 	if (tx_flags & I40E_TX_FLAGS_IPV4) {
2911 		/* access ihl as u8 to avoid unaligned access on ia64 */
2912 		hlen = (hdr.network[0] & 0x0F) << 2;
2913 		l4_proto = hdr.ipv4->protocol;
2914 	} else {
2915 		/* find the start of the innermost ipv6 header */
2916 		unsigned int inner_hlen = hdr.network - skb->data;
2917 		unsigned int h_offset = inner_hlen;
2918
2919 		/* this function updates h_offset to the end of the header */
2920 		l4_proto =
2921 			ipv6_find_hdr(skb, &h_offset, IPPROTO_TCP, NULL, NULL);
2922 		/* hlen will contain our best estimate of the tcp header */
2923 		hlen = h_offset - inner_hlen;
2924 	}
2925
2926 	if (l4_proto != IPPROTO_TCP)
2927 		return;
2928
2929 	th = (struct tcphdr *)(hdr.network + hlen);
2930
2931 	/* Due to lack of space, no more new filters can be programmed */
2932 	if (th->syn && test_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state))
2933 		return;
2934 	if (pf->flags & I40E_FLAG_HW_ATR_EVICT_ENABLED) {
2935 		/* HW ATR eviction will take care of removing filters on FIN
2936 		 * and RST packets.
2937 		 */
2938 		if (th->fin || th->rst)
2939 			return;
2940 	}
2941
2942 	tx_ring->atr_count++;
2943
2944 	/* sample on all syn/fin/rst packets or once every atr sample rate */
2945 	if (!th->fin &&
2946 	    !th->syn &&
2947 	    !th->rst &&
2948 	    (tx_ring->atr_count < tx_ring->atr_sample_rate))
2949 		return;
2950
2951 	tx_ring->atr_count = 0;
2952
2953 	/* grab the next descriptor */
2954 	i = tx_ring->next_to_use;
2955 	fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);
2956
2957 	i++;
2958 	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
2959
2960 	flex_ptype = (tx_ring->queue_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
2961 		     I40E_TXD_FLTR_QW0_QINDEX_MASK;
2962 	flex_ptype |= (tx_flags & I40E_TX_FLAGS_IPV4) ?
2963 		      (I40E_FILTER_PCTYPE_NONF_IPV4_TCP <<
2964 		       I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) :
2965 		      (I40E_FILTER_PCTYPE_NONF_IPV6_TCP <<
2966 		       I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);
2967
2968 	flex_ptype |= tx_ring->vsi->id << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;
2969
2970 	dtype_cmd = I40E_TX_DESC_DTYPE_FILTER_PROG;
2971
2972 	dtype_cmd |= (th->fin || th->rst) ?
2973 		     (I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
2974 		      I40E_TXD_FLTR_QW1_PCMD_SHIFT) :
2975 		     (I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
2976 		      I40E_TXD_FLTR_QW1_PCMD_SHIFT);
2977
2978 	dtype_cmd |= I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX <<
2979 		     I40E_TXD_FLTR_QW1_DEST_SHIFT;
2980
2981 	dtype_cmd |= I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID <<
2982 		     I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT;
2983
2984 	dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
2985 	if (!(tx_flags & I40E_TX_FLAGS_UDP_TUNNEL))
2986 		dtype_cmd |=
2987 			((u32)I40E_FD_ATR_STAT_IDX(pf->hw.pf_id) <<
2988 			 I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
2989 			I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
2990 	else
2991 		dtype_cmd |=
2992 			((u32)I40E_FD_ATR_TUNNEL_STAT_IDX(pf->hw.pf_id) <<
2993 			 I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
2994 			I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
2995
2996 	if (pf->flags & I40E_FLAG_HW_ATR_EVICT_ENABLED)
2997 		dtype_cmd |= I40E_TXD_FLTR_QW1_ATR_MASK;
2998
2999 	fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
3000 	fdir_desc->rsvd = cpu_to_le32(0);
3001 	fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd);
3002 	fdir_desc->fd_id = cpu_to_le32(0);
3003 }
3004
3005 /**
3006  * i40e_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW
3007  * @skb: send buffer
3008  * @tx_ring: ring to send buffer on
3009  * @flags: the tx flags to be set
3010  *
3011  * Checks the skb and sets up the corresponding generic transmit flags
3012  * related to VLAN tagging for the HW, such as VLAN, DCB, etc.
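 *
 * Encoding sketch (symbolic only, see the I40E_TX_FLAGS_* masks): for a
 * hardware-offloaded tag the whole 16-bit TCI is parked in the upper
 * bits of the flags word:
 *
 *	tx_flags |= skb_vlan_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT;
 *	tx_flags |= I40E_TX_FLAGS_HW_VLAN;
 *
 * and i40e_tx_map() later pulls it back out for the descriptor's
 * l2tag1 field.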
3013  *
3014  * Returns an error code to indicate the frame should be dropped upon error,
3015  * otherwise returns 0 to indicate the flags have been set properly.
3016  **/
3017 static inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
3018 					     struct i40e_ring *tx_ring,
3019 					     u32 *flags)
3020 {
3021 	__be16 protocol = skb->protocol;
3022 	u32 tx_flags = 0;
3023
3024 	if (protocol == htons(ETH_P_8021Q) &&
3025 	    !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) {
3026 		/* When HW VLAN acceleration is turned off by the user the
3027 		 * stack sets the protocol to 8021q so that the driver
3028 		 * can take any steps required to support the SW only
3029 		 * VLAN handling. In our case the driver doesn't need
3030 		 * to take any further steps so just set the protocol
3031 		 * to the encapsulated ethertype.
3032 		 */
3033 		skb->protocol = vlan_get_protocol(skb);
3034 		goto out;
3035 	}
3036
3037 	/* if we have a HW VLAN tag being added, default to the HW one */
3038 	if (skb_vlan_tag_present(skb)) {
3039 		tx_flags |= skb_vlan_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT;
3040 		tx_flags |= I40E_TX_FLAGS_HW_VLAN;
3041 	/* else if it is a SW VLAN, check the next protocol and store the tag */
3042 	} else if (protocol == htons(ETH_P_8021Q)) {
3043 		struct vlan_hdr *vhdr, _vhdr;
3044
3045 		vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
3046 		if (!vhdr)
3047 			return -EINVAL;
3048
3049 		protocol = vhdr->h_vlan_encapsulated_proto;
3050 		tx_flags |= ntohs(vhdr->h_vlan_TCI) << I40E_TX_FLAGS_VLAN_SHIFT;
3051 		tx_flags |= I40E_TX_FLAGS_SW_VLAN;
3052 	}
3053
3054 	if (!(tx_ring->vsi->back->flags & I40E_FLAG_DCB_ENABLED))
3055 		goto out;
3056
3057 	/* Insert 802.1p priority into VLAN header */
3058 	if ((tx_flags & (I40E_TX_FLAGS_HW_VLAN | I40E_TX_FLAGS_SW_VLAN)) ||
3059 	    (skb->priority != TC_PRIO_CONTROL)) {
3060 		tx_flags &= ~I40E_TX_FLAGS_VLAN_PRIO_MASK;
3061 		tx_flags |= (skb->priority & 0x7) <<
3062 			    I40E_TX_FLAGS_VLAN_PRIO_SHIFT;
3063 		if (tx_flags & I40E_TX_FLAGS_SW_VLAN) {
3064 			struct vlan_ethhdr *vhdr;
3065 			int rc;
3066
3067 			rc = skb_cow_head(skb, 0);
3068 			if (rc < 0)
3069 				return rc;
3070 			vhdr = skb_vlan_eth_hdr(skb);
3071 			vhdr->h_vlan_TCI = htons(tx_flags >>
3072 						 I40E_TX_FLAGS_VLAN_SHIFT);
3073 		} else {
3074 			tx_flags |= I40E_TX_FLAGS_HW_VLAN;
3075 		}
3076 	}
3077
3078 out:
3079 	*flags = tx_flags;
3080 	return 0;
3081 }
3082
3083 /**
3084  * i40e_tso - set up the tso context descriptor
3085  * @first: pointer to first Tx buffer for xmit
3086  * @hdr_len: ptr to the size of the packet header
3087  * @cd_type_cmd_tso_mss: Quad Word 1
3088  *
3089  * Returns 0 if no TSO can happen, 1 if TSO is going, or error
3090  **/
3091 static int i40e_tso(struct i40e_tx_buffer *first, u8 *hdr_len,
3092 		    u64 *cd_type_cmd_tso_mss)
3093 {
3094 	struct sk_buff *skb = first->skb;
3095 	u64 cd_cmd, cd_tso_len, cd_mss;
3096 	__be16 protocol;
3097 	union {
3098 		struct iphdr *v4;
3099 		struct ipv6hdr *v6;
3100 		unsigned char *hdr;
3101 	} ip;
3102 	union {
3103 		struct tcphdr *tcp;
3104 		struct udphdr *udp;
3105 		unsigned char *hdr;
3106 	} l4;
3107 	u32 paylen, l4_offset;
3108 	u16 gso_size;
3109 	int err;
3110
3111 	if (skb->ip_summed != CHECKSUM_PARTIAL)
3112 		return 0;
3113
3114 	if (!skb_is_gso(skb))
3115 		return 0;
3116
3117 	err = skb_cow_head(skb, 0);
3118 	if (err < 0)
3119 		return err;
3120
3121 	protocol = vlan_get_protocol(skb);
3122
3123 	if (eth_p_mpls(protocol))
3124 		ip.hdr = skb_inner_network_header(skb);
3125 	else
3126 		ip.hdr = skb_network_header(skb);
3127 	l4.hdr = skb_checksum_start(skb);
3128
3129 	/* initialize outer IP header fields */
3130 	if (ip.v4->version
== 4) { 3131 ip.v4->tot_len = 0; 3132 ip.v4->check = 0; 3133 3134 first->tx_flags |= I40E_TX_FLAGS_TSO; 3135 } else { 3136 ip.v6->payload_len = 0; 3137 first->tx_flags |= I40E_TX_FLAGS_TSO; 3138 } 3139 3140 if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE | 3141 SKB_GSO_GRE_CSUM | 3142 SKB_GSO_IPXIP4 | 3143 SKB_GSO_IPXIP6 | 3144 SKB_GSO_UDP_TUNNEL | 3145 SKB_GSO_UDP_TUNNEL_CSUM)) { 3146 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) && 3147 (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) { 3148 l4.udp->len = 0; 3149 3150 /* determine offset of outer transport header */ 3151 l4_offset = l4.hdr - skb->data; 3152 3153 /* remove payload length from outer checksum */ 3154 paylen = skb->len - l4_offset; 3155 csum_replace_by_diff(&l4.udp->check, 3156 (__force __wsum)htonl(paylen)); 3157 } 3158 3159 /* reset pointers to inner headers */ 3160 ip.hdr = skb_inner_network_header(skb); 3161 l4.hdr = skb_inner_transport_header(skb); 3162 3163 /* initialize inner IP header fields */ 3164 if (ip.v4->version == 4) { 3165 ip.v4->tot_len = 0; 3166 ip.v4->check = 0; 3167 } else { 3168 ip.v6->payload_len = 0; 3169 } 3170 } 3171 3172 /* determine offset of inner transport header */ 3173 l4_offset = l4.hdr - skb->data; 3174 3175 /* remove payload length from inner checksum */ 3176 paylen = skb->len - l4_offset; 3177 3178 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) { 3179 csum_replace_by_diff(&l4.udp->check, (__force __wsum)htonl(paylen)); 3180 /* compute length of segmentation header */ 3181 *hdr_len = sizeof(*l4.udp) + l4_offset; 3182 } else { 3183 csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen)); 3184 /* compute length of segmentation header */ 3185 *hdr_len = (l4.tcp->doff * 4) + l4_offset; 3186 } 3187 3188 /* pull values out of skb_shinfo */ 3189 gso_size = skb_shinfo(skb)->gso_size; 3190 3191 /* update GSO size and bytecount with header size */ 3192 first->gso_segs = skb_shinfo(skb)->gso_segs; 3193 first->bytecount += (first->gso_segs - 1) * *hdr_len; 3194 3195 /* find the field values */ 3196 cd_cmd = I40E_TX_CTX_DESC_TSO; 3197 cd_tso_len = skb->len - *hdr_len; 3198 cd_mss = gso_size; 3199 *cd_type_cmd_tso_mss |= (cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) | 3200 (cd_tso_len << I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) | 3201 (cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT); 3202 return 1; 3203 } 3204 3205 /** 3206 * i40e_tsyn - set up the tsyn context descriptor 3207 * @tx_ring: ptr to the ring to send 3208 * @skb: ptr to the skb we're sending 3209 * @tx_flags: the collected send information 3210 * @cd_type_cmd_tso_mss: Quad Word 1 3211 * 3212 * Returns 0 if no Tx timestamp can happen and 1 if the timestamp will happen 3213 **/ 3214 static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb, 3215 u32 tx_flags, u64 *cd_type_cmd_tso_mss) 3216 { 3217 struct i40e_pf *pf; 3218 3219 if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))) 3220 return 0; 3221 3222 /* Tx timestamps cannot be sampled when doing TSO */ 3223 if (tx_flags & I40E_TX_FLAGS_TSO) 3224 return 0; 3225 3226 /* only timestamp the outbound packet if the user has requested it and 3227 * we are not already transmitting a packet to be timestamped 3228 */ 3229 pf = i40e_netdev_to_pf(tx_ring->netdev); 3230 if (!(pf->flags & I40E_FLAG_PTP)) 3231 return 0; 3232 3233 if (pf->ptp_tx && 3234 !test_and_set_bit_lock(__I40E_PTP_TX_IN_PROGRESS, pf->state)) { 3235 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 3236 pf->ptp_tx_start = jiffies; 3237 pf->ptp_tx_skb = skb_get(skb); 3238 } else { 3239 pf->tx_hwtstamp_skipped++; 3240 return 0; 
3241 } 3242 3243 *cd_type_cmd_tso_mss |= (u64)I40E_TX_CTX_DESC_TSYN << 3244 I40E_TXD_CTX_QW1_CMD_SHIFT; 3245 3246 return 1; 3247 } 3248 3249 /** 3250 * i40e_tx_enable_csum - Enable Tx checksum offloads 3251 * @skb: send buffer 3252 * @tx_flags: pointer to Tx flags currently set 3253 * @td_cmd: Tx descriptor command bits to set 3254 * @td_offset: Tx descriptor header offsets to set 3255 * @tx_ring: Tx descriptor ring 3256 * @cd_tunneling: ptr to context desc bits 3257 **/ 3258 static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, 3259 u32 *td_cmd, u32 *td_offset, 3260 struct i40e_ring *tx_ring, 3261 u32 *cd_tunneling) 3262 { 3263 union { 3264 struct iphdr *v4; 3265 struct ipv6hdr *v6; 3266 unsigned char *hdr; 3267 } ip; 3268 union { 3269 struct tcphdr *tcp; 3270 struct udphdr *udp; 3271 unsigned char *hdr; 3272 } l4; 3273 unsigned char *exthdr; 3274 u32 offset, cmd = 0; 3275 __be16 frag_off; 3276 __be16 protocol; 3277 u8 l4_proto = 0; 3278 3279 if (skb->ip_summed != CHECKSUM_PARTIAL) 3280 return 0; 3281 3282 protocol = vlan_get_protocol(skb); 3283 3284 if (eth_p_mpls(protocol)) { 3285 ip.hdr = skb_inner_network_header(skb); 3286 l4.hdr = skb_checksum_start(skb); 3287 } else { 3288 ip.hdr = skb_network_header(skb); 3289 l4.hdr = skb_transport_header(skb); 3290 } 3291 3292 /* set the tx_flags to indicate the IP protocol type. this is 3293 * required so that checksum header computation below is accurate. 3294 */ 3295 if (ip.v4->version == 4) 3296 *tx_flags |= I40E_TX_FLAGS_IPV4; 3297 else 3298 *tx_flags |= I40E_TX_FLAGS_IPV6; 3299 3300 /* compute outer L2 header size */ 3301 offset = ((ip.hdr - skb->data) / 2) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT; 3302 3303 if (skb->encapsulation) { 3304 u32 tunnel = 0; 3305 /* define outer network header type */ 3306 if (*tx_flags & I40E_TX_FLAGS_IPV4) { 3307 tunnel |= (*tx_flags & I40E_TX_FLAGS_TSO) ? 
3308 I40E_TX_CTX_EXT_IP_IPV4 : 3309 I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM; 3310 3311 l4_proto = ip.v4->protocol; 3312 } else if (*tx_flags & I40E_TX_FLAGS_IPV6) { 3313 int ret; 3314 3315 tunnel |= I40E_TX_CTX_EXT_IP_IPV6; 3316 3317 exthdr = ip.hdr + sizeof(*ip.v6); 3318 l4_proto = ip.v6->nexthdr; 3319 ret = ipv6_skip_exthdr(skb, exthdr - skb->data, 3320 &l4_proto, &frag_off); 3321 if (ret < 0) 3322 return -1; 3323 } 3324 3325 /* define outer transport */ 3326 switch (l4_proto) { 3327 case IPPROTO_UDP: 3328 tunnel |= I40E_TXD_CTX_UDP_TUNNELING; 3329 *tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL; 3330 break; 3331 case IPPROTO_GRE: 3332 tunnel |= I40E_TXD_CTX_GRE_TUNNELING; 3333 *tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL; 3334 break; 3335 case IPPROTO_IPIP: 3336 case IPPROTO_IPV6: 3337 *tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL; 3338 l4.hdr = skb_inner_network_header(skb); 3339 break; 3340 default: 3341 if (*tx_flags & I40E_TX_FLAGS_TSO) 3342 return -1; 3343 3344 skb_checksum_help(skb); 3345 return 0; 3346 } 3347 3348 /* compute outer L3 header size */ 3349 tunnel |= ((l4.hdr - ip.hdr) / 4) << 3350 I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT; 3351 3352 /* switch IP header pointer from outer to inner header */ 3353 ip.hdr = skb_inner_network_header(skb); 3354 3355 /* compute tunnel header size */ 3356 tunnel |= ((ip.hdr - l4.hdr) / 2) << 3357 I40E_TXD_CTX_QW0_NATLEN_SHIFT; 3358 3359 /* indicate if we need to offload outer UDP header */ 3360 if ((*tx_flags & I40E_TX_FLAGS_TSO) && 3361 !(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) && 3362 (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) 3363 tunnel |= I40E_TXD_CTX_QW0_L4T_CS_MASK; 3364 3365 /* record tunnel offload values */ 3366 *cd_tunneling |= tunnel; 3367 3368 /* switch L4 header pointer from outer to inner */ 3369 l4.hdr = skb_inner_transport_header(skb); 3370 l4_proto = 0; 3371 3372 /* reset type as we transition from outer to inner headers */ 3373 *tx_flags &= ~(I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6); 3374 if (ip.v4->version == 4) 3375 *tx_flags |= I40E_TX_FLAGS_IPV4; 3376 if (ip.v6->version == 6) 3377 *tx_flags |= I40E_TX_FLAGS_IPV6; 3378 } 3379 3380 /* Enable IP checksum offloads */ 3381 if (*tx_flags & I40E_TX_FLAGS_IPV4) { 3382 l4_proto = ip.v4->protocol; 3383 /* the stack computes the IP header already, the only time we 3384 * need the hardware to recompute it is in the case of TSO. 3385 */ 3386 cmd |= (*tx_flags & I40E_TX_FLAGS_TSO) ? 
3387 I40E_TX_DESC_CMD_IIPT_IPV4_CSUM : 3388 I40E_TX_DESC_CMD_IIPT_IPV4; 3389 } else if (*tx_flags & I40E_TX_FLAGS_IPV6) { 3390 cmd |= I40E_TX_DESC_CMD_IIPT_IPV6; 3391 3392 exthdr = ip.hdr + sizeof(*ip.v6); 3393 l4_proto = ip.v6->nexthdr; 3394 if (l4.hdr != exthdr) 3395 ipv6_skip_exthdr(skb, exthdr - skb->data, 3396 &l4_proto, &frag_off); 3397 } 3398 3399 /* compute inner L3 header size */ 3400 offset |= ((l4.hdr - ip.hdr) / 4) << I40E_TX_DESC_LENGTH_IPLEN_SHIFT; 3401 3402 /* Enable L4 checksum offloads */ 3403 switch (l4_proto) { 3404 case IPPROTO_TCP: 3405 /* enable checksum offloads */ 3406 cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP; 3407 offset |= l4.tcp->doff << I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT; 3408 break; 3409 case IPPROTO_SCTP: 3410 /* enable SCTP checksum offload */ 3411 cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP; 3412 offset |= (sizeof(struct sctphdr) >> 2) << 3413 I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT; 3414 break; 3415 case IPPROTO_UDP: 3416 /* enable UDP checksum offload */ 3417 cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP; 3418 offset |= (sizeof(struct udphdr) >> 2) << 3419 I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT; 3420 break; 3421 default: 3422 if (*tx_flags & I40E_TX_FLAGS_TSO) 3423 return -1; 3424 skb_checksum_help(skb); 3425 return 0; 3426 } 3427 3428 *td_cmd |= cmd; 3429 *td_offset |= offset; 3430 3431 return 1; 3432 } 3433 3434 /** 3435 * i40e_create_tx_ctx - Build the Tx context descriptor 3436 * @tx_ring: ring to create the descriptor on 3437 * @cd_type_cmd_tso_mss: Quad Word 1 3438 * @cd_tunneling: Quad Word 0 - bits 0-31 3439 * @cd_l2tag2: Quad Word 0 - bits 32-63 3440 **/ 3441 static void i40e_create_tx_ctx(struct i40e_ring *tx_ring, 3442 const u64 cd_type_cmd_tso_mss, 3443 const u32 cd_tunneling, const u32 cd_l2tag2) 3444 { 3445 struct i40e_tx_context_desc *context_desc; 3446 int i = tx_ring->next_to_use; 3447 3448 if ((cd_type_cmd_tso_mss == I40E_TX_DESC_DTYPE_CONTEXT) && 3449 !cd_tunneling && !cd_l2tag2) 3450 return; 3451 3452 /* grab the next descriptor */ 3453 context_desc = I40E_TX_CTXTDESC(tx_ring, i); 3454 3455 i++; 3456 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; 3457 3458 /* cpu_to_le32 and assign to struct fields */ 3459 context_desc->tunneling_params = cpu_to_le32(cd_tunneling); 3460 context_desc->l2tag2 = cpu_to_le16(cd_l2tag2); 3461 context_desc->rsvd = cpu_to_le16(0); 3462 context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss); 3463 } 3464 3465 /** 3466 * __i40e_maybe_stop_tx - 2nd level check for tx stop conditions 3467 * @tx_ring: the ring to be checked 3468 * @size: the size buffer we want to assure is available 3469 * 3470 * Returns -EBUSY if a stop is needed, else 0 3471 **/ 3472 int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size) 3473 { 3474 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); 3475 /* Memory barrier before checking head and tail */ 3476 smp_mb(); 3477 3478 ++tx_ring->tx_stats.tx_stopped; 3479 3480 /* Check again in a case another CPU has just made room available. */ 3481 if (likely(I40E_DESC_UNUSED(tx_ring) < size)) 3482 return -EBUSY; 3483 3484 /* A reprieve! 
- use start_queue because it doesn't call schedule */
3485 	netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
3486 	++tx_ring->tx_stats.restart_queue;
3487 	return 0;
3488 }
3489
3490 /**
3491  * __i40e_chk_linearize - Check if there are more than 8 buffers per packet
3492  * @skb: send buffer
3493  *
3494  * Note: Our HW can't DMA more than 8 buffers to build a packet on the wire
3495  * and so we need to figure out the cases where we need to linearize the skb.
3496  *
3497  * For TSO we need to count the TSO header and segment payload separately.
3498  * As such we need to check cases where we have 7 fragments or more as we
3499  * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for
3500  * the segment payload in the first descriptor, and another 7 for the
3501  * fragments.
3502  **/
3503 bool __i40e_chk_linearize(struct sk_buff *skb)
3504 {
3505 	const skb_frag_t *frag, *stale;
3506 	int nr_frags, sum;
3507
3508 	/* no need to check if number of frags is less than 7 */
3509 	nr_frags = skb_shinfo(skb)->nr_frags;
3510 	if (nr_frags < (I40E_MAX_BUFFER_TXD - 1))
3511 		return false;
3512
3513 	/* We need to walk through the list and validate that each group
3514 	 * of 6 fragments totals at least gso_size.
3515 	 */
3516 	nr_frags -= I40E_MAX_BUFFER_TXD - 2;
3517 	frag = &skb_shinfo(skb)->frags[0];
3518
3519 	/* Initialize size to the negative value of gso_size minus 1. We
3520 	 * use this as the worst case scenario in which the frag ahead
3521 	 * of us only provides one byte which is why we are limited to 6
3522 	 * descriptors for a single transmit as the header and previous
3523 	 * fragment are already consuming 2 descriptors.
3524 	 */
3525 	sum = 1 - skb_shinfo(skb)->gso_size;
3526
3527 	/* Add size of frags 0 through 4 to create our initial sum */
3528 	sum += skb_frag_size(frag++);
3529 	sum += skb_frag_size(frag++);
3530 	sum += skb_frag_size(frag++);
3531 	sum += skb_frag_size(frag++);
3532 	sum += skb_frag_size(frag++);
3533
3534 	/* Walk through fragments adding latest fragment, testing it, and
3535 	 * then removing stale fragments from the sum.
3536 	 */
3537 	for (stale = &skb_shinfo(skb)->frags[0];; stale++) {
3538 		int stale_size = skb_frag_size(stale);
3539
3540 		sum += skb_frag_size(frag++);
3541
3542 		/* The stale fragment may present us with a smaller
3543 		 * descriptor than the actual fragment size. To account
3544 		 * for that we need to remove all the data on the front and
3545 		 * figure out what the remainder would be in the last
3546 		 * descriptor associated with the fragment.
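		 *
		 * Worked example (numbers hypothetical): gso_size = 4000
		 * with seven 600 byte frags gives
		 *
		 *	sum = 1 - 4000 + 5 * 600 = -999;
		 *	sum += 600  ->  -399 < 0
		 *
		 * i.e. no window of six frags covers one 4000 byte
		 * segment, so the skb must be linearized before transmit.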
3547 */ 3548 if (stale_size > I40E_MAX_DATA_PER_TXD) { 3549 int align_pad = -(skb_frag_off(stale)) & 3550 (I40E_MAX_READ_REQ_SIZE - 1); 3551 3552 sum -= align_pad; 3553 stale_size -= align_pad; 3554 3555 do { 3556 sum -= I40E_MAX_DATA_PER_TXD_ALIGNED; 3557 stale_size -= I40E_MAX_DATA_PER_TXD_ALIGNED; 3558 } while (stale_size > I40E_MAX_DATA_PER_TXD); 3559 } 3560 3561 /* if sum is negative we failed to make sufficient progress */ 3562 if (sum < 0) 3563 return true; 3564 3565 if (!nr_frags--) 3566 break; 3567 3568 sum -= stale_size; 3569 } 3570 3571 return false; 3572 } 3573 3574 /** 3575 * i40e_tx_map - Build the Tx descriptor 3576 * @tx_ring: ring to send buffer on 3577 * @skb: send buffer 3578 * @first: first buffer info buffer to use 3579 * @tx_flags: collected send information 3580 * @hdr_len: size of the packet header 3581 * @td_cmd: the command field in the descriptor 3582 * @td_offset: offset for checksum or crc 3583 * 3584 * Returns 0 on success, -1 on failure to DMA 3585 **/ 3586 static inline int i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, 3587 struct i40e_tx_buffer *first, u32 tx_flags, 3588 const u8 hdr_len, u32 td_cmd, u32 td_offset) 3589 { 3590 unsigned int data_len = skb->data_len; 3591 unsigned int size = skb_headlen(skb); 3592 skb_frag_t *frag; 3593 struct i40e_tx_buffer *tx_bi; 3594 struct i40e_tx_desc *tx_desc; 3595 u16 i = tx_ring->next_to_use; 3596 u32 td_tag = 0; 3597 dma_addr_t dma; 3598 u16 desc_count = 1; 3599 3600 if (tx_flags & I40E_TX_FLAGS_HW_VLAN) { 3601 td_cmd |= I40E_TX_DESC_CMD_IL2TAG1; 3602 td_tag = (tx_flags & I40E_TX_FLAGS_VLAN_MASK) >> 3603 I40E_TX_FLAGS_VLAN_SHIFT; 3604 } 3605 3606 first->tx_flags = tx_flags; 3607 3608 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); 3609 3610 tx_desc = I40E_TX_DESC(tx_ring, i); 3611 tx_bi = first; 3612 3613 for (frag = &skb_shinfo(skb)->frags[0];; frag++) { 3614 unsigned int max_data = I40E_MAX_DATA_PER_TXD_ALIGNED; 3615 3616 if (dma_mapping_error(tx_ring->dev, dma)) 3617 goto dma_error; 3618 3619 /* record length, and DMA address */ 3620 dma_unmap_len_set(tx_bi, len, size); 3621 dma_unmap_addr_set(tx_bi, dma, dma); 3622 3623 /* align size to end of page */ 3624 max_data += -dma & (I40E_MAX_READ_REQ_SIZE - 1); 3625 tx_desc->buffer_addr = cpu_to_le64(dma); 3626 3627 while (unlikely(size > I40E_MAX_DATA_PER_TXD)) { 3628 tx_desc->cmd_type_offset_bsz = 3629 build_ctob(td_cmd, td_offset, 3630 max_data, td_tag); 3631 3632 tx_desc++; 3633 i++; 3634 desc_count++; 3635 3636 if (i == tx_ring->count) { 3637 tx_desc = I40E_TX_DESC(tx_ring, 0); 3638 i = 0; 3639 } 3640 3641 dma += max_data; 3642 size -= max_data; 3643 3644 max_data = I40E_MAX_DATA_PER_TXD_ALIGNED; 3645 tx_desc->buffer_addr = cpu_to_le64(dma); 3646 } 3647 3648 if (likely(!data_len)) 3649 break; 3650 3651 tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset, 3652 size, td_tag); 3653 3654 tx_desc++; 3655 i++; 3656 desc_count++; 3657 3658 if (i == tx_ring->count) { 3659 tx_desc = I40E_TX_DESC(tx_ring, 0); 3660 i = 0; 3661 } 3662 3663 size = skb_frag_size(frag); 3664 data_len -= size; 3665 3666 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size, 3667 DMA_TO_DEVICE); 3668 3669 tx_bi = &tx_ring->tx_bi[i]; 3670 } 3671 3672 netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount); 3673 3674 i++; 3675 if (i == tx_ring->count) 3676 i = 0; 3677 3678 tx_ring->next_to_use = i; 3679 3680 i40e_maybe_stop_tx(tx_ring, DESC_NEEDED); 3681 3682 /* write last descriptor with EOP bit */ 3683 td_cmd |= I40E_TX_DESC_CMD_EOP; 3684 3685 /* We OR 
	/* We OR these values together to check both against 4 (WB_STRIDE)
	 * below. This is safe since we don't re-use desc_count afterwards.
	 */
	desc_count |= ++tx_ring->packet_stride;

	if (desc_count >= WB_STRIDE) {
		/* write last descriptor with RS bit set */
		td_cmd |= I40E_TX_DESC_CMD_RS;
		tx_ring->packet_stride = 0;
	}

	tx_desc->cmd_type_offset_bsz =
			build_ctob(td_cmd, td_offset, size, td_tag);

	skb_tx_timestamp(skb);

	/* Force memory writes to complete before letting h/w know there
	 * are new descriptors to fetch.
	 *
	 * We also use this memory barrier to make certain all of the
	 * status bits have been updated before next_to_watch is written.
	 */
	wmb();

	/* set next_to_watch value indicating a packet is present */
	first->next_to_watch = tx_desc;

	/* notify HW of packet */
	if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more())
		writel(i, tx_ring->tail);

	return 0;

dma_error:
	dev_info(tx_ring->dev, "TX DMA map failed\n");

	/* clear dma mappings for failed tx_bi map */
	for (;;) {
		tx_bi = &tx_ring->tx_bi[i];
		i40e_unmap_and_free_tx_resource(tx_ring, tx_bi);
		if (tx_bi == first)
			break;
		if (i == 0)
			i = tx_ring->count;
		i--;
	}

	tx_ring->next_to_use = i;

	return -1;
}

static u16 i40e_swdcb_skb_tx_hash(struct net_device *dev,
				  const struct sk_buff *skb,
				  u16 num_tx_queues)
{
	u32 jhash_initval_salt = 0xd631614b;
	u32 hash;

	if (skb->sk && skb->sk->sk_hash)
		hash = skb->sk->sk_hash;
	else
		hash = (__force u16)skb->protocol ^ skb->hash;

	hash = jhash_1word(hash, jhash_initval_salt);

	return (u16)(((u64)hash * num_tx_queues) >> 32);
}

u16 i40e_lan_select_queue(struct net_device *netdev,
			  struct sk_buff *skb,
			  struct net_device __always_unused *sb_dev)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_hw *hw;
	u16 qoffset;
	u16 qcount;
	u8 tclass;
	u16 hash;
	u8 prio;

	/* is DCB enabled at all? */
	if (vsi->tc_config.numtc == 1 ||
	    i40e_is_tc_mqprio_enabled(vsi->back))
		return netdev_pick_tx(netdev, skb, sb_dev);

	prio = skb->priority;
	hw = &vsi->back->hw;
	tclass = hw->local_dcbx_config.etscfg.prioritytable[prio];
	/* sanity check */
	if (unlikely(!(vsi->tc_config.enabled_tc & BIT(tclass))))
		tclass = 0;

	/* select a queue assigned for the given TC */
	qcount = vsi->tc_config.tc_info[tclass].qcount;
	hash = i40e_swdcb_skb_tx_hash(netdev, skb, qcount);

	qoffset = vsi->tc_config.tc_info[tclass].qoffset;
	return qoffset + hash;
}
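
/* The final scale in i40e_swdcb_skb_tx_hash() maps a 32-bit hash onto
 * [0, num_tx_queues) without a modulo: (hash * num_tx_queues) >> 32 is
 * the classic reciprocal-scale trick (the same idea as the kernel's
 * reciprocal_scale() helper). Worked example: hash = 0x80000000 with
 * num_tx_queues = 8 gives (0x80000000ULL * 8) >> 32 = 4, i.e. the
 * midpoint hash lands in the middle queue of the traffic class.
 */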
/**
 * i40e_xmit_xdp_ring - transmits an XDP buffer to an XDP Tx ring
 * @xdpf: data to transmit
 * @xdp_ring: XDP Tx ring
 **/
static int i40e_xmit_xdp_ring(struct xdp_frame *xdpf,
			      struct i40e_ring *xdp_ring)
{
	struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf);
	u8 nr_frags = unlikely(xdp_frame_has_frags(xdpf)) ? sinfo->nr_frags : 0;
	u16 i = 0, index = xdp_ring->next_to_use;
	struct i40e_tx_buffer *tx_head = &xdp_ring->tx_bi[index];
	struct i40e_tx_buffer *tx_bi = tx_head;
	struct i40e_tx_desc *tx_desc = I40E_TX_DESC(xdp_ring, index);
	void *data = xdpf->data;
	u32 size = xdpf->len;

	if (unlikely(I40E_DESC_UNUSED(xdp_ring) < 1 + nr_frags)) {
		xdp_ring->tx_stats.tx_busy++;
		return I40E_XDP_CONSUMED;
	}

	tx_head->bytecount = xdp_get_frame_len(xdpf);
	tx_head->gso_segs = 1;
	tx_head->xdpf = xdpf;

	for (;;) {
		dma_addr_t dma;

		dma = dma_map_single(xdp_ring->dev, data, size, DMA_TO_DEVICE);
		if (dma_mapping_error(xdp_ring->dev, dma))
			goto unmap;

		/* record length, and DMA address */
		dma_unmap_len_set(tx_bi, len, size);
		dma_unmap_addr_set(tx_bi, dma, dma);

		tx_desc->buffer_addr = cpu_to_le64(dma);
		tx_desc->cmd_type_offset_bsz =
			build_ctob(I40E_TX_DESC_CMD_ICRC, 0, size, 0);

		if (++index == xdp_ring->count)
			index = 0;

		if (i == nr_frags)
			break;

		tx_bi = &xdp_ring->tx_bi[index];
		tx_desc = I40E_TX_DESC(xdp_ring, index);

		data = skb_frag_address(&sinfo->frags[i]);
		size = skb_frag_size(&sinfo->frags[i]);
		i++;
	}

	tx_desc->cmd_type_offset_bsz |=
		cpu_to_le64(I40E_TXD_CMD << I40E_TXD_QW1_CMD_SHIFT);

	/* Make certain all of the status bits have been updated
	 * before next_to_watch is written.
	 */
	smp_wmb();

	xdp_ring->xdp_tx_active++;

	tx_head->next_to_watch = tx_desc;
	xdp_ring->next_to_use = index;

	return I40E_XDP_TX;

unmap:
	for (;;) {
		tx_bi = &xdp_ring->tx_bi[index];
		if (dma_unmap_len(tx_bi, len))
			dma_unmap_page(xdp_ring->dev,
				       dma_unmap_addr(tx_bi, dma),
				       dma_unmap_len(tx_bi, len),
				       DMA_TO_DEVICE);
		dma_unmap_len_set(tx_bi, len, 0);
		if (tx_bi == tx_head)
			break;

		if (!index)
			index += xdp_ring->count;
		index--;
	}

	return I40E_XDP_CONSUMED;
}

/**
 * i40e_xmit_frame_ring - Sends buffer on Tx ring
 * @skb: send buffer
 * @tx_ring: ring to send buffer on
 *
 * Returns NETDEV_TX_OK if sent, else an error code
 **/
static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
					struct i40e_ring *tx_ring)
{
	u64 cd_type_cmd_tso_mss = I40E_TX_DESC_DTYPE_CONTEXT;
	u32 cd_tunneling = 0, cd_l2tag2 = 0;
	struct i40e_tx_buffer *first;
	u32 td_offset = 0;
	u32 tx_flags = 0;
	u32 td_cmd = 0;
	u8 hdr_len = 0;
	int tso, count;
	int tsyn;

	/* prefetch the data, we'll need it later */
	prefetch(skb->data);

	i40e_trace(xmit_frame_ring, skb, tx_ring);

	count = i40e_xmit_descriptor_count(skb);
	if (i40e_chk_linearize(skb, count)) {
		if (__skb_linearize(skb)) {
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
		count = i40e_txd_use_count(skb->len);
		tx_ring->tx_stats.tx_linearize++;
	}

	/* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD,
	 * + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD,
	 * + 4 desc gap to avoid the cache line where head is,
	 * + 1 desc for context descriptor,
	 * otherwise try next time
	 */
	if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) {
		tx_ring->tx_stats.tx_busy++;
		return NETDEV_TX_BUSY;
	}
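
	/* A rough budget sketch, assuming i40e_txd_use_count() keeps its
	 * usual multiply-by-85, shift-by-20 approximation of dividing by
	 * 12K: a linearized 30000-byte skb needs
	 * ((30000 * 85) >> 20) + 1 = 3 data descriptors, so the check
	 * above required 3 + 4 + 1 = 8 free ring slots before committing
	 * to the send.
	 */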

	/* record the location of the first descriptor for this packet */
	first = &tx_ring->tx_bi[tx_ring->next_to_use];
	first->skb = skb;
	first->bytecount = skb->len;
	first->gso_segs = 1;

	/* prepare the xmit flags */
	if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
		goto out_drop;

	tso = i40e_tso(first, &hdr_len, &cd_type_cmd_tso_mss);

	if (tso < 0)
		goto out_drop;
	else if (tso)
		tx_flags |= I40E_TX_FLAGS_TSO;

	/* Always offload the checksum, since it's in the data descriptor */
	tso = i40e_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset,
				  tx_ring, &cd_tunneling);
	if (tso < 0)
		goto out_drop;

	tsyn = i40e_tsyn(tx_ring, skb, tx_flags, &cd_type_cmd_tso_mss);

	if (tsyn)
		tx_flags |= I40E_TX_FLAGS_TSYN;

	/* always enable CRC insertion offload */
	td_cmd |= I40E_TX_DESC_CMD_ICRC;

	i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss,
			   cd_tunneling, cd_l2tag2);

	/* Add Flow Director ATR if it's enabled.
	 *
	 * NOTE: this must always be directly before the data descriptor.
	 */
	i40e_atr(tx_ring, skb, tx_flags);

	if (i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
			td_cmd, td_offset))
		goto cleanup_tx_tstamp;

	return NETDEV_TX_OK;

out_drop:
	i40e_trace(xmit_frame_ring_drop, first->skb, tx_ring);
	dev_kfree_skb_any(first->skb);
	first->skb = NULL;
cleanup_tx_tstamp:
	if (unlikely(tx_flags & I40E_TX_FLAGS_TSYN)) {
		struct i40e_pf *pf = i40e_netdev_to_pf(tx_ring->netdev);

		dev_kfree_skb_any(pf->ptp_tx_skb);
		pf->ptp_tx_skb = NULL;
		clear_bit_unlock(__I40E_PTP_TX_IN_PROGRESS, pf->state);
	}

	return NETDEV_TX_OK;
}

/**
 * i40e_lan_xmit_frame - Selects the correct VSI and Tx queue to send buffer
 * @skb: send buffer
 * @netdev: network interface device structure
 *
 * Returns NETDEV_TX_OK if sent, else an error code
 **/
netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_ring *tx_ring = vsi->tx_rings[skb->queue_mapping];

	/* hardware can't handle really short frames; hardware padding works
	 * beyond this point
	 */
	if (skb_put_padto(skb, I40E_MIN_TX_LEN))
		return NETDEV_TX_OK;
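
	/* For illustration (assuming I40E_MIN_TX_LEN keeps its usual value
	 * of 17 in this driver's headers): a 14-byte Ethernet header with
	 * no payload is zero-extended by skb_put_padto() to 17 bytes here.
	 * On allocation failure skb_put_padto() frees the skb itself, so
	 * returning NETDEV_TX_OK above is correct: there is nothing left
	 * for the stack to retry or release.
	 */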

	return i40e_xmit_frame_ring(skb, tx_ring);
}

/**
 * i40e_xdp_xmit - Implements ndo_xdp_xmit
 * @dev: netdev
 * @n: number of frames
 * @frames: array of XDP buffer pointers
 * @flags: XDP extra info
 *
 * Returns number of frames successfully sent. Failed frames
 * will be freed by XDP core.
 *
 * For error cases, a negative errno code is returned and no frames
 * are transmitted (caller must handle freeing frames).
 **/
int i40e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
		  u32 flags)
{
	struct i40e_netdev_priv *np = netdev_priv(dev);
	unsigned int queue_index = smp_processor_id();
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_ring *xdp_ring;
	int nxmit = 0;
	int i;

	if (test_bit(__I40E_VSI_DOWN, vsi->state))
		return -ENETDOWN;

	if (!i40e_enabled_xdp_vsi(vsi) || queue_index >= vsi->num_queue_pairs ||
	    test_bit(__I40E_CONFIG_BUSY, pf->state))
		return -ENXIO;

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
		return -EINVAL;

	xdp_ring = vsi->xdp_rings[queue_index];

	for (i = 0; i < n; i++) {
		struct xdp_frame *xdpf = frames[i];
		int err;

		err = i40e_xmit_xdp_ring(xdpf, xdp_ring);
		if (err != I40E_XDP_TX)
			break;
		nxmit++;
	}

	if (unlikely(flags & XDP_XMIT_FLUSH))
		i40e_xdp_ring_update_tail(xdp_ring);

	return nxmit;
}
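
/* Return-value sketch under the semantics documented above: if the XDP
 * core hands in n = 8 frames but the ring only has room for 5, the loop
 * stops at the first I40E_XDP_CONSUMED and 5 is returned; the core then
 * frees the 3 frames that were never queued. A negative errno (e.g.
 * -ENETDOWN) means nothing was queued and the caller still owns all 8
 * frames.
 */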