// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2018 Intel Corporation. */

#include <linux/prefetch.h>
#include <linux/bpf_trace.h>
#include <net/xdp.h>
#include "i40e.h"
#include "i40e_trace.h"
#include "i40e_prototype.h"
#include "i40e_txrx_common.h"
#include "i40e_xsk.h"

#define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
/**
 * i40e_fdir - Generate a Flow Director descriptor based on fdata
 * @tx_ring: Tx ring to send buffer on
 * @fdata: Flow director filter data
 * @add: Indicate if we are adding a rule or deleting one
 *
 **/
static void i40e_fdir(struct i40e_ring *tx_ring,
		      struct i40e_fdir_filter *fdata, bool add)
{
	struct i40e_filter_program_desc *fdir_desc;
	struct i40e_pf *pf = tx_ring->vsi->back;
	u32 flex_ptype, dtype_cmd;
	u16 i;

	/* grab the next descriptor */
	i = tx_ring->next_to_use;
	fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);

	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

	flex_ptype = I40E_TXD_FLTR_QW0_QINDEX_MASK &
		     (fdata->q_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT);

	flex_ptype |= I40E_TXD_FLTR_QW0_FLEXOFF_MASK &
		      (fdata->flex_off << I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT);

	flex_ptype |= I40E_TXD_FLTR_QW0_PCTYPE_MASK &
		      (fdata->pctype << I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);

	/* Use LAN VSI Id if not programmed by user */
	flex_ptype |= I40E_TXD_FLTR_QW0_DEST_VSI_MASK &
		      ((u32)(fdata->dest_vsi ? : pf->vsi[pf->lan_vsi]->id) <<
		       I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT);

	dtype_cmd = I40E_TX_DESC_DTYPE_FILTER_PROG;

	dtype_cmd |= add ?
		     I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
		     I40E_TXD_FLTR_QW1_PCMD_SHIFT :
		     I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
		     I40E_TXD_FLTR_QW1_PCMD_SHIFT;

	dtype_cmd |= I40E_TXD_FLTR_QW1_DEST_MASK &
		     (fdata->dest_ctl << I40E_TXD_FLTR_QW1_DEST_SHIFT);

	dtype_cmd |= I40E_TXD_FLTR_QW1_FD_STATUS_MASK &
		     (fdata->fd_status << I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT);

	if (fdata->cnt_index) {
		dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
		dtype_cmd |= I40E_TXD_FLTR_QW1_CNTINDEX_MASK &
			     ((u32)fdata->cnt_index <<
			      I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT);
	}

	fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
	fdir_desc->rsvd = cpu_to_le32(0);
	fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd);
	fdir_desc->fd_id = cpu_to_le32(fdata->fd_id);
}

#define I40E_FD_CLEAN_DELAY 10
/**
 * i40e_program_fdir_filter - Program a Flow Director filter
 * @fdir_data: Packet data that will be filter parameters
 * @raw_packet: the pre-allocated packet buffer for FDir
 * @pf: The PF pointer
 * @add: True for add/update, False for remove
 **/
static int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data,
				    u8 *raw_packet, struct i40e_pf *pf,
				    bool add)
{
	struct i40e_tx_buffer *tx_buf, *first;
	struct i40e_tx_desc *tx_desc;
	struct i40e_ring *tx_ring;
	struct i40e_vsi *vsi;
	struct device *dev;
	dma_addr_t dma;
	u32 td_cmd = 0;
	u16 i;

	/* find existing FDIR VSI */
	vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR);
	if (!vsi)
		return -ENOENT;

	tx_ring = vsi->tx_rings[0];
	dev = tx_ring->dev;

	/* we need two descriptors to add/del a filter and we can wait */
	for (i = I40E_FD_CLEAN_DELAY; I40E_DESC_UNUSED(tx_ring) < 2; i--) {
		if (!i)
			return -EAGAIN;
		msleep_interruptible(1);
	}

	dma = dma_map_single(dev, raw_packet,
			     I40E_FDIR_MAX_RAW_PACKET_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma))
		goto dma_fail;

	/* grab the next descriptor */
	i = tx_ring->next_to_use;
	first = &tx_ring->tx_bi[i];
	i40e_fdir(tx_ring, fdir_data, add);

	/* Now program a dummy descriptor */
	i = tx_ring->next_to_use;
	tx_desc = I40E_TX_DESC(tx_ring, i);
	tx_buf = &tx_ring->tx_bi[i];

	tx_ring->next_to_use = ((i + 1) < tx_ring->count) ? i + 1 : 0;

	memset(tx_buf, 0, sizeof(struct i40e_tx_buffer));

	/* record length, and DMA address */
	dma_unmap_len_set(tx_buf, len, I40E_FDIR_MAX_RAW_PACKET_SIZE);
	dma_unmap_addr_set(tx_buf, dma, dma);

	tx_desc->buffer_addr = cpu_to_le64(dma);
	td_cmd = I40E_TXD_CMD | I40E_TX_DESC_CMD_DUMMY;

	tx_buf->tx_flags = I40E_TX_FLAGS_FD_SB;
	tx_buf->raw_buf = (void *)raw_packet;

	tx_desc->cmd_type_offset_bsz =
		build_ctob(td_cmd, 0, I40E_FDIR_MAX_RAW_PACKET_SIZE, 0);

	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.
	 */
	wmb();

	/* Mark the data descriptor to be watched */
	first->next_to_watch = tx_desc;

	writel(tx_ring->next_to_use, tx_ring->tail);
	return 0;

dma_fail:
	return -1;
}

/**
 * i40e_create_dummy_packet - Constructs dummy packet for HW
 * @dummy_packet: preallocated space for dummy packet
 * @ipv4: is layer 3 packet of version 4 or 6
 * @l4proto: next level protocol used in data portion of l3
 * @data: filter data
 *
 * Returns address of layer 4 protocol dummy packet.
 **/
static char *i40e_create_dummy_packet(u8 *dummy_packet, bool ipv4, u8 l4proto,
				      struct i40e_fdir_filter *data)
{
	bool is_vlan = !!data->vlan_tag;
	struct vlan_hdr vlan;
	struct ipv6hdr ipv6;
	struct ethhdr eth;
	struct iphdr ip;
	u8 *tmp;

	if (ipv4) {
		eth.h_proto = cpu_to_be16(ETH_P_IP);
		ip.protocol = l4proto;
		ip.version = 0x4;
		ip.ihl = 0x5;

		ip.daddr = data->dst_ip;
		ip.saddr = data->src_ip;
	} else {
		eth.h_proto = cpu_to_be16(ETH_P_IPV6);
		ipv6.nexthdr = l4proto;
		ipv6.version = 0x6;

		memcpy(&ipv6.saddr.in6_u.u6_addr32, data->src_ip6,
		       sizeof(__be32) * 4);
		memcpy(&ipv6.daddr.in6_u.u6_addr32, data->dst_ip6,
		       sizeof(__be32) * 4);
	}

	if (is_vlan) {
		vlan.h_vlan_TCI = data->vlan_tag;
		vlan.h_vlan_encapsulated_proto = eth.h_proto;
		eth.h_proto = data->vlan_etype;
	}

	tmp = dummy_packet;
	memcpy(tmp, &eth, sizeof(eth));
	tmp += sizeof(eth);

	if (is_vlan) {
		memcpy(tmp, &vlan, sizeof(vlan));
		tmp += sizeof(vlan);
	}

	if (ipv4) {
		memcpy(tmp, &ip, sizeof(ip));
		tmp += sizeof(ip);
	} else {
		memcpy(tmp, &ipv6, sizeof(ipv6));
		tmp += sizeof(ipv6);
	}

	return tmp;
}

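/*
 * Layout note (editorial, derived from the code above and below):
 * i40e_create_dummy_packet() writes an Ethernet header, an optional 802.1Q
 * tag and then the IPv4 or IPv6 header into the preallocated buffer and
 * returns a pointer just past the L3 header. The protocol-specific wrappers
 * below (UDP/TCP/SCTP) write their L4 header at that address; everything
 * after it is treated as payload by the flex-filter code.
 */
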
/**
 * i40e_create_dummy_udp_packet - helper function to create UDP packet
 * @raw_packet: preallocated space for dummy packet
 * @ipv4: is layer 3 packet of version 4 or 6
 * @l4proto: next level protocol used in data portion of l3
 * @data: filter data
 *
 * Helper function to populate udp fields.
 **/
static void i40e_create_dummy_udp_packet(u8 *raw_packet, bool ipv4, u8 l4proto,
					 struct i40e_fdir_filter *data)
{
	struct udphdr *udp;
	u8 *tmp;

	tmp = i40e_create_dummy_packet(raw_packet, ipv4, IPPROTO_UDP, data);
	udp = (struct udphdr *)(tmp);
	udp->dest = data->dst_port;
	udp->source = data->src_port;
}

/**
 * i40e_create_dummy_tcp_packet - helper function to create TCP packet
 * @raw_packet: preallocated space for dummy packet
 * @ipv4: is layer 3 packet of version 4 or 6
 * @l4proto: next level protocol used in data portion of l3
 * @data: filter data
 *
 * Helper function to populate tcp fields.
 **/
static void i40e_create_dummy_tcp_packet(u8 *raw_packet, bool ipv4, u8 l4proto,
					 struct i40e_fdir_filter *data)
{
	struct tcphdr *tcp;
	u8 *tmp;
	/* Dummy tcp packet */
	static const char tcp_packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		0x50, 0x11, 0x0, 0x72, 0, 0, 0, 0};

	tmp = i40e_create_dummy_packet(raw_packet, ipv4, IPPROTO_TCP, data);

	tcp = (struct tcphdr *)tmp;
	memcpy(tcp, tcp_packet, sizeof(tcp_packet));
	tcp->dest = data->dst_port;
	tcp->source = data->src_port;
}

/**
 * i40e_create_dummy_sctp_packet - helper function to create SCTP packet
 * @raw_packet: preallocated space for dummy packet
 * @ipv4: is layer 3 packet of version 4 or 6
 * @l4proto: next level protocol used in data portion of l3
 * @data: filter data
 *
 * Helper function to populate sctp fields.
 **/
static void i40e_create_dummy_sctp_packet(u8 *raw_packet, bool ipv4,
					  u8 l4proto,
					  struct i40e_fdir_filter *data)
{
	struct sctphdr *sctp;
	u8 *tmp;

	tmp = i40e_create_dummy_packet(raw_packet, ipv4, IPPROTO_SCTP, data);

	sctp = (struct sctphdr *)tmp;
	sctp->dest = data->dst_port;
	sctp->source = data->src_port;
}

/**
 * i40e_prepare_fdir_filter - Prepare and program fdir filter
 * @pf: physical function to attach filter to
 * @fd_data: filter data
 * @add: add or delete filter
 * @packet_addr: address of dummy packet, used in filtering
 * @payload_offset: offset from dummy packet address to user defined data
 * @pctype: Packet type for which filter is used
 *
 * Helper function to offset data of dummy packet, program it and
 * handle errors.
 **/
static int i40e_prepare_fdir_filter(struct i40e_pf *pf,
				    struct i40e_fdir_filter *fd_data,
				    bool add, char *packet_addr,
				    int payload_offset, u8 pctype)
{
	int ret;

	if (fd_data->flex_filter) {
		u8 *payload;
		__be16 pattern = fd_data->flex_word;
		u16 off = fd_data->flex_offset;

		payload = packet_addr + payload_offset;

		/* If user provided vlan, offset payload by vlan header length */
		if (!!fd_data->vlan_tag)
			payload += VLAN_HLEN;

		*((__force __be16 *)(payload + off)) = pattern;
	}

	fd_data->pctype = pctype;
	ret = i40e_program_fdir_filter(fd_data, packet_addr, pf, add);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
			 fd_data->pctype, fd_data->fd_id, ret);
		/* Free the packet buffer since it wasn't added to the ring */
		return -EOPNOTSUPP;
	} else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
		if (add)
			dev_info(&pf->pdev->dev,
				 "Filter OK for PCTYPE %d loc = %d\n",
				 fd_data->pctype, fd_data->fd_id);
		else
			dev_info(&pf->pdev->dev,
				 "Filter deleted for PCTYPE %d loc = %d\n",
				 fd_data->pctype, fd_data->fd_id);
	}

	return ret;
}

/**
 * i40e_change_filter_num - Update the IPv4 or IPv6 filter counter
 * @ipv4: is layer 3 packet of version 4 or 6
 * @add: add or delete filter
 * @ipv4_filter_num: field to update
 * @ipv6_filter_num: field to update
 *
 * Update filter number field for pf.
 **/
static void i40e_change_filter_num(bool ipv4, bool add, u16 *ipv4_filter_num,
				   u16 *ipv6_filter_num)
{
	if (add) {
		if (ipv4)
			(*ipv4_filter_num)++;
		else
			(*ipv6_filter_num)++;
	} else {
		if (ipv4)
			(*ipv4_filter_num)--;
		else
			(*ipv6_filter_num)--;
	}
}

#define IP_HEADER_OFFSET		14
#define I40E_UDPIP_DUMMY_PACKET_LEN	42
#define I40E_UDPIP6_DUMMY_PACKET_LEN	62
/**
 * i40e_add_del_fdir_udp - Add/Remove UDP filters
 * @vsi: pointer to the targeted VSI
 * @fd_data: the flow director data required for the FDir descriptor
 * @add: true adds a filter, false removes it
 * @ipv4: true is v4, false is v6
 *
 * Returns 0 if the filters were successfully added or removed
 **/
static int i40e_add_del_fdir_udp(struct i40e_vsi *vsi,
				 struct i40e_fdir_filter *fd_data,
				 bool add,
				 bool ipv4)
{
	struct i40e_pf *pf = vsi->back;
	u8 *raw_packet;
	int ret;

	raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
	if (!raw_packet)
		return -ENOMEM;

	i40e_create_dummy_udp_packet(raw_packet, ipv4, IPPROTO_UDP, fd_data);

	if (ipv4)
		ret = i40e_prepare_fdir_filter
			(pf, fd_data, add, raw_packet,
			 I40E_UDPIP_DUMMY_PACKET_LEN,
			 I40E_FILTER_PCTYPE_NONF_IPV4_UDP);
	else
		ret = i40e_prepare_fdir_filter
			(pf, fd_data, add, raw_packet,
			 I40E_UDPIP6_DUMMY_PACKET_LEN,
			 I40E_FILTER_PCTYPE_NONF_IPV6_UDP);

	if (ret) {
		kfree(raw_packet);
		return ret;
	}

	i40e_change_filter_num(ipv4, add, &pf->fd_udp4_filter_cnt,
			       &pf->fd_udp6_filter_cnt);

	return 0;
}

#define I40E_TCPIP_DUMMY_PACKET_LEN	54
#define I40E_TCPIP6_DUMMY_PACKET_LEN	74
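/*
 * For reference: the *_DUMMY_PACKET_LEN constants in this file are passed to
 * i40e_prepare_fdir_filter() as payload_offset, i.e. the offset of the
 * user-defined payload where a flex-filter word is patched in. Each value is
 * simply the sum of the headers already written by the
 * i40e_create_dummy_*_packet() helpers: Ethernet (14) plus IPv4 (20) or
 * IPv6 (40) plus the L4 header, e.g. 42 = 14 + 20 + 8 for UDP over IPv4 and
 * 74 = 14 + 40 + 20 for TCP over IPv6. A VLAN tag, when present, is not
 * included here; i40e_prepare_fdir_filter() adds VLAN_HLEN separately.
 */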
/**
 * i40e_add_del_fdir_tcp - Add/Remove TCP filters
 * @vsi: pointer to the targeted VSI
 * @fd_data: the flow director data required for the FDir descriptor
 * @add: true adds a filter, false removes it
 * @ipv4: true is v4, false is v6
 *
 * Returns 0 if the filters were successfully added or removed
 **/
static int i40e_add_del_fdir_tcp(struct i40e_vsi *vsi,
				 struct i40e_fdir_filter *fd_data,
				 bool add,
				 bool ipv4)
{
	struct i40e_pf *pf = vsi->back;
	u8 *raw_packet;
	int ret;

	raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
	if (!raw_packet)
		return -ENOMEM;

	i40e_create_dummy_tcp_packet(raw_packet, ipv4, IPPROTO_TCP, fd_data);
	if (ipv4)
		ret = i40e_prepare_fdir_filter
			(pf, fd_data, add, raw_packet,
			 I40E_TCPIP_DUMMY_PACKET_LEN,
			 I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
	else
		ret = i40e_prepare_fdir_filter
			(pf, fd_data, add, raw_packet,
			 I40E_TCPIP6_DUMMY_PACKET_LEN,
			 I40E_FILTER_PCTYPE_NONF_IPV6_TCP);

	if (ret) {
		kfree(raw_packet);
		return ret;
	}

	i40e_change_filter_num(ipv4, add, &pf->fd_tcp4_filter_cnt,
			       &pf->fd_tcp6_filter_cnt);

	if (add) {
		if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
		    I40E_DEBUG_FD & pf->hw.debug_mask)
			dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n");
		set_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state);
	}
	return 0;
}

#define I40E_SCTPIP_DUMMY_PACKET_LEN	46
#define I40E_SCTPIP6_DUMMY_PACKET_LEN	66
/**
 * i40e_add_del_fdir_sctp - Add/Remove SCTP Flow Director filters for
 * a specific flow spec
 * @vsi: pointer to the targeted VSI
 * @fd_data: the flow director data required for the FDir descriptor
 * @add: true adds a filter, false removes it
 * @ipv4: true is v4, false is v6
 *
 * Returns 0 if the filters were successfully added or removed
 **/
static int i40e_add_del_fdir_sctp(struct i40e_vsi *vsi,
				  struct i40e_fdir_filter *fd_data,
				  bool add,
				  bool ipv4)
{
	struct i40e_pf *pf = vsi->back;
	u8 *raw_packet;
	int ret;

	raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
	if (!raw_packet)
		return -ENOMEM;

	i40e_create_dummy_sctp_packet(raw_packet, ipv4, IPPROTO_SCTP, fd_data);

	if (ipv4)
		ret = i40e_prepare_fdir_filter
			(pf, fd_data, add, raw_packet,
			 I40E_SCTPIP_DUMMY_PACKET_LEN,
			 I40E_FILTER_PCTYPE_NONF_IPV4_SCTP);
	else
		ret = i40e_prepare_fdir_filter
			(pf, fd_data, add, raw_packet,
			 I40E_SCTPIP6_DUMMY_PACKET_LEN,
			 I40E_FILTER_PCTYPE_NONF_IPV6_SCTP);

	if (ret) {
		kfree(raw_packet);
		return ret;
	}

	i40e_change_filter_num(ipv4, add, &pf->fd_sctp4_filter_cnt,
			       &pf->fd_sctp6_filter_cnt);

	return 0;
}

#define I40E_IP_DUMMY_PACKET_LEN	34
#define I40E_IP6_DUMMY_PACKET_LEN	54
/**
 * i40e_add_del_fdir_ip - Add/Remove IP Flow Director filters for
 * a specific flow spec
 * @vsi: pointer to the targeted VSI
 * @fd_data: the flow director data required for the FDir descriptor
 * @add: true adds a filter, false removes it
 * @ipv4: true is v4, false is v6
 *
 * Returns 0 if the filters were successfully added or removed
 **/
static int i40e_add_del_fdir_ip(struct i40e_vsi *vsi,
				struct i40e_fdir_filter *fd_data,
				bool add,
				bool ipv4)
{
	struct i40e_pf *pf = vsi->back;
	int payload_offset;
	u8 *raw_packet;
	int iter_start;
	int iter_end;
	int ret;
	int i;

	if (ipv4) {
		iter_start = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
		iter_end = I40E_FILTER_PCTYPE_FRAG_IPV4;
	} else {
		iter_start = I40E_FILTER_PCTYPE_NONF_IPV6_OTHER;
		iter_end = I40E_FILTER_PCTYPE_FRAG_IPV6;
	}

	for (i = iter_start; i <= iter_end; i++) {
		raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
		if (!raw_packet)
			return -ENOMEM;

		/* IPv6 no header option differs from IPv4 */
		(void)i40e_create_dummy_packet
			(raw_packet, ipv4, (ipv4) ? IPPROTO_IP : IPPROTO_NONE,
			 fd_data);

		payload_offset = (ipv4) ? I40E_IP_DUMMY_PACKET_LEN :
					  I40E_IP6_DUMMY_PACKET_LEN;
		ret = i40e_prepare_fdir_filter(pf, fd_data, add, raw_packet,
					       payload_offset, i);
		if (ret)
			goto err;
	}

	i40e_change_filter_num(ipv4, add, &pf->fd_ip4_filter_cnt,
			       &pf->fd_ip6_filter_cnt);

	return 0;
err:
	kfree(raw_packet);
	return ret;
}

/**
 * i40e_add_del_fdir - Build raw packets to add/del fdir filter
 * @vsi: pointer to the targeted VSI
 * @input: filter to add or delete
 * @add: true adds a filter, false removes it
 *
 **/
int i40e_add_del_fdir(struct i40e_vsi *vsi,
		      struct i40e_fdir_filter *input, bool add)
{
	enum ip_ver { ipv6 = 0, ipv4 = 1 };
	struct i40e_pf *pf = vsi->back;
	int ret;

	switch (input->flow_type & ~FLOW_EXT) {
	case TCP_V4_FLOW:
		ret = i40e_add_del_fdir_tcp(vsi, input, add, ipv4);
		break;
	case UDP_V4_FLOW:
		ret = i40e_add_del_fdir_udp(vsi, input, add, ipv4);
		break;
	case SCTP_V4_FLOW:
		ret = i40e_add_del_fdir_sctp(vsi, input, add, ipv4);
		break;
	case TCP_V6_FLOW:
		ret = i40e_add_del_fdir_tcp(vsi, input, add, ipv6);
		break;
	case UDP_V6_FLOW:
		ret = i40e_add_del_fdir_udp(vsi, input, add, ipv6);
		break;
	case SCTP_V6_FLOW:
		ret = i40e_add_del_fdir_sctp(vsi, input, add, ipv6);
		break;
	case IP_USER_FLOW:
		switch (input->ipl4_proto) {
		case IPPROTO_TCP:
			ret = i40e_add_del_fdir_tcp(vsi, input, add, ipv4);
			break;
		case IPPROTO_UDP:
			ret = i40e_add_del_fdir_udp(vsi, input, add, ipv4);
			break;
		case IPPROTO_SCTP:
			ret = i40e_add_del_fdir_sctp(vsi, input, add, ipv4);
			break;
		case IPPROTO_IP:
			ret = i40e_add_del_fdir_ip(vsi, input, add, ipv4);
			break;
		default:
			/* We cannot support masking based on protocol */
			dev_info(&pf->pdev->dev, "Unsupported IPv4 protocol 0x%02x\n",
				 input->ipl4_proto);
			return -EINVAL;
		}
		break;
	case IPV6_USER_FLOW:
		switch (input->ipl4_proto) {
		case IPPROTO_TCP:
			ret = i40e_add_del_fdir_tcp(vsi, input, add, ipv6);
			break;
		case IPPROTO_UDP:
			ret = i40e_add_del_fdir_udp(vsi, input, add, ipv6);
			break;
		case IPPROTO_SCTP:
			ret = i40e_add_del_fdir_sctp(vsi, input, add, ipv6);
			break;
		case IPPROTO_IP:
			ret = i40e_add_del_fdir_ip(vsi, input, add, ipv6);
			break;
		default:
			/* We cannot support masking based on protocol */
			dev_info(&pf->pdev->dev, "Unsupported IPv6 protocol 0x%02x\n",
				 input->ipl4_proto);
			return -EINVAL;
		}
		break;
	default:
		dev_info(&pf->pdev->dev, "Unsupported flow type 0x%02x\n",
			 input->flow_type);
		return -EINVAL;
	}

	/* The buffer allocated here will normally be freed by
	 * i40e_clean_fdir_tx_irq() as it reclaims resources after transmit
	 * completion. In the event of an error adding the buffer to the FDIR
	 * ring, it will immediately be freed. It may also be freed by
	 * i40e_clean_tx_ring() when closing the VSI.
	 */
	return ret;
}

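/*
 * Context note (editorial): i40e_add_del_fdir() is the sideband (ntuple)
 * entry point that ethtool reaches through the driver's set_rxnfc handling.
 * A rule that lands in the TCP_V4_FLOW case above might, for example, be
 * installed with a command along the lines of (illustrative only, device
 * name hypothetical):
 *
 *   ethtool -N eth0 flow-type tcp4 dst-ip 192.168.1.10 dst-port 80 action 3
 *
 * which steers matching packets to queue 3 via a Flow Director filter.
 */
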
/**
 * i40e_fd_handle_status - check the Programming Status for FD
 * @rx_ring: the Rx ring for this descriptor
 * @qword0_raw: qword0
 * @qword1: qword1 after le_to_cpu
 * @prog_id: the id originally used for programming
 *
 * This is used to verify if the FD programming or invalidation
 * requested by SW to the HW is successful or not and take actions accordingly.
 **/
static void i40e_fd_handle_status(struct i40e_ring *rx_ring, u64 qword0_raw,
				  u64 qword1, u8 prog_id)
{
	struct i40e_pf *pf = rx_ring->vsi->back;
	struct pci_dev *pdev = pf->pdev;
	struct i40e_16b_rx_wb_qw0 *qw0;
	u32 fcnt_prog, fcnt_avail;
	u32 error;

	qw0 = (struct i40e_16b_rx_wb_qw0 *)&qword0_raw;
	error = (qword1 & I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK) >>
		I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT;

	if (error == BIT(I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) {
		pf->fd_inv = le32_to_cpu(qw0->hi_dword.fd_id);
		if (qw0->hi_dword.fd_id != 0 ||
		    (I40E_DEBUG_FD & pf->hw.debug_mask))
			dev_warn(&pdev->dev, "ntuple filter loc = %d, could not be added\n",
				 pf->fd_inv);

		/* Check if the programming error is for ATR.
		 * If so, auto disable ATR and set a state for
		 * flush in progress. Next time we come here if flush is in
		 * progress do nothing, once flush is complete the state will
		 * be cleared.
		 */
		if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
			return;

		pf->fd_add_err++;
		/* store the current atr filter count */
		pf->fd_atr_cnt = i40e_get_current_atr_cnt(pf);

		if (qw0->hi_dword.fd_id == 0 &&
		    test_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state)) {
			/* These set_bit() calls aren't atomic with the
			 * test_bit() here, but worst case we potentially
			 * disable ATR and queue a flush right after SB
			 * support is re-enabled. That shouldn't cause an
			 * issue in practice
			 */
			set_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state);
			set_bit(__I40E_FD_FLUSH_REQUESTED, pf->state);
		}

		/* filter programming failed most likely due to table full */
		fcnt_prog = i40e_get_global_fd_count(pf);
		fcnt_avail = pf->fdir_pf_filter_count;
		/* If ATR is running fcnt_prog can quickly change,
		 * if we are very close to full, it makes sense to disable
		 * FD ATR/SB and then re-enable it when there is room.
		 */
		if (fcnt_prog >= (fcnt_avail - I40E_FDIR_BUFFER_FULL_MARGIN)) {
			if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
			    !test_and_set_bit(__I40E_FD_SB_AUTO_DISABLED,
					      pf->state))
				if (I40E_DEBUG_FD & pf->hw.debug_mask)
					dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n");
		}
	} else if (error == BIT(I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) {
		if (I40E_DEBUG_FD & pf->hw.debug_mask)
			dev_info(&pdev->dev, "ntuple filter fd_id = %d, could not be removed\n",
				 qw0->hi_dword.fd_id);
	}
}

/**
 * i40e_unmap_and_free_tx_resource - Release a Tx buffer
 * @ring: the ring that owns the buffer
 * @tx_buffer: the buffer to free
 **/
static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
					    struct i40e_tx_buffer *tx_buffer)
{
	if (tx_buffer->skb) {
		if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB)
			kfree(tx_buffer->raw_buf);
		else if (ring_is_xdp(ring))
			xdp_return_frame(tx_buffer->xdpf);
		else
			dev_kfree_skb_any(tx_buffer->skb);
		if (dma_unmap_len(tx_buffer, len))
			dma_unmap_single(ring->dev,
					 dma_unmap_addr(tx_buffer, dma),
					 dma_unmap_len(tx_buffer, len),
					 DMA_TO_DEVICE);
	} else if (dma_unmap_len(tx_buffer, len)) {
		dma_unmap_page(ring->dev,
			       dma_unmap_addr(tx_buffer, dma),
			       dma_unmap_len(tx_buffer, len),
			       DMA_TO_DEVICE);
	}

	tx_buffer->next_to_watch = NULL;
	tx_buffer->skb = NULL;
	dma_unmap_len_set(tx_buffer, len, 0);
	/* tx_buffer must be completely set up in the transmit path */
}

/**
 * i40e_clean_tx_ring - Free any pending Tx buffers
 * @tx_ring: ring to be cleaned
 **/
void i40e_clean_tx_ring(struct i40e_ring *tx_ring)
{
	unsigned long bi_size;
	u16 i;

	if (ring_is_xdp(tx_ring) && tx_ring->xsk_pool) {
		i40e_xsk_clean_tx_ring(tx_ring);
	} else {
		/* ring already cleared, nothing to do */
		if (!tx_ring->tx_bi)
			return;

		/* Free all the Tx ring sk_buffs */
		for (i = 0; i < tx_ring->count; i++)
			i40e_unmap_and_free_tx_resource(tx_ring,
							&tx_ring->tx_bi[i]);
	}

	bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
	memset(tx_ring->tx_bi, 0, bi_size);

	/* Zero out the descriptor ring */
	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	if (!tx_ring->netdev)
		return;

	/* cleanup Tx queue statistics */
	netdev_tx_reset_queue(txring_txq(tx_ring));
}

/**
 * i40e_free_tx_resources - Free Tx resources per queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/
void i40e_free_tx_resources(struct i40e_ring *tx_ring)
{
	i40e_clean_tx_ring(tx_ring);
	kfree(tx_ring->tx_bi);
	tx_ring->tx_bi = NULL;
	kfree(tx_ring->xsk_descs);
	tx_ring->xsk_descs = NULL;

	if (tx_ring->desc) {
		dma_free_coherent(tx_ring->dev, tx_ring->size,
				  tx_ring->desc, tx_ring->dma);
		tx_ring->desc = NULL;
	}
}

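/*
 * Note (editorial) on the head/tail accounting below: when in_sw is false,
 * i40e_get_head() reads the head value that the hardware writes back into
 * the extra u32 reserved after the last descriptor (see
 * i40e_setup_tx_descriptors(), which grows the ring by sizeof(u32) for this
 * purpose), while the tail comes from the tail register. With in_sw set,
 * the driver's own next_to_clean/next_to_use copies are used instead, which
 * is cheaper but may lag the hardware slightly.
 */
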
/**
 * i40e_get_tx_pending - how many tx descriptors not processed
 * @ring: the ring of descriptors
 * @in_sw: use SW variables
 *
 * Since there is no access to the ring head register
 * in XL710, we need to use our local copies
 **/
u32 i40e_get_tx_pending(struct i40e_ring *ring, bool in_sw)
{
	u32 head, tail;

	if (!in_sw) {
		head = i40e_get_head(ring);
		tail = readl(ring->tail);
	} else {
		head = ring->next_to_clean;
		tail = ring->next_to_use;
	}

	if (head != tail)
		return (head < tail) ?
			tail - head : (tail + ring->count - head);

	return 0;
}

/**
 * i40e_detect_recover_hung - Function to detect and recover hung queues
 * @vsi: pointer to vsi struct with tx queues
 *
 * VSI has netdev and netdev has TX queues. This function is to check each of
 * those TX queues if they are hung, trigger recovery by issuing SW interrupt.
 **/
void i40e_detect_recover_hung(struct i40e_vsi *vsi)
{
	struct i40e_ring *tx_ring = NULL;
	struct net_device *netdev;
	unsigned int i;
	int packets;

	if (!vsi)
		return;

	if (test_bit(__I40E_VSI_DOWN, vsi->state))
		return;

	netdev = vsi->netdev;
	if (!netdev)
		return;

	if (!netif_carrier_ok(netdev))
		return;

	for (i = 0; i < vsi->num_queue_pairs; i++) {
		tx_ring = vsi->tx_rings[i];
		if (tx_ring && tx_ring->desc) {
			/* If packet counter has not changed the queue is
			 * likely stalled, so force an interrupt for this
			 * queue.
			 *
			 * prev_pkt_ctr would be negative if there was no
			 * pending work.
			 */
			packets = tx_ring->stats.packets & INT_MAX;
			if (tx_ring->tx_stats.prev_pkt_ctr == packets) {
				i40e_force_wb(vsi, tx_ring->q_vector);
				continue;
			}

			/* Memory barrier between read of packet count and call
			 * to i40e_get_tx_pending()
			 */
			smp_rmb();
			tx_ring->tx_stats.prev_pkt_ctr =
				i40e_get_tx_pending(tx_ring, true) ? packets : -1;
		}
	}
}

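/*
 * Implementation note (editorial) for i40e_clean_tx_irq() below: the loop
 * keeps its ring index biased by subtracting tx_ring->count up front, so the
 * running index stays negative until the ring wraps. That lets the wrap
 * check be a cheap "if (unlikely(!i))" (index reaching zero) instead of a
 * comparison against the ring size on every descriptor; the real index is
 * recovered at the end with "i += tx_ring->count".
 */
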
/**
 * i40e_clean_tx_irq - Reclaim resources after transmit completes
 * @vsi: the VSI we care about
 * @tx_ring: Tx ring to clean
 * @napi_budget: Used to determine if we are in netpoll
 *
 * Returns true if there's any budget left (e.g. the clean is finished)
 **/
static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
			      struct i40e_ring *tx_ring, int napi_budget)
{
	int i = tx_ring->next_to_clean;
	struct i40e_tx_buffer *tx_buf;
	struct i40e_tx_desc *tx_head;
	struct i40e_tx_desc *tx_desc;
	unsigned int total_bytes = 0, total_packets = 0;
	unsigned int budget = vsi->work_limit;

	tx_buf = &tx_ring->tx_bi[i];
	tx_desc = I40E_TX_DESC(tx_ring, i);
	i -= tx_ring->count;

	tx_head = I40E_TX_DESC(tx_ring, i40e_get_head(tx_ring));

	do {
		struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		/* prevent any other reads prior to eop_desc */
		smp_rmb();

		i40e_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf);
		/* we have caught up to head, no work left to do */
		if (tx_head == tx_desc)
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buf->next_to_watch = NULL;

		/* update the statistics for this packet */
		total_bytes += tx_buf->bytecount;
		total_packets += tx_buf->gso_segs;

		/* free the skb/XDP data */
		if (ring_is_xdp(tx_ring))
			xdp_return_frame(tx_buf->xdpf);
		else
			napi_consume_skb(tx_buf->skb, napi_budget);

		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buf, dma),
				 dma_unmap_len(tx_buf, len),
				 DMA_TO_DEVICE);

		/* clear tx_buffer data */
		tx_buf->skb = NULL;
		dma_unmap_len_set(tx_buf, len, 0);

		/* unmap remaining buffers */
		while (tx_desc != eop_desc) {
			i40e_trace(clean_tx_irq_unmap,
				   tx_ring, tx_desc, tx_buf);

			tx_buf++;
			tx_desc++;
			i++;
			if (unlikely(!i)) {
				i -= tx_ring->count;
				tx_buf = tx_ring->tx_bi;
				tx_desc = I40E_TX_DESC(tx_ring, 0);
			}

			/* unmap any remaining paged data */
			if (dma_unmap_len(tx_buf, len)) {
				dma_unmap_page(tx_ring->dev,
					       dma_unmap_addr(tx_buf, dma),
					       dma_unmap_len(tx_buf, len),
					       DMA_TO_DEVICE);
				dma_unmap_len_set(tx_buf, len, 0);
			}
		}

		/* move us one more past the eop_desc for start of next pkt */
		tx_buf++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buf = tx_ring->tx_bi;
			tx_desc = I40E_TX_DESC(tx_ring, 0);
		}

		prefetch(tx_desc);

		/* update budget accounting */
		budget--;
	} while (likely(budget));

	i += tx_ring->count;
	tx_ring->next_to_clean = i;
	i40e_update_tx_stats(tx_ring, total_packets, total_bytes);
	i40e_arm_wb(tx_ring, vsi, budget);

	if (ring_is_xdp(tx_ring))
		return !!budget;

	/* notify netdev of completed buffers */
	netdev_tx_completed_queue(txring_txq(tx_ring),
				  total_packets, total_bytes);

#define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2))
	if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
		     (I40E_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(tx_ring->netdev,
					     tx_ring->queue_index) &&
		    !test_bit(__I40E_VSI_DOWN, vsi->state)) {
			netif_wake_subqueue(tx_ring->netdev,
					    tx_ring->queue_index);
			++tx_ring->tx_stats.restart_queue;
		}
	}

	return !!budget;
}

/**
 * i40e_enable_wb_on_itr - Arm hardware to do a wb, interrupts are not enabled
 * @vsi: the VSI we care about
 * @q_vector: the vector on which to enable writeback
 *
 **/
static void i40e_enable_wb_on_itr(struct i40e_vsi *vsi,
				  struct i40e_q_vector *q_vector)
{
	u16 flags = q_vector->tx.ring[0].flags;
	u32 val;

	if (!(flags & I40E_TXR_FLAGS_WB_ON_ITR))
		return;

	if (q_vector->arm_wb_state)
		return;

	if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
		val = I40E_PFINT_DYN_CTLN_WB_ON_ITR_MASK |
		      I40E_PFINT_DYN_CTLN_ITR_INDX_MASK; /* set noitr */

		wr32(&vsi->back->hw,
		     I40E_PFINT_DYN_CTLN(q_vector->reg_idx), val);
	} else {
		val = I40E_PFINT_DYN_CTL0_WB_ON_ITR_MASK |
		      I40E_PFINT_DYN_CTL0_ITR_INDX_MASK; /* set noitr */

		wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0, val);
	}
	q_vector->arm_wb_state = true;
}

/**
 * i40e_force_wb - Issue SW Interrupt so HW does a wb
 * @vsi: the VSI we care about
 * @q_vector: the vector on which to force writeback
 *
 **/
void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
{
	if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
		u32 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
			  I40E_PFINT_DYN_CTLN_ITR_INDX_MASK | /* set noitr */
			  I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK |
			  I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK;
			  /* allow 00 to be written to the index */

		wr32(&vsi->back->hw,
		     I40E_PFINT_DYN_CTLN(q_vector->reg_idx), val);
	} else {
		u32 val = I40E_PFINT_DYN_CTL0_INTENA_MASK |
			  I40E_PFINT_DYN_CTL0_ITR_INDX_MASK | /* set noitr */
			  I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK |
			  I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK;
			  /* allow 00 to be written to the index */

		wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0, val);
	}
}

static inline bool i40e_container_is_rx(struct i40e_q_vector *q_vector,
					struct i40e_ring_container *rc)
{
	return &q_vector->rx == rc;
}

static inline unsigned int i40e_itr_divisor(struct i40e_q_vector *q_vector)
{
	unsigned int divisor;

	switch (q_vector->vsi->back->hw.phy.link_info.link_speed) {
	case I40E_LINK_SPEED_40GB:
		divisor = I40E_ITR_ADAPTIVE_MIN_INC * 1024;
		break;
	case I40E_LINK_SPEED_25GB:
	case I40E_LINK_SPEED_20GB:
		divisor = I40E_ITR_ADAPTIVE_MIN_INC * 512;
		break;
	default:
	case I40E_LINK_SPEED_10GB:
		divisor = I40E_ITR_ADAPTIVE_MIN_INC * 256;
		break;
	case I40E_LINK_SPEED_1GB:
	case I40E_LINK_SPEED_100MB:
		divisor = I40E_ITR_ADAPTIVE_MIN_INC * 32;
		break;
	}

	return divisor;
}

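/*
 * Note (editorial) on i40e_itr_divisor(): i40e_update_itr() computes an
 * avg_wire_size value that is deliberately kept 256 times larger than needed
 * (see the comments further down). Dividing by I40E_ITR_ADAPTIVE_MIN_INC *
 * 256 and multiplying back by I40E_ITR_ADAPTIVE_MIN_INC roughly undoes that
 * scaling at 10GB link speed; the larger divisors used at 20/25/40GB yield a
 * proportionally smaller ITR (more interrupts per second), while the smaller
 * divisor at 1GB/100MB yields a larger one.
 */
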
/**
 * i40e_update_itr - update the dynamic ITR value based on statistics
 * @q_vector: structure containing interrupt and ring information
 * @rc: structure containing ring performance data
 *
 * Stores a new ITR value based on packets and byte
 * counts during the last interrupt.  The advantage of per interrupt
 * computation is faster updates and more accurate ITR for the current
 * traffic pattern.  Constants in this function were computed
 * based on theoretical maximum wire speed and thresholds were set based
 * on testing data as well as attempting to minimize response time
 * while increasing bulk throughput.
 **/
static void i40e_update_itr(struct i40e_q_vector *q_vector,
			    struct i40e_ring_container *rc)
{
	unsigned int avg_wire_size, packets, bytes, itr;
	unsigned long next_update = jiffies;

	/* If we don't have any rings just leave ourselves set for maximum
	 * possible latency so we take ourselves out of the equation.
	 */
	if (!rc->ring || !ITR_IS_DYNAMIC(rc->ring->itr_setting))
		return;

	/* For Rx we want to push the delay up and default to low latency.
	 * for Tx we want to pull the delay down and default to high latency.
	 */
	itr = i40e_container_is_rx(q_vector, rc) ?
	      I40E_ITR_ADAPTIVE_MIN_USECS | I40E_ITR_ADAPTIVE_LATENCY :
	      I40E_ITR_ADAPTIVE_MAX_USECS | I40E_ITR_ADAPTIVE_LATENCY;

	/* If we didn't update within up to 1 - 2 jiffies we can assume
	 * that either packets are coming in so slow there hasn't been
	 * any work, or that there is so much work that NAPI is dealing
	 * with interrupt moderation and we don't need to do anything.
	 */
	if (time_after(next_update, rc->next_update))
		goto clear_counts;

	/* If itr_countdown is set it means we programmed an ITR within
	 * the last 4 interrupt cycles. This has a side effect of us
	 * potentially firing an early interrupt. In order to work around
	 * this we need to throw out any data received for a few
	 * interrupts following the update.
	 */
	if (q_vector->itr_countdown) {
		itr = rc->target_itr;
		goto clear_counts;
	}

	packets = rc->total_packets;
	bytes = rc->total_bytes;

	if (i40e_container_is_rx(q_vector, rc)) {
		/* If Rx there are 1 to 4 packets and bytes are less than
		 * 9000 assume insufficient data to use bulk rate limiting
		 * approach unless Tx is already in bulk rate limiting. We
		 * are likely latency driven.
		 */
		if (packets && packets < 4 && bytes < 9000 &&
		    (q_vector->tx.target_itr & I40E_ITR_ADAPTIVE_LATENCY)) {
			itr = I40E_ITR_ADAPTIVE_LATENCY;
			goto adjust_by_size;
		}
	} else if (packets < 4) {
		/* If we have Tx and Rx ITR maxed and Tx ITR is running in
		 * bulk mode and we are receiving 4 or fewer packets just
		 * reset the ITR_ADAPTIVE_LATENCY bit for latency mode so
		 * that the Rx can relax.
		 */
		if (rc->target_itr == I40E_ITR_ADAPTIVE_MAX_USECS &&
		    (q_vector->rx.target_itr & I40E_ITR_MASK) ==
		     I40E_ITR_ADAPTIVE_MAX_USECS)
			goto clear_counts;
	} else if (packets > 32) {
		/* If we have processed over 32 packets in a single interrupt
		 * for Tx assume we need to switch over to "bulk" mode.
		 */
		rc->target_itr &= ~I40E_ITR_ADAPTIVE_LATENCY;
	}

	/* We have no packets to actually measure against. This means
	 * either one of the other queues on this vector is active or
	 * we are a Tx queue doing TSO with too high of an interrupt rate.
	 *
	 * Between 4 and 56 we can assume that our current interrupt delay
	 * is only slightly too low. As such we should increase it by a small
	 * fixed amount.
	 */
	if (packets < 56) {
		itr = rc->target_itr + I40E_ITR_ADAPTIVE_MIN_INC;
		if ((itr & I40E_ITR_MASK) > I40E_ITR_ADAPTIVE_MAX_USECS) {
			itr &= I40E_ITR_ADAPTIVE_LATENCY;
			itr += I40E_ITR_ADAPTIVE_MAX_USECS;
		}
		goto clear_counts;
	}

	if (packets <= 256) {
		itr = min(q_vector->tx.current_itr, q_vector->rx.current_itr);
		itr &= I40E_ITR_MASK;

		/* Between 56 and 112 is our "goldilocks" zone where we are
		 * working out "just right". Just report that our current
		 * ITR is good for us.
		 */
		if (packets <= 112)
			goto clear_counts;

		/* If packet count is 128 or greater we are likely looking
		 * at a slight overrun of the delay we want.  Try halving
		 * our delay to see if that will cut the number of packets
		 * in half per interrupt.
		 */
		itr /= 2;
		itr &= I40E_ITR_MASK;
		if (itr < I40E_ITR_ADAPTIVE_MIN_USECS)
			itr = I40E_ITR_ADAPTIVE_MIN_USECS;

		goto clear_counts;
	}

	/* The paths below assume we are dealing with a bulk ITR since
	 * number of packets is greater than 256. We are just going to have
	 * to compute a value and try to bring the count under control,
	 * though for smaller packet sizes there isn't much we can do as
	 * NAPI polling will likely be kicking in sooner rather than later.
	 */
	itr = I40E_ITR_ADAPTIVE_BULK;

adjust_by_size:
	/* If packet counts are 256 or greater we can assume we have a gross
	 * overestimation of what the rate should be. Instead of trying to fine
	 * tune it just use the formula below to try and dial in an exact value
	 * given the current packet size of the frame.
	 */
	avg_wire_size = bytes / packets;

	/* The following is a crude approximation of:
	 *  wmem_default / (size + overhead) = desired_pkts_per_int
	 *  rate / bits_per_byte / (size + ethernet overhead) = pkt_rate
	 *  (desired_pkt_rate / pkt_rate) * usecs_per_sec = ITR value
	 *
	 * Assuming wmem_default is 212992 and overhead is 640 bytes per
	 * packet, (256 skb, 64 headroom, 320 shared info), we can reduce the
	 * formula down to
	 *
	 *      (170 * (size + 24)) / (size + 640) = ITR
	 *
	 * We first do some math on the packet size and then finally bitshift
	 * by 8 after rounding up. We also have to account for PCIe link speed
	 * difference as ITR scales based on this.
	 */
	if (avg_wire_size <= 60) {
		/* Start at 250k ints/sec */
		avg_wire_size = 4096;
	} else if (avg_wire_size <= 380) {
		/* 250K ints/sec to 60K ints/sec */
		avg_wire_size *= 40;
		avg_wire_size += 1696;
	} else if (avg_wire_size <= 1084) {
		/* 60K ints/sec to 36K ints/sec */
		avg_wire_size *= 15;
		avg_wire_size += 11452;
	} else if (avg_wire_size <= 1980) {
		/* 36K ints/sec to 30K ints/sec */
		avg_wire_size *= 5;
		avg_wire_size += 22420;
	} else {
		/* plateau at a limit of 30K ints/sec */
		avg_wire_size = 32256;
	}

	/* If we are in low latency mode halve our delay which doubles the
	 * rate to somewhere between 100K to 16K ints/sec
	 */
	if (itr & I40E_ITR_ADAPTIVE_LATENCY)
		avg_wire_size /= 2;

	/* Resultant value is 256 times larger than it needs to be. This
	 * gives us room to adjust the value as needed to either increase
	 * or decrease the value based on link speeds of 10G, 2.5G, 1G, etc.
	 *
	 * Use addition as we have already recorded the new latency flag
	 * for the ITR value.
	 */
	itr += DIV_ROUND_UP(avg_wire_size, i40e_itr_divisor(q_vector)) *
	       I40E_ITR_ADAPTIVE_MIN_INC;

	if ((itr & I40E_ITR_MASK) > I40E_ITR_ADAPTIVE_MAX_USECS) {
		itr &= I40E_ITR_ADAPTIVE_LATENCY;
		itr += I40E_ITR_ADAPTIVE_MAX_USECS;
	}

clear_counts:
	/* write back value */
	rc->target_itr = itr;

	/* next update should occur within next jiffy */
	rc->next_update = next_update + 1;

	rc->total_bytes = 0;
	rc->total_packets = 0;
}

static struct i40e_rx_buffer *i40e_rx_bi(struct i40e_ring *rx_ring, u32 idx)
{
	return &rx_ring->rx_bi[idx];
}

/**
 * i40e_reuse_rx_page - page flip buffer and store it back on the ring
 * @rx_ring: rx descriptor ring to store buffers on
 * @old_buff: donor buffer to have page reused
 *
 * Synchronizes page for reuse by the adapter
 **/
static void i40e_reuse_rx_page(struct i40e_ring *rx_ring,
			       struct i40e_rx_buffer *old_buff)
{
	struct i40e_rx_buffer *new_buff;
	u16 nta = rx_ring->next_to_alloc;

	new_buff = i40e_rx_bi(rx_ring, nta);

	/* update, and store next to alloc */
	nta++;
	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;

	/* transfer page from old buffer to new buffer */
	new_buff->dma = old_buff->dma;
	new_buff->page = old_buff->page;
	new_buff->page_offset = old_buff->page_offset;
	new_buff->pagecnt_bias = old_buff->pagecnt_bias;

	rx_ring->rx_stats.page_reuse_count++;

	/* clear contents of buffer_info */
	old_buff->page = NULL;
}

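/*
 * Background (editorial) for i40e_clean_programming_status() below: the
 * hardware reports the outcome of Flow Director programming requests
 * (issued on the Tx side in i40e_program_fdir_filter()) as "programming
 * status" writeback descriptors on the Rx ring. The Rx clean-up path
 * recognizes those descriptors and hands them here, which is why
 * i40e_fd_handle_status() takes an Rx ring rather than a Tx ring.
 */
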
/**
 * i40e_clean_programming_status - clean the programming status descriptor
 * @rx_ring: the rx ring that has this descriptor
 * @qword0_raw: qword0
 * @qword1: qword1 representing status_error_len in CPU ordering
 *
 * Flow director should handle FD_FILTER_STATUS to check its filter programming
 * status being successful or not and take actions accordingly. FCoE should
 * handle its context/filter programming/invalidation status and take actions.
 **/
void i40e_clean_programming_status(struct i40e_ring *rx_ring, u64 qword0_raw,
				   u64 qword1)
{
	u8 id;

	id = (qword1 & I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK) >>
	     I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT;

	if (id == I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS)
		i40e_fd_handle_status(rx_ring, qword0_raw, qword1, id);
}

/**
 * i40e_setup_tx_descriptors - Allocate the Tx descriptors
 * @tx_ring: the tx ring to set up
 *
 * Return 0 on success, negative on error
 **/
int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring)
{
	struct device *dev = tx_ring->dev;
	int bi_size;

	if (!dev)
		return -ENOMEM;

	/* warn if we are about to overwrite the pointer */
	WARN_ON(tx_ring->tx_bi);
	bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
	tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL);
	if (!tx_ring->tx_bi)
		goto err;

	if (ring_is_xdp(tx_ring)) {
		tx_ring->xsk_descs = kcalloc(I40E_MAX_NUM_DESCRIPTORS, sizeof(*tx_ring->xsk_descs),
					     GFP_KERNEL);
		if (!tx_ring->xsk_descs)
			goto err;
	}

	u64_stats_init(&tx_ring->syncp);

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc);
	/* add u32 for head writeback, align after this takes care of
	 * guaranteeing this is at least one cache line in size
	 */
	tx_ring->size += sizeof(u32);
	tx_ring->size = ALIGN(tx_ring->size, 4096);
	tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
					   &tx_ring->dma, GFP_KERNEL);
	if (!tx_ring->desc) {
		dev_info(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
			 tx_ring->size);
		goto err;
	}

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	tx_ring->tx_stats.prev_pkt_ctr = -1;
	return 0;

err:
	kfree(tx_ring->xsk_descs);
	tx_ring->xsk_descs = NULL;
	kfree(tx_ring->tx_bi);
	tx_ring->tx_bi = NULL;
	return -ENOMEM;
}

int i40e_alloc_rx_bi(struct i40e_ring *rx_ring)
{
	unsigned long sz = sizeof(*rx_ring->rx_bi) * rx_ring->count;

	rx_ring->rx_bi = kzalloc(sz, GFP_KERNEL);
	return rx_ring->rx_bi ? 0 : -ENOMEM;
}

static void i40e_clear_rx_bi(struct i40e_ring *rx_ring)
{
	memset(rx_ring->rx_bi, 0, sizeof(*rx_ring->rx_bi) * rx_ring->count);
}

/**
 * i40e_clean_rx_ring - Free Rx buffers
 * @rx_ring: ring to be cleaned
 **/
void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
{
	u16 i;

	/* ring already cleared, nothing to do */
	if (!rx_ring->rx_bi)
		return;

	if (rx_ring->skb) {
		dev_kfree_skb(rx_ring->skb);
		rx_ring->skb = NULL;
	}

	if (rx_ring->xsk_pool) {
		i40e_xsk_clean_rx_ring(rx_ring);
		goto skip_free;
	}

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		struct i40e_rx_buffer *rx_bi = i40e_rx_bi(rx_ring, i);

		if (!rx_bi->page)
			continue;

		/* Invalidate cache lines that may have been written to by
		 * device so that we avoid corrupting memory.
		 */
		dma_sync_single_range_for_cpu(rx_ring->dev,
					      rx_bi->dma,
					      rx_bi->page_offset,
					      rx_ring->rx_buf_len,
					      DMA_FROM_DEVICE);

		/* free resources associated with mapping */
		dma_unmap_page_attrs(rx_ring->dev, rx_bi->dma,
				     i40e_rx_pg_size(rx_ring),
				     DMA_FROM_DEVICE,
				     I40E_RX_DMA_ATTR);

		__page_frag_cache_drain(rx_bi->page, rx_bi->pagecnt_bias);

		rx_bi->page = NULL;
		rx_bi->page_offset = 0;
	}

skip_free:
	if (rx_ring->xsk_pool)
		i40e_clear_rx_bi_zc(rx_ring);
	else
		i40e_clear_rx_bi(rx_ring);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_alloc = 0;
	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
}

/**
 * i40e_free_rx_resources - Free Rx resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/
void i40e_free_rx_resources(struct i40e_ring *rx_ring)
{
	i40e_clean_rx_ring(rx_ring);
	if (rx_ring->vsi->type == I40E_VSI_MAIN)
		xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
	rx_ring->xdp_prog = NULL;
	kfree(rx_ring->rx_bi);
	rx_ring->rx_bi = NULL;

	if (rx_ring->desc) {
		dma_free_coherent(rx_ring->dev, rx_ring->size,
				  rx_ring->desc, rx_ring->dma);
		rx_ring->desc = NULL;
	}
}

/**
 * i40e_setup_rx_descriptors - Allocate Rx descriptors
 * @rx_ring: Rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/
int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	int err;

	u64_stats_init(&rx_ring->syncp);

	/* Round up to nearest 4K */
	rx_ring->size = rx_ring->count * sizeof(union i40e_rx_desc);
	rx_ring->size = ALIGN(rx_ring->size, 4096);
	rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
					   &rx_ring->dma, GFP_KERNEL);

	if (!rx_ring->desc) {
		dev_info(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
			 rx_ring->size);
		return -ENOMEM;
	}

	rx_ring->next_to_alloc = 0;
	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	/* XDP RX-queue info only needed for RX rings exposed to XDP */
	if (rx_ring->vsi->type == I40E_VSI_MAIN) {
		err = xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev,
				       rx_ring->queue_index, rx_ring->q_vector->napi.napi_id);
		if (err < 0)
			return err;
	}

	rx_ring->xdp_prog = rx_ring->vsi->xdp_prog;

	return 0;
}

/**
 * i40e_release_rx_desc - Store the new tail and head values
 * @rx_ring: ring to bump
 * @val: new head index
 **/
void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
{
	rx_ring->next_to_use = val;

	/* update next to alloc since we have filled the ring */
	rx_ring->next_to_alloc = val;

	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();
	writel(val, rx_ring->tail);
}

static unsigned int i40e_rx_frame_truesize(struct i40e_ring *rx_ring,
					   unsigned int size)
{
	unsigned int truesize;

#if (PAGE_SIZE < 8192)
	truesize = i40e_rx_pg_size(rx_ring) / 2; /* Must be power-of-2 */
#else
	truesize = rx_ring->rx_offset ?
		SKB_DATA_ALIGN(size + rx_ring->rx_offset) +
		SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
		SKB_DATA_ALIGN(size);
#endif
	return truesize;
}

/**
 * i40e_alloc_mapped_page - recycle or make a new page
 * @rx_ring: ring to use
 * @bi: rx_buffer struct to modify
 *
 * Returns true if the page was successfully allocated or
 * reused.
 **/
static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring,
				   struct i40e_rx_buffer *bi)
{
	struct page *page = bi->page;
	dma_addr_t dma;

	/* since we are recycling buffers we should seldom need to alloc */
	if (likely(page)) {
		rx_ring->rx_stats.page_reuse_count++;
		return true;
	}

	/* alloc new page for storage */
	page = dev_alloc_pages(i40e_rx_pg_order(rx_ring));
	if (unlikely(!page)) {
		rx_ring->rx_stats.alloc_page_failed++;
		return false;
	}

	/* map page for use */
	dma = dma_map_page_attrs(rx_ring->dev, page, 0,
				 i40e_rx_pg_size(rx_ring),
				 DMA_FROM_DEVICE,
				 I40E_RX_DMA_ATTR);

	/* if mapping failed free memory back to system since
	 * there isn't much point in holding memory we can't use
	 */
	if (dma_mapping_error(rx_ring->dev, dma)) {
		__free_pages(page, i40e_rx_pg_order(rx_ring));
		rx_ring->rx_stats.alloc_page_failed++;
		return false;
	}

	bi->dma = dma;
	bi->page = page;
	bi->page_offset = rx_ring->rx_offset;
	page_ref_add(page, USHRT_MAX - 1);
	bi->pagecnt_bias = USHRT_MAX;

	return true;
}

/**
 * i40e_alloc_rx_buffers - Replace used receive buffers
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
 *
 * Returns false if all allocations were successful, true if any fail
 **/
bool i40e_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
{
	u16 ntu = rx_ring->next_to_use;
	union i40e_rx_desc *rx_desc;
	struct i40e_rx_buffer *bi;

	/* do nothing if no valid netdev defined */
	if (!rx_ring->netdev || !cleaned_count)
		return false;

	rx_desc = I40E_RX_DESC(rx_ring, ntu);
	bi = i40e_rx_bi(rx_ring, ntu);

	do {
		if (!i40e_alloc_mapped_page(rx_ring, bi))
			goto no_buffers;

		/* sync the buffer for use by the device */
		dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
						 bi->page_offset,
						 rx_ring->rx_buf_len,
						 DMA_FROM_DEVICE);

		/* Refresh the desc even if buffer_addrs didn't change
		 * because each write-back erases this info.
		 */
		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);

		rx_desc++;
		bi++;
		ntu++;
		if (unlikely(ntu == rx_ring->count)) {
			rx_desc = I40E_RX_DESC(rx_ring, 0);
			bi = i40e_rx_bi(rx_ring, 0);
			ntu = 0;
		}

		/* clear the status bits for the next_to_use descriptor */
		rx_desc->wb.qword1.status_error_len = 0;

		cleaned_count--;
	} while (cleaned_count);

	if (rx_ring->next_to_use != ntu)
		i40e_release_rx_desc(rx_ring, ntu);

	return false;

no_buffers:
	if (rx_ring->next_to_use != ntu)
		i40e_release_rx_desc(rx_ring, ntu);

	/* make sure to come back via polling to try again after
	 * allocation failure
	 */
	return true;
}

/**
 * i40e_rx_checksum - Indicate in skb if hw indicated a good cksum
 * @vsi: the VSI we care about
 * @skb: skb currently being received and modified
 * @rx_desc: the receive descriptor
 **/
static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
				    struct sk_buff *skb,
				    union i40e_rx_desc *rx_desc)
{
	struct i40e_rx_ptype_decoded decoded;
	u32 rx_error, rx_status;
	bool ipv4, ipv6;
	u8 ptype;
	u64 qword;

	qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
	ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT;
	rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
		   I40E_RXD_QW1_ERROR_SHIFT;
	rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
		    I40E_RXD_QW1_STATUS_SHIFT;
	decoded = decode_rx_desc_ptype(ptype);

	skb->ip_summed = CHECKSUM_NONE;

	skb_checksum_none_assert(skb);

	/* Rx csum enabled and ip headers found? */
	if (!(vsi->netdev->features & NETIF_F_RXCSUM))
		return;

	/* did the hardware decode the packet and checksum? */
	if (!(rx_status & BIT(I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
		return;

	/* both known and outer_ip must be set for the below code to work */
	if (!(decoded.known && decoded.outer_ip))
		return;

	ipv4 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) &&
	       (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4);
	ipv6 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) &&
	       (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6);

	if (ipv4 &&
	    (rx_error & (BIT(I40E_RX_DESC_ERROR_IPE_SHIFT) |
			 BIT(I40E_RX_DESC_ERROR_EIPE_SHIFT))))
		goto checksum_fail;

	/* likely incorrect csum if alternate IP extension headers found */
	if (ipv6 &&
	    rx_status & BIT(I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
		/* don't increment checksum err here, non-fatal err */
		return;

	/* there was some L4 error, count error and punt packet to the stack */
	if (rx_error & BIT(I40E_RX_DESC_ERROR_L4E_SHIFT))
		goto checksum_fail;

	/* handle packets that were not able to be checksummed due
	 * to arrival speed, in this case the stack can compute
	 * the csum.
	 */
	if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT))
		return;

	/* If there is an outer header present that might contain a checksum
	 * we need to bump the checksum level by 1 to reflect the fact that
	 * we are indicating we validated the inner checksum.
	 */
	if (decoded.tunnel_type >= I40E_RX_PTYPE_TUNNEL_IP_GRENAT)
		skb->csum_level = 1;

	/* Only report checksum unnecessary for TCP, UDP, or SCTP */
	switch (decoded.inner_prot) {
	case I40E_RX_PTYPE_INNER_PROT_TCP:
	case I40E_RX_PTYPE_INNER_PROT_UDP:
	case I40E_RX_PTYPE_INNER_PROT_SCTP:
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		fallthrough;
	default:
		break;
	}

	return;

checksum_fail:
	vsi->back->hw_csum_rx_error++;
}

/**
 * i40e_ptype_to_htype - get a hash type
 * @ptype: the ptype value from the descriptor
 *
 * Returns a hash type to be used by skb_set_hash
 **/
static inline int i40e_ptype_to_htype(u8 ptype)
{
	struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);

	if (!decoded.known)
		return PKT_HASH_TYPE_NONE;

	if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
	    decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4)
		return PKT_HASH_TYPE_L4;
	else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
		 decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3)
		return PKT_HASH_TYPE_L3;
	else
		return PKT_HASH_TYPE_L2;
}

/**
 * i40e_rx_hash - set the hash value in the skb
 * @ring: descriptor ring
 * @rx_desc: specific descriptor
 * @skb: skb currently being received and modified
 * @rx_ptype: Rx packet type
 **/
static inline void i40e_rx_hash(struct i40e_ring *ring,
				union i40e_rx_desc *rx_desc,
				struct sk_buff *skb,
				u8 rx_ptype)
{
	u32 hash;
	const __le64 rss_mask =
		cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
			    I40E_RX_DESC_STATUS_FLTSTAT_SHIFT);

	if (!(ring->netdev->features & NETIF_F_RXHASH))
		return;

	if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) {
		hash = le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
		skb_set_hash(skb, hash, i40e_ptype_to_htype(rx_ptype));
	}
}

/**
 * i40e_process_skb_fields - Populate skb header fields from Rx descriptor
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being populated
 *
 * This function checks the ring, descriptor, and packet information in
 * order to populate the hash, checksum, VLAN, protocol, and
 * other fields within the skb.
1916 **/ 1917 void i40e_process_skb_fields(struct i40e_ring *rx_ring, 1918 union i40e_rx_desc *rx_desc, struct sk_buff *skb) 1919 { 1920 u64 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); 1921 u32 rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >> 1922 I40E_RXD_QW1_STATUS_SHIFT; 1923 u32 tsynvalid = rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK; 1924 u32 tsyn = (rx_status & I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >> 1925 I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT; 1926 u8 rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> 1927 I40E_RXD_QW1_PTYPE_SHIFT; 1928 1929 if (unlikely(tsynvalid)) 1930 i40e_ptp_rx_hwtstamp(rx_ring->vsi->back, skb, tsyn); 1931 1932 i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype); 1933 1934 i40e_rx_checksum(rx_ring->vsi, skb, rx_desc); 1935 1936 skb_record_rx_queue(skb, rx_ring->queue_index); 1937 1938 if (qword & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) { 1939 __le16 vlan_tag = rx_desc->wb.qword0.lo_dword.l2tag1; 1940 1941 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), 1942 le16_to_cpu(vlan_tag)); 1943 } 1944 1945 /* modifies the skb - consumes the enet header */ 1946 skb->protocol = eth_type_trans(skb, rx_ring->netdev); 1947 } 1948 1949 /** 1950 * i40e_cleanup_headers - Correct empty headers 1951 * @rx_ring: rx descriptor ring packet is being transacted on 1952 * @skb: pointer to current skb being fixed 1953 * @rx_desc: pointer to the EOP Rx descriptor 1954 * 1955 * In addition if skb is not at least 60 bytes we need to pad it so that 1956 * it is large enough to qualify as a valid Ethernet frame. 1957 * 1958 * Returns true if an error was encountered and skb was freed. 1959 **/ 1960 static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb, 1961 union i40e_rx_desc *rx_desc) 1962 1963 { 1964 /* ERR_MASK will only have valid bits if EOP set, and 1965 * what we are doing here is actually checking 1966 * I40E_RX_DESC_ERROR_RXE_SHIFT, since it is the zeroth bit in 1967 * the error field 1968 */ 1969 if (unlikely(i40e_test_staterr(rx_desc, 1970 BIT(I40E_RXD_QW1_ERROR_SHIFT)))) { 1971 dev_kfree_skb_any(skb); 1972 return true; 1973 } 1974 1975 /* if eth_skb_pad returns an error the skb was freed */ 1976 if (eth_skb_pad(skb)) 1977 return true; 1978 1979 return false; 1980 } 1981 1982 /** 1983 * i40e_can_reuse_rx_page - Determine if page can be reused for another Rx 1984 * @rx_buffer: buffer containing the page 1985 * @rx_buffer_pgcnt: buffer page refcount pre xdp_do_redirect() call 1986 * 1987 * If page is reusable, we have a green light for calling i40e_reuse_rx_page, 1988 * which will assign the current buffer to the buffer that next_to_alloc is 1989 * pointing to; otherwise, the DMA mapping needs to be destroyed and 1990 * page freed 1991 */ 1992 static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer, 1993 int rx_buffer_pgcnt) 1994 { 1995 unsigned int pagecnt_bias = rx_buffer->pagecnt_bias; 1996 struct page *page = rx_buffer->page; 1997 1998 /* Is any reuse possible? 
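 * dev_page_is_reusable() refuses pages that were pulled from the
 * pfmemalloc emergency reserves or that belong to a remote NUMA node.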
*/ 1999 if (!dev_page_is_reusable(page)) 2000 return false; 2001 2002 #if (PAGE_SIZE < 8192) 2003 /* if we are only owner of page we can reuse it */ 2004 if (unlikely((rx_buffer_pgcnt - pagecnt_bias) > 1)) 2005 return false; 2006 #else 2007 #define I40E_LAST_OFFSET \ 2008 (SKB_WITH_OVERHEAD(PAGE_SIZE) - I40E_RXBUFFER_2048) 2009 if (rx_buffer->page_offset > I40E_LAST_OFFSET) 2010 return false; 2011 #endif 2012 2013 /* If we have drained the page fragment pool we need to update 2014 * the pagecnt_bias and page count so that we fully restock the 2015 * number of references the driver holds. 2016 */ 2017 if (unlikely(pagecnt_bias == 1)) { 2018 page_ref_add(page, USHRT_MAX - 1); 2019 rx_buffer->pagecnt_bias = USHRT_MAX; 2020 } 2021 2022 return true; 2023 } 2024 2025 /** 2026 * i40e_add_rx_frag - Add contents of Rx buffer to sk_buff 2027 * @rx_ring: rx descriptor ring to transact packets on 2028 * @rx_buffer: buffer containing page to add 2029 * @skb: sk_buff to place the data into 2030 * @size: packet length from rx_desc 2031 * 2032 * This function will add the data contained in rx_buffer->page to the skb. 2033 * It will just attach the page as a frag to the skb. 2034 * 2035 * The function will then update the page offset. 2036 **/ 2037 static void i40e_add_rx_frag(struct i40e_ring *rx_ring, 2038 struct i40e_rx_buffer *rx_buffer, 2039 struct sk_buff *skb, 2040 unsigned int size) 2041 { 2042 #if (PAGE_SIZE < 8192) 2043 unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2; 2044 #else 2045 unsigned int truesize = SKB_DATA_ALIGN(size + rx_ring->rx_offset); 2046 #endif 2047 2048 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page, 2049 rx_buffer->page_offset, size, truesize); 2050 2051 /* page is being used so we must update the page offset */ 2052 #if (PAGE_SIZE < 8192) 2053 rx_buffer->page_offset ^= truesize; 2054 #else 2055 rx_buffer->page_offset += truesize; 2056 #endif 2057 } 2058 2059 /** 2060 * i40e_get_rx_buffer - Fetch Rx buffer and synchronize data for use 2061 * @rx_ring: rx descriptor ring to transact packets on 2062 * @size: size of buffer to add to skb 2063 * @rx_buffer_pgcnt: buffer page refcount 2064 * 2065 * This function will pull an Rx buffer from the ring and synchronize it 2066 * for use by the CPU. 2067 */ 2068 static struct i40e_rx_buffer *i40e_get_rx_buffer(struct i40e_ring *rx_ring, 2069 const unsigned int size, 2070 int *rx_buffer_pgcnt) 2071 { 2072 struct i40e_rx_buffer *rx_buffer; 2073 2074 rx_buffer = i40e_rx_bi(rx_ring, rx_ring->next_to_clean); 2075 *rx_buffer_pgcnt = 2076 #if (PAGE_SIZE < 8192) 2077 page_count(rx_buffer->page); 2078 #else 2079 0; 2080 #endif 2081 prefetch_page_address(rx_buffer->page); 2082 2083 /* we are reusing so sync this buffer for CPU use */ 2084 dma_sync_single_range_for_cpu(rx_ring->dev, 2085 rx_buffer->dma, 2086 rx_buffer->page_offset, 2087 size, 2088 DMA_FROM_DEVICE); 2089 2090 /* We have pulled a buffer for use, so decrement pagecnt_bias */ 2091 rx_buffer->pagecnt_bias--; 2092 2093 return rx_buffer; 2094 } 2095 2096 /** 2097 * i40e_construct_skb - Allocate skb and populate it 2098 * @rx_ring: rx descriptor ring to transact packets on 2099 * @rx_buffer: rx buffer to pull data from 2100 * @xdp: xdp_buff pointing to the data 2101 * 2102 * This function allocates an skb. It then populates it with the page 2103 * data from the current receive descriptor, taking care to set up the 2104 * skb correctly. 
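 *
 * Up to I40E_RX_HDR_SIZE bytes of headers are copied into the skb's
 * linear area; any remaining payload stays in the page and is attached
 * to the skb as a page fragment.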
2105 */ 2106 static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring, 2107 struct i40e_rx_buffer *rx_buffer, 2108 struct xdp_buff *xdp) 2109 { 2110 unsigned int size = xdp->data_end - xdp->data; 2111 #if (PAGE_SIZE < 8192) 2112 unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2; 2113 #else 2114 unsigned int truesize = SKB_DATA_ALIGN(size); 2115 #endif 2116 unsigned int headlen; 2117 struct sk_buff *skb; 2118 2119 /* prefetch first cache line of first page */ 2120 net_prefetch(xdp->data); 2121 2122 /* Note, we get here by enabling legacy-rx via: 2123 * 2124 * ethtool --set-priv-flags <dev> legacy-rx on 2125 * 2126 * In this mode, we currently get 0 extra XDP headroom as 2127 * opposed to having legacy-rx off, where we process XDP 2128 * packets going to stack via i40e_build_skb(). The latter 2129 * provides us currently with 192 bytes of headroom. 2130 * 2131 * For i40e_construct_skb() mode it means that the 2132 * xdp->data_meta will always point to xdp->data, since 2133 * the helper cannot expand the head. Should this ever 2134 * change in future for legacy-rx mode on, then lets also 2135 * add xdp->data_meta handling here. 2136 */ 2137 2138 /* allocate a skb to store the frags */ 2139 skb = __napi_alloc_skb(&rx_ring->q_vector->napi, 2140 I40E_RX_HDR_SIZE, 2141 GFP_ATOMIC | __GFP_NOWARN); 2142 if (unlikely(!skb)) 2143 return NULL; 2144 2145 /* Determine available headroom for copy */ 2146 headlen = size; 2147 if (headlen > I40E_RX_HDR_SIZE) 2148 headlen = eth_get_headlen(skb->dev, xdp->data, 2149 I40E_RX_HDR_SIZE); 2150 2151 /* align pull length to size of long to optimize memcpy performance */ 2152 memcpy(__skb_put(skb, headlen), xdp->data, 2153 ALIGN(headlen, sizeof(long))); 2154 2155 /* update all of the pointers */ 2156 size -= headlen; 2157 if (size) { 2158 skb_add_rx_frag(skb, 0, rx_buffer->page, 2159 rx_buffer->page_offset + headlen, 2160 size, truesize); 2161 2162 /* buffer is used by skb, update page_offset */ 2163 #if (PAGE_SIZE < 8192) 2164 rx_buffer->page_offset ^= truesize; 2165 #else 2166 rx_buffer->page_offset += truesize; 2167 #endif 2168 } else { 2169 /* buffer is unused, reset bias back to rx_buffer */ 2170 rx_buffer->pagecnt_bias++; 2171 } 2172 2173 return skb; 2174 } 2175 2176 /** 2177 * i40e_build_skb - Build skb around an existing buffer 2178 * @rx_ring: Rx descriptor ring to transact packets on 2179 * @rx_buffer: Rx buffer to pull data from 2180 * @xdp: xdp_buff pointing to the data 2181 * 2182 * This function builds an skb around an existing Rx buffer, taking care 2183 * to set up the skb correctly and avoid any memcpy overhead. 2184 */ 2185 static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring, 2186 struct i40e_rx_buffer *rx_buffer, 2187 struct xdp_buff *xdp) 2188 { 2189 unsigned int metasize = xdp->data - xdp->data_meta; 2190 #if (PAGE_SIZE < 8192) 2191 unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2; 2192 #else 2193 unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + 2194 SKB_DATA_ALIGN(xdp->data_end - 2195 xdp->data_hard_start); 2196 #endif 2197 struct sk_buff *skb; 2198 2199 /* Prefetch first cache line of first page. If xdp->data_meta 2200 * is unused, this points exactly as xdp->data, otherwise we 2201 * likely have a consumer accessing first few bytes of meta 2202 * data, and then actual data. 
2203 */ 2204 net_prefetch(xdp->data_meta); 2205 2206 /* build an skb around the page buffer */ 2207 skb = build_skb(xdp->data_hard_start, truesize); 2208 if (unlikely(!skb)) 2209 return NULL; 2210 2211 /* update pointers within the skb to store the data */ 2212 skb_reserve(skb, xdp->data - xdp->data_hard_start); 2213 __skb_put(skb, xdp->data_end - xdp->data); 2214 if (metasize) 2215 skb_metadata_set(skb, metasize); 2216 2217 /* buffer is used by skb, update page_offset */ 2218 #if (PAGE_SIZE < 8192) 2219 rx_buffer->page_offset ^= truesize; 2220 #else 2221 rx_buffer->page_offset += truesize; 2222 #endif 2223 2224 return skb; 2225 } 2226 2227 /** 2228 * i40e_put_rx_buffer - Clean up used buffer and either recycle or free 2229 * @rx_ring: rx descriptor ring to transact packets on 2230 * @rx_buffer: rx buffer to pull data from 2231 * @rx_buffer_pgcnt: rx buffer page refcount pre xdp_do_redirect() call 2232 * 2233 * This function will clean up the contents of the rx_buffer. It will 2234 * either recycle the buffer or unmap it and free the associated resources. 2235 */ 2236 static void i40e_put_rx_buffer(struct i40e_ring *rx_ring, 2237 struct i40e_rx_buffer *rx_buffer, 2238 int rx_buffer_pgcnt) 2239 { 2240 if (i40e_can_reuse_rx_page(rx_buffer, rx_buffer_pgcnt)) { 2241 /* hand second half of page back to the ring */ 2242 i40e_reuse_rx_page(rx_ring, rx_buffer); 2243 } else { 2244 /* we are not reusing the buffer so unmap it */ 2245 dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, 2246 i40e_rx_pg_size(rx_ring), 2247 DMA_FROM_DEVICE, I40E_RX_DMA_ATTR); 2248 __page_frag_cache_drain(rx_buffer->page, 2249 rx_buffer->pagecnt_bias); 2250 /* clear contents of buffer_info */ 2251 rx_buffer->page = NULL; 2252 } 2253 } 2254 2255 /** 2256 * i40e_is_non_eop - process handling of non-EOP buffers 2257 * @rx_ring: Rx ring being processed 2258 * @rx_desc: Rx descriptor for current buffer 2259 * 2260 * If the buffer is an EOP buffer, this function exits returning false, 2261 * otherwise return true indicating that this is in fact a non-EOP buffer. 
2262 */ 2263 static bool i40e_is_non_eop(struct i40e_ring *rx_ring, 2264 union i40e_rx_desc *rx_desc) 2265 { 2266 /* if we are the last buffer then there is nothing else to do */ 2267 #define I40E_RXD_EOF BIT(I40E_RX_DESC_STATUS_EOF_SHIFT) 2268 if (likely(i40e_test_staterr(rx_desc, I40E_RXD_EOF))) 2269 return false; 2270 2271 rx_ring->rx_stats.non_eop_descs++; 2272 2273 return true; 2274 } 2275 2276 static int i40e_xmit_xdp_ring(struct xdp_frame *xdpf, 2277 struct i40e_ring *xdp_ring); 2278 2279 int i40e_xmit_xdp_tx_ring(struct xdp_buff *xdp, struct i40e_ring *xdp_ring) 2280 { 2281 struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp); 2282 2283 if (unlikely(!xdpf)) 2284 return I40E_XDP_CONSUMED; 2285 2286 return i40e_xmit_xdp_ring(xdpf, xdp_ring); 2287 } 2288 2289 /** 2290 * i40e_run_xdp - run an XDP program 2291 * @rx_ring: Rx ring being processed 2292 * @xdp: XDP buffer containing the frame 2293 **/ 2294 static int i40e_run_xdp(struct i40e_ring *rx_ring, struct xdp_buff *xdp) 2295 { 2296 int err, result = I40E_XDP_PASS; 2297 struct i40e_ring *xdp_ring; 2298 struct bpf_prog *xdp_prog; 2299 u32 act; 2300 2301 xdp_prog = READ_ONCE(rx_ring->xdp_prog); 2302 2303 if (!xdp_prog) 2304 goto xdp_out; 2305 2306 prefetchw(xdp->data_hard_start); /* xdp_frame write */ 2307 2308 act = bpf_prog_run_xdp(xdp_prog, xdp); 2309 switch (act) { 2310 case XDP_PASS: 2311 break; 2312 case XDP_TX: 2313 xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index]; 2314 result = i40e_xmit_xdp_tx_ring(xdp, xdp_ring); 2315 if (result == I40E_XDP_CONSUMED) 2316 goto out_failure; 2317 break; 2318 case XDP_REDIRECT: 2319 err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog); 2320 if (err) 2321 goto out_failure; 2322 result = I40E_XDP_REDIR; 2323 break; 2324 default: 2325 bpf_warn_invalid_xdp_action(act); 2326 fallthrough; 2327 case XDP_ABORTED: 2328 out_failure: 2329 trace_xdp_exception(rx_ring->netdev, xdp_prog, act); 2330 fallthrough; /* handle aborts by dropping packet */ 2331 case XDP_DROP: 2332 result = I40E_XDP_CONSUMED; 2333 break; 2334 } 2335 xdp_out: 2336 return result; 2337 } 2338 2339 /** 2340 * i40e_rx_buffer_flip - adjusted rx_buffer to point to an unused region 2341 * @rx_ring: Rx ring 2342 * @rx_buffer: Rx buffer to adjust 2343 * @size: Size of adjustment 2344 **/ 2345 static void i40e_rx_buffer_flip(struct i40e_ring *rx_ring, 2346 struct i40e_rx_buffer *rx_buffer, 2347 unsigned int size) 2348 { 2349 unsigned int truesize = i40e_rx_frame_truesize(rx_ring, size); 2350 2351 #if (PAGE_SIZE < 8192) 2352 rx_buffer->page_offset ^= truesize; 2353 #else 2354 rx_buffer->page_offset += truesize; 2355 #endif 2356 } 2357 2358 /** 2359 * i40e_xdp_ring_update_tail - Updates the XDP Tx ring tail register 2360 * @xdp_ring: XDP Tx ring 2361 * 2362 * This function updates the XDP Tx ring tail register. 2363 **/ 2364 void i40e_xdp_ring_update_tail(struct i40e_ring *xdp_ring) 2365 { 2366 /* Force memory writes to complete before letting h/w 2367 * know there are new descriptors to fetch. 2368 */ 2369 wmb(); 2370 writel_relaxed(xdp_ring->next_to_use, xdp_ring->tail); 2371 } 2372 2373 /** 2374 * i40e_update_rx_stats - Update Rx ring statistics 2375 * @rx_ring: rx descriptor ring 2376 * @total_rx_bytes: number of bytes received 2377 * @total_rx_packets: number of packets received 2378 * 2379 * This function updates the Rx ring statistics. 
2380 **/ 2381 void i40e_update_rx_stats(struct i40e_ring *rx_ring, 2382 unsigned int total_rx_bytes, 2383 unsigned int total_rx_packets) 2384 { 2385 u64_stats_update_begin(&rx_ring->syncp); 2386 rx_ring->stats.packets += total_rx_packets; 2387 rx_ring->stats.bytes += total_rx_bytes; 2388 u64_stats_update_end(&rx_ring->syncp); 2389 rx_ring->q_vector->rx.total_packets += total_rx_packets; 2390 rx_ring->q_vector->rx.total_bytes += total_rx_bytes; 2391 } 2392 2393 /** 2394 * i40e_finalize_xdp_rx - Bump XDP Tx tail and/or flush redirect map 2395 * @rx_ring: Rx ring 2396 * @xdp_res: Result of the receive batch 2397 * 2398 * This function bumps XDP Tx tail and/or flush redirect map, and 2399 * should be called when a batch of packets has been processed in the 2400 * napi loop. 2401 **/ 2402 void i40e_finalize_xdp_rx(struct i40e_ring *rx_ring, unsigned int xdp_res) 2403 { 2404 if (xdp_res & I40E_XDP_REDIR) 2405 xdp_do_flush_map(); 2406 2407 if (xdp_res & I40E_XDP_TX) { 2408 struct i40e_ring *xdp_ring = 2409 rx_ring->vsi->xdp_rings[rx_ring->queue_index]; 2410 2411 i40e_xdp_ring_update_tail(xdp_ring); 2412 } 2413 } 2414 2415 /** 2416 * i40e_inc_ntc: Advance the next_to_clean index 2417 * @rx_ring: Rx ring 2418 **/ 2419 static void i40e_inc_ntc(struct i40e_ring *rx_ring) 2420 { 2421 u32 ntc = rx_ring->next_to_clean + 1; 2422 2423 ntc = (ntc < rx_ring->count) ? ntc : 0; 2424 rx_ring->next_to_clean = ntc; 2425 prefetch(I40E_RX_DESC(rx_ring, ntc)); 2426 } 2427 2428 /** 2429 * i40e_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf 2430 * @rx_ring: rx descriptor ring to transact packets on 2431 * @budget: Total limit on number of packets to process 2432 * 2433 * This function provides a "bounce buffer" approach to Rx interrupt 2434 * processing. The advantage to this is that on systems that have 2435 * expensive overhead for IOMMU access this provides a means of avoiding 2436 * it by maintaining the mapping of the page to the system. 
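 * Pages stay DMA-mapped for their whole lifetime and are only synced
 * for CPU access per packet, so the costly map/unmap work happens once
 * per page rather than once per frame.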
2437 * 2438 * Returns amount of work completed 2439 **/ 2440 static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) 2441 { 2442 unsigned int total_rx_bytes = 0, total_rx_packets = 0, frame_sz = 0; 2443 u16 cleaned_count = I40E_DESC_UNUSED(rx_ring); 2444 unsigned int offset = rx_ring->rx_offset; 2445 struct sk_buff *skb = rx_ring->skb; 2446 unsigned int xdp_xmit = 0; 2447 bool failure = false; 2448 struct xdp_buff xdp; 2449 int xdp_res = 0; 2450 2451 #if (PAGE_SIZE < 8192) 2452 frame_sz = i40e_rx_frame_truesize(rx_ring, 0); 2453 #endif 2454 xdp_init_buff(&xdp, frame_sz, &rx_ring->xdp_rxq); 2455 2456 while (likely(total_rx_packets < (unsigned int)budget)) { 2457 struct i40e_rx_buffer *rx_buffer; 2458 union i40e_rx_desc *rx_desc; 2459 int rx_buffer_pgcnt; 2460 unsigned int size; 2461 u64 qword; 2462 2463 /* return some buffers to hardware, one at a time is too slow */ 2464 if (cleaned_count >= I40E_RX_BUFFER_WRITE) { 2465 failure = failure || 2466 i40e_alloc_rx_buffers(rx_ring, cleaned_count); 2467 cleaned_count = 0; 2468 } 2469 2470 rx_desc = I40E_RX_DESC(rx_ring, rx_ring->next_to_clean); 2471 2472 /* status_error_len will always be zero for unused descriptors 2473 * because it's cleared in cleanup, and overlaps with hdr_addr 2474 * which is always zero because packet split isn't used, if the 2475 * hardware wrote DD then the length will be non-zero 2476 */ 2477 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); 2478 2479 /* This memory barrier is needed to keep us from reading 2480 * any other fields out of the rx_desc until we have 2481 * verified the descriptor has been written back. 2482 */ 2483 dma_rmb(); 2484 2485 if (i40e_rx_is_programming_status(qword)) { 2486 i40e_clean_programming_status(rx_ring, 2487 rx_desc->raw.qword[0], 2488 qword); 2489 rx_buffer = i40e_rx_bi(rx_ring, rx_ring->next_to_clean); 2490 i40e_inc_ntc(rx_ring); 2491 i40e_reuse_rx_page(rx_ring, rx_buffer); 2492 cleaned_count++; 2493 continue; 2494 } 2495 2496 size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >> 2497 I40E_RXD_QW1_LENGTH_PBUF_SHIFT; 2498 if (!size) 2499 break; 2500 2501 i40e_trace(clean_rx_irq, rx_ring, rx_desc, skb); 2502 rx_buffer = i40e_get_rx_buffer(rx_ring, size, &rx_buffer_pgcnt); 2503 2504 /* retrieve a buffer from the ring */ 2505 if (!skb) { 2506 unsigned char *hard_start; 2507 2508 hard_start = page_address(rx_buffer->page) + 2509 rx_buffer->page_offset - offset; 2510 xdp_prepare_buff(&xdp, hard_start, offset, size, true); 2511 #if (PAGE_SIZE > 4096) 2512 /* At larger PAGE_SIZE, frame_sz depend on len size */ 2513 xdp.frame_sz = i40e_rx_frame_truesize(rx_ring, size); 2514 #endif 2515 xdp_res = i40e_run_xdp(rx_ring, &xdp); 2516 } 2517 2518 if (xdp_res) { 2519 if (xdp_res & (I40E_XDP_TX | I40E_XDP_REDIR)) { 2520 xdp_xmit |= xdp_res; 2521 i40e_rx_buffer_flip(rx_ring, rx_buffer, size); 2522 } else { 2523 rx_buffer->pagecnt_bias++; 2524 } 2525 total_rx_bytes += size; 2526 total_rx_packets++; 2527 } else if (skb) { 2528 i40e_add_rx_frag(rx_ring, rx_buffer, skb, size); 2529 } else if (ring_uses_build_skb(rx_ring)) { 2530 skb = i40e_build_skb(rx_ring, rx_buffer, &xdp); 2531 } else { 2532 skb = i40e_construct_skb(rx_ring, rx_buffer, &xdp); 2533 } 2534 2535 /* exit if we failed to retrieve a buffer */ 2536 if (!xdp_res && !skb) { 2537 rx_ring->rx_stats.alloc_buff_failed++; 2538 rx_buffer->pagecnt_bias++; 2539 break; 2540 } 2541 2542 i40e_put_rx_buffer(rx_ring, rx_buffer, rx_buffer_pgcnt); 2543 cleaned_count++; 2544 2545 i40e_inc_ntc(rx_ring); 2546 if (i40e_is_non_eop(rx_ring, rx_desc)) 
2547 continue; 2548 2549 if (xdp_res || i40e_cleanup_headers(rx_ring, skb, rx_desc)) { 2550 skb = NULL; 2551 continue; 2552 } 2553 2554 /* probably a little skewed due to removing CRC */ 2555 total_rx_bytes += skb->len; 2556 2557 /* populate checksum, VLAN, and protocol */ 2558 i40e_process_skb_fields(rx_ring, rx_desc, skb); 2559 2560 i40e_trace(clean_rx_irq_rx, rx_ring, rx_desc, skb); 2561 napi_gro_receive(&rx_ring->q_vector->napi, skb); 2562 skb = NULL; 2563 2564 /* update budget accounting */ 2565 total_rx_packets++; 2566 } 2567 2568 i40e_finalize_xdp_rx(rx_ring, xdp_xmit); 2569 rx_ring->skb = skb; 2570 2571 i40e_update_rx_stats(rx_ring, total_rx_bytes, total_rx_packets); 2572 2573 /* guarantee a trip back through this routine if there was a failure */ 2574 return failure ? budget : (int)total_rx_packets; 2575 } 2576 2577 static inline u32 i40e_buildreg_itr(const int type, u16 itr) 2578 { 2579 u32 val; 2580 2581 /* We don't bother with setting the CLEARPBA bit as the data sheet 2582 * points out doing so is "meaningless since it was already 2583 * auto-cleared". The auto-clearing happens when the interrupt is 2584 * asserted. 2585 * 2586 * Hardware errata 28 for also indicates that writing to a 2587 * xxINT_DYN_CTLx CSR with INTENA_MSK (bit 31) set to 0 will clear 2588 * an event in the PBA anyway so we need to rely on the automask 2589 * to hold pending events for us until the interrupt is re-enabled 2590 * 2591 * The itr value is reported in microseconds, and the register 2592 * value is recorded in 2 microsecond units. For this reason we 2593 * only need to shift by the interval shift - 1 instead of the 2594 * full value. 2595 */ 2596 itr &= I40E_ITR_MASK; 2597 2598 val = I40E_PFINT_DYN_CTLN_INTENA_MASK | 2599 (type << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) | 2600 (itr << (I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT - 1)); 2601 2602 return val; 2603 } 2604 2605 /* a small macro to shorten up some long lines */ 2606 #define INTREG I40E_PFINT_DYN_CTLN 2607 2608 /* The act of updating the ITR will cause it to immediately trigger. In order 2609 * to prevent this from throwing off adaptive update statistics we defer the 2610 * update so that it can only happen so often. So after either Tx or Rx are 2611 * updated we make the adaptive scheme wait until either the ITR completely 2612 * expires via the next_update expiration or we have been through at least 2613 * 3 interrupts. 2614 */ 2615 #define ITR_COUNTDOWN_START 3 2616 2617 /** 2618 * i40e_update_enable_itr - Update itr and re-enable MSIX interrupt 2619 * @vsi: the VSI we care about 2620 * @q_vector: q_vector for which itr is being updated and interrupt enabled 2621 * 2622 **/ 2623 static inline void i40e_update_enable_itr(struct i40e_vsi *vsi, 2624 struct i40e_q_vector *q_vector) 2625 { 2626 struct i40e_hw *hw = &vsi->back->hw; 2627 u32 intval; 2628 2629 /* If we don't have MSIX, then we only need to re-enable icr0 */ 2630 if (!(vsi->back->flags & I40E_FLAG_MSIX_ENABLED)) { 2631 i40e_irq_dynamic_enable_icr0(vsi->back); 2632 return; 2633 } 2634 2635 /* These will do nothing if dynamic updates are not enabled */ 2636 i40e_update_itr(q_vector, &q_vector->tx); 2637 i40e_update_itr(q_vector, &q_vector->rx); 2638 2639 /* This block of logic allows us to get away with only updating 2640 * one ITR value with each interrupt. The idea is to perform a 2641 * pseudo-lazy update with the following criteria. 2642 * 2643 * 1. Rx is given higher priority than Tx if both are in same state 2644 * 2. If we must reduce an ITR that is given highest priority. 
2645 * 3. We then give priority to increasing ITR based on amount. 2646 */ 2647 if (q_vector->rx.target_itr < q_vector->rx.current_itr) { 2648 /* Rx ITR needs to be reduced, this is highest priority */ 2649 intval = i40e_buildreg_itr(I40E_RX_ITR, 2650 q_vector->rx.target_itr); 2651 q_vector->rx.current_itr = q_vector->rx.target_itr; 2652 q_vector->itr_countdown = ITR_COUNTDOWN_START; 2653 } else if ((q_vector->tx.target_itr < q_vector->tx.current_itr) || 2654 ((q_vector->rx.target_itr - q_vector->rx.current_itr) < 2655 (q_vector->tx.target_itr - q_vector->tx.current_itr))) { 2656 /* Tx ITR needs to be reduced, this is second priority 2657 * Tx ITR needs to be increased more than Rx, fourth priority 2658 */ 2659 intval = i40e_buildreg_itr(I40E_TX_ITR, 2660 q_vector->tx.target_itr); 2661 q_vector->tx.current_itr = q_vector->tx.target_itr; 2662 q_vector->itr_countdown = ITR_COUNTDOWN_START; 2663 } else if (q_vector->rx.current_itr != q_vector->rx.target_itr) { 2664 /* Rx ITR needs to be increased, third priority */ 2665 intval = i40e_buildreg_itr(I40E_RX_ITR, 2666 q_vector->rx.target_itr); 2667 q_vector->rx.current_itr = q_vector->rx.target_itr; 2668 q_vector->itr_countdown = ITR_COUNTDOWN_START; 2669 } else { 2670 /* No ITR update, lowest priority */ 2671 intval = i40e_buildreg_itr(I40E_ITR_NONE, 0); 2672 if (q_vector->itr_countdown) 2673 q_vector->itr_countdown--; 2674 } 2675 2676 if (!test_bit(__I40E_VSI_DOWN, vsi->state)) 2677 wr32(hw, INTREG(q_vector->reg_idx), intval); 2678 } 2679 2680 /** 2681 * i40e_napi_poll - NAPI polling Rx/Tx cleanup routine 2682 * @napi: napi struct with our devices info in it 2683 * @budget: amount of work driver is allowed to do this pass, in packets 2684 * 2685 * This function will clean all queues associated with a q_vector. 2686 * 2687 * Returns the amount of work done 2688 **/ 2689 int i40e_napi_poll(struct napi_struct *napi, int budget) 2690 { 2691 struct i40e_q_vector *q_vector = 2692 container_of(napi, struct i40e_q_vector, napi); 2693 struct i40e_vsi *vsi = q_vector->vsi; 2694 struct i40e_ring *ring; 2695 bool clean_complete = true; 2696 bool arm_wb = false; 2697 int budget_per_ring; 2698 int work_done = 0; 2699 2700 if (test_bit(__I40E_VSI_DOWN, vsi->state)) { 2701 napi_complete(napi); 2702 return 0; 2703 } 2704 2705 /* Since the actual Tx work is minimal, we can give the Tx a larger 2706 * budget and be more aggressive about cleaning up the Tx descriptors. 2707 */ 2708 i40e_for_each_ring(ring, q_vector->tx) { 2709 bool wd = ring->xsk_pool ? 2710 i40e_clean_xdp_tx_irq(vsi, ring) : 2711 i40e_clean_tx_irq(vsi, ring, budget); 2712 2713 if (!wd) { 2714 clean_complete = false; 2715 continue; 2716 } 2717 arm_wb |= ring->arm_wb; 2718 ring->arm_wb = false; 2719 } 2720 2721 /* Handle case where we are called by netpoll with a budget of 0 */ 2722 if (budget <= 0) 2723 goto tx_only; 2724 2725 /* normally we have 1 Rx ring per q_vector */ 2726 if (unlikely(q_vector->num_ringpairs > 1)) 2727 /* We attempt to distribute budget to each Rx queue fairly, but 2728 * don't allow the budget to go below 1 because that would exit 2729 * polling early. 2730 */ 2731 budget_per_ring = max_t(int, budget / q_vector->num_ringpairs, 1); 2732 else 2733 /* Max of 1 Rx ring in this q_vector so give it the budget */ 2734 budget_per_ring = budget; 2735 2736 i40e_for_each_ring(ring, q_vector->rx) { 2737 int cleaned = ring->xsk_pool ? 
2738 i40e_clean_rx_irq_zc(ring, budget_per_ring) : 2739 i40e_clean_rx_irq(ring, budget_per_ring); 2740 2741 work_done += cleaned; 2742 /* if we clean as many as budgeted, we must not be done */ 2743 if (cleaned >= budget_per_ring) 2744 clean_complete = false; 2745 } 2746 2747 /* If work not completed, return budget and polling will return */ 2748 if (!clean_complete) { 2749 int cpu_id = smp_processor_id(); 2750 2751 /* It is possible that the interrupt affinity has changed but, 2752 * if the cpu is pegged at 100%, polling will never exit while 2753 * traffic continues and the interrupt will be stuck on this 2754 * cpu. We check to make sure affinity is correct before we 2755 * continue to poll, otherwise we must stop polling so the 2756 * interrupt can move to the correct cpu. 2757 */ 2758 if (!cpumask_test_cpu(cpu_id, &q_vector->affinity_mask)) { 2759 /* Tell napi that we are done polling */ 2760 napi_complete_done(napi, work_done); 2761 2762 /* Force an interrupt */ 2763 i40e_force_wb(vsi, q_vector); 2764 2765 /* Return budget-1 so that polling stops */ 2766 return budget - 1; 2767 } 2768 tx_only: 2769 if (arm_wb) { 2770 q_vector->tx.ring[0].tx_stats.tx_force_wb++; 2771 i40e_enable_wb_on_itr(vsi, q_vector); 2772 } 2773 return budget; 2774 } 2775 2776 if (vsi->back->flags & I40E_TXR_FLAGS_WB_ON_ITR) 2777 q_vector->arm_wb_state = false; 2778 2779 /* Exit the polling mode, but don't re-enable interrupts if stack might 2780 * poll us due to busy-polling 2781 */ 2782 if (likely(napi_complete_done(napi, work_done))) 2783 i40e_update_enable_itr(vsi, q_vector); 2784 2785 return min(work_done, budget - 1); 2786 } 2787 2788 /** 2789 * i40e_atr - Add a Flow Director ATR filter 2790 * @tx_ring: ring to add programming descriptor to 2791 * @skb: send buffer 2792 * @tx_flags: send tx flags 2793 **/ 2794 static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb, 2795 u32 tx_flags) 2796 { 2797 struct i40e_filter_program_desc *fdir_desc; 2798 struct i40e_pf *pf = tx_ring->vsi->back; 2799 union { 2800 unsigned char *network; 2801 struct iphdr *ipv4; 2802 struct ipv6hdr *ipv6; 2803 } hdr; 2804 struct tcphdr *th; 2805 unsigned int hlen; 2806 u32 flex_ptype, dtype_cmd; 2807 int l4_proto; 2808 u16 i; 2809 2810 /* make sure ATR is enabled */ 2811 if (!(pf->flags & I40E_FLAG_FD_ATR_ENABLED)) 2812 return; 2813 2814 if (test_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state)) 2815 return; 2816 2817 /* if sampling is disabled do nothing */ 2818 if (!tx_ring->atr_sample_rate) 2819 return; 2820 2821 /* Currently only IPv4/IPv6 with TCP is supported */ 2822 if (!(tx_flags & (I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6))) 2823 return; 2824 2825 /* snag network header to get L4 type and address */ 2826 hdr.network = (tx_flags & I40E_TX_FLAGS_UDP_TUNNEL) ? 2827 skb_inner_network_header(skb) : skb_network_header(skb); 2828 2829 /* Note: tx_flags gets modified to reflect inner protocols in 2830 * tx_enable_csum function if encap is enabled. 
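 *
 * As an example of the IHL arithmetic below: a plain IPv4 header with
 * no options starts with the byte 0x45, so (0x45 & 0x0F) << 2 yields
 * the expected 20 byte header length.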
2831 */ 2832 if (tx_flags & I40E_TX_FLAGS_IPV4) { 2833 /* access ihl as u8 to avoid unaligned access on ia64 */ 2834 hlen = (hdr.network[0] & 0x0F) << 2; 2835 l4_proto = hdr.ipv4->protocol; 2836 } else { 2837 /* find the start of the innermost ipv6 header */ 2838 unsigned int inner_hlen = hdr.network - skb->data; 2839 unsigned int h_offset = inner_hlen; 2840 2841 /* this function updates h_offset to the end of the header */ 2842 l4_proto = 2843 ipv6_find_hdr(skb, &h_offset, IPPROTO_TCP, NULL, NULL); 2844 /* hlen will contain our best estimate of the tcp header */ 2845 hlen = h_offset - inner_hlen; 2846 } 2847 2848 if (l4_proto != IPPROTO_TCP) 2849 return; 2850 2851 th = (struct tcphdr *)(hdr.network + hlen); 2852 2853 /* Due to lack of space, no more new filters can be programmed */ 2854 if (th->syn && test_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state)) 2855 return; 2856 if (pf->flags & I40E_FLAG_HW_ATR_EVICT_ENABLED) { 2857 /* HW ATR eviction will take care of removing filters on FIN 2858 * and RST packets. 2859 */ 2860 if (th->fin || th->rst) 2861 return; 2862 } 2863 2864 tx_ring->atr_count++; 2865 2866 /* sample on all syn/fin/rst packets or once every atr sample rate */ 2867 if (!th->fin && 2868 !th->syn && 2869 !th->rst && 2870 (tx_ring->atr_count < tx_ring->atr_sample_rate)) 2871 return; 2872 2873 tx_ring->atr_count = 0; 2874 2875 /* grab the next descriptor */ 2876 i = tx_ring->next_to_use; 2877 fdir_desc = I40E_TX_FDIRDESC(tx_ring, i); 2878 2879 i++; 2880 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; 2881 2882 flex_ptype = (tx_ring->queue_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) & 2883 I40E_TXD_FLTR_QW0_QINDEX_MASK; 2884 flex_ptype |= (tx_flags & I40E_TX_FLAGS_IPV4) ? 2885 (I40E_FILTER_PCTYPE_NONF_IPV4_TCP << 2886 I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) : 2887 (I40E_FILTER_PCTYPE_NONF_IPV6_TCP << 2888 I40E_TXD_FLTR_QW0_PCTYPE_SHIFT); 2889 2890 flex_ptype |= tx_ring->vsi->id << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT; 2891 2892 dtype_cmd = I40E_TX_DESC_DTYPE_FILTER_PROG; 2893 2894 dtype_cmd |= (th->fin || th->rst) ? 2895 (I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE << 2896 I40E_TXD_FLTR_QW1_PCMD_SHIFT) : 2897 (I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE << 2898 I40E_TXD_FLTR_QW1_PCMD_SHIFT); 2899 2900 dtype_cmd |= I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX << 2901 I40E_TXD_FLTR_QW1_DEST_SHIFT; 2902 2903 dtype_cmd |= I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID << 2904 I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT; 2905 2906 dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK; 2907 if (!(tx_flags & I40E_TX_FLAGS_UDP_TUNNEL)) 2908 dtype_cmd |= 2909 ((u32)I40E_FD_ATR_STAT_IDX(pf->hw.pf_id) << 2910 I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) & 2911 I40E_TXD_FLTR_QW1_CNTINDEX_MASK; 2912 else 2913 dtype_cmd |= 2914 ((u32)I40E_FD_ATR_TUNNEL_STAT_IDX(pf->hw.pf_id) << 2915 I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) & 2916 I40E_TXD_FLTR_QW1_CNTINDEX_MASK; 2917 2918 if (pf->flags & I40E_FLAG_HW_ATR_EVICT_ENABLED) 2919 dtype_cmd |= I40E_TXD_FLTR_QW1_ATR_MASK; 2920 2921 fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype); 2922 fdir_desc->rsvd = cpu_to_le32(0); 2923 fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd); 2924 fdir_desc->fd_id = cpu_to_le32(0); 2925 } 2926 2927 /** 2928 * i40e_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW 2929 * @skb: send buffer 2930 * @tx_ring: ring to send buffer on 2931 * @flags: the tx flags to be set 2932 * 2933 * Checks the skb and set up correspondingly several generic transmit flags 2934 * related to VLAN tagging for the HW, such as VLAN, DCB, etc. 
2935 * 2936 * Returns error code indicate the frame should be dropped upon error and the 2937 * otherwise returns 0 to indicate the flags has been set properly. 2938 **/ 2939 static inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb, 2940 struct i40e_ring *tx_ring, 2941 u32 *flags) 2942 { 2943 __be16 protocol = skb->protocol; 2944 u32 tx_flags = 0; 2945 2946 if (protocol == htons(ETH_P_8021Q) && 2947 !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) { 2948 /* When HW VLAN acceleration is turned off by the user the 2949 * stack sets the protocol to 8021q so that the driver 2950 * can take any steps required to support the SW only 2951 * VLAN handling. In our case the driver doesn't need 2952 * to take any further steps so just set the protocol 2953 * to the encapsulated ethertype. 2954 */ 2955 skb->protocol = vlan_get_protocol(skb); 2956 goto out; 2957 } 2958 2959 /* if we have a HW VLAN tag being added, default to the HW one */ 2960 if (skb_vlan_tag_present(skb)) { 2961 tx_flags |= skb_vlan_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT; 2962 tx_flags |= I40E_TX_FLAGS_HW_VLAN; 2963 /* else if it is a SW VLAN, check the next protocol and store the tag */ 2964 } else if (protocol == htons(ETH_P_8021Q)) { 2965 struct vlan_hdr *vhdr, _vhdr; 2966 2967 vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr); 2968 if (!vhdr) 2969 return -EINVAL; 2970 2971 protocol = vhdr->h_vlan_encapsulated_proto; 2972 tx_flags |= ntohs(vhdr->h_vlan_TCI) << I40E_TX_FLAGS_VLAN_SHIFT; 2973 tx_flags |= I40E_TX_FLAGS_SW_VLAN; 2974 } 2975 2976 if (!(tx_ring->vsi->back->flags & I40E_FLAG_DCB_ENABLED)) 2977 goto out; 2978 2979 /* Insert 802.1p priority into VLAN header */ 2980 if ((tx_flags & (I40E_TX_FLAGS_HW_VLAN | I40E_TX_FLAGS_SW_VLAN)) || 2981 (skb->priority != TC_PRIO_CONTROL)) { 2982 tx_flags &= ~I40E_TX_FLAGS_VLAN_PRIO_MASK; 2983 tx_flags |= (skb->priority & 0x7) << 2984 I40E_TX_FLAGS_VLAN_PRIO_SHIFT; 2985 if (tx_flags & I40E_TX_FLAGS_SW_VLAN) { 2986 struct vlan_ethhdr *vhdr; 2987 int rc; 2988 2989 rc = skb_cow_head(skb, 0); 2990 if (rc < 0) 2991 return rc; 2992 vhdr = (struct vlan_ethhdr *)skb->data; 2993 vhdr->h_vlan_TCI = htons(tx_flags >> 2994 I40E_TX_FLAGS_VLAN_SHIFT); 2995 } else { 2996 tx_flags |= I40E_TX_FLAGS_HW_VLAN; 2997 } 2998 } 2999 3000 out: 3001 *flags = tx_flags; 3002 return 0; 3003 } 3004 3005 /** 3006 * i40e_tso - set up the tso context descriptor 3007 * @first: pointer to first Tx buffer for xmit 3008 * @hdr_len: ptr to the size of the packet header 3009 * @cd_type_cmd_tso_mss: Quad Word 1 3010 * 3011 * Returns 0 if no TSO can happen, 1 if tso is going, or error 3012 **/ 3013 static int i40e_tso(struct i40e_tx_buffer *first, u8 *hdr_len, 3014 u64 *cd_type_cmd_tso_mss) 3015 { 3016 struct sk_buff *skb = first->skb; 3017 u64 cd_cmd, cd_tso_len, cd_mss; 3018 union { 3019 struct iphdr *v4; 3020 struct ipv6hdr *v6; 3021 unsigned char *hdr; 3022 } ip; 3023 union { 3024 struct tcphdr *tcp; 3025 struct udphdr *udp; 3026 unsigned char *hdr; 3027 } l4; 3028 u32 paylen, l4_offset; 3029 u16 gso_segs, gso_size; 3030 int err; 3031 3032 if (skb->ip_summed != CHECKSUM_PARTIAL) 3033 return 0; 3034 3035 if (!skb_is_gso(skb)) 3036 return 0; 3037 3038 err = skb_cow_head(skb, 0); 3039 if (err < 0) 3040 return err; 3041 3042 ip.hdr = skb_network_header(skb); 3043 l4.hdr = skb_transport_header(skb); 3044 3045 /* initialize outer IP header fields */ 3046 if (ip.v4->version == 4) { 3047 ip.v4->tot_len = 0; 3048 ip.v4->check = 0; 3049 } else { 3050 ip.v6->payload_len = 0; 3051 } 3052 3053 if 
(skb_shinfo(skb)->gso_type & (SKB_GSO_GRE | 3054 SKB_GSO_GRE_CSUM | 3055 SKB_GSO_IPXIP4 | 3056 SKB_GSO_IPXIP6 | 3057 SKB_GSO_UDP_TUNNEL | 3058 SKB_GSO_UDP_TUNNEL_CSUM)) { 3059 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) && 3060 (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) { 3061 l4.udp->len = 0; 3062 3063 /* determine offset of outer transport header */ 3064 l4_offset = l4.hdr - skb->data; 3065 3066 /* remove payload length from outer checksum */ 3067 paylen = skb->len - l4_offset; 3068 csum_replace_by_diff(&l4.udp->check, 3069 (__force __wsum)htonl(paylen)); 3070 } 3071 3072 /* reset pointers to inner headers */ 3073 ip.hdr = skb_inner_network_header(skb); 3074 l4.hdr = skb_inner_transport_header(skb); 3075 3076 /* initialize inner IP header fields */ 3077 if (ip.v4->version == 4) { 3078 ip.v4->tot_len = 0; 3079 ip.v4->check = 0; 3080 } else { 3081 ip.v6->payload_len = 0; 3082 } 3083 } 3084 3085 /* determine offset of inner transport header */ 3086 l4_offset = l4.hdr - skb->data; 3087 3088 /* remove payload length from inner checksum */ 3089 paylen = skb->len - l4_offset; 3090 3091 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) { 3092 csum_replace_by_diff(&l4.udp->check, (__force __wsum)htonl(paylen)); 3093 /* compute length of segmentation header */ 3094 *hdr_len = sizeof(*l4.udp) + l4_offset; 3095 } else { 3096 csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen)); 3097 /* compute length of segmentation header */ 3098 *hdr_len = (l4.tcp->doff * 4) + l4_offset; 3099 } 3100 3101 /* pull values out of skb_shinfo */ 3102 gso_size = skb_shinfo(skb)->gso_size; 3103 gso_segs = skb_shinfo(skb)->gso_segs; 3104 3105 /* update GSO size and bytecount with header size */ 3106 first->gso_segs = gso_segs; 3107 first->bytecount += (first->gso_segs - 1) * *hdr_len; 3108 3109 /* find the field values */ 3110 cd_cmd = I40E_TX_CTX_DESC_TSO; 3111 cd_tso_len = skb->len - *hdr_len; 3112 cd_mss = gso_size; 3113 *cd_type_cmd_tso_mss |= (cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) | 3114 (cd_tso_len << I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) | 3115 (cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT); 3116 return 1; 3117 } 3118 3119 /** 3120 * i40e_tsyn - set up the tsyn context descriptor 3121 * @tx_ring: ptr to the ring to send 3122 * @skb: ptr to the skb we're sending 3123 * @tx_flags: the collected send information 3124 * @cd_type_cmd_tso_mss: Quad Word 1 3125 * 3126 * Returns 0 if no Tx timestamp can happen and 1 if the timestamp will happen 3127 **/ 3128 static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb, 3129 u32 tx_flags, u64 *cd_type_cmd_tso_mss) 3130 { 3131 struct i40e_pf *pf; 3132 3133 if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))) 3134 return 0; 3135 3136 /* Tx timestamps cannot be sampled when doing TSO */ 3137 if (tx_flags & I40E_TX_FLAGS_TSO) 3138 return 0; 3139 3140 /* only timestamp the outbound packet if the user has requested it and 3141 * we are not already transmitting a packet to be timestamped 3142 */ 3143 pf = i40e_netdev_to_pf(tx_ring->netdev); 3144 if (!(pf->flags & I40E_FLAG_PTP)) 3145 return 0; 3146 3147 if (pf->ptp_tx && 3148 !test_and_set_bit_lock(__I40E_PTP_TX_IN_PROGRESS, pf->state)) { 3149 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 3150 pf->ptp_tx_start = jiffies; 3151 pf->ptp_tx_skb = skb_get(skb); 3152 } else { 3153 pf->tx_hwtstamp_skipped++; 3154 return 0; 3155 } 3156 3157 *cd_type_cmd_tso_mss |= (u64)I40E_TX_CTX_DESC_TSYN << 3158 I40E_TXD_CTX_QW1_CMD_SHIFT; 3159 3160 return 1; 3161 } 3162 3163 /** 3164 * i40e_tx_enable_csum - Enable Tx 
checksum offloads 3165 * @skb: send buffer 3166 * @tx_flags: pointer to Tx flags currently set 3167 * @td_cmd: Tx descriptor command bits to set 3168 * @td_offset: Tx descriptor header offsets to set 3169 * @tx_ring: Tx descriptor ring 3170 * @cd_tunneling: ptr to context desc bits 3171 **/ 3172 static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, 3173 u32 *td_cmd, u32 *td_offset, 3174 struct i40e_ring *tx_ring, 3175 u32 *cd_tunneling) 3176 { 3177 union { 3178 struct iphdr *v4; 3179 struct ipv6hdr *v6; 3180 unsigned char *hdr; 3181 } ip; 3182 union { 3183 struct tcphdr *tcp; 3184 struct udphdr *udp; 3185 unsigned char *hdr; 3186 } l4; 3187 unsigned char *exthdr; 3188 u32 offset, cmd = 0; 3189 __be16 frag_off; 3190 u8 l4_proto = 0; 3191 3192 if (skb->ip_summed != CHECKSUM_PARTIAL) 3193 return 0; 3194 3195 ip.hdr = skb_network_header(skb); 3196 l4.hdr = skb_transport_header(skb); 3197 3198 /* compute outer L2 header size */ 3199 offset = ((ip.hdr - skb->data) / 2) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT; 3200 3201 if (skb->encapsulation) { 3202 u32 tunnel = 0; 3203 /* define outer network header type */ 3204 if (*tx_flags & I40E_TX_FLAGS_IPV4) { 3205 tunnel |= (*tx_flags & I40E_TX_FLAGS_TSO) ? 3206 I40E_TX_CTX_EXT_IP_IPV4 : 3207 I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM; 3208 3209 l4_proto = ip.v4->protocol; 3210 } else if (*tx_flags & I40E_TX_FLAGS_IPV6) { 3211 int ret; 3212 3213 tunnel |= I40E_TX_CTX_EXT_IP_IPV6; 3214 3215 exthdr = ip.hdr + sizeof(*ip.v6); 3216 l4_proto = ip.v6->nexthdr; 3217 ret = ipv6_skip_exthdr(skb, exthdr - skb->data, 3218 &l4_proto, &frag_off); 3219 if (ret < 0) 3220 return -1; 3221 } 3222 3223 /* define outer transport */ 3224 switch (l4_proto) { 3225 case IPPROTO_UDP: 3226 tunnel |= I40E_TXD_CTX_UDP_TUNNELING; 3227 *tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL; 3228 break; 3229 case IPPROTO_GRE: 3230 tunnel |= I40E_TXD_CTX_GRE_TUNNELING; 3231 *tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL; 3232 break; 3233 case IPPROTO_IPIP: 3234 case IPPROTO_IPV6: 3235 *tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL; 3236 l4.hdr = skb_inner_network_header(skb); 3237 break; 3238 default: 3239 if (*tx_flags & I40E_TX_FLAGS_TSO) 3240 return -1; 3241 3242 skb_checksum_help(skb); 3243 return 0; 3244 } 3245 3246 /* compute outer L3 header size */ 3247 tunnel |= ((l4.hdr - ip.hdr) / 4) << 3248 I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT; 3249 3250 /* switch IP header pointer from outer to inner header */ 3251 ip.hdr = skb_inner_network_header(skb); 3252 3253 /* compute tunnel header size */ 3254 tunnel |= ((ip.hdr - l4.hdr) / 2) << 3255 I40E_TXD_CTX_QW0_NATLEN_SHIFT; 3256 3257 /* indicate if we need to offload outer UDP header */ 3258 if ((*tx_flags & I40E_TX_FLAGS_TSO) && 3259 !(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) && 3260 (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) 3261 tunnel |= I40E_TXD_CTX_QW0_L4T_CS_MASK; 3262 3263 /* record tunnel offload values */ 3264 *cd_tunneling |= tunnel; 3265 3266 /* switch L4 header pointer from outer to inner */ 3267 l4.hdr = skb_inner_transport_header(skb); 3268 l4_proto = 0; 3269 3270 /* reset type as we transition from outer to inner headers */ 3271 *tx_flags &= ~(I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6); 3272 if (ip.v4->version == 4) 3273 *tx_flags |= I40E_TX_FLAGS_IPV4; 3274 if (ip.v6->version == 6) 3275 *tx_flags |= I40E_TX_FLAGS_IPV6; 3276 } 3277 3278 /* Enable IP checksum offloads */ 3279 if (*tx_flags & I40E_TX_FLAGS_IPV4) { 3280 l4_proto = ip.v4->protocol; 3281 /* the stack computes the IP header already, the only time we 3282 * need the hardware to recompute it 
is in the case of TSO. 3283 */ 3284 cmd |= (*tx_flags & I40E_TX_FLAGS_TSO) ? 3285 I40E_TX_DESC_CMD_IIPT_IPV4_CSUM : 3286 I40E_TX_DESC_CMD_IIPT_IPV4; 3287 } else if (*tx_flags & I40E_TX_FLAGS_IPV6) { 3288 cmd |= I40E_TX_DESC_CMD_IIPT_IPV6; 3289 3290 exthdr = ip.hdr + sizeof(*ip.v6); 3291 l4_proto = ip.v6->nexthdr; 3292 if (l4.hdr != exthdr) 3293 ipv6_skip_exthdr(skb, exthdr - skb->data, 3294 &l4_proto, &frag_off); 3295 } 3296 3297 /* compute inner L3 header size */ 3298 offset |= ((l4.hdr - ip.hdr) / 4) << I40E_TX_DESC_LENGTH_IPLEN_SHIFT; 3299 3300 /* Enable L4 checksum offloads */ 3301 switch (l4_proto) { 3302 case IPPROTO_TCP: 3303 /* enable checksum offloads */ 3304 cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP; 3305 offset |= l4.tcp->doff << I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT; 3306 break; 3307 case IPPROTO_SCTP: 3308 /* enable SCTP checksum offload */ 3309 cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP; 3310 offset |= (sizeof(struct sctphdr) >> 2) << 3311 I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT; 3312 break; 3313 case IPPROTO_UDP: 3314 /* enable UDP checksum offload */ 3315 cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP; 3316 offset |= (sizeof(struct udphdr) >> 2) << 3317 I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT; 3318 break; 3319 default: 3320 if (*tx_flags & I40E_TX_FLAGS_TSO) 3321 return -1; 3322 skb_checksum_help(skb); 3323 return 0; 3324 } 3325 3326 *td_cmd |= cmd; 3327 *td_offset |= offset; 3328 3329 return 1; 3330 } 3331 3332 /** 3333 * i40e_create_tx_ctx - Build the Tx context descriptor 3334 * @tx_ring: ring to create the descriptor on 3335 * @cd_type_cmd_tso_mss: Quad Word 1 3336 * @cd_tunneling: Quad Word 0 - bits 0-31 3337 * @cd_l2tag2: Quad Word 0 - bits 32-63 3338 **/ 3339 static void i40e_create_tx_ctx(struct i40e_ring *tx_ring, 3340 const u64 cd_type_cmd_tso_mss, 3341 const u32 cd_tunneling, const u32 cd_l2tag2) 3342 { 3343 struct i40e_tx_context_desc *context_desc; 3344 int i = tx_ring->next_to_use; 3345 3346 if ((cd_type_cmd_tso_mss == I40E_TX_DESC_DTYPE_CONTEXT) && 3347 !cd_tunneling && !cd_l2tag2) 3348 return; 3349 3350 /* grab the next descriptor */ 3351 context_desc = I40E_TX_CTXTDESC(tx_ring, i); 3352 3353 i++; 3354 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; 3355 3356 /* cpu_to_le32 and assign to struct fields */ 3357 context_desc->tunneling_params = cpu_to_le32(cd_tunneling); 3358 context_desc->l2tag2 = cpu_to_le16(cd_l2tag2); 3359 context_desc->rsvd = cpu_to_le16(0); 3360 context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss); 3361 } 3362 3363 /** 3364 * __i40e_maybe_stop_tx - 2nd level check for tx stop conditions 3365 * @tx_ring: the ring to be checked 3366 * @size: the size buffer we want to assure is available 3367 * 3368 * Returns -EBUSY if a stop is needed, else 0 3369 **/ 3370 int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size) 3371 { 3372 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); 3373 /* Memory barrier before checking head and tail */ 3374 smp_mb(); 3375 3376 /* Check again in a case another CPU has just made room available. */ 3377 if (likely(I40E_DESC_UNUSED(tx_ring) < size)) 3378 return -EBUSY; 3379 3380 /* A reprieve! 
- use start_queue because it doesn't call schedule */ 3381 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index); 3382 ++tx_ring->tx_stats.restart_queue; 3383 return 0; 3384 } 3385 3386 /** 3387 * __i40e_chk_linearize - Check if there are more than 8 buffers per packet 3388 * @skb: send buffer 3389 * 3390 * Note: Our HW can't DMA more than 8 buffers to build a packet on the wire 3391 * and so we need to figure out the cases where we need to linearize the skb. 3392 * 3393 * For TSO we need to count the TSO header and segment payload separately. 3394 * As such we need to check cases where we have 7 fragments or more as we 3395 * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for 3396 * the segment payload in the first descriptor, and another 7 for the 3397 * fragments. 3398 **/ 3399 bool __i40e_chk_linearize(struct sk_buff *skb) 3400 { 3401 const skb_frag_t *frag, *stale; 3402 int nr_frags, sum; 3403 3404 /* no need to check if number of frags is less than 7 */ 3405 nr_frags = skb_shinfo(skb)->nr_frags; 3406 if (nr_frags < (I40E_MAX_BUFFER_TXD - 1)) 3407 return false; 3408 3409 /* We need to walk through the list and validate that each group 3410 * of 6 fragments totals at least gso_size. 3411 */ 3412 nr_frags -= I40E_MAX_BUFFER_TXD - 2; 3413 frag = &skb_shinfo(skb)->frags[0]; 3414 3415 /* Initialize size to the negative value of gso_size minus 1. We 3416 * use this as the worst case scenario in which the frag ahead 3417 * of us only provides one byte which is why we are limited to 6 3418 * descriptors for a single transmit as the header and previous 3419 * fragment are already consuming 2 descriptors. 3420 */ 3421 sum = 1 - skb_shinfo(skb)->gso_size; 3422 3423 /* Add size of frags 0 through 4 to create our initial sum */ 3424 sum += skb_frag_size(frag++); 3425 sum += skb_frag_size(frag++); 3426 sum += skb_frag_size(frag++); 3427 sum += skb_frag_size(frag++); 3428 sum += skb_frag_size(frag++); 3429 3430 /* Walk through fragments adding latest fragment, testing it, and 3431 * then removing stale fragments from the sum. 3432 */ 3433 for (stale = &skb_shinfo(skb)->frags[0];; stale++) { 3434 int stale_size = skb_frag_size(stale); 3435 3436 sum += skb_frag_size(frag++); 3437 3438 /* The stale fragment may present us with a smaller 3439 * descriptor than the actual fragment size. To account 3440 * for that we need to remove all the data on the front and 3441 * figure out what the remainder would be in the last 3442 * descriptor associated with the fragment.
3443 */ 3444 if (stale_size > I40E_MAX_DATA_PER_TXD) { 3445 int align_pad = -(skb_frag_off(stale)) & 3446 (I40E_MAX_READ_REQ_SIZE - 1); 3447 3448 sum -= align_pad; 3449 stale_size -= align_pad; 3450 3451 do { 3452 sum -= I40E_MAX_DATA_PER_TXD_ALIGNED; 3453 stale_size -= I40E_MAX_DATA_PER_TXD_ALIGNED; 3454 } while (stale_size > I40E_MAX_DATA_PER_TXD); 3455 } 3456 3457 /* if sum is negative we failed to make sufficient progress */ 3458 if (sum < 0) 3459 return true; 3460 3461 if (!nr_frags--) 3462 break; 3463 3464 sum -= stale_size; 3465 } 3466 3467 return false; 3468 } 3469 3470 /** 3471 * i40e_tx_map - Build the Tx descriptor 3472 * @tx_ring: ring to send buffer on 3473 * @skb: send buffer 3474 * @first: first buffer info buffer to use 3475 * @tx_flags: collected send information 3476 * @hdr_len: size of the packet header 3477 * @td_cmd: the command field in the descriptor 3478 * @td_offset: offset for checksum or crc 3479 * 3480 * Returns 0 on success, -1 on failure to DMA 3481 **/ 3482 static inline int i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, 3483 struct i40e_tx_buffer *first, u32 tx_flags, 3484 const u8 hdr_len, u32 td_cmd, u32 td_offset) 3485 { 3486 unsigned int data_len = skb->data_len; 3487 unsigned int size = skb_headlen(skb); 3488 skb_frag_t *frag; 3489 struct i40e_tx_buffer *tx_bi; 3490 struct i40e_tx_desc *tx_desc; 3491 u16 i = tx_ring->next_to_use; 3492 u32 td_tag = 0; 3493 dma_addr_t dma; 3494 u16 desc_count = 1; 3495 3496 if (tx_flags & I40E_TX_FLAGS_HW_VLAN) { 3497 td_cmd |= I40E_TX_DESC_CMD_IL2TAG1; 3498 td_tag = (tx_flags & I40E_TX_FLAGS_VLAN_MASK) >> 3499 I40E_TX_FLAGS_VLAN_SHIFT; 3500 } 3501 3502 first->tx_flags = tx_flags; 3503 3504 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); 3505 3506 tx_desc = I40E_TX_DESC(tx_ring, i); 3507 tx_bi = first; 3508 3509 for (frag = &skb_shinfo(skb)->frags[0];; frag++) { 3510 unsigned int max_data = I40E_MAX_DATA_PER_TXD_ALIGNED; 3511 3512 if (dma_mapping_error(tx_ring->dev, dma)) 3513 goto dma_error; 3514 3515 /* record length, and DMA address */ 3516 dma_unmap_len_set(tx_bi, len, size); 3517 dma_unmap_addr_set(tx_bi, dma, dma); 3518 3519 /* align size to end of page */ 3520 max_data += -dma & (I40E_MAX_READ_REQ_SIZE - 1); 3521 tx_desc->buffer_addr = cpu_to_le64(dma); 3522 3523 while (unlikely(size > I40E_MAX_DATA_PER_TXD)) { 3524 tx_desc->cmd_type_offset_bsz = 3525 build_ctob(td_cmd, td_offset, 3526 max_data, td_tag); 3527 3528 tx_desc++; 3529 i++; 3530 desc_count++; 3531 3532 if (i == tx_ring->count) { 3533 tx_desc = I40E_TX_DESC(tx_ring, 0); 3534 i = 0; 3535 } 3536 3537 dma += max_data; 3538 size -= max_data; 3539 3540 max_data = I40E_MAX_DATA_PER_TXD_ALIGNED; 3541 tx_desc->buffer_addr = cpu_to_le64(dma); 3542 } 3543 3544 if (likely(!data_len)) 3545 break; 3546 3547 tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset, 3548 size, td_tag); 3549 3550 tx_desc++; 3551 i++; 3552 desc_count++; 3553 3554 if (i == tx_ring->count) { 3555 tx_desc = I40E_TX_DESC(tx_ring, 0); 3556 i = 0; 3557 } 3558 3559 size = skb_frag_size(frag); 3560 data_len -= size; 3561 3562 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size, 3563 DMA_TO_DEVICE); 3564 3565 tx_bi = &tx_ring->tx_bi[i]; 3566 } 3567 3568 netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount); 3569 3570 i++; 3571 if (i == tx_ring->count) 3572 i = 0; 3573 3574 tx_ring->next_to_use = i; 3575 3576 i40e_maybe_stop_tx(tx_ring, DESC_NEEDED); 3577 3578 /* write last descriptor with EOP bit */ 3579 td_cmd |= I40E_TX_DESC_CMD_EOP; 3580 3581 /* We OR 
these values together to check both against 4 (WB_STRIDE) 3582 * below. This is safe since we don't re-use desc_count afterwards. 3583 */ 3584 desc_count |= ++tx_ring->packet_stride; 3585 3586 if (desc_count >= WB_STRIDE) { 3587 /* write last descriptor with RS bit set */ 3588 td_cmd |= I40E_TX_DESC_CMD_RS; 3589 tx_ring->packet_stride = 0; 3590 } 3591 3592 tx_desc->cmd_type_offset_bsz = 3593 build_ctob(td_cmd, td_offset, size, td_tag); 3594 3595 skb_tx_timestamp(skb); 3596 3597 /* Force memory writes to complete before letting h/w know there 3598 * are new descriptors to fetch. 3599 * 3600 * We also use this memory barrier to make certain all of the 3601 * status bits have been updated before next_to_watch is written. 3602 */ 3603 wmb(); 3604 3605 /* set next_to_watch value indicating a packet is present */ 3606 first->next_to_watch = tx_desc; 3607 3608 /* notify HW of packet */ 3609 if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) { 3610 writel(i, tx_ring->tail); 3611 } 3612 3613 return 0; 3614 3615 dma_error: 3616 dev_info(tx_ring->dev, "TX DMA map failed\n"); 3617 3618 /* clear dma mappings for failed tx_bi map */ 3619 for (;;) { 3620 tx_bi = &tx_ring->tx_bi[i]; 3621 i40e_unmap_and_free_tx_resource(tx_ring, tx_bi); 3622 if (tx_bi == first) 3623 break; 3624 if (i == 0) 3625 i = tx_ring->count; 3626 i--; 3627 } 3628 3629 tx_ring->next_to_use = i; 3630 3631 return -1; 3632 } 3633 3634 static u16 i40e_swdcb_skb_tx_hash(struct net_device *dev, 3635 const struct sk_buff *skb, 3636 u16 num_tx_queues) 3637 { 3638 u32 jhash_initval_salt = 0xd631614b; 3639 u32 hash; 3640 3641 if (skb->sk && skb->sk->sk_hash) 3642 hash = skb->sk->sk_hash; 3643 else 3644 hash = (__force u16)skb->protocol ^ skb->hash; 3645 3646 hash = jhash_1word(hash, jhash_initval_salt); 3647 3648 return (u16)(((u64)hash * num_tx_queues) >> 32); 3649 } 3650 3651 u16 i40e_lan_select_queue(struct net_device *netdev, 3652 struct sk_buff *skb, 3653 struct net_device __always_unused *sb_dev) 3654 { 3655 struct i40e_netdev_priv *np = netdev_priv(netdev); 3656 struct i40e_vsi *vsi = np->vsi; 3657 struct i40e_hw *hw; 3658 u16 qoffset; 3659 u16 qcount; 3660 u8 tclass; 3661 u16 hash; 3662 u8 prio; 3663 3664 /* is DCB enabled at all? 
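 * With a single traffic class there is nothing to map, so flows are
 * simply spread across all real Tx queues by the software hash.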
*/ 3665 if (vsi->tc_config.numtc == 1) 3666 return i40e_swdcb_skb_tx_hash(netdev, skb, 3667 netdev->real_num_tx_queues); 3668 3669 prio = skb->priority; 3670 hw = &vsi->back->hw; 3671 tclass = hw->local_dcbx_config.etscfg.prioritytable[prio]; 3672 /* sanity check */ 3673 if (unlikely(!(vsi->tc_config.enabled_tc & BIT(tclass)))) 3674 tclass = 0; 3675 3676 /* select a queue assigned for the given TC */ 3677 qcount = vsi->tc_config.tc_info[tclass].qcount; 3678 hash = i40e_swdcb_skb_tx_hash(netdev, skb, qcount); 3679 3680 qoffset = vsi->tc_config.tc_info[tclass].qoffset; 3681 return qoffset + hash; 3682 } 3683 3684 /** 3685 * i40e_xmit_xdp_ring - transmits an XDP buffer to an XDP Tx ring 3686 * @xdpf: data to transmit 3687 * @xdp_ring: XDP Tx ring 3688 **/ 3689 static int i40e_xmit_xdp_ring(struct xdp_frame *xdpf, 3690 struct i40e_ring *xdp_ring) 3691 { 3692 u16 i = xdp_ring->next_to_use; 3693 struct i40e_tx_buffer *tx_bi; 3694 struct i40e_tx_desc *tx_desc; 3695 void *data = xdpf->data; 3696 u32 size = xdpf->len; 3697 dma_addr_t dma; 3698 3699 if (!unlikely(I40E_DESC_UNUSED(xdp_ring))) { 3700 xdp_ring->tx_stats.tx_busy++; 3701 return I40E_XDP_CONSUMED; 3702 } 3703 dma = dma_map_single(xdp_ring->dev, data, size, DMA_TO_DEVICE); 3704 if (dma_mapping_error(xdp_ring->dev, dma)) 3705 return I40E_XDP_CONSUMED; 3706 3707 tx_bi = &xdp_ring->tx_bi[i]; 3708 tx_bi->bytecount = size; 3709 tx_bi->gso_segs = 1; 3710 tx_bi->xdpf = xdpf; 3711 3712 /* record length, and DMA address */ 3713 dma_unmap_len_set(tx_bi, len, size); 3714 dma_unmap_addr_set(tx_bi, dma, dma); 3715 3716 tx_desc = I40E_TX_DESC(xdp_ring, i); 3717 tx_desc->buffer_addr = cpu_to_le64(dma); 3718 tx_desc->cmd_type_offset_bsz = build_ctob(I40E_TX_DESC_CMD_ICRC 3719 | I40E_TXD_CMD, 3720 0, size, 0); 3721 3722 /* Make certain all of the status bits have been updated 3723 * before next_to_watch is written. 
/**
 * i40e_xmit_xdp_ring - transmits an XDP buffer to an XDP Tx ring
 * @xdpf: data to transmit
 * @xdp_ring: XDP Tx ring
 **/
static int i40e_xmit_xdp_ring(struct xdp_frame *xdpf,
			      struct i40e_ring *xdp_ring)
{
	u16 i = xdp_ring->next_to_use;
	struct i40e_tx_buffer *tx_bi;
	struct i40e_tx_desc *tx_desc;
	void *data = xdpf->data;
	u32 size = xdpf->len;
	dma_addr_t dma;

	if (!unlikely(I40E_DESC_UNUSED(xdp_ring))) {
		xdp_ring->tx_stats.tx_busy++;
		return I40E_XDP_CONSUMED;
	}
	dma = dma_map_single(xdp_ring->dev, data, size, DMA_TO_DEVICE);
	if (dma_mapping_error(xdp_ring->dev, dma))
		return I40E_XDP_CONSUMED;

	tx_bi = &xdp_ring->tx_bi[i];
	tx_bi->bytecount = size;
	tx_bi->gso_segs = 1;
	tx_bi->xdpf = xdpf;

	/* record length, and DMA address */
	dma_unmap_len_set(tx_bi, len, size);
	dma_unmap_addr_set(tx_bi, dma, dma);

	tx_desc = I40E_TX_DESC(xdp_ring, i);
	tx_desc->buffer_addr = cpu_to_le64(dma);
	tx_desc->cmd_type_offset_bsz = build_ctob(I40E_TX_DESC_CMD_ICRC
						  | I40E_TXD_CMD,
						  0, size, 0);

	/* Make certain all of the status bits have been updated
	 * before next_to_watch is written.
	 */
	smp_wmb();

	xdp_ring->xdp_tx_active++;
	i++;
	if (i == xdp_ring->count)
		i = 0;

	tx_bi->next_to_watch = tx_desc;
	xdp_ring->next_to_use = i;

	return I40E_XDP_TX;
}

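/* Note that, unlike the skb path in i40e_tx_map(), no tail bump is done
 * here.  The doorbell write is deferred to i40e_xdp_ring_update_tail(),
 * which runs either from the Rx clean-up path or from i40e_xdp_xmit()
 * below when XDP_XMIT_FLUSH is set, so a whole batch of frames costs a
 * single MMIO write.
 */
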
3831 */ 3832 i40e_atr(tx_ring, skb, tx_flags); 3833 3834 if (i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len, 3835 td_cmd, td_offset)) 3836 goto cleanup_tx_tstamp; 3837 3838 return NETDEV_TX_OK; 3839 3840 out_drop: 3841 i40e_trace(xmit_frame_ring_drop, first->skb, tx_ring); 3842 dev_kfree_skb_any(first->skb); 3843 first->skb = NULL; 3844 cleanup_tx_tstamp: 3845 if (unlikely(tx_flags & I40E_TX_FLAGS_TSYN)) { 3846 struct i40e_pf *pf = i40e_netdev_to_pf(tx_ring->netdev); 3847 3848 dev_kfree_skb_any(pf->ptp_tx_skb); 3849 pf->ptp_tx_skb = NULL; 3850 clear_bit_unlock(__I40E_PTP_TX_IN_PROGRESS, pf->state); 3851 } 3852 3853 return NETDEV_TX_OK; 3854 } 3855 3856 /** 3857 * i40e_lan_xmit_frame - Selects the correct VSI and Tx queue to send buffer 3858 * @skb: send buffer 3859 * @netdev: network interface device structure 3860 * 3861 * Returns NETDEV_TX_OK if sent, else an error code 3862 **/ 3863 netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev) 3864 { 3865 struct i40e_netdev_priv *np = netdev_priv(netdev); 3866 struct i40e_vsi *vsi = np->vsi; 3867 struct i40e_ring *tx_ring = vsi->tx_rings[skb->queue_mapping]; 3868 3869 /* hardware can't handle really short frames, hardware padding works 3870 * beyond this point 3871 */ 3872 if (skb_put_padto(skb, I40E_MIN_TX_LEN)) 3873 return NETDEV_TX_OK; 3874 3875 return i40e_xmit_frame_ring(skb, tx_ring); 3876 } 3877 3878 /** 3879 * i40e_xdp_xmit - Implements ndo_xdp_xmit 3880 * @dev: netdev 3881 * @n: number of frames 3882 * @frames: array of XDP buffer pointers 3883 * @flags: XDP extra info 3884 * 3885 * Returns number of frames successfully sent. Failed frames 3886 * will be free'ed by XDP core. 3887 * 3888 * For error cases, a negative errno code is returned and no-frames 3889 * are transmitted (caller must handle freeing frames). 3890 **/ 3891 int i40e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, 3892 u32 flags) 3893 { 3894 struct i40e_netdev_priv *np = netdev_priv(dev); 3895 unsigned int queue_index = smp_processor_id(); 3896 struct i40e_vsi *vsi = np->vsi; 3897 struct i40e_pf *pf = vsi->back; 3898 struct i40e_ring *xdp_ring; 3899 int nxmit = 0; 3900 int i; 3901 3902 if (test_bit(__I40E_VSI_DOWN, vsi->state)) 3903 return -ENETDOWN; 3904 3905 if (!i40e_enabled_xdp_vsi(vsi) || queue_index >= vsi->num_queue_pairs || 3906 test_bit(__I40E_CONFIG_BUSY, pf->state)) 3907 return -ENXIO; 3908 3909 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) 3910 return -EINVAL; 3911 3912 xdp_ring = vsi->xdp_rings[queue_index]; 3913 3914 for (i = 0; i < n; i++) { 3915 struct xdp_frame *xdpf = frames[i]; 3916 int err; 3917 3918 err = i40e_xmit_xdp_ring(xdpf, xdp_ring); 3919 if (err != I40E_XDP_TX) 3920 break; 3921 nxmit++; 3922 } 3923 3924 if (unlikely(flags & XDP_XMIT_FLUSH)) 3925 i40e_xdp_ring_update_tail(xdp_ring); 3926 3927 return nxmit; 3928 } 3929