1 // SPDX-License-Identifier: GPL-2.0 2 /* Copyright(c) 2013 - 2018 Intel Corporation. */ 3 4 #include <linux/prefetch.h> 5 #include <linux/bpf_trace.h> 6 #include <net/mpls.h> 7 #include <net/xdp.h> 8 #include "i40e.h" 9 #include "i40e_trace.h" 10 #include "i40e_prototype.h" 11 #include "i40e_txrx_common.h" 12 #include "i40e_xsk.h" 13 14 #define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS) 15 /** 16 * i40e_fdir - Generate a Flow Director descriptor based on fdata 17 * @tx_ring: Tx ring to send buffer on 18 * @fdata: Flow director filter data 19 * @add: Indicate if we are adding a rule or deleting one 20 * 21 **/ 22 static void i40e_fdir(struct i40e_ring *tx_ring, 23 struct i40e_fdir_filter *fdata, bool add) 24 { 25 struct i40e_filter_program_desc *fdir_desc; 26 struct i40e_pf *pf = tx_ring->vsi->back; 27 u32 flex_ptype, dtype_cmd; 28 u16 i; 29 30 /* grab the next descriptor */ 31 i = tx_ring->next_to_use; 32 fdir_desc = I40E_TX_FDIRDESC(tx_ring, i); 33 34 i++; 35 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; 36 37 flex_ptype = I40E_TXD_FLTR_QW0_QINDEX_MASK & 38 (fdata->q_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT); 39 40 flex_ptype |= I40E_TXD_FLTR_QW0_FLEXOFF_MASK & 41 (fdata->flex_off << I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT); 42 43 flex_ptype |= I40E_TXD_FLTR_QW0_PCTYPE_MASK & 44 (fdata->pctype << I40E_TXD_FLTR_QW0_PCTYPE_SHIFT); 45 46 /* Use LAN VSI Id if not programmed by user */ 47 flex_ptype |= I40E_TXD_FLTR_QW0_DEST_VSI_MASK & 48 ((u32)(fdata->dest_vsi ? : pf->vsi[pf->lan_vsi]->id) << 49 I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT); 50 51 dtype_cmd = I40E_TX_DESC_DTYPE_FILTER_PROG; 52 53 dtype_cmd |= add ? 54 I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE << 55 I40E_TXD_FLTR_QW1_PCMD_SHIFT : 56 I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE << 57 I40E_TXD_FLTR_QW1_PCMD_SHIFT; 58 59 dtype_cmd |= I40E_TXD_FLTR_QW1_DEST_MASK & 60 (fdata->dest_ctl << I40E_TXD_FLTR_QW1_DEST_SHIFT); 61 62 dtype_cmd |= I40E_TXD_FLTR_QW1_FD_STATUS_MASK & 63 (fdata->fd_status << I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT); 64 65 if (fdata->cnt_index) { 66 dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK; 67 dtype_cmd |= I40E_TXD_FLTR_QW1_CNTINDEX_MASK & 68 ((u32)fdata->cnt_index << 69 I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT); 70 } 71 72 fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype); 73 fdir_desc->rsvd = cpu_to_le32(0); 74 fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd); 75 fdir_desc->fd_id = cpu_to_le32(fdata->fd_id); 76 } 77 78 #define I40E_FD_CLEAN_DELAY 10 79 /** 80 * i40e_program_fdir_filter - Program a Flow Director filter 81 * @fdir_data: Packet data that will be filter parameters 82 * @raw_packet: the pre-allocated packet buffer for FDir 83 * @pf: The PF pointer 84 * @add: True for add/update, False for remove 85 **/ 86 static int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data, 87 u8 *raw_packet, struct i40e_pf *pf, 88 bool add) 89 { 90 struct i40e_tx_buffer *tx_buf, *first; 91 struct i40e_tx_desc *tx_desc; 92 struct i40e_ring *tx_ring; 93 struct i40e_vsi *vsi; 94 struct device *dev; 95 dma_addr_t dma; 96 u32 td_cmd = 0; 97 u16 i; 98 99 /* find existing FDIR VSI */ 100 vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR); 101 if (!vsi) 102 return -ENOENT; 103 104 tx_ring = vsi->tx_rings[0]; 105 dev = tx_ring->dev; 106 107 /* we need two descriptors to add/del a filter and we can wait */ 108 for (i = I40E_FD_CLEAN_DELAY; I40E_DESC_UNUSED(tx_ring) < 2; i--) { 109 if (!i) 110 return -EAGAIN; 111 msleep_interruptible(1); 112 } 113 114 dma = dma_map_single(dev, raw_packet, 115 I40E_FDIR_MAX_RAW_PACKET_SIZE, 
DMA_TO_DEVICE); 116 if (dma_mapping_error(dev, dma)) 117 goto dma_fail; 118 119 /* grab the next descriptor */ 120 i = tx_ring->next_to_use; 121 first = &tx_ring->tx_bi[i]; 122 i40e_fdir(tx_ring, fdir_data, add); 123 124 /* Now program a dummy descriptor */ 125 i = tx_ring->next_to_use; 126 tx_desc = I40E_TX_DESC(tx_ring, i); 127 tx_buf = &tx_ring->tx_bi[i]; 128 129 tx_ring->next_to_use = ((i + 1) < tx_ring->count) ? i + 1 : 0; 130 131 memset(tx_buf, 0, sizeof(struct i40e_tx_buffer)); 132 133 /* record length, and DMA address */ 134 dma_unmap_len_set(tx_buf, len, I40E_FDIR_MAX_RAW_PACKET_SIZE); 135 dma_unmap_addr_set(tx_buf, dma, dma); 136 137 tx_desc->buffer_addr = cpu_to_le64(dma); 138 td_cmd = I40E_TXD_CMD | I40E_TX_DESC_CMD_DUMMY; 139 140 tx_buf->tx_flags = I40E_TX_FLAGS_FD_SB; 141 tx_buf->raw_buf = (void *)raw_packet; 142 143 tx_desc->cmd_type_offset_bsz = 144 build_ctob(td_cmd, 0, I40E_FDIR_MAX_RAW_PACKET_SIZE, 0); 145 146 /* Force memory writes to complete before letting h/w 147 * know there are new descriptors to fetch. 148 */ 149 wmb(); 150 151 /* Mark the data descriptor to be watched */ 152 first->next_to_watch = tx_desc; 153 154 writel(tx_ring->next_to_use, tx_ring->tail); 155 return 0; 156 157 dma_fail: 158 return -1; 159 } 160 161 /** 162 * i40e_create_dummy_packet - Constructs dummy packet for HW 163 * @dummy_packet: preallocated space for dummy packet 164 * @ipv4: is layer 3 packet of version 4 or 6 165 * @l4proto: next level protocol used in data portion of l3 166 * @data: filter data 167 * 168 * Returns address of layer 4 protocol dummy packet. 169 **/ 170 static char *i40e_create_dummy_packet(u8 *dummy_packet, bool ipv4, u8 l4proto, 171 struct i40e_fdir_filter *data) 172 { 173 bool is_vlan = !!data->vlan_tag; 174 struct vlan_hdr vlan; 175 struct ipv6hdr ipv6; 176 struct ethhdr eth; 177 struct iphdr ip; 178 u8 *tmp; 179 180 if (ipv4) { 181 eth.h_proto = cpu_to_be16(ETH_P_IP); 182 ip.protocol = l4proto; 183 ip.version = 0x4; 184 ip.ihl = 0x5; 185 186 ip.daddr = data->dst_ip; 187 ip.saddr = data->src_ip; 188 } else { 189 eth.h_proto = cpu_to_be16(ETH_P_IPV6); 190 ipv6.nexthdr = l4proto; 191 ipv6.version = 0x6; 192 193 memcpy(&ipv6.saddr.in6_u.u6_addr32, data->src_ip6, 194 sizeof(__be32) * 4); 195 memcpy(&ipv6.daddr.in6_u.u6_addr32, data->dst_ip6, 196 sizeof(__be32) * 4); 197 } 198 199 if (is_vlan) { 200 vlan.h_vlan_TCI = data->vlan_tag; 201 vlan.h_vlan_encapsulated_proto = eth.h_proto; 202 eth.h_proto = data->vlan_etype; 203 } 204 205 tmp = dummy_packet; 206 memcpy(tmp, ð, sizeof(eth)); 207 tmp += sizeof(eth); 208 209 if (is_vlan) { 210 memcpy(tmp, &vlan, sizeof(vlan)); 211 tmp += sizeof(vlan); 212 } 213 214 if (ipv4) { 215 memcpy(tmp, &ip, sizeof(ip)); 216 tmp += sizeof(ip); 217 } else { 218 memcpy(tmp, &ipv6, sizeof(ipv6)); 219 tmp += sizeof(ipv6); 220 } 221 222 return tmp; 223 } 224 225 /** 226 * i40e_create_dummy_udp_packet - helper function to create UDP packet 227 * @raw_packet: preallocated space for dummy packet 228 * @ipv4: is layer 3 packet of version 4 or 6 229 * @l4proto: next level protocol used in data portion of l3 230 * @data: filter data 231 * 232 * Helper function to populate udp fields. 
233 **/ 234 static void i40e_create_dummy_udp_packet(u8 *raw_packet, bool ipv4, u8 l4proto, 235 struct i40e_fdir_filter *data) 236 { 237 struct udphdr *udp; 238 u8 *tmp; 239 240 tmp = i40e_create_dummy_packet(raw_packet, ipv4, IPPROTO_UDP, data); 241 udp = (struct udphdr *)(tmp); 242 udp->dest = data->dst_port; 243 udp->source = data->src_port; 244 } 245 246 /** 247 * i40e_create_dummy_tcp_packet - helper function to create TCP packet 248 * @raw_packet: preallocated space for dummy packet 249 * @ipv4: is layer 3 packet of version 4 or 6 250 * @l4proto: next level protocol used in data portion of l3 251 * @data: filter data 252 * 253 * Helper function to populate tcp fields. 254 **/ 255 static void i40e_create_dummy_tcp_packet(u8 *raw_packet, bool ipv4, u8 l4proto, 256 struct i40e_fdir_filter *data) 257 { 258 struct tcphdr *tcp; 259 u8 *tmp; 260 /* Dummy tcp packet */ 261 static const char tcp_packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 262 0x50, 0x11, 0x0, 0x72, 0, 0, 0, 0}; 263 264 tmp = i40e_create_dummy_packet(raw_packet, ipv4, IPPROTO_TCP, data); 265 266 tcp = (struct tcphdr *)tmp; 267 memcpy(tcp, tcp_packet, sizeof(tcp_packet)); 268 tcp->dest = data->dst_port; 269 tcp->source = data->src_port; 270 } 271 272 /** 273 * i40e_create_dummy_sctp_packet - helper function to create SCTP packet 274 * @raw_packet: preallocated space for dummy packet 275 * @ipv4: is layer 3 packet of version 4 or 6 276 * @l4proto: next level protocol used in data portion of l3 277 * @data: filter data 278 * 279 * Helper function to populate sctp fields. 280 **/ 281 static void i40e_create_dummy_sctp_packet(u8 *raw_packet, bool ipv4, 282 u8 l4proto, 283 struct i40e_fdir_filter *data) 284 { 285 struct sctphdr *sctp; 286 u8 *tmp; 287 288 tmp = i40e_create_dummy_packet(raw_packet, ipv4, IPPROTO_SCTP, data); 289 290 sctp = (struct sctphdr *)tmp; 291 sctp->dest = data->dst_port; 292 sctp->source = data->src_port; 293 } 294 295 /** 296 * i40e_prepare_fdir_filter - Prepare and program fdir filter 297 * @pf: physical function to attach filter to 298 * @fd_data: filter data 299 * @add: add or delete filter 300 * @packet_addr: address of dummy packet, used in filtering 301 * @payload_offset: offset from dummy packet address to user defined data 302 * @pctype: Packet type for which filter is used 303 * 304 * Helper function to offset data of dummy packet, program it and 305 * handle errors. 
306 **/ 307 static int i40e_prepare_fdir_filter(struct i40e_pf *pf, 308 struct i40e_fdir_filter *fd_data, 309 bool add, char *packet_addr, 310 int payload_offset, u8 pctype) 311 { 312 int ret; 313 314 if (fd_data->flex_filter) { 315 u8 *payload; 316 __be16 pattern = fd_data->flex_word; 317 u16 off = fd_data->flex_offset; 318 319 payload = packet_addr + payload_offset; 320 321 /* If user provided vlan, offset payload by vlan header length */ 322 if (!!fd_data->vlan_tag) 323 payload += VLAN_HLEN; 324 325 *((__force __be16 *)(payload + off)) = pattern; 326 } 327 328 fd_data->pctype = pctype; 329 ret = i40e_program_fdir_filter(fd_data, packet_addr, pf, add); 330 if (ret) { 331 dev_info(&pf->pdev->dev, 332 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n", 333 fd_data->pctype, fd_data->fd_id, ret); 334 /* Free the packet buffer since it wasn't added to the ring */ 335 return -EOPNOTSUPP; 336 } else if (I40E_DEBUG_FD & pf->hw.debug_mask) { 337 if (add) 338 dev_info(&pf->pdev->dev, 339 "Filter OK for PCTYPE %d loc = %d\n", 340 fd_data->pctype, fd_data->fd_id); 341 else 342 dev_info(&pf->pdev->dev, 343 "Filter deleted for PCTYPE %d loc = %d\n", 344 fd_data->pctype, fd_data->fd_id); 345 } 346 347 return ret; 348 } 349 350 /** 351 * i40e_change_filter_num - Prepare and program fdir filter 352 * @ipv4: is layer 3 packet of version 4 or 6 353 * @add: add or delete filter 354 * @ipv4_filter_num: field to update 355 * @ipv6_filter_num: field to update 356 * 357 * Update filter number field for pf. 358 **/ 359 static void i40e_change_filter_num(bool ipv4, bool add, u16 *ipv4_filter_num, 360 u16 *ipv6_filter_num) 361 { 362 if (add) { 363 if (ipv4) 364 (*ipv4_filter_num)++; 365 else 366 (*ipv6_filter_num)++; 367 } else { 368 if (ipv4) 369 (*ipv4_filter_num)--; 370 else 371 (*ipv6_filter_num)--; 372 } 373 } 374 375 #define I40E_UDPIP_DUMMY_PACKET_LEN 42 376 #define I40E_UDPIP6_DUMMY_PACKET_LEN 62 377 /** 378 * i40e_add_del_fdir_udp - Add/Remove UDP filters 379 * @vsi: pointer to the targeted VSI 380 * @fd_data: the flow director data required for the FDir descriptor 381 * @add: true adds a filter, false removes it 382 * @ipv4: true is v4, false is v6 383 * 384 * Returns 0 if the filters were successfully added or removed 385 **/ 386 static int i40e_add_del_fdir_udp(struct i40e_vsi *vsi, 387 struct i40e_fdir_filter *fd_data, 388 bool add, 389 bool ipv4) 390 { 391 struct i40e_pf *pf = vsi->back; 392 u8 *raw_packet; 393 int ret; 394 395 raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL); 396 if (!raw_packet) 397 return -ENOMEM; 398 399 i40e_create_dummy_udp_packet(raw_packet, ipv4, IPPROTO_UDP, fd_data); 400 401 if (ipv4) 402 ret = i40e_prepare_fdir_filter 403 (pf, fd_data, add, raw_packet, 404 I40E_UDPIP_DUMMY_PACKET_LEN, 405 I40E_FILTER_PCTYPE_NONF_IPV4_UDP); 406 else 407 ret = i40e_prepare_fdir_filter 408 (pf, fd_data, add, raw_packet, 409 I40E_UDPIP6_DUMMY_PACKET_LEN, 410 I40E_FILTER_PCTYPE_NONF_IPV6_UDP); 411 412 if (ret) { 413 kfree(raw_packet); 414 return ret; 415 } 416 417 i40e_change_filter_num(ipv4, add, &pf->fd_udp4_filter_cnt, 418 &pf->fd_udp6_filter_cnt); 419 420 return 0; 421 } 422 423 #define I40E_TCPIP_DUMMY_PACKET_LEN 54 424 #define I40E_TCPIP6_DUMMY_PACKET_LEN 74 425 /** 426 * i40e_add_del_fdir_tcp - Add/Remove TCPv4 filters 427 * @vsi: pointer to the targeted VSI 428 * @fd_data: the flow director data required for the FDir descriptor 429 * @add: true adds a filter, false removes it 430 * @ipv4: true is v4, false is v6 431 * 432 * Returns 0 if the filters were 
successfully added or removed 433 **/ 434 static int i40e_add_del_fdir_tcp(struct i40e_vsi *vsi, 435 struct i40e_fdir_filter *fd_data, 436 bool add, 437 bool ipv4) 438 { 439 struct i40e_pf *pf = vsi->back; 440 u8 *raw_packet; 441 int ret; 442 443 raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL); 444 if (!raw_packet) 445 return -ENOMEM; 446 447 i40e_create_dummy_tcp_packet(raw_packet, ipv4, IPPROTO_TCP, fd_data); 448 if (ipv4) 449 ret = i40e_prepare_fdir_filter 450 (pf, fd_data, add, raw_packet, 451 I40E_TCPIP_DUMMY_PACKET_LEN, 452 I40E_FILTER_PCTYPE_NONF_IPV4_TCP); 453 else 454 ret = i40e_prepare_fdir_filter 455 (pf, fd_data, add, raw_packet, 456 I40E_TCPIP6_DUMMY_PACKET_LEN, 457 I40E_FILTER_PCTYPE_NONF_IPV6_TCP); 458 459 if (ret) { 460 kfree(raw_packet); 461 return ret; 462 } 463 464 i40e_change_filter_num(ipv4, add, &pf->fd_tcp4_filter_cnt, 465 &pf->fd_tcp6_filter_cnt); 466 467 if (add) { 468 if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) && 469 I40E_DEBUG_FD & pf->hw.debug_mask) 470 dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n"); 471 set_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state); 472 } 473 return 0; 474 } 475 476 #define I40E_SCTPIP_DUMMY_PACKET_LEN 46 477 #define I40E_SCTPIP6_DUMMY_PACKET_LEN 66 478 /** 479 * i40e_add_del_fdir_sctp - Add/Remove SCTPv4 Flow Director filters for 480 * a specific flow spec 481 * @vsi: pointer to the targeted VSI 482 * @fd_data: the flow director data required for the FDir descriptor 483 * @add: true adds a filter, false removes it 484 * @ipv4: true is v4, false is v6 485 * 486 * Returns 0 if the filters were successfully added or removed 487 **/ 488 static int i40e_add_del_fdir_sctp(struct i40e_vsi *vsi, 489 struct i40e_fdir_filter *fd_data, 490 bool add, 491 bool ipv4) 492 { 493 struct i40e_pf *pf = vsi->back; 494 u8 *raw_packet; 495 int ret; 496 497 raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL); 498 if (!raw_packet) 499 return -ENOMEM; 500 501 i40e_create_dummy_sctp_packet(raw_packet, ipv4, IPPROTO_SCTP, fd_data); 502 503 if (ipv4) 504 ret = i40e_prepare_fdir_filter 505 (pf, fd_data, add, raw_packet, 506 I40E_SCTPIP_DUMMY_PACKET_LEN, 507 I40E_FILTER_PCTYPE_NONF_IPV4_SCTP); 508 else 509 ret = i40e_prepare_fdir_filter 510 (pf, fd_data, add, raw_packet, 511 I40E_SCTPIP6_DUMMY_PACKET_LEN, 512 I40E_FILTER_PCTYPE_NONF_IPV6_SCTP); 513 514 if (ret) { 515 kfree(raw_packet); 516 return ret; 517 } 518 519 i40e_change_filter_num(ipv4, add, &pf->fd_sctp4_filter_cnt, 520 &pf->fd_sctp6_filter_cnt); 521 522 return 0; 523 } 524 525 #define I40E_IP_DUMMY_PACKET_LEN 34 526 #define I40E_IP6_DUMMY_PACKET_LEN 54 527 /** 528 * i40e_add_del_fdir_ip - Add/Remove IPv4 Flow Director filters for 529 * a specific flow spec 530 * @vsi: pointer to the targeted VSI 531 * @fd_data: the flow director data required for the FDir descriptor 532 * @add: true adds a filter, false removes it 533 * @ipv4: true is v4, false is v6 534 * 535 * Returns 0 if the filters were successfully added or removed 536 **/ 537 static int i40e_add_del_fdir_ip(struct i40e_vsi *vsi, 538 struct i40e_fdir_filter *fd_data, 539 bool add, 540 bool ipv4) 541 { 542 struct i40e_pf *pf = vsi->back; 543 int payload_offset; 544 u8 *raw_packet; 545 int iter_start; 546 int iter_end; 547 int ret; 548 int i; 549 550 if (ipv4) { 551 iter_start = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER; 552 iter_end = I40E_FILTER_PCTYPE_FRAG_IPV4; 553 } else { 554 iter_start = I40E_FILTER_PCTYPE_NONF_IPV6_OTHER; 555 iter_end = I40E_FILTER_PCTYPE_FRAG_IPV6; 556 } 557 558 for 
(i = iter_start; i <= iter_end; i++) { 559 raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL); 560 if (!raw_packet) 561 return -ENOMEM; 562 563 /* IPv6 no header option differs from IPv4 */ 564 (void)i40e_create_dummy_packet 565 (raw_packet, ipv4, (ipv4) ? IPPROTO_IP : IPPROTO_NONE, 566 fd_data); 567 568 payload_offset = (ipv4) ? I40E_IP_DUMMY_PACKET_LEN : 569 I40E_IP6_DUMMY_PACKET_LEN; 570 ret = i40e_prepare_fdir_filter(pf, fd_data, add, raw_packet, 571 payload_offset, i); 572 if (ret) 573 goto err; 574 } 575 576 i40e_change_filter_num(ipv4, add, &pf->fd_ip4_filter_cnt, 577 &pf->fd_ip6_filter_cnt); 578 579 return 0; 580 err: 581 kfree(raw_packet); 582 return ret; 583 } 584 585 /** 586 * i40e_add_del_fdir - Build raw packets to add/del fdir filter 587 * @vsi: pointer to the targeted VSI 588 * @input: filter to add or delete 589 * @add: true adds a filter, false removes it 590 * 591 **/ 592 int i40e_add_del_fdir(struct i40e_vsi *vsi, 593 struct i40e_fdir_filter *input, bool add) 594 { 595 enum ip_ver { ipv6 = 0, ipv4 = 1 }; 596 struct i40e_pf *pf = vsi->back; 597 int ret; 598 599 switch (input->flow_type & ~FLOW_EXT) { 600 case TCP_V4_FLOW: 601 ret = i40e_add_del_fdir_tcp(vsi, input, add, ipv4); 602 break; 603 case UDP_V4_FLOW: 604 ret = i40e_add_del_fdir_udp(vsi, input, add, ipv4); 605 break; 606 case SCTP_V4_FLOW: 607 ret = i40e_add_del_fdir_sctp(vsi, input, add, ipv4); 608 break; 609 case TCP_V6_FLOW: 610 ret = i40e_add_del_fdir_tcp(vsi, input, add, ipv6); 611 break; 612 case UDP_V6_FLOW: 613 ret = i40e_add_del_fdir_udp(vsi, input, add, ipv6); 614 break; 615 case SCTP_V6_FLOW: 616 ret = i40e_add_del_fdir_sctp(vsi, input, add, ipv6); 617 break; 618 case IP_USER_FLOW: 619 switch (input->ipl4_proto) { 620 case IPPROTO_TCP: 621 ret = i40e_add_del_fdir_tcp(vsi, input, add, ipv4); 622 break; 623 case IPPROTO_UDP: 624 ret = i40e_add_del_fdir_udp(vsi, input, add, ipv4); 625 break; 626 case IPPROTO_SCTP: 627 ret = i40e_add_del_fdir_sctp(vsi, input, add, ipv4); 628 break; 629 case IPPROTO_IP: 630 ret = i40e_add_del_fdir_ip(vsi, input, add, ipv4); 631 break; 632 default: 633 /* We cannot support masking based on protocol */ 634 dev_info(&pf->pdev->dev, "Unsupported IPv4 protocol 0x%02x\n", 635 input->ipl4_proto); 636 return -EINVAL; 637 } 638 break; 639 case IPV6_USER_FLOW: 640 switch (input->ipl4_proto) { 641 case IPPROTO_TCP: 642 ret = i40e_add_del_fdir_tcp(vsi, input, add, ipv6); 643 break; 644 case IPPROTO_UDP: 645 ret = i40e_add_del_fdir_udp(vsi, input, add, ipv6); 646 break; 647 case IPPROTO_SCTP: 648 ret = i40e_add_del_fdir_sctp(vsi, input, add, ipv6); 649 break; 650 case IPPROTO_IP: 651 ret = i40e_add_del_fdir_ip(vsi, input, add, ipv6); 652 break; 653 default: 654 /* We cannot support masking based on protocol */ 655 dev_info(&pf->pdev->dev, "Unsupported IPv6 protocol 0x%02x\n", 656 input->ipl4_proto); 657 return -EINVAL; 658 } 659 break; 660 default: 661 dev_info(&pf->pdev->dev, "Unsupported flow type 0x%02x\n", 662 input->flow_type); 663 return -EINVAL; 664 } 665 666 /* The buffer allocated here will be normally be freed by 667 * i40e_clean_fdir_tx_irq() as it reclaims resources after transmit 668 * completion. In the event of an error adding the buffer to the FDIR 669 * ring, it will immediately be freed. It may also be freed by 670 * i40e_clean_tx_ring() when closing the VSI. 
671 */ 672 return ret; 673 } 674 675 /** 676 * i40e_fd_handle_status - check the Programming Status for FD 677 * @rx_ring: the Rx ring for this descriptor 678 * @qword0_raw: qword0 679 * @qword1: qword1 after le_to_cpu 680 * @prog_id: the id originally used for programming 681 * 682 * This is used to verify if the FD programming or invalidation 683 * requested by SW to the HW is successful or not and take actions accordingly. 684 **/ 685 static void i40e_fd_handle_status(struct i40e_ring *rx_ring, u64 qword0_raw, 686 u64 qword1, u8 prog_id) 687 { 688 struct i40e_pf *pf = rx_ring->vsi->back; 689 struct pci_dev *pdev = pf->pdev; 690 struct i40e_16b_rx_wb_qw0 *qw0; 691 u32 fcnt_prog, fcnt_avail; 692 u32 error; 693 694 qw0 = (struct i40e_16b_rx_wb_qw0 *)&qword0_raw; 695 error = (qword1 & I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK) >> 696 I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT; 697 698 if (error == BIT(I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) { 699 pf->fd_inv = le32_to_cpu(qw0->hi_dword.fd_id); 700 if (qw0->hi_dword.fd_id != 0 || 701 (I40E_DEBUG_FD & pf->hw.debug_mask)) 702 dev_warn(&pdev->dev, "ntuple filter loc = %d, could not be added\n", 703 pf->fd_inv); 704 705 /* Check if the programming error is for ATR. 706 * If so, auto disable ATR and set a state for 707 * flush in progress. Next time we come here if flush is in 708 * progress do nothing, once flush is complete the state will 709 * be cleared. 710 */ 711 if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state)) 712 return; 713 714 pf->fd_add_err++; 715 /* store the current atr filter count */ 716 pf->fd_atr_cnt = i40e_get_current_atr_cnt(pf); 717 718 if (qw0->hi_dword.fd_id == 0 && 719 test_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state)) { 720 /* These set_bit() calls aren't atomic with the 721 * test_bit() here, but worse case we potentially 722 * disable ATR and queue a flush right after SB 723 * support is re-enabled. That shouldn't cause an 724 * issue in practice 725 */ 726 set_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state); 727 set_bit(__I40E_FD_FLUSH_REQUESTED, pf->state); 728 } 729 730 /* filter programming failed most likely due to table full */ 731 fcnt_prog = i40e_get_global_fd_count(pf); 732 fcnt_avail = pf->fdir_pf_filter_count; 733 /* If ATR is running fcnt_prog can quickly change, 734 * if we are very close to full, it makes sense to disable 735 * FD ATR/SB and then re-enable it when there is room. 
736 */ 737 if (fcnt_prog >= (fcnt_avail - I40E_FDIR_BUFFER_FULL_MARGIN)) { 738 if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) && 739 !test_and_set_bit(__I40E_FD_SB_AUTO_DISABLED, 740 pf->state)) 741 if (I40E_DEBUG_FD & pf->hw.debug_mask) 742 dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n"); 743 } 744 } else if (error == BIT(I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) { 745 if (I40E_DEBUG_FD & pf->hw.debug_mask) 746 dev_info(&pdev->dev, "ntuple filter fd_id = %d, could not be removed\n", 747 qw0->hi_dword.fd_id); 748 } 749 } 750 751 /** 752 * i40e_unmap_and_free_tx_resource - Release a Tx buffer 753 * @ring: the ring that owns the buffer 754 * @tx_buffer: the buffer to free 755 **/ 756 static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring, 757 struct i40e_tx_buffer *tx_buffer) 758 { 759 if (tx_buffer->skb) { 760 if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB) 761 kfree(tx_buffer->raw_buf); 762 else if (ring_is_xdp(ring)) 763 xdp_return_frame(tx_buffer->xdpf); 764 else 765 dev_kfree_skb_any(tx_buffer->skb); 766 if (dma_unmap_len(tx_buffer, len)) 767 dma_unmap_single(ring->dev, 768 dma_unmap_addr(tx_buffer, dma), 769 dma_unmap_len(tx_buffer, len), 770 DMA_TO_DEVICE); 771 } else if (dma_unmap_len(tx_buffer, len)) { 772 dma_unmap_page(ring->dev, 773 dma_unmap_addr(tx_buffer, dma), 774 dma_unmap_len(tx_buffer, len), 775 DMA_TO_DEVICE); 776 } 777 778 tx_buffer->next_to_watch = NULL; 779 tx_buffer->skb = NULL; 780 dma_unmap_len_set(tx_buffer, len, 0); 781 /* tx_buffer must be completely set up in the transmit path */ 782 } 783 784 /** 785 * i40e_clean_tx_ring - Free any empty Tx buffers 786 * @tx_ring: ring to be cleaned 787 **/ 788 void i40e_clean_tx_ring(struct i40e_ring *tx_ring) 789 { 790 unsigned long bi_size; 791 u16 i; 792 793 if (ring_is_xdp(tx_ring) && tx_ring->xsk_pool) { 794 i40e_xsk_clean_tx_ring(tx_ring); 795 } else { 796 /* ring already cleared, nothing to do */ 797 if (!tx_ring->tx_bi) 798 return; 799 800 /* Free all the Tx ring sk_buffs */ 801 for (i = 0; i < tx_ring->count; i++) 802 i40e_unmap_and_free_tx_resource(tx_ring, 803 &tx_ring->tx_bi[i]); 804 } 805 806 bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count; 807 memset(tx_ring->tx_bi, 0, bi_size); 808 809 /* Zero out the descriptor ring */ 810 memset(tx_ring->desc, 0, tx_ring->size); 811 812 tx_ring->next_to_use = 0; 813 tx_ring->next_to_clean = 0; 814 815 if (!tx_ring->netdev) 816 return; 817 818 /* cleanup Tx queue statistics */ 819 netdev_tx_reset_queue(txring_txq(tx_ring)); 820 } 821 822 /** 823 * i40e_free_tx_resources - Free Tx resources per queue 824 * @tx_ring: Tx descriptor ring for a specific queue 825 * 826 * Free all transmit software resources 827 **/ 828 void i40e_free_tx_resources(struct i40e_ring *tx_ring) 829 { 830 i40e_clean_tx_ring(tx_ring); 831 kfree(tx_ring->tx_bi); 832 tx_ring->tx_bi = NULL; 833 834 if (tx_ring->desc) { 835 dma_free_coherent(tx_ring->dev, tx_ring->size, 836 tx_ring->desc, tx_ring->dma); 837 tx_ring->desc = NULL; 838 } 839 } 840 841 /** 842 * i40e_get_tx_pending - how many tx descriptors not processed 843 * @ring: the ring of descriptors 844 * @in_sw: use SW variables 845 * 846 * Since there is no access to the ring head register 847 * in XL710, we need to use our local copies 848 **/ 849 u32 i40e_get_tx_pending(struct i40e_ring *ring, bool in_sw) 850 { 851 u32 head, tail; 852 853 if (!in_sw) { 854 head = i40e_get_head(ring); 855 tail = readl(ring->tail); 856 } else { 857 head = ring->next_to_clean; 858 tail = ring->next_to_use; 859 } 860 861 
if (head != tail) 862 return (head < tail) ? 863 tail - head : (tail + ring->count - head); 864 865 return 0; 866 } 867 868 /** 869 * i40e_detect_recover_hung - Function to detect and recover hung_queues 870 * @vsi: pointer to vsi struct with tx queues 871 * 872 * VSI has netdev and netdev has TX queues. This function is to check each of 873 * those TX queues if they are hung, trigger recovery by issuing SW interrupt. 874 **/ 875 void i40e_detect_recover_hung(struct i40e_vsi *vsi) 876 { 877 struct i40e_ring *tx_ring = NULL; 878 struct net_device *netdev; 879 unsigned int i; 880 int packets; 881 882 if (!vsi) 883 return; 884 885 if (test_bit(__I40E_VSI_DOWN, vsi->state)) 886 return; 887 888 netdev = vsi->netdev; 889 if (!netdev) 890 return; 891 892 if (!netif_carrier_ok(netdev)) 893 return; 894 895 for (i = 0; i < vsi->num_queue_pairs; i++) { 896 tx_ring = vsi->tx_rings[i]; 897 if (tx_ring && tx_ring->desc) { 898 /* If packet counter has not changed the queue is 899 * likely stalled, so force an interrupt for this 900 * queue. 901 * 902 * prev_pkt_ctr would be negative if there was no 903 * pending work. 904 */ 905 packets = tx_ring->stats.packets & INT_MAX; 906 if (tx_ring->tx_stats.prev_pkt_ctr == packets) { 907 i40e_force_wb(vsi, tx_ring->q_vector); 908 continue; 909 } 910 911 /* Memory barrier between read of packet count and call 912 * to i40e_get_tx_pending() 913 */ 914 smp_rmb(); 915 tx_ring->tx_stats.prev_pkt_ctr = 916 i40e_get_tx_pending(tx_ring, true) ? packets : -1; 917 } 918 } 919 } 920 921 /** 922 * i40e_clean_tx_irq - Reclaim resources after transmit completes 923 * @vsi: the VSI we care about 924 * @tx_ring: Tx ring to clean 925 * @napi_budget: Used to determine if we are in netpoll 926 * 927 * Returns true if there's any budget left (e.g. 
the clean is finished) 928 **/ 929 static bool i40e_clean_tx_irq(struct i40e_vsi *vsi, 930 struct i40e_ring *tx_ring, int napi_budget) 931 { 932 int i = tx_ring->next_to_clean; 933 struct i40e_tx_buffer *tx_buf; 934 struct i40e_tx_desc *tx_head; 935 struct i40e_tx_desc *tx_desc; 936 unsigned int total_bytes = 0, total_packets = 0; 937 unsigned int budget = vsi->work_limit; 938 939 tx_buf = &tx_ring->tx_bi[i]; 940 tx_desc = I40E_TX_DESC(tx_ring, i); 941 i -= tx_ring->count; 942 943 tx_head = I40E_TX_DESC(tx_ring, i40e_get_head(tx_ring)); 944 945 do { 946 struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch; 947 948 /* if next_to_watch is not set then there is no work pending */ 949 if (!eop_desc) 950 break; 951 952 /* prevent any other reads prior to eop_desc */ 953 smp_rmb(); 954 955 i40e_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf); 956 /* we have caught up to head, no work left to do */ 957 if (tx_head == tx_desc) 958 break; 959 960 /* clear next_to_watch to prevent false hangs */ 961 tx_buf->next_to_watch = NULL; 962 963 /* update the statistics for this packet */ 964 total_bytes += tx_buf->bytecount; 965 total_packets += tx_buf->gso_segs; 966 967 /* free the skb/XDP data */ 968 if (ring_is_xdp(tx_ring)) 969 xdp_return_frame(tx_buf->xdpf); 970 else 971 napi_consume_skb(tx_buf->skb, napi_budget); 972 973 /* unmap skb header data */ 974 dma_unmap_single(tx_ring->dev, 975 dma_unmap_addr(tx_buf, dma), 976 dma_unmap_len(tx_buf, len), 977 DMA_TO_DEVICE); 978 979 /* clear tx_buffer data */ 980 tx_buf->skb = NULL; 981 dma_unmap_len_set(tx_buf, len, 0); 982 983 /* unmap remaining buffers */ 984 while (tx_desc != eop_desc) { 985 i40e_trace(clean_tx_irq_unmap, 986 tx_ring, tx_desc, tx_buf); 987 988 tx_buf++; 989 tx_desc++; 990 i++; 991 if (unlikely(!i)) { 992 i -= tx_ring->count; 993 tx_buf = tx_ring->tx_bi; 994 tx_desc = I40E_TX_DESC(tx_ring, 0); 995 } 996 997 /* unmap any remaining paged data */ 998 if (dma_unmap_len(tx_buf, len)) { 999 dma_unmap_page(tx_ring->dev, 1000 dma_unmap_addr(tx_buf, dma), 1001 dma_unmap_len(tx_buf, len), 1002 DMA_TO_DEVICE); 1003 dma_unmap_len_set(tx_buf, len, 0); 1004 } 1005 } 1006 1007 /* move us one more past the eop_desc for start of next pkt */ 1008 tx_buf++; 1009 tx_desc++; 1010 i++; 1011 if (unlikely(!i)) { 1012 i -= tx_ring->count; 1013 tx_buf = tx_ring->tx_bi; 1014 tx_desc = I40E_TX_DESC(tx_ring, 0); 1015 } 1016 1017 prefetch(tx_desc); 1018 1019 /* update budget accounting */ 1020 budget--; 1021 } while (likely(budget)); 1022 1023 i += tx_ring->count; 1024 tx_ring->next_to_clean = i; 1025 i40e_update_tx_stats(tx_ring, total_packets, total_bytes); 1026 i40e_arm_wb(tx_ring, vsi, budget); 1027 1028 if (ring_is_xdp(tx_ring)) 1029 return !!budget; 1030 1031 /* notify netdev of completed buffers */ 1032 netdev_tx_completed_queue(txring_txq(tx_ring), 1033 total_packets, total_bytes); 1034 1035 #define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2)) 1036 if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) && 1037 (I40E_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) { 1038 /* Make sure that anybody stopping the queue after this 1039 * sees the new next_to_clean. 
1040 */ 1041 smp_mb(); 1042 if (__netif_subqueue_stopped(tx_ring->netdev, 1043 tx_ring->queue_index) && 1044 !test_bit(__I40E_VSI_DOWN, vsi->state)) { 1045 netif_wake_subqueue(tx_ring->netdev, 1046 tx_ring->queue_index); 1047 ++tx_ring->tx_stats.restart_queue; 1048 } 1049 } 1050 1051 return !!budget; 1052 } 1053 1054 /** 1055 * i40e_enable_wb_on_itr - Arm hardware to do a wb, interrupts are not enabled 1056 * @vsi: the VSI we care about 1057 * @q_vector: the vector on which to enable writeback 1058 * 1059 **/ 1060 static void i40e_enable_wb_on_itr(struct i40e_vsi *vsi, 1061 struct i40e_q_vector *q_vector) 1062 { 1063 u16 flags = q_vector->tx.ring[0].flags; 1064 u32 val; 1065 1066 if (!(flags & I40E_TXR_FLAGS_WB_ON_ITR)) 1067 return; 1068 1069 if (q_vector->arm_wb_state) 1070 return; 1071 1072 if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) { 1073 val = I40E_PFINT_DYN_CTLN_WB_ON_ITR_MASK | 1074 I40E_PFINT_DYN_CTLN_ITR_INDX_MASK; /* set noitr */ 1075 1076 wr32(&vsi->back->hw, 1077 I40E_PFINT_DYN_CTLN(q_vector->reg_idx), 1078 val); 1079 } else { 1080 val = I40E_PFINT_DYN_CTL0_WB_ON_ITR_MASK | 1081 I40E_PFINT_DYN_CTL0_ITR_INDX_MASK; /* set noitr */ 1082 1083 wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0, val); 1084 } 1085 q_vector->arm_wb_state = true; 1086 } 1087 1088 /** 1089 * i40e_force_wb - Issue SW Interrupt so HW does a wb 1090 * @vsi: the VSI we care about 1091 * @q_vector: the vector on which to force writeback 1092 * 1093 **/ 1094 void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector) 1095 { 1096 if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) { 1097 u32 val = I40E_PFINT_DYN_CTLN_INTENA_MASK | 1098 I40E_PFINT_DYN_CTLN_ITR_INDX_MASK | /* set noitr */ 1099 I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK | 1100 I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK; 1101 /* allow 00 to be written to the index */ 1102 1103 wr32(&vsi->back->hw, 1104 I40E_PFINT_DYN_CTLN(q_vector->reg_idx), val); 1105 } else { 1106 u32 val = I40E_PFINT_DYN_CTL0_INTENA_MASK | 1107 I40E_PFINT_DYN_CTL0_ITR_INDX_MASK | /* set noitr */ 1108 I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK | 1109 I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK; 1110 /* allow 00 to be written to the index */ 1111 1112 wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0, val); 1113 } 1114 } 1115 1116 static inline bool i40e_container_is_rx(struct i40e_q_vector *q_vector, 1117 struct i40e_ring_container *rc) 1118 { 1119 return &q_vector->rx == rc; 1120 } 1121 1122 static inline unsigned int i40e_itr_divisor(struct i40e_q_vector *q_vector) 1123 { 1124 unsigned int divisor; 1125 1126 switch (q_vector->vsi->back->hw.phy.link_info.link_speed) { 1127 case I40E_LINK_SPEED_40GB: 1128 divisor = I40E_ITR_ADAPTIVE_MIN_INC * 1024; 1129 break; 1130 case I40E_LINK_SPEED_25GB: 1131 case I40E_LINK_SPEED_20GB: 1132 divisor = I40E_ITR_ADAPTIVE_MIN_INC * 512; 1133 break; 1134 default: 1135 case I40E_LINK_SPEED_10GB: 1136 divisor = I40E_ITR_ADAPTIVE_MIN_INC * 256; 1137 break; 1138 case I40E_LINK_SPEED_1GB: 1139 case I40E_LINK_SPEED_100MB: 1140 divisor = I40E_ITR_ADAPTIVE_MIN_INC * 32; 1141 break; 1142 } 1143 1144 return divisor; 1145 } 1146 1147 /** 1148 * i40e_update_itr - update the dynamic ITR value based on statistics 1149 * @q_vector: structure containing interrupt and ring information 1150 * @rc: structure containing ring performance data 1151 * 1152 * Stores a new ITR value based on packets and byte 1153 * counts during the last interrupt. The advantage of per interrupt 1154 * computation is faster updates and more accurate ITR for the current 1155 * traffic pattern. 
Constants in this function were computed 1156 * based on theoretical maximum wire speed and thresholds were set based 1157 * on testing data as well as attempting to minimize response time 1158 * while increasing bulk throughput. 1159 **/ 1160 static void i40e_update_itr(struct i40e_q_vector *q_vector, 1161 struct i40e_ring_container *rc) 1162 { 1163 unsigned int avg_wire_size, packets, bytes, itr; 1164 unsigned long next_update = jiffies; 1165 1166 /* If we don't have any rings just leave ourselves set for maximum 1167 * possible latency so we take ourselves out of the equation. 1168 */ 1169 if (!rc->ring || !ITR_IS_DYNAMIC(rc->ring->itr_setting)) 1170 return; 1171 1172 /* For Rx we want to push the delay up and default to low latency. 1173 * for Tx we want to pull the delay down and default to high latency. 1174 */ 1175 itr = i40e_container_is_rx(q_vector, rc) ? 1176 I40E_ITR_ADAPTIVE_MIN_USECS | I40E_ITR_ADAPTIVE_LATENCY : 1177 I40E_ITR_ADAPTIVE_MAX_USECS | I40E_ITR_ADAPTIVE_LATENCY; 1178 1179 /* If we didn't update within up to 1 - 2 jiffies we can assume 1180 * that either packets are coming in so slow there hasn't been 1181 * any work, or that there is so much work that NAPI is dealing 1182 * with interrupt moderation and we don't need to do anything. 1183 */ 1184 if (time_after(next_update, rc->next_update)) 1185 goto clear_counts; 1186 1187 /* If itr_countdown is set it means we programmed an ITR within 1188 * the last 4 interrupt cycles. This has a side effect of us 1189 * potentially firing an early interrupt. In order to work around 1190 * this we need to throw out any data received for a few 1191 * interrupts following the update. 1192 */ 1193 if (q_vector->itr_countdown) { 1194 itr = rc->target_itr; 1195 goto clear_counts; 1196 } 1197 1198 packets = rc->total_packets; 1199 bytes = rc->total_bytes; 1200 1201 if (i40e_container_is_rx(q_vector, rc)) { 1202 /* If Rx there are 1 to 4 packets and bytes are less than 1203 * 9000 assume insufficient data to use bulk rate limiting 1204 * approach unless Tx is already in bulk rate limiting. We 1205 * are likely latency driven. 1206 */ 1207 if (packets && packets < 4 && bytes < 9000 && 1208 (q_vector->tx.target_itr & I40E_ITR_ADAPTIVE_LATENCY)) { 1209 itr = I40E_ITR_ADAPTIVE_LATENCY; 1210 goto adjust_by_size; 1211 } 1212 } else if (packets < 4) { 1213 /* If we have Tx and Rx ITR maxed and Tx ITR is running in 1214 * bulk mode and we are receiving 4 or fewer packets just 1215 * reset the ITR_ADAPTIVE_LATENCY bit for latency mode so 1216 * that the Rx can relax. 1217 */ 1218 if (rc->target_itr == I40E_ITR_ADAPTIVE_MAX_USECS && 1219 (q_vector->rx.target_itr & I40E_ITR_MASK) == 1220 I40E_ITR_ADAPTIVE_MAX_USECS) 1221 goto clear_counts; 1222 } else if (packets > 32) { 1223 /* If we have processed over 32 packets in a single interrupt 1224 * for Tx assume we need to switch over to "bulk" mode. 1225 */ 1226 rc->target_itr &= ~I40E_ITR_ADAPTIVE_LATENCY; 1227 } 1228 1229 /* We have no packets to actually measure against. This means 1230 * either one of the other queues on this vector is active or 1231 * we are a Tx queue doing TSO with too high of an interrupt rate. 1232 * 1233 * Between 4 and 56 we can assume that our current interrupt delay 1234 * is only slightly too low. As such we should increase it by a small 1235 * fixed amount. 
1236 */ 1237 if (packets < 56) { 1238 itr = rc->target_itr + I40E_ITR_ADAPTIVE_MIN_INC; 1239 if ((itr & I40E_ITR_MASK) > I40E_ITR_ADAPTIVE_MAX_USECS) { 1240 itr &= I40E_ITR_ADAPTIVE_LATENCY; 1241 itr += I40E_ITR_ADAPTIVE_MAX_USECS; 1242 } 1243 goto clear_counts; 1244 } 1245 1246 if (packets <= 256) { 1247 itr = min(q_vector->tx.current_itr, q_vector->rx.current_itr); 1248 itr &= I40E_ITR_MASK; 1249 1250 /* Between 56 and 112 is our "goldilocks" zone where we are 1251 * working out "just right". Just report that our current 1252 * ITR is good for us. 1253 */ 1254 if (packets <= 112) 1255 goto clear_counts; 1256 1257 /* If packet count is 128 or greater we are likely looking 1258 * at a slight overrun of the delay we want. Try halving 1259 * our delay to see if that will cut the number of packets 1260 * in half per interrupt. 1261 */ 1262 itr /= 2; 1263 itr &= I40E_ITR_MASK; 1264 if (itr < I40E_ITR_ADAPTIVE_MIN_USECS) 1265 itr = I40E_ITR_ADAPTIVE_MIN_USECS; 1266 1267 goto clear_counts; 1268 } 1269 1270 /* The paths below assume we are dealing with a bulk ITR since 1271 * number of packets is greater than 256. We are just going to have 1272 * to compute a value and try to bring the count under control, 1273 * though for smaller packet sizes there isn't much we can do as 1274 * NAPI polling will likely be kicking in sooner rather than later. 1275 */ 1276 itr = I40E_ITR_ADAPTIVE_BULK; 1277 1278 adjust_by_size: 1279 /* If packet counts are 256 or greater we can assume we have a gross 1280 * overestimation of what the rate should be. Instead of trying to fine 1281 * tune it just use the formula below to try and dial in an exact value 1282 * give the current packet size of the frame. 1283 */ 1284 avg_wire_size = bytes / packets; 1285 1286 /* The following is a crude approximation of: 1287 * wmem_default / (size + overhead) = desired_pkts_per_int 1288 * rate / bits_per_byte / (size + ethernet overhead) = pkt_rate 1289 * (desired_pkt_rate / pkt_rate) * usecs_per_sec = ITR value 1290 * 1291 * Assuming wmem_default is 212992 and overhead is 640 bytes per 1292 * packet, (256 skb, 64 headroom, 320 shared info), we can reduce the 1293 * formula down to 1294 * 1295 * (170 * (size + 24)) / (size + 640) = ITR 1296 * 1297 * We first do some math on the packet size and then finally bitshift 1298 * by 8 after rounding up. We also have to account for PCIe link speed 1299 * difference as ITR scales based on this. 1300 */ 1301 if (avg_wire_size <= 60) { 1302 /* Start at 250k ints/sec */ 1303 avg_wire_size = 4096; 1304 } else if (avg_wire_size <= 380) { 1305 /* 250K ints/sec to 60K ints/sec */ 1306 avg_wire_size *= 40; 1307 avg_wire_size += 1696; 1308 } else if (avg_wire_size <= 1084) { 1309 /* 60K ints/sec to 36K ints/sec */ 1310 avg_wire_size *= 15; 1311 avg_wire_size += 11452; 1312 } else if (avg_wire_size <= 1980) { 1313 /* 36K ints/sec to 30K ints/sec */ 1314 avg_wire_size *= 5; 1315 avg_wire_size += 22420; 1316 } else { 1317 /* plateau at a limit of 30K ints/sec */ 1318 avg_wire_size = 32256; 1319 } 1320 1321 /* If we are in low latency mode halve our delay which doubles the 1322 * rate to somewhere between 100K to 16K ints/sec 1323 */ 1324 if (itr & I40E_ITR_ADAPTIVE_LATENCY) 1325 avg_wire_size /= 2; 1326 1327 /* Resultant value is 256 times larger than it needs to be. This 1328 * gives us room to adjust the value as needed to either increase 1329 * or decrease the value based on link speeds of 10G, 2.5G, 1G, etc. 
1330 * 1331 * Use addition as we have already recorded the new latency flag 1332 * for the ITR value. 1333 */ 1334 itr += DIV_ROUND_UP(avg_wire_size, i40e_itr_divisor(q_vector)) * 1335 I40E_ITR_ADAPTIVE_MIN_INC; 1336 1337 if ((itr & I40E_ITR_MASK) > I40E_ITR_ADAPTIVE_MAX_USECS) { 1338 itr &= I40E_ITR_ADAPTIVE_LATENCY; 1339 itr += I40E_ITR_ADAPTIVE_MAX_USECS; 1340 } 1341 1342 clear_counts: 1343 /* write back value */ 1344 rc->target_itr = itr; 1345 1346 /* next update should occur within next jiffy */ 1347 rc->next_update = next_update + 1; 1348 1349 rc->total_bytes = 0; 1350 rc->total_packets = 0; 1351 } 1352 1353 static struct i40e_rx_buffer *i40e_rx_bi(struct i40e_ring *rx_ring, u32 idx) 1354 { 1355 return &rx_ring->rx_bi[idx]; 1356 } 1357 1358 /** 1359 * i40e_reuse_rx_page - page flip buffer and store it back on the ring 1360 * @rx_ring: rx descriptor ring to store buffers on 1361 * @old_buff: donor buffer to have page reused 1362 * 1363 * Synchronizes page for reuse by the adapter 1364 **/ 1365 static void i40e_reuse_rx_page(struct i40e_ring *rx_ring, 1366 struct i40e_rx_buffer *old_buff) 1367 { 1368 struct i40e_rx_buffer *new_buff; 1369 u16 nta = rx_ring->next_to_alloc; 1370 1371 new_buff = i40e_rx_bi(rx_ring, nta); 1372 1373 /* update, and store next to alloc */ 1374 nta++; 1375 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; 1376 1377 /* transfer page from old buffer to new buffer */ 1378 new_buff->dma = old_buff->dma; 1379 new_buff->page = old_buff->page; 1380 new_buff->page_offset = old_buff->page_offset; 1381 new_buff->pagecnt_bias = old_buff->pagecnt_bias; 1382 1383 /* clear contents of buffer_info */ 1384 old_buff->page = NULL; 1385 } 1386 1387 /** 1388 * i40e_clean_programming_status - clean the programming status descriptor 1389 * @rx_ring: the rx ring that has this descriptor 1390 * @qword0_raw: qword0 1391 * @qword1: qword1 representing status_error_len in CPU ordering 1392 * 1393 * Flow director should handle FD_FILTER_STATUS to check its filter programming 1394 * status being successful or not and take actions accordingly. FCoE should 1395 * handle its context/filter programming/invalidation status and take actions. 1396 * 1397 * Returns an i40e_rx_buffer to reuse if the cleanup occurred, otherwise NULL. 
1398 **/ 1399 void i40e_clean_programming_status(struct i40e_ring *rx_ring, u64 qword0_raw, 1400 u64 qword1) 1401 { 1402 u8 id; 1403 1404 id = (qword1 & I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK) >> 1405 I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT; 1406 1407 if (id == I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS) 1408 i40e_fd_handle_status(rx_ring, qword0_raw, qword1, id); 1409 } 1410 1411 /** 1412 * i40e_setup_tx_descriptors - Allocate the Tx descriptors 1413 * @tx_ring: the tx ring to set up 1414 * 1415 * Return 0 on success, negative on error 1416 **/ 1417 int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring) 1418 { 1419 struct device *dev = tx_ring->dev; 1420 int bi_size; 1421 1422 if (!dev) 1423 return -ENOMEM; 1424 1425 /* warn if we are about to overwrite the pointer */ 1426 WARN_ON(tx_ring->tx_bi); 1427 bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count; 1428 tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL); 1429 if (!tx_ring->tx_bi) 1430 goto err; 1431 1432 u64_stats_init(&tx_ring->syncp); 1433 1434 /* round up to nearest 4K */ 1435 tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc); 1436 /* add u32 for head writeback, align after this takes care of 1437 * guaranteeing this is at least one cache line in size 1438 */ 1439 tx_ring->size += sizeof(u32); 1440 tx_ring->size = ALIGN(tx_ring->size, 4096); 1441 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, 1442 &tx_ring->dma, GFP_KERNEL); 1443 if (!tx_ring->desc) { 1444 dev_info(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n", 1445 tx_ring->size); 1446 goto err; 1447 } 1448 1449 tx_ring->next_to_use = 0; 1450 tx_ring->next_to_clean = 0; 1451 tx_ring->tx_stats.prev_pkt_ctr = -1; 1452 return 0; 1453 1454 err: 1455 kfree(tx_ring->tx_bi); 1456 tx_ring->tx_bi = NULL; 1457 return -ENOMEM; 1458 } 1459 1460 int i40e_alloc_rx_bi(struct i40e_ring *rx_ring) 1461 { 1462 unsigned long sz = sizeof(*rx_ring->rx_bi) * rx_ring->count; 1463 1464 rx_ring->rx_bi = kzalloc(sz, GFP_KERNEL); 1465 return rx_ring->rx_bi ? 0 : -ENOMEM; 1466 } 1467 1468 static void i40e_clear_rx_bi(struct i40e_ring *rx_ring) 1469 { 1470 memset(rx_ring->rx_bi, 0, sizeof(*rx_ring->rx_bi) * rx_ring->count); 1471 } 1472 1473 /** 1474 * i40e_clean_rx_ring - Free Rx buffers 1475 * @rx_ring: ring to be cleaned 1476 **/ 1477 void i40e_clean_rx_ring(struct i40e_ring *rx_ring) 1478 { 1479 u16 i; 1480 1481 /* ring already cleared, nothing to do */ 1482 if (!rx_ring->rx_bi) 1483 return; 1484 1485 dev_kfree_skb(rx_ring->skb); 1486 rx_ring->skb = NULL; 1487 1488 if (rx_ring->xsk_pool) { 1489 i40e_xsk_clean_rx_ring(rx_ring); 1490 goto skip_free; 1491 } 1492 1493 /* Free all the Rx ring sk_buffs */ 1494 for (i = 0; i < rx_ring->count; i++) { 1495 struct i40e_rx_buffer *rx_bi = i40e_rx_bi(rx_ring, i); 1496 1497 if (!rx_bi->page) 1498 continue; 1499 1500 /* Invalidate cache lines that may have been written to by 1501 * device so that we avoid corrupting memory. 
1502 */ 1503 dma_sync_single_range_for_cpu(rx_ring->dev, 1504 rx_bi->dma, 1505 rx_bi->page_offset, 1506 rx_ring->rx_buf_len, 1507 DMA_FROM_DEVICE); 1508 1509 /* free resources associated with mapping */ 1510 dma_unmap_page_attrs(rx_ring->dev, rx_bi->dma, 1511 i40e_rx_pg_size(rx_ring), 1512 DMA_FROM_DEVICE, 1513 I40E_RX_DMA_ATTR); 1514 1515 __page_frag_cache_drain(rx_bi->page, rx_bi->pagecnt_bias); 1516 1517 rx_bi->page = NULL; 1518 rx_bi->page_offset = 0; 1519 } 1520 1521 skip_free: 1522 if (rx_ring->xsk_pool) 1523 i40e_clear_rx_bi_zc(rx_ring); 1524 else 1525 i40e_clear_rx_bi(rx_ring); 1526 1527 /* Zero out the descriptor ring */ 1528 memset(rx_ring->desc, 0, rx_ring->size); 1529 1530 rx_ring->next_to_alloc = 0; 1531 rx_ring->next_to_clean = 0; 1532 rx_ring->next_to_use = 0; 1533 } 1534 1535 /** 1536 * i40e_free_rx_resources - Free Rx resources 1537 * @rx_ring: ring to clean the resources from 1538 * 1539 * Free all receive software resources 1540 **/ 1541 void i40e_free_rx_resources(struct i40e_ring *rx_ring) 1542 { 1543 i40e_clean_rx_ring(rx_ring); 1544 if (rx_ring->vsi->type == I40E_VSI_MAIN) 1545 xdp_rxq_info_unreg(&rx_ring->xdp_rxq); 1546 rx_ring->xdp_prog = NULL; 1547 kfree(rx_ring->rx_bi); 1548 rx_ring->rx_bi = NULL; 1549 1550 if (rx_ring->desc) { 1551 dma_free_coherent(rx_ring->dev, rx_ring->size, 1552 rx_ring->desc, rx_ring->dma); 1553 rx_ring->desc = NULL; 1554 } 1555 } 1556 1557 /** 1558 * i40e_setup_rx_descriptors - Allocate Rx descriptors 1559 * @rx_ring: Rx descriptor ring (for a specific queue) to setup 1560 * 1561 * Returns 0 on success, negative on failure 1562 **/ 1563 int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring) 1564 { 1565 struct device *dev = rx_ring->dev; 1566 int err; 1567 1568 u64_stats_init(&rx_ring->syncp); 1569 1570 /* Round up to nearest 4K */ 1571 rx_ring->size = rx_ring->count * sizeof(union i40e_rx_desc); 1572 rx_ring->size = ALIGN(rx_ring->size, 4096); 1573 rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, 1574 &rx_ring->dma, GFP_KERNEL); 1575 1576 if (!rx_ring->desc) { 1577 dev_info(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n", 1578 rx_ring->size); 1579 return -ENOMEM; 1580 } 1581 1582 rx_ring->next_to_alloc = 0; 1583 rx_ring->next_to_clean = 0; 1584 rx_ring->next_to_use = 0; 1585 1586 /* XDP RX-queue info only needed for RX rings exposed to XDP */ 1587 if (rx_ring->vsi->type == I40E_VSI_MAIN) { 1588 err = xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev, 1589 rx_ring->queue_index, rx_ring->q_vector->napi.napi_id); 1590 if (err < 0) 1591 return err; 1592 } 1593 1594 rx_ring->xdp_prog = rx_ring->vsi->xdp_prog; 1595 1596 return 0; 1597 } 1598 1599 /** 1600 * i40e_release_rx_desc - Store the new tail and head values 1601 * @rx_ring: ring to bump 1602 * @val: new head index 1603 **/ 1604 void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val) 1605 { 1606 rx_ring->next_to_use = val; 1607 1608 /* update next to alloc since we have filled the ring */ 1609 rx_ring->next_to_alloc = val; 1610 1611 /* Force memory writes to complete before letting h/w 1612 * know there are new descriptors to fetch. (Only 1613 * applicable for weak-ordered memory model archs, 1614 * such as IA-64). 
1615 */ 1616 wmb(); 1617 writel(val, rx_ring->tail); 1618 } 1619 1620 static unsigned int i40e_rx_frame_truesize(struct i40e_ring *rx_ring, 1621 unsigned int size) 1622 { 1623 unsigned int truesize; 1624 1625 #if (PAGE_SIZE < 8192) 1626 truesize = i40e_rx_pg_size(rx_ring) / 2; /* Must be power-of-2 */ 1627 #else 1628 truesize = rx_ring->rx_offset ? 1629 SKB_DATA_ALIGN(size + rx_ring->rx_offset) + 1630 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) : 1631 SKB_DATA_ALIGN(size); 1632 #endif 1633 return truesize; 1634 } 1635 1636 /** 1637 * i40e_alloc_mapped_page - recycle or make a new page 1638 * @rx_ring: ring to use 1639 * @bi: rx_buffer struct to modify 1640 * 1641 * Returns true if the page was successfully allocated or 1642 * reused. 1643 **/ 1644 static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring, 1645 struct i40e_rx_buffer *bi) 1646 { 1647 struct page *page = bi->page; 1648 dma_addr_t dma; 1649 1650 /* since we are recycling buffers we should seldom need to alloc */ 1651 if (likely(page)) { 1652 rx_ring->rx_stats.page_reuse_count++; 1653 return true; 1654 } 1655 1656 /* alloc new page for storage */ 1657 page = dev_alloc_pages(i40e_rx_pg_order(rx_ring)); 1658 if (unlikely(!page)) { 1659 rx_ring->rx_stats.alloc_page_failed++; 1660 return false; 1661 } 1662 1663 rx_ring->rx_stats.page_alloc_count++; 1664 1665 /* map page for use */ 1666 dma = dma_map_page_attrs(rx_ring->dev, page, 0, 1667 i40e_rx_pg_size(rx_ring), 1668 DMA_FROM_DEVICE, 1669 I40E_RX_DMA_ATTR); 1670 1671 /* if mapping failed free memory back to system since 1672 * there isn't much point in holding memory we can't use 1673 */ 1674 if (dma_mapping_error(rx_ring->dev, dma)) { 1675 __free_pages(page, i40e_rx_pg_order(rx_ring)); 1676 rx_ring->rx_stats.alloc_page_failed++; 1677 return false; 1678 } 1679 1680 bi->dma = dma; 1681 bi->page = page; 1682 bi->page_offset = rx_ring->rx_offset; 1683 page_ref_add(page, USHRT_MAX - 1); 1684 bi->pagecnt_bias = USHRT_MAX; 1685 1686 return true; 1687 } 1688 1689 /** 1690 * i40e_alloc_rx_buffers - Replace used receive buffers 1691 * @rx_ring: ring to place buffers on 1692 * @cleaned_count: number of buffers to replace 1693 * 1694 * Returns false if all allocations were successful, true if any fail 1695 **/ 1696 bool i40e_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count) 1697 { 1698 u16 ntu = rx_ring->next_to_use; 1699 union i40e_rx_desc *rx_desc; 1700 struct i40e_rx_buffer *bi; 1701 1702 /* do nothing if no valid netdev defined */ 1703 if (!rx_ring->netdev || !cleaned_count) 1704 return false; 1705 1706 rx_desc = I40E_RX_DESC(rx_ring, ntu); 1707 bi = i40e_rx_bi(rx_ring, ntu); 1708 1709 do { 1710 if (!i40e_alloc_mapped_page(rx_ring, bi)) 1711 goto no_buffers; 1712 1713 /* sync the buffer for use by the device */ 1714 dma_sync_single_range_for_device(rx_ring->dev, bi->dma, 1715 bi->page_offset, 1716 rx_ring->rx_buf_len, 1717 DMA_FROM_DEVICE); 1718 1719 /* Refresh the desc even if buffer_addrs didn't change 1720 * because each write-back erases this info. 
1721 */ 1722 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset); 1723 1724 rx_desc++; 1725 bi++; 1726 ntu++; 1727 if (unlikely(ntu == rx_ring->count)) { 1728 rx_desc = I40E_RX_DESC(rx_ring, 0); 1729 bi = i40e_rx_bi(rx_ring, 0); 1730 ntu = 0; 1731 } 1732 1733 /* clear the status bits for the next_to_use descriptor */ 1734 rx_desc->wb.qword1.status_error_len = 0; 1735 1736 cleaned_count--; 1737 } while (cleaned_count); 1738 1739 if (rx_ring->next_to_use != ntu) 1740 i40e_release_rx_desc(rx_ring, ntu); 1741 1742 return false; 1743 1744 no_buffers: 1745 if (rx_ring->next_to_use != ntu) 1746 i40e_release_rx_desc(rx_ring, ntu); 1747 1748 /* make sure to come back via polling to try again after 1749 * allocation failure 1750 */ 1751 return true; 1752 } 1753 1754 /** 1755 * i40e_rx_checksum - Indicate in skb if hw indicated a good cksum 1756 * @vsi: the VSI we care about 1757 * @skb: skb currently being received and modified 1758 * @rx_desc: the receive descriptor 1759 **/ 1760 static inline void i40e_rx_checksum(struct i40e_vsi *vsi, 1761 struct sk_buff *skb, 1762 union i40e_rx_desc *rx_desc) 1763 { 1764 struct i40e_rx_ptype_decoded decoded; 1765 u32 rx_error, rx_status; 1766 bool ipv4, ipv6; 1767 u8 ptype; 1768 u64 qword; 1769 1770 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); 1771 ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT; 1772 rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >> 1773 I40E_RXD_QW1_ERROR_SHIFT; 1774 rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >> 1775 I40E_RXD_QW1_STATUS_SHIFT; 1776 decoded = decode_rx_desc_ptype(ptype); 1777 1778 skb->ip_summed = CHECKSUM_NONE; 1779 1780 skb_checksum_none_assert(skb); 1781 1782 /* Rx csum enabled and ip headers found? */ 1783 if (!(vsi->netdev->features & NETIF_F_RXCSUM)) 1784 return; 1785 1786 /* did the hardware decode the packet and checksum? */ 1787 if (!(rx_status & BIT(I40E_RX_DESC_STATUS_L3L4P_SHIFT))) 1788 return; 1789 1790 /* both known and outer_ip must be set for the below code to work */ 1791 if (!(decoded.known && decoded.outer_ip)) 1792 return; 1793 1794 ipv4 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) && 1795 (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4); 1796 ipv6 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) && 1797 (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6); 1798 1799 if (ipv4 && 1800 (rx_error & (BIT(I40E_RX_DESC_ERROR_IPE_SHIFT) | 1801 BIT(I40E_RX_DESC_ERROR_EIPE_SHIFT)))) 1802 goto checksum_fail; 1803 1804 /* likely incorrect csum if alternate IP extension headers found */ 1805 if (ipv6 && 1806 rx_status & BIT(I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT)) 1807 /* don't increment checksum err here, non-fatal err */ 1808 return; 1809 1810 /* there was some L4 error, count error and punt packet to the stack */ 1811 if (rx_error & BIT(I40E_RX_DESC_ERROR_L4E_SHIFT)) 1812 goto checksum_fail; 1813 1814 /* handle packets that were not able to be checksummed due 1815 * to arrival speed, in this case the stack can compute 1816 * the csum. 1817 */ 1818 if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT)) 1819 return; 1820 1821 /* If there is an outer header present that might contain a checksum 1822 * we need to bump the checksum level by 1 to reflect the fact that 1823 * we are indicating we validated the inner checksum. 
1824 */ 1825 if (decoded.tunnel_type >= I40E_RX_PTYPE_TUNNEL_IP_GRENAT) 1826 skb->csum_level = 1; 1827 1828 /* Only report checksum unnecessary for TCP, UDP, or SCTP */ 1829 switch (decoded.inner_prot) { 1830 case I40E_RX_PTYPE_INNER_PROT_TCP: 1831 case I40E_RX_PTYPE_INNER_PROT_UDP: 1832 case I40E_RX_PTYPE_INNER_PROT_SCTP: 1833 skb->ip_summed = CHECKSUM_UNNECESSARY; 1834 fallthrough; 1835 default: 1836 break; 1837 } 1838 1839 return; 1840 1841 checksum_fail: 1842 vsi->back->hw_csum_rx_error++; 1843 } 1844 1845 /** 1846 * i40e_ptype_to_htype - get a hash type 1847 * @ptype: the ptype value from the descriptor 1848 * 1849 * Returns a hash type to be used by skb_set_hash 1850 **/ 1851 static inline int i40e_ptype_to_htype(u8 ptype) 1852 { 1853 struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype); 1854 1855 if (!decoded.known) 1856 return PKT_HASH_TYPE_NONE; 1857 1858 if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP && 1859 decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4) 1860 return PKT_HASH_TYPE_L4; 1861 else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP && 1862 decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3) 1863 return PKT_HASH_TYPE_L3; 1864 else 1865 return PKT_HASH_TYPE_L2; 1866 } 1867 1868 /** 1869 * i40e_rx_hash - set the hash value in the skb 1870 * @ring: descriptor ring 1871 * @rx_desc: specific descriptor 1872 * @skb: skb currently being received and modified 1873 * @rx_ptype: Rx packet type 1874 **/ 1875 static inline void i40e_rx_hash(struct i40e_ring *ring, 1876 union i40e_rx_desc *rx_desc, 1877 struct sk_buff *skb, 1878 u8 rx_ptype) 1879 { 1880 u32 hash; 1881 const __le64 rss_mask = 1882 cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH << 1883 I40E_RX_DESC_STATUS_FLTSTAT_SHIFT); 1884 1885 if (!(ring->netdev->features & NETIF_F_RXHASH)) 1886 return; 1887 1888 if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) { 1889 hash = le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss); 1890 skb_set_hash(skb, hash, i40e_ptype_to_htype(rx_ptype)); 1891 } 1892 } 1893 1894 /** 1895 * i40e_process_skb_fields - Populate skb header fields from Rx descriptor 1896 * @rx_ring: rx descriptor ring packet is being transacted on 1897 * @rx_desc: pointer to the EOP Rx descriptor 1898 * @skb: pointer to current skb being populated 1899 * 1900 * This function checks the ring, descriptor, and packet information in 1901 * order to populate the hash, checksum, VLAN, protocol, and 1902 * other fields within the skb. 
1903 **/ 1904 void i40e_process_skb_fields(struct i40e_ring *rx_ring, 1905 union i40e_rx_desc *rx_desc, struct sk_buff *skb) 1906 { 1907 u64 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); 1908 u32 rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >> 1909 I40E_RXD_QW1_STATUS_SHIFT; 1910 u32 tsynvalid = rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK; 1911 u32 tsyn = (rx_status & I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >> 1912 I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT; 1913 u8 rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> 1914 I40E_RXD_QW1_PTYPE_SHIFT; 1915 1916 if (unlikely(tsynvalid)) 1917 i40e_ptp_rx_hwtstamp(rx_ring->vsi->back, skb, tsyn); 1918 1919 i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype); 1920 1921 i40e_rx_checksum(rx_ring->vsi, skb, rx_desc); 1922 1923 skb_record_rx_queue(skb, rx_ring->queue_index); 1924 1925 if (qword & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) { 1926 __le16 vlan_tag = rx_desc->wb.qword0.lo_dword.l2tag1; 1927 1928 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), 1929 le16_to_cpu(vlan_tag)); 1930 } 1931 1932 /* modifies the skb - consumes the enet header */ 1933 skb->protocol = eth_type_trans(skb, rx_ring->netdev); 1934 } 1935 1936 /** 1937 * i40e_cleanup_headers - Correct empty headers 1938 * @rx_ring: rx descriptor ring packet is being transacted on 1939 * @skb: pointer to current skb being fixed 1940 * @rx_desc: pointer to the EOP Rx descriptor 1941 * 1942 * In addition if skb is not at least 60 bytes we need to pad it so that 1943 * it is large enough to qualify as a valid Ethernet frame. 1944 * 1945 * Returns true if an error was encountered and skb was freed. 1946 **/ 1947 static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb, 1948 union i40e_rx_desc *rx_desc) 1949 1950 { 1951 /* ERR_MASK will only have valid bits if EOP set, and 1952 * what we are doing here is actually checking 1953 * I40E_RX_DESC_ERROR_RXE_SHIFT, since it is the zeroth bit in 1954 * the error field 1955 */ 1956 if (unlikely(i40e_test_staterr(rx_desc, 1957 BIT(I40E_RXD_QW1_ERROR_SHIFT)))) { 1958 dev_kfree_skb_any(skb); 1959 return true; 1960 } 1961 1962 /* if eth_skb_pad returns an error the skb was freed */ 1963 if (eth_skb_pad(skb)) 1964 return true; 1965 1966 return false; 1967 } 1968 1969 /** 1970 * i40e_can_reuse_rx_page - Determine if page can be reused for another Rx 1971 * @rx_buffer: buffer containing the page 1972 * @rx_stats: rx stats structure for the rx ring 1973 * @rx_buffer_pgcnt: buffer page refcount pre xdp_do_redirect() call 1974 * 1975 * If page is reusable, we have a green light for calling i40e_reuse_rx_page, 1976 * which will assign the current buffer to the buffer that next_to_alloc is 1977 * pointing to; otherwise, the DMA mapping needs to be destroyed and 1978 * page freed. 1979 * 1980 * rx_stats will be updated to indicate whether the page was waived 1981 * or busy if it could not be reused. 1982 */ 1983 static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer, 1984 struct i40e_rx_queue_stats *rx_stats, 1985 int rx_buffer_pgcnt) 1986 { 1987 unsigned int pagecnt_bias = rx_buffer->pagecnt_bias; 1988 struct page *page = rx_buffer->page; 1989 1990 /* Is any reuse possible? 
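 * A page is considered non-reusable when dev_page_is_reusable() below
 * rejects it, typically because it was allocated from the pfmemalloc
 * emergency reserves or no longer sits on the local NUMA node.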
*/ 1991 if (!dev_page_is_reusable(page)) { 1992 rx_stats->page_waive_count++; 1993 return false; 1994 } 1995 1996 #if (PAGE_SIZE < 8192) 1997 /* if we are only owner of page we can reuse it */ 1998 if (unlikely((rx_buffer_pgcnt - pagecnt_bias) > 1)) { 1999 rx_stats->page_busy_count++; 2000 return false; 2001 } 2002 #else 2003 #define I40E_LAST_OFFSET \ 2004 (SKB_WITH_OVERHEAD(PAGE_SIZE) - I40E_RXBUFFER_2048) 2005 if (rx_buffer->page_offset > I40E_LAST_OFFSET) { 2006 rx_stats->page_busy_count++; 2007 return false; 2008 } 2009 #endif 2010 2011 /* If we have drained the page fragment pool we need to update 2012 * the pagecnt_bias and page count so that we fully restock the 2013 * number of references the driver holds. 2014 */ 2015 if (unlikely(pagecnt_bias == 1)) { 2016 page_ref_add(page, USHRT_MAX - 1); 2017 rx_buffer->pagecnt_bias = USHRT_MAX; 2018 } 2019 2020 return true; 2021 } 2022 2023 /** 2024 * i40e_add_rx_frag - Add contents of Rx buffer to sk_buff 2025 * @rx_ring: rx descriptor ring to transact packets on 2026 * @rx_buffer: buffer containing page to add 2027 * @skb: sk_buff to place the data into 2028 * @size: packet length from rx_desc 2029 * 2030 * This function will add the data contained in rx_buffer->page to the skb. 2031 * It will just attach the page as a frag to the skb. 2032 * 2033 * The function will then update the page offset. 2034 **/ 2035 static void i40e_add_rx_frag(struct i40e_ring *rx_ring, 2036 struct i40e_rx_buffer *rx_buffer, 2037 struct sk_buff *skb, 2038 unsigned int size) 2039 { 2040 #if (PAGE_SIZE < 8192) 2041 unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2; 2042 #else 2043 unsigned int truesize = SKB_DATA_ALIGN(size + rx_ring->rx_offset); 2044 #endif 2045 2046 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page, 2047 rx_buffer->page_offset, size, truesize); 2048 2049 /* page is being used so we must update the page offset */ 2050 #if (PAGE_SIZE < 8192) 2051 rx_buffer->page_offset ^= truesize; 2052 #else 2053 rx_buffer->page_offset += truesize; 2054 #endif 2055 } 2056 2057 /** 2058 * i40e_get_rx_buffer - Fetch Rx buffer and synchronize data for use 2059 * @rx_ring: rx descriptor ring to transact packets on 2060 * @size: size of buffer to add to skb 2061 * @rx_buffer_pgcnt: buffer page refcount 2062 * 2063 * This function will pull an Rx buffer from the ring and synchronize it 2064 * for use by the CPU. 2065 */ 2066 static struct i40e_rx_buffer *i40e_get_rx_buffer(struct i40e_ring *rx_ring, 2067 const unsigned int size, 2068 int *rx_buffer_pgcnt) 2069 { 2070 struct i40e_rx_buffer *rx_buffer; 2071 2072 rx_buffer = i40e_rx_bi(rx_ring, rx_ring->next_to_clean); 2073 *rx_buffer_pgcnt = 2074 #if (PAGE_SIZE < 8192) 2075 page_count(rx_buffer->page); 2076 #else 2077 0; 2078 #endif 2079 prefetch_page_address(rx_buffer->page); 2080 2081 /* we are reusing so sync this buffer for CPU use */ 2082 dma_sync_single_range_for_cpu(rx_ring->dev, 2083 rx_buffer->dma, 2084 rx_buffer->page_offset, 2085 size, 2086 DMA_FROM_DEVICE); 2087 2088 /* We have pulled a buffer for use, so decrement pagecnt_bias */ 2089 rx_buffer->pagecnt_bias--; 2090 2091 return rx_buffer; 2092 } 2093 2094 /** 2095 * i40e_construct_skb - Allocate skb and populate it 2096 * @rx_ring: rx descriptor ring to transact packets on 2097 * @rx_buffer: rx buffer to pull data from 2098 * @xdp: xdp_buff pointing to the data 2099 * 2100 * This function allocates an skb. It then populates it with the page 2101 * data from the current receive descriptor, taking care to set up the 2102 * skb correctly. 
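 *
 * In outline, a simplified view of what follows: up to I40E_RX_HDR_SIZE
 * bytes of headers (as sized by eth_get_headlen()) are copied into the
 * skb's linear area, and any remaining payload is left in the page and
 * attached as a frag:
 *
 *	headlen = min(size, I40E_RX_HDR_SIZE);
 *	memcpy(__skb_put(skb, headlen), xdp->data, ALIGN(headlen, sizeof(long)));
 *	if (size > headlen)
 *		skb_add_rx_frag(skb, 0, page, offset + headlen,
 *				size - headlen, truesize);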
2103 */ 2104 static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring, 2105 struct i40e_rx_buffer *rx_buffer, 2106 struct xdp_buff *xdp) 2107 { 2108 unsigned int size = xdp->data_end - xdp->data; 2109 #if (PAGE_SIZE < 8192) 2110 unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2; 2111 #else 2112 unsigned int truesize = SKB_DATA_ALIGN(size); 2113 #endif 2114 unsigned int headlen; 2115 struct sk_buff *skb; 2116 2117 /* prefetch first cache line of first page */ 2118 net_prefetch(xdp->data); 2119 2120 /* Note, we get here by enabling legacy-rx via: 2121 * 2122 * ethtool --set-priv-flags <dev> legacy-rx on 2123 * 2124 * In this mode, we currently get 0 extra XDP headroom as 2125 * opposed to having legacy-rx off, where we process XDP 2126 * packets going to stack via i40e_build_skb(). The latter 2127 * provides us currently with 192 bytes of headroom. 2128 * 2129 * For i40e_construct_skb() mode it means that the 2130 * xdp->data_meta will always point to xdp->data, since 2131 * the helper cannot expand the head. Should this ever 2132 * change in future for legacy-rx mode on, then lets also 2133 * add xdp->data_meta handling here. 2134 */ 2135 2136 /* allocate a skb to store the frags */ 2137 skb = __napi_alloc_skb(&rx_ring->q_vector->napi, 2138 I40E_RX_HDR_SIZE, 2139 GFP_ATOMIC | __GFP_NOWARN); 2140 if (unlikely(!skb)) 2141 return NULL; 2142 2143 /* Determine available headroom for copy */ 2144 headlen = size; 2145 if (headlen > I40E_RX_HDR_SIZE) 2146 headlen = eth_get_headlen(skb->dev, xdp->data, 2147 I40E_RX_HDR_SIZE); 2148 2149 /* align pull length to size of long to optimize memcpy performance */ 2150 memcpy(__skb_put(skb, headlen), xdp->data, 2151 ALIGN(headlen, sizeof(long))); 2152 2153 /* update all of the pointers */ 2154 size -= headlen; 2155 if (size) { 2156 skb_add_rx_frag(skb, 0, rx_buffer->page, 2157 rx_buffer->page_offset + headlen, 2158 size, truesize); 2159 2160 /* buffer is used by skb, update page_offset */ 2161 #if (PAGE_SIZE < 8192) 2162 rx_buffer->page_offset ^= truesize; 2163 #else 2164 rx_buffer->page_offset += truesize; 2165 #endif 2166 } else { 2167 /* buffer is unused, reset bias back to rx_buffer */ 2168 rx_buffer->pagecnt_bias++; 2169 } 2170 2171 return skb; 2172 } 2173 2174 /** 2175 * i40e_build_skb - Build skb around an existing buffer 2176 * @rx_ring: Rx descriptor ring to transact packets on 2177 * @rx_buffer: Rx buffer to pull data from 2178 * @xdp: xdp_buff pointing to the data 2179 * 2180 * This function builds an skb around an existing Rx buffer, taking care 2181 * to set up the skb correctly and avoid any memcpy overhead. 2182 */ 2183 static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring, 2184 struct i40e_rx_buffer *rx_buffer, 2185 struct xdp_buff *xdp) 2186 { 2187 unsigned int metasize = xdp->data - xdp->data_meta; 2188 #if (PAGE_SIZE < 8192) 2189 unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2; 2190 #else 2191 unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + 2192 SKB_DATA_ALIGN(xdp->data_end - 2193 xdp->data_hard_start); 2194 #endif 2195 struct sk_buff *skb; 2196 2197 /* Prefetch first cache line of first page. If xdp->data_meta 2198 * is unused, this points exactly as xdp->data, otherwise we 2199 * likely have a consumer accessing first few bytes of meta 2200 * data, and then actual data. 
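 *
 * An XDP program may have used bpf_xdp_adjust_meta() to place a small
 * metadata area directly in front of the payload; skb_metadata_set()
 * below preserves it so that later consumers (e.g. TC BPF programs)
 * can still read it.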
2201 */ 2202 net_prefetch(xdp->data_meta); 2203 2204 /* build an skb around the page buffer */ 2205 skb = napi_build_skb(xdp->data_hard_start, truesize); 2206 if (unlikely(!skb)) 2207 return NULL; 2208 2209 /* update pointers within the skb to store the data */ 2210 skb_reserve(skb, xdp->data - xdp->data_hard_start); 2211 __skb_put(skb, xdp->data_end - xdp->data); 2212 if (metasize) 2213 skb_metadata_set(skb, metasize); 2214 2215 /* buffer is used by skb, update page_offset */ 2216 #if (PAGE_SIZE < 8192) 2217 rx_buffer->page_offset ^= truesize; 2218 #else 2219 rx_buffer->page_offset += truesize; 2220 #endif 2221 2222 return skb; 2223 } 2224 2225 /** 2226 * i40e_put_rx_buffer - Clean up used buffer and either recycle or free 2227 * @rx_ring: rx descriptor ring to transact packets on 2228 * @rx_buffer: rx buffer to pull data from 2229 * @rx_buffer_pgcnt: rx buffer page refcount pre xdp_do_redirect() call 2230 * 2231 * This function will clean up the contents of the rx_buffer. It will 2232 * either recycle the buffer or unmap it and free the associated resources. 2233 */ 2234 static void i40e_put_rx_buffer(struct i40e_ring *rx_ring, 2235 struct i40e_rx_buffer *rx_buffer, 2236 int rx_buffer_pgcnt) 2237 { 2238 if (i40e_can_reuse_rx_page(rx_buffer, &rx_ring->rx_stats, rx_buffer_pgcnt)) { 2239 /* hand second half of page back to the ring */ 2240 i40e_reuse_rx_page(rx_ring, rx_buffer); 2241 } else { 2242 /* we are not reusing the buffer so unmap it */ 2243 dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, 2244 i40e_rx_pg_size(rx_ring), 2245 DMA_FROM_DEVICE, I40E_RX_DMA_ATTR); 2246 __page_frag_cache_drain(rx_buffer->page, 2247 rx_buffer->pagecnt_bias); 2248 /* clear contents of buffer_info */ 2249 rx_buffer->page = NULL; 2250 } 2251 } 2252 2253 /** 2254 * i40e_is_non_eop - process handling of non-EOP buffers 2255 * @rx_ring: Rx ring being processed 2256 * @rx_desc: Rx descriptor for current buffer 2257 * 2258 * If the buffer is an EOP buffer, this function exits returning false, 2259 * otherwise return true indicating that this is in fact a non-EOP buffer. 
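 *
 * A frame larger than the Rx buffer size is spread across several
 * descriptors and only the last one carries the EOF status bit; for
 * instance, a 5000 byte frame landing in 2048 byte buffers would
 * typically span three descriptors, with this helper returning true
 * for the first two and false for the final one.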
2260 */ 2261 static bool i40e_is_non_eop(struct i40e_ring *rx_ring, 2262 union i40e_rx_desc *rx_desc) 2263 { 2264 /* if we are the last buffer then there is nothing else to do */ 2265 #define I40E_RXD_EOF BIT(I40E_RX_DESC_STATUS_EOF_SHIFT) 2266 if (likely(i40e_test_staterr(rx_desc, I40E_RXD_EOF))) 2267 return false; 2268 2269 rx_ring->rx_stats.non_eop_descs++; 2270 2271 return true; 2272 } 2273 2274 static int i40e_xmit_xdp_ring(struct xdp_frame *xdpf, 2275 struct i40e_ring *xdp_ring); 2276 2277 int i40e_xmit_xdp_tx_ring(struct xdp_buff *xdp, struct i40e_ring *xdp_ring) 2278 { 2279 struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp); 2280 2281 if (unlikely(!xdpf)) 2282 return I40E_XDP_CONSUMED; 2283 2284 return i40e_xmit_xdp_ring(xdpf, xdp_ring); 2285 } 2286 2287 /** 2288 * i40e_run_xdp - run an XDP program 2289 * @rx_ring: Rx ring being processed 2290 * @xdp: XDP buffer containing the frame 2291 * @xdp_prog: XDP program to run 2292 **/ 2293 static int i40e_run_xdp(struct i40e_ring *rx_ring, struct xdp_buff *xdp, struct bpf_prog *xdp_prog) 2294 { 2295 int err, result = I40E_XDP_PASS; 2296 struct i40e_ring *xdp_ring; 2297 u32 act; 2298 2299 if (!xdp_prog) 2300 goto xdp_out; 2301 2302 prefetchw(xdp->data_hard_start); /* xdp_frame write */ 2303 2304 act = bpf_prog_run_xdp(xdp_prog, xdp); 2305 switch (act) { 2306 case XDP_PASS: 2307 break; 2308 case XDP_TX: 2309 xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index]; 2310 result = i40e_xmit_xdp_tx_ring(xdp, xdp_ring); 2311 if (result == I40E_XDP_CONSUMED) 2312 goto out_failure; 2313 break; 2314 case XDP_REDIRECT: 2315 err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog); 2316 if (err) 2317 goto out_failure; 2318 result = I40E_XDP_REDIR; 2319 break; 2320 default: 2321 bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act); 2322 fallthrough; 2323 case XDP_ABORTED: 2324 out_failure: 2325 trace_xdp_exception(rx_ring->netdev, xdp_prog, act); 2326 fallthrough; /* handle aborts by dropping packet */ 2327 case XDP_DROP: 2328 result = I40E_XDP_CONSUMED; 2329 break; 2330 } 2331 xdp_out: 2332 return result; 2333 } 2334 2335 /** 2336 * i40e_rx_buffer_flip - adjusted rx_buffer to point to an unused region 2337 * @rx_ring: Rx ring 2338 * @rx_buffer: Rx buffer to adjust 2339 * @size: Size of adjustment 2340 **/ 2341 static void i40e_rx_buffer_flip(struct i40e_ring *rx_ring, 2342 struct i40e_rx_buffer *rx_buffer, 2343 unsigned int size) 2344 { 2345 unsigned int truesize = i40e_rx_frame_truesize(rx_ring, size); 2346 2347 #if (PAGE_SIZE < 8192) 2348 rx_buffer->page_offset ^= truesize; 2349 #else 2350 rx_buffer->page_offset += truesize; 2351 #endif 2352 } 2353 2354 /** 2355 * i40e_xdp_ring_update_tail - Updates the XDP Tx ring tail register 2356 * @xdp_ring: XDP Tx ring 2357 * 2358 * This function updates the XDP Tx ring tail register. 2359 **/ 2360 void i40e_xdp_ring_update_tail(struct i40e_ring *xdp_ring) 2361 { 2362 /* Force memory writes to complete before letting h/w 2363 * know there are new descriptors to fetch. 2364 */ 2365 wmb(); 2366 writel_relaxed(xdp_ring->next_to_use, xdp_ring->tail); 2367 } 2368 2369 /** 2370 * i40e_update_rx_stats - Update Rx ring statistics 2371 * @rx_ring: rx descriptor ring 2372 * @total_rx_bytes: number of bytes received 2373 * @total_rx_packets: number of packets received 2374 * 2375 * This function updates the Rx ring statistics. 
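 *
 * The counters are written under u64_stats_update_begin()/
 * u64_stats_update_end() so that readers on 32-bit systems can fetch a
 * consistent 64-bit snapshot, typically by retrying with
 * u64_stats_fetch_begin()/u64_stats_fetch_retry().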
2376 **/ 2377 void i40e_update_rx_stats(struct i40e_ring *rx_ring, 2378 unsigned int total_rx_bytes, 2379 unsigned int total_rx_packets) 2380 { 2381 u64_stats_update_begin(&rx_ring->syncp); 2382 rx_ring->stats.packets += total_rx_packets; 2383 rx_ring->stats.bytes += total_rx_bytes; 2384 u64_stats_update_end(&rx_ring->syncp); 2385 rx_ring->q_vector->rx.total_packets += total_rx_packets; 2386 rx_ring->q_vector->rx.total_bytes += total_rx_bytes; 2387 } 2388 2389 /** 2390 * i40e_finalize_xdp_rx - Bump XDP Tx tail and/or flush redirect map 2391 * @rx_ring: Rx ring 2392 * @xdp_res: Result of the receive batch 2393 * 2394 * This function bumps the XDP Tx tail and/or flushes the redirect map, and 2395 * should be called when a batch of packets has been processed in the 2396 * napi loop. 2397 **/ 2398 void i40e_finalize_xdp_rx(struct i40e_ring *rx_ring, unsigned int xdp_res) 2399 { 2400 if (xdp_res & I40E_XDP_REDIR) 2401 xdp_do_flush_map(); 2402 2403 if (xdp_res & I40E_XDP_TX) { 2404 struct i40e_ring *xdp_ring = 2405 rx_ring->vsi->xdp_rings[rx_ring->queue_index]; 2406 2407 i40e_xdp_ring_update_tail(xdp_ring); 2408 } 2409 } 2410 2411 /** 2412 * i40e_inc_ntc - Advance the next_to_clean index 2413 * @rx_ring: Rx ring 2414 **/ 2415 static void i40e_inc_ntc(struct i40e_ring *rx_ring) 2416 { 2417 u32 ntc = rx_ring->next_to_clean + 1; 2418 2419 ntc = (ntc < rx_ring->count) ? ntc : 0; 2420 rx_ring->next_to_clean = ntc; 2421 prefetch(I40E_RX_DESC(rx_ring, ntc)); 2422 } 2423 2424 /** 2425 * i40e_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf 2426 * @rx_ring: rx descriptor ring to transact packets on 2427 * @budget: Total limit on number of packets to process 2428 * 2429 * This function provides a "bounce buffer" approach to Rx interrupt 2430 * processing. The advantage to this is that on systems that have 2431 * expensive overhead for IOMMU access this provides a means of avoiding 2432 * it by maintaining the mapping of the page to the system.
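 *
 * Roughly, for each completed descriptor the loop below reads the
 * writeback qword, synchronizes the buffer for CPU use, runs the
 * attached XDP program (if any), otherwise builds or extends an skb,
 * recycles or frees the page, and finally hands finished skbs to the
 * stack via napi_gro_receive().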
2433 * 2434 * Returns amount of work completed 2435 **/ 2436 static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) 2437 { 2438 unsigned int total_rx_bytes = 0, total_rx_packets = 0, frame_sz = 0; 2439 u16 cleaned_count = I40E_DESC_UNUSED(rx_ring); 2440 unsigned int offset = rx_ring->rx_offset; 2441 struct sk_buff *skb = rx_ring->skb; 2442 unsigned int xdp_xmit = 0; 2443 struct bpf_prog *xdp_prog; 2444 bool failure = false; 2445 struct xdp_buff xdp; 2446 int xdp_res = 0; 2447 2448 #if (PAGE_SIZE < 8192) 2449 frame_sz = i40e_rx_frame_truesize(rx_ring, 0); 2450 #endif 2451 xdp_init_buff(&xdp, frame_sz, &rx_ring->xdp_rxq); 2452 2453 xdp_prog = READ_ONCE(rx_ring->xdp_prog); 2454 2455 while (likely(total_rx_packets < (unsigned int)budget)) { 2456 struct i40e_rx_buffer *rx_buffer; 2457 union i40e_rx_desc *rx_desc; 2458 int rx_buffer_pgcnt; 2459 unsigned int size; 2460 u64 qword; 2461 2462 /* return some buffers to hardware, one at a time is too slow */ 2463 if (cleaned_count >= I40E_RX_BUFFER_WRITE) { 2464 failure = failure || 2465 i40e_alloc_rx_buffers(rx_ring, cleaned_count); 2466 cleaned_count = 0; 2467 } 2468 2469 rx_desc = I40E_RX_DESC(rx_ring, rx_ring->next_to_clean); 2470 2471 /* status_error_len will always be zero for unused descriptors 2472 * because it's cleared in cleanup, and overlaps with hdr_addr 2473 * which is always zero because packet split isn't used, if the 2474 * hardware wrote DD then the length will be non-zero 2475 */ 2476 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); 2477 2478 /* This memory barrier is needed to keep us from reading 2479 * any other fields out of the rx_desc until we have 2480 * verified the descriptor has been written back. 2481 */ 2482 dma_rmb(); 2483 2484 if (i40e_rx_is_programming_status(qword)) { 2485 i40e_clean_programming_status(rx_ring, 2486 rx_desc->raw.qword[0], 2487 qword); 2488 rx_buffer = i40e_rx_bi(rx_ring, rx_ring->next_to_clean); 2489 i40e_inc_ntc(rx_ring); 2490 i40e_reuse_rx_page(rx_ring, rx_buffer); 2491 cleaned_count++; 2492 continue; 2493 } 2494 2495 size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >> 2496 I40E_RXD_QW1_LENGTH_PBUF_SHIFT; 2497 if (!size) 2498 break; 2499 2500 i40e_trace(clean_rx_irq, rx_ring, rx_desc, skb); 2501 rx_buffer = i40e_get_rx_buffer(rx_ring, size, &rx_buffer_pgcnt); 2502 2503 /* retrieve a buffer from the ring */ 2504 if (!skb) { 2505 unsigned char *hard_start; 2506 2507 hard_start = page_address(rx_buffer->page) + 2508 rx_buffer->page_offset - offset; 2509 xdp_prepare_buff(&xdp, hard_start, offset, size, true); 2510 xdp_buff_clear_frags_flag(&xdp); 2511 #if (PAGE_SIZE > 4096) 2512 /* At larger PAGE_SIZE, frame_sz depend on len size */ 2513 xdp.frame_sz = i40e_rx_frame_truesize(rx_ring, size); 2514 #endif 2515 xdp_res = i40e_run_xdp(rx_ring, &xdp, xdp_prog); 2516 } 2517 2518 if (xdp_res) { 2519 if (xdp_res & (I40E_XDP_TX | I40E_XDP_REDIR)) { 2520 xdp_xmit |= xdp_res; 2521 i40e_rx_buffer_flip(rx_ring, rx_buffer, size); 2522 } else { 2523 rx_buffer->pagecnt_bias++; 2524 } 2525 total_rx_bytes += size; 2526 total_rx_packets++; 2527 } else if (skb) { 2528 i40e_add_rx_frag(rx_ring, rx_buffer, skb, size); 2529 } else if (ring_uses_build_skb(rx_ring)) { 2530 skb = i40e_build_skb(rx_ring, rx_buffer, &xdp); 2531 } else { 2532 skb = i40e_construct_skb(rx_ring, rx_buffer, &xdp); 2533 } 2534 2535 /* exit if we failed to retrieve a buffer */ 2536 if (!xdp_res && !skb) { 2537 rx_ring->rx_stats.alloc_buff_failed++; 2538 rx_buffer->pagecnt_bias++; 2539 break; 2540 } 2541 2542 
i40e_put_rx_buffer(rx_ring, rx_buffer, rx_buffer_pgcnt); 2543 cleaned_count++; 2544 2545 i40e_inc_ntc(rx_ring); 2546 if (i40e_is_non_eop(rx_ring, rx_desc)) 2547 continue; 2548 2549 if (xdp_res || i40e_cleanup_headers(rx_ring, skb, rx_desc)) { 2550 skb = NULL; 2551 continue; 2552 } 2553 2554 /* probably a little skewed due to removing CRC */ 2555 total_rx_bytes += skb->len; 2556 2557 /* populate checksum, VLAN, and protocol */ 2558 i40e_process_skb_fields(rx_ring, rx_desc, skb); 2559 2560 i40e_trace(clean_rx_irq_rx, rx_ring, rx_desc, skb); 2561 napi_gro_receive(&rx_ring->q_vector->napi, skb); 2562 skb = NULL; 2563 2564 /* update budget accounting */ 2565 total_rx_packets++; 2566 } 2567 2568 i40e_finalize_xdp_rx(rx_ring, xdp_xmit); 2569 rx_ring->skb = skb; 2570 2571 i40e_update_rx_stats(rx_ring, total_rx_bytes, total_rx_packets); 2572 2573 /* guarantee a trip back through this routine if there was a failure */ 2574 return failure ? budget : (int)total_rx_packets; 2575 } 2576 2577 static inline u32 i40e_buildreg_itr(const int type, u16 itr) 2578 { 2579 u32 val; 2580 2581 /* We don't bother with setting the CLEARPBA bit as the data sheet 2582 * points out doing so is "meaningless since it was already 2583 * auto-cleared". The auto-clearing happens when the interrupt is 2584 * asserted. 2585 * 2586 * Hardware errata 28 for also indicates that writing to a 2587 * xxINT_DYN_CTLx CSR with INTENA_MSK (bit 31) set to 0 will clear 2588 * an event in the PBA anyway so we need to rely on the automask 2589 * to hold pending events for us until the interrupt is re-enabled 2590 * 2591 * The itr value is reported in microseconds, and the register 2592 * value is recorded in 2 microsecond units. For this reason we 2593 * only need to shift by the interval shift - 1 instead of the 2594 * full value. 2595 */ 2596 itr &= I40E_ITR_MASK; 2597 2598 val = I40E_PFINT_DYN_CTLN_INTENA_MASK | 2599 (type << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) | 2600 (itr << (I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT - 1)); 2601 2602 return val; 2603 } 2604 2605 /* a small macro to shorten up some long lines */ 2606 #define INTREG I40E_PFINT_DYN_CTLN 2607 2608 /* The act of updating the ITR will cause it to immediately trigger. In order 2609 * to prevent this from throwing off adaptive update statistics we defer the 2610 * update so that it can only happen so often. So after either Tx or Rx are 2611 * updated we make the adaptive scheme wait until either the ITR completely 2612 * expires via the next_update expiration or we have been through at least 2613 * 3 interrupts. 2614 */ 2615 #define ITR_COUNTDOWN_START 3 2616 2617 /** 2618 * i40e_update_enable_itr - Update itr and re-enable MSIX interrupt 2619 * @vsi: the VSI we care about 2620 * @q_vector: q_vector for which itr is being updated and interrupt enabled 2621 * 2622 **/ 2623 static inline void i40e_update_enable_itr(struct i40e_vsi *vsi, 2624 struct i40e_q_vector *q_vector) 2625 { 2626 struct i40e_hw *hw = &vsi->back->hw; 2627 u32 intval; 2628 2629 /* If we don't have MSIX, then we only need to re-enable icr0 */ 2630 if (!(vsi->back->flags & I40E_FLAG_MSIX_ENABLED)) { 2631 i40e_irq_dynamic_enable_icr0(vsi->back); 2632 return; 2633 } 2634 2635 /* These will do nothing if dynamic updates are not enabled */ 2636 i40e_update_itr(q_vector, &q_vector->tx); 2637 i40e_update_itr(q_vector, &q_vector->rx); 2638 2639 /* This block of logic allows us to get away with only updating 2640 * one ITR value with each interrupt. 
The idea is to perform a 2641 * pseudo-lazy update with the following criteria. 2642 * 2643 * 1. Rx is given higher priority than Tx if both are in same state 2644 * 2. If we must reduce an ITR that is given highest priority. 2645 * 3. We then give priority to increasing ITR based on amount. 2646 */ 2647 if (q_vector->rx.target_itr < q_vector->rx.current_itr) { 2648 /* Rx ITR needs to be reduced, this is highest priority */ 2649 intval = i40e_buildreg_itr(I40E_RX_ITR, 2650 q_vector->rx.target_itr); 2651 q_vector->rx.current_itr = q_vector->rx.target_itr; 2652 q_vector->itr_countdown = ITR_COUNTDOWN_START; 2653 } else if ((q_vector->tx.target_itr < q_vector->tx.current_itr) || 2654 ((q_vector->rx.target_itr - q_vector->rx.current_itr) < 2655 (q_vector->tx.target_itr - q_vector->tx.current_itr))) { 2656 /* Tx ITR needs to be reduced, this is second priority 2657 * Tx ITR needs to be increased more than Rx, fourth priority 2658 */ 2659 intval = i40e_buildreg_itr(I40E_TX_ITR, 2660 q_vector->tx.target_itr); 2661 q_vector->tx.current_itr = q_vector->tx.target_itr; 2662 q_vector->itr_countdown = ITR_COUNTDOWN_START; 2663 } else if (q_vector->rx.current_itr != q_vector->rx.target_itr) { 2664 /* Rx ITR needs to be increased, third priority */ 2665 intval = i40e_buildreg_itr(I40E_RX_ITR, 2666 q_vector->rx.target_itr); 2667 q_vector->rx.current_itr = q_vector->rx.target_itr; 2668 q_vector->itr_countdown = ITR_COUNTDOWN_START; 2669 } else { 2670 /* No ITR update, lowest priority */ 2671 intval = i40e_buildreg_itr(I40E_ITR_NONE, 0); 2672 if (q_vector->itr_countdown) 2673 q_vector->itr_countdown--; 2674 } 2675 2676 if (!test_bit(__I40E_VSI_DOWN, vsi->state)) 2677 wr32(hw, INTREG(q_vector->reg_idx), intval); 2678 } 2679 2680 /** 2681 * i40e_napi_poll - NAPI polling Rx/Tx cleanup routine 2682 * @napi: napi struct with our devices info in it 2683 * @budget: amount of work driver is allowed to do this pass, in packets 2684 * 2685 * This function will clean all queues associated with a q_vector. 2686 * 2687 * Returns the amount of work done 2688 **/ 2689 int i40e_napi_poll(struct napi_struct *napi, int budget) 2690 { 2691 struct i40e_q_vector *q_vector = 2692 container_of(napi, struct i40e_q_vector, napi); 2693 struct i40e_vsi *vsi = q_vector->vsi; 2694 struct i40e_ring *ring; 2695 bool clean_complete = true; 2696 bool arm_wb = false; 2697 int budget_per_ring; 2698 int work_done = 0; 2699 2700 if (test_bit(__I40E_VSI_DOWN, vsi->state)) { 2701 napi_complete(napi); 2702 return 0; 2703 } 2704 2705 /* Since the actual Tx work is minimal, we can give the Tx a larger 2706 * budget and be more aggressive about cleaning up the Tx descriptors. 2707 */ 2708 i40e_for_each_ring(ring, q_vector->tx) { 2709 bool wd = ring->xsk_pool ? 2710 i40e_clean_xdp_tx_irq(vsi, ring) : 2711 i40e_clean_tx_irq(vsi, ring, budget); 2712 2713 if (!wd) { 2714 clean_complete = false; 2715 continue; 2716 } 2717 arm_wb |= ring->arm_wb; 2718 ring->arm_wb = false; 2719 } 2720 2721 /* Handle case where we are called by netpoll with a budget of 0 */ 2722 if (budget <= 0) 2723 goto tx_only; 2724 2725 /* normally we have 1 Rx ring per q_vector */ 2726 if (unlikely(q_vector->num_ringpairs > 1)) 2727 /* We attempt to distribute budget to each Rx queue fairly, but 2728 * don't allow the budget to go below 1 because that would exit 2729 * polling early. 
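 *
 * For example, with the default NAPI budget of 64 and two ring pairs
 * on this vector each Rx ring is polled with a budget of 32, while a
 * hypothetical vector with more rings than budget would still give
 * each ring a budget of at least 1.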
2730 */ 2731 budget_per_ring = max_t(int, budget / q_vector->num_ringpairs, 1); 2732 else 2733 /* Max of 1 Rx ring in this q_vector so give it the budget */ 2734 budget_per_ring = budget; 2735 2736 i40e_for_each_ring(ring, q_vector->rx) { 2737 int cleaned = ring->xsk_pool ? 2738 i40e_clean_rx_irq_zc(ring, budget_per_ring) : 2739 i40e_clean_rx_irq(ring, budget_per_ring); 2740 2741 work_done += cleaned; 2742 /* if we clean as many as budgeted, we must not be done */ 2743 if (cleaned >= budget_per_ring) 2744 clean_complete = false; 2745 } 2746 2747 /* If work not completed, return budget and polling will return */ 2748 if (!clean_complete) { 2749 int cpu_id = smp_processor_id(); 2750 2751 /* It is possible that the interrupt affinity has changed but, 2752 * if the cpu is pegged at 100%, polling will never exit while 2753 * traffic continues and the interrupt will be stuck on this 2754 * cpu. We check to make sure affinity is correct before we 2755 * continue to poll, otherwise we must stop polling so the 2756 * interrupt can move to the correct cpu. 2757 */ 2758 if (!cpumask_test_cpu(cpu_id, &q_vector->affinity_mask)) { 2759 /* Tell napi that we are done polling */ 2760 napi_complete_done(napi, work_done); 2761 2762 /* Force an interrupt */ 2763 i40e_force_wb(vsi, q_vector); 2764 2765 /* Return budget-1 so that polling stops */ 2766 return budget - 1; 2767 } 2768 tx_only: 2769 if (arm_wb) { 2770 q_vector->tx.ring[0].tx_stats.tx_force_wb++; 2771 i40e_enable_wb_on_itr(vsi, q_vector); 2772 } 2773 return budget; 2774 } 2775 2776 if (vsi->back->flags & I40E_TXR_FLAGS_WB_ON_ITR) 2777 q_vector->arm_wb_state = false; 2778 2779 /* Exit the polling mode, but don't re-enable interrupts if stack might 2780 * poll us due to busy-polling 2781 */ 2782 if (likely(napi_complete_done(napi, work_done))) 2783 i40e_update_enable_itr(vsi, q_vector); 2784 2785 return min(work_done, budget - 1); 2786 } 2787 2788 /** 2789 * i40e_atr - Add a Flow Director ATR filter 2790 * @tx_ring: ring to add programming descriptor to 2791 * @skb: send buffer 2792 * @tx_flags: send tx flags 2793 **/ 2794 static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb, 2795 u32 tx_flags) 2796 { 2797 struct i40e_filter_program_desc *fdir_desc; 2798 struct i40e_pf *pf = tx_ring->vsi->back; 2799 union { 2800 unsigned char *network; 2801 struct iphdr *ipv4; 2802 struct ipv6hdr *ipv6; 2803 } hdr; 2804 struct tcphdr *th; 2805 unsigned int hlen; 2806 u32 flex_ptype, dtype_cmd; 2807 int l4_proto; 2808 u16 i; 2809 2810 /* make sure ATR is enabled */ 2811 if (!(pf->flags & I40E_FLAG_FD_ATR_ENABLED)) 2812 return; 2813 2814 if (test_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state)) 2815 return; 2816 2817 /* if sampling is disabled do nothing */ 2818 if (!tx_ring->atr_sample_rate) 2819 return; 2820 2821 /* Currently only IPv4/IPv6 with TCP is supported */ 2822 if (!(tx_flags & (I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6))) 2823 return; 2824 2825 /* snag network header to get L4 type and address */ 2826 hdr.network = (tx_flags & I40E_TX_FLAGS_UDP_TUNNEL) ? 2827 skb_inner_network_header(skb) : skb_network_header(skb); 2828 2829 /* Note: tx_flags gets modified to reflect inner protocols in 2830 * tx_enable_csum function if encap is enabled. 
2831 */ 2832 if (tx_flags & I40E_TX_FLAGS_IPV4) { 2833 /* access ihl as u8 to avoid unaligned access on ia64 */ 2834 hlen = (hdr.network[0] & 0x0F) << 2; 2835 l4_proto = hdr.ipv4->protocol; 2836 } else { 2837 /* find the start of the innermost ipv6 header */ 2838 unsigned int inner_hlen = hdr.network - skb->data; 2839 unsigned int h_offset = inner_hlen; 2840 2841 /* this function updates h_offset to the end of the header */ 2842 l4_proto = 2843 ipv6_find_hdr(skb, &h_offset, IPPROTO_TCP, NULL, NULL); 2844 /* hlen will contain our best estimate of the tcp header */ 2845 hlen = h_offset - inner_hlen; 2846 } 2847 2848 if (l4_proto != IPPROTO_TCP) 2849 return; 2850 2851 th = (struct tcphdr *)(hdr.network + hlen); 2852 2853 /* Due to lack of space, no more new filters can be programmed */ 2854 if (th->syn && test_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state)) 2855 return; 2856 if (pf->flags & I40E_FLAG_HW_ATR_EVICT_ENABLED) { 2857 /* HW ATR eviction will take care of removing filters on FIN 2858 * and RST packets. 2859 */ 2860 if (th->fin || th->rst) 2861 return; 2862 } 2863 2864 tx_ring->atr_count++; 2865 2866 /* sample on all syn/fin/rst packets or once every atr sample rate */ 2867 if (!th->fin && 2868 !th->syn && 2869 !th->rst && 2870 (tx_ring->atr_count < tx_ring->atr_sample_rate)) 2871 return; 2872 2873 tx_ring->atr_count = 0; 2874 2875 /* grab the next descriptor */ 2876 i = tx_ring->next_to_use; 2877 fdir_desc = I40E_TX_FDIRDESC(tx_ring, i); 2878 2879 i++; 2880 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; 2881 2882 flex_ptype = (tx_ring->queue_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) & 2883 I40E_TXD_FLTR_QW0_QINDEX_MASK; 2884 flex_ptype |= (tx_flags & I40E_TX_FLAGS_IPV4) ? 2885 (I40E_FILTER_PCTYPE_NONF_IPV4_TCP << 2886 I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) : 2887 (I40E_FILTER_PCTYPE_NONF_IPV6_TCP << 2888 I40E_TXD_FLTR_QW0_PCTYPE_SHIFT); 2889 2890 flex_ptype |= tx_ring->vsi->id << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT; 2891 2892 dtype_cmd = I40E_TX_DESC_DTYPE_FILTER_PROG; 2893 2894 dtype_cmd |= (th->fin || th->rst) ? 2895 (I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE << 2896 I40E_TXD_FLTR_QW1_PCMD_SHIFT) : 2897 (I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE << 2898 I40E_TXD_FLTR_QW1_PCMD_SHIFT); 2899 2900 dtype_cmd |= I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX << 2901 I40E_TXD_FLTR_QW1_DEST_SHIFT; 2902 2903 dtype_cmd |= I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID << 2904 I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT; 2905 2906 dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK; 2907 if (!(tx_flags & I40E_TX_FLAGS_UDP_TUNNEL)) 2908 dtype_cmd |= 2909 ((u32)I40E_FD_ATR_STAT_IDX(pf->hw.pf_id) << 2910 I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) & 2911 I40E_TXD_FLTR_QW1_CNTINDEX_MASK; 2912 else 2913 dtype_cmd |= 2914 ((u32)I40E_FD_ATR_TUNNEL_STAT_IDX(pf->hw.pf_id) << 2915 I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) & 2916 I40E_TXD_FLTR_QW1_CNTINDEX_MASK; 2917 2918 if (pf->flags & I40E_FLAG_HW_ATR_EVICT_ENABLED) 2919 dtype_cmd |= I40E_TXD_FLTR_QW1_ATR_MASK; 2920 2921 fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype); 2922 fdir_desc->rsvd = cpu_to_le32(0); 2923 fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd); 2924 fdir_desc->fd_id = cpu_to_le32(0); 2925 } 2926 2927 /** 2928 * i40e_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW 2929 * @skb: send buffer 2930 * @tx_ring: ring to send buffer on 2931 * @flags: the tx flags to be set 2932 * 2933 * Checks the skb and set up correspondingly several generic transmit flags 2934 * related to VLAN tagging for the HW, such as VLAN, DCB, etc. 
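 *
 * The resulting tag travels in the upper bits of the tx_flags word; for
 * a hardware-accelerated tag this amounts to roughly:
 *
 *	*flags = I40E_TX_FLAGS_HW_VLAN |
 *		 (skb_vlan_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT);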
2935 * 2936 * Returns error code indicate the frame should be dropped upon error and the 2937 * otherwise returns 0 to indicate the flags has been set properly. 2938 **/ 2939 static inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb, 2940 struct i40e_ring *tx_ring, 2941 u32 *flags) 2942 { 2943 __be16 protocol = skb->protocol; 2944 u32 tx_flags = 0; 2945 2946 if (protocol == htons(ETH_P_8021Q) && 2947 !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) { 2948 /* When HW VLAN acceleration is turned off by the user the 2949 * stack sets the protocol to 8021q so that the driver 2950 * can take any steps required to support the SW only 2951 * VLAN handling. In our case the driver doesn't need 2952 * to take any further steps so just set the protocol 2953 * to the encapsulated ethertype. 2954 */ 2955 skb->protocol = vlan_get_protocol(skb); 2956 goto out; 2957 } 2958 2959 /* if we have a HW VLAN tag being added, default to the HW one */ 2960 if (skb_vlan_tag_present(skb)) { 2961 tx_flags |= skb_vlan_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT; 2962 tx_flags |= I40E_TX_FLAGS_HW_VLAN; 2963 /* else if it is a SW VLAN, check the next protocol and store the tag */ 2964 } else if (protocol == htons(ETH_P_8021Q)) { 2965 struct vlan_hdr *vhdr, _vhdr; 2966 2967 vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr); 2968 if (!vhdr) 2969 return -EINVAL; 2970 2971 protocol = vhdr->h_vlan_encapsulated_proto; 2972 tx_flags |= ntohs(vhdr->h_vlan_TCI) << I40E_TX_FLAGS_VLAN_SHIFT; 2973 tx_flags |= I40E_TX_FLAGS_SW_VLAN; 2974 } 2975 2976 if (!(tx_ring->vsi->back->flags & I40E_FLAG_DCB_ENABLED)) 2977 goto out; 2978 2979 /* Insert 802.1p priority into VLAN header */ 2980 if ((tx_flags & (I40E_TX_FLAGS_HW_VLAN | I40E_TX_FLAGS_SW_VLAN)) || 2981 (skb->priority != TC_PRIO_CONTROL)) { 2982 tx_flags &= ~I40E_TX_FLAGS_VLAN_PRIO_MASK; 2983 tx_flags |= (skb->priority & 0x7) << 2984 I40E_TX_FLAGS_VLAN_PRIO_SHIFT; 2985 if (tx_flags & I40E_TX_FLAGS_SW_VLAN) { 2986 struct vlan_ethhdr *vhdr; 2987 int rc; 2988 2989 rc = skb_cow_head(skb, 0); 2990 if (rc < 0) 2991 return rc; 2992 vhdr = (struct vlan_ethhdr *)skb->data; 2993 vhdr->h_vlan_TCI = htons(tx_flags >> 2994 I40E_TX_FLAGS_VLAN_SHIFT); 2995 } else { 2996 tx_flags |= I40E_TX_FLAGS_HW_VLAN; 2997 } 2998 } 2999 3000 out: 3001 *flags = tx_flags; 3002 return 0; 3003 } 3004 3005 /** 3006 * i40e_tso - set up the tso context descriptor 3007 * @first: pointer to first Tx buffer for xmit 3008 * @hdr_len: ptr to the size of the packet header 3009 * @cd_type_cmd_tso_mss: Quad Word 1 3010 * 3011 * Returns 0 if no TSO can happen, 1 if tso is going, or error 3012 **/ 3013 static int i40e_tso(struct i40e_tx_buffer *first, u8 *hdr_len, 3014 u64 *cd_type_cmd_tso_mss) 3015 { 3016 struct sk_buff *skb = first->skb; 3017 u64 cd_cmd, cd_tso_len, cd_mss; 3018 __be16 protocol; 3019 union { 3020 struct iphdr *v4; 3021 struct ipv6hdr *v6; 3022 unsigned char *hdr; 3023 } ip; 3024 union { 3025 struct tcphdr *tcp; 3026 struct udphdr *udp; 3027 unsigned char *hdr; 3028 } l4; 3029 u32 paylen, l4_offset; 3030 u16 gso_size; 3031 int err; 3032 3033 if (skb->ip_summed != CHECKSUM_PARTIAL) 3034 return 0; 3035 3036 if (!skb_is_gso(skb)) 3037 return 0; 3038 3039 err = skb_cow_head(skb, 0); 3040 if (err < 0) 3041 return err; 3042 3043 protocol = vlan_get_protocol(skb); 3044 3045 if (eth_p_mpls(protocol)) 3046 ip.hdr = skb_inner_network_header(skb); 3047 else 3048 ip.hdr = skb_network_header(skb); 3049 l4.hdr = skb_checksum_start(skb); 3050 3051 /* initialize outer IP header fields */ 3052 if 
(ip.v4->version == 4) { 3053 ip.v4->tot_len = 0; 3054 ip.v4->check = 0; 3055 3056 first->tx_flags |= I40E_TX_FLAGS_TSO; 3057 } else { 3058 ip.v6->payload_len = 0; 3059 first->tx_flags |= I40E_TX_FLAGS_TSO; 3060 } 3061 3062 if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE | 3063 SKB_GSO_GRE_CSUM | 3064 SKB_GSO_IPXIP4 | 3065 SKB_GSO_IPXIP6 | 3066 SKB_GSO_UDP_TUNNEL | 3067 SKB_GSO_UDP_TUNNEL_CSUM)) { 3068 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) && 3069 (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) { 3070 l4.udp->len = 0; 3071 3072 /* determine offset of outer transport header */ 3073 l4_offset = l4.hdr - skb->data; 3074 3075 /* remove payload length from outer checksum */ 3076 paylen = skb->len - l4_offset; 3077 csum_replace_by_diff(&l4.udp->check, 3078 (__force __wsum)htonl(paylen)); 3079 } 3080 3081 /* reset pointers to inner headers */ 3082 ip.hdr = skb_inner_network_header(skb); 3083 l4.hdr = skb_inner_transport_header(skb); 3084 3085 /* initialize inner IP header fields */ 3086 if (ip.v4->version == 4) { 3087 ip.v4->tot_len = 0; 3088 ip.v4->check = 0; 3089 } else { 3090 ip.v6->payload_len = 0; 3091 } 3092 } 3093 3094 /* determine offset of inner transport header */ 3095 l4_offset = l4.hdr - skb->data; 3096 3097 /* remove payload length from inner checksum */ 3098 paylen = skb->len - l4_offset; 3099 3100 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) { 3101 csum_replace_by_diff(&l4.udp->check, (__force __wsum)htonl(paylen)); 3102 /* compute length of segmentation header */ 3103 *hdr_len = sizeof(*l4.udp) + l4_offset; 3104 } else { 3105 csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen)); 3106 /* compute length of segmentation header */ 3107 *hdr_len = (l4.tcp->doff * 4) + l4_offset; 3108 } 3109 3110 /* pull values out of skb_shinfo */ 3111 gso_size = skb_shinfo(skb)->gso_size; 3112 3113 /* update GSO size and bytecount with header size */ 3114 first->gso_segs = skb_shinfo(skb)->gso_segs; 3115 first->bytecount += (first->gso_segs - 1) * *hdr_len; 3116 3117 /* find the field values */ 3118 cd_cmd = I40E_TX_CTX_DESC_TSO; 3119 cd_tso_len = skb->len - *hdr_len; 3120 cd_mss = gso_size; 3121 *cd_type_cmd_tso_mss |= (cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) | 3122 (cd_tso_len << I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) | 3123 (cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT); 3124 return 1; 3125 } 3126 3127 /** 3128 * i40e_tsyn - set up the tsyn context descriptor 3129 * @tx_ring: ptr to the ring to send 3130 * @skb: ptr to the skb we're sending 3131 * @tx_flags: the collected send information 3132 * @cd_type_cmd_tso_mss: Quad Word 1 3133 * 3134 * Returns 0 if no Tx timestamp can happen and 1 if the timestamp will happen 3135 **/ 3136 static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb, 3137 u32 tx_flags, u64 *cd_type_cmd_tso_mss) 3138 { 3139 struct i40e_pf *pf; 3140 3141 if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))) 3142 return 0; 3143 3144 /* Tx timestamps cannot be sampled when doing TSO */ 3145 if (tx_flags & I40E_TX_FLAGS_TSO) 3146 return 0; 3147 3148 /* only timestamp the outbound packet if the user has requested it and 3149 * we are not already transmitting a packet to be timestamped 3150 */ 3151 pf = i40e_netdev_to_pf(tx_ring->netdev); 3152 if (!(pf->flags & I40E_FLAG_PTP)) 3153 return 0; 3154 3155 if (pf->ptp_tx && 3156 !test_and_set_bit_lock(__I40E_PTP_TX_IN_PROGRESS, pf->state)) { 3157 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 3158 pf->ptp_tx_start = jiffies; 3159 pf->ptp_tx_skb = skb_get(skb); 3160 } else { 3161 pf->tx_hwtstamp_skipped++; 
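		/* either PTP Tx timestamping is not active or a previous
		 * timestamp request is still outstanding, so this packet is
		 * sent without a hardware timestamp and the skip is only
		 * counted
		 */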
3162 return 0; 3163 } 3164 3165 *cd_type_cmd_tso_mss |= (u64)I40E_TX_CTX_DESC_TSYN << 3166 I40E_TXD_CTX_QW1_CMD_SHIFT; 3167 3168 return 1; 3169 } 3170 3171 /** 3172 * i40e_tx_enable_csum - Enable Tx checksum offloads 3173 * @skb: send buffer 3174 * @tx_flags: pointer to Tx flags currently set 3175 * @td_cmd: Tx descriptor command bits to set 3176 * @td_offset: Tx descriptor header offsets to set 3177 * @tx_ring: Tx descriptor ring 3178 * @cd_tunneling: ptr to context desc bits 3179 **/ 3180 static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, 3181 u32 *td_cmd, u32 *td_offset, 3182 struct i40e_ring *tx_ring, 3183 u32 *cd_tunneling) 3184 { 3185 union { 3186 struct iphdr *v4; 3187 struct ipv6hdr *v6; 3188 unsigned char *hdr; 3189 } ip; 3190 union { 3191 struct tcphdr *tcp; 3192 struct udphdr *udp; 3193 unsigned char *hdr; 3194 } l4; 3195 unsigned char *exthdr; 3196 u32 offset, cmd = 0; 3197 __be16 frag_off; 3198 __be16 protocol; 3199 u8 l4_proto = 0; 3200 3201 if (skb->ip_summed != CHECKSUM_PARTIAL) 3202 return 0; 3203 3204 protocol = vlan_get_protocol(skb); 3205 3206 if (eth_p_mpls(protocol)) 3207 ip.hdr = skb_inner_network_header(skb); 3208 else 3209 ip.hdr = skb_network_header(skb); 3210 l4.hdr = skb_checksum_start(skb); 3211 3212 /* set the tx_flags to indicate the IP protocol type. this is 3213 * required so that checksum header computation below is accurate. 3214 */ 3215 if (ip.v4->version == 4) 3216 *tx_flags |= I40E_TX_FLAGS_IPV4; 3217 else 3218 *tx_flags |= I40E_TX_FLAGS_IPV6; 3219 3220 /* compute outer L2 header size */ 3221 offset = ((ip.hdr - skb->data) / 2) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT; 3222 3223 if (skb->encapsulation) { 3224 u32 tunnel = 0; 3225 /* define outer network header type */ 3226 if (*tx_flags & I40E_TX_FLAGS_IPV4) { 3227 tunnel |= (*tx_flags & I40E_TX_FLAGS_TSO) ? 
3228 I40E_TX_CTX_EXT_IP_IPV4 : 3229 I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM; 3230 3231 l4_proto = ip.v4->protocol; 3232 } else if (*tx_flags & I40E_TX_FLAGS_IPV6) { 3233 int ret; 3234 3235 tunnel |= I40E_TX_CTX_EXT_IP_IPV6; 3236 3237 exthdr = ip.hdr + sizeof(*ip.v6); 3238 l4_proto = ip.v6->nexthdr; 3239 ret = ipv6_skip_exthdr(skb, exthdr - skb->data, 3240 &l4_proto, &frag_off); 3241 if (ret < 0) 3242 return -1; 3243 } 3244 3245 /* define outer transport */ 3246 switch (l4_proto) { 3247 case IPPROTO_UDP: 3248 tunnel |= I40E_TXD_CTX_UDP_TUNNELING; 3249 *tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL; 3250 break; 3251 case IPPROTO_GRE: 3252 tunnel |= I40E_TXD_CTX_GRE_TUNNELING; 3253 *tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL; 3254 break; 3255 case IPPROTO_IPIP: 3256 case IPPROTO_IPV6: 3257 *tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL; 3258 l4.hdr = skb_inner_network_header(skb); 3259 break; 3260 default: 3261 if (*tx_flags & I40E_TX_FLAGS_TSO) 3262 return -1; 3263 3264 skb_checksum_help(skb); 3265 return 0; 3266 } 3267 3268 /* compute outer L3 header size */ 3269 tunnel |= ((l4.hdr - ip.hdr) / 4) << 3270 I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT; 3271 3272 /* switch IP header pointer from outer to inner header */ 3273 ip.hdr = skb_inner_network_header(skb); 3274 3275 /* compute tunnel header size */ 3276 tunnel |= ((ip.hdr - l4.hdr) / 2) << 3277 I40E_TXD_CTX_QW0_NATLEN_SHIFT; 3278 3279 /* indicate if we need to offload outer UDP header */ 3280 if ((*tx_flags & I40E_TX_FLAGS_TSO) && 3281 !(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) && 3282 (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) 3283 tunnel |= I40E_TXD_CTX_QW0_L4T_CS_MASK; 3284 3285 /* record tunnel offload values */ 3286 *cd_tunneling |= tunnel; 3287 3288 /* switch L4 header pointer from outer to inner */ 3289 l4.hdr = skb_inner_transport_header(skb); 3290 l4_proto = 0; 3291 3292 /* reset type as we transition from outer to inner headers */ 3293 *tx_flags &= ~(I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6); 3294 if (ip.v4->version == 4) 3295 *tx_flags |= I40E_TX_FLAGS_IPV4; 3296 if (ip.v6->version == 6) 3297 *tx_flags |= I40E_TX_FLAGS_IPV6; 3298 } 3299 3300 /* Enable IP checksum offloads */ 3301 if (*tx_flags & I40E_TX_FLAGS_IPV4) { 3302 l4_proto = ip.v4->protocol; 3303 /* the stack computes the IP header already, the only time we 3304 * need the hardware to recompute it is in the case of TSO. 3305 */ 3306 cmd |= (*tx_flags & I40E_TX_FLAGS_TSO) ? 
3307 I40E_TX_DESC_CMD_IIPT_IPV4_CSUM : 3308 I40E_TX_DESC_CMD_IIPT_IPV4; 3309 } else if (*tx_flags & I40E_TX_FLAGS_IPV6) { 3310 cmd |= I40E_TX_DESC_CMD_IIPT_IPV6; 3311 3312 exthdr = ip.hdr + sizeof(*ip.v6); 3313 l4_proto = ip.v6->nexthdr; 3314 if (l4.hdr != exthdr) 3315 ipv6_skip_exthdr(skb, exthdr - skb->data, 3316 &l4_proto, &frag_off); 3317 } 3318 3319 /* compute inner L3 header size */ 3320 offset |= ((l4.hdr - ip.hdr) / 4) << I40E_TX_DESC_LENGTH_IPLEN_SHIFT; 3321 3322 /* Enable L4 checksum offloads */ 3323 switch (l4_proto) { 3324 case IPPROTO_TCP: 3325 /* enable checksum offloads */ 3326 cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP; 3327 offset |= l4.tcp->doff << I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT; 3328 break; 3329 case IPPROTO_SCTP: 3330 /* enable SCTP checksum offload */ 3331 cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP; 3332 offset |= (sizeof(struct sctphdr) >> 2) << 3333 I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT; 3334 break; 3335 case IPPROTO_UDP: 3336 /* enable UDP checksum offload */ 3337 cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP; 3338 offset |= (sizeof(struct udphdr) >> 2) << 3339 I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT; 3340 break; 3341 default: 3342 if (*tx_flags & I40E_TX_FLAGS_TSO) 3343 return -1; 3344 skb_checksum_help(skb); 3345 return 0; 3346 } 3347 3348 *td_cmd |= cmd; 3349 *td_offset |= offset; 3350 3351 return 1; 3352 } 3353 3354 /** 3355 * i40e_create_tx_ctx - Build the Tx context descriptor 3356 * @tx_ring: ring to create the descriptor on 3357 * @cd_type_cmd_tso_mss: Quad Word 1 3358 * @cd_tunneling: Quad Word 0 - bits 0-31 3359 * @cd_l2tag2: Quad Word 0 - bits 32-63 3360 **/ 3361 static void i40e_create_tx_ctx(struct i40e_ring *tx_ring, 3362 const u64 cd_type_cmd_tso_mss, 3363 const u32 cd_tunneling, const u32 cd_l2tag2) 3364 { 3365 struct i40e_tx_context_desc *context_desc; 3366 int i = tx_ring->next_to_use; 3367 3368 if ((cd_type_cmd_tso_mss == I40E_TX_DESC_DTYPE_CONTEXT) && 3369 !cd_tunneling && !cd_l2tag2) 3370 return; 3371 3372 /* grab the next descriptor */ 3373 context_desc = I40E_TX_CTXTDESC(tx_ring, i); 3374 3375 i++; 3376 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; 3377 3378 /* cpu_to_le32 and assign to struct fields */ 3379 context_desc->tunneling_params = cpu_to_le32(cd_tunneling); 3380 context_desc->l2tag2 = cpu_to_le16(cd_l2tag2); 3381 context_desc->rsvd = cpu_to_le16(0); 3382 context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss); 3383 } 3384 3385 /** 3386 * __i40e_maybe_stop_tx - 2nd level check for tx stop conditions 3387 * @tx_ring: the ring to be checked 3388 * @size: the size buffer we want to assure is available 3389 * 3390 * Returns -EBUSY if a stop is needed, else 0 3391 **/ 3392 int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size) 3393 { 3394 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); 3395 /* Memory barrier before checking head and tail */ 3396 smp_mb(); 3397 3398 ++tx_ring->tx_stats.tx_stopped; 3399 3400 /* Check again in a case another CPU has just made room available. */ 3401 if (likely(I40E_DESC_UNUSED(tx_ring) < size)) 3402 return -EBUSY; 3403 3404 /* A reprieve! 
- use start_queue because it doesn't call schedule */ 3405 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index); 3406 ++tx_ring->tx_stats.restart_queue; 3407 return 0; 3408 } 3409 3410 /** 3411 * __i40e_chk_linearize - Check if there are more than 8 buffers per packet 3412 * @skb: send buffer 3413 * 3414 * Note: Our HW can't DMA more than 8 buffers to build a packet on the wire 3415 * and so we need to figure out the cases where we need to linearize the skb. 3416 * 3417 * For TSO we need to count the TSO header and segment payload separately. 3418 * As such we need to check cases where we have 7 fragments or more as we 3419 * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for 3420 * the segment payload in the first descriptor, and another 7 for the 3421 * fragments. 3422 **/ 3423 bool __i40e_chk_linearize(struct sk_buff *skb) 3424 { 3425 const skb_frag_t *frag, *stale; 3426 int nr_frags, sum; 3427 3428 /* no need to check if number of frags is less than 7 */ 3429 nr_frags = skb_shinfo(skb)->nr_frags; 3430 if (nr_frags < (I40E_MAX_BUFFER_TXD - 1)) 3431 return false; 3432 3433 /* We need to walk through the list and validate that each group 3434 * of 6 fragments totals at least gso_size. 3435 */ 3436 nr_frags -= I40E_MAX_BUFFER_TXD - 2; 3437 frag = &skb_shinfo(skb)->frags[0]; 3438 3439 /* Initialize size to the negative value of gso_size minus 1. We 3440 * use this as the worst case scenario in which the frag ahead 3441 * of us only provides one byte which is why we are limited to 6 3442 * descriptors for a single transmit as the header and previous 3443 * fragment are already consuming 2 descriptors. 3444 */ 3445 sum = 1 - skb_shinfo(skb)->gso_size; 3446 3447 /* Add size of frags 0 through 4 to create our initial sum */ 3448 sum += skb_frag_size(frag++); 3449 sum += skb_frag_size(frag++); 3450 sum += skb_frag_size(frag++); 3451 sum += skb_frag_size(frag++); 3452 sum += skb_frag_size(frag++); 3453 3454 /* Walk through fragments adding latest fragment, testing it, and 3455 * then removing stale fragments from the sum. 3456 */ 3457 for (stale = &skb_shinfo(skb)->frags[0];; stale++) { 3458 int stale_size = skb_frag_size(stale); 3459 3460 sum += skb_frag_size(frag++); 3461 3462 /* The stale fragment may present us with a smaller 3463 * descriptor than the actual fragment size. To account 3464 * for that we need to remove all the data on the front and 3465 * figure out what the remainder would be in the last 3466 * descriptor associated with the fragment.
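 *
 * In other words, an oversized stale fragment is peeled off in pieces:
 * first the alignment pad up to the next I40E_MAX_READ_REQ_SIZE
 * boundary, then whole I40E_MAX_DATA_PER_TXD_ALIGNED chunks, so that
 * only the final, partial descriptor's worth of data remains in
 * stale_size to be subtracted at the bottom of the loop.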
3467 */ 3468 if (stale_size > I40E_MAX_DATA_PER_TXD) { 3469 int align_pad = -(skb_frag_off(stale)) & 3470 (I40E_MAX_READ_REQ_SIZE - 1); 3471 3472 sum -= align_pad; 3473 stale_size -= align_pad; 3474 3475 do { 3476 sum -= I40E_MAX_DATA_PER_TXD_ALIGNED; 3477 stale_size -= I40E_MAX_DATA_PER_TXD_ALIGNED; 3478 } while (stale_size > I40E_MAX_DATA_PER_TXD); 3479 } 3480 3481 /* if sum is negative we failed to make sufficient progress */ 3482 if (sum < 0) 3483 return true; 3484 3485 if (!nr_frags--) 3486 break; 3487 3488 sum -= stale_size; 3489 } 3490 3491 return false; 3492 } 3493 3494 /** 3495 * i40e_tx_map - Build the Tx descriptor 3496 * @tx_ring: ring to send buffer on 3497 * @skb: send buffer 3498 * @first: first buffer info buffer to use 3499 * @tx_flags: collected send information 3500 * @hdr_len: size of the packet header 3501 * @td_cmd: the command field in the descriptor 3502 * @td_offset: offset for checksum or crc 3503 * 3504 * Returns 0 on success, -1 on failure to DMA 3505 **/ 3506 static inline int i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, 3507 struct i40e_tx_buffer *first, u32 tx_flags, 3508 const u8 hdr_len, u32 td_cmd, u32 td_offset) 3509 { 3510 unsigned int data_len = skb->data_len; 3511 unsigned int size = skb_headlen(skb); 3512 skb_frag_t *frag; 3513 struct i40e_tx_buffer *tx_bi; 3514 struct i40e_tx_desc *tx_desc; 3515 u16 i = tx_ring->next_to_use; 3516 u32 td_tag = 0; 3517 dma_addr_t dma; 3518 u16 desc_count = 1; 3519 3520 if (tx_flags & I40E_TX_FLAGS_HW_VLAN) { 3521 td_cmd |= I40E_TX_DESC_CMD_IL2TAG1; 3522 td_tag = (tx_flags & I40E_TX_FLAGS_VLAN_MASK) >> 3523 I40E_TX_FLAGS_VLAN_SHIFT; 3524 } 3525 3526 first->tx_flags = tx_flags; 3527 3528 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); 3529 3530 tx_desc = I40E_TX_DESC(tx_ring, i); 3531 tx_bi = first; 3532 3533 for (frag = &skb_shinfo(skb)->frags[0];; frag++) { 3534 unsigned int max_data = I40E_MAX_DATA_PER_TXD_ALIGNED; 3535 3536 if (dma_mapping_error(tx_ring->dev, dma)) 3537 goto dma_error; 3538 3539 /* record length, and DMA address */ 3540 dma_unmap_len_set(tx_bi, len, size); 3541 dma_unmap_addr_set(tx_bi, dma, dma); 3542 3543 /* align size to end of page */ 3544 max_data += -dma & (I40E_MAX_READ_REQ_SIZE - 1); 3545 tx_desc->buffer_addr = cpu_to_le64(dma); 3546 3547 while (unlikely(size > I40E_MAX_DATA_PER_TXD)) { 3548 tx_desc->cmd_type_offset_bsz = 3549 build_ctob(td_cmd, td_offset, 3550 max_data, td_tag); 3551 3552 tx_desc++; 3553 i++; 3554 desc_count++; 3555 3556 if (i == tx_ring->count) { 3557 tx_desc = I40E_TX_DESC(tx_ring, 0); 3558 i = 0; 3559 } 3560 3561 dma += max_data; 3562 size -= max_data; 3563 3564 max_data = I40E_MAX_DATA_PER_TXD_ALIGNED; 3565 tx_desc->buffer_addr = cpu_to_le64(dma); 3566 } 3567 3568 if (likely(!data_len)) 3569 break; 3570 3571 tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset, 3572 size, td_tag); 3573 3574 tx_desc++; 3575 i++; 3576 desc_count++; 3577 3578 if (i == tx_ring->count) { 3579 tx_desc = I40E_TX_DESC(tx_ring, 0); 3580 i = 0; 3581 } 3582 3583 size = skb_frag_size(frag); 3584 data_len -= size; 3585 3586 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size, 3587 DMA_TO_DEVICE); 3588 3589 tx_bi = &tx_ring->tx_bi[i]; 3590 } 3591 3592 netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount); 3593 3594 i++; 3595 if (i == tx_ring->count) 3596 i = 0; 3597 3598 tx_ring->next_to_use = i; 3599 3600 i40e_maybe_stop_tx(tx_ring, DESC_NEEDED); 3601 3602 /* write last descriptor with EOP bit */ 3603 td_cmd |= I40E_TX_DESC_CMD_EOP; 3604 3605 /* We OR 
these values together to check both against 4 (WB_STRIDE) 3606 * below. This is safe since we don't re-use desc_count afterwards. 3607 */ 3608 desc_count |= ++tx_ring->packet_stride; 3609 3610 if (desc_count >= WB_STRIDE) { 3611 /* write last descriptor with RS bit set */ 3612 td_cmd |= I40E_TX_DESC_CMD_RS; 3613 tx_ring->packet_stride = 0; 3614 } 3615 3616 tx_desc->cmd_type_offset_bsz = 3617 build_ctob(td_cmd, td_offset, size, td_tag); 3618 3619 skb_tx_timestamp(skb); 3620 3621 /* Force memory writes to complete before letting h/w know there 3622 * are new descriptors to fetch. 3623 * 3624 * We also use this memory barrier to make certain all of the 3625 * status bits have been updated before next_to_watch is written. 3626 */ 3627 wmb(); 3628 3629 /* set next_to_watch value indicating a packet is present */ 3630 first->next_to_watch = tx_desc; 3631 3632 /* notify HW of packet */ 3633 if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) { 3634 writel(i, tx_ring->tail); 3635 } 3636 3637 return 0; 3638 3639 dma_error: 3640 dev_info(tx_ring->dev, "TX DMA map failed\n"); 3641 3642 /* clear dma mappings for failed tx_bi map */ 3643 for (;;) { 3644 tx_bi = &tx_ring->tx_bi[i]; 3645 i40e_unmap_and_free_tx_resource(tx_ring, tx_bi); 3646 if (tx_bi == first) 3647 break; 3648 if (i == 0) 3649 i = tx_ring->count; 3650 i--; 3651 } 3652 3653 tx_ring->next_to_use = i; 3654 3655 return -1; 3656 } 3657 3658 static u16 i40e_swdcb_skb_tx_hash(struct net_device *dev, 3659 const struct sk_buff *skb, 3660 u16 num_tx_queues) 3661 { 3662 u32 jhash_initval_salt = 0xd631614b; 3663 u32 hash; 3664 3665 if (skb->sk && skb->sk->sk_hash) 3666 hash = skb->sk->sk_hash; 3667 else 3668 hash = (__force u16)skb->protocol ^ skb->hash; 3669 3670 hash = jhash_1word(hash, jhash_initval_salt); 3671 3672 return (u16)(((u64)hash * num_tx_queues) >> 32); 3673 } 3674 3675 u16 i40e_lan_select_queue(struct net_device *netdev, 3676 struct sk_buff *skb, 3677 struct net_device __always_unused *sb_dev) 3678 { 3679 struct i40e_netdev_priv *np = netdev_priv(netdev); 3680 struct i40e_vsi *vsi = np->vsi; 3681 struct i40e_hw *hw; 3682 u16 qoffset; 3683 u16 qcount; 3684 u8 tclass; 3685 u16 hash; 3686 u8 prio; 3687 3688 /* is DCB enabled at all? */ 3689 if (vsi->tc_config.numtc == 1) 3690 return netdev_pick_tx(netdev, skb, sb_dev); 3691 3692 prio = skb->priority; 3693 hw = &vsi->back->hw; 3694 tclass = hw->local_dcbx_config.etscfg.prioritytable[prio]; 3695 /* sanity check */ 3696 if (unlikely(!(vsi->tc_config.enabled_tc & BIT(tclass)))) 3697 tclass = 0; 3698 3699 /* select a queue assigned for the given TC */ 3700 qcount = vsi->tc_config.tc_info[tclass].qcount; 3701 hash = i40e_swdcb_skb_tx_hash(netdev, skb, qcount); 3702 3703 qoffset = vsi->tc_config.tc_info[tclass].qoffset; 3704 return qoffset + hash; 3705 } 3706 3707 /** 3708 * i40e_xmit_xdp_ring - transmits an XDP buffer to an XDP Tx ring 3709 * @xdpf: data to transmit 3710 * @xdp_ring: XDP Tx ring 3711 **/ 3712 static int i40e_xmit_xdp_ring(struct xdp_frame *xdpf, 3713 struct i40e_ring *xdp_ring) 3714 { 3715 struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf); 3716 u8 nr_frags = unlikely(xdp_frame_has_frags(xdpf)) ? 
/**
 * i40e_xmit_xdp_ring - transmits an XDP buffer to an XDP Tx ring
 * @xdpf: data to transmit
 * @xdp_ring: XDP Tx ring
 **/
static int i40e_xmit_xdp_ring(struct xdp_frame *xdpf,
			      struct i40e_ring *xdp_ring)
{
	struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf);
	u8 nr_frags = unlikely(xdp_frame_has_frags(xdpf)) ?
		      sinfo->nr_frags : 0;
	u16 i = 0, index = xdp_ring->next_to_use;
	struct i40e_tx_buffer *tx_head = &xdp_ring->tx_bi[index];
	struct i40e_tx_buffer *tx_bi = tx_head;
	struct i40e_tx_desc *tx_desc = I40E_TX_DESC(xdp_ring, index);
	void *data = xdpf->data;
	u32 size = xdpf->len;

	if (unlikely(I40E_DESC_UNUSED(xdp_ring) < 1 + nr_frags)) {
		xdp_ring->tx_stats.tx_busy++;
		return I40E_XDP_CONSUMED;
	}

	tx_head->bytecount = xdp_get_frame_len(xdpf);
	tx_head->gso_segs = 1;
	tx_head->xdpf = xdpf;

	for (;;) {
		dma_addr_t dma;

		dma = dma_map_single(xdp_ring->dev, data, size, DMA_TO_DEVICE);
		if (dma_mapping_error(xdp_ring->dev, dma))
			goto unmap;

		/* record length, and DMA address */
		dma_unmap_len_set(tx_bi, len, size);
		dma_unmap_addr_set(tx_bi, dma, dma);

		tx_desc->buffer_addr = cpu_to_le64(dma);
		tx_desc->cmd_type_offset_bsz =
			build_ctob(I40E_TX_DESC_CMD_ICRC, 0, size, 0);

		if (++index == xdp_ring->count)
			index = 0;

		if (i == nr_frags)
			break;

		tx_bi = &xdp_ring->tx_bi[index];
		tx_desc = I40E_TX_DESC(xdp_ring, index);

		data = skb_frag_address(&sinfo->frags[i]);
		size = skb_frag_size(&sinfo->frags[i]);
		i++;
	}

	tx_desc->cmd_type_offset_bsz |=
		cpu_to_le64(I40E_TXD_CMD << I40E_TXD_QW1_CMD_SHIFT);

	/* Make certain all of the status bits have been updated
	 * before next_to_watch is written.
	 */
	smp_wmb();

	xdp_ring->xdp_tx_active++;

	tx_head->next_to_watch = tx_desc;
	xdp_ring->next_to_use = index;

	return I40E_XDP_TX;

unmap:
	for (;;) {
		tx_bi = &xdp_ring->tx_bi[index];
		if (dma_unmap_len(tx_bi, len))
			dma_unmap_page(xdp_ring->dev,
				       dma_unmap_addr(tx_bi, dma),
				       dma_unmap_len(tx_bi, len),
				       DMA_TO_DEVICE);
		dma_unmap_len_set(tx_bi, len, 0);
		if (tx_bi == tx_head)
			break;

		if (!index)
			index += xdp_ring->count;
		index--;
	}

	return I40E_XDP_CONSUMED;
}
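
/* Worked example for i40e_xmit_xdp_ring() above (editorial illustration):
 * an xdp_frame with a linear part and two fragments consumes three
 * descriptors. Only tx_head (the first tx_bi) records the xdpf pointer and
 * the frame's total bytecount; every descriptor is built with
 * I40E_TX_DESC_CMD_ICRC, and only the final descriptor has the EOP/RS bits
 * OR-ed in via I40E_TXD_CMD. If a DMA mapping fails part-way through, the
 * unmap: path walks index backwards (wrapping at 0) until it is back at
 * tx_head, releasing the mappings that were already programmed.
 */
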
/**
 * i40e_xmit_frame_ring - Sends buffer on Tx ring
 * @skb: send buffer
 * @tx_ring: ring to send buffer on
 *
 * Returns NETDEV_TX_OK if sent, else an error code
 **/
static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
					struct i40e_ring *tx_ring)
{
	u64 cd_type_cmd_tso_mss = I40E_TX_DESC_DTYPE_CONTEXT;
	u32 cd_tunneling = 0, cd_l2tag2 = 0;
	struct i40e_tx_buffer *first;
	u32 td_offset = 0;
	u32 tx_flags = 0;
	u32 td_cmd = 0;
	u8 hdr_len = 0;
	int tso, count;
	int tsyn;

	/* prefetch the data, we'll need it later */
	prefetch(skb->data);

	i40e_trace(xmit_frame_ring, skb, tx_ring);

	count = i40e_xmit_descriptor_count(skb);
	if (i40e_chk_linearize(skb, count)) {
		if (__skb_linearize(skb)) {
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
		count = i40e_txd_use_count(skb->len);
		tx_ring->tx_stats.tx_linearize++;
	}

	/* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD,
	 *       + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD,
	 *       + 4 desc gap to avoid the cache line where head is,
	 *       + 1 desc for context descriptor,
	 * otherwise try next time
	 */
	if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) {
		tx_ring->tx_stats.tx_busy++;
		return NETDEV_TX_BUSY;
	}

	/* record the location of the first descriptor for this packet */
	first = &tx_ring->tx_bi[tx_ring->next_to_use];
	first->skb = skb;
	first->bytecount = skb->len;
	first->gso_segs = 1;

	/* prepare the xmit flags */
	if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
		goto out_drop;

	tso = i40e_tso(first, &hdr_len, &cd_type_cmd_tso_mss);

	if (tso < 0)
		goto out_drop;
	else if (tso)
		tx_flags |= I40E_TX_FLAGS_TSO;

	/* Always offload the checksum, since it's in the data descriptor */
	tso = i40e_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset,
				  tx_ring, &cd_tunneling);
	if (tso < 0)
		goto out_drop;

	tsyn = i40e_tsyn(tx_ring, skb, tx_flags, &cd_type_cmd_tso_mss);

	if (tsyn)
		tx_flags |= I40E_TX_FLAGS_TSYN;

	/* always enable CRC insertion offload */
	td_cmd |= I40E_TX_DESC_CMD_ICRC;

	i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss,
			   cd_tunneling, cd_l2tag2);

	/* Add Flow Director ATR if it's enabled.
	 *
	 * NOTE: this must always be directly before the data descriptor.
	 */
	i40e_atr(tx_ring, skb, tx_flags);

	if (i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
			td_cmd, td_offset))
		goto cleanup_tx_tstamp;

	return NETDEV_TX_OK;

out_drop:
	i40e_trace(xmit_frame_ring_drop, first->skb, tx_ring);
	dev_kfree_skb_any(first->skb);
	first->skb = NULL;
cleanup_tx_tstamp:
	if (unlikely(tx_flags & I40E_TX_FLAGS_TSYN)) {
		struct i40e_pf *pf = i40e_netdev_to_pf(tx_ring->netdev);

		dev_kfree_skb_any(pf->ptp_tx_skb);
		pf->ptp_tx_skb = NULL;
		clear_bit_unlock(__I40E_PTP_TX_IN_PROGRESS, pf->state);
	}

	return NETDEV_TX_OK;
}

/**
 * i40e_lan_xmit_frame - Selects the correct VSI and Tx queue to send buffer
 * @skb: send buffer
 * @netdev: network interface device structure
 *
 * Returns NETDEV_TX_OK if sent, else an error code
 **/
netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_ring *tx_ring = vsi->tx_rings[skb->queue_mapping];

	/* hardware can't handle really short frames, hardware padding works
	 * beyond this point
	 */
	if (skb_put_padto(skb, I40E_MIN_TX_LEN))
		return NETDEV_TX_OK;

	return i40e_xmit_frame_ring(skb, tx_ring);
}
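
/* Editorial note on the short-frame handling above (illustrative, not from
 * the upstream comments): frames shorter than I40E_MIN_TX_LEN are zero-padded
 * by skb_put_padto() before they ever reach i40e_xmit_frame_ring(), i.e. a
 * runt frame is simply extended with trailing zeroes. skb_put_padto() frees
 * the skb itself when the padding reallocation fails, which is why that
 * branch returns NETDEV_TX_OK without freeing anything here.
 */
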
/**
 * i40e_xdp_xmit - Implements ndo_xdp_xmit
 * @dev: netdev
 * @n: number of frames
 * @frames: array of XDP buffer pointers
 * @flags: XDP extra info
 *
 * Returns number of frames successfully sent. Failed frames
 * will be freed by XDP core.
 *
 * For error cases, a negative errno code is returned and no frames
 * are transmitted (caller must handle freeing frames).
 **/
int i40e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
		  u32 flags)
{
	struct i40e_netdev_priv *np = netdev_priv(dev);
	unsigned int queue_index = smp_processor_id();
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_ring *xdp_ring;
	int nxmit = 0;
	int i;

	if (test_bit(__I40E_VSI_DOWN, vsi->state))
		return -ENETDOWN;

	if (!i40e_enabled_xdp_vsi(vsi) || queue_index >= vsi->num_queue_pairs ||
	    test_bit(__I40E_CONFIG_BUSY, pf->state))
		return -ENXIO;

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
		return -EINVAL;

	xdp_ring = vsi->xdp_rings[queue_index];

	for (i = 0; i < n; i++) {
		struct xdp_frame *xdpf = frames[i];
		int err;

		err = i40e_xmit_xdp_ring(xdpf, xdp_ring);
		if (err != I40E_XDP_TX)
			break;
		nxmit++;
	}

	if (unlikely(flags & XDP_XMIT_FLUSH))
		i40e_xdp_ring_update_tail(xdp_ring);

	return nxmit;
}
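
/* Editorial usage sketch for i40e_xdp_xmit() (illustrative only; the calling
 * convention belongs to the XDP core, not to this driver): when the core
 * redirects a batch of n frames and the return value nxmit is less than n,
 * the ring ran out of clean descriptors (or a mapping failed) part-way
 * through, and the core frees the remaining n - nxmit frames itself. A
 * negative errno (-ENETDOWN, -ENXIO or -EINVAL above) means nothing was
 * queued. The tail register is only bumped when XDP_XMIT_FLUSH is set, so
 * several batches can be queued on the ring before the hardware is notified.
 */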