// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2018 Intel Corporation. */

#include "iavf.h"
#include "iavf_prototype.h"
#include "iavf_client.h"

/* busy wait delay in msec */
#define IAVF_BUSY_WAIT_DELAY 10
#define IAVF_BUSY_WAIT_COUNT 50

/**
 * iavf_send_pf_msg
 * @adapter: adapter structure
 * @op: virtual channel opcode
 * @msg: pointer to message buffer
 * @len: message length
 *
 * Send message to PF and print status if failure.
 * Returns 0 on success (including the case where PF communications are
 * already known to be down and the send is skipped), otherwise a negative
 * errno derived from the admin queue status.
 **/
static int iavf_send_pf_msg(struct iavf_adapter *adapter,
                            enum virtchnl_ops op, u8 *msg, u16 len)
{
        struct iavf_hw *hw = &adapter->hw;
        enum iavf_status status;

        /* PF communications failed earlier; don't bother queueing anything */
        if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)
                return 0; /* nothing to see here, move along */

        status = iavf_aq_send_msg_to_pf(hw, op, 0, msg, len, NULL);
        if (status)
                dev_dbg(&adapter->pdev->dev, "Unable to send opcode %d to PF, status %s, aq_err %s\n",
                        op, iavf_stat_str(hw, status),
                        iavf_aq_str(hw, hw->aq.asq_last_status));
        return iavf_status_to_errno(status);
}

/**
 * iavf_send_api_ver
 * @adapter: adapter structure
 *
 * Send API version admin queue message to the PF. The reply is not checked
 * in this function. Returns 0 if the message was successfully
 * sent, or one of the IAVF_ADMIN_QUEUE_ERROR_ statuses if not.
 **/
int iavf_send_api_ver(struct iavf_adapter *adapter)
{
        struct virtchnl_version_info vvi;

        vvi.major = VIRTCHNL_VERSION_MAJOR;
        vvi.minor = VIRTCHNL_VERSION_MINOR;

        return iavf_send_pf_msg(adapter, VIRTCHNL_OP_VERSION, (u8 *)&vvi,
                                sizeof(vvi));
}

/**
 * iavf_poll_virtchnl_msg
 * @hw: HW configuration structure
 * @event: event to populate on success
 * @op_to_poll: requested virtchnl op to poll for
 *
 * Initialize poll for virtchnl msg matching the requested_op.
Returns 0
 * if a message of the correct opcode is in the queue or an error code
 * if no message matching the op code is waiting and other failures.
 */
static int
iavf_poll_virtchnl_msg(struct iavf_hw *hw, struct iavf_arq_event_info *event,
                       enum virtchnl_ops op_to_poll)
{
        enum virtchnl_ops received_op;
        enum iavf_status status;
        u32 v_retval;

        /* NOTE: any queued messages whose opcode differs from op_to_poll are
         * consumed and discarded while waiting for the requested one.
         */
        while (1) {
                /* When the AQ is empty, iavf_clean_arq_element will return
                 * nonzero and this loop will terminate.
                 */
                status = iavf_clean_arq_element(hw, event, NULL);
                if (status != IAVF_SUCCESS)
                        return iavf_status_to_errno(status);
                received_op =
                        (enum virtchnl_ops)le32_to_cpu(event->desc.cookie_high);
                if (op_to_poll == received_op)
                        break;
        }

        /* the PF's result code for the op rides in cookie_low */
        v_retval = le32_to_cpu(event->desc.cookie_low);
        return virtchnl_status_to_errno((enum virtchnl_status_code)v_retval);
}

/**
 * iavf_verify_api_ver
 * @adapter: adapter structure
 *
 * Compare API versions with the PF. Must be called after admin queue is
 * initialized. Returns 0 if API versions match, -EIO if they do not,
 * IAVF_ERR_ADMIN_QUEUE_NO_WORK if the admin queue is empty, and any errors
 * from the firmware are propagated.
 **/
int iavf_verify_api_ver(struct iavf_adapter *adapter)
{
        struct iavf_arq_event_info event;
        int err;

        event.buf_len = IAVF_MAX_AQ_BUF_SIZE;
        event.msg_buf = kzalloc(IAVF_MAX_AQ_BUF_SIZE, GFP_KERNEL);
        if (!event.msg_buf)
                return -ENOMEM;

        err = iavf_poll_virtchnl_msg(&adapter->hw, &event, VIRTCHNL_OP_VERSION);
        if (!err) {
                struct virtchnl_version_info *pf_vvi =
                        (struct virtchnl_version_info *)event.msg_buf;
                /* cache the PF's version even before compatibility checking */
                adapter->pf_version = *pf_vvi;

                /* reject a PF whose API is newer than what we were built for */
                if (pf_vvi->major > VIRTCHNL_VERSION_MAJOR ||
                    (pf_vvi->major == VIRTCHNL_VERSION_MAJOR &&
                     pf_vvi->minor > VIRTCHNL_VERSION_MINOR))
                        err = -EIO;
        }

        kfree(event.msg_buf);

        return err;
}

/**
 * iavf_send_vf_config_msg
 * @adapter: adapter structure
 *
 * Send VF configuration request admin queue message to the PF. The reply
 * is not checked in this function. Returns 0 if the message was
 * successfully sent, or one of the IAVF_ADMIN_QUEUE_ERROR_ statuses if not.
 **/
int iavf_send_vf_config_msg(struct iavf_adapter *adapter)
{
        u32 caps;

        /* full set of offload capabilities this VF asks the PF for */
        caps = VIRTCHNL_VF_OFFLOAD_L2 |
               VIRTCHNL_VF_OFFLOAD_RSS_PF |
               VIRTCHNL_VF_OFFLOAD_RSS_AQ |
               VIRTCHNL_VF_OFFLOAD_RSS_REG |
               VIRTCHNL_VF_OFFLOAD_VLAN |
               VIRTCHNL_VF_OFFLOAD_WB_ON_ITR |
               VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 |
               VIRTCHNL_VF_OFFLOAD_ENCAP |
               VIRTCHNL_VF_OFFLOAD_VLAN_V2 |
               VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM |
               VIRTCHNL_VF_OFFLOAD_REQ_QUEUES |
               VIRTCHNL_VF_OFFLOAD_ADQ |
               VIRTCHNL_VF_OFFLOAD_USO |
               VIRTCHNL_VF_OFFLOAD_FDIR_PF |
               VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF |
               VIRTCHNL_VF_CAP_ADV_LINK_SPEED;

        adapter->current_op = VIRTCHNL_OP_GET_VF_RESOURCES;
        adapter->aq_required &= ~IAVF_FLAG_AQ_GET_CONFIG;
        /* the capability payload is only sent when API v1.1 was negotiated
         * (PF_IS_V11); otherwise the request carries no payload
         */
        if (PF_IS_V11(adapter))
                return iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_VF_RESOURCES,
                                        (u8 *)&caps, sizeof(caps));
        else
                return iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_VF_RESOURCES,
                                        NULL, 0);
}

/**
 * iavf_send_vf_offload_vlan_v2_msg - request VLAN V2 offload capabilities
 * @adapter: adapter structure
 *
 * Returns 0 if the request was successfully sent, -EOPNOTSUPP when the
 * VLAN V2 capability was not negotiated with the PF.
 */
int iavf_send_vf_offload_vlan_v2_msg(struct iavf_adapter *adapter)
{
        adapter->aq_required &= ~IAVF_FLAG_AQ_GET_OFFLOAD_VLAN_V2_CAPS;

        if (!VLAN_V2_ALLOWED(adapter))
                return -EOPNOTSUPP;

        adapter->current_op = VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS;

        return iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS,
                                NULL, 0);
}

/**
 * iavf_validate_num_queues
 * @adapter: adapter structure
 *
 * Validate that the number of queues the PF has sent in
 * VIRTCHNL_OP_GET_VF_RESOURCES is not larger than the VF can handle.
 **/
static void iavf_validate_num_queues(struct iavf_adapter *adapter)
{
        if (adapter->vf_res->num_queue_pairs > IAVF_MAX_REQ_QUEUES) {
                struct virtchnl_vsi_resource *vsi_res;
                int i;

                dev_info(&adapter->pdev->dev, "Received %d queues, but can only have a max of %d\n",
                         adapter->vf_res->num_queue_pairs,
                         IAVF_MAX_REQ_QUEUES);
                dev_info(&adapter->pdev->dev, "Fixing by reducing queues to %d\n",
                         IAVF_MAX_REQ_QUEUES);
                adapter->vf_res->num_queue_pairs = IAVF_MAX_REQ_QUEUES;
                /* clamp every per-VSI queue count as well so the resource
                 * structure stays internally consistent
                 */
                for (i = 0; i < adapter->vf_res->num_vsis; i++) {
                        vsi_res = &adapter->vf_res->vsi_res[i];
                        vsi_res->num_queue_pairs = IAVF_MAX_REQ_QUEUES;
                }
        }
}

/**
 * iavf_get_vf_config
 * @adapter: private adapter structure
 *
 * Get VF configuration from PF and populate hw structure. Must be called after
 * admin queue is initialized. Busy waits until response is received from PF,
 * with maximum timeout. Response from PF is returned in the buffer for further
 * processing by the caller.
 **/
int iavf_get_vf_config(struct iavf_adapter *adapter)
{
        struct iavf_hw *hw = &adapter->hw;
        struct iavf_arq_event_info event;
        u16 len;
        int err;

        /* buffer sized for the resource struct plus the maximum number of
         * VSI resource entries this VF supports
         */
        len = sizeof(struct virtchnl_vf_resource) +
              IAVF_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource);
        event.buf_len = len;
        event.msg_buf = kzalloc(len, GFP_KERNEL);
        if (!event.msg_buf)
                return -ENOMEM;

        err = iavf_poll_virtchnl_msg(hw, &event, VIRTCHNL_OP_GET_VF_RESOURCES);
        /* copy whatever was received, bounded by our buffer size; done
         * unconditionally, but only consumed further below when err == 0
         */
        memcpy(adapter->vf_res, event.msg_buf, min(event.msg_len, len));

        /* some PFs send more queues than we should have so validate that
         * we aren't getting too many queues
         */
        if (!err)
                iavf_validate_num_queues(adapter);
        iavf_vf_parse_hw_config(hw, adapter->vf_res);

        kfree(event.msg_buf);

        return err;
}

/**
 * iavf_get_vf_vlan_v2_caps - poll for the PF's VLAN V2 capability reply
 * @adapter: adapter structure
 *
 * Returns 0 on success with adapter->vlan_v2_caps populated, negative errno
 * otherwise.
 */
int iavf_get_vf_vlan_v2_caps(struct iavf_adapter *adapter)
{
        struct iavf_arq_event_info event;
        int err;
        u16 len;

        len = sizeof(struct virtchnl_vlan_caps);
        event.buf_len = len;
        event.msg_buf = kzalloc(len, GFP_KERNEL);
        if (!event.msg_buf)
                return -ENOMEM;

        err = iavf_poll_virtchnl_msg(&adapter->hw, &event,
                                     VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS);
        if (!err)
                memcpy(&adapter->vlan_v2_caps, event.msg_buf,
                       min(event.msg_len, len));

        kfree(event.msg_buf);

        return err;
}

/**
 * iavf_configure_queues
 * @adapter: adapter structure
 *
 * Request that the PF set up our (previously allocated) queues.
 **/
void iavf_configure_queues(struct iavf_adapter *adapter)
{
        struct virtchnl_vsi_queue_config_info *vqci;
        struct virtchnl_queue_pair_info *vqpi;
        int pairs = adapter->num_active_queues;
        int i, max_frame = IAVF_MAX_RXBUFFER;
        size_t len;

        if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
                /* bail because we already have a command pending */
                dev_err(&adapter->pdev->dev, "Cannot configure queues, command %d pending\n",
                        adapter->current_op);
                return;
        }
        adapter->current_op = VIRTCHNL_OP_CONFIG_VSI_QUEUES;
        len = struct_size(vqci, qpair, pairs);
        vqci = kzalloc(len, GFP_KERNEL);
        if (!vqci)
                return;

        /* Limit maximum frame size when jumbo frames is not enabled */
        if (!(adapter->flags & IAVF_FLAG_LEGACY_RX) &&
            (adapter->netdev->mtu <= ETH_DATA_LEN))
                max_frame = IAVF_RXBUFFER_1536 - NET_IP_ALIGN;

        vqci->vsi_id = adapter->vsi_res->vsi_id;
        vqci->num_queue_pairs = pairs;
        vqpi = vqci->qpair;
        /* Size check is not needed here - HW max is 16 queue pairs, and we
         * can fit info for 31 of them into the AQ buffer before it overflows.
         */
        for (i = 0; i < pairs; i++) {
                /* one entry per queue pair: tx and rx ring geometry and DMA
                 * addresses for queue i
                 */
                vqpi->txq.vsi_id = vqci->vsi_id;
                vqpi->txq.queue_id = i;
                vqpi->txq.ring_len = adapter->tx_rings[i].count;
                vqpi->txq.dma_ring_addr = adapter->tx_rings[i].dma;
                vqpi->rxq.vsi_id = vqci->vsi_id;
                vqpi->rxq.queue_id = i;
                vqpi->rxq.ring_len = adapter->rx_rings[i].count;
                vqpi->rxq.dma_ring_addr = adapter->rx_rings[i].dma;
                vqpi->rxq.max_pkt_size = max_frame;
                vqpi->rxq.databuffer_size =
                        ALIGN(adapter->rx_rings[i].rx_buf_len,
                              BIT_ULL(IAVF_RXQ_CTX_DBUFF_SHIFT));
                vqpi++;
        }

        adapter->aq_required &= ~IAVF_FLAG_AQ_CONFIGURE_QUEUES;
        iavf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
                         (u8 *)vqci, len);
        kfree(vqci);
}

/**
 * iavf_enable_queues
 * @adapter: adapter structure
 *
 * Request that the PF enable all of our queues.
 **/
void iavf_enable_queues(struct iavf_adapter *adapter)
{
        struct virtchnl_queue_select vqs;

        if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
                /* bail because we already have a command pending */
                dev_err(&adapter->pdev->dev, "Cannot enable queues, command %d pending\n",
                        adapter->current_op);
                return;
        }
        adapter->current_op = VIRTCHNL_OP_ENABLE_QUEUES;
        vqs.vsi_id = adapter->vsi_res->vsi_id;
        /* bitmask covering every active queue */
        vqs.tx_queues = BIT(adapter->num_active_queues) - 1;
        vqs.rx_queues = vqs.tx_queues;
        adapter->aq_required &= ~IAVF_FLAG_AQ_ENABLE_QUEUES;
        iavf_send_pf_msg(adapter, VIRTCHNL_OP_ENABLE_QUEUES,
                         (u8 *)&vqs, sizeof(vqs));
}

/**
 * iavf_disable_queues
 * @adapter: adapter structure
 *
 * Request that the PF disable all of our queues.
 **/
void iavf_disable_queues(struct iavf_adapter *adapter)
{
        struct virtchnl_queue_select vqs;

        if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
                /* bail because we already have a command pending */
                dev_err(&adapter->pdev->dev, "Cannot disable queues, command %d pending\n",
                        adapter->current_op);
                return;
        }
        adapter->current_op = VIRTCHNL_OP_DISABLE_QUEUES;
        vqs.vsi_id = adapter->vsi_res->vsi_id;
        /* bitmask covering every active queue */
        vqs.tx_queues = BIT(adapter->num_active_queues) - 1;
        vqs.rx_queues = vqs.tx_queues;
        adapter->aq_required &= ~IAVF_FLAG_AQ_DISABLE_QUEUES;
        iavf_send_pf_msg(adapter, VIRTCHNL_OP_DISABLE_QUEUES,
                         (u8 *)&vqs, sizeof(vqs));
}

/**
 * iavf_map_queues
 * @adapter: adapter structure
 *
 * Request that the PF map queues to interrupt vectors. Misc causes, including
 * admin queue, are always mapped to vector 0.
 **/
void iavf_map_queues(struct iavf_adapter *adapter)
{
        struct virtchnl_irq_map_info *vimi;
        struct virtchnl_vector_map *vecmap;
        struct iavf_q_vector *q_vector;
        int v_idx, q_vectors;
        size_t len;

        if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
                /* bail because we already have a command pending */
                dev_err(&adapter->pdev->dev, "Cannot map queues to vectors, command %d pending\n",
                        adapter->current_op);
                return;
        }
        adapter->current_op = VIRTCHNL_OP_CONFIG_IRQ_MAP;

        /* NONQ_VECS vectors are reserved for non-queue interrupts */
        q_vectors = adapter->num_msix_vectors - NONQ_VECS;

        len = struct_size(vimi, vecmap, adapter->num_msix_vectors);
        vimi = kzalloc(len, GFP_KERNEL);
        if (!vimi)
                return;

        vimi->num_vectors = adapter->num_msix_vectors;
        /* Queue vectors first */
        for (v_idx = 0; v_idx < q_vectors; v_idx++) {
                q_vector = &adapter->q_vectors[v_idx];
                vecmap = &vimi->vecmap[v_idx];

                vecmap->vsi_id = adapter->vsi_res->vsi_id;
                vecmap->vector_id = v_idx + NONQ_VECS;
                vecmap->txq_map = q_vector->ring_mask;
                vecmap->rxq_map = q_vector->ring_mask;
                vecmap->rxitr_idx = IAVF_RX_ITR;
                vecmap->txitr_idx = IAVF_TX_ITR;
        }
        /* Misc vector last - this is only for AdminQ messages */
        /* v_idx == q_vectors after the loop, i.e. the final vecmap slot */
        vecmap = &vimi->vecmap[v_idx];
        vecmap->vsi_id = adapter->vsi_res->vsi_id;
        vecmap->vector_id = 0;
        vecmap->txq_map = 0;
        vecmap->rxq_map = 0;

        adapter->aq_required &= ~IAVF_FLAG_AQ_MAP_VECTORS;
        iavf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_IRQ_MAP,
                         (u8 *)vimi, len);
        kfree(vimi);
}

/**
 * iavf_set_mac_addr_type - Set the correct request type from the filter type
 * @virtchnl_ether_addr: pointer to requested list element
 * @filter: pointer to requested filter
 **/
static void
iavf_set_mac_addr_type(struct virtchnl_ether_addr *virtchnl_ether_addr,
                       const struct iavf_mac_filter *filter)
{
        virtchnl_ether_addr->type = filter->is_primary ?
                VIRTCHNL_ETHER_ADDR_PRIMARY :
                VIRTCHNL_ETHER_ADDR_EXTRA;
}

/**
 * iavf_add_ether_addrs
 * @adapter: adapter structure
 *
 * Request that the PF add one or more addresses to our filters.
 **/
void iavf_add_ether_addrs(struct iavf_adapter *adapter)
{
        struct virtchnl_ether_addr_list *veal;
        struct iavf_mac_filter *f;
        int i = 0, count = 0;
        bool more = false;
        size_t len;

        if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
                /* bail because we already have a command pending */
                dev_err(&adapter->pdev->dev, "Cannot add filters, command %d pending\n",
                        adapter->current_op);
                return;
        }

        spin_lock_bh(&adapter->mac_vlan_list_lock);

        /* count filters pending addition */
        list_for_each_entry(f, &adapter->mac_filter_list, list) {
                if (f->add)
                        count++;
        }
        if (!count) {
                adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_MAC_FILTER;
                spin_unlock_bh(&adapter->mac_vlan_list_lock);
                return;
        }
        adapter->current_op = VIRTCHNL_OP_ADD_ETH_ADDR;

        len = struct_size(veal, list, count);
        if (len > IAVF_MAX_AQ_BUF_SIZE) {
                /* truncate to what fits in one AQ message; "more" keeps the
                 * AQ flag set so the remainder is sent on a later pass
                 */
                dev_warn(&adapter->pdev->dev, "Too many add MAC changes in one request\n");
                count = (IAVF_MAX_AQ_BUF_SIZE -
                         sizeof(struct virtchnl_ether_addr_list)) /
                        sizeof(struct virtchnl_ether_addr);
                len = struct_size(veal, list, count);
                more = true;
        }

        /* GFP_ATOMIC: we are holding the mac_vlan_list spinlock */
        veal = kzalloc(len, GFP_ATOMIC);
        if (!veal) {
                spin_unlock_bh(&adapter->mac_vlan_list_lock);
                return;
        }

        veal->vsi_id = adapter->vsi_res->vsi_id;
        veal->num_elements = count;
        list_for_each_entry(f, &adapter->mac_filter_list, list) {
                if (f->add) {
                        ether_addr_copy(veal->list[i].addr, f->macaddr);
                        iavf_set_mac_addr_type(&veal->list[i], f);
                        i++;
                        f->add = false;
                        if (i == count)
                                break;
                }
        }
        if (!more)
                adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_MAC_FILTER;

        spin_unlock_bh(&adapter->mac_vlan_list_lock);

        iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_ETH_ADDR, (u8 *)veal, len);
        kfree(veal);
}

/**
 * iavf_del_ether_addrs
 * @adapter: adapter structure
 *
 * Request that the PF remove one or more addresses from our filters.
 **/
void iavf_del_ether_addrs(struct iavf_adapter *adapter)
{
        struct virtchnl_ether_addr_list *veal;
        struct iavf_mac_filter *f, *ftmp;
        int i = 0, count = 0;
        bool more = false;
        size_t len;

        if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
                /* bail because we already have a command pending */
                dev_err(&adapter->pdev->dev, "Cannot remove filters, command %d pending\n",
                        adapter->current_op);
                return;
        }

        spin_lock_bh(&adapter->mac_vlan_list_lock);

        /* count filters pending removal */
        list_for_each_entry(f, &adapter->mac_filter_list, list) {
                if (f->remove)
                        count++;
        }
        if (!count) {
                adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_MAC_FILTER;
                spin_unlock_bh(&adapter->mac_vlan_list_lock);
                return;
        }
        adapter->current_op = VIRTCHNL_OP_DEL_ETH_ADDR;

        len = struct_size(veal, list, count);
        if (len > IAVF_MAX_AQ_BUF_SIZE) {
                /* truncate to one AQ message; "more" keeps the AQ flag set
                 * so the remainder is sent on a later pass
                 */
                dev_warn(&adapter->pdev->dev, "Too many delete MAC changes in one request\n");
                count = (IAVF_MAX_AQ_BUF_SIZE -
                         sizeof(struct virtchnl_ether_addr_list)) /
                        sizeof(struct virtchnl_ether_addr);
                len = struct_size(veal, list, count);
                more = true;
        }
        /* GFP_ATOMIC: we are holding the mac_vlan_list spinlock */
        veal = kzalloc(len, GFP_ATOMIC);
        if (!veal) {
                spin_unlock_bh(&adapter->mac_vlan_list_lock);
                return;
        }

        veal->vsi_id = adapter->vsi_res->vsi_id;
        veal->num_elements = count;
        /* each filter is freed as it is copied into the request */
        list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
                if (f->remove) {
                        ether_addr_copy(veal->list[i].addr, f->macaddr);
                        iavf_set_mac_addr_type(&veal->list[i], f);
                        i++;
                        list_del(&f->list);
                        kfree(f);
                        if (i == count)
                                break;
                }
        }
        if (!more)
                adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_MAC_FILTER;

        spin_unlock_bh(&adapter->mac_vlan_list_lock);

        iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_ETH_ADDR, (u8 *)veal, len);
        kfree(veal);
}

/**
 * iavf_mac_add_ok
 * @adapter: adapter structure
 *
 * Submit list of filters based on PF
response.
 **/
static void iavf_mac_add_ok(struct iavf_adapter *adapter)
{
        struct iavf_mac_filter *f, *ftmp;

        spin_lock_bh(&adapter->mac_vlan_list_lock);
        list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
                f->is_new_mac = false;
                /* the add request completed; mark it handled exactly once */
                if (!f->add && !f->add_handled)
                        f->add_handled = true;
        }
        spin_unlock_bh(&adapter->mac_vlan_list_lock);
}

/**
 * iavf_mac_add_reject
 * @adapter: adapter structure
 *
 * Remove filters from list based on PF response.
 **/
static void iavf_mac_add_reject(struct iavf_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        struct iavf_mac_filter *f, *ftmp;

        spin_lock_bh(&adapter->mac_vlan_list_lock);
        list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
                /* never let the netdev's own address be removed */
                if (f->remove && ether_addr_equal(f->macaddr, netdev->dev_addr))
                        f->remove = false;

                if (!f->add && !f->add_handled)
                        f->add_handled = true;

                /* drop filters that were newly added and got rejected */
                if (f->is_new_mac) {
                        list_del(&f->list);
                        kfree(f);
                }
        }
        spin_unlock_bh(&adapter->mac_vlan_list_lock);
}

/**
 * iavf_add_vlans
 * @adapter: adapter structure
 *
 * Request that the PF add one or more VLAN filters to our VSI.
 **/
void iavf_add_vlans(struct iavf_adapter *adapter)
{
        int len, i = 0, count = 0;
        struct iavf_vlan_filter *f;
        bool more = false;

        if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
                /* bail because we already have a command pending */
                dev_err(&adapter->pdev->dev, "Cannot add VLANs, command %d pending\n",
                        adapter->current_op);
                return;
        }

        spin_lock_bh(&adapter->mac_vlan_list_lock);

        /* count filters pending addition */
        list_for_each_entry(f, &adapter->vlan_filter_list, list) {
                if (f->add)
                        count++;
        }
        if (!count || !VLAN_FILTERING_ALLOWED(adapter)) {
                adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_VLAN_FILTER;
                spin_unlock_bh(&adapter->mac_vlan_list_lock);
                return;
        }

        /* legacy VLAN path vs. VLAN V2 path, depending on negotiated caps */
        if (VLAN_ALLOWED(adapter)) {
                struct virtchnl_vlan_filter_list *vvfl;

                adapter->current_op = VIRTCHNL_OP_ADD_VLAN;

                len = sizeof(*vvfl) + (count * sizeof(u16));
                if (len > IAVF_MAX_AQ_BUF_SIZE) {
                        /* truncate to one AQ message; "more" keeps the AQ
                         * flag set so the remainder is sent later
                         */
                        dev_warn(&adapter->pdev->dev, "Too many add VLAN changes in one request\n");
                        count = (IAVF_MAX_AQ_BUF_SIZE - sizeof(*vvfl)) /
                                sizeof(u16);
                        len = sizeof(*vvfl) + (count * sizeof(u16));
                        more = true;
                }
                /* GFP_ATOMIC: we are holding the mac_vlan_list spinlock */
                vvfl = kzalloc(len, GFP_ATOMIC);
                if (!vvfl) {
                        spin_unlock_bh(&adapter->mac_vlan_list_lock);
                        return;
                }

                vvfl->vsi_id = adapter->vsi_res->vsi_id;
                vvfl->num_elements = count;
                list_for_each_entry(f, &adapter->vlan_filter_list, list) {
                        if (f->add) {
                                vvfl->vlan_id[i] = f->vlan.vid;
                                i++;
                                f->add = false;
                                if (i == count)
                                        break;
                        }
                }
                if (!more)
                        adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_VLAN_FILTER;

                spin_unlock_bh(&adapter->mac_vlan_list_lock);

                iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_VLAN, (u8 *)vvfl, len);
                kfree(vvfl);
        } else {
                struct virtchnl_vlan_filter_list_v2 *vvfl_v2;

                adapter->current_op = VIRTCHNL_OP_ADD_VLAN_V2;

                /* count - 1: the struct already contains one filter entry */
                len = sizeof(*vvfl_v2) + ((count - 1) *
                                          sizeof(struct virtchnl_vlan_filter));
                if (len > IAVF_MAX_AQ_BUF_SIZE) {
                        dev_warn(&adapter->pdev->dev, "Too many add VLAN changes in one request\n");
                        count = (IAVF_MAX_AQ_BUF_SIZE - sizeof(*vvfl_v2)) /
                                sizeof(struct virtchnl_vlan_filter);
                        len = sizeof(*vvfl_v2) +
                              ((count - 1) *
                               sizeof(struct virtchnl_vlan_filter));
                        more = true;
                }

                /* GFP_ATOMIC: we are holding the mac_vlan_list spinlock */
                vvfl_v2 = kzalloc(len, GFP_ATOMIC);
                if (!vvfl_v2) {
                        spin_unlock_bh(&adapter->mac_vlan_list_lock);
                        return;
                }

                vvfl_v2->vport_id = adapter->vsi_res->vsi_id;
                vvfl_v2->num_elements = count;
                list_for_each_entry(f, &adapter->vlan_filter_list, list) {
                        if (f->add) {
                                struct virtchnl_vlan_supported_caps *filtering_support =
                                        &adapter->vlan_v2_caps.filtering.filtering_support;
                                struct virtchnl_vlan *vlan;

                                /* give priority over outer if it's enabled */
                                if (filtering_support->outer)
                                        vlan = &vvfl_v2->filters[i].outer;
                                else
                                        vlan = &vvfl_v2->filters[i].inner;

                                vlan->tci = f->vlan.vid;
                                vlan->tpid = f->vlan.tpid;

                                i++;
                                f->add = false;
                                if (i == count)
                                        break;
                        }
                }

                if (!more)
                        adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_VLAN_FILTER;

                spin_unlock_bh(&adapter->mac_vlan_list_lock);

                iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_VLAN_V2,
                                 (u8 *)vvfl_v2, len);
                kfree(vvfl_v2);
        }
}

/**
 * iavf_del_vlans
 * @adapter: adapter structure
 *
 * Request that the PF remove one or more VLAN filters from our VSI.
765 **/ 766 void iavf_del_vlans(struct iavf_adapter *adapter) 767 { 768 struct iavf_vlan_filter *f, *ftmp; 769 int len, i = 0, count = 0; 770 bool more = false; 771 772 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { 773 /* bail because we already have a command pending */ 774 dev_err(&adapter->pdev->dev, "Cannot remove VLANs, command %d pending\n", 775 adapter->current_op); 776 return; 777 } 778 779 spin_lock_bh(&adapter->mac_vlan_list_lock); 780 781 list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) { 782 /* since VLAN capabilities are not allowed, we dont want to send 783 * a VLAN delete request because it will most likely fail and 784 * create unnecessary errors/noise, so just free the VLAN 785 * filters marked for removal to enable bailing out before 786 * sending a virtchnl message 787 */ 788 if (f->remove && !VLAN_FILTERING_ALLOWED(adapter)) { 789 list_del(&f->list); 790 kfree(f); 791 } else if (f->remove) { 792 count++; 793 } 794 } 795 if (!count || !VLAN_FILTERING_ALLOWED(adapter)) { 796 adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_VLAN_FILTER; 797 spin_unlock_bh(&adapter->mac_vlan_list_lock); 798 return; 799 } 800 801 if (VLAN_ALLOWED(adapter)) { 802 struct virtchnl_vlan_filter_list *vvfl; 803 804 adapter->current_op = VIRTCHNL_OP_DEL_VLAN; 805 806 len = sizeof(*vvfl) + (count * sizeof(u16)); 807 if (len > IAVF_MAX_AQ_BUF_SIZE) { 808 dev_warn(&adapter->pdev->dev, "Too many delete VLAN changes in one request\n"); 809 count = (IAVF_MAX_AQ_BUF_SIZE - sizeof(*vvfl)) / 810 sizeof(u16); 811 len = sizeof(*vvfl) + (count * sizeof(u16)); 812 more = true; 813 } 814 vvfl = kzalloc(len, GFP_ATOMIC); 815 if (!vvfl) { 816 spin_unlock_bh(&adapter->mac_vlan_list_lock); 817 return; 818 } 819 820 vvfl->vsi_id = adapter->vsi_res->vsi_id; 821 vvfl->num_elements = count; 822 list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) { 823 if (f->remove) { 824 vvfl->vlan_id[i] = f->vlan.vid; 825 i++; 826 list_del(&f->list); 827 kfree(f); 828 if (i == 
count) 829 break; 830 } 831 } 832 833 if (!more) 834 adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_VLAN_FILTER; 835 836 spin_unlock_bh(&adapter->mac_vlan_list_lock); 837 838 iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_VLAN, (u8 *)vvfl, len); 839 kfree(vvfl); 840 } else { 841 struct virtchnl_vlan_filter_list_v2 *vvfl_v2; 842 843 adapter->current_op = VIRTCHNL_OP_DEL_VLAN_V2; 844 845 len = sizeof(*vvfl_v2) + 846 ((count - 1) * sizeof(struct virtchnl_vlan_filter)); 847 if (len > IAVF_MAX_AQ_BUF_SIZE) { 848 dev_warn(&adapter->pdev->dev, "Too many add VLAN changes in one request\n"); 849 count = (IAVF_MAX_AQ_BUF_SIZE - 850 sizeof(*vvfl_v2)) / 851 sizeof(struct virtchnl_vlan_filter); 852 len = sizeof(*vvfl_v2) + 853 ((count - 1) * 854 sizeof(struct virtchnl_vlan_filter)); 855 more = true; 856 } 857 858 vvfl_v2 = kzalloc(len, GFP_ATOMIC); 859 if (!vvfl_v2) { 860 spin_unlock_bh(&adapter->mac_vlan_list_lock); 861 return; 862 } 863 864 vvfl_v2->vport_id = adapter->vsi_res->vsi_id; 865 vvfl_v2->num_elements = count; 866 list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) { 867 if (f->remove) { 868 struct virtchnl_vlan_supported_caps *filtering_support = 869 &adapter->vlan_v2_caps.filtering.filtering_support; 870 struct virtchnl_vlan *vlan; 871 872 /* give priority over outer if it's enabled */ 873 if (filtering_support->outer) 874 vlan = &vvfl_v2->filters[i].outer; 875 else 876 vlan = &vvfl_v2->filters[i].inner; 877 878 vlan->tci = f->vlan.vid; 879 vlan->tpid = f->vlan.tpid; 880 881 list_del(&f->list); 882 kfree(f); 883 i++; 884 if (i == count) 885 break; 886 } 887 } 888 889 if (!more) 890 adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_VLAN_FILTER; 891 892 spin_unlock_bh(&adapter->mac_vlan_list_lock); 893 894 iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_VLAN_V2, 895 (u8 *)vvfl_v2, len); 896 kfree(vvfl_v2); 897 } 898 } 899 900 /** 901 * iavf_set_promiscuous 902 * @adapter: adapter structure 903 * @flags: bitmask to control unicast/multicast promiscuous. 
 *
 * Request that the PF enable promiscuous mode for our VSI.
 **/
void iavf_set_promiscuous(struct iavf_adapter *adapter, int flags)
{
        struct virtchnl_promisc_info vpi;
        int promisc_all;

        if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
                /* bail because we already have a command pending */
                dev_err(&adapter->pdev->dev, "Cannot set promiscuous mode, command %d pending\n",
                        adapter->current_op);
                return;
        }

        /* full promiscuous requires both unicast and multicast bits */
        promisc_all = FLAG_VF_UNICAST_PROMISC |
                      FLAG_VF_MULTICAST_PROMISC;
        if ((flags & promisc_all) == promisc_all) {
                adapter->flags |= IAVF_FLAG_PROMISC_ON;
                adapter->aq_required &= ~IAVF_FLAG_AQ_REQUEST_PROMISC;
                dev_info(&adapter->pdev->dev, "Entering promiscuous mode\n");
        }

        if (flags & FLAG_VF_MULTICAST_PROMISC) {
                adapter->flags |= IAVF_FLAG_ALLMULTI_ON;
                adapter->aq_required &= ~IAVF_FLAG_AQ_REQUEST_ALLMULTI;
                dev_info(&adapter->pdev->dev, "%s is entering multicast promiscuous mode\n",
                         adapter->netdev->name);
        }

        /* flags == 0 means leave whichever promiscuous modes are active */
        if (!flags) {
                if (adapter->flags & IAVF_FLAG_PROMISC_ON) {
                        adapter->flags &= ~IAVF_FLAG_PROMISC_ON;
                        adapter->aq_required &= ~IAVF_FLAG_AQ_RELEASE_PROMISC;
                        dev_info(&adapter->pdev->dev, "Leaving promiscuous mode\n");
                }

                if (adapter->flags & IAVF_FLAG_ALLMULTI_ON) {
                        adapter->flags &= ~IAVF_FLAG_ALLMULTI_ON;
                        adapter->aq_required &= ~IAVF_FLAG_AQ_RELEASE_ALLMULTI;
                        dev_info(&adapter->pdev->dev, "%s is leaving multicast promiscuous mode\n",
                                 adapter->netdev->name);
                }
        }

        adapter->current_op = VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE;
        vpi.vsi_id = adapter->vsi_res->vsi_id;
        vpi.flags = flags;
        iavf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
                         (u8 *)&vpi, sizeof(vpi));
}

/**
 * iavf_request_stats
 * @adapter: adapter structure
 *
 * Request VSI statistics from PF.
 **/
void iavf_request_stats(struct iavf_adapter *adapter)
{
        struct virtchnl_queue_select vqs;

        if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
                /* no error message, this isn't crucial */
                return;
        }

        adapter->aq_required &= ~IAVF_FLAG_AQ_REQUEST_STATS;
        adapter->current_op = VIRTCHNL_OP_GET_STATS;
        vqs.vsi_id = adapter->vsi_res->vsi_id;
        /* queue maps are ignored for this message - only the vsi is used */
        if (iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_STATS, (u8 *)&vqs,
                             sizeof(vqs)))
                /* if the request failed, don't lock out others */
                adapter->current_op = VIRTCHNL_OP_UNKNOWN;
}

/**
 * iavf_get_hena
 * @adapter: adapter structure
 *
 * Request hash enable capabilities from PF
 **/
void iavf_get_hena(struct iavf_adapter *adapter)
{
        if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
                /* bail because we already have a command pending */
                dev_err(&adapter->pdev->dev, "Cannot get RSS hash capabilities, command %d pending\n",
                        adapter->current_op);
                return;
        }
        adapter->current_op = VIRTCHNL_OP_GET_RSS_HENA_CAPS;
        adapter->aq_required &= ~IAVF_FLAG_AQ_GET_HENA;
        /* no payload: the PF replies with the supported hena bits */
        iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_RSS_HENA_CAPS, NULL, 0);
}

/**
 * iavf_set_hena
 * @adapter: adapter structure
 *
 * Request the PF to set our RSS hash capabilities
 **/
void iavf_set_hena(struct iavf_adapter *adapter)
{
        struct virtchnl_rss_hena vrh;

        if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
                /* bail because we already have a command pending */
                dev_err(&adapter->pdev->dev, "Cannot set RSS hash enable, command %d pending\n",
                        adapter->current_op);
                return;
        }
        vrh.hena = adapter->hena;
        adapter->current_op = VIRTCHNL_OP_SET_RSS_HENA;
        adapter->aq_required &= ~IAVF_FLAG_AQ_SET_HENA;
        iavf_send_pf_msg(adapter, VIRTCHNL_OP_SET_RSS_HENA, (u8 *)&vrh,
                         sizeof(vrh));
}

/**
 * iavf_set_rss_key
 * @adapter: adapter structure
 *
 * Request the PF to set our RSS hash key
 **/
void iavf_set_rss_key(struct iavf_adapter *adapter)
{
        struct virtchnl_rss_key *vrk;
        int len;

        if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
                /* bail because we already have a command pending */
                dev_err(&adapter->pdev->dev, "Cannot set RSS key, command %d pending\n",
                        adapter->current_op);
                return;
        }
        /* - 1: the struct already accounts for one byte of key storage */
        len = sizeof(struct virtchnl_rss_key) +
              (adapter->rss_key_size * sizeof(u8)) - 1;
        vrk = kzalloc(len, GFP_KERNEL);
        if (!vrk)
                return;
        vrk->vsi_id = adapter->vsi.id;
        vrk->key_len = adapter->rss_key_size;
        memcpy(vrk->key, adapter->rss_key, adapter->rss_key_size);

        adapter->current_op = VIRTCHNL_OP_CONFIG_RSS_KEY;
        adapter->aq_required &= ~IAVF_FLAG_AQ_SET_RSS_KEY;
        iavf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_RSS_KEY, (u8 *)vrk, len);
        kfree(vrk);
}

/**
 * iavf_set_rss_lut
 * @adapter: adapter structure
 *
 * Request the PF to set our RSS lookup table
 **/
void iavf_set_rss_lut(struct iavf_adapter *adapter)
{
        struct virtchnl_rss_lut *vrl;
        int len;

        if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
                /* bail because we already have a command pending */
                dev_err(&adapter->pdev->dev, "Cannot set RSS LUT, command %d pending\n",
                        adapter->current_op);
                return;
        }
        /* - 1: the struct already accounts for one byte of LUT storage */
        len = sizeof(struct virtchnl_rss_lut) +
              (adapter->rss_lut_size * sizeof(u8)) - 1;
        vrl = kzalloc(len, GFP_KERNEL);
        if (!vrl)
                return;
        vrl->vsi_id = adapter->vsi.id;
        vrl->lut_entries = adapter->rss_lut_size;
        memcpy(vrl->lut, adapter->rss_lut, adapter->rss_lut_size);
        adapter->current_op = VIRTCHNL_OP_CONFIG_RSS_LUT;
        adapter->aq_required &= ~IAVF_FLAG_AQ_SET_RSS_LUT;
        iavf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_RSS_LUT, (u8 *)vrl, len);
kfree(vrl); 1084 } 1085 1086 /** 1087 * iavf_enable_vlan_stripping 1088 * @adapter: adapter structure 1089 * 1090 * Request VLAN header stripping to be enabled 1091 **/ 1092 void iavf_enable_vlan_stripping(struct iavf_adapter *adapter) 1093 { 1094 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { 1095 /* bail because we already have a command pending */ 1096 dev_err(&adapter->pdev->dev, "Cannot enable stripping, command %d pending\n", 1097 adapter->current_op); 1098 return; 1099 } 1100 adapter->current_op = VIRTCHNL_OP_ENABLE_VLAN_STRIPPING; 1101 adapter->aq_required &= ~IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING; 1102 iavf_send_pf_msg(adapter, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING, NULL, 0); 1103 } 1104 1105 /** 1106 * iavf_disable_vlan_stripping 1107 * @adapter: adapter structure 1108 * 1109 * Request VLAN header stripping to be disabled 1110 **/ 1111 void iavf_disable_vlan_stripping(struct iavf_adapter *adapter) 1112 { 1113 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { 1114 /* bail because we already have a command pending */ 1115 dev_err(&adapter->pdev->dev, "Cannot disable stripping, command %d pending\n", 1116 adapter->current_op); 1117 return; 1118 } 1119 adapter->current_op = VIRTCHNL_OP_DISABLE_VLAN_STRIPPING; 1120 adapter->aq_required &= ~IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING; 1121 iavf_send_pf_msg(adapter, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING, NULL, 0); 1122 } 1123 1124 /** 1125 * iavf_tpid_to_vc_ethertype - transform from VLAN TPID to virtchnl ethertype 1126 * @tpid: VLAN TPID (i.e. 0x8100, 0x88a8, etc.) 
1127 */ 1128 static u32 iavf_tpid_to_vc_ethertype(u16 tpid) 1129 { 1130 switch (tpid) { 1131 case ETH_P_8021Q: 1132 return VIRTCHNL_VLAN_ETHERTYPE_8100; 1133 case ETH_P_8021AD: 1134 return VIRTCHNL_VLAN_ETHERTYPE_88A8; 1135 } 1136 1137 return 0; 1138 } 1139 1140 /** 1141 * iavf_set_vc_offload_ethertype - set virtchnl ethertype for offload message 1142 * @adapter: adapter structure 1143 * @msg: message structure used for updating offloads over virtchnl to update 1144 * @tpid: VLAN TPID (i.e. 0x8100, 0x88a8, etc.) 1145 * @offload_op: opcode used to determine which support structure to check 1146 */ 1147 static int 1148 iavf_set_vc_offload_ethertype(struct iavf_adapter *adapter, 1149 struct virtchnl_vlan_setting *msg, u16 tpid, 1150 enum virtchnl_ops offload_op) 1151 { 1152 struct virtchnl_vlan_supported_caps *offload_support; 1153 u16 vc_ethertype = iavf_tpid_to_vc_ethertype(tpid); 1154 1155 /* reference the correct offload support structure */ 1156 switch (offload_op) { 1157 case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2: 1158 case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2: 1159 offload_support = 1160 &adapter->vlan_v2_caps.offloads.stripping_support; 1161 break; 1162 case VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2: 1163 case VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2: 1164 offload_support = 1165 &adapter->vlan_v2_caps.offloads.insertion_support; 1166 break; 1167 default: 1168 dev_err(&adapter->pdev->dev, "Invalid opcode %d for setting virtchnl ethertype to enable/disable VLAN offloads\n", 1169 offload_op); 1170 return -EINVAL; 1171 } 1172 1173 /* make sure ethertype is supported */ 1174 if (offload_support->outer & vc_ethertype && 1175 offload_support->outer & VIRTCHNL_VLAN_TOGGLE) { 1176 msg->outer_ethertype_setting = vc_ethertype; 1177 } else if (offload_support->inner & vc_ethertype && 1178 offload_support->inner & VIRTCHNL_VLAN_TOGGLE) { 1179 msg->inner_ethertype_setting = vc_ethertype; 1180 } else { 1181 dev_dbg(&adapter->pdev->dev, "opcode %d unsupported for VLAN TPID 0x%04x\n", 
1182 offload_op, tpid); 1183 return -EINVAL; 1184 } 1185 1186 return 0; 1187 } 1188 1189 /** 1190 * iavf_clear_offload_v2_aq_required - clear AQ required bit for offload request 1191 * @adapter: adapter structure 1192 * @tpid: VLAN TPID 1193 * @offload_op: opcode used to determine which AQ required bit to clear 1194 */ 1195 static void 1196 iavf_clear_offload_v2_aq_required(struct iavf_adapter *adapter, u16 tpid, 1197 enum virtchnl_ops offload_op) 1198 { 1199 switch (offload_op) { 1200 case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2: 1201 if (tpid == ETH_P_8021Q) 1202 adapter->aq_required &= 1203 ~IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_STRIPPING; 1204 else if (tpid == ETH_P_8021AD) 1205 adapter->aq_required &= 1206 ~IAVF_FLAG_AQ_ENABLE_STAG_VLAN_STRIPPING; 1207 break; 1208 case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2: 1209 if (tpid == ETH_P_8021Q) 1210 adapter->aq_required &= 1211 ~IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_STRIPPING; 1212 else if (tpid == ETH_P_8021AD) 1213 adapter->aq_required &= 1214 ~IAVF_FLAG_AQ_DISABLE_STAG_VLAN_STRIPPING; 1215 break; 1216 case VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2: 1217 if (tpid == ETH_P_8021Q) 1218 adapter->aq_required &= 1219 ~IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_INSERTION; 1220 else if (tpid == ETH_P_8021AD) 1221 adapter->aq_required &= 1222 ~IAVF_FLAG_AQ_ENABLE_STAG_VLAN_INSERTION; 1223 break; 1224 case VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2: 1225 if (tpid == ETH_P_8021Q) 1226 adapter->aq_required &= 1227 ~IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_INSERTION; 1228 else if (tpid == ETH_P_8021AD) 1229 adapter->aq_required &= 1230 ~IAVF_FLAG_AQ_DISABLE_STAG_VLAN_INSERTION; 1231 break; 1232 default: 1233 dev_err(&adapter->pdev->dev, "Unsupported opcode %d specified for clearing aq_required bits for VIRTCHNL_VF_OFFLOAD_VLAN_V2 offload request\n", 1234 offload_op); 1235 } 1236 } 1237 1238 /** 1239 * iavf_send_vlan_offload_v2 - send offload enable/disable over virtchnl 1240 * @adapter: adapter structure 1241 * @tpid: VLAN TPID used for the command (i.e. 
0x8100 or 0x88a8) 1242 * @offload_op: offload_op used to make the request over virtchnl 1243 */ 1244 static void 1245 iavf_send_vlan_offload_v2(struct iavf_adapter *adapter, u16 tpid, 1246 enum virtchnl_ops offload_op) 1247 { 1248 struct virtchnl_vlan_setting *msg; 1249 int len = sizeof(*msg); 1250 1251 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { 1252 /* bail because we already have a command pending */ 1253 dev_err(&adapter->pdev->dev, "Cannot send %d, command %d pending\n", 1254 offload_op, adapter->current_op); 1255 return; 1256 } 1257 1258 adapter->current_op = offload_op; 1259 1260 msg = kzalloc(len, GFP_KERNEL); 1261 if (!msg) 1262 return; 1263 1264 msg->vport_id = adapter->vsi_res->vsi_id; 1265 1266 /* always clear to prevent unsupported and endless requests */ 1267 iavf_clear_offload_v2_aq_required(adapter, tpid, offload_op); 1268 1269 /* only send valid offload requests */ 1270 if (!iavf_set_vc_offload_ethertype(adapter, msg, tpid, offload_op)) 1271 iavf_send_pf_msg(adapter, offload_op, (u8 *)msg, len); 1272 else 1273 adapter->current_op = VIRTCHNL_OP_UNKNOWN; 1274 1275 kfree(msg); 1276 } 1277 1278 /** 1279 * iavf_enable_vlan_stripping_v2 - enable VLAN stripping 1280 * @adapter: adapter structure 1281 * @tpid: VLAN TPID used to enable VLAN stripping 1282 */ 1283 void iavf_enable_vlan_stripping_v2(struct iavf_adapter *adapter, u16 tpid) 1284 { 1285 iavf_send_vlan_offload_v2(adapter, tpid, 1286 VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2); 1287 } 1288 1289 /** 1290 * iavf_disable_vlan_stripping_v2 - disable VLAN stripping 1291 * @adapter: adapter structure 1292 * @tpid: VLAN TPID used to disable VLAN stripping 1293 */ 1294 void iavf_disable_vlan_stripping_v2(struct iavf_adapter *adapter, u16 tpid) 1295 { 1296 iavf_send_vlan_offload_v2(adapter, tpid, 1297 VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2); 1298 } 1299 1300 /** 1301 * iavf_enable_vlan_insertion_v2 - enable VLAN insertion 1302 * @adapter: adapter structure 1303 * @tpid: VLAN TPID used to enable VLAN 
 * insertion
 */
void iavf_enable_vlan_insertion_v2(struct iavf_adapter *adapter, u16 tpid)
{
	iavf_send_vlan_offload_v2(adapter, tpid,
				  VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2);
}

/**
 * iavf_disable_vlan_insertion_v2 - disable VLAN insertion
 * @adapter: adapter structure
 * @tpid: VLAN TPID used to disable VLAN insertion
 */
void iavf_disable_vlan_insertion_v2(struct iavf_adapter *adapter, u16 tpid)
{
	iavf_send_vlan_offload_v2(adapter, tpid,
				  VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2);
}

/* enough for the longest string produced below, e.g. "Unknown Mbps" + NUL */
#define IAVF_MAX_SPEED_STRLEN	13

/**
 * iavf_print_link_message - print link up or down
 * @adapter: adapter structure
 *
 * Log a message telling the world of our wonderous link status
 */
static void iavf_print_link_message(struct iavf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int link_speed_mbps;
	char *speed;

	if (!adapter->link_up) {
		netdev_info(netdev, "NIC Link is Down\n");
		return;
	}

	speed = kzalloc(IAVF_MAX_SPEED_STRLEN, GFP_KERNEL);
	if (!speed)
		return;

	/* PFs with advanced link support report the speed in Mbps directly */
	if (ADV_LINK_SUPPORT(adapter)) {
		link_speed_mbps = adapter->link_speed_mbps;
		goto print_link_msg;
	}

	/* legacy PFs report an enum that must be translated to Mbps */
	switch (adapter->link_speed) {
	case VIRTCHNL_LINK_SPEED_40GB:
		link_speed_mbps = SPEED_40000;
		break;
	case VIRTCHNL_LINK_SPEED_25GB:
		link_speed_mbps = SPEED_25000;
		break;
	case VIRTCHNL_LINK_SPEED_20GB:
		link_speed_mbps = SPEED_20000;
		break;
	case VIRTCHNL_LINK_SPEED_10GB:
		link_speed_mbps = SPEED_10000;
		break;
	case VIRTCHNL_LINK_SPEED_5GB:
		link_speed_mbps = SPEED_5000;
		break;
	case VIRTCHNL_LINK_SPEED_2_5GB:
		link_speed_mbps = SPEED_2500;
		break;
	case VIRTCHNL_LINK_SPEED_1GB:
		link_speed_mbps = SPEED_1000;
		break;
	case VIRTCHNL_LINK_SPEED_100MB:
		link_speed_mbps = SPEED_100;
		break;
	default:
		link_speed_mbps = SPEED_UNKNOWN;
		break;
	}

print_link_msg:
	if (link_speed_mbps > SPEED_1000) {
		if (link_speed_mbps == SPEED_2500)
			snprintf(speed, IAVF_MAX_SPEED_STRLEN, "2.5 Gbps");
		else
			/* convert to Gbps inline */
			snprintf(speed, IAVF_MAX_SPEED_STRLEN, "%d %s",
				 link_speed_mbps / 1000, "Gbps");
	} else if (link_speed_mbps == SPEED_UNKNOWN) {
		snprintf(speed, IAVF_MAX_SPEED_STRLEN, "%s", "Unknown Mbps");
	} else {
		snprintf(speed, IAVF_MAX_SPEED_STRLEN, "%d %s",
			 link_speed_mbps, "Mbps");
	}

	netdev_info(netdev, "NIC Link is Up Speed is %s Full Duplex\n", speed);
	kfree(speed);
}

/**
 * iavf_get_vpe_link_status
 * @adapter: adapter structure
 * @vpe: virtchnl_pf_event structure
 *
 * Helper function for determining the link status. The event payload
 * layout differs depending on whether advanced link info was negotiated.
 **/
static bool
iavf_get_vpe_link_status(struct iavf_adapter *adapter,
			 struct virtchnl_pf_event *vpe)
{
	if (ADV_LINK_SUPPORT(adapter))
		return vpe->event_data.link_event_adv.link_status;
	else
		return vpe->event_data.link_event.link_status;
}

/**
 * iavf_set_adapter_link_speed_from_vpe
 * @adapter: adapter structure for which we are setting the link speed
 * @vpe: virtchnl_pf_event structure that contains the link speed we are setting
 *
 * Helper function for setting iavf_adapter link speed
 **/
static void
iavf_set_adapter_link_speed_from_vpe(struct iavf_adapter *adapter,
				     struct virtchnl_pf_event *vpe)
{
	if (ADV_LINK_SUPPORT(adapter))
		adapter->link_speed_mbps =
			vpe->event_data.link_event_adv.link_speed;
	else
		adapter->link_speed = vpe->event_data.link_event.link_speed;
}

/**
 * iavf_enable_channels
 * @adapter: adapter structure
 *
 * Request that the PF enable channels as specified by
 * the user via tc tool.
 **/
void iavf_enable_channels(struct iavf_adapter *adapter)
{
	struct virtchnl_tc_info *vti = NULL;
	size_t len;
	int i;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot configure mqprio, command %d pending\n",
			adapter->current_op);
		return;
	}

	/* struct virtchnl_tc_info already carries one list entry, hence
	 * sizing for num_tc - 1 additional entries
	 */
	len = struct_size(vti, list, adapter->num_tc - 1);
	vti = kzalloc(len, GFP_KERNEL);
	if (!vti)
		return;
	vti->num_tc = adapter->num_tc;
	for (i = 0; i < vti->num_tc; i++) {
		vti->list[i].count = adapter->ch_config.ch_info[i].count;
		vti->list[i].offset = adapter->ch_config.ch_info[i].offset;
		vti->list[i].pad = 0;
		vti->list[i].max_tx_rate =
				adapter->ch_config.ch_info[i].max_tx_rate;
	}

	adapter->ch_config.state = __IAVF_TC_RUNNING;
	adapter->flags |= IAVF_FLAG_REINIT_ITR_NEEDED;
	adapter->current_op = VIRTCHNL_OP_ENABLE_CHANNELS;
	adapter->aq_required &= ~IAVF_FLAG_AQ_ENABLE_CHANNELS;
	iavf_send_pf_msg(adapter, VIRTCHNL_OP_ENABLE_CHANNELS, (u8 *)vti, len);
	kfree(vti);
}

/**
 * iavf_disable_channels
 * @adapter: adapter structure
 *
 * Request that the PF disable channels that are configured
 **/
void iavf_disable_channels(struct iavf_adapter *adapter)
{
	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot configure mqprio, command %d pending\n",
			adapter->current_op);
		return;
	}

	adapter->ch_config.state = __IAVF_TC_INVALID;
	adapter->flags |= IAVF_FLAG_REINIT_ITR_NEEDED;
	adapter->current_op = VIRTCHNL_OP_DISABLE_CHANNELS;
	adapter->aq_required &= ~IAVF_FLAG_AQ_DISABLE_CHANNELS;
	iavf_send_pf_msg(adapter, VIRTCHNL_OP_DISABLE_CHANNELS, NULL, 0);
}

/**
 * iavf_print_cloud_filter
 *
@adapter: adapter structure 1500 * @f: cloud filter to print 1501 * 1502 * Print the cloud filter 1503 **/ 1504 static void iavf_print_cloud_filter(struct iavf_adapter *adapter, 1505 struct virtchnl_filter *f) 1506 { 1507 switch (f->flow_type) { 1508 case VIRTCHNL_TCP_V4_FLOW: 1509 dev_info(&adapter->pdev->dev, "dst_mac: %pM src_mac: %pM vlan_id: %hu dst_ip: %pI4 src_ip %pI4 dst_port %hu src_port %hu\n", 1510 &f->data.tcp_spec.dst_mac, 1511 &f->data.tcp_spec.src_mac, 1512 ntohs(f->data.tcp_spec.vlan_id), 1513 &f->data.tcp_spec.dst_ip[0], 1514 &f->data.tcp_spec.src_ip[0], 1515 ntohs(f->data.tcp_spec.dst_port), 1516 ntohs(f->data.tcp_spec.src_port)); 1517 break; 1518 case VIRTCHNL_TCP_V6_FLOW: 1519 dev_info(&adapter->pdev->dev, "dst_mac: %pM src_mac: %pM vlan_id: %hu dst_ip: %pI6 src_ip %pI6 dst_port %hu src_port %hu\n", 1520 &f->data.tcp_spec.dst_mac, 1521 &f->data.tcp_spec.src_mac, 1522 ntohs(f->data.tcp_spec.vlan_id), 1523 &f->data.tcp_spec.dst_ip, 1524 &f->data.tcp_spec.src_ip, 1525 ntohs(f->data.tcp_spec.dst_port), 1526 ntohs(f->data.tcp_spec.src_port)); 1527 break; 1528 } 1529 } 1530 1531 /** 1532 * iavf_add_cloud_filter 1533 * @adapter: adapter structure 1534 * 1535 * Request that the PF add cloud filters as specified 1536 * by the user via tc tool. 
1537 **/ 1538 void iavf_add_cloud_filter(struct iavf_adapter *adapter) 1539 { 1540 struct iavf_cloud_filter *cf; 1541 struct virtchnl_filter *f; 1542 int len = 0, count = 0; 1543 1544 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { 1545 /* bail because we already have a command pending */ 1546 dev_err(&adapter->pdev->dev, "Cannot add cloud filter, command %d pending\n", 1547 adapter->current_op); 1548 return; 1549 } 1550 list_for_each_entry(cf, &adapter->cloud_filter_list, list) { 1551 if (cf->add) { 1552 count++; 1553 break; 1554 } 1555 } 1556 if (!count) { 1557 adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_CLOUD_FILTER; 1558 return; 1559 } 1560 adapter->current_op = VIRTCHNL_OP_ADD_CLOUD_FILTER; 1561 1562 len = sizeof(struct virtchnl_filter); 1563 f = kzalloc(len, GFP_KERNEL); 1564 if (!f) 1565 return; 1566 1567 list_for_each_entry(cf, &adapter->cloud_filter_list, list) { 1568 if (cf->add) { 1569 memcpy(f, &cf->f, sizeof(struct virtchnl_filter)); 1570 cf->add = false; 1571 cf->state = __IAVF_CF_ADD_PENDING; 1572 iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_CLOUD_FILTER, 1573 (u8 *)f, len); 1574 } 1575 } 1576 kfree(f); 1577 } 1578 1579 /** 1580 * iavf_del_cloud_filter 1581 * @adapter: adapter structure 1582 * 1583 * Request that the PF delete cloud filters as specified 1584 * by the user via tc tool. 
1585 **/ 1586 void iavf_del_cloud_filter(struct iavf_adapter *adapter) 1587 { 1588 struct iavf_cloud_filter *cf, *cftmp; 1589 struct virtchnl_filter *f; 1590 int len = 0, count = 0; 1591 1592 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { 1593 /* bail because we already have a command pending */ 1594 dev_err(&adapter->pdev->dev, "Cannot remove cloud filter, command %d pending\n", 1595 adapter->current_op); 1596 return; 1597 } 1598 list_for_each_entry(cf, &adapter->cloud_filter_list, list) { 1599 if (cf->del) { 1600 count++; 1601 break; 1602 } 1603 } 1604 if (!count) { 1605 adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_CLOUD_FILTER; 1606 return; 1607 } 1608 adapter->current_op = VIRTCHNL_OP_DEL_CLOUD_FILTER; 1609 1610 len = sizeof(struct virtchnl_filter); 1611 f = kzalloc(len, GFP_KERNEL); 1612 if (!f) 1613 return; 1614 1615 list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) { 1616 if (cf->del) { 1617 memcpy(f, &cf->f, sizeof(struct virtchnl_filter)); 1618 cf->del = false; 1619 cf->state = __IAVF_CF_DEL_PENDING; 1620 iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_CLOUD_FILTER, 1621 (u8 *)f, len); 1622 } 1623 } 1624 kfree(f); 1625 } 1626 1627 /** 1628 * iavf_add_fdir_filter 1629 * @adapter: the VF adapter structure 1630 * 1631 * Request that the PF add Flow Director filters as specified 1632 * by the user via ethtool. 
 **/
void iavf_add_fdir_filter(struct iavf_adapter *adapter)
{
	struct iavf_fdir_fltr *fdir;
	struct virtchnl_fdir_add *f;
	bool process_fltr = false;
	int len;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot add Flow Director filter, command %d pending\n",
			adapter->current_op);
		return;
	}

	len = sizeof(struct virtchnl_fdir_add);
	f = kzalloc(len, GFP_KERNEL);
	if (!f)
		return;

	/* take the first filter still in ADD_REQUEST state; copy its
	 * message out under the lock, then send outside the lock
	 */
	spin_lock_bh(&adapter->fdir_fltr_lock);
	list_for_each_entry(fdir, &adapter->fdir_list_head, list) {
		if (fdir->state == IAVF_FDIR_FLTR_ADD_REQUEST) {
			process_fltr = true;
			fdir->state = IAVF_FDIR_FLTR_ADD_PENDING;
			memcpy(f, &fdir->vc_add_msg, len);
			break;
		}
	}
	spin_unlock_bh(&adapter->fdir_fltr_lock);

	if (!process_fltr) {
		/* prevent iavf_add_fdir_filter() from being called when there
		 * are no filters to add
		 */
		adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_FDIR_FILTER;
		kfree(f);
		return;
	}
	adapter->current_op = VIRTCHNL_OP_ADD_FDIR_FILTER;
	iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_FDIR_FILTER, (u8 *)f, len);
	kfree(f);
}

/**
 * iavf_del_fdir_filter
 * @adapter: the VF adapter structure
 *
 * Request that the PF delete Flow Director filters as specified
 * by the user via ethtool.
 **/
void iavf_del_fdir_filter(struct iavf_adapter *adapter)
{
	struct iavf_fdir_fltr *fdir;
	/* delete message is small and fixed-size, so it lives on the stack */
	struct virtchnl_fdir_del f;
	bool process_fltr = false;
	int len;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot remove Flow Director filter, command %d pending\n",
			adapter->current_op);
		return;
	}

	len = sizeof(struct virtchnl_fdir_del);

	/* take the first filter still in DEL_REQUEST state; build the
	 * message under the lock, then send outside the lock
	 */
	spin_lock_bh(&adapter->fdir_fltr_lock);
	list_for_each_entry(fdir, &adapter->fdir_list_head, list) {
		if (fdir->state == IAVF_FDIR_FLTR_DEL_REQUEST) {
			process_fltr = true;
			memset(&f, 0, len);
			f.vsi_id = fdir->vc_add_msg.vsi_id;
			f.flow_id = fdir->flow_id;
			fdir->state = IAVF_FDIR_FLTR_DEL_PENDING;
			break;
		}
	}
	spin_unlock_bh(&adapter->fdir_fltr_lock);

	if (!process_fltr) {
		/* nothing to delete - clear the request bit so we are not
		 * called again for no work
		 */
		adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_FDIR_FILTER;
		return;
	}

	adapter->current_op = VIRTCHNL_OP_DEL_FDIR_FILTER;
	iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_FDIR_FILTER, (u8 *)&f, len);
}

/**
 * iavf_add_adv_rss_cfg
 * @adapter: the VF adapter structure
 *
 * Request that the PF add RSS configuration as specified
 * by the user via ethtool.
 **/
void iavf_add_adv_rss_cfg(struct iavf_adapter *adapter)
{
	struct virtchnl_rss_cfg *rss_cfg;
	struct iavf_adv_rss *rss;
	bool process_rss = false;
	int len;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot add RSS configuration, command %d pending\n",
			adapter->current_op);
		return;
	}

	len = sizeof(struct virtchnl_rss_cfg);
	rss_cfg = kzalloc(len, GFP_KERNEL);
	if (!rss_cfg)
		return;

	/* take the first config still in ADD_REQUEST state; copy its
	 * message out under the lock, then send outside the lock
	 */
	spin_lock_bh(&adapter->adv_rss_lock);
	list_for_each_entry(rss, &adapter->adv_rss_list_head, list) {
		if (rss->state == IAVF_ADV_RSS_ADD_REQUEST) {
			process_rss = true;
			rss->state = IAVF_ADV_RSS_ADD_PENDING;
			memcpy(rss_cfg, &rss->cfg_msg, len);
			iavf_print_adv_rss_cfg(adapter, rss,
					       "Input set change for",
					       "is pending");
			break;
		}
	}
	spin_unlock_bh(&adapter->adv_rss_lock);

	if (process_rss) {
		adapter->current_op = VIRTCHNL_OP_ADD_RSS_CFG;
		iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_RSS_CFG,
				 (u8 *)rss_cfg, len);
	} else {
		/* nothing to add - clear the request bit so we are not
		 * called again for no work
		 */
		adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_ADV_RSS_CFG;
	}

	kfree(rss_cfg);
}

/**
 * iavf_del_adv_rss_cfg
 * @adapter: the VF adapter structure
 *
 * Request that the PF delete RSS configuration as specified
 * by the user via ethtool.
 **/
void iavf_del_adv_rss_cfg(struct iavf_adapter *adapter)
{
	struct virtchnl_rss_cfg *rss_cfg;
	struct iavf_adv_rss *rss;
	bool process_rss = false;
	int len;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot remove RSS configuration, command %d pending\n",
			adapter->current_op);
		return;
	}

	len = sizeof(struct virtchnl_rss_cfg);
	rss_cfg = kzalloc(len, GFP_KERNEL);
	if (!rss_cfg)
		return;

	/* take the first config still in DEL_REQUEST state; copy its
	 * message out under the lock, then send outside the lock
	 */
	spin_lock_bh(&adapter->adv_rss_lock);
	list_for_each_entry(rss, &adapter->adv_rss_list_head, list) {
		if (rss->state == IAVF_ADV_RSS_DEL_REQUEST) {
			process_rss = true;
			rss->state = IAVF_ADV_RSS_DEL_PENDING;
			memcpy(rss_cfg, &rss->cfg_msg, len);
			break;
		}
	}
	spin_unlock_bh(&adapter->adv_rss_lock);

	if (process_rss) {
		adapter->current_op = VIRTCHNL_OP_DEL_RSS_CFG;
		iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_RSS_CFG,
				 (u8 *)rss_cfg, len);
	} else {
		/* nothing to delete - clear the request bit so we are not
		 * called again for no work
		 */
		adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_ADV_RSS_CFG;
	}

	kfree(rss_cfg);
}

/**
 * iavf_request_reset
 * @adapter: adapter structure
 *
 * Request that the PF reset this VF. No response is expected.
 **/
int iavf_request_reset(struct iavf_adapter *adapter)
{
	int err;
	/* Don't check CURRENT_OP - this is always higher priority */
	err = iavf_send_pf_msg(adapter, VIRTCHNL_OP_RESET_VF, NULL, 0);
	adapter->current_op = VIRTCHNL_OP_UNKNOWN;
	return err;
}

/**
 * iavf_netdev_features_vlan_strip_set - update vlan strip status
 * @netdev: ptr to netdev being adjusted
 * @enable: enable or disable vlan strip
 *
 * Helper function to change vlan strip status in netdev->features.
1842 */ 1843 static void iavf_netdev_features_vlan_strip_set(struct net_device *netdev, 1844 const bool enable) 1845 { 1846 if (enable) 1847 netdev->features |= NETIF_F_HW_VLAN_CTAG_RX; 1848 else 1849 netdev->features &= ~NETIF_F_HW_VLAN_CTAG_RX; 1850 } 1851 1852 /** 1853 * iavf_virtchnl_completion 1854 * @adapter: adapter structure 1855 * @v_opcode: opcode sent by PF 1856 * @v_retval: retval sent by PF 1857 * @msg: message sent by PF 1858 * @msglen: message length 1859 * 1860 * Asynchronous completion function for admin queue messages. Rather than busy 1861 * wait, we fire off our requests and assume that no errors will be returned. 1862 * This function handles the reply messages. 1863 **/ 1864 void iavf_virtchnl_completion(struct iavf_adapter *adapter, 1865 enum virtchnl_ops v_opcode, 1866 enum iavf_status v_retval, u8 *msg, u16 msglen) 1867 { 1868 struct net_device *netdev = adapter->netdev; 1869 1870 if (v_opcode == VIRTCHNL_OP_EVENT) { 1871 struct virtchnl_pf_event *vpe = 1872 (struct virtchnl_pf_event *)msg; 1873 bool link_up = iavf_get_vpe_link_status(adapter, vpe); 1874 1875 switch (vpe->event) { 1876 case VIRTCHNL_EVENT_LINK_CHANGE: 1877 iavf_set_adapter_link_speed_from_vpe(adapter, vpe); 1878 1879 /* we've already got the right link status, bail */ 1880 if (adapter->link_up == link_up) 1881 break; 1882 1883 if (link_up) { 1884 /* If we get link up message and start queues 1885 * before our queues are configured it will 1886 * trigger a TX hang. In that case, just ignore 1887 * the link status message,we'll get another one 1888 * after we enable queues and actually prepared 1889 * to send traffic. 1890 */ 1891 if (adapter->state != __IAVF_RUNNING) 1892 break; 1893 1894 /* For ADq enabled VF, we reconfigure VSIs and 1895 * re-allocate queues. Hence wait till all 1896 * queues are enabled. 
1897 */ 1898 if (adapter->flags & 1899 IAVF_FLAG_QUEUES_DISABLED) 1900 break; 1901 } 1902 1903 adapter->link_up = link_up; 1904 if (link_up) { 1905 netif_tx_start_all_queues(netdev); 1906 netif_carrier_on(netdev); 1907 } else { 1908 netif_tx_stop_all_queues(netdev); 1909 netif_carrier_off(netdev); 1910 } 1911 iavf_print_link_message(adapter); 1912 break; 1913 case VIRTCHNL_EVENT_RESET_IMPENDING: 1914 dev_info(&adapter->pdev->dev, "Reset indication received from the PF\n"); 1915 if (!(adapter->flags & IAVF_FLAG_RESET_PENDING)) { 1916 adapter->flags |= IAVF_FLAG_RESET_PENDING; 1917 dev_info(&adapter->pdev->dev, "Scheduling reset task\n"); 1918 queue_work(iavf_wq, &adapter->reset_task); 1919 } 1920 break; 1921 default: 1922 dev_err(&adapter->pdev->dev, "Unknown event %d from PF\n", 1923 vpe->event); 1924 break; 1925 } 1926 return; 1927 } 1928 if (v_retval) { 1929 switch (v_opcode) { 1930 case VIRTCHNL_OP_ADD_VLAN: 1931 dev_err(&adapter->pdev->dev, "Failed to add VLAN filter, error %s\n", 1932 iavf_stat_str(&adapter->hw, v_retval)); 1933 break; 1934 case VIRTCHNL_OP_ADD_ETH_ADDR: 1935 dev_err(&adapter->pdev->dev, "Failed to add MAC filter, error %s\n", 1936 iavf_stat_str(&adapter->hw, v_retval)); 1937 iavf_mac_add_reject(adapter); 1938 /* restore administratively set MAC address */ 1939 ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr); 1940 wake_up(&adapter->vc_waitqueue); 1941 break; 1942 case VIRTCHNL_OP_DEL_VLAN: 1943 dev_err(&adapter->pdev->dev, "Failed to delete VLAN filter, error %s\n", 1944 iavf_stat_str(&adapter->hw, v_retval)); 1945 break; 1946 case VIRTCHNL_OP_DEL_ETH_ADDR: 1947 dev_err(&adapter->pdev->dev, "Failed to delete MAC filter, error %s\n", 1948 iavf_stat_str(&adapter->hw, v_retval)); 1949 break; 1950 case VIRTCHNL_OP_ENABLE_CHANNELS: 1951 dev_err(&adapter->pdev->dev, "Failed to configure queue channels, error %s\n", 1952 iavf_stat_str(&adapter->hw, v_retval)); 1953 adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED; 1954 adapter->ch_config.state 
= __IAVF_TC_INVALID; 1955 netdev_reset_tc(netdev); 1956 netif_tx_start_all_queues(netdev); 1957 break; 1958 case VIRTCHNL_OP_DISABLE_CHANNELS: 1959 dev_err(&adapter->pdev->dev, "Failed to disable queue channels, error %s\n", 1960 iavf_stat_str(&adapter->hw, v_retval)); 1961 adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED; 1962 adapter->ch_config.state = __IAVF_TC_RUNNING; 1963 netif_tx_start_all_queues(netdev); 1964 break; 1965 case VIRTCHNL_OP_ADD_CLOUD_FILTER: { 1966 struct iavf_cloud_filter *cf, *cftmp; 1967 1968 list_for_each_entry_safe(cf, cftmp, 1969 &adapter->cloud_filter_list, 1970 list) { 1971 if (cf->state == __IAVF_CF_ADD_PENDING) { 1972 cf->state = __IAVF_CF_INVALID; 1973 dev_info(&adapter->pdev->dev, "Failed to add cloud filter, error %s\n", 1974 iavf_stat_str(&adapter->hw, 1975 v_retval)); 1976 iavf_print_cloud_filter(adapter, 1977 &cf->f); 1978 list_del(&cf->list); 1979 kfree(cf); 1980 adapter->num_cloud_filters--; 1981 } 1982 } 1983 } 1984 break; 1985 case VIRTCHNL_OP_DEL_CLOUD_FILTER: { 1986 struct iavf_cloud_filter *cf; 1987 1988 list_for_each_entry(cf, &adapter->cloud_filter_list, 1989 list) { 1990 if (cf->state == __IAVF_CF_DEL_PENDING) { 1991 cf->state = __IAVF_CF_ACTIVE; 1992 dev_info(&adapter->pdev->dev, "Failed to del cloud filter, error %s\n", 1993 iavf_stat_str(&adapter->hw, 1994 v_retval)); 1995 iavf_print_cloud_filter(adapter, 1996 &cf->f); 1997 } 1998 } 1999 } 2000 break; 2001 case VIRTCHNL_OP_ADD_FDIR_FILTER: { 2002 struct iavf_fdir_fltr *fdir, *fdir_tmp; 2003 2004 spin_lock_bh(&adapter->fdir_fltr_lock); 2005 list_for_each_entry_safe(fdir, fdir_tmp, 2006 &adapter->fdir_list_head, 2007 list) { 2008 if (fdir->state == IAVF_FDIR_FLTR_ADD_PENDING) { 2009 dev_info(&adapter->pdev->dev, "Failed to add Flow Director filter, error %s\n", 2010 iavf_stat_str(&adapter->hw, 2011 v_retval)); 2012 iavf_print_fdir_fltr(adapter, fdir); 2013 if (msglen) 2014 dev_err(&adapter->pdev->dev, 2015 "%s\n", msg); 2016 list_del(&fdir->list); 2017 kfree(fdir); 
2018 adapter->fdir_active_fltr--; 2019 } 2020 } 2021 spin_unlock_bh(&adapter->fdir_fltr_lock); 2022 } 2023 break; 2024 case VIRTCHNL_OP_DEL_FDIR_FILTER: { 2025 struct iavf_fdir_fltr *fdir; 2026 2027 spin_lock_bh(&adapter->fdir_fltr_lock); 2028 list_for_each_entry(fdir, &adapter->fdir_list_head, 2029 list) { 2030 if (fdir->state == IAVF_FDIR_FLTR_DEL_PENDING) { 2031 fdir->state = IAVF_FDIR_FLTR_ACTIVE; 2032 dev_info(&adapter->pdev->dev, "Failed to del Flow Director filter, error %s\n", 2033 iavf_stat_str(&adapter->hw, 2034 v_retval)); 2035 iavf_print_fdir_fltr(adapter, fdir); 2036 } 2037 } 2038 spin_unlock_bh(&adapter->fdir_fltr_lock); 2039 } 2040 break; 2041 case VIRTCHNL_OP_ADD_RSS_CFG: { 2042 struct iavf_adv_rss *rss, *rss_tmp; 2043 2044 spin_lock_bh(&adapter->adv_rss_lock); 2045 list_for_each_entry_safe(rss, rss_tmp, 2046 &adapter->adv_rss_list_head, 2047 list) { 2048 if (rss->state == IAVF_ADV_RSS_ADD_PENDING) { 2049 iavf_print_adv_rss_cfg(adapter, rss, 2050 "Failed to change the input set for", 2051 NULL); 2052 list_del(&rss->list); 2053 kfree(rss); 2054 } 2055 } 2056 spin_unlock_bh(&adapter->adv_rss_lock); 2057 } 2058 break; 2059 case VIRTCHNL_OP_DEL_RSS_CFG: { 2060 struct iavf_adv_rss *rss; 2061 2062 spin_lock_bh(&adapter->adv_rss_lock); 2063 list_for_each_entry(rss, &adapter->adv_rss_list_head, 2064 list) { 2065 if (rss->state == IAVF_ADV_RSS_DEL_PENDING) { 2066 rss->state = IAVF_ADV_RSS_ACTIVE; 2067 dev_err(&adapter->pdev->dev, "Failed to delete RSS configuration, error %s\n", 2068 iavf_stat_str(&adapter->hw, 2069 v_retval)); 2070 } 2071 } 2072 spin_unlock_bh(&adapter->adv_rss_lock); 2073 } 2074 break; 2075 case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING: 2076 dev_warn(&adapter->pdev->dev, "Changing VLAN Stripping is not allowed when Port VLAN is configured\n"); 2077 /* Vlan stripping could not be enabled by ethtool. 2078 * Disable it in netdev->features. 
2079 */ 2080 iavf_netdev_features_vlan_strip_set(netdev, false); 2081 break; 2082 case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING: 2083 dev_warn(&adapter->pdev->dev, "Changing VLAN Stripping is not allowed when Port VLAN is configured\n"); 2084 /* Vlan stripping could not be disabled by ethtool. 2085 * Enable it in netdev->features. 2086 */ 2087 iavf_netdev_features_vlan_strip_set(netdev, true); 2088 break; 2089 default: 2090 dev_err(&adapter->pdev->dev, "PF returned error %d (%s) to our request %d\n", 2091 v_retval, iavf_stat_str(&adapter->hw, v_retval), 2092 v_opcode); 2093 } 2094 } 2095 switch (v_opcode) { 2096 case VIRTCHNL_OP_ADD_ETH_ADDR: 2097 if (!v_retval) 2098 iavf_mac_add_ok(adapter); 2099 if (!ether_addr_equal(netdev->dev_addr, adapter->hw.mac.addr)) 2100 if (!ether_addr_equal(netdev->dev_addr, 2101 adapter->hw.mac.addr)) { 2102 netif_addr_lock_bh(netdev); 2103 eth_hw_addr_set(netdev, adapter->hw.mac.addr); 2104 netif_addr_unlock_bh(netdev); 2105 } 2106 wake_up(&adapter->vc_waitqueue); 2107 break; 2108 case VIRTCHNL_OP_GET_STATS: { 2109 struct iavf_eth_stats *stats = 2110 (struct iavf_eth_stats *)msg; 2111 netdev->stats.rx_packets = stats->rx_unicast + 2112 stats->rx_multicast + 2113 stats->rx_broadcast; 2114 netdev->stats.tx_packets = stats->tx_unicast + 2115 stats->tx_multicast + 2116 stats->tx_broadcast; 2117 netdev->stats.rx_bytes = stats->rx_bytes; 2118 netdev->stats.tx_bytes = stats->tx_bytes; 2119 netdev->stats.tx_errors = stats->tx_errors; 2120 netdev->stats.rx_dropped = stats->rx_discards; 2121 netdev->stats.tx_dropped = stats->tx_discards; 2122 adapter->current_stats = *stats; 2123 } 2124 break; 2125 case VIRTCHNL_OP_GET_VF_RESOURCES: { 2126 u16 len = sizeof(struct virtchnl_vf_resource) + 2127 IAVF_MAX_VF_VSI * 2128 sizeof(struct virtchnl_vsi_resource); 2129 memcpy(adapter->vf_res, msg, min(msglen, len)); 2130 iavf_validate_num_queues(adapter); 2131 iavf_vf_parse_hw_config(&adapter->hw, adapter->vf_res); 2132 if 
(is_zero_ether_addr(adapter->hw.mac.addr)) { 2133 /* restore current mac address */ 2134 ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr); 2135 } else { 2136 netif_addr_lock_bh(netdev); 2137 /* refresh current mac address if changed */ 2138 ether_addr_copy(netdev->perm_addr, 2139 adapter->hw.mac.addr); 2140 netif_addr_unlock_bh(netdev); 2141 } 2142 spin_lock_bh(&adapter->mac_vlan_list_lock); 2143 iavf_add_filter(adapter, adapter->hw.mac.addr); 2144 2145 if (VLAN_ALLOWED(adapter)) { 2146 if (!list_empty(&adapter->vlan_filter_list)) { 2147 struct iavf_vlan_filter *vlf; 2148 2149 /* re-add all VLAN filters over virtchnl */ 2150 list_for_each_entry(vlf, 2151 &adapter->vlan_filter_list, 2152 list) 2153 vlf->add = true; 2154 2155 adapter->aq_required |= 2156 IAVF_FLAG_AQ_ADD_VLAN_FILTER; 2157 } 2158 } 2159 2160 spin_unlock_bh(&adapter->mac_vlan_list_lock); 2161 2162 iavf_parse_vf_resource_msg(adapter); 2163 2164 /* negotiated VIRTCHNL_VF_OFFLOAD_VLAN_V2, so wait for the 2165 * response to VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS to finish 2166 * configuration 2167 */ 2168 if (VLAN_V2_ALLOWED(adapter)) 2169 break; 2170 /* fallthrough and finish config if VIRTCHNL_VF_OFFLOAD_VLAN_V2 2171 * wasn't successfully negotiated with the PF 2172 */ 2173 } 2174 fallthrough; 2175 case VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS: { 2176 struct iavf_mac_filter *f; 2177 bool was_mac_changed; 2178 u64 aq_required = 0; 2179 2180 if (v_opcode == VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS) 2181 memcpy(&adapter->vlan_v2_caps, msg, 2182 min_t(u16, msglen, 2183 sizeof(adapter->vlan_v2_caps))); 2184 2185 iavf_process_config(adapter); 2186 adapter->flags |= IAVF_FLAG_SETUP_NETDEV_FEATURES; 2187 was_mac_changed = !ether_addr_equal(netdev->dev_addr, 2188 adapter->hw.mac.addr); 2189 2190 spin_lock_bh(&adapter->mac_vlan_list_lock); 2191 2192 /* re-add all MAC filters */ 2193 list_for_each_entry(f, &adapter->mac_filter_list, list) { 2194 if (was_mac_changed && 2195 ether_addr_equal(netdev->dev_addr, 
f->macaddr)) 2196 ether_addr_copy(f->macaddr, 2197 adapter->hw.mac.addr); 2198 2199 f->is_new_mac = true; 2200 f->add = true; 2201 f->add_handled = false; 2202 f->remove = false; 2203 } 2204 2205 /* re-add all VLAN filters */ 2206 if (VLAN_FILTERING_ALLOWED(adapter)) { 2207 struct iavf_vlan_filter *vlf; 2208 2209 if (!list_empty(&adapter->vlan_filter_list)) { 2210 list_for_each_entry(vlf, 2211 &adapter->vlan_filter_list, 2212 list) 2213 vlf->add = true; 2214 2215 aq_required |= IAVF_FLAG_AQ_ADD_VLAN_FILTER; 2216 } 2217 } 2218 2219 spin_unlock_bh(&adapter->mac_vlan_list_lock); 2220 2221 netif_addr_lock_bh(netdev); 2222 eth_hw_addr_set(netdev, adapter->hw.mac.addr); 2223 netif_addr_unlock_bh(netdev); 2224 2225 adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER | 2226 aq_required; 2227 } 2228 break; 2229 case VIRTCHNL_OP_ENABLE_QUEUES: 2230 /* enable transmits */ 2231 iavf_irq_enable(adapter, true); 2232 adapter->flags &= ~IAVF_FLAG_QUEUES_DISABLED; 2233 break; 2234 case VIRTCHNL_OP_DISABLE_QUEUES: 2235 iavf_free_all_tx_resources(adapter); 2236 iavf_free_all_rx_resources(adapter); 2237 if (adapter->state == __IAVF_DOWN_PENDING) { 2238 iavf_change_state(adapter, __IAVF_DOWN); 2239 wake_up(&adapter->down_waitqueue); 2240 } 2241 break; 2242 case VIRTCHNL_OP_VERSION: 2243 case VIRTCHNL_OP_CONFIG_IRQ_MAP: 2244 /* Don't display an error if we get these out of sequence. 2245 * If the firmware needed to get kicked, we'll get these and 2246 * it's no problem. 2247 */ 2248 if (v_opcode != adapter->current_op) 2249 return; 2250 break; 2251 case VIRTCHNL_OP_IWARP: 2252 /* Gobble zero-length replies from the PF. They indicate that 2253 * a previous message was received OK, and the client doesn't 2254 * care about that. 
2255 */ 2256 if (msglen && CLIENT_ENABLED(adapter)) 2257 iavf_notify_client_message(&adapter->vsi, msg, msglen); 2258 break; 2259 2260 case VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP: 2261 adapter->client_pending &= 2262 ~(BIT(VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP)); 2263 break; 2264 case VIRTCHNL_OP_GET_RSS_HENA_CAPS: { 2265 struct virtchnl_rss_hena *vrh = (struct virtchnl_rss_hena *)msg; 2266 2267 if (msglen == sizeof(*vrh)) 2268 adapter->hena = vrh->hena; 2269 else 2270 dev_warn(&adapter->pdev->dev, 2271 "Invalid message %d from PF\n", v_opcode); 2272 } 2273 break; 2274 case VIRTCHNL_OP_REQUEST_QUEUES: { 2275 struct virtchnl_vf_res_request *vfres = 2276 (struct virtchnl_vf_res_request *)msg; 2277 2278 if (vfres->num_queue_pairs != adapter->num_req_queues) { 2279 dev_info(&adapter->pdev->dev, 2280 "Requested %d queues, PF can support %d\n", 2281 adapter->num_req_queues, 2282 vfres->num_queue_pairs); 2283 adapter->num_req_queues = 0; 2284 adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED; 2285 } 2286 } 2287 break; 2288 case VIRTCHNL_OP_ADD_CLOUD_FILTER: { 2289 struct iavf_cloud_filter *cf; 2290 2291 list_for_each_entry(cf, &adapter->cloud_filter_list, list) { 2292 if (cf->state == __IAVF_CF_ADD_PENDING) 2293 cf->state = __IAVF_CF_ACTIVE; 2294 } 2295 } 2296 break; 2297 case VIRTCHNL_OP_DEL_CLOUD_FILTER: { 2298 struct iavf_cloud_filter *cf, *cftmp; 2299 2300 list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, 2301 list) { 2302 if (cf->state == __IAVF_CF_DEL_PENDING) { 2303 cf->state = __IAVF_CF_INVALID; 2304 list_del(&cf->list); 2305 kfree(cf); 2306 adapter->num_cloud_filters--; 2307 } 2308 } 2309 } 2310 break; 2311 case VIRTCHNL_OP_ADD_FDIR_FILTER: { 2312 struct virtchnl_fdir_add *add_fltr = (struct virtchnl_fdir_add *)msg; 2313 struct iavf_fdir_fltr *fdir, *fdir_tmp; 2314 2315 spin_lock_bh(&adapter->fdir_fltr_lock); 2316 list_for_each_entry_safe(fdir, fdir_tmp, 2317 &adapter->fdir_list_head, 2318 list) { 2319 if (fdir->state == IAVF_FDIR_FLTR_ADD_PENDING) { 2320 if 
(add_fltr->status == VIRTCHNL_FDIR_SUCCESS) { 2321 dev_info(&adapter->pdev->dev, "Flow Director filter with location %u is added\n", 2322 fdir->loc); 2323 fdir->state = IAVF_FDIR_FLTR_ACTIVE; 2324 fdir->flow_id = add_fltr->flow_id; 2325 } else { 2326 dev_info(&adapter->pdev->dev, "Failed to add Flow Director filter with status: %d\n", 2327 add_fltr->status); 2328 iavf_print_fdir_fltr(adapter, fdir); 2329 list_del(&fdir->list); 2330 kfree(fdir); 2331 adapter->fdir_active_fltr--; 2332 } 2333 } 2334 } 2335 spin_unlock_bh(&adapter->fdir_fltr_lock); 2336 } 2337 break; 2338 case VIRTCHNL_OP_DEL_FDIR_FILTER: { 2339 struct virtchnl_fdir_del *del_fltr = (struct virtchnl_fdir_del *)msg; 2340 struct iavf_fdir_fltr *fdir, *fdir_tmp; 2341 2342 spin_lock_bh(&adapter->fdir_fltr_lock); 2343 list_for_each_entry_safe(fdir, fdir_tmp, &adapter->fdir_list_head, 2344 list) { 2345 if (fdir->state == IAVF_FDIR_FLTR_DEL_PENDING) { 2346 if (del_fltr->status == VIRTCHNL_FDIR_SUCCESS) { 2347 dev_info(&adapter->pdev->dev, "Flow Director filter with location %u is deleted\n", 2348 fdir->loc); 2349 list_del(&fdir->list); 2350 kfree(fdir); 2351 adapter->fdir_active_fltr--; 2352 } else { 2353 fdir->state = IAVF_FDIR_FLTR_ACTIVE; 2354 dev_info(&adapter->pdev->dev, "Failed to delete Flow Director filter with status: %d\n", 2355 del_fltr->status); 2356 iavf_print_fdir_fltr(adapter, fdir); 2357 } 2358 } 2359 } 2360 spin_unlock_bh(&adapter->fdir_fltr_lock); 2361 } 2362 break; 2363 case VIRTCHNL_OP_ADD_RSS_CFG: { 2364 struct iavf_adv_rss *rss; 2365 2366 spin_lock_bh(&adapter->adv_rss_lock); 2367 list_for_each_entry(rss, &adapter->adv_rss_list_head, list) { 2368 if (rss->state == IAVF_ADV_RSS_ADD_PENDING) { 2369 iavf_print_adv_rss_cfg(adapter, rss, 2370 "Input set change for", 2371 "successful"); 2372 rss->state = IAVF_ADV_RSS_ACTIVE; 2373 } 2374 } 2375 spin_unlock_bh(&adapter->adv_rss_lock); 2376 } 2377 break; 2378 case VIRTCHNL_OP_DEL_RSS_CFG: { 2379 struct iavf_adv_rss *rss, *rss_tmp; 2380 2381 
spin_lock_bh(&adapter->adv_rss_lock); 2382 list_for_each_entry_safe(rss, rss_tmp, 2383 &adapter->adv_rss_list_head, list) { 2384 if (rss->state == IAVF_ADV_RSS_DEL_PENDING) { 2385 list_del(&rss->list); 2386 kfree(rss); 2387 } 2388 } 2389 spin_unlock_bh(&adapter->adv_rss_lock); 2390 } 2391 break; 2392 case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING: 2393 /* PF enabled vlan strip on this VF. 2394 * Update netdev->features if needed to be in sync with ethtool. 2395 */ 2396 if (!v_retval) 2397 iavf_netdev_features_vlan_strip_set(netdev, true); 2398 break; 2399 case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING: 2400 /* PF disabled vlan strip on this VF. 2401 * Update netdev->features if needed to be in sync with ethtool. 2402 */ 2403 if (!v_retval) 2404 iavf_netdev_features_vlan_strip_set(netdev, false); 2405 break; 2406 default: 2407 if (adapter->current_op && (v_opcode != adapter->current_op)) 2408 dev_warn(&adapter->pdev->dev, "Expected response %d from PF, received %d\n", 2409 adapter->current_op, v_opcode); 2410 break; 2411 } /* switch v_opcode */ 2412 adapter->current_op = VIRTCHNL_OP_UNKNOWN; 2413 } 2414