1 // SPDX-License-Identifier: GPL-2.0 2 /* Copyright(c) 2013 - 2018 Intel Corporation. */ 3 4 #include "iavf.h" 5 #include "iavf_prototype.h" 6 #include "iavf_client.h" 7 8 /* busy wait delay in msec */ 9 #define IAVF_BUSY_WAIT_DELAY 10 10 #define IAVF_BUSY_WAIT_COUNT 50 11 12 /** 13 * iavf_send_pf_msg 14 * @adapter: adapter structure 15 * @op: virtual channel opcode 16 * @msg: pointer to message buffer 17 * @len: message length 18 * 19 * Send message to PF and print status if failure. 20 **/ 21 static int iavf_send_pf_msg(struct iavf_adapter *adapter, 22 enum virtchnl_ops op, u8 *msg, u16 len) 23 { 24 struct iavf_hw *hw = &adapter->hw; 25 enum iavf_status status; 26 27 if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) 28 return 0; /* nothing to see here, move along */ 29 30 status = iavf_aq_send_msg_to_pf(hw, op, 0, msg, len, NULL); 31 if (status) 32 dev_dbg(&adapter->pdev->dev, "Unable to send opcode %d to PF, status %s, aq_err %s\n", 33 op, iavf_stat_str(hw, status), 34 iavf_aq_str(hw, hw->aq.asq_last_status)); 35 return iavf_status_to_errno(status); 36 } 37 38 /** 39 * iavf_send_api_ver 40 * @adapter: adapter structure 41 * 42 * Send API version admin queue message to the PF. The reply is not checked 43 * in this function. Returns 0 if the message was successfully 44 * sent, or one of the IAVF_ADMIN_QUEUE_ERROR_ statuses if not. 45 **/ 46 int iavf_send_api_ver(struct iavf_adapter *adapter) 47 { 48 struct virtchnl_version_info vvi; 49 50 vvi.major = VIRTCHNL_VERSION_MAJOR; 51 vvi.minor = VIRTCHNL_VERSION_MINOR; 52 53 return iavf_send_pf_msg(adapter, VIRTCHNL_OP_VERSION, (u8 *)&vvi, 54 sizeof(vvi)); 55 } 56 57 /** 58 * iavf_poll_virtchnl_msg 59 * @hw: HW configuration structure 60 * @event: event to populate on success 61 * @op_to_poll: requested virtchnl op to poll for 62 * 63 * Initialize poll for virtchnl msg matching the requested_op. 
Returns 0 64 * if a message of the correct opcode is in the queue or an error code 65 * if no message matching the op code is waiting and other failures. 66 */ 67 static int 68 iavf_poll_virtchnl_msg(struct iavf_hw *hw, struct iavf_arq_event_info *event, 69 enum virtchnl_ops op_to_poll) 70 { 71 enum virtchnl_ops received_op; 72 enum iavf_status status; 73 u32 v_retval; 74 75 while (1) { 76 /* When the AQ is empty, iavf_clean_arq_element will return 77 * nonzero and this loop will terminate. 78 */ 79 status = iavf_clean_arq_element(hw, event, NULL); 80 if (status != IAVF_SUCCESS) 81 return iavf_status_to_errno(status); 82 received_op = 83 (enum virtchnl_ops)le32_to_cpu(event->desc.cookie_high); 84 if (op_to_poll == received_op) 85 break; 86 } 87 88 v_retval = le32_to_cpu(event->desc.cookie_low); 89 return virtchnl_status_to_errno((enum virtchnl_status_code)v_retval); 90 } 91 92 /** 93 * iavf_verify_api_ver 94 * @adapter: adapter structure 95 * 96 * Compare API versions with the PF. Must be called after admin queue is 97 * initialized. Returns 0 if API versions match, -EIO if they do not, 98 * IAVF_ERR_ADMIN_QUEUE_NO_WORK if the admin queue is empty, and any errors 99 * from the firmware are propagated. 
100 **/ 101 int iavf_verify_api_ver(struct iavf_adapter *adapter) 102 { 103 struct iavf_arq_event_info event; 104 int err; 105 106 event.buf_len = IAVF_MAX_AQ_BUF_SIZE; 107 event.msg_buf = kzalloc(IAVF_MAX_AQ_BUF_SIZE, GFP_KERNEL); 108 if (!event.msg_buf) 109 return -ENOMEM; 110 111 err = iavf_poll_virtchnl_msg(&adapter->hw, &event, VIRTCHNL_OP_VERSION); 112 if (!err) { 113 struct virtchnl_version_info *pf_vvi = 114 (struct virtchnl_version_info *)event.msg_buf; 115 adapter->pf_version = *pf_vvi; 116 117 if (pf_vvi->major > VIRTCHNL_VERSION_MAJOR || 118 (pf_vvi->major == VIRTCHNL_VERSION_MAJOR && 119 pf_vvi->minor > VIRTCHNL_VERSION_MINOR)) 120 err = -EIO; 121 } 122 123 kfree(event.msg_buf); 124 125 return err; 126 } 127 128 /** 129 * iavf_send_vf_config_msg 130 * @adapter: adapter structure 131 * 132 * Send VF configuration request admin queue message to the PF. The reply 133 * is not checked in this function. Returns 0 if the message was 134 * successfully sent, or one of the IAVF_ADMIN_QUEUE_ERROR_ statuses if not. 
135 **/ 136 int iavf_send_vf_config_msg(struct iavf_adapter *adapter) 137 { 138 u32 caps; 139 140 caps = VIRTCHNL_VF_OFFLOAD_L2 | 141 VIRTCHNL_VF_OFFLOAD_RSS_PF | 142 VIRTCHNL_VF_OFFLOAD_RSS_AQ | 143 VIRTCHNL_VF_OFFLOAD_RSS_REG | 144 VIRTCHNL_VF_OFFLOAD_VLAN | 145 VIRTCHNL_VF_OFFLOAD_WB_ON_ITR | 146 VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 | 147 VIRTCHNL_VF_OFFLOAD_ENCAP | 148 VIRTCHNL_VF_OFFLOAD_VLAN_V2 | 149 VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM | 150 VIRTCHNL_VF_OFFLOAD_REQ_QUEUES | 151 VIRTCHNL_VF_OFFLOAD_ADQ | 152 VIRTCHNL_VF_OFFLOAD_USO | 153 VIRTCHNL_VF_OFFLOAD_FDIR_PF | 154 VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF | 155 VIRTCHNL_VF_CAP_ADV_LINK_SPEED; 156 157 adapter->current_op = VIRTCHNL_OP_GET_VF_RESOURCES; 158 adapter->aq_required &= ~IAVF_FLAG_AQ_GET_CONFIG; 159 if (PF_IS_V11(adapter)) 160 return iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_VF_RESOURCES, 161 (u8 *)&caps, sizeof(caps)); 162 else 163 return iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_VF_RESOURCES, 164 NULL, 0); 165 } 166 167 int iavf_send_vf_offload_vlan_v2_msg(struct iavf_adapter *adapter) 168 { 169 adapter->aq_required &= ~IAVF_FLAG_AQ_GET_OFFLOAD_VLAN_V2_CAPS; 170 171 if (!VLAN_V2_ALLOWED(adapter)) 172 return -EOPNOTSUPP; 173 174 adapter->current_op = VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS; 175 176 return iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS, 177 NULL, 0); 178 } 179 180 /** 181 * iavf_validate_num_queues 182 * @adapter: adapter structure 183 * 184 * Validate that the number of queues the PF has sent in 185 * VIRTCHNL_OP_GET_VF_RESOURCES is not larger than the VF can handle. 
186 **/ 187 static void iavf_validate_num_queues(struct iavf_adapter *adapter) 188 { 189 if (adapter->vf_res->num_queue_pairs > IAVF_MAX_REQ_QUEUES) { 190 struct virtchnl_vsi_resource *vsi_res; 191 int i; 192 193 dev_info(&adapter->pdev->dev, "Received %d queues, but can only have a max of %d\n", 194 adapter->vf_res->num_queue_pairs, 195 IAVF_MAX_REQ_QUEUES); 196 dev_info(&adapter->pdev->dev, "Fixing by reducing queues to %d\n", 197 IAVF_MAX_REQ_QUEUES); 198 adapter->vf_res->num_queue_pairs = IAVF_MAX_REQ_QUEUES; 199 for (i = 0; i < adapter->vf_res->num_vsis; i++) { 200 vsi_res = &adapter->vf_res->vsi_res[i]; 201 vsi_res->num_queue_pairs = IAVF_MAX_REQ_QUEUES; 202 } 203 } 204 } 205 206 /** 207 * iavf_get_vf_config 208 * @adapter: private adapter structure 209 * 210 * Get VF configuration from PF and populate hw structure. Must be called after 211 * admin queue is initialized. Busy waits until response is received from PF, 212 * with maximum timeout. Response from PF is returned in the buffer for further 213 * processing by the caller. 
214 **/ 215 int iavf_get_vf_config(struct iavf_adapter *adapter) 216 { 217 struct iavf_hw *hw = &adapter->hw; 218 struct iavf_arq_event_info event; 219 u16 len; 220 int err; 221 222 len = sizeof(struct virtchnl_vf_resource) + 223 IAVF_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource); 224 event.buf_len = len; 225 event.msg_buf = kzalloc(len, GFP_KERNEL); 226 if (!event.msg_buf) 227 return -ENOMEM; 228 229 err = iavf_poll_virtchnl_msg(hw, &event, VIRTCHNL_OP_GET_VF_RESOURCES); 230 memcpy(adapter->vf_res, event.msg_buf, min(event.msg_len, len)); 231 232 /* some PFs send more queues than we should have so validate that 233 * we aren't getting too many queues 234 */ 235 if (!err) 236 iavf_validate_num_queues(adapter); 237 iavf_vf_parse_hw_config(hw, adapter->vf_res); 238 239 kfree(event.msg_buf); 240 241 return err; 242 } 243 244 int iavf_get_vf_vlan_v2_caps(struct iavf_adapter *adapter) 245 { 246 struct iavf_arq_event_info event; 247 int err; 248 u16 len; 249 250 len = sizeof(struct virtchnl_vlan_caps); 251 event.buf_len = len; 252 event.msg_buf = kzalloc(len, GFP_KERNEL); 253 if (!event.msg_buf) 254 return -ENOMEM; 255 256 err = iavf_poll_virtchnl_msg(&adapter->hw, &event, 257 VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS); 258 if (!err) 259 memcpy(&adapter->vlan_v2_caps, event.msg_buf, 260 min(event.msg_len, len)); 261 262 kfree(event.msg_buf); 263 264 return err; 265 } 266 267 /** 268 * iavf_configure_queues 269 * @adapter: adapter structure 270 * 271 * Request that the PF set up our (previously allocated) queues. 
272 **/ 273 void iavf_configure_queues(struct iavf_adapter *adapter) 274 { 275 struct virtchnl_vsi_queue_config_info *vqci; 276 struct virtchnl_queue_pair_info *vqpi; 277 int pairs = adapter->num_active_queues; 278 int i, max_frame = IAVF_MAX_RXBUFFER; 279 size_t len; 280 281 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { 282 /* bail because we already have a command pending */ 283 dev_err(&adapter->pdev->dev, "Cannot configure queues, command %d pending\n", 284 adapter->current_op); 285 return; 286 } 287 adapter->current_op = VIRTCHNL_OP_CONFIG_VSI_QUEUES; 288 len = struct_size(vqci, qpair, pairs); 289 vqci = kzalloc(len, GFP_KERNEL); 290 if (!vqci) 291 return; 292 293 /* Limit maximum frame size when jumbo frames is not enabled */ 294 if (!(adapter->flags & IAVF_FLAG_LEGACY_RX) && 295 (adapter->netdev->mtu <= ETH_DATA_LEN)) 296 max_frame = IAVF_RXBUFFER_1536 - NET_IP_ALIGN; 297 298 vqci->vsi_id = adapter->vsi_res->vsi_id; 299 vqci->num_queue_pairs = pairs; 300 vqpi = vqci->qpair; 301 /* Size check is not needed here - HW max is 16 queue pairs, and we 302 * can fit info for 31 of them into the AQ buffer before it overflows. 
303 */ 304 for (i = 0; i < pairs; i++) { 305 vqpi->txq.vsi_id = vqci->vsi_id; 306 vqpi->txq.queue_id = i; 307 vqpi->txq.ring_len = adapter->tx_rings[i].count; 308 vqpi->txq.dma_ring_addr = adapter->tx_rings[i].dma; 309 vqpi->rxq.vsi_id = vqci->vsi_id; 310 vqpi->rxq.queue_id = i; 311 vqpi->rxq.ring_len = adapter->rx_rings[i].count; 312 vqpi->rxq.dma_ring_addr = adapter->rx_rings[i].dma; 313 vqpi->rxq.max_pkt_size = max_frame; 314 vqpi->rxq.databuffer_size = 315 ALIGN(adapter->rx_rings[i].rx_buf_len, 316 BIT_ULL(IAVF_RXQ_CTX_DBUFF_SHIFT)); 317 vqpi++; 318 } 319 320 adapter->aq_required &= ~IAVF_FLAG_AQ_CONFIGURE_QUEUES; 321 iavf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_VSI_QUEUES, 322 (u8 *)vqci, len); 323 kfree(vqci); 324 } 325 326 /** 327 * iavf_enable_queues 328 * @adapter: adapter structure 329 * 330 * Request that the PF enable all of our queues. 331 **/ 332 void iavf_enable_queues(struct iavf_adapter *adapter) 333 { 334 struct virtchnl_queue_select vqs; 335 336 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { 337 /* bail because we already have a command pending */ 338 dev_err(&adapter->pdev->dev, "Cannot enable queues, command %d pending\n", 339 adapter->current_op); 340 return; 341 } 342 adapter->current_op = VIRTCHNL_OP_ENABLE_QUEUES; 343 vqs.vsi_id = adapter->vsi_res->vsi_id; 344 vqs.tx_queues = BIT(adapter->num_active_queues) - 1; 345 vqs.rx_queues = vqs.tx_queues; 346 adapter->aq_required &= ~IAVF_FLAG_AQ_ENABLE_QUEUES; 347 iavf_send_pf_msg(adapter, VIRTCHNL_OP_ENABLE_QUEUES, 348 (u8 *)&vqs, sizeof(vqs)); 349 } 350 351 /** 352 * iavf_disable_queues 353 * @adapter: adapter structure 354 * 355 * Request that the PF disable all of our queues. 
356 **/ 357 void iavf_disable_queues(struct iavf_adapter *adapter) 358 { 359 struct virtchnl_queue_select vqs; 360 361 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { 362 /* bail because we already have a command pending */ 363 dev_err(&adapter->pdev->dev, "Cannot disable queues, command %d pending\n", 364 adapter->current_op); 365 return; 366 } 367 adapter->current_op = VIRTCHNL_OP_DISABLE_QUEUES; 368 vqs.vsi_id = adapter->vsi_res->vsi_id; 369 vqs.tx_queues = BIT(adapter->num_active_queues) - 1; 370 vqs.rx_queues = vqs.tx_queues; 371 adapter->aq_required &= ~IAVF_FLAG_AQ_DISABLE_QUEUES; 372 iavf_send_pf_msg(adapter, VIRTCHNL_OP_DISABLE_QUEUES, 373 (u8 *)&vqs, sizeof(vqs)); 374 } 375 376 /** 377 * iavf_map_queues 378 * @adapter: adapter structure 379 * 380 * Request that the PF map queues to interrupt vectors. Misc causes, including 381 * admin queue, are always mapped to vector 0. 382 **/ 383 void iavf_map_queues(struct iavf_adapter *adapter) 384 { 385 struct virtchnl_irq_map_info *vimi; 386 struct virtchnl_vector_map *vecmap; 387 struct iavf_q_vector *q_vector; 388 int v_idx, q_vectors; 389 size_t len; 390 391 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { 392 /* bail because we already have a command pending */ 393 dev_err(&adapter->pdev->dev, "Cannot map queues to vectors, command %d pending\n", 394 adapter->current_op); 395 return; 396 } 397 adapter->current_op = VIRTCHNL_OP_CONFIG_IRQ_MAP; 398 399 q_vectors = adapter->num_msix_vectors - NONQ_VECS; 400 401 len = struct_size(vimi, vecmap, adapter->num_msix_vectors); 402 vimi = kzalloc(len, GFP_KERNEL); 403 if (!vimi) 404 return; 405 406 vimi->num_vectors = adapter->num_msix_vectors; 407 /* Queue vectors first */ 408 for (v_idx = 0; v_idx < q_vectors; v_idx++) { 409 q_vector = &adapter->q_vectors[v_idx]; 410 vecmap = &vimi->vecmap[v_idx]; 411 412 vecmap->vsi_id = adapter->vsi_res->vsi_id; 413 vecmap->vector_id = v_idx + NONQ_VECS; 414 vecmap->txq_map = q_vector->ring_mask; 415 vecmap->rxq_map = 
q_vector->ring_mask; 416 vecmap->rxitr_idx = IAVF_RX_ITR; 417 vecmap->txitr_idx = IAVF_TX_ITR; 418 } 419 /* Misc vector last - this is only for AdminQ messages */ 420 vecmap = &vimi->vecmap[v_idx]; 421 vecmap->vsi_id = adapter->vsi_res->vsi_id; 422 vecmap->vector_id = 0; 423 vecmap->txq_map = 0; 424 vecmap->rxq_map = 0; 425 426 adapter->aq_required &= ~IAVF_FLAG_AQ_MAP_VECTORS; 427 iavf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_IRQ_MAP, 428 (u8 *)vimi, len); 429 kfree(vimi); 430 } 431 432 /** 433 * iavf_set_mac_addr_type - Set the correct request type from the filter type 434 * @virtchnl_ether_addr: pointer to requested list element 435 * @filter: pointer to requested filter 436 **/ 437 static void 438 iavf_set_mac_addr_type(struct virtchnl_ether_addr *virtchnl_ether_addr, 439 const struct iavf_mac_filter *filter) 440 { 441 virtchnl_ether_addr->type = filter->is_primary ? 442 VIRTCHNL_ETHER_ADDR_PRIMARY : 443 VIRTCHNL_ETHER_ADDR_EXTRA; 444 } 445 446 /** 447 * iavf_add_ether_addrs 448 * @adapter: adapter structure 449 * 450 * Request that the PF add one or more addresses to our filters. 
 **/
void iavf_add_ether_addrs(struct iavf_adapter *adapter)
{
	struct virtchnl_ether_addr_list *veal;
	struct iavf_mac_filter *f;
	int i = 0, count = 0;
	bool more = false;
	size_t len;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot add filters, command %d pending\n",
			adapter->current_op);
		return;
	}

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	/* first pass: count how many filters are marked for addition */
	list_for_each_entry(f, &adapter->mac_filter_list, list) {
		if (f->add)
			count++;
	}
	if (!count) {
		adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_MAC_FILTER;
		spin_unlock_bh(&adapter->mac_vlan_list_lock);
		return;
	}
	adapter->current_op = VIRTCHNL_OP_ADD_ETH_ADDR;

	len = struct_size(veal, list, count);
	if (len > IAVF_MAX_AQ_BUF_SIZE) {
		/* cap count to what fits in one AQ message; 'more' keeps the
		 * request flag set so the remainder is sent in a later call
		 */
		dev_warn(&adapter->pdev->dev, "Too many add MAC changes in one request\n");
		count = (IAVF_MAX_AQ_BUF_SIZE -
			 sizeof(struct virtchnl_ether_addr_list)) /
			sizeof(struct virtchnl_ether_addr);
		len = struct_size(veal, list, count);
		more = true;
	}

	/* GFP_ATOMIC because the mac_vlan_list_lock spinlock is held */
	veal = kzalloc(len, GFP_ATOMIC);
	if (!veal) {
		spin_unlock_bh(&adapter->mac_vlan_list_lock);
		return;
	}

	veal->vsi_id = adapter->vsi_res->vsi_id;
	veal->num_elements = count;
	/* second pass: fill the message and clear each filter's add flag */
	list_for_each_entry(f, &adapter->mac_filter_list, list) {
		if (f->add) {
			ether_addr_copy(veal->list[i].addr, f->macaddr);
			iavf_set_mac_addr_type(&veal->list[i], f);
			i++;
			f->add = false;
			if (i == count)
				break;
		}
	}
	if (!more)
		adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_MAC_FILTER;

	spin_unlock_bh(&adapter->mac_vlan_list_lock);

	iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_ETH_ADDR, (u8 *)veal, len);
	kfree(veal);
}

/**
 * iavf_del_ether_addrs
 * @adapter: adapter structure
 *
 * Request that the PF remove one or more addresses from our filters.
 **/
void iavf_del_ether_addrs(struct iavf_adapter *adapter)
{
	struct virtchnl_ether_addr_list *veal;
	struct iavf_mac_filter *f, *ftmp;
	int i = 0, count = 0;
	bool more = false;
	size_t len;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot remove filters, command %d pending\n",
			adapter->current_op);
		return;
	}

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	/* first pass: count how many filters are marked for removal */
	list_for_each_entry(f, &adapter->mac_filter_list, list) {
		if (f->remove)
			count++;
	}
	if (!count) {
		adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_MAC_FILTER;
		spin_unlock_bh(&adapter->mac_vlan_list_lock);
		return;
	}
	adapter->current_op = VIRTCHNL_OP_DEL_ETH_ADDR;

	len = struct_size(veal, list, count);
	if (len > IAVF_MAX_AQ_BUF_SIZE) {
		/* cap count to what fits in one AQ message; 'more' keeps the
		 * request flag set so the remainder is sent in a later call
		 */
		dev_warn(&adapter->pdev->dev, "Too many delete MAC changes in one request\n");
		count = (IAVF_MAX_AQ_BUF_SIZE -
			 sizeof(struct virtchnl_ether_addr_list)) /
			sizeof(struct virtchnl_ether_addr);
		len = struct_size(veal, list, count);
		more = true;
	}
	/* GFP_ATOMIC because the mac_vlan_list_lock spinlock is held */
	veal = kzalloc(len, GFP_ATOMIC);
	if (!veal) {
		spin_unlock_bh(&adapter->mac_vlan_list_lock);
		return;
	}

	veal->vsi_id = adapter->vsi_res->vsi_id;
	veal->num_elements = count;
	/* second pass: fill the message, freeing each entry as it goes; the
	 * _safe iterator is required because entries are deleted in-loop
	 */
	list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
		if (f->remove) {
			ether_addr_copy(veal->list[i].addr, f->macaddr);
			iavf_set_mac_addr_type(&veal->list[i], f);
			i++;
			list_del(&f->list);
			kfree(f);
			if (i == count)
				break;
		}
	}
	if (!more)
		adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_MAC_FILTER;

	spin_unlock_bh(&adapter->mac_vlan_list_lock);

	iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_ETH_ADDR, (u8 *)veal, len);
	kfree(veal);
}

/**
 * iavf_mac_add_ok
 * @adapter: adapter structure
 *
 * Submit list of filters based on PF
response. 593 **/ 594 static void iavf_mac_add_ok(struct iavf_adapter *adapter) 595 { 596 struct iavf_mac_filter *f, *ftmp; 597 598 spin_lock_bh(&adapter->mac_vlan_list_lock); 599 list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) { 600 f->is_new_mac = false; 601 } 602 spin_unlock_bh(&adapter->mac_vlan_list_lock); 603 } 604 605 /** 606 * iavf_mac_add_reject 607 * @adapter: adapter structure 608 * 609 * Remove filters from list based on PF response. 610 **/ 611 static void iavf_mac_add_reject(struct iavf_adapter *adapter) 612 { 613 struct net_device *netdev = adapter->netdev; 614 struct iavf_mac_filter *f, *ftmp; 615 616 spin_lock_bh(&adapter->mac_vlan_list_lock); 617 list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) { 618 if (f->remove && ether_addr_equal(f->macaddr, netdev->dev_addr)) 619 f->remove = false; 620 621 if (f->is_new_mac) { 622 list_del(&f->list); 623 kfree(f); 624 } 625 } 626 spin_unlock_bh(&adapter->mac_vlan_list_lock); 627 } 628 629 /** 630 * iavf_add_vlans 631 * @adapter: adapter structure 632 * 633 * Request that the PF add one or more VLAN filters to our VSI. 
 **/
void iavf_add_vlans(struct iavf_adapter *adapter)
{
	int len, i = 0, count = 0;
	struct iavf_vlan_filter *f;
	bool more = false;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot add VLANs, command %d pending\n",
			adapter->current_op);
		return;
	}

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	/* first pass: count how many filters are marked for addition */
	list_for_each_entry(f, &adapter->vlan_filter_list, list) {
		if (f->add)
			count++;
	}
	if (!count || !VLAN_FILTERING_ALLOWED(adapter)) {
		adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_VLAN_FILTER;
		spin_unlock_bh(&adapter->mac_vlan_list_lock);
		return;
	}

	if (VLAN_ALLOWED(adapter)) {
		/* legacy (v1) VLAN offload negotiated: plain vid list */
		struct virtchnl_vlan_filter_list *vvfl;

		adapter->current_op = VIRTCHNL_OP_ADD_VLAN;

		len = sizeof(*vvfl) + (count * sizeof(u16));
		if (len > IAVF_MAX_AQ_BUF_SIZE) {
			/* cap count to one AQ message; 'more' keeps the
			 * request flag set so the rest goes in a later call
			 */
			dev_warn(&adapter->pdev->dev, "Too many add VLAN changes in one request\n");
			count = (IAVF_MAX_AQ_BUF_SIZE - sizeof(*vvfl)) /
				sizeof(u16);
			len = sizeof(*vvfl) + (count * sizeof(u16));
			more = true;
		}
		/* GFP_ATOMIC because the mac_vlan_list_lock is held */
		vvfl = kzalloc(len, GFP_ATOMIC);
		if (!vvfl) {
			spin_unlock_bh(&adapter->mac_vlan_list_lock);
			return;
		}

		vvfl->vsi_id = adapter->vsi_res->vsi_id;
		vvfl->num_elements = count;
		list_for_each_entry(f, &adapter->vlan_filter_list, list) {
			if (f->add) {
				vvfl->vlan_id[i] = f->vlan.vid;
				i++;
				f->add = false;
				if (i == count)
					break;
			}
		}
		if (!more)
			adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_VLAN_FILTER;

		spin_unlock_bh(&adapter->mac_vlan_list_lock);

		iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_VLAN, (u8 *)vvfl, len);
		kfree(vvfl);
	} else {
		/* VLAN V2 negotiated: filters carry tci + tpid */
		struct virtchnl_vlan_filter_list_v2 *vvfl_v2;

		adapter->current_op = VIRTCHNL_OP_ADD_VLAN_V2;

		/* count - 1 because the struct declares one trailing filter */
		len = sizeof(*vvfl_v2) + ((count - 1) *
					  sizeof(struct virtchnl_vlan_filter));
		if (len > IAVF_MAX_AQ_BUF_SIZE) {
			dev_warn(&adapter->pdev->dev, "Too many add VLAN changes in one request\n");
			count = (IAVF_MAX_AQ_BUF_SIZE - sizeof(*vvfl_v2)) /
				sizeof(struct virtchnl_vlan_filter);
			len = sizeof(*vvfl_v2) +
				((count - 1) *
				 sizeof(struct virtchnl_vlan_filter));
			more = true;
		}

		/* GFP_ATOMIC because the mac_vlan_list_lock is held */
		vvfl_v2 = kzalloc(len, GFP_ATOMIC);
		if (!vvfl_v2) {
			spin_unlock_bh(&adapter->mac_vlan_list_lock);
			return;
		}

		vvfl_v2->vport_id = adapter->vsi_res->vsi_id;
		vvfl_v2->num_elements = count;
		list_for_each_entry(f, &adapter->vlan_filter_list, list) {
			if (f->add) {
				struct virtchnl_vlan_supported_caps *filtering_support =
					&adapter->vlan_v2_caps.filtering.filtering_support;
				struct virtchnl_vlan *vlan;

				/* give priority over outer if it's enabled */
				if (filtering_support->outer)
					vlan = &vvfl_v2->filters[i].outer;
				else
					vlan = &vvfl_v2->filters[i].inner;

				vlan->tci = f->vlan.vid;
				vlan->tpid = f->vlan.tpid;

				i++;
				f->add = false;
				if (i == count)
					break;
			}
		}

		if (!more)
			adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_VLAN_FILTER;

		spin_unlock_bh(&adapter->mac_vlan_list_lock);

		iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_VLAN_V2,
				 (u8 *)vvfl_v2, len);
		kfree(vvfl_v2);
	}
}

/**
 * iavf_del_vlans
 * @adapter: adapter structure
 *
 * Request that the PF remove one or more VLAN filters from our VSI.
760 **/ 761 void iavf_del_vlans(struct iavf_adapter *adapter) 762 { 763 struct iavf_vlan_filter *f, *ftmp; 764 int len, i = 0, count = 0; 765 bool more = false; 766 767 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { 768 /* bail because we already have a command pending */ 769 dev_err(&adapter->pdev->dev, "Cannot remove VLANs, command %d pending\n", 770 adapter->current_op); 771 return; 772 } 773 774 spin_lock_bh(&adapter->mac_vlan_list_lock); 775 776 list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) { 777 /* since VLAN capabilities are not allowed, we dont want to send 778 * a VLAN delete request because it will most likely fail and 779 * create unnecessary errors/noise, so just free the VLAN 780 * filters marked for removal to enable bailing out before 781 * sending a virtchnl message 782 */ 783 if (f->remove && !VLAN_FILTERING_ALLOWED(adapter)) { 784 list_del(&f->list); 785 kfree(f); 786 } else if (f->remove) { 787 count++; 788 } 789 } 790 if (!count || !VLAN_FILTERING_ALLOWED(adapter)) { 791 adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_VLAN_FILTER; 792 spin_unlock_bh(&adapter->mac_vlan_list_lock); 793 return; 794 } 795 796 if (VLAN_ALLOWED(adapter)) { 797 struct virtchnl_vlan_filter_list *vvfl; 798 799 adapter->current_op = VIRTCHNL_OP_DEL_VLAN; 800 801 len = sizeof(*vvfl) + (count * sizeof(u16)); 802 if (len > IAVF_MAX_AQ_BUF_SIZE) { 803 dev_warn(&adapter->pdev->dev, "Too many delete VLAN changes in one request\n"); 804 count = (IAVF_MAX_AQ_BUF_SIZE - sizeof(*vvfl)) / 805 sizeof(u16); 806 len = sizeof(*vvfl) + (count * sizeof(u16)); 807 more = true; 808 } 809 vvfl = kzalloc(len, GFP_ATOMIC); 810 if (!vvfl) { 811 spin_unlock_bh(&adapter->mac_vlan_list_lock); 812 return; 813 } 814 815 vvfl->vsi_id = adapter->vsi_res->vsi_id; 816 vvfl->num_elements = count; 817 list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) { 818 if (f->remove) { 819 vvfl->vlan_id[i] = f->vlan.vid; 820 i++; 821 list_del(&f->list); 822 kfree(f); 823 if (i == 
count) 824 break; 825 } 826 } 827 828 if (!more) 829 adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_VLAN_FILTER; 830 831 spin_unlock_bh(&adapter->mac_vlan_list_lock); 832 833 iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_VLAN, (u8 *)vvfl, len); 834 kfree(vvfl); 835 } else { 836 struct virtchnl_vlan_filter_list_v2 *vvfl_v2; 837 838 adapter->current_op = VIRTCHNL_OP_DEL_VLAN_V2; 839 840 len = sizeof(*vvfl_v2) + 841 ((count - 1) * sizeof(struct virtchnl_vlan_filter)); 842 if (len > IAVF_MAX_AQ_BUF_SIZE) { 843 dev_warn(&adapter->pdev->dev, "Too many add VLAN changes in one request\n"); 844 count = (IAVF_MAX_AQ_BUF_SIZE - 845 sizeof(*vvfl_v2)) / 846 sizeof(struct virtchnl_vlan_filter); 847 len = sizeof(*vvfl_v2) + 848 ((count - 1) * 849 sizeof(struct virtchnl_vlan_filter)); 850 more = true; 851 } 852 853 vvfl_v2 = kzalloc(len, GFP_ATOMIC); 854 if (!vvfl_v2) { 855 spin_unlock_bh(&adapter->mac_vlan_list_lock); 856 return; 857 } 858 859 vvfl_v2->vport_id = adapter->vsi_res->vsi_id; 860 vvfl_v2->num_elements = count; 861 list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) { 862 if (f->remove) { 863 struct virtchnl_vlan_supported_caps *filtering_support = 864 &adapter->vlan_v2_caps.filtering.filtering_support; 865 struct virtchnl_vlan *vlan; 866 867 /* give priority over outer if it's enabled */ 868 if (filtering_support->outer) 869 vlan = &vvfl_v2->filters[i].outer; 870 else 871 vlan = &vvfl_v2->filters[i].inner; 872 873 vlan->tci = f->vlan.vid; 874 vlan->tpid = f->vlan.tpid; 875 876 list_del(&f->list); 877 kfree(f); 878 i++; 879 if (i == count) 880 break; 881 } 882 } 883 884 if (!more) 885 adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_VLAN_FILTER; 886 887 spin_unlock_bh(&adapter->mac_vlan_list_lock); 888 889 iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_VLAN_V2, 890 (u8 *)vvfl_v2, len); 891 kfree(vvfl_v2); 892 } 893 } 894 895 /** 896 * iavf_set_promiscuous 897 * @adapter: adapter structure 898 * @flags: bitmask to control unicast/multicast promiscuous. 
899 * 900 * Request that the PF enable promiscuous mode for our VSI. 901 **/ 902 void iavf_set_promiscuous(struct iavf_adapter *adapter, int flags) 903 { 904 struct virtchnl_promisc_info vpi; 905 int promisc_all; 906 907 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { 908 /* bail because we already have a command pending */ 909 dev_err(&adapter->pdev->dev, "Cannot set promiscuous mode, command %d pending\n", 910 adapter->current_op); 911 return; 912 } 913 914 promisc_all = FLAG_VF_UNICAST_PROMISC | 915 FLAG_VF_MULTICAST_PROMISC; 916 if ((flags & promisc_all) == promisc_all) { 917 adapter->flags |= IAVF_FLAG_PROMISC_ON; 918 adapter->aq_required &= ~IAVF_FLAG_AQ_REQUEST_PROMISC; 919 dev_info(&adapter->pdev->dev, "Entering promiscuous mode\n"); 920 } 921 922 if (flags & FLAG_VF_MULTICAST_PROMISC) { 923 adapter->flags |= IAVF_FLAG_ALLMULTI_ON; 924 adapter->aq_required &= ~IAVF_FLAG_AQ_REQUEST_ALLMULTI; 925 dev_info(&adapter->pdev->dev, "%s is entering multicast promiscuous mode\n", 926 adapter->netdev->name); 927 } 928 929 if (!flags) { 930 if (adapter->flags & IAVF_FLAG_PROMISC_ON) { 931 adapter->flags &= ~IAVF_FLAG_PROMISC_ON; 932 adapter->aq_required &= ~IAVF_FLAG_AQ_RELEASE_PROMISC; 933 dev_info(&adapter->pdev->dev, "Leaving promiscuous mode\n"); 934 } 935 936 if (adapter->flags & IAVF_FLAG_ALLMULTI_ON) { 937 adapter->flags &= ~IAVF_FLAG_ALLMULTI_ON; 938 adapter->aq_required &= ~IAVF_FLAG_AQ_RELEASE_ALLMULTI; 939 dev_info(&adapter->pdev->dev, "%s is leaving multicast promiscuous mode\n", 940 adapter->netdev->name); 941 } 942 } 943 944 adapter->current_op = VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE; 945 vpi.vsi_id = adapter->vsi_res->vsi_id; 946 vpi.flags = flags; 947 iavf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, 948 (u8 *)&vpi, sizeof(vpi)); 949 } 950 951 /** 952 * iavf_request_stats 953 * @adapter: adapter structure 954 * 955 * Request VSI statistics from PF. 
956 **/ 957 void iavf_request_stats(struct iavf_adapter *adapter) 958 { 959 struct virtchnl_queue_select vqs; 960 961 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { 962 /* no error message, this isn't crucial */ 963 return; 964 } 965 966 adapter->aq_required &= ~IAVF_FLAG_AQ_REQUEST_STATS; 967 adapter->current_op = VIRTCHNL_OP_GET_STATS; 968 vqs.vsi_id = adapter->vsi_res->vsi_id; 969 /* queue maps are ignored for this message - only the vsi is used */ 970 if (iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_STATS, (u8 *)&vqs, 971 sizeof(vqs))) 972 /* if the request failed, don't lock out others */ 973 adapter->current_op = VIRTCHNL_OP_UNKNOWN; 974 } 975 976 /** 977 * iavf_get_hena 978 * @adapter: adapter structure 979 * 980 * Request hash enable capabilities from PF 981 **/ 982 void iavf_get_hena(struct iavf_adapter *adapter) 983 { 984 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { 985 /* bail because we already have a command pending */ 986 dev_err(&adapter->pdev->dev, "Cannot get RSS hash capabilities, command %d pending\n", 987 adapter->current_op); 988 return; 989 } 990 adapter->current_op = VIRTCHNL_OP_GET_RSS_HENA_CAPS; 991 adapter->aq_required &= ~IAVF_FLAG_AQ_GET_HENA; 992 iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_RSS_HENA_CAPS, NULL, 0); 993 } 994 995 /** 996 * iavf_set_hena 997 * @adapter: adapter structure 998 * 999 * Request the PF to set our RSS hash capabilities 1000 **/ 1001 void iavf_set_hena(struct iavf_adapter *adapter) 1002 { 1003 struct virtchnl_rss_hena vrh; 1004 1005 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { 1006 /* bail because we already have a command pending */ 1007 dev_err(&adapter->pdev->dev, "Cannot set RSS hash enable, command %d pending\n", 1008 adapter->current_op); 1009 return; 1010 } 1011 vrh.hena = adapter->hena; 1012 adapter->current_op = VIRTCHNL_OP_SET_RSS_HENA; 1013 adapter->aq_required &= ~IAVF_FLAG_AQ_SET_HENA; 1014 iavf_send_pf_msg(adapter, VIRTCHNL_OP_SET_RSS_HENA, (u8 *)&vrh, 1015 sizeof(vrh)); 1016 } 1017 1018 /** 
 * iavf_set_rss_key
 * @adapter: adapter structure
 *
 * Request the PF to set our RSS hash key
 **/
void iavf_set_rss_key(struct iavf_adapter *adapter)
{
	struct virtchnl_rss_key *vrk;
	int len;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot set RSS key, command %d pending\n",
			adapter->current_op);
		return;
	}
	/* -1 because struct virtchnl_rss_key already contains one key byte */
	len = sizeof(struct virtchnl_rss_key) +
	      (adapter->rss_key_size * sizeof(u8)) - 1;
	vrk = kzalloc(len, GFP_KERNEL);
	if (!vrk)
		return;
	vrk->vsi_id = adapter->vsi.id;
	vrk->key_len = adapter->rss_key_size;
	memcpy(vrk->key, adapter->rss_key, adapter->rss_key_size);

	adapter->current_op = VIRTCHNL_OP_CONFIG_RSS_KEY;
	adapter->aq_required &= ~IAVF_FLAG_AQ_SET_RSS_KEY;
	iavf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_RSS_KEY, (u8 *)vrk, len);
	kfree(vrk);
}

/**
 * iavf_set_rss_lut
 * @adapter: adapter structure
 *
 * Request the PF to set our RSS lookup table
 **/
void iavf_set_rss_lut(struct iavf_adapter *adapter)
{
	struct virtchnl_rss_lut *vrl;
	int len;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot set RSS LUT, command %d pending\n",
			adapter->current_op);
		return;
	}
	/* -1 because struct virtchnl_rss_lut already contains one LUT entry */
	len = sizeof(struct virtchnl_rss_lut) +
	      (adapter->rss_lut_size * sizeof(u8)) - 1;
	vrl = kzalloc(len, GFP_KERNEL);
	if (!vrl)
		return;
	vrl->vsi_id = adapter->vsi.id;
	vrl->lut_entries = adapter->rss_lut_size;
	memcpy(vrl->lut, adapter->rss_lut, adapter->rss_lut_size);
	adapter->current_op = VIRTCHNL_OP_CONFIG_RSS_LUT;
	adapter->aq_required &= ~IAVF_FLAG_AQ_SET_RSS_LUT;
	iavf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_RSS_LUT, (u8 *)vrl, len);
	kfree(vrl);
}

/**
 * iavf_enable_vlan_stripping
 * @adapter: adapter structure
 *
 * Request VLAN header stripping to be enabled
 **/
void iavf_enable_vlan_stripping(struct iavf_adapter *adapter)
{
	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot enable stripping, command %d pending\n",
			adapter->current_op);
		return;
	}
	adapter->current_op = VIRTCHNL_OP_ENABLE_VLAN_STRIPPING;
	adapter->aq_required &= ~IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING;
	iavf_send_pf_msg(adapter, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING, NULL, 0);
}

/**
 * iavf_disable_vlan_stripping
 * @adapter: adapter structure
 *
 * Request VLAN header stripping to be disabled
 **/
void iavf_disable_vlan_stripping(struct iavf_adapter *adapter)
{
	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot disable stripping, command %d pending\n",
			adapter->current_op);
		return;
	}
	adapter->current_op = VIRTCHNL_OP_DISABLE_VLAN_STRIPPING;
	adapter->aq_required &= ~IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING;
	iavf_send_pf_msg(adapter, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING, NULL, 0);
}

/**
 * iavf_tpid_to_vc_ethertype - transform from VLAN TPID to virtchnl ethertype
 * @tpid: VLAN TPID (i.e. 0x8100, 0x88a8, etc.)
 */
static u32 iavf_tpid_to_vc_ethertype(u16 tpid)
{
	switch (tpid) {
	case ETH_P_8021Q:
		return VIRTCHNL_VLAN_ETHERTYPE_8100;
	case ETH_P_8021AD:
		return VIRTCHNL_VLAN_ETHERTYPE_88A8;
	}

	/* unknown/unsupported TPID */
	return 0;
}

/**
 * iavf_set_vc_offload_ethertype - set virtchnl ethertype for offload message
 * @adapter: adapter structure
 * @msg: message structure used for updating offloads over virtchnl to update
 * @tpid: VLAN TPID (i.e. 0x8100, 0x88a8, etc.)
 * @offload_op: opcode used to determine which support structure to check
 *
 * Return: 0 on success, -EINVAL if @offload_op is not a VLAN offload opcode
 * or the requested ethertype is not togglable in either the outer or inner
 * supported caps.
 */
static int
iavf_set_vc_offload_ethertype(struct iavf_adapter *adapter,
			      struct virtchnl_vlan_setting *msg, u16 tpid,
			      enum virtchnl_ops offload_op)
{
	struct virtchnl_vlan_supported_caps *offload_support;
	u16 vc_ethertype = iavf_tpid_to_vc_ethertype(tpid);

	/* reference the correct offload support structure */
	switch (offload_op) {
	case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2:
	case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2:
		offload_support =
			&adapter->vlan_v2_caps.offloads.stripping_support;
		break;
	case VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2:
	case VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2:
		offload_support =
			&adapter->vlan_v2_caps.offloads.insertion_support;
		break;
	default:
		dev_err(&adapter->pdev->dev, "Invalid opcode %d for setting virtchnl ethertype to enable/disable VLAN offloads\n",
			offload_op);
		return -EINVAL;
	}

	/* make sure ethertype is supported; outer caps take precedence */
	if (offload_support->outer & vc_ethertype &&
	    offload_support->outer & VIRTCHNL_VLAN_TOGGLE) {
		msg->outer_ethertype_setting = vc_ethertype;
	} else if (offload_support->inner & vc_ethertype &&
		   offload_support->inner & VIRTCHNL_VLAN_TOGGLE) {
		msg->inner_ethertype_setting = vc_ethertype;
	} else {
		dev_dbg(&adapter->pdev->dev, "opcode %d unsupported for VLAN TPID 0x%04x\n",
			offload_op, tpid);
		return -EINVAL;
	}

	return 0;
}

/**
 * iavf_clear_offload_v2_aq_required - clear AQ required bit for offload request
 * @adapter: adapter structure
 * @tpid: VLAN TPID
 * @offload_op: opcode used to determine which AQ required bit to clear
 */
static void
iavf_clear_offload_v2_aq_required(struct iavf_adapter *adapter, u16 tpid,
				  enum virtchnl_ops offload_op)
{
	/* one aq_required bit exists per (opcode, TPID) combination */
	switch (offload_op) {
	case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2:
		if (tpid == ETH_P_8021Q)
			adapter->aq_required &=
				~IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_STRIPPING;
		else if (tpid == ETH_P_8021AD)
			adapter->aq_required &=
				~IAVF_FLAG_AQ_ENABLE_STAG_VLAN_STRIPPING;
		break;
	case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2:
		if (tpid == ETH_P_8021Q)
			adapter->aq_required &=
				~IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_STRIPPING;
		else if (tpid == ETH_P_8021AD)
			adapter->aq_required &=
				~IAVF_FLAG_AQ_DISABLE_STAG_VLAN_STRIPPING;
		break;
	case VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2:
		if (tpid == ETH_P_8021Q)
			adapter->aq_required &=
				~IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_INSERTION;
		else if (tpid == ETH_P_8021AD)
			adapter->aq_required &=
				~IAVF_FLAG_AQ_ENABLE_STAG_VLAN_INSERTION;
		break;
	case VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2:
		if (tpid == ETH_P_8021Q)
			adapter->aq_required &=
				~IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_INSERTION;
		else if (tpid == ETH_P_8021AD)
			adapter->aq_required &=
				~IAVF_FLAG_AQ_DISABLE_STAG_VLAN_INSERTION;
		break;
	default:
		dev_err(&adapter->pdev->dev, "Unsupported opcode %d specified for clearing aq_required bits for VIRTCHNL_VF_OFFLOAD_VLAN_V2 offload request\n",
			offload_op);
	}
}

/**
 * iavf_send_vlan_offload_v2 - send offload enable/disable over virtchnl
 * @adapter: adapter structure
 * @tpid: VLAN TPID used for the command (i.e. 0x8100 or 0x88a8)
0x8100 or 0x88a8) 1237 * @offload_op: offload_op used to make the request over virtchnl 1238 */ 1239 static void 1240 iavf_send_vlan_offload_v2(struct iavf_adapter *adapter, u16 tpid, 1241 enum virtchnl_ops offload_op) 1242 { 1243 struct virtchnl_vlan_setting *msg; 1244 int len = sizeof(*msg); 1245 1246 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { 1247 /* bail because we already have a command pending */ 1248 dev_err(&adapter->pdev->dev, "Cannot send %d, command %d pending\n", 1249 offload_op, adapter->current_op); 1250 return; 1251 } 1252 1253 adapter->current_op = offload_op; 1254 1255 msg = kzalloc(len, GFP_KERNEL); 1256 if (!msg) 1257 return; 1258 1259 msg->vport_id = adapter->vsi_res->vsi_id; 1260 1261 /* always clear to prevent unsupported and endless requests */ 1262 iavf_clear_offload_v2_aq_required(adapter, tpid, offload_op); 1263 1264 /* only send valid offload requests */ 1265 if (!iavf_set_vc_offload_ethertype(adapter, msg, tpid, offload_op)) 1266 iavf_send_pf_msg(adapter, offload_op, (u8 *)msg, len); 1267 else 1268 adapter->current_op = VIRTCHNL_OP_UNKNOWN; 1269 1270 kfree(msg); 1271 } 1272 1273 /** 1274 * iavf_enable_vlan_stripping_v2 - enable VLAN stripping 1275 * @adapter: adapter structure 1276 * @tpid: VLAN TPID used to enable VLAN stripping 1277 */ 1278 void iavf_enable_vlan_stripping_v2(struct iavf_adapter *adapter, u16 tpid) 1279 { 1280 iavf_send_vlan_offload_v2(adapter, tpid, 1281 VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2); 1282 } 1283 1284 /** 1285 * iavf_disable_vlan_stripping_v2 - disable VLAN stripping 1286 * @adapter: adapter structure 1287 * @tpid: VLAN TPID used to disable VLAN stripping 1288 */ 1289 void iavf_disable_vlan_stripping_v2(struct iavf_adapter *adapter, u16 tpid) 1290 { 1291 iavf_send_vlan_offload_v2(adapter, tpid, 1292 VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2); 1293 } 1294 1295 /** 1296 * iavf_enable_vlan_insertion_v2 - enable VLAN insertion 1297 * @adapter: adapter structure 1298 * @tpid: VLAN TPID used to enable VLAN 
 */
void iavf_enable_vlan_insertion_v2(struct iavf_adapter *adapter, u16 tpid)
{
	iavf_send_vlan_offload_v2(adapter, tpid,
				  VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2);
}

/**
 * iavf_disable_vlan_insertion_v2 - disable VLAN insertion
 * @adapter: adapter structure
 * @tpid: VLAN TPID used to disable VLAN insertion
 */
void iavf_disable_vlan_insertion_v2(struct iavf_adapter *adapter, u16 tpid)
{
	iavf_send_vlan_offload_v2(adapter, tpid,
				  VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2);
}

/* large enough for the longest speed string, "Unknown Mbps" + NUL */
#define IAVF_MAX_SPEED_STRLEN	13

/**
 * iavf_print_link_message - print link up or down
 * @adapter: adapter structure
 *
 * Log a message telling the world of our wonderous link status
 */
static void iavf_print_link_message(struct iavf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int link_speed_mbps;
	char *speed;

	if (!adapter->link_up) {
		netdev_info(netdev, "NIC Link is Down\n");
		return;
	}

	speed = kzalloc(IAVF_MAX_SPEED_STRLEN, GFP_KERNEL);
	if (!speed)
		return;

	/* with ADV_LINK_SUPPORT the PF reports the speed in Mbps directly */
	if (ADV_LINK_SUPPORT(adapter)) {
		link_speed_mbps = adapter->link_speed_mbps;
		goto print_link_msg;
	}

	/* legacy PFs report an enum that must be mapped to Mbps */
	switch (adapter->link_speed) {
	case VIRTCHNL_LINK_SPEED_40GB:
		link_speed_mbps = SPEED_40000;
		break;
	case VIRTCHNL_LINK_SPEED_25GB:
		link_speed_mbps = SPEED_25000;
		break;
	case VIRTCHNL_LINK_SPEED_20GB:
		link_speed_mbps = SPEED_20000;
		break;
	case VIRTCHNL_LINK_SPEED_10GB:
		link_speed_mbps = SPEED_10000;
		break;
	case VIRTCHNL_LINK_SPEED_5GB:
		link_speed_mbps = SPEED_5000;
		break;
	case VIRTCHNL_LINK_SPEED_2_5GB:
		link_speed_mbps = SPEED_2500;
		break;
	case VIRTCHNL_LINK_SPEED_1GB:
		link_speed_mbps = SPEED_1000;
		break;
	case VIRTCHNL_LINK_SPEED_100MB:
		link_speed_mbps = SPEED_100;
		break;
	default:
		link_speed_mbps = SPEED_UNKNOWN;
		break;
	}

print_link_msg:
	if (link_speed_mbps > SPEED_1000) {
		if (link_speed_mbps == SPEED_2500)
			snprintf(speed, IAVF_MAX_SPEED_STRLEN, "2.5 Gbps");
		else
			/* convert to Gbps inline */
			snprintf(speed, IAVF_MAX_SPEED_STRLEN, "%d %s",
				 link_speed_mbps / 1000, "Gbps");
	} else if (link_speed_mbps == SPEED_UNKNOWN) {
		snprintf(speed, IAVF_MAX_SPEED_STRLEN, "%s", "Unknown Mbps");
	} else {
		snprintf(speed, IAVF_MAX_SPEED_STRLEN, "%d %s",
			 link_speed_mbps, "Mbps");
	}

	netdev_info(netdev, "NIC Link is Up Speed is %s Full Duplex\n", speed);
	kfree(speed);
}

/**
 * iavf_get_vpe_link_status
 * @adapter: adapter structure
 * @vpe: virtchnl_pf_event structure
 *
 * Helper function for determining the link status
 **/
static bool
iavf_get_vpe_link_status(struct iavf_adapter *adapter,
			 struct virtchnl_pf_event *vpe)
{
	if (ADV_LINK_SUPPORT(adapter))
		return vpe->event_data.link_event_adv.link_status;
	else
		return vpe->event_data.link_event.link_status;
}

/**
 * iavf_set_adapter_link_speed_from_vpe
 * @adapter: adapter structure for which we are setting the link speed
 * @vpe: virtchnl_pf_event structure that contains the link speed we are setting
 *
 * Helper function for setting iavf_adapter link speed
 **/
static void
iavf_set_adapter_link_speed_from_vpe(struct iavf_adapter *adapter,
				     struct virtchnl_pf_event *vpe)
{
	if (ADV_LINK_SUPPORT(adapter))
		adapter->link_speed_mbps =
			vpe->event_data.link_event_adv.link_speed;
	else
		adapter->link_speed = vpe->event_data.link_event.link_speed;
}

/**
 * iavf_enable_channels
 * @adapter: adapter structure
 *
 * Request that the PF enable channels as specified by
 * the user via tc tool.
 **/
void iavf_enable_channels(struct iavf_adapter *adapter)
{
	struct virtchnl_tc_info *vti = NULL;
	size_t len;
	int i;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot configure mqprio, command %d pending\n",
			adapter->current_op);
		return;
	}

	/* num_tc - 1 because struct virtchnl_tc_info has one list entry */
	len = struct_size(vti, list, adapter->num_tc - 1);
	vti = kzalloc(len, GFP_KERNEL);
	if (!vti)
		return;
	vti->num_tc = adapter->num_tc;
	for (i = 0; i < vti->num_tc; i++) {
		vti->list[i].count = adapter->ch_config.ch_info[i].count;
		vti->list[i].offset = adapter->ch_config.ch_info[i].offset;
		vti->list[i].pad = 0;
		vti->list[i].max_tx_rate =
			adapter->ch_config.ch_info[i].max_tx_rate;
	}

	adapter->ch_config.state = __IAVF_TC_RUNNING;
	adapter->flags |= IAVF_FLAG_REINIT_ITR_NEEDED;
	adapter->current_op = VIRTCHNL_OP_ENABLE_CHANNELS;
	adapter->aq_required &= ~IAVF_FLAG_AQ_ENABLE_CHANNELS;
	iavf_send_pf_msg(adapter, VIRTCHNL_OP_ENABLE_CHANNELS, (u8 *)vti, len);
	kfree(vti);
}

/**
 * iavf_disable_channels
 * @adapter: adapter structure
 *
 * Request that the PF disable channels that are configured
 **/
void iavf_disable_channels(struct iavf_adapter *adapter)
{
	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot configure mqprio, command %d pending\n",
			adapter->current_op);
		return;
	}

	adapter->ch_config.state = __IAVF_TC_INVALID;
	adapter->flags |= IAVF_FLAG_REINIT_ITR_NEEDED;
	adapter->current_op = VIRTCHNL_OP_DISABLE_CHANNELS;
	adapter->aq_required &= ~IAVF_FLAG_AQ_DISABLE_CHANNELS;
	iavf_send_pf_msg(adapter, VIRTCHNL_OP_DISABLE_CHANNELS, NULL, 0);
}

/**
 * iavf_print_cloud_filter
 *
 * @adapter: adapter structure
 * @f: cloud filter to print
 *
 * Print the cloud filter
 **/
static void iavf_print_cloud_filter(struct iavf_adapter *adapter,
				    struct virtchnl_filter *f)
{
	/* only TCP/IPv4 and TCP/IPv6 flow types are printed; anything else
	 * is silently ignored
	 */
	switch (f->flow_type) {
	case VIRTCHNL_TCP_V4_FLOW:
		dev_info(&adapter->pdev->dev, "dst_mac: %pM src_mac: %pM vlan_id: %hu dst_ip: %pI4 src_ip %pI4 dst_port %hu src_port %hu\n",
			 &f->data.tcp_spec.dst_mac,
			 &f->data.tcp_spec.src_mac,
			 ntohs(f->data.tcp_spec.vlan_id),
			 &f->data.tcp_spec.dst_ip[0],
			 &f->data.tcp_spec.src_ip[0],
			 ntohs(f->data.tcp_spec.dst_port),
			 ntohs(f->data.tcp_spec.src_port));
		break;
	case VIRTCHNL_TCP_V6_FLOW:
		dev_info(&adapter->pdev->dev, "dst_mac: %pM src_mac: %pM vlan_id: %hu dst_ip: %pI6 src_ip %pI6 dst_port %hu src_port %hu\n",
			 &f->data.tcp_spec.dst_mac,
			 &f->data.tcp_spec.src_mac,
			 ntohs(f->data.tcp_spec.vlan_id),
			 &f->data.tcp_spec.dst_ip,
			 &f->data.tcp_spec.src_ip,
			 ntohs(f->data.tcp_spec.dst_port),
			 ntohs(f->data.tcp_spec.src_port));
		break;
	}
}

/**
 * iavf_add_cloud_filter
 * @adapter: adapter structure
 *
 * Request that the PF add cloud filters as specified
 * by the user via tc tool.
1532 **/ 1533 void iavf_add_cloud_filter(struct iavf_adapter *adapter) 1534 { 1535 struct iavf_cloud_filter *cf; 1536 struct virtchnl_filter *f; 1537 int len = 0, count = 0; 1538 1539 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { 1540 /* bail because we already have a command pending */ 1541 dev_err(&adapter->pdev->dev, "Cannot add cloud filter, command %d pending\n", 1542 adapter->current_op); 1543 return; 1544 } 1545 list_for_each_entry(cf, &adapter->cloud_filter_list, list) { 1546 if (cf->add) { 1547 count++; 1548 break; 1549 } 1550 } 1551 if (!count) { 1552 adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_CLOUD_FILTER; 1553 return; 1554 } 1555 adapter->current_op = VIRTCHNL_OP_ADD_CLOUD_FILTER; 1556 1557 len = sizeof(struct virtchnl_filter); 1558 f = kzalloc(len, GFP_KERNEL); 1559 if (!f) 1560 return; 1561 1562 list_for_each_entry(cf, &adapter->cloud_filter_list, list) { 1563 if (cf->add) { 1564 memcpy(f, &cf->f, sizeof(struct virtchnl_filter)); 1565 cf->add = false; 1566 cf->state = __IAVF_CF_ADD_PENDING; 1567 iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_CLOUD_FILTER, 1568 (u8 *)f, len); 1569 } 1570 } 1571 kfree(f); 1572 } 1573 1574 /** 1575 * iavf_del_cloud_filter 1576 * @adapter: adapter structure 1577 * 1578 * Request that the PF delete cloud filters as specified 1579 * by the user via tc tool. 
1580 **/ 1581 void iavf_del_cloud_filter(struct iavf_adapter *adapter) 1582 { 1583 struct iavf_cloud_filter *cf, *cftmp; 1584 struct virtchnl_filter *f; 1585 int len = 0, count = 0; 1586 1587 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { 1588 /* bail because we already have a command pending */ 1589 dev_err(&adapter->pdev->dev, "Cannot remove cloud filter, command %d pending\n", 1590 adapter->current_op); 1591 return; 1592 } 1593 list_for_each_entry(cf, &adapter->cloud_filter_list, list) { 1594 if (cf->del) { 1595 count++; 1596 break; 1597 } 1598 } 1599 if (!count) { 1600 adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_CLOUD_FILTER; 1601 return; 1602 } 1603 adapter->current_op = VIRTCHNL_OP_DEL_CLOUD_FILTER; 1604 1605 len = sizeof(struct virtchnl_filter); 1606 f = kzalloc(len, GFP_KERNEL); 1607 if (!f) 1608 return; 1609 1610 list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) { 1611 if (cf->del) { 1612 memcpy(f, &cf->f, sizeof(struct virtchnl_filter)); 1613 cf->del = false; 1614 cf->state = __IAVF_CF_DEL_PENDING; 1615 iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_CLOUD_FILTER, 1616 (u8 *)f, len); 1617 } 1618 } 1619 kfree(f); 1620 } 1621 1622 /** 1623 * iavf_add_fdir_filter 1624 * @adapter: the VF adapter structure 1625 * 1626 * Request that the PF add Flow Director filters as specified 1627 * by the user via ethtool. 
 **/
void iavf_add_fdir_filter(struct iavf_adapter *adapter)
{
	struct iavf_fdir_fltr *fdir;
	struct virtchnl_fdir_add *f;
	bool process_fltr = false;
	int len;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot add Flow Director filter, command %d pending\n",
			adapter->current_op);
		return;
	}

	len = sizeof(struct virtchnl_fdir_add);
	f = kzalloc(len, GFP_KERNEL);
	if (!f)
		return;

	/* copy the first pending filter's message under the lock; the send
	 * happens after the lock is dropped
	 */
	spin_lock_bh(&adapter->fdir_fltr_lock);
	list_for_each_entry(fdir, &adapter->fdir_list_head, list) {
		if (fdir->state == IAVF_FDIR_FLTR_ADD_REQUEST) {
			process_fltr = true;
			fdir->state = IAVF_FDIR_FLTR_ADD_PENDING;
			memcpy(f, &fdir->vc_add_msg, len);
			break;
		}
	}
	spin_unlock_bh(&adapter->fdir_fltr_lock);

	if (!process_fltr) {
		/* prevent iavf_add_fdir_filter() from being called when there
		 * are no filters to add
		 */
		adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_FDIR_FILTER;
		kfree(f);
		return;
	}
	adapter->current_op = VIRTCHNL_OP_ADD_FDIR_FILTER;
	iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_FDIR_FILTER, (u8 *)f, len);
	kfree(f);
}

/**
 * iavf_del_fdir_filter
 * @adapter: the VF adapter structure
 *
 * Request that the PF delete Flow Director filters as specified
 * by the user via ethtool.
 **/
void iavf_del_fdir_filter(struct iavf_adapter *adapter)
{
	struct iavf_fdir_fltr *fdir;
	struct virtchnl_fdir_del f;
	bool process_fltr = false;
	int len;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot remove Flow Director filter, command %d pending\n",
			adapter->current_op);
		return;
	}

	len = sizeof(struct virtchnl_fdir_del);

	/* build the delete message from the first pending filter while the
	 * lock is held; the send happens after the lock is dropped
	 */
	spin_lock_bh(&adapter->fdir_fltr_lock);
	list_for_each_entry(fdir, &adapter->fdir_list_head, list) {
		if (fdir->state == IAVF_FDIR_FLTR_DEL_REQUEST) {
			process_fltr = true;
			memset(&f, 0, len);
			f.vsi_id = fdir->vc_add_msg.vsi_id;
			f.flow_id = fdir->flow_id;
			fdir->state = IAVF_FDIR_FLTR_DEL_PENDING;
			break;
		}
	}
	spin_unlock_bh(&adapter->fdir_fltr_lock);

	if (!process_fltr) {
		adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_FDIR_FILTER;
		return;
	}

	adapter->current_op = VIRTCHNL_OP_DEL_FDIR_FILTER;
	iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_FDIR_FILTER, (u8 *)&f, len);
}

/**
 * iavf_add_adv_rss_cfg
 * @adapter: the VF adapter structure
 *
 * Request that the PF add RSS configuration as specified
 * by the user via ethtool.
 **/
void iavf_add_adv_rss_cfg(struct iavf_adapter *adapter)
{
	struct virtchnl_rss_cfg *rss_cfg;
	struct iavf_adv_rss *rss;
	bool process_rss = false;
	int len;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot add RSS configuration, command %d pending\n",
			adapter->current_op);
		return;
	}

	len = sizeof(struct virtchnl_rss_cfg);
	rss_cfg = kzalloc(len, GFP_KERNEL);
	if (!rss_cfg)
		return;

	/* copy the first pending config under the lock; send after unlock */
	spin_lock_bh(&adapter->adv_rss_lock);
	list_for_each_entry(rss, &adapter->adv_rss_list_head, list) {
		if (rss->state == IAVF_ADV_RSS_ADD_REQUEST) {
			process_rss = true;
			rss->state = IAVF_ADV_RSS_ADD_PENDING;
			memcpy(rss_cfg, &rss->cfg_msg, len);
			iavf_print_adv_rss_cfg(adapter, rss,
					       "Input set change for",
					       "is pending");
			break;
		}
	}
	spin_unlock_bh(&adapter->adv_rss_lock);

	if (process_rss) {
		adapter->current_op = VIRTCHNL_OP_ADD_RSS_CFG;
		iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_RSS_CFG,
				 (u8 *)rss_cfg, len);
	} else {
		adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_ADV_RSS_CFG;
	}

	kfree(rss_cfg);
}

/**
 * iavf_del_adv_rss_cfg
 * @adapter: the VF adapter structure
 *
 * Request that the PF delete RSS configuration as specified
 * by the user via ethtool.
 **/
void iavf_del_adv_rss_cfg(struct iavf_adapter *adapter)
{
	struct virtchnl_rss_cfg *rss_cfg;
	struct iavf_adv_rss *rss;
	bool process_rss = false;
	int len;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot remove RSS configuration, command %d pending\n",
			adapter->current_op);
		return;
	}

	len = sizeof(struct virtchnl_rss_cfg);
	rss_cfg = kzalloc(len, GFP_KERNEL);
	if (!rss_cfg)
		return;

	/* copy the first pending config under the lock; send after unlock */
	spin_lock_bh(&adapter->adv_rss_lock);
	list_for_each_entry(rss, &adapter->adv_rss_list_head, list) {
		if (rss->state == IAVF_ADV_RSS_DEL_REQUEST) {
			process_rss = true;
			rss->state = IAVF_ADV_RSS_DEL_PENDING;
			memcpy(rss_cfg, &rss->cfg_msg, len);
			break;
		}
	}
	spin_unlock_bh(&adapter->adv_rss_lock);

	if (process_rss) {
		adapter->current_op = VIRTCHNL_OP_DEL_RSS_CFG;
		iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_RSS_CFG,
				 (u8 *)rss_cfg, len);
	} else {
		adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_ADV_RSS_CFG;
	}

	kfree(rss_cfg);
}

/**
 * iavf_request_reset
 * @adapter: adapter structure
 *
 * Request that the PF reset this VF. No response is expected.
 *
 * Returns 0 on success or a negative errno if the admin queue send failed.
 **/
int iavf_request_reset(struct iavf_adapter *adapter)
{
	int err;
	/* Don't check CURRENT_OP - this is always higher priority */
	err = iavf_send_pf_msg(adapter, VIRTCHNL_OP_RESET_VF, NULL, 0);
	adapter->current_op = VIRTCHNL_OP_UNKNOWN;
	return err;
}

/**
 * iavf_netdev_features_vlan_strip_set - update vlan strip status
 * @netdev: ptr to netdev being adjusted
 * @enable: enable or disable vlan strip
 *
 * Helper function to change vlan strip status in netdev->features.
1837 */ 1838 static void iavf_netdev_features_vlan_strip_set(struct net_device *netdev, 1839 const bool enable) 1840 { 1841 if (enable) 1842 netdev->features |= NETIF_F_HW_VLAN_CTAG_RX; 1843 else 1844 netdev->features &= ~NETIF_F_HW_VLAN_CTAG_RX; 1845 } 1846 1847 /** 1848 * iavf_virtchnl_completion 1849 * @adapter: adapter structure 1850 * @v_opcode: opcode sent by PF 1851 * @v_retval: retval sent by PF 1852 * @msg: message sent by PF 1853 * @msglen: message length 1854 * 1855 * Asynchronous completion function for admin queue messages. Rather than busy 1856 * wait, we fire off our requests and assume that no errors will be returned. 1857 * This function handles the reply messages. 1858 **/ 1859 void iavf_virtchnl_completion(struct iavf_adapter *adapter, 1860 enum virtchnl_ops v_opcode, 1861 enum iavf_status v_retval, u8 *msg, u16 msglen) 1862 { 1863 struct net_device *netdev = adapter->netdev; 1864 1865 if (v_opcode == VIRTCHNL_OP_EVENT) { 1866 struct virtchnl_pf_event *vpe = 1867 (struct virtchnl_pf_event *)msg; 1868 bool link_up = iavf_get_vpe_link_status(adapter, vpe); 1869 1870 switch (vpe->event) { 1871 case VIRTCHNL_EVENT_LINK_CHANGE: 1872 iavf_set_adapter_link_speed_from_vpe(adapter, vpe); 1873 1874 /* we've already got the right link status, bail */ 1875 if (adapter->link_up == link_up) 1876 break; 1877 1878 if (link_up) { 1879 /* If we get link up message and start queues 1880 * before our queues are configured it will 1881 * trigger a TX hang. In that case, just ignore 1882 * the link status message,we'll get another one 1883 * after we enable queues and actually prepared 1884 * to send traffic. 1885 */ 1886 if (adapter->state != __IAVF_RUNNING) 1887 break; 1888 1889 /* For ADq enabled VF, we reconfigure VSIs and 1890 * re-allocate queues. Hence wait till all 1891 * queues are enabled. 
1892 */ 1893 if (adapter->flags & 1894 IAVF_FLAG_QUEUES_DISABLED) 1895 break; 1896 } 1897 1898 adapter->link_up = link_up; 1899 if (link_up) { 1900 netif_tx_start_all_queues(netdev); 1901 netif_carrier_on(netdev); 1902 } else { 1903 netif_tx_stop_all_queues(netdev); 1904 netif_carrier_off(netdev); 1905 } 1906 iavf_print_link_message(adapter); 1907 break; 1908 case VIRTCHNL_EVENT_RESET_IMPENDING: 1909 dev_info(&adapter->pdev->dev, "Reset indication received from the PF\n"); 1910 if (!(adapter->flags & IAVF_FLAG_RESET_PENDING)) { 1911 adapter->flags |= IAVF_FLAG_RESET_PENDING; 1912 dev_info(&adapter->pdev->dev, "Scheduling reset task\n"); 1913 queue_work(iavf_wq, &adapter->reset_task); 1914 } 1915 break; 1916 default: 1917 dev_err(&adapter->pdev->dev, "Unknown event %d from PF\n", 1918 vpe->event); 1919 break; 1920 } 1921 return; 1922 } 1923 if (v_retval) { 1924 switch (v_opcode) { 1925 case VIRTCHNL_OP_ADD_VLAN: 1926 dev_err(&adapter->pdev->dev, "Failed to add VLAN filter, error %s\n", 1927 iavf_stat_str(&adapter->hw, v_retval)); 1928 break; 1929 case VIRTCHNL_OP_ADD_ETH_ADDR: 1930 dev_err(&adapter->pdev->dev, "Failed to add MAC filter, error %s\n", 1931 iavf_stat_str(&adapter->hw, v_retval)); 1932 iavf_mac_add_reject(adapter); 1933 /* restore administratively set MAC address */ 1934 ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr); 1935 break; 1936 case VIRTCHNL_OP_DEL_VLAN: 1937 dev_err(&adapter->pdev->dev, "Failed to delete VLAN filter, error %s\n", 1938 iavf_stat_str(&adapter->hw, v_retval)); 1939 break; 1940 case VIRTCHNL_OP_DEL_ETH_ADDR: 1941 dev_err(&adapter->pdev->dev, "Failed to delete MAC filter, error %s\n", 1942 iavf_stat_str(&adapter->hw, v_retval)); 1943 break; 1944 case VIRTCHNL_OP_ENABLE_CHANNELS: 1945 dev_err(&adapter->pdev->dev, "Failed to configure queue channels, error %s\n", 1946 iavf_stat_str(&adapter->hw, v_retval)); 1947 adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED; 1948 adapter->ch_config.state = __IAVF_TC_INVALID; 1949 
netdev_reset_tc(netdev); 1950 netif_tx_start_all_queues(netdev); 1951 break; 1952 case VIRTCHNL_OP_DISABLE_CHANNELS: 1953 dev_err(&adapter->pdev->dev, "Failed to disable queue channels, error %s\n", 1954 iavf_stat_str(&adapter->hw, v_retval)); 1955 adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED; 1956 adapter->ch_config.state = __IAVF_TC_RUNNING; 1957 netif_tx_start_all_queues(netdev); 1958 break; 1959 case VIRTCHNL_OP_ADD_CLOUD_FILTER: { 1960 struct iavf_cloud_filter *cf, *cftmp; 1961 1962 list_for_each_entry_safe(cf, cftmp, 1963 &adapter->cloud_filter_list, 1964 list) { 1965 if (cf->state == __IAVF_CF_ADD_PENDING) { 1966 cf->state = __IAVF_CF_INVALID; 1967 dev_info(&adapter->pdev->dev, "Failed to add cloud filter, error %s\n", 1968 iavf_stat_str(&adapter->hw, 1969 v_retval)); 1970 iavf_print_cloud_filter(adapter, 1971 &cf->f); 1972 list_del(&cf->list); 1973 kfree(cf); 1974 adapter->num_cloud_filters--; 1975 } 1976 } 1977 } 1978 break; 1979 case VIRTCHNL_OP_DEL_CLOUD_FILTER: { 1980 struct iavf_cloud_filter *cf; 1981 1982 list_for_each_entry(cf, &adapter->cloud_filter_list, 1983 list) { 1984 if (cf->state == __IAVF_CF_DEL_PENDING) { 1985 cf->state = __IAVF_CF_ACTIVE; 1986 dev_info(&adapter->pdev->dev, "Failed to del cloud filter, error %s\n", 1987 iavf_stat_str(&adapter->hw, 1988 v_retval)); 1989 iavf_print_cloud_filter(adapter, 1990 &cf->f); 1991 } 1992 } 1993 } 1994 break; 1995 case VIRTCHNL_OP_ADD_FDIR_FILTER: { 1996 struct iavf_fdir_fltr *fdir, *fdir_tmp; 1997 1998 spin_lock_bh(&adapter->fdir_fltr_lock); 1999 list_for_each_entry_safe(fdir, fdir_tmp, 2000 &adapter->fdir_list_head, 2001 list) { 2002 if (fdir->state == IAVF_FDIR_FLTR_ADD_PENDING) { 2003 dev_info(&adapter->pdev->dev, "Failed to add Flow Director filter, error %s\n", 2004 iavf_stat_str(&adapter->hw, 2005 v_retval)); 2006 iavf_print_fdir_fltr(adapter, fdir); 2007 if (msglen) 2008 dev_err(&adapter->pdev->dev, 2009 "%s\n", msg); 2010 list_del(&fdir->list); 2011 kfree(fdir); 2012 
adapter->fdir_active_fltr--; 2013 } 2014 } 2015 spin_unlock_bh(&adapter->fdir_fltr_lock); 2016 } 2017 break; 2018 case VIRTCHNL_OP_DEL_FDIR_FILTER: { 2019 struct iavf_fdir_fltr *fdir; 2020 2021 spin_lock_bh(&adapter->fdir_fltr_lock); 2022 list_for_each_entry(fdir, &adapter->fdir_list_head, 2023 list) { 2024 if (fdir->state == IAVF_FDIR_FLTR_DEL_PENDING) { 2025 fdir->state = IAVF_FDIR_FLTR_ACTIVE; 2026 dev_info(&adapter->pdev->dev, "Failed to del Flow Director filter, error %s\n", 2027 iavf_stat_str(&adapter->hw, 2028 v_retval)); 2029 iavf_print_fdir_fltr(adapter, fdir); 2030 } 2031 } 2032 spin_unlock_bh(&adapter->fdir_fltr_lock); 2033 } 2034 break; 2035 case VIRTCHNL_OP_ADD_RSS_CFG: { 2036 struct iavf_adv_rss *rss, *rss_tmp; 2037 2038 spin_lock_bh(&adapter->adv_rss_lock); 2039 list_for_each_entry_safe(rss, rss_tmp, 2040 &adapter->adv_rss_list_head, 2041 list) { 2042 if (rss->state == IAVF_ADV_RSS_ADD_PENDING) { 2043 iavf_print_adv_rss_cfg(adapter, rss, 2044 "Failed to change the input set for", 2045 NULL); 2046 list_del(&rss->list); 2047 kfree(rss); 2048 } 2049 } 2050 spin_unlock_bh(&adapter->adv_rss_lock); 2051 } 2052 break; 2053 case VIRTCHNL_OP_DEL_RSS_CFG: { 2054 struct iavf_adv_rss *rss; 2055 2056 spin_lock_bh(&adapter->adv_rss_lock); 2057 list_for_each_entry(rss, &adapter->adv_rss_list_head, 2058 list) { 2059 if (rss->state == IAVF_ADV_RSS_DEL_PENDING) { 2060 rss->state = IAVF_ADV_RSS_ACTIVE; 2061 dev_err(&adapter->pdev->dev, "Failed to delete RSS configuration, error %s\n", 2062 iavf_stat_str(&adapter->hw, 2063 v_retval)); 2064 } 2065 } 2066 spin_unlock_bh(&adapter->adv_rss_lock); 2067 } 2068 break; 2069 case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING: 2070 dev_warn(&adapter->pdev->dev, "Changing VLAN Stripping is not allowed when Port VLAN is configured\n"); 2071 /* Vlan stripping could not be enabled by ethtool. 2072 * Disable it in netdev->features. 
2073 */ 2074 iavf_netdev_features_vlan_strip_set(netdev, false); 2075 break; 2076 case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING: 2077 dev_warn(&adapter->pdev->dev, "Changing VLAN Stripping is not allowed when Port VLAN is configured\n"); 2078 /* Vlan stripping could not be disabled by ethtool. 2079 * Enable it in netdev->features. 2080 */ 2081 iavf_netdev_features_vlan_strip_set(netdev, true); 2082 break; 2083 default: 2084 dev_err(&adapter->pdev->dev, "PF returned error %d (%s) to our request %d\n", 2085 v_retval, iavf_stat_str(&adapter->hw, v_retval), 2086 v_opcode); 2087 } 2088 } 2089 switch (v_opcode) { 2090 case VIRTCHNL_OP_ADD_ETH_ADDR: 2091 if (!v_retval) 2092 iavf_mac_add_ok(adapter); 2093 if (!ether_addr_equal(netdev->dev_addr, adapter->hw.mac.addr)) 2094 eth_hw_addr_set(netdev, adapter->hw.mac.addr); 2095 break; 2096 case VIRTCHNL_OP_GET_STATS: { 2097 struct iavf_eth_stats *stats = 2098 (struct iavf_eth_stats *)msg; 2099 netdev->stats.rx_packets = stats->rx_unicast + 2100 stats->rx_multicast + 2101 stats->rx_broadcast; 2102 netdev->stats.tx_packets = stats->tx_unicast + 2103 stats->tx_multicast + 2104 stats->tx_broadcast; 2105 netdev->stats.rx_bytes = stats->rx_bytes; 2106 netdev->stats.tx_bytes = stats->tx_bytes; 2107 netdev->stats.tx_errors = stats->tx_errors; 2108 netdev->stats.rx_dropped = stats->rx_discards; 2109 netdev->stats.tx_dropped = stats->tx_discards; 2110 adapter->current_stats = *stats; 2111 } 2112 break; 2113 case VIRTCHNL_OP_GET_VF_RESOURCES: { 2114 u16 len = sizeof(struct virtchnl_vf_resource) + 2115 IAVF_MAX_VF_VSI * 2116 sizeof(struct virtchnl_vsi_resource); 2117 memcpy(adapter->vf_res, msg, min(msglen, len)); 2118 iavf_validate_num_queues(adapter); 2119 iavf_vf_parse_hw_config(&adapter->hw, adapter->vf_res); 2120 if (is_zero_ether_addr(adapter->hw.mac.addr)) { 2121 /* restore current mac address */ 2122 ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr); 2123 } else { 2124 /* refresh current mac address if changed */ 2125 
eth_hw_addr_set(netdev, adapter->hw.mac.addr); 2126 ether_addr_copy(netdev->perm_addr, 2127 adapter->hw.mac.addr); 2128 } 2129 spin_lock_bh(&adapter->mac_vlan_list_lock); 2130 iavf_add_filter(adapter, adapter->hw.mac.addr); 2131 2132 if (VLAN_ALLOWED(adapter)) { 2133 if (!list_empty(&adapter->vlan_filter_list)) { 2134 struct iavf_vlan_filter *vlf; 2135 2136 /* re-add all VLAN filters over virtchnl */ 2137 list_for_each_entry(vlf, 2138 &adapter->vlan_filter_list, 2139 list) 2140 vlf->add = true; 2141 2142 adapter->aq_required |= 2143 IAVF_FLAG_AQ_ADD_VLAN_FILTER; 2144 } 2145 } 2146 2147 spin_unlock_bh(&adapter->mac_vlan_list_lock); 2148 2149 iavf_parse_vf_resource_msg(adapter); 2150 2151 /* negotiated VIRTCHNL_VF_OFFLOAD_VLAN_V2, so wait for the 2152 * response to VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS to finish 2153 * configuration 2154 */ 2155 if (VLAN_V2_ALLOWED(adapter)) 2156 break; 2157 /* fallthrough and finish config if VIRTCHNL_VF_OFFLOAD_VLAN_V2 2158 * wasn't successfully negotiated with the PF 2159 */ 2160 } 2161 fallthrough; 2162 case VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS: { 2163 if (v_opcode == VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS) 2164 memcpy(&adapter->vlan_v2_caps, msg, 2165 min_t(u16, msglen, 2166 sizeof(adapter->vlan_v2_caps))); 2167 2168 iavf_process_config(adapter); 2169 adapter->flags |= IAVF_FLAG_SETUP_NETDEV_FEATURES; 2170 } 2171 break; 2172 case VIRTCHNL_OP_ENABLE_QUEUES: 2173 /* enable transmits */ 2174 iavf_irq_enable(adapter, true); 2175 adapter->flags &= ~IAVF_FLAG_QUEUES_DISABLED; 2176 break; 2177 case VIRTCHNL_OP_DISABLE_QUEUES: 2178 iavf_free_all_tx_resources(adapter); 2179 iavf_free_all_rx_resources(adapter); 2180 if (adapter->state == __IAVF_DOWN_PENDING) { 2181 iavf_change_state(adapter, __IAVF_DOWN); 2182 wake_up(&adapter->down_waitqueue); 2183 } 2184 break; 2185 case VIRTCHNL_OP_VERSION: 2186 case VIRTCHNL_OP_CONFIG_IRQ_MAP: 2187 /* Don't display an error if we get these out of sequence. 
2188 * If the firmware needed to get kicked, we'll get these and 2189 * it's no problem. 2190 */ 2191 if (v_opcode != adapter->current_op) 2192 return; 2193 break; 2194 case VIRTCHNL_OP_IWARP: 2195 /* Gobble zero-length replies from the PF. They indicate that 2196 * a previous message was received OK, and the client doesn't 2197 * care about that. 2198 */ 2199 if (msglen && CLIENT_ENABLED(adapter)) 2200 iavf_notify_client_message(&adapter->vsi, msg, msglen); 2201 break; 2202 2203 case VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP: 2204 adapter->client_pending &= 2205 ~(BIT(VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP)); 2206 break; 2207 case VIRTCHNL_OP_GET_RSS_HENA_CAPS: { 2208 struct virtchnl_rss_hena *vrh = (struct virtchnl_rss_hena *)msg; 2209 2210 if (msglen == sizeof(*vrh)) 2211 adapter->hena = vrh->hena; 2212 else 2213 dev_warn(&adapter->pdev->dev, 2214 "Invalid message %d from PF\n", v_opcode); 2215 } 2216 break; 2217 case VIRTCHNL_OP_REQUEST_QUEUES: { 2218 struct virtchnl_vf_res_request *vfres = 2219 (struct virtchnl_vf_res_request *)msg; 2220 2221 if (vfres->num_queue_pairs != adapter->num_req_queues) { 2222 dev_info(&adapter->pdev->dev, 2223 "Requested %d queues, PF can support %d\n", 2224 adapter->num_req_queues, 2225 vfres->num_queue_pairs); 2226 adapter->num_req_queues = 0; 2227 adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED; 2228 } 2229 } 2230 break; 2231 case VIRTCHNL_OP_ADD_CLOUD_FILTER: { 2232 struct iavf_cloud_filter *cf; 2233 2234 list_for_each_entry(cf, &adapter->cloud_filter_list, list) { 2235 if (cf->state == __IAVF_CF_ADD_PENDING) 2236 cf->state = __IAVF_CF_ACTIVE; 2237 } 2238 } 2239 break; 2240 case VIRTCHNL_OP_DEL_CLOUD_FILTER: { 2241 struct iavf_cloud_filter *cf, *cftmp; 2242 2243 list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, 2244 list) { 2245 if (cf->state == __IAVF_CF_DEL_PENDING) { 2246 cf->state = __IAVF_CF_INVALID; 2247 list_del(&cf->list); 2248 kfree(cf); 2249 adapter->num_cloud_filters--; 2250 } 2251 } 2252 } 2253 break; 2254 case 
VIRTCHNL_OP_ADD_FDIR_FILTER: { 2255 struct virtchnl_fdir_add *add_fltr = (struct virtchnl_fdir_add *)msg; 2256 struct iavf_fdir_fltr *fdir, *fdir_tmp; 2257 2258 spin_lock_bh(&adapter->fdir_fltr_lock); 2259 list_for_each_entry_safe(fdir, fdir_tmp, 2260 &adapter->fdir_list_head, 2261 list) { 2262 if (fdir->state == IAVF_FDIR_FLTR_ADD_PENDING) { 2263 if (add_fltr->status == VIRTCHNL_FDIR_SUCCESS) { 2264 dev_info(&adapter->pdev->dev, "Flow Director filter with location %u is added\n", 2265 fdir->loc); 2266 fdir->state = IAVF_FDIR_FLTR_ACTIVE; 2267 fdir->flow_id = add_fltr->flow_id; 2268 } else { 2269 dev_info(&adapter->pdev->dev, "Failed to add Flow Director filter with status: %d\n", 2270 add_fltr->status); 2271 iavf_print_fdir_fltr(adapter, fdir); 2272 list_del(&fdir->list); 2273 kfree(fdir); 2274 adapter->fdir_active_fltr--; 2275 } 2276 } 2277 } 2278 spin_unlock_bh(&adapter->fdir_fltr_lock); 2279 } 2280 break; 2281 case VIRTCHNL_OP_DEL_FDIR_FILTER: { 2282 struct virtchnl_fdir_del *del_fltr = (struct virtchnl_fdir_del *)msg; 2283 struct iavf_fdir_fltr *fdir, *fdir_tmp; 2284 2285 spin_lock_bh(&adapter->fdir_fltr_lock); 2286 list_for_each_entry_safe(fdir, fdir_tmp, &adapter->fdir_list_head, 2287 list) { 2288 if (fdir->state == IAVF_FDIR_FLTR_DEL_PENDING) { 2289 if (del_fltr->status == VIRTCHNL_FDIR_SUCCESS) { 2290 dev_info(&adapter->pdev->dev, "Flow Director filter with location %u is deleted\n", 2291 fdir->loc); 2292 list_del(&fdir->list); 2293 kfree(fdir); 2294 adapter->fdir_active_fltr--; 2295 } else { 2296 fdir->state = IAVF_FDIR_FLTR_ACTIVE; 2297 dev_info(&adapter->pdev->dev, "Failed to delete Flow Director filter with status: %d\n", 2298 del_fltr->status); 2299 iavf_print_fdir_fltr(adapter, fdir); 2300 } 2301 } 2302 } 2303 spin_unlock_bh(&adapter->fdir_fltr_lock); 2304 } 2305 break; 2306 case VIRTCHNL_OP_ADD_RSS_CFG: { 2307 struct iavf_adv_rss *rss; 2308 2309 spin_lock_bh(&adapter->adv_rss_lock); 2310 list_for_each_entry(rss, &adapter->adv_rss_list_head, list) { 
2311 if (rss->state == IAVF_ADV_RSS_ADD_PENDING) { 2312 iavf_print_adv_rss_cfg(adapter, rss, 2313 "Input set change for", 2314 "successful"); 2315 rss->state = IAVF_ADV_RSS_ACTIVE; 2316 } 2317 } 2318 spin_unlock_bh(&adapter->adv_rss_lock); 2319 } 2320 break; 2321 case VIRTCHNL_OP_DEL_RSS_CFG: { 2322 struct iavf_adv_rss *rss, *rss_tmp; 2323 2324 spin_lock_bh(&adapter->adv_rss_lock); 2325 list_for_each_entry_safe(rss, rss_tmp, 2326 &adapter->adv_rss_list_head, list) { 2327 if (rss->state == IAVF_ADV_RSS_DEL_PENDING) { 2328 list_del(&rss->list); 2329 kfree(rss); 2330 } 2331 } 2332 spin_unlock_bh(&adapter->adv_rss_lock); 2333 } 2334 break; 2335 case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING: 2336 /* PF enabled vlan strip on this VF. 2337 * Update netdev->features if needed to be in sync with ethtool. 2338 */ 2339 if (!v_retval) 2340 iavf_netdev_features_vlan_strip_set(netdev, true); 2341 break; 2342 case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING: 2343 /* PF disabled vlan strip on this VF. 2344 * Update netdev->features if needed to be in sync with ethtool. 2345 */ 2346 if (!v_retval) 2347 iavf_netdev_features_vlan_strip_set(netdev, false); 2348 break; 2349 default: 2350 if (adapter->current_op && (v_opcode != adapter->current_op)) 2351 dev_warn(&adapter->pdev->dev, "Expected response %d from PF, received %d\n", 2352 adapter->current_op, v_opcode); 2353 break; 2354 } /* switch v_opcode */ 2355 adapter->current_op = VIRTCHNL_OP_UNKNOWN; 2356 } 2357