1 // SPDX-License-Identifier: GPL-2.0 2 /* Copyright(c) 2013 - 2018 Intel Corporation. */ 3 4 #include "iavf.h" 5 #include "iavf_prototype.h" 6 #include "iavf_client.h" 7 8 /** 9 * iavf_send_pf_msg 10 * @adapter: adapter structure 11 * @op: virtual channel opcode 12 * @msg: pointer to message buffer 13 * @len: message length 14 * 15 * Send message to PF and print status if failure. 16 **/ 17 static int iavf_send_pf_msg(struct iavf_adapter *adapter, 18 enum virtchnl_ops op, u8 *msg, u16 len) 19 { 20 struct iavf_hw *hw = &adapter->hw; 21 enum iavf_status status; 22 23 if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) 24 return 0; /* nothing to see here, move along */ 25 26 status = iavf_aq_send_msg_to_pf(hw, op, 0, msg, len, NULL); 27 if (status) 28 dev_dbg(&adapter->pdev->dev, "Unable to send opcode %d to PF, status %s, aq_err %s\n", 29 op, iavf_stat_str(hw, status), 30 iavf_aq_str(hw, hw->aq.asq_last_status)); 31 return iavf_status_to_errno(status); 32 } 33 34 /** 35 * iavf_send_api_ver 36 * @adapter: adapter structure 37 * 38 * Send API version admin queue message to the PF. The reply is not checked 39 * in this function. Returns 0 if the message was successfully 40 * sent, or one of the IAVF_ADMIN_QUEUE_ERROR_ statuses if not. 41 **/ 42 int iavf_send_api_ver(struct iavf_adapter *adapter) 43 { 44 struct virtchnl_version_info vvi; 45 46 vvi.major = VIRTCHNL_VERSION_MAJOR; 47 vvi.minor = VIRTCHNL_VERSION_MINOR; 48 49 return iavf_send_pf_msg(adapter, VIRTCHNL_OP_VERSION, (u8 *)&vvi, 50 sizeof(vvi)); 51 } 52 53 /** 54 * iavf_poll_virtchnl_msg 55 * @hw: HW configuration structure 56 * @event: event to populate on success 57 * @op_to_poll: requested virtchnl op to poll for 58 * 59 * Initialize poll for virtchnl msg matching the requested_op. Returns 0 60 * if a message of the correct opcode is in the queue or an error code 61 * if no message matching the op code is waiting and other failures. 
 */
static int
iavf_poll_virtchnl_msg(struct iavf_hw *hw, struct iavf_arq_event_info *event,
		       enum virtchnl_ops op_to_poll)
{
	enum virtchnl_ops received_op;
	enum iavf_status status;
	u32 v_retval;

	while (1) {
		/* When the AQ is empty, iavf_clean_arq_element will return
		 * nonzero and this loop will terminate.
		 */
		status = iavf_clean_arq_element(hw, event, NULL);
		if (status != IAVF_SUCCESS)
			return iavf_status_to_errno(status);
		received_op =
			(enum virtchnl_ops)le32_to_cpu(event->desc.cookie_high);
		/* NOTE(review): events with a non-matching opcode are
		 * dropped on the floor here without any processing.
		 */
		if (op_to_poll == received_op)
			break;
	}

	/* cookie_low carries the PF's virtchnl status for this request */
	v_retval = le32_to_cpu(event->desc.cookie_low);
	return virtchnl_status_to_errno((enum virtchnl_status_code)v_retval);
}

/**
 * iavf_verify_api_ver
 * @adapter: adapter structure
 *
 * Compare API versions with the PF. Must be called after admin queue is
 * initialized. Returns 0 if API versions match, -EIO if they do not,
 * IAVF_ERR_ADMIN_QUEUE_NO_WORK if the admin queue is empty, and any errors
 * from the firmware are propagated.
 **/
int iavf_verify_api_ver(struct iavf_adapter *adapter)
{
	struct iavf_arq_event_info event;
	int err;

	event.buf_len = IAVF_MAX_AQ_BUF_SIZE;
	event.msg_buf = kzalloc(IAVF_MAX_AQ_BUF_SIZE, GFP_KERNEL);
	if (!event.msg_buf)
		return -ENOMEM;

	err = iavf_poll_virtchnl_msg(&adapter->hw, &event, VIRTCHNL_OP_VERSION);
	if (!err) {
		struct virtchnl_version_info *pf_vvi =
			(struct virtchnl_version_info *)event.msg_buf;
		/* cache the PF's version before checking compatibility */
		adapter->pf_version = *pf_vvi;

		/* reject a PF that is strictly newer than this VF */
		if (pf_vvi->major > VIRTCHNL_VERSION_MAJOR ||
		    (pf_vvi->major == VIRTCHNL_VERSION_MAJOR &&
		     pf_vvi->minor > VIRTCHNL_VERSION_MINOR))
			err = -EIO;
	}

	kfree(event.msg_buf);

	return err;
}

/**
 * iavf_send_vf_config_msg
 * @adapter: adapter structure
 *
 * Send VF configuration request admin queue message to the PF. The reply
 * is not checked in this function.
Returns 0 if the message was 130 * successfully sent, or one of the IAVF_ADMIN_QUEUE_ERROR_ statuses if not. 131 **/ 132 int iavf_send_vf_config_msg(struct iavf_adapter *adapter) 133 { 134 u32 caps; 135 136 caps = VIRTCHNL_VF_OFFLOAD_L2 | 137 VIRTCHNL_VF_OFFLOAD_RSS_PF | 138 VIRTCHNL_VF_OFFLOAD_RSS_AQ | 139 VIRTCHNL_VF_OFFLOAD_RSS_REG | 140 VIRTCHNL_VF_OFFLOAD_VLAN | 141 VIRTCHNL_VF_OFFLOAD_WB_ON_ITR | 142 VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 | 143 VIRTCHNL_VF_OFFLOAD_ENCAP | 144 VIRTCHNL_VF_OFFLOAD_VLAN_V2 | 145 VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM | 146 VIRTCHNL_VF_OFFLOAD_REQ_QUEUES | 147 VIRTCHNL_VF_OFFLOAD_ADQ | 148 VIRTCHNL_VF_OFFLOAD_USO | 149 VIRTCHNL_VF_OFFLOAD_FDIR_PF | 150 VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF | 151 VIRTCHNL_VF_CAP_ADV_LINK_SPEED; 152 153 adapter->current_op = VIRTCHNL_OP_GET_VF_RESOURCES; 154 adapter->aq_required &= ~IAVF_FLAG_AQ_GET_CONFIG; 155 if (PF_IS_V11(adapter)) 156 return iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_VF_RESOURCES, 157 (u8 *)&caps, sizeof(caps)); 158 else 159 return iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_VF_RESOURCES, 160 NULL, 0); 161 } 162 163 int iavf_send_vf_offload_vlan_v2_msg(struct iavf_adapter *adapter) 164 { 165 adapter->aq_required &= ~IAVF_FLAG_AQ_GET_OFFLOAD_VLAN_V2_CAPS; 166 167 if (!VLAN_V2_ALLOWED(adapter)) 168 return -EOPNOTSUPP; 169 170 adapter->current_op = VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS; 171 172 return iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS, 173 NULL, 0); 174 } 175 176 /** 177 * iavf_validate_num_queues 178 * @adapter: adapter structure 179 * 180 * Validate that the number of queues the PF has sent in 181 * VIRTCHNL_OP_GET_VF_RESOURCES is not larger than the VF can handle. 
182 **/ 183 static void iavf_validate_num_queues(struct iavf_adapter *adapter) 184 { 185 if (adapter->vf_res->num_queue_pairs > IAVF_MAX_REQ_QUEUES) { 186 struct virtchnl_vsi_resource *vsi_res; 187 int i; 188 189 dev_info(&adapter->pdev->dev, "Received %d queues, but can only have a max of %d\n", 190 adapter->vf_res->num_queue_pairs, 191 IAVF_MAX_REQ_QUEUES); 192 dev_info(&adapter->pdev->dev, "Fixing by reducing queues to %d\n", 193 IAVF_MAX_REQ_QUEUES); 194 adapter->vf_res->num_queue_pairs = IAVF_MAX_REQ_QUEUES; 195 for (i = 0; i < adapter->vf_res->num_vsis; i++) { 196 vsi_res = &adapter->vf_res->vsi_res[i]; 197 vsi_res->num_queue_pairs = IAVF_MAX_REQ_QUEUES; 198 } 199 } 200 } 201 202 /** 203 * iavf_get_vf_config 204 * @adapter: private adapter structure 205 * 206 * Get VF configuration from PF and populate hw structure. Must be called after 207 * admin queue is initialized. Busy waits until response is received from PF, 208 * with maximum timeout. Response from PF is returned in the buffer for further 209 * processing by the caller. 
 **/
int iavf_get_vf_config(struct iavf_adapter *adapter)
{
	struct iavf_hw *hw = &adapter->hw;
	struct iavf_arq_event_info event;
	u16 len;
	int err;

	/* room for the resource struct plus the maximum number of VSIs */
	len = sizeof(struct virtchnl_vf_resource) +
		IAVF_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource);
	event.buf_len = len;
	event.msg_buf = kzalloc(len, GFP_KERNEL);
	if (!event.msg_buf)
		return -ENOMEM;

	err = iavf_poll_virtchnl_msg(hw, &event, VIRTCHNL_OP_GET_VF_RESOURCES);
	/* NOTE(review): the copy happens even when err is nonzero; msg_len
	 * is clamped so we never read past our own buffer.
	 */
	memcpy(adapter->vf_res, event.msg_buf, min(event.msg_len, len));

	/* some PFs send more queues than we should have so validate that
	 * we aren't getting too many queues
	 */
	if (!err)
		iavf_validate_num_queues(adapter);
	iavf_vf_parse_hw_config(hw, adapter->vf_res);

	kfree(event.msg_buf);

	return err;
}

/**
 * iavf_get_vf_vlan_v2_caps - poll for VLAN V2 capability reply from the PF
 * @adapter: adapter structure
 *
 * Wait for the PF's reply to VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS and cache
 * the capabilities in adapter->vlan_v2_caps on success. Returns 0 on success
 * or a negative errno.
 */
int iavf_get_vf_vlan_v2_caps(struct iavf_adapter *adapter)
{
	struct iavf_arq_event_info event;
	int err;
	u16 len;

	len = sizeof(struct virtchnl_vlan_caps);
	event.buf_len = len;
	event.msg_buf = kzalloc(len, GFP_KERNEL);
	if (!event.msg_buf)
		return -ENOMEM;

	err = iavf_poll_virtchnl_msg(&adapter->hw, &event,
				     VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS);
	/* unlike iavf_get_vf_config(), only copy on success here */
	if (!err)
		memcpy(&adapter->vlan_v2_caps, event.msg_buf,
		       min(event.msg_len, len));

	kfree(event.msg_buf);

	return err;
}

/**
 * iavf_configure_queues
 * @adapter: adapter structure
 *
 * Request that the PF set up our (previously allocated) queues.
 **/
void iavf_configure_queues(struct iavf_adapter *adapter)
{
	struct virtchnl_vsi_queue_config_info *vqci;
	struct virtchnl_queue_pair_info *vqpi;
	int pairs = adapter->num_active_queues;
	int i, max_frame = IAVF_MAX_RXBUFFER;
	size_t len;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot configure queues, command %d pending\n",
			adapter->current_op);
		return;
	}
	adapter->current_op = VIRTCHNL_OP_CONFIG_VSI_QUEUES;
	len = struct_size(vqci, qpair, pairs);
	vqci = kzalloc(len, GFP_KERNEL);
	if (!vqci)
		return;

	/* Limit maximum frame size when jumbo frames is not enabled */
	if (!(adapter->flags & IAVF_FLAG_LEGACY_RX) &&
	    (adapter->netdev->mtu <= ETH_DATA_LEN))
		max_frame = IAVF_RXBUFFER_1536 - NET_IP_ALIGN;

	vqci->vsi_id = adapter->vsi_res->vsi_id;
	vqci->num_queue_pairs = pairs;
	vqpi = vqci->qpair;
	/* Size check is not needed here - HW max is 16 queue pairs, and we
	 * can fit info for 31 of them into the AQ buffer before it overflows.
	 */
	for (i = 0; i < pairs; i++) {
		/* describe one tx/rx pair per element, rings indexed by i */
		vqpi->txq.vsi_id = vqci->vsi_id;
		vqpi->txq.queue_id = i;
		vqpi->txq.ring_len = adapter->tx_rings[i].count;
		vqpi->txq.dma_ring_addr = adapter->tx_rings[i].dma;
		vqpi->rxq.vsi_id = vqci->vsi_id;
		vqpi->rxq.queue_id = i;
		vqpi->rxq.ring_len = adapter->rx_rings[i].count;
		vqpi->rxq.dma_ring_addr = adapter->rx_rings[i].dma;
		vqpi->rxq.max_pkt_size = max_frame;
		/* round the buffer size up as the RXQ context requires */
		vqpi->rxq.databuffer_size =
			ALIGN(adapter->rx_rings[i].rx_buf_len,
			      BIT_ULL(IAVF_RXQ_CTX_DBUFF_SHIFT));
		vqpi++;
	}

	adapter->aq_required &= ~IAVF_FLAG_AQ_CONFIGURE_QUEUES;
	iavf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
			 (u8 *)vqci, len);
	kfree(vqci);
}

/**
 * iavf_enable_queues
 * @adapter: adapter structure
 *
 * Request that the PF enable all of our queues.
 **/
void iavf_enable_queues(struct iavf_adapter *adapter)
{
	struct virtchnl_queue_select vqs;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot enable queues, command %d pending\n",
			adapter->current_op);
		return;
	}
	adapter->current_op = VIRTCHNL_OP_ENABLE_QUEUES;
	vqs.vsi_id = adapter->vsi_res->vsi_id;
	/* bitmap with one bit set per active queue */
	vqs.tx_queues = BIT(adapter->num_active_queues) - 1;
	vqs.rx_queues = vqs.tx_queues;
	adapter->aq_required &= ~IAVF_FLAG_AQ_ENABLE_QUEUES;
	iavf_send_pf_msg(adapter, VIRTCHNL_OP_ENABLE_QUEUES,
			 (u8 *)&vqs, sizeof(vqs));
}

/**
 * iavf_disable_queues
 * @adapter: adapter structure
 *
 * Request that the PF disable all of our queues.
 **/
void iavf_disable_queues(struct iavf_adapter *adapter)
{
	struct virtchnl_queue_select vqs;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot disable queues, command %d pending\n",
			adapter->current_op);
		return;
	}
	adapter->current_op = VIRTCHNL_OP_DISABLE_QUEUES;
	vqs.vsi_id = adapter->vsi_res->vsi_id;
	/* bitmap with one bit set per active queue */
	vqs.tx_queues = BIT(adapter->num_active_queues) - 1;
	vqs.rx_queues = vqs.tx_queues;
	adapter->aq_required &= ~IAVF_FLAG_AQ_DISABLE_QUEUES;
	iavf_send_pf_msg(adapter, VIRTCHNL_OP_DISABLE_QUEUES,
			 (u8 *)&vqs, sizeof(vqs));
}

/**
 * iavf_map_queues
 * @adapter: adapter structure
 *
 * Request that the PF map queues to interrupt vectors. Misc causes, including
 * admin queue, are always mapped to vector 0.
 **/
void iavf_map_queues(struct iavf_adapter *adapter)
{
	struct virtchnl_irq_map_info *vimi;
	struct virtchnl_vector_map *vecmap;
	struct iavf_q_vector *q_vector;
	int v_idx, q_vectors;
	size_t len;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot map queues to vectors, command %d pending\n",
			adapter->current_op);
		return;
	}
	adapter->current_op = VIRTCHNL_OP_CONFIG_IRQ_MAP;

	/* vectors available for queue traffic, excluding the misc vector */
	q_vectors = adapter->num_msix_vectors - NONQ_VECS;

	len = struct_size(vimi, vecmap, adapter->num_msix_vectors);
	vimi = kzalloc(len, GFP_KERNEL);
	if (!vimi)
		return;

	vimi->num_vectors = adapter->num_msix_vectors;
	/* Queue vectors first */
	for (v_idx = 0; v_idx < q_vectors; v_idx++) {
		q_vector = &adapter->q_vectors[v_idx];
		vecmap = &vimi->vecmap[v_idx];

		vecmap->vsi_id = adapter->vsi_res->vsi_id;
		/* vector 0 is reserved for misc, so queue vectors start at 1 */
		vecmap->vector_id = v_idx + NONQ_VECS;
		vecmap->txq_map = q_vector->ring_mask;
		vecmap->rxq_map = q_vector->ring_mask;
		vecmap->rxitr_idx = IAVF_RX_ITR;
		vecmap->txitr_idx = IAVF_TX_ITR;
	}
	/* Misc vector last - this is only for AdminQ messages */
	vecmap = &vimi->vecmap[v_idx];
	vecmap->vsi_id = adapter->vsi_res->vsi_id;
	vecmap->vector_id = 0;
	vecmap->txq_map = 0;
	vecmap->rxq_map = 0;

	adapter->aq_required &= ~IAVF_FLAG_AQ_MAP_VECTORS;
	iavf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_IRQ_MAP,
			 (u8 *)vimi, len);
	kfree(vimi);
}

/**
 * iavf_set_mac_addr_type - Set the correct request type from the filter type
 * @virtchnl_ether_addr: pointer to requested list element
 * @filter: pointer to requested filter
 **/
static void
iavf_set_mac_addr_type(struct virtchnl_ether_addr *virtchnl_ether_addr,
		       const struct iavf_mac_filter *filter)
{
	virtchnl_ether_addr->type = filter->is_primary ?
		VIRTCHNL_ETHER_ADDR_PRIMARY :
		VIRTCHNL_ETHER_ADDR_EXTRA;
}

/**
 * iavf_add_ether_addrs
 * @adapter: adapter structure
 *
 * Request that the PF add one or more addresses to our filters.
 **/
void iavf_add_ether_addrs(struct iavf_adapter *adapter)
{
	struct virtchnl_ether_addr_list *veal;
	struct iavf_mac_filter *f;
	int i = 0, count = 0;
	bool more = false;
	size_t len;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot add filters, command %d pending\n",
			adapter->current_op);
		return;
	}

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	/* first pass: count pending additions to size the message */
	list_for_each_entry(f, &adapter->mac_filter_list, list) {
		if (f->add)
			count++;
	}
	if (!count) {
		adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_MAC_FILTER;
		spin_unlock_bh(&adapter->mac_vlan_list_lock);
		return;
	}
	adapter->current_op = VIRTCHNL_OP_ADD_ETH_ADDR;

	len = struct_size(veal, list, count);
	if (len > IAVF_MAX_AQ_BUF_SIZE) {
		/* cap the batch to what fits in one AQ message; leftovers go
		 * in a later request because the aq_required bit stays set
		 */
		dev_warn(&adapter->pdev->dev, "Too many add MAC changes in one request\n");
		count = (IAVF_MAX_AQ_BUF_SIZE -
			 sizeof(struct virtchnl_ether_addr_list)) /
			sizeof(struct virtchnl_ether_addr);
		len = struct_size(veal, list, count);
		more = true;
	}

	/* GFP_ATOMIC: allocating while holding the filter spinlock */
	veal = kzalloc(len, GFP_ATOMIC);
	if (!veal) {
		spin_unlock_bh(&adapter->mac_vlan_list_lock);
		return;
	}

	veal->vsi_id = adapter->vsi_res->vsi_id;
	veal->num_elements = count;
	/* second pass: fill the message and clear the add flags */
	list_for_each_entry(f, &adapter->mac_filter_list, list) {
		if (f->add) {
			ether_addr_copy(veal->list[i].addr, f->macaddr);
			iavf_set_mac_addr_type(&veal->list[i], f);
			i++;
			f->add = false;
			if (i == count)
				break;
		}
	}
	if (!more)
		adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_MAC_FILTER;

	spin_unlock_bh(&adapter->mac_vlan_list_lock);

	iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_ETH_ADDR, (u8 *)veal, len);
	kfree(veal);
}

/**
 * iavf_del_ether_addrs
 * @adapter: adapter structure
 *
 * Request that the PF remove one or more addresses from our filters.
 **/
void iavf_del_ether_addrs(struct iavf_adapter *adapter)
{
	struct virtchnl_ether_addr_list *veal;
	struct iavf_mac_filter *f, *ftmp;
	int i = 0, count = 0;
	bool more = false;
	size_t len;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot remove filters, command %d pending\n",
			adapter->current_op);
		return;
	}

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	/* first pass: count pending removals to size the message */
	list_for_each_entry(f, &adapter->mac_filter_list, list) {
		if (f->remove)
			count++;
	}
	if (!count) {
		adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_MAC_FILTER;
		spin_unlock_bh(&adapter->mac_vlan_list_lock);
		return;
	}
	adapter->current_op = VIRTCHNL_OP_DEL_ETH_ADDR;

	len = struct_size(veal, list, count);
	if (len > IAVF_MAX_AQ_BUF_SIZE) {
		/* cap the batch to one AQ message; the rest is sent later
		 * since the aq_required bit is left set (more == true)
		 */
		dev_warn(&adapter->pdev->dev, "Too many delete MAC changes in one request\n");
		count = (IAVF_MAX_AQ_BUF_SIZE -
			 sizeof(struct virtchnl_ether_addr_list)) /
			sizeof(struct virtchnl_ether_addr);
		len = struct_size(veal, list, count);
		more = true;
	}
	/* GFP_ATOMIC: allocating while holding the filter spinlock */
	veal = kzalloc(len, GFP_ATOMIC);
	if (!veal) {
		spin_unlock_bh(&adapter->mac_vlan_list_lock);
		return;
	}

	veal->vsi_id = adapter->vsi_res->vsi_id;
	veal->num_elements = count;
	/* second pass: fill the message and free the list entries; _safe
	 * iteration is required because entries are deleted while walking
	 */
	list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
		if (f->remove) {
			ether_addr_copy(veal->list[i].addr, f->macaddr);
			iavf_set_mac_addr_type(&veal->list[i], f);
			i++;
			list_del(&f->list);
			kfree(f);
			if (i == count)
				break;
		}
	}
	if (!more)
		adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_MAC_FILTER;

	spin_unlock_bh(&adapter->mac_vlan_list_lock);

	iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_ETH_ADDR, (u8 *)veal, len);
	kfree(veal);
}

/**
 * iavf_mac_add_ok
 * @adapter: adapter structure
 *
 * Submit list of filters based on PF
response. 589 **/ 590 static void iavf_mac_add_ok(struct iavf_adapter *adapter) 591 { 592 struct iavf_mac_filter *f, *ftmp; 593 594 spin_lock_bh(&adapter->mac_vlan_list_lock); 595 list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) { 596 f->is_new_mac = false; 597 if (!f->add && !f->add_handled) 598 f->add_handled = true; 599 } 600 spin_unlock_bh(&adapter->mac_vlan_list_lock); 601 } 602 603 /** 604 * iavf_mac_add_reject 605 * @adapter: adapter structure 606 * 607 * Remove filters from list based on PF response. 608 **/ 609 static void iavf_mac_add_reject(struct iavf_adapter *adapter) 610 { 611 struct net_device *netdev = adapter->netdev; 612 struct iavf_mac_filter *f, *ftmp; 613 614 spin_lock_bh(&adapter->mac_vlan_list_lock); 615 list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) { 616 if (f->remove && ether_addr_equal(f->macaddr, netdev->dev_addr)) 617 f->remove = false; 618 619 if (!f->add && !f->add_handled) 620 f->add_handled = true; 621 622 if (f->is_new_mac) { 623 list_del(&f->list); 624 kfree(f); 625 } 626 } 627 spin_unlock_bh(&adapter->mac_vlan_list_lock); 628 } 629 630 /** 631 * iavf_add_vlans 632 * @adapter: adapter structure 633 * 634 * Request that the PF add one or more VLAN filters to our VSI. 
 **/
void iavf_add_vlans(struct iavf_adapter *adapter)
{
	int len, i = 0, count = 0;
	struct iavf_vlan_filter *f;
	bool more = false;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot add VLANs, command %d pending\n",
			adapter->current_op);
		return;
	}

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	/* count pending additions to size the message */
	list_for_each_entry(f, &adapter->vlan_filter_list, list) {
		if (f->add)
			count++;
	}
	if (!count || !VLAN_FILTERING_ALLOWED(adapter)) {
		adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_VLAN_FILTER;
		spin_unlock_bh(&adapter->mac_vlan_list_lock);
		return;
	}

	/* legacy (v1) VLAN capability takes a simple list of VIDs;
	 * otherwise use the V2 message with explicit TPID/outer/inner
	 */
	if (VLAN_ALLOWED(adapter)) {
		struct virtchnl_vlan_filter_list *vvfl;

		adapter->current_op = VIRTCHNL_OP_ADD_VLAN;

		len = sizeof(*vvfl) + (count * sizeof(u16));
		if (len > IAVF_MAX_AQ_BUF_SIZE) {
			/* cap to one AQ message; leftovers are sent later
			 * because the aq_required bit stays set
			 */
			dev_warn(&adapter->pdev->dev, "Too many add VLAN changes in one request\n");
			count = (IAVF_MAX_AQ_BUF_SIZE - sizeof(*vvfl)) /
				sizeof(u16);
			len = sizeof(*vvfl) + (count * sizeof(u16));
			more = true;
		}
		/* GFP_ATOMIC: allocating under the filter spinlock */
		vvfl = kzalloc(len, GFP_ATOMIC);
		if (!vvfl) {
			spin_unlock_bh(&adapter->mac_vlan_list_lock);
			return;
		}

		vvfl->vsi_id = adapter->vsi_res->vsi_id;
		vvfl->num_elements = count;
		list_for_each_entry(f, &adapter->vlan_filter_list, list) {
			if (f->add) {
				vvfl->vlan_id[i] = f->vlan.vid;
				i++;
				f->add = false;
				if (i == count)
					break;
			}
		}
		if (!more)
			adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_VLAN_FILTER;

		spin_unlock_bh(&adapter->mac_vlan_list_lock);

		iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_VLAN, (u8 *)vvfl, len);
		kfree(vvfl);
	} else {
		struct virtchnl_vlan_filter_list_v2 *vvfl_v2;

		adapter->current_op = VIRTCHNL_OP_ADD_VLAN_V2;

		/* struct already contains one filter, hence count - 1 */
		len = sizeof(*vvfl_v2) + ((count - 1) *
					  sizeof(struct virtchnl_vlan_filter));
		if (len > IAVF_MAX_AQ_BUF_SIZE) {
			dev_warn(&adapter->pdev->dev, "Too many add VLAN changes in one request\n");
			count = (IAVF_MAX_AQ_BUF_SIZE - sizeof(*vvfl_v2)) /
				sizeof(struct virtchnl_vlan_filter);
			len = sizeof(*vvfl_v2) +
				((count - 1) *
				 sizeof(struct virtchnl_vlan_filter));
			more = true;
		}

		/* GFP_ATOMIC: allocating under the filter spinlock */
		vvfl_v2 = kzalloc(len, GFP_ATOMIC);
		if (!vvfl_v2) {
			spin_unlock_bh(&adapter->mac_vlan_list_lock);
			return;
		}

		vvfl_v2->vport_id = adapter->vsi_res->vsi_id;
		vvfl_v2->num_elements = count;
		list_for_each_entry(f, &adapter->vlan_filter_list, list) {
			if (f->add) {
				struct virtchnl_vlan_supported_caps *filtering_support =
					&adapter->vlan_v2_caps.filtering.filtering_support;
				struct virtchnl_vlan *vlan;

				/* give priority over outer if it's enabled */
				if (filtering_support->outer)
					vlan = &vvfl_v2->filters[i].outer;
				else
					vlan = &vvfl_v2->filters[i].inner;

				vlan->tci = f->vlan.vid;
				vlan->tpid = f->vlan.tpid;

				i++;
				f->add = false;
				if (i == count)
					break;
			}
		}

		if (!more)
			adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_VLAN_FILTER;

		spin_unlock_bh(&adapter->mac_vlan_list_lock);

		iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_VLAN_V2,
				 (u8 *)vvfl_v2, len);
		kfree(vvfl_v2);
	}
}

/**
 * iavf_del_vlans
 * @adapter: adapter structure
 *
 * Request that the PF remove one or more VLAN filters from our VSI.
 **/
void iavf_del_vlans(struct iavf_adapter *adapter)
{
	struct iavf_vlan_filter *f, *ftmp;
	int len, i = 0, count = 0;
	bool more = false;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot remove VLANs, command %d pending\n",
			adapter->current_op);
		return;
	}

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) {
		/* since VLAN capabilities are not allowed, we dont want to send
		 * a VLAN delete request because it will most likely fail and
		 * create unnecessary errors/noise, so just free the VLAN
		 * filters marked for removal to enable bailing out before
		 * sending a virtchnl message
		 */
		if (f->remove && !VLAN_FILTERING_ALLOWED(adapter)) {
			list_del(&f->list);
			kfree(f);
		} else if (f->remove) {
			count++;
		}
	}
	if (!count || !VLAN_FILTERING_ALLOWED(adapter)) {
		adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_VLAN_FILTER;
		spin_unlock_bh(&adapter->mac_vlan_list_lock);
		return;
	}

	/* legacy (v1) VLAN capability takes a simple list of VIDs;
	 * otherwise use the V2 message with explicit TPID/outer/inner
	 */
	if (VLAN_ALLOWED(adapter)) {
		struct virtchnl_vlan_filter_list *vvfl;

		adapter->current_op = VIRTCHNL_OP_DEL_VLAN;

		len = sizeof(*vvfl) + (count * sizeof(u16));
		if (len > IAVF_MAX_AQ_BUF_SIZE) {
			/* cap to one AQ message; leftovers are sent later
			 * because the aq_required bit stays set
			 */
			dev_warn(&adapter->pdev->dev, "Too many delete VLAN changes in one request\n");
			count = (IAVF_MAX_AQ_BUF_SIZE - sizeof(*vvfl)) /
				sizeof(u16);
			len = sizeof(*vvfl) + (count * sizeof(u16));
			more = true;
		}
		/* GFP_ATOMIC: allocating under the filter spinlock */
		vvfl = kzalloc(len, GFP_ATOMIC);
		if (!vvfl) {
			spin_unlock_bh(&adapter->mac_vlan_list_lock);
			return;
		}

		vvfl->vsi_id = adapter->vsi_res->vsi_id;
		vvfl->num_elements = count;
		/* _safe iteration: entries are freed while walking */
		list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) {
			if (f->remove) {
				vvfl->vlan_id[i] = f->vlan.vid;
				i++;
				list_del(&f->list);
				kfree(f);
				if (i == count)
					break;
			}
		}

		if (!more)
			adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_VLAN_FILTER;

		spin_unlock_bh(&adapter->mac_vlan_list_lock);

		iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_VLAN, (u8 *)vvfl, len);
		kfree(vvfl);
	} else {
		struct virtchnl_vlan_filter_list_v2 *vvfl_v2;

		adapter->current_op = VIRTCHNL_OP_DEL_VLAN_V2;

		/* struct already contains one filter, hence count - 1 */
		len = sizeof(*vvfl_v2) +
			((count - 1) * sizeof(struct virtchnl_vlan_filter));
		if (len > IAVF_MAX_AQ_BUF_SIZE) {
			dev_warn(&adapter->pdev->dev, "Too many add VLAN changes in one request\n");
			count = (IAVF_MAX_AQ_BUF_SIZE -
				 sizeof(*vvfl_v2)) /
				sizeof(struct virtchnl_vlan_filter);
			len = sizeof(*vvfl_v2) +
				((count - 1) *
				 sizeof(struct virtchnl_vlan_filter));
			more = true;
		}

		/* GFP_ATOMIC: allocating under the filter spinlock */
		vvfl_v2 = kzalloc(len, GFP_ATOMIC);
		if (!vvfl_v2) {
			spin_unlock_bh(&adapter->mac_vlan_list_lock);
			return;
		}

		vvfl_v2->vport_id = adapter->vsi_res->vsi_id;
		vvfl_v2->num_elements = count;
		/* _safe iteration: entries are freed while walking */
		list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) {
			if (f->remove) {
				struct virtchnl_vlan_supported_caps *filtering_support =
					&adapter->vlan_v2_caps.filtering.filtering_support;
				struct virtchnl_vlan *vlan;

				/* give priority over outer if it's enabled */
				if (filtering_support->outer)
					vlan = &vvfl_v2->filters[i].outer;
				else
					vlan = &vvfl_v2->filters[i].inner;

				vlan->tci = f->vlan.vid;
				vlan->tpid = f->vlan.tpid;

				list_del(&f->list);
				kfree(f);
				i++;
				if (i == count)
					break;
			}
		}

		if (!more)
			adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_VLAN_FILTER;

		spin_unlock_bh(&adapter->mac_vlan_list_lock);

		iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_VLAN_V2,
				 (u8 *)vvfl_v2, len);
		kfree(vvfl_v2);
	}
}

/**
 * iavf_set_promiscuous
 * @adapter: adapter structure
 * @flags: bitmask to control unicast/multicast promiscuous.
 *
 * Request that the PF enable promiscuous mode for our VSI.
 **/
void iavf_set_promiscuous(struct iavf_adapter *adapter, int flags)
{
	struct virtchnl_promisc_info vpi;
	int promisc_all;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot set promiscuous mode, command %d pending\n",
			adapter->current_op);
		return;
	}

	/* both unicast and multicast bits set means full promiscuous */
	promisc_all = FLAG_VF_UNICAST_PROMISC |
		      FLAG_VF_MULTICAST_PROMISC;
	if ((flags & promisc_all) == promisc_all) {
		adapter->flags |= IAVF_FLAG_PROMISC_ON;
		adapter->aq_required &= ~IAVF_FLAG_AQ_REQUEST_PROMISC;
		dev_info(&adapter->pdev->dev, "Entering promiscuous mode\n");
	}

	if (flags & FLAG_VF_MULTICAST_PROMISC) {
		adapter->flags |= IAVF_FLAG_ALLMULTI_ON;
		adapter->aq_required &= ~IAVF_FLAG_AQ_REQUEST_ALLMULTI;
		dev_info(&adapter->pdev->dev, "%s is entering multicast promiscuous mode\n",
			 adapter->netdev->name);
	}

	/* flags == 0 means leave whichever modes are currently on */
	if (!flags) {
		if (adapter->flags & IAVF_FLAG_PROMISC_ON) {
			adapter->flags &= ~IAVF_FLAG_PROMISC_ON;
			adapter->aq_required &= ~IAVF_FLAG_AQ_RELEASE_PROMISC;
			dev_info(&adapter->pdev->dev, "Leaving promiscuous mode\n");
		}

		if (adapter->flags & IAVF_FLAG_ALLMULTI_ON) {
			adapter->flags &= ~IAVF_FLAG_ALLMULTI_ON;
			adapter->aq_required &= ~IAVF_FLAG_AQ_RELEASE_ALLMULTI;
			dev_info(&adapter->pdev->dev, "%s is leaving multicast promiscuous mode\n",
				 adapter->netdev->name);
		}
	}

	adapter->current_op = VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE;
	vpi.vsi_id = adapter->vsi_res->vsi_id;
	vpi.flags = flags;
	iavf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
			 (u8 *)&vpi, sizeof(vpi));
}

/**
 * iavf_request_stats
 * @adapter: adapter structure
 *
 * Request VSI statistics from PF.
957 **/ 958 void iavf_request_stats(struct iavf_adapter *adapter) 959 { 960 struct virtchnl_queue_select vqs; 961 962 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { 963 /* no error message, this isn't crucial */ 964 return; 965 } 966 967 adapter->aq_required &= ~IAVF_FLAG_AQ_REQUEST_STATS; 968 adapter->current_op = VIRTCHNL_OP_GET_STATS; 969 vqs.vsi_id = adapter->vsi_res->vsi_id; 970 /* queue maps are ignored for this message - only the vsi is used */ 971 if (iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_STATS, (u8 *)&vqs, 972 sizeof(vqs))) 973 /* if the request failed, don't lock out others */ 974 adapter->current_op = VIRTCHNL_OP_UNKNOWN; 975 } 976 977 /** 978 * iavf_get_hena 979 * @adapter: adapter structure 980 * 981 * Request hash enable capabilities from PF 982 **/ 983 void iavf_get_hena(struct iavf_adapter *adapter) 984 { 985 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { 986 /* bail because we already have a command pending */ 987 dev_err(&adapter->pdev->dev, "Cannot get RSS hash capabilities, command %d pending\n", 988 adapter->current_op); 989 return; 990 } 991 adapter->current_op = VIRTCHNL_OP_GET_RSS_HENA_CAPS; 992 adapter->aq_required &= ~IAVF_FLAG_AQ_GET_HENA; 993 iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_RSS_HENA_CAPS, NULL, 0); 994 } 995 996 /** 997 * iavf_set_hena 998 * @adapter: adapter structure 999 * 1000 * Request the PF to set our RSS hash capabilities 1001 **/ 1002 void iavf_set_hena(struct iavf_adapter *adapter) 1003 { 1004 struct virtchnl_rss_hena vrh; 1005 1006 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { 1007 /* bail because we already have a command pending */ 1008 dev_err(&adapter->pdev->dev, "Cannot set RSS hash enable, command %d pending\n", 1009 adapter->current_op); 1010 return; 1011 } 1012 vrh.hena = adapter->hena; 1013 adapter->current_op = VIRTCHNL_OP_SET_RSS_HENA; 1014 adapter->aq_required &= ~IAVF_FLAG_AQ_SET_HENA; 1015 iavf_send_pf_msg(adapter, VIRTCHNL_OP_SET_RSS_HENA, (u8 *)&vrh, 1016 sizeof(vrh)); 1017 } 1018 1019 
/** 1020 * iavf_set_rss_key 1021 * @adapter: adapter structure 1022 * 1023 * Request the PF to set our RSS hash key 1024 **/ 1025 void iavf_set_rss_key(struct iavf_adapter *adapter) 1026 { 1027 struct virtchnl_rss_key *vrk; 1028 int len; 1029 1030 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { 1031 /* bail because we already have a command pending */ 1032 dev_err(&adapter->pdev->dev, "Cannot set RSS key, command %d pending\n", 1033 adapter->current_op); 1034 return; 1035 } 1036 len = sizeof(struct virtchnl_rss_key) + 1037 (adapter->rss_key_size * sizeof(u8)) - 1; 1038 vrk = kzalloc(len, GFP_KERNEL); 1039 if (!vrk) 1040 return; 1041 vrk->vsi_id = adapter->vsi.id; 1042 vrk->key_len = adapter->rss_key_size; 1043 memcpy(vrk->key, adapter->rss_key, adapter->rss_key_size); 1044 1045 adapter->current_op = VIRTCHNL_OP_CONFIG_RSS_KEY; 1046 adapter->aq_required &= ~IAVF_FLAG_AQ_SET_RSS_KEY; 1047 iavf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_RSS_KEY, (u8 *)vrk, len); 1048 kfree(vrk); 1049 } 1050 1051 /** 1052 * iavf_set_rss_lut 1053 * @adapter: adapter structure 1054 * 1055 * Request the PF to set our RSS lookup table 1056 **/ 1057 void iavf_set_rss_lut(struct iavf_adapter *adapter) 1058 { 1059 struct virtchnl_rss_lut *vrl; 1060 int len; 1061 1062 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { 1063 /* bail because we already have a command pending */ 1064 dev_err(&adapter->pdev->dev, "Cannot set RSS LUT, command %d pending\n", 1065 adapter->current_op); 1066 return; 1067 } 1068 len = sizeof(struct virtchnl_rss_lut) + 1069 (adapter->rss_lut_size * sizeof(u8)) - 1; 1070 vrl = kzalloc(len, GFP_KERNEL); 1071 if (!vrl) 1072 return; 1073 vrl->vsi_id = adapter->vsi.id; 1074 vrl->lut_entries = adapter->rss_lut_size; 1075 memcpy(vrl->lut, adapter->rss_lut, adapter->rss_lut_size); 1076 adapter->current_op = VIRTCHNL_OP_CONFIG_RSS_LUT; 1077 adapter->aq_required &= ~IAVF_FLAG_AQ_SET_RSS_LUT; 1078 iavf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_RSS_LUT, (u8 *)vrl, len); 1079 kfree(vrl); 
1080 } 1081 1082 /** 1083 * iavf_enable_vlan_stripping 1084 * @adapter: adapter structure 1085 * 1086 * Request VLAN header stripping to be enabled 1087 **/ 1088 void iavf_enable_vlan_stripping(struct iavf_adapter *adapter) 1089 { 1090 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { 1091 /* bail because we already have a command pending */ 1092 dev_err(&adapter->pdev->dev, "Cannot enable stripping, command %d pending\n", 1093 adapter->current_op); 1094 return; 1095 } 1096 adapter->current_op = VIRTCHNL_OP_ENABLE_VLAN_STRIPPING; 1097 adapter->aq_required &= ~IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING; 1098 iavf_send_pf_msg(adapter, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING, NULL, 0); 1099 } 1100 1101 /** 1102 * iavf_disable_vlan_stripping 1103 * @adapter: adapter structure 1104 * 1105 * Request VLAN header stripping to be disabled 1106 **/ 1107 void iavf_disable_vlan_stripping(struct iavf_adapter *adapter) 1108 { 1109 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { 1110 /* bail because we already have a command pending */ 1111 dev_err(&adapter->pdev->dev, "Cannot disable stripping, command %d pending\n", 1112 adapter->current_op); 1113 return; 1114 } 1115 adapter->current_op = VIRTCHNL_OP_DISABLE_VLAN_STRIPPING; 1116 adapter->aq_required &= ~IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING; 1117 iavf_send_pf_msg(adapter, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING, NULL, 0); 1118 } 1119 1120 /** 1121 * iavf_tpid_to_vc_ethertype - transform from VLAN TPID to virtchnl ethertype 1122 * @tpid: VLAN TPID (i.e. 0x8100, 0x88a8, etc.) 
1123 */ 1124 static u32 iavf_tpid_to_vc_ethertype(u16 tpid) 1125 { 1126 switch (tpid) { 1127 case ETH_P_8021Q: 1128 return VIRTCHNL_VLAN_ETHERTYPE_8100; 1129 case ETH_P_8021AD: 1130 return VIRTCHNL_VLAN_ETHERTYPE_88A8; 1131 } 1132 1133 return 0; 1134 } 1135 1136 /** 1137 * iavf_set_vc_offload_ethertype - set virtchnl ethertype for offload message 1138 * @adapter: adapter structure 1139 * @msg: message structure used for updating offloads over virtchnl to update 1140 * @tpid: VLAN TPID (i.e. 0x8100, 0x88a8, etc.) 1141 * @offload_op: opcode used to determine which support structure to check 1142 */ 1143 static int 1144 iavf_set_vc_offload_ethertype(struct iavf_adapter *adapter, 1145 struct virtchnl_vlan_setting *msg, u16 tpid, 1146 enum virtchnl_ops offload_op) 1147 { 1148 struct virtchnl_vlan_supported_caps *offload_support; 1149 u16 vc_ethertype = iavf_tpid_to_vc_ethertype(tpid); 1150 1151 /* reference the correct offload support structure */ 1152 switch (offload_op) { 1153 case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2: 1154 case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2: 1155 offload_support = 1156 &adapter->vlan_v2_caps.offloads.stripping_support; 1157 break; 1158 case VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2: 1159 case VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2: 1160 offload_support = 1161 &adapter->vlan_v2_caps.offloads.insertion_support; 1162 break; 1163 default: 1164 dev_err(&adapter->pdev->dev, "Invalid opcode %d for setting virtchnl ethertype to enable/disable VLAN offloads\n", 1165 offload_op); 1166 return -EINVAL; 1167 } 1168 1169 /* make sure ethertype is supported */ 1170 if (offload_support->outer & vc_ethertype && 1171 offload_support->outer & VIRTCHNL_VLAN_TOGGLE) { 1172 msg->outer_ethertype_setting = vc_ethertype; 1173 } else if (offload_support->inner & vc_ethertype && 1174 offload_support->inner & VIRTCHNL_VLAN_TOGGLE) { 1175 msg->inner_ethertype_setting = vc_ethertype; 1176 } else { 1177 dev_dbg(&adapter->pdev->dev, "opcode %d unsupported for VLAN TPID 0x%04x\n", 
1178 offload_op, tpid); 1179 return -EINVAL; 1180 } 1181 1182 return 0; 1183 } 1184 1185 /** 1186 * iavf_clear_offload_v2_aq_required - clear AQ required bit for offload request 1187 * @adapter: adapter structure 1188 * @tpid: VLAN TPID 1189 * @offload_op: opcode used to determine which AQ required bit to clear 1190 */ 1191 static void 1192 iavf_clear_offload_v2_aq_required(struct iavf_adapter *adapter, u16 tpid, 1193 enum virtchnl_ops offload_op) 1194 { 1195 switch (offload_op) { 1196 case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2: 1197 if (tpid == ETH_P_8021Q) 1198 adapter->aq_required &= 1199 ~IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_STRIPPING; 1200 else if (tpid == ETH_P_8021AD) 1201 adapter->aq_required &= 1202 ~IAVF_FLAG_AQ_ENABLE_STAG_VLAN_STRIPPING; 1203 break; 1204 case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2: 1205 if (tpid == ETH_P_8021Q) 1206 adapter->aq_required &= 1207 ~IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_STRIPPING; 1208 else if (tpid == ETH_P_8021AD) 1209 adapter->aq_required &= 1210 ~IAVF_FLAG_AQ_DISABLE_STAG_VLAN_STRIPPING; 1211 break; 1212 case VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2: 1213 if (tpid == ETH_P_8021Q) 1214 adapter->aq_required &= 1215 ~IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_INSERTION; 1216 else if (tpid == ETH_P_8021AD) 1217 adapter->aq_required &= 1218 ~IAVF_FLAG_AQ_ENABLE_STAG_VLAN_INSERTION; 1219 break; 1220 case VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2: 1221 if (tpid == ETH_P_8021Q) 1222 adapter->aq_required &= 1223 ~IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_INSERTION; 1224 else if (tpid == ETH_P_8021AD) 1225 adapter->aq_required &= 1226 ~IAVF_FLAG_AQ_DISABLE_STAG_VLAN_INSERTION; 1227 break; 1228 default: 1229 dev_err(&adapter->pdev->dev, "Unsupported opcode %d specified for clearing aq_required bits for VIRTCHNL_VF_OFFLOAD_VLAN_V2 offload request\n", 1230 offload_op); 1231 } 1232 } 1233 1234 /** 1235 * iavf_send_vlan_offload_v2 - send offload enable/disable over virtchnl 1236 * @adapter: adapter structure 1237 * @tpid: VLAN TPID used for the command (i.e. 
0x8100 or 0x88a8) 1238 * @offload_op: offload_op used to make the request over virtchnl 1239 */ 1240 static void 1241 iavf_send_vlan_offload_v2(struct iavf_adapter *adapter, u16 tpid, 1242 enum virtchnl_ops offload_op) 1243 { 1244 struct virtchnl_vlan_setting *msg; 1245 int len = sizeof(*msg); 1246 1247 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { 1248 /* bail because we already have a command pending */ 1249 dev_err(&adapter->pdev->dev, "Cannot send %d, command %d pending\n", 1250 offload_op, adapter->current_op); 1251 return; 1252 } 1253 1254 adapter->current_op = offload_op; 1255 1256 msg = kzalloc(len, GFP_KERNEL); 1257 if (!msg) 1258 return; 1259 1260 msg->vport_id = adapter->vsi_res->vsi_id; 1261 1262 /* always clear to prevent unsupported and endless requests */ 1263 iavf_clear_offload_v2_aq_required(adapter, tpid, offload_op); 1264 1265 /* only send valid offload requests */ 1266 if (!iavf_set_vc_offload_ethertype(adapter, msg, tpid, offload_op)) 1267 iavf_send_pf_msg(adapter, offload_op, (u8 *)msg, len); 1268 else 1269 adapter->current_op = VIRTCHNL_OP_UNKNOWN; 1270 1271 kfree(msg); 1272 } 1273 1274 /** 1275 * iavf_enable_vlan_stripping_v2 - enable VLAN stripping 1276 * @adapter: adapter structure 1277 * @tpid: VLAN TPID used to enable VLAN stripping 1278 */ 1279 void iavf_enable_vlan_stripping_v2(struct iavf_adapter *adapter, u16 tpid) 1280 { 1281 iavf_send_vlan_offload_v2(adapter, tpid, 1282 VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2); 1283 } 1284 1285 /** 1286 * iavf_disable_vlan_stripping_v2 - disable VLAN stripping 1287 * @adapter: adapter structure 1288 * @tpid: VLAN TPID used to disable VLAN stripping 1289 */ 1290 void iavf_disable_vlan_stripping_v2(struct iavf_adapter *adapter, u16 tpid) 1291 { 1292 iavf_send_vlan_offload_v2(adapter, tpid, 1293 VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2); 1294 } 1295 1296 /** 1297 * iavf_enable_vlan_insertion_v2 - enable VLAN insertion 1298 * @adapter: adapter structure 1299 * @tpid: VLAN TPID used to enable VLAN 
insertion 1300 */ 1301 void iavf_enable_vlan_insertion_v2(struct iavf_adapter *adapter, u16 tpid) 1302 { 1303 iavf_send_vlan_offload_v2(adapter, tpid, 1304 VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2); 1305 } 1306 1307 /** 1308 * iavf_disable_vlan_insertion_v2 - disable VLAN insertion 1309 * @adapter: adapter structure 1310 * @tpid: VLAN TPID used to disable VLAN insertion 1311 */ 1312 void iavf_disable_vlan_insertion_v2(struct iavf_adapter *adapter, u16 tpid) 1313 { 1314 iavf_send_vlan_offload_v2(adapter, tpid, 1315 VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2); 1316 } 1317 1318 #define IAVF_MAX_SPEED_STRLEN 13 1319 1320 /** 1321 * iavf_print_link_message - print link up or down 1322 * @adapter: adapter structure 1323 * 1324 * Log a message telling the world of our wonderous link status 1325 */ 1326 static void iavf_print_link_message(struct iavf_adapter *adapter) 1327 { 1328 struct net_device *netdev = adapter->netdev; 1329 int link_speed_mbps; 1330 char *speed; 1331 1332 if (!adapter->link_up) { 1333 netdev_info(netdev, "NIC Link is Down\n"); 1334 return; 1335 } 1336 1337 speed = kzalloc(IAVF_MAX_SPEED_STRLEN, GFP_KERNEL); 1338 if (!speed) 1339 return; 1340 1341 if (ADV_LINK_SUPPORT(adapter)) { 1342 link_speed_mbps = adapter->link_speed_mbps; 1343 goto print_link_msg; 1344 } 1345 1346 switch (adapter->link_speed) { 1347 case VIRTCHNL_LINK_SPEED_40GB: 1348 link_speed_mbps = SPEED_40000; 1349 break; 1350 case VIRTCHNL_LINK_SPEED_25GB: 1351 link_speed_mbps = SPEED_25000; 1352 break; 1353 case VIRTCHNL_LINK_SPEED_20GB: 1354 link_speed_mbps = SPEED_20000; 1355 break; 1356 case VIRTCHNL_LINK_SPEED_10GB: 1357 link_speed_mbps = SPEED_10000; 1358 break; 1359 case VIRTCHNL_LINK_SPEED_5GB: 1360 link_speed_mbps = SPEED_5000; 1361 break; 1362 case VIRTCHNL_LINK_SPEED_2_5GB: 1363 link_speed_mbps = SPEED_2500; 1364 break; 1365 case VIRTCHNL_LINK_SPEED_1GB: 1366 link_speed_mbps = SPEED_1000; 1367 break; 1368 case VIRTCHNL_LINK_SPEED_100MB: 1369 link_speed_mbps = SPEED_100; 1370 break; 1371 
default: 1372 link_speed_mbps = SPEED_UNKNOWN; 1373 break; 1374 } 1375 1376 print_link_msg: 1377 if (link_speed_mbps > SPEED_1000) { 1378 if (link_speed_mbps == SPEED_2500) 1379 snprintf(speed, IAVF_MAX_SPEED_STRLEN, "2.5 Gbps"); 1380 else 1381 /* convert to Gbps inline */ 1382 snprintf(speed, IAVF_MAX_SPEED_STRLEN, "%d %s", 1383 link_speed_mbps / 1000, "Gbps"); 1384 } else if (link_speed_mbps == SPEED_UNKNOWN) { 1385 snprintf(speed, IAVF_MAX_SPEED_STRLEN, "%s", "Unknown Mbps"); 1386 } else { 1387 snprintf(speed, IAVF_MAX_SPEED_STRLEN, "%d %s", 1388 link_speed_mbps, "Mbps"); 1389 } 1390 1391 netdev_info(netdev, "NIC Link is Up Speed is %s Full Duplex\n", speed); 1392 kfree(speed); 1393 } 1394 1395 /** 1396 * iavf_get_vpe_link_status 1397 * @adapter: adapter structure 1398 * @vpe: virtchnl_pf_event structure 1399 * 1400 * Helper function for determining the link status 1401 **/ 1402 static bool 1403 iavf_get_vpe_link_status(struct iavf_adapter *adapter, 1404 struct virtchnl_pf_event *vpe) 1405 { 1406 if (ADV_LINK_SUPPORT(adapter)) 1407 return vpe->event_data.link_event_adv.link_status; 1408 else 1409 return vpe->event_data.link_event.link_status; 1410 } 1411 1412 /** 1413 * iavf_set_adapter_link_speed_from_vpe 1414 * @adapter: adapter structure for which we are setting the link speed 1415 * @vpe: virtchnl_pf_event structure that contains the link speed we are setting 1416 * 1417 * Helper function for setting iavf_adapter link speed 1418 **/ 1419 static void 1420 iavf_set_adapter_link_speed_from_vpe(struct iavf_adapter *adapter, 1421 struct virtchnl_pf_event *vpe) 1422 { 1423 if (ADV_LINK_SUPPORT(adapter)) 1424 adapter->link_speed_mbps = 1425 vpe->event_data.link_event_adv.link_speed; 1426 else 1427 adapter->link_speed = vpe->event_data.link_event.link_speed; 1428 } 1429 1430 /** 1431 * iavf_enable_channels 1432 * @adapter: adapter structure 1433 * 1434 * Request that the PF enable channels as specified by 1435 * the user via tc tool. 
1436 **/ 1437 void iavf_enable_channels(struct iavf_adapter *adapter) 1438 { 1439 struct virtchnl_tc_info *vti = NULL; 1440 size_t len; 1441 int i; 1442 1443 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { 1444 /* bail because we already have a command pending */ 1445 dev_err(&adapter->pdev->dev, "Cannot configure mqprio, command %d pending\n", 1446 adapter->current_op); 1447 return; 1448 } 1449 1450 len = struct_size(vti, list, adapter->num_tc - 1); 1451 vti = kzalloc(len, GFP_KERNEL); 1452 if (!vti) 1453 return; 1454 vti->num_tc = adapter->num_tc; 1455 for (i = 0; i < vti->num_tc; i++) { 1456 vti->list[i].count = adapter->ch_config.ch_info[i].count; 1457 vti->list[i].offset = adapter->ch_config.ch_info[i].offset; 1458 vti->list[i].pad = 0; 1459 vti->list[i].max_tx_rate = 1460 adapter->ch_config.ch_info[i].max_tx_rate; 1461 } 1462 1463 adapter->ch_config.state = __IAVF_TC_RUNNING; 1464 adapter->flags |= IAVF_FLAG_REINIT_ITR_NEEDED; 1465 adapter->current_op = VIRTCHNL_OP_ENABLE_CHANNELS; 1466 adapter->aq_required &= ~IAVF_FLAG_AQ_ENABLE_CHANNELS; 1467 iavf_send_pf_msg(adapter, VIRTCHNL_OP_ENABLE_CHANNELS, (u8 *)vti, len); 1468 kfree(vti); 1469 } 1470 1471 /** 1472 * iavf_disable_channels 1473 * @adapter: adapter structure 1474 * 1475 * Request that the PF disable channels that are configured 1476 **/ 1477 void iavf_disable_channels(struct iavf_adapter *adapter) 1478 { 1479 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { 1480 /* bail because we already have a command pending */ 1481 dev_err(&adapter->pdev->dev, "Cannot configure mqprio, command %d pending\n", 1482 adapter->current_op); 1483 return; 1484 } 1485 1486 adapter->ch_config.state = __IAVF_TC_INVALID; 1487 adapter->flags |= IAVF_FLAG_REINIT_ITR_NEEDED; 1488 adapter->current_op = VIRTCHNL_OP_DISABLE_CHANNELS; 1489 adapter->aq_required &= ~IAVF_FLAG_AQ_DISABLE_CHANNELS; 1490 iavf_send_pf_msg(adapter, VIRTCHNL_OP_DISABLE_CHANNELS, NULL, 0); 1491 } 1492 1493 /** 1494 * iavf_print_cloud_filter 1495 * 
@adapter: adapter structure 1496 * @f: cloud filter to print 1497 * 1498 * Print the cloud filter 1499 **/ 1500 static void iavf_print_cloud_filter(struct iavf_adapter *adapter, 1501 struct virtchnl_filter *f) 1502 { 1503 switch (f->flow_type) { 1504 case VIRTCHNL_TCP_V4_FLOW: 1505 dev_info(&adapter->pdev->dev, "dst_mac: %pM src_mac: %pM vlan_id: %hu dst_ip: %pI4 src_ip %pI4 dst_port %hu src_port %hu\n", 1506 &f->data.tcp_spec.dst_mac, 1507 &f->data.tcp_spec.src_mac, 1508 ntohs(f->data.tcp_spec.vlan_id), 1509 &f->data.tcp_spec.dst_ip[0], 1510 &f->data.tcp_spec.src_ip[0], 1511 ntohs(f->data.tcp_spec.dst_port), 1512 ntohs(f->data.tcp_spec.src_port)); 1513 break; 1514 case VIRTCHNL_TCP_V6_FLOW: 1515 dev_info(&adapter->pdev->dev, "dst_mac: %pM src_mac: %pM vlan_id: %hu dst_ip: %pI6 src_ip %pI6 dst_port %hu src_port %hu\n", 1516 &f->data.tcp_spec.dst_mac, 1517 &f->data.tcp_spec.src_mac, 1518 ntohs(f->data.tcp_spec.vlan_id), 1519 &f->data.tcp_spec.dst_ip, 1520 &f->data.tcp_spec.src_ip, 1521 ntohs(f->data.tcp_spec.dst_port), 1522 ntohs(f->data.tcp_spec.src_port)); 1523 break; 1524 } 1525 } 1526 1527 /** 1528 * iavf_add_cloud_filter 1529 * @adapter: adapter structure 1530 * 1531 * Request that the PF add cloud filters as specified 1532 * by the user via tc tool. 
1533 **/ 1534 void iavf_add_cloud_filter(struct iavf_adapter *adapter) 1535 { 1536 struct iavf_cloud_filter *cf; 1537 struct virtchnl_filter *f; 1538 int len = 0, count = 0; 1539 1540 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { 1541 /* bail because we already have a command pending */ 1542 dev_err(&adapter->pdev->dev, "Cannot add cloud filter, command %d pending\n", 1543 adapter->current_op); 1544 return; 1545 } 1546 list_for_each_entry(cf, &adapter->cloud_filter_list, list) { 1547 if (cf->add) { 1548 count++; 1549 break; 1550 } 1551 } 1552 if (!count) { 1553 adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_CLOUD_FILTER; 1554 return; 1555 } 1556 adapter->current_op = VIRTCHNL_OP_ADD_CLOUD_FILTER; 1557 1558 len = sizeof(struct virtchnl_filter); 1559 f = kzalloc(len, GFP_KERNEL); 1560 if (!f) 1561 return; 1562 1563 list_for_each_entry(cf, &adapter->cloud_filter_list, list) { 1564 if (cf->add) { 1565 memcpy(f, &cf->f, sizeof(struct virtchnl_filter)); 1566 cf->add = false; 1567 cf->state = __IAVF_CF_ADD_PENDING; 1568 iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_CLOUD_FILTER, 1569 (u8 *)f, len); 1570 } 1571 } 1572 kfree(f); 1573 } 1574 1575 /** 1576 * iavf_del_cloud_filter 1577 * @adapter: adapter structure 1578 * 1579 * Request that the PF delete cloud filters as specified 1580 * by the user via tc tool. 
1581 **/ 1582 void iavf_del_cloud_filter(struct iavf_adapter *adapter) 1583 { 1584 struct iavf_cloud_filter *cf, *cftmp; 1585 struct virtchnl_filter *f; 1586 int len = 0, count = 0; 1587 1588 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { 1589 /* bail because we already have a command pending */ 1590 dev_err(&adapter->pdev->dev, "Cannot remove cloud filter, command %d pending\n", 1591 adapter->current_op); 1592 return; 1593 } 1594 list_for_each_entry(cf, &adapter->cloud_filter_list, list) { 1595 if (cf->del) { 1596 count++; 1597 break; 1598 } 1599 } 1600 if (!count) { 1601 adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_CLOUD_FILTER; 1602 return; 1603 } 1604 adapter->current_op = VIRTCHNL_OP_DEL_CLOUD_FILTER; 1605 1606 len = sizeof(struct virtchnl_filter); 1607 f = kzalloc(len, GFP_KERNEL); 1608 if (!f) 1609 return; 1610 1611 list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) { 1612 if (cf->del) { 1613 memcpy(f, &cf->f, sizeof(struct virtchnl_filter)); 1614 cf->del = false; 1615 cf->state = __IAVF_CF_DEL_PENDING; 1616 iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_CLOUD_FILTER, 1617 (u8 *)f, len); 1618 } 1619 } 1620 kfree(f); 1621 } 1622 1623 /** 1624 * iavf_add_fdir_filter 1625 * @adapter: the VF adapter structure 1626 * 1627 * Request that the PF add Flow Director filters as specified 1628 * by the user via ethtool. 
 **/
void iavf_add_fdir_filter(struct iavf_adapter *adapter)
{
	struct iavf_fdir_fltr *fdir;
	struct virtchnl_fdir_add *f;
	bool process_fltr = false;
	int len;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot add Flow Director filter, command %d pending\n",
			adapter->current_op);
		return;
	}

	len = sizeof(struct virtchnl_fdir_add);
	f = kzalloc(len, GFP_KERNEL);
	if (!f)
		return;

	/* pick the first filter awaiting addition; the state transition and
	 * message copy happen under the lock so the send below can run
	 * without holding it
	 */
	spin_lock_bh(&adapter->fdir_fltr_lock);
	list_for_each_entry(fdir, &adapter->fdir_list_head, list) {
		if (fdir->state == IAVF_FDIR_FLTR_ADD_REQUEST) {
			process_fltr = true;
			fdir->state = IAVF_FDIR_FLTR_ADD_PENDING;
			memcpy(f, &fdir->vc_add_msg, len);
			break;
		}
	}
	spin_unlock_bh(&adapter->fdir_fltr_lock);

	if (!process_fltr) {
		/* prevent iavf_add_fdir_filter() from being called when there
		 * are no filters to add
		 */
		adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_FDIR_FILTER;
		kfree(f);
		return;
	}
	adapter->current_op = VIRTCHNL_OP_ADD_FDIR_FILTER;
	iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_FDIR_FILTER, (u8 *)f, len);
	kfree(f);
}

/**
 * iavf_del_fdir_filter
 * @adapter: the VF adapter structure
 *
 * Request that the PF delete Flow Director filters as specified
 * by the user via ethtool.
1679 **/ 1680 void iavf_del_fdir_filter(struct iavf_adapter *adapter) 1681 { 1682 struct iavf_fdir_fltr *fdir; 1683 struct virtchnl_fdir_del f; 1684 bool process_fltr = false; 1685 int len; 1686 1687 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { 1688 /* bail because we already have a command pending */ 1689 dev_err(&adapter->pdev->dev, "Cannot remove Flow Director filter, command %d pending\n", 1690 adapter->current_op); 1691 return; 1692 } 1693 1694 len = sizeof(struct virtchnl_fdir_del); 1695 1696 spin_lock_bh(&adapter->fdir_fltr_lock); 1697 list_for_each_entry(fdir, &adapter->fdir_list_head, list) { 1698 if (fdir->state == IAVF_FDIR_FLTR_DEL_REQUEST) { 1699 process_fltr = true; 1700 memset(&f, 0, len); 1701 f.vsi_id = fdir->vc_add_msg.vsi_id; 1702 f.flow_id = fdir->flow_id; 1703 fdir->state = IAVF_FDIR_FLTR_DEL_PENDING; 1704 break; 1705 } 1706 } 1707 spin_unlock_bh(&adapter->fdir_fltr_lock); 1708 1709 if (!process_fltr) { 1710 adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_FDIR_FILTER; 1711 return; 1712 } 1713 1714 adapter->current_op = VIRTCHNL_OP_DEL_FDIR_FILTER; 1715 iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_FDIR_FILTER, (u8 *)&f, len); 1716 } 1717 1718 /** 1719 * iavf_add_adv_rss_cfg 1720 * @adapter: the VF adapter structure 1721 * 1722 * Request that the PF add RSS configuration as specified 1723 * by the user via ethtool. 
1724 **/ 1725 void iavf_add_adv_rss_cfg(struct iavf_adapter *adapter) 1726 { 1727 struct virtchnl_rss_cfg *rss_cfg; 1728 struct iavf_adv_rss *rss; 1729 bool process_rss = false; 1730 int len; 1731 1732 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { 1733 /* bail because we already have a command pending */ 1734 dev_err(&adapter->pdev->dev, "Cannot add RSS configuration, command %d pending\n", 1735 adapter->current_op); 1736 return; 1737 } 1738 1739 len = sizeof(struct virtchnl_rss_cfg); 1740 rss_cfg = kzalloc(len, GFP_KERNEL); 1741 if (!rss_cfg) 1742 return; 1743 1744 spin_lock_bh(&adapter->adv_rss_lock); 1745 list_for_each_entry(rss, &adapter->adv_rss_list_head, list) { 1746 if (rss->state == IAVF_ADV_RSS_ADD_REQUEST) { 1747 process_rss = true; 1748 rss->state = IAVF_ADV_RSS_ADD_PENDING; 1749 memcpy(rss_cfg, &rss->cfg_msg, len); 1750 iavf_print_adv_rss_cfg(adapter, rss, 1751 "Input set change for", 1752 "is pending"); 1753 break; 1754 } 1755 } 1756 spin_unlock_bh(&adapter->adv_rss_lock); 1757 1758 if (process_rss) { 1759 adapter->current_op = VIRTCHNL_OP_ADD_RSS_CFG; 1760 iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_RSS_CFG, 1761 (u8 *)rss_cfg, len); 1762 } else { 1763 adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_ADV_RSS_CFG; 1764 } 1765 1766 kfree(rss_cfg); 1767 } 1768 1769 /** 1770 * iavf_del_adv_rss_cfg 1771 * @adapter: the VF adapter structure 1772 * 1773 * Request that the PF delete RSS configuration as specified 1774 * by the user via ethtool. 
1775 **/ 1776 void iavf_del_adv_rss_cfg(struct iavf_adapter *adapter) 1777 { 1778 struct virtchnl_rss_cfg *rss_cfg; 1779 struct iavf_adv_rss *rss; 1780 bool process_rss = false; 1781 int len; 1782 1783 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { 1784 /* bail because we already have a command pending */ 1785 dev_err(&adapter->pdev->dev, "Cannot remove RSS configuration, command %d pending\n", 1786 adapter->current_op); 1787 return; 1788 } 1789 1790 len = sizeof(struct virtchnl_rss_cfg); 1791 rss_cfg = kzalloc(len, GFP_KERNEL); 1792 if (!rss_cfg) 1793 return; 1794 1795 spin_lock_bh(&adapter->adv_rss_lock); 1796 list_for_each_entry(rss, &adapter->adv_rss_list_head, list) { 1797 if (rss->state == IAVF_ADV_RSS_DEL_REQUEST) { 1798 process_rss = true; 1799 rss->state = IAVF_ADV_RSS_DEL_PENDING; 1800 memcpy(rss_cfg, &rss->cfg_msg, len); 1801 break; 1802 } 1803 } 1804 spin_unlock_bh(&adapter->adv_rss_lock); 1805 1806 if (process_rss) { 1807 adapter->current_op = VIRTCHNL_OP_DEL_RSS_CFG; 1808 iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_RSS_CFG, 1809 (u8 *)rss_cfg, len); 1810 } else { 1811 adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_ADV_RSS_CFG; 1812 } 1813 1814 kfree(rss_cfg); 1815 } 1816 1817 /** 1818 * iavf_request_reset 1819 * @adapter: adapter structure 1820 * 1821 * Request that the PF reset this VF. No response is expected. 1822 **/ 1823 int iavf_request_reset(struct iavf_adapter *adapter) 1824 { 1825 int err; 1826 /* Don't check CURRENT_OP - this is always higher priority */ 1827 err = iavf_send_pf_msg(adapter, VIRTCHNL_OP_RESET_VF, NULL, 0); 1828 adapter->current_op = VIRTCHNL_OP_UNKNOWN; 1829 return err; 1830 } 1831 1832 /** 1833 * iavf_netdev_features_vlan_strip_set - update vlan strip status 1834 * @netdev: ptr to netdev being adjusted 1835 * @enable: enable or disable vlan strip 1836 * 1837 * Helper function to change vlan strip status in netdev->features. 
1838 */ 1839 static void iavf_netdev_features_vlan_strip_set(struct net_device *netdev, 1840 const bool enable) 1841 { 1842 if (enable) 1843 netdev->features |= NETIF_F_HW_VLAN_CTAG_RX; 1844 else 1845 netdev->features &= ~NETIF_F_HW_VLAN_CTAG_RX; 1846 } 1847 1848 /** 1849 * iavf_virtchnl_completion 1850 * @adapter: adapter structure 1851 * @v_opcode: opcode sent by PF 1852 * @v_retval: retval sent by PF 1853 * @msg: message sent by PF 1854 * @msglen: message length 1855 * 1856 * Asynchronous completion function for admin queue messages. Rather than busy 1857 * wait, we fire off our requests and assume that no errors will be returned. 1858 * This function handles the reply messages. 1859 **/ 1860 void iavf_virtchnl_completion(struct iavf_adapter *adapter, 1861 enum virtchnl_ops v_opcode, 1862 enum iavf_status v_retval, u8 *msg, u16 msglen) 1863 { 1864 struct net_device *netdev = adapter->netdev; 1865 1866 if (v_opcode == VIRTCHNL_OP_EVENT) { 1867 struct virtchnl_pf_event *vpe = 1868 (struct virtchnl_pf_event *)msg; 1869 bool link_up = iavf_get_vpe_link_status(adapter, vpe); 1870 1871 switch (vpe->event) { 1872 case VIRTCHNL_EVENT_LINK_CHANGE: 1873 iavf_set_adapter_link_speed_from_vpe(adapter, vpe); 1874 1875 /* we've already got the right link status, bail */ 1876 if (adapter->link_up == link_up) 1877 break; 1878 1879 if (link_up) { 1880 /* If we get link up message and start queues 1881 * before our queues are configured it will 1882 * trigger a TX hang. In that case, just ignore 1883 * the link status message,we'll get another one 1884 * after we enable queues and actually prepared 1885 * to send traffic. 1886 */ 1887 if (adapter->state != __IAVF_RUNNING) 1888 break; 1889 1890 /* For ADq enabled VF, we reconfigure VSIs and 1891 * re-allocate queues. Hence wait till all 1892 * queues are enabled. 
1893 */ 1894 if (adapter->flags & 1895 IAVF_FLAG_QUEUES_DISABLED) 1896 break; 1897 } 1898 1899 adapter->link_up = link_up; 1900 if (link_up) { 1901 netif_tx_start_all_queues(netdev); 1902 netif_carrier_on(netdev); 1903 } else { 1904 netif_tx_stop_all_queues(netdev); 1905 netif_carrier_off(netdev); 1906 } 1907 iavf_print_link_message(adapter); 1908 break; 1909 case VIRTCHNL_EVENT_RESET_IMPENDING: 1910 dev_info(&adapter->pdev->dev, "Reset indication received from the PF\n"); 1911 if (!(adapter->flags & IAVF_FLAG_RESET_PENDING)) { 1912 adapter->flags |= IAVF_FLAG_RESET_PENDING; 1913 dev_info(&adapter->pdev->dev, "Scheduling reset task\n"); 1914 queue_work(iavf_wq, &adapter->reset_task); 1915 } 1916 break; 1917 default: 1918 dev_err(&adapter->pdev->dev, "Unknown event %d from PF\n", 1919 vpe->event); 1920 break; 1921 } 1922 return; 1923 } 1924 if (v_retval) { 1925 switch (v_opcode) { 1926 case VIRTCHNL_OP_ADD_VLAN: 1927 dev_err(&adapter->pdev->dev, "Failed to add VLAN filter, error %s\n", 1928 iavf_stat_str(&adapter->hw, v_retval)); 1929 break; 1930 case VIRTCHNL_OP_ADD_ETH_ADDR: 1931 dev_err(&adapter->pdev->dev, "Failed to add MAC filter, error %s\n", 1932 iavf_stat_str(&adapter->hw, v_retval)); 1933 iavf_mac_add_reject(adapter); 1934 /* restore administratively set MAC address */ 1935 ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr); 1936 wake_up(&adapter->vc_waitqueue); 1937 break; 1938 case VIRTCHNL_OP_DEL_VLAN: 1939 dev_err(&adapter->pdev->dev, "Failed to delete VLAN filter, error %s\n", 1940 iavf_stat_str(&adapter->hw, v_retval)); 1941 break; 1942 case VIRTCHNL_OP_DEL_ETH_ADDR: 1943 dev_err(&adapter->pdev->dev, "Failed to delete MAC filter, error %s\n", 1944 iavf_stat_str(&adapter->hw, v_retval)); 1945 break; 1946 case VIRTCHNL_OP_ENABLE_CHANNELS: 1947 dev_err(&adapter->pdev->dev, "Failed to configure queue channels, error %s\n", 1948 iavf_stat_str(&adapter->hw, v_retval)); 1949 adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED; 1950 adapter->ch_config.state 
= __IAVF_TC_INVALID; 1951 netdev_reset_tc(netdev); 1952 netif_tx_start_all_queues(netdev); 1953 break; 1954 case VIRTCHNL_OP_DISABLE_CHANNELS: 1955 dev_err(&adapter->pdev->dev, "Failed to disable queue channels, error %s\n", 1956 iavf_stat_str(&adapter->hw, v_retval)); 1957 adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED; 1958 adapter->ch_config.state = __IAVF_TC_RUNNING; 1959 netif_tx_start_all_queues(netdev); 1960 break; 1961 case VIRTCHNL_OP_ADD_CLOUD_FILTER: { 1962 struct iavf_cloud_filter *cf, *cftmp; 1963 1964 list_for_each_entry_safe(cf, cftmp, 1965 &adapter->cloud_filter_list, 1966 list) { 1967 if (cf->state == __IAVF_CF_ADD_PENDING) { 1968 cf->state = __IAVF_CF_INVALID; 1969 dev_info(&adapter->pdev->dev, "Failed to add cloud filter, error %s\n", 1970 iavf_stat_str(&adapter->hw, 1971 v_retval)); 1972 iavf_print_cloud_filter(adapter, 1973 &cf->f); 1974 list_del(&cf->list); 1975 kfree(cf); 1976 adapter->num_cloud_filters--; 1977 } 1978 } 1979 } 1980 break; 1981 case VIRTCHNL_OP_DEL_CLOUD_FILTER: { 1982 struct iavf_cloud_filter *cf; 1983 1984 list_for_each_entry(cf, &adapter->cloud_filter_list, 1985 list) { 1986 if (cf->state == __IAVF_CF_DEL_PENDING) { 1987 cf->state = __IAVF_CF_ACTIVE; 1988 dev_info(&adapter->pdev->dev, "Failed to del cloud filter, error %s\n", 1989 iavf_stat_str(&adapter->hw, 1990 v_retval)); 1991 iavf_print_cloud_filter(adapter, 1992 &cf->f); 1993 } 1994 } 1995 } 1996 break; 1997 case VIRTCHNL_OP_ADD_FDIR_FILTER: { 1998 struct iavf_fdir_fltr *fdir, *fdir_tmp; 1999 2000 spin_lock_bh(&adapter->fdir_fltr_lock); 2001 list_for_each_entry_safe(fdir, fdir_tmp, 2002 &adapter->fdir_list_head, 2003 list) { 2004 if (fdir->state == IAVF_FDIR_FLTR_ADD_PENDING) { 2005 dev_info(&adapter->pdev->dev, "Failed to add Flow Director filter, error %s\n", 2006 iavf_stat_str(&adapter->hw, 2007 v_retval)); 2008 iavf_print_fdir_fltr(adapter, fdir); 2009 if (msglen) 2010 dev_err(&adapter->pdev->dev, 2011 "%s\n", msg); 2012 list_del(&fdir->list); 2013 kfree(fdir); 
2014 adapter->fdir_active_fltr--; 2015 } 2016 } 2017 spin_unlock_bh(&adapter->fdir_fltr_lock); 2018 } 2019 break; 2020 case VIRTCHNL_OP_DEL_FDIR_FILTER: { 2021 struct iavf_fdir_fltr *fdir; 2022 2023 spin_lock_bh(&adapter->fdir_fltr_lock); 2024 list_for_each_entry(fdir, &adapter->fdir_list_head, 2025 list) { 2026 if (fdir->state == IAVF_FDIR_FLTR_DEL_PENDING) { 2027 fdir->state = IAVF_FDIR_FLTR_ACTIVE; 2028 dev_info(&adapter->pdev->dev, "Failed to del Flow Director filter, error %s\n", 2029 iavf_stat_str(&adapter->hw, 2030 v_retval)); 2031 iavf_print_fdir_fltr(adapter, fdir); 2032 } 2033 } 2034 spin_unlock_bh(&adapter->fdir_fltr_lock); 2035 } 2036 break; 2037 case VIRTCHNL_OP_ADD_RSS_CFG: { 2038 struct iavf_adv_rss *rss, *rss_tmp; 2039 2040 spin_lock_bh(&adapter->adv_rss_lock); 2041 list_for_each_entry_safe(rss, rss_tmp, 2042 &adapter->adv_rss_list_head, 2043 list) { 2044 if (rss->state == IAVF_ADV_RSS_ADD_PENDING) { 2045 iavf_print_adv_rss_cfg(adapter, rss, 2046 "Failed to change the input set for", 2047 NULL); 2048 list_del(&rss->list); 2049 kfree(rss); 2050 } 2051 } 2052 spin_unlock_bh(&adapter->adv_rss_lock); 2053 } 2054 break; 2055 case VIRTCHNL_OP_DEL_RSS_CFG: { 2056 struct iavf_adv_rss *rss; 2057 2058 spin_lock_bh(&adapter->adv_rss_lock); 2059 list_for_each_entry(rss, &adapter->adv_rss_list_head, 2060 list) { 2061 if (rss->state == IAVF_ADV_RSS_DEL_PENDING) { 2062 rss->state = IAVF_ADV_RSS_ACTIVE; 2063 dev_err(&adapter->pdev->dev, "Failed to delete RSS configuration, error %s\n", 2064 iavf_stat_str(&adapter->hw, 2065 v_retval)); 2066 } 2067 } 2068 spin_unlock_bh(&adapter->adv_rss_lock); 2069 } 2070 break; 2071 case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING: 2072 dev_warn(&adapter->pdev->dev, "Changing VLAN Stripping is not allowed when Port VLAN is configured\n"); 2073 /* Vlan stripping could not be enabled by ethtool. 2074 * Disable it in netdev->features. 
2075 */ 2076 iavf_netdev_features_vlan_strip_set(netdev, false); 2077 break; 2078 case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING: 2079 dev_warn(&adapter->pdev->dev, "Changing VLAN Stripping is not allowed when Port VLAN is configured\n"); 2080 /* Vlan stripping could not be disabled by ethtool. 2081 * Enable it in netdev->features. 2082 */ 2083 iavf_netdev_features_vlan_strip_set(netdev, true); 2084 break; 2085 default: 2086 dev_err(&adapter->pdev->dev, "PF returned error %d (%s) to our request %d\n", 2087 v_retval, iavf_stat_str(&adapter->hw, v_retval), 2088 v_opcode); 2089 } 2090 } 2091 switch (v_opcode) { 2092 case VIRTCHNL_OP_ADD_ETH_ADDR: 2093 if (!v_retval) 2094 iavf_mac_add_ok(adapter); 2095 if (!ether_addr_equal(netdev->dev_addr, adapter->hw.mac.addr)) 2096 if (!ether_addr_equal(netdev->dev_addr, 2097 adapter->hw.mac.addr)) { 2098 netif_addr_lock_bh(netdev); 2099 eth_hw_addr_set(netdev, adapter->hw.mac.addr); 2100 netif_addr_unlock_bh(netdev); 2101 } 2102 wake_up(&adapter->vc_waitqueue); 2103 break; 2104 case VIRTCHNL_OP_GET_STATS: { 2105 struct iavf_eth_stats *stats = 2106 (struct iavf_eth_stats *)msg; 2107 netdev->stats.rx_packets = stats->rx_unicast + 2108 stats->rx_multicast + 2109 stats->rx_broadcast; 2110 netdev->stats.tx_packets = stats->tx_unicast + 2111 stats->tx_multicast + 2112 stats->tx_broadcast; 2113 netdev->stats.rx_bytes = stats->rx_bytes; 2114 netdev->stats.tx_bytes = stats->tx_bytes; 2115 netdev->stats.tx_errors = stats->tx_errors; 2116 netdev->stats.rx_dropped = stats->rx_discards; 2117 netdev->stats.tx_dropped = stats->tx_discards; 2118 adapter->current_stats = *stats; 2119 } 2120 break; 2121 case VIRTCHNL_OP_GET_VF_RESOURCES: { 2122 u16 len = sizeof(struct virtchnl_vf_resource) + 2123 IAVF_MAX_VF_VSI * 2124 sizeof(struct virtchnl_vsi_resource); 2125 memcpy(adapter->vf_res, msg, min(msglen, len)); 2126 iavf_validate_num_queues(adapter); 2127 iavf_vf_parse_hw_config(&adapter->hw, adapter->vf_res); 2128 if 
(is_zero_ether_addr(adapter->hw.mac.addr)) { 2129 /* restore current mac address */ 2130 ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr); 2131 } else { 2132 netif_addr_lock_bh(netdev); 2133 /* refresh current mac address if changed */ 2134 ether_addr_copy(netdev->perm_addr, 2135 adapter->hw.mac.addr); 2136 netif_addr_unlock_bh(netdev); 2137 } 2138 spin_lock_bh(&adapter->mac_vlan_list_lock); 2139 iavf_add_filter(adapter, adapter->hw.mac.addr); 2140 2141 if (VLAN_ALLOWED(adapter)) { 2142 if (!list_empty(&adapter->vlan_filter_list)) { 2143 struct iavf_vlan_filter *vlf; 2144 2145 /* re-add all VLAN filters over virtchnl */ 2146 list_for_each_entry(vlf, 2147 &adapter->vlan_filter_list, 2148 list) 2149 vlf->add = true; 2150 2151 adapter->aq_required |= 2152 IAVF_FLAG_AQ_ADD_VLAN_FILTER; 2153 } 2154 } 2155 2156 spin_unlock_bh(&adapter->mac_vlan_list_lock); 2157 2158 iavf_parse_vf_resource_msg(adapter); 2159 2160 /* negotiated VIRTCHNL_VF_OFFLOAD_VLAN_V2, so wait for the 2161 * response to VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS to finish 2162 * configuration 2163 */ 2164 if (VLAN_V2_ALLOWED(adapter)) 2165 break; 2166 /* fallthrough and finish config if VIRTCHNL_VF_OFFLOAD_VLAN_V2 2167 * wasn't successfully negotiated with the PF 2168 */ 2169 } 2170 fallthrough; 2171 case VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS: { 2172 struct iavf_mac_filter *f; 2173 bool was_mac_changed; 2174 u64 aq_required = 0; 2175 2176 if (v_opcode == VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS) 2177 memcpy(&adapter->vlan_v2_caps, msg, 2178 min_t(u16, msglen, 2179 sizeof(adapter->vlan_v2_caps))); 2180 2181 iavf_process_config(adapter); 2182 adapter->flags |= IAVF_FLAG_SETUP_NETDEV_FEATURES; 2183 was_mac_changed = !ether_addr_equal(netdev->dev_addr, 2184 adapter->hw.mac.addr); 2185 2186 spin_lock_bh(&adapter->mac_vlan_list_lock); 2187 2188 /* re-add all MAC filters */ 2189 list_for_each_entry(f, &adapter->mac_filter_list, list) { 2190 if (was_mac_changed && 2191 ether_addr_equal(netdev->dev_addr, 
f->macaddr)) 2192 ether_addr_copy(f->macaddr, 2193 adapter->hw.mac.addr); 2194 2195 f->is_new_mac = true; 2196 f->add = true; 2197 f->add_handled = false; 2198 f->remove = false; 2199 } 2200 2201 /* re-add all VLAN filters */ 2202 if (VLAN_FILTERING_ALLOWED(adapter)) { 2203 struct iavf_vlan_filter *vlf; 2204 2205 if (!list_empty(&adapter->vlan_filter_list)) { 2206 list_for_each_entry(vlf, 2207 &adapter->vlan_filter_list, 2208 list) 2209 vlf->add = true; 2210 2211 aq_required |= IAVF_FLAG_AQ_ADD_VLAN_FILTER; 2212 } 2213 } 2214 2215 spin_unlock_bh(&adapter->mac_vlan_list_lock); 2216 2217 netif_addr_lock_bh(netdev); 2218 eth_hw_addr_set(netdev, adapter->hw.mac.addr); 2219 netif_addr_unlock_bh(netdev); 2220 2221 adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER | 2222 aq_required; 2223 } 2224 break; 2225 case VIRTCHNL_OP_ENABLE_QUEUES: 2226 /* enable transmits */ 2227 iavf_irq_enable(adapter, true); 2228 adapter->flags &= ~IAVF_FLAG_QUEUES_DISABLED; 2229 break; 2230 case VIRTCHNL_OP_DISABLE_QUEUES: 2231 iavf_free_all_tx_resources(adapter); 2232 iavf_free_all_rx_resources(adapter); 2233 if (adapter->state == __IAVF_DOWN_PENDING) { 2234 iavf_change_state(adapter, __IAVF_DOWN); 2235 wake_up(&adapter->down_waitqueue); 2236 } 2237 break; 2238 case VIRTCHNL_OP_VERSION: 2239 case VIRTCHNL_OP_CONFIG_IRQ_MAP: 2240 /* Don't display an error if we get these out of sequence. 2241 * If the firmware needed to get kicked, we'll get these and 2242 * it's no problem. 2243 */ 2244 if (v_opcode != adapter->current_op) 2245 return; 2246 break; 2247 case VIRTCHNL_OP_IWARP: 2248 /* Gobble zero-length replies from the PF. They indicate that 2249 * a previous message was received OK, and the client doesn't 2250 * care about that. 
2251 */ 2252 if (msglen && CLIENT_ENABLED(adapter)) 2253 iavf_notify_client_message(&adapter->vsi, msg, msglen); 2254 break; 2255 2256 case VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP: 2257 adapter->client_pending &= 2258 ~(BIT(VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP)); 2259 break; 2260 case VIRTCHNL_OP_GET_RSS_HENA_CAPS: { 2261 struct virtchnl_rss_hena *vrh = (struct virtchnl_rss_hena *)msg; 2262 2263 if (msglen == sizeof(*vrh)) 2264 adapter->hena = vrh->hena; 2265 else 2266 dev_warn(&adapter->pdev->dev, 2267 "Invalid message %d from PF\n", v_opcode); 2268 } 2269 break; 2270 case VIRTCHNL_OP_REQUEST_QUEUES: { 2271 struct virtchnl_vf_res_request *vfres = 2272 (struct virtchnl_vf_res_request *)msg; 2273 2274 if (vfres->num_queue_pairs != adapter->num_req_queues) { 2275 dev_info(&adapter->pdev->dev, 2276 "Requested %d queues, PF can support %d\n", 2277 adapter->num_req_queues, 2278 vfres->num_queue_pairs); 2279 adapter->num_req_queues = 0; 2280 adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED; 2281 } 2282 } 2283 break; 2284 case VIRTCHNL_OP_ADD_CLOUD_FILTER: { 2285 struct iavf_cloud_filter *cf; 2286 2287 list_for_each_entry(cf, &adapter->cloud_filter_list, list) { 2288 if (cf->state == __IAVF_CF_ADD_PENDING) 2289 cf->state = __IAVF_CF_ACTIVE; 2290 } 2291 } 2292 break; 2293 case VIRTCHNL_OP_DEL_CLOUD_FILTER: { 2294 struct iavf_cloud_filter *cf, *cftmp; 2295 2296 list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, 2297 list) { 2298 if (cf->state == __IAVF_CF_DEL_PENDING) { 2299 cf->state = __IAVF_CF_INVALID; 2300 list_del(&cf->list); 2301 kfree(cf); 2302 adapter->num_cloud_filters--; 2303 } 2304 } 2305 } 2306 break; 2307 case VIRTCHNL_OP_ADD_FDIR_FILTER: { 2308 struct virtchnl_fdir_add *add_fltr = (struct virtchnl_fdir_add *)msg; 2309 struct iavf_fdir_fltr *fdir, *fdir_tmp; 2310 2311 spin_lock_bh(&adapter->fdir_fltr_lock); 2312 list_for_each_entry_safe(fdir, fdir_tmp, 2313 &adapter->fdir_list_head, 2314 list) { 2315 if (fdir->state == IAVF_FDIR_FLTR_ADD_PENDING) { 2316 if 
(add_fltr->status == VIRTCHNL_FDIR_SUCCESS) { 2317 dev_info(&adapter->pdev->dev, "Flow Director filter with location %u is added\n", 2318 fdir->loc); 2319 fdir->state = IAVF_FDIR_FLTR_ACTIVE; 2320 fdir->flow_id = add_fltr->flow_id; 2321 } else { 2322 dev_info(&adapter->pdev->dev, "Failed to add Flow Director filter with status: %d\n", 2323 add_fltr->status); 2324 iavf_print_fdir_fltr(adapter, fdir); 2325 list_del(&fdir->list); 2326 kfree(fdir); 2327 adapter->fdir_active_fltr--; 2328 } 2329 } 2330 } 2331 spin_unlock_bh(&adapter->fdir_fltr_lock); 2332 } 2333 break; 2334 case VIRTCHNL_OP_DEL_FDIR_FILTER: { 2335 struct virtchnl_fdir_del *del_fltr = (struct virtchnl_fdir_del *)msg; 2336 struct iavf_fdir_fltr *fdir, *fdir_tmp; 2337 2338 spin_lock_bh(&adapter->fdir_fltr_lock); 2339 list_for_each_entry_safe(fdir, fdir_tmp, &adapter->fdir_list_head, 2340 list) { 2341 if (fdir->state == IAVF_FDIR_FLTR_DEL_PENDING) { 2342 if (del_fltr->status == VIRTCHNL_FDIR_SUCCESS) { 2343 dev_info(&adapter->pdev->dev, "Flow Director filter with location %u is deleted\n", 2344 fdir->loc); 2345 list_del(&fdir->list); 2346 kfree(fdir); 2347 adapter->fdir_active_fltr--; 2348 } else { 2349 fdir->state = IAVF_FDIR_FLTR_ACTIVE; 2350 dev_info(&adapter->pdev->dev, "Failed to delete Flow Director filter with status: %d\n", 2351 del_fltr->status); 2352 iavf_print_fdir_fltr(adapter, fdir); 2353 } 2354 } 2355 } 2356 spin_unlock_bh(&adapter->fdir_fltr_lock); 2357 } 2358 break; 2359 case VIRTCHNL_OP_ADD_RSS_CFG: { 2360 struct iavf_adv_rss *rss; 2361 2362 spin_lock_bh(&adapter->adv_rss_lock); 2363 list_for_each_entry(rss, &adapter->adv_rss_list_head, list) { 2364 if (rss->state == IAVF_ADV_RSS_ADD_PENDING) { 2365 iavf_print_adv_rss_cfg(adapter, rss, 2366 "Input set change for", 2367 "successful"); 2368 rss->state = IAVF_ADV_RSS_ACTIVE; 2369 } 2370 } 2371 spin_unlock_bh(&adapter->adv_rss_lock); 2372 } 2373 break; 2374 case VIRTCHNL_OP_DEL_RSS_CFG: { 2375 struct iavf_adv_rss *rss, *rss_tmp; 2376 2377 
spin_lock_bh(&adapter->adv_rss_lock); 2378 list_for_each_entry_safe(rss, rss_tmp, 2379 &adapter->adv_rss_list_head, list) { 2380 if (rss->state == IAVF_ADV_RSS_DEL_PENDING) { 2381 list_del(&rss->list); 2382 kfree(rss); 2383 } 2384 } 2385 spin_unlock_bh(&adapter->adv_rss_lock); 2386 } 2387 break; 2388 case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING: 2389 /* PF enabled vlan strip on this VF. 2390 * Update netdev->features if needed to be in sync with ethtool. 2391 */ 2392 if (!v_retval) 2393 iavf_netdev_features_vlan_strip_set(netdev, true); 2394 break; 2395 case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING: 2396 /* PF disabled vlan strip on this VF. 2397 * Update netdev->features if needed to be in sync with ethtool. 2398 */ 2399 if (!v_retval) 2400 iavf_netdev_features_vlan_strip_set(netdev, false); 2401 break; 2402 default: 2403 if (adapter->current_op && (v_opcode != adapter->current_op)) 2404 dev_warn(&adapter->pdev->dev, "Expected response %d from PF, received %d\n", 2405 adapter->current_op, v_opcode); 2406 break; 2407 } /* switch v_opcode */ 2408 adapter->current_op = VIRTCHNL_OP_UNKNOWN; 2409 } 2410