// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2018 Intel Corporation. */

#include "iavf.h"
#include "iavf_prototype.h"
#include "iavf_client.h"

/**
 * iavf_send_pf_msg
 * @adapter: adapter structure
 * @op: virtual channel opcode
 * @msg: pointer to message buffer
 * @len: message length
 *
 * Send message to PF and print status if failure.
 *
 * Returns 0 on success (including the case where PF communications are
 * known to be down and the message is silently dropped), otherwise the
 * admin queue status translated to a negative errno.
 **/
static int iavf_send_pf_msg(struct iavf_adapter *adapter,
			    enum virtchnl_ops op, u8 *msg, u16 len)
{
	struct iavf_hw *hw = &adapter->hw;
	enum iavf_status status;

	/* PF is unreachable; sending would only generate noise */
	if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)
		return 0; /* nothing to see here, move along */

	status = iavf_aq_send_msg_to_pf(hw, op, 0, msg, len, NULL);
	if (status)
		dev_dbg(&adapter->pdev->dev, "Unable to send opcode %d to PF, status %s, aq_err %s\n",
			op, iavf_stat_str(hw, status),
			iavf_aq_str(hw, hw->aq.asq_last_status));
	return iavf_status_to_errno(status);
}

/**
 * iavf_send_api_ver
 * @adapter: adapter structure
 *
 * Send API version admin queue message to the PF. The reply is not checked
 * in this function. Returns 0 if the message was successfully
 * sent, or a negative errno if not.
 **/
int iavf_send_api_ver(struct iavf_adapter *adapter)
{
	struct virtchnl_version_info vvi;

	vvi.major = VIRTCHNL_VERSION_MAJOR;
	vvi.minor = VIRTCHNL_VERSION_MINOR;

	return iavf_send_pf_msg(adapter, VIRTCHNL_OP_VERSION, (u8 *)&vvi,
				sizeof(vvi));
}

/**
 * iavf_poll_virtchnl_msg
 * @hw: HW configuration structure
 * @event: event to populate on success
 * @op_to_poll: requested virtchnl op to poll for
 *
 * Drain the admin receive queue until a message carrying the requested
 * opcode is found. Returns 0-or-errno translated from the virtchnl status
 * in the matching message's descriptor, or a negative errno if the queue
 * empties (or another AQ error occurs) before a match is seen.
 *
 * NOTE(review): messages with non-matching opcodes seen while polling are
 * consumed and discarded, not re-queued.
 */
static int
iavf_poll_virtchnl_msg(struct iavf_hw *hw, struct iavf_arq_event_info *event,
		       enum virtchnl_ops op_to_poll)
{
	enum virtchnl_ops received_op;
	enum iavf_status status;
	u32 v_retval;

	while (1) {
		/* When the AQ is empty, iavf_clean_arq_element will return
		 * nonzero and this loop will terminate.
		 */
		status = iavf_clean_arq_element(hw, event, NULL);
		if (status != IAVF_SUCCESS)
			return iavf_status_to_errno(status);
		received_op =
			(enum virtchnl_ops)le32_to_cpu(event->desc.cookie_high);
		if (op_to_poll == received_op)
			break;
	}

	/* cookie_low carries the PF's virtchnl status code for the request */
	v_retval = le32_to_cpu(event->desc.cookie_low);
	return virtchnl_status_to_errno((enum virtchnl_status_code)v_retval);
}

/**
 * iavf_verify_api_ver
 * @adapter: adapter structure
 *
 * Compare API versions with the PF. Must be called after admin queue is
 * initialized. Returns 0 if API versions match, -EIO if they do not,
 * -ENOMEM on allocation failure, and otherwise a negative errno translated
 * from the admin queue/virtchnl status (e.g. when the admin queue is empty).
 **/
int iavf_verify_api_ver(struct iavf_adapter *adapter)
{
	struct iavf_arq_event_info event;
	int err;

	event.buf_len = IAVF_MAX_AQ_BUF_SIZE;
	event.msg_buf = kzalloc(IAVF_MAX_AQ_BUF_SIZE, GFP_KERNEL);
	if (!event.msg_buf)
		return -ENOMEM;

	err = iavf_poll_virtchnl_msg(&adapter->hw, &event, VIRTCHNL_OP_VERSION);
	if (!err) {
		struct virtchnl_version_info *pf_vvi =
			(struct virtchnl_version_info *)event.msg_buf;
		adapter->pf_version = *pf_vvi;

		/* reject a PF whose API is newer than what we speak */
		if (pf_vvi->major > VIRTCHNL_VERSION_MAJOR ||
		    (pf_vvi->major == VIRTCHNL_VERSION_MAJOR &&
		     pf_vvi->minor > VIRTCHNL_VERSION_MINOR))
			err = -EIO;
	}

	kfree(event.msg_buf);

	return err;
}

/**
 * iavf_send_vf_config_msg
 * @adapter: adapter structure
 *
 * Send VF configuration request admin queue message to the PF. The reply
 * is not checked in this function.
Returns 0 if the message was 130 * successfully sent, or one of the IAVF_ADMIN_QUEUE_ERROR_ statuses if not. 131 **/ 132 int iavf_send_vf_config_msg(struct iavf_adapter *adapter) 133 { 134 u32 caps; 135 136 caps = VIRTCHNL_VF_OFFLOAD_L2 | 137 VIRTCHNL_VF_OFFLOAD_RSS_PF | 138 VIRTCHNL_VF_OFFLOAD_RSS_AQ | 139 VIRTCHNL_VF_OFFLOAD_RSS_REG | 140 VIRTCHNL_VF_OFFLOAD_VLAN | 141 VIRTCHNL_VF_OFFLOAD_WB_ON_ITR | 142 VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 | 143 VIRTCHNL_VF_OFFLOAD_ENCAP | 144 VIRTCHNL_VF_OFFLOAD_VLAN_V2 | 145 VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM | 146 VIRTCHNL_VF_OFFLOAD_REQ_QUEUES | 147 VIRTCHNL_VF_OFFLOAD_ADQ | 148 VIRTCHNL_VF_OFFLOAD_USO | 149 VIRTCHNL_VF_OFFLOAD_FDIR_PF | 150 VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF | 151 VIRTCHNL_VF_CAP_ADV_LINK_SPEED; 152 153 adapter->current_op = VIRTCHNL_OP_GET_VF_RESOURCES; 154 adapter->aq_required &= ~IAVF_FLAG_AQ_GET_CONFIG; 155 if (PF_IS_V11(adapter)) 156 return iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_VF_RESOURCES, 157 (u8 *)&caps, sizeof(caps)); 158 else 159 return iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_VF_RESOURCES, 160 NULL, 0); 161 } 162 163 int iavf_send_vf_offload_vlan_v2_msg(struct iavf_adapter *adapter) 164 { 165 adapter->aq_required &= ~IAVF_FLAG_AQ_GET_OFFLOAD_VLAN_V2_CAPS; 166 167 if (!VLAN_V2_ALLOWED(adapter)) 168 return -EOPNOTSUPP; 169 170 adapter->current_op = VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS; 171 172 return iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS, 173 NULL, 0); 174 } 175 176 /** 177 * iavf_validate_num_queues 178 * @adapter: adapter structure 179 * 180 * Validate that the number of queues the PF has sent in 181 * VIRTCHNL_OP_GET_VF_RESOURCES is not larger than the VF can handle. 
 **/
static void iavf_validate_num_queues(struct iavf_adapter *adapter)
{
	if (adapter->vf_res->num_queue_pairs > IAVF_MAX_REQ_QUEUES) {
		struct virtchnl_vsi_resource *vsi_res;
		int i;

		dev_info(&adapter->pdev->dev, "Received %d queues, but can only have a max of %d\n",
			 adapter->vf_res->num_queue_pairs,
			 IAVF_MAX_REQ_QUEUES);
		dev_info(&adapter->pdev->dev, "Fixing by reducing queues to %d\n",
			 IAVF_MAX_REQ_QUEUES);
		/* clamp both the global count and every per-VSI count so the
		 * two never disagree
		 */
		adapter->vf_res->num_queue_pairs = IAVF_MAX_REQ_QUEUES;
		for (i = 0; i < adapter->vf_res->num_vsis; i++) {
			vsi_res = &adapter->vf_res->vsi_res[i];
			vsi_res->num_queue_pairs = IAVF_MAX_REQ_QUEUES;
		}
	}
}

/**
 * iavf_get_vf_config
 * @adapter: private adapter structure
 *
 * Get VF configuration from PF and populate hw structure. Must be called after
 * admin queue is initialized. Busy waits until response is received from PF,
 * with maximum timeout. Response from PF is returned in the buffer for further
 * processing by the caller.
 **/
int iavf_get_vf_config(struct iavf_adapter *adapter)
{
	struct iavf_hw *hw = &adapter->hw;
	struct iavf_arq_event_info event;
	u16 len;
	int err;

	/* worst-case reply: base resource struct plus one VSI entry per
	 * supported VSI
	 */
	len = sizeof(struct virtchnl_vf_resource) +
		IAVF_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource);
	event.buf_len = len;
	event.msg_buf = kzalloc(len, GFP_KERNEL);
	if (!event.msg_buf)
		return -ENOMEM;

	err = iavf_poll_virtchnl_msg(hw, &event, VIRTCHNL_OP_GET_VF_RESOURCES);
	/* copy whatever arrived even on error; min() guards against a PF
	 * reporting a longer message than our buffer was sized for
	 */
	memcpy(adapter->vf_res, event.msg_buf, min(event.msg_len, len));

	/* some PFs send more queues than we should have so validate that
	 * we aren't getting too many queues
	 */
	if (!err)
		iavf_validate_num_queues(adapter);
	iavf_vf_parse_hw_config(hw, adapter->vf_res);

	kfree(event.msg_buf);

	return err;
}

/**
 * iavf_get_vf_vlan_v2_caps - fetch negotiated VLAN V2 capabilities
 * @adapter: adapter structure
 *
 * Poll for the PF's VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS reply and cache it
 * in adapter->vlan_v2_caps. Returns 0 on success or a negative errno.
 */
int iavf_get_vf_vlan_v2_caps(struct iavf_adapter *adapter)
{
	struct iavf_arq_event_info event;
	int err;
	u16 len;

	len = sizeof(struct virtchnl_vlan_caps);
	event.buf_len = len;
	event.msg_buf = kzalloc(len, GFP_KERNEL);
	if (!event.msg_buf)
		return -ENOMEM;

	err = iavf_poll_virtchnl_msg(&adapter->hw, &event,
				     VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS);
	if (!err)
		memcpy(&adapter->vlan_v2_caps, event.msg_buf,
		       min(event.msg_len, len));

	kfree(event.msg_buf);

	return err;
}

/**
 * iavf_configure_queues
 * @adapter: adapter structure
 *
 * Request that the PF set up our (previously allocated) queues.
 **/
void iavf_configure_queues(struct iavf_adapter *adapter)
{
	struct virtchnl_vsi_queue_config_info *vqci;
	int i, max_frame = adapter->vf_res->max_mtu;
	int pairs = adapter->num_active_queues;
	struct virtchnl_queue_pair_info *vqpi;
	size_t len;

	/* 0 means the PF did not report a limit; fall back to HW maximum */
	if (max_frame > IAVF_MAX_RXBUFFER || !max_frame)
		max_frame = IAVF_MAX_RXBUFFER;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot configure queues, command %d pending\n",
			adapter->current_op);
		return;
	}
	adapter->current_op = VIRTCHNL_OP_CONFIG_VSI_QUEUES;
	len = struct_size(vqci, qpair, pairs);
	vqci = kzalloc(len, GFP_KERNEL);
	if (!vqci)
		return;

	/* Limit maximum frame size when jumbo frames is not enabled */
	if (!(adapter->flags & IAVF_FLAG_LEGACY_RX) &&
	    (adapter->netdev->mtu <= ETH_DATA_LEN))
		max_frame = IAVF_RXBUFFER_1536 - NET_IP_ALIGN;

	vqci->vsi_id = adapter->vsi_res->vsi_id;
	vqci->num_queue_pairs = pairs;
	vqpi = vqci->qpair;
	/* Size check is not needed here - HW max is 16 queue pairs, and we
	 * can fit info for 31 of them into the AQ buffer before it overflows.
	 */
	for (i = 0; i < pairs; i++) {
		vqpi->txq.vsi_id = vqci->vsi_id;
		vqpi->txq.queue_id = i;
		vqpi->txq.ring_len = adapter->tx_rings[i].count;
		vqpi->txq.dma_ring_addr = adapter->tx_rings[i].dma;
		vqpi->rxq.vsi_id = vqci->vsi_id;
		vqpi->rxq.queue_id = i;
		vqpi->rxq.ring_len = adapter->rx_rings[i].count;
		vqpi->rxq.dma_ring_addr = adapter->rx_rings[i].dma;
		vqpi->rxq.max_pkt_size = max_frame;
		/* align to the 2^IAVF_RXQ_CTX_DBUFF_SHIFT granularity the
		 * rx queue context expects
		 */
		vqpi->rxq.databuffer_size =
			ALIGN(adapter->rx_rings[i].rx_buf_len,
			      BIT_ULL(IAVF_RXQ_CTX_DBUFF_SHIFT));
		vqpi++;
	}

	adapter->aq_required &= ~IAVF_FLAG_AQ_CONFIGURE_QUEUES;
	iavf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
			 (u8 *)vqci, len);
	kfree(vqci);
}

/**
 * iavf_enable_queues
 * @adapter: adapter structure
 *
 * Request that the PF enable all of our queues.
 **/
void iavf_enable_queues(struct iavf_adapter *adapter)
{
	struct virtchnl_queue_select vqs;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot enable queues, command %d pending\n",
			adapter->current_op);
		return;
	}
	adapter->current_op = VIRTCHNL_OP_ENABLE_QUEUES;
	vqs.vsi_id = adapter->vsi_res->vsi_id;
	/* bitmap of all active queues: bits [0, num_active_queues) set */
	vqs.tx_queues = BIT(adapter->num_active_queues) - 1;
	vqs.rx_queues = vqs.tx_queues;
	adapter->aq_required &= ~IAVF_FLAG_AQ_ENABLE_QUEUES;
	iavf_send_pf_msg(adapter, VIRTCHNL_OP_ENABLE_QUEUES,
			 (u8 *)&vqs, sizeof(vqs));
}

/**
 * iavf_disable_queues
 * @adapter: adapter structure
 *
 * Request that the PF disable all of our queues.
 **/
void iavf_disable_queues(struct iavf_adapter *adapter)
{
	struct virtchnl_queue_select vqs;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot disable queues, command %d pending\n",
			adapter->current_op);
		return;
	}
	adapter->current_op = VIRTCHNL_OP_DISABLE_QUEUES;
	vqs.vsi_id = adapter->vsi_res->vsi_id;
	/* bitmap of all active queues: bits [0, num_active_queues) set */
	vqs.tx_queues = BIT(adapter->num_active_queues) - 1;
	vqs.rx_queues = vqs.tx_queues;
	adapter->aq_required &= ~IAVF_FLAG_AQ_DISABLE_QUEUES;
	iavf_send_pf_msg(adapter, VIRTCHNL_OP_DISABLE_QUEUES,
			 (u8 *)&vqs, sizeof(vqs));
}

/**
 * iavf_map_queues
 * @adapter: adapter structure
 *
 * Request that the PF map queues to interrupt vectors. Misc causes, including
 * admin queue, are always mapped to vector 0.
 **/
void iavf_map_queues(struct iavf_adapter *adapter)
{
	struct virtchnl_irq_map_info *vimi;
	struct virtchnl_vector_map *vecmap;
	struct iavf_q_vector *q_vector;
	int v_idx, q_vectors;
	size_t len;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot map queues to vectors, command %d pending\n",
			adapter->current_op);
		return;
	}
	adapter->current_op = VIRTCHNL_OP_CONFIG_IRQ_MAP;

	/* vectors available for queue work = total minus the reserved
	 * non-queue (misc/AQ) vectors
	 */
	q_vectors = adapter->num_msix_vectors - NONQ_VECS;

	len = struct_size(vimi, vecmap, adapter->num_msix_vectors);
	vimi = kzalloc(len, GFP_KERNEL);
	if (!vimi)
		return;

	vimi->num_vectors = adapter->num_msix_vectors;
	/* Queue vectors first */
	for (v_idx = 0; v_idx < q_vectors; v_idx++) {
		q_vector = &adapter->q_vectors[v_idx];
		vecmap = &vimi->vecmap[v_idx];

		vecmap->vsi_id = adapter->vsi_res->vsi_id;
		vecmap->vector_id = v_idx + NONQ_VECS;
		vecmap->txq_map = q_vector->ring_mask;
		vecmap->rxq_map = q_vector->ring_mask;
		vecmap->rxitr_idx = IAVF_RX_ITR;
		vecmap->txitr_idx = IAVF_TX_ITR;
	}
	/* Misc vector last - this is only for AdminQ messages */
	vecmap = &vimi->vecmap[v_idx];
	vecmap->vsi_id = adapter->vsi_res->vsi_id;
	vecmap->vector_id = 0;
	vecmap->txq_map = 0;
	vecmap->rxq_map = 0;

	adapter->aq_required &= ~IAVF_FLAG_AQ_MAP_VECTORS;
	iavf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_IRQ_MAP,
			 (u8 *)vimi, len);
	kfree(vimi);
}

/**
 * iavf_set_mac_addr_type - Set the correct request type from the filter type
 * @virtchnl_ether_addr: pointer to requested list element
 * @filter: pointer to requested filter
 **/
static void
iavf_set_mac_addr_type(struct virtchnl_ether_addr *virtchnl_ether_addr,
		       const struct iavf_mac_filter *filter)
{
	virtchnl_ether_addr->type = filter->is_primary ?
		VIRTCHNL_ETHER_ADDR_PRIMARY :
		VIRTCHNL_ETHER_ADDR_EXTRA;
}

/**
 * iavf_add_ether_addrs
 * @adapter: adapter structure
 *
 * Request that the PF add one or more addresses to our filters.
 **/
void iavf_add_ether_addrs(struct iavf_adapter *adapter)
{
	struct virtchnl_ether_addr_list *veal;
	struct iavf_mac_filter *f;
	int i = 0, count = 0;
	bool more = false;
	size_t len;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot add filters, command %d pending\n",
			adapter->current_op);
		return;
	}

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	list_for_each_entry(f, &adapter->mac_filter_list, list) {
		if (f->add)
			count++;
	}
	if (!count) {
		adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_MAC_FILTER;
		spin_unlock_bh(&adapter->mac_vlan_list_lock);
		return;
	}
	adapter->current_op = VIRTCHNL_OP_ADD_ETH_ADDR;

	len = struct_size(veal, list, count);
	if (len > IAVF_MAX_AQ_BUF_SIZE) {
		/* send only as many as fit; 'more' keeps the aq_required
		 * flag set so another request follows for the rest
		 */
		dev_warn(&adapter->pdev->dev, "Too many add MAC changes in one request\n");
		count = (IAVF_MAX_AQ_BUF_SIZE -
			 sizeof(struct virtchnl_ether_addr_list)) /
			sizeof(struct virtchnl_ether_addr);
		len = struct_size(veal, list, count);
		more = true;
	}

	/* GFP_ATOMIC: allocating while holding a BH spinlock */
	veal = kzalloc(len, GFP_ATOMIC);
	if (!veal) {
		spin_unlock_bh(&adapter->mac_vlan_list_lock);
		return;
	}

	veal->vsi_id = adapter->vsi_res->vsi_id;
	veal->num_elements = count;
	list_for_each_entry(f, &adapter->mac_filter_list, list) {
		if (f->add) {
			ether_addr_copy(veal->list[i].addr, f->macaddr);
			iavf_set_mac_addr_type(&veal->list[i], f);
			i++;
			f->add = false;
			if (i == count)
				break;
		}
	}
	if (!more)
		adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_MAC_FILTER;

	spin_unlock_bh(&adapter->mac_vlan_list_lock);

	iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_ETH_ADDR, (u8 *)veal, len);
	kfree(veal);
}

/**
 * iavf_del_ether_addrs
 * @adapter: adapter structure
 *
 * Request that the PF remove one or more addresses from our filters.
 **/
void iavf_del_ether_addrs(struct iavf_adapter *adapter)
{
	struct virtchnl_ether_addr_list *veal;
	struct iavf_mac_filter *f, *ftmp;
	int i = 0, count = 0;
	bool more = false;
	size_t len;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot remove filters, command %d pending\n",
			adapter->current_op);
		return;
	}

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	list_for_each_entry(f, &adapter->mac_filter_list, list) {
		if (f->remove)
			count++;
	}
	if (!count) {
		adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_MAC_FILTER;
		spin_unlock_bh(&adapter->mac_vlan_list_lock);
		return;
	}
	adapter->current_op = VIRTCHNL_OP_DEL_ETH_ADDR;

	len = struct_size(veal, list, count);
	if (len > IAVF_MAX_AQ_BUF_SIZE) {
		/* send only as many as fit; 'more' keeps the aq_required
		 * flag set so another request follows for the rest
		 */
		dev_warn(&adapter->pdev->dev, "Too many delete MAC changes in one request\n");
		count = (IAVF_MAX_AQ_BUF_SIZE -
			 sizeof(struct virtchnl_ether_addr_list)) /
			sizeof(struct virtchnl_ether_addr);
		len = struct_size(veal, list, count);
		more = true;
	}
	/* GFP_ATOMIC: allocating while holding a BH spinlock */
	veal = kzalloc(len, GFP_ATOMIC);
	if (!veal) {
		spin_unlock_bh(&adapter->mac_vlan_list_lock);
		return;
	}

	veal->vsi_id = adapter->vsi_res->vsi_id;
	veal->num_elements = count;
	/* _safe variant: entries are freed as they are batched */
	list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
		if (f->remove) {
			ether_addr_copy(veal->list[i].addr, f->macaddr);
			iavf_set_mac_addr_type(&veal->list[i], f);
			i++;
			list_del(&f->list);
			kfree(f);
			if (i == count)
				break;
		}
	}
	if (!more)
		adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_MAC_FILTER;

	spin_unlock_bh(&adapter->mac_vlan_list_lock);

	iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_ETH_ADDR, (u8 *)veal, len);
	kfree(veal);
}

/**
 * iavf_mac_add_ok
 * @adapter: adapter structure
 *
 * Submit list of filters based on PF
response. 592 **/ 593 static void iavf_mac_add_ok(struct iavf_adapter *adapter) 594 { 595 struct iavf_mac_filter *f, *ftmp; 596 597 spin_lock_bh(&adapter->mac_vlan_list_lock); 598 list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) { 599 f->is_new_mac = false; 600 if (!f->add && !f->add_handled) 601 f->add_handled = true; 602 } 603 spin_unlock_bh(&adapter->mac_vlan_list_lock); 604 } 605 606 /** 607 * iavf_mac_add_reject 608 * @adapter: adapter structure 609 * 610 * Remove filters from list based on PF response. 611 **/ 612 static void iavf_mac_add_reject(struct iavf_adapter *adapter) 613 { 614 struct net_device *netdev = adapter->netdev; 615 struct iavf_mac_filter *f, *ftmp; 616 617 spin_lock_bh(&adapter->mac_vlan_list_lock); 618 list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) { 619 if (f->remove && ether_addr_equal(f->macaddr, netdev->dev_addr)) 620 f->remove = false; 621 622 if (!f->add && !f->add_handled) 623 f->add_handled = true; 624 625 if (f->is_new_mac) { 626 list_del(&f->list); 627 kfree(f); 628 } 629 } 630 spin_unlock_bh(&adapter->mac_vlan_list_lock); 631 } 632 633 /** 634 * iavf_vlan_add_reject 635 * @adapter: adapter structure 636 * 637 * Remove VLAN filters from list based on PF response. 638 **/ 639 static void iavf_vlan_add_reject(struct iavf_adapter *adapter) 640 { 641 struct iavf_vlan_filter *f, *ftmp; 642 643 spin_lock_bh(&adapter->mac_vlan_list_lock); 644 list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) { 645 if (f->is_new_vlan) { 646 if (f->vlan.tpid == ETH_P_8021Q) 647 clear_bit(f->vlan.vid, 648 adapter->vsi.active_cvlans); 649 else 650 clear_bit(f->vlan.vid, 651 adapter->vsi.active_svlans); 652 653 list_del(&f->list); 654 kfree(f); 655 } 656 } 657 spin_unlock_bh(&adapter->mac_vlan_list_lock); 658 } 659 660 /** 661 * iavf_add_vlans 662 * @adapter: adapter structure 663 * 664 * Request that the PF add one or more VLAN filters to our VSI. 
 **/
void iavf_add_vlans(struct iavf_adapter *adapter)
{
	int len, i = 0, count = 0;
	struct iavf_vlan_filter *f;
	bool more = false;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot add VLANs, command %d pending\n",
			adapter->current_op);
		return;
	}

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	list_for_each_entry(f, &adapter->vlan_filter_list, list) {
		if (f->add)
			count++;
	}
	if (!count || !VLAN_FILTERING_ALLOWED(adapter)) {
		adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_VLAN_FILTER;
		spin_unlock_bh(&adapter->mac_vlan_list_lock);
		return;
	}

	/* legacy (v1) VLAN path when negotiated, otherwise VLAN V2 */
	if (VLAN_ALLOWED(adapter)) {
		struct virtchnl_vlan_filter_list *vvfl;

		adapter->current_op = VIRTCHNL_OP_ADD_VLAN;

		len = sizeof(*vvfl) + (count * sizeof(u16));
		if (len > IAVF_MAX_AQ_BUF_SIZE) {
			/* batch what fits; 'more' leaves the aq_required flag
			 * set so a follow-up request handles the remainder
			 */
			dev_warn(&adapter->pdev->dev, "Too many add VLAN changes in one request\n");
			count = (IAVF_MAX_AQ_BUF_SIZE - sizeof(*vvfl)) /
				sizeof(u16);
			len = sizeof(*vvfl) + (count * sizeof(u16));
			more = true;
		}
		/* GFP_ATOMIC: allocating while holding a BH spinlock */
		vvfl = kzalloc(len, GFP_ATOMIC);
		if (!vvfl) {
			spin_unlock_bh(&adapter->mac_vlan_list_lock);
			return;
		}

		vvfl->vsi_id = adapter->vsi_res->vsi_id;
		vvfl->num_elements = count;
		list_for_each_entry(f, &adapter->vlan_filter_list, list) {
			if (f->add) {
				vvfl->vlan_id[i] = f->vlan.vid;
				i++;
				f->add = false;
				f->is_new_vlan = true;
				if (i == count)
					break;
			}
		}
		if (!more)
			adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_VLAN_FILTER;

		spin_unlock_bh(&adapter->mac_vlan_list_lock);

		iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_VLAN, (u8 *)vvfl, len);
		kfree(vvfl);
	} else {
		u16 max_vlans = adapter->vlan_v2_caps.filtering.max_filters;
		u16 current_vlans = iavf_get_num_vlans_added(adapter);
		struct virtchnl_vlan_filter_list_v2 *vvfl_v2;

		adapter->current_op = VIRTCHNL_OP_ADD_VLAN_V2;

		/* don't ask for more filters than the PF allows in total */
		if ((count + current_vlans) > max_vlans &&
		    current_vlans < max_vlans) {
			count = max_vlans - iavf_get_num_vlans_added(adapter);
			more = true;
		}

		len = sizeof(*vvfl_v2) + ((count - 1) *
					  sizeof(struct virtchnl_vlan_filter));
		if (len > IAVF_MAX_AQ_BUF_SIZE) {
			dev_warn(&adapter->pdev->dev, "Too many add VLAN changes in one request\n");
			count = (IAVF_MAX_AQ_BUF_SIZE - sizeof(*vvfl_v2)) /
				sizeof(struct virtchnl_vlan_filter);
			len = sizeof(*vvfl_v2) +
				((count - 1) *
				 sizeof(struct virtchnl_vlan_filter));
			more = true;
		}

		/* GFP_ATOMIC: allocating while holding a BH spinlock */
		vvfl_v2 = kzalloc(len, GFP_ATOMIC);
		if (!vvfl_v2) {
			spin_unlock_bh(&adapter->mac_vlan_list_lock);
			return;
		}

		vvfl_v2->vport_id = adapter->vsi_res->vsi_id;
		vvfl_v2->num_elements = count;
		list_for_each_entry(f, &adapter->vlan_filter_list, list) {
			if (f->add) {
				struct virtchnl_vlan_supported_caps *filtering_support =
					&adapter->vlan_v2_caps.filtering.filtering_support;
				struct virtchnl_vlan *vlan;

				if (i == count)
					break;

				/* give priority over outer if it's enabled */
				if (filtering_support->outer)
					vlan = &vvfl_v2->filters[i].outer;
				else
					vlan = &vvfl_v2->filters[i].inner;

				vlan->tci = f->vlan.vid;
				vlan->tpid = f->vlan.tpid;

				i++;
				f->add = false;
				f->is_new_vlan = true;
			}
		}

		if (!more)
			adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_VLAN_FILTER;

		spin_unlock_bh(&adapter->mac_vlan_list_lock);

		iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_VLAN_V2,
				 (u8 *)vvfl_v2, len);
		kfree(vvfl_v2);
	}
}

/**
 * iavf_del_vlans
 * @adapter: adapter structure
 *
 * Request that the PF remove one or more VLAN filters from our VSI.
 **/
void iavf_del_vlans(struct iavf_adapter *adapter)
{
	struct iavf_vlan_filter *f, *ftmp;
	int len, i = 0, count = 0;
	bool more = false;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot remove VLANs, command %d pending\n",
			adapter->current_op);
		return;
	}

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) {
		/* since VLAN capabilities are not allowed, we dont want to send
		 * a VLAN delete request because it will most likely fail and
		 * create unnecessary errors/noise, so just free the VLAN
		 * filters marked for removal to enable bailing out before
		 * sending a virtchnl message
		 */
		if (f->remove && !VLAN_FILTERING_ALLOWED(adapter)) {
			list_del(&f->list);
			kfree(f);
		} else if (f->remove) {
			count++;
		}
	}
	if (!count || !VLAN_FILTERING_ALLOWED(adapter)) {
		adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_VLAN_FILTER;
		spin_unlock_bh(&adapter->mac_vlan_list_lock);
		return;
	}

	/* legacy (v1) VLAN path when negotiated, otherwise VLAN V2 */
	if (VLAN_ALLOWED(adapter)) {
		struct virtchnl_vlan_filter_list *vvfl;

		adapter->current_op = VIRTCHNL_OP_DEL_VLAN;

		len = sizeof(*vvfl) + (count * sizeof(u16));
		if (len > IAVF_MAX_AQ_BUF_SIZE) {
			/* batch what fits; 'more' leaves the aq_required flag
			 * set so a follow-up request handles the remainder
			 */
			dev_warn(&adapter->pdev->dev, "Too many delete VLAN changes in one request\n");
			count = (IAVF_MAX_AQ_BUF_SIZE - sizeof(*vvfl)) /
				sizeof(u16);
			len = sizeof(*vvfl) + (count * sizeof(u16));
			more = true;
		}
		/* GFP_ATOMIC: allocating while holding a BH spinlock */
		vvfl = kzalloc(len, GFP_ATOMIC);
		if (!vvfl) {
			spin_unlock_bh(&adapter->mac_vlan_list_lock);
			return;
		}

		vvfl->vsi_id = adapter->vsi_res->vsi_id;
		vvfl->num_elements = count;
		list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) {
			if (f->remove) {
				vvfl->vlan_id[i] = f->vlan.vid;
				i++;
				list_del(&f->list);
				kfree(f);
				if (i == count)
					break;
			}
		}

		if (!more)
			adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_VLAN_FILTER;

		spin_unlock_bh(&adapter->mac_vlan_list_lock);

		iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_VLAN, (u8 *)vvfl, len);
		kfree(vvfl);
	} else {
		struct virtchnl_vlan_filter_list_v2 *vvfl_v2;

		adapter->current_op = VIRTCHNL_OP_DEL_VLAN_V2;

		len = sizeof(*vvfl_v2) +
			((count - 1) * sizeof(struct virtchnl_vlan_filter));
		if (len > IAVF_MAX_AQ_BUF_SIZE) {
			dev_warn(&adapter->pdev->dev, "Too many add VLAN changes in one request\n");
			count = (IAVF_MAX_AQ_BUF_SIZE -
				 sizeof(*vvfl_v2)) /
				sizeof(struct virtchnl_vlan_filter);
			len = sizeof(*vvfl_v2) +
				((count - 1) *
				 sizeof(struct virtchnl_vlan_filter));
			more = true;
		}

		/* GFP_ATOMIC: allocating while holding a BH spinlock */
		vvfl_v2 = kzalloc(len, GFP_ATOMIC);
		if (!vvfl_v2) {
			spin_unlock_bh(&adapter->mac_vlan_list_lock);
			return;
		}

		vvfl_v2->vport_id = adapter->vsi_res->vsi_id;
		vvfl_v2->num_elements = count;
		list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) {
			if (f->remove) {
				struct virtchnl_vlan_supported_caps *filtering_support =
					&adapter->vlan_v2_caps.filtering.filtering_support;
				struct virtchnl_vlan *vlan;

				/* give priority over outer if it's enabled */
				if (filtering_support->outer)
					vlan = &vvfl_v2->filters[i].outer;
				else
					vlan = &vvfl_v2->filters[i].inner;

				vlan->tci = f->vlan.vid;
				vlan->tpid = f->vlan.tpid;

				list_del(&f->list);
				kfree(f);
				i++;
				if (i == count)
					break;
			}
		}

		if (!more)
			adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_VLAN_FILTER;

		spin_unlock_bh(&adapter->mac_vlan_list_lock);

		iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_VLAN_V2,
				 (u8 *)vvfl_v2, len);
		kfree(vvfl_v2);
	}
}

/**
 * iavf_set_promiscuous
 * @adapter: adapter structure
 * @flags: bitmask to control unicast/multicast promiscuous.
 *
 * Request that the PF enable promiscuous mode for our VSI. A zero @flags
 * releases any promiscuous/allmulti state previously enabled.
 **/
void iavf_set_promiscuous(struct iavf_adapter *adapter, int flags)
{
	struct virtchnl_promisc_info vpi;
	int promisc_all;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot set promiscuous mode, command %d pending\n",
			adapter->current_op);
		return;
	}

	/* full promiscuous requires both unicast and multicast bits */
	promisc_all = FLAG_VF_UNICAST_PROMISC |
		      FLAG_VF_MULTICAST_PROMISC;
	if ((flags & promisc_all) == promisc_all) {
		adapter->flags |= IAVF_FLAG_PROMISC_ON;
		adapter->aq_required &= ~IAVF_FLAG_AQ_REQUEST_PROMISC;
		dev_info(&adapter->pdev->dev, "Entering promiscuous mode\n");
	}

	if (flags & FLAG_VF_MULTICAST_PROMISC) {
		adapter->flags |= IAVF_FLAG_ALLMULTI_ON;
		adapter->aq_required &= ~IAVF_FLAG_AQ_REQUEST_ALLMULTI;
		dev_info(&adapter->pdev->dev, "%s is entering multicast promiscuous mode\n",
			 adapter->netdev->name);
	}

	if (!flags) {
		if (adapter->flags & IAVF_FLAG_PROMISC_ON) {
			adapter->flags &= ~IAVF_FLAG_PROMISC_ON;
			adapter->aq_required &= ~IAVF_FLAG_AQ_RELEASE_PROMISC;
			dev_info(&adapter->pdev->dev, "Leaving promiscuous mode\n");
		}

		if (adapter->flags & IAVF_FLAG_ALLMULTI_ON) {
			adapter->flags &= ~IAVF_FLAG_ALLMULTI_ON;
			adapter->aq_required &= ~IAVF_FLAG_AQ_RELEASE_ALLMULTI;
			dev_info(&adapter->pdev->dev, "%s is leaving multicast promiscuous mode\n",
				 adapter->netdev->name);
		}
	}

	adapter->current_op = VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE;
	vpi.vsi_id = adapter->vsi_res->vsi_id;
	vpi.flags = flags;
	iavf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
			 (u8 *)&vpi, sizeof(vpi));
}

/**
 * iavf_request_stats
 * @adapter: adapter structure
 *
 * Request VSI statistics from PF.
998 **/ 999 void iavf_request_stats(struct iavf_adapter *adapter) 1000 { 1001 struct virtchnl_queue_select vqs; 1002 1003 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { 1004 /* no error message, this isn't crucial */ 1005 return; 1006 } 1007 1008 adapter->aq_required &= ~IAVF_FLAG_AQ_REQUEST_STATS; 1009 adapter->current_op = VIRTCHNL_OP_GET_STATS; 1010 vqs.vsi_id = adapter->vsi_res->vsi_id; 1011 /* queue maps are ignored for this message - only the vsi is used */ 1012 if (iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_STATS, (u8 *)&vqs, 1013 sizeof(vqs))) 1014 /* if the request failed, don't lock out others */ 1015 adapter->current_op = VIRTCHNL_OP_UNKNOWN; 1016 } 1017 1018 /** 1019 * iavf_get_hena 1020 * @adapter: adapter structure 1021 * 1022 * Request hash enable capabilities from PF 1023 **/ 1024 void iavf_get_hena(struct iavf_adapter *adapter) 1025 { 1026 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { 1027 /* bail because we already have a command pending */ 1028 dev_err(&adapter->pdev->dev, "Cannot get RSS hash capabilities, command %d pending\n", 1029 adapter->current_op); 1030 return; 1031 } 1032 adapter->current_op = VIRTCHNL_OP_GET_RSS_HENA_CAPS; 1033 adapter->aq_required &= ~IAVF_FLAG_AQ_GET_HENA; 1034 iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_RSS_HENA_CAPS, NULL, 0); 1035 } 1036 1037 /** 1038 * iavf_set_hena 1039 * @adapter: adapter structure 1040 * 1041 * Request the PF to set our RSS hash capabilities 1042 **/ 1043 void iavf_set_hena(struct iavf_adapter *adapter) 1044 { 1045 struct virtchnl_rss_hena vrh; 1046 1047 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { 1048 /* bail because we already have a command pending */ 1049 dev_err(&adapter->pdev->dev, "Cannot set RSS hash enable, command %d pending\n", 1050 adapter->current_op); 1051 return; 1052 } 1053 vrh.hena = adapter->hena; 1054 adapter->current_op = VIRTCHNL_OP_SET_RSS_HENA; 1055 adapter->aq_required &= ~IAVF_FLAG_AQ_SET_HENA; 1056 iavf_send_pf_msg(adapter, VIRTCHNL_OP_SET_RSS_HENA, (u8 
*)&vrh, 1057 sizeof(vrh)); 1058 } 1059 1060 /** 1061 * iavf_set_rss_key 1062 * @adapter: adapter structure 1063 * 1064 * Request the PF to set our RSS hash key 1065 **/ 1066 void iavf_set_rss_key(struct iavf_adapter *adapter) 1067 { 1068 struct virtchnl_rss_key *vrk; 1069 int len; 1070 1071 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { 1072 /* bail because we already have a command pending */ 1073 dev_err(&adapter->pdev->dev, "Cannot set RSS key, command %d pending\n", 1074 adapter->current_op); 1075 return; 1076 } 1077 len = sizeof(struct virtchnl_rss_key) + 1078 (adapter->rss_key_size * sizeof(u8)) - 1; 1079 vrk = kzalloc(len, GFP_KERNEL); 1080 if (!vrk) 1081 return; 1082 vrk->vsi_id = adapter->vsi.id; 1083 vrk->key_len = adapter->rss_key_size; 1084 memcpy(vrk->key, adapter->rss_key, adapter->rss_key_size); 1085 1086 adapter->current_op = VIRTCHNL_OP_CONFIG_RSS_KEY; 1087 adapter->aq_required &= ~IAVF_FLAG_AQ_SET_RSS_KEY; 1088 iavf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_RSS_KEY, (u8 *)vrk, len); 1089 kfree(vrk); 1090 } 1091 1092 /** 1093 * iavf_set_rss_lut 1094 * @adapter: adapter structure 1095 * 1096 * Request the PF to set our RSS lookup table 1097 **/ 1098 void iavf_set_rss_lut(struct iavf_adapter *adapter) 1099 { 1100 struct virtchnl_rss_lut *vrl; 1101 int len; 1102 1103 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { 1104 /* bail because we already have a command pending */ 1105 dev_err(&adapter->pdev->dev, "Cannot set RSS LUT, command %d pending\n", 1106 adapter->current_op); 1107 return; 1108 } 1109 len = sizeof(struct virtchnl_rss_lut) + 1110 (adapter->rss_lut_size * sizeof(u8)) - 1; 1111 vrl = kzalloc(len, GFP_KERNEL); 1112 if (!vrl) 1113 return; 1114 vrl->vsi_id = adapter->vsi.id; 1115 vrl->lut_entries = adapter->rss_lut_size; 1116 memcpy(vrl->lut, adapter->rss_lut, adapter->rss_lut_size); 1117 adapter->current_op = VIRTCHNL_OP_CONFIG_RSS_LUT; 1118 adapter->aq_required &= ~IAVF_FLAG_AQ_SET_RSS_LUT; 1119 iavf_send_pf_msg(adapter, 
VIRTCHNL_OP_CONFIG_RSS_LUT, (u8 *)vrl, len); 1120 kfree(vrl); 1121 } 1122 1123 /** 1124 * iavf_enable_vlan_stripping 1125 * @adapter: adapter structure 1126 * 1127 * Request VLAN header stripping to be enabled 1128 **/ 1129 void iavf_enable_vlan_stripping(struct iavf_adapter *adapter) 1130 { 1131 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { 1132 /* bail because we already have a command pending */ 1133 dev_err(&adapter->pdev->dev, "Cannot enable stripping, command %d pending\n", 1134 adapter->current_op); 1135 return; 1136 } 1137 adapter->current_op = VIRTCHNL_OP_ENABLE_VLAN_STRIPPING; 1138 adapter->aq_required &= ~IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING; 1139 iavf_send_pf_msg(adapter, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING, NULL, 0); 1140 } 1141 1142 /** 1143 * iavf_disable_vlan_stripping 1144 * @adapter: adapter structure 1145 * 1146 * Request VLAN header stripping to be disabled 1147 **/ 1148 void iavf_disable_vlan_stripping(struct iavf_adapter *adapter) 1149 { 1150 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { 1151 /* bail because we already have a command pending */ 1152 dev_err(&adapter->pdev->dev, "Cannot disable stripping, command %d pending\n", 1153 adapter->current_op); 1154 return; 1155 } 1156 adapter->current_op = VIRTCHNL_OP_DISABLE_VLAN_STRIPPING; 1157 adapter->aq_required &= ~IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING; 1158 iavf_send_pf_msg(adapter, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING, NULL, 0); 1159 } 1160 1161 /** 1162 * iavf_tpid_to_vc_ethertype - transform from VLAN TPID to virtchnl ethertype 1163 * @tpid: VLAN TPID (i.e. 0x8100, 0x88a8, etc.) 
1164 */ 1165 static u32 iavf_tpid_to_vc_ethertype(u16 tpid) 1166 { 1167 switch (tpid) { 1168 case ETH_P_8021Q: 1169 return VIRTCHNL_VLAN_ETHERTYPE_8100; 1170 case ETH_P_8021AD: 1171 return VIRTCHNL_VLAN_ETHERTYPE_88A8; 1172 } 1173 1174 return 0; 1175 } 1176 1177 /** 1178 * iavf_set_vc_offload_ethertype - set virtchnl ethertype for offload message 1179 * @adapter: adapter structure 1180 * @msg: message structure used for updating offloads over virtchnl to update 1181 * @tpid: VLAN TPID (i.e. 0x8100, 0x88a8, etc.) 1182 * @offload_op: opcode used to determine which support structure to check 1183 */ 1184 static int 1185 iavf_set_vc_offload_ethertype(struct iavf_adapter *adapter, 1186 struct virtchnl_vlan_setting *msg, u16 tpid, 1187 enum virtchnl_ops offload_op) 1188 { 1189 struct virtchnl_vlan_supported_caps *offload_support; 1190 u16 vc_ethertype = iavf_tpid_to_vc_ethertype(tpid); 1191 1192 /* reference the correct offload support structure */ 1193 switch (offload_op) { 1194 case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2: 1195 case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2: 1196 offload_support = 1197 &adapter->vlan_v2_caps.offloads.stripping_support; 1198 break; 1199 case VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2: 1200 case VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2: 1201 offload_support = 1202 &adapter->vlan_v2_caps.offloads.insertion_support; 1203 break; 1204 default: 1205 dev_err(&adapter->pdev->dev, "Invalid opcode %d for setting virtchnl ethertype to enable/disable VLAN offloads\n", 1206 offload_op); 1207 return -EINVAL; 1208 } 1209 1210 /* make sure ethertype is supported */ 1211 if (offload_support->outer & vc_ethertype && 1212 offload_support->outer & VIRTCHNL_VLAN_TOGGLE) { 1213 msg->outer_ethertype_setting = vc_ethertype; 1214 } else if (offload_support->inner & vc_ethertype && 1215 offload_support->inner & VIRTCHNL_VLAN_TOGGLE) { 1216 msg->inner_ethertype_setting = vc_ethertype; 1217 } else { 1218 dev_dbg(&adapter->pdev->dev, "opcode %d unsupported for VLAN TPID 0x%04x\n", 
1219 offload_op, tpid); 1220 return -EINVAL; 1221 } 1222 1223 return 0; 1224 } 1225 1226 /** 1227 * iavf_clear_offload_v2_aq_required - clear AQ required bit for offload request 1228 * @adapter: adapter structure 1229 * @tpid: VLAN TPID 1230 * @offload_op: opcode used to determine which AQ required bit to clear 1231 */ 1232 static void 1233 iavf_clear_offload_v2_aq_required(struct iavf_adapter *adapter, u16 tpid, 1234 enum virtchnl_ops offload_op) 1235 { 1236 switch (offload_op) { 1237 case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2: 1238 if (tpid == ETH_P_8021Q) 1239 adapter->aq_required &= 1240 ~IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_STRIPPING; 1241 else if (tpid == ETH_P_8021AD) 1242 adapter->aq_required &= 1243 ~IAVF_FLAG_AQ_ENABLE_STAG_VLAN_STRIPPING; 1244 break; 1245 case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2: 1246 if (tpid == ETH_P_8021Q) 1247 adapter->aq_required &= 1248 ~IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_STRIPPING; 1249 else if (tpid == ETH_P_8021AD) 1250 adapter->aq_required &= 1251 ~IAVF_FLAG_AQ_DISABLE_STAG_VLAN_STRIPPING; 1252 break; 1253 case VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2: 1254 if (tpid == ETH_P_8021Q) 1255 adapter->aq_required &= 1256 ~IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_INSERTION; 1257 else if (tpid == ETH_P_8021AD) 1258 adapter->aq_required &= 1259 ~IAVF_FLAG_AQ_ENABLE_STAG_VLAN_INSERTION; 1260 break; 1261 case VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2: 1262 if (tpid == ETH_P_8021Q) 1263 adapter->aq_required &= 1264 ~IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_INSERTION; 1265 else if (tpid == ETH_P_8021AD) 1266 adapter->aq_required &= 1267 ~IAVF_FLAG_AQ_DISABLE_STAG_VLAN_INSERTION; 1268 break; 1269 default: 1270 dev_err(&adapter->pdev->dev, "Unsupported opcode %d specified for clearing aq_required bits for VIRTCHNL_VF_OFFLOAD_VLAN_V2 offload request\n", 1271 offload_op); 1272 } 1273 } 1274 1275 /** 1276 * iavf_send_vlan_offload_v2 - send offload enable/disable over virtchnl 1277 * @adapter: adapter structure 1278 * @tpid: VLAN TPID used for the command (i.e. 
0x8100 or 0x88a8) 1279 * @offload_op: offload_op used to make the request over virtchnl 1280 */ 1281 static void 1282 iavf_send_vlan_offload_v2(struct iavf_adapter *adapter, u16 tpid, 1283 enum virtchnl_ops offload_op) 1284 { 1285 struct virtchnl_vlan_setting *msg; 1286 int len = sizeof(*msg); 1287 1288 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { 1289 /* bail because we already have a command pending */ 1290 dev_err(&adapter->pdev->dev, "Cannot send %d, command %d pending\n", 1291 offload_op, adapter->current_op); 1292 return; 1293 } 1294 1295 adapter->current_op = offload_op; 1296 1297 msg = kzalloc(len, GFP_KERNEL); 1298 if (!msg) 1299 return; 1300 1301 msg->vport_id = adapter->vsi_res->vsi_id; 1302 1303 /* always clear to prevent unsupported and endless requests */ 1304 iavf_clear_offload_v2_aq_required(adapter, tpid, offload_op); 1305 1306 /* only send valid offload requests */ 1307 if (!iavf_set_vc_offload_ethertype(adapter, msg, tpid, offload_op)) 1308 iavf_send_pf_msg(adapter, offload_op, (u8 *)msg, len); 1309 else 1310 adapter->current_op = VIRTCHNL_OP_UNKNOWN; 1311 1312 kfree(msg); 1313 } 1314 1315 /** 1316 * iavf_enable_vlan_stripping_v2 - enable VLAN stripping 1317 * @adapter: adapter structure 1318 * @tpid: VLAN TPID used to enable VLAN stripping 1319 */ 1320 void iavf_enable_vlan_stripping_v2(struct iavf_adapter *adapter, u16 tpid) 1321 { 1322 iavf_send_vlan_offload_v2(adapter, tpid, 1323 VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2); 1324 } 1325 1326 /** 1327 * iavf_disable_vlan_stripping_v2 - disable VLAN stripping 1328 * @adapter: adapter structure 1329 * @tpid: VLAN TPID used to disable VLAN stripping 1330 */ 1331 void iavf_disable_vlan_stripping_v2(struct iavf_adapter *adapter, u16 tpid) 1332 { 1333 iavf_send_vlan_offload_v2(adapter, tpid, 1334 VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2); 1335 } 1336 1337 /** 1338 * iavf_enable_vlan_insertion_v2 - enable VLAN insertion 1339 * @adapter: adapter structure 1340 * @tpid: VLAN TPID used to enable VLAN 
insertion 1341 */ 1342 void iavf_enable_vlan_insertion_v2(struct iavf_adapter *adapter, u16 tpid) 1343 { 1344 iavf_send_vlan_offload_v2(adapter, tpid, 1345 VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2); 1346 } 1347 1348 /** 1349 * iavf_disable_vlan_insertion_v2 - disable VLAN insertion 1350 * @adapter: adapter structure 1351 * @tpid: VLAN TPID used to disable VLAN insertion 1352 */ 1353 void iavf_disable_vlan_insertion_v2(struct iavf_adapter *adapter, u16 tpid) 1354 { 1355 iavf_send_vlan_offload_v2(adapter, tpid, 1356 VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2); 1357 } 1358 1359 #define IAVF_MAX_SPEED_STRLEN 13 1360 1361 /** 1362 * iavf_print_link_message - print link up or down 1363 * @adapter: adapter structure 1364 * 1365 * Log a message telling the world of our wonderous link status 1366 */ 1367 static void iavf_print_link_message(struct iavf_adapter *adapter) 1368 { 1369 struct net_device *netdev = adapter->netdev; 1370 int link_speed_mbps; 1371 char *speed; 1372 1373 if (!adapter->link_up) { 1374 netdev_info(netdev, "NIC Link is Down\n"); 1375 return; 1376 } 1377 1378 speed = kzalloc(IAVF_MAX_SPEED_STRLEN, GFP_KERNEL); 1379 if (!speed) 1380 return; 1381 1382 if (ADV_LINK_SUPPORT(adapter)) { 1383 link_speed_mbps = adapter->link_speed_mbps; 1384 goto print_link_msg; 1385 } 1386 1387 switch (adapter->link_speed) { 1388 case VIRTCHNL_LINK_SPEED_40GB: 1389 link_speed_mbps = SPEED_40000; 1390 break; 1391 case VIRTCHNL_LINK_SPEED_25GB: 1392 link_speed_mbps = SPEED_25000; 1393 break; 1394 case VIRTCHNL_LINK_SPEED_20GB: 1395 link_speed_mbps = SPEED_20000; 1396 break; 1397 case VIRTCHNL_LINK_SPEED_10GB: 1398 link_speed_mbps = SPEED_10000; 1399 break; 1400 case VIRTCHNL_LINK_SPEED_5GB: 1401 link_speed_mbps = SPEED_5000; 1402 break; 1403 case VIRTCHNL_LINK_SPEED_2_5GB: 1404 link_speed_mbps = SPEED_2500; 1405 break; 1406 case VIRTCHNL_LINK_SPEED_1GB: 1407 link_speed_mbps = SPEED_1000; 1408 break; 1409 case VIRTCHNL_LINK_SPEED_100MB: 1410 link_speed_mbps = SPEED_100; 1411 break; 1412 
default: 1413 link_speed_mbps = SPEED_UNKNOWN; 1414 break; 1415 } 1416 1417 print_link_msg: 1418 if (link_speed_mbps > SPEED_1000) { 1419 if (link_speed_mbps == SPEED_2500) 1420 snprintf(speed, IAVF_MAX_SPEED_STRLEN, "2.5 Gbps"); 1421 else 1422 /* convert to Gbps inline */ 1423 snprintf(speed, IAVF_MAX_SPEED_STRLEN, "%d %s", 1424 link_speed_mbps / 1000, "Gbps"); 1425 } else if (link_speed_mbps == SPEED_UNKNOWN) { 1426 snprintf(speed, IAVF_MAX_SPEED_STRLEN, "%s", "Unknown Mbps"); 1427 } else { 1428 snprintf(speed, IAVF_MAX_SPEED_STRLEN, "%d %s", 1429 link_speed_mbps, "Mbps"); 1430 } 1431 1432 netdev_info(netdev, "NIC Link is Up Speed is %s Full Duplex\n", speed); 1433 kfree(speed); 1434 } 1435 1436 /** 1437 * iavf_get_vpe_link_status 1438 * @adapter: adapter structure 1439 * @vpe: virtchnl_pf_event structure 1440 * 1441 * Helper function for determining the link status 1442 **/ 1443 static bool 1444 iavf_get_vpe_link_status(struct iavf_adapter *adapter, 1445 struct virtchnl_pf_event *vpe) 1446 { 1447 if (ADV_LINK_SUPPORT(adapter)) 1448 return vpe->event_data.link_event_adv.link_status; 1449 else 1450 return vpe->event_data.link_event.link_status; 1451 } 1452 1453 /** 1454 * iavf_set_adapter_link_speed_from_vpe 1455 * @adapter: adapter structure for which we are setting the link speed 1456 * @vpe: virtchnl_pf_event structure that contains the link speed we are setting 1457 * 1458 * Helper function for setting iavf_adapter link speed 1459 **/ 1460 static void 1461 iavf_set_adapter_link_speed_from_vpe(struct iavf_adapter *adapter, 1462 struct virtchnl_pf_event *vpe) 1463 { 1464 if (ADV_LINK_SUPPORT(adapter)) 1465 adapter->link_speed_mbps = 1466 vpe->event_data.link_event_adv.link_speed; 1467 else 1468 adapter->link_speed = vpe->event_data.link_event.link_speed; 1469 } 1470 1471 /** 1472 * iavf_enable_channels 1473 * @adapter: adapter structure 1474 * 1475 * Request that the PF enable channels as specified by 1476 * the user via tc tool. 
1477 **/ 1478 void iavf_enable_channels(struct iavf_adapter *adapter) 1479 { 1480 struct virtchnl_tc_info *vti = NULL; 1481 size_t len; 1482 int i; 1483 1484 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { 1485 /* bail because we already have a command pending */ 1486 dev_err(&adapter->pdev->dev, "Cannot configure mqprio, command %d pending\n", 1487 adapter->current_op); 1488 return; 1489 } 1490 1491 len = struct_size(vti, list, adapter->num_tc - 1); 1492 vti = kzalloc(len, GFP_KERNEL); 1493 if (!vti) 1494 return; 1495 vti->num_tc = adapter->num_tc; 1496 for (i = 0; i < vti->num_tc; i++) { 1497 vti->list[i].count = adapter->ch_config.ch_info[i].count; 1498 vti->list[i].offset = adapter->ch_config.ch_info[i].offset; 1499 vti->list[i].pad = 0; 1500 vti->list[i].max_tx_rate = 1501 adapter->ch_config.ch_info[i].max_tx_rate; 1502 } 1503 1504 adapter->ch_config.state = __IAVF_TC_RUNNING; 1505 adapter->flags |= IAVF_FLAG_REINIT_ITR_NEEDED; 1506 adapter->current_op = VIRTCHNL_OP_ENABLE_CHANNELS; 1507 adapter->aq_required &= ~IAVF_FLAG_AQ_ENABLE_CHANNELS; 1508 iavf_send_pf_msg(adapter, VIRTCHNL_OP_ENABLE_CHANNELS, (u8 *)vti, len); 1509 kfree(vti); 1510 } 1511 1512 /** 1513 * iavf_disable_channels 1514 * @adapter: adapter structure 1515 * 1516 * Request that the PF disable channels that are configured 1517 **/ 1518 void iavf_disable_channels(struct iavf_adapter *adapter) 1519 { 1520 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { 1521 /* bail because we already have a command pending */ 1522 dev_err(&adapter->pdev->dev, "Cannot configure mqprio, command %d pending\n", 1523 adapter->current_op); 1524 return; 1525 } 1526 1527 adapter->ch_config.state = __IAVF_TC_INVALID; 1528 adapter->flags |= IAVF_FLAG_REINIT_ITR_NEEDED; 1529 adapter->current_op = VIRTCHNL_OP_DISABLE_CHANNELS; 1530 adapter->aq_required &= ~IAVF_FLAG_AQ_DISABLE_CHANNELS; 1531 iavf_send_pf_msg(adapter, VIRTCHNL_OP_DISABLE_CHANNELS, NULL, 0); 1532 } 1533 1534 /** 1535 * iavf_print_cloud_filter 1536 * 
@adapter: adapter structure 1537 * @f: cloud filter to print 1538 * 1539 * Print the cloud filter 1540 **/ 1541 static void iavf_print_cloud_filter(struct iavf_adapter *adapter, 1542 struct virtchnl_filter *f) 1543 { 1544 switch (f->flow_type) { 1545 case VIRTCHNL_TCP_V4_FLOW: 1546 dev_info(&adapter->pdev->dev, "dst_mac: %pM src_mac: %pM vlan_id: %hu dst_ip: %pI4 src_ip %pI4 dst_port %hu src_port %hu\n", 1547 &f->data.tcp_spec.dst_mac, 1548 &f->data.tcp_spec.src_mac, 1549 ntohs(f->data.tcp_spec.vlan_id), 1550 &f->data.tcp_spec.dst_ip[0], 1551 &f->data.tcp_spec.src_ip[0], 1552 ntohs(f->data.tcp_spec.dst_port), 1553 ntohs(f->data.tcp_spec.src_port)); 1554 break; 1555 case VIRTCHNL_TCP_V6_FLOW: 1556 dev_info(&adapter->pdev->dev, "dst_mac: %pM src_mac: %pM vlan_id: %hu dst_ip: %pI6 src_ip %pI6 dst_port %hu src_port %hu\n", 1557 &f->data.tcp_spec.dst_mac, 1558 &f->data.tcp_spec.src_mac, 1559 ntohs(f->data.tcp_spec.vlan_id), 1560 &f->data.tcp_spec.dst_ip, 1561 &f->data.tcp_spec.src_ip, 1562 ntohs(f->data.tcp_spec.dst_port), 1563 ntohs(f->data.tcp_spec.src_port)); 1564 break; 1565 } 1566 } 1567 1568 /** 1569 * iavf_add_cloud_filter 1570 * @adapter: adapter structure 1571 * 1572 * Request that the PF add cloud filters as specified 1573 * by the user via tc tool. 
1574 **/ 1575 void iavf_add_cloud_filter(struct iavf_adapter *adapter) 1576 { 1577 struct iavf_cloud_filter *cf; 1578 struct virtchnl_filter *f; 1579 int len = 0, count = 0; 1580 1581 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { 1582 /* bail because we already have a command pending */ 1583 dev_err(&adapter->pdev->dev, "Cannot add cloud filter, command %d pending\n", 1584 adapter->current_op); 1585 return; 1586 } 1587 list_for_each_entry(cf, &adapter->cloud_filter_list, list) { 1588 if (cf->add) { 1589 count++; 1590 break; 1591 } 1592 } 1593 if (!count) { 1594 adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_CLOUD_FILTER; 1595 return; 1596 } 1597 adapter->current_op = VIRTCHNL_OP_ADD_CLOUD_FILTER; 1598 1599 len = sizeof(struct virtchnl_filter); 1600 f = kzalloc(len, GFP_KERNEL); 1601 if (!f) 1602 return; 1603 1604 list_for_each_entry(cf, &adapter->cloud_filter_list, list) { 1605 if (cf->add) { 1606 memcpy(f, &cf->f, sizeof(struct virtchnl_filter)); 1607 cf->add = false; 1608 cf->state = __IAVF_CF_ADD_PENDING; 1609 iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_CLOUD_FILTER, 1610 (u8 *)f, len); 1611 } 1612 } 1613 kfree(f); 1614 } 1615 1616 /** 1617 * iavf_del_cloud_filter 1618 * @adapter: adapter structure 1619 * 1620 * Request that the PF delete cloud filters as specified 1621 * by the user via tc tool. 
1622 **/ 1623 void iavf_del_cloud_filter(struct iavf_adapter *adapter) 1624 { 1625 struct iavf_cloud_filter *cf, *cftmp; 1626 struct virtchnl_filter *f; 1627 int len = 0, count = 0; 1628 1629 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { 1630 /* bail because we already have a command pending */ 1631 dev_err(&adapter->pdev->dev, "Cannot remove cloud filter, command %d pending\n", 1632 adapter->current_op); 1633 return; 1634 } 1635 list_for_each_entry(cf, &adapter->cloud_filter_list, list) { 1636 if (cf->del) { 1637 count++; 1638 break; 1639 } 1640 } 1641 if (!count) { 1642 adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_CLOUD_FILTER; 1643 return; 1644 } 1645 adapter->current_op = VIRTCHNL_OP_DEL_CLOUD_FILTER; 1646 1647 len = sizeof(struct virtchnl_filter); 1648 f = kzalloc(len, GFP_KERNEL); 1649 if (!f) 1650 return; 1651 1652 list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) { 1653 if (cf->del) { 1654 memcpy(f, &cf->f, sizeof(struct virtchnl_filter)); 1655 cf->del = false; 1656 cf->state = __IAVF_CF_DEL_PENDING; 1657 iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_CLOUD_FILTER, 1658 (u8 *)f, len); 1659 } 1660 } 1661 kfree(f); 1662 } 1663 1664 /** 1665 * iavf_add_fdir_filter 1666 * @adapter: the VF adapter structure 1667 * 1668 * Request that the PF add Flow Director filters as specified 1669 * by the user via ethtool. 
1670 **/ 1671 void iavf_add_fdir_filter(struct iavf_adapter *adapter) 1672 { 1673 struct iavf_fdir_fltr *fdir; 1674 struct virtchnl_fdir_add *f; 1675 bool process_fltr = false; 1676 int len; 1677 1678 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { 1679 /* bail because we already have a command pending */ 1680 dev_err(&adapter->pdev->dev, "Cannot add Flow Director filter, command %d pending\n", 1681 adapter->current_op); 1682 return; 1683 } 1684 1685 len = sizeof(struct virtchnl_fdir_add); 1686 f = kzalloc(len, GFP_KERNEL); 1687 if (!f) 1688 return; 1689 1690 spin_lock_bh(&adapter->fdir_fltr_lock); 1691 list_for_each_entry(fdir, &adapter->fdir_list_head, list) { 1692 if (fdir->state == IAVF_FDIR_FLTR_ADD_REQUEST) { 1693 process_fltr = true; 1694 fdir->state = IAVF_FDIR_FLTR_ADD_PENDING; 1695 memcpy(f, &fdir->vc_add_msg, len); 1696 break; 1697 } 1698 } 1699 spin_unlock_bh(&adapter->fdir_fltr_lock); 1700 1701 if (!process_fltr) { 1702 /* prevent iavf_add_fdir_filter() from being called when there 1703 * are no filters to add 1704 */ 1705 adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_FDIR_FILTER; 1706 kfree(f); 1707 return; 1708 } 1709 adapter->current_op = VIRTCHNL_OP_ADD_FDIR_FILTER; 1710 iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_FDIR_FILTER, (u8 *)f, len); 1711 kfree(f); 1712 } 1713 1714 /** 1715 * iavf_del_fdir_filter 1716 * @adapter: the VF adapter structure 1717 * 1718 * Request that the PF delete Flow Director filters as specified 1719 * by the user via ethtool. 
1720 **/ 1721 void iavf_del_fdir_filter(struct iavf_adapter *adapter) 1722 { 1723 struct iavf_fdir_fltr *fdir; 1724 struct virtchnl_fdir_del f; 1725 bool process_fltr = false; 1726 int len; 1727 1728 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { 1729 /* bail because we already have a command pending */ 1730 dev_err(&adapter->pdev->dev, "Cannot remove Flow Director filter, command %d pending\n", 1731 adapter->current_op); 1732 return; 1733 } 1734 1735 len = sizeof(struct virtchnl_fdir_del); 1736 1737 spin_lock_bh(&adapter->fdir_fltr_lock); 1738 list_for_each_entry(fdir, &adapter->fdir_list_head, list) { 1739 if (fdir->state == IAVF_FDIR_FLTR_DEL_REQUEST) { 1740 process_fltr = true; 1741 memset(&f, 0, len); 1742 f.vsi_id = fdir->vc_add_msg.vsi_id; 1743 f.flow_id = fdir->flow_id; 1744 fdir->state = IAVF_FDIR_FLTR_DEL_PENDING; 1745 break; 1746 } 1747 } 1748 spin_unlock_bh(&adapter->fdir_fltr_lock); 1749 1750 if (!process_fltr) { 1751 adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_FDIR_FILTER; 1752 return; 1753 } 1754 1755 adapter->current_op = VIRTCHNL_OP_DEL_FDIR_FILTER; 1756 iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_FDIR_FILTER, (u8 *)&f, len); 1757 } 1758 1759 /** 1760 * iavf_add_adv_rss_cfg 1761 * @adapter: the VF adapter structure 1762 * 1763 * Request that the PF add RSS configuration as specified 1764 * by the user via ethtool. 
1765 **/ 1766 void iavf_add_adv_rss_cfg(struct iavf_adapter *adapter) 1767 { 1768 struct virtchnl_rss_cfg *rss_cfg; 1769 struct iavf_adv_rss *rss; 1770 bool process_rss = false; 1771 int len; 1772 1773 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { 1774 /* bail because we already have a command pending */ 1775 dev_err(&adapter->pdev->dev, "Cannot add RSS configuration, command %d pending\n", 1776 adapter->current_op); 1777 return; 1778 } 1779 1780 len = sizeof(struct virtchnl_rss_cfg); 1781 rss_cfg = kzalloc(len, GFP_KERNEL); 1782 if (!rss_cfg) 1783 return; 1784 1785 spin_lock_bh(&adapter->adv_rss_lock); 1786 list_for_each_entry(rss, &adapter->adv_rss_list_head, list) { 1787 if (rss->state == IAVF_ADV_RSS_ADD_REQUEST) { 1788 process_rss = true; 1789 rss->state = IAVF_ADV_RSS_ADD_PENDING; 1790 memcpy(rss_cfg, &rss->cfg_msg, len); 1791 iavf_print_adv_rss_cfg(adapter, rss, 1792 "Input set change for", 1793 "is pending"); 1794 break; 1795 } 1796 } 1797 spin_unlock_bh(&adapter->adv_rss_lock); 1798 1799 if (process_rss) { 1800 adapter->current_op = VIRTCHNL_OP_ADD_RSS_CFG; 1801 iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_RSS_CFG, 1802 (u8 *)rss_cfg, len); 1803 } else { 1804 adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_ADV_RSS_CFG; 1805 } 1806 1807 kfree(rss_cfg); 1808 } 1809 1810 /** 1811 * iavf_del_adv_rss_cfg 1812 * @adapter: the VF adapter structure 1813 * 1814 * Request that the PF delete RSS configuration as specified 1815 * by the user via ethtool. 
1816 **/ 1817 void iavf_del_adv_rss_cfg(struct iavf_adapter *adapter) 1818 { 1819 struct virtchnl_rss_cfg *rss_cfg; 1820 struct iavf_adv_rss *rss; 1821 bool process_rss = false; 1822 int len; 1823 1824 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { 1825 /* bail because we already have a command pending */ 1826 dev_err(&adapter->pdev->dev, "Cannot remove RSS configuration, command %d pending\n", 1827 adapter->current_op); 1828 return; 1829 } 1830 1831 len = sizeof(struct virtchnl_rss_cfg); 1832 rss_cfg = kzalloc(len, GFP_KERNEL); 1833 if (!rss_cfg) 1834 return; 1835 1836 spin_lock_bh(&adapter->adv_rss_lock); 1837 list_for_each_entry(rss, &adapter->adv_rss_list_head, list) { 1838 if (rss->state == IAVF_ADV_RSS_DEL_REQUEST) { 1839 process_rss = true; 1840 rss->state = IAVF_ADV_RSS_DEL_PENDING; 1841 memcpy(rss_cfg, &rss->cfg_msg, len); 1842 break; 1843 } 1844 } 1845 spin_unlock_bh(&adapter->adv_rss_lock); 1846 1847 if (process_rss) { 1848 adapter->current_op = VIRTCHNL_OP_DEL_RSS_CFG; 1849 iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_RSS_CFG, 1850 (u8 *)rss_cfg, len); 1851 } else { 1852 adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_ADV_RSS_CFG; 1853 } 1854 1855 kfree(rss_cfg); 1856 } 1857 1858 /** 1859 * iavf_request_reset 1860 * @adapter: adapter structure 1861 * 1862 * Request that the PF reset this VF. No response is expected. 1863 **/ 1864 int iavf_request_reset(struct iavf_adapter *adapter) 1865 { 1866 int err; 1867 /* Don't check CURRENT_OP - this is always higher priority */ 1868 err = iavf_send_pf_msg(adapter, VIRTCHNL_OP_RESET_VF, NULL, 0); 1869 adapter->current_op = VIRTCHNL_OP_UNKNOWN; 1870 return err; 1871 } 1872 1873 /** 1874 * iavf_netdev_features_vlan_strip_set - update vlan strip status 1875 * @netdev: ptr to netdev being adjusted 1876 * @enable: enable or disable vlan strip 1877 * 1878 * Helper function to change vlan strip status in netdev->features. 
1879 */ 1880 static void iavf_netdev_features_vlan_strip_set(struct net_device *netdev, 1881 const bool enable) 1882 { 1883 if (enable) 1884 netdev->features |= NETIF_F_HW_VLAN_CTAG_RX; 1885 else 1886 netdev->features &= ~NETIF_F_HW_VLAN_CTAG_RX; 1887 } 1888 1889 /** 1890 * iavf_virtchnl_completion 1891 * @adapter: adapter structure 1892 * @v_opcode: opcode sent by PF 1893 * @v_retval: retval sent by PF 1894 * @msg: message sent by PF 1895 * @msglen: message length 1896 * 1897 * Asynchronous completion function for admin queue messages. Rather than busy 1898 * wait, we fire off our requests and assume that no errors will be returned. 1899 * This function handles the reply messages. 1900 **/ 1901 void iavf_virtchnl_completion(struct iavf_adapter *adapter, 1902 enum virtchnl_ops v_opcode, 1903 enum iavf_status v_retval, u8 *msg, u16 msglen) 1904 { 1905 struct net_device *netdev = adapter->netdev; 1906 1907 if (v_opcode == VIRTCHNL_OP_EVENT) { 1908 struct virtchnl_pf_event *vpe = 1909 (struct virtchnl_pf_event *)msg; 1910 bool link_up = iavf_get_vpe_link_status(adapter, vpe); 1911 1912 switch (vpe->event) { 1913 case VIRTCHNL_EVENT_LINK_CHANGE: 1914 iavf_set_adapter_link_speed_from_vpe(adapter, vpe); 1915 1916 /* we've already got the right link status, bail */ 1917 if (adapter->link_up == link_up) 1918 break; 1919 1920 if (link_up) { 1921 /* If we get link up message and start queues 1922 * before our queues are configured it will 1923 * trigger a TX hang. In that case, just ignore 1924 * the link status message,we'll get another one 1925 * after we enable queues and actually prepared 1926 * to send traffic. 1927 */ 1928 if (adapter->state != __IAVF_RUNNING) 1929 break; 1930 1931 /* For ADq enabled VF, we reconfigure VSIs and 1932 * re-allocate queues. Hence wait till all 1933 * queues are enabled. 
1934 */ 1935 if (adapter->flags & 1936 IAVF_FLAG_QUEUES_DISABLED) 1937 break; 1938 } 1939 1940 adapter->link_up = link_up; 1941 if (link_up) { 1942 netif_tx_start_all_queues(netdev); 1943 netif_carrier_on(netdev); 1944 } else { 1945 netif_tx_stop_all_queues(netdev); 1946 netif_carrier_off(netdev); 1947 } 1948 iavf_print_link_message(adapter); 1949 break; 1950 case VIRTCHNL_EVENT_RESET_IMPENDING: 1951 dev_info(&adapter->pdev->dev, "Reset indication received from the PF\n"); 1952 if (!(adapter->flags & IAVF_FLAG_RESET_PENDING)) { 1953 adapter->flags |= IAVF_FLAG_RESET_PENDING; 1954 dev_info(&adapter->pdev->dev, "Scheduling reset task\n"); 1955 queue_work(adapter->wq, &adapter->reset_task); 1956 } 1957 break; 1958 default: 1959 dev_err(&adapter->pdev->dev, "Unknown event %d from PF\n", 1960 vpe->event); 1961 break; 1962 } 1963 return; 1964 } 1965 if (v_retval) { 1966 switch (v_opcode) { 1967 case VIRTCHNL_OP_ADD_VLAN: 1968 dev_err(&adapter->pdev->dev, "Failed to add VLAN filter, error %s\n", 1969 iavf_stat_str(&adapter->hw, v_retval)); 1970 break; 1971 case VIRTCHNL_OP_ADD_ETH_ADDR: 1972 dev_err(&adapter->pdev->dev, "Failed to add MAC filter, error %s\n", 1973 iavf_stat_str(&adapter->hw, v_retval)); 1974 iavf_mac_add_reject(adapter); 1975 /* restore administratively set MAC address */ 1976 ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr); 1977 wake_up(&adapter->vc_waitqueue); 1978 break; 1979 case VIRTCHNL_OP_DEL_VLAN: 1980 dev_err(&adapter->pdev->dev, "Failed to delete VLAN filter, error %s\n", 1981 iavf_stat_str(&adapter->hw, v_retval)); 1982 break; 1983 case VIRTCHNL_OP_DEL_ETH_ADDR: 1984 dev_err(&adapter->pdev->dev, "Failed to delete MAC filter, error %s\n", 1985 iavf_stat_str(&adapter->hw, v_retval)); 1986 break; 1987 case VIRTCHNL_OP_ENABLE_CHANNELS: 1988 dev_err(&adapter->pdev->dev, "Failed to configure queue channels, error %s\n", 1989 iavf_stat_str(&adapter->hw, v_retval)); 1990 adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED; 1991 
adapter->ch_config.state = __IAVF_TC_INVALID; 1992 netdev_reset_tc(netdev); 1993 netif_tx_start_all_queues(netdev); 1994 break; 1995 case VIRTCHNL_OP_DISABLE_CHANNELS: 1996 dev_err(&adapter->pdev->dev, "Failed to disable queue channels, error %s\n", 1997 iavf_stat_str(&adapter->hw, v_retval)); 1998 adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED; 1999 adapter->ch_config.state = __IAVF_TC_RUNNING; 2000 netif_tx_start_all_queues(netdev); 2001 break; 2002 case VIRTCHNL_OP_ADD_CLOUD_FILTER: { 2003 struct iavf_cloud_filter *cf, *cftmp; 2004 2005 list_for_each_entry_safe(cf, cftmp, 2006 &adapter->cloud_filter_list, 2007 list) { 2008 if (cf->state == __IAVF_CF_ADD_PENDING) { 2009 cf->state = __IAVF_CF_INVALID; 2010 dev_info(&adapter->pdev->dev, "Failed to add cloud filter, error %s\n", 2011 iavf_stat_str(&adapter->hw, 2012 v_retval)); 2013 iavf_print_cloud_filter(adapter, 2014 &cf->f); 2015 list_del(&cf->list); 2016 kfree(cf); 2017 adapter->num_cloud_filters--; 2018 } 2019 } 2020 } 2021 break; 2022 case VIRTCHNL_OP_DEL_CLOUD_FILTER: { 2023 struct iavf_cloud_filter *cf; 2024 2025 list_for_each_entry(cf, &adapter->cloud_filter_list, 2026 list) { 2027 if (cf->state == __IAVF_CF_DEL_PENDING) { 2028 cf->state = __IAVF_CF_ACTIVE; 2029 dev_info(&adapter->pdev->dev, "Failed to del cloud filter, error %s\n", 2030 iavf_stat_str(&adapter->hw, 2031 v_retval)); 2032 iavf_print_cloud_filter(adapter, 2033 &cf->f); 2034 } 2035 } 2036 } 2037 break; 2038 case VIRTCHNL_OP_ADD_FDIR_FILTER: { 2039 struct iavf_fdir_fltr *fdir, *fdir_tmp; 2040 2041 spin_lock_bh(&adapter->fdir_fltr_lock); 2042 list_for_each_entry_safe(fdir, fdir_tmp, 2043 &adapter->fdir_list_head, 2044 list) { 2045 if (fdir->state == IAVF_FDIR_FLTR_ADD_PENDING) { 2046 dev_info(&adapter->pdev->dev, "Failed to add Flow Director filter, error %s\n", 2047 iavf_stat_str(&adapter->hw, 2048 v_retval)); 2049 iavf_print_fdir_fltr(adapter, fdir); 2050 if (msglen) 2051 dev_err(&adapter->pdev->dev, 2052 "%s\n", msg); 2053 
list_del(&fdir->list); 2054 kfree(fdir); 2055 adapter->fdir_active_fltr--; 2056 } 2057 } 2058 spin_unlock_bh(&adapter->fdir_fltr_lock); 2059 } 2060 break; 2061 case VIRTCHNL_OP_DEL_FDIR_FILTER: { 2062 struct iavf_fdir_fltr *fdir; 2063 2064 spin_lock_bh(&adapter->fdir_fltr_lock); 2065 list_for_each_entry(fdir, &adapter->fdir_list_head, 2066 list) { 2067 if (fdir->state == IAVF_FDIR_FLTR_DEL_PENDING) { 2068 fdir->state = IAVF_FDIR_FLTR_ACTIVE; 2069 dev_info(&adapter->pdev->dev, "Failed to del Flow Director filter, error %s\n", 2070 iavf_stat_str(&adapter->hw, 2071 v_retval)); 2072 iavf_print_fdir_fltr(adapter, fdir); 2073 } 2074 } 2075 spin_unlock_bh(&adapter->fdir_fltr_lock); 2076 } 2077 break; 2078 case VIRTCHNL_OP_ADD_RSS_CFG: { 2079 struct iavf_adv_rss *rss, *rss_tmp; 2080 2081 spin_lock_bh(&adapter->adv_rss_lock); 2082 list_for_each_entry_safe(rss, rss_tmp, 2083 &adapter->adv_rss_list_head, 2084 list) { 2085 if (rss->state == IAVF_ADV_RSS_ADD_PENDING) { 2086 iavf_print_adv_rss_cfg(adapter, rss, 2087 "Failed to change the input set for", 2088 NULL); 2089 list_del(&rss->list); 2090 kfree(rss); 2091 } 2092 } 2093 spin_unlock_bh(&adapter->adv_rss_lock); 2094 } 2095 break; 2096 case VIRTCHNL_OP_DEL_RSS_CFG: { 2097 struct iavf_adv_rss *rss; 2098 2099 spin_lock_bh(&adapter->adv_rss_lock); 2100 list_for_each_entry(rss, &adapter->adv_rss_list_head, 2101 list) { 2102 if (rss->state == IAVF_ADV_RSS_DEL_PENDING) { 2103 rss->state = IAVF_ADV_RSS_ACTIVE; 2104 dev_err(&adapter->pdev->dev, "Failed to delete RSS configuration, error %s\n", 2105 iavf_stat_str(&adapter->hw, 2106 v_retval)); 2107 } 2108 } 2109 spin_unlock_bh(&adapter->adv_rss_lock); 2110 } 2111 break; 2112 case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING: 2113 dev_warn(&adapter->pdev->dev, "Changing VLAN Stripping is not allowed when Port VLAN is configured\n"); 2114 /* Vlan stripping could not be enabled by ethtool. 2115 * Disable it in netdev->features. 
2116 */ 2117 iavf_netdev_features_vlan_strip_set(netdev, false); 2118 break; 2119 case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING: 2120 dev_warn(&adapter->pdev->dev, "Changing VLAN Stripping is not allowed when Port VLAN is configured\n"); 2121 /* Vlan stripping could not be disabled by ethtool. 2122 * Enable it in netdev->features. 2123 */ 2124 iavf_netdev_features_vlan_strip_set(netdev, true); 2125 break; 2126 case VIRTCHNL_OP_ADD_VLAN_V2: 2127 iavf_vlan_add_reject(adapter); 2128 dev_warn(&adapter->pdev->dev, "Failed to add VLAN filter, error %s\n", 2129 iavf_stat_str(&adapter->hw, v_retval)); 2130 break; 2131 default: 2132 dev_err(&adapter->pdev->dev, "PF returned error %d (%s) to our request %d\n", 2133 v_retval, iavf_stat_str(&adapter->hw, v_retval), 2134 v_opcode); 2135 } 2136 } 2137 switch (v_opcode) { 2138 case VIRTCHNL_OP_ADD_ETH_ADDR: 2139 if (!v_retval) 2140 iavf_mac_add_ok(adapter); 2141 if (!ether_addr_equal(netdev->dev_addr, adapter->hw.mac.addr)) 2142 if (!ether_addr_equal(netdev->dev_addr, 2143 adapter->hw.mac.addr)) { 2144 netif_addr_lock_bh(netdev); 2145 eth_hw_addr_set(netdev, adapter->hw.mac.addr); 2146 netif_addr_unlock_bh(netdev); 2147 } 2148 wake_up(&adapter->vc_waitqueue); 2149 break; 2150 case VIRTCHNL_OP_GET_STATS: { 2151 struct iavf_eth_stats *stats = 2152 (struct iavf_eth_stats *)msg; 2153 netdev->stats.rx_packets = stats->rx_unicast + 2154 stats->rx_multicast + 2155 stats->rx_broadcast; 2156 netdev->stats.tx_packets = stats->tx_unicast + 2157 stats->tx_multicast + 2158 stats->tx_broadcast; 2159 netdev->stats.rx_bytes = stats->rx_bytes; 2160 netdev->stats.tx_bytes = stats->tx_bytes; 2161 netdev->stats.tx_errors = stats->tx_errors; 2162 netdev->stats.rx_dropped = stats->rx_discards; 2163 netdev->stats.tx_dropped = stats->tx_discards; 2164 adapter->current_stats = *stats; 2165 } 2166 break; 2167 case VIRTCHNL_OP_GET_VF_RESOURCES: { 2168 u16 len = sizeof(struct virtchnl_vf_resource) + 2169 IAVF_MAX_VF_VSI * 2170 sizeof(struct 
virtchnl_vsi_resource); 2171 memcpy(adapter->vf_res, msg, min(msglen, len)); 2172 iavf_validate_num_queues(adapter); 2173 iavf_vf_parse_hw_config(&adapter->hw, adapter->vf_res); 2174 if (is_zero_ether_addr(adapter->hw.mac.addr)) { 2175 /* restore current mac address */ 2176 ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr); 2177 } else { 2178 netif_addr_lock_bh(netdev); 2179 /* refresh current mac address if changed */ 2180 ether_addr_copy(netdev->perm_addr, 2181 adapter->hw.mac.addr); 2182 netif_addr_unlock_bh(netdev); 2183 } 2184 spin_lock_bh(&adapter->mac_vlan_list_lock); 2185 iavf_add_filter(adapter, adapter->hw.mac.addr); 2186 2187 if (VLAN_ALLOWED(adapter)) { 2188 if (!list_empty(&adapter->vlan_filter_list)) { 2189 struct iavf_vlan_filter *vlf; 2190 2191 /* re-add all VLAN filters over virtchnl */ 2192 list_for_each_entry(vlf, 2193 &adapter->vlan_filter_list, 2194 list) 2195 vlf->add = true; 2196 2197 adapter->aq_required |= 2198 IAVF_FLAG_AQ_ADD_VLAN_FILTER; 2199 } 2200 } 2201 2202 spin_unlock_bh(&adapter->mac_vlan_list_lock); 2203 2204 iavf_parse_vf_resource_msg(adapter); 2205 2206 /* negotiated VIRTCHNL_VF_OFFLOAD_VLAN_V2, so wait for the 2207 * response to VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS to finish 2208 * configuration 2209 */ 2210 if (VLAN_V2_ALLOWED(adapter)) 2211 break; 2212 /* fallthrough and finish config if VIRTCHNL_VF_OFFLOAD_VLAN_V2 2213 * wasn't successfully negotiated with the PF 2214 */ 2215 } 2216 fallthrough; 2217 case VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS: { 2218 struct iavf_mac_filter *f; 2219 bool was_mac_changed; 2220 u64 aq_required = 0; 2221 2222 if (v_opcode == VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS) 2223 memcpy(&adapter->vlan_v2_caps, msg, 2224 min_t(u16, msglen, 2225 sizeof(adapter->vlan_v2_caps))); 2226 2227 iavf_process_config(adapter); 2228 adapter->flags |= IAVF_FLAG_SETUP_NETDEV_FEATURES; 2229 2230 /* Request VLAN offload settings */ 2231 if (VLAN_V2_ALLOWED(adapter)) 2232 iavf_set_vlan_offload_features(adapter, 0, 2233 
netdev->features); 2234 2235 iavf_set_queue_vlan_tag_loc(adapter); 2236 2237 was_mac_changed = !ether_addr_equal(netdev->dev_addr, 2238 adapter->hw.mac.addr); 2239 2240 spin_lock_bh(&adapter->mac_vlan_list_lock); 2241 2242 /* re-add all MAC filters */ 2243 list_for_each_entry(f, &adapter->mac_filter_list, list) { 2244 if (was_mac_changed && 2245 ether_addr_equal(netdev->dev_addr, f->macaddr)) 2246 ether_addr_copy(f->macaddr, 2247 adapter->hw.mac.addr); 2248 2249 f->is_new_mac = true; 2250 f->add = true; 2251 f->add_handled = false; 2252 f->remove = false; 2253 } 2254 2255 /* re-add all VLAN filters */ 2256 if (VLAN_FILTERING_ALLOWED(adapter)) { 2257 struct iavf_vlan_filter *vlf; 2258 2259 if (!list_empty(&adapter->vlan_filter_list)) { 2260 list_for_each_entry(vlf, 2261 &adapter->vlan_filter_list, 2262 list) 2263 vlf->add = true; 2264 2265 aq_required |= IAVF_FLAG_AQ_ADD_VLAN_FILTER; 2266 } 2267 } 2268 2269 spin_unlock_bh(&adapter->mac_vlan_list_lock); 2270 2271 netif_addr_lock_bh(netdev); 2272 eth_hw_addr_set(netdev, adapter->hw.mac.addr); 2273 netif_addr_unlock_bh(netdev); 2274 2275 adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER | 2276 aq_required; 2277 } 2278 break; 2279 case VIRTCHNL_OP_ENABLE_QUEUES: 2280 /* enable transmits */ 2281 iavf_irq_enable(adapter, true); 2282 adapter->flags &= ~IAVF_FLAG_QUEUES_DISABLED; 2283 break; 2284 case VIRTCHNL_OP_DISABLE_QUEUES: 2285 iavf_free_all_tx_resources(adapter); 2286 iavf_free_all_rx_resources(adapter); 2287 if (adapter->state == __IAVF_DOWN_PENDING) { 2288 iavf_change_state(adapter, __IAVF_DOWN); 2289 wake_up(&adapter->down_waitqueue); 2290 } 2291 break; 2292 case VIRTCHNL_OP_VERSION: 2293 case VIRTCHNL_OP_CONFIG_IRQ_MAP: 2294 /* Don't display an error if we get these out of sequence. 2295 * If the firmware needed to get kicked, we'll get these and 2296 * it's no problem. 
2297 */ 2298 if (v_opcode != adapter->current_op) 2299 return; 2300 break; 2301 case VIRTCHNL_OP_IWARP: 2302 /* Gobble zero-length replies from the PF. They indicate that 2303 * a previous message was received OK, and the client doesn't 2304 * care about that. 2305 */ 2306 if (msglen && CLIENT_ENABLED(adapter)) 2307 iavf_notify_client_message(&adapter->vsi, msg, msglen); 2308 break; 2309 2310 case VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP: 2311 adapter->client_pending &= 2312 ~(BIT(VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP)); 2313 break; 2314 case VIRTCHNL_OP_GET_RSS_HENA_CAPS: { 2315 struct virtchnl_rss_hena *vrh = (struct virtchnl_rss_hena *)msg; 2316 2317 if (msglen == sizeof(*vrh)) 2318 adapter->hena = vrh->hena; 2319 else 2320 dev_warn(&adapter->pdev->dev, 2321 "Invalid message %d from PF\n", v_opcode); 2322 } 2323 break; 2324 case VIRTCHNL_OP_REQUEST_QUEUES: { 2325 struct virtchnl_vf_res_request *vfres = 2326 (struct virtchnl_vf_res_request *)msg; 2327 2328 if (vfres->num_queue_pairs != adapter->num_req_queues) { 2329 dev_info(&adapter->pdev->dev, 2330 "Requested %d queues, PF can support %d\n", 2331 adapter->num_req_queues, 2332 vfres->num_queue_pairs); 2333 adapter->num_req_queues = 0; 2334 adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED; 2335 } 2336 } 2337 break; 2338 case VIRTCHNL_OP_ADD_CLOUD_FILTER: { 2339 struct iavf_cloud_filter *cf; 2340 2341 list_for_each_entry(cf, &adapter->cloud_filter_list, list) { 2342 if (cf->state == __IAVF_CF_ADD_PENDING) 2343 cf->state = __IAVF_CF_ACTIVE; 2344 } 2345 } 2346 break; 2347 case VIRTCHNL_OP_DEL_CLOUD_FILTER: { 2348 struct iavf_cloud_filter *cf, *cftmp; 2349 2350 list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, 2351 list) { 2352 if (cf->state == __IAVF_CF_DEL_PENDING) { 2353 cf->state = __IAVF_CF_INVALID; 2354 list_del(&cf->list); 2355 kfree(cf); 2356 adapter->num_cloud_filters--; 2357 } 2358 } 2359 } 2360 break; 2361 case VIRTCHNL_OP_ADD_FDIR_FILTER: { 2362 struct virtchnl_fdir_add *add_fltr = (struct 
virtchnl_fdir_add *)msg; 2363 struct iavf_fdir_fltr *fdir, *fdir_tmp; 2364 2365 spin_lock_bh(&adapter->fdir_fltr_lock); 2366 list_for_each_entry_safe(fdir, fdir_tmp, 2367 &adapter->fdir_list_head, 2368 list) { 2369 if (fdir->state == IAVF_FDIR_FLTR_ADD_PENDING) { 2370 if (add_fltr->status == VIRTCHNL_FDIR_SUCCESS) { 2371 dev_info(&adapter->pdev->dev, "Flow Director filter with location %u is added\n", 2372 fdir->loc); 2373 fdir->state = IAVF_FDIR_FLTR_ACTIVE; 2374 fdir->flow_id = add_fltr->flow_id; 2375 } else { 2376 dev_info(&adapter->pdev->dev, "Failed to add Flow Director filter with status: %d\n", 2377 add_fltr->status); 2378 iavf_print_fdir_fltr(adapter, fdir); 2379 list_del(&fdir->list); 2380 kfree(fdir); 2381 adapter->fdir_active_fltr--; 2382 } 2383 } 2384 } 2385 spin_unlock_bh(&adapter->fdir_fltr_lock); 2386 } 2387 break; 2388 case VIRTCHNL_OP_DEL_FDIR_FILTER: { 2389 struct virtchnl_fdir_del *del_fltr = (struct virtchnl_fdir_del *)msg; 2390 struct iavf_fdir_fltr *fdir, *fdir_tmp; 2391 2392 spin_lock_bh(&adapter->fdir_fltr_lock); 2393 list_for_each_entry_safe(fdir, fdir_tmp, &adapter->fdir_list_head, 2394 list) { 2395 if (fdir->state == IAVF_FDIR_FLTR_DEL_PENDING) { 2396 if (del_fltr->status == VIRTCHNL_FDIR_SUCCESS) { 2397 dev_info(&adapter->pdev->dev, "Flow Director filter with location %u is deleted\n", 2398 fdir->loc); 2399 list_del(&fdir->list); 2400 kfree(fdir); 2401 adapter->fdir_active_fltr--; 2402 } else { 2403 fdir->state = IAVF_FDIR_FLTR_ACTIVE; 2404 dev_info(&adapter->pdev->dev, "Failed to delete Flow Director filter with status: %d\n", 2405 del_fltr->status); 2406 iavf_print_fdir_fltr(adapter, fdir); 2407 } 2408 } 2409 } 2410 spin_unlock_bh(&adapter->fdir_fltr_lock); 2411 } 2412 break; 2413 case VIRTCHNL_OP_ADD_RSS_CFG: { 2414 struct iavf_adv_rss *rss; 2415 2416 spin_lock_bh(&adapter->adv_rss_lock); 2417 list_for_each_entry(rss, &adapter->adv_rss_list_head, list) { 2418 if (rss->state == IAVF_ADV_RSS_ADD_PENDING) { 2419 
iavf_print_adv_rss_cfg(adapter, rss, 2420 "Input set change for", 2421 "successful"); 2422 rss->state = IAVF_ADV_RSS_ACTIVE; 2423 } 2424 } 2425 spin_unlock_bh(&adapter->adv_rss_lock); 2426 } 2427 break; 2428 case VIRTCHNL_OP_DEL_RSS_CFG: { 2429 struct iavf_adv_rss *rss, *rss_tmp; 2430 2431 spin_lock_bh(&adapter->adv_rss_lock); 2432 list_for_each_entry_safe(rss, rss_tmp, 2433 &adapter->adv_rss_list_head, list) { 2434 if (rss->state == IAVF_ADV_RSS_DEL_PENDING) { 2435 list_del(&rss->list); 2436 kfree(rss); 2437 } 2438 } 2439 spin_unlock_bh(&adapter->adv_rss_lock); 2440 } 2441 break; 2442 case VIRTCHNL_OP_ADD_VLAN_V2: { 2443 struct iavf_vlan_filter *f; 2444 2445 spin_lock_bh(&adapter->mac_vlan_list_lock); 2446 list_for_each_entry(f, &adapter->vlan_filter_list, list) { 2447 if (f->is_new_vlan) { 2448 f->is_new_vlan = false; 2449 if (!f->vlan.vid) 2450 continue; 2451 if (f->vlan.tpid == ETH_P_8021Q) 2452 set_bit(f->vlan.vid, 2453 adapter->vsi.active_cvlans); 2454 else 2455 set_bit(f->vlan.vid, 2456 adapter->vsi.active_svlans); 2457 } 2458 } 2459 spin_unlock_bh(&adapter->mac_vlan_list_lock); 2460 } 2461 break; 2462 case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING: 2463 /* PF enabled vlan strip on this VF. 2464 * Update netdev->features if needed to be in sync with ethtool. 2465 */ 2466 if (!v_retval) 2467 iavf_netdev_features_vlan_strip_set(netdev, true); 2468 break; 2469 case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING: 2470 /* PF disabled vlan strip on this VF. 2471 * Update netdev->features if needed to be in sync with ethtool. 2472 */ 2473 if (!v_retval) 2474 iavf_netdev_features_vlan_strip_set(netdev, false); 2475 break; 2476 default: 2477 if (adapter->current_op && (v_opcode != adapter->current_op)) 2478 dev_warn(&adapter->pdev->dev, "Expected response %d from PF, received %d\n", 2479 adapter->current_op, v_opcode); 2480 break; 2481 } /* switch v_opcode */ 2482 adapter->current_op = VIRTCHNL_OP_UNKNOWN; 2483 } 2484