// SPDX-License-Identifier: GPL-2.0-only
/*
 * aQuantia Corporation Network Driver
 * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
 */

/* File aq_nic.c: Definition of common code for NIC. */

#include "aq_nic.h"
#include "aq_ring.h"
#include "aq_vec.h"
#include "aq_hw.h"
#include "aq_pci_func.h"
#include "aq_main.h"

#include <linux/moduleparam.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/timer.h>
#include <linux/cpu.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <net/ip.h>

static unsigned int aq_itr = AQ_CFG_INTERRUPT_MODERATION_AUTO;
module_param_named(aq_itr, aq_itr, uint, 0644);
MODULE_PARM_DESC(aq_itr, "Interrupt throttling mode");

static unsigned int aq_itr_tx;
module_param_named(aq_itr_tx, aq_itr_tx, uint, 0644);
MODULE_PARM_DESC(aq_itr_tx, "TX interrupt throttle rate");

static unsigned int aq_itr_rx;
module_param_named(aq_itr_rx, aq_itr_rx, uint, 0644);
MODULE_PARM_DESC(aq_itr_rx, "RX interrupt throttle rate");

static void aq_nic_update_ndev_stats(struct aq_nic_s *self);

static void aq_nic_rss_init(struct aq_nic_s *self, unsigned int num_rss_queues)
{
	struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
	struct aq_rss_parameters *rss_params = &cfg->aq_rss;
	int i = 0;

	static u8 rss_key[AQ_CFG_RSS_HASHKEY_SIZE] = {
		0x1e, 0xad, 0x71, 0x87, 0x65, 0xfc, 0x26, 0x7d,
		0x0d, 0x45, 0x67, 0x74, 0xcd, 0x06, 0x1a, 0x18,
		0xb6, 0xc1, 0xf0, 0xc7, 0xbb, 0x18, 0xbe, 0xf8,
		0x19, 0x13, 0x4b, 0xa9, 0xd0, 0x3e, 0xfe, 0x70,
		0x25, 0x03, 0xab, 0x50, 0x6a, 0x8b, 0x82, 0x0c
	};

	rss_params->hash_secret_key_size = sizeof(rss_key);
	memcpy(rss_params->hash_secret_key, rss_key, sizeof(rss_key));
	rss_params->indirection_table_size = AQ_CFG_RSS_INDIRECTION_TABLE_MAX;

	for (i = rss_params->indirection_table_size; i--;)
		rss_params->indirection_table[i] = i & (num_rss_queues - 1);
}

/* Checks hw_caps and 'corrects' aq_nic_cfg in runtime */
void aq_nic_cfg_start(struct aq_nic_s *self)
{
	struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;

	cfg->tcs = AQ_CFG_TCS_DEF;

	cfg->is_polling = AQ_CFG_IS_POLLING_DEF;

	cfg->itr = aq_itr;
	cfg->tx_itr = aq_itr_tx;
	cfg->rx_itr = aq_itr_rx;

	cfg->rxpageorder = AQ_CFG_RX_PAGEORDER;
	cfg->is_rss = AQ_CFG_IS_RSS_DEF;
	cfg->num_rss_queues = AQ_CFG_NUM_RSS_QUEUES_DEF;
	cfg->aq_rss.base_cpu_number = AQ_CFG_RSS_BASE_CPU_NUM_DEF;
	cfg->flow_control = AQ_CFG_FC_MODE;

	cfg->mtu = AQ_CFG_MTU_DEF;
	cfg->link_speed_msk = AQ_CFG_SPEED_MSK;
	cfg->is_autoneg = AQ_CFG_IS_AUTONEG_DEF;

	cfg->is_lro = AQ_CFG_IS_LRO_DEF;

	/* descriptors */
	cfg->rxds = min(cfg->aq_hw_caps->rxds_max, AQ_CFG_RXDS_DEF);
	cfg->txds = min(cfg->aq_hw_caps->txds_max, AQ_CFG_TXDS_DEF);

	/* rss rings */
	cfg->vecs = min(cfg->aq_hw_caps->vecs, AQ_CFG_VECS_DEF);
	cfg->vecs = min(cfg->vecs, num_online_cpus());
	if (self->irqvecs > AQ_HW_SERVICE_IRQS)
		cfg->vecs = min(cfg->vecs, self->irqvecs - AQ_HW_SERVICE_IRQS);
	/* cfg->vecs should be power of 2 for RSS */
	if (cfg->vecs >= 8U)
		cfg->vecs = 8U;
	else if (cfg->vecs >= 4U)
		cfg->vecs = 4U;
	else if (cfg->vecs >= 2U)
		cfg->vecs = 2U;
	else
		cfg->vecs = 1U;

	cfg->num_rss_queues = min(cfg->vecs, AQ_CFG_NUM_RSS_QUEUES_DEF);

	aq_nic_rss_init(self, cfg->num_rss_queues);

	cfg->irq_type = aq_pci_func_get_irq_type(self);

	if ((cfg->irq_type == AQ_HW_IRQ_LEGACY) ||
	    (cfg->aq_hw_caps->vecs == 1U) ||
	    (cfg->vecs == 1U)) {
		cfg->is_rss = 0U;
		cfg->vecs = 1U;
	}

	/* Check if we have enough vectors allocated for
	 * link status IRQ. If not, we'll know the link state
	 * from the slower service task.
	 */
	if (AQ_HW_SERVICE_IRQS > 0 && cfg->vecs + 1 <= self->irqvecs)
		cfg->link_irq_vec = cfg->vecs;
	else
		cfg->link_irq_vec = 0;

	cfg->link_speed_msk &= cfg->aq_hw_caps->link_speed_msk;
	cfg->features = cfg->aq_hw_caps->hw_features;
}
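
/* Propagate a firmware-reported link change into the net stack:
 * refresh interrupt moderation for the new speed, re-apply the
 * negotiated flow control to the RX block, and toggle the carrier
 * and TX queues to match the new link state.
 */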
static int aq_nic_update_link_status(struct aq_nic_s *self)
{
	int err = self->aq_fw_ops->update_link_status(self->aq_hw);
	u32 fc = 0;

	if (err)
		return err;

	if (self->link_status.mbps != self->aq_hw->aq_link_status.mbps) {
		pr_info("%s: link change old %d new %d\n",
			AQ_CFG_DRV_NAME, self->link_status.mbps,
			self->aq_hw->aq_link_status.mbps);
		aq_nic_update_interrupt_moderation_settings(self);

		/* Driver has to update flow control settings on RX block
		 * on any link event.
		 * We should query FW whether it negotiated FC.
		 */
		if (self->aq_fw_ops->get_flow_control)
			self->aq_fw_ops->get_flow_control(self->aq_hw, &fc);
		if (self->aq_hw_ops->hw_set_fc)
			self->aq_hw_ops->hw_set_fc(self->aq_hw, fc, 0);
	}

	self->link_status = self->aq_hw->aq_link_status;
	if (!netif_carrier_ok(self->ndev) && self->link_status.mbps) {
		aq_utils_obj_set(&self->flags,
				 AQ_NIC_FLAG_STARTED);
		aq_utils_obj_clear(&self->flags,
				   AQ_NIC_LINK_DOWN);
		netif_carrier_on(self->ndev);
		netif_tx_wake_all_queues(self->ndev);
	}
	if (netif_carrier_ok(self->ndev) && !self->link_status.mbps) {
		netif_carrier_off(self->ndev);
		netif_tx_disable(self->ndev);
		aq_utils_obj_set(&self->flags, AQ_NIC_LINK_DOWN);
	}
	return 0;
}

static irqreturn_t aq_linkstate_threaded_isr(int irq, void *private)
{
	struct aq_nic_s *self = private;

	if (!self)
		return IRQ_NONE;

	aq_nic_update_link_status(self);

	self->aq_hw_ops->hw_irq_enable(self->aq_hw,
				       BIT(self->aq_nic_cfg.link_irq_vec));
	return IRQ_HANDLED;
}

static void aq_nic_service_task(struct work_struct *work)
{
	struct aq_nic_s *self = container_of(work, struct aq_nic_s,
					     service_task);
	int err;

	if (aq_utils_obj_test(&self->flags, AQ_NIC_FLAGS_IS_NOT_READY))
		return;

	err = aq_nic_update_link_status(self);
	if (err)
		return;

	mutex_lock(&self->fwreq_mutex);
	if (self->aq_fw_ops->update_stats)
		self->aq_fw_ops->update_stats(self->aq_hw);
	mutex_unlock(&self->fwreq_mutex);

	aq_nic_update_ndev_stats(self);
}

static void aq_nic_service_timer_cb(struct timer_list *t)
{
	struct aq_nic_s *self = from_timer(self, t, service_timer);

	mod_timer(&self->service_timer,
		  jiffies + AQ_CFG_SERVICE_TIMER_INTERVAL);

	aq_ndev_schedule_work(&self->service_task);
}

static void aq_nic_polling_timer_cb(struct timer_list *t)
{
	struct aq_nic_s *self = from_timer(self, t, polling_timer);
	struct aq_vec_s *aq_vec = NULL;
	unsigned int i = 0U;

	for (i = 0U, aq_vec = self->aq_vec[0];
	     self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
		aq_vec_isr(i, (void *)aq_vec);

	mod_timer(&self->polling_timer, jiffies +
		  AQ_CFG_POLLING_TIMER_INTERVAL);
}
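
/* One-time bring-up called from probe: detect firmware and bind the
 * matching FW ops, read the permanent MAC address, allocate one aq_vec
 * per configured interrupt vector, and only then register the netdev
 * with the stack (queues start disabled, carrier off).
 */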
int aq_nic_ndev_register(struct aq_nic_s *self)
{
	int err = 0;

	if (!self->ndev) {
		err = -EINVAL;
		goto err_exit;
	}

	err = hw_atl_utils_initfw(self->aq_hw, &self->aq_fw_ops);
	if (err)
		goto err_exit;

	mutex_lock(&self->fwreq_mutex);
	err = self->aq_fw_ops->get_mac_permanent(self->aq_hw,
						 self->ndev->dev_addr);
	mutex_unlock(&self->fwreq_mutex);
	if (err)
		goto err_exit;

#if defined(AQ_CFG_MAC_ADDR_PERMANENT)
	{
		static u8 mac_addr_permanent[] = AQ_CFG_MAC_ADDR_PERMANENT;

		ether_addr_copy(self->ndev->dev_addr, mac_addr_permanent);
	}
#endif

	for (self->aq_vecs = 0; self->aq_vecs < aq_nic_get_cfg(self)->vecs;
	     self->aq_vecs++) {
		self->aq_vec[self->aq_vecs] =
		    aq_vec_alloc(self, self->aq_vecs, aq_nic_get_cfg(self));
		if (!self->aq_vec[self->aq_vecs]) {
			err = -ENOMEM;
			goto err_exit;
		}
	}

	netif_carrier_off(self->ndev);

	netif_tx_disable(self->ndev);

	err = register_netdev(self->ndev);
	if (err)
		goto err_exit;

err_exit:
	return err;
}

void aq_nic_ndev_init(struct aq_nic_s *self)
{
	const struct aq_hw_caps_s *aq_hw_caps = self->aq_nic_cfg.aq_hw_caps;
	struct aq_nic_cfg_s *aq_nic_cfg = &self->aq_nic_cfg;

	self->ndev->hw_features |= aq_hw_caps->hw_features;
	self->ndev->features = aq_hw_caps->hw_features;
	self->ndev->vlan_features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM |
				     NETIF_F_RXHASH | NETIF_F_SG | NETIF_F_LRO;
	self->ndev->priv_flags = aq_hw_caps->hw_priv_flags;
	self->ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;

	self->ndev->mtu = aq_nic_cfg->mtu - ETH_HLEN;
	self->ndev->max_mtu = aq_hw_caps->mtu - ETH_FCS_LEN - ETH_HLEN;
}

void aq_nic_set_tx_ring(struct aq_nic_s *self, unsigned int idx,
			struct aq_ring_s *ring)
{
	self->aq_ring_tx[idx] = ring;
}

struct net_device *aq_nic_get_ndev(struct aq_nic_s *self)
{
	return self->ndev;
}

int aq_nic_init(struct aq_nic_s *self)
{
	struct aq_vec_s *aq_vec = NULL;
	int err = 0;
	unsigned int i = 0U;

	self->power_state = AQ_HW_POWER_STATE_D0;
	mutex_lock(&self->fwreq_mutex);
	err = self->aq_hw_ops->hw_reset(self->aq_hw);
	mutex_unlock(&self->fwreq_mutex);
	if (err < 0)
		goto err_exit;

	err = self->aq_hw_ops->hw_init(self->aq_hw,
				       aq_nic_get_ndev(self)->dev_addr);
	if (err < 0)
		goto err_exit;

	for (i = 0U, aq_vec = self->aq_vec[0];
	     self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
		aq_vec_init(aq_vec, self->aq_hw_ops, self->aq_hw);

	netif_carrier_off(self->ndev);

err_exit:
	return err;
}
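
/* Start the datapath: program the multicast list and packet filter,
 * start every vector and the HW block, then either arm the polling
 * timer or request per-vector IRQs (plus the optional link-state IRQ)
 * before waking the TX queues.
 */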
int aq_nic_start(struct aq_nic_s *self)
{
	struct aq_vec_s *aq_vec = NULL;
	int err = 0;
	unsigned int i = 0U;

	err = self->aq_hw_ops->hw_multicast_list_set(self->aq_hw,
						     self->mc_list.ar,
						     self->mc_list.count);
	if (err < 0)
		goto err_exit;

	err = self->aq_hw_ops->hw_packet_filter_set(self->aq_hw,
						    self->packet_filter);
	if (err < 0)
		goto err_exit;

	for (i = 0U, aq_vec = self->aq_vec[0];
	     self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) {
		err = aq_vec_start(aq_vec);
		if (err < 0)
			goto err_exit;
	}

	err = self->aq_hw_ops->hw_start(self->aq_hw);
	if (err < 0)
		goto err_exit;

	err = aq_nic_update_interrupt_moderation_settings(self);
	if (err)
		goto err_exit;

	INIT_WORK(&self->service_task, aq_nic_service_task);

	timer_setup(&self->service_timer, aq_nic_service_timer_cb, 0);
	aq_nic_service_timer_cb(&self->service_timer);

	if (self->aq_nic_cfg.is_polling) {
		timer_setup(&self->polling_timer, aq_nic_polling_timer_cb, 0);
		mod_timer(&self->polling_timer, jiffies +
			  AQ_CFG_POLLING_TIMER_INTERVAL);
	} else {
		for (i = 0U, aq_vec = self->aq_vec[0];
		     self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) {
			err = aq_pci_func_alloc_irq(self, i, self->ndev->name,
						    aq_vec_isr, aq_vec,
						    aq_vec_get_affinity_mask(aq_vec));
			if (err < 0)
				goto err_exit;
		}

		if (self->aq_nic_cfg.link_irq_vec) {
			int irqvec = pci_irq_vector(self->pdev,
						    self->aq_nic_cfg.link_irq_vec);
			err = request_threaded_irq(irqvec, NULL,
						   aq_linkstate_threaded_isr,
						   IRQF_SHARED,
						   self->ndev->name, self);
			if (err < 0)
				goto err_exit;
			self->msix_entry_mask |= (1 << self->aq_nic_cfg.link_irq_vec);
		}

		err = self->aq_hw_ops->hw_irq_enable(self->aq_hw,
						     AQ_CFG_IRQ_MASK);
		if (err < 0)
			goto err_exit;
	}

	err = netif_set_real_num_tx_queues(self->ndev, self->aq_vecs);
	if (err < 0)
		goto err_exit;

	err = netif_set_real_num_rx_queues(self->ndev, self->aq_vecs);
	if (err < 0)
		goto err_exit;

	netif_tx_start_all_queues(self->ndev);

err_exit:
	return err;
}
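
/* Fill ring->buff_ring[] descriptors for an skb and DMA-map it.
 * A GSO skb consumes one extra context descriptor (is_txc) carrying
 * header lengths and MSS; the first data descriptor is marked is_sop
 * and the last is_eop, with first->eop_index used on completion.
 * Returns the number of descriptors used, or 0 on mapping failure
 * (all mappings taken so far are undone before returning).
 */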
static unsigned int aq_nic_map_skb(struct aq_nic_s *self,
				   struct sk_buff *skb,
				   struct aq_ring_s *ring)
{
	unsigned int ret = 0U;
	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
	unsigned int frag_count = 0U;
	unsigned int dx = ring->sw_tail;
	struct aq_ring_buff_s *first = NULL;
	struct aq_ring_buff_s *dx_buff = &ring->buff_ring[dx];

	if (unlikely(skb_is_gso(skb))) {
		dx_buff->flags = 0U;
		dx_buff->len_pkt = skb->len;
		dx_buff->len_l2 = ETH_HLEN;
		dx_buff->len_l3 = ip_hdrlen(skb);
		dx_buff->len_l4 = tcp_hdrlen(skb);
		dx_buff->mss = skb_shinfo(skb)->gso_size;
		dx_buff->is_txc = 1U;
		dx_buff->eop_index = 0xffffU;

		dx_buff->is_ipv6 =
			(ip_hdr(skb)->version == 6) ? 1U : 0U;

		dx = aq_ring_next_dx(ring, dx);
		dx_buff = &ring->buff_ring[dx];
		++ret;
	}

	dx_buff->flags = 0U;
	dx_buff->len = skb_headlen(skb);
	dx_buff->pa = dma_map_single(aq_nic_get_dev(self),
				     skb->data,
				     dx_buff->len,
				     DMA_TO_DEVICE);

	if (unlikely(dma_mapping_error(aq_nic_get_dev(self), dx_buff->pa))) {
		/* Discard the context descriptor as well: returning a
		 * non-zero count here would hand an incomplete chain to
		 * the hardware.
		 */
		ret = 0U;
		goto exit;
	}

	first = dx_buff;
	dx_buff->len_pkt = skb->len;
	dx_buff->is_sop = 1U;
	dx_buff->is_mapped = 1U;
	++ret;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		dx_buff->is_ip_cso = (htons(ETH_P_IP) == skb->protocol) ?
			1U : 0U;

		if (ip_hdr(skb)->version == 4) {
			dx_buff->is_tcp_cso =
				(ip_hdr(skb)->protocol == IPPROTO_TCP) ?
					1U : 0U;
			dx_buff->is_udp_cso =
				(ip_hdr(skb)->protocol == IPPROTO_UDP) ?
					1U : 0U;
		} else if (ip_hdr(skb)->version == 6) {
			dx_buff->is_tcp_cso =
				(ipv6_hdr(skb)->nexthdr == NEXTHDR_TCP) ?
					1U : 0U;
			dx_buff->is_udp_cso =
				(ipv6_hdr(skb)->nexthdr == NEXTHDR_UDP) ?
					1U : 0U;
		}
	}

	for (; nr_frags--; ++frag_count) {
		unsigned int frag_len = 0U;
		unsigned int buff_offset = 0U;
		unsigned int buff_size = 0U;
		dma_addr_t frag_pa;
		skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_count];

		frag_len = skb_frag_size(frag);

		while (frag_len) {
			if (frag_len > AQ_CFG_TX_FRAME_MAX)
				buff_size = AQ_CFG_TX_FRAME_MAX;
			else
				buff_size = frag_len;

			frag_pa = skb_frag_dma_map(aq_nic_get_dev(self),
						   frag,
						   buff_offset,
						   buff_size,
						   DMA_TO_DEVICE);

			if (unlikely(dma_mapping_error(aq_nic_get_dev(self),
						       frag_pa)))
				goto mapping_error;

			dx = aq_ring_next_dx(ring, dx);
			dx_buff = &ring->buff_ring[dx];

			dx_buff->flags = 0U;
			dx_buff->len = buff_size;
			dx_buff->pa = frag_pa;
			dx_buff->is_mapped = 1U;
			dx_buff->eop_index = 0xffffU;

			frag_len -= buff_size;
			buff_offset += buff_size;

			++ret;
		}
	}

	first->eop_index = dx;
	dx_buff->is_eop = 1U;
	dx_buff->skb = skb;
	goto exit;

mapping_error:
	for (dx = ring->sw_tail;
	     ret > 0;
	     --ret, dx = aq_ring_next_dx(ring, dx)) {
		dx_buff = &ring->buff_ring[dx];

		if (!dx_buff->is_txc && dx_buff->pa) {
			if (unlikely(dx_buff->is_sop)) {
				dma_unmap_single(aq_nic_get_dev(self),
						 dx_buff->pa,
						 dx_buff->len,
						 DMA_TO_DEVICE);
			} else {
				dma_unmap_page(aq_nic_get_dev(self),
					       dx_buff->pa,
					       dx_buff->len,
					       DMA_TO_DEVICE);
			}
		}
	}

exit:
	return ret;
}
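
/* Transmit entry point (reached from ndo_start_xmit): pick a TX ring
 * from the skb queue mapping, drop oversized skbs, and hand the mapped
 * descriptors to the hardware. NETDEV_TX_BUSY is returned while the
 * queue is stopped or mapping fails, so the core requeues the skb.
 */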
int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb)
{
	struct aq_ring_s *ring = NULL;
	unsigned int frags = 0U;
	unsigned int vec = skb->queue_mapping % self->aq_nic_cfg.vecs;
	unsigned int tc = 0U;
	int err = NETDEV_TX_OK;

	frags = skb_shinfo(skb)->nr_frags + 1;

	ring = self->aq_ring_tx[AQ_NIC_TCVEC2RING(self, tc, vec)];

	if (frags > AQ_CFG_SKB_FRAGS_MAX) {
		dev_kfree_skb_any(skb);
		goto err_exit;
	}

	aq_ring_update_queue_state(ring);

	/* Above status update may stop the queue. Check this. */
	if (__netif_subqueue_stopped(self->ndev, ring->idx)) {
		err = NETDEV_TX_BUSY;
		goto err_exit;
	}

	frags = aq_nic_map_skb(self, skb, ring);

	if (likely(frags)) {
		err = self->aq_hw_ops->hw_ring_tx_xmit(self->aq_hw,
						       ring, frags);
		if (err >= 0) {
			++ring->stats.tx.packets;
			ring->stats.tx.bytes += skb->len;
		}
	} else {
		err = NETDEV_TX_BUSY;
	}

err_exit:
	return err;
}

int aq_nic_update_interrupt_moderation_settings(struct aq_nic_s *self)
{
	return self->aq_hw_ops->hw_interrupt_moderation_set(self->aq_hw);
}

int aq_nic_set_packet_filter(struct aq_nic_s *self, unsigned int flags)
{
	int err = 0;

	err = self->aq_hw_ops->hw_packet_filter_set(self->aq_hw, flags);
	if (err < 0)
		goto err_exit;

	self->packet_filter = flags;

err_exit:
	return err;
}

int aq_nic_set_multicast_list(struct aq_nic_s *self, struct net_device *ndev)
{
	unsigned int packet_filter = self->packet_filter;
	struct netdev_hw_addr *ha = NULL;
	unsigned int i = 0U;

	self->mc_list.count = 0;
	if (netdev_uc_count(ndev) > AQ_HW_MULTICAST_ADDRESS_MAX) {
		packet_filter |= IFF_PROMISC;
	} else {
		netdev_for_each_uc_addr(ha, ndev) {
			ether_addr_copy(self->mc_list.ar[i++], ha->addr);

			if (i >= AQ_HW_MULTICAST_ADDRESS_MAX)
				break;
		}
	}

	if (i + netdev_mc_count(ndev) > AQ_HW_MULTICAST_ADDRESS_MAX) {
		packet_filter |= IFF_ALLMULTI;
	} else {
		netdev_for_each_mc_addr(ha, ndev) {
			ether_addr_copy(self->mc_list.ar[i++], ha->addr);

			if (i >= AQ_HW_MULTICAST_ADDRESS_MAX)
				break;
		}
	}

	if (i > 0 && i <= AQ_HW_MULTICAST_ADDRESS_MAX) {
		packet_filter |= IFF_MULTICAST;
		self->mc_list.count = i;
		self->aq_hw_ops->hw_multicast_list_set(self->aq_hw,
						       self->mc_list.ar,
						       self->mc_list.count);
	}
	return aq_nic_set_packet_filter(self, packet_filter);
}

int aq_nic_set_mtu(struct aq_nic_s *self, int new_mtu)
{
	self->aq_nic_cfg.mtu = new_mtu;

	return 0;
}

int aq_nic_set_mac(struct aq_nic_s *self, struct net_device *ndev)
{
	return self->aq_hw_ops->hw_set_mac_address(self->aq_hw, ndev->dev_addr);
}

unsigned int aq_nic_get_link_speed(struct aq_nic_s *self)
{
	return self->link_status.mbps;
}

int aq_nic_get_regs(struct aq_nic_s *self, struct ethtool_regs *regs, void *p)
{
	u32 *regs_buff = p;
	int err = 0;

	regs->version = 1;

	err = self->aq_hw_ops->hw_get_regs(self->aq_hw,
					   self->aq_nic_cfg.aq_hw_caps,
					   regs_buff);
	if (err < 0)
		goto err_exit;

err_exit:
	return err;
}

int aq_nic_get_regs_count(struct aq_nic_s *self)
{
	return self->aq_nic_cfg.aq_hw_caps->mac_regs_count;
}
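
/* Ethtool statistics: firmware/MAC counters first, in a fixed order
 * that must stay in sync with the stat names table on the ethtool
 * side, followed by per-vector software ring counters appended by
 * aq_vec_get_sw_stats().
 */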
void aq_nic_get_stats(struct aq_nic_s *self, u64 *data)
{
	unsigned int i = 0U;
	unsigned int count = 0U;
	struct aq_vec_s *aq_vec = NULL;
	struct aq_stats_s *stats;

	if (self->aq_fw_ops->update_stats) {
		mutex_lock(&self->fwreq_mutex);
		self->aq_fw_ops->update_stats(self->aq_hw);
		mutex_unlock(&self->fwreq_mutex);
	}
	stats = self->aq_hw_ops->hw_get_hw_stats(self->aq_hw);

	if (!stats)
		goto err_exit;

	data[i] = stats->uprc + stats->mprc + stats->bprc;
	data[++i] = stats->uprc;
	data[++i] = stats->mprc;
	data[++i] = stats->bprc;
	data[++i] = stats->erpt;
	data[++i] = stats->uptc + stats->mptc + stats->bptc;
	data[++i] = stats->uptc;
	data[++i] = stats->mptc;
	data[++i] = stats->bptc;
	data[++i] = stats->ubrc;
	data[++i] = stats->ubtc;
	data[++i] = stats->mbrc;
	data[++i] = stats->mbtc;
	data[++i] = stats->bbrc;
	data[++i] = stats->bbtc;
	data[++i] = stats->ubrc + stats->mbrc + stats->bbrc;
	data[++i] = stats->ubtc + stats->mbtc + stats->bbtc;
	data[++i] = stats->dma_pkt_rc;
	data[++i] = stats->dma_pkt_tc;
	data[++i] = stats->dma_oct_rc;
	data[++i] = stats->dma_oct_tc;
	data[++i] = stats->dpc;

	i++;

	data += i;

	for (i = 0U, aq_vec = self->aq_vec[0];
	     aq_vec && self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) {
		data += count;
		aq_vec_get_sw_stats(aq_vec, data, &count);
	}

err_exit:;
}

static void aq_nic_update_ndev_stats(struct aq_nic_s *self)
{
	struct net_device *ndev = self->ndev;
	struct aq_stats_s *stats = self->aq_hw_ops->hw_get_hw_stats(self->aq_hw);

	ndev->stats.rx_packets = stats->dma_pkt_rc;
	ndev->stats.rx_bytes = stats->dma_oct_rc;
	ndev->stats.rx_errors = stats->erpr;
	ndev->stats.rx_dropped = stats->dpc;
	ndev->stats.tx_packets = stats->dma_pkt_tc;
	ndev->stats.tx_bytes = stats->dma_oct_tc;
	ndev->stats.tx_errors = stats->erpt;
	ndev->stats.multicast = stats->mprc;
}
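
/* Report link modes for ethtool: supported modes come from hardware
 * capabilities (aq_hw_caps->link_speed_msk), while advertised modes
 * reflect the currently configured link_speed_msk and flow-control
 * settings.
 */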
void aq_nic_get_link_ksettings(struct aq_nic_s *self,
			       struct ethtool_link_ksettings *cmd)
{
	if (self->aq_nic_cfg.aq_hw_caps->media_type == AQ_HW_MEDIA_TYPE_FIBRE)
		cmd->base.port = PORT_FIBRE;
	else
		cmd->base.port = PORT_TP;
	/* This driver supports only 10G capable adapters, so DUPLEX_FULL */
	cmd->base.duplex = DUPLEX_FULL;
	cmd->base.autoneg = self->aq_nic_cfg.is_autoneg;

	ethtool_link_ksettings_zero_link_mode(cmd, supported);

	if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_10G)
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     10000baseT_Full);

	if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_5G)
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     5000baseT_Full);

	if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_2GS)
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     2500baseT_Full);

	if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_1G)
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     1000baseT_Full);

	if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_100M)
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     100baseT_Full);

	if (self->aq_nic_cfg.aq_hw_caps->flow_control)
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     Pause);

	ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);

	if (self->aq_nic_cfg.aq_hw_caps->media_type == AQ_HW_MEDIA_TYPE_FIBRE)
		ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
	else
		ethtool_link_ksettings_add_link_mode(cmd, supported, TP);

	ethtool_link_ksettings_zero_link_mode(cmd, advertising);

	if (self->aq_nic_cfg.is_autoneg)
		ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg);

	if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_10G)
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     10000baseT_Full);

	if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_5G)
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     5000baseT_Full);

	if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_2GS)
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     2500baseT_Full);

	if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_1G)
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     1000baseT_Full);

	if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_100M)
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     100baseT_Full);

	if (self->aq_nic_cfg.flow_control & AQ_NIC_FC_RX)
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     Pause);

	/* Asym is when either RX or TX, but not both */
	if (!!(self->aq_nic_cfg.flow_control & AQ_NIC_FC_TX) ^
	    !!(self->aq_nic_cfg.flow_control & AQ_NIC_FC_RX))
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     Asym_Pause);

	if (self->aq_nic_cfg.aq_hw_caps->media_type == AQ_HW_MEDIA_TYPE_FIBRE)
		ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE);
	else
		ethtool_link_ksettings_add_link_mode(cmd, advertising, TP);
}

int aq_nic_set_link_ksettings(struct aq_nic_s *self,
			      const struct ethtool_link_ksettings *cmd)
{
	u32 speed = 0U;
	u32 rate = 0U;
	int err = 0;

	if (cmd->base.autoneg == AUTONEG_ENABLE) {
		rate = self->aq_nic_cfg.aq_hw_caps->link_speed_msk;
		self->aq_nic_cfg.is_autoneg = true;
	} else {
		speed = cmd->base.speed;

		switch (speed) {
		case SPEED_100:
			rate = AQ_NIC_RATE_100M;
			break;

		case SPEED_1000:
			rate = AQ_NIC_RATE_1G;
			break;

		case SPEED_2500:
			rate = AQ_NIC_RATE_2GS;
			break;

		case SPEED_5000:
			rate = AQ_NIC_RATE_5G;
			break;

		case SPEED_10000:
			rate = AQ_NIC_RATE_10G;
			break;

		default:
			err = -EINVAL;
			goto err_exit;
		}

		if (!(self->aq_nic_cfg.aq_hw_caps->link_speed_msk & rate)) {
			err = -EINVAL;
			goto err_exit;
		}

		self->aq_nic_cfg.is_autoneg = false;
	}

	mutex_lock(&self->fwreq_mutex);
	err = self->aq_fw_ops->set_link_speed(self->aq_hw, rate);
	mutex_unlock(&self->fwreq_mutex);
	if (err < 0)
		goto err_exit;

	self->aq_nic_cfg.link_speed_msk = rate;

err_exit:
	return err;
}

struct aq_nic_cfg_s *aq_nic_get_cfg(struct aq_nic_s *self)
{
	return &self->aq_nic_cfg;
}

u32 aq_nic_get_fw_version(struct aq_nic_s *self)
{
	u32 fw_version = 0U;

	self->aq_hw_ops->hw_get_fw_version(self->aq_hw, &fw_version);

	return fw_version;
}

int aq_nic_stop(struct aq_nic_s *self)
{
	struct aq_vec_s *aq_vec = NULL;
	unsigned int i = 0U;

	netif_tx_disable(self->ndev);
	netif_carrier_off(self->ndev);

	del_timer_sync(&self->service_timer);
	cancel_work_sync(&self->service_task);

	self->aq_hw_ops->hw_irq_disable(self->aq_hw, AQ_CFG_IRQ_MASK);

	if (self->aq_nic_cfg.is_polling)
		del_timer_sync(&self->polling_timer);
	else
		aq_pci_func_free_irqs(self);

	for (i = 0U, aq_vec = self->aq_vec[0];
	     self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
		aq_vec_stop(aq_vec);

	return self->aq_hw_ops->hw_stop(self->aq_hw);
}
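
/* Tear down the datapath after aq_nic_stop(): deinit every vector and
 * let firmware clean up. When a low-power state was requested or WoL
 * is armed, the MAC address is handed to firmware via set_power(),
 * presumably so firmware can keep the PHY alive for wake-up.
 */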
void aq_nic_deinit(struct aq_nic_s *self)
{
	struct aq_vec_s *aq_vec = NULL;
	unsigned int i = 0U;

	if (!self)
		goto err_exit;

	for (i = 0U, aq_vec = self->aq_vec[0];
	     self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
		aq_vec_deinit(aq_vec);

	if (likely(self->aq_fw_ops->deinit)) {
		mutex_lock(&self->fwreq_mutex);
		self->aq_fw_ops->deinit(self->aq_hw);
		mutex_unlock(&self->fwreq_mutex);
	}

	if (self->power_state != AQ_HW_POWER_STATE_D0 ||
	    self->aq_hw->aq_nic_cfg->wol)
		if (likely(self->aq_fw_ops->set_power)) {
			mutex_lock(&self->fwreq_mutex);
			self->aq_fw_ops->set_power(self->aq_hw,
						   self->power_state,
						   self->ndev->dev_addr);
			mutex_unlock(&self->fwreq_mutex);
		}

err_exit:;
}

void aq_nic_free_vectors(struct aq_nic_s *self)
{
	unsigned int i = 0U;

	if (!self)
		goto err_exit;

	for (i = ARRAY_SIZE(self->aq_vec); i--;) {
		if (self->aq_vec[i]) {
			aq_vec_free(self->aq_vec[i]);
			self->aq_vec[i] = NULL;
		}
	}

err_exit:;
}

int aq_nic_change_pm_state(struct aq_nic_s *self, pm_message_t *pm_msg)
{
	int err = 0;

	if (!netif_running(self->ndev)) {
		err = 0;
		goto out;
	}
	rtnl_lock();
	if (pm_msg->event & PM_EVENT_SLEEP || pm_msg->event & PM_EVENT_FREEZE) {
		self->power_state = AQ_HW_POWER_STATE_D3;
		netif_device_detach(self->ndev);
		netif_tx_stop_all_queues(self->ndev);

		err = aq_nic_stop(self);
		if (err < 0)
			goto err_exit;

		aq_nic_deinit(self);
	} else {
		err = aq_nic_init(self);
		if (err < 0)
			goto err_exit;

		err = aq_nic_start(self);
		if (err < 0)
			goto err_exit;

		netif_device_attach(self->ndev);
		netif_tx_start_all_queues(self->ndev);
	}

err_exit:
	rtnl_unlock();
out:
	return err;
}

void aq_nic_shutdown(struct aq_nic_s *self)
{
	int err = 0;

	if (!self->ndev)
		return;

	rtnl_lock();

	netif_device_detach(self->ndev);

	if (netif_running(self->ndev)) {
		err = aq_nic_stop(self);
		if (err < 0)
			goto err_exit;
	}
	aq_nic_deinit(self);

err_exit:
	rtnl_unlock();
}