1 /* 2 * aQuantia Corporation Network Driver 3 * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved 4 * 5 * This program is free software; you can redistribute it and/or modify it 6 * under the terms and conditions of the GNU General Public License, 7 * version 2, as published by the Free Software Foundation. 8 */ 9 10 /* File aq_nic.c: Definition of common code for NIC. */ 11 12 #include "aq_nic.h" 13 #include "aq_ring.h" 14 #include "aq_vec.h" 15 #include "aq_hw.h" 16 #include "aq_pci_func.h" 17 #include "aq_nic_internal.h" 18 19 #include <linux/netdevice.h> 20 #include <linux/etherdevice.h> 21 #include <linux/timer.h> 22 #include <linux/cpu.h> 23 #include <linux/ip.h> 24 #include <linux/tcp.h> 25 #include <net/ip.h> 26 27 static void aq_nic_rss_init(struct aq_nic_s *self, unsigned int num_rss_queues) 28 { 29 struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg; 30 struct aq_rss_parameters *rss_params = &cfg->aq_rss; 31 int i = 0; 32 33 static u8 rss_key[40] = { 34 0x1e, 0xad, 0x71, 0x87, 0x65, 0xfc, 0x26, 0x7d, 35 0x0d, 0x45, 0x67, 0x74, 0xcd, 0x06, 0x1a, 0x18, 36 0xb6, 0xc1, 0xf0, 0xc7, 0xbb, 0x18, 0xbe, 0xf8, 37 0x19, 0x13, 0x4b, 0xa9, 0xd0, 0x3e, 0xfe, 0x70, 38 0x25, 0x03, 0xab, 0x50, 0x6a, 0x8b, 0x82, 0x0c 39 }; 40 41 rss_params->hash_secret_key_size = sizeof(rss_key); 42 memcpy(rss_params->hash_secret_key, rss_key, sizeof(rss_key)); 43 rss_params->indirection_table_size = AQ_CFG_RSS_INDIRECTION_TABLE_MAX; 44 45 for (i = rss_params->indirection_table_size; i--;) 46 rss_params->indirection_table[i] = i & (num_rss_queues - 1); 47 } 48 49 /* Fills aq_nic_cfg with valid defaults */ 50 static void aq_nic_cfg_init_defaults(struct aq_nic_s *self) 51 { 52 struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg; 53 54 cfg->aq_hw_caps = &self->aq_hw_caps; 55 56 cfg->vecs = AQ_CFG_VECS_DEF; 57 cfg->tcs = AQ_CFG_TCS_DEF; 58 59 cfg->rxds = AQ_CFG_RXDS_DEF; 60 cfg->txds = AQ_CFG_TXDS_DEF; 61 62 cfg->is_polling = AQ_CFG_IS_POLLING_DEF; 63 64 cfg->is_interrupt_moderation = AQ_CFG_IS_INTERRUPT_MODERATION_DEF; 65 cfg->itr = cfg->is_interrupt_moderation ? 66 AQ_CFG_INTERRUPT_MODERATION_RATE_DEF : 0U; 67 68 cfg->is_rss = AQ_CFG_IS_RSS_DEF; 69 cfg->num_rss_queues = AQ_CFG_NUM_RSS_QUEUES_DEF; 70 cfg->aq_rss.base_cpu_number = AQ_CFG_RSS_BASE_CPU_NUM_DEF; 71 cfg->flow_control = AQ_CFG_FC_MODE; 72 73 cfg->mtu = AQ_CFG_MTU_DEF; 74 cfg->link_speed_msk = AQ_CFG_SPEED_MSK; 75 cfg->is_autoneg = AQ_CFG_IS_AUTONEG_DEF; 76 77 cfg->is_lro = AQ_CFG_IS_LRO_DEF; 78 79 cfg->vlan_id = 0U; 80 81 aq_nic_rss_init(self, cfg->num_rss_queues); 82 } 83 84 /* Checks hw_caps and 'corrects' aq_nic_cfg in runtime */ 85 int aq_nic_cfg_start(struct aq_nic_s *self) 86 { 87 struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg; 88 89 /*descriptors */ 90 cfg->rxds = min(cfg->rxds, cfg->aq_hw_caps->rxds); 91 cfg->txds = min(cfg->txds, cfg->aq_hw_caps->txds); 92 93 /*rss rings */ 94 cfg->vecs = min(cfg->vecs, cfg->aq_hw_caps->vecs); 95 cfg->vecs = min(cfg->vecs, num_online_cpus()); 96 /* cfg->vecs should be power of 2 for RSS */ 97 if (cfg->vecs >= 8U) 98 cfg->vecs = 8U; 99 else if (cfg->vecs >= 4U) 100 cfg->vecs = 4U; 101 else if (cfg->vecs >= 2U) 102 cfg->vecs = 2U; 103 else 104 cfg->vecs = 1U; 105 106 cfg->irq_type = aq_pci_func_get_irq_type(self->aq_pci_func); 107 108 if ((cfg->irq_type == AQ_HW_IRQ_LEGACY) || 109 (self->aq_hw_caps.vecs == 1U) || 110 (cfg->vecs == 1U)) { 111 cfg->is_rss = 0U; 112 cfg->vecs = 1U; 113 } 114 115 cfg->link_speed_msk &= self->aq_hw_caps.link_speed_msk; 116 cfg->hw_features = self->aq_hw_caps.hw_features; 117 return 0; 118 } 119 120 static void aq_nic_service_timer_cb(unsigned long param) 121 { 122 struct aq_nic_s *self = (struct aq_nic_s *)param; 123 struct net_device *ndev = aq_nic_get_ndev(self); 124 int err = 0; 125 unsigned int i = 0U; 126 struct aq_hw_link_status_s link_status; 127 struct aq_ring_stats_rx_s stats_rx; 128 struct aq_ring_stats_tx_s stats_tx; 129 130 if (aq_utils_obj_test(&self->header.flags, AQ_NIC_FLAGS_IS_NOT_READY)) 131 goto err_exit; 132 133 err = self->aq_hw_ops.hw_get_link_status(self->aq_hw, &link_status); 134 if (err < 0) 135 goto err_exit; 136 137 self->aq_hw_ops.hw_interrupt_moderation_set(self->aq_hw, 138 self->aq_nic_cfg.is_interrupt_moderation); 139 140 if (memcmp(&link_status, &self->link_status, sizeof(link_status))) { 141 if (link_status.mbps) { 142 aq_utils_obj_set(&self->header.flags, 143 AQ_NIC_FLAG_STARTED); 144 aq_utils_obj_clear(&self->header.flags, 145 AQ_NIC_LINK_DOWN); 146 netif_carrier_on(self->ndev); 147 } else { 148 netif_carrier_off(self->ndev); 149 aq_utils_obj_set(&self->header.flags, AQ_NIC_LINK_DOWN); 150 } 151 152 self->link_status = link_status; 153 } 154 155 memset(&stats_rx, 0U, sizeof(struct aq_ring_stats_rx_s)); 156 memset(&stats_tx, 0U, sizeof(struct aq_ring_stats_tx_s)); 157 for (i = AQ_DIMOF(self->aq_vec); i--;) { 158 if (self->aq_vec[i]) 159 aq_vec_add_stats(self->aq_vec[i], &stats_rx, &stats_tx); 160 } 161 162 ndev->stats.rx_packets = stats_rx.packets; 163 ndev->stats.rx_bytes = stats_rx.bytes; 164 ndev->stats.rx_errors = stats_rx.errors; 165 ndev->stats.tx_packets = stats_tx.packets; 166 ndev->stats.tx_bytes = stats_tx.bytes; 167 ndev->stats.tx_errors = stats_tx.errors; 168 169 err_exit: 170 mod_timer(&self->service_timer, 171 jiffies + AQ_CFG_SERVICE_TIMER_INTERVAL); 172 } 173 174 static void aq_nic_polling_timer_cb(unsigned long param) 175 { 176 struct aq_nic_s *self = (struct aq_nic_s *)param; 177 struct aq_vec_s *aq_vec = NULL; 178 unsigned int i = 0U; 179 180 for (i = 0U, aq_vec = self->aq_vec[0]; 181 self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) 182 aq_vec_isr(i, (void *)aq_vec); 183 184 mod_timer(&self->polling_timer, jiffies + 185 AQ_CFG_POLLING_TIMER_INTERVAL); 186 } 187 188 static struct net_device *aq_nic_ndev_alloc(void) 189 { 190 return alloc_etherdev_mq(sizeof(struct aq_nic_s), AQ_CFG_VECS_MAX); 191 } 192 193 struct aq_nic_s *aq_nic_alloc_cold(const struct net_device_ops *ndev_ops, 194 const struct ethtool_ops *et_ops, 195 struct device *dev, 196 struct aq_pci_func_s *aq_pci_func, 197 unsigned int port, 198 const struct aq_hw_ops *aq_hw_ops) 199 { 200 struct net_device *ndev = NULL; 201 struct aq_nic_s *self = NULL; 202 int err = 0; 203 204 ndev = aq_nic_ndev_alloc(); 205 if (!ndev) { 206 err = -ENOMEM; 207 goto err_exit; 208 } 209 210 self = netdev_priv(ndev); 211 212 ndev->netdev_ops = ndev_ops; 213 ndev->ethtool_ops = et_ops; 214 215 SET_NETDEV_DEV(ndev, dev); 216 217 ndev->if_port = port; 218 ndev->min_mtu = ETH_MIN_MTU; 219 self->ndev = ndev; 220 221 self->aq_pci_func = aq_pci_func; 222 223 self->aq_hw_ops = *aq_hw_ops; 224 self->port = (u8)port; 225 226 self->aq_hw = self->aq_hw_ops.create(aq_pci_func, self->port, 227 &self->aq_hw_ops); 228 err = self->aq_hw_ops.get_hw_caps(self->aq_hw, &self->aq_hw_caps); 229 if (err < 0) 230 goto err_exit; 231 232 aq_nic_cfg_init_defaults(self); 233 234 err_exit: 235 if (err < 0) { 236 aq_nic_free_hot_resources(self); 237 self = NULL; 238 } 239 return self; 240 } 241 242 int aq_nic_ndev_register(struct aq_nic_s *self) 243 { 244 int err = 0; 245 unsigned int i = 0U; 246 247 if (!self->ndev) { 248 err = -EINVAL; 249 goto err_exit; 250 } 251 err = self->aq_hw_ops.hw_get_mac_permanent(self->aq_hw, 252 self->aq_nic_cfg.aq_hw_caps, 253 self->ndev->dev_addr); 254 if (err < 0) 255 goto err_exit; 256 257 #if defined(AQ_CFG_MAC_ADDR_PERMANENT) 258 { 259 static u8 mac_addr_permanent[] = AQ_CFG_MAC_ADDR_PERMANENT; 260 261 ether_addr_copy(self->ndev->dev_addr, mac_addr_permanent); 262 } 263 #endif 264 265 netif_carrier_off(self->ndev); 266 267 for (i = AQ_CFG_VECS_MAX; i--;) 268 aq_nic_ndev_queue_stop(self, i); 269 270 err = register_netdev(self->ndev); 271 if (err < 0) 272 goto err_exit; 273 274 err_exit: 275 return err; 276 } 277 278 int aq_nic_ndev_init(struct aq_nic_s *self) 279 { 280 struct aq_hw_caps_s *aq_hw_caps = self->aq_nic_cfg.aq_hw_caps; 281 struct aq_nic_cfg_s *aq_nic_cfg = &self->aq_nic_cfg; 282 283 self->ndev->hw_features |= aq_hw_caps->hw_features; 284 self->ndev->features = aq_hw_caps->hw_features; 285 self->ndev->priv_flags = aq_hw_caps->hw_priv_flags; 286 self->ndev->mtu = aq_nic_cfg->mtu - ETH_HLEN; 287 288 return 0; 289 } 290 291 void aq_nic_ndev_free(struct aq_nic_s *self) 292 { 293 if (!self->ndev) 294 goto err_exit; 295 296 if (self->ndev->reg_state == NETREG_REGISTERED) 297 unregister_netdev(self->ndev); 298 299 if (self->aq_hw) 300 self->aq_hw_ops.destroy(self->aq_hw); 301 302 free_netdev(self->ndev); 303 304 err_exit:; 305 } 306 307 struct aq_nic_s *aq_nic_alloc_hot(struct net_device *ndev) 308 { 309 struct aq_nic_s *self = NULL; 310 int err = 0; 311 312 if (!ndev) { 313 err = -EINVAL; 314 goto err_exit; 315 } 316 self = netdev_priv(ndev); 317 318 if (!self) { 319 err = -EINVAL; 320 goto err_exit; 321 } 322 if (netif_running(ndev)) { 323 unsigned int i; 324 325 for (i = AQ_CFG_VECS_MAX; i--;) 326 netif_stop_subqueue(ndev, i); 327 } 328 329 for (self->aq_vecs = 0; self->aq_vecs < self->aq_nic_cfg.vecs; 330 self->aq_vecs++) { 331 self->aq_vec[self->aq_vecs] = 332 aq_vec_alloc(self, self->aq_vecs, &self->aq_nic_cfg); 333 if (!self->aq_vec[self->aq_vecs]) { 334 err = -ENOMEM; 335 goto err_exit; 336 } 337 } 338 339 err_exit: 340 if (err < 0) { 341 aq_nic_free_hot_resources(self); 342 self = NULL; 343 } 344 return self; 345 } 346 347 void aq_nic_set_tx_ring(struct aq_nic_s *self, unsigned int idx, 348 struct aq_ring_s *ring) 349 { 350 self->aq_ring_tx[idx] = ring; 351 } 352 353 struct device *aq_nic_get_dev(struct aq_nic_s *self) 354 { 355 return self->ndev->dev.parent; 356 } 357 358 struct net_device *aq_nic_get_ndev(struct aq_nic_s *self) 359 { 360 return self->ndev; 361 } 362 363 int aq_nic_init(struct aq_nic_s *self) 364 { 365 struct aq_vec_s *aq_vec = NULL; 366 int err = 0; 367 unsigned int i = 0U; 368 369 self->power_state = AQ_HW_POWER_STATE_D0; 370 err = self->aq_hw_ops.hw_reset(self->aq_hw); 371 if (err < 0) 372 goto err_exit; 373 374 err = self->aq_hw_ops.hw_init(self->aq_hw, &self->aq_nic_cfg, 375 aq_nic_get_ndev(self)->dev_addr); 376 if (err < 0) 377 goto err_exit; 378 379 for (i = 0U, aq_vec = self->aq_vec[0]; 380 self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) 381 aq_vec_init(aq_vec, &self->aq_hw_ops, self->aq_hw); 382 383 err_exit: 384 return err; 385 } 386 387 void aq_nic_ndev_queue_start(struct aq_nic_s *self, unsigned int idx) 388 { 389 netif_start_subqueue(self->ndev, idx); 390 } 391 392 void aq_nic_ndev_queue_stop(struct aq_nic_s *self, unsigned int idx) 393 { 394 netif_stop_subqueue(self->ndev, idx); 395 } 396 397 int aq_nic_start(struct aq_nic_s *self) 398 { 399 struct aq_vec_s *aq_vec = NULL; 400 int err = 0; 401 unsigned int i = 0U; 402 403 err = self->aq_hw_ops.hw_multicast_list_set(self->aq_hw, 404 self->mc_list.ar, 405 self->mc_list.count); 406 if (err < 0) 407 goto err_exit; 408 409 err = self->aq_hw_ops.hw_packet_filter_set(self->aq_hw, 410 self->packet_filter); 411 if (err < 0) 412 goto err_exit; 413 414 for (i = 0U, aq_vec = self->aq_vec[0]; 415 self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) { 416 err = aq_vec_start(aq_vec); 417 if (err < 0) 418 goto err_exit; 419 } 420 421 err = self->aq_hw_ops.hw_start(self->aq_hw); 422 if (err < 0) 423 goto err_exit; 424 425 err = self->aq_hw_ops.hw_interrupt_moderation_set(self->aq_hw, 426 self->aq_nic_cfg.is_interrupt_moderation); 427 if (err < 0) 428 goto err_exit; 429 setup_timer(&self->service_timer, &aq_nic_service_timer_cb, 430 (unsigned long)self); 431 mod_timer(&self->service_timer, jiffies + 432 AQ_CFG_SERVICE_TIMER_INTERVAL); 433 434 if (self->aq_nic_cfg.is_polling) { 435 setup_timer(&self->polling_timer, &aq_nic_polling_timer_cb, 436 (unsigned long)self); 437 mod_timer(&self->polling_timer, jiffies + 438 AQ_CFG_POLLING_TIMER_INTERVAL); 439 } else { 440 for (i = 0U, aq_vec = self->aq_vec[0]; 441 self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) { 442 err = aq_pci_func_alloc_irq(self->aq_pci_func, i, 443 self->ndev->name, aq_vec, 444 aq_vec_get_affinity_mask(aq_vec)); 445 if (err < 0) 446 goto err_exit; 447 } 448 449 err = self->aq_hw_ops.hw_irq_enable(self->aq_hw, 450 AQ_CFG_IRQ_MASK); 451 if (err < 0) 452 goto err_exit; 453 } 454 455 for (i = 0U, aq_vec = self->aq_vec[0]; 456 self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) 457 aq_nic_ndev_queue_start(self, i); 458 459 err = netif_set_real_num_tx_queues(self->ndev, self->aq_vecs); 460 if (err < 0) 461 goto err_exit; 462 463 err = netif_set_real_num_rx_queues(self->ndev, self->aq_vecs); 464 if (err < 0) 465 goto err_exit; 466 467 err_exit: 468 return err; 469 } 470 471 static unsigned int aq_nic_map_skb(struct aq_nic_s *self, 472 struct sk_buff *skb, 473 struct aq_ring_s *ring) 474 { 475 unsigned int ret = 0U; 476 unsigned int nr_frags = skb_shinfo(skb)->nr_frags; 477 unsigned int frag_count = 0U; 478 unsigned int dx = ring->sw_tail; 479 struct aq_ring_buff_s *dx_buff = &ring->buff_ring[dx]; 480 481 if (unlikely(skb_is_gso(skb))) { 482 dx_buff->flags = 0U; 483 dx_buff->len_pkt = skb->len; 484 dx_buff->len_l2 = ETH_HLEN; 485 dx_buff->len_l3 = ip_hdrlen(skb); 486 dx_buff->len_l4 = tcp_hdrlen(skb); 487 dx_buff->mss = skb_shinfo(skb)->gso_size; 488 dx_buff->is_txc = 1U; 489 490 dx = aq_ring_next_dx(ring, dx); 491 dx_buff = &ring->buff_ring[dx]; 492 ++ret; 493 } 494 495 dx_buff->flags = 0U; 496 dx_buff->len = skb_headlen(skb); 497 dx_buff->pa = dma_map_single(aq_nic_get_dev(self), 498 skb->data, 499 dx_buff->len, 500 DMA_TO_DEVICE); 501 502 if (unlikely(dma_mapping_error(aq_nic_get_dev(self), dx_buff->pa))) 503 goto exit; 504 505 dx_buff->len_pkt = skb->len; 506 dx_buff->is_sop = 1U; 507 dx_buff->is_mapped = 1U; 508 ++ret; 509 510 if (skb->ip_summed == CHECKSUM_PARTIAL) { 511 dx_buff->is_ip_cso = (htons(ETH_P_IP) == skb->protocol) ? 512 1U : 0U; 513 dx_buff->is_tcp_cso = 514 (ip_hdr(skb)->protocol == IPPROTO_TCP) ? 1U : 0U; 515 dx_buff->is_udp_cso = 516 (ip_hdr(skb)->protocol == IPPROTO_UDP) ? 1U : 0U; 517 } 518 519 for (; nr_frags--; ++frag_count) { 520 unsigned int frag_len = 0U; 521 dma_addr_t frag_pa; 522 skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_count]; 523 524 frag_len = skb_frag_size(frag); 525 frag_pa = skb_frag_dma_map(aq_nic_get_dev(self), frag, 0, 526 frag_len, DMA_TO_DEVICE); 527 528 if (unlikely(dma_mapping_error(aq_nic_get_dev(self), frag_pa))) 529 goto mapping_error; 530 531 while (frag_len > AQ_CFG_TX_FRAME_MAX) { 532 dx = aq_ring_next_dx(ring, dx); 533 dx_buff = &ring->buff_ring[dx]; 534 535 dx_buff->flags = 0U; 536 dx_buff->len = AQ_CFG_TX_FRAME_MAX; 537 dx_buff->pa = frag_pa; 538 dx_buff->is_mapped = 1U; 539 540 frag_len -= AQ_CFG_TX_FRAME_MAX; 541 frag_pa += AQ_CFG_TX_FRAME_MAX; 542 ++ret; 543 } 544 545 dx = aq_ring_next_dx(ring, dx); 546 dx_buff = &ring->buff_ring[dx]; 547 548 dx_buff->flags = 0U; 549 dx_buff->len = frag_len; 550 dx_buff->pa = frag_pa; 551 dx_buff->is_mapped = 1U; 552 ++ret; 553 } 554 555 dx_buff->is_eop = 1U; 556 dx_buff->skb = skb; 557 goto exit; 558 559 mapping_error: 560 for (dx = ring->sw_tail; 561 ret > 0; 562 --ret, dx = aq_ring_next_dx(ring, dx)) { 563 dx_buff = &ring->buff_ring[dx]; 564 565 if (!dx_buff->is_txc && dx_buff->pa) { 566 if (unlikely(dx_buff->is_sop)) { 567 dma_unmap_single(aq_nic_get_dev(self), 568 dx_buff->pa, 569 dx_buff->len, 570 DMA_TO_DEVICE); 571 } else { 572 dma_unmap_page(aq_nic_get_dev(self), 573 dx_buff->pa, 574 dx_buff->len, 575 DMA_TO_DEVICE); 576 } 577 } 578 } 579 580 exit: 581 return ret; 582 } 583 584 int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb) 585 __releases(&ring->lock) 586 __acquires(&ring->lock) 587 { 588 struct aq_ring_s *ring = NULL; 589 unsigned int frags = 0U; 590 unsigned int vec = skb->queue_mapping % self->aq_nic_cfg.vecs; 591 unsigned int tc = 0U; 592 unsigned int trys = AQ_CFG_LOCK_TRYS; 593 int err = NETDEV_TX_OK; 594 bool is_nic_in_bad_state; 595 596 frags = skb_shinfo(skb)->nr_frags + 1; 597 598 ring = self->aq_ring_tx[AQ_NIC_TCVEC2RING(self, tc, vec)]; 599 600 if (frags > AQ_CFG_SKB_FRAGS_MAX) { 601 dev_kfree_skb_any(skb); 602 goto err_exit; 603 } 604 605 is_nic_in_bad_state = aq_utils_obj_test(&self->header.flags, 606 AQ_NIC_FLAGS_IS_NOT_TX_READY) || 607 (aq_ring_avail_dx(ring) < 608 AQ_CFG_SKB_FRAGS_MAX); 609 610 if (is_nic_in_bad_state) { 611 aq_nic_ndev_queue_stop(self, ring->idx); 612 err = NETDEV_TX_BUSY; 613 goto err_exit; 614 } 615 616 do { 617 if (spin_trylock(&ring->header.lock)) { 618 frags = aq_nic_map_skb(self, skb, ring); 619 620 if (likely(frags)) { 621 err = self->aq_hw_ops.hw_ring_tx_xmit( 622 self->aq_hw, 623 ring, frags); 624 if (err >= 0) { 625 if (aq_ring_avail_dx(ring) < 626 AQ_CFG_SKB_FRAGS_MAX + 1) 627 aq_nic_ndev_queue_stop( 628 self, 629 ring->idx); 630 631 ++ring->stats.tx.packets; 632 ring->stats.tx.bytes += skb->len; 633 } 634 } else { 635 err = NETDEV_TX_BUSY; 636 } 637 638 spin_unlock(&ring->header.lock); 639 break; 640 } 641 } while (--trys); 642 643 if (!trys) { 644 err = NETDEV_TX_BUSY; 645 goto err_exit; 646 } 647 648 err_exit: 649 return err; 650 } 651 652 int aq_nic_set_packet_filter(struct aq_nic_s *self, unsigned int flags) 653 { 654 int err = 0; 655 656 err = self->aq_hw_ops.hw_packet_filter_set(self->aq_hw, flags); 657 if (err < 0) 658 goto err_exit; 659 660 self->packet_filter = flags; 661 662 err_exit: 663 return err; 664 } 665 666 int aq_nic_set_multicast_list(struct aq_nic_s *self, struct net_device *ndev) 667 { 668 struct netdev_hw_addr *ha = NULL; 669 unsigned int i = 0U; 670 671 self->mc_list.count = 0U; 672 673 netdev_for_each_mc_addr(ha, ndev) { 674 ether_addr_copy(self->mc_list.ar[i++], ha->addr); 675 ++self->mc_list.count; 676 } 677 678 return self->aq_hw_ops.hw_multicast_list_set(self->aq_hw, 679 self->mc_list.ar, 680 self->mc_list.count); 681 } 682 683 int aq_nic_set_mtu(struct aq_nic_s *self, int new_mtu) 684 { 685 int err = 0; 686 687 if (new_mtu > self->aq_hw_caps.mtu) { 688 err = -EINVAL; 689 goto err_exit; 690 } 691 self->aq_nic_cfg.mtu = new_mtu; 692 693 err_exit: 694 return err; 695 } 696 697 int aq_nic_set_mac(struct aq_nic_s *self, struct net_device *ndev) 698 { 699 return self->aq_hw_ops.hw_set_mac_address(self->aq_hw, ndev->dev_addr); 700 } 701 702 unsigned int aq_nic_get_link_speed(struct aq_nic_s *self) 703 { 704 return self->link_status.mbps; 705 } 706 707 int aq_nic_get_regs(struct aq_nic_s *self, struct ethtool_regs *regs, void *p) 708 { 709 u32 *regs_buff = p; 710 int err = 0; 711 712 regs->version = 1; 713 714 err = self->aq_hw_ops.hw_get_regs(self->aq_hw, 715 &self->aq_hw_caps, regs_buff); 716 if (err < 0) 717 goto err_exit; 718 719 err_exit: 720 return err; 721 } 722 723 int aq_nic_get_regs_count(struct aq_nic_s *self) 724 { 725 return self->aq_hw_caps.mac_regs_count; 726 } 727 728 void aq_nic_get_stats(struct aq_nic_s *self, u64 *data) 729 { 730 struct aq_vec_s *aq_vec = NULL; 731 unsigned int i = 0U; 732 unsigned int count = 0U; 733 int err = 0; 734 735 err = self->aq_hw_ops.hw_get_hw_stats(self->aq_hw, data, &count); 736 if (err < 0) 737 goto err_exit; 738 739 data += count; 740 count = 0U; 741 742 for (i = 0U, aq_vec = self->aq_vec[0]; 743 self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) { 744 data += count; 745 aq_vec_get_sw_stats(aq_vec, data, &count); 746 } 747 748 err_exit:; 749 (void)err; 750 } 751 752 void aq_nic_get_link_ksettings(struct aq_nic_s *self, 753 struct ethtool_link_ksettings *cmd) 754 { 755 cmd->base.port = PORT_TP; 756 /* This driver supports only 10G capable adapters, so DUPLEX_FULL */ 757 cmd->base.duplex = DUPLEX_FULL; 758 cmd->base.autoneg = self->aq_nic_cfg.is_autoneg; 759 760 ethtool_link_ksettings_zero_link_mode(cmd, supported); 761 762 if (self->aq_hw_caps.link_speed_msk & AQ_NIC_RATE_10G) 763 ethtool_link_ksettings_add_link_mode(cmd, supported, 764 10000baseT_Full); 765 766 if (self->aq_hw_caps.link_speed_msk & AQ_NIC_RATE_5G) 767 ethtool_link_ksettings_add_link_mode(cmd, supported, 768 5000baseT_Full); 769 770 if (self->aq_hw_caps.link_speed_msk & AQ_NIC_RATE_2GS) 771 ethtool_link_ksettings_add_link_mode(cmd, supported, 772 2500baseT_Full); 773 774 if (self->aq_hw_caps.link_speed_msk & AQ_NIC_RATE_1G) 775 ethtool_link_ksettings_add_link_mode(cmd, supported, 776 1000baseT_Full); 777 778 if (self->aq_hw_caps.link_speed_msk & AQ_NIC_RATE_100M) 779 ethtool_link_ksettings_add_link_mode(cmd, supported, 780 100baseT_Full); 781 782 if (self->aq_hw_caps.flow_control) 783 ethtool_link_ksettings_add_link_mode(cmd, supported, 784 Pause); 785 786 ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg); 787 ethtool_link_ksettings_add_link_mode(cmd, supported, TP); 788 789 ethtool_link_ksettings_zero_link_mode(cmd, advertising); 790 791 if (self->aq_nic_cfg.is_autoneg) 792 ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg); 793 794 if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_10G) 795 ethtool_link_ksettings_add_link_mode(cmd, advertising, 796 10000baseT_Full); 797 798 if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_5G) 799 ethtool_link_ksettings_add_link_mode(cmd, advertising, 800 5000baseT_Full); 801 802 if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_2GS) 803 ethtool_link_ksettings_add_link_mode(cmd, advertising, 804 2500baseT_Full); 805 806 if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_1G) 807 ethtool_link_ksettings_add_link_mode(cmd, advertising, 808 1000baseT_Full); 809 810 if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_100M) 811 ethtool_link_ksettings_add_link_mode(cmd, advertising, 812 100baseT_Full); 813 814 if (self->aq_nic_cfg.flow_control) 815 ethtool_link_ksettings_add_link_mode(cmd, advertising, 816 Pause); 817 818 ethtool_link_ksettings_add_link_mode(cmd, advertising, TP); 819 } 820 821 int aq_nic_set_link_ksettings(struct aq_nic_s *self, 822 const struct ethtool_link_ksettings *cmd) 823 { 824 u32 speed = 0U; 825 u32 rate = 0U; 826 int err = 0; 827 828 if (cmd->base.autoneg == AUTONEG_ENABLE) { 829 rate = self->aq_hw_caps.link_speed_msk; 830 self->aq_nic_cfg.is_autoneg = true; 831 } else { 832 speed = cmd->base.speed; 833 834 switch (speed) { 835 case SPEED_100: 836 rate = AQ_NIC_RATE_100M; 837 break; 838 839 case SPEED_1000: 840 rate = AQ_NIC_RATE_1G; 841 break; 842 843 case SPEED_2500: 844 rate = AQ_NIC_RATE_2GS; 845 break; 846 847 case SPEED_5000: 848 rate = AQ_NIC_RATE_5G; 849 break; 850 851 case SPEED_10000: 852 rate = AQ_NIC_RATE_10G; 853 break; 854 855 default: 856 err = -1; 857 goto err_exit; 858 break; 859 } 860 if (!(self->aq_hw_caps.link_speed_msk & rate)) { 861 err = -1; 862 goto err_exit; 863 } 864 865 self->aq_nic_cfg.is_autoneg = false; 866 } 867 868 err = self->aq_hw_ops.hw_set_link_speed(self->aq_hw, rate); 869 if (err < 0) 870 goto err_exit; 871 872 self->aq_nic_cfg.link_speed_msk = rate; 873 874 err_exit: 875 return err; 876 } 877 878 struct aq_nic_cfg_s *aq_nic_get_cfg(struct aq_nic_s *self) 879 { 880 return &self->aq_nic_cfg; 881 } 882 883 u32 aq_nic_get_fw_version(struct aq_nic_s *self) 884 { 885 u32 fw_version = 0U; 886 887 self->aq_hw_ops.hw_get_fw_version(self->aq_hw, &fw_version); 888 889 return fw_version; 890 } 891 892 int aq_nic_stop(struct aq_nic_s *self) 893 { 894 struct aq_vec_s *aq_vec = NULL; 895 unsigned int i = 0U; 896 897 for (i = 0U, aq_vec = self->aq_vec[0]; 898 self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) 899 aq_nic_ndev_queue_stop(self, i); 900 901 del_timer_sync(&self->service_timer); 902 903 self->aq_hw_ops.hw_irq_disable(self->aq_hw, AQ_CFG_IRQ_MASK); 904 905 if (self->aq_nic_cfg.is_polling) 906 del_timer_sync(&self->polling_timer); 907 else 908 aq_pci_func_free_irqs(self->aq_pci_func); 909 910 for (i = 0U, aq_vec = self->aq_vec[0]; 911 self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) 912 aq_vec_stop(aq_vec); 913 914 return self->aq_hw_ops.hw_stop(self->aq_hw); 915 } 916 917 void aq_nic_deinit(struct aq_nic_s *self) 918 { 919 struct aq_vec_s *aq_vec = NULL; 920 unsigned int i = 0U; 921 922 if (!self) 923 goto err_exit; 924 925 for (i = 0U, aq_vec = self->aq_vec[0]; 926 self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) 927 aq_vec_deinit(aq_vec); 928 929 if (self->power_state == AQ_HW_POWER_STATE_D0) { 930 (void)self->aq_hw_ops.hw_deinit(self->aq_hw); 931 } else { 932 (void)self->aq_hw_ops.hw_set_power(self->aq_hw, 933 self->power_state); 934 } 935 936 err_exit:; 937 } 938 939 void aq_nic_free_hot_resources(struct aq_nic_s *self) 940 { 941 unsigned int i = 0U; 942 943 if (!self) 944 goto err_exit; 945 946 for (i = AQ_DIMOF(self->aq_vec); i--;) { 947 if (self->aq_vec[i]) 948 aq_vec_free(self->aq_vec[i]); 949 } 950 951 err_exit:; 952 } 953 954 int aq_nic_change_pm_state(struct aq_nic_s *self, pm_message_t *pm_msg) 955 { 956 int err = 0; 957 958 if (!netif_running(self->ndev)) { 959 err = 0; 960 goto out; 961 } 962 rtnl_lock(); 963 if (pm_msg->event & PM_EVENT_SLEEP || pm_msg->event & PM_EVENT_FREEZE) { 964 self->power_state = AQ_HW_POWER_STATE_D3; 965 netif_device_detach(self->ndev); 966 netif_tx_stop_all_queues(self->ndev); 967 968 err = aq_nic_stop(self); 969 if (err < 0) 970 goto err_exit; 971 972 aq_nic_deinit(self); 973 } else { 974 err = aq_nic_init(self); 975 if (err < 0) 976 goto err_exit; 977 978 err = aq_nic_start(self); 979 if (err < 0) 980 goto err_exit; 981 982 netif_device_attach(self->ndev); 983 netif_tx_start_all_queues(self->ndev); 984 } 985 986 err_exit: 987 rtnl_unlock(); 988 out: 989 return err; 990 } 991