/*
 * aQuantia Corporation Network Driver
 * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 */

/* File aq_nic.c: Definition of common code for NIC. */

#include "aq_nic.h"
#include "aq_ring.h"
#include "aq_vec.h"
#include "aq_hw.h"
#include "aq_pci_func.h"
#include "aq_nic_internal.h"

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/timer.h>
#include <linux/cpu.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <net/ip.h>

static void aq_nic_rss_init(struct aq_nic_s *self, unsigned int num_rss_queues)
{
	struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
	struct aq_rss_parameters *rss_params = &cfg->aq_rss;
	int i = 0;

	static u8 rss_key[40] = {
		0x1e, 0xad, 0x71, 0x87, 0x65, 0xfc, 0x26, 0x7d,
		0x0d, 0x45, 0x67, 0x74, 0xcd, 0x06, 0x1a, 0x18,
		0xb6, 0xc1, 0xf0, 0xc7, 0xbb, 0x18, 0xbe, 0xf8,
		0x19, 0x13, 0x4b, 0xa9, 0xd0, 0x3e, 0xfe, 0x70,
		0x25, 0x03, 0xab, 0x50, 0x6a, 0x8b, 0x82, 0x0c
	};

	rss_params->hash_secret_key_size = sizeof(rss_key);
	memcpy(rss_params->hash_secret_key, rss_key, sizeof(rss_key));
	rss_params->indirection_table_size = AQ_CFG_RSS_INDIRECTION_TABLE_MAX;

	/* Spread the indirection table entries evenly over the RSS queues;
	 * num_rss_queues is a power of two, so the mask acts as a modulo.
	 */
	for (i = rss_params->indirection_table_size; i--;)
		rss_params->indirection_table[i] = i & (num_rss_queues - 1);
}

/* Fills aq_nic_cfg with valid defaults */
static void aq_nic_cfg_init_defaults(struct aq_nic_s *self)
{
	struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;

	cfg->aq_hw_caps = &self->aq_hw_caps;

	cfg->vecs = AQ_CFG_VECS_DEF;
	cfg->tcs = AQ_CFG_TCS_DEF;

	cfg->rxds = AQ_CFG_RXDS_DEF;
	cfg->txds = AQ_CFG_TXDS_DEF;

	cfg->is_polling = AQ_CFG_IS_POLLING_DEF;

	cfg->is_interrupt_moderation = AQ_CFG_IS_INTERRUPT_MODERATION_DEF;
	cfg->itr = cfg->is_interrupt_moderation ?
		AQ_CFG_INTERRUPT_MODERATION_RATE_DEF : 0U;

	cfg->is_rss = AQ_CFG_IS_RSS_DEF;
	cfg->num_rss_queues = AQ_CFG_NUM_RSS_QUEUES_DEF;
	cfg->aq_rss.base_cpu_number = AQ_CFG_RSS_BASE_CPU_NUM_DEF;
	cfg->flow_control = AQ_CFG_FC_MODE;

	cfg->mtu = AQ_CFG_MTU_DEF;
	cfg->link_speed_msk = AQ_CFG_SPEED_MSK;
	cfg->is_autoneg = AQ_CFG_IS_AUTONEG_DEF;

	cfg->is_lro = AQ_CFG_IS_LRO_DEF;

	cfg->vlan_id = 0U;

	aq_nic_rss_init(self, cfg->num_rss_queues);
}

/* Checks hw_caps and 'corrects' aq_nic_cfg in runtime */
int aq_nic_cfg_start(struct aq_nic_s *self)
{
	struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;

	/* descriptors */
	cfg->rxds = min(cfg->rxds, cfg->aq_hw_caps->rxds);
	cfg->txds = min(cfg->txds, cfg->aq_hw_caps->txds);

	/* rss rings */
	cfg->vecs = min(cfg->vecs, cfg->aq_hw_caps->vecs);
	cfg->vecs = min(cfg->vecs, num_online_cpus());
	/* cfg->vecs should be power of 2 for RSS */
	if (cfg->vecs >= 8U)
		cfg->vecs = 8U;
	else if (cfg->vecs >= 4U)
		cfg->vecs = 4U;
	else if (cfg->vecs >= 2U)
		cfg->vecs = 2U;
	else
		cfg->vecs = 1U;

	cfg->irq_type = aq_pci_func_get_irq_type(self->aq_pci_func);

	/* RSS is pointless with legacy interrupts or a single vector */
	if ((cfg->irq_type == AQ_HW_IRQ_LEGACY) ||
	    (self->aq_hw_caps.vecs == 1U) ||
	    (cfg->vecs == 1U)) {
		cfg->is_rss = 0U;
		cfg->vecs = 1U;
	}

	cfg->link_speed_msk &= self->aq_hw_caps.link_speed_msk;
	cfg->hw_features = self->aq_hw_caps.hw_features;
	return 0;
}

static void aq_nic_service_timer_cb(unsigned long param)
{
	struct aq_nic_s *self = (struct aq_nic_s *)param;
	struct net_device *ndev = aq_nic_get_ndev(self);
	int err = 0;
	unsigned int i = 0U;
	struct aq_hw_link_status_s link_status;
	struct aq_ring_stats_rx_s stats_rx;
	struct aq_ring_stats_tx_s stats_tx;

	if (aq_utils_obj_test(&self->header.flags, AQ_NIC_FLAGS_IS_NOT_READY))
		goto err_exit;

	err = self->aq_hw_ops.hw_get_link_status(self->aq_hw, &link_status);
	if (err < 0)
		goto err_exit;

	self->aq_hw_ops.hw_interrupt_moderation_set(self->aq_hw,
			self->aq_nic_cfg.is_interrupt_moderation);

	/* Propagate link state changes to the stack */
	if (memcmp(&link_status, &self->link_status, sizeof(link_status))) {
		if (link_status.mbps) {
			aq_utils_obj_set(&self->header.flags,
					 AQ_NIC_FLAG_STARTED);
			aq_utils_obj_clear(&self->header.flags,
					   AQ_NIC_LINK_DOWN);
			netif_carrier_on(self->ndev);
		} else {
			netif_carrier_off(self->ndev);
			aq_utils_obj_set(&self->header.flags, AQ_NIC_LINK_DOWN);
		}

		self->link_status = link_status;
	}

	/* Aggregate per-vector ring statistics into the netdev counters */
	memset(&stats_rx, 0U, sizeof(struct aq_ring_stats_rx_s));
	memset(&stats_tx, 0U, sizeof(struct aq_ring_stats_tx_s));
	for (i = AQ_DIMOF(self->aq_vec); i--;) {
		if (self->aq_vec[i])
			aq_vec_add_stats(self->aq_vec[i], &stats_rx, &stats_tx);
	}

	ndev->stats.rx_packets = stats_rx.packets;
	ndev->stats.rx_bytes = stats_rx.bytes;
	ndev->stats.rx_errors = stats_rx.errors;
	ndev->stats.tx_packets = stats_tx.packets;
	ndev->stats.tx_bytes = stats_tx.bytes;
	ndev->stats.tx_errors = stats_tx.errors;

err_exit:
	mod_timer(&self->service_timer,
		  jiffies + AQ_CFG_SERVICE_TIMER_INTERVAL);
}

static void aq_nic_polling_timer_cb(unsigned long param)
{
	struct aq_nic_s *self = (struct aq_nic_s *)param;
	struct aq_vec_s *aq_vec = NULL;
	unsigned int i = 0U;

	for (i = 0U, aq_vec = self->aq_vec[0];
		self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
		aq_vec_isr(i, (void *)aq_vec);
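
	/* No interrupts are used in polling mode, so re-arm the timer to
	 * keep servicing every vector.
	 */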
	mod_timer(&self->polling_timer, jiffies +
		  AQ_CFG_POLLING_TIMER_INTERVAL);
}

static struct net_device *aq_nic_ndev_alloc(void)
{
	return alloc_etherdev_mq(sizeof(struct aq_nic_s), AQ_CFG_VECS_MAX);
}

struct aq_nic_s *aq_nic_alloc_cold(const struct net_device_ops *ndev_ops,
				   const struct ethtool_ops *et_ops,
				   struct device *dev,
				   struct aq_pci_func_s *aq_pci_func,
				   unsigned int port,
				   const struct aq_hw_ops *aq_hw_ops)
{
	struct net_device *ndev = NULL;
	struct aq_nic_s *self = NULL;
	int err = 0;

	ndev = aq_nic_ndev_alloc();
	if (!ndev) {
		err = -ENOMEM;
		goto err_exit;
	}

	self = netdev_priv(ndev);

	ndev->netdev_ops = ndev_ops;
	ndev->ethtool_ops = et_ops;

	SET_NETDEV_DEV(ndev, dev);

	ndev->if_port = port;
	ndev->min_mtu = ETH_MIN_MTU;
	self->ndev = ndev;

	self->aq_pci_func = aq_pci_func;

	self->aq_hw_ops = *aq_hw_ops;
	self->port = (u8)port;

	self->aq_hw = self->aq_hw_ops.create(aq_pci_func, self->port,
					     &self->aq_hw_ops);
	err = self->aq_hw_ops.get_hw_caps(self->aq_hw, &self->aq_hw_caps);
	if (err < 0)
		goto err_exit;

	aq_nic_cfg_init_defaults(self);

err_exit:
	if (err < 0) {
		aq_nic_free_hot_resources(self);
		self = NULL;
	}
	return self;
}

int aq_nic_ndev_register(struct aq_nic_s *self)
{
	int err = 0;
	unsigned int i = 0U;

	if (!self->ndev) {
		err = -EINVAL;
		goto err_exit;
	}
	err = self->aq_hw_ops.hw_get_mac_permanent(self->aq_hw,
						   self->aq_nic_cfg.aq_hw_caps,
						   self->ndev->dev_addr);
	if (err < 0)
		goto err_exit;

#if defined(AQ_CFG_MAC_ADDR_PERMANENT)
	{
		static u8 mac_addr_permanent[] = AQ_CFG_MAC_ADDR_PERMANENT;

		ether_addr_copy(self->ndev->dev_addr, mac_addr_permanent);
	}
#endif

	netif_carrier_off(self->ndev);

	for (i = AQ_CFG_VECS_MAX; i--;)
		aq_nic_ndev_queue_stop(self, i);

	err = register_netdev(self->ndev);
	if (err < 0)
		goto err_exit;

err_exit:
	return err;
}

int aq_nic_ndev_init(struct aq_nic_s *self)
{
	struct aq_hw_caps_s *aq_hw_caps = self->aq_nic_cfg.aq_hw_caps;
	struct aq_nic_cfg_s *aq_nic_cfg = &self->aq_nic_cfg;

	self->ndev->hw_features |= aq_hw_caps->hw_features;
	self->ndev->features = aq_hw_caps->hw_features;
	self->ndev->priv_flags = aq_hw_caps->hw_priv_flags;
	self->ndev->mtu = aq_nic_cfg->mtu - ETH_HLEN;

	return 0;
}

void aq_nic_ndev_free(struct aq_nic_s *self)
{
	if (!self->ndev)
		goto err_exit;

	if (self->ndev->reg_state == NETREG_REGISTERED)
		unregister_netdev(self->ndev);

	if (self->aq_hw)
		self->aq_hw_ops.destroy(self->aq_hw);

	free_netdev(self->ndev);

err_exit:;
}

struct aq_nic_s *aq_nic_alloc_hot(struct net_device *ndev)
{
	struct aq_nic_s *self = NULL;
	int err = 0;

	if (!ndev) {
		err = -EINVAL;
		goto err_exit;
	}
	self = netdev_priv(ndev);

	if (!self) {
		err = -EINVAL;
		goto err_exit;
	}
	if (netif_running(ndev)) {
		unsigned int i;

		for (i = AQ_CFG_VECS_MAX; i--;)
			netif_stop_subqueue(ndev, i);
	}

	/* Allocate one aq_vec (ring set plus its NAPI context) per
	 * configured vector.
	 */
	for (self->aq_vecs = 0; self->aq_vecs < self->aq_nic_cfg.vecs;
	     self->aq_vecs++) {
		self->aq_vec[self->aq_vecs] =
		    aq_vec_alloc(self, self->aq_vecs, &self->aq_nic_cfg);
		if (!self->aq_vec[self->aq_vecs]) {
			err = -ENOMEM;
			goto err_exit;
		}
	}

err_exit:
	if (err < 0) {
		aq_nic_free_hot_resources(self);
		self = NULL;
	}
	return self;
}

void aq_nic_set_tx_ring(struct aq_nic_s *self, unsigned int idx,
			struct aq_ring_s *ring)
{
	self->aq_ring_tx[idx] = ring;
}

struct device *aq_nic_get_dev(struct aq_nic_s *self)
{
	return self->ndev->dev.parent;
}

struct net_device *aq_nic_get_ndev(struct aq_nic_s *self)
{
	return self->ndev;
}

int aq_nic_init(struct aq_nic_s *self)
{
	struct aq_vec_s *aq_vec = NULL;
	int err = 0;
	unsigned int i = 0U;

	self->power_state = AQ_HW_POWER_STATE_D0;
	err = self->aq_hw_ops.hw_reset(self->aq_hw);
	if (err < 0)
		goto err_exit;

	err = self->aq_hw_ops.hw_init(self->aq_hw, &self->aq_nic_cfg,
				      aq_nic_get_ndev(self)->dev_addr);
	if (err < 0)
		goto err_exit;

	for (i = 0U, aq_vec = self->aq_vec[0];
	     self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
		aq_vec_init(aq_vec, &self->aq_hw_ops, self->aq_hw);

err_exit:
	return err;
}

void aq_nic_ndev_queue_start(struct aq_nic_s *self, unsigned int idx)
{
	netif_start_subqueue(self->ndev, idx);
}

void aq_nic_ndev_queue_stop(struct aq_nic_s *self, unsigned int idx)
{
	netif_stop_subqueue(self->ndev, idx);
}

int aq_nic_start(struct aq_nic_s *self)
{
	struct aq_vec_s *aq_vec = NULL;
	int err = 0;
	unsigned int i = 0U;

	err = self->aq_hw_ops.hw_multicast_list_set(self->aq_hw,
						    self->mc_list.ar,
						    self->mc_list.count);
	if (err < 0)
		goto err_exit;

	err = self->aq_hw_ops.hw_packet_filter_set(self->aq_hw,
						   self->packet_filter);
	if (err < 0)
		goto err_exit;

	for (i = 0U, aq_vec = self->aq_vec[0];
	     self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) {
		err = aq_vec_start(aq_vec);
		if (err < 0)
			goto err_exit;
	}

	err = self->aq_hw_ops.hw_start(self->aq_hw);
	if (err < 0)
		goto err_exit;

	err = self->aq_hw_ops.hw_interrupt_moderation_set(self->aq_hw,
			self->aq_nic_cfg.is_interrupt_moderation);
	if (err < 0)
		goto err_exit;
	setup_timer(&self->service_timer, &aq_nic_service_timer_cb,
		    (unsigned long)self);
	mod_timer(&self->service_timer, jiffies +
		  AQ_CFG_SERVICE_TIMER_INTERVAL);

	/* Either service all vectors from the polling timer or request
	 * one IRQ per vector.
	 */
	if (self->aq_nic_cfg.is_polling) {
		setup_timer(&self->polling_timer, &aq_nic_polling_timer_cb,
			    (unsigned long)self);
		mod_timer(&self->polling_timer, jiffies +
			  AQ_CFG_POLLING_TIMER_INTERVAL);
	} else {
		for (i = 0U, aq_vec = self->aq_vec[0];
		     self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) {
			err = aq_pci_func_alloc_irq(self->aq_pci_func, i,
					self->ndev->name, aq_vec,
					aq_vec_get_affinity_mask(aq_vec));
			if (err < 0)
				goto err_exit;
		}

		err = self->aq_hw_ops.hw_irq_enable(self->aq_hw,
						    AQ_CFG_IRQ_MASK);
		if (err < 0)
			goto err_exit;
	}

	for (i = 0U, aq_vec = self->aq_vec[0];
	     self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
		aq_nic_ndev_queue_start(self, i);

	err = netif_set_real_num_tx_queues(self->ndev, self->aq_vecs);
	if (err < 0)
		goto err_exit;

	err = netif_set_real_num_rx_queues(self->ndev, self->aq_vecs);
	if (err < 0)
		goto err_exit;

err_exit:
	return err;
}

static unsigned int aq_nic_map_skb(struct aq_nic_s *self,
				   struct sk_buff *skb,
				   struct aq_ring_s *ring)
{
	unsigned int ret = 0U;
	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
	unsigned int frag_count = 0U;
	unsigned int dx = ring->sw_tail;
	struct aq_ring_buff_s *dx_buff = &ring->buff_ring[dx];

	/* For GSO packets the first descriptor is a TSO context descriptor
	 * carrying header lengths and MSS rather than payload.
	 */
	if (unlikely(skb_is_gso(skb))) {
		dx_buff->flags = 0U;
		dx_buff->len_pkt = skb->len;
		dx_buff->len_l2 = ETH_HLEN;
		dx_buff->len_l3 = ip_hdrlen(skb);
		dx_buff->len_l4 = tcp_hdrlen(skb);
		dx_buff->mss = skb_shinfo(skb)->gso_size;
		dx_buff->is_txc = 1U;

		dx_buff->is_ipv6 =
			(ip_hdr(skb)->version == 6) ? 1U : 0U;

		dx = aq_ring_next_dx(ring, dx);
		dx_buff = &ring->buff_ring[dx];
		++ret;
	}

	dx_buff->flags = 0U;
	dx_buff->len = skb_headlen(skb);
	dx_buff->pa = dma_map_single(aq_nic_get_dev(self),
				     skb->data,
				     dx_buff->len,
				     DMA_TO_DEVICE);

	if (unlikely(dma_mapping_error(aq_nic_get_dev(self), dx_buff->pa)))
		goto exit;

	dx_buff->len_pkt = skb->len;
	dx_buff->is_sop = 1U;
	dx_buff->is_mapped = 1U;
	++ret;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		dx_buff->is_ip_cso = (htons(ETH_P_IP) == skb->protocol) ?
			1U : 0U;

		if (ip_hdr(skb)->version == 4) {
			dx_buff->is_tcp_cso =
				(ip_hdr(skb)->protocol == IPPROTO_TCP) ?
					1U : 0U;
			dx_buff->is_udp_cso =
				(ip_hdr(skb)->protocol == IPPROTO_UDP) ?
					1U : 0U;
		} else if (ip_hdr(skb)->version == 6) {
			dx_buff->is_tcp_cso =
				(ipv6_hdr(skb)->nexthdr == NEXTHDR_TCP) ?
					1U : 0U;
			dx_buff->is_udp_cso =
				(ipv6_hdr(skb)->nexthdr == NEXTHDR_UDP) ?
					1U : 0U;
		}
	}

	for (; nr_frags--; ++frag_count) {
		unsigned int frag_len = 0U;
		dma_addr_t frag_pa;
		skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_count];

		frag_len = skb_frag_size(frag);
		frag_pa = skb_frag_dma_map(aq_nic_get_dev(self), frag, 0,
					   frag_len, DMA_TO_DEVICE);

		if (unlikely(dma_mapping_error(aq_nic_get_dev(self), frag_pa)))
			goto mapping_error;

		/* Split fragments larger than the hardware frame limit
		 * across several descriptors.
		 */
		while (frag_len > AQ_CFG_TX_FRAME_MAX) {
			dx = aq_ring_next_dx(ring, dx);
			dx_buff = &ring->buff_ring[dx];

			dx_buff->flags = 0U;
			dx_buff->len = AQ_CFG_TX_FRAME_MAX;
			dx_buff->pa = frag_pa;
			dx_buff->is_mapped = 1U;

			frag_len -= AQ_CFG_TX_FRAME_MAX;
			frag_pa += AQ_CFG_TX_FRAME_MAX;
			++ret;
		}

		dx = aq_ring_next_dx(ring, dx);
		dx_buff = &ring->buff_ring[dx];

		dx_buff->flags = 0U;
		dx_buff->len = frag_len;
		dx_buff->pa = frag_pa;
		dx_buff->is_mapped = 1U;
		++ret;
	}

	dx_buff->is_eop = 1U;
	dx_buff->skb = skb;
	goto exit;

mapping_error:
	/* Unwind: unmap everything mapped so far, skipping context
	 * descriptors, and return 0 to signal failure to the caller.
	 */
	for (dx = ring->sw_tail;
	     ret > 0;
	     --ret, dx = aq_ring_next_dx(ring, dx)) {
		dx_buff = &ring->buff_ring[dx];

		if (!dx_buff->is_txc && dx_buff->pa) {
			if (unlikely(dx_buff->is_sop)) {
				dma_unmap_single(aq_nic_get_dev(self),
						 dx_buff->pa,
						 dx_buff->len,
						 DMA_TO_DEVICE);
			} else {
				dma_unmap_page(aq_nic_get_dev(self),
					       dx_buff->pa,
					       dx_buff->len,
					       DMA_TO_DEVICE);
			}
		}
	}

exit:
	return ret;
}

int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb)
__releases(&ring->lock)
__acquires(&ring->lock)
{
	struct aq_ring_s *ring = NULL;
	unsigned int frags = 0U;
	unsigned int vec = skb->queue_mapping % self->aq_nic_cfg.vecs;
	unsigned int tc = 0U;
	unsigned int trys = AQ_CFG_LOCK_TRYS;
	int err = NETDEV_TX_OK;
	bool is_nic_in_bad_state;

	frags = skb_shinfo(skb)->nr_frags + 1;

	ring = self->aq_ring_tx[AQ_NIC_TCVEC2RING(self, tc, vec)];

	if (frags > AQ_CFG_SKB_FRAGS_MAX) {
		dev_kfree_skb_any(skb);
		goto err_exit;
	}

	is_nic_in_bad_state = aq_utils_obj_test(&self->header.flags,
						AQ_NIC_FLAGS_IS_NOT_TX_READY) ||
			      (aq_ring_avail_dx(ring) <
			       AQ_CFG_SKB_FRAGS_MAX);

	if (is_nic_in_bad_state) {
		aq_nic_ndev_queue_stop(self, ring->idx);
		err = NETDEV_TX_BUSY;
		goto err_exit;
	}

	/* Try to take the ring lock a bounded number of times; give up
	 * with NETDEV_TX_BUSY if it stays contended.
	 */
	do {
		if (spin_trylock(&ring->header.lock)) {
			frags = aq_nic_map_skb(self, skb, ring);

			if (likely(frags)) {
				err = self->aq_hw_ops.hw_ring_tx_xmit(
								self->aq_hw,
								ring, frags);
				if (err >= 0) {
					if (aq_ring_avail_dx(ring) <
					    AQ_CFG_SKB_FRAGS_MAX + 1)
						aq_nic_ndev_queue_stop(
								self,
								ring->idx);

					++ring->stats.tx.packets;
					ring->stats.tx.bytes += skb->len;
				}
			} else {
				err = NETDEV_TX_BUSY;
			}

			spin_unlock(&ring->header.lock);
			break;
		}
	} while (--trys);

	if (!trys) {
		err = NETDEV_TX_BUSY;
		goto err_exit;
	}

err_exit:
	return err;
}

int aq_nic_set_packet_filter(struct aq_nic_s *self, unsigned int flags)
{
	int err = 0;

	err = self->aq_hw_ops.hw_packet_filter_set(self->aq_hw, flags);
	if (err < 0)
		goto err_exit;

	self->packet_filter = flags;

err_exit:
	return err;
}

int aq_nic_set_multicast_list(struct aq_nic_s *self, struct net_device *ndev)
{
	struct netdev_hw_addr *ha = NULL;
	unsigned int i = 0U;

	self->mc_list.count = 0U;

	netdev_for_each_mc_addr(ha, ndev) {
		/* Don't overflow the multicast filter table */
		if (i >= AQ_CFG_MULTICAST_ADDRESS_MAX)
			break;
		ether_addr_copy(self->mc_list.ar[i++], ha->addr);
		++self->mc_list.count;
	}

	return self->aq_hw_ops.hw_multicast_list_set(self->aq_hw,
						     self->mc_list.ar,
						     self->mc_list.count);
}

int aq_nic_set_mtu(struct aq_nic_s *self, int new_mtu)
{
	int err = 0;

	if (new_mtu > self->aq_hw_caps.mtu) {
		err = -EINVAL;
		goto err_exit;
	}
	self->aq_nic_cfg.mtu = new_mtu;

err_exit:
	return err;
}

int aq_nic_set_mac(struct aq_nic_s *self, struct net_device *ndev)
{
	return self->aq_hw_ops.hw_set_mac_address(self->aq_hw, ndev->dev_addr);
}

unsigned int aq_nic_get_link_speed(struct aq_nic_s *self)
{
	return self->link_status.mbps;
}

int aq_nic_get_regs(struct aq_nic_s *self, struct ethtool_regs *regs, void *p)
{
	u32 *regs_buff = p;
	int err = 0;

	regs->version = 1;

	err = self->aq_hw_ops.hw_get_regs(self->aq_hw,
					  &self->aq_hw_caps, regs_buff);
	if (err < 0)
		goto err_exit;

err_exit:
	return err;
}

int aq_nic_get_regs_count(struct aq_nic_s *self)
{
	return self->aq_hw_caps.mac_regs_count;
}

void aq_nic_get_stats(struct aq_nic_s *self, u64 *data)
{
	struct aq_vec_s *aq_vec = NULL;
	unsigned int i = 0U;
	unsigned int count = 0U;
	int err = 0;

	err = self->aq_hw_ops.hw_get_hw_stats(self->aq_hw, data, &count);
	if (err < 0)
		goto err_exit;

	data += count;
	count = 0U;

	for (i = 0U, aq_vec = self->aq_vec[0];
	     aq_vec && self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) {
		data += count;
		aq_vec_get_sw_stats(aq_vec, data, &count);
	}

err_exit:;
	(void)err;
}

void aq_nic_get_link_ksettings(struct aq_nic_s *self,
			       struct ethtool_link_ksettings *cmd)
{
	cmd->base.port = PORT_TP;
	/* This driver supports only 10G capable adapters, so DUPLEX_FULL */
	cmd->base.duplex = DUPLEX_FULL;
	cmd->base.autoneg = self->aq_nic_cfg.is_autoneg;

	ethtool_link_ksettings_zero_link_mode(cmd, supported);

	if (self->aq_hw_caps.link_speed_msk & AQ_NIC_RATE_10G)
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     10000baseT_Full);

	if (self->aq_hw_caps.link_speed_msk & AQ_NIC_RATE_5G)
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     5000baseT_Full);

	if (self->aq_hw_caps.link_speed_msk & AQ_NIC_RATE_2GS)
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     2500baseT_Full);

	if (self->aq_hw_caps.link_speed_msk & AQ_NIC_RATE_1G)
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     1000baseT_Full);

	if (self->aq_hw_caps.link_speed_msk & AQ_NIC_RATE_100M)
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     100baseT_Full);

	if (self->aq_hw_caps.flow_control)
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     Pause);

	ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);
	ethtool_link_ksettings_add_link_mode(cmd, supported, TP);

	ethtool_link_ksettings_zero_link_mode(cmd, advertising);

	if (self->aq_nic_cfg.is_autoneg)
		ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg);

	if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_10G)
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     10000baseT_Full);

	if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_5G)
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     5000baseT_Full);

	if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_2GS)
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     2500baseT_Full);

	if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_1G)
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     1000baseT_Full);

	if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_100M)
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     100baseT_Full);

	if (self->aq_nic_cfg.flow_control)
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     Pause);

	ethtool_link_ksettings_add_link_mode(cmd, advertising, TP);
}

int aq_nic_set_link_ksettings(struct aq_nic_s *self,
			      const struct ethtool_link_ksettings *cmd)
{
	u32 speed = 0U;
	u32 rate = 0U;
	int err = 0;

	if (cmd->base.autoneg == AUTONEG_ENABLE) {
		rate = self->aq_hw_caps.link_speed_msk;
		self->aq_nic_cfg.is_autoneg = true;
	} else {
		speed = cmd->base.speed;

		switch (speed) {
		case SPEED_100:
			rate = AQ_NIC_RATE_100M;
			break;

		case SPEED_1000:
			rate = AQ_NIC_RATE_1G;
			break;

		case SPEED_2500:
			rate = AQ_NIC_RATE_2GS;
			break;

		case SPEED_5000:
			rate = AQ_NIC_RATE_5G;
			break;

		case SPEED_10000:
			rate = AQ_NIC_RATE_10G;
			break;

		default:
			err = -EINVAL;
			goto err_exit;
		}

		if (!(self->aq_hw_caps.link_speed_msk & rate)) {
			err = -EINVAL;
			goto err_exit;
		}

		self->aq_nic_cfg.is_autoneg = false;
	}

	err = self->aq_hw_ops.hw_set_link_speed(self->aq_hw, rate);
	if (err < 0)
		goto err_exit;

	self->aq_nic_cfg.link_speed_msk = rate;

err_exit:
	return err;
}

struct aq_nic_cfg_s *aq_nic_get_cfg(struct aq_nic_s *self)
{
	return &self->aq_nic_cfg;
}

u32 aq_nic_get_fw_version(struct aq_nic_s *self)
{
	u32 fw_version = 0U;

	self->aq_hw_ops.hw_get_fw_version(self->aq_hw, &fw_version);

	return fw_version;
}

int aq_nic_stop(struct aq_nic_s *self)
{
	struct aq_vec_s *aq_vec = NULL;
	unsigned int i = 0U;

	for (i = 0U, aq_vec = self->aq_vec[0];
	     self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
		aq_nic_ndev_queue_stop(self, i);

	del_timer_sync(&self->service_timer);

	self->aq_hw_ops.hw_irq_disable(self->aq_hw, AQ_CFG_IRQ_MASK);

	if (self->aq_nic_cfg.is_polling)
		del_timer_sync(&self->polling_timer);
	else
		aq_pci_func_free_irqs(self->aq_pci_func);

	for (i = 0U, aq_vec = self->aq_vec[0];
	     self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
		aq_vec_stop(aq_vec);

	return self->aq_hw_ops.hw_stop(self->aq_hw);
}

void aq_nic_deinit(struct aq_nic_s *self)
{
	struct aq_vec_s *aq_vec = NULL;
	unsigned int i = 0U;

	if (!self)
		goto err_exit;

	for (i = 0U, aq_vec = self->aq_vec[0];
	     self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
		aq_vec_deinit(aq_vec);

	if (self->power_state == AQ_HW_POWER_STATE_D0) {
		(void)self->aq_hw_ops.hw_deinit(self->aq_hw);
	} else {
		(void)self->aq_hw_ops.hw_set_power(self->aq_hw,
						   self->power_state);
	}

err_exit:;
}

void aq_nic_free_hot_resources(struct aq_nic_s *self)
{
	unsigned int i = 0U;

	if (!self)
		goto err_exit;

	for (i = AQ_DIMOF(self->aq_vec); i--;) {
		if (self->aq_vec[i]) {
			aq_vec_free(self->aq_vec[i]);
			self->aq_vec[i] = NULL;
		}
	}

err_exit:;
}

int aq_nic_change_pm_state(struct aq_nic_s *self, pm_message_t *pm_msg)
{
	int err = 0;

	if (!netif_running(self->ndev)) {
		err = 0;
		goto out;
	}
	rtnl_lock();
	if (pm_msg->event & PM_EVENT_SLEEP || pm_msg->event & PM_EVENT_FREEZE) {
		self->power_state = AQ_HW_POWER_STATE_D3;
		netif_device_detach(self->ndev);
		netif_tx_stop_all_queues(self->ndev);

		err = aq_nic_stop(self);
		if (err < 0)
			goto err_exit;

		aq_nic_deinit(self);
	} else {
		err = aq_nic_init(self);
		if (err < 0)
			goto err_exit;

		err = aq_nic_start(self);
		if (err < 0)
			goto err_exit;

		netif_device_attach(self->ndev);
		netif_tx_start_all_queues(self->ndev);
	}

err_exit:
	rtnl_unlock();
out:
	return err;
}