// SPDX-License-Identifier: GPL-2.0-or-later
/**************************************************************************/
/*                                                                        */
/*  IBM System i and System p Virtual NIC Device Driver                   */
/*  Copyright (C) 2014 IBM Corp.                                          */
/*  Santiago Leon (santi_leon@yahoo.com)                                  */
/*  Thomas Falcon (tlfalcon@linux.vnet.ibm.com)                           */
/*  John Allen (jallen@linux.vnet.ibm.com)                                */
/*                                                                        */
/*                                                                        */
/* This module contains the implementation of a virtual ethernet device   */
/* for use with IBM i/p Series LPAR Linux. It utilizes the logical LAN    */
/* option of the RS/6000 Platform Architecture to interface with virtual  */
/* ethernet NICs that are presented to the partition by the hypervisor.   */
/*                                                                        */
/* Messages are passed between the VNIC driver and the VNIC server using  */
/* Command/Response Queues (CRQs) and sub CRQs (sCRQs). CRQs are used to  */
/* issue and receive commands that initiate communication with the server */
/* on driver initialization. Sub CRQs (sCRQs) are similar to CRQs, but    */
/* are used by the driver to notify the server that a packet is           */
/* ready for transmission or that a buffer has been added to receive a    */
/* packet. Subsequently, sCRQs are used by the server to notify the       */
/* driver that a packet transmission has been completed or that a packet  */
/* has been received and placed in a waiting buffer.                      */
/*                                                                        */
/* In lieu of a more conventional "on-the-fly" DMA mapping strategy in    */
/* which skbs are DMA mapped and immediately unmapped when the transmit   */
/* or receive has been completed, the VNIC driver is required to use      */
/* "long term mapping". This entails that large, contiguous DMA mapped    */
/* buffers are allocated on driver initialization and these buffers are   */
/* then continuously reused to pass skbs to and from the VNIC server.     */
/*                                                                        */
/**************************************************************************/

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/completion.h>
#include <linux/ioport.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/ethtool.h>
#include <linux/proc_fs.h>
#include <linux/if_arp.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kthread.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <net/net_namespace.h>
#include <asm/hvcall.h>
#include <linux/atomic.h>
#include <asm/vio.h>
#include <asm/xive.h>
#include <asm/iommu.h>
#include <linux/uaccess.h>
#include <asm/firmware.h>
#include <linux/workqueue.h>
#include <linux/if_vlan.h>
#include <linux/utsname.h>
#include <linux/cpu.h>

#include "ibmvnic.h"

static const char ibmvnic_driver_name[] = "ibmvnic";
static const char ibmvnic_driver_string[] = "IBM System i/p Virtual NIC Driver";

MODULE_AUTHOR("Santiago Leon");
MODULE_DESCRIPTION("IBM System i/p Virtual NIC Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(IBMVNIC_DRIVER_VERSION);

static int ibmvnic_version = IBMVNIC_INITIAL_VERSION;
static void release_sub_crqs(struct ibmvnic_adapter *, bool);
static int ibmvnic_reset_crq(struct ibmvnic_adapter *);
static int ibmvnic_send_crq_init(struct ibmvnic_adapter *);
static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *);
static int ibmvnic_send_crq(struct ibmvnic_adapter *, union ibmvnic_crq *);
static int send_subcrq_indirect(struct ibmvnic_adapter *, u64, u64, u64);
static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance);
static int enable_scrq_irq(struct ibmvnic_adapter *,
			   struct ibmvnic_sub_crq_queue *);
static int disable_scrq_irq(struct ibmvnic_adapter *,
			    struct ibmvnic_sub_crq_queue *);
static int pending_scrq(struct ibmvnic_adapter *,
			struct ibmvnic_sub_crq_queue *);
static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *,
					struct ibmvnic_sub_crq_queue *);
static int ibmvnic_poll(struct napi_struct *napi, int data);
static void send_query_map(struct ibmvnic_adapter *adapter);
static int send_request_map(struct ibmvnic_adapter *, dma_addr_t, u32, u8);
static int send_request_unmap(struct ibmvnic_adapter *, u8);
static int send_login(struct ibmvnic_adapter *adapter);
static void send_query_cap(struct ibmvnic_adapter *adapter);
static int init_sub_crqs(struct ibmvnic_adapter *);
static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter);
static int ibmvnic_reset_init(struct ibmvnic_adapter *, bool reset);
static void release_crq_queue(struct ibmvnic_adapter *);
static int __ibmvnic_set_mac(struct net_device *, u8 *);
static int init_crq_queue(struct ibmvnic_adapter *adapter);
static int send_query_phys_parms(struct ibmvnic_adapter *adapter);
static void ibmvnic_tx_scrq_clean_buffer(struct ibmvnic_adapter *adapter,
					 struct ibmvnic_sub_crq_queue *tx_scrq);
static void free_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb);
static void ibmvnic_disable_irqs(struct ibmvnic_adapter *adapter);
struct ibmvnic_stat {
	char name[ETH_GSTRING_LEN];
	int offset;
};

#define IBMVNIC_STAT_OFF(stat) (offsetof(struct ibmvnic_adapter, stats) + \
			     offsetof(struct ibmvnic_statistics, stat))
#define IBMVNIC_GET_STAT(a, off) (*((u64 *)(((unsigned long)(a)) + (off))))

static const struct ibmvnic_stat ibmvnic_stats[] = {
	{"rx_packets", IBMVNIC_STAT_OFF(rx_packets)},
	{"rx_bytes", IBMVNIC_STAT_OFF(rx_bytes)},
	{"tx_packets", IBMVNIC_STAT_OFF(tx_packets)},
	{"tx_bytes", IBMVNIC_STAT_OFF(tx_bytes)},
	{"ucast_tx_packets", IBMVNIC_STAT_OFF(ucast_tx_packets)},
	{"ucast_rx_packets", IBMVNIC_STAT_OFF(ucast_rx_packets)},
	{"mcast_tx_packets", IBMVNIC_STAT_OFF(mcast_tx_packets)},
	{"mcast_rx_packets", IBMVNIC_STAT_OFF(mcast_rx_packets)},
	{"bcast_tx_packets", IBMVNIC_STAT_OFF(bcast_tx_packets)},
	{"bcast_rx_packets", IBMVNIC_STAT_OFF(bcast_rx_packets)},
	{"align_errors", IBMVNIC_STAT_OFF(align_errors)},
	{"fcs_errors", IBMVNIC_STAT_OFF(fcs_errors)},
	{"single_collision_frames", IBMVNIC_STAT_OFF(single_collision_frames)},
	{"multi_collision_frames", IBMVNIC_STAT_OFF(multi_collision_frames)},
	{"sqe_test_errors", IBMVNIC_STAT_OFF(sqe_test_errors)},
	{"deferred_tx", IBMVNIC_STAT_OFF(deferred_tx)},
	{"late_collisions", IBMVNIC_STAT_OFF(late_collisions)},
	{"excess_collisions", IBMVNIC_STAT_OFF(excess_collisions)},
	{"internal_mac_tx_errors", IBMVNIC_STAT_OFF(internal_mac_tx_errors)},
	{"carrier_sense", IBMVNIC_STAT_OFF(carrier_sense)},
	{"too_long_frames", IBMVNIC_STAT_OFF(too_long_frames)},
	{"internal_mac_rx_errors", IBMVNIC_STAT_OFF(internal_mac_rx_errors)},
};

static int send_crq_init_complete(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
	crq.generic.cmd = IBMVNIC_CRQ_INIT_COMPLETE;

	return ibmvnic_send_crq(adapter, &crq);
}

static int send_version_xchg(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.version_exchange.first = IBMVNIC_CRQ_CMD;
	crq.version_exchange.cmd = VERSION_EXCHANGE;
	crq.version_exchange.version = cpu_to_be16(ibmvnic_version);

	return ibmvnic_send_crq(adapter, &crq);
}

static void ibmvnic_clean_queue_affinity(struct ibmvnic_adapter *adapter,
					 struct ibmvnic_sub_crq_queue *queue)
{
	if (!(queue && queue->irq))
		return;

	cpumask_clear(queue->affinity_mask);

	if (irq_set_affinity_and_hint(queue->irq, NULL))
		netdev_warn(adapter->netdev,
			    "%s: Clear affinity failed, queue addr = %p, IRQ = %d\n",
			    __func__, queue, queue->irq);
}

static void ibmvnic_clean_affinity(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_sub_crq_queue **rxqs;
	struct ibmvnic_sub_crq_queue **txqs;
	int num_rxqs, num_txqs;
	int i;

	rxqs = adapter->rx_scrq;
	txqs = adapter->tx_scrq;
	num_txqs = adapter->num_active_tx_scrqs;
	num_rxqs = adapter->num_active_rx_scrqs;

	netdev_dbg(adapter->netdev, "%s: Cleaning irq affinity hints", __func__);
	if (txqs) {
		for (i = 0; i < num_txqs; i++)
			ibmvnic_clean_queue_affinity(adapter, txqs[i]);
	}
	if (rxqs) {
		for (i = 0; i < num_rxqs; i++)
			ibmvnic_clean_queue_affinity(adapter, rxqs[i]);
	}
}
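/* Note on the IRQ-affinity spread implemented below (an illustrative
 * example added for clarity, not from the original source): with 16 online
 * CPUs and 4 TX + 4 RX sub-CRQs, stride = 16 / 8 = 2 and stragglers = 0,
 * so each queue IRQ is hinted to 2 CPUs. With 10 CPUs and the same 8
 * queues, stride = 1 and stragglers = 2, so the first two queues visited
 * each receive one extra CPU.
 */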
static int ibmvnic_set_queue_affinity(struct ibmvnic_sub_crq_queue *queue,
				      unsigned int *cpu, int *stragglers,
				      int stride)
{
	cpumask_var_t mask;
	int i;
	int rc = 0;

	if (!(queue && queue->irq))
		return rc;

	/* cpumask_var_t is either a pointer or array, allocation works here */
	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	/* while we have extra cpus give one extra to this irq */
	if (*stragglers) {
		stride++;
		(*stragglers)--;
	}
	/* atomic write is safer than writing bit by bit directly */
	for (i = 0; i < stride; i++) {
		cpumask_set_cpu(*cpu, mask);
		*cpu = cpumask_next_wrap(*cpu, cpu_online_mask,
					 nr_cpu_ids, false);
	}
	/* set queue affinity mask */
	cpumask_copy(queue->affinity_mask, mask);
	rc = irq_set_affinity_and_hint(queue->irq, queue->affinity_mask);
	free_cpumask_var(mask);

	return rc;
}

/* assumes cpu read lock is held */
static void ibmvnic_set_affinity(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_sub_crq_queue **rxqs = adapter->rx_scrq;
	struct ibmvnic_sub_crq_queue **txqs = adapter->tx_scrq;
	struct ibmvnic_sub_crq_queue *queue;
	int num_rxqs = adapter->num_active_rx_scrqs, i_rxqs = 0;
	int num_txqs = adapter->num_active_tx_scrqs, i_txqs = 0;
	int total_queues, stride, stragglers, i;
	unsigned int num_cpu, cpu;
	bool is_rx_queue;
	int rc = 0;

	netdev_dbg(adapter->netdev, "%s: Setting irq affinity hints", __func__);
	if (!(adapter->rx_scrq && adapter->tx_scrq)) {
		netdev_warn(adapter->netdev,
			    "%s: Set affinity failed, queues not allocated\n",
			    __func__);
		return;
	}

	total_queues = num_rxqs + num_txqs;
	num_cpu = num_online_cpus();
	/* number of cpus assigned per irq */
	stride = max_t(int, num_cpu / total_queues, 1);
	/* number of leftover cpus */
	stragglers = num_cpu >= total_queues ? num_cpu % total_queues : 0;
	/* next available cpu to assign irq to */
	cpu = cpumask_next(-1, cpu_online_mask);

	for (i = 0; i < total_queues; i++) {
		is_rx_queue = false;
		/* balance core load by alternating rx and tx assignments
		 * ex: TX0 -> RX0 -> TX1 -> RX1 etc.
		 */
		if ((i % 2 == 1 && i_rxqs < num_rxqs) || i_txqs == num_txqs) {
			queue = rxqs[i_rxqs++];
			is_rx_queue = true;
		} else {
			queue = txqs[i_txqs++];
		}

		rc = ibmvnic_set_queue_affinity(queue, &cpu, &stragglers,
						stride);
		if (rc)
			goto out;

		if (!queue || is_rx_queue)
			continue;

		rc = __netif_set_xps_queue(adapter->netdev,
					   cpumask_bits(queue->affinity_mask),
					   i_txqs - 1, XPS_CPUS);
		if (rc)
			netdev_warn(adapter->netdev, "%s: Set XPS on queue %d failed, rc = %d.\n",
				    __func__, i_txqs - 1, rc);
	}

out:
	if (rc) {
		netdev_warn(adapter->netdev,
			    "%s: Set affinity failed, queue addr = %p, IRQ = %d, rc = %d.\n",
			    __func__, queue, queue->irq, rc);
		ibmvnic_clean_affinity(adapter);
	}
}

static int ibmvnic_cpu_online(unsigned int cpu, struct hlist_node *node)
{
	struct ibmvnic_adapter *adapter;

	adapter = hlist_entry_safe(node, struct ibmvnic_adapter, node);
	ibmvnic_set_affinity(adapter);
	return 0;
}

static int ibmvnic_cpu_dead(unsigned int cpu, struct hlist_node *node)
{
	struct ibmvnic_adapter *adapter;

	adapter = hlist_entry_safe(node, struct ibmvnic_adapter, node_dead);
	ibmvnic_set_affinity(adapter);
	return 0;
}

static int ibmvnic_cpu_down_prep(unsigned int cpu, struct hlist_node *node)
{
	struct ibmvnic_adapter *adapter;

	adapter = hlist_entry_safe(node, struct ibmvnic_adapter, node);
	ibmvnic_clean_affinity(adapter);
	return 0;
}

static enum cpuhp_state ibmvnic_online;

static int ibmvnic_cpu_notif_add(struct ibmvnic_adapter *adapter)
{
	int ret;

	ret = cpuhp_state_add_instance_nocalls(ibmvnic_online, &adapter->node);
	if (ret)
		return ret;
	ret = cpuhp_state_add_instance_nocalls(CPUHP_IBMVNIC_DEAD,
					       &adapter->node_dead);
	if (!ret)
		return ret;
	cpuhp_state_remove_instance_nocalls(ibmvnic_online, &adapter->node);
	return ret;
}

static void ibmvnic_cpu_notif_remove(struct ibmvnic_adapter *adapter)
{
	cpuhp_state_remove_instance_nocalls(ibmvnic_online, &adapter->node);
	cpuhp_state_remove_instance_nocalls(CPUHP_IBMVNIC_DEAD,
					    &adapter->node_dead);
}

static long h_reg_sub_crq(unsigned long unit_address, unsigned long token,
			  unsigned long length, unsigned long *number,
			  unsigned long *irq)
{
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	long rc;

	rc = plpar_hcall(H_REG_SUB_CRQ, retbuf, unit_address, token, length);
	*number = retbuf[0];
	*irq = retbuf[1];

	return rc;
}
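/* A note on the timeout handling below (illustrative arithmetic added for
 * clarity, not from the original source): the caller's timeout is divided
 * into five slices, e.g. a 10000 ms timeout becomes five 2000 ms waits,
 * and the CRQ's active flag is rechecked between slices so a dead device
 * fails fast instead of consuming the whole timeout.
 */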
386 */ 387 static int ibmvnic_wait_for_completion(struct ibmvnic_adapter *adapter, 388 struct completion *comp_done, 389 unsigned long timeout) 390 { 391 struct net_device *netdev; 392 unsigned long div_timeout; 393 u8 retry; 394 395 netdev = adapter->netdev; 396 retry = 5; 397 div_timeout = msecs_to_jiffies(timeout / retry); 398 while (true) { 399 if (!adapter->crq.active) { 400 netdev_err(netdev, "Device down!\n"); 401 return -ENODEV; 402 } 403 if (!retry--) 404 break; 405 if (wait_for_completion_timeout(comp_done, div_timeout)) 406 return 0; 407 } 408 netdev_err(netdev, "Operation timed out.\n"); 409 return -ETIMEDOUT; 410 } 411 412 /** 413 * reuse_ltb() - Check if a long term buffer can be reused 414 * @ltb: The long term buffer to be checked 415 * @size: The size of the long term buffer. 416 * 417 * An LTB can be reused unless its size has changed. 418 * 419 * Return: Return true if the LTB can be reused, false otherwise. 420 */ 421 static bool reuse_ltb(struct ibmvnic_long_term_buff *ltb, int size) 422 { 423 return (ltb->buff && ltb->size == size); 424 } 425 426 /** 427 * alloc_long_term_buff() - Allocate a long term buffer (LTB) 428 * 429 * @adapter: ibmvnic adapter associated to the LTB 430 * @ltb: container object for the LTB 431 * @size: size of the LTB 432 * 433 * Allocate an LTB of the specified size and notify VIOS. 434 * 435 * If the given @ltb already has the correct size, reuse it. Otherwise if 436 * its non-NULL, free it. Then allocate a new one of the correct size. 437 * Notify the VIOS either way since we may now be working with a new VIOS. 438 * 439 * Allocating larger chunks of memory during resets, specially LPM or under 440 * low memory situations can cause resets to fail/timeout and for LPAR to 441 * lose connectivity. So hold onto the LTB even if we fail to communicate 442 * with the VIOS and reuse it on next open. Free LTB when adapter is closed. 443 * 444 * Return: 0 if we were able to allocate the LTB and notify the VIOS and 445 * a negative value otherwise. 446 */ 447 static int alloc_long_term_buff(struct ibmvnic_adapter *adapter, 448 struct ibmvnic_long_term_buff *ltb, int size) 449 { 450 struct device *dev = &adapter->vdev->dev; 451 u64 prev = 0; 452 int rc; 453 454 if (!reuse_ltb(ltb, size)) { 455 dev_dbg(dev, 456 "LTB size changed from 0x%llx to 0x%x, reallocating\n", 457 ltb->size, size); 458 prev = ltb->size; 459 free_long_term_buff(adapter, ltb); 460 } 461 462 if (ltb->buff) { 463 dev_dbg(dev, "Reusing LTB [map %d, size 0x%llx]\n", 464 ltb->map_id, ltb->size); 465 } else { 466 ltb->buff = dma_alloc_coherent(dev, size, <b->addr, 467 GFP_KERNEL); 468 if (!ltb->buff) { 469 dev_err(dev, "Couldn't alloc long term buffer\n"); 470 return -ENOMEM; 471 } 472 ltb->size = size; 473 474 ltb->map_id = find_first_zero_bit(adapter->map_ids, 475 MAX_MAP_ID); 476 bitmap_set(adapter->map_ids, ltb->map_id, 1); 477 478 dev_dbg(dev, 479 "Allocated new LTB [map %d, size 0x%llx was 0x%llx]\n", 480 ltb->map_id, ltb->size, prev); 481 } 482 483 /* Ensure ltb is zeroed - specially when reusing it. 
	memset(ltb->buff, 0, ltb->size);

	mutex_lock(&adapter->fw_lock);
	adapter->fw_done_rc = 0;
	reinit_completion(&adapter->fw_done);

	rc = send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id);
	if (rc) {
		dev_err(dev, "send_request_map failed, rc = %d\n", rc);
		goto out;
	}

	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
	if (rc) {
		dev_err(dev, "LTB map request aborted or timed out, rc = %d\n",
			rc);
		goto out;
	}

	if (adapter->fw_done_rc) {
		dev_err(dev, "Couldn't map LTB, rc = %d\n",
			adapter->fw_done_rc);
		rc = -EIO;
		goto out;
	}
	rc = 0;
out:
	/* don't free LTB on communication error - see function header */
	mutex_unlock(&adapter->fw_lock);
	return rc;
}

static void free_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb)
{
	struct device *dev = &adapter->vdev->dev;

	if (!ltb->buff)
		return;

	/* VIOS automatically unmaps the long term buffer at remote
	 * end for the following resets:
	 * FAILOVER, MOBILITY, TIMEOUT.
	 */
	if (adapter->reset_reason != VNIC_RESET_FAILOVER &&
	    adapter->reset_reason != VNIC_RESET_MOBILITY &&
	    adapter->reset_reason != VNIC_RESET_TIMEOUT)
		send_request_unmap(adapter, ltb->map_id);

	dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);

	ltb->buff = NULL;
	/* mark this map_id free */
	bitmap_clear(adapter->map_ids, ltb->map_id, 1);
	ltb->map_id = 0;
}

/**
 * free_ltb_set - free the given set of long term buffers (LTBs)
 * @adapter: The ibmvnic adapter containing this ltb set
 * @ltb_set: The ltb_set to be freed
 *
 * Free the set of LTBs in the given set.
 */
static void free_ltb_set(struct ibmvnic_adapter *adapter,
			 struct ibmvnic_ltb_set *ltb_set)
{
	int i;

	for (i = 0; i < ltb_set->num_ltbs; i++)
		free_long_term_buff(adapter, &ltb_set->ltbs[i]);

	kfree(ltb_set->ltbs);
	ltb_set->ltbs = NULL;
	ltb_set->num_ltbs = 0;
}
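/* Worked example of the sizing math in alloc_ltb_set() below (illustrative
 * only; the actual value of IBMVNIC_ONE_LTB_SIZE comes from ibmvnic.h):
 * assuming IBMVNIC_ONE_LTB_SIZE = 8 MB, num_buffs = 1024 and
 * buff_size = 16 KB, one LTB holds rounddown(8 MB, 16 KB) = 8 MB, the
 * total is 16 MB, and nltbs = 2 full LTBs with no remainder. A remainder
 * would add one final, smaller LTB.
 */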
/**
 * alloc_ltb_set() - Allocate a set of long term buffers (LTBs)
 *
 * @adapter: ibmvnic adapter associated to the LTB
 * @ltb_set: container object for the set of LTBs
 * @num_buffs: Number of buffers in the LTB
 * @buff_size: Size of each buffer in the LTB
 *
 * Allocate a set of LTBs to accommodate @num_buffs buffers of @buff_size
 * each. We currently cap the size of each LTB to IBMVNIC_ONE_LTB_SIZE. If
 * the new set of LTBs has fewer LTBs than the old set, free the excess
 * LTBs. If the new set needs more LTBs than the old set had, allocate the
 * remaining ones. Try and reuse as many LTBs as possible and avoid
 * reallocation.
 *
 * Any changes to this allocation strategy must be reflected in
 * map_rxpool_buff_to_ltb() and map_txpool_buff_to_ltb().
 */
static int alloc_ltb_set(struct ibmvnic_adapter *adapter,
			 struct ibmvnic_ltb_set *ltb_set, int num_buffs,
			 int buff_size)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_ltb_set old_set;
	struct ibmvnic_ltb_set new_set;
	int rem_size;
	int tot_size; /* size of all ltbs */
	int ltb_size; /* size of one ltb */
	int nltbs;
	int rc;
	int n;
	int i;

	dev_dbg(dev, "%s() num_buffs %d, buff_size %d\n", __func__, num_buffs,
		buff_size);

	ltb_size = rounddown(IBMVNIC_ONE_LTB_SIZE, buff_size);
	tot_size = num_buffs * buff_size;

	if (ltb_size > tot_size)
		ltb_size = tot_size;

	nltbs = tot_size / ltb_size;
	if (tot_size % ltb_size)
		nltbs++;

	old_set = *ltb_set;

	if (old_set.num_ltbs == nltbs) {
		new_set = old_set;
	} else {
		int tmp = nltbs * sizeof(struct ibmvnic_long_term_buff);

		new_set.ltbs = kzalloc(tmp, GFP_KERNEL);
		if (!new_set.ltbs)
			return -ENOMEM;

		new_set.num_ltbs = nltbs;

		/* Free any excess ltbs in old set */
		for (i = new_set.num_ltbs; i < old_set.num_ltbs; i++)
			free_long_term_buff(adapter, &old_set.ltbs[i]);

		/* Copy remaining ltbs to new set. All LTBs except the
		 * last one are of the same size. alloc_long_term_buff()
		 * will realloc if the size changes.
		 */
		n = min(old_set.num_ltbs, new_set.num_ltbs);
		for (i = 0; i < n; i++)
			new_set.ltbs[i] = old_set.ltbs[i];

		/* Any additional ltbs in new set will have NULL ltbs for
		 * now and will be allocated in alloc_long_term_buff().
		 */

		/* We no longer need the old_set so free it. Note that we
		 * may have reused some ltbs from old set and freed excess
		 * ltbs above. So we only need to free the container now
		 * not the LTBs themselves. (i.e. don't free_ltb_set()!)
		 */
		kfree(old_set.ltbs);
		old_set.ltbs = NULL;
		old_set.num_ltbs = 0;

		/* Install the new set. If allocations fail below, we will
		 * retry later and know what size LTBs we need.
		 */
		*ltb_set = new_set;
	}

	i = 0;
	rem_size = tot_size;
	while (rem_size) {
		if (ltb_size > rem_size)
			ltb_size = rem_size;

		rem_size -= ltb_size;

		rc = alloc_long_term_buff(adapter, &new_set.ltbs[i], ltb_size);
		if (rc)
			goto out;
		i++;
	}

	WARN_ON(i != new_set.num_ltbs);

	return 0;
out:
	/* We may have allocated one/more LTBs before failing and we
	 * want to try and reuse on next reset. So don't free ltb set.
	 */
	return rc;
}
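/* Illustrative walk of the lookup in map_rxpool_buf_to_ltb() below (added
 * for clarity, not from the original source): with two LTBs holding 512
 * buffers each, a bufidx of 700 skips the first LTB (700 - 512 = 188) and
 * resolves to the second LTB at offset 188 * buff_size.
 */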
/**
 * map_rxpool_buf_to_ltb - Map given rxpool buffer to offset in an LTB.
 * @rxpool: The receive buffer pool containing buffer
 * @bufidx: Index of buffer in rxpool
 * @ltbp: (Output) pointer to the long term buffer containing the buffer
 * @offset: (Output) offset of buffer in the LTB from @ltbp
 *
 * Map the given buffer identified by [rxpool, bufidx] to an LTB in the
 * pool and its corresponding offset. The lookup assumes for now that each
 * LTB may be of a different size; it could possibly be optimized based on
 * the allocation strategy in alloc_ltb_set().
 */
static void map_rxpool_buf_to_ltb(struct ibmvnic_rx_pool *rxpool,
				  unsigned int bufidx,
				  struct ibmvnic_long_term_buff **ltbp,
				  unsigned int *offset)
{
	struct ibmvnic_long_term_buff *ltb;
	int nbufs;	/* # of buffers in one ltb */
	int i;

	WARN_ON(bufidx >= rxpool->size);

	for (i = 0; i < rxpool->ltb_set.num_ltbs; i++) {
		ltb = &rxpool->ltb_set.ltbs[i];
		nbufs = ltb->size / rxpool->buff_size;
		if (bufidx < nbufs)
			break;
		bufidx -= nbufs;
	}

	*ltbp = ltb;
	*offset = bufidx * rxpool->buff_size;
}

/**
 * map_txpool_buf_to_ltb - Map given txpool buffer to offset in an LTB.
 * @txpool: The transmit buffer pool containing buffer
 * @bufidx: Index of buffer in txpool
 * @ltbp: (Output) pointer to the long term buffer (LTB) containing the buffer
 * @offset: (Output) offset of buffer in the LTB from @ltbp
 *
 * Map the given buffer identified by [txpool, bufidx] to an LTB in the
 * pool and its corresponding offset.
 */
static void map_txpool_buf_to_ltb(struct ibmvnic_tx_pool *txpool,
				  unsigned int bufidx,
				  struct ibmvnic_long_term_buff **ltbp,
				  unsigned int *offset)
{
	struct ibmvnic_long_term_buff *ltb;
	int nbufs;	/* # of buffers in one ltb */
	int i;

	WARN_ON_ONCE(bufidx >= txpool->num_buffers);

	for (i = 0; i < txpool->ltb_set.num_ltbs; i++) {
		ltb = &txpool->ltb_set.ltbs[i];
		nbufs = ltb->size / txpool->buf_size;
		if (bufidx < nbufs)
			break;
		bufidx -= nbufs;
	}

	*ltbp = ltb;
	*offset = bufidx * txpool->buf_size;
}

static void deactivate_rx_pools(struct ibmvnic_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_active_rx_pools; i++)
		adapter->rx_pool[i].active = 0;
}
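/* replenish_rx_pool() below tops the pool back up to its full size: it
 * fills "pool->size - available" buffers, queues one rx_add descriptor per
 * buffer in the indirect buffer, and flushes a batch to the VIOS whenever
 * IBMVNIC_MAX_IND_DESCS descriptors accumulate or the last buffer has been
 * queued. (Summary comment added for clarity; behavior as in the code.)
 */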
static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
			      struct ibmvnic_rx_pool *pool)
{
	int count = pool->size - atomic_read(&pool->available);
	u64 handle = adapter->rx_scrq[pool->index]->handle;
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_ind_xmit_queue *ind_bufp;
	struct ibmvnic_sub_crq_queue *rx_scrq;
	struct ibmvnic_long_term_buff *ltb;
	union sub_crq *sub_crq;
	int buffers_added = 0;
	unsigned long lpar_rc;
	struct sk_buff *skb;
	unsigned int offset;
	dma_addr_t dma_addr;
	unsigned char *dst;
	int shift = 0;
	int bufidx;
	int i;

	if (!pool->active)
		return;

	rx_scrq = adapter->rx_scrq[pool->index];
	ind_bufp = &rx_scrq->ind_buf;

	/* netdev_alloc_skb() could have failed after we saved a few skbs
	 * in the indir_buf and we would not have sent them to VIOS yet.
	 * To account for them, start the loop at ind_bufp->index rather
	 * than 0. If we pushed all the skbs to VIOS, ind_bufp->index will
	 * be 0.
	 */
	for (i = ind_bufp->index; i < count; ++i) {
		bufidx = pool->free_map[pool->next_free];

		/* We may be reusing the skb from earlier resets. Allocate
		 * only if necessary. But since the LTB may have changed
		 * during reset (see init_rx_pools()), update LTB below
		 * even if reusing skb.
		 */
		skb = pool->rx_buff[bufidx].skb;
		if (!skb) {
			skb = netdev_alloc_skb(adapter->netdev,
					       pool->buff_size);
			if (!skb) {
				dev_err(dev, "Couldn't replenish rx buff\n");
				adapter->replenish_no_mem++;
				break;
			}
		}

		pool->free_map[pool->next_free] = IBMVNIC_INVALID_MAP;
		pool->next_free = (pool->next_free + 1) % pool->size;

		/* Copy the skb to the long term mapped DMA buffer */
		map_rxpool_buf_to_ltb(pool, bufidx, &ltb, &offset);
		dst = ltb->buff + offset;
		memset(dst, 0, pool->buff_size);
		dma_addr = ltb->addr + offset;

		/* add the skb to an rx_buff in the pool */
		pool->rx_buff[bufidx].data = dst;
		pool->rx_buff[bufidx].dma = dma_addr;
		pool->rx_buff[bufidx].skb = skb;
		pool->rx_buff[bufidx].pool_index = pool->index;
		pool->rx_buff[bufidx].size = pool->buff_size;

		/* queue the rx_buff for the next send_subcrq_indirect */
		sub_crq = &ind_bufp->indir_arr[ind_bufp->index++];
		memset(sub_crq, 0, sizeof(*sub_crq));
		sub_crq->rx_add.first = IBMVNIC_CRQ_CMD;
		sub_crq->rx_add.correlator =
		    cpu_to_be64((u64)&pool->rx_buff[bufidx]);
		sub_crq->rx_add.ioba = cpu_to_be32(dma_addr);
		sub_crq->rx_add.map_id = ltb->map_id;

		/* The length field of the sCRQ is defined to be 24 bits so the
		 * buffer size needs to be left shifted by a byte before it is
		 * converted to big endian to prevent the last byte from being
		 * truncated.
		 */
#ifdef __LITTLE_ENDIAN__
		shift = 8;
#endif
		sub_crq->rx_add.len = cpu_to_be32(pool->buff_size << shift);

		/* if send_subcrq_indirect queue is full, flush to VIOS */
		if (ind_bufp->index == IBMVNIC_MAX_IND_DESCS ||
		    i == count - 1) {
			lpar_rc =
				send_subcrq_indirect(adapter, handle,
						     (u64)ind_bufp->indir_dma,
						     (u64)ind_bufp->index);
			if (lpar_rc != H_SUCCESS)
				goto failure;
			buffers_added += ind_bufp->index;
			adapter->replenish_add_buff_success += ind_bufp->index;
			ind_bufp->index = 0;
		}
	}
	atomic_add(buffers_added, &pool->available);
	return;

failure:
	if (lpar_rc != H_PARAMETER && lpar_rc != H_CLOSED)
		dev_err_ratelimited(dev, "rx: replenish packet buffer failed\n");
	for (i = ind_bufp->index - 1; i >= 0; --i) {
		struct ibmvnic_rx_buff *rx_buff;

		pool->next_free = pool->next_free == 0 ?
				  pool->size - 1 : pool->next_free - 1;
		sub_crq = &ind_bufp->indir_arr[i];
		rx_buff = (struct ibmvnic_rx_buff *)
			  be64_to_cpu(sub_crq->rx_add.correlator);
		bufidx = (int)(rx_buff - pool->rx_buff);
		pool->free_map[pool->next_free] = bufidx;
		dev_kfree_skb_any(pool->rx_buff[bufidx].skb);
		pool->rx_buff[bufidx].skb = NULL;
	}
	adapter->replenish_add_buff_failure += ind_bufp->index;
	atomic_add(buffers_added, &pool->available);
	ind_bufp->index = 0;
	if (lpar_rc == H_CLOSED || adapter->failover_pending) {
		/* Disable buffer pool replenishment and report carrier off if
		 * queue is closed or pending failover.
		 * Firmware guarantees that a signal will be sent to the
		 * driver, triggering a reset.
		 */
		deactivate_rx_pools(adapter);
		netif_carrier_off(adapter->netdev);
	}
}
static void replenish_pools(struct ibmvnic_adapter *adapter)
{
	int i;

	adapter->replenish_task_cycles++;
	for (i = 0; i < adapter->num_active_rx_pools; i++) {
		if (adapter->rx_pool[i].active)
			replenish_rx_pool(adapter, &adapter->rx_pool[i]);
	}

	netdev_dbg(adapter->netdev, "Replenished %d pools\n", i);
}

static void release_stats_buffers(struct ibmvnic_adapter *adapter)
{
	kfree(adapter->tx_stats_buffers);
	kfree(adapter->rx_stats_buffers);
	adapter->tx_stats_buffers = NULL;
	adapter->rx_stats_buffers = NULL;
}

static int init_stats_buffers(struct ibmvnic_adapter *adapter)
{
	adapter->tx_stats_buffers =
				kcalloc(IBMVNIC_MAX_QUEUES,
					sizeof(struct ibmvnic_tx_queue_stats),
					GFP_KERNEL);
	if (!adapter->tx_stats_buffers)
		return -ENOMEM;

	adapter->rx_stats_buffers =
				kcalloc(IBMVNIC_MAX_QUEUES,
					sizeof(struct ibmvnic_rx_queue_stats),
					GFP_KERNEL);
	if (!adapter->rx_stats_buffers)
		return -ENOMEM;

	return 0;
}

static void release_stats_token(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;

	if (!adapter->stats_token)
		return;

	dma_unmap_single(dev, adapter->stats_token,
			 sizeof(struct ibmvnic_statistics),
			 DMA_FROM_DEVICE);
	adapter->stats_token = 0;
}

static int init_stats_token(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	dma_addr_t stok;
	int rc;

	stok = dma_map_single(dev, &adapter->stats,
			      sizeof(struct ibmvnic_statistics),
			      DMA_FROM_DEVICE);
	rc = dma_mapping_error(dev, stok);
	if (rc) {
		dev_err(dev, "Couldn't map stats buffer, rc = %d\n", rc);
		return rc;
	}

	adapter->stats_token = stok;
	netdev_dbg(adapter->netdev, "Stats token initialized (%llx)\n", stok);
	return 0;
}

/**
 * release_rx_pools() - Release any rx pools attached to @adapter.
 * @adapter: ibmvnic adapter
 *
 * Safe to call this multiple times - even if no pools are attached.
 */
static void release_rx_pools(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_rx_pool *rx_pool;
	int i, j;

	if (!adapter->rx_pool)
		return;

	for (i = 0; i < adapter->num_active_rx_pools; i++) {
		rx_pool = &adapter->rx_pool[i];

		netdev_dbg(adapter->netdev, "Releasing rx_pool[%d]\n", i);

		kfree(rx_pool->free_map);

		free_ltb_set(adapter, &rx_pool->ltb_set);

		if (!rx_pool->rx_buff)
			continue;

		for (j = 0; j < rx_pool->size; j++) {
			if (rx_pool->rx_buff[j].skb) {
				dev_kfree_skb_any(rx_pool->rx_buff[j].skb);
				rx_pool->rx_buff[j].skb = NULL;
			}
		}

		kfree(rx_pool->rx_buff);
	}

	kfree(adapter->rx_pool);
	adapter->rx_pool = NULL;
	adapter->num_active_rx_pools = 0;
	adapter->prev_rx_pool_size = 0;
}
/**
 * reuse_rx_pools() - Check if the existing rx pools can be reused.
 * @adapter: ibmvnic adapter
 *
 * Check if the existing rx pools in the adapter can be reused. The
 * pools can be reused if the pool parameters (number of pools,
 * number of buffers in the pool and size of each buffer) have not
 * changed.
 *
 * NOTE: This assumes that all pools have the same number of buffers
 * which is the case currently. If that changes, we must fix this.
 *
 * Return: true if the rx pools can be reused, false otherwise.
 */
static bool reuse_rx_pools(struct ibmvnic_adapter *adapter)
{
	u64 old_num_pools, new_num_pools;
	u64 old_pool_size, new_pool_size;
	u64 old_buff_size, new_buff_size;

	if (!adapter->rx_pool)
		return false;

	old_num_pools = adapter->num_active_rx_pools;
	new_num_pools = adapter->req_rx_queues;

	old_pool_size = adapter->prev_rx_pool_size;
	new_pool_size = adapter->req_rx_add_entries_per_subcrq;

	old_buff_size = adapter->prev_rx_buf_sz;
	new_buff_size = adapter->cur_rx_buf_sz;

	if (old_buff_size != new_buff_size ||
	    old_num_pools != new_num_pools ||
	    old_pool_size != new_pool_size)
		return false;

	return true;
}

/**
 * init_rx_pools(): Initialize the set of receiver pools in the adapter.
 * @netdev: net device associated with the vnic interface
 *
 * Initialize the set of receiver pools in the ibmvnic adapter associated
 * with the net_device @netdev. If possible, reuse the existing rx pools.
 * Otherwise free any existing pools and allocate a new set of pools
 * before initializing them.
 *
 * Return: 0 on success and negative value on error.
 */
static int init_rx_pools(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_rx_pool *rx_pool;
	u64 num_pools;
	u64 pool_size;	/* # of buffers in one pool */
	u64 buff_size;
	int i, j, rc;

	pool_size = adapter->req_rx_add_entries_per_subcrq;
	num_pools = adapter->req_rx_queues;
	buff_size = adapter->cur_rx_buf_sz;

	if (reuse_rx_pools(adapter)) {
		dev_dbg(dev, "Reusing rx pools\n");
		goto update_ltb;
	}

	/* Allocate/populate the pools. */
	release_rx_pools(adapter);

	adapter->rx_pool = kcalloc(num_pools,
				   sizeof(struct ibmvnic_rx_pool),
				   GFP_KERNEL);
	if (!adapter->rx_pool) {
		dev_err(dev, "Failed to allocate rx pools\n");
		return -ENOMEM;
	}

	/* Set num_active_rx_pools early. If we fail below after partial
	 * allocation, release_rx_pools() will know how many to look for.
	 */
	adapter->num_active_rx_pools = num_pools;

	for (i = 0; i < num_pools; i++) {
		rx_pool = &adapter->rx_pool[i];

		netdev_dbg(adapter->netdev,
			   "Initializing rx_pool[%d], %lld buffs, %lld bytes each\n",
			   i, pool_size, buff_size);

		rx_pool->size = pool_size;
		rx_pool->index = i;
		rx_pool->buff_size = ALIGN(buff_size, L1_CACHE_BYTES);

		rx_pool->free_map = kcalloc(rx_pool->size, sizeof(int),
					    GFP_KERNEL);
		if (!rx_pool->free_map) {
			dev_err(dev, "Couldn't alloc free_map %d\n", i);
			rc = -ENOMEM;
			goto out_release;
		}

		rx_pool->rx_buff = kcalloc(rx_pool->size,
					   sizeof(struct ibmvnic_rx_buff),
					   GFP_KERNEL);
		if (!rx_pool->rx_buff) {
			dev_err(dev, "Couldn't alloc rx buffers\n");
			rc = -ENOMEM;
			goto out_release;
		}
	}

	adapter->prev_rx_pool_size = pool_size;
	adapter->prev_rx_buf_sz = adapter->cur_rx_buf_sz;

update_ltb:
	for (i = 0; i < num_pools; i++) {
		rx_pool = &adapter->rx_pool[i];
		dev_dbg(dev, "Updating LTB for rx pool %d [%d, %d]\n",
			i, rx_pool->size, rx_pool->buff_size);

		rc = alloc_ltb_set(adapter, &rx_pool->ltb_set,
				   rx_pool->size, rx_pool->buff_size);
		if (rc)
			goto out;

		for (j = 0; j < rx_pool->size; ++j) {
			struct ibmvnic_rx_buff *rx_buff;

			rx_pool->free_map[j] = j;

			/* NOTE: Don't clear rx_buff->skb here - will leak
			 * memory! replenish_rx_pool() will reuse skbs or
			 * allocate as necessary.
			 */
			rx_buff = &rx_pool->rx_buff[j];
			rx_buff->dma = 0;
			rx_buff->data = 0;
			rx_buff->size = 0;
			rx_buff->pool_index = 0;
		}

		/* Mark pool "empty" so replenish_rx_pools() will
		 * update the LTB info for each buffer
		 */
		atomic_set(&rx_pool->available, 0);
		rx_pool->next_alloc = 0;
		rx_pool->next_free = 0;
		/* replenish_rx_pool() may have called deactivate_rx_pools()
		 * on failover. Ensure pool is active now.
		 */
		rx_pool->active = 1;
	}
	return 0;
out_release:
	release_rx_pools(adapter);
out:
	/* We failed to allocate one or more LTBs or map them on the VIOS.
	 * Hold onto the pools and any LTBs that we did allocate/map.
	 */
	return rc;
}

static void release_vpd_data(struct ibmvnic_adapter *adapter)
{
	if (!adapter->vpd)
		return;

	kfree(adapter->vpd->buff);
	kfree(adapter->vpd);

	adapter->vpd = NULL;
}

static void release_one_tx_pool(struct ibmvnic_adapter *adapter,
				struct ibmvnic_tx_pool *tx_pool)
{
	kfree(tx_pool->tx_buff);
	kfree(tx_pool->free_map);
	free_ltb_set(adapter, &tx_pool->ltb_set);
}

/**
 * release_tx_pools() - Release any tx pools attached to @adapter.
 * @adapter: ibmvnic adapter
 *
 * Safe to call this multiple times - even if no pools are attached.
 */
static void release_tx_pools(struct ibmvnic_adapter *adapter)
{
	int i;

	/* init_tx_pools() ensures that ->tx_pool and ->tso_pool are
	 * both NULL or both non-NULL. So we only need to check one.
	 */
	if (!adapter->tx_pool)
		return;

	for (i = 0; i < adapter->num_active_tx_pools; i++) {
		release_one_tx_pool(adapter, &adapter->tx_pool[i]);
		release_one_tx_pool(adapter, &adapter->tso_pool[i]);
	}

	kfree(adapter->tx_pool);
	adapter->tx_pool = NULL;
	kfree(adapter->tso_pool);
	adapter->tso_pool = NULL;
	adapter->num_active_tx_pools = 0;
	adapter->prev_tx_pool_size = 0;
}

static int init_one_tx_pool(struct net_device *netdev,
			    struct ibmvnic_tx_pool *tx_pool,
			    int pool_size, int buf_size)
{
	int i;

	tx_pool->tx_buff = kcalloc(pool_size,
				   sizeof(struct ibmvnic_tx_buff),
				   GFP_KERNEL);
	if (!tx_pool->tx_buff)
		return -ENOMEM;

	tx_pool->free_map = kcalloc(pool_size, sizeof(int), GFP_KERNEL);
	if (!tx_pool->free_map) {
		kfree(tx_pool->tx_buff);
		tx_pool->tx_buff = NULL;
		return -ENOMEM;
	}

	for (i = 0; i < pool_size; i++)
		tx_pool->free_map[i] = i;

	tx_pool->consumer_index = 0;
	tx_pool->producer_index = 0;
	tx_pool->num_buffers = pool_size;
	tx_pool->buf_size = buf_size;

	return 0;
}

/**
 * reuse_tx_pools() - Check if the existing tx pools can be reused.
 * @adapter: ibmvnic adapter
 *
 * Check if the existing tx pools in the adapter can be reused. The
 * pools can be reused if the pool parameters (number of pools,
 * number of buffers in the pool and mtu) have not changed.
 *
 * NOTE: This assumes that all pools have the same number of buffers
 * which is the case currently. If that changes, we must fix this.
 *
 * Return: true if the tx pools can be reused, false otherwise.
 */
static bool reuse_tx_pools(struct ibmvnic_adapter *adapter)
{
	u64 old_num_pools, new_num_pools;
	u64 old_pool_size, new_pool_size;
	u64 old_mtu, new_mtu;

	if (!adapter->tx_pool)
		return false;

	old_num_pools = adapter->num_active_tx_pools;
	new_num_pools = adapter->num_active_tx_scrqs;
	old_pool_size = adapter->prev_tx_pool_size;
	new_pool_size = adapter->req_tx_entries_per_subcrq;
	old_mtu = adapter->prev_mtu;
	new_mtu = adapter->req_mtu;

	if (old_mtu != new_mtu ||
	    old_num_pools != new_num_pools ||
	    old_pool_size != new_pool_size)
		return false;

	return true;
}
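/* Sizing note for init_tx_pools() below (illustrative arithmetic added for
 * clarity, not from the original source): each tx buffer must hold an
 * MTU-sized frame plus a VLAN tag, rounded up to a cache line. For example,
 * with req_mtu = 1522 and 128-byte cache lines,
 * buff_size = ALIGN(1522 + 4, 128) = 1536 bytes.
 */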
/**
 * init_tx_pools(): Initialize the set of transmit pools in the adapter.
 * @netdev: net device associated with the vnic interface
 *
 * Initialize the set of transmit pools in the ibmvnic adapter associated
 * with the net_device @netdev. If possible, reuse the existing tx pools.
 * Otherwise free any existing pools and allocate a new set of pools
 * before initializing them.
 *
 * Return: 0 on success and negative value on error.
 */
static int init_tx_pools(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->vdev->dev;
	int num_pools;
	u64 pool_size;	/* # of buffers in pool */
	u64 buff_size;
	int i, j, rc;

	num_pools = adapter->req_tx_queues;

	/* We must notify the VIOS about the LTB on all resets - but we only
	 * need to alloc/populate pools if either the number of buffers or
	 * size of each buffer in the pool has changed.
	 */
	if (reuse_tx_pools(adapter)) {
		netdev_dbg(netdev, "Reusing tx pools\n");
		goto update_ltb;
	}

	/* Allocate/populate the pools. */
	release_tx_pools(adapter);

	pool_size = adapter->req_tx_entries_per_subcrq;
	num_pools = adapter->num_active_tx_scrqs;

	adapter->tx_pool = kcalloc(num_pools,
				   sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
	if (!adapter->tx_pool)
		return -ENOMEM;

	adapter->tso_pool = kcalloc(num_pools,
				    sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
	/* To simplify release_tx_pools() ensure that ->tx_pool and
	 * ->tso_pool are either both NULL or both non-NULL.
	 */
	if (!adapter->tso_pool) {
		kfree(adapter->tx_pool);
		adapter->tx_pool = NULL;
		return -ENOMEM;
	}

	/* Set num_active_tx_pools early. If we fail below after partial
	 * allocation, release_tx_pools() will know how many to look for.
	 */
	adapter->num_active_tx_pools = num_pools;

	buff_size = adapter->req_mtu + VLAN_HLEN;
	buff_size = ALIGN(buff_size, L1_CACHE_BYTES);

	for (i = 0; i < num_pools; i++) {
		dev_dbg(dev, "Init tx pool %d [%llu, %llu]\n",
			i, adapter->req_tx_entries_per_subcrq, buff_size);

		rc = init_one_tx_pool(netdev, &adapter->tx_pool[i],
				      pool_size, buff_size);
		if (rc)
			goto out_release;

		rc = init_one_tx_pool(netdev, &adapter->tso_pool[i],
				      IBMVNIC_TSO_BUFS,
				      IBMVNIC_TSO_BUF_SZ);
		if (rc)
			goto out_release;
	}

	adapter->prev_tx_pool_size = pool_size;
	adapter->prev_mtu = adapter->req_mtu;

update_ltb:
	/* NOTE: All tx_pools have the same number of buffers (which is
	 * same as pool_size). All tso_pools have IBMVNIC_TSO_BUFS
	 * buffers (see calls to init_one_tx_pool() for these).
	 * For consistency, we use tx_pool->num_buffers and
	 * tso_pool->num_buffers below.
	 */
	rc = -1;
	for (i = 0; i < num_pools; i++) {
		struct ibmvnic_tx_pool *tso_pool;
		struct ibmvnic_tx_pool *tx_pool;

		tx_pool = &adapter->tx_pool[i];

		dev_dbg(dev, "Updating LTB for tx pool %d [%d, %d]\n",
			i, tx_pool->num_buffers, tx_pool->buf_size);

		rc = alloc_ltb_set(adapter, &tx_pool->ltb_set,
				   tx_pool->num_buffers, tx_pool->buf_size);
		if (rc)
			goto out;

		tx_pool->consumer_index = 0;
		tx_pool->producer_index = 0;

		for (j = 0; j < tx_pool->num_buffers; j++)
			tx_pool->free_map[j] = j;

		tso_pool = &adapter->tso_pool[i];

		dev_dbg(dev, "Updating LTB for tso pool %d [%d, %d]\n",
			i, tso_pool->num_buffers, tso_pool->buf_size);

		rc = alloc_ltb_set(adapter, &tso_pool->ltb_set,
				   tso_pool->num_buffers, tso_pool->buf_size);
		if (rc)
			goto out;

		tso_pool->consumer_index = 0;
		tso_pool->producer_index = 0;

		for (j = 0; j < tso_pool->num_buffers; j++)
			tso_pool->free_map[j] = j;
	}

	return 0;
out_release:
	release_tx_pools(adapter);
out:
	/* We failed to allocate one or more LTBs or map them on the VIOS.
	 * Hold onto the pools and any LTBs that we did allocate/map.
	 */
	return rc;
}

static void ibmvnic_napi_enable(struct ibmvnic_adapter *adapter)
{
	int i;

	if (adapter->napi_enabled)
		return;

	for (i = 0; i < adapter->req_rx_queues; i++)
		napi_enable(&adapter->napi[i]);

	adapter->napi_enabled = true;
}

static void ibmvnic_napi_disable(struct ibmvnic_adapter *adapter)
{
	int i;

	if (!adapter->napi_enabled)
		return;

	for (i = 0; i < adapter->req_rx_queues; i++) {
		netdev_dbg(adapter->netdev, "Disabling napi[%d]\n", i);
		napi_disable(&adapter->napi[i]);
	}

	adapter->napi_enabled = false;
}

static int init_napi(struct ibmvnic_adapter *adapter)
{
	int i;

	adapter->napi = kcalloc(adapter->req_rx_queues,
				sizeof(struct napi_struct), GFP_KERNEL);
	if (!adapter->napi)
		return -ENOMEM;

	for (i = 0; i < adapter->req_rx_queues; i++) {
		netdev_dbg(adapter->netdev, "Adding napi[%d]\n", i);
		netif_napi_add(adapter->netdev, &adapter->napi[i],
			       ibmvnic_poll);
	}

	adapter->num_active_rx_napi = adapter->req_rx_queues;
	return 0;
}

static void release_napi(struct ibmvnic_adapter *adapter)
{
	int i;

	if (!adapter->napi)
		return;

	for (i = 0; i < adapter->num_active_rx_napi; i++) {
		netdev_dbg(adapter->netdev, "Releasing napi[%d]\n", i);
		netif_napi_del(&adapter->napi[i]);
	}

	kfree(adapter->napi);
	adapter->napi = NULL;
	adapter->num_active_rx_napi = 0;
	adapter->napi_enabled = false;
}

static const char *adapter_state_to_string(enum vnic_state state)
{
	switch (state) {
	case VNIC_PROBING:
		return "PROBING";
	case VNIC_PROBED:
		return "PROBED";
	case VNIC_OPENING:
		return "OPENING";
	case VNIC_OPEN:
		return "OPEN";
	case VNIC_CLOSING:
		return "CLOSING";
	case VNIC_CLOSED:
		return "CLOSED";
	case VNIC_REMOVING:
		return "REMOVING";
	case VNIC_REMOVED:
		return "REMOVED";
	case VNIC_DOWN:
		return "DOWN";
	}
	return "UNKNOWN";
}

static int ibmvnic_login(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	unsigned long timeout = msecs_to_jiffies(20000);
	int retry_count = 0;
	int retries = 10;
	bool retry;
	int rc;

	do {
		retry = false;
		if (retry_count > retries) {
			netdev_warn(netdev, "Login attempts exceeded\n");
			return -EACCES;
		}

		adapter->init_done_rc = 0;
		reinit_completion(&adapter->init_done);
		rc = send_login(adapter);
		if (rc)
			return rc;

		if (!wait_for_completion_timeout(&adapter->init_done,
						 timeout)) {
			netdev_warn(netdev, "Login timed out, retrying...\n");
			retry = true;
			adapter->init_done_rc = 0;
			retry_count++;
			continue;
		}

		if (adapter->init_done_rc == ABORTED) {
			netdev_warn(netdev, "Login aborted, retrying...\n");
			retry = true;
			adapter->init_done_rc = 0;
			retry_count++;
			/* FW or device may be busy, so
			 * wait a bit before retrying login
			 */
			msleep(500);
		} else if (adapter->init_done_rc == PARTIALSUCCESS) {
			retry_count++;
			release_sub_crqs(adapter, 1);

			retry = true;
			netdev_dbg(netdev,
				   "Received partial success, retrying...\n");
			adapter->init_done_rc = 0;
			reinit_completion(&adapter->init_done);
			send_query_cap(adapter);
			if (!wait_for_completion_timeout(&adapter->init_done,
							 timeout)) {
				netdev_warn(netdev,
					    "Capabilities query timed out\n");
				return -ETIMEDOUT;
			}

			rc = init_sub_crqs(adapter);
			if (rc) {
				netdev_warn(netdev,
					    "SCRQ initialization failed\n");
				return rc;
			}

			rc = init_sub_crq_irqs(adapter);
			if (rc) {
				netdev_warn(netdev,
					    "SCRQ irq initialization failed\n");
				return rc;
			}
		} else if (adapter->init_done_rc) {
			netdev_warn(netdev, "Adapter login failed, init_done_rc = %d\n",
				    adapter->init_done_rc);
			return -EIO;
		}
	} while (retry);

	__ibmvnic_set_mac(netdev, adapter->mac_addr);

	netdev_dbg(netdev, "[S:%s] Login succeeded\n", adapter_state_to_string(adapter->state));
	return 0;
}

static void release_login_buffer(struct ibmvnic_adapter *adapter)
{
	kfree(adapter->login_buf);
	adapter->login_buf = NULL;
}

static void release_login_rsp_buffer(struct ibmvnic_adapter *adapter)
{
	kfree(adapter->login_rsp_buf);
	adapter->login_rsp_buf = NULL;
}

static void release_resources(struct ibmvnic_adapter *adapter)
{
	release_vpd_data(adapter);

	release_napi(adapter);
	release_login_buffer(adapter);
	release_login_rsp_buffer(adapter);
}

static int set_link_state(struct ibmvnic_adapter *adapter, u8 link_state)
{
	struct net_device *netdev = adapter->netdev;
	unsigned long timeout = msecs_to_jiffies(20000);
	union ibmvnic_crq crq;
	bool resend;
	int rc;

	netdev_dbg(netdev, "setting link state %d\n", link_state);

	memset(&crq, 0, sizeof(crq));
	crq.logical_link_state.first = IBMVNIC_CRQ_CMD;
	crq.logical_link_state.cmd = LOGICAL_LINK_STATE;
	crq.logical_link_state.link_state = link_state;

	do {
		resend = false;

		reinit_completion(&adapter->init_done);
		rc = ibmvnic_send_crq(adapter, &crq);
		if (rc) {
			netdev_err(netdev, "Failed to set link state\n");
			return rc;
		}

		if (!wait_for_completion_timeout(&adapter->init_done,
						 timeout)) {
			netdev_err(netdev, "timeout setting link state\n");
			return -ETIMEDOUT;
		}

		if (adapter->init_done_rc == PARTIALSUCCESS) {
			/* Partial success, delay and re-send */
			mdelay(1000);
			resend = true;
		} else if (adapter->init_done_rc) {
			netdev_warn(netdev, "Unable to set link state, rc=%d\n",
				    adapter->init_done_rc);
			return adapter->init_done_rc;
		}
	} while (resend);

	return 0;
}

static int set_real_num_queues(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int rc;

	netdev_dbg(netdev, "Setting real tx/rx queues (%llx/%llx)\n",
		   adapter->req_tx_queues, adapter->req_rx_queues);

	rc = netif_set_real_num_tx_queues(netdev, adapter->req_tx_queues);
	if (rc) {
		netdev_err(netdev, "failed to set the number of tx queues\n");
		return rc;
	}

	rc = netif_set_real_num_rx_queues(netdev, adapter->req_rx_queues);
	if (rc)
		netdev_err(netdev, "failed to set the number of rx queues\n");

	return rc;
}
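/* ibmvnic_get_vpd() below retrieves Vital Product Data in two steps: a
 * GET_VPD_SIZE CRQ first reports how large the VPD is, then a buffer of
 * that size is allocated and DMA-mapped and a GET_VPD CRQ asks firmware to
 * fill it. (Summary comment added for clarity; behavior as in the code.)
 */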
static int ibmvnic_get_vpd(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	union ibmvnic_crq crq;
	int len = 0;
	int rc;

	if (adapter->vpd->buff)
		len = adapter->vpd->len;

	mutex_lock(&adapter->fw_lock);
	adapter->fw_done_rc = 0;
	reinit_completion(&adapter->fw_done);

	crq.get_vpd_size.first = IBMVNIC_CRQ_CMD;
	crq.get_vpd_size.cmd = GET_VPD_SIZE;
	rc = ibmvnic_send_crq(adapter, &crq);
	if (rc) {
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}

	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
	if (rc) {
		dev_err(dev, "Could not retrieve VPD size, rc = %d\n", rc);
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}
	mutex_unlock(&adapter->fw_lock);

	if (!adapter->vpd->len)
		return -ENODATA;

	if (!adapter->vpd->buff)
		adapter->vpd->buff = kzalloc(adapter->vpd->len, GFP_KERNEL);
	else if (adapter->vpd->len != len)
		adapter->vpd->buff =
			krealloc(adapter->vpd->buff,
				 adapter->vpd->len, GFP_KERNEL);

	if (!adapter->vpd->buff) {
		dev_err(dev, "Could not allocate VPD buffer\n");
		return -ENOMEM;
	}

	adapter->vpd->dma_addr =
		dma_map_single(dev, adapter->vpd->buff, adapter->vpd->len,
			       DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, adapter->vpd->dma_addr)) {
		dev_err(dev, "Could not map VPD buffer\n");
		kfree(adapter->vpd->buff);
		adapter->vpd->buff = NULL;
		return -ENOMEM;
	}

	mutex_lock(&adapter->fw_lock);
	adapter->fw_done_rc = 0;
	reinit_completion(&adapter->fw_done);

	crq.get_vpd.first = IBMVNIC_CRQ_CMD;
	crq.get_vpd.cmd = GET_VPD;
	crq.get_vpd.ioba = cpu_to_be32(adapter->vpd->dma_addr);
	crq.get_vpd.len = cpu_to_be32((u32)adapter->vpd->len);
	rc = ibmvnic_send_crq(adapter, &crq);
	if (rc) {
		kfree(adapter->vpd->buff);
		adapter->vpd->buff = NULL;
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}

	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
	if (rc) {
		dev_err(dev, "Unable to retrieve VPD, rc = %d\n", rc);
		kfree(adapter->vpd->buff);
		adapter->vpd->buff = NULL;
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}

	mutex_unlock(&adapter->fw_lock);
	return 0;
}

static int init_resources(struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int rc;

	rc = set_real_num_queues(netdev);
	if (rc)
		return rc;

	adapter->vpd = kzalloc(sizeof(*adapter->vpd), GFP_KERNEL);
	if (!adapter->vpd)
		return -ENOMEM;

	/* Vital Product Data (VPD) */
	rc = ibmvnic_get_vpd(adapter);
	if (rc) {
		netdev_err(netdev, "failed to initialize Vital Product Data (VPD)\n");
		return rc;
	}

	rc = init_napi(adapter);
	if (rc)
		return rc;

	send_query_map(adapter);

	rc = init_rx_pools(netdev);
	if (rc)
		return rc;

	rc = init_tx_pools(netdev);
	return rc;
}

static int __ibmvnic_open(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	enum vnic_state prev_state = adapter->state;
	int i, rc;

	adapter->state = VNIC_OPENING;
	replenish_pools(adapter);
	ibmvnic_napi_enable(adapter);

	/* We're ready to receive frames, enable the sub-crq interrupts and
	 * set the logical link state to up
	 */
	for (i = 0; i < adapter->req_rx_queues; i++) {
		netdev_dbg(netdev, "Enabling rx_scrq[%d] irq\n", i);
		if (prev_state == VNIC_CLOSED)
			enable_irq(adapter->rx_scrq[i]->irq);
		enable_scrq_irq(adapter, adapter->rx_scrq[i]);
	}

	for (i = 0; i < adapter->req_tx_queues; i++) {
		netdev_dbg(netdev, "Enabling tx_scrq[%d] irq\n", i);
		if (prev_state == VNIC_CLOSED)
			enable_irq(adapter->tx_scrq[i]->irq);
		enable_scrq_irq(adapter, adapter->tx_scrq[i]);
		/* netdev_tx_reset_queue will reset dql stats. During NON_FATAL
		 * resets, don't reset the stats because there could be batched
		 * skbs waiting to be sent. If we reset dql stats, we risk
		 * num_completed being greater than num_queued. This will cause
		 * a BUG_ON in dql_completed().
		 */
		if (adapter->reset_reason != VNIC_RESET_NON_FATAL)
			netdev_tx_reset_queue(netdev_get_tx_queue(netdev, i));
	}

	rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_UP);
	if (rc) {
		ibmvnic_napi_disable(adapter);
		ibmvnic_disable_irqs(adapter);
		return rc;
	}

	adapter->tx_queues_active = true;

	/* Since queues were stopped until now, there shouldn't be any
	 * one in ibmvnic_complete_tx() or ibmvnic_xmit() so maybe we
	 * don't need the synchronize_rcu()? Leaving it for consistency
	 * with setting ->tx_queues_active = false.
	 */
	synchronize_rcu();

	netif_tx_start_all_queues(netdev);

	if (prev_state == VNIC_CLOSED) {
		for (i = 0; i < adapter->req_rx_queues; i++)
			napi_schedule(&adapter->napi[i]);
	}

	adapter->state = VNIC_OPEN;
	return rc;
}

static int ibmvnic_open(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int rc;

	ASSERT_RTNL();

	/* If device failover is pending or we are about to reset, just set
	 * device state and return. Device operation will be handled by reset
	 * routine.
	 *
	 * It should be safe to overwrite the adapter->state here. Since
	 * we hold the rtnl, either the reset has not actually started or
	 * the rtnl got dropped during the set_link_state() in do_reset().
	 * In the former case, no one else is changing the state (again we
	 * have the rtnl) and in the latter case, do_reset() will detect and
	 * honor our setting below.
	 */
	if (adapter->failover_pending || (test_bit(0, &adapter->resetting))) {
		netdev_dbg(netdev, "[S:%s FOP:%d] Resetting, deferring open\n",
			   adapter_state_to_string(adapter->state),
			   adapter->failover_pending);
		adapter->state = VNIC_OPEN;
		rc = 0;
		goto out;
	}

	if (adapter->state != VNIC_CLOSED) {
		rc = ibmvnic_login(netdev);
		if (rc)
			goto out;

		rc = init_resources(adapter);
		if (rc) {
			netdev_err(netdev, "failed to initialize resources\n");
			goto out;
		}
	}

	rc = __ibmvnic_open(netdev);

out:
	/* If open failed and there is a pending failover or in-progress reset,
	 * set device state and return. Device operation will be handled by
	 * reset routine. See also comments above regarding rtnl.
	 */
1900 */ 1901 if (rc && 1902 (adapter->failover_pending || (test_bit(0, &adapter->resetting)))) { 1903 adapter->state = VNIC_OPEN; 1904 rc = 0; 1905 } 1906 1907 if (rc) { 1908 release_resources(adapter); 1909 release_rx_pools(adapter); 1910 release_tx_pools(adapter); 1911 } 1912 1913 return rc; 1914 } 1915 1916 static void clean_rx_pools(struct ibmvnic_adapter *adapter) 1917 { 1918 struct ibmvnic_rx_pool *rx_pool; 1919 struct ibmvnic_rx_buff *rx_buff; 1920 u64 rx_entries; 1921 int rx_scrqs; 1922 int i, j; 1923 1924 if (!adapter->rx_pool) 1925 return; 1926 1927 rx_scrqs = adapter->num_active_rx_pools; 1928 rx_entries = adapter->req_rx_add_entries_per_subcrq; 1929 1930 /* Free any remaining skbs in the rx buffer pools */ 1931 for (i = 0; i < rx_scrqs; i++) { 1932 rx_pool = &adapter->rx_pool[i]; 1933 if (!rx_pool || !rx_pool->rx_buff) 1934 continue; 1935 1936 netdev_dbg(adapter->netdev, "Cleaning rx_pool[%d]\n", i); 1937 for (j = 0; j < rx_entries; j++) { 1938 rx_buff = &rx_pool->rx_buff[j]; 1939 if (rx_buff && rx_buff->skb) { 1940 dev_kfree_skb_any(rx_buff->skb); 1941 rx_buff->skb = NULL; 1942 } 1943 } 1944 } 1945 } 1946 1947 static void clean_one_tx_pool(struct ibmvnic_adapter *adapter, 1948 struct ibmvnic_tx_pool *tx_pool) 1949 { 1950 struct ibmvnic_tx_buff *tx_buff; 1951 u64 tx_entries; 1952 int i; 1953 1954 if (!tx_pool || !tx_pool->tx_buff) 1955 return; 1956 1957 tx_entries = tx_pool->num_buffers; 1958 1959 for (i = 0; i < tx_entries; i++) { 1960 tx_buff = &tx_pool->tx_buff[i]; 1961 if (tx_buff && tx_buff->skb) { 1962 dev_kfree_skb_any(tx_buff->skb); 1963 tx_buff->skb = NULL; 1964 } 1965 } 1966 } 1967 1968 static void clean_tx_pools(struct ibmvnic_adapter *adapter) 1969 { 1970 int tx_scrqs; 1971 int i; 1972 1973 if (!adapter->tx_pool || !adapter->tso_pool) 1974 return; 1975 1976 tx_scrqs = adapter->num_active_tx_pools; 1977 1978 /* Free any remaining skbs in the tx buffer pools */ 1979 for (i = 0; i < tx_scrqs; i++) { 1980 netdev_dbg(adapter->netdev, "Cleaning tx_pool[%d]\n", i); 1981 clean_one_tx_pool(adapter, &adapter->tx_pool[i]); 1982 clean_one_tx_pool(adapter, &adapter->tso_pool[i]); 1983 } 1984 } 1985 1986 static void ibmvnic_disable_irqs(struct ibmvnic_adapter *adapter) 1987 { 1988 struct net_device *netdev = adapter->netdev; 1989 int i; 1990 1991 if (adapter->tx_scrq) { 1992 for (i = 0; i < adapter->req_tx_queues; i++) 1993 if (adapter->tx_scrq[i]->irq) { 1994 netdev_dbg(netdev, 1995 "Disabling tx_scrq[%d] irq\n", i); 1996 disable_scrq_irq(adapter, adapter->tx_scrq[i]); 1997 disable_irq(adapter->tx_scrq[i]->irq); 1998 } 1999 } 2000 2001 if (adapter->rx_scrq) { 2002 for (i = 0; i < adapter->req_rx_queues; i++) { 2003 if (adapter->rx_scrq[i]->irq) { 2004 netdev_dbg(netdev, 2005 "Disabling rx_scrq[%d] irq\n", i); 2006 disable_scrq_irq(adapter, adapter->rx_scrq[i]); 2007 disable_irq(adapter->rx_scrq[i]->irq); 2008 } 2009 } 2010 } 2011 } 2012 2013 static void ibmvnic_cleanup(struct net_device *netdev) 2014 { 2015 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 2016 2017 /* ensure that transmissions are stopped if called by do_reset */ 2018 2019 adapter->tx_queues_active = false; 2020 2021 /* Ensure complete_tx() and ibmvnic_xmit() see ->tx_queues_active 2022 * update so they don't restart a queue after we stop it below. 
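* Both readers check the flag under rcu_read_lock() (see ibmvnic_xmit() and ibmvnic_tx_scrq_clean_buffer()), so the synchronize_rcu() below waits for any checks already in flight.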
2023 */ 2024 synchronize_rcu(); 2025 2026 if (test_bit(0, &adapter->resetting)) 2027 netif_tx_disable(netdev); 2028 else 2029 netif_tx_stop_all_queues(netdev); 2030 2031 ibmvnic_napi_disable(adapter); 2032 ibmvnic_disable_irqs(adapter); 2033 } 2034 2035 static int __ibmvnic_close(struct net_device *netdev) 2036 { 2037 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 2038 int rc = 0; 2039 2040 adapter->state = VNIC_CLOSING; 2041 rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN); 2042 adapter->state = VNIC_CLOSED; 2043 return rc; 2044 } 2045 2046 static int ibmvnic_close(struct net_device *netdev) 2047 { 2048 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 2049 int rc; 2050 2051 netdev_dbg(netdev, "[S:%s FOP:%d FRR:%d] Closing\n", 2052 adapter_state_to_string(adapter->state), 2053 adapter->failover_pending, 2054 adapter->force_reset_recovery); 2055 2056 /* If device failover is pending, just set device state and return. 2057 * Device operation will be handled by reset routine. 2058 */ 2059 if (adapter->failover_pending) { 2060 adapter->state = VNIC_CLOSED; 2061 return 0; 2062 } 2063 2064 rc = __ibmvnic_close(netdev); 2065 ibmvnic_cleanup(netdev); 2066 clean_rx_pools(adapter); 2067 clean_tx_pools(adapter); 2068 2069 return rc; 2070 } 2071 2072 /** 2073 * build_hdr_data - creates L2/L3/L4 header data buffer 2074 * @hdr_field: bitfield determining needed headers 2075 * @skb: socket buffer 2076 * @hdr_len: array of header lengths 2077 * @hdr_data: buffer to write the header to 2078 * 2079 * Reads hdr_field to determine which headers are needed by firmware. 2080 * Builds a buffer containing these headers. Saves individual header 2081 * lengths and total buffer length to be used to build descriptors. 2082 */ 2083 static int build_hdr_data(u8 hdr_field, struct sk_buff *skb, 2084 int *hdr_len, u8 *hdr_data) 2085 { 2086 int len = 0; 2087 u8 *hdr; 2088 2089 if (skb_vlan_tagged(skb) && !skb_vlan_tag_present(skb)) 2090 hdr_len[0] = sizeof(struct vlan_ethhdr); 2091 else 2092 hdr_len[0] = sizeof(struct ethhdr); 2093 2094 if (skb->protocol == htons(ETH_P_IP)) { 2095 hdr_len[1] = ip_hdr(skb)->ihl * 4; 2096 if (ip_hdr(skb)->protocol == IPPROTO_TCP) 2097 hdr_len[2] = tcp_hdrlen(skb); 2098 else if (ip_hdr(skb)->protocol == IPPROTO_UDP) 2099 hdr_len[2] = sizeof(struct udphdr); 2100 } else if (skb->protocol == htons(ETH_P_IPV6)) { 2101 hdr_len[1] = sizeof(struct ipv6hdr); 2102 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) 2103 hdr_len[2] = tcp_hdrlen(skb); 2104 else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP) 2105 hdr_len[2] = sizeof(struct udphdr); 2106 } else if (skb->protocol == htons(ETH_P_ARP)) { 2107 hdr_len[1] = arp_hdr_len(skb->dev); 2108 hdr_len[2] = 0; 2109 } 2110 2111 memset(hdr_data, 0, 120); 2112 if ((hdr_field >> 6) & 1) { 2113 hdr = skb_mac_header(skb); 2114 memcpy(hdr_data, hdr, hdr_len[0]); 2115 len += hdr_len[0]; 2116 } 2117 2118 if ((hdr_field >> 5) & 1) { 2119 hdr = skb_network_header(skb); 2120 memcpy(hdr_data + len, hdr, hdr_len[1]); 2121 len += hdr_len[1]; 2122 } 2123 2124 if ((hdr_field >> 4) & 1) { 2125 hdr = skb_transport_header(skb); 2126 memcpy(hdr_data + len, hdr, hdr_len[2]); 2127 len += hdr_len[2]; 2128 } 2129 return len; 2130 } 2131 2132 /** 2133 * create_hdr_descs - create header and header extension descriptors 2134 * @hdr_field: bitfield determining needed headers 2135 * @hdr_data: buffer containing header data 2136 * @len: length of data buffer 2137 * @hdr_len: array of individual header lengths 2138 * @scrq_arr: descriptor array 2139 * 2140 * Creates header and, 
if needed, header extension descriptors and 2141 * places them in a descriptor array, scrq_arr 2142 */ 2143 2144 static int create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len, 2145 union sub_crq *scrq_arr) 2146 { 2147 union sub_crq hdr_desc; 2148 int tmp_len = len; 2149 int num_descs = 0; 2150 u8 *data, *cur; 2151 int tmp; 2152 2153 while (tmp_len > 0) { 2154 cur = hdr_data + len - tmp_len; 2155 2156 memset(&hdr_desc, 0, sizeof(hdr_desc)); 2157 if (cur != hdr_data) { 2158 data = hdr_desc.hdr_ext.data; 2159 tmp = tmp_len > 29 ? 29 : tmp_len; 2160 hdr_desc.hdr_ext.first = IBMVNIC_CRQ_CMD; 2161 hdr_desc.hdr_ext.type = IBMVNIC_HDR_EXT_DESC; 2162 hdr_desc.hdr_ext.len = tmp; 2163 } else { 2164 data = hdr_desc.hdr.data; 2165 tmp = tmp_len > 24 ? 24 : tmp_len; 2166 hdr_desc.hdr.first = IBMVNIC_CRQ_CMD; 2167 hdr_desc.hdr.type = IBMVNIC_HDR_DESC; 2168 hdr_desc.hdr.len = tmp; 2169 hdr_desc.hdr.l2_len = (u8)hdr_len[0]; 2170 hdr_desc.hdr.l3_len = cpu_to_be16((u16)hdr_len[1]); 2171 hdr_desc.hdr.l4_len = (u8)hdr_len[2]; 2172 hdr_desc.hdr.flag = hdr_field << 1; 2173 } 2174 memcpy(data, cur, tmp); 2175 tmp_len -= tmp; 2176 *scrq_arr = hdr_desc; 2177 scrq_arr++; 2178 num_descs++; 2179 } 2180 2181 return num_descs; 2182 } 2183 2184 /** 2185 * build_hdr_descs_arr - build a header descriptor array 2186 * @skb: tx socket buffer 2187 * @indir_arr: indirect array 2188 * @num_entries: number of descriptors to be sent 2189 * @hdr_field: bit field determining which headers will be sent 2190 * 2191 * This function will build a TX descriptor array with applicable 2192 * L2/L3/L4 packet header descriptors to be sent by send_subcrq_indirect. 2193 */ 2194 2195 static void build_hdr_descs_arr(struct sk_buff *skb, 2196 union sub_crq *indir_arr, 2197 int *num_entries, u8 hdr_field) 2198 { 2199 int hdr_len[3] = {0, 0, 0}; 2200 u8 hdr_data[140] = {0}; 2201 int tot_len; 2202 2203 tot_len = build_hdr_data(hdr_field, skb, hdr_len, 2204 hdr_data); 2205 *num_entries += create_hdr_descs(hdr_field, hdr_data, tot_len, hdr_len, 2206 indir_arr + 1); 2207 } 2208 2209 static int ibmvnic_xmit_workarounds(struct sk_buff *skb, 2210 struct net_device *netdev) 2211 { 2212 /* For some backing devices, mishandling of small packets 2213 * can result in a loss of connection or TX stall. 
Device 2214 * architects recommend that no packet should be smaller 2215 * than the minimum MTU value provided to the driver, so 2216 * pad any packets to that length 2217 */ 2218 if (skb->len < netdev->min_mtu) 2219 return skb_put_padto(skb, netdev->min_mtu); 2220 2221 return 0; 2222 } 2223 2224 static void ibmvnic_tx_scrq_clean_buffer(struct ibmvnic_adapter *adapter, 2225 struct ibmvnic_sub_crq_queue *tx_scrq) 2226 { 2227 struct ibmvnic_ind_xmit_queue *ind_bufp; 2228 struct ibmvnic_tx_buff *tx_buff; 2229 struct ibmvnic_tx_pool *tx_pool; 2230 union sub_crq tx_scrq_entry; 2231 int queue_num; 2232 int entries; 2233 int index; 2234 int i; 2235 2236 ind_bufp = &tx_scrq->ind_buf; 2237 entries = (u64)ind_bufp->index; 2238 queue_num = tx_scrq->pool_index; 2239 2240 for (i = entries - 1; i >= 0; --i) { 2241 tx_scrq_entry = ind_bufp->indir_arr[i]; 2242 if (tx_scrq_entry.v1.type != IBMVNIC_TX_DESC) 2243 continue; 2244 index = be32_to_cpu(tx_scrq_entry.v1.correlator); 2245 if (index & IBMVNIC_TSO_POOL_MASK) { 2246 tx_pool = &adapter->tso_pool[queue_num]; 2247 index &= ~IBMVNIC_TSO_POOL_MASK; 2248 } else { 2249 tx_pool = &adapter->tx_pool[queue_num]; 2250 } 2251 tx_pool->free_map[tx_pool->consumer_index] = index; 2252 tx_pool->consumer_index = tx_pool->consumer_index == 0 ? 2253 tx_pool->num_buffers - 1 : 2254 tx_pool->consumer_index - 1; 2255 tx_buff = &tx_pool->tx_buff[index]; 2256 adapter->netdev->stats.tx_packets--; 2257 adapter->netdev->stats.tx_bytes -= tx_buff->skb->len; 2258 adapter->tx_stats_buffers[queue_num].packets--; 2259 adapter->tx_stats_buffers[queue_num].bytes -= 2260 tx_buff->skb->len; 2261 dev_kfree_skb_any(tx_buff->skb); 2262 tx_buff->skb = NULL; 2263 adapter->netdev->stats.tx_dropped++; 2264 } 2265 2266 ind_bufp->index = 0; 2267 2268 if (atomic_sub_return(entries, &tx_scrq->used) <= 2269 (adapter->req_tx_entries_per_subcrq / 2) && 2270 __netif_subqueue_stopped(adapter->netdev, queue_num)) { 2271 rcu_read_lock(); 2272 2273 if (adapter->tx_queues_active) { 2274 netif_wake_subqueue(adapter->netdev, queue_num); 2275 netdev_dbg(adapter->netdev, "Started queue %d\n", 2276 queue_num); 2277 } 2278 2279 rcu_read_unlock(); 2280 } 2281 } 2282 2283 static int ibmvnic_tx_scrq_flush(struct ibmvnic_adapter *adapter, 2284 struct ibmvnic_sub_crq_queue *tx_scrq) 2285 { 2286 struct ibmvnic_ind_xmit_queue *ind_bufp; 2287 u64 dma_addr; 2288 u64 entries; 2289 u64 handle; 2290 int rc; 2291 2292 ind_bufp = &tx_scrq->ind_buf; 2293 dma_addr = (u64)ind_bufp->indir_dma; 2294 entries = (u64)ind_bufp->index; 2295 handle = tx_scrq->handle; 2296 2297 if (!entries) 2298 return 0; 2299 rc = send_subcrq_indirect(adapter, handle, dma_addr, entries); 2300 if (rc) 2301 ibmvnic_tx_scrq_clean_buffer(adapter, tx_scrq); 2302 else 2303 ind_bufp->index = 0; 2304 return 0; 2305 } 2306 2307 static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) 2308 { 2309 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 2310 int queue_num = skb_get_queue_mapping(skb); 2311 u8 *hdrs = (u8 *)&adapter->tx_rx_desc_req; 2312 struct device *dev = &adapter->vdev->dev; 2313 struct ibmvnic_ind_xmit_queue *ind_bufp; 2314 struct ibmvnic_tx_buff *tx_buff = NULL; 2315 struct ibmvnic_sub_crq_queue *tx_scrq; 2316 struct ibmvnic_long_term_buff *ltb; 2317 struct ibmvnic_tx_pool *tx_pool; 2318 unsigned int tx_send_failed = 0; 2319 netdev_tx_t ret = NETDEV_TX_OK; 2320 unsigned int tx_map_failed = 0; 2321 union sub_crq indir_arr[16]; 2322 unsigned int tx_dropped = 0; 2323 unsigned int tx_packets = 0; 2324 unsigned int tx_bytes = 
0; 2325 dma_addr_t data_dma_addr; 2326 struct netdev_queue *txq; 2327 unsigned long lpar_rc; 2328 union sub_crq tx_crq; 2329 unsigned int offset; 2330 int num_entries = 1; 2331 unsigned char *dst; 2332 int bufidx = 0; 2333 u8 proto = 0; 2334 2335 /* If a reset is in progress, drop the packet since 2336 * the scrqs may get torn down. Otherwise use the 2337 * rcu to ensure reset waits for us to complete. 2338 */ 2339 rcu_read_lock(); 2340 if (!adapter->tx_queues_active) { 2341 dev_kfree_skb_any(skb); 2342 2343 tx_send_failed++; 2344 tx_dropped++; 2345 ret = NETDEV_TX_OK; 2346 goto out; 2347 } 2348 2349 tx_scrq = adapter->tx_scrq[queue_num]; 2350 txq = netdev_get_tx_queue(netdev, queue_num); 2351 ind_bufp = &tx_scrq->ind_buf; 2352 2353 if (ibmvnic_xmit_workarounds(skb, netdev)) { 2354 tx_dropped++; 2355 tx_send_failed++; 2356 ret = NETDEV_TX_OK; 2357 ibmvnic_tx_scrq_flush(adapter, tx_scrq); 2358 goto out; 2359 } 2360 2361 if (skb_is_gso(skb)) 2362 tx_pool = &adapter->tso_pool[queue_num]; 2363 else 2364 tx_pool = &adapter->tx_pool[queue_num]; 2365 2366 bufidx = tx_pool->free_map[tx_pool->consumer_index]; 2367 2368 if (bufidx == IBMVNIC_INVALID_MAP) { 2369 dev_kfree_skb_any(skb); 2370 tx_send_failed++; 2371 tx_dropped++; 2372 ibmvnic_tx_scrq_flush(adapter, tx_scrq); 2373 ret = NETDEV_TX_OK; 2374 goto out; 2375 } 2376 2377 tx_pool->free_map[tx_pool->consumer_index] = IBMVNIC_INVALID_MAP; 2378 2379 map_txpool_buf_to_ltb(tx_pool, bufidx, &ltb, &offset); 2380 2381 dst = ltb->buff + offset; 2382 memset(dst, 0, tx_pool->buf_size); 2383 data_dma_addr = ltb->addr + offset; 2384 2385 if (skb_shinfo(skb)->nr_frags) { 2386 int cur, i; 2387 2388 /* Copy the head */ 2389 skb_copy_from_linear_data(skb, dst, skb_headlen(skb)); 2390 cur = skb_headlen(skb); 2391 2392 /* Copy the frags */ 2393 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 2394 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 2395 2396 memcpy(dst + cur, skb_frag_address(frag), 2397 skb_frag_size(frag)); 2398 cur += skb_frag_size(frag); 2399 } 2400 } else { 2401 skb_copy_from_linear_data(skb, dst, skb->len); 2402 } 2403 2404 /* post changes to long_term_buff *dst before VIOS accessing it */ 2405 dma_wmb(); 2406 2407 tx_pool->consumer_index = 2408 (tx_pool->consumer_index + 1) % tx_pool->num_buffers; 2409 2410 tx_buff = &tx_pool->tx_buff[bufidx]; 2411 tx_buff->skb = skb; 2412 tx_buff->index = bufidx; 2413 tx_buff->pool_index = queue_num; 2414 2415 memset(&tx_crq, 0, sizeof(tx_crq)); 2416 tx_crq.v1.first = IBMVNIC_CRQ_CMD; 2417 tx_crq.v1.type = IBMVNIC_TX_DESC; 2418 tx_crq.v1.n_crq_elem = 1; 2419 tx_crq.v1.n_sge = 1; 2420 tx_crq.v1.flags1 = IBMVNIC_TX_COMP_NEEDED; 2421 2422 if (skb_is_gso(skb)) 2423 tx_crq.v1.correlator = 2424 cpu_to_be32(bufidx | IBMVNIC_TSO_POOL_MASK); 2425 else 2426 tx_crq.v1.correlator = cpu_to_be32(bufidx); 2427 tx_crq.v1.dma_reg = cpu_to_be16(ltb->map_id); 2428 tx_crq.v1.sge_len = cpu_to_be32(skb->len); 2429 tx_crq.v1.ioba = cpu_to_be64(data_dma_addr); 2430 2431 if (adapter->vlan_header_insertion && skb_vlan_tag_present(skb)) { 2432 tx_crq.v1.flags2 |= IBMVNIC_TX_VLAN_INSERT; 2433 tx_crq.v1.vlan_id = cpu_to_be16(skb->vlan_tci); 2434 } 2435 2436 if (skb->protocol == htons(ETH_P_IP)) { 2437 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV4; 2438 proto = ip_hdr(skb)->protocol; 2439 } else if (skb->protocol == htons(ETH_P_IPV6)) { 2440 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV6; 2441 proto = ipv6_hdr(skb)->nexthdr; 2442 } 2443 2444 if (proto == IPPROTO_TCP) 2445 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_TCP; 2446 else if (proto ==
IPPROTO_UDP) 2447 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_UDP; 2448 2449 if (skb->ip_summed == CHECKSUM_PARTIAL) { 2450 tx_crq.v1.flags1 |= IBMVNIC_TX_CHKSUM_OFFLOAD; 2451 hdrs += 2; 2452 } 2453 if (skb_is_gso(skb)) { 2454 tx_crq.v1.flags1 |= IBMVNIC_TX_LSO; 2455 tx_crq.v1.mss = cpu_to_be16(skb_shinfo(skb)->gso_size); 2456 hdrs += 2; 2457 } 2458 2459 if ((*hdrs >> 7) & 1) 2460 build_hdr_descs_arr(skb, indir_arr, &num_entries, *hdrs); 2461 2462 tx_crq.v1.n_crq_elem = num_entries; 2463 tx_buff->num_entries = num_entries; 2464 /* flush buffer if current entry can not fit */ 2465 if (num_entries + ind_bufp->index > IBMVNIC_MAX_IND_DESCS) { 2466 lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq); 2467 if (lpar_rc != H_SUCCESS) 2468 goto tx_flush_err; 2469 } 2470 2471 indir_arr[0] = tx_crq; 2472 memcpy(&ind_bufp->indir_arr[ind_bufp->index], &indir_arr[0], 2473 num_entries * sizeof(struct ibmvnic_generic_scrq)); 2474 ind_bufp->index += num_entries; 2475 if (__netdev_tx_sent_queue(txq, skb->len, 2476 netdev_xmit_more() && 2477 ind_bufp->index < IBMVNIC_MAX_IND_DESCS)) { 2478 lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq); 2479 if (lpar_rc != H_SUCCESS) 2480 goto tx_err; 2481 } 2482 2483 if (atomic_add_return(num_entries, &tx_scrq->used) 2484 >= adapter->req_tx_entries_per_subcrq) { 2485 netdev_dbg(netdev, "Stopping queue %d\n", queue_num); 2486 netif_stop_subqueue(netdev, queue_num); 2487 } 2488 2489 tx_packets++; 2490 tx_bytes += skb->len; 2491 txq_trans_cond_update(txq); 2492 ret = NETDEV_TX_OK; 2493 goto out; 2494 2495 tx_flush_err: 2496 dev_kfree_skb_any(skb); 2497 tx_buff->skb = NULL; 2498 tx_pool->consumer_index = tx_pool->consumer_index == 0 ? 2499 tx_pool->num_buffers - 1 : 2500 tx_pool->consumer_index - 1; 2501 tx_dropped++; 2502 tx_err: 2503 if (lpar_rc != H_CLOSED && lpar_rc != H_PARAMETER) 2504 dev_err_ratelimited(dev, "tx: send failed\n"); 2505 2506 if (lpar_rc == H_CLOSED || adapter->failover_pending) { 2507 /* Disable TX and report carrier off if queue is closed 2508 * or pending failover. 2509 * Firmware guarantees that a signal will be sent to the 2510 * driver, triggering a reset or some other action. 
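* Keeping the queues stopped until then also prevents the stack from handing us more packets that would only be dropped.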
2511 */ 2512 netif_tx_stop_all_queues(netdev); 2513 netif_carrier_off(netdev); 2514 } 2515 out: 2516 rcu_read_unlock(); 2517 netdev->stats.tx_dropped += tx_dropped; 2518 netdev->stats.tx_bytes += tx_bytes; 2519 netdev->stats.tx_packets += tx_packets; 2520 adapter->tx_send_failed += tx_send_failed; 2521 adapter->tx_map_failed += tx_map_failed; 2522 adapter->tx_stats_buffers[queue_num].packets += tx_packets; 2523 adapter->tx_stats_buffers[queue_num].bytes += tx_bytes; 2524 adapter->tx_stats_buffers[queue_num].dropped_packets += tx_dropped; 2525 2526 return ret; 2527 } 2528 2529 static void ibmvnic_set_multi(struct net_device *netdev) 2530 { 2531 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 2532 struct netdev_hw_addr *ha; 2533 union ibmvnic_crq crq; 2534 2535 memset(&crq, 0, sizeof(crq)); 2536 crq.request_capability.first = IBMVNIC_CRQ_CMD; 2537 crq.request_capability.cmd = REQUEST_CAPABILITY; 2538 2539 if (netdev->flags & IFF_PROMISC) { 2540 if (!adapter->promisc_supported) 2541 return; 2542 } else { 2543 if (netdev->flags & IFF_ALLMULTI) { 2544 /* Accept all multicast */ 2545 memset(&crq, 0, sizeof(crq)); 2546 crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD; 2547 crq.multicast_ctrl.cmd = MULTICAST_CTRL; 2548 crq.multicast_ctrl.flags = IBMVNIC_ENABLE_ALL; 2549 ibmvnic_send_crq(adapter, &crq); 2550 } else if (netdev_mc_empty(netdev)) { 2551 /* Reject all multicast */ 2552 memset(&crq, 0, sizeof(crq)); 2553 crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD; 2554 crq.multicast_ctrl.cmd = MULTICAST_CTRL; 2555 crq.multicast_ctrl.flags = IBMVNIC_DISABLE_ALL; 2556 ibmvnic_send_crq(adapter, &crq); 2557 } else { 2558 /* Accept one or more multicast(s) */ 2559 netdev_for_each_mc_addr(ha, netdev) { 2560 memset(&crq, 0, sizeof(crq)); 2561 crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD; 2562 crq.multicast_ctrl.cmd = MULTICAST_CTRL; 2563 crq.multicast_ctrl.flags = IBMVNIC_ENABLE_MC; 2564 ether_addr_copy(&crq.multicast_ctrl.mac_addr[0], 2565 ha->addr); 2566 ibmvnic_send_crq(adapter, &crq); 2567 } 2568 } 2569 } 2570 } 2571 2572 static int __ibmvnic_set_mac(struct net_device *netdev, u8 *dev_addr) 2573 { 2574 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 2575 union ibmvnic_crq crq; 2576 int rc; 2577 2578 if (!is_valid_ether_addr(dev_addr)) { 2579 rc = -EADDRNOTAVAIL; 2580 goto err; 2581 } 2582 2583 memset(&crq, 0, sizeof(crq)); 2584 crq.change_mac_addr.first = IBMVNIC_CRQ_CMD; 2585 crq.change_mac_addr.cmd = CHANGE_MAC_ADDR; 2586 ether_addr_copy(&crq.change_mac_addr.mac_addr[0], dev_addr); 2587 2588 mutex_lock(&adapter->fw_lock); 2589 adapter->fw_done_rc = 0; 2590 reinit_completion(&adapter->fw_done); 2591 2592 rc = ibmvnic_send_crq(adapter, &crq); 2593 if (rc) { 2594 rc = -EIO; 2595 mutex_unlock(&adapter->fw_lock); 2596 goto err; 2597 } 2598 2599 rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000); 2600 /* netdev->dev_addr is changed in handle_change_mac_rsp function */ 2601 if (rc || adapter->fw_done_rc) { 2602 rc = -EIO; 2603 mutex_unlock(&adapter->fw_lock); 2604 goto err; 2605 } 2606 mutex_unlock(&adapter->fw_lock); 2607 return 0; 2608 err: 2609 ether_addr_copy(adapter->mac_addr, netdev->dev_addr); 2610 return rc; 2611 } 2612 2613 static int ibmvnic_set_mac(struct net_device *netdev, void *p) 2614 { 2615 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 2616 struct sockaddr *addr = p; 2617 int rc; 2618 2619 rc = 0; 2620 if (!is_valid_ether_addr(addr->sa_data)) 2621 return -EADDRNOTAVAIL; 2622 2623 ether_addr_copy(adapter->mac_addr, addr->sa_data); 2624 if (adapter->state != 
VNIC_PROBED) 2625 rc = __ibmvnic_set_mac(netdev, addr->sa_data); 2626 2627 return rc; 2628 } 2629 2630 static const char *reset_reason_to_string(enum ibmvnic_reset_reason reason) 2631 { 2632 switch (reason) { 2633 case VNIC_RESET_FAILOVER: 2634 return "FAILOVER"; 2635 case VNIC_RESET_MOBILITY: 2636 return "MOBILITY"; 2637 case VNIC_RESET_FATAL: 2638 return "FATAL"; 2639 case VNIC_RESET_NON_FATAL: 2640 return "NON_FATAL"; 2641 case VNIC_RESET_TIMEOUT: 2642 return "TIMEOUT"; 2643 case VNIC_RESET_CHANGE_PARAM: 2644 return "CHANGE_PARAM"; 2645 case VNIC_RESET_PASSIVE_INIT: 2646 return "PASSIVE_INIT"; 2647 } 2648 return "UNKNOWN"; 2649 } 2650 2651 /* 2652 * Initialize the init_done completion and return code values. We 2653 * can get a transport event just after registering the CRQ and the 2654 * tasklet will use this to communicate the transport event. To ensure 2655 * we don't miss the notification/error, initialize these _before_ 2656 * registering the CRQ. 2657 */ 2658 static inline void reinit_init_done(struct ibmvnic_adapter *adapter) 2659 { 2660 reinit_completion(&adapter->init_done); 2661 adapter->init_done_rc = 0; 2662 } 2663 2664 /* 2665 * do_reset returns zero if we are able to keep processing reset events, or 2666 * non-zero if we hit a fatal error and must halt. 2667 */ 2668 static int do_reset(struct ibmvnic_adapter *adapter, 2669 struct ibmvnic_rwi *rwi, u32 reset_state) 2670 { 2671 struct net_device *netdev = adapter->netdev; 2672 u64 old_num_rx_queues, old_num_tx_queues; 2673 u64 old_num_rx_slots, old_num_tx_slots; 2674 int rc; 2675 2676 netdev_dbg(adapter->netdev, 2677 "[S:%s FOP:%d] Reset reason: %s, reset_state: %s\n", 2678 adapter_state_to_string(adapter->state), 2679 adapter->failover_pending, 2680 reset_reason_to_string(rwi->reset_reason), 2681 adapter_state_to_string(reset_state)); 2682 2683 adapter->reset_reason = rwi->reset_reason; 2684 /* requestor of VNIC_RESET_CHANGE_PARAM already has the rtnl lock */ 2685 if (!(adapter->reset_reason == VNIC_RESET_CHANGE_PARAM)) 2686 rtnl_lock(); 2687 2688 /* Now that we have the rtnl lock, clear any pending failover. 2689 * This will ensure ibmvnic_open() has either completed or will 2690 * block until failover is complete. 2691 */ 2692 if (rwi->reset_reason == VNIC_RESET_FAILOVER) 2693 adapter->failover_pending = false; 2694 2695 /* read the state and check (again) after getting rtnl */ 2696 reset_state = adapter->state; 2697 2698 if (reset_state == VNIC_REMOVING || reset_state == VNIC_REMOVED) { 2699 rc = -EBUSY; 2700 goto out; 2701 } 2702 2703 netif_carrier_off(netdev); 2704 2705 old_num_rx_queues = adapter->req_rx_queues; 2706 old_num_tx_queues = adapter->req_tx_queues; 2707 old_num_rx_slots = adapter->req_rx_add_entries_per_subcrq; 2708 old_num_tx_slots = adapter->req_tx_entries_per_subcrq; 2709 2710 ibmvnic_cleanup(netdev); 2711 2712 if (reset_state == VNIC_OPEN && 2713 adapter->reset_reason != VNIC_RESET_MOBILITY && 2714 adapter->reset_reason != VNIC_RESET_FAILOVER) { 2715 if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) { 2716 rc = __ibmvnic_close(netdev); 2717 if (rc) 2718 goto out; 2719 } else { 2720 adapter->state = VNIC_CLOSING; 2721 2722 /* Release the RTNL lock before link state change and 2723 * re-acquire after the link state change to allow 2724 * linkwatch_event to grab the RTNL lock and run during 2725 * a reset.
2726 */ 2727 rtnl_unlock(); 2728 rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN); 2729 rtnl_lock(); 2730 if (rc) 2731 goto out; 2732 2733 if (adapter->state == VNIC_OPEN) { 2734 /* When we dropped rtnl, ibmvnic_open() got 2735 * it and noticed that we are resetting and 2736 * set the adapter state to OPEN. Update our 2737 * new "target" state, and resume the reset 2738 * from VNIC_CLOSING state. 2739 */ 2740 netdev_dbg(netdev, 2741 "Open changed state from %s, updating.\n", 2742 adapter_state_to_string(reset_state)); 2743 reset_state = VNIC_OPEN; 2744 adapter->state = VNIC_CLOSING; 2745 } 2746 2747 if (adapter->state != VNIC_CLOSING) { 2748 /* If someone else changed the adapter state 2749 * when we dropped the rtnl, fail the reset 2750 */ 2751 rc = -EAGAIN; 2752 goto out; 2753 } 2754 adapter->state = VNIC_CLOSED; 2755 } 2756 } 2757 2758 if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) { 2759 release_resources(adapter); 2760 release_sub_crqs(adapter, 1); 2761 release_crq_queue(adapter); 2762 } 2763 2764 if (adapter->reset_reason != VNIC_RESET_NON_FATAL) { 2765 /* remove the closed state so when we call open it appears 2766 * we are coming from the probed state. 2767 */ 2768 adapter->state = VNIC_PROBED; 2769 2770 reinit_init_done(adapter); 2771 2772 if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) { 2773 rc = init_crq_queue(adapter); 2774 } else if (adapter->reset_reason == VNIC_RESET_MOBILITY) { 2775 rc = ibmvnic_reenable_crq_queue(adapter); 2776 release_sub_crqs(adapter, 1); 2777 } else { 2778 rc = ibmvnic_reset_crq(adapter); 2779 if (rc == H_CLOSED || rc == H_SUCCESS) { 2780 rc = vio_enable_interrupts(adapter->vdev); 2781 if (rc) 2782 netdev_err(adapter->netdev, 2783 "Reset failed to enable interrupts. rc=%d\n", 2784 rc); 2785 } 2786 } 2787 2788 if (rc) { 2789 netdev_err(adapter->netdev, 2790 "Reset couldn't initialize crq. rc=%d\n", rc); 2791 goto out; 2792 } 2793 2794 rc = ibmvnic_reset_init(adapter, true); 2795 if (rc) 2796 goto out; 2797 2798 /* If the adapter was in PROBE or DOWN state prior to the reset, 2799 * exit here. 
2800 */ 2801 if (reset_state == VNIC_PROBED || reset_state == VNIC_DOWN) { 2802 rc = 0; 2803 goto out; 2804 } 2805 2806 rc = ibmvnic_login(netdev); 2807 if (rc) 2808 goto out; 2809 2810 if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) { 2811 rc = init_resources(adapter); 2812 if (rc) 2813 goto out; 2814 } else if (adapter->req_rx_queues != old_num_rx_queues || 2815 adapter->req_tx_queues != old_num_tx_queues || 2816 adapter->req_rx_add_entries_per_subcrq != 2817 old_num_rx_slots || 2818 adapter->req_tx_entries_per_subcrq != 2819 old_num_tx_slots || 2820 !adapter->rx_pool || 2821 !adapter->tso_pool || 2822 !adapter->tx_pool) { 2823 release_napi(adapter); 2824 release_vpd_data(adapter); 2825 2826 rc = init_resources(adapter); 2827 if (rc) 2828 goto out; 2829 2830 } else { 2831 rc = init_tx_pools(netdev); 2832 if (rc) { 2833 netdev_dbg(netdev, 2834 "init tx pools failed (%d)\n", 2835 rc); 2836 goto out; 2837 } 2838 2839 rc = init_rx_pools(netdev); 2840 if (rc) { 2841 netdev_dbg(netdev, 2842 "init rx pools failed (%d)\n", 2843 rc); 2844 goto out; 2845 } 2846 } 2847 ibmvnic_disable_irqs(adapter); 2848 } 2849 adapter->state = VNIC_CLOSED; 2850 2851 if (reset_state == VNIC_CLOSED) { 2852 rc = 0; 2853 goto out; 2854 } 2855 2856 rc = __ibmvnic_open(netdev); 2857 if (rc) { 2858 rc = IBMVNIC_OPEN_FAILED; 2859 goto out; 2860 } 2861 2862 /* refresh device's multicast list */ 2863 ibmvnic_set_multi(netdev); 2864 2865 if (adapter->reset_reason == VNIC_RESET_FAILOVER || 2866 adapter->reset_reason == VNIC_RESET_MOBILITY) 2867 __netdev_notify_peers(netdev); 2868 2869 rc = 0; 2870 2871 out: 2872 /* restore the adapter state if reset failed */ 2873 if (rc) 2874 adapter->state = reset_state; 2875 /* requestor of VNIC_RESET_CHANGE_PARAM should still hold the rtnl lock */ 2876 if (!(adapter->reset_reason == VNIC_RESET_CHANGE_PARAM)) 2877 rtnl_unlock(); 2878 2879 netdev_dbg(adapter->netdev, "[S:%s FOP:%d] Reset done, rc %d\n", 2880 adapter_state_to_string(adapter->state), 2881 adapter->failover_pending, rc); 2882 return rc; 2883 } 2884 2885 static int do_hard_reset(struct ibmvnic_adapter *adapter, 2886 struct ibmvnic_rwi *rwi, u32 reset_state) 2887 { 2888 struct net_device *netdev = adapter->netdev; 2889 int rc; 2890 2891 netdev_dbg(adapter->netdev, "Hard resetting driver (%s)\n", 2892 reset_reason_to_string(rwi->reset_reason)); 2893 2894 /* read the state and check (again) after getting rtnl */ 2895 reset_state = adapter->state; 2896 2897 if (reset_state == VNIC_REMOVING || reset_state == VNIC_REMOVED) { 2898 rc = -EBUSY; 2899 goto out; 2900 } 2901 2902 netif_carrier_off(netdev); 2903 adapter->reset_reason = rwi->reset_reason; 2904 2905 ibmvnic_cleanup(netdev); 2906 release_resources(adapter); 2907 release_sub_crqs(adapter, 0); 2908 release_crq_queue(adapter); 2909 2910 /* remove the closed state so when we call open it appears 2911 * we are coming from the probed state. 2912 */ 2913 adapter->state = VNIC_PROBED; 2914 2915 reinit_init_done(adapter); 2916 2917 rc = init_crq_queue(adapter); 2918 if (rc) { 2919 netdev_err(adapter->netdev, 2920 "Couldn't initialize crq. rc=%d\n", rc); 2921 goto out; 2922 } 2923 2924 rc = ibmvnic_reset_init(adapter, false); 2925 if (rc) 2926 goto out; 2927 2928 /* If the adapter was in PROBE or DOWN state prior to the reset, 2929 * exit here. 
2930 */ 2931 if (reset_state == VNIC_PROBED || reset_state == VNIC_DOWN) 2932 goto out; 2933 2934 rc = ibmvnic_login(netdev); 2935 if (rc) 2936 goto out; 2937 2938 rc = init_resources(adapter); 2939 if (rc) 2940 goto out; 2941 2942 ibmvnic_disable_irqs(adapter); 2943 adapter->state = VNIC_CLOSED; 2944 2945 if (reset_state == VNIC_CLOSED) 2946 goto out; 2947 2948 rc = __ibmvnic_open(netdev); 2949 if (rc) { 2950 rc = IBMVNIC_OPEN_FAILED; 2951 goto out; 2952 } 2953 2954 __netdev_notify_peers(netdev); 2955 out: 2956 /* restore adapter state if reset failed */ 2957 if (rc) 2958 adapter->state = reset_state; 2959 netdev_dbg(adapter->netdev, "[S:%s FOP:%d] Hard reset done, rc %d\n", 2960 adapter_state_to_string(adapter->state), 2961 adapter->failover_pending, rc); 2962 return rc; 2963 } 2964 2965 static struct ibmvnic_rwi *get_next_rwi(struct ibmvnic_adapter *adapter) 2966 { 2967 struct ibmvnic_rwi *rwi; 2968 unsigned long flags; 2969 2970 spin_lock_irqsave(&adapter->rwi_lock, flags); 2971 2972 if (!list_empty(&adapter->rwi_list)) { 2973 rwi = list_first_entry(&adapter->rwi_list, struct ibmvnic_rwi, 2974 list); 2975 list_del(&rwi->list); 2976 } else { 2977 rwi = NULL; 2978 } 2979 2980 spin_unlock_irqrestore(&adapter->rwi_lock, flags); 2981 return rwi; 2982 } 2983 2984 /** 2985 * do_passive_init - complete probing when partner device is detected. 2986 * @adapter: ibmvnic_adapter struct 2987 * 2988 * If the ibmvnic device does not have a partner device to communicate with at boot 2989 * and that partner device comes online at a later time, this function is called 2990 * to complete the initialization process of ibmvnic device. 2991 * Caller is expected to hold rtnl_lock(). 2992 * 2993 * Returns non-zero if sub-CRQs are not initialized properly leaving the device 2994 * in the down state. 2995 * Returns 0 upon success and the device is in PROBED state. 2996 */ 2997 2998 static int do_passive_init(struct ibmvnic_adapter *adapter) 2999 { 3000 unsigned long timeout = msecs_to_jiffies(30000); 3001 struct net_device *netdev = adapter->netdev; 3002 struct device *dev = &adapter->vdev->dev; 3003 int rc; 3004 3005 netdev_dbg(netdev, "Partner device found, probing.\n"); 3006 3007 adapter->state = VNIC_PROBING; 3008 reinit_completion(&adapter->init_done); 3009 adapter->init_done_rc = 0; 3010 adapter->crq.active = true; 3011 3012 rc = send_crq_init_complete(adapter); 3013 if (rc) 3014 goto out; 3015 3016 rc = send_version_xchg(adapter); 3017 if (rc) 3018 netdev_dbg(adapter->netdev, "send_version_xchg failed, rc=%d\n", rc); 3019 3020 if (!wait_for_completion_timeout(&adapter->init_done, timeout)) { 3021 dev_err(dev, "Initialization sequence timed out\n"); 3022 rc = -ETIMEDOUT; 3023 goto out; 3024 } 3025 3026 rc = init_sub_crqs(adapter); 3027 if (rc) { 3028 dev_err(dev, "Initialization of sub crqs failed, rc=%d\n", rc); 3029 goto out; 3030 } 3031 3032 rc = init_sub_crq_irqs(adapter); 3033 if (rc) { 3034 dev_err(dev, "Failed to initialize sub crq irqs, rc=%d\n", rc); 3035 goto init_failed; 3036 } 3037 3038 netdev->mtu = adapter->req_mtu - ETH_HLEN; 3039 netdev->min_mtu = adapter->min_mtu - ETH_HLEN; 3040 netdev->max_mtu = adapter->max_mtu - ETH_HLEN; 3041 3042 adapter->state = VNIC_PROBED; 3043 netdev_dbg(netdev, "Probed successfully. Waiting for signal from partner device.\n"); 3044 3045 return 0; 3046 3047 init_failed: 3048 release_sub_crqs(adapter, 1); 3049 out: 3050 adapter->state = VNIC_DOWN; 3051 return rc; 3052 } 3053 3054 static void __ibmvnic_reset(struct work_struct *work) 3055 { 3056 struct ibmvnic_adapter *adapter; 3057 unsigned int timeout = 5000; 3058 struct ibmvnic_rwi *tmprwi; 3059 bool saved_state = false; 3060 struct ibmvnic_rwi *rwi; 3061 unsigned long flags; 3062 struct device *dev; 3063 bool need_reset; 3064 int num_fails = 0; 3065 u32 reset_state; 3066 int rc = 0; 3067 3068 adapter = container_of(work, struct ibmvnic_adapter, ibmvnic_reset); 3069 dev = &adapter->vdev->dev; 3070 3071 /* Wait for ibmvnic_probe() to complete. If probe is taking too long 3072 * or if another reset is in progress, defer work for now. If probe 3073 * eventually fails it will flush and terminate our work. 3074 * 3075 * Three possibilities here: 3076 * 1. Adapter being removed - just return 3077 * 2. Timed out on probe or another reset in progress - delay the work 3078 * 3. Completed probe - perform any resets in queue 3079 */ 3080 if (adapter->state == VNIC_PROBING && 3081 !wait_for_completion_timeout(&adapter->probe_done, timeout)) { 3082 dev_err(dev, "Reset thread timed out on probe\n"); 3083 queue_delayed_work(system_long_wq, 3084 &adapter->ibmvnic_delayed_reset, 3085 IBMVNIC_RESET_DELAY); 3086 return; 3087 } 3088 3089 /* adapter is done with probe (i.e state is never VNIC_PROBING now) */ 3090 if (adapter->state == VNIC_REMOVING) 3091 return; 3092 3093 /* ->rwi_list is stable now (no one else is removing entries) */ 3094 3095 /* ibmvnic_probe() may have purged the reset queue after we were 3096 * scheduled to process a reset so there may be no resets to process. 3097 * Before setting the ->resetting bit though, we have to make sure 3098 * that there is in fact a reset to process. Otherwise we may race 3099 * with ibmvnic_open() and end up leaving the vnic down: 3100 * 3101 * __ibmvnic_reset() ibmvnic_open() 3102 * ----------------- -------------- 3103 * 3104 * set ->resetting bit 3105 * find ->resetting bit is set 3106 * set ->state to IBMVNIC_OPEN (i.e 3107 * assume reset will open device) 3108 * return 3109 * find reset queue empty 3110 * return 3111 * 3112 * Neither performed vnic login/open and vnic stays down 3113 * 3114 * If we hold the lock and conditionally set the bit, either we 3115 * or ibmvnic_open() will complete the open.
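* (The lock in question is ->rwi_lock; it is taken below around the list_empty() check and the test_and_set_bit_lock().)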
3116 */ 3117 need_reset = false; 3118 spin_lock(&adapter->rwi_lock); 3119 if (!list_empty(&adapter->rwi_list)) { 3120 if (test_and_set_bit_lock(0, &adapter->resetting)) { 3121 queue_delayed_work(system_long_wq, 3122 &adapter->ibmvnic_delayed_reset, 3123 IBMVNIC_RESET_DELAY); 3124 } else { 3125 need_reset = true; 3126 } 3127 } 3128 spin_unlock(&adapter->rwi_lock); 3129 3130 if (!need_reset) 3131 return; 3132 3133 rwi = get_next_rwi(adapter); 3134 while (rwi) { 3135 spin_lock_irqsave(&adapter->state_lock, flags); 3136 3137 if (adapter->state == VNIC_REMOVING || 3138 adapter->state == VNIC_REMOVED) { 3139 spin_unlock_irqrestore(&adapter->state_lock, flags); 3140 kfree(rwi); 3141 rc = EBUSY; 3142 break; 3143 } 3144 3145 if (!saved_state) { 3146 reset_state = adapter->state; 3147 saved_state = true; 3148 } 3149 spin_unlock_irqrestore(&adapter->state_lock, flags); 3150 3151 if (rwi->reset_reason == VNIC_RESET_PASSIVE_INIT) { 3152 rtnl_lock(); 3153 rc = do_passive_init(adapter); 3154 rtnl_unlock(); 3155 if (!rc) 3156 netif_carrier_on(adapter->netdev); 3157 } else if (adapter->force_reset_recovery) { 3158 /* Since we are doing a hard reset now, clear the 3159 * failover_pending flag so we don't ignore any 3160 * future MOBILITY or other resets. 3161 */ 3162 adapter->failover_pending = false; 3163 3164 /* Transport event occurred during previous reset */ 3165 if (adapter->wait_for_reset) { 3166 /* Previous was CHANGE_PARAM; caller locked */ 3167 adapter->force_reset_recovery = false; 3168 rc = do_hard_reset(adapter, rwi, reset_state); 3169 } else { 3170 rtnl_lock(); 3171 adapter->force_reset_recovery = false; 3172 rc = do_hard_reset(adapter, rwi, reset_state); 3173 rtnl_unlock(); 3174 } 3175 if (rc) 3176 num_fails++; 3177 else 3178 num_fails = 0; 3179 3180 /* If auto-priority-failover is enabled we can get 3181 * back to back failovers during resets, resulting 3182 * in at least two failed resets (from high-priority 3183 * backing device to low-priority one and then back) 3184 * If resets continue to fail beyond that, give the 3185 * adapter some time to settle down before retrying. 3186 */ 3187 if (num_fails >= 3) { 3188 netdev_dbg(adapter->netdev, 3189 "[S:%s] Hard reset failed %d times, waiting 60 secs\n", 3190 adapter_state_to_string(adapter->state), 3191 num_fails); 3192 set_current_state(TASK_UNINTERRUPTIBLE); 3193 schedule_timeout(60 * HZ); 3194 } 3195 } else { 3196 rc = do_reset(adapter, rwi, reset_state); 3197 } 3198 tmprwi = rwi; 3199 adapter->last_reset_time = jiffies; 3200 3201 if (rc) 3202 netdev_dbg(adapter->netdev, "Reset failed, rc=%d\n", rc); 3203 3204 rwi = get_next_rwi(adapter); 3205 3206 /* 3207 * If there are no resets queued and the previous reset failed, 3208 * the adapter would be in an undefined state. So retry the 3209 * previous reset as a hard reset. 3210 * 3211 * Else, free the previous rwi and, if there is another reset 3212 * queued, process the new reset even if previous reset failed 3213 * (the previous reset could have failed because of a fail 3214 * over for instance, so process the fail over). 
3215 */ 3216 if (!rwi && rc) 3217 rwi = tmprwi; 3218 else 3219 kfree(tmprwi); 3220 3221 if (rwi && (rwi->reset_reason == VNIC_RESET_FAILOVER || 3222 rwi->reset_reason == VNIC_RESET_MOBILITY || rc)) 3223 adapter->force_reset_recovery = true; 3224 } 3225 3226 if (adapter->wait_for_reset) { 3227 adapter->reset_done_rc = rc; 3228 complete(&adapter->reset_done); 3229 } 3230 3231 clear_bit_unlock(0, &adapter->resetting); 3232 3233 netdev_dbg(adapter->netdev, 3234 "[S:%s FRR:%d WFR:%d] Done processing resets\n", 3235 adapter_state_to_string(adapter->state), 3236 adapter->force_reset_recovery, 3237 adapter->wait_for_reset); 3238 } 3239 3240 static void __ibmvnic_delayed_reset(struct work_struct *work) 3241 { 3242 struct ibmvnic_adapter *adapter; 3243 3244 adapter = container_of(work, struct ibmvnic_adapter, 3245 ibmvnic_delayed_reset.work); 3246 __ibmvnic_reset(&adapter->ibmvnic_reset); 3247 } 3248 3249 static void flush_reset_queue(struct ibmvnic_adapter *adapter) 3250 { 3251 struct list_head *entry, *tmp_entry; 3252 3253 if (!list_empty(&adapter->rwi_list)) { 3254 list_for_each_safe(entry, tmp_entry, &adapter->rwi_list) { 3255 list_del(entry); 3256 kfree(list_entry(entry, struct ibmvnic_rwi, list)); 3257 } 3258 } 3259 } 3260 3261 static int ibmvnic_reset(struct ibmvnic_adapter *adapter, 3262 enum ibmvnic_reset_reason reason) 3263 { 3264 struct net_device *netdev = adapter->netdev; 3265 struct ibmvnic_rwi *rwi, *tmp; 3266 unsigned long flags; 3267 int ret; 3268 3269 spin_lock_irqsave(&adapter->rwi_lock, flags); 3270 3271 /* If failover is pending don't schedule any other reset. 3272 * Instead let the failover complete. If there is already 3273 * a failover reset scheduled, we will detect and drop the 3274 * duplicate reset when walking the ->rwi_list below.
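* A request with reason VNIC_RESET_FAILOVER is still queued here so the failover itself gets processed.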
3275 */ 3276 if (adapter->state == VNIC_REMOVING || 3277 adapter->state == VNIC_REMOVED || 3278 (adapter->failover_pending && reason != VNIC_RESET_FAILOVER)) { 3279 ret = EBUSY; 3280 netdev_dbg(netdev, "Adapter removing or pending failover, skipping reset\n"); 3281 goto err; 3282 } 3283 3284 list_for_each_entry(tmp, &adapter->rwi_list, list) { 3285 if (tmp->reset_reason == reason) { 3286 netdev_dbg(netdev, "Skipping matching reset, reason=%s\n", 3287 reset_reason_to_string(reason)); 3288 ret = EBUSY; 3289 goto err; 3290 } 3291 } 3292 3293 rwi = kzalloc(sizeof(*rwi), GFP_ATOMIC); 3294 if (!rwi) { 3295 ret = ENOMEM; 3296 goto err; 3297 } 3298 /* if we just received a transport event, 3299 * flush reset queue and process this reset 3300 */ 3301 if (adapter->force_reset_recovery) 3302 flush_reset_queue(adapter); 3303 3304 rwi->reset_reason = reason; 3305 list_add_tail(&rwi->list, &adapter->rwi_list); 3306 netdev_dbg(adapter->netdev, "Scheduling reset (reason %s)\n", 3307 reset_reason_to_string(reason)); 3308 queue_work(system_long_wq, &adapter->ibmvnic_reset); 3309 3310 ret = 0; 3311 err: 3312 /* ibmvnic_close() below can block, so drop the lock first */ 3313 spin_unlock_irqrestore(&adapter->rwi_lock, flags); 3314 3315 if (ret == ENOMEM) 3316 ibmvnic_close(netdev); 3317 3318 return -ret; 3319 } 3320 3321 static void ibmvnic_tx_timeout(struct net_device *dev, unsigned int txqueue) 3322 { 3323 struct ibmvnic_adapter *adapter = netdev_priv(dev); 3324 3325 if (test_bit(0, &adapter->resetting)) { 3326 netdev_err(adapter->netdev, 3327 "Adapter is resetting, skip timeout reset\n"); 3328 return; 3329 } 3330 /* No queuing up reset until at least 5 seconds (default watchdog val) 3331 * after last reset 3332 */ 3333 if (time_before(jiffies, (adapter->last_reset_time + dev->watchdog_timeo))) { 3334 netdev_dbg(dev, "Not yet time to tx timeout.\n"); 3335 return; 3336 } 3337 ibmvnic_reset(adapter, VNIC_RESET_TIMEOUT); 3338 } 3339 3340 static void remove_buff_from_pool(struct ibmvnic_adapter *adapter, 3341 struct ibmvnic_rx_buff *rx_buff) 3342 { 3343 struct ibmvnic_rx_pool *pool = &adapter->rx_pool[rx_buff->pool_index]; 3344 3345 rx_buff->skb = NULL; 3346 3347 pool->free_map[pool->next_alloc] = (int)(rx_buff - pool->rx_buff); 3348 pool->next_alloc = (pool->next_alloc + 1) % pool->size; 3349 3350 atomic_dec(&pool->available); 3351 } 3352 3353 static int ibmvnic_poll(struct napi_struct *napi, int budget) 3354 { 3355 struct ibmvnic_sub_crq_queue *rx_scrq; 3356 struct ibmvnic_adapter *adapter; 3357 struct net_device *netdev; 3358 int frames_processed; 3359 int scrq_num; 3360 3361 netdev = napi->dev; 3362 adapter = netdev_priv(netdev); 3363 scrq_num = (int)(napi - adapter->napi); 3364 frames_processed = 0; 3365 rx_scrq = adapter->rx_scrq[scrq_num]; 3366 3367 restart_poll: 3368 while (frames_processed < budget) { 3369 struct sk_buff *skb; 3370 struct ibmvnic_rx_buff *rx_buff; 3371 union sub_crq *next; 3372 u32 length; 3373 u16 offset; 3374 u8 flags = 0; 3375 3376 if (unlikely(test_bit(0, &adapter->resetting) && 3377 adapter->reset_reason != VNIC_RESET_NON_FATAL)) { 3378 enable_scrq_irq(adapter, rx_scrq); 3379 napi_complete_done(napi, frames_processed); 3380 return frames_processed; 3381 } 3382 3383 if (!pending_scrq(adapter, rx_scrq)) 3384 break; 3385 next = ibmvnic_next_scrq(adapter, rx_scrq); 3386 rx_buff = (struct ibmvnic_rx_buff *) 3387 be64_to_cpu(next->rx_comp.correlator); 3388 /* do error checking */ 3389 if (next->rx_comp.rc) { 3390 netdev_dbg(netdev, "rx buffer returned with rc %x\n", 3391 
be16_to_cpu(next->rx_comp.rc)); 3392 /* free the entry */ 3393 next->rx_comp.first = 0; 3394 dev_kfree_skb_any(rx_buff->skb); 3395 remove_buff_from_pool(adapter, rx_buff); 3396 continue; 3397 } else if (!rx_buff->skb) { 3398 /* free the entry */ 3399 next->rx_comp.first = 0; 3400 remove_buff_from_pool(adapter, rx_buff); 3401 continue; 3402 } 3403 3404 length = be32_to_cpu(next->rx_comp.len); 3405 offset = be16_to_cpu(next->rx_comp.off_frame_data); 3406 flags = next->rx_comp.flags; 3407 skb = rx_buff->skb; 3408 /* load long_term_buff before copying to skb */ 3409 dma_rmb(); 3410 skb_copy_to_linear_data(skb, rx_buff->data + offset, 3411 length); 3412 3413 /* VLAN Header has been stripped by the system firmware and 3414 * needs to be inserted by the driver 3415 */ 3416 if (adapter->rx_vlan_header_insertion && 3417 (flags & IBMVNIC_VLAN_STRIPPED)) 3418 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), 3419 ntohs(next->rx_comp.vlan_tci)); 3420 3421 /* free the entry */ 3422 next->rx_comp.first = 0; 3423 remove_buff_from_pool(adapter, rx_buff); 3424 3425 skb_put(skb, length); 3426 skb->protocol = eth_type_trans(skb, netdev); 3427 skb_record_rx_queue(skb, scrq_num); 3428 3429 if (flags & IBMVNIC_IP_CHKSUM_GOOD && 3430 flags & IBMVNIC_TCP_UDP_CHKSUM_GOOD) { 3431 skb->ip_summed = CHECKSUM_UNNECESSARY; 3432 } 3433 3434 length = skb->len; 3435 napi_gro_receive(napi, skb); /* send it up */ 3436 netdev->stats.rx_packets++; 3437 netdev->stats.rx_bytes += length; 3438 adapter->rx_stats_buffers[scrq_num].packets++; 3439 adapter->rx_stats_buffers[scrq_num].bytes += length; 3440 frames_processed++; 3441 } 3442 3443 if (adapter->state != VNIC_CLOSING && 3444 ((atomic_read(&adapter->rx_pool[scrq_num].available) < 3445 adapter->req_rx_add_entries_per_subcrq / 2) || 3446 frames_processed < budget)) 3447 replenish_rx_pool(adapter, &adapter->rx_pool[scrq_num]); 3448 if (frames_processed < budget) { 3449 if (napi_complete_done(napi, frames_processed)) { 3450 enable_scrq_irq(adapter, rx_scrq); 3451 if (pending_scrq(adapter, rx_scrq)) { 3452 if (napi_reschedule(napi)) { 3453 disable_scrq_irq(adapter, rx_scrq); 3454 goto restart_poll; 3455 } 3456 } 3457 } 3458 } 3459 return frames_processed; 3460 } 3461 3462 static int wait_for_reset(struct ibmvnic_adapter *adapter) 3463 { 3464 int rc, ret; 3465 3466 adapter->fallback.mtu = adapter->req_mtu; 3467 adapter->fallback.rx_queues = adapter->req_rx_queues; 3468 adapter->fallback.tx_queues = adapter->req_tx_queues; 3469 adapter->fallback.rx_entries = adapter->req_rx_add_entries_per_subcrq; 3470 adapter->fallback.tx_entries = adapter->req_tx_entries_per_subcrq; 3471 3472 reinit_completion(&adapter->reset_done); 3473 adapter->wait_for_reset = true; 3474 rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM); 3475 3476 if (rc) { 3477 ret = rc; 3478 goto out; 3479 } 3480 rc = ibmvnic_wait_for_completion(adapter, &adapter->reset_done, 60000); 3481 if (rc) { 3482 ret = -ENODEV; 3483 goto out; 3484 } 3485 3486 ret = 0; 3487 if (adapter->reset_done_rc) { 3488 ret = -EIO; 3489 adapter->desired.mtu = adapter->fallback.mtu; 3490 adapter->desired.rx_queues = adapter->fallback.rx_queues; 3491 adapter->desired.tx_queues = adapter->fallback.tx_queues; 3492 adapter->desired.rx_entries = adapter->fallback.rx_entries; 3493 adapter->desired.tx_entries = adapter->fallback.tx_entries; 3494 3495 reinit_completion(&adapter->reset_done); 3496 adapter->wait_for_reset = true; 3497 rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM); 3498 if (rc) { 3499 ret = rc; 3500 goto out; 3501 } 3502 rc = 
ibmvnic_wait_for_completion(adapter, &adapter->reset_done, 3503 60000); 3504 if (rc) { 3505 ret = -ENODEV; 3506 goto out; 3507 } 3508 } 3509 out: 3510 adapter->wait_for_reset = false; 3511 3512 return ret; 3513 } 3514 3515 static int ibmvnic_change_mtu(struct net_device *netdev, int new_mtu) 3516 { 3517 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 3518 3519 adapter->desired.mtu = new_mtu + ETH_HLEN; 3520 3521 return wait_for_reset(adapter); 3522 } 3523 3524 static netdev_features_t ibmvnic_features_check(struct sk_buff *skb, 3525 struct net_device *dev, 3526 netdev_features_t features) 3527 { 3528 /* Some backing hardware adapters cannot 3529 * handle packets with an MSS less than 224 3530 * or with only one segment. 3531 */ 3532 if (skb_is_gso(skb)) { 3533 if (skb_shinfo(skb)->gso_size < 224 || 3534 skb_shinfo(skb)->gso_segs == 1) 3535 features &= ~NETIF_F_GSO_MASK; 3536 } 3537 3538 return features; 3539 } 3540 3541 static const struct net_device_ops ibmvnic_netdev_ops = { 3542 .ndo_open = ibmvnic_open, 3543 .ndo_stop = ibmvnic_close, 3544 .ndo_start_xmit = ibmvnic_xmit, 3545 .ndo_set_rx_mode = ibmvnic_set_multi, 3546 .ndo_set_mac_address = ibmvnic_set_mac, 3547 .ndo_validate_addr = eth_validate_addr, 3548 .ndo_tx_timeout = ibmvnic_tx_timeout, 3549 .ndo_change_mtu = ibmvnic_change_mtu, 3550 .ndo_features_check = ibmvnic_features_check, 3551 }; 3552 3553 /* ethtool functions */ 3554 3555 static int ibmvnic_get_link_ksettings(struct net_device *netdev, 3556 struct ethtool_link_ksettings *cmd) 3557 { 3558 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 3559 int rc; 3560 3561 rc = send_query_phys_parms(adapter); 3562 if (rc) { 3563 adapter->speed = SPEED_UNKNOWN; 3564 adapter->duplex = DUPLEX_UNKNOWN; 3565 } 3566 cmd->base.speed = adapter->speed; 3567 cmd->base.duplex = adapter->duplex; 3568 cmd->base.port = PORT_FIBRE; 3569 cmd->base.phy_address = 0; 3570 cmd->base.autoneg = AUTONEG_ENABLE; 3571 3572 return 0; 3573 } 3574 3575 static void ibmvnic_get_drvinfo(struct net_device *netdev, 3576 struct ethtool_drvinfo *info) 3577 { 3578 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 3579 3580 strscpy(info->driver, ibmvnic_driver_name, sizeof(info->driver)); 3581 strscpy(info->version, IBMVNIC_DRIVER_VERSION, sizeof(info->version)); 3582 strscpy(info->fw_version, adapter->fw_version, 3583 sizeof(info->fw_version)); 3584 } 3585 3586 static u32 ibmvnic_get_msglevel(struct net_device *netdev) 3587 { 3588 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 3589 3590 return adapter->msg_enable; 3591 } 3592 3593 static void ibmvnic_set_msglevel(struct net_device *netdev, u32 data) 3594 { 3595 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 3596 3597 adapter->msg_enable = data; 3598 } 3599 3600 static u32 ibmvnic_get_link(struct net_device *netdev) 3601 { 3602 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 3603 3604 /* Don't need to send a query because we request a logical link up at 3605 * init and then we wait for link state indications 3606 */ 3607 return adapter->logical_link_state; 3608 } 3609 3610 static void ibmvnic_get_ringparam(struct net_device *netdev, 3611 struct ethtool_ringparam *ring, 3612 struct kernel_ethtool_ringparam *kernel_ring, 3613 struct netlink_ext_ack *extack) 3614 { 3615 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 3616 3617 ring->rx_max_pending = adapter->max_rx_add_entries_per_subcrq; 3618 ring->tx_max_pending = adapter->max_tx_entries_per_subcrq; 3619 ring->rx_mini_max_pending = 0; 3620 ring->rx_jumbo_max_pending = 0; 3621
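/* The *_max_pending limits above come from the backing device; the fields below report the currently requested ring sizes. */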
ring->rx_pending = adapter->req_rx_add_entries_per_subcrq; 3622 ring->tx_pending = adapter->req_tx_entries_per_subcrq; 3623 ring->rx_mini_pending = 0; 3624 ring->rx_jumbo_pending = 0; 3625 } 3626 3627 static int ibmvnic_set_ringparam(struct net_device *netdev, 3628 struct ethtool_ringparam *ring, 3629 struct kernel_ethtool_ringparam *kernel_ring, 3630 struct netlink_ext_ack *extack) 3631 { 3632 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 3633 3634 if (ring->rx_pending > adapter->max_rx_add_entries_per_subcrq || 3635 ring->tx_pending > adapter->max_tx_entries_per_subcrq) { 3636 netdev_err(netdev, "Invalid request.\n"); 3637 netdev_err(netdev, "Max rx buffers = %llu\n", 3638 adapter->max_rx_add_entries_per_subcrq); 3639 netdev_err(netdev, "Max tx buffers = %llu\n", 3640 adapter->max_tx_entries_per_subcrq); 3641 return -EINVAL; 3642 } 3643 3644 adapter->desired.rx_entries = ring->rx_pending; 3645 adapter->desired.tx_entries = ring->tx_pending; 3646 3647 return wait_for_reset(adapter); 3648 } 3649 3650 static void ibmvnic_get_channels(struct net_device *netdev, 3651 struct ethtool_channels *channels) 3652 { 3653 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 3654 3655 channels->max_rx = adapter->max_rx_queues; 3656 channels->max_tx = adapter->max_tx_queues; 3657 channels->max_other = 0; 3658 channels->max_combined = 0; 3659 channels->rx_count = adapter->req_rx_queues; 3660 channels->tx_count = adapter->req_tx_queues; 3661 channels->other_count = 0; 3662 channels->combined_count = 0; 3663 } 3664 3665 static int ibmvnic_set_channels(struct net_device *netdev, 3666 struct ethtool_channels *channels) 3667 { 3668 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 3669 3670 adapter->desired.rx_queues = channels->rx_count; 3671 adapter->desired.tx_queues = channels->tx_count; 3672 3673 return wait_for_reset(adapter); 3674 } 3675 3676 static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data) 3677 { 3678 struct ibmvnic_adapter *adapter = netdev_priv(dev); 3679 int i; 3680 3681 if (stringset != ETH_SS_STATS) 3682 return; 3683 3684 for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++, data += ETH_GSTRING_LEN) 3685 memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN); 3686 3687 for (i = 0; i < adapter->req_tx_queues; i++) { 3688 snprintf(data, ETH_GSTRING_LEN, "tx%d_packets", i); 3689 data += ETH_GSTRING_LEN; 3690 3691 snprintf(data, ETH_GSTRING_LEN, "tx%d_bytes", i); 3692 data += ETH_GSTRING_LEN; 3693 3694 snprintf(data, ETH_GSTRING_LEN, "tx%d_dropped_packets", i); 3695 data += ETH_GSTRING_LEN; 3696 } 3697 3698 for (i = 0; i < adapter->req_rx_queues; i++) { 3699 snprintf(data, ETH_GSTRING_LEN, "rx%d_packets", i); 3700 data += ETH_GSTRING_LEN; 3701 3702 snprintf(data, ETH_GSTRING_LEN, "rx%d_bytes", i); 3703 data += ETH_GSTRING_LEN; 3704 3705 snprintf(data, ETH_GSTRING_LEN, "rx%d_interrupts", i); 3706 data += ETH_GSTRING_LEN; 3707 } 3708 } 3709 3710 static int ibmvnic_get_sset_count(struct net_device *dev, int sset) 3711 { 3712 struct ibmvnic_adapter *adapter = netdev_priv(dev); 3713 3714 switch (sset) { 3715 case ETH_SS_STATS: 3716 return ARRAY_SIZE(ibmvnic_stats) + 3717 adapter->req_tx_queues * NUM_TX_STATS + 3718 adapter->req_rx_queues * NUM_RX_STATS; 3719 default: 3720 return -EOPNOTSUPP; 3721 } 3722 } 3723 3724 static void ibmvnic_get_ethtool_stats(struct net_device *dev, 3725 struct ethtool_stats *stats, u64 *data) 3726 { 3727 struct ibmvnic_adapter *adapter = netdev_priv(dev); 3728 union ibmvnic_crq crq; 3729 int i, j; 3730 int rc; 3731 3732 memset(&crq, 0,
sizeof(crq)); 3733 crq.request_statistics.first = IBMVNIC_CRQ_CMD; 3734 crq.request_statistics.cmd = REQUEST_STATISTICS; 3735 crq.request_statistics.ioba = cpu_to_be32(adapter->stats_token); 3736 crq.request_statistics.len = 3737 cpu_to_be32(sizeof(struct ibmvnic_statistics)); 3738 3739 /* Wait for data to be written */ 3740 reinit_completion(&adapter->stats_done); 3741 rc = ibmvnic_send_crq(adapter, &crq); 3742 if (rc) 3743 return; 3744 rc = ibmvnic_wait_for_completion(adapter, &adapter->stats_done, 10000); 3745 if (rc) 3746 return; 3747 3748 for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++) 3749 data[i] = be64_to_cpu(IBMVNIC_GET_STAT 3750 (adapter, ibmvnic_stats[i].offset)); 3751 3752 for (j = 0; j < adapter->req_tx_queues; j++) { 3753 data[i] = adapter->tx_stats_buffers[j].packets; 3754 i++; 3755 data[i] = adapter->tx_stats_buffers[j].bytes; 3756 i++; 3757 data[i] = adapter->tx_stats_buffers[j].dropped_packets; 3758 i++; 3759 } 3760 3761 for (j = 0; j < adapter->req_rx_queues; j++) { 3762 data[i] = adapter->rx_stats_buffers[j].packets; 3763 i++; 3764 data[i] = adapter->rx_stats_buffers[j].bytes; 3765 i++; 3766 data[i] = adapter->rx_stats_buffers[j].interrupts; 3767 i++; 3768 } 3769 } 3770 3771 static const struct ethtool_ops ibmvnic_ethtool_ops = { 3772 .get_drvinfo = ibmvnic_get_drvinfo, 3773 .get_msglevel = ibmvnic_get_msglevel, 3774 .set_msglevel = ibmvnic_set_msglevel, 3775 .get_link = ibmvnic_get_link, 3776 .get_ringparam = ibmvnic_get_ringparam, 3777 .set_ringparam = ibmvnic_set_ringparam, 3778 .get_channels = ibmvnic_get_channels, 3779 .set_channels = ibmvnic_set_channels, 3780 .get_strings = ibmvnic_get_strings, 3781 .get_sset_count = ibmvnic_get_sset_count, 3782 .get_ethtool_stats = ibmvnic_get_ethtool_stats, 3783 .get_link_ksettings = ibmvnic_get_link_ksettings, 3784 }; 3785 3786 /* Routines for managing CRQs/sCRQs */ 3787 3788 static int reset_one_sub_crq_queue(struct ibmvnic_adapter *adapter, 3789 struct ibmvnic_sub_crq_queue *scrq) 3790 { 3791 int rc; 3792 3793 if (!scrq) { 3794 netdev_dbg(adapter->netdev, "Invalid scrq reset.\n"); 3795 return -EINVAL; 3796 } 3797 3798 if (scrq->irq) { 3799 free_irq(scrq->irq, scrq); 3800 irq_dispose_mapping(scrq->irq); 3801 scrq->irq = 0; 3802 } 3803 3804 if (scrq->msgs) { 3805 memset(scrq->msgs, 0, 4 * PAGE_SIZE); 3806 atomic_set(&scrq->used, 0); 3807 scrq->cur = 0; 3808 scrq->ind_buf.index = 0; 3809 } else { 3810 netdev_dbg(adapter->netdev, "Invalid scrq reset\n"); 3811 return -EINVAL; 3812 } 3813 3814 rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token, 3815 4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq); 3816 return rc; 3817 } 3818 3819 static int reset_sub_crq_queues(struct ibmvnic_adapter *adapter) 3820 { 3821 int i, rc; 3822 3823 if (!adapter->tx_scrq || !adapter->rx_scrq) 3824 return -EINVAL; 3825 3826 ibmvnic_clean_affinity(adapter); 3827 3828 for (i = 0; i < adapter->req_tx_queues; i++) { 3829 netdev_dbg(adapter->netdev, "Re-setting tx_scrq[%d]\n", i); 3830 rc = reset_one_sub_crq_queue(adapter, adapter->tx_scrq[i]); 3831 if (rc) 3832 return rc; 3833 } 3834 3835 for (i = 0; i < adapter->req_rx_queues; i++) { 3836 netdev_dbg(adapter->netdev, "Re-setting rx_scrq[%d]\n", i); 3837 rc = reset_one_sub_crq_queue(adapter, adapter->rx_scrq[i]); 3838 if (rc) 3839 return rc; 3840 } 3841 3842 return rc; 3843 } 3844 3845 static void release_sub_crq_queue(struct ibmvnic_adapter *adapter, 3846 struct ibmvnic_sub_crq_queue *scrq, 3847 bool do_h_free) 3848 { 3849 struct device *dev = &adapter->vdev->dev; 3850 long rc; 3851 3852 
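/* Teardown mirrors init_sub_crq_queue(): optionally free the sub-CRQ with the hypervisor (retrying while it reports busy), then free the indirect buffer, unmap and free the message queue pages, and free the scrq itself. */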
netdev_dbg(adapter->netdev, "Releasing sub-CRQ\n"); 3853 3854 if (do_h_free) { 3855 /* Close the sub-crqs */ 3856 do { 3857 rc = plpar_hcall_norets(H_FREE_SUB_CRQ, 3858 adapter->vdev->unit_address, 3859 scrq->crq_num); 3860 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc)); 3861 3862 if (rc) { 3863 netdev_err(adapter->netdev, 3864 "Failed to release sub-CRQ %16lx, rc = %ld\n", 3865 scrq->crq_num, rc); 3866 } 3867 } 3868 3869 dma_free_coherent(dev, 3870 IBMVNIC_IND_ARR_SZ, 3871 scrq->ind_buf.indir_arr, 3872 scrq->ind_buf.indir_dma); 3873 3874 dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE, 3875 DMA_BIDIRECTIONAL); 3876 free_pages((unsigned long)scrq->msgs, 2); 3877 free_cpumask_var(scrq->affinity_mask); 3878 kfree(scrq); 3879 } 3880 3881 static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter 3882 *adapter) 3883 { 3884 struct device *dev = &adapter->vdev->dev; 3885 struct ibmvnic_sub_crq_queue *scrq; 3886 int rc; 3887 3888 scrq = kzalloc(sizeof(*scrq), GFP_KERNEL); 3889 if (!scrq) 3890 return NULL; 3891 3892 scrq->msgs = 3893 (union sub_crq *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 2); 3894 if (!scrq->msgs) { 3895 dev_warn(dev, "Couldn't allocate crq queue messages page\n"); 3896 goto zero_page_failed; 3897 } 3898 if (!zalloc_cpumask_var(&scrq->affinity_mask, GFP_KERNEL)) 3899 goto cpumask_alloc_failed; 3900 3901 scrq->msg_token = dma_map_single(dev, scrq->msgs, 4 * PAGE_SIZE, 3902 DMA_BIDIRECTIONAL); 3903 if (dma_mapping_error(dev, scrq->msg_token)) { 3904 dev_warn(dev, "Couldn't map crq queue messages page\n"); 3905 goto map_failed; 3906 } 3907 3908 rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token, 3909 4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq); 3910 3911 if (rc == H_RESOURCE) 3912 rc = ibmvnic_reset_crq(adapter); 3913 3914 if (rc == H_CLOSED) { 3915 dev_warn(dev, "Partner adapter not ready, waiting.\n"); 3916 } else if (rc) { 3917 dev_warn(dev, "Error %d registering sub-crq\n", rc); 3918 goto reg_failed; 3919 } 3920 3921 scrq->adapter = adapter; 3922 scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs); 3923 scrq->ind_buf.index = 0; 3924 3925 scrq->ind_buf.indir_arr = 3926 dma_alloc_coherent(dev, 3927 IBMVNIC_IND_ARR_SZ, 3928 &scrq->ind_buf.indir_dma, 3929 GFP_KERNEL); 3930 3931 if (!scrq->ind_buf.indir_arr) 3932 goto indir_failed; 3933 3934 spin_lock_init(&scrq->lock); 3935 3936 netdev_dbg(adapter->netdev, 3937 "sub-crq initialized, num %lx, hw_irq=%lx, irq=%x\n", 3938 scrq->crq_num, scrq->hw_irq, scrq->irq); 3939 3940 return scrq; 3941 3942 indir_failed: 3943 do { 3944 rc = plpar_hcall_norets(H_FREE_SUB_CRQ, 3945 adapter->vdev->unit_address, 3946 scrq->crq_num); 3947 } while (rc == H_BUSY || rc == H_IS_LONG_BUSY(rc)); 3948 reg_failed: 3949 dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE, 3950 DMA_BIDIRECTIONAL); 3951 map_failed: 3952 free_cpumask_var(scrq->affinity_mask); 3953 cpumask_alloc_failed: 3954 free_pages((unsigned long)scrq->msgs, 2); 3955 zero_page_failed: 3956 kfree(scrq); 3957 3958 return NULL; 3959 } 3960 3961 static void release_sub_crqs(struct ibmvnic_adapter *adapter, bool do_h_free) 3962 { 3963 int i; 3964 3965 ibmvnic_clean_affinity(adapter); 3966 if (adapter->tx_scrq) { 3967 for (i = 0; i < adapter->num_active_tx_scrqs; i++) { 3968 if (!adapter->tx_scrq[i]) 3969 continue; 3970 3971 netdev_dbg(adapter->netdev, "Releasing tx_scrq[%d]\n", 3972 i); 3973 ibmvnic_tx_scrq_clean_buffer(adapter, adapter->tx_scrq[i]); 3974 if (adapter->tx_scrq[i]->irq) { 3975 free_irq(adapter->tx_scrq[i]->irq, 3976 adapter->tx_scrq[i]); 3977 
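/* Unregister the handler with free_irq() before irq_dispose_mapping()
 * tears down the virq created by irq_create_mapping(); the reverse
 * order would dispose of a mapping that still has a handler attached.
 */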
irq_dispose_mapping(adapter->tx_scrq[i]->irq); 3978 adapter->tx_scrq[i]->irq = 0; 3979 } 3980 3981 release_sub_crq_queue(adapter, adapter->tx_scrq[i], 3982 do_h_free); 3983 } 3984 3985 kfree(adapter->tx_scrq); 3986 adapter->tx_scrq = NULL; 3987 adapter->num_active_tx_scrqs = 0; 3988 } 3989 3990 if (adapter->rx_scrq) { 3991 for (i = 0; i < adapter->num_active_rx_scrqs; i++) { 3992 if (!adapter->rx_scrq[i]) 3993 continue; 3994 3995 netdev_dbg(adapter->netdev, "Releasing rx_scrq[%d]\n", 3996 i); 3997 if (adapter->rx_scrq[i]->irq) { 3998 free_irq(adapter->rx_scrq[i]->irq, 3999 adapter->rx_scrq[i]); 4000 irq_dispose_mapping(adapter->rx_scrq[i]->irq); 4001 adapter->rx_scrq[i]->irq = 0; 4002 } 4003 4004 release_sub_crq_queue(adapter, adapter->rx_scrq[i], 4005 do_h_free); 4006 } 4007 4008 kfree(adapter->rx_scrq); 4009 adapter->rx_scrq = NULL; 4010 adapter->num_active_rx_scrqs = 0; 4011 } 4012 } 4013 4014 static int disable_scrq_irq(struct ibmvnic_adapter *adapter, 4015 struct ibmvnic_sub_crq_queue *scrq) 4016 { 4017 struct device *dev = &adapter->vdev->dev; 4018 unsigned long rc; 4019 4020 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address, 4021 H_DISABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0); 4022 if (rc) 4023 dev_err(dev, "Couldn't disable scrq irq 0x%lx. rc=%ld\n", 4024 scrq->hw_irq, rc); 4025 return rc; 4026 } 4027 4028 /* We can not use the IRQ chip EOI handler because that has the 4029 * unintended effect of changing the interrupt priority. 4030 */ 4031 static void ibmvnic_xics_eoi(struct device *dev, struct ibmvnic_sub_crq_queue *scrq) 4032 { 4033 u64 val = 0xff000000 | scrq->hw_irq; 4034 unsigned long rc; 4035 4036 rc = plpar_hcall_norets(H_EOI, val); 4037 if (rc) 4038 dev_err(dev, "H_EOI FAILED irq 0x%llx. rc=%ld\n", val, rc); 4039 } 4040 4041 /* Due to a firmware bug, the hypervisor can send an interrupt to a 4042 * transmit or receive queue just prior to a partition migration. 4043 * Force an EOI after migration. 4044 */ 4045 static void ibmvnic_clear_pending_interrupt(struct device *dev, 4046 struct ibmvnic_sub_crq_queue *scrq) 4047 { 4048 if (!xive_enabled()) 4049 ibmvnic_xics_eoi(dev, scrq); 4050 } 4051 4052 static int enable_scrq_irq(struct ibmvnic_adapter *adapter, 4053 struct ibmvnic_sub_crq_queue *scrq) 4054 { 4055 struct device *dev = &adapter->vdev->dev; 4056 unsigned long rc; 4057 4058 if (scrq->hw_irq > 0x100000000ULL) { 4059 dev_err(dev, "bad hw_irq = %lx\n", scrq->hw_irq); 4060 return 1; 4061 } 4062 4063 if (test_bit(0, &adapter->resetting) && 4064 adapter->reset_reason == VNIC_RESET_MOBILITY) { 4065 ibmvnic_clear_pending_interrupt(dev, scrq); 4066 } 4067 4068 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address, 4069 H_ENABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0); 4070 if (rc) 4071 dev_err(dev, "Couldn't enable scrq irq 0x%lx. 
rc=%ld\n", 4072 scrq->hw_irq, rc); 4073 return rc; 4074 } 4075 4076 static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter, 4077 struct ibmvnic_sub_crq_queue *scrq) 4078 { 4079 struct device *dev = &adapter->vdev->dev; 4080 struct ibmvnic_tx_pool *tx_pool; 4081 struct ibmvnic_tx_buff *txbuff; 4082 struct netdev_queue *txq; 4083 union sub_crq *next; 4084 int index; 4085 int i; 4086 4087 restart_loop: 4088 while (pending_scrq(adapter, scrq)) { 4089 unsigned int pool = scrq->pool_index; 4090 int num_entries = 0; 4091 int total_bytes = 0; 4092 int num_packets = 0; 4093 4094 next = ibmvnic_next_scrq(adapter, scrq); 4095 for (i = 0; i < next->tx_comp.num_comps; i++) { 4096 index = be32_to_cpu(next->tx_comp.correlators[i]); 4097 if (index & IBMVNIC_TSO_POOL_MASK) { 4098 tx_pool = &adapter->tso_pool[pool]; 4099 index &= ~IBMVNIC_TSO_POOL_MASK; 4100 } else { 4101 tx_pool = &adapter->tx_pool[pool]; 4102 } 4103 4104 txbuff = &tx_pool->tx_buff[index]; 4105 num_packets++; 4106 num_entries += txbuff->num_entries; 4107 if (txbuff->skb) { 4108 total_bytes += txbuff->skb->len; 4109 if (next->tx_comp.rcs[i]) { 4110 dev_err(dev, "tx error %x\n", 4111 next->tx_comp.rcs[i]); 4112 dev_kfree_skb_irq(txbuff->skb); 4113 } else { 4114 dev_consume_skb_irq(txbuff->skb); 4115 } 4116 txbuff->skb = NULL; 4117 } else { 4118 netdev_warn(adapter->netdev, 4119 "TX completion received with NULL socket buffer\n"); 4120 } 4121 tx_pool->free_map[tx_pool->producer_index] = index; 4122 tx_pool->producer_index = 4123 (tx_pool->producer_index + 1) % 4124 tx_pool->num_buffers; 4125 } 4126 /* remove tx_comp scrq*/ 4127 next->tx_comp.first = 0; 4128 4129 txq = netdev_get_tx_queue(adapter->netdev, scrq->pool_index); 4130 netdev_tx_completed_queue(txq, num_packets, total_bytes); 4131 4132 if (atomic_sub_return(num_entries, &scrq->used) <= 4133 (adapter->req_tx_entries_per_subcrq / 2) && 4134 __netif_subqueue_stopped(adapter->netdev, 4135 scrq->pool_index)) { 4136 rcu_read_lock(); 4137 if (adapter->tx_queues_active) { 4138 netif_wake_subqueue(adapter->netdev, 4139 scrq->pool_index); 4140 netdev_dbg(adapter->netdev, 4141 "Started queue %d\n", 4142 scrq->pool_index); 4143 } 4144 rcu_read_unlock(); 4145 } 4146 } 4147 4148 enable_scrq_irq(adapter, scrq); 4149 4150 if (pending_scrq(adapter, scrq)) { 4151 disable_scrq_irq(adapter, scrq); 4152 goto restart_loop; 4153 } 4154 4155 return 0; 4156 } 4157 4158 static irqreturn_t ibmvnic_interrupt_tx(int irq, void *instance) 4159 { 4160 struct ibmvnic_sub_crq_queue *scrq = instance; 4161 struct ibmvnic_adapter *adapter = scrq->adapter; 4162 4163 disable_scrq_irq(adapter, scrq); 4164 ibmvnic_complete_tx(adapter, scrq); 4165 4166 return IRQ_HANDLED; 4167 } 4168 4169 static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance) 4170 { 4171 struct ibmvnic_sub_crq_queue *scrq = instance; 4172 struct ibmvnic_adapter *adapter = scrq->adapter; 4173 4174 /* When booting a kdump kernel we can hit pending interrupts 4175 * prior to completing driver initialization. 
4176 */ 4177 if (unlikely(adapter->state != VNIC_OPEN)) 4178 return IRQ_NONE; 4179 4180 adapter->rx_stats_buffers[scrq->scrq_num].interrupts++; 4181 4182 if (napi_schedule_prep(&adapter->napi[scrq->scrq_num])) { 4183 disable_scrq_irq(adapter, scrq); 4184 __napi_schedule(&adapter->napi[scrq->scrq_num]); 4185 } 4186 4187 return IRQ_HANDLED; 4188 } 4189 4190 static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter) 4191 { 4192 struct device *dev = &adapter->vdev->dev; 4193 struct ibmvnic_sub_crq_queue *scrq; 4194 int i = 0, j = 0; 4195 int rc = 0; 4196 4197 for (i = 0; i < adapter->req_tx_queues; i++) { 4198 netdev_dbg(adapter->netdev, "Initializing tx_scrq[%d] irq\n", 4199 i); 4200 scrq = adapter->tx_scrq[i]; 4201 scrq->irq = irq_create_mapping(NULL, scrq->hw_irq); 4202 4203 if (!scrq->irq) { 4204 rc = -EINVAL; 4205 dev_err(dev, "Error mapping irq\n"); 4206 goto req_tx_irq_failed; 4207 } 4208 4209 snprintf(scrq->name, sizeof(scrq->name), "ibmvnic-%x-tx%d", 4210 adapter->vdev->unit_address, i); 4211 rc = request_irq(scrq->irq, ibmvnic_interrupt_tx, 4212 0, scrq->name, scrq); 4213 4214 if (rc) { 4215 dev_err(dev, "Couldn't register tx irq 0x%x. rc=%d\n", 4216 scrq->irq, rc); 4217 irq_dispose_mapping(scrq->irq); 4218 goto req_tx_irq_failed; 4219 } 4220 } 4221 4222 for (i = 0; i < adapter->req_rx_queues; i++) { 4223 netdev_dbg(adapter->netdev, "Initializing rx_scrq[%d] irq\n", 4224 i); 4225 scrq = adapter->rx_scrq[i]; 4226 scrq->irq = irq_create_mapping(NULL, scrq->hw_irq); 4227 if (!scrq->irq) { 4228 rc = -EINVAL; 4229 dev_err(dev, "Error mapping irq\n"); 4230 goto req_rx_irq_failed; 4231 } 4232 snprintf(scrq->name, sizeof(scrq->name), "ibmvnic-%x-rx%d", 4233 adapter->vdev->unit_address, i); 4234 rc = request_irq(scrq->irq, ibmvnic_interrupt_rx, 4235 0, scrq->name, scrq); 4236 if (rc) { 4237 dev_err(dev, "Couldn't register rx irq 0x%x. 
rc=%d\n", 4238 scrq->irq, rc); 4239 irq_dispose_mapping(scrq->irq); 4240 goto req_rx_irq_failed; 4241 } 4242 } 4243 4244 cpus_read_lock(); 4245 ibmvnic_set_affinity(adapter); 4246 cpus_read_unlock(); 4247 4248 return rc; 4249 4250 req_rx_irq_failed: 4251 for (j = 0; j < i; j++) { 4252 free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]); 4253 irq_dispose_mapping(adapter->rx_scrq[j]->irq); 4254 } 4255 i = adapter->req_tx_queues; 4256 req_tx_irq_failed: 4257 for (j = 0; j < i; j++) { 4258 free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]); 4259 irq_dispose_mapping(adapter->tx_scrq[j]->irq); 4260 } 4261 release_sub_crqs(adapter, 1); 4262 return rc; 4263 } 4264 4265 static int init_sub_crqs(struct ibmvnic_adapter *adapter) 4266 { 4267 struct device *dev = &adapter->vdev->dev; 4268 struct ibmvnic_sub_crq_queue **allqueues; 4269 int registered_queues = 0; 4270 int total_queues; 4271 int more = 0; 4272 int i; 4273 4274 total_queues = adapter->req_tx_queues + adapter->req_rx_queues; 4275 4276 allqueues = kcalloc(total_queues, sizeof(*allqueues), GFP_KERNEL); 4277 if (!allqueues) 4278 return -ENOMEM; 4279 4280 for (i = 0; i < total_queues; i++) { 4281 allqueues[i] = init_sub_crq_queue(adapter); 4282 if (!allqueues[i]) { 4283 dev_warn(dev, "Couldn't allocate all sub-crqs\n"); 4284 break; 4285 } 4286 registered_queues++; 4287 } 4288 4289 /* Make sure we were able to register the minimum number of queues */ 4290 if (registered_queues < 4291 adapter->min_tx_queues + adapter->min_rx_queues) { 4292 dev_err(dev, "Fatal: Couldn't init min number of sub-crqs\n"); 4293 goto tx_failed; 4294 } 4295 4296 /* Distribute the failed allocated queues*/ 4297 for (i = 0; i < total_queues - registered_queues + more ; i++) { 4298 netdev_dbg(adapter->netdev, "Reducing number of queues\n"); 4299 switch (i % 3) { 4300 case 0: 4301 if (adapter->req_rx_queues > adapter->min_rx_queues) 4302 adapter->req_rx_queues--; 4303 else 4304 more++; 4305 break; 4306 case 1: 4307 if (adapter->req_tx_queues > adapter->min_tx_queues) 4308 adapter->req_tx_queues--; 4309 else 4310 more++; 4311 break; 4312 } 4313 } 4314 4315 adapter->tx_scrq = kcalloc(adapter->req_tx_queues, 4316 sizeof(*adapter->tx_scrq), GFP_KERNEL); 4317 if (!adapter->tx_scrq) 4318 goto tx_failed; 4319 4320 for (i = 0; i < adapter->req_tx_queues; i++) { 4321 adapter->tx_scrq[i] = allqueues[i]; 4322 adapter->tx_scrq[i]->pool_index = i; 4323 adapter->num_active_tx_scrqs++; 4324 } 4325 4326 adapter->rx_scrq = kcalloc(adapter->req_rx_queues, 4327 sizeof(*adapter->rx_scrq), GFP_KERNEL); 4328 if (!adapter->rx_scrq) 4329 goto rx_failed; 4330 4331 for (i = 0; i < adapter->req_rx_queues; i++) { 4332 adapter->rx_scrq[i] = allqueues[i + adapter->req_tx_queues]; 4333 adapter->rx_scrq[i]->scrq_num = i; 4334 adapter->num_active_rx_scrqs++; 4335 } 4336 4337 kfree(allqueues); 4338 return 0; 4339 4340 rx_failed: 4341 kfree(adapter->tx_scrq); 4342 adapter->tx_scrq = NULL; 4343 tx_failed: 4344 for (i = 0; i < registered_queues; i++) 4345 release_sub_crq_queue(adapter, allqueues[i], 1); 4346 kfree(allqueues); 4347 return -ENOMEM; 4348 } 4349 4350 static void send_request_cap(struct ibmvnic_adapter *adapter, int retry) 4351 { 4352 struct device *dev = &adapter->vdev->dev; 4353 union ibmvnic_crq crq; 4354 int max_entries; 4355 int cap_reqs; 4356 4357 /* We send out 6 or 7 REQUEST_CAPABILITY CRQs below (depending on 4358 * the PROMISC flag). Initialize this count upfront. 
When the tasklet 4359 * receives a response to all of these, it will send the next protocol 4360 * message (QUERY_IP_OFFLOAD). 4361 */ 4362 if (!(adapter->netdev->flags & IFF_PROMISC) || 4363 adapter->promisc_supported) 4364 cap_reqs = 7; 4365 else 4366 cap_reqs = 6; 4367 4368 if (!retry) { 4369 /* Sub-CRQ entries are 32 byte long */ 4370 int entries_page = 4 * PAGE_SIZE / (sizeof(u64) * 4); 4371 4372 atomic_set(&adapter->running_cap_crqs, cap_reqs); 4373 4374 if (adapter->min_tx_entries_per_subcrq > entries_page || 4375 adapter->min_rx_add_entries_per_subcrq > entries_page) { 4376 dev_err(dev, "Fatal, invalid entries per sub-crq\n"); 4377 return; 4378 } 4379 4380 if (adapter->desired.mtu) 4381 adapter->req_mtu = adapter->desired.mtu; 4382 else 4383 adapter->req_mtu = adapter->netdev->mtu + ETH_HLEN; 4384 4385 if (!adapter->desired.tx_entries) 4386 adapter->desired.tx_entries = 4387 adapter->max_tx_entries_per_subcrq; 4388 if (!adapter->desired.rx_entries) 4389 adapter->desired.rx_entries = 4390 adapter->max_rx_add_entries_per_subcrq; 4391 4392 max_entries = IBMVNIC_LTB_SET_SIZE / 4393 (adapter->req_mtu + IBMVNIC_BUFFER_HLEN); 4394 4395 if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) * 4396 adapter->desired.tx_entries > IBMVNIC_LTB_SET_SIZE) { 4397 adapter->desired.tx_entries = max_entries; 4398 } 4399 4400 if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) * 4401 adapter->desired.rx_entries > IBMVNIC_LTB_SET_SIZE) { 4402 adapter->desired.rx_entries = max_entries; 4403 } 4404 4405 if (adapter->desired.tx_entries) 4406 adapter->req_tx_entries_per_subcrq = 4407 adapter->desired.tx_entries; 4408 else 4409 adapter->req_tx_entries_per_subcrq = 4410 adapter->max_tx_entries_per_subcrq; 4411 4412 if (adapter->desired.rx_entries) 4413 adapter->req_rx_add_entries_per_subcrq = 4414 adapter->desired.rx_entries; 4415 else 4416 adapter->req_rx_add_entries_per_subcrq = 4417 adapter->max_rx_add_entries_per_subcrq; 4418 4419 if (adapter->desired.tx_queues) 4420 adapter->req_tx_queues = 4421 adapter->desired.tx_queues; 4422 else 4423 adapter->req_tx_queues = 4424 adapter->opt_tx_comp_sub_queues; 4425 4426 if (adapter->desired.rx_queues) 4427 adapter->req_rx_queues = 4428 adapter->desired.rx_queues; 4429 else 4430 adapter->req_rx_queues = 4431 adapter->opt_rx_comp_queues; 4432 4433 adapter->req_rx_add_queues = adapter->max_rx_add_queues; 4434 } else { 4435 atomic_add(cap_reqs, &adapter->running_cap_crqs); 4436 } 4437 memset(&crq, 0, sizeof(crq)); 4438 crq.request_capability.first = IBMVNIC_CRQ_CMD; 4439 crq.request_capability.cmd = REQUEST_CAPABILITY; 4440 4441 crq.request_capability.capability = cpu_to_be16(REQ_TX_QUEUES); 4442 crq.request_capability.number = cpu_to_be64(adapter->req_tx_queues); 4443 cap_reqs--; 4444 ibmvnic_send_crq(adapter, &crq); 4445 4446 crq.request_capability.capability = cpu_to_be16(REQ_RX_QUEUES); 4447 crq.request_capability.number = cpu_to_be64(adapter->req_rx_queues); 4448 cap_reqs--; 4449 ibmvnic_send_crq(adapter, &crq); 4450 4451 crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_QUEUES); 4452 crq.request_capability.number = cpu_to_be64(adapter->req_rx_add_queues); 4453 cap_reqs--; 4454 ibmvnic_send_crq(adapter, &crq); 4455 4456 crq.request_capability.capability = 4457 cpu_to_be16(REQ_TX_ENTRIES_PER_SUBCRQ); 4458 crq.request_capability.number = 4459 cpu_to_be64(adapter->req_tx_entries_per_subcrq); 4460 cap_reqs--; 4461 ibmvnic_send_crq(adapter, &crq); 4462 4463 crq.request_capability.capability = 4464 cpu_to_be16(REQ_RX_ADD_ENTRIES_PER_SUBCRQ); 4465 crq.request_capability.number 
= 4466 cpu_to_be64(adapter->req_rx_add_entries_per_subcrq); 4467 cap_reqs--; 4468 ibmvnic_send_crq(adapter, &crq); 4469 4470 crq.request_capability.capability = cpu_to_be16(REQ_MTU); 4471 crq.request_capability.number = cpu_to_be64(adapter->req_mtu); 4472 cap_reqs--; 4473 ibmvnic_send_crq(adapter, &crq); 4474 4475 if (adapter->netdev->flags & IFF_PROMISC) { 4476 if (adapter->promisc_supported) { 4477 crq.request_capability.capability = 4478 cpu_to_be16(PROMISC_REQUESTED); 4479 crq.request_capability.number = cpu_to_be64(1); 4480 cap_reqs--; 4481 ibmvnic_send_crq(adapter, &crq); 4482 } 4483 } else { 4484 crq.request_capability.capability = 4485 cpu_to_be16(PROMISC_REQUESTED); 4486 crq.request_capability.number = cpu_to_be64(0); 4487 cap_reqs--; 4488 ibmvnic_send_crq(adapter, &crq); 4489 } 4490 4491 /* Keep at end to catch any discrepancy between expected and actual 4492 * CRQs sent. 4493 */ 4494 WARN_ON(cap_reqs != 0); 4495 } 4496 4497 static int pending_scrq(struct ibmvnic_adapter *adapter, 4498 struct ibmvnic_sub_crq_queue *scrq) 4499 { 4500 union sub_crq *entry = &scrq->msgs[scrq->cur]; 4501 int rc; 4502 4503 rc = !!(entry->generic.first & IBMVNIC_CRQ_CMD_RSP); 4504 4505 /* Ensure that the SCRQ valid flag is loaded prior to loading the 4506 * contents of the SCRQ descriptor 4507 */ 4508 dma_rmb(); 4509 4510 return rc; 4511 } 4512 4513 static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *adapter, 4514 struct ibmvnic_sub_crq_queue *scrq) 4515 { 4516 union sub_crq *entry; 4517 unsigned long flags; 4518 4519 spin_lock_irqsave(&scrq->lock, flags); 4520 entry = &scrq->msgs[scrq->cur]; 4521 if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP) { 4522 if (++scrq->cur == scrq->size) 4523 scrq->cur = 0; 4524 } else { 4525 entry = NULL; 4526 } 4527 spin_unlock_irqrestore(&scrq->lock, flags); 4528 4529 /* Ensure that the SCRQ valid flag is loaded prior to loading the 4530 * contents of the SCRQ descriptor 4531 */ 4532 dma_rmb(); 4533 4534 return entry; 4535 } 4536 4537 static union ibmvnic_crq *ibmvnic_next_crq(struct ibmvnic_adapter *adapter) 4538 { 4539 struct ibmvnic_crq_queue *queue = &adapter->crq; 4540 union ibmvnic_crq *crq; 4541 4542 crq = &queue->msgs[queue->cur]; 4543 if (crq->generic.first & IBMVNIC_CRQ_CMD_RSP) { 4544 if (++queue->cur == queue->size) 4545 queue->cur = 0; 4546 } else { 4547 crq = NULL; 4548 } 4549 4550 return crq; 4551 } 4552 4553 static void print_subcrq_error(struct device *dev, int rc, const char *func) 4554 { 4555 switch (rc) { 4556 case H_PARAMETER: 4557 dev_warn_ratelimited(dev, 4558 "%s failed: Send request is malformed or adapter failover pending. (rc=%d)\n", 4559 func, rc); 4560 break; 4561 case H_CLOSED: 4562 dev_warn_ratelimited(dev, 4563 "%s failed: Backing queue closed. Adapter is down or failover pending. 
(rc=%d)\n", 4564 func, rc); 4565 break; 4566 default: 4567 dev_err_ratelimited(dev, "%s failed: (rc=%d)\n", func, rc); 4568 break; 4569 } 4570 } 4571 4572 static int send_subcrq_indirect(struct ibmvnic_adapter *adapter, 4573 u64 remote_handle, u64 ioba, u64 num_entries) 4574 { 4575 unsigned int ua = adapter->vdev->unit_address; 4576 struct device *dev = &adapter->vdev->dev; 4577 int rc; 4578 4579 /* Make sure the hypervisor sees the complete request */ 4580 dma_wmb(); 4581 rc = plpar_hcall_norets(H_SEND_SUB_CRQ_INDIRECT, ua, 4582 cpu_to_be64(remote_handle), 4583 ioba, num_entries); 4584 4585 if (rc) 4586 print_subcrq_error(dev, rc, __func__); 4587 4588 return rc; 4589 } 4590 4591 static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter, 4592 union ibmvnic_crq *crq) 4593 { 4594 unsigned int ua = adapter->vdev->unit_address; 4595 struct device *dev = &adapter->vdev->dev; 4596 u64 *u64_crq = (u64 *)crq; 4597 int rc; 4598 4599 netdev_dbg(adapter->netdev, "Sending CRQ: %016lx %016lx\n", 4600 (unsigned long)cpu_to_be64(u64_crq[0]), 4601 (unsigned long)cpu_to_be64(u64_crq[1])); 4602 4603 if (!adapter->crq.active && 4604 crq->generic.first != IBMVNIC_CRQ_INIT_CMD) { 4605 dev_warn(dev, "Invalid request detected while CRQ is inactive, possible device state change during reset\n"); 4606 return -EINVAL; 4607 } 4608 4609 /* Make sure the hypervisor sees the complete request */ 4610 dma_wmb(); 4611 4612 rc = plpar_hcall_norets(H_SEND_CRQ, ua, 4613 cpu_to_be64(u64_crq[0]), 4614 cpu_to_be64(u64_crq[1])); 4615 4616 if (rc) { 4617 if (rc == H_CLOSED) { 4618 dev_warn(dev, "CRQ Queue closed\n"); 4619 /* do not reset, report the fail, wait for passive init from server */ 4620 } 4621 4622 dev_warn(dev, "Send error (rc=%d)\n", rc); 4623 } 4624 4625 return rc; 4626 } 4627 4628 static int ibmvnic_send_crq_init(struct ibmvnic_adapter *adapter) 4629 { 4630 struct device *dev = &adapter->vdev->dev; 4631 union ibmvnic_crq crq; 4632 int retries = 100; 4633 int rc; 4634 4635 memset(&crq, 0, sizeof(crq)); 4636 crq.generic.first = IBMVNIC_CRQ_INIT_CMD; 4637 crq.generic.cmd = IBMVNIC_CRQ_INIT; 4638 netdev_dbg(adapter->netdev, "Sending CRQ init\n"); 4639 4640 do { 4641 rc = ibmvnic_send_crq(adapter, &crq); 4642 if (rc != H_CLOSED) 4643 break; 4644 retries--; 4645 msleep(50); 4646 4647 } while (retries > 0); 4648 4649 if (rc) { 4650 dev_err(dev, "Failed to send init request, rc = %d\n", rc); 4651 return rc; 4652 } 4653 4654 return 0; 4655 } 4656 4657 struct vnic_login_client_data { 4658 u8 type; 4659 __be16 len; 4660 char name[]; 4661 } __packed; 4662 4663 static int vnic_client_data_len(struct ibmvnic_adapter *adapter) 4664 { 4665 int len; 4666 4667 /* Calculate the amount of buffer space needed for the 4668 * vnic client data in the login buffer. There are four entries, 4669 * OS name, LPAR name, device name, and a null last entry. 
4670 */ 4671 len = 4 * sizeof(struct vnic_login_client_data); 4672 len += 6; /* "Linux" plus NULL */ 4673 len += strlen(utsname()->nodename) + 1; 4674 len += strlen(adapter->netdev->name) + 1; 4675 4676 return len; 4677 } 4678 4679 static void vnic_add_client_data(struct ibmvnic_adapter *adapter, 4680 struct vnic_login_client_data *vlcd) 4681 { 4682 const char *os_name = "Linux"; 4683 int len; 4684 4685 /* Type 1 - LPAR OS */ 4686 vlcd->type = 1; 4687 len = strlen(os_name) + 1; 4688 vlcd->len = cpu_to_be16(len); 4689 strscpy(vlcd->name, os_name, len); 4690 vlcd = (struct vnic_login_client_data *)(vlcd->name + len); 4691 4692 /* Type 2 - LPAR name */ 4693 vlcd->type = 2; 4694 len = strlen(utsname()->nodename) + 1; 4695 vlcd->len = cpu_to_be16(len); 4696 strscpy(vlcd->name, utsname()->nodename, len); 4697 vlcd = (struct vnic_login_client_data *)(vlcd->name + len); 4698 4699 /* Type 3 - device name */ 4700 vlcd->type = 3; 4701 len = strlen(adapter->netdev->name) + 1; 4702 vlcd->len = cpu_to_be16(len); 4703 strscpy(vlcd->name, adapter->netdev->name, len); 4704 } 4705 4706 static int send_login(struct ibmvnic_adapter *adapter) 4707 { 4708 struct ibmvnic_login_rsp_buffer *login_rsp_buffer; 4709 struct ibmvnic_login_buffer *login_buffer; 4710 struct device *dev = &adapter->vdev->dev; 4711 struct vnic_login_client_data *vlcd; 4712 dma_addr_t rsp_buffer_token; 4713 dma_addr_t buffer_token; 4714 size_t rsp_buffer_size; 4715 union ibmvnic_crq crq; 4716 int client_data_len; 4717 size_t buffer_size; 4718 __be64 *tx_list_p; 4719 __be64 *rx_list_p; 4720 int rc; 4721 int i; 4722 4723 if (!adapter->tx_scrq || !adapter->rx_scrq) { 4724 netdev_err(adapter->netdev, 4725 "RX or TX queues are not allocated, device login failed\n"); 4726 return -ENOMEM; 4727 } 4728 4729 release_login_buffer(adapter); 4730 release_login_rsp_buffer(adapter); 4731 4732 client_data_len = vnic_client_data_len(adapter); 4733 4734 buffer_size = 4735 sizeof(struct ibmvnic_login_buffer) + 4736 sizeof(u64) * (adapter->req_tx_queues + adapter->req_rx_queues) + 4737 client_data_len; 4738 4739 login_buffer = kzalloc(buffer_size, GFP_ATOMIC); 4740 if (!login_buffer) 4741 goto buf_alloc_failed; 4742 4743 buffer_token = dma_map_single(dev, login_buffer, buffer_size, 4744 DMA_TO_DEVICE); 4745 if (dma_mapping_error(dev, buffer_token)) { 4746 dev_err(dev, "Couldn't map login buffer\n"); 4747 goto buf_map_failed; 4748 } 4749 4750 rsp_buffer_size = sizeof(struct ibmvnic_login_rsp_buffer) + 4751 sizeof(u64) * adapter->req_tx_queues + 4752 sizeof(u64) * adapter->req_rx_queues + 4753 sizeof(u64) * adapter->req_rx_queues + 4754 sizeof(u8) * IBMVNIC_TX_DESC_VERSIONS; 4755 4756 login_rsp_buffer = kmalloc(rsp_buffer_size, GFP_ATOMIC); 4757 if (!login_rsp_buffer) 4758 goto buf_rsp_alloc_failed; 4759 4760 rsp_buffer_token = dma_map_single(dev, login_rsp_buffer, 4761 rsp_buffer_size, DMA_FROM_DEVICE); 4762 if (dma_mapping_error(dev, rsp_buffer_token)) { 4763 dev_err(dev, "Couldn't map login rsp buffer\n"); 4764 goto buf_rsp_map_failed; 4765 } 4766 4767 adapter->login_buf = login_buffer; 4768 adapter->login_buf_token = buffer_token; 4769 adapter->login_buf_sz = buffer_size; 4770 adapter->login_rsp_buf = login_rsp_buffer; 4771 adapter->login_rsp_buf_token = rsp_buffer_token; 4772 adapter->login_rsp_buf_sz = rsp_buffer_size; 4773 4774 login_buffer->len = cpu_to_be32(buffer_size); 4775 login_buffer->version = cpu_to_be32(INITIAL_VERSION_LB); 4776 login_buffer->num_txcomp_subcrqs = cpu_to_be32(adapter->req_tx_queues); 4777 login_buffer->off_txcomp_subcrqs = 4778 
cpu_to_be32(sizeof(struct ibmvnic_login_buffer)); 4779 login_buffer->num_rxcomp_subcrqs = cpu_to_be32(adapter->req_rx_queues); 4780 login_buffer->off_rxcomp_subcrqs = 4781 cpu_to_be32(sizeof(struct ibmvnic_login_buffer) + 4782 sizeof(u64) * adapter->req_tx_queues); 4783 login_buffer->login_rsp_ioba = cpu_to_be32(rsp_buffer_token); 4784 login_buffer->login_rsp_len = cpu_to_be32(rsp_buffer_size); 4785 4786 tx_list_p = (__be64 *)((char *)login_buffer + 4787 sizeof(struct ibmvnic_login_buffer)); 4788 rx_list_p = (__be64 *)((char *)login_buffer + 4789 sizeof(struct ibmvnic_login_buffer) + 4790 sizeof(u64) * adapter->req_tx_queues); 4791 4792 for (i = 0; i < adapter->req_tx_queues; i++) { 4793 if (adapter->tx_scrq[i]) { 4794 tx_list_p[i] = 4795 cpu_to_be64(adapter->tx_scrq[i]->crq_num); 4796 } 4797 } 4798 4799 for (i = 0; i < adapter->req_rx_queues; i++) { 4800 if (adapter->rx_scrq[i]) { 4801 rx_list_p[i] = 4802 cpu_to_be64(adapter->rx_scrq[i]->crq_num); 4803 } 4804 } 4805 4806 /* Insert vNIC login client data */ 4807 vlcd = (struct vnic_login_client_data *) 4808 ((char *)rx_list_p + (sizeof(u64) * adapter->req_rx_queues)); 4809 login_buffer->client_data_offset = 4810 cpu_to_be32((char *)vlcd - (char *)login_buffer); 4811 login_buffer->client_data_len = cpu_to_be32(client_data_len); 4812 4813 vnic_add_client_data(adapter, vlcd); 4814 4815 netdev_dbg(adapter->netdev, "Login Buffer:\n"); 4816 for (i = 0; i < (adapter->login_buf_sz - 1) / 8 + 1; i++) { 4817 netdev_dbg(adapter->netdev, "%016lx\n", 4818 ((unsigned long *)(adapter->login_buf))[i]); 4819 } 4820 4821 memset(&crq, 0, sizeof(crq)); 4822 crq.login.first = IBMVNIC_CRQ_CMD; 4823 crq.login.cmd = LOGIN; 4824 crq.login.ioba = cpu_to_be32(buffer_token); 4825 crq.login.len = cpu_to_be32(buffer_size); 4826 4827 adapter->login_pending = true; 4828 rc = ibmvnic_send_crq(adapter, &crq); 4829 if (rc) { 4830 adapter->login_pending = false; 4831 netdev_err(adapter->netdev, "Failed to send login, rc=%d\n", rc); 4832 goto buf_rsp_map_failed; 4833 } 4834 4835 return 0; 4836 4837 buf_rsp_map_failed: 4838 kfree(login_rsp_buffer); 4839 adapter->login_rsp_buf = NULL; 4840 buf_rsp_alloc_failed: 4841 dma_unmap_single(dev, buffer_token, buffer_size, DMA_TO_DEVICE); 4842 buf_map_failed: 4843 kfree(login_buffer); 4844 adapter->login_buf = NULL; 4845 buf_alloc_failed: 4846 return -ENOMEM; 4847 } 4848 4849 static int send_request_map(struct ibmvnic_adapter *adapter, dma_addr_t addr, 4850 u32 len, u8 map_id) 4851 { 4852 union ibmvnic_crq crq; 4853 4854 memset(&crq, 0, sizeof(crq)); 4855 crq.request_map.first = IBMVNIC_CRQ_CMD; 4856 crq.request_map.cmd = REQUEST_MAP; 4857 crq.request_map.map_id = map_id; 4858 crq.request_map.ioba = cpu_to_be32(addr); 4859 crq.request_map.len = cpu_to_be32(len); 4860 return ibmvnic_send_crq(adapter, &crq); 4861 } 4862 4863 static int send_request_unmap(struct ibmvnic_adapter *adapter, u8 map_id) 4864 { 4865 union ibmvnic_crq crq; 4866 4867 memset(&crq, 0, sizeof(crq)); 4868 crq.request_unmap.first = IBMVNIC_CRQ_CMD; 4869 crq.request_unmap.cmd = REQUEST_UNMAP; 4870 crq.request_unmap.map_id = map_id; 4871 return ibmvnic_send_crq(adapter, &crq); 4872 } 4873 4874 static void send_query_map(struct ibmvnic_adapter *adapter) 4875 { 4876 union ibmvnic_crq crq; 4877 4878 memset(&crq, 0, sizeof(crq)); 4879 crq.query_map.first = IBMVNIC_CRQ_CMD; 4880 crq.query_map.cmd = QUERY_MAP; 4881 ibmvnic_send_crq(adapter, &crq); 4882 } 4883 4884 /* Send a series of CRQs requesting various capabilities of the VNIC server */ 4885 static void 
send_query_cap(struct ibmvnic_adapter *adapter) 4886 { 4887 union ibmvnic_crq crq; 4888 int cap_reqs; 4889 4890 /* We send out 25 QUERY_CAPABILITY CRQs below. Initialize this count 4891 * upfront. When the tasklet receives a response to all of these, it 4892 * can send out the next protocol message (REQUEST_CAPABILITY). 4893 */ 4894 cap_reqs = 25; 4895 4896 atomic_set(&adapter->running_cap_crqs, cap_reqs); 4897 4898 memset(&crq, 0, sizeof(crq)); 4899 crq.query_capability.first = IBMVNIC_CRQ_CMD; 4900 crq.query_capability.cmd = QUERY_CAPABILITY; 4901 4902 crq.query_capability.capability = cpu_to_be16(MIN_TX_QUEUES); 4903 ibmvnic_send_crq(adapter, &crq); 4904 cap_reqs--; 4905 4906 crq.query_capability.capability = cpu_to_be16(MIN_RX_QUEUES); 4907 ibmvnic_send_crq(adapter, &crq); 4908 cap_reqs--; 4909 4910 crq.query_capability.capability = cpu_to_be16(MIN_RX_ADD_QUEUES); 4911 ibmvnic_send_crq(adapter, &crq); 4912 cap_reqs--; 4913 4914 crq.query_capability.capability = cpu_to_be16(MAX_TX_QUEUES); 4915 ibmvnic_send_crq(adapter, &crq); 4916 cap_reqs--; 4917 4918 crq.query_capability.capability = cpu_to_be16(MAX_RX_QUEUES); 4919 ibmvnic_send_crq(adapter, &crq); 4920 cap_reqs--; 4921 4922 crq.query_capability.capability = cpu_to_be16(MAX_RX_ADD_QUEUES); 4923 ibmvnic_send_crq(adapter, &crq); 4924 cap_reqs--; 4925 4926 crq.query_capability.capability = 4927 cpu_to_be16(MIN_TX_ENTRIES_PER_SUBCRQ); 4928 ibmvnic_send_crq(adapter, &crq); 4929 cap_reqs--; 4930 4931 crq.query_capability.capability = 4932 cpu_to_be16(MIN_RX_ADD_ENTRIES_PER_SUBCRQ); 4933 ibmvnic_send_crq(adapter, &crq); 4934 cap_reqs--; 4935 4936 crq.query_capability.capability = 4937 cpu_to_be16(MAX_TX_ENTRIES_PER_SUBCRQ); 4938 ibmvnic_send_crq(adapter, &crq); 4939 cap_reqs--; 4940 4941 crq.query_capability.capability = 4942 cpu_to_be16(MAX_RX_ADD_ENTRIES_PER_SUBCRQ); 4943 ibmvnic_send_crq(adapter, &crq); 4944 cap_reqs--; 4945 4946 crq.query_capability.capability = cpu_to_be16(TCP_IP_OFFLOAD); 4947 ibmvnic_send_crq(adapter, &crq); 4948 cap_reqs--; 4949 4950 crq.query_capability.capability = cpu_to_be16(PROMISC_SUPPORTED); 4951 ibmvnic_send_crq(adapter, &crq); 4952 cap_reqs--; 4953 4954 crq.query_capability.capability = cpu_to_be16(MIN_MTU); 4955 ibmvnic_send_crq(adapter, &crq); 4956 cap_reqs--; 4957 4958 crq.query_capability.capability = cpu_to_be16(MAX_MTU); 4959 ibmvnic_send_crq(adapter, &crq); 4960 cap_reqs--; 4961 4962 crq.query_capability.capability = cpu_to_be16(MAX_MULTICAST_FILTERS); 4963 ibmvnic_send_crq(adapter, &crq); 4964 cap_reqs--; 4965 4966 crq.query_capability.capability = cpu_to_be16(VLAN_HEADER_INSERTION); 4967 ibmvnic_send_crq(adapter, &crq); 4968 cap_reqs--; 4969 4970 crq.query_capability.capability = cpu_to_be16(RX_VLAN_HEADER_INSERTION); 4971 ibmvnic_send_crq(adapter, &crq); 4972 cap_reqs--; 4973 4974 crq.query_capability.capability = cpu_to_be16(MAX_TX_SG_ENTRIES); 4975 ibmvnic_send_crq(adapter, &crq); 4976 cap_reqs--; 4977 4978 crq.query_capability.capability = cpu_to_be16(RX_SG_SUPPORTED); 4979 ibmvnic_send_crq(adapter, &crq); 4980 cap_reqs--; 4981 4982 crq.query_capability.capability = cpu_to_be16(OPT_TX_COMP_SUB_QUEUES); 4983 ibmvnic_send_crq(adapter, &crq); 4984 cap_reqs--; 4985 4986 crq.query_capability.capability = cpu_to_be16(OPT_RX_COMP_QUEUES); 4987 ibmvnic_send_crq(adapter, &crq); 4988 cap_reqs--; 4989 4990 crq.query_capability.capability = 4991 cpu_to_be16(OPT_RX_BUFADD_Q_PER_RX_COMP_Q); 4992 ibmvnic_send_crq(adapter, &crq); 4993 cap_reqs--; 4994 4995 crq.query_capability.capability = 4996
cpu_to_be16(OPT_TX_ENTRIES_PER_SUBCRQ); 4997 ibmvnic_send_crq(adapter, &crq); 4998 cap_reqs--; 4999 5000 crq.query_capability.capability = 5001 cpu_to_be16(OPT_RXBA_ENTRIES_PER_SUBCRQ); 5002 ibmvnic_send_crq(adapter, &crq); 5003 cap_reqs--; 5004 5005 crq.query_capability.capability = cpu_to_be16(TX_RX_DESC_REQ); 5006 5007 ibmvnic_send_crq(adapter, &crq); 5008 cap_reqs--; 5009 5010 /* Keep at end to catch any discrepancy between expected and actual 5011 * CRQs sent. 5012 */ 5013 WARN_ON(cap_reqs != 0); 5014 } 5015 5016 static void send_query_ip_offload(struct ibmvnic_adapter *adapter) 5017 { 5018 int buf_sz = sizeof(struct ibmvnic_query_ip_offload_buffer); 5019 struct device *dev = &adapter->vdev->dev; 5020 union ibmvnic_crq crq; 5021 5022 adapter->ip_offload_tok = 5023 dma_map_single(dev, 5024 &adapter->ip_offload_buf, 5025 buf_sz, 5026 DMA_FROM_DEVICE); 5027 5028 if (dma_mapping_error(dev, adapter->ip_offload_tok)) { 5029 if (!firmware_has_feature(FW_FEATURE_CMO)) 5030 dev_err(dev, "Couldn't map offload buffer\n"); 5031 return; 5032 } 5033 5034 memset(&crq, 0, sizeof(crq)); 5035 crq.query_ip_offload.first = IBMVNIC_CRQ_CMD; 5036 crq.query_ip_offload.cmd = QUERY_IP_OFFLOAD; 5037 crq.query_ip_offload.len = cpu_to_be32(buf_sz); 5038 crq.query_ip_offload.ioba = 5039 cpu_to_be32(adapter->ip_offload_tok); 5040 5041 ibmvnic_send_crq(adapter, &crq); 5042 } 5043 5044 static void send_control_ip_offload(struct ibmvnic_adapter *adapter) 5045 { 5046 struct ibmvnic_control_ip_offload_buffer *ctrl_buf = &adapter->ip_offload_ctrl; 5047 struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf; 5048 struct device *dev = &adapter->vdev->dev; 5049 netdev_features_t old_hw_features = 0; 5050 union ibmvnic_crq crq; 5051 5052 adapter->ip_offload_ctrl_tok = 5053 dma_map_single(dev, 5054 ctrl_buf, 5055 sizeof(adapter->ip_offload_ctrl), 5056 DMA_TO_DEVICE); 5057 5058 if (dma_mapping_error(dev, adapter->ip_offload_ctrl_tok)) { 5059 dev_err(dev, "Couldn't map ip offload control buffer\n"); 5060 return; 5061 } 5062 5063 ctrl_buf->len = cpu_to_be32(sizeof(adapter->ip_offload_ctrl)); 5064 ctrl_buf->version = cpu_to_be32(INITIAL_VERSION_IOB); 5065 ctrl_buf->ipv4_chksum = buf->ipv4_chksum; 5066 ctrl_buf->ipv6_chksum = buf->ipv6_chksum; 5067 ctrl_buf->tcp_ipv4_chksum = buf->tcp_ipv4_chksum; 5068 ctrl_buf->udp_ipv4_chksum = buf->udp_ipv4_chksum; 5069 ctrl_buf->tcp_ipv6_chksum = buf->tcp_ipv6_chksum; 5070 ctrl_buf->udp_ipv6_chksum = buf->udp_ipv6_chksum; 5071 ctrl_buf->large_tx_ipv4 = buf->large_tx_ipv4; 5072 ctrl_buf->large_tx_ipv6 = buf->large_tx_ipv6; 5073 5074 /* large_rx disabled for now, additional features needed */ 5075 ctrl_buf->large_rx_ipv4 = 0; 5076 ctrl_buf->large_rx_ipv6 = 0; 5077 5078 if (adapter->state != VNIC_PROBING) { 5079 old_hw_features = adapter->netdev->hw_features; 5080 adapter->netdev->hw_features = 0; 5081 } 5082 5083 adapter->netdev->hw_features = NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO; 5084 5085 if (buf->tcp_ipv4_chksum || buf->udp_ipv4_chksum) 5086 adapter->netdev->hw_features |= NETIF_F_IP_CSUM; 5087 5088 if (buf->tcp_ipv6_chksum || buf->udp_ipv6_chksum) 5089 adapter->netdev->hw_features |= NETIF_F_IPV6_CSUM; 5090 5091 if ((adapter->netdev->features & 5092 (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))) 5093 adapter->netdev->hw_features |= NETIF_F_RXCSUM; 5094 5095 if (buf->large_tx_ipv4) 5096 adapter->netdev->hw_features |= NETIF_F_TSO; 5097 if (buf->large_tx_ipv6) 5098 adapter->netdev->hw_features |= NETIF_F_TSO6; 5099 5100 if (adapter->state == VNIC_PROBING) { 5101 
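/* First-time probe: no user-chosen feature set exists yet, so
 * advertise and enable everything the server reported above.
 */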
adapter->netdev->features |= adapter->netdev->hw_features; 5102 } else if (old_hw_features != adapter->netdev->hw_features) { 5103 netdev_features_t tmp = 0; 5104 5105 /* disable features no longer supported */ 5106 adapter->netdev->features &= adapter->netdev->hw_features; 5107 /* turn on features now supported if previously enabled */ 5108 tmp = (old_hw_features ^ adapter->netdev->hw_features) & 5109 adapter->netdev->hw_features; 5110 adapter->netdev->features |= 5111 tmp & adapter->netdev->wanted_features; 5112 } 5113 5114 memset(&crq, 0, sizeof(crq)); 5115 crq.control_ip_offload.first = IBMVNIC_CRQ_CMD; 5116 crq.control_ip_offload.cmd = CONTROL_IP_OFFLOAD; 5117 crq.control_ip_offload.len = 5118 cpu_to_be32(sizeof(adapter->ip_offload_ctrl)); 5119 crq.control_ip_offload.ioba = cpu_to_be32(adapter->ip_offload_ctrl_tok); 5120 ibmvnic_send_crq(adapter, &crq); 5121 } 5122 5123 static void handle_vpd_size_rsp(union ibmvnic_crq *crq, 5124 struct ibmvnic_adapter *adapter) 5125 { 5126 struct device *dev = &adapter->vdev->dev; 5127 5128 if (crq->get_vpd_size_rsp.rc.code) { 5129 dev_err(dev, "Error retrieving VPD size, rc=%x\n", 5130 crq->get_vpd_size_rsp.rc.code); 5131 complete(&adapter->fw_done); 5132 return; 5133 } 5134 5135 adapter->vpd->len = be64_to_cpu(crq->get_vpd_size_rsp.len); 5136 complete(&adapter->fw_done); 5137 } 5138 5139 static void handle_vpd_rsp(union ibmvnic_crq *crq, 5140 struct ibmvnic_adapter *adapter) 5141 { 5142 struct device *dev = &adapter->vdev->dev; 5143 unsigned char *substr = NULL; 5144 u8 fw_level_len = 0; 5145 5146 memset(adapter->fw_version, 0, 32); 5147 5148 dma_unmap_single(dev, adapter->vpd->dma_addr, adapter->vpd->len, 5149 DMA_FROM_DEVICE); 5150 5151 if (crq->get_vpd_rsp.rc.code) { 5152 dev_err(dev, "Error retrieving VPD from device, rc=%x\n", 5153 crq->get_vpd_rsp.rc.code); 5154 goto complete; 5155 } 5156 5157 /* get the position of the firmware version info 5158 * located after the ASCII 'RM' substring in the buffer 5159 */ 5160 substr = strnstr(adapter->vpd->buff, "RM", adapter->vpd->len); 5161 if (!substr) { 5162 dev_info(dev, "Warning - No FW level has been provided in the VPD buffer by the VIOS Server\n"); 5163 goto complete; 5164 } 5165 5166 /* get length of firmware level ASCII substring */ 5167 if ((substr + 2) < (adapter->vpd->buff + adapter->vpd->len)) { 5168 fw_level_len = *(substr + 2); 5169 } else { 5170 dev_info(dev, "Length of FW substr extrapolated VPD buff\n"); 5171 goto complete; 5172 } 5173 5174 /* copy firmware version string from vpd into adapter */ 5175 if ((substr + 3 + fw_level_len) < 5176 (adapter->vpd->buff + adapter->vpd->len)) { 5177 strncpy((char *)adapter->fw_version, substr + 3, fw_level_len); 5178 } else { 5179 dev_info(dev, "FW substr extrapolated VPD buff\n"); 5180 } 5181 5182 complete: 5183 if (adapter->fw_version[0] == '\0') 5184 strscpy((char *)adapter->fw_version, "N/A", sizeof(adapter->fw_version)); 5185 complete(&adapter->fw_done); 5186 } 5187 5188 static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter) 5189 { 5190 struct device *dev = &adapter->vdev->dev; 5191 struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf; 5192 int i; 5193 5194 dma_unmap_single(dev, adapter->ip_offload_tok, 5195 sizeof(adapter->ip_offload_buf), DMA_FROM_DEVICE); 5196 5197 netdev_dbg(adapter->netdev, "Query IP Offload Buffer:\n"); 5198 for (i = 0; i < (sizeof(adapter->ip_offload_buf) - 1) / 8 + 1; i++) 5199 netdev_dbg(adapter->netdev, "%016lx\n", 5200 ((unsigned long *)(buf))[i]); 5201 5202
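/* Decode the individual offload capabilities; this repeats the raw
 * hex dump above in a readable form.
 */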
netdev_dbg(adapter->netdev, "ipv4_chksum = %d\n", buf->ipv4_chksum); 5203 netdev_dbg(adapter->netdev, "ipv6_chksum = %d\n", buf->ipv6_chksum); 5204 netdev_dbg(adapter->netdev, "tcp_ipv4_chksum = %d\n", 5205 buf->tcp_ipv4_chksum); 5206 netdev_dbg(adapter->netdev, "tcp_ipv6_chksum = %d\n", 5207 buf->tcp_ipv6_chksum); 5208 netdev_dbg(adapter->netdev, "udp_ipv4_chksum = %d\n", 5209 buf->udp_ipv4_chksum); 5210 netdev_dbg(adapter->netdev, "udp_ipv6_chksum = %d\n", 5211 buf->udp_ipv6_chksum); 5212 netdev_dbg(adapter->netdev, "large_tx_ipv4 = %d\n", 5213 buf->large_tx_ipv4); 5214 netdev_dbg(adapter->netdev, "large_tx_ipv6 = %d\n", 5215 buf->large_tx_ipv6); 5216 netdev_dbg(adapter->netdev, "large_rx_ipv4 = %d\n", 5217 buf->large_rx_ipv4); 5218 netdev_dbg(adapter->netdev, "large_rx_ipv6 = %d\n", 5219 buf->large_rx_ipv6); 5220 netdev_dbg(adapter->netdev, "max_ipv4_hdr_sz = %d\n", 5221 buf->max_ipv4_header_size); 5222 netdev_dbg(adapter->netdev, "max_ipv6_hdr_sz = %d\n", 5223 buf->max_ipv6_header_size); 5224 netdev_dbg(adapter->netdev, "max_tcp_hdr_size = %d\n", 5225 buf->max_tcp_header_size); 5226 netdev_dbg(adapter->netdev, "max_udp_hdr_size = %d\n", 5227 buf->max_udp_header_size); 5228 netdev_dbg(adapter->netdev, "max_large_tx_size = %d\n", 5229 buf->max_large_tx_size); 5230 netdev_dbg(adapter->netdev, "max_large_rx_size = %d\n", 5231 buf->max_large_rx_size); 5232 netdev_dbg(adapter->netdev, "ipv6_ext_hdr = %d\n", 5233 buf->ipv6_extension_header); 5234 netdev_dbg(adapter->netdev, "tcp_pseudosum_req = %d\n", 5235 buf->tcp_pseudosum_req); 5236 netdev_dbg(adapter->netdev, "num_ipv6_ext_hd = %d\n", 5237 buf->num_ipv6_ext_headers); 5238 netdev_dbg(adapter->netdev, "off_ipv6_ext_hd = %d\n", 5239 buf->off_ipv6_ext_headers); 5240 5241 send_control_ip_offload(adapter); 5242 } 5243 5244 static const char *ibmvnic_fw_err_cause(u16 cause) 5245 { 5246 switch (cause) { 5247 case ADAPTER_PROBLEM: 5248 return "adapter problem"; 5249 case BUS_PROBLEM: 5250 return "bus problem"; 5251 case FW_PROBLEM: 5252 return "firmware problem"; 5253 case DD_PROBLEM: 5254 return "device driver problem"; 5255 case EEH_RECOVERY: 5256 return "EEH recovery"; 5257 case FW_UPDATED: 5258 return "firmware updated"; 5259 case LOW_MEMORY: 5260 return "low Memory"; 5261 default: 5262 return "unknown"; 5263 } 5264 } 5265 5266 static void handle_error_indication(union ibmvnic_crq *crq, 5267 struct ibmvnic_adapter *adapter) 5268 { 5269 struct device *dev = &adapter->vdev->dev; 5270 u16 cause; 5271 5272 cause = be16_to_cpu(crq->error_indication.error_cause); 5273 5274 dev_warn_ratelimited(dev, 5275 "Firmware reports %serror, cause: %s. Starting recovery...\n", 5276 crq->error_indication.flags 5277 & IBMVNIC_FATAL_ERROR ? "FATAL " : "", 5278 ibmvnic_fw_err_cause(cause)); 5279 5280 if (crq->error_indication.flags & IBMVNIC_FATAL_ERROR) 5281 ibmvnic_reset(adapter, VNIC_RESET_FATAL); 5282 else 5283 ibmvnic_reset(adapter, VNIC_RESET_NON_FATAL); 5284 } 5285 5286 static int handle_change_mac_rsp(union ibmvnic_crq *crq, 5287 struct ibmvnic_adapter *adapter) 5288 { 5289 struct net_device *netdev = adapter->netdev; 5290 struct device *dev = &adapter->vdev->dev; 5291 long rc; 5292 5293 rc = crq->change_mac_addr_rsp.rc.code; 5294 if (rc) { 5295 dev_err(dev, "Error %ld in CHANGE_MAC_ADDR_RSP\n", rc); 5296 goto out; 5297 } 5298 /* crq->change_mac_addr.mac_addr is the requested one 5299 * crq->change_mac_addr_rsp.mac_addr is the returned valid one. 
5300 */ 5301 eth_hw_addr_set(netdev, &crq->change_mac_addr_rsp.mac_addr[0]); 5302 ether_addr_copy(adapter->mac_addr, 5303 &crq->change_mac_addr_rsp.mac_addr[0]); 5304 out: 5305 complete(&adapter->fw_done); 5306 return rc; 5307 } 5308 5309 static void handle_request_cap_rsp(union ibmvnic_crq *crq, 5310 struct ibmvnic_adapter *adapter) 5311 { 5312 struct device *dev = &adapter->vdev->dev; 5313 u64 *req_value; 5314 char *name; 5315 5316 atomic_dec(&adapter->running_cap_crqs); 5317 netdev_dbg(adapter->netdev, "Outstanding request-caps: %d\n", 5318 atomic_read(&adapter->running_cap_crqs)); 5319 switch (be16_to_cpu(crq->request_capability_rsp.capability)) { 5320 case REQ_TX_QUEUES: 5321 req_value = &adapter->req_tx_queues; 5322 name = "tx"; 5323 break; 5324 case REQ_RX_QUEUES: 5325 req_value = &adapter->req_rx_queues; 5326 name = "rx"; 5327 break; 5328 case REQ_RX_ADD_QUEUES: 5329 req_value = &adapter->req_rx_add_queues; 5330 name = "rx_add"; 5331 break; 5332 case REQ_TX_ENTRIES_PER_SUBCRQ: 5333 req_value = &adapter->req_tx_entries_per_subcrq; 5334 name = "tx_entries_per_subcrq"; 5335 break; 5336 case REQ_RX_ADD_ENTRIES_PER_SUBCRQ: 5337 req_value = &adapter->req_rx_add_entries_per_subcrq; 5338 name = "rx_add_entries_per_subcrq"; 5339 break; 5340 case REQ_MTU: 5341 req_value = &adapter->req_mtu; 5342 name = "mtu"; 5343 break; 5344 case PROMISC_REQUESTED: 5345 req_value = &adapter->promisc; 5346 name = "promisc"; 5347 break; 5348 default: 5349 dev_err(dev, "Got invalid cap request rsp %d\n", 5350 crq->request_capability.capability); 5351 return; 5352 } 5353 5354 switch (crq->request_capability_rsp.rc.code) { 5355 case SUCCESS: 5356 break; 5357 case PARTIALSUCCESS: 5358 dev_info(dev, "req=%lld, rsp=%ld in %s queue, retrying.\n", 5359 *req_value, 5360 (long)be64_to_cpu(crq->request_capability_rsp.number), 5361 name); 5362 5363 if (be16_to_cpu(crq->request_capability_rsp.capability) == 5364 REQ_MTU) { 5365 pr_err("mtu of %llu is not supported. Reverting.\n", 5366 *req_value); 5367 *req_value = adapter->fallback.mtu; 5368 } else { 5369 *req_value = 5370 be64_to_cpu(crq->request_capability_rsp.number); 5371 } 5372 5373 send_request_cap(adapter, 1); 5374 return; 5375 default: 5376 dev_err(dev, "Error %d in request cap rsp\n", 5377 crq->request_capability_rsp.rc.code); 5378 return; 5379 } 5380 5381 /* Done receiving requested capabilities, query IP offload support */ 5382 if (atomic_read(&adapter->running_cap_crqs) == 0) 5383 send_query_ip_offload(adapter); 5384 } 5385 5386 static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq, 5387 struct ibmvnic_adapter *adapter) 5388 { 5389 struct device *dev = &adapter->vdev->dev; 5390 struct net_device *netdev = adapter->netdev; 5391 struct ibmvnic_login_rsp_buffer *login_rsp = adapter->login_rsp_buf; 5392 struct ibmvnic_login_buffer *login = adapter->login_buf; 5393 u64 *tx_handle_array; 5394 u64 *rx_handle_array; 5395 int num_tx_pools; 5396 int num_rx_pools; 5397 u64 *size_array; 5398 int i; 5399 5400 /* CHECK: Test/set of login_pending does not need to be atomic 5401 * because only ibmvnic_tasklet tests/clears this. 
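* (All CRQ responses, this login response included, are dispatched
* from that one tasklet, so no stronger ordering is needed.)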
5402 */ 5403 if (!adapter->login_pending) { 5404 netdev_warn(netdev, "Ignoring unexpected login response\n"); 5405 return 0; 5406 } 5407 adapter->login_pending = false; 5408 5409 dma_unmap_single(dev, adapter->login_buf_token, adapter->login_buf_sz, 5410 DMA_TO_DEVICE); 5411 dma_unmap_single(dev, adapter->login_rsp_buf_token, 5412 adapter->login_rsp_buf_sz, DMA_FROM_DEVICE); 5413 5414 /* If the number of queues requested can't be allocated by the 5415 * server, the login response will return with code 1. We will need 5416 * to resend the login buffer with fewer queues requested. 5417 */ 5418 if (login_rsp_crq->generic.rc.code) { 5419 adapter->init_done_rc = login_rsp_crq->generic.rc.code; 5420 complete(&adapter->init_done); 5421 return 0; 5422 } 5423 5424 if (adapter->failover_pending) { 5425 adapter->init_done_rc = -EAGAIN; 5426 netdev_dbg(netdev, "Failover pending, ignoring login response\n"); 5427 complete(&adapter->init_done); 5428 /* login response buffer will be released on reset */ 5429 return 0; 5430 } 5431 5432 netdev->mtu = adapter->req_mtu - ETH_HLEN; 5433 5434 netdev_dbg(adapter->netdev, "Login Response Buffer:\n"); 5435 for (i = 0; i < (adapter->login_rsp_buf_sz - 1) / 8 + 1; i++) { 5436 netdev_dbg(adapter->netdev, "%016lx\n", 5437 ((unsigned long *)(adapter->login_rsp_buf))[i]); 5438 } 5439 5440 /* Sanity checks */ 5441 if (login->num_txcomp_subcrqs != login_rsp->num_txsubm_subcrqs || 5442 (be32_to_cpu(login->num_rxcomp_subcrqs) * 5443 adapter->req_rx_add_queues != 5444 be32_to_cpu(login_rsp->num_rxadd_subcrqs))) { 5445 dev_err(dev, "FATAL: Inconsistent login and login rsp\n"); 5446 ibmvnic_reset(adapter, VNIC_RESET_FATAL); 5447 return -EIO; 5448 } 5449 size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) + 5450 be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size)); 5451 /* variable buffer sizes are not supported, so just read the 5452 * first entry. 
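* That first entry (size_array[0]) becomes cur_rx_buf_sz, which is
* used when sizing every rx pool.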
5453 */ 5454 adapter->cur_rx_buf_sz = be64_to_cpu(size_array[0]); 5455 5456 num_tx_pools = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs); 5457 num_rx_pools = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs); 5458 5459 tx_handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) + 5460 be32_to_cpu(adapter->login_rsp_buf->off_txsubm_subcrqs)); 5461 rx_handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) + 5462 be32_to_cpu(adapter->login_rsp_buf->off_rxadd_subcrqs)); 5463 5464 for (i = 0; i < num_tx_pools; i++) 5465 adapter->tx_scrq[i]->handle = tx_handle_array[i]; 5466 5467 for (i = 0; i < num_rx_pools; i++) 5468 adapter->rx_scrq[i]->handle = rx_handle_array[i]; 5469 5470 adapter->num_active_tx_scrqs = num_tx_pools; 5471 adapter->num_active_rx_scrqs = num_rx_pools; 5472 release_login_rsp_buffer(adapter); 5473 release_login_buffer(adapter); 5474 complete(&adapter->init_done); 5475 5476 return 0; 5477 } 5478 5479 static void handle_request_unmap_rsp(union ibmvnic_crq *crq, 5480 struct ibmvnic_adapter *adapter) 5481 { 5482 struct device *dev = &adapter->vdev->dev; 5483 long rc; 5484 5485 rc = crq->request_unmap_rsp.rc.code; 5486 if (rc) 5487 dev_err(dev, "Error %ld in REQUEST_UNMAP_RSP\n", rc); 5488 } 5489 5490 static void handle_query_map_rsp(union ibmvnic_crq *crq, 5491 struct ibmvnic_adapter *adapter) 5492 { 5493 struct net_device *netdev = adapter->netdev; 5494 struct device *dev = &adapter->vdev->dev; 5495 long rc; 5496 5497 rc = crq->query_map_rsp.rc.code; 5498 if (rc) { 5499 dev_err(dev, "Error %ld in QUERY_MAP_RSP\n", rc); 5500 return; 5501 } 5502 netdev_dbg(netdev, "page_size = %d\ntot_pages = %u\nfree_pages = %u\n", 5503 crq->query_map_rsp.page_size, 5504 __be32_to_cpu(crq->query_map_rsp.tot_pages), 5505 __be32_to_cpu(crq->query_map_rsp.free_pages)); 5506 } 5507 5508 static void handle_query_cap_rsp(union ibmvnic_crq *crq, 5509 struct ibmvnic_adapter *adapter) 5510 { 5511 struct net_device *netdev = adapter->netdev; 5512 struct device *dev = &adapter->vdev->dev; 5513 long rc; 5514 5515 atomic_dec(&adapter->running_cap_crqs); 5516 netdev_dbg(netdev, "Outstanding queries: %d\n", 5517 atomic_read(&adapter->running_cap_crqs)); 5518 rc = crq->query_capability.rc.code; 5519 if (rc) { 5520 dev_err(dev, "Error %ld in QUERY_CAP_RSP\n", rc); 5521 goto out; 5522 } 5523 5524 switch (be16_to_cpu(crq->query_capability.capability)) { 5525 case MIN_TX_QUEUES: 5526 adapter->min_tx_queues = 5527 be64_to_cpu(crq->query_capability.number); 5528 netdev_dbg(netdev, "min_tx_queues = %lld\n", 5529 adapter->min_tx_queues); 5530 break; 5531 case MIN_RX_QUEUES: 5532 adapter->min_rx_queues = 5533 be64_to_cpu(crq->query_capability.number); 5534 netdev_dbg(netdev, "min_rx_queues = %lld\n", 5535 adapter->min_rx_queues); 5536 break; 5537 case MIN_RX_ADD_QUEUES: 5538 adapter->min_rx_add_queues = 5539 be64_to_cpu(crq->query_capability.number); 5540 netdev_dbg(netdev, "min_rx_add_queues = %lld\n", 5541 adapter->min_rx_add_queues); 5542 break; 5543 case MAX_TX_QUEUES: 5544 adapter->max_tx_queues = 5545 be64_to_cpu(crq->query_capability.number); 5546 netdev_dbg(netdev, "max_tx_queues = %lld\n", 5547 adapter->max_tx_queues); 5548 break; 5549 case MAX_RX_QUEUES: 5550 adapter->max_rx_queues = 5551 be64_to_cpu(crq->query_capability.number); 5552 netdev_dbg(netdev, "max_rx_queues = %lld\n", 5553 adapter->max_rx_queues); 5554 break; 5555 case MAX_RX_ADD_QUEUES: 5556 adapter->max_rx_add_queues = 5557 be64_to_cpu(crq->query_capability.number); 5558 netdev_dbg(netdev, "max_rx_add_queues = %lld\n", 5559 
static void handle_query_cap_rsp(union ibmvnic_crq *crq,
				 struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	long rc;

	atomic_dec(&adapter->running_cap_crqs);
	netdev_dbg(netdev, "Outstanding queries: %d\n",
		   atomic_read(&adapter->running_cap_crqs));
	rc = crq->query_capability.rc.code;
	if (rc) {
		dev_err(dev, "Error %ld in QUERY_CAP_RSP\n", rc);
		goto out;
	}

	switch (be16_to_cpu(crq->query_capability.capability)) {
	case MIN_TX_QUEUES:
		adapter->min_tx_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_tx_queues = %lld\n",
			   adapter->min_tx_queues);
		break;
	case MIN_RX_QUEUES:
		adapter->min_rx_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_rx_queues = %lld\n",
			   adapter->min_rx_queues);
		break;
	case MIN_RX_ADD_QUEUES:
		adapter->min_rx_add_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_rx_add_queues = %lld\n",
			   adapter->min_rx_add_queues);
		break;
	case MAX_TX_QUEUES:
		adapter->max_tx_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_tx_queues = %lld\n",
			   adapter->max_tx_queues);
		break;
	case MAX_RX_QUEUES:
		adapter->max_rx_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_rx_queues = %lld\n",
			   adapter->max_rx_queues);
		break;
	case MAX_RX_ADD_QUEUES:
		adapter->max_rx_add_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_rx_add_queues = %lld\n",
			   adapter->max_rx_add_queues);
		break;
	case MIN_TX_ENTRIES_PER_SUBCRQ:
		adapter->min_tx_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_tx_entries_per_subcrq = %lld\n",
			   adapter->min_tx_entries_per_subcrq);
		break;
	case MIN_RX_ADD_ENTRIES_PER_SUBCRQ:
		adapter->min_rx_add_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_rx_add_entrs_per_subcrq = %lld\n",
			   adapter->min_rx_add_entries_per_subcrq);
		break;
	case MAX_TX_ENTRIES_PER_SUBCRQ:
		adapter->max_tx_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_tx_entries_per_subcrq = %lld\n",
			   adapter->max_tx_entries_per_subcrq);
		break;
	case MAX_RX_ADD_ENTRIES_PER_SUBCRQ:
		adapter->max_rx_add_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_rx_add_entrs_per_subcrq = %lld\n",
			   adapter->max_rx_add_entries_per_subcrq);
		break;
	case TCP_IP_OFFLOAD:
		adapter->tcp_ip_offload =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "tcp_ip_offload = %lld\n",
			   adapter->tcp_ip_offload);
		break;
	case PROMISC_SUPPORTED:
		adapter->promisc_supported =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "promisc_supported = %lld\n",
			   adapter->promisc_supported);
		break;
	case MIN_MTU:
		adapter->min_mtu = be64_to_cpu(crq->query_capability.number);
		netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
		netdev_dbg(netdev, "min_mtu = %lld\n", adapter->min_mtu);
		break;
	case MAX_MTU:
		adapter->max_mtu = be64_to_cpu(crq->query_capability.number);
		netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
		netdev_dbg(netdev, "max_mtu = %lld\n", adapter->max_mtu);
		break;
	case MAX_MULTICAST_FILTERS:
		adapter->max_multicast_filters =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_multicast_filters = %lld\n",
			   adapter->max_multicast_filters);
		break;
	case VLAN_HEADER_INSERTION:
		adapter->vlan_header_insertion =
		    be64_to_cpu(crq->query_capability.number);
		if (adapter->vlan_header_insertion)
			netdev->features |= NETIF_F_HW_VLAN_STAG_TX;
		netdev_dbg(netdev, "vlan_header_insertion = %lld\n",
			   adapter->vlan_header_insertion);
		break;
	case RX_VLAN_HEADER_INSERTION:
		adapter->rx_vlan_header_insertion =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "rx_vlan_header_insertion = %lld\n",
			   adapter->rx_vlan_header_insertion);
		break;
	case MAX_TX_SG_ENTRIES:
		adapter->max_tx_sg_entries =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_tx_sg_entries = %lld\n",
			   adapter->max_tx_sg_entries);
		break;
	case RX_SG_SUPPORTED:
		adapter->rx_sg_supported =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "rx_sg_supported = %lld\n",
			   adapter->rx_sg_supported);
		break;
	case OPT_TX_COMP_SUB_QUEUES:
		adapter->opt_tx_comp_sub_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_tx_comp_sub_queues = %lld\n",
			   adapter->opt_tx_comp_sub_queues);
		break;
	case OPT_RX_COMP_QUEUES:
		adapter->opt_rx_comp_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_rx_comp_queues = %lld\n",
			   adapter->opt_rx_comp_queues);
		break;
	case OPT_RX_BUFADD_Q_PER_RX_COMP_Q:
		adapter->opt_rx_bufadd_q_per_rx_comp_q =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_rx_bufadd_q_per_rx_comp_q = %lld\n",
			   adapter->opt_rx_bufadd_q_per_rx_comp_q);
		break;
	case OPT_TX_ENTRIES_PER_SUBCRQ:
		adapter->opt_tx_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_tx_entries_per_subcrq = %lld\n",
			   adapter->opt_tx_entries_per_subcrq);
		break;
	case OPT_RXBA_ENTRIES_PER_SUBCRQ:
		adapter->opt_rxba_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_rxba_entries_per_subcrq = %lld\n",
			   adapter->opt_rxba_entries_per_subcrq);
		break;
	case TX_RX_DESC_REQ:
		adapter->tx_rx_desc_req = crq->query_capability.number;
		netdev_dbg(netdev, "tx_rx_desc_req = %llx\n",
			   adapter->tx_rx_desc_req);
		break;

	default:
		netdev_err(netdev, "Got invalid cap rsp %d\n",
			   crq->query_capability.capability);
	}

out:
	if (atomic_read(&adapter->running_cap_crqs) == 0)
		send_request_cap(adapter, 0);
}
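
/* Query the physical port parameters (link speed and duplex). The fw_lock
 * mutex and the fw_done completion serialize this against other firmware
 * commands that share the same completion, and the wait is bounded by the
 * 10000 ms timeout below so a dead CRQ cannot hang the caller indefinitely.
 */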
static int send_query_phys_parms(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;
	int rc;

	memset(&crq, 0, sizeof(crq));
	crq.query_phys_parms.first = IBMVNIC_CRQ_CMD;
	crq.query_phys_parms.cmd = QUERY_PHYS_PARMS;

	mutex_lock(&adapter->fw_lock);
	adapter->fw_done_rc = 0;
	reinit_completion(&adapter->fw_done);

	rc = ibmvnic_send_crq(adapter, &crq);
	if (rc) {
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}

	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
	if (rc) {
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}

	mutex_unlock(&adapter->fw_lock);
	return adapter->fw_done_rc ? -EIO : 0;
}

static int handle_query_phys_parms_rsp(union ibmvnic_crq *crq,
				       struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int rc;
	__be32 rspeed = cpu_to_be32(crq->query_phys_parms_rsp.speed);

	rc = crq->query_phys_parms_rsp.rc.code;
	if (rc) {
		netdev_err(netdev, "Error %d in QUERY_PHYS_PARMS\n", rc);
		return rc;
	}
	switch (rspeed) {
	case IBMVNIC_10MBPS:
		adapter->speed = SPEED_10;
		break;
	case IBMVNIC_100MBPS:
		adapter->speed = SPEED_100;
		break;
	case IBMVNIC_1GBPS:
		adapter->speed = SPEED_1000;
		break;
	case IBMVNIC_10GBPS:
		adapter->speed = SPEED_10000;
		break;
	case IBMVNIC_25GBPS:
		adapter->speed = SPEED_25000;
		break;
	case IBMVNIC_40GBPS:
		adapter->speed = SPEED_40000;
		break;
	case IBMVNIC_50GBPS:
		adapter->speed = SPEED_50000;
		break;
	case IBMVNIC_100GBPS:
		adapter->speed = SPEED_100000;
		break;
	case IBMVNIC_200GBPS:
		adapter->speed = SPEED_200000;
		break;
	default:
		if (netif_carrier_ok(netdev))
			netdev_warn(netdev, "Unknown speed 0x%08x\n", rspeed);
		adapter->speed = SPEED_UNKNOWN;
	}
	if (crq->query_phys_parms_rsp.flags1 & IBMVNIC_FULL_DUPLEX)
		adapter->duplex = DUPLEX_FULL;
	else if (crq->query_phys_parms_rsp.flags1 & IBMVNIC_HALF_DUPLEX)
		adapter->duplex = DUPLEX_HALF;
	else
		adapter->duplex = DUPLEX_UNKNOWN;

	return rc;
}
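
/* Top-level CRQ dispatcher, called from the tasklet for every message pulled
 * off the queue. Transport events (partner init, failover, migration) are
 * handled inline and may schedule a reset; command responses fall through to
 * the second switch and are routed to their handle_*_rsp() helpers.
 */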
static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
			       struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_generic_crq *gen_crq = &crq->generic;
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	u64 *u64_crq = (u64 *)crq;
	long rc;

	netdev_dbg(netdev, "Handling CRQ: %016lx %016lx\n",
		   (unsigned long)cpu_to_be64(u64_crq[0]),
		   (unsigned long)cpu_to_be64(u64_crq[1]));
	switch (gen_crq->first) {
	case IBMVNIC_CRQ_INIT_RSP:
		switch (gen_crq->cmd) {
		case IBMVNIC_CRQ_INIT:
			dev_info(dev, "Partner initialized\n");
			adapter->from_passive_init = true;
			/* Discard any stale login responses from prev reset.
			 * CHECK: should we clear even on INIT_COMPLETE?
			 */
			adapter->login_pending = false;

			if (adapter->state == VNIC_DOWN)
				rc = ibmvnic_reset(adapter, VNIC_RESET_PASSIVE_INIT);
			else
				rc = ibmvnic_reset(adapter, VNIC_RESET_FAILOVER);

			if (rc && rc != -EBUSY) {
				/* We were unable to schedule the failover
				 * reset either because the adapter was still
				 * probing (e.g. during kexec) or we could not
				 * allocate memory. Clear the failover_pending
				 * flag since no one else will. We ignore
				 * EBUSY because it means either a FAILOVER
				 * reset is already scheduled or the adapter
				 * is being removed.
				 */
				netdev_err(netdev,
					   "Error %ld scheduling failover reset\n",
					   rc);
				adapter->failover_pending = false;
			}

			if (!completion_done(&adapter->init_done)) {
				if (!adapter->init_done_rc)
					adapter->init_done_rc = -EAGAIN;
				complete(&adapter->init_done);
			}

			break;
		case IBMVNIC_CRQ_INIT_COMPLETE:
			dev_info(dev, "Partner initialization complete\n");
			adapter->crq.active = true;
			send_version_xchg(adapter);
			break;
		default:
			dev_err(dev, "Unknown crq cmd: %d\n", gen_crq->cmd);
		}
		return;
	case IBMVNIC_CRQ_XPORT_EVENT:
		netif_carrier_off(netdev);
		adapter->crq.active = false;
		/* terminate any thread waiting for a response
		 * from the device
		 */
		if (!completion_done(&adapter->fw_done)) {
			adapter->fw_done_rc = -EIO;
			complete(&adapter->fw_done);
		}

		/* if we got here during crq-init, retry crq-init */
		if (!completion_done(&adapter->init_done)) {
			adapter->init_done_rc = -EAGAIN;
			complete(&adapter->init_done);
		}

		if (!completion_done(&adapter->stats_done))
			complete(&adapter->stats_done);
		if (test_bit(0, &adapter->resetting))
			adapter->force_reset_recovery = true;
		if (gen_crq->cmd == IBMVNIC_PARTITION_MIGRATED) {
			dev_info(dev, "Migrated, re-enabling adapter\n");
			ibmvnic_reset(adapter, VNIC_RESET_MOBILITY);
		} else if (gen_crq->cmd == IBMVNIC_DEVICE_FAILOVER) {
			dev_info(dev, "Backing device failover detected\n");
			adapter->failover_pending = true;
		} else {
			/* The adapter lost the connection */
			dev_err(dev, "Virtual Adapter failed (rc=%d)\n",
				gen_crq->cmd);
			ibmvnic_reset(adapter, VNIC_RESET_FATAL);
		}
		return;
	case IBMVNIC_CRQ_CMD_RSP:
		break;
	default:
		dev_err(dev, "Got an invalid msg type 0x%02x\n",
			gen_crq->first);
		return;
	}

	switch (gen_crq->cmd) {
	case VERSION_EXCHANGE_RSP:
		rc = crq->version_exchange_rsp.rc.code;
		if (rc) {
			dev_err(dev, "Error %ld in VERSION_EXCHG_RSP\n", rc);
			break;
		}
		ibmvnic_version =
		    be16_to_cpu(crq->version_exchange_rsp.version);
		dev_info(dev, "Partner protocol version is %d\n",
			 ibmvnic_version);
		send_query_cap(adapter);
		break;
	case QUERY_CAPABILITY_RSP:
		handle_query_cap_rsp(crq, adapter);
		break;
	case QUERY_MAP_RSP:
		handle_query_map_rsp(crq, adapter);
		break;
	case REQUEST_MAP_RSP:
		adapter->fw_done_rc = crq->request_map_rsp.rc.code;
		complete(&adapter->fw_done);
		break;
	case REQUEST_UNMAP_RSP:
		handle_request_unmap_rsp(crq, adapter);
		break;
	case REQUEST_CAPABILITY_RSP:
		handle_request_cap_rsp(crq, adapter);
		break;
	case LOGIN_RSP:
		netdev_dbg(netdev, "Got Login Response\n");
		handle_login_rsp(crq, adapter);
		break;
	case LOGICAL_LINK_STATE_RSP:
		netdev_dbg(netdev,
			   "Got Logical Link State Response, state: %d rc: %d\n",
			   crq->logical_link_state_rsp.link_state,
			   crq->logical_link_state_rsp.rc.code);
		adapter->logical_link_state =
		    crq->logical_link_state_rsp.link_state;
		adapter->init_done_rc = crq->logical_link_state_rsp.rc.code;
		complete(&adapter->init_done);
		break;
	case LINK_STATE_INDICATION:
		netdev_dbg(netdev, "Got Logical Link State Indication\n");
		adapter->phys_link_state =
		    crq->link_state_indication.phys_link_state;
		adapter->logical_link_state =
		    crq->link_state_indication.logical_link_state;
		if (adapter->phys_link_state && adapter->logical_link_state)
			netif_carrier_on(netdev);
		else
			netif_carrier_off(netdev);
		break;
	case CHANGE_MAC_ADDR_RSP:
		netdev_dbg(netdev, "Got MAC address change Response\n");
		adapter->fw_done_rc = handle_change_mac_rsp(crq, adapter);
		break;
	case ERROR_INDICATION:
		netdev_dbg(netdev, "Got Error Indication\n");
		handle_error_indication(crq, adapter);
		break;
	case REQUEST_STATISTICS_RSP:
		netdev_dbg(netdev, "Got Statistics Response\n");
		complete(&adapter->stats_done);
		break;
	case QUERY_IP_OFFLOAD_RSP:
		netdev_dbg(netdev, "Got Query IP offload Response\n");
		handle_query_ip_offload_rsp(adapter);
		break;
	case MULTICAST_CTRL_RSP:
		netdev_dbg(netdev, "Got multicast control Response\n");
		break;
	case CONTROL_IP_OFFLOAD_RSP:
		netdev_dbg(netdev, "Got Control IP offload Response\n");
		dma_unmap_single(dev, adapter->ip_offload_ctrl_tok,
				 sizeof(adapter->ip_offload_ctrl),
				 DMA_TO_DEVICE);
		complete(&adapter->init_done);
		break;
	case COLLECT_FW_TRACE_RSP:
		netdev_dbg(netdev, "Got Collect firmware trace Response\n");
		complete(&adapter->fw_done);
		break;
	case GET_VPD_SIZE_RSP:
		handle_vpd_size_rsp(crq, adapter);
		break;
	case GET_VPD_RSP:
		handle_vpd_rsp(crq, adapter);
		break;
	case QUERY_PHYS_PARMS_RSP:
		adapter->fw_done_rc = handle_query_phys_parms_rsp(crq, adapter);
		complete(&adapter->fw_done);
		break;
	default:
		netdev_err(netdev, "Got an invalid cmd type 0x%02x\n",
			   gen_crq->cmd);
	}
}
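
/* CRQ interrupt top half: the real work happens in ibmvnic_tasklet(), which
 * drains the queue under the CRQ spinlock, so the handler only needs to
 * schedule it.
 */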
static irqreturn_t ibmvnic_interrupt(int irq, void *instance)
{
	struct ibmvnic_adapter *adapter = instance;

	tasklet_schedule(&adapter->tasklet);
	return IRQ_HANDLED;
}

static void ibmvnic_tasklet(struct tasklet_struct *t)
{
	struct ibmvnic_adapter *adapter = from_tasklet(adapter, t, tasklet);
	struct ibmvnic_crq_queue *queue = &adapter->crq;
	union ibmvnic_crq *crq;
	unsigned long flags;

	spin_lock_irqsave(&queue->lock, flags);

	/* Pull all the valid messages off the CRQ */
	while ((crq = ibmvnic_next_crq(adapter)) != NULL) {
		/* This barrier makes sure ibmvnic_next_crq()'s
		 * crq->generic.first & IBMVNIC_CRQ_CMD_RSP is loaded
		 * before ibmvnic_handle_crq()'s
		 * switch(gen_crq->first) and switch(gen_crq->cmd).
		 */
		dma_rmb();
		ibmvnic_handle_crq(crq, adapter);
		crq->generic.first = 0;
	}

	spin_unlock_irqrestore(&queue->lock, flags);
}
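
/* The helpers below manage the CRQ itself through the H_ENABLE_CRQ,
 * H_FREE_CRQ and H_REG_CRQ hypervisor calls, retrying while the hypervisor
 * reports the operation as busy or still in progress.
 */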
static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct vio_dev *vdev = adapter->vdev;
	int rc;

	do {
		rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
	} while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));

	if (rc)
		dev_err(&vdev->dev, "Error enabling adapter (rc=%d)\n", rc);

	return rc;
}

static int ibmvnic_reset_crq(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct device *dev = &adapter->vdev->dev;
	struct vio_dev *vdev = adapter->vdev;
	int rc;

	/* Close the CRQ */
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	/* Clean out the queue */
	if (!crq->msgs)
		return -EINVAL;

	memset(crq->msgs, 0, PAGE_SIZE);
	crq->cur = 0;
	crq->active = false;

	/* And re-open it again */
	rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
				crq->msg_token, PAGE_SIZE);

	if (rc == H_CLOSED)
		/* Adapter is good, but other end is not ready */
		dev_warn(dev, "Partner adapter not ready\n");
	else if (rc != 0)
		dev_warn(dev, "Couldn't register crq (rc=%d)\n", rc);

	return rc;
}

static void release_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct vio_dev *vdev = adapter->vdev;
	long rc;

	if (!crq->msgs)
		return;

	netdev_dbg(adapter->netdev, "Releasing CRQ\n");
	free_irq(vdev->irq, adapter);
	tasklet_kill(&adapter->tasklet);
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	dma_unmap_single(&vdev->dev, crq->msg_token, PAGE_SIZE,
			 DMA_BIDIRECTIONAL);
	free_page((unsigned long)crq->msgs);
	crq->msgs = NULL;
	crq->active = false;
}
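
/* Allocate the one-page CRQ message buffer, DMA-map it, register it with the
 * hypervisor, and wire up the CRQ interrupt and tasklet. On H_RESOURCE the
 * registration is retried via ibmvnic_reset_crq(), since a previous kernel
 * (e.g. across kexec) may still hold the CRQ.
 */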
static int init_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct device *dev = &adapter->vdev->dev;
	struct vio_dev *vdev = adapter->vdev;
	int rc, retrc = -ENOMEM;

	if (crq->msgs)
		return 0;

	crq->msgs = (union ibmvnic_crq *)get_zeroed_page(GFP_KERNEL);
	/* Should we allocate more than one page? */

	if (!crq->msgs)
		return -ENOMEM;

	crq->size = PAGE_SIZE / sizeof(*crq->msgs);
	crq->msg_token = dma_map_single(dev, crq->msgs, PAGE_SIZE,
					DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, crq->msg_token))
		goto map_failed;

	rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
				crq->msg_token, PAGE_SIZE);

	if (rc == H_RESOURCE)
		/* maybe kexec'ing and the resource is busy; try a reset */
		rc = ibmvnic_reset_crq(adapter);
	retrc = rc;

	if (rc == H_CLOSED) {
		dev_warn(dev, "Partner adapter not ready\n");
	} else if (rc) {
		dev_warn(dev, "Error %d opening adapter\n", rc);
		goto reg_crq_failed;
	}

	retrc = 0;

	tasklet_setup(&adapter->tasklet, (void *)ibmvnic_tasklet);

	netdev_dbg(adapter->netdev, "registering irq 0x%x\n", vdev->irq);
	snprintf(crq->name, sizeof(crq->name), "ibmvnic-%x",
		 adapter->vdev->unit_address);
	rc = request_irq(vdev->irq, ibmvnic_interrupt, 0, crq->name, adapter);
	if (rc) {
		dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n",
			vdev->irq, rc);
		goto req_irq_failed;
	}

	rc = vio_enable_interrupts(vdev);
	if (rc) {
		dev_err(dev, "Error %d enabling interrupts\n", rc);
		goto req_irq_failed;
	}

	crq->cur = 0;
	spin_lock_init(&crq->lock);

	/* process any CRQs that were queued before we enabled interrupts */
	tasklet_schedule(&adapter->tasklet);

	return retrc;

req_irq_failed:
	tasklet_kill(&adapter->tasklet);
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
reg_crq_failed:
	dma_unmap_single(dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
map_failed:
	free_page((unsigned long)crq->msgs);
	crq->msgs = NULL;
	return retrc;
}
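
/* Drive the CRQ initialization handshake with the VNIC server and then bring
 * up the sub-CRQs. On a reset (as opposed to first probe), the sub-CRQs are
 * reused when the requested queue counts are unchanged; otherwise they are
 * released and reallocated from scratch.
 */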
static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter, bool reset)
{
	struct device *dev = &adapter->vdev->dev;
	unsigned long timeout = msecs_to_jiffies(20000);
	u64 old_num_rx_queues = adapter->req_rx_queues;
	u64 old_num_tx_queues = adapter->req_tx_queues;
	int rc;

	adapter->from_passive_init = false;

	rc = ibmvnic_send_crq_init(adapter);
	if (rc) {
		dev_err(dev, "Send crq init failed with error %d\n", rc);
		return rc;
	}

	if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
		dev_err(dev, "Initialization sequence timed out\n");
		return -ETIMEDOUT;
	}

	if (adapter->init_done_rc) {
		release_crq_queue(adapter);
		dev_err(dev, "CRQ-init failed, %d\n", adapter->init_done_rc);
		return adapter->init_done_rc;
	}

	if (adapter->from_passive_init) {
		adapter->state = VNIC_OPEN;
		adapter->from_passive_init = false;
		dev_err(dev, "CRQ-init failed, passive-init\n");
		return -EINVAL;
	}

	if (reset &&
	    test_bit(0, &adapter->resetting) && !adapter->wait_for_reset &&
	    adapter->reset_reason != VNIC_RESET_MOBILITY) {
		if (adapter->req_rx_queues != old_num_rx_queues ||
		    adapter->req_tx_queues != old_num_tx_queues) {
			release_sub_crqs(adapter, 0);
			rc = init_sub_crqs(adapter);
		} else {
			/* no need to reinitialize completely, but we do
			 * need to clean up transmits that were in flight
			 * when we processed the reset. Failure to do so
			 * will confound the upper layer, usually TCP, by
			 * creating the illusion of transmits that are
			 * awaiting completion.
			 */
			clean_tx_pools(adapter);

			rc = reset_sub_crq_queues(adapter);
		}
	} else {
		rc = init_sub_crqs(adapter);
	}

	if (rc) {
		dev_err(dev, "Initialization of sub crqs failed\n");
		release_crq_queue(adapter);
		return rc;
	}

	rc = init_sub_crq_irqs(adapter);
	if (rc) {
		dev_err(dev, "Failed to initialize sub crq irqs\n");
		release_crq_queue(adapter);
	}

	return rc;
}

static struct device_attribute dev_attr_failover;
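
/* Probe routine for the vio bus. Reads the MAC address from the device tree,
 * allocates the net_device, and loops on CRQ + reset init while the handshake
 * returns -EAGAIN (e.g. a transport event raced with initialization). A
 * failed handshake is tolerated: the adapter is left in VNIC_DOWN and
 * finishes initializing when the partner triggers a passive init later.
 */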
static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
{
	struct ibmvnic_adapter *adapter;
	struct net_device *netdev;
	unsigned char *mac_addr_p;
	unsigned long flags;
	bool init_success;
	int rc;

	dev_dbg(&dev->dev, "entering ibmvnic_probe for UA 0x%x\n",
		dev->unit_address);

	mac_addr_p = (unsigned char *)vio_get_attribute(dev,
							VETH_MAC_ADDR, NULL);
	if (!mac_addr_p) {
		dev_err(&dev->dev,
			"(%s:%3.3d) ERROR: Can't find MAC_ADDR attribute\n",
			__FILE__, __LINE__);
		return 0;
	}

	netdev = alloc_etherdev_mq(sizeof(struct ibmvnic_adapter),
				   IBMVNIC_MAX_QUEUES);
	if (!netdev)
		return -ENOMEM;

	adapter = netdev_priv(netdev);
	adapter->state = VNIC_PROBING;
	dev_set_drvdata(&dev->dev, netdev);
	adapter->vdev = dev;
	adapter->netdev = netdev;
	adapter->login_pending = false;
	memset(&adapter->map_ids, 0, sizeof(adapter->map_ids));
	/* map_ids start at 1, so ensure map_id 0 is always "in-use" */
	bitmap_set(adapter->map_ids, 0, 1);

	ether_addr_copy(adapter->mac_addr, mac_addr_p);
	eth_hw_addr_set(netdev, adapter->mac_addr);
	netdev->irq = dev->irq;
	netdev->netdev_ops = &ibmvnic_netdev_ops;
	netdev->ethtool_ops = &ibmvnic_ethtool_ops;
	SET_NETDEV_DEV(netdev, &dev->dev);

	INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset);
	INIT_DELAYED_WORK(&adapter->ibmvnic_delayed_reset,
			  __ibmvnic_delayed_reset);
	INIT_LIST_HEAD(&adapter->rwi_list);
	spin_lock_init(&adapter->rwi_lock);
	spin_lock_init(&adapter->state_lock);
	mutex_init(&adapter->fw_lock);
	init_completion(&adapter->probe_done);
	init_completion(&adapter->init_done);
	init_completion(&adapter->fw_done);
	init_completion(&adapter->reset_done);
	init_completion(&adapter->stats_done);
	clear_bit(0, &adapter->resetting);
	adapter->prev_rx_buf_sz = 0;
	adapter->prev_mtu = 0;

	init_success = false;
	do {
		reinit_init_done(adapter);

		/* clear any failovers we got in the previous pass
		 * since we are reinitializing the CRQ
		 */
		adapter->failover_pending = false;

		/* If we had already initialized CRQ, we may have one or
		 * more resets queued already. Discard those and release
		 * the CRQ before initializing the CRQ again.
		 */
		release_crq_queue(adapter);

		/* Since we are still in PROBING state, __ibmvnic_reset()
		 * will not access the ->rwi_list and since we released CRQ,
		 * we won't get _new_ transport events. But there may be an
		 * ongoing ibmvnic_reset() call. So serialize access to
		 * rwi_list. If we win the race, ibmvnic_reset() could add
		 * a reset after we purged but that's ok - we just may end
		 * up with an extra reset (i.e. similar to having two or more
		 * resets in the queue at once).
		 * CHECK.
		 */
		spin_lock_irqsave(&adapter->rwi_lock, flags);
		flush_reset_queue(adapter);
		spin_unlock_irqrestore(&adapter->rwi_lock, flags);

		rc = init_crq_queue(adapter);
		if (rc) {
			dev_err(&dev->dev, "Couldn't initialize crq. rc=%d\n",
				rc);
			goto ibmvnic_init_fail;
		}

		rc = ibmvnic_reset_init(adapter, false);
	} while (rc == -EAGAIN);

	/* We are ignoring the error from ibmvnic_reset_init() assuming that the
	 * partner is not ready. CRQ is not active. When the partner becomes
	 * ready, we will do the passive init reset.
	 */

	if (!rc)
		init_success = true;

	rc = init_stats_buffers(adapter);
	if (rc)
		goto ibmvnic_init_fail;

	rc = init_stats_token(adapter);
	if (rc)
		goto ibmvnic_stats_fail;

	rc = device_create_file(&dev->dev, &dev_attr_failover);
	if (rc)
		goto ibmvnic_dev_file_err;

	netif_carrier_off(netdev);

	if (init_success) {
		adapter->state = VNIC_PROBED;
		netdev->mtu = adapter->req_mtu - ETH_HLEN;
		netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
		netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
	} else {
		adapter->state = VNIC_DOWN;
	}

	adapter->wait_for_reset = false;
	adapter->last_reset_time = jiffies;

	rc = register_netdev(netdev);
	if (rc) {
		dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc);
		goto ibmvnic_register_fail;
	}
	dev_info(&dev->dev, "ibmvnic registered\n");

	rc = ibmvnic_cpu_notif_add(adapter);
	if (rc) {
		netdev_err(netdev, "Registering cpu notifier failed\n");
		goto cpu_notif_add_failed;
	}

	complete(&adapter->probe_done);

	return 0;

cpu_notif_add_failed:
	unregister_netdev(netdev);

ibmvnic_register_fail:
	device_remove_file(&dev->dev, &dev_attr_failover);

ibmvnic_dev_file_err:
	release_stats_token(adapter);

ibmvnic_stats_fail:
	release_stats_buffers(adapter);

ibmvnic_init_fail:
	release_sub_crqs(adapter, 1);
	release_crq_queue(adapter);

	/* cleanup worker thread after releasing CRQ so we don't get
	 * transport events (i.e. new work items for the worker thread).
	 */
	adapter->state = VNIC_REMOVING;
	complete(&adapter->probe_done);
	flush_work(&adapter->ibmvnic_reset);
	flush_delayed_work(&adapter->ibmvnic_delayed_reset);

	flush_reset_queue(adapter);

	mutex_destroy(&adapter->fw_lock);
	free_netdev(netdev);

	return rc;
}
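
/* Device removal: transition to VNIC_REMOVING under both state_lock and
 * rwi_lock so no new resets can be queued, flush any reset work already in
 * flight, then tear down the netdev and all CRQ/sub-CRQ resources.
 */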
static void ibmvnic_remove(struct vio_dev *dev)
{
	struct net_device *netdev = dev_get_drvdata(&dev->dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	unsigned long flags;

	spin_lock_irqsave(&adapter->state_lock, flags);

	/* If ibmvnic_reset() is scheduling a reset, wait for it to
	 * finish. Then, set the state to REMOVING to prevent it from
	 * scheduling any more work and to have reset functions ignore
	 * any resets that have already been scheduled. Drop the lock
	 * after setting state, so __ibmvnic_reset() which is called
	 * from the flush_work() below, can make progress.
	 */
	spin_lock(&adapter->rwi_lock);
	adapter->state = VNIC_REMOVING;
	spin_unlock(&adapter->rwi_lock);

	spin_unlock_irqrestore(&adapter->state_lock, flags);

	ibmvnic_cpu_notif_remove(adapter);

	flush_work(&adapter->ibmvnic_reset);
	flush_delayed_work(&adapter->ibmvnic_delayed_reset);

	rtnl_lock();
	unregister_netdevice(netdev);

	release_resources(adapter);
	release_rx_pools(adapter);
	release_tx_pools(adapter);
	release_sub_crqs(adapter, 1);
	release_crq_queue(adapter);

	release_stats_token(adapter);
	release_stats_buffers(adapter);

	adapter->state = VNIC_REMOVED;

	rtnl_unlock();
	mutex_destroy(&adapter->fw_lock);
	device_remove_file(&dev->dev, &dev_attr_failover);
	free_netdev(netdev);
	dev_set_drvdata(&dev->dev, NULL);
}

/* Writing "1" to the "failover" sysfs attribute asks the hypervisor to fail
 * the session over to a backup adapter. If a session token cannot be
 * obtained, fall back to scheduling a FAILOVER reset directly.
 */
static ssize_t failover_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	__be64 session_token;
	long rc;

	if (!sysfs_streq(buf, "1"))
		return -EINVAL;

	rc = plpar_hcall(H_VIOCTL, retbuf, adapter->vdev->unit_address,
			 H_GET_SESSION_TOKEN, 0, 0, 0);
	if (rc) {
		netdev_err(netdev, "Couldn't retrieve session token, rc %ld\n",
			   rc);
		goto last_resort;
	}

	session_token = (__be64)retbuf[0];
	netdev_dbg(netdev, "Initiating client failover, session id %llx\n",
		   be64_to_cpu(session_token));
	rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
				H_SESSION_ERR_DETECTED, session_token, 0, 0);
	if (rc) {
		netdev_err(netdev,
			   "H_VIOCTL initiated failover failed, rc %ld\n",
			   rc);
		goto last_resort;
	}

	return count;

last_resort:
	netdev_dbg(netdev, "Trying to send CRQ_CMD, the last resort\n");
	ibmvnic_reset(adapter, VNIC_RESET_FAILOVER);

	return count;
}
static DEVICE_ATTR_WO(failover);

static unsigned long ibmvnic_get_desired_dma(struct vio_dev *vdev)
{
	struct net_device *netdev = dev_get_drvdata(&vdev->dev);
	struct ibmvnic_adapter *adapter;
	struct iommu_table *tbl;
	unsigned long ret = 0;
	int i;

	tbl = get_iommu_table_base(&vdev->dev);

	/* netdev inits at probe time along with the structures we need below */
	if (!netdev)
		return IOMMU_PAGE_ALIGN(IBMVNIC_IO_ENTITLEMENT_DEFAULT, tbl);

	adapter = netdev_priv(netdev);

	ret += PAGE_SIZE; /* the crq message queue */
	ret += IOMMU_PAGE_ALIGN(sizeof(struct ibmvnic_statistics), tbl);

	for (i = 0; i < adapter->req_tx_queues + adapter->req_rx_queues; i++)
		ret += 4 * PAGE_SIZE; /* the scrq message queue */

	for (i = 0; i < adapter->num_active_rx_pools; i++)
		ret += adapter->rx_pool[i].size *
		    IOMMU_PAGE_ALIGN(adapter->rx_pool[i].buff_size, tbl);

	return ret;
}

static int ibmvnic_resume(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	if (adapter->state != VNIC_OPEN)
		return 0;

	tasklet_schedule(&adapter->tasklet);

	return 0;
}

static const struct vio_device_id ibmvnic_device_table[] = {
	{"network", "IBM,vnic"},
	{"", "" }
};
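/* Export the match table so the module is loaded automatically when the
 * hypervisor presents a matching vNIC device to the partition.
 */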
MODULE_DEVICE_TABLE(vio, ibmvnic_device_table);

static const struct dev_pm_ops ibmvnic_pm_ops = {
	.resume = ibmvnic_resume
};

static struct vio_driver ibmvnic_driver = {
	.id_table       = ibmvnic_device_table,
	.probe          = ibmvnic_probe,
	.remove         = ibmvnic_remove,
	.get_desired_dma = ibmvnic_get_desired_dma,
	.name		= ibmvnic_driver_name,
	.pm		= &ibmvnic_pm_ops,
};

/* module functions */
static int __init ibmvnic_module_init(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "net/ibmvnic:online",
				      ibmvnic_cpu_online,
				      ibmvnic_cpu_down_prep);
	if (ret < 0)
		goto out;
	ibmvnic_online = ret;
	ret = cpuhp_setup_state_multi(CPUHP_IBMVNIC_DEAD, "net/ibmvnic:dead",
				      NULL, ibmvnic_cpu_dead);
	if (ret)
		goto err_dead;

	ret = vio_register_driver(&ibmvnic_driver);
	if (ret)
		goto err_vio_register;

	pr_info("%s: %s %s\n", ibmvnic_driver_name, ibmvnic_driver_string,
		IBMVNIC_DRIVER_VERSION);

	return 0;
err_vio_register:
	cpuhp_remove_multi_state(CPUHP_IBMVNIC_DEAD);
err_dead:
	cpuhp_remove_multi_state(ibmvnic_online);
out:
	return ret;
}

static void __exit ibmvnic_module_exit(void)
{
	vio_unregister_driver(&ibmvnic_driver);
	cpuhp_remove_multi_state(CPUHP_IBMVNIC_DEAD);
	cpuhp_remove_multi_state(ibmvnic_online);
}

module_init(ibmvnic_module_init);
module_exit(ibmvnic_module_exit);