// SPDX-License-Identifier: GPL-2.0-or-later
/**************************************************************************/
/*                                                                        */
/*  IBM System i and System p Virtual NIC Device Driver                   */
/*  Copyright (C) 2014 IBM Corp.                                          */
/*  Santiago Leon (santi_leon@yahoo.com)                                  */
/*  Thomas Falcon (tlfalcon@linux.vnet.ibm.com)                           */
/*  John Allen (jallen@linux.vnet.ibm.com)                                */
/*                                                                        */
/*                                                                        */
/* This module contains the implementation of a virtual ethernet device   */
/* for use with IBM i/p Series LPAR Linux. It utilizes the logical LAN    */
/* option of the RS/6000 Platform Architecture to interface with virtual  */
/* ethernet NICs that are presented to the partition by the hypervisor.   */
/*                                                                        */
/* Messages are passed between the VNIC driver and the VNIC server using  */
/* Command/Response Queues (CRQs) and sub CRQs (sCRQs). CRQs are used to  */
/* issue and receive commands that initiate communication with the server */
/* on driver initialization. Sub CRQs (sCRQs) are similar to CRQs, but    */
/* are used by the driver to notify the server that a packet is           */
/* ready for transmission or that a buffer has been added to receive a    */
/* packet. Subsequently, sCRQs are used by the server to notify the       */
/* driver that a packet transmission has been completed or that a packet  */
/* has been received and placed in a waiting buffer.                      */
/*                                                                        */
/* In lieu of a more conventional "on-the-fly" DMA mapping strategy in    */
/* which skbs are DMA mapped and immediately unmapped when the transmit   */
/* or receive has been completed, the VNIC driver is required to use      */
/* "long term mapping". This entails that large, contiguous DMA mapped    */
/* buffers are allocated on driver initialization and these buffers are   */
/* then continuously reused to pass skbs to and from the VNIC server.     */
/*                                                                        */
/**************************************************************************/
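/* A rough sketch of the message flow described above (illustrative only,
 * not an exhaustive protocol description):
 *
 *	driver                                    VNIC server
 *	  |---- CRQ: init / capability exchange ---->|
 *	  |<--- CRQ: responses, login ---------------|
 *	  |---- sCRQ: skb ready for transmission --->|
 *	  |<--- sCRQ: transmit completed ------------|
 *	  |---- sCRQ: rx buffer added -------------->|
 *	  |<--- sCRQ: packet placed in rx buffer ----|
 */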
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/completion.h>
#include <linux/ioport.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/ethtool.h>
#include <linux/proc_fs.h>
#include <linux/if_arp.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kthread.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <net/net_namespace.h>
#include <asm/hvcall.h>
#include <linux/atomic.h>
#include <asm/vio.h>
#include <asm/xive.h>
#include <asm/iommu.h>
#include <linux/uaccess.h>
#include <asm/firmware.h>
#include <linux/workqueue.h>
#include <linux/if_vlan.h>
#include <linux/utsname.h>
#include <linux/cpu.h>

#include "ibmvnic.h"

static const char ibmvnic_driver_name[] = "ibmvnic";
static const char ibmvnic_driver_string[] = "IBM System i/p Virtual NIC Driver";

MODULE_AUTHOR("Santiago Leon");
MODULE_DESCRIPTION("IBM System i/p Virtual NIC Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(IBMVNIC_DRIVER_VERSION);

static int ibmvnic_version = IBMVNIC_INITIAL_VERSION;
static void release_sub_crqs(struct ibmvnic_adapter *, bool);
static int ibmvnic_reset_crq(struct ibmvnic_adapter *);
static int ibmvnic_send_crq_init(struct ibmvnic_adapter *);
static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *);
static int ibmvnic_send_crq(struct ibmvnic_adapter *, union ibmvnic_crq *);
static int send_subcrq_indirect(struct ibmvnic_adapter *, u64, u64, u64);
static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance);
static int enable_scrq_irq(struct ibmvnic_adapter *,
			   struct ibmvnic_sub_crq_queue *);
static int disable_scrq_irq(struct ibmvnic_adapter *,
			    struct ibmvnic_sub_crq_queue *);
static int pending_scrq(struct ibmvnic_adapter *,
			struct ibmvnic_sub_crq_queue *);
static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *,
					struct ibmvnic_sub_crq_queue *);
static int ibmvnic_poll(struct napi_struct *napi, int data);
static int reset_sub_crq_queues(struct ibmvnic_adapter *adapter);
static inline void reinit_init_done(struct ibmvnic_adapter *adapter);
static void send_query_map(struct ibmvnic_adapter *adapter);
static int send_request_map(struct ibmvnic_adapter *, dma_addr_t, u32, u8);
static int send_request_unmap(struct ibmvnic_adapter *, u8);
static int send_login(struct ibmvnic_adapter *adapter);
static void send_query_cap(struct ibmvnic_adapter *adapter);
static int init_sub_crqs(struct ibmvnic_adapter *);
static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter);
static int ibmvnic_reset_init(struct ibmvnic_adapter *, bool reset);
static void release_crq_queue(struct ibmvnic_adapter *);
static int __ibmvnic_set_mac(struct net_device *, u8 *);
static int init_crq_queue(struct ibmvnic_adapter *adapter);
static int send_query_phys_parms(struct ibmvnic_adapter *adapter);
static void ibmvnic_tx_scrq_clean_buffer(struct ibmvnic_adapter *adapter,
					 struct ibmvnic_sub_crq_queue *tx_scrq);
static void free_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb);
static void ibmvnic_disable_irqs(struct ibmvnic_adapter *adapter);

struct ibmvnic_stat {
	char name[ETH_GSTRING_LEN];
	int offset;
};

#define IBMVNIC_STAT_OFF(stat) (offsetof(struct ibmvnic_adapter, stats) + \
			     offsetof(struct ibmvnic_statistics, stat))
#define IBMVNIC_GET_STAT(a, off) (*((u64 *)(((unsigned long)(a)) + (off))))

static const struct ibmvnic_stat ibmvnic_stats[] = {
	{"rx_packets", IBMVNIC_STAT_OFF(rx_packets)},
	{"rx_bytes", IBMVNIC_STAT_OFF(rx_bytes)},
	{"tx_packets", IBMVNIC_STAT_OFF(tx_packets)},
	{"tx_bytes", IBMVNIC_STAT_OFF(tx_bytes)},
	{"ucast_tx_packets", IBMVNIC_STAT_OFF(ucast_tx_packets)},
	{"ucast_rx_packets", IBMVNIC_STAT_OFF(ucast_rx_packets)},
	{"mcast_tx_packets", IBMVNIC_STAT_OFF(mcast_tx_packets)},
	{"mcast_rx_packets", IBMVNIC_STAT_OFF(mcast_rx_packets)},
	{"bcast_tx_packets", IBMVNIC_STAT_OFF(bcast_tx_packets)},
	{"bcast_rx_packets", IBMVNIC_STAT_OFF(bcast_rx_packets)},
	{"align_errors", IBMVNIC_STAT_OFF(align_errors)},
	{"fcs_errors", IBMVNIC_STAT_OFF(fcs_errors)},
	{"single_collision_frames", IBMVNIC_STAT_OFF(single_collision_frames)},
	{"multi_collision_frames", IBMVNIC_STAT_OFF(multi_collision_frames)},
	{"sqe_test_errors", IBMVNIC_STAT_OFF(sqe_test_errors)},
	{"deferred_tx", IBMVNIC_STAT_OFF(deferred_tx)},
	{"late_collisions", IBMVNIC_STAT_OFF(late_collisions)},
	{"excess_collisions", IBMVNIC_STAT_OFF(excess_collisions)},
	{"internal_mac_tx_errors", IBMVNIC_STAT_OFF(internal_mac_tx_errors)},
	{"carrier_sense", IBMVNIC_STAT_OFF(carrier_sense)},
	{"too_long_frames", IBMVNIC_STAT_OFF(too_long_frames)},
	{"internal_mac_rx_errors", IBMVNIC_STAT_OFF(internal_mac_rx_errors)},
};
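/* Illustrative note: the ethtool statistics path walks the table above and
 * reads each counter out of the adapter with something like
 *
 *	data[i] = IBMVNIC_GET_STAT(adapter, ibmvnic_stats[i].offset);
 *
 * i.e. IBMVNIC_STAT_OFF() records where the counter lives inside
 * struct ibmvnic_adapter and IBMVNIC_GET_STAT() dereferences it as a u64.
 */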
static int send_crq_init_complete(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
	crq.generic.cmd = IBMVNIC_CRQ_INIT_COMPLETE;

	return ibmvnic_send_crq(adapter, &crq);
}

static int send_version_xchg(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.version_exchange.first = IBMVNIC_CRQ_CMD;
	crq.version_exchange.cmd = VERSION_EXCHANGE;
	crq.version_exchange.version = cpu_to_be16(ibmvnic_version);

	return ibmvnic_send_crq(adapter, &crq);
}

static void ibmvnic_clean_queue_affinity(struct ibmvnic_adapter *adapter,
					 struct ibmvnic_sub_crq_queue *queue)
{
	if (!(queue && queue->irq))
		return;

	cpumask_clear(queue->affinity_mask);

	if (irq_set_affinity_and_hint(queue->irq, NULL))
		netdev_warn(adapter->netdev,
			    "%s: Clear affinity failed, queue addr = %p, IRQ = %d\n",
			    __func__, queue, queue->irq);
}

static void ibmvnic_clean_affinity(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_sub_crq_queue **rxqs;
	struct ibmvnic_sub_crq_queue **txqs;
	int num_rxqs, num_txqs;
	int i;

	rxqs = adapter->rx_scrq;
	txqs = adapter->tx_scrq;
	num_txqs = adapter->num_active_tx_scrqs;
	num_rxqs = adapter->num_active_rx_scrqs;

	netdev_dbg(adapter->netdev, "%s: Cleaning irq affinity hints", __func__);
	if (txqs) {
		for (i = 0; i < num_txqs; i++)
			ibmvnic_clean_queue_affinity(adapter, txqs[i]);
	}
	if (rxqs) {
		for (i = 0; i < num_rxqs; i++)
			ibmvnic_clean_queue_affinity(adapter, rxqs[i]);
	}
}

static int ibmvnic_set_queue_affinity(struct ibmvnic_sub_crq_queue *queue,
				      unsigned int *cpu, int *stragglers,
				      int stride)
{
	cpumask_var_t mask;
	int i;
	int rc = 0;

	if (!(queue && queue->irq))
		return rc;

	/* cpumask_var_t is either a pointer or array, allocation works here */
	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	/* while we have extra cpu give one extra to this irq */
	if (*stragglers) {
		stride++;
		(*stragglers)--;
	}
	/* atomic write is safer than writing bit by bit directly */
	for (i = 0; i < stride; i++) {
		cpumask_set_cpu(*cpu, mask);
		*cpu = cpumask_next_wrap(*cpu, cpu_online_mask,
					 nr_cpu_ids, false);
	}
	/* set queue affinity mask */
	cpumask_copy(queue->affinity_mask, mask);
	rc = irq_set_affinity_and_hint(queue->irq, queue->affinity_mask);
	free_cpumask_var(mask);

	return rc;
}

/* assumes cpu read lock is held */
static void ibmvnic_set_affinity(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_sub_crq_queue **rxqs = adapter->rx_scrq;
	struct ibmvnic_sub_crq_queue **txqs = adapter->tx_scrq;
	struct ibmvnic_sub_crq_queue *queue;
	int num_rxqs = adapter->num_active_rx_scrqs, i_rxqs = 0;
	int num_txqs = adapter->num_active_tx_scrqs, i_txqs = 0;
	int total_queues, stride, stragglers, i;
	unsigned int num_cpu, cpu;
	bool is_rx_queue;
	int rc = 0;

	netdev_dbg(adapter->netdev, "%s: Setting irq affinity hints", __func__);
	if (!(adapter->rx_scrq && adapter->tx_scrq)) {
		netdev_warn(adapter->netdev,
			    "%s: Set affinity failed, queues not allocated\n",
			    __func__);
		return;
	}

	total_queues = num_rxqs + num_txqs;
	num_cpu = num_online_cpus();
	/* number of cpus assigned per irq */
	stride = max_t(int, num_cpu / total_queues, 1);
	/* number of leftover cpus */
	stragglers = num_cpu >= total_queues ? num_cpu % total_queues : 0;
	/* next available cpu to assign irq to */
	cpu = cpumask_next(-1, cpu_online_mask);

	for (i = 0; i < total_queues; i++) {
		is_rx_queue = false;
		/* balance core load by alternating rx and tx assignments
		 * ex: TX0 -> RX0 -> TX1 -> RX1 etc.
		 */
		if ((i % 2 == 1 && i_rxqs < num_rxqs) || i_txqs == num_txqs) {
			queue = rxqs[i_rxqs++];
			is_rx_queue = true;
		} else {
			queue = txqs[i_txqs++];
		}

		rc = ibmvnic_set_queue_affinity(queue, &cpu, &stragglers,
						stride);
		if (rc)
			goto out;

		if (!queue || is_rx_queue)
			continue;

		rc = __netif_set_xps_queue(adapter->netdev,
					   cpumask_bits(queue->affinity_mask),
					   i_txqs - 1, XPS_CPUS);
		if (rc)
			netdev_warn(adapter->netdev, "%s: Set XPS on queue %d failed, rc = %d.\n",
				    __func__, i_txqs - 1, rc);
	}

out:
	if (rc) {
		netdev_warn(adapter->netdev,
			    "%s: Set affinity failed, queue addr = %p, IRQ = %d, rc = %d.\n",
			    __func__, queue, queue->irq, rc);
		ibmvnic_clean_affinity(adapter);
	}
}
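/* Worked example (illustrative): with 8 online CPUs and 3 queues,
 * stride = max(8 / 3, 1) = 2 and stragglers = 8 % 3 = 2. The first two
 * queues each take stride + 1 = 3 CPUs (consuming the stragglers) and the
 * last queue takes 2, so all 8 CPUs end up assigned: 3 + 3 + 2.
 */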
static int ibmvnic_cpu_online(unsigned int cpu, struct hlist_node *node)
{
	struct ibmvnic_adapter *adapter;

	adapter = hlist_entry_safe(node, struct ibmvnic_adapter, node);
	ibmvnic_set_affinity(adapter);
	return 0;
}

static int ibmvnic_cpu_dead(unsigned int cpu, struct hlist_node *node)
{
	struct ibmvnic_adapter *adapter;

	adapter = hlist_entry_safe(node, struct ibmvnic_adapter, node_dead);
	ibmvnic_set_affinity(adapter);
	return 0;
}

static int ibmvnic_cpu_down_prep(unsigned int cpu, struct hlist_node *node)
{
	struct ibmvnic_adapter *adapter;

	adapter = hlist_entry_safe(node, struct ibmvnic_adapter, node);
	ibmvnic_clean_affinity(adapter);
	return 0;
}

static enum cpuhp_state ibmvnic_online;

static int ibmvnic_cpu_notif_add(struct ibmvnic_adapter *adapter)
{
	int ret;

	ret = cpuhp_state_add_instance_nocalls(ibmvnic_online, &adapter->node);
	if (ret)
		return ret;
	ret = cpuhp_state_add_instance_nocalls(CPUHP_IBMVNIC_DEAD,
					       &adapter->node_dead);
	if (!ret)
		return ret;
	cpuhp_state_remove_instance_nocalls(ibmvnic_online, &adapter->node);
	return ret;
}

static void ibmvnic_cpu_notif_remove(struct ibmvnic_adapter *adapter)
{
	cpuhp_state_remove_instance_nocalls(ibmvnic_online, &adapter->node);
	cpuhp_state_remove_instance_nocalls(CPUHP_IBMVNIC_DEAD,
					    &adapter->node_dead);
}

static long h_reg_sub_crq(unsigned long unit_address, unsigned long token,
			  unsigned long length, unsigned long *number,
			  unsigned long *irq)
{
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	long rc;

	rc = plpar_hcall(H_REG_SUB_CRQ, retbuf, unit_address, token, length);
	*number = retbuf[0];
	*irq = retbuf[1];

	return rc;
}
/**
 * ibmvnic_wait_for_completion - Check device state and wait for completion
 * @adapter: private device data
 * @comp_done: completion structure to wait for
 * @timeout: time to wait in milliseconds
 *
 * Wait for a completion signal or until the timeout limit is reached
 * while checking that the device is still active.
 */
static int ibmvnic_wait_for_completion(struct ibmvnic_adapter *adapter,
				       struct completion *comp_done,
				       unsigned long timeout)
{
	struct net_device *netdev;
	unsigned long div_timeout;
	u8 retry;

	netdev = adapter->netdev;
	retry = 5;
	div_timeout = msecs_to_jiffies(timeout / retry);
	while (true) {
		if (!adapter->crq.active) {
			netdev_err(netdev, "Device down!\n");
			return -ENODEV;
		}
		if (!retry--)
			break;
		if (wait_for_completion_timeout(comp_done, div_timeout))
			return 0;
	}
	netdev_err(netdev, "Operation timed out.\n");
	return -ETIMEDOUT;
}

/**
 * reuse_ltb() - Check if a long term buffer can be reused
 * @ltb: The long term buffer to be checked
 * @size: The size of the long term buffer.
 *
 * An LTB can be reused unless its size has changed.
 *
 * Return: Return true if the LTB can be reused, false otherwise.
 */
static bool reuse_ltb(struct ibmvnic_long_term_buff *ltb, int size)
{
	return (ltb->buff && ltb->size == size);
}

/**
 * alloc_long_term_buff() - Allocate a long term buffer (LTB)
 *
 * @adapter: ibmvnic adapter associated to the LTB
 * @ltb: container object for the LTB
 * @size: size of the LTB
 *
 * Allocate an LTB of the specified size and notify VIOS.
 *
 * If the given @ltb already has the correct size, reuse it. Otherwise if
 * it's non-NULL, free it. Then allocate a new one of the correct size.
 * Notify the VIOS either way since we may now be working with a new VIOS.
 *
 * Allocating larger chunks of memory during resets, especially LPM or under
 * low memory situations, can cause resets to fail/timeout and for the LPAR
 * to lose connectivity. So hold onto the LTB even if we fail to communicate
 * with the VIOS and reuse it on next open. Free LTB when adapter is closed.
 *
 * Return: 0 if we were able to allocate the LTB and notify the VIOS and
 *	   a negative value otherwise.
 */
static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb, int size)
{
	struct device *dev = &adapter->vdev->dev;
	u64 prev = 0;
	int rc;

	if (!reuse_ltb(ltb, size)) {
		dev_dbg(dev,
			"LTB size changed from 0x%llx to 0x%x, reallocating\n",
			ltb->size, size);
		prev = ltb->size;
		free_long_term_buff(adapter, ltb);
	}

	if (ltb->buff) {
		dev_dbg(dev, "Reusing LTB [map %d, size 0x%llx]\n",
			ltb->map_id, ltb->size);
	} else {
		ltb->buff = dma_alloc_coherent(dev, size, &ltb->addr,
					       GFP_KERNEL);
		if (!ltb->buff) {
			dev_err(dev, "Couldn't alloc long term buffer\n");
			return -ENOMEM;
		}
		ltb->size = size;

		ltb->map_id = find_first_zero_bit(adapter->map_ids,
						  MAX_MAP_ID);
		bitmap_set(adapter->map_ids, ltb->map_id, 1);

		dev_dbg(dev,
			"Allocated new LTB [map %d, size 0x%llx was 0x%llx]\n",
			ltb->map_id, ltb->size, prev);
	}

	/* Ensure ltb is zeroed - especially when reusing it. */
	memset(ltb->buff, 0, ltb->size);

	mutex_lock(&adapter->fw_lock);
	adapter->fw_done_rc = 0;
	reinit_completion(&adapter->fw_done);

	rc = send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id);
	if (rc) {
		dev_err(dev, "send_request_map failed, rc = %d\n", rc);
		goto out;
	}

	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
	if (rc) {
		dev_err(dev, "LTB map request aborted or timed out, rc = %d\n",
			rc);
		goto out;
	}

	if (adapter->fw_done_rc) {
		dev_err(dev, "Couldn't map LTB, rc = %d\n",
			adapter->fw_done_rc);
		rc = -EIO;
		goto out;
	}
	rc = 0;
out:
	/* don't free LTB on communication error - see function header */
	mutex_unlock(&adapter->fw_lock);
	return rc;
}
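/* Descriptive note: adapter->map_ids is a bitmap of DMA map identifiers
 * shared with the VIOS. alloc_long_term_buff() claims the lowest free id
 * with find_first_zero_bit()/bitmap_set(), and free_long_term_buff() below
 * returns it with bitmap_clear(), so ids are recycled across LTB
 * reallocations.
 */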
static void free_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb)
{
	struct device *dev = &adapter->vdev->dev;

	if (!ltb->buff)
		return;

	/* VIOS automatically unmaps the long term buffer at remote
	 * end for the following resets:
	 * FAILOVER, MOBILITY, TIMEOUT.
	 */
	if (adapter->reset_reason != VNIC_RESET_FAILOVER &&
	    adapter->reset_reason != VNIC_RESET_MOBILITY &&
	    adapter->reset_reason != VNIC_RESET_TIMEOUT)
		send_request_unmap(adapter, ltb->map_id);

	dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);

	ltb->buff = NULL;
	/* mark this map_id free */
	bitmap_clear(adapter->map_ids, ltb->map_id, 1);
	ltb->map_id = 0;
}

/**
 * free_ltb_set() - free the given set of long term buffers (LTBs)
 * @adapter: The ibmvnic adapter containing this ltb set
 * @ltb_set: The ltb_set to be freed
 *
 * Free the set of LTBs in the given set.
 */
static void free_ltb_set(struct ibmvnic_adapter *adapter,
			 struct ibmvnic_ltb_set *ltb_set)
{
	int i;

	for (i = 0; i < ltb_set->num_ltbs; i++)
		free_long_term_buff(adapter, &ltb_set->ltbs[i]);

	kfree(ltb_set->ltbs);
	ltb_set->ltbs = NULL;
	ltb_set->num_ltbs = 0;
}
/**
 * alloc_ltb_set() - Allocate a set of long term buffers (LTBs)
 *
 * @adapter: ibmvnic adapter associated to the LTB
 * @ltb_set: container object for the set of LTBs
 * @num_buffs: Number of buffers in the LTB
 * @buff_size: Size of each buffer in the LTB
 *
 * Allocate a set of LTBs to accommodate @num_buffs buffers of @buff_size
 * each. We currently cap the size of each LTB at IBMVNIC_ONE_LTB_SIZE. If
 * the new set of LTBs has fewer LTBs than the old set, free the excess
 * LTBs. If the new set needs more LTBs than the old set had, allocate the
 * remaining ones. Try and reuse as many LTBs as possible and avoid
 * reallocation.
 *
 * Any changes to this allocation strategy must be reflected in
 * map_rxpool_buff_to_ltb() and map_txpool_buff_to_ltb().
 */
static int alloc_ltb_set(struct ibmvnic_adapter *adapter,
			 struct ibmvnic_ltb_set *ltb_set, int num_buffs,
			 int buff_size)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_ltb_set old_set;
	struct ibmvnic_ltb_set new_set;
	int rem_size;
	int tot_size;		/* size of all ltbs */
	int ltb_size;		/* size of one ltb */
	int nltbs;
	int rc;
	int n;
	int i;

	dev_dbg(dev, "%s() num_buffs %d, buff_size %d\n", __func__, num_buffs,
		buff_size);

	ltb_size = rounddown(IBMVNIC_ONE_LTB_SIZE, buff_size);
	tot_size = num_buffs * buff_size;

	if (ltb_size > tot_size)
		ltb_size = tot_size;

	nltbs = tot_size / ltb_size;
	if (tot_size % ltb_size)
		nltbs++;

	old_set = *ltb_set;

	if (old_set.num_ltbs == nltbs) {
		new_set = old_set;
	} else {
		int tmp = nltbs * sizeof(struct ibmvnic_long_term_buff);

		new_set.ltbs = kzalloc(tmp, GFP_KERNEL);
		if (!new_set.ltbs)
			return -ENOMEM;

		new_set.num_ltbs = nltbs;

		/* Free any excess ltbs in old set */
		for (i = new_set.num_ltbs; i < old_set.num_ltbs; i++)
			free_long_term_buff(adapter, &old_set.ltbs[i]);

		/* Copy remaining ltbs to new set. All LTBs except the
		 * last one are of the same size. alloc_long_term_buff()
		 * will realloc if the size changes.
		 */
		n = min(old_set.num_ltbs, new_set.num_ltbs);
		for (i = 0; i < n; i++)
			new_set.ltbs[i] = old_set.ltbs[i];

		/* Any additional ltbs in new set will have NULL ltbs for
		 * now and will be allocated in alloc_long_term_buff().
		 */

		/* We no longer need the old_set so free it. Note that we
		 * may have reused some ltbs from old set and freed excess
		 * ltbs above. So we only need to free the container now
		 * not the LTBs themselves. (i.e. don't free_ltb_set()!)
		 */
		kfree(old_set.ltbs);
		old_set.ltbs = NULL;
		old_set.num_ltbs = 0;

		/* Install the new set. If allocations fail below, we will
		 * retry later and know what size LTBs we need.
		 */
		*ltb_set = new_set;
	}

	i = 0;
	rem_size = tot_size;
	while (rem_size) {
		if (ltb_size > rem_size)
			ltb_size = rem_size;

		rem_size -= ltb_size;

		rc = alloc_long_term_buff(adapter, &new_set.ltbs[i], ltb_size);
		if (rc)
			goto out;
		i++;
	}

	WARN_ON(i != new_set.num_ltbs);

	return 0;
out:
	/* We may have allocated one/more LTBs before failing and we
	 * want to try and reuse on next reset. So don't free ltb set.
	 */
	return rc;
}
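/* Worked example (illustrative numbers, assuming IBMVNIC_ONE_LTB_SIZE is
 * 8 MB): with buff_size = 9088 and num_buffs = 1024,
 *
 *	ltb_size = rounddown(8388608, 9088) = 9088 * 923 = 8388224
 *	tot_size = 1024 * 9088              = 9306112
 *	nltbs    = 2 (one full LTB of 923 buffers, one partial LTB
 *		      holding the remaining 101 buffers)
 *
 * Rounding ltb_size down to a multiple of buff_size guarantees that no
 * buffer straddles two LTBs, which map_rxpool_buf_to_ltb() below relies on.
 */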
/**
 * map_rxpool_buf_to_ltb() - Map given rxpool buffer to offset in an LTB.
 * @rxpool: The receive buffer pool containing buffer
 * @bufidx: Index of buffer in rxpool
 * @ltbp: (Output) pointer to the long term buffer containing the buffer
 * @offset: (Output) offset of buffer in the LTB from @ltbp
 *
 * Map the given buffer identified by [rxpool, bufidx] to an LTB in the
 * pool and its corresponding offset. Assume for now that the LTBs may
 * differ in size; the linear search below could be optimized based on
 * the allocation strategy in alloc_ltb_set().
 */
static void map_rxpool_buf_to_ltb(struct ibmvnic_rx_pool *rxpool,
				  unsigned int bufidx,
				  struct ibmvnic_long_term_buff **ltbp,
				  unsigned int *offset)
{
	struct ibmvnic_long_term_buff *ltb;
	int nbufs;	/* # of buffers in one ltb */
	int i;

	WARN_ON(bufidx >= rxpool->size);

	for (i = 0; i < rxpool->ltb_set.num_ltbs; i++) {
		ltb = &rxpool->ltb_set.ltbs[i];
		nbufs = ltb->size / rxpool->buff_size;
		if (bufidx < nbufs)
			break;
		bufidx -= nbufs;
	}

	*ltbp = ltb;
	*offset = bufidx * rxpool->buff_size;
}

/**
 * map_txpool_buf_to_ltb() - Map given txpool buffer to offset in an LTB.
 * @txpool: The transmit buffer pool containing buffer
 * @bufidx: Index of buffer in txpool
 * @ltbp: (Output) pointer to the long term buffer (LTB) containing the buffer
 * @offset: (Output) offset of buffer in the LTB from @ltbp
 *
 * Map the given buffer identified by [txpool, bufidx] to an LTB in the
 * pool and its corresponding offset.
 */
static void map_txpool_buf_to_ltb(struct ibmvnic_tx_pool *txpool,
				  unsigned int bufidx,
				  struct ibmvnic_long_term_buff **ltbp,
				  unsigned int *offset)
{
	struct ibmvnic_long_term_buff *ltb;
	int nbufs;	/* # of buffers in one ltb */
	int i;

	WARN_ON_ONCE(bufidx >= txpool->num_buffers);

	for (i = 0; i < txpool->ltb_set.num_ltbs; i++) {
		ltb = &txpool->ltb_set.ltbs[i];
		nbufs = ltb->size / txpool->buf_size;
		if (bufidx < nbufs)
			break;
		bufidx -= nbufs;
	}

	*ltbp = ltb;
	*offset = bufidx * txpool->buf_size;
}
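/* Worked example (continuing the illustrative numbers above): with
 * buff_size = 9088 and LTB 0 holding 923 buffers, bufidx = 1000 walks past
 * LTB 0 (1000 - 923 = 77) and resolves to LTB 1 at
 * offset = 77 * 9088 = 699776.
 */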
static void deactivate_rx_pools(struct ibmvnic_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_active_rx_pools; i++)
		adapter->rx_pool[i].active = 0;
}

static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
			      struct ibmvnic_rx_pool *pool)
{
	int count = pool->size - atomic_read(&pool->available);
	u64 handle = adapter->rx_scrq[pool->index]->handle;
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_ind_xmit_queue *ind_bufp;
	struct ibmvnic_sub_crq_queue *rx_scrq;
	struct ibmvnic_long_term_buff *ltb;
	union sub_crq *sub_crq;
	int buffers_added = 0;
	unsigned long lpar_rc;
	struct sk_buff *skb;
	unsigned int offset;
	dma_addr_t dma_addr;
	unsigned char *dst;
	int shift = 0;
	int bufidx;
	int i;

	if (!pool->active)
		return;

	rx_scrq = adapter->rx_scrq[pool->index];
	ind_bufp = &rx_scrq->ind_buf;

	/* netdev_alloc_skb() could have failed after we saved a few skbs
	 * in the indir_buf and we would not have sent them to VIOS yet.
	 * To account for them, start the loop at ind_bufp->index rather
	 * than 0. If we pushed all the skbs to VIOS, ind_bufp->index will
	 * be 0.
	 */
	for (i = ind_bufp->index; i < count; ++i) {
		bufidx = pool->free_map[pool->next_free];

		/* We may be reusing the skb from earlier resets. Allocate
		 * only if necessary. But since the LTB may have changed
		 * during reset (see init_rx_pools()), update LTB below
		 * even if reusing skb.
		 */
		skb = pool->rx_buff[bufidx].skb;
		if (!skb) {
			skb = netdev_alloc_skb(adapter->netdev,
					       pool->buff_size);
			if (!skb) {
				dev_err(dev, "Couldn't replenish rx buff\n");
				adapter->replenish_no_mem++;
				break;
			}
		}

		pool->free_map[pool->next_free] = IBMVNIC_INVALID_MAP;
		pool->next_free = (pool->next_free + 1) % pool->size;

		/* Copy the skb to the long term mapped DMA buffer */
		map_rxpool_buf_to_ltb(pool, bufidx, &ltb, &offset);
		dst = ltb->buff + offset;
		memset(dst, 0, pool->buff_size);
		dma_addr = ltb->addr + offset;

		/* add the skb to an rx_buff in the pool */
		pool->rx_buff[bufidx].data = dst;
		pool->rx_buff[bufidx].dma = dma_addr;
		pool->rx_buff[bufidx].skb = skb;
		pool->rx_buff[bufidx].pool_index = pool->index;
		pool->rx_buff[bufidx].size = pool->buff_size;

		/* queue the rx_buff for the next send_subcrq_indirect */
		sub_crq = &ind_bufp->indir_arr[ind_bufp->index++];
		memset(sub_crq, 0, sizeof(*sub_crq));
		sub_crq->rx_add.first = IBMVNIC_CRQ_CMD;
		sub_crq->rx_add.correlator =
		    cpu_to_be64((u64)&pool->rx_buff[bufidx]);
		sub_crq->rx_add.ioba = cpu_to_be32(dma_addr);
		sub_crq->rx_add.map_id = ltb->map_id;

		/* The length field of the sCRQ is defined to be 24 bits so the
		 * buffer size needs to be left shifted by a byte before it is
		 * converted to big endian to prevent the last byte from being
		 * truncated.
		 */
#ifdef __LITTLE_ENDIAN__
		shift = 8;
#endif
		sub_crq->rx_add.len = cpu_to_be32(pool->buff_size << shift);

		/* if send_subcrq_indirect queue is full, flush to VIOS */
		if (ind_bufp->index == IBMVNIC_MAX_IND_DESCS ||
		    i == count - 1) {
			lpar_rc =
				send_subcrq_indirect(adapter, handle,
						     (u64)ind_bufp->indir_dma,
						     (u64)ind_bufp->index);
			if (lpar_rc != H_SUCCESS)
				goto failure;
			buffers_added += ind_bufp->index;
			adapter->replenish_add_buff_success += ind_bufp->index;
			ind_bufp->index = 0;
		}
	}
	atomic_add(buffers_added, &pool->available);
	return;

failure:
	if (lpar_rc != H_PARAMETER && lpar_rc != H_CLOSED)
		dev_err_ratelimited(dev, "rx: replenish packet buffer failed\n");
	for (i = ind_bufp->index - 1; i >= 0; --i) {
		struct ibmvnic_rx_buff *rx_buff;

		pool->next_free = pool->next_free == 0 ?
				  pool->size - 1 : pool->next_free - 1;
		sub_crq = &ind_bufp->indir_arr[i];
		rx_buff = (struct ibmvnic_rx_buff *)
				be64_to_cpu(sub_crq->rx_add.correlator);
		bufidx = (int)(rx_buff - pool->rx_buff);
		pool->free_map[pool->next_free] = bufidx;
		dev_kfree_skb_any(pool->rx_buff[bufidx].skb);
		pool->rx_buff[bufidx].skb = NULL;
	}
	adapter->replenish_add_buff_failure += ind_bufp->index;
	atomic_add(buffers_added, &pool->available);
	ind_bufp->index = 0;
	if (lpar_rc == H_CLOSED || adapter->failover_pending) {
		/* Disable buffer pool replenishment and report carrier off if
		 * queue is closed or pending failover.
		 * Firmware guarantees that a signal will be sent to the
		 * driver, triggering a reset.
		 */
		deactivate_rx_pools(adapter);
		netif_carrier_off(adapter->netdev);
	}
}
static void replenish_pools(struct ibmvnic_adapter *adapter)
{
	int i;

	adapter->replenish_task_cycles++;
	for (i = 0; i < adapter->num_active_rx_pools; i++) {
		if (adapter->rx_pool[i].active)
			replenish_rx_pool(adapter, &adapter->rx_pool[i]);
	}

	netdev_dbg(adapter->netdev, "Replenished %d pools\n", i);
}

static void release_stats_buffers(struct ibmvnic_adapter *adapter)
{
	kfree(adapter->tx_stats_buffers);
	kfree(adapter->rx_stats_buffers);
	adapter->tx_stats_buffers = NULL;
	adapter->rx_stats_buffers = NULL;
}

static int init_stats_buffers(struct ibmvnic_adapter *adapter)
{
	adapter->tx_stats_buffers =
				kcalloc(IBMVNIC_MAX_QUEUES,
					sizeof(struct ibmvnic_tx_queue_stats),
					GFP_KERNEL);
	if (!adapter->tx_stats_buffers)
		return -ENOMEM;

	adapter->rx_stats_buffers =
				kcalloc(IBMVNIC_MAX_QUEUES,
					sizeof(struct ibmvnic_rx_queue_stats),
					GFP_KERNEL);
	if (!adapter->rx_stats_buffers)
		return -ENOMEM;

	return 0;
}

static void release_stats_token(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;

	if (!adapter->stats_token)
		return;

	dma_unmap_single(dev, adapter->stats_token,
			 sizeof(struct ibmvnic_statistics),
			 DMA_FROM_DEVICE);
	adapter->stats_token = 0;
}

static int init_stats_token(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	dma_addr_t stok;
	int rc;

	stok = dma_map_single(dev, &adapter->stats,
			      sizeof(struct ibmvnic_statistics),
			      DMA_FROM_DEVICE);
	rc = dma_mapping_error(dev, stok);
	if (rc) {
		dev_err(dev, "Couldn't map stats buffer, rc = %d\n", rc);
		return rc;
	}

	adapter->stats_token = stok;
	netdev_dbg(adapter->netdev, "Stats token initialized (%llx)\n", stok);
	return 0;
}

/**
 * release_rx_pools() - Release any rx pools attached to @adapter.
 * @adapter: ibmvnic adapter
 *
 * Safe to call this multiple times - even if no pools are attached.
 */
static void release_rx_pools(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_rx_pool *rx_pool;
	int i, j;

	if (!adapter->rx_pool)
		return;

	for (i = 0; i < adapter->num_active_rx_pools; i++) {
		rx_pool = &adapter->rx_pool[i];

		netdev_dbg(adapter->netdev, "Releasing rx_pool[%d]\n", i);

		kfree(rx_pool->free_map);

		free_ltb_set(adapter, &rx_pool->ltb_set);

		if (!rx_pool->rx_buff)
			continue;

		for (j = 0; j < rx_pool->size; j++) {
			if (rx_pool->rx_buff[j].skb) {
				dev_kfree_skb_any(rx_pool->rx_buff[j].skb);
				rx_pool->rx_buff[j].skb = NULL;
			}
		}

		kfree(rx_pool->rx_buff);
	}

	kfree(adapter->rx_pool);
	adapter->rx_pool = NULL;
	adapter->num_active_rx_pools = 0;
	adapter->prev_rx_pool_size = 0;
}
/**
 * reuse_rx_pools() - Check if the existing rx pools can be reused.
 * @adapter: ibmvnic adapter
 *
 * Check if the existing rx pools in the adapter can be reused. The
 * pools can be reused if the pool parameters (number of pools,
 * number of buffers in the pool and size of each buffer) have not
 * changed.
 *
 * NOTE: This assumes that all pools have the same number of buffers
 *	 which is the case currently. If that changes, we must fix this.
 *
 * Return: true if the rx pools can be reused, false otherwise.
 */
static bool reuse_rx_pools(struct ibmvnic_adapter *adapter)
{
	u64 old_num_pools, new_num_pools;
	u64 old_pool_size, new_pool_size;
	u64 old_buff_size, new_buff_size;

	if (!adapter->rx_pool)
		return false;

	old_num_pools = adapter->num_active_rx_pools;
	new_num_pools = adapter->req_rx_queues;

	old_pool_size = adapter->prev_rx_pool_size;
	new_pool_size = adapter->req_rx_add_entries_per_subcrq;

	old_buff_size = adapter->prev_rx_buf_sz;
	new_buff_size = adapter->cur_rx_buf_sz;

	if (old_buff_size != new_buff_size ||
	    old_num_pools != new_num_pools ||
	    old_pool_size != new_pool_size)
		return false;

	return true;
}

/**
 * init_rx_pools(): Initialize the set of receiver pools in the adapter.
 * @netdev: net device associated with the vnic interface
 *
 * Initialize the set of receiver pools in the ibmvnic adapter associated
 * with the net_device @netdev. If possible, reuse the existing rx pools.
 * Otherwise free any existing pools and allocate a new set of pools
 * before initializing them.
 *
 * Return: 0 on success and negative value on error.
 */
static int init_rx_pools(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_rx_pool *rx_pool;
	u64 num_pools;
	u64 pool_size;		/* # of buffers in one pool */
	u64 buff_size;
	int i, j, rc;

	pool_size = adapter->req_rx_add_entries_per_subcrq;
	num_pools = adapter->req_rx_queues;
	buff_size = adapter->cur_rx_buf_sz;

	if (reuse_rx_pools(adapter)) {
		dev_dbg(dev, "Reusing rx pools\n");
		goto update_ltb;
	}

	/* Allocate/populate the pools. */
	release_rx_pools(adapter);

	adapter->rx_pool = kcalloc(num_pools,
				   sizeof(struct ibmvnic_rx_pool),
				   GFP_KERNEL);
	if (!adapter->rx_pool) {
		dev_err(dev, "Failed to allocate rx pools\n");
		return -ENOMEM;
	}

	/* Set num_active_rx_pools early. If we fail below after partial
	 * allocation, release_rx_pools() will know how many to look for.
	 */
	adapter->num_active_rx_pools = num_pools;

	for (i = 0; i < num_pools; i++) {
		rx_pool = &adapter->rx_pool[i];

		netdev_dbg(adapter->netdev,
			   "Initializing rx_pool[%d], %lld buffs, %lld bytes each\n",
			   i, pool_size, buff_size);

		rx_pool->size = pool_size;
		rx_pool->index = i;
		rx_pool->buff_size = ALIGN(buff_size, L1_CACHE_BYTES);

		rx_pool->free_map = kcalloc(rx_pool->size, sizeof(int),
					    GFP_KERNEL);
		if (!rx_pool->free_map) {
			dev_err(dev, "Couldn't alloc free_map %d\n", i);
			rc = -ENOMEM;
			goto out_release;
		}

		rx_pool->rx_buff = kcalloc(rx_pool->size,
					   sizeof(struct ibmvnic_rx_buff),
					   GFP_KERNEL);
		if (!rx_pool->rx_buff) {
			dev_err(dev, "Couldn't alloc rx buffers\n");
			rc = -ENOMEM;
			goto out_release;
		}
	}

	adapter->prev_rx_pool_size = pool_size;
	adapter->prev_rx_buf_sz = adapter->cur_rx_buf_sz;

update_ltb:
	for (i = 0; i < num_pools; i++) {
		rx_pool = &adapter->rx_pool[i];
		dev_dbg(dev, "Updating LTB for rx pool %d [%d, %d]\n",
			i, rx_pool->size, rx_pool->buff_size);

		rc = alloc_ltb_set(adapter, &rx_pool->ltb_set,
				   rx_pool->size, rx_pool->buff_size);
		if (rc)
			goto out;

		for (j = 0; j < rx_pool->size; ++j) {
			struct ibmvnic_rx_buff *rx_buff;

			rx_pool->free_map[j] = j;

			/* NOTE: Don't clear rx_buff->skb here - will leak
			 * memory! replenish_rx_pool() will reuse skbs or
			 * allocate as necessary.
			 */
			rx_buff = &rx_pool->rx_buff[j];
			rx_buff->dma = 0;
			rx_buff->data = 0;
			rx_buff->size = 0;
			rx_buff->pool_index = 0;
		}

		/* Mark pool "empty" so replenish_rx_pool() will
		 * update the LTB info for each buffer
		 */
		atomic_set(&rx_pool->available, 0);
		rx_pool->next_alloc = 0;
		rx_pool->next_free = 0;
		/* replenish_rx_pool() may have called deactivate_rx_pools()
		 * on failover. Ensure pool is active now.
		 */
		rx_pool->active = 1;
	}
	return 0;
out_release:
	release_rx_pools(adapter);
out:
	/* We failed to allocate one or more LTBs or map them on the VIOS.
	 * Hold onto the pools and any LTBs that we did allocate/map.
	 */
	return rc;
}

static void release_vpd_data(struct ibmvnic_adapter *adapter)
{
	if (!adapter->vpd)
		return;

	kfree(adapter->vpd->buff);
	kfree(adapter->vpd);

	adapter->vpd = NULL;
}

static void release_one_tx_pool(struct ibmvnic_adapter *adapter,
				struct ibmvnic_tx_pool *tx_pool)
{
	kfree(tx_pool->tx_buff);
	kfree(tx_pool->free_map);
	free_ltb_set(adapter, &tx_pool->ltb_set);
}
/**
 * release_tx_pools() - Release any tx pools attached to @adapter.
 * @adapter: ibmvnic adapter
 *
 * Safe to call this multiple times - even if no pools are attached.
 */
static void release_tx_pools(struct ibmvnic_adapter *adapter)
{
	int i;

	/* init_tx_pools() ensures that ->tx_pool and ->tso_pool are
	 * both NULL or both non-NULL. So we only need to check one.
	 */
	if (!adapter->tx_pool)
		return;

	for (i = 0; i < adapter->num_active_tx_pools; i++) {
		release_one_tx_pool(adapter, &adapter->tx_pool[i]);
		release_one_tx_pool(adapter, &adapter->tso_pool[i]);
	}

	kfree(adapter->tx_pool);
	adapter->tx_pool = NULL;
	kfree(adapter->tso_pool);
	adapter->tso_pool = NULL;
	adapter->num_active_tx_pools = 0;
	adapter->prev_tx_pool_size = 0;
}

static int init_one_tx_pool(struct net_device *netdev,
			    struct ibmvnic_tx_pool *tx_pool,
			    int pool_size, int buf_size)
{
	int i;

	tx_pool->tx_buff = kcalloc(pool_size,
				   sizeof(struct ibmvnic_tx_buff),
				   GFP_KERNEL);
	if (!tx_pool->tx_buff)
		return -ENOMEM;

	tx_pool->free_map = kcalloc(pool_size, sizeof(int), GFP_KERNEL);
	if (!tx_pool->free_map) {
		kfree(tx_pool->tx_buff);
		tx_pool->tx_buff = NULL;
		return -ENOMEM;
	}

	for (i = 0; i < pool_size; i++)
		tx_pool->free_map[i] = i;

	tx_pool->consumer_index = 0;
	tx_pool->producer_index = 0;
	tx_pool->num_buffers = pool_size;
	tx_pool->buf_size = buf_size;

	return 0;
}

/**
 * reuse_tx_pools() - Check if the existing tx pools can be reused.
 * @adapter: ibmvnic adapter
 *
 * Check if the existing tx pools in the adapter can be reused. The
 * pools can be reused if the pool parameters (number of pools,
 * number of buffers in the pool and mtu) have not changed.
 *
 * NOTE: This assumes that all pools have the same number of buffers
 *	 which is the case currently. If that changes, we must fix this.
 *
 * Return: true if the tx pools can be reused, false otherwise.
 */
static bool reuse_tx_pools(struct ibmvnic_adapter *adapter)
{
	u64 old_num_pools, new_num_pools;
	u64 old_pool_size, new_pool_size;
	u64 old_mtu, new_mtu;

	if (!adapter->tx_pool)
		return false;

	old_num_pools = adapter->num_active_tx_pools;
	new_num_pools = adapter->num_active_tx_scrqs;
	old_pool_size = adapter->prev_tx_pool_size;
	new_pool_size = adapter->req_tx_entries_per_subcrq;
	old_mtu = adapter->prev_mtu;
	new_mtu = adapter->req_mtu;

	if (old_mtu != new_mtu ||
	    old_num_pools != new_num_pools ||
	    old_pool_size != new_pool_size)
		return false;

	return true;
}
/**
 * init_tx_pools(): Initialize the set of transmit pools in the adapter.
 * @netdev: net device associated with the vnic interface
 *
 * Initialize the set of transmit pools in the ibmvnic adapter associated
 * with the net_device @netdev. If possible, reuse the existing tx pools.
 * Otherwise free any existing pools and allocate a new set of pools
 * before initializing them.
 *
 * Return: 0 on success and negative value on error.
 */
static int init_tx_pools(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->vdev->dev;
	int num_pools;
	u64 pool_size;		/* # of buffers in pool */
	u64 buff_size;
	int i, j, rc;

	num_pools = adapter->req_tx_queues;

	/* We must notify the VIOS about the LTB on all resets - but we only
	 * need to alloc/populate pools if either the number of buffers or
	 * size of each buffer in the pool has changed.
	 */
	if (reuse_tx_pools(adapter)) {
		netdev_dbg(netdev, "Reusing tx pools\n");
		goto update_ltb;
	}

	/* Allocate/populate the pools. */
	release_tx_pools(adapter);

	pool_size = adapter->req_tx_entries_per_subcrq;
	num_pools = adapter->num_active_tx_scrqs;

	adapter->tx_pool = kcalloc(num_pools,
				   sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
	if (!adapter->tx_pool)
		return -ENOMEM;

	adapter->tso_pool = kcalloc(num_pools,
				    sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
	/* To simplify release_tx_pools() ensure that ->tx_pool and
	 * ->tso_pool are either both NULL or both non-NULL.
	 */
	if (!adapter->tso_pool) {
		kfree(adapter->tx_pool);
		adapter->tx_pool = NULL;
		return -ENOMEM;
	}

	/* Set num_active_tx_pools early. If we fail below after partial
	 * allocation, release_tx_pools() will know how many to look for.
	 */
	adapter->num_active_tx_pools = num_pools;

	buff_size = adapter->req_mtu + VLAN_HLEN;
	buff_size = ALIGN(buff_size, L1_CACHE_BYTES);

	for (i = 0; i < num_pools; i++) {
		dev_dbg(dev, "Init tx pool %d [%llu, %llu]\n",
			i, adapter->req_tx_entries_per_subcrq, buff_size);

		rc = init_one_tx_pool(netdev, &adapter->tx_pool[i],
				      pool_size, buff_size);
		if (rc)
			goto out_release;

		rc = init_one_tx_pool(netdev, &adapter->tso_pool[i],
				      IBMVNIC_TSO_BUFS,
				      IBMVNIC_TSO_BUF_SZ);
		if (rc)
			goto out_release;
	}

	adapter->prev_tx_pool_size = pool_size;
	adapter->prev_mtu = adapter->req_mtu;

update_ltb:
	/* NOTE: All tx_pools have the same number of buffers (which is
	 * same as pool_size). All tso_pools have IBMVNIC_TSO_BUFS
	 * buffers (see calls to init_one_tx_pool() for these).
	 * For consistency, we use tx_pool->num_buffers and
	 * tso_pool->num_buffers below.
	 */
	rc = -1;
	for (i = 0; i < num_pools; i++) {
		struct ibmvnic_tx_pool *tso_pool;
		struct ibmvnic_tx_pool *tx_pool;

		tx_pool = &adapter->tx_pool[i];

		dev_dbg(dev, "Updating LTB for tx pool %d [%d, %d]\n",
			i, tx_pool->num_buffers, tx_pool->buf_size);

		rc = alloc_ltb_set(adapter, &tx_pool->ltb_set,
				   tx_pool->num_buffers, tx_pool->buf_size);
		if (rc)
			goto out;

		tx_pool->consumer_index = 0;
		tx_pool->producer_index = 0;

		for (j = 0; j < tx_pool->num_buffers; j++)
			tx_pool->free_map[j] = j;

		tso_pool = &adapter->tso_pool[i];

		dev_dbg(dev, "Updating LTB for tso pool %d [%d, %d]\n",
			i, tso_pool->num_buffers, tso_pool->buf_size);

		rc = alloc_ltb_set(adapter, &tso_pool->ltb_set,
				   tso_pool->num_buffers, tso_pool->buf_size);
		if (rc)
			goto out;

		tso_pool->consumer_index = 0;
		tso_pool->producer_index = 0;

		for (j = 0; j < tso_pool->num_buffers; j++)
			tso_pool->free_map[j] = j;
	}

	return 0;
out_release:
	release_tx_pools(adapter);
out:
	/* We failed to allocate one or more LTBs or map them on the VIOS.
	 * Hold onto the pools and any LTBs that we did allocate/map.
	 */
	return rc;
}
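/* Worked example (illustrative, assuming 128-byte L1 cache lines as on
 * ppc64): with req_mtu = 1500, init_tx_pools() computes
 *
 *	buff_size = ALIGN(1500 + VLAN_HLEN, 128) = ALIGN(1504, 128) = 1536
 *
 * so each tx buffer is padded out to a cache-line multiple.
 */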
static void ibmvnic_napi_enable(struct ibmvnic_adapter *adapter)
{
	int i;

	if (adapter->napi_enabled)
		return;

	for (i = 0; i < adapter->req_rx_queues; i++)
		napi_enable(&adapter->napi[i]);

	adapter->napi_enabled = true;
}

static void ibmvnic_napi_disable(struct ibmvnic_adapter *adapter)
{
	int i;

	if (!adapter->napi_enabled)
		return;

	for (i = 0; i < adapter->req_rx_queues; i++) {
		netdev_dbg(adapter->netdev, "Disabling napi[%d]\n", i);
		napi_disable(&adapter->napi[i]);
	}

	adapter->napi_enabled = false;
}

static int init_napi(struct ibmvnic_adapter *adapter)
{
	int i;

	adapter->napi = kcalloc(adapter->req_rx_queues,
				sizeof(struct napi_struct), GFP_KERNEL);
	if (!adapter->napi)
		return -ENOMEM;

	for (i = 0; i < adapter->req_rx_queues; i++) {
		netdev_dbg(adapter->netdev, "Adding napi[%d]\n", i);
		netif_napi_add(adapter->netdev, &adapter->napi[i],
			       ibmvnic_poll);
	}

	adapter->num_active_rx_napi = adapter->req_rx_queues;
	return 0;
}

static void release_napi(struct ibmvnic_adapter *adapter)
{
	int i;

	if (!adapter->napi)
		return;

	for (i = 0; i < adapter->num_active_rx_napi; i++) {
		netdev_dbg(adapter->netdev, "Releasing napi[%d]\n", i);
		netif_napi_del(&adapter->napi[i]);
	}

	kfree(adapter->napi);
	adapter->napi = NULL;
	adapter->num_active_rx_napi = 0;
	adapter->napi_enabled = false;
}

static const char *adapter_state_to_string(enum vnic_state state)
{
	switch (state) {
	case VNIC_PROBING:
		return "PROBING";
	case VNIC_PROBED:
		return "PROBED";
	case VNIC_OPENING:
		return "OPENING";
	case VNIC_OPEN:
		return "OPEN";
	case VNIC_CLOSING:
		return "CLOSING";
	case VNIC_CLOSED:
		return "CLOSED";
	case VNIC_REMOVING:
		return "REMOVING";
	case VNIC_REMOVED:
		return "REMOVED";
	case VNIC_DOWN:
		return "DOWN";
	}
	return "UNKNOWN";
}
static int ibmvnic_login(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	unsigned long timeout = msecs_to_jiffies(20000);
	int retry_count = 0;
	int retries = 10;
	bool retry;
	int rc;

	do {
		retry = false;
		if (retry_count > retries) {
			netdev_warn(netdev, "Login attempts exceeded\n");
			return -EACCES;
		}

		adapter->init_done_rc = 0;
		reinit_completion(&adapter->init_done);
		rc = send_login(adapter);
		if (rc)
			return rc;

		if (!wait_for_completion_timeout(&adapter->init_done,
						 timeout)) {
			netdev_warn(netdev, "Login timed out\n");
			adapter->login_pending = false;
			goto partial_reset;
		}

		if (adapter->init_done_rc == ABORTED) {
			netdev_warn(netdev, "Login aborted, retrying...\n");
			retry = true;
			adapter->init_done_rc = 0;
			retry_count++;
			/* FW or device may be busy, so
			 * wait a bit before retrying login
			 */
			msleep(500);
		} else if (adapter->init_done_rc == PARTIALSUCCESS) {
			retry_count++;
			release_sub_crqs(adapter, 1);

			retry = true;
			netdev_dbg(netdev,
				   "Received partial success, retrying...\n");
			adapter->init_done_rc = 0;
			reinit_completion(&adapter->init_done);
			send_query_cap(adapter);
			if (!wait_for_completion_timeout(&adapter->init_done,
							 timeout)) {
				netdev_warn(netdev,
					    "Capabilities query timed out\n");
				return -ETIMEDOUT;
			}

			rc = init_sub_crqs(adapter);
			if (rc) {
				netdev_warn(netdev,
					    "SCRQ initialization failed\n");
				return rc;
			}

			rc = init_sub_crq_irqs(adapter);
			if (rc) {
				netdev_warn(netdev,
					    "SCRQ irq initialization failed\n");
				return rc;
			}
		} else if (adapter->init_done_rc) {
			netdev_warn(netdev, "Adapter login failed, init_done_rc = %d\n",
				    adapter->init_done_rc);

partial_reset:
			/* adapter login failed, so free any CRQs or sub-CRQs
			 * and register again before attempting to login again.
			 * If we don't do this then the VIOS may think that
			 * we are already logged in and reject any subsequent
			 * attempts
			 */
			netdev_warn(netdev,
				    "Freeing and re-registering CRQs before attempting to login again\n");
			retry = true;
			adapter->init_done_rc = 0;
			retry_count++;
			release_sub_crqs(adapter, true);
			reinit_init_done(adapter);
			release_crq_queue(adapter);
			/* If we don't sleep here then we risk an unnecessary
			 * failover event from the VIOS. This is a known VIOS
			 * issue caused by a vnic device freeing and registering
			 * a CRQ too quickly.
			 */
			msleep(1500);
			rc = init_crq_queue(adapter);
			if (rc) {
				netdev_err(netdev, "login recovery: init CRQ failed %d\n",
					   rc);
				return -EIO;
			}

			rc = ibmvnic_reset_init(adapter, false);
			if (rc) {
				netdev_err(netdev, "login recovery: Reset init failed %d\n",
					   rc);
				return -EIO;
			}
		}
	} while (retry);

	__ibmvnic_set_mac(netdev, adapter->mac_addr);

	netdev_dbg(netdev, "[S:%s] Login succeeded\n", adapter_state_to_string(adapter->state));
	return 0;
}

static void release_login_buffer(struct ibmvnic_adapter *adapter)
{
	if (!adapter->login_buf)
		return;

	dma_unmap_single(&adapter->vdev->dev, adapter->login_buf_token,
			 adapter->login_buf_sz, DMA_TO_DEVICE);
	kfree(adapter->login_buf);
	adapter->login_buf = NULL;
}

static void release_login_rsp_buffer(struct ibmvnic_adapter *adapter)
{
	if (!adapter->login_rsp_buf)
		return;

	dma_unmap_single(&adapter->vdev->dev, adapter->login_rsp_buf_token,
			 adapter->login_rsp_buf_sz, DMA_FROM_DEVICE);
	kfree(adapter->login_rsp_buf);
	adapter->login_rsp_buf = NULL;
}

static void release_resources(struct ibmvnic_adapter *adapter)
{
	release_vpd_data(adapter);

	release_napi(adapter);
	release_login_buffer(adapter);
	release_login_rsp_buffer(adapter);
}
static int set_link_state(struct ibmvnic_adapter *adapter, u8 link_state)
{
	struct net_device *netdev = adapter->netdev;
	unsigned long timeout = msecs_to_jiffies(20000);
	union ibmvnic_crq crq;
	bool resend;
	int rc;

	netdev_dbg(netdev, "setting link state %d\n", link_state);

	memset(&crq, 0, sizeof(crq));
	crq.logical_link_state.first = IBMVNIC_CRQ_CMD;
	crq.logical_link_state.cmd = LOGICAL_LINK_STATE;
	crq.logical_link_state.link_state = link_state;

	do {
		resend = false;

		reinit_completion(&adapter->init_done);
		rc = ibmvnic_send_crq(adapter, &crq);
		if (rc) {
			netdev_err(netdev, "Failed to set link state\n");
			return rc;
		}

		if (!wait_for_completion_timeout(&adapter->init_done,
						 timeout)) {
			netdev_err(netdev, "timeout setting link state\n");
			return -ETIMEDOUT;
		}

		if (adapter->init_done_rc == PARTIALSUCCESS) {
			/* Partial success, delay and re-send */
			mdelay(1000);
			resend = true;
		} else if (adapter->init_done_rc) {
			netdev_warn(netdev, "Unable to set link state, rc=%d\n",
				    adapter->init_done_rc);
			return adapter->init_done_rc;
		}
	} while (resend);

	return 0;
}

static int set_real_num_queues(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int rc;

	netdev_dbg(netdev, "Setting real tx/rx queues (%llx/%llx)\n",
		   adapter->req_tx_queues, adapter->req_rx_queues);

	rc = netif_set_real_num_tx_queues(netdev, adapter->req_tx_queues);
	if (rc) {
		netdev_err(netdev, "failed to set the number of tx queues\n");
		return rc;
	}

	rc = netif_set_real_num_rx_queues(netdev, adapter->req_rx_queues);
	if (rc)
		netdev_err(netdev, "failed to set the number of rx queues\n");

	return rc;
}

static int ibmvnic_get_vpd(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	union ibmvnic_crq crq;
	int len = 0;
	int rc;

	if (adapter->vpd->buff)
		len = adapter->vpd->len;

	mutex_lock(&adapter->fw_lock);
	adapter->fw_done_rc = 0;
	reinit_completion(&adapter->fw_done);

	crq.get_vpd_size.first = IBMVNIC_CRQ_CMD;
	crq.get_vpd_size.cmd = GET_VPD_SIZE;
	rc = ibmvnic_send_crq(adapter, &crq);
	if (rc) {
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}

	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
	if (rc) {
		dev_err(dev, "Could not retrieve VPD size, rc = %d\n", rc);
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}
	mutex_unlock(&adapter->fw_lock);

	if (!adapter->vpd->len)
		return -ENODATA;

	if (!adapter->vpd->buff)
		adapter->vpd->buff = kzalloc(adapter->vpd->len, GFP_KERNEL);
	else if (adapter->vpd->len != len)
		adapter->vpd->buff =
			krealloc(adapter->vpd->buff,
				 adapter->vpd->len, GFP_KERNEL);

	if (!adapter->vpd->buff) {
		dev_err(dev, "Could not allocate VPD buffer\n");
		return -ENOMEM;
	}

	adapter->vpd->dma_addr =
		dma_map_single(dev, adapter->vpd->buff, adapter->vpd->len,
			       DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, adapter->vpd->dma_addr)) {
		dev_err(dev, "Could not map VPD buffer\n");
		kfree(adapter->vpd->buff);
		adapter->vpd->buff = NULL;
		return -ENOMEM;
	}

	mutex_lock(&adapter->fw_lock);
	adapter->fw_done_rc = 0;
	reinit_completion(&adapter->fw_done);

	crq.get_vpd.first = IBMVNIC_CRQ_CMD;
	crq.get_vpd.cmd = GET_VPD;
	crq.get_vpd.ioba = cpu_to_be32(adapter->vpd->dma_addr);
	crq.get_vpd.len = cpu_to_be32((u32)adapter->vpd->len);
	rc = ibmvnic_send_crq(adapter, &crq);
	if (rc) {
		kfree(adapter->vpd->buff);
		adapter->vpd->buff = NULL;
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}

	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
	if (rc) {
		dev_err(dev, "Unable to retrieve VPD, rc = %d\n", rc);
		kfree(adapter->vpd->buff);
		adapter->vpd->buff = NULL;
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}

	mutex_unlock(&adapter->fw_lock);
	return 0;
}
static int init_resources(struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int rc;

	rc = set_real_num_queues(netdev);
	if (rc)
		return rc;

	adapter->vpd = kzalloc(sizeof(*adapter->vpd), GFP_KERNEL);
	if (!adapter->vpd)
		return -ENOMEM;

	/* Vital Product Data (VPD) */
	rc = ibmvnic_get_vpd(adapter);
	if (rc) {
		netdev_err(netdev, "failed to initialize Vital Product Data (VPD)\n");
		return rc;
	}

	rc = init_napi(adapter);
	if (rc)
		return rc;

	send_query_map(adapter);

	rc = init_rx_pools(netdev);
	if (rc)
		return rc;

	rc = init_tx_pools(netdev);
	return rc;
}

static int __ibmvnic_open(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	enum vnic_state prev_state = adapter->state;
	int i, rc;

	adapter->state = VNIC_OPENING;
	replenish_pools(adapter);
	ibmvnic_napi_enable(adapter);

	/* We're ready to receive frames, enable the sub-crq interrupts and
	 * set the logical link state to up
	 */
	for (i = 0; i < adapter->req_rx_queues; i++) {
		netdev_dbg(netdev, "Enabling rx_scrq[%d] irq\n", i);
		if (prev_state == VNIC_CLOSED)
			enable_irq(adapter->rx_scrq[i]->irq);
		enable_scrq_irq(adapter, adapter->rx_scrq[i]);
	}

	for (i = 0; i < adapter->req_tx_queues; i++) {
		netdev_dbg(netdev, "Enabling tx_scrq[%d] irq\n", i);
		if (prev_state == VNIC_CLOSED)
			enable_irq(adapter->tx_scrq[i]->irq);
		enable_scrq_irq(adapter, adapter->tx_scrq[i]);
		/* netdev_tx_reset_queue will reset dql stats. During NON_FATAL
		 * resets, don't reset the stats because there could be batched
		 * skb's waiting to be sent. If we reset dql stats, we risk
		 * num_completed being greater than num_queued. This will cause
		 * a BUG_ON in dql_completed().
		 */
		if (adapter->reset_reason != VNIC_RESET_NON_FATAL)
			netdev_tx_reset_queue(netdev_get_tx_queue(netdev, i));
	}

	rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_UP);
	if (rc) {
		ibmvnic_napi_disable(adapter);
		ibmvnic_disable_irqs(adapter);
		return rc;
	}

	adapter->tx_queues_active = true;

	/* Since queues were stopped until now, there shouldn't be any
	 * one in ibmvnic_complete_tx() or ibmvnic_xmit() so maybe we
	 * don't need the synchronize_rcu()? Leaving it for consistency
	 * with setting ->tx_queues_active = false.
	 */
	synchronize_rcu();

	netif_tx_start_all_queues(netdev);

	if (prev_state == VNIC_CLOSED) {
		for (i = 0; i < adapter->req_rx_queues; i++)
			napi_schedule(&adapter->napi[i]);
	}

	adapter->state = VNIC_OPEN;
	return rc;
}

static int ibmvnic_open(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int rc;

	ASSERT_RTNL();

	/* If device failover is pending or we are about to reset, just set
	 * device state and return. Device operation will be handled by reset
	 * routine.
	 *
	 * It should be safe to overwrite the adapter->state here. Since
	 * we hold the rtnl, either the reset has not actually started or
	 * the rtnl got dropped during the set_link_state() in do_reset().
	 * In the former case, no one else is changing the state (again we
	 * have the rtnl) and in the latter case, do_reset() will detect and
	 * honor our setting below.
	 */
	if (adapter->failover_pending || (test_bit(0, &adapter->resetting))) {
		netdev_dbg(netdev, "[S:%s FOP:%d] Resetting, deferring open\n",
			   adapter_state_to_string(adapter->state),
			   adapter->failover_pending);
		adapter->state = VNIC_OPEN;
		rc = 0;
		goto out;
	}

	if (adapter->state != VNIC_CLOSED) {
		rc = ibmvnic_login(netdev);
		if (rc)
			goto out;

		rc = init_resources(adapter);
		if (rc) {
			netdev_err(netdev, "failed to initialize resources\n");
			goto out;
		}
	}

	rc = __ibmvnic_open(netdev);

out:
	/* If open failed and there is a pending failover or in-progress reset,
	 * set device state and return. Device operation will be handled by
	 * reset routine. See also comments above regarding rtnl.
	 */
	if (rc &&
	    (adapter->failover_pending || (test_bit(0, &adapter->resetting)))) {
		adapter->state = VNIC_OPEN;
		rc = 0;
	}

	if (rc) {
		release_resources(adapter);
		release_rx_pools(adapter);
		release_tx_pools(adapter);
	}

	return rc;
}
1914 * In the former case, no one else is changing the state (again we 1915 * have the rtnl) and in the latter case, do_reset() will detect and 1916 * honor our setting below. 1917 */ 1918 if (adapter->failover_pending || (test_bit(0, &adapter->resetting))) { 1919 netdev_dbg(netdev, "[S:%s FOP:%d] Resetting, deferring open\n", 1920 adapter_state_to_string(adapter->state), 1921 adapter->failover_pending); 1922 adapter->state = VNIC_OPEN; 1923 rc = 0; 1924 goto out; 1925 } 1926 1927 if (adapter->state != VNIC_CLOSED) { 1928 rc = ibmvnic_login(netdev); 1929 if (rc) 1930 goto out; 1931 1932 rc = init_resources(adapter); 1933 if (rc) { 1934 netdev_err(netdev, "failed to initialize resources\n"); 1935 goto out; 1936 } 1937 } 1938 1939 rc = __ibmvnic_open(netdev); 1940 1941 out: 1942 /* If open failed and there is a pending failover or in-progress reset, 1943 * set device state and return. Device operation will be handled by 1944 * reset routine. See also comments above regarding rtnl. 1945 */ 1946 if (rc && 1947 (adapter->failover_pending || (test_bit(0, &adapter->resetting)))) { 1948 adapter->state = VNIC_OPEN; 1949 rc = 0; 1950 } 1951 1952 if (rc) { 1953 release_resources(adapter); 1954 release_rx_pools(adapter); 1955 release_tx_pools(adapter); 1956 } 1957 1958 return rc; 1959 } 1960 1961 static void clean_rx_pools(struct ibmvnic_adapter *adapter) 1962 { 1963 struct ibmvnic_rx_pool *rx_pool; 1964 struct ibmvnic_rx_buff *rx_buff; 1965 u64 rx_entries; 1966 int rx_scrqs; 1967 int i, j; 1968 1969 if (!adapter->rx_pool) 1970 return; 1971 1972 rx_scrqs = adapter->num_active_rx_pools; 1973 rx_entries = adapter->req_rx_add_entries_per_subcrq; 1974 1975 /* Free any remaining skbs in the rx buffer pools */ 1976 for (i = 0; i < rx_scrqs; i++) { 1977 rx_pool = &adapter->rx_pool[i]; 1978 if (!rx_pool || !rx_pool->rx_buff) 1979 continue; 1980 1981 netdev_dbg(adapter->netdev, "Cleaning rx_pool[%d]\n", i); 1982 for (j = 0; j < rx_entries; j++) { 1983 rx_buff = &rx_pool->rx_buff[j]; 1984 if (rx_buff && rx_buff->skb) { 1985 dev_kfree_skb_any(rx_buff->skb); 1986 rx_buff->skb = NULL; 1987 } 1988 } 1989 } 1990 } 1991 1992 static void clean_one_tx_pool(struct ibmvnic_adapter *adapter, 1993 struct ibmvnic_tx_pool *tx_pool) 1994 { 1995 struct ibmvnic_tx_buff *tx_buff; 1996 u64 tx_entries; 1997 int i; 1998 1999 if (!tx_pool || !tx_pool->tx_buff) 2000 return; 2001 2002 tx_entries = tx_pool->num_buffers; 2003 2004 for (i = 0; i < tx_entries; i++) { 2005 tx_buff = &tx_pool->tx_buff[i]; 2006 if (tx_buff && tx_buff->skb) { 2007 dev_kfree_skb_any(tx_buff->skb); 2008 tx_buff->skb = NULL; 2009 } 2010 } 2011 } 2012 2013 static void clean_tx_pools(struct ibmvnic_adapter *adapter) 2014 { 2015 int tx_scrqs; 2016 int i; 2017 2018 if (!adapter->tx_pool || !adapter->tso_pool) 2019 return; 2020 2021 tx_scrqs = adapter->num_active_tx_pools; 2022 2023 /* Free any remaining skbs in the tx buffer pools */ 2024 for (i = 0; i < tx_scrqs; i++) { 2025 netdev_dbg(adapter->netdev, "Cleaning tx_pool[%d]\n", i); 2026 clean_one_tx_pool(adapter, &adapter->tx_pool[i]); 2027 clean_one_tx_pool(adapter, &adapter->tso_pool[i]); 2028 } 2029 } 2030 2031 static void ibmvnic_disable_irqs(struct ibmvnic_adapter *adapter) 2032 { 2033 struct net_device *netdev = adapter->netdev; 2034 int i; 2035 2036 if (adapter->tx_scrq) { 2037 for (i = 0; i < adapter->req_tx_queues; i++) 2038 if (adapter->tx_scrq[i]->irq) { 2039 netdev_dbg(netdev, 2040 "Disabling tx_scrq[%d] irq\n", i); 2041 disable_scrq_irq(adapter, adapter->tx_scrq[i]); 2042 
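				/* Quiesce each queue at both levels: the
				 * H_DISABLE_VIO_INTERRUPT h-call issued by
				 * disable_scrq_irq() above stops the
				 * hypervisor from signalling new sub-CRQ
				 * interrupts, while disable_irq() below
				 * waits out any handler still running on
				 * the Linux interrupt.
				 */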
disable_irq(adapter->tx_scrq[i]->irq); 2043 } 2044 } 2045 2046 if (adapter->rx_scrq) { 2047 for (i = 0; i < adapter->req_rx_queues; i++) { 2048 if (adapter->rx_scrq[i]->irq) { 2049 netdev_dbg(netdev, 2050 "Disabling rx_scrq[%d] irq\n", i); 2051 disable_scrq_irq(adapter, adapter->rx_scrq[i]); 2052 disable_irq(adapter->rx_scrq[i]->irq); 2053 } 2054 } 2055 } 2056 } 2057 2058 static void ibmvnic_cleanup(struct net_device *netdev) 2059 { 2060 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 2061 2062 /* ensure that transmissions are stopped if called by do_reset */ 2063 2064 adapter->tx_queues_active = false; 2065 2066 /* Ensure complete_tx() and ibmvnic_xmit() see ->tx_queues_active 2067 * update so they don't restart a queue after we stop it below. 2068 */ 2069 synchronize_rcu(); 2070 2071 if (test_bit(0, &adapter->resetting)) 2072 netif_tx_disable(netdev); 2073 else 2074 netif_tx_stop_all_queues(netdev); 2075 2076 ibmvnic_napi_disable(adapter); 2077 ibmvnic_disable_irqs(adapter); 2078 } 2079 2080 static int __ibmvnic_close(struct net_device *netdev) 2081 { 2082 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 2083 int rc = 0; 2084 2085 adapter->state = VNIC_CLOSING; 2086 rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN); 2087 adapter->state = VNIC_CLOSED; 2088 return rc; 2089 } 2090 2091 static int ibmvnic_close(struct net_device *netdev) 2092 { 2093 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 2094 int rc; 2095 2096 netdev_dbg(netdev, "[S:%s FOP:%d FRR:%d] Closing\n", 2097 adapter_state_to_string(adapter->state), 2098 adapter->failover_pending, 2099 adapter->force_reset_recovery); 2100 2101 /* If device failover is pending, just set device state and return. 2102 * Device operation will be handled by reset routine. 2103 */ 2104 if (adapter->failover_pending) { 2105 adapter->state = VNIC_CLOSED; 2106 return 0; 2107 } 2108 2109 rc = __ibmvnic_close(netdev); 2110 ibmvnic_cleanup(netdev); 2111 clean_rx_pools(adapter); 2112 clean_tx_pools(adapter); 2113 2114 return rc; 2115 } 2116 2117 /** 2118 * build_hdr_data - creates L2/L3/L4 header data buffer 2119 * @hdr_field: bitfield determining needed headers 2120 * @skb: socket buffer 2121 * @hdr_len: array of header lengths 2122 * @hdr_data: buffer to write the header to 2123 * 2124 * Reads hdr_field to determine which headers are needed by firmware. 2125 * Builds a buffer containing these headers. Saves individual header 2126 * lengths and total buffer length to be used to build descriptors. 
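 *
 * For example (values assumed only for illustration): an IPv4 TCP frame
 * with no VLAN tag, no IP options and no TCP options yields
 * hdr_len = {14, 20, 20}, and with the L2, L3 and L4 bits of hdr_field
 * set (bits 6, 5 and 4) the function returns 54.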
2127 */ 2128 static int build_hdr_data(u8 hdr_field, struct sk_buff *skb, 2129 int *hdr_len, u8 *hdr_data) 2130 { 2131 int len = 0; 2132 u8 *hdr; 2133 2134 if (skb_vlan_tagged(skb) && !skb_vlan_tag_present(skb)) 2135 hdr_len[0] = sizeof(struct vlan_ethhdr); 2136 else 2137 hdr_len[0] = sizeof(struct ethhdr); 2138 2139 if (skb->protocol == htons(ETH_P_IP)) { 2140 hdr_len[1] = ip_hdr(skb)->ihl * 4; 2141 if (ip_hdr(skb)->protocol == IPPROTO_TCP) 2142 hdr_len[2] = tcp_hdrlen(skb); 2143 else if (ip_hdr(skb)->protocol == IPPROTO_UDP) 2144 hdr_len[2] = sizeof(struct udphdr); 2145 } else if (skb->protocol == htons(ETH_P_IPV6)) { 2146 hdr_len[1] = sizeof(struct ipv6hdr); 2147 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) 2148 hdr_len[2] = tcp_hdrlen(skb); 2149 else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP) 2150 hdr_len[2] = sizeof(struct udphdr); 2151 } else if (skb->protocol == htons(ETH_P_ARP)) { 2152 hdr_len[1] = arp_hdr_len(skb->dev); 2153 hdr_len[2] = 0; 2154 } 2155 2156 memset(hdr_data, 0, 120); 2157 if ((hdr_field >> 6) & 1) { 2158 hdr = skb_mac_header(skb); 2159 memcpy(hdr_data, hdr, hdr_len[0]); 2160 len += hdr_len[0]; 2161 } 2162 2163 if ((hdr_field >> 5) & 1) { 2164 hdr = skb_network_header(skb); 2165 memcpy(hdr_data + len, hdr, hdr_len[1]); 2166 len += hdr_len[1]; 2167 } 2168 2169 if ((hdr_field >> 4) & 1) { 2170 hdr = skb_transport_header(skb); 2171 memcpy(hdr_data + len, hdr, hdr_len[2]); 2172 len += hdr_len[2]; 2173 } 2174 return len; 2175 } 2176 2177 /** 2178 * create_hdr_descs - create header and header extension descriptors 2179 * @hdr_field: bitfield determining needed headers 2180 * @hdr_data: buffer containing header data 2181 * @len: length of data buffer 2182 * @hdr_len: array of individual header lengths 2183 * @scrq_arr: descriptor array 2184 * 2185 * Creates header and, if needed, header extension descriptors and 2186 * places them in a descriptor array, scrq_arr 2187 */ 2188 2189 static int create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len, 2190 union sub_crq *scrq_arr) 2191 { 2192 union sub_crq hdr_desc; 2193 int tmp_len = len; 2194 int num_descs = 0; 2195 u8 *data, *cur; 2196 int tmp; 2197 2198 while (tmp_len > 0) { 2199 cur = hdr_data + len - tmp_len; 2200 2201 memset(&hdr_desc, 0, sizeof(hdr_desc)); 2202 if (cur != hdr_data) { 2203 data = hdr_desc.hdr_ext.data; 2204 tmp = tmp_len > 29 ? 29 : tmp_len; 2205 hdr_desc.hdr_ext.first = IBMVNIC_CRQ_CMD; 2206 hdr_desc.hdr_ext.type = IBMVNIC_HDR_EXT_DESC; 2207 hdr_desc.hdr_ext.len = tmp; 2208 } else { 2209 data = hdr_desc.hdr.data; 2210 tmp = tmp_len > 24 ? 24 : tmp_len; 2211 hdr_desc.hdr.first = IBMVNIC_CRQ_CMD; 2212 hdr_desc.hdr.type = IBMVNIC_HDR_DESC; 2213 hdr_desc.hdr.len = tmp; 2214 hdr_desc.hdr.l2_len = (u8)hdr_len[0]; 2215 hdr_desc.hdr.l3_len = cpu_to_be16((u16)hdr_len[1]); 2216 hdr_desc.hdr.l4_len = (u8)hdr_len[2]; 2217 hdr_desc.hdr.flag = hdr_field << 1; 2218 } 2219 memcpy(data, cur, tmp); 2220 tmp_len -= tmp; 2221 *scrq_arr = hdr_desc; 2222 scrq_arr++; 2223 num_descs++; 2224 } 2225 2226 return num_descs; 2227 } 2228 2229 /** 2230 * build_hdr_descs_arr - build a header descriptor array 2231 * @skb: tx socket buffer 2232 * @indir_arr: indirect array 2233 * @num_entries: number of descriptors to be sent 2234 * @hdr_field: bit field determining which headers will be sent 2235 * 2236 * This function will build a TX descriptor array with applicable 2237 * L2/L3/L4 packet header descriptors to be sent by send_subcrq_indirect. 
2238 */ 2239 2240 static void build_hdr_descs_arr(struct sk_buff *skb, 2241 union sub_crq *indir_arr, 2242 int *num_entries, u8 hdr_field) 2243 { 2244 int hdr_len[3] = {0, 0, 0}; 2245 u8 hdr_data[140] = {0}; 2246 int tot_len; 2247 2248 tot_len = build_hdr_data(hdr_field, skb, hdr_len, 2249 hdr_data); 2250 *num_entries += create_hdr_descs(hdr_field, hdr_data, tot_len, hdr_len, 2251 indir_arr + 1); 2252 } 2253 2254 static int ibmvnic_xmit_workarounds(struct sk_buff *skb, 2255 struct net_device *netdev) 2256 { 2257 /* For some backing devices, mishandling of small packets 2258 * can result in a loss of connection or TX stall. Device 2259 * architects recommend that no packet should be smaller 2260 * than the minimum MTU value provided to the driver, so 2261 * pad any packets to that length 2262 */ 2263 if (skb->len < netdev->min_mtu) 2264 return skb_put_padto(skb, netdev->min_mtu); 2265 2266 return 0; 2267 } 2268 2269 static void ibmvnic_tx_scrq_clean_buffer(struct ibmvnic_adapter *adapter, 2270 struct ibmvnic_sub_crq_queue *tx_scrq) 2271 { 2272 struct ibmvnic_ind_xmit_queue *ind_bufp; 2273 struct ibmvnic_tx_buff *tx_buff; 2274 struct ibmvnic_tx_pool *tx_pool; 2275 union sub_crq tx_scrq_entry; 2276 int queue_num; 2277 int entries; 2278 int index; 2279 int i; 2280 2281 ind_bufp = &tx_scrq->ind_buf; 2282 entries = (u64)ind_bufp->index; 2283 queue_num = tx_scrq->pool_index; 2284 2285 for (i = entries - 1; i >= 0; --i) { 2286 tx_scrq_entry = ind_bufp->indir_arr[i]; 2287 if (tx_scrq_entry.v1.type != IBMVNIC_TX_DESC) 2288 continue; 2289 index = be32_to_cpu(tx_scrq_entry.v1.correlator); 2290 if (index & IBMVNIC_TSO_POOL_MASK) { 2291 tx_pool = &adapter->tso_pool[queue_num]; 2292 index &= ~IBMVNIC_TSO_POOL_MASK; 2293 } else { 2294 tx_pool = &adapter->tx_pool[queue_num]; 2295 } 2296 tx_pool->free_map[tx_pool->consumer_index] = index; 2297 tx_pool->consumer_index = tx_pool->consumer_index == 0 ? 
2298 tx_pool->num_buffers - 1 : 2299 tx_pool->consumer_index - 1; 2300 tx_buff = &tx_pool->tx_buff[index]; 2301 adapter->netdev->stats.tx_packets--; 2302 adapter->netdev->stats.tx_bytes -= tx_buff->skb->len; 2303 adapter->tx_stats_buffers[queue_num].packets--; 2304 adapter->tx_stats_buffers[queue_num].bytes -= 2305 tx_buff->skb->len; 2306 dev_kfree_skb_any(tx_buff->skb); 2307 tx_buff->skb = NULL; 2308 adapter->netdev->stats.tx_dropped++; 2309 } 2310 2311 ind_bufp->index = 0; 2312 2313 if (atomic_sub_return(entries, &tx_scrq->used) <= 2314 (adapter->req_tx_entries_per_subcrq / 2) && 2315 __netif_subqueue_stopped(adapter->netdev, queue_num)) { 2316 rcu_read_lock(); 2317 2318 if (adapter->tx_queues_active) { 2319 netif_wake_subqueue(adapter->netdev, queue_num); 2320 netdev_dbg(adapter->netdev, "Started queue %d\n", 2321 queue_num); 2322 } 2323 2324 rcu_read_unlock(); 2325 } 2326 } 2327 2328 static int ibmvnic_tx_scrq_flush(struct ibmvnic_adapter *adapter, 2329 struct ibmvnic_sub_crq_queue *tx_scrq) 2330 { 2331 struct ibmvnic_ind_xmit_queue *ind_bufp; 2332 u64 dma_addr; 2333 u64 entries; 2334 u64 handle; 2335 int rc; 2336 2337 ind_bufp = &tx_scrq->ind_buf; 2338 dma_addr = (u64)ind_bufp->indir_dma; 2339 entries = (u64)ind_bufp->index; 2340 handle = tx_scrq->handle; 2341 2342 if (!entries) 2343 return 0; 2344 rc = send_subcrq_indirect(adapter, handle, dma_addr, entries); 2345 if (rc) 2346 ibmvnic_tx_scrq_clean_buffer(adapter, tx_scrq); 2347 else 2348 ind_bufp->index = 0; 2349 return 0; 2350 } 2351 2352 static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) 2353 { 2354 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 2355 int queue_num = skb_get_queue_mapping(skb); 2356 u8 *hdrs = (u8 *)&adapter->tx_rx_desc_req; 2357 struct device *dev = &adapter->vdev->dev; 2358 struct ibmvnic_ind_xmit_queue *ind_bufp; 2359 struct ibmvnic_tx_buff *tx_buff = NULL; 2360 struct ibmvnic_sub_crq_queue *tx_scrq; 2361 struct ibmvnic_long_term_buff *ltb; 2362 struct ibmvnic_tx_pool *tx_pool; 2363 unsigned int tx_send_failed = 0; 2364 netdev_tx_t ret = NETDEV_TX_OK; 2365 unsigned int tx_map_failed = 0; 2366 union sub_crq indir_arr[16]; 2367 unsigned int tx_dropped = 0; 2368 unsigned int tx_packets = 0; 2369 unsigned int tx_bytes = 0; 2370 dma_addr_t data_dma_addr; 2371 struct netdev_queue *txq; 2372 unsigned long lpar_rc; 2373 union sub_crq tx_crq; 2374 unsigned int offset; 2375 int num_entries = 1; 2376 unsigned char *dst; 2377 int bufidx = 0; 2378 u8 proto = 0; 2379 2380 /* If a reset is in progress, drop the packet since 2381 * the scrqs may get torn down. Otherwise use the 2382 * rcu to ensure reset waits for us to complete. 
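	 * The pairing is in ibmvnic_cleanup(): it clears ->tx_queues_active
	 * and then calls synchronize_rcu(), so once that returns, any
	 * transmit that saw the old value has already left this read-side
	 * critical section.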
2383 */ 2384 rcu_read_lock(); 2385 if (!adapter->tx_queues_active) { 2386 dev_kfree_skb_any(skb); 2387 2388 tx_send_failed++; 2389 tx_dropped++; 2390 ret = NETDEV_TX_OK; 2391 goto out; 2392 } 2393 2394 tx_scrq = adapter->tx_scrq[queue_num]; 2395 txq = netdev_get_tx_queue(netdev, queue_num); 2396 ind_bufp = &tx_scrq->ind_buf; 2397 2398 if (ibmvnic_xmit_workarounds(skb, netdev)) { 2399 tx_dropped++; 2400 tx_send_failed++; 2401 ret = NETDEV_TX_OK; 2402 ibmvnic_tx_scrq_flush(adapter, tx_scrq); 2403 goto out; 2404 } 2405 2406 if (skb_is_gso(skb)) 2407 tx_pool = &adapter->tso_pool[queue_num]; 2408 else 2409 tx_pool = &adapter->tx_pool[queue_num]; 2410 2411 bufidx = tx_pool->free_map[tx_pool->consumer_index]; 2412 2413 if (bufidx == IBMVNIC_INVALID_MAP) { 2414 dev_kfree_skb_any(skb); 2415 tx_send_failed++; 2416 tx_dropped++; 2417 ibmvnic_tx_scrq_flush(adapter, tx_scrq); 2418 ret = NETDEV_TX_OK; 2419 goto out; 2420 } 2421 2422 tx_pool->free_map[tx_pool->consumer_index] = IBMVNIC_INVALID_MAP; 2423 2424 map_txpool_buf_to_ltb(tx_pool, bufidx, <b, &offset); 2425 2426 dst = ltb->buff + offset; 2427 memset(dst, 0, tx_pool->buf_size); 2428 data_dma_addr = ltb->addr + offset; 2429 2430 if (skb_shinfo(skb)->nr_frags) { 2431 int cur, i; 2432 2433 /* Copy the head */ 2434 skb_copy_from_linear_data(skb, dst, skb_headlen(skb)); 2435 cur = skb_headlen(skb); 2436 2437 /* Copy the frags */ 2438 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 2439 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 2440 2441 memcpy(dst + cur, skb_frag_address(frag), 2442 skb_frag_size(frag)); 2443 cur += skb_frag_size(frag); 2444 } 2445 } else { 2446 skb_copy_from_linear_data(skb, dst, skb->len); 2447 } 2448 2449 /* post changes to long_term_buff *dst before VIOS accessing it */ 2450 dma_wmb(); 2451 2452 tx_pool->consumer_index = 2453 (tx_pool->consumer_index + 1) % tx_pool->num_buffers; 2454 2455 tx_buff = &tx_pool->tx_buff[bufidx]; 2456 tx_buff->skb = skb; 2457 tx_buff->index = bufidx; 2458 tx_buff->pool_index = queue_num; 2459 2460 memset(&tx_crq, 0, sizeof(tx_crq)); 2461 tx_crq.v1.first = IBMVNIC_CRQ_CMD; 2462 tx_crq.v1.type = IBMVNIC_TX_DESC; 2463 tx_crq.v1.n_crq_elem = 1; 2464 tx_crq.v1.n_sge = 1; 2465 tx_crq.v1.flags1 = IBMVNIC_TX_COMP_NEEDED; 2466 2467 if (skb_is_gso(skb)) 2468 tx_crq.v1.correlator = 2469 cpu_to_be32(bufidx | IBMVNIC_TSO_POOL_MASK); 2470 else 2471 tx_crq.v1.correlator = cpu_to_be32(bufidx); 2472 tx_crq.v1.dma_reg = cpu_to_be16(ltb->map_id); 2473 tx_crq.v1.sge_len = cpu_to_be32(skb->len); 2474 tx_crq.v1.ioba = cpu_to_be64(data_dma_addr); 2475 2476 if (adapter->vlan_header_insertion && skb_vlan_tag_present(skb)) { 2477 tx_crq.v1.flags2 |= IBMVNIC_TX_VLAN_INSERT; 2478 tx_crq.v1.vlan_id = cpu_to_be16(skb->vlan_tci); 2479 } 2480 2481 if (skb->protocol == htons(ETH_P_IP)) { 2482 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV4; 2483 proto = ip_hdr(skb)->protocol; 2484 } else if (skb->protocol == htons(ETH_P_IPV6)) { 2485 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV6; 2486 proto = ipv6_hdr(skb)->nexthdr; 2487 } 2488 2489 if (proto == IPPROTO_TCP) 2490 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_TCP; 2491 else if (proto == IPPROTO_UDP) 2492 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_UDP; 2493 2494 if (skb->ip_summed == CHECKSUM_PARTIAL) { 2495 tx_crq.v1.flags1 |= IBMVNIC_TX_CHKSUM_OFFLOAD; 2496 hdrs += 2; 2497 } 2498 if (skb_is_gso(skb)) { 2499 tx_crq.v1.flags1 |= IBMVNIC_TX_LSO; 2500 tx_crq.v1.mss = cpu_to_be16(skb_shinfo(skb)->gso_size); 2501 hdrs += 2; 2502 } 2503 2504 if ((*hdrs >> 7) & 1) 2505 build_hdr_descs_arr(skb, indir_arr, 
&num_entries, *hdrs); 2506 2507 tx_crq.v1.n_crq_elem = num_entries; 2508 tx_buff->num_entries = num_entries; 2509 /* flush buffer if current entry can not fit */ 2510 if (num_entries + ind_bufp->index > IBMVNIC_MAX_IND_DESCS) { 2511 lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq); 2512 if (lpar_rc != H_SUCCESS) 2513 goto tx_flush_err; 2514 } 2515 2516 indir_arr[0] = tx_crq; 2517 memcpy(&ind_bufp->indir_arr[ind_bufp->index], &indir_arr[0], 2518 num_entries * sizeof(struct ibmvnic_generic_scrq)); 2519 ind_bufp->index += num_entries; 2520 if (__netdev_tx_sent_queue(txq, skb->len, 2521 netdev_xmit_more() && 2522 ind_bufp->index < IBMVNIC_MAX_IND_DESCS)) { 2523 lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq); 2524 if (lpar_rc != H_SUCCESS) 2525 goto tx_err; 2526 } 2527 2528 if (atomic_add_return(num_entries, &tx_scrq->used) 2529 >= adapter->req_tx_entries_per_subcrq) { 2530 netdev_dbg(netdev, "Stopping queue %d\n", queue_num); 2531 netif_stop_subqueue(netdev, queue_num); 2532 } 2533 2534 tx_packets++; 2535 tx_bytes += skb->len; 2536 txq_trans_cond_update(txq); 2537 ret = NETDEV_TX_OK; 2538 goto out; 2539 2540 tx_flush_err: 2541 dev_kfree_skb_any(skb); 2542 tx_buff->skb = NULL; 2543 tx_pool->consumer_index = tx_pool->consumer_index == 0 ? 2544 tx_pool->num_buffers - 1 : 2545 tx_pool->consumer_index - 1; 2546 tx_dropped++; 2547 tx_err: 2548 if (lpar_rc != H_CLOSED && lpar_rc != H_PARAMETER) 2549 dev_err_ratelimited(dev, "tx: send failed\n"); 2550 2551 if (lpar_rc == H_CLOSED || adapter->failover_pending) { 2552 /* Disable TX and report carrier off if queue is closed 2553 * or pending failover. 2554 * Firmware guarantees that a signal will be sent to the 2555 * driver, triggering a reset or some other action. 2556 */ 2557 netif_tx_stop_all_queues(netdev); 2558 netif_carrier_off(netdev); 2559 } 2560 out: 2561 rcu_read_unlock(); 2562 netdev->stats.tx_dropped += tx_dropped; 2563 netdev->stats.tx_bytes += tx_bytes; 2564 netdev->stats.tx_packets += tx_packets; 2565 adapter->tx_send_failed += tx_send_failed; 2566 adapter->tx_map_failed += tx_map_failed; 2567 adapter->tx_stats_buffers[queue_num].packets += tx_packets; 2568 adapter->tx_stats_buffers[queue_num].bytes += tx_bytes; 2569 adapter->tx_stats_buffers[queue_num].dropped_packets += tx_dropped; 2570 2571 return ret; 2572 } 2573 2574 static void ibmvnic_set_multi(struct net_device *netdev) 2575 { 2576 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 2577 struct netdev_hw_addr *ha; 2578 union ibmvnic_crq crq; 2579 2580 memset(&crq, 0, sizeof(crq)); 2581 crq.request_capability.first = IBMVNIC_CRQ_CMD; 2582 crq.request_capability.cmd = REQUEST_CAPABILITY; 2583 2584 if (netdev->flags & IFF_PROMISC) { 2585 if (!adapter->promisc_supported) 2586 return; 2587 } else { 2588 if (netdev->flags & IFF_ALLMULTI) { 2589 /* Accept all multicast */ 2590 memset(&crq, 0, sizeof(crq)); 2591 crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD; 2592 crq.multicast_ctrl.cmd = MULTICAST_CTRL; 2593 crq.multicast_ctrl.flags = IBMVNIC_ENABLE_ALL; 2594 ibmvnic_send_crq(adapter, &crq); 2595 } else if (netdev_mc_empty(netdev)) { 2596 /* Reject all multicast */ 2597 memset(&crq, 0, sizeof(crq)); 2598 crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD; 2599 crq.multicast_ctrl.cmd = MULTICAST_CTRL; 2600 crq.multicast_ctrl.flags = IBMVNIC_DISABLE_ALL; 2601 ibmvnic_send_crq(adapter, &crq); 2602 } else { 2603 /* Accept one or more multicast(s) */ 2604 netdev_for_each_mc_addr(ha, netdev) { 2605 memset(&crq, 0, sizeof(crq)); 2606 crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD; 2607 
crq.multicast_ctrl.cmd = MULTICAST_CTRL;
				crq.multicast_ctrl.flags = IBMVNIC_ENABLE_MC;
				ether_addr_copy(&crq.multicast_ctrl.mac_addr[0],
						ha->addr);
				ibmvnic_send_crq(adapter, &crq);
			}
		}
	}
}

static int __ibmvnic_set_mac(struct net_device *netdev, u8 *dev_addr)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	union ibmvnic_crq crq;
	int rc;

	if (!is_valid_ether_addr(dev_addr)) {
		rc = -EADDRNOTAVAIL;
		goto err;
	}

	memset(&crq, 0, sizeof(crq));
	crq.change_mac_addr.first = IBMVNIC_CRQ_CMD;
	crq.change_mac_addr.cmd = CHANGE_MAC_ADDR;
	ether_addr_copy(&crq.change_mac_addr.mac_addr[0], dev_addr);

	mutex_lock(&adapter->fw_lock);
	adapter->fw_done_rc = 0;
	reinit_completion(&adapter->fw_done);

	rc = ibmvnic_send_crq(adapter, &crq);
	if (rc) {
		rc = -EIO;
		mutex_unlock(&adapter->fw_lock);
		goto err;
	}

	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
	/* netdev->dev_addr is changed in handle_change_mac_rsp function */
	if (rc || adapter->fw_done_rc) {
		rc = -EIO;
		mutex_unlock(&adapter->fw_lock);
		goto err;
	}
	mutex_unlock(&adapter->fw_lock);
	return 0;
err:
	ether_addr_copy(adapter->mac_addr, netdev->dev_addr);
	return rc;
}

static int ibmvnic_set_mac(struct net_device *netdev, void *p)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int rc;

	rc = 0;
	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	ether_addr_copy(adapter->mac_addr, addr->sa_data);
	if (adapter->state != VNIC_PROBED)
		rc = __ibmvnic_set_mac(netdev, addr->sa_data);

	return rc;
}

static const char *reset_reason_to_string(enum ibmvnic_reset_reason reason)
{
	switch (reason) {
	case VNIC_RESET_FAILOVER:
		return "FAILOVER";
	case VNIC_RESET_MOBILITY:
		return "MOBILITY";
	case VNIC_RESET_FATAL:
		return "FATAL";
	case VNIC_RESET_NON_FATAL:
		return "NON_FATAL";
	case VNIC_RESET_TIMEOUT:
		return "TIMEOUT";
	case VNIC_RESET_CHANGE_PARAM:
		return "CHANGE_PARAM";
	case VNIC_RESET_PASSIVE_INIT:
		return "PASSIVE_INIT";
	}
	return "UNKNOWN";
}

/*
 * Initialize the init_done completion and return code values. We
 * can get a transport event just after registering the CRQ and the
 * tasklet will use this to communicate the transport event. To ensure
 * we don't miss the notification/error, initialize these _before_
 * registering the CRQ.
 */
static inline void reinit_init_done(struct ibmvnic_adapter *adapter)
{
	reinit_completion(&adapter->init_done);
	adapter->init_done_rc = 0;
}

/*
 * do_reset returns zero if we are able to keep processing reset events, or
 * non-zero if we hit a fatal error and must halt.
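 *
 * reset_state tracks the adapter state at the time the reset began (it
 * is re-read under the rtnl below). On failure the adapter is restored
 * to that state; on success it is the "target" state: a device that was
 * VNIC_OPEN is re-opened at the end of the reset, while one that was
 * closed is left in VNIC_CLOSED.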
2712 */ 2713 static int do_reset(struct ibmvnic_adapter *adapter, 2714 struct ibmvnic_rwi *rwi, u32 reset_state) 2715 { 2716 struct net_device *netdev = adapter->netdev; 2717 u64 old_num_rx_queues, old_num_tx_queues; 2718 u64 old_num_rx_slots, old_num_tx_slots; 2719 int rc; 2720 2721 netdev_dbg(adapter->netdev, 2722 "[S:%s FOP:%d] Reset reason: %s, reset_state: %s\n", 2723 adapter_state_to_string(adapter->state), 2724 adapter->failover_pending, 2725 reset_reason_to_string(rwi->reset_reason), 2726 adapter_state_to_string(reset_state)); 2727 2728 adapter->reset_reason = rwi->reset_reason; 2729 /* requestor of VNIC_RESET_CHANGE_PARAM already has the rtnl lock */ 2730 if (!(adapter->reset_reason == VNIC_RESET_CHANGE_PARAM)) 2731 rtnl_lock(); 2732 2733 /* Now that we have the rtnl lock, clear any pending failover. 2734 * This will ensure ibmvnic_open() has either completed or will 2735 * block until failover is complete. 2736 */ 2737 if (rwi->reset_reason == VNIC_RESET_FAILOVER) 2738 adapter->failover_pending = false; 2739 2740 /* read the state and check (again) after getting rtnl */ 2741 reset_state = adapter->state; 2742 2743 if (reset_state == VNIC_REMOVING || reset_state == VNIC_REMOVED) { 2744 rc = -EBUSY; 2745 goto out; 2746 } 2747 2748 netif_carrier_off(netdev); 2749 2750 old_num_rx_queues = adapter->req_rx_queues; 2751 old_num_tx_queues = adapter->req_tx_queues; 2752 old_num_rx_slots = adapter->req_rx_add_entries_per_subcrq; 2753 old_num_tx_slots = adapter->req_tx_entries_per_subcrq; 2754 2755 ibmvnic_cleanup(netdev); 2756 2757 if (reset_state == VNIC_OPEN && 2758 adapter->reset_reason != VNIC_RESET_MOBILITY && 2759 adapter->reset_reason != VNIC_RESET_FAILOVER) { 2760 if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) { 2761 rc = __ibmvnic_close(netdev); 2762 if (rc) 2763 goto out; 2764 } else { 2765 adapter->state = VNIC_CLOSING; 2766 2767 /* Release the RTNL lock before link state change and 2768 * re-acquire after the link state change to allow 2769 * linkwatch_event to grab the RTNL lock and run during 2770 * a reset. 2771 */ 2772 rtnl_unlock(); 2773 rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN); 2774 rtnl_lock(); 2775 if (rc) 2776 goto out; 2777 2778 if (adapter->state == VNIC_OPEN) { 2779 /* When we dropped rtnl, ibmvnic_open() got 2780 * it and noticed that we are resetting and 2781 * set the adapter state to OPEN. Update our 2782 * new "target" state, and resume the reset 2783 * from VNIC_CLOSING state. 2784 */ 2785 netdev_dbg(netdev, 2786 "Open changed state from %s, updating.\n", 2787 adapter_state_to_string(reset_state)); 2788 reset_state = VNIC_OPEN; 2789 adapter->state = VNIC_CLOSING; 2790 } 2791 2792 if (adapter->state != VNIC_CLOSING) { 2793 /* If someone else changed the adapter state 2794 * when we dropped the rtnl, fail the reset 2795 */ 2796 rc = -EAGAIN; 2797 goto out; 2798 } 2799 adapter->state = VNIC_CLOSED; 2800 } 2801 } 2802 2803 if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) { 2804 release_resources(adapter); 2805 release_sub_crqs(adapter, 1); 2806 release_crq_queue(adapter); 2807 } 2808 2809 if (adapter->reset_reason != VNIC_RESET_NON_FATAL) { 2810 /* remove the closed state so when we call open it appears 2811 * we are coming from the probed state. 
2812 */ 2813 adapter->state = VNIC_PROBED; 2814 2815 reinit_init_done(adapter); 2816 2817 if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) { 2818 rc = init_crq_queue(adapter); 2819 } else if (adapter->reset_reason == VNIC_RESET_MOBILITY) { 2820 rc = ibmvnic_reenable_crq_queue(adapter); 2821 release_sub_crqs(adapter, 1); 2822 } else { 2823 rc = ibmvnic_reset_crq(adapter); 2824 if (rc == H_CLOSED || rc == H_SUCCESS) { 2825 rc = vio_enable_interrupts(adapter->vdev); 2826 if (rc) 2827 netdev_err(adapter->netdev, 2828 "Reset failed to enable interrupts. rc=%d\n", 2829 rc); 2830 } 2831 } 2832 2833 if (rc) { 2834 netdev_err(adapter->netdev, 2835 "Reset couldn't initialize crq. rc=%d\n", rc); 2836 goto out; 2837 } 2838 2839 rc = ibmvnic_reset_init(adapter, true); 2840 if (rc) 2841 goto out; 2842 2843 /* If the adapter was in PROBE or DOWN state prior to the reset, 2844 * exit here. 2845 */ 2846 if (reset_state == VNIC_PROBED || reset_state == VNIC_DOWN) { 2847 rc = 0; 2848 goto out; 2849 } 2850 2851 rc = ibmvnic_login(netdev); 2852 if (rc) 2853 goto out; 2854 2855 if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) { 2856 rc = init_resources(adapter); 2857 if (rc) 2858 goto out; 2859 } else if (adapter->req_rx_queues != old_num_rx_queues || 2860 adapter->req_tx_queues != old_num_tx_queues || 2861 adapter->req_rx_add_entries_per_subcrq != 2862 old_num_rx_slots || 2863 adapter->req_tx_entries_per_subcrq != 2864 old_num_tx_slots || 2865 !adapter->rx_pool || 2866 !adapter->tso_pool || 2867 !adapter->tx_pool) { 2868 release_napi(adapter); 2869 release_vpd_data(adapter); 2870 2871 rc = init_resources(adapter); 2872 if (rc) 2873 goto out; 2874 2875 } else { 2876 rc = init_tx_pools(netdev); 2877 if (rc) { 2878 netdev_dbg(netdev, 2879 "init tx pools failed (%d)\n", 2880 rc); 2881 goto out; 2882 } 2883 2884 rc = init_rx_pools(netdev); 2885 if (rc) { 2886 netdev_dbg(netdev, 2887 "init rx pools failed (%d)\n", 2888 rc); 2889 goto out; 2890 } 2891 } 2892 ibmvnic_disable_irqs(adapter); 2893 } 2894 adapter->state = VNIC_CLOSED; 2895 2896 if (reset_state == VNIC_CLOSED) { 2897 rc = 0; 2898 goto out; 2899 } 2900 2901 rc = __ibmvnic_open(netdev); 2902 if (rc) { 2903 rc = IBMVNIC_OPEN_FAILED; 2904 goto out; 2905 } 2906 2907 /* refresh device's multicast list */ 2908 ibmvnic_set_multi(netdev); 2909 2910 if (adapter->reset_reason == VNIC_RESET_FAILOVER || 2911 adapter->reset_reason == VNIC_RESET_MOBILITY) 2912 __netdev_notify_peers(netdev); 2913 2914 rc = 0; 2915 2916 out: 2917 /* restore the adapter state if reset failed */ 2918 if (rc) 2919 adapter->state = reset_state; 2920 /* requestor of VNIC_RESET_CHANGE_PARAM should still hold the rtnl lock */ 2921 if (!(adapter->reset_reason == VNIC_RESET_CHANGE_PARAM)) 2922 rtnl_unlock(); 2923 2924 netdev_dbg(adapter->netdev, "[S:%s FOP:%d] Reset done, rc %d\n", 2925 adapter_state_to_string(adapter->state), 2926 adapter->failover_pending, rc); 2927 return rc; 2928 } 2929 2930 static int do_hard_reset(struct ibmvnic_adapter *adapter, 2931 struct ibmvnic_rwi *rwi, u32 reset_state) 2932 { 2933 struct net_device *netdev = adapter->netdev; 2934 int rc; 2935 2936 netdev_dbg(adapter->netdev, "Hard resetting driver (%s)\n", 2937 reset_reason_to_string(rwi->reset_reason)); 2938 2939 /* read the state and check (again) after getting rtnl */ 2940 reset_state = adapter->state; 2941 2942 if (reset_state == VNIC_REMOVING || reset_state == VNIC_REMOVED) { 2943 rc = -EBUSY; 2944 goto out; 2945 } 2946 2947 netif_carrier_off(netdev); 2948 adapter->reset_reason = rwi->reset_reason; 2949 
2950 ibmvnic_cleanup(netdev); 2951 release_resources(adapter); 2952 release_sub_crqs(adapter, 0); 2953 release_crq_queue(adapter); 2954 2955 /* remove the closed state so when we call open it appears 2956 * we are coming from the probed state. 2957 */ 2958 adapter->state = VNIC_PROBED; 2959 2960 reinit_init_done(adapter); 2961 2962 rc = init_crq_queue(adapter); 2963 if (rc) { 2964 netdev_err(adapter->netdev, 2965 "Couldn't initialize crq. rc=%d\n", rc); 2966 goto out; 2967 } 2968 2969 rc = ibmvnic_reset_init(adapter, false); 2970 if (rc) 2971 goto out; 2972 2973 /* If the adapter was in PROBE or DOWN state prior to the reset, 2974 * exit here. 2975 */ 2976 if (reset_state == VNIC_PROBED || reset_state == VNIC_DOWN) 2977 goto out; 2978 2979 rc = ibmvnic_login(netdev); 2980 if (rc) 2981 goto out; 2982 2983 rc = init_resources(adapter); 2984 if (rc) 2985 goto out; 2986 2987 ibmvnic_disable_irqs(adapter); 2988 adapter->state = VNIC_CLOSED; 2989 2990 if (reset_state == VNIC_CLOSED) 2991 goto out; 2992 2993 rc = __ibmvnic_open(netdev); 2994 if (rc) { 2995 rc = IBMVNIC_OPEN_FAILED; 2996 goto out; 2997 } 2998 2999 __netdev_notify_peers(netdev); 3000 out: 3001 /* restore adapter state if reset failed */ 3002 if (rc) 3003 adapter->state = reset_state; 3004 netdev_dbg(adapter->netdev, "[S:%s FOP:%d] Hard reset done, rc %d\n", 3005 adapter_state_to_string(adapter->state), 3006 adapter->failover_pending, rc); 3007 return rc; 3008 } 3009 3010 static struct ibmvnic_rwi *get_next_rwi(struct ibmvnic_adapter *adapter) 3011 { 3012 struct ibmvnic_rwi *rwi; 3013 unsigned long flags; 3014 3015 spin_lock_irqsave(&adapter->rwi_lock, flags); 3016 3017 if (!list_empty(&adapter->rwi_list)) { 3018 rwi = list_first_entry(&adapter->rwi_list, struct ibmvnic_rwi, 3019 list); 3020 list_del(&rwi->list); 3021 } else { 3022 rwi = NULL; 3023 } 3024 3025 spin_unlock_irqrestore(&adapter->rwi_lock, flags); 3026 return rwi; 3027 } 3028 3029 /** 3030 * do_passive_init - complete probing when partner device is detected. 3031 * @adapter: ibmvnic_adapter struct 3032 * 3033 * If the ibmvnic device does not have a partner device to communicate with at boot 3034 * and that partner device comes online at a later time, this function is called 3035 * to complete the initialization process of ibmvnic device. 3036 * Caller is expected to hold rtnl_lock(). 3037 * 3038 * Returns non-zero if sub-CRQs are not initialized properly leaving the device 3039 * in the down state. 3040 * Returns 0 upon success and the device is in PROBED state. 
3041 */ 3042 3043 static int do_passive_init(struct ibmvnic_adapter *adapter) 3044 { 3045 unsigned long timeout = msecs_to_jiffies(30000); 3046 struct net_device *netdev = adapter->netdev; 3047 struct device *dev = &adapter->vdev->dev; 3048 int rc; 3049 3050 netdev_dbg(netdev, "Partner device found, probing.\n"); 3051 3052 adapter->state = VNIC_PROBING; 3053 reinit_completion(&adapter->init_done); 3054 adapter->init_done_rc = 0; 3055 adapter->crq.active = true; 3056 3057 rc = send_crq_init_complete(adapter); 3058 if (rc) 3059 goto out; 3060 3061 rc = send_version_xchg(adapter); 3062 if (rc) 3063 netdev_dbg(adapter->netdev, "send_version_xchg failed, rc=%d\n", rc); 3064 3065 if (!wait_for_completion_timeout(&adapter->init_done, timeout)) { 3066 dev_err(dev, "Initialization sequence timed out\n"); 3067 rc = -ETIMEDOUT; 3068 goto out; 3069 } 3070 3071 rc = init_sub_crqs(adapter); 3072 if (rc) { 3073 dev_err(dev, "Initialization of sub crqs failed, rc=%d\n", rc); 3074 goto out; 3075 } 3076 3077 rc = init_sub_crq_irqs(adapter); 3078 if (rc) { 3079 dev_err(dev, "Failed to initialize sub crq irqs\n, rc=%d", rc); 3080 goto init_failed; 3081 } 3082 3083 netdev->mtu = adapter->req_mtu - ETH_HLEN; 3084 netdev->min_mtu = adapter->min_mtu - ETH_HLEN; 3085 netdev->max_mtu = adapter->max_mtu - ETH_HLEN; 3086 3087 adapter->state = VNIC_PROBED; 3088 netdev_dbg(netdev, "Probed successfully. Waiting for signal from partner device.\n"); 3089 3090 return 0; 3091 3092 init_failed: 3093 release_sub_crqs(adapter, 1); 3094 out: 3095 adapter->state = VNIC_DOWN; 3096 return rc; 3097 } 3098 3099 static void __ibmvnic_reset(struct work_struct *work) 3100 { 3101 struct ibmvnic_adapter *adapter; 3102 unsigned int timeout = 5000; 3103 struct ibmvnic_rwi *tmprwi; 3104 bool saved_state = false; 3105 struct ibmvnic_rwi *rwi; 3106 unsigned long flags; 3107 struct device *dev; 3108 bool need_reset; 3109 int num_fails = 0; 3110 u32 reset_state; 3111 int rc = 0; 3112 3113 adapter = container_of(work, struct ibmvnic_adapter, ibmvnic_reset); 3114 dev = &adapter->vdev->dev; 3115 3116 /* Wait for ibmvnic_probe() to complete. If probe is taking too long 3117 * or if another reset is in progress, defer work for now. If probe 3118 * eventually fails it will flush and terminate our work. 3119 * 3120 * Three possibilities here: 3121 * 1. Adpater being removed - just return 3122 * 2. Timed out on probe or another reset in progress - delay the work 3123 * 3. Completed probe - perform any resets in queue 3124 */ 3125 if (adapter->state == VNIC_PROBING && 3126 !wait_for_completion_timeout(&adapter->probe_done, timeout)) { 3127 dev_err(dev, "Reset thread timed out on probe"); 3128 queue_delayed_work(system_long_wq, 3129 &adapter->ibmvnic_delayed_reset, 3130 IBMVNIC_RESET_DELAY); 3131 return; 3132 } 3133 3134 /* adapter is done with probe (i.e state is never VNIC_PROBING now) */ 3135 if (adapter->state == VNIC_REMOVING) 3136 return; 3137 3138 /* ->rwi_list is stable now (no one else is removing entries) */ 3139 3140 /* ibmvnic_probe() may have purged the reset queue after we were 3141 * scheduled to process a reset so there maybe no resets to process. 3142 * Before setting the ->resetting bit though, we have to make sure 3143 * that there is infact a reset to process. 
Otherwise we may race 3144 * with ibmvnic_open() and end up leaving the vnic down: 3145 * 3146 * __ibmvnic_reset() ibmvnic_open() 3147 * ----------------- -------------- 3148 * 3149 * set ->resetting bit 3150 * find ->resetting bit is set 3151 * set ->state to IBMVNIC_OPEN (i.e 3152 * assume reset will open device) 3153 * return 3154 * find reset queue empty 3155 * return 3156 * 3157 * Neither performed vnic login/open and vnic stays down 3158 * 3159 * If we hold the lock and conditionally set the bit, either we 3160 * or ibmvnic_open() will complete the open. 3161 */ 3162 need_reset = false; 3163 spin_lock(&adapter->rwi_lock); 3164 if (!list_empty(&adapter->rwi_list)) { 3165 if (test_and_set_bit_lock(0, &adapter->resetting)) { 3166 queue_delayed_work(system_long_wq, 3167 &adapter->ibmvnic_delayed_reset, 3168 IBMVNIC_RESET_DELAY); 3169 } else { 3170 need_reset = true; 3171 } 3172 } 3173 spin_unlock(&adapter->rwi_lock); 3174 3175 if (!need_reset) 3176 return; 3177 3178 rwi = get_next_rwi(adapter); 3179 while (rwi) { 3180 spin_lock_irqsave(&adapter->state_lock, flags); 3181 3182 if (adapter->state == VNIC_REMOVING || 3183 adapter->state == VNIC_REMOVED) { 3184 spin_unlock_irqrestore(&adapter->state_lock, flags); 3185 kfree(rwi); 3186 rc = EBUSY; 3187 break; 3188 } 3189 3190 if (!saved_state) { 3191 reset_state = adapter->state; 3192 saved_state = true; 3193 } 3194 spin_unlock_irqrestore(&adapter->state_lock, flags); 3195 3196 if (rwi->reset_reason == VNIC_RESET_PASSIVE_INIT) { 3197 rtnl_lock(); 3198 rc = do_passive_init(adapter); 3199 rtnl_unlock(); 3200 if (!rc) 3201 netif_carrier_on(adapter->netdev); 3202 } else if (adapter->force_reset_recovery) { 3203 /* Since we are doing a hard reset now, clear the 3204 * failover_pending flag so we don't ignore any 3205 * future MOBILITY or other resets. 3206 */ 3207 adapter->failover_pending = false; 3208 3209 /* Transport event occurred during previous reset */ 3210 if (adapter->wait_for_reset) { 3211 /* Previous was CHANGE_PARAM; caller locked */ 3212 adapter->force_reset_recovery = false; 3213 rc = do_hard_reset(adapter, rwi, reset_state); 3214 } else { 3215 rtnl_lock(); 3216 adapter->force_reset_recovery = false; 3217 rc = do_hard_reset(adapter, rwi, reset_state); 3218 rtnl_unlock(); 3219 } 3220 if (rc) 3221 num_fails++; 3222 else 3223 num_fails = 0; 3224 3225 /* If auto-priority-failover is enabled we can get 3226 * back to back failovers during resets, resulting 3227 * in at least two failed resets (from high-priority 3228 * backing device to low-priority one and then back) 3229 * If resets continue to fail beyond that, give the 3230 * adapter some time to settle down before retrying. 3231 */ 3232 if (num_fails >= 3) { 3233 netdev_dbg(adapter->netdev, 3234 "[S:%s] Hard reset failed %d times, waiting 60 secs\n", 3235 adapter_state_to_string(adapter->state), 3236 num_fails); 3237 set_current_state(TASK_UNINTERRUPTIBLE); 3238 schedule_timeout(60 * HZ); 3239 } 3240 } else { 3241 rc = do_reset(adapter, rwi, reset_state); 3242 } 3243 tmprwi = rwi; 3244 adapter->last_reset_time = jiffies; 3245 3246 if (rc) 3247 netdev_dbg(adapter->netdev, "Reset failed, rc=%d\n", rc); 3248 3249 rwi = get_next_rwi(adapter); 3250 3251 /* 3252 * If there are no resets queued and the previous reset failed, 3253 * the adapter would be in an undefined state. So retry the 3254 * previous reset as a hard reset. 
3255 * 3256 * Else, free the previous rwi and, if there is another reset 3257 * queued, process the new reset even if previous reset failed 3258 * (the previous reset could have failed because of a fail 3259 * over for instance, so process the fail over). 3260 */ 3261 if (!rwi && rc) 3262 rwi = tmprwi; 3263 else 3264 kfree(tmprwi); 3265 3266 if (rwi && (rwi->reset_reason == VNIC_RESET_FAILOVER || 3267 rwi->reset_reason == VNIC_RESET_MOBILITY || rc)) 3268 adapter->force_reset_recovery = true; 3269 } 3270 3271 if (adapter->wait_for_reset) { 3272 adapter->reset_done_rc = rc; 3273 complete(&adapter->reset_done); 3274 } 3275 3276 clear_bit_unlock(0, &adapter->resetting); 3277 3278 netdev_dbg(adapter->netdev, 3279 "[S:%s FRR:%d WFR:%d] Done processing resets\n", 3280 adapter_state_to_string(adapter->state), 3281 adapter->force_reset_recovery, 3282 adapter->wait_for_reset); 3283 } 3284 3285 static void __ibmvnic_delayed_reset(struct work_struct *work) 3286 { 3287 struct ibmvnic_adapter *adapter; 3288 3289 adapter = container_of(work, struct ibmvnic_adapter, 3290 ibmvnic_delayed_reset.work); 3291 __ibmvnic_reset(&adapter->ibmvnic_reset); 3292 } 3293 3294 static void flush_reset_queue(struct ibmvnic_adapter *adapter) 3295 { 3296 struct list_head *entry, *tmp_entry; 3297 3298 if (!list_empty(&adapter->rwi_list)) { 3299 list_for_each_safe(entry, tmp_entry, &adapter->rwi_list) { 3300 list_del(entry); 3301 kfree(list_entry(entry, struct ibmvnic_rwi, list)); 3302 } 3303 } 3304 } 3305 3306 static int ibmvnic_reset(struct ibmvnic_adapter *adapter, 3307 enum ibmvnic_reset_reason reason) 3308 { 3309 struct net_device *netdev = adapter->netdev; 3310 struct ibmvnic_rwi *rwi, *tmp; 3311 unsigned long flags; 3312 int ret; 3313 3314 spin_lock_irqsave(&adapter->rwi_lock, flags); 3315 3316 /* If failover is pending don't schedule any other reset. 3317 * Instead let the failover complete. If there is already a 3318 * a failover reset scheduled, we will detect and drop the 3319 * duplicate reset when walking the ->rwi_list below. 
3320 */ 3321 if (adapter->state == VNIC_REMOVING || 3322 adapter->state == VNIC_REMOVED || 3323 (adapter->failover_pending && reason != VNIC_RESET_FAILOVER)) { 3324 ret = EBUSY; 3325 netdev_dbg(netdev, "Adapter removing or pending failover, skipping reset\n"); 3326 goto err; 3327 } 3328 3329 list_for_each_entry(tmp, &adapter->rwi_list, list) { 3330 if (tmp->reset_reason == reason) { 3331 netdev_dbg(netdev, "Skipping matching reset, reason=%s\n", 3332 reset_reason_to_string(reason)); 3333 ret = EBUSY; 3334 goto err; 3335 } 3336 } 3337 3338 rwi = kzalloc(sizeof(*rwi), GFP_ATOMIC); 3339 if (!rwi) { 3340 ret = ENOMEM; 3341 goto err; 3342 } 3343 /* if we just received a transport event, 3344 * flush reset queue and process this reset 3345 */ 3346 if (adapter->force_reset_recovery) 3347 flush_reset_queue(adapter); 3348 3349 rwi->reset_reason = reason; 3350 list_add_tail(&rwi->list, &adapter->rwi_list); 3351 netdev_dbg(adapter->netdev, "Scheduling reset (reason %s)\n", 3352 reset_reason_to_string(reason)); 3353 queue_work(system_long_wq, &adapter->ibmvnic_reset); 3354 3355 ret = 0; 3356 err: 3357 /* ibmvnic_close() below can block, so drop the lock first */ 3358 spin_unlock_irqrestore(&adapter->rwi_lock, flags); 3359 3360 if (ret == ENOMEM) 3361 ibmvnic_close(netdev); 3362 3363 return -ret; 3364 } 3365 3366 static void ibmvnic_tx_timeout(struct net_device *dev, unsigned int txqueue) 3367 { 3368 struct ibmvnic_adapter *adapter = netdev_priv(dev); 3369 3370 if (test_bit(0, &adapter->resetting)) { 3371 netdev_err(adapter->netdev, 3372 "Adapter is resetting, skip timeout reset\n"); 3373 return; 3374 } 3375 /* No queuing up reset until at least 5 seconds (default watchdog val) 3376 * after last reset 3377 */ 3378 if (time_before(jiffies, (adapter->last_reset_time + dev->watchdog_timeo))) { 3379 netdev_dbg(dev, "Not yet time to tx timeout.\n"); 3380 return; 3381 } 3382 ibmvnic_reset(adapter, VNIC_RESET_TIMEOUT); 3383 } 3384 3385 static void remove_buff_from_pool(struct ibmvnic_adapter *adapter, 3386 struct ibmvnic_rx_buff *rx_buff) 3387 { 3388 struct ibmvnic_rx_pool *pool = &adapter->rx_pool[rx_buff->pool_index]; 3389 3390 rx_buff->skb = NULL; 3391 3392 pool->free_map[pool->next_alloc] = (int)(rx_buff - pool->rx_buff); 3393 pool->next_alloc = (pool->next_alloc + 1) % pool->size; 3394 3395 atomic_dec(&pool->available); 3396 } 3397 3398 static int ibmvnic_poll(struct napi_struct *napi, int budget) 3399 { 3400 struct ibmvnic_sub_crq_queue *rx_scrq; 3401 struct ibmvnic_adapter *adapter; 3402 struct net_device *netdev; 3403 int frames_processed; 3404 int scrq_num; 3405 3406 netdev = napi->dev; 3407 adapter = netdev_priv(netdev); 3408 scrq_num = (int)(napi - adapter->napi); 3409 frames_processed = 0; 3410 rx_scrq = adapter->rx_scrq[scrq_num]; 3411 3412 restart_poll: 3413 while (frames_processed < budget) { 3414 struct sk_buff *skb; 3415 struct ibmvnic_rx_buff *rx_buff; 3416 union sub_crq *next; 3417 u32 length; 3418 u16 offset; 3419 u8 flags = 0; 3420 3421 if (unlikely(test_bit(0, &adapter->resetting) && 3422 adapter->reset_reason != VNIC_RESET_NON_FATAL)) { 3423 enable_scrq_irq(adapter, rx_scrq); 3424 napi_complete_done(napi, frames_processed); 3425 return frames_processed; 3426 } 3427 3428 if (!pending_scrq(adapter, rx_scrq)) 3429 break; 3430 next = ibmvnic_next_scrq(adapter, rx_scrq); 3431 rx_buff = (struct ibmvnic_rx_buff *) 3432 be64_to_cpu(next->rx_comp.correlator); 3433 /* do error checking */ 3434 if (next->rx_comp.rc) { 3435 netdev_dbg(netdev, "rx buffer returned with rc %x\n", 3436 
be16_to_cpu(next->rx_comp.rc)); 3437 /* free the entry */ 3438 next->rx_comp.first = 0; 3439 dev_kfree_skb_any(rx_buff->skb); 3440 remove_buff_from_pool(adapter, rx_buff); 3441 continue; 3442 } else if (!rx_buff->skb) { 3443 /* free the entry */ 3444 next->rx_comp.first = 0; 3445 remove_buff_from_pool(adapter, rx_buff); 3446 continue; 3447 } 3448 3449 length = be32_to_cpu(next->rx_comp.len); 3450 offset = be16_to_cpu(next->rx_comp.off_frame_data); 3451 flags = next->rx_comp.flags; 3452 skb = rx_buff->skb; 3453 /* load long_term_buff before copying to skb */ 3454 dma_rmb(); 3455 skb_copy_to_linear_data(skb, rx_buff->data + offset, 3456 length); 3457 3458 /* VLAN Header has been stripped by the system firmware and 3459 * needs to be inserted by the driver 3460 */ 3461 if (adapter->rx_vlan_header_insertion && 3462 (flags & IBMVNIC_VLAN_STRIPPED)) 3463 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), 3464 ntohs(next->rx_comp.vlan_tci)); 3465 3466 /* free the entry */ 3467 next->rx_comp.first = 0; 3468 remove_buff_from_pool(adapter, rx_buff); 3469 3470 skb_put(skb, length); 3471 skb->protocol = eth_type_trans(skb, netdev); 3472 skb_record_rx_queue(skb, scrq_num); 3473 3474 if (flags & IBMVNIC_IP_CHKSUM_GOOD && 3475 flags & IBMVNIC_TCP_UDP_CHKSUM_GOOD) { 3476 skb->ip_summed = CHECKSUM_UNNECESSARY; 3477 } 3478 3479 length = skb->len; 3480 napi_gro_receive(napi, skb); /* send it up */ 3481 netdev->stats.rx_packets++; 3482 netdev->stats.rx_bytes += length; 3483 adapter->rx_stats_buffers[scrq_num].packets++; 3484 adapter->rx_stats_buffers[scrq_num].bytes += length; 3485 frames_processed++; 3486 } 3487 3488 if (adapter->state != VNIC_CLOSING && 3489 ((atomic_read(&adapter->rx_pool[scrq_num].available) < 3490 adapter->req_rx_add_entries_per_subcrq / 2) || 3491 frames_processed < budget)) 3492 replenish_rx_pool(adapter, &adapter->rx_pool[scrq_num]); 3493 if (frames_processed < budget) { 3494 if (napi_complete_done(napi, frames_processed)) { 3495 enable_scrq_irq(adapter, rx_scrq); 3496 if (pending_scrq(adapter, rx_scrq)) { 3497 if (napi_reschedule(napi)) { 3498 disable_scrq_irq(adapter, rx_scrq); 3499 goto restart_poll; 3500 } 3501 } 3502 } 3503 } 3504 return frames_processed; 3505 } 3506 3507 static int wait_for_reset(struct ibmvnic_adapter *adapter) 3508 { 3509 int rc, ret; 3510 3511 adapter->fallback.mtu = adapter->req_mtu; 3512 adapter->fallback.rx_queues = adapter->req_rx_queues; 3513 adapter->fallback.tx_queues = adapter->req_tx_queues; 3514 adapter->fallback.rx_entries = adapter->req_rx_add_entries_per_subcrq; 3515 adapter->fallback.tx_entries = adapter->req_tx_entries_per_subcrq; 3516 3517 reinit_completion(&adapter->reset_done); 3518 adapter->wait_for_reset = true; 3519 rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM); 3520 3521 if (rc) { 3522 ret = rc; 3523 goto out; 3524 } 3525 rc = ibmvnic_wait_for_completion(adapter, &adapter->reset_done, 60000); 3526 if (rc) { 3527 ret = -ENODEV; 3528 goto out; 3529 } 3530 3531 ret = 0; 3532 if (adapter->reset_done_rc) { 3533 ret = -EIO; 3534 adapter->desired.mtu = adapter->fallback.mtu; 3535 adapter->desired.rx_queues = adapter->fallback.rx_queues; 3536 adapter->desired.tx_queues = adapter->fallback.tx_queues; 3537 adapter->desired.rx_entries = adapter->fallback.rx_entries; 3538 adapter->desired.tx_entries = adapter->fallback.tx_entries; 3539 3540 reinit_completion(&adapter->reset_done); 3541 adapter->wait_for_reset = true; 3542 rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM); 3543 if (rc) { 3544 ret = rc; 3545 goto out; 3546 } 3547 rc = 
ibmvnic_wait_for_completion(adapter, &adapter->reset_done, 3548 60000); 3549 if (rc) { 3550 ret = -ENODEV; 3551 goto out; 3552 } 3553 } 3554 out: 3555 adapter->wait_for_reset = false; 3556 3557 return ret; 3558 } 3559 3560 static int ibmvnic_change_mtu(struct net_device *netdev, int new_mtu) 3561 { 3562 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 3563 3564 adapter->desired.mtu = new_mtu + ETH_HLEN; 3565 3566 return wait_for_reset(adapter); 3567 } 3568 3569 static netdev_features_t ibmvnic_features_check(struct sk_buff *skb, 3570 struct net_device *dev, 3571 netdev_features_t features) 3572 { 3573 /* Some backing hardware adapters can not 3574 * handle packets with a MSS less than 224 3575 * or with only one segment. 3576 */ 3577 if (skb_is_gso(skb)) { 3578 if (skb_shinfo(skb)->gso_size < 224 || 3579 skb_shinfo(skb)->gso_segs == 1) 3580 features &= ~NETIF_F_GSO_MASK; 3581 } 3582 3583 return features; 3584 } 3585 3586 static const struct net_device_ops ibmvnic_netdev_ops = { 3587 .ndo_open = ibmvnic_open, 3588 .ndo_stop = ibmvnic_close, 3589 .ndo_start_xmit = ibmvnic_xmit, 3590 .ndo_set_rx_mode = ibmvnic_set_multi, 3591 .ndo_set_mac_address = ibmvnic_set_mac, 3592 .ndo_validate_addr = eth_validate_addr, 3593 .ndo_tx_timeout = ibmvnic_tx_timeout, 3594 .ndo_change_mtu = ibmvnic_change_mtu, 3595 .ndo_features_check = ibmvnic_features_check, 3596 }; 3597 3598 /* ethtool functions */ 3599 3600 static int ibmvnic_get_link_ksettings(struct net_device *netdev, 3601 struct ethtool_link_ksettings *cmd) 3602 { 3603 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 3604 int rc; 3605 3606 rc = send_query_phys_parms(adapter); 3607 if (rc) { 3608 adapter->speed = SPEED_UNKNOWN; 3609 adapter->duplex = DUPLEX_UNKNOWN; 3610 } 3611 cmd->base.speed = adapter->speed; 3612 cmd->base.duplex = adapter->duplex; 3613 cmd->base.port = PORT_FIBRE; 3614 cmd->base.phy_address = 0; 3615 cmd->base.autoneg = AUTONEG_ENABLE; 3616 3617 return 0; 3618 } 3619 3620 static void ibmvnic_get_drvinfo(struct net_device *netdev, 3621 struct ethtool_drvinfo *info) 3622 { 3623 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 3624 3625 strscpy(info->driver, ibmvnic_driver_name, sizeof(info->driver)); 3626 strscpy(info->version, IBMVNIC_DRIVER_VERSION, sizeof(info->version)); 3627 strscpy(info->fw_version, adapter->fw_version, 3628 sizeof(info->fw_version)); 3629 } 3630 3631 static u32 ibmvnic_get_msglevel(struct net_device *netdev) 3632 { 3633 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 3634 3635 return adapter->msg_enable; 3636 } 3637 3638 static void ibmvnic_set_msglevel(struct net_device *netdev, u32 data) 3639 { 3640 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 3641 3642 adapter->msg_enable = data; 3643 } 3644 3645 static u32 ibmvnic_get_link(struct net_device *netdev) 3646 { 3647 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 3648 3649 /* Don't need to send a query because we request a logical link up at 3650 * init and then we wait for link state indications 3651 */ 3652 return adapter->logical_link_state; 3653 } 3654 3655 static void ibmvnic_get_ringparam(struct net_device *netdev, 3656 struct ethtool_ringparam *ring, 3657 struct kernel_ethtool_ringparam *kernel_ring, 3658 struct netlink_ext_ack *extack) 3659 { 3660 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 3661 3662 ring->rx_max_pending = adapter->max_rx_add_entries_per_subcrq; 3663 ring->tx_max_pending = adapter->max_tx_entries_per_subcrq; 3664 ring->rx_mini_max_pending = 0; 3665 ring->rx_jumbo_max_pending = 0; 3666 
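	/* The "pending" values below report the currently granted number
	 * of entries per sub-CRQ; changing them through set_ringparam()
	 * is not applied directly but goes through a CHANGE_PARAM reset
	 * (see wait_for_reset() above).
	 */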
ring->rx_pending = adapter->req_rx_add_entries_per_subcrq;
	ring->tx_pending = adapter->req_tx_entries_per_subcrq;
	ring->rx_mini_pending = 0;
	ring->rx_jumbo_pending = 0;
}

static int ibmvnic_set_ringparam(struct net_device *netdev,
				 struct ethtool_ringparam *ring,
				 struct kernel_ethtool_ringparam *kernel_ring,
				 struct netlink_ext_ack *extack)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	if (ring->rx_pending > adapter->max_rx_add_entries_per_subcrq ||
	    ring->tx_pending > adapter->max_tx_entries_per_subcrq) {
		netdev_err(netdev, "Invalid request.\n");
		netdev_err(netdev, "Max tx buffers = %llu\n",
			   adapter->max_tx_entries_per_subcrq);
		netdev_err(netdev, "Max rx buffers = %llu\n",
			   adapter->max_rx_add_entries_per_subcrq);
		return -EINVAL;
	}

	adapter->desired.rx_entries = ring->rx_pending;
	adapter->desired.tx_entries = ring->tx_pending;

	return wait_for_reset(adapter);
}

static void ibmvnic_get_channels(struct net_device *netdev,
				 struct ethtool_channels *channels)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	channels->max_rx = adapter->max_rx_queues;
	channels->max_tx = adapter->max_tx_queues;
	channels->max_other = 0;
	channels->max_combined = 0;
	channels->rx_count = adapter->req_rx_queues;
	channels->tx_count = adapter->req_tx_queues;
	channels->other_count = 0;
	channels->combined_count = 0;
}

static int ibmvnic_set_channels(struct net_device *netdev,
				struct ethtool_channels *channels)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	adapter->desired.rx_queues = channels->rx_count;
	adapter->desired.tx_queues = channels->tx_count;

	return wait_for_reset(adapter);
}

static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	struct ibmvnic_adapter *adapter = netdev_priv(dev);
	int i;

	if (stringset != ETH_SS_STATS)
		return;

	for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++, data += ETH_GSTRING_LEN)
		memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN);

	for (i = 0; i < adapter->req_tx_queues; i++) {
		snprintf(data, ETH_GSTRING_LEN, "tx%d_packets", i);
		data += ETH_GSTRING_LEN;

		snprintf(data, ETH_GSTRING_LEN, "tx%d_bytes", i);
		data += ETH_GSTRING_LEN;

		snprintf(data, ETH_GSTRING_LEN, "tx%d_dropped_packets", i);
		data += ETH_GSTRING_LEN;
	}

	for (i = 0; i < adapter->req_rx_queues; i++) {
		snprintf(data, ETH_GSTRING_LEN, "rx%d_packets", i);
		data += ETH_GSTRING_LEN;

		snprintf(data, ETH_GSTRING_LEN, "rx%d_bytes", i);
		data += ETH_GSTRING_LEN;

		snprintf(data, ETH_GSTRING_LEN, "rx%d_interrupts", i);
		data += ETH_GSTRING_LEN;
	}
}

static int ibmvnic_get_sset_count(struct net_device *dev, int sset)
{
	struct ibmvnic_adapter *adapter = netdev_priv(dev);

	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(ibmvnic_stats) +
		       adapter->req_tx_queues * NUM_TX_STATS +
		       adapter->req_rx_queues * NUM_RX_STATS;
	default:
		return -EOPNOTSUPP;
	}
}

static void ibmvnic_get_ethtool_stats(struct net_device *dev,
				      struct ethtool_stats *stats, u64 *data)
{
	struct ibmvnic_adapter *adapter = netdev_priv(dev);
	union ibmvnic_crq crq;
	int i, j;
	int rc;

	memset(&crq, 0,
sizeof(crq)); 3778 crq.request_statistics.first = IBMVNIC_CRQ_CMD; 3779 crq.request_statistics.cmd = REQUEST_STATISTICS; 3780 crq.request_statistics.ioba = cpu_to_be32(adapter->stats_token); 3781 crq.request_statistics.len = 3782 cpu_to_be32(sizeof(struct ibmvnic_statistics)); 3783 3784 /* Wait for data to be written */ 3785 reinit_completion(&adapter->stats_done); 3786 rc = ibmvnic_send_crq(adapter, &crq); 3787 if (rc) 3788 return; 3789 rc = ibmvnic_wait_for_completion(adapter, &adapter->stats_done, 10000); 3790 if (rc) 3791 return; 3792 3793 for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++) 3794 data[i] = be64_to_cpu(IBMVNIC_GET_STAT 3795 (adapter, ibmvnic_stats[i].offset)); 3796 3797 for (j = 0; j < adapter->req_tx_queues; j++) { 3798 data[i] = adapter->tx_stats_buffers[j].packets; 3799 i++; 3800 data[i] = adapter->tx_stats_buffers[j].bytes; 3801 i++; 3802 data[i] = adapter->tx_stats_buffers[j].dropped_packets; 3803 i++; 3804 } 3805 3806 for (j = 0; j < adapter->req_rx_queues; j++) { 3807 data[i] = adapter->rx_stats_buffers[j].packets; 3808 i++; 3809 data[i] = adapter->rx_stats_buffers[j].bytes; 3810 i++; 3811 data[i] = adapter->rx_stats_buffers[j].interrupts; 3812 i++; 3813 } 3814 } 3815 3816 static const struct ethtool_ops ibmvnic_ethtool_ops = { 3817 .get_drvinfo = ibmvnic_get_drvinfo, 3818 .get_msglevel = ibmvnic_get_msglevel, 3819 .set_msglevel = ibmvnic_set_msglevel, 3820 .get_link = ibmvnic_get_link, 3821 .get_ringparam = ibmvnic_get_ringparam, 3822 .set_ringparam = ibmvnic_set_ringparam, 3823 .get_channels = ibmvnic_get_channels, 3824 .set_channels = ibmvnic_set_channels, 3825 .get_strings = ibmvnic_get_strings, 3826 .get_sset_count = ibmvnic_get_sset_count, 3827 .get_ethtool_stats = ibmvnic_get_ethtool_stats, 3828 .get_link_ksettings = ibmvnic_get_link_ksettings, 3829 }; 3830 3831 /* Routines for managing CRQs/sCRQs */ 3832 3833 static int reset_one_sub_crq_queue(struct ibmvnic_adapter *adapter, 3834 struct ibmvnic_sub_crq_queue *scrq) 3835 { 3836 int rc; 3837 3838 if (!scrq) { 3839 netdev_dbg(adapter->netdev, "Invalid scrq reset.\n"); 3840 return -EINVAL; 3841 } 3842 3843 if (scrq->irq) { 3844 free_irq(scrq->irq, scrq); 3845 irq_dispose_mapping(scrq->irq); 3846 scrq->irq = 0; 3847 } 3848 3849 if (scrq->msgs) { 3850 memset(scrq->msgs, 0, 4 * PAGE_SIZE); 3851 atomic_set(&scrq->used, 0); 3852 scrq->cur = 0; 3853 scrq->ind_buf.index = 0; 3854 } else { 3855 netdev_dbg(adapter->netdev, "Invalid scrq reset\n"); 3856 return -EINVAL; 3857 } 3858 3859 rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token, 3860 4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq); 3861 return rc; 3862 } 3863 3864 static int reset_sub_crq_queues(struct ibmvnic_adapter *adapter) 3865 { 3866 int i, rc; 3867 3868 if (!adapter->tx_scrq || !adapter->rx_scrq) 3869 return -EINVAL; 3870 3871 ibmvnic_clean_affinity(adapter); 3872 3873 for (i = 0; i < adapter->req_tx_queues; i++) { 3874 netdev_dbg(adapter->netdev, "Re-setting tx_scrq[%d]\n", i); 3875 rc = reset_one_sub_crq_queue(adapter, adapter->tx_scrq[i]); 3876 if (rc) 3877 return rc; 3878 } 3879 3880 for (i = 0; i < adapter->req_rx_queues; i++) { 3881 netdev_dbg(adapter->netdev, "Re-setting rx_scrq[%d]\n", i); 3882 rc = reset_one_sub_crq_queue(adapter, adapter->rx_scrq[i]); 3883 if (rc) 3884 return rc; 3885 } 3886 3887 return rc; 3888 } 3889 3890 static void release_sub_crq_queue(struct ibmvnic_adapter *adapter, 3891 struct ibmvnic_sub_crq_queue *scrq, 3892 bool do_h_free) 3893 { 3894 struct device *dev = &adapter->vdev->dev; 3895 long rc; 3896 3897 
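
	/* The hypervisor may need several attempts to free a sub-CRQ: the
	 * H_FREE_SUB_CRQ h-call below is retried for as long as it keeps
	 * returning H_BUSY or one of the long-busy hint codes that
	 * H_IS_LONG_BUSY() matches.
	 */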
netdev_dbg(adapter->netdev, "Releasing sub-CRQ\n"); 3898 3899 if (do_h_free) { 3900 /* Close the sub-crqs */ 3901 do { 3902 rc = plpar_hcall_norets(H_FREE_SUB_CRQ, 3903 adapter->vdev->unit_address, 3904 scrq->crq_num); 3905 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc)); 3906 3907 if (rc) { 3908 netdev_err(adapter->netdev, 3909 "Failed to release sub-CRQ %16lx, rc = %ld\n", 3910 scrq->crq_num, rc); 3911 } 3912 } 3913 3914 dma_free_coherent(dev, 3915 IBMVNIC_IND_ARR_SZ, 3916 scrq->ind_buf.indir_arr, 3917 scrq->ind_buf.indir_dma); 3918 3919 dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE, 3920 DMA_BIDIRECTIONAL); 3921 free_pages((unsigned long)scrq->msgs, 2); 3922 free_cpumask_var(scrq->affinity_mask); 3923 kfree(scrq); 3924 } 3925 3926 static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter 3927 *adapter) 3928 { 3929 struct device *dev = &adapter->vdev->dev; 3930 struct ibmvnic_sub_crq_queue *scrq; 3931 int rc; 3932 3933 scrq = kzalloc(sizeof(*scrq), GFP_KERNEL); 3934 if (!scrq) 3935 return NULL; 3936 3937 scrq->msgs = 3938 (union sub_crq *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 2); 3939 if (!scrq->msgs) { 3940 dev_warn(dev, "Couldn't allocate crq queue messages page\n"); 3941 goto zero_page_failed; 3942 } 3943 if (!zalloc_cpumask_var(&scrq->affinity_mask, GFP_KERNEL)) 3944 goto cpumask_alloc_failed; 3945 3946 scrq->msg_token = dma_map_single(dev, scrq->msgs, 4 * PAGE_SIZE, 3947 DMA_BIDIRECTIONAL); 3948 if (dma_mapping_error(dev, scrq->msg_token)) { 3949 dev_warn(dev, "Couldn't map crq queue messages page\n"); 3950 goto map_failed; 3951 } 3952 3953 rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token, 3954 4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq); 3955 3956 if (rc == H_RESOURCE) 3957 rc = ibmvnic_reset_crq(adapter); 3958 3959 if (rc == H_CLOSED) { 3960 dev_warn(dev, "Partner adapter not ready, waiting.\n"); 3961 } else if (rc) { 3962 dev_warn(dev, "Error %d registering sub-crq\n", rc); 3963 goto reg_failed; 3964 } 3965 3966 scrq->adapter = adapter; 3967 scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs); 3968 scrq->ind_buf.index = 0; 3969 3970 scrq->ind_buf.indir_arr = 3971 dma_alloc_coherent(dev, 3972 IBMVNIC_IND_ARR_SZ, 3973 &scrq->ind_buf.indir_dma, 3974 GFP_KERNEL); 3975 3976 if (!scrq->ind_buf.indir_arr) 3977 goto indir_failed; 3978 3979 spin_lock_init(&scrq->lock); 3980 3981 netdev_dbg(adapter->netdev, 3982 "sub-crq initialized, num %lx, hw_irq=%lx, irq=%x\n", 3983 scrq->crq_num, scrq->hw_irq, scrq->irq); 3984 3985 return scrq; 3986 3987 indir_failed: 3988 do { 3989 rc = plpar_hcall_norets(H_FREE_SUB_CRQ, 3990 adapter->vdev->unit_address, 3991 scrq->crq_num); 3992 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc)); 3993 reg_failed: 3994 dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE, 3995 DMA_BIDIRECTIONAL); 3996 map_failed: 3997 free_cpumask_var(scrq->affinity_mask); 3998 cpumask_alloc_failed: 3999 free_pages((unsigned long)scrq->msgs, 2); 4000 zero_page_failed: 4001 kfree(scrq); 4002 4003 return NULL; 4004 } 4005 4006 static void release_sub_crqs(struct ibmvnic_adapter *adapter, bool do_h_free) 4007 { 4008 int i; 4009 4010 ibmvnic_clean_affinity(adapter); 4011 if (adapter->tx_scrq) { 4012 for (i = 0; i < adapter->num_active_tx_scrqs; i++) { 4013 if (!adapter->tx_scrq[i]) 4014 continue; 4015 4016 netdev_dbg(adapter->netdev, "Releasing tx_scrq[%d]\n", 4017 i); 4018 ibmvnic_tx_scrq_clean_buffer(adapter, adapter->tx_scrq[i]); 4019 if (adapter->tx_scrq[i]->irq) { 4020 free_irq(adapter->tx_scrq[i]->irq, 4021 adapter->tx_scrq[i]); 4022
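/* free_irq() only unhooks the handler; irq_dispose_mapping() below also tears down the virq-to-hw_irq mapping that irq_create_mapping() established in init_sub_crq_irqs(). */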
irq_dispose_mapping(adapter->tx_scrq[i]->irq); 4023 adapter->tx_scrq[i]->irq = 0; 4024 } 4025 4026 release_sub_crq_queue(adapter, adapter->tx_scrq[i], 4027 do_h_free); 4028 } 4029 4030 kfree(adapter->tx_scrq); 4031 adapter->tx_scrq = NULL; 4032 adapter->num_active_tx_scrqs = 0; 4033 } 4034 4035 if (adapter->rx_scrq) { 4036 for (i = 0; i < adapter->num_active_rx_scrqs; i++) { 4037 if (!adapter->rx_scrq[i]) 4038 continue; 4039 4040 netdev_dbg(adapter->netdev, "Releasing rx_scrq[%d]\n", 4041 i); 4042 if (adapter->rx_scrq[i]->irq) { 4043 free_irq(adapter->rx_scrq[i]->irq, 4044 adapter->rx_scrq[i]); 4045 irq_dispose_mapping(adapter->rx_scrq[i]->irq); 4046 adapter->rx_scrq[i]->irq = 0; 4047 } 4048 4049 release_sub_crq_queue(adapter, adapter->rx_scrq[i], 4050 do_h_free); 4051 } 4052 4053 kfree(adapter->rx_scrq); 4054 adapter->rx_scrq = NULL; 4055 adapter->num_active_rx_scrqs = 0; 4056 } 4057 } 4058 4059 static int disable_scrq_irq(struct ibmvnic_adapter *adapter, 4060 struct ibmvnic_sub_crq_queue *scrq) 4061 { 4062 struct device *dev = &adapter->vdev->dev; 4063 unsigned long rc; 4064 4065 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address, 4066 H_DISABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0); 4067 if (rc) 4068 dev_err(dev, "Couldn't disable scrq irq 0x%lx. rc=%ld\n", 4069 scrq->hw_irq, rc); 4070 return rc; 4071 } 4072 4073 /* We can not use the IRQ chip EOI handler because that has the 4074 * unintended effect of changing the interrupt priority. 4075 */ 4076 static void ibmvnic_xics_eoi(struct device *dev, struct ibmvnic_sub_crq_queue *scrq) 4077 { 4078 u64 val = 0xff000000 | scrq->hw_irq; 4079 unsigned long rc; 4080 4081 rc = plpar_hcall_norets(H_EOI, val); 4082 if (rc) 4083 dev_err(dev, "H_EOI FAILED irq 0x%llx. rc=%ld\n", val, rc); 4084 } 4085 4086 /* Due to a firmware bug, the hypervisor can send an interrupt to a 4087 * transmit or receive queue just prior to a partition migration. 4088 * Force an EOI after migration. 4089 */ 4090 static void ibmvnic_clear_pending_interrupt(struct device *dev, 4091 struct ibmvnic_sub_crq_queue *scrq) 4092 { 4093 if (!xive_enabled()) 4094 ibmvnic_xics_eoi(dev, scrq); 4095 } 4096 4097 static int enable_scrq_irq(struct ibmvnic_adapter *adapter, 4098 struct ibmvnic_sub_crq_queue *scrq) 4099 { 4100 struct device *dev = &adapter->vdev->dev; 4101 unsigned long rc; 4102 4103 if (scrq->hw_irq > 0x100000000ULL) { 4104 dev_err(dev, "bad hw_irq = %lx\n", scrq->hw_irq); 4105 return 1; 4106 } 4107 4108 if (test_bit(0, &adapter->resetting) && 4109 adapter->reset_reason == VNIC_RESET_MOBILITY) { 4110 ibmvnic_clear_pending_interrupt(dev, scrq); 4111 } 4112 4113 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address, 4114 H_ENABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0); 4115 if (rc) 4116 dev_err(dev, "Couldn't enable scrq irq 0x%lx. 
rc=%ld\n", 4117 scrq->hw_irq, rc); 4118 return rc; 4119 } 4120 4121 static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter, 4122 struct ibmvnic_sub_crq_queue *scrq) 4123 { 4124 struct device *dev = &adapter->vdev->dev; 4125 struct ibmvnic_tx_pool *tx_pool; 4126 struct ibmvnic_tx_buff *txbuff; 4127 struct netdev_queue *txq; 4128 union sub_crq *next; 4129 int index; 4130 int i; 4131 4132 restart_loop: 4133 while (pending_scrq(adapter, scrq)) { 4134 unsigned int pool = scrq->pool_index; 4135 int num_entries = 0; 4136 int total_bytes = 0; 4137 int num_packets = 0; 4138 4139 next = ibmvnic_next_scrq(adapter, scrq); 4140 for (i = 0; i < next->tx_comp.num_comps; i++) { 4141 index = be32_to_cpu(next->tx_comp.correlators[i]); 4142 if (index & IBMVNIC_TSO_POOL_MASK) { 4143 tx_pool = &adapter->tso_pool[pool]; 4144 index &= ~IBMVNIC_TSO_POOL_MASK; 4145 } else { 4146 tx_pool = &adapter->tx_pool[pool]; 4147 } 4148 4149 txbuff = &tx_pool->tx_buff[index]; 4150 num_packets++; 4151 num_entries += txbuff->num_entries; 4152 if (txbuff->skb) { 4153 total_bytes += txbuff->skb->len; 4154 if (next->tx_comp.rcs[i]) { 4155 dev_err(dev, "tx error %x\n", 4156 next->tx_comp.rcs[i]); 4157 dev_kfree_skb_irq(txbuff->skb); 4158 } else { 4159 dev_consume_skb_irq(txbuff->skb); 4160 } 4161 txbuff->skb = NULL; 4162 } else { 4163 netdev_warn(adapter->netdev, 4164 "TX completion received with NULL socket buffer\n"); 4165 } 4166 tx_pool->free_map[tx_pool->producer_index] = index; 4167 tx_pool->producer_index = 4168 (tx_pool->producer_index + 1) % 4169 tx_pool->num_buffers; 4170 } 4171 /* remove tx_comp scrq*/ 4172 next->tx_comp.first = 0; 4173 4174 txq = netdev_get_tx_queue(adapter->netdev, scrq->pool_index); 4175 netdev_tx_completed_queue(txq, num_packets, total_bytes); 4176 4177 if (atomic_sub_return(num_entries, &scrq->used) <= 4178 (adapter->req_tx_entries_per_subcrq / 2) && 4179 __netif_subqueue_stopped(adapter->netdev, 4180 scrq->pool_index)) { 4181 rcu_read_lock(); 4182 if (adapter->tx_queues_active) { 4183 netif_wake_subqueue(adapter->netdev, 4184 scrq->pool_index); 4185 netdev_dbg(adapter->netdev, 4186 "Started queue %d\n", 4187 scrq->pool_index); 4188 } 4189 rcu_read_unlock(); 4190 } 4191 } 4192 4193 enable_scrq_irq(adapter, scrq); 4194 4195 if (pending_scrq(adapter, scrq)) { 4196 disable_scrq_irq(adapter, scrq); 4197 goto restart_loop; 4198 } 4199 4200 return 0; 4201 } 4202 4203 static irqreturn_t ibmvnic_interrupt_tx(int irq, void *instance) 4204 { 4205 struct ibmvnic_sub_crq_queue *scrq = instance; 4206 struct ibmvnic_adapter *adapter = scrq->adapter; 4207 4208 disable_scrq_irq(adapter, scrq); 4209 ibmvnic_complete_tx(adapter, scrq); 4210 4211 return IRQ_HANDLED; 4212 } 4213 4214 static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance) 4215 { 4216 struct ibmvnic_sub_crq_queue *scrq = instance; 4217 struct ibmvnic_adapter *adapter = scrq->adapter; 4218 4219 /* When booting a kdump kernel we can hit pending interrupts 4220 * prior to completing driver initialization. 
4221 */ 4222 if (unlikely(adapter->state != VNIC_OPEN)) 4223 return IRQ_NONE; 4224 4225 adapter->rx_stats_buffers[scrq->scrq_num].interrupts++; 4226 4227 if (napi_schedule_prep(&adapter->napi[scrq->scrq_num])) { 4228 disable_scrq_irq(adapter, scrq); 4229 __napi_schedule(&adapter->napi[scrq->scrq_num]); 4230 } 4231 4232 return IRQ_HANDLED; 4233 } 4234 4235 static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter) 4236 { 4237 struct device *dev = &adapter->vdev->dev; 4238 struct ibmvnic_sub_crq_queue *scrq; 4239 int i = 0, j = 0; 4240 int rc = 0; 4241 4242 for (i = 0; i < adapter->req_tx_queues; i++) { 4243 netdev_dbg(adapter->netdev, "Initializing tx_scrq[%d] irq\n", 4244 i); 4245 scrq = adapter->tx_scrq[i]; 4246 scrq->irq = irq_create_mapping(NULL, scrq->hw_irq); 4247 4248 if (!scrq->irq) { 4249 rc = -EINVAL; 4250 dev_err(dev, "Error mapping irq\n"); 4251 goto req_tx_irq_failed; 4252 } 4253 4254 snprintf(scrq->name, sizeof(scrq->name), "ibmvnic-%x-tx%d", 4255 adapter->vdev->unit_address, i); 4256 rc = request_irq(scrq->irq, ibmvnic_interrupt_tx, 4257 0, scrq->name, scrq); 4258 4259 if (rc) { 4260 dev_err(dev, "Couldn't register tx irq 0x%x. rc=%d\n", 4261 scrq->irq, rc); 4262 irq_dispose_mapping(scrq->irq); 4263 goto req_tx_irq_failed; 4264 } 4265 } 4266 4267 for (i = 0; i < adapter->req_rx_queues; i++) { 4268 netdev_dbg(adapter->netdev, "Initializing rx_scrq[%d] irq\n", 4269 i); 4270 scrq = adapter->rx_scrq[i]; 4271 scrq->irq = irq_create_mapping(NULL, scrq->hw_irq); 4272 if (!scrq->irq) { 4273 rc = -EINVAL; 4274 dev_err(dev, "Error mapping irq\n"); 4275 goto req_rx_irq_failed; 4276 } 4277 snprintf(scrq->name, sizeof(scrq->name), "ibmvnic-%x-rx%d", 4278 adapter->vdev->unit_address, i); 4279 rc = request_irq(scrq->irq, ibmvnic_interrupt_rx, 4280 0, scrq->name, scrq); 4281 if (rc) { 4282 dev_err(dev, "Couldn't register rx irq 0x%x. 
rc=%d\n", 4283 scrq->irq, rc); 4284 irq_dispose_mapping(scrq->irq); 4285 goto req_rx_irq_failed; 4286 } 4287 } 4288 4289 cpus_read_lock(); 4290 ibmvnic_set_affinity(adapter); 4291 cpus_read_unlock(); 4292 4293 return rc; 4294 4295 req_rx_irq_failed: 4296 for (j = 0; j < i; j++) { 4297 free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]); 4298 irq_dispose_mapping(adapter->rx_scrq[j]->irq); 4299 } 4300 i = adapter->req_tx_queues; 4301 req_tx_irq_failed: 4302 for (j = 0; j < i; j++) { 4303 free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]); 4304 irq_dispose_mapping(adapter->tx_scrq[j]->irq); 4305 } 4306 release_sub_crqs(adapter, 1); 4307 return rc; 4308 } 4309 4310 static int init_sub_crqs(struct ibmvnic_adapter *adapter) 4311 { 4312 struct device *dev = &adapter->vdev->dev; 4313 struct ibmvnic_sub_crq_queue **allqueues; 4314 int registered_queues = 0; 4315 int total_queues; 4316 int more = 0; 4317 int i; 4318 4319 total_queues = adapter->req_tx_queues + adapter->req_rx_queues; 4320 4321 allqueues = kcalloc(total_queues, sizeof(*allqueues), GFP_KERNEL); 4322 if (!allqueues) 4323 return -ENOMEM; 4324 4325 for (i = 0; i < total_queues; i++) { 4326 allqueues[i] = init_sub_crq_queue(adapter); 4327 if (!allqueues[i]) { 4328 dev_warn(dev, "Couldn't allocate all sub-crqs\n"); 4329 break; 4330 } 4331 registered_queues++; 4332 } 4333 4334 /* Make sure we were able to register the minimum number of queues */ 4335 if (registered_queues < 4336 adapter->min_tx_queues + adapter->min_rx_queues) { 4337 dev_err(dev, "Fatal: Couldn't init min number of sub-crqs\n"); 4338 goto tx_failed; 4339 } 4340 4341 /* Distribute the failed allocated queues*/ 4342 for (i = 0; i < total_queues - registered_queues + more ; i++) { 4343 netdev_dbg(adapter->netdev, "Reducing number of queues\n"); 4344 switch (i % 3) { 4345 case 0: 4346 if (adapter->req_rx_queues > adapter->min_rx_queues) 4347 adapter->req_rx_queues--; 4348 else 4349 more++; 4350 break; 4351 case 1: 4352 if (adapter->req_tx_queues > adapter->min_tx_queues) 4353 adapter->req_tx_queues--; 4354 else 4355 more++; 4356 break; 4357 } 4358 } 4359 4360 adapter->tx_scrq = kcalloc(adapter->req_tx_queues, 4361 sizeof(*adapter->tx_scrq), GFP_KERNEL); 4362 if (!adapter->tx_scrq) 4363 goto tx_failed; 4364 4365 for (i = 0; i < adapter->req_tx_queues; i++) { 4366 adapter->tx_scrq[i] = allqueues[i]; 4367 adapter->tx_scrq[i]->pool_index = i; 4368 adapter->num_active_tx_scrqs++; 4369 } 4370 4371 adapter->rx_scrq = kcalloc(adapter->req_rx_queues, 4372 sizeof(*adapter->rx_scrq), GFP_KERNEL); 4373 if (!adapter->rx_scrq) 4374 goto rx_failed; 4375 4376 for (i = 0; i < adapter->req_rx_queues; i++) { 4377 adapter->rx_scrq[i] = allqueues[i + adapter->req_tx_queues]; 4378 adapter->rx_scrq[i]->scrq_num = i; 4379 adapter->num_active_rx_scrqs++; 4380 } 4381 4382 kfree(allqueues); 4383 return 0; 4384 4385 rx_failed: 4386 kfree(adapter->tx_scrq); 4387 adapter->tx_scrq = NULL; 4388 tx_failed: 4389 for (i = 0; i < registered_queues; i++) 4390 release_sub_crq_queue(adapter, allqueues[i], 1); 4391 kfree(allqueues); 4392 return -ENOMEM; 4393 } 4394 4395 static void send_request_cap(struct ibmvnic_adapter *adapter, int retry) 4396 { 4397 struct device *dev = &adapter->vdev->dev; 4398 union ibmvnic_crq crq; 4399 int max_entries; 4400 int cap_reqs; 4401 4402 /* We send out 6 or 7 REQUEST_CAPABILITY CRQs below (depending on 4403 * the PROMISC flag). Initialize this count upfront. 
When the tasklet 4404 * receives a response to all of these, it will send the next protocol 4405 * message (QUERY_IP_OFFLOAD). 4406 */ 4407 if (!(adapter->netdev->flags & IFF_PROMISC) || 4408 adapter->promisc_supported) 4409 cap_reqs = 7; 4410 else 4411 cap_reqs = 6; 4412 4413 if (!retry) { 4414 /* Sub-CRQ entries are 32 byte long */ 4415 int entries_page = 4 * PAGE_SIZE / (sizeof(u64) * 4); 4416 4417 atomic_set(&adapter->running_cap_crqs, cap_reqs); 4418 4419 if (adapter->min_tx_entries_per_subcrq > entries_page || 4420 adapter->min_rx_add_entries_per_subcrq > entries_page) { 4421 dev_err(dev, "Fatal, invalid entries per sub-crq\n"); 4422 return; 4423 } 4424 4425 if (adapter->desired.mtu) 4426 adapter->req_mtu = adapter->desired.mtu; 4427 else 4428 adapter->req_mtu = adapter->netdev->mtu + ETH_HLEN; 4429 4430 if (!adapter->desired.tx_entries) 4431 adapter->desired.tx_entries = 4432 adapter->max_tx_entries_per_subcrq; 4433 if (!adapter->desired.rx_entries) 4434 adapter->desired.rx_entries = 4435 adapter->max_rx_add_entries_per_subcrq; 4436 4437 max_entries = IBMVNIC_LTB_SET_SIZE / 4438 (adapter->req_mtu + IBMVNIC_BUFFER_HLEN); 4439 4440 if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) * 4441 adapter->desired.tx_entries > IBMVNIC_LTB_SET_SIZE) { 4442 adapter->desired.tx_entries = max_entries; 4443 } 4444 4445 if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) * 4446 adapter->desired.rx_entries > IBMVNIC_LTB_SET_SIZE) { 4447 adapter->desired.rx_entries = max_entries; 4448 } 4449 4450 if (adapter->desired.tx_entries) 4451 adapter->req_tx_entries_per_subcrq = 4452 adapter->desired.tx_entries; 4453 else 4454 adapter->req_tx_entries_per_subcrq = 4455 adapter->max_tx_entries_per_subcrq; 4456 4457 if (adapter->desired.rx_entries) 4458 adapter->req_rx_add_entries_per_subcrq = 4459 adapter->desired.rx_entries; 4460 else 4461 adapter->req_rx_add_entries_per_subcrq = 4462 adapter->max_rx_add_entries_per_subcrq; 4463 4464 if (adapter->desired.tx_queues) 4465 adapter->req_tx_queues = 4466 adapter->desired.tx_queues; 4467 else 4468 adapter->req_tx_queues = 4469 adapter->opt_tx_comp_sub_queues; 4470 4471 if (adapter->desired.rx_queues) 4472 adapter->req_rx_queues = 4473 adapter->desired.rx_queues; 4474 else 4475 adapter->req_rx_queues = 4476 adapter->opt_rx_comp_queues; 4477 4478 adapter->req_rx_add_queues = adapter->max_rx_add_queues; 4479 } else { 4480 atomic_add(cap_reqs, &adapter->running_cap_crqs); 4481 } 4482 memset(&crq, 0, sizeof(crq)); 4483 crq.request_capability.first = IBMVNIC_CRQ_CMD; 4484 crq.request_capability.cmd = REQUEST_CAPABILITY; 4485 4486 crq.request_capability.capability = cpu_to_be16(REQ_TX_QUEUES); 4487 crq.request_capability.number = cpu_to_be64(adapter->req_tx_queues); 4488 cap_reqs--; 4489 ibmvnic_send_crq(adapter, &crq); 4490 4491 crq.request_capability.capability = cpu_to_be16(REQ_RX_QUEUES); 4492 crq.request_capability.number = cpu_to_be64(adapter->req_rx_queues); 4493 cap_reqs--; 4494 ibmvnic_send_crq(adapter, &crq); 4495 4496 crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_QUEUES); 4497 crq.request_capability.number = cpu_to_be64(adapter->req_rx_add_queues); 4498 cap_reqs--; 4499 ibmvnic_send_crq(adapter, &crq); 4500 4501 crq.request_capability.capability = 4502 cpu_to_be16(REQ_TX_ENTRIES_PER_SUBCRQ); 4503 crq.request_capability.number = 4504 cpu_to_be64(adapter->req_tx_entries_per_subcrq); 4505 cap_reqs--; 4506 ibmvnic_send_crq(adapter, &crq); 4507 4508 crq.request_capability.capability = 4509 cpu_to_be16(REQ_RX_ADD_ENTRIES_PER_SUBCRQ); 4510 crq.request_capability.number 
= 4511 cpu_to_be64(adapter->req_rx_add_entries_per_subcrq); 4512 cap_reqs--; 4513 ibmvnic_send_crq(adapter, &crq); 4514 4515 crq.request_capability.capability = cpu_to_be16(REQ_MTU); 4516 crq.request_capability.number = cpu_to_be64(adapter->req_mtu); 4517 cap_reqs--; 4518 ibmvnic_send_crq(adapter, &crq); 4519 4520 if (adapter->netdev->flags & IFF_PROMISC) { 4521 if (adapter->promisc_supported) { 4522 crq.request_capability.capability = 4523 cpu_to_be16(PROMISC_REQUESTED); 4524 crq.request_capability.number = cpu_to_be64(1); 4525 cap_reqs--; 4526 ibmvnic_send_crq(adapter, &crq); 4527 } 4528 } else { 4529 crq.request_capability.capability = 4530 cpu_to_be16(PROMISC_REQUESTED); 4531 crq.request_capability.number = cpu_to_be64(0); 4532 cap_reqs--; 4533 ibmvnic_send_crq(adapter, &crq); 4534 } 4535 4536 /* Keep at end to catch any discrepancy between expected and actual 4537 * CRQs sent. 4538 */ 4539 WARN_ON(cap_reqs != 0); 4540 } 4541 4542 static int pending_scrq(struct ibmvnic_adapter *adapter, 4543 struct ibmvnic_sub_crq_queue *scrq) 4544 { 4545 union sub_crq *entry = &scrq->msgs[scrq->cur]; 4546 int rc; 4547 4548 rc = !!(entry->generic.first & IBMVNIC_CRQ_CMD_RSP); 4549 4550 /* Ensure that the SCRQ valid flag is loaded prior to loading the 4551 * contents of the SCRQ descriptor 4552 */ 4553 dma_rmb(); 4554 4555 return rc; 4556 } 4557 4558 static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *adapter, 4559 struct ibmvnic_sub_crq_queue *scrq) 4560 { 4561 union sub_crq *entry; 4562 unsigned long flags; 4563 4564 spin_lock_irqsave(&scrq->lock, flags); 4565 entry = &scrq->msgs[scrq->cur]; 4566 if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP) { 4567 if (++scrq->cur == scrq->size) 4568 scrq->cur = 0; 4569 } else { 4570 entry = NULL; 4571 } 4572 spin_unlock_irqrestore(&scrq->lock, flags); 4573 4574 /* Ensure that the SCRQ valid flag is loaded prior to loading the 4575 * contents of the SCRQ descriptor 4576 */ 4577 dma_rmb(); 4578 4579 return entry; 4580 } 4581 4582 static union ibmvnic_crq *ibmvnic_next_crq(struct ibmvnic_adapter *adapter) 4583 { 4584 struct ibmvnic_crq_queue *queue = &adapter->crq; 4585 union ibmvnic_crq *crq; 4586 4587 crq = &queue->msgs[queue->cur]; 4588 if (crq->generic.first & IBMVNIC_CRQ_CMD_RSP) { 4589 if (++queue->cur == queue->size) 4590 queue->cur = 0; 4591 } else { 4592 crq = NULL; 4593 } 4594 4595 return crq; 4596 } 4597 4598 static void print_subcrq_error(struct device *dev, int rc, const char *func) 4599 { 4600 switch (rc) { 4601 case H_PARAMETER: 4602 dev_warn_ratelimited(dev, 4603 "%s failed: Send request is malformed or adapter failover pending. (rc=%d)\n", 4604 func, rc); 4605 break; 4606 case H_CLOSED: 4607 dev_warn_ratelimited(dev, 4608 "%s failed: Backing queue closed. Adapter is down or failover pending. 
(rc=%d)\n", 4609 func, rc); 4610 break; 4611 default: 4612 dev_err_ratelimited(dev, "%s failed: (rc=%d)\n", func, rc); 4613 break; 4614 } 4615 } 4616 4617 static int send_subcrq_indirect(struct ibmvnic_adapter *adapter, 4618 u64 remote_handle, u64 ioba, u64 num_entries) 4619 { 4620 unsigned int ua = adapter->vdev->unit_address; 4621 struct device *dev = &adapter->vdev->dev; 4622 int rc; 4623 4624 /* Make sure the hypervisor sees the complete request */ 4625 dma_wmb(); 4626 rc = plpar_hcall_norets(H_SEND_SUB_CRQ_INDIRECT, ua, 4627 cpu_to_be64(remote_handle), 4628 ioba, num_entries); 4629 4630 if (rc) 4631 print_subcrq_error(dev, rc, __func__); 4632 4633 return rc; 4634 } 4635 4636 static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter, 4637 union ibmvnic_crq *crq) 4638 { 4639 unsigned int ua = adapter->vdev->unit_address; 4640 struct device *dev = &adapter->vdev->dev; 4641 u64 *u64_crq = (u64 *)crq; 4642 int rc; 4643 4644 netdev_dbg(adapter->netdev, "Sending CRQ: %016lx %016lx\n", 4645 (unsigned long)cpu_to_be64(u64_crq[0]), 4646 (unsigned long)cpu_to_be64(u64_crq[1])); 4647 4648 if (!adapter->crq.active && 4649 crq->generic.first != IBMVNIC_CRQ_INIT_CMD) { 4650 dev_warn(dev, "Invalid request detected while CRQ is inactive, possible device state change during reset\n"); 4651 return -EINVAL; 4652 } 4653 4654 /* Make sure the hypervisor sees the complete request */ 4655 dma_wmb(); 4656 4657 rc = plpar_hcall_norets(H_SEND_CRQ, ua, 4658 cpu_to_be64(u64_crq[0]), 4659 cpu_to_be64(u64_crq[1])); 4660 4661 if (rc) { 4662 if (rc == H_CLOSED) { 4663 dev_warn(dev, "CRQ Queue closed\n"); 4664 /* do not reset, report the fail, wait for passive init from server */ 4665 } 4666 4667 dev_warn(dev, "Send error (rc=%d)\n", rc); 4668 } 4669 4670 return rc; 4671 } 4672 4673 static int ibmvnic_send_crq_init(struct ibmvnic_adapter *adapter) 4674 { 4675 struct device *dev = &adapter->vdev->dev; 4676 union ibmvnic_crq crq; 4677 int retries = 100; 4678 int rc; 4679 4680 memset(&crq, 0, sizeof(crq)); 4681 crq.generic.first = IBMVNIC_CRQ_INIT_CMD; 4682 crq.generic.cmd = IBMVNIC_CRQ_INIT; 4683 netdev_dbg(adapter->netdev, "Sending CRQ init\n"); 4684 4685 do { 4686 rc = ibmvnic_send_crq(adapter, &crq); 4687 if (rc != H_CLOSED) 4688 break; 4689 retries--; 4690 msleep(50); 4691 4692 } while (retries > 0); 4693 4694 if (rc) { 4695 dev_err(dev, "Failed to send init request, rc = %d\n", rc); 4696 return rc; 4697 } 4698 4699 return 0; 4700 } 4701 4702 struct vnic_login_client_data { 4703 u8 type; 4704 __be16 len; 4705 char name[]; 4706 } __packed; 4707 4708 static int vnic_client_data_len(struct ibmvnic_adapter *adapter) 4709 { 4710 int len; 4711 4712 /* Calculate the amount of buffer space needed for the 4713 * vnic client data in the login buffer. There are four entries, 4714 * OS name, LPAR name, device name, and a null last entry. 
4715 */ 4716 len = 4 * sizeof(struct vnic_login_client_data); 4717 len += 6; /* "Linux" plus NULL */ 4718 len += strlen(utsname()->nodename) + 1; 4719 len += strlen(adapter->netdev->name) + 1; 4720 4721 return len; 4722 } 4723 4724 static void vnic_add_client_data(struct ibmvnic_adapter *adapter, 4725 struct vnic_login_client_data *vlcd) 4726 { 4727 const char *os_name = "Linux"; 4728 int len; 4729 4730 /* Type 1 - LPAR OS */ 4731 vlcd->type = 1; 4732 len = strlen(os_name) + 1; 4733 vlcd->len = cpu_to_be16(len); 4734 strscpy(vlcd->name, os_name, len); 4735 vlcd = (struct vnic_login_client_data *)(vlcd->name + len); 4736 4737 /* Type 2 - LPAR name */ 4738 vlcd->type = 2; 4739 len = strlen(utsname()->nodename) + 1; 4740 vlcd->len = cpu_to_be16(len); 4741 strscpy(vlcd->name, utsname()->nodename, len); 4742 vlcd = (struct vnic_login_client_data *)(vlcd->name + len); 4743 4744 /* Type 3 - device name */ 4745 vlcd->type = 3; 4746 len = strlen(adapter->netdev->name) + 1; 4747 vlcd->len = cpu_to_be16(len); 4748 strscpy(vlcd->name, adapter->netdev->name, len); 4749 } 4750 4751 static int send_login(struct ibmvnic_adapter *adapter) 4752 { 4753 struct ibmvnic_login_rsp_buffer *login_rsp_buffer; 4754 struct ibmvnic_login_buffer *login_buffer; 4755 struct device *dev = &adapter->vdev->dev; 4756 struct vnic_login_client_data *vlcd; 4757 dma_addr_t rsp_buffer_token; 4758 dma_addr_t buffer_token; 4759 size_t rsp_buffer_size; 4760 union ibmvnic_crq crq; 4761 int client_data_len; 4762 size_t buffer_size; 4763 __be64 *tx_list_p; 4764 __be64 *rx_list_p; 4765 int rc; 4766 int i; 4767 4768 if (!adapter->tx_scrq || !adapter->rx_scrq) { 4769 netdev_err(adapter->netdev, 4770 "RX or TX queues are not allocated, device login failed\n"); 4771 return -ENOMEM; 4772 } 4773 4774 release_login_buffer(adapter); 4775 release_login_rsp_buffer(adapter); 4776 4777 client_data_len = vnic_client_data_len(adapter); 4778 4779 buffer_size = 4780 sizeof(struct ibmvnic_login_buffer) + 4781 sizeof(u64) * (adapter->req_tx_queues + adapter->req_rx_queues) + 4782 client_data_len; 4783 4784 login_buffer = kzalloc(buffer_size, GFP_ATOMIC); 4785 if (!login_buffer) 4786 goto buf_alloc_failed; 4787 4788 buffer_token = dma_map_single(dev, login_buffer, buffer_size, 4789 DMA_TO_DEVICE); 4790 if (dma_mapping_error(dev, buffer_token)) { 4791 dev_err(dev, "Couldn't map login buffer\n"); 4792 goto buf_map_failed; 4793 } 4794 4795 rsp_buffer_size = sizeof(struct ibmvnic_login_rsp_buffer) + 4796 sizeof(u64) * adapter->req_tx_queues + 4797 sizeof(u64) * adapter->req_rx_queues + 4798 sizeof(u64) * adapter->req_rx_queues + 4799 sizeof(u8) * IBMVNIC_TX_DESC_VERSIONS; 4800 4801 login_rsp_buffer = kmalloc(rsp_buffer_size, GFP_ATOMIC); 4802 if (!login_rsp_buffer) 4803 goto buf_rsp_alloc_failed; 4804 4805 rsp_buffer_token = dma_map_single(dev, login_rsp_buffer, 4806 rsp_buffer_size, DMA_FROM_DEVICE); 4807 if (dma_mapping_error(dev, rsp_buffer_token)) { 4808 dev_err(dev, "Couldn't map login rsp buffer\n"); 4809 goto buf_rsp_map_failed; 4810 } 4811 4812 adapter->login_buf = login_buffer; 4813 adapter->login_buf_token = buffer_token; 4814 adapter->login_buf_sz = buffer_size; 4815 adapter->login_rsp_buf = login_rsp_buffer; 4816 adapter->login_rsp_buf_token = rsp_buffer_token; 4817 adapter->login_rsp_buf_sz = rsp_buffer_size; 4818 4819 login_buffer->len = cpu_to_be32(buffer_size); 4820 login_buffer->version = cpu_to_be32(INITIAL_VERSION_LB); 4821 login_buffer->num_txcomp_subcrqs = cpu_to_be32(adapter->req_tx_queues); 4822 login_buffer->off_txcomp_subcrqs = 4823 
cpu_to_be32(sizeof(struct ibmvnic_login_buffer)); 4824 login_buffer->num_rxcomp_subcrqs = cpu_to_be32(adapter->req_rx_queues); 4825 login_buffer->off_rxcomp_subcrqs = 4826 cpu_to_be32(sizeof(struct ibmvnic_login_buffer) + 4827 sizeof(u64) * adapter->req_tx_queues); 4828 login_buffer->login_rsp_ioba = cpu_to_be32(rsp_buffer_token); 4829 login_buffer->login_rsp_len = cpu_to_be32(rsp_buffer_size); 4830 4831 tx_list_p = (__be64 *)((char *)login_buffer + 4832 sizeof(struct ibmvnic_login_buffer)); 4833 rx_list_p = (__be64 *)((char *)login_buffer + 4834 sizeof(struct ibmvnic_login_buffer) + 4835 sizeof(u64) * adapter->req_tx_queues); 4836 4837 for (i = 0; i < adapter->req_tx_queues; i++) { 4838 if (adapter->tx_scrq[i]) { 4839 tx_list_p[i] = 4840 cpu_to_be64(adapter->tx_scrq[i]->crq_num); 4841 } 4842 } 4843 4844 for (i = 0; i < adapter->req_rx_queues; i++) { 4845 if (adapter->rx_scrq[i]) { 4846 rx_list_p[i] = 4847 cpu_to_be64(adapter->rx_scrq[i]->crq_num); 4848 } 4849 } 4850 4851 /* Insert vNIC login client data */ 4852 vlcd = (struct vnic_login_client_data *) 4853 ((char *)rx_list_p + (sizeof(u64) * adapter->req_rx_queues)); 4854 login_buffer->client_data_offset = 4855 cpu_to_be32((char *)vlcd - (char *)login_buffer); 4856 login_buffer->client_data_len = cpu_to_be32(client_data_len); 4857 4858 vnic_add_client_data(adapter, vlcd); 4859 4860 netdev_dbg(adapter->netdev, "Login Buffer:\n"); 4861 for (i = 0; i < (adapter->login_buf_sz - 1) / 8 + 1; i++) { 4862 netdev_dbg(adapter->netdev, "%016lx\n", 4863 ((unsigned long *)(adapter->login_buf))[i]); 4864 } 4865 4866 memset(&crq, 0, sizeof(crq)); 4867 crq.login.first = IBMVNIC_CRQ_CMD; 4868 crq.login.cmd = LOGIN; 4869 crq.login.ioba = cpu_to_be32(buffer_token); 4870 crq.login.len = cpu_to_be32(buffer_size); 4871 4872 adapter->login_pending = true; 4873 rc = ibmvnic_send_crq(adapter, &crq); 4874 if (rc) { 4875 adapter->login_pending = false; 4876 netdev_err(adapter->netdev, "Failed to send login, rc=%d\n", rc); 4877 goto buf_send_failed; 4878 } 4879 4880 return 0; 4881 4882 buf_send_failed: 4883 dma_unmap_single(dev, rsp_buffer_token, rsp_buffer_size, 4884 DMA_FROM_DEVICE); 4885 buf_rsp_map_failed: 4886 kfree(login_rsp_buffer); 4887 adapter->login_rsp_buf = NULL; 4888 buf_rsp_alloc_failed: 4889 dma_unmap_single(dev, buffer_token, buffer_size, DMA_TO_DEVICE); 4890 buf_map_failed: 4891 kfree(login_buffer); 4892 adapter->login_buf = NULL; 4893 buf_alloc_failed: 4894 return -ENOMEM; 4895 } 4896 4897 static int send_request_map(struct ibmvnic_adapter *adapter, dma_addr_t addr, 4898 u32 len, u8 map_id) 4899 { 4900 union ibmvnic_crq crq; 4901 4902 memset(&crq, 0, sizeof(crq)); 4903 crq.request_map.first = IBMVNIC_CRQ_CMD; 4904 crq.request_map.cmd = REQUEST_MAP; 4905 crq.request_map.map_id = map_id; 4906 crq.request_map.ioba = cpu_to_be32(addr); 4907 crq.request_map.len = cpu_to_be32(len); 4908 return ibmvnic_send_crq(adapter, &crq); 4909 } 4910 4911 static int send_request_unmap(struct ibmvnic_adapter *adapter, u8 map_id) 4912 { 4913 union ibmvnic_crq crq; 4914 4915 memset(&crq, 0, sizeof(crq)); 4916 crq.request_unmap.first = IBMVNIC_CRQ_CMD; 4917 crq.request_unmap.cmd = REQUEST_UNMAP; 4918 crq.request_unmap.map_id = map_id; 4919 return ibmvnic_send_crq(adapter, &crq); 4920 } 4921 4922 static void send_query_map(struct ibmvnic_adapter *adapter) 4923 { 4924 union ibmvnic_crq crq; 4925 4926 memset(&crq, 0, sizeof(crq)); 4927 crq.query_map.first = IBMVNIC_CRQ_CMD; 4928 crq.query_map.cmd = QUERY_MAP; 4929 ibmvnic_send_crq(adapter, &crq); 4930 } 4931 4932 /* Send a 
series of CRQs requesting various capabilities of the VNIC server */ 4933 static void send_query_cap(struct ibmvnic_adapter *adapter) 4934 { 4935 union ibmvnic_crq crq; 4936 int cap_reqs; 4937 4938 /* We send out 25 QUERY_CAPABILITY CRQs below. Initialize this count 4939 * upfront. When the tasklet receives a response to all of these, it 4940 * can send out the next protocol message (REQUEST_CAPABILITY). 4941 */ 4942 cap_reqs = 25; 4943 4944 atomic_set(&adapter->running_cap_crqs, cap_reqs); 4945 4946 memset(&crq, 0, sizeof(crq)); 4947 crq.query_capability.first = IBMVNIC_CRQ_CMD; 4948 crq.query_capability.cmd = QUERY_CAPABILITY; 4949 4950 crq.query_capability.capability = cpu_to_be16(MIN_TX_QUEUES); 4951 ibmvnic_send_crq(adapter, &crq); 4952 cap_reqs--; 4953 4954 crq.query_capability.capability = cpu_to_be16(MIN_RX_QUEUES); 4955 ibmvnic_send_crq(adapter, &crq); 4956 cap_reqs--; 4957 4958 crq.query_capability.capability = cpu_to_be16(MIN_RX_ADD_QUEUES); 4959 ibmvnic_send_crq(adapter, &crq); 4960 cap_reqs--; 4961 4962 crq.query_capability.capability = cpu_to_be16(MAX_TX_QUEUES); 4963 ibmvnic_send_crq(adapter, &crq); 4964 cap_reqs--; 4965 4966 crq.query_capability.capability = cpu_to_be16(MAX_RX_QUEUES); 4967 ibmvnic_send_crq(adapter, &crq); 4968 cap_reqs--; 4969 4970 crq.query_capability.capability = cpu_to_be16(MAX_RX_ADD_QUEUES); 4971 ibmvnic_send_crq(adapter, &crq); 4972 cap_reqs--; 4973 4974 crq.query_capability.capability = 4975 cpu_to_be16(MIN_TX_ENTRIES_PER_SUBCRQ); 4976 ibmvnic_send_crq(adapter, &crq); 4977 cap_reqs--; 4978 4979 crq.query_capability.capability = 4980 cpu_to_be16(MIN_RX_ADD_ENTRIES_PER_SUBCRQ); 4981 ibmvnic_send_crq(adapter, &crq); 4982 cap_reqs--; 4983 4984 crq.query_capability.capability = 4985 cpu_to_be16(MAX_TX_ENTRIES_PER_SUBCRQ); 4986 ibmvnic_send_crq(adapter, &crq); 4987 cap_reqs--; 4988 4989 crq.query_capability.capability = 4990 cpu_to_be16(MAX_RX_ADD_ENTRIES_PER_SUBCRQ); 4991 ibmvnic_send_crq(adapter, &crq); 4992 cap_reqs--; 4993 4994 crq.query_capability.capability = cpu_to_be16(TCP_IP_OFFLOAD); 4995 ibmvnic_send_crq(adapter, &crq); 4996 cap_reqs--; 4997 4998 crq.query_capability.capability = cpu_to_be16(PROMISC_SUPPORTED); 4999 ibmvnic_send_crq(adapter, &crq); 5000 cap_reqs--; 5001 5002 crq.query_capability.capability = cpu_to_be16(MIN_MTU); 5003 ibmvnic_send_crq(adapter, &crq); 5004 cap_reqs--; 5005 5006 crq.query_capability.capability = cpu_to_be16(MAX_MTU); 5007 ibmvnic_send_crq(adapter, &crq); 5008 cap_reqs--; 5009 5010 crq.query_capability.capability = cpu_to_be16(MAX_MULTICAST_FILTERS); 5011 ibmvnic_send_crq(adapter, &crq); 5012 cap_reqs--; 5013 5014 crq.query_capability.capability = cpu_to_be16(VLAN_HEADER_INSERTION); 5015 ibmvnic_send_crq(adapter, &crq); 5016 cap_reqs--; 5017 5018 crq.query_capability.capability = cpu_to_be16(RX_VLAN_HEADER_INSERTION); 5019 ibmvnic_send_crq(adapter, &crq); 5020 cap_reqs--; 5021 5022 crq.query_capability.capability = cpu_to_be16(MAX_TX_SG_ENTRIES); 5023 ibmvnic_send_crq(adapter, &crq); 5024 cap_reqs--; 5025 5026 crq.query_capability.capability = cpu_to_be16(RX_SG_SUPPORTED); 5027 ibmvnic_send_crq(adapter, &crq); 5028 cap_reqs--; 5029 5030 crq.query_capability.capability = cpu_to_be16(OPT_TX_COMP_SUB_QUEUES); 5031 ibmvnic_send_crq(adapter, &crq); 5032 cap_reqs--; 5033 5034 crq.query_capability.capability = cpu_to_be16(OPT_RX_COMP_QUEUES); 5035 ibmvnic_send_crq(adapter, &crq); 5036 cap_reqs--; 5037 5038 crq.query_capability.capability = 5039 cpu_to_be16(OPT_RX_BUFADD_Q_PER_RX_COMP_Q); 5040 ibmvnic_send_crq(adapter,
&crq); 5041 cap_reqs--; 5042 5043 crq.query_capability.capability = 5044 cpu_to_be16(OPT_TX_ENTRIES_PER_SUBCRQ); 5045 ibmvnic_send_crq(adapter, &crq); 5046 cap_reqs--; 5047 5048 crq.query_capability.capability = 5049 cpu_to_be16(OPT_RXBA_ENTRIES_PER_SUBCRQ); 5050 ibmvnic_send_crq(adapter, &crq); 5051 cap_reqs--; 5052 5053 crq.query_capability.capability = cpu_to_be16(TX_RX_DESC_REQ); 5054 5055 ibmvnic_send_crq(adapter, &crq); 5056 cap_reqs--; 5057 5058 /* Keep at end to catch any discrepancy between expected and actual 5059 * CRQs sent. 5060 */ 5061 WARN_ON(cap_reqs != 0); 5062 } 5063 5064 static void send_query_ip_offload(struct ibmvnic_adapter *adapter) 5065 { 5066 int buf_sz = sizeof(struct ibmvnic_query_ip_offload_buffer); 5067 struct device *dev = &adapter->vdev->dev; 5068 union ibmvnic_crq crq; 5069 5070 adapter->ip_offload_tok = 5071 dma_map_single(dev, 5072 &adapter->ip_offload_buf, 5073 buf_sz, 5074 DMA_FROM_DEVICE); 5075 5076 if (dma_mapping_error(dev, adapter->ip_offload_tok)) { 5077 if (!firmware_has_feature(FW_FEATURE_CMO)) 5078 dev_err(dev, "Couldn't map offload buffer\n"); 5079 return; 5080 } 5081 5082 memset(&crq, 0, sizeof(crq)); 5083 crq.query_ip_offload.first = IBMVNIC_CRQ_CMD; 5084 crq.query_ip_offload.cmd = QUERY_IP_OFFLOAD; 5085 crq.query_ip_offload.len = cpu_to_be32(buf_sz); 5086 crq.query_ip_offload.ioba = 5087 cpu_to_be32(adapter->ip_offload_tok); 5088 5089 ibmvnic_send_crq(adapter, &crq); 5090 } 5091 5092 static void send_control_ip_offload(struct ibmvnic_adapter *adapter) 5093 { 5094 struct ibmvnic_control_ip_offload_buffer *ctrl_buf = &adapter->ip_offload_ctrl; 5095 struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf; 5096 struct device *dev = &adapter->vdev->dev; 5097 netdev_features_t old_hw_features = 0; 5098 union ibmvnic_crq crq; 5099 5100 adapter->ip_offload_ctrl_tok = 5101 dma_map_single(dev, 5102 ctrl_buf, 5103 sizeof(adapter->ip_offload_ctrl), 5104 DMA_TO_DEVICE); 5105 5106 if (dma_mapping_error(dev, adapter->ip_offload_ctrl_tok)) { 5107 dev_err(dev, "Couldn't map ip offload control buffer\n"); 5108 return; 5109 } 5110 5111 ctrl_buf->len = cpu_to_be32(sizeof(adapter->ip_offload_ctrl)); 5112 ctrl_buf->version = cpu_to_be32(INITIAL_VERSION_IOB); 5113 ctrl_buf->ipv4_chksum = buf->ipv4_chksum; 5114 ctrl_buf->ipv6_chksum = buf->ipv6_chksum; 5115 ctrl_buf->tcp_ipv4_chksum = buf->tcp_ipv4_chksum; 5116 ctrl_buf->udp_ipv4_chksum = buf->udp_ipv4_chksum; 5117 ctrl_buf->tcp_ipv6_chksum = buf->tcp_ipv6_chksum; 5118 ctrl_buf->udp_ipv6_chksum = buf->udp_ipv6_chksum; 5119 ctrl_buf->large_tx_ipv4 = buf->large_tx_ipv4; 5120 ctrl_buf->large_tx_ipv6 = buf->large_tx_ipv6; 5121 5122 /* large_rx disabled for now, additional features needed */ 5123 ctrl_buf->large_rx_ipv4 = 0; 5124 ctrl_buf->large_rx_ipv6 = 0; 5125 5126 if (adapter->state != VNIC_PROBING) { 5127 old_hw_features = adapter->netdev->hw_features; 5128 adapter->netdev->hw_features = 0; 5129 } 5130 5131 adapter->netdev->hw_features = NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO; 5132 5133 if (buf->tcp_ipv4_chksum || buf->udp_ipv4_chksum) 5134 adapter->netdev->hw_features |= NETIF_F_IP_CSUM; 5135 5136 if (buf->tcp_ipv6_chksum || buf->udp_ipv6_chksum) 5137 adapter->netdev->hw_features |= NETIF_F_IPV6_CSUM; 5138 5139 if ((adapter->netdev->features & 5140 (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))) 5141 adapter->netdev->hw_features |= NETIF_F_RXCSUM; 5142 5143 if (buf->large_tx_ipv4) 5144 adapter->netdev->hw_features |= NETIF_F_TSO; 5145 if (buf->large_tx_ipv6) 5146 adapter->netdev->hw_features |= NETIF_F_TSO6; 
5147 5148 if (adapter->state == VNIC_PROBING) { 5149 adapter->netdev->features |= adapter->netdev->hw_features; 5150 } else if (old_hw_features != adapter->netdev->hw_features) { 5151 netdev_features_t tmp = 0; 5152 5153 /* disable features no longer supported */ 5154 adapter->netdev->features &= adapter->netdev->hw_features; 5155 /* turn on features now supported if previously enabled */ 5156 tmp = (old_hw_features ^ adapter->netdev->hw_features) & 5157 adapter->netdev->hw_features; 5158 adapter->netdev->features |= 5159 tmp & adapter->netdev->wanted_features; 5160 } 5161 5162 memset(&crq, 0, sizeof(crq)); 5163 crq.control_ip_offload.first = IBMVNIC_CRQ_CMD; 5164 crq.control_ip_offload.cmd = CONTROL_IP_OFFLOAD; 5165 crq.control_ip_offload.len = 5166 cpu_to_be32(sizeof(adapter->ip_offload_ctrl)); 5167 crq.control_ip_offload.ioba = cpu_to_be32(adapter->ip_offload_ctrl_tok); 5168 ibmvnic_send_crq(adapter, &crq); 5169 } 5170 5171 static void handle_vpd_size_rsp(union ibmvnic_crq *crq, 5172 struct ibmvnic_adapter *adapter) 5173 { 5174 struct device *dev = &adapter->vdev->dev; 5175 5176 if (crq->get_vpd_size_rsp.rc.code) { 5177 dev_err(dev, "Error retrieving VPD size, rc=%x\n", 5178 crq->get_vpd_size_rsp.rc.code); 5179 complete(&adapter->fw_done); 5180 return; 5181 } 5182 5183 adapter->vpd->len = be64_to_cpu(crq->get_vpd_size_rsp.len); 5184 complete(&adapter->fw_done); 5185 } 5186 5187 static void handle_vpd_rsp(union ibmvnic_crq *crq, 5188 struct ibmvnic_adapter *adapter) 5189 { 5190 struct device *dev = &adapter->vdev->dev; 5191 unsigned char *substr = NULL; 5192 u8 fw_level_len = 0; 5193 5194 memset(adapter->fw_version, 0, 32); 5195 5196 dma_unmap_single(dev, adapter->vpd->dma_addr, adapter->vpd->len, 5197 DMA_FROM_DEVICE); 5198 5199 if (crq->get_vpd_rsp.rc.code) { 5200 dev_err(dev, "Error retrieving VPD from device, rc=%x\n", 5201 crq->get_vpd_rsp.rc.code); 5202 goto complete; 5203 } 5204 5205 /* get the position of the firmware version info 5206 * located after the ASCII 'RM' substring in the buffer 5207 */ 5208 substr = strnstr(adapter->vpd->buff, "RM", adapter->vpd->len); 5209 if (!substr) { 5210 dev_info(dev, "Warning - No FW level has been provided in the VPD buffer by the VIOS Server\n"); 5211 goto complete; 5212 } 5213 5214 /* get length of firmware level ASCII substring */ 5215 if ((substr + 2) < (adapter->vpd->buff + adapter->vpd->len)) { 5216 fw_level_len = *(substr + 2); 5217 } else { 5218 dev_info(dev, "FW level length field extends past the VPD buffer\n"); 5219 goto complete; 5220 } 5221 5222 /* copy firmware version string from vpd into adapter */ 5223 if ((substr + 3 + fw_level_len) < 5224 (adapter->vpd->buff + adapter->vpd->len)) { 5225 strncpy((char *)adapter->fw_version, substr + 3, fw_level_len); 5226 } else { 5227 dev_info(dev, "FW level string extends past the VPD buffer\n"); 5228 } 5229 5230 complete: 5231 if (adapter->fw_version[0] == '\0') 5232 strscpy((char *)adapter->fw_version, "N/A", sizeof(adapter->fw_version)); 5233 complete(&adapter->fw_done); 5234 } 5235 5236 static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter) 5237 { 5238 struct device *dev = &adapter->vdev->dev; 5239 struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf; 5240 int i; 5241 5242 dma_unmap_single(dev, adapter->ip_offload_tok, 5243 sizeof(adapter->ip_offload_buf), DMA_FROM_DEVICE); 5244 5245 netdev_dbg(adapter->netdev, "Query IP Offload Buffer:\n"); 5246 for (i = 0; i < (sizeof(adapter->ip_offload_buf) - 1) / 8 + 1; i++) 5247 netdev_dbg(adapter->netdev, "%016lx\n", 5248
((unsigned long *)(buf))[i]); 5249 5250 netdev_dbg(adapter->netdev, "ipv4_chksum = %d\n", buf->ipv4_chksum); 5251 netdev_dbg(adapter->netdev, "ipv6_chksum = %d\n", buf->ipv6_chksum); 5252 netdev_dbg(adapter->netdev, "tcp_ipv4_chksum = %d\n", 5253 buf->tcp_ipv4_chksum); 5254 netdev_dbg(adapter->netdev, "tcp_ipv6_chksum = %d\n", 5255 buf->tcp_ipv6_chksum); 5256 netdev_dbg(adapter->netdev, "udp_ipv4_chksum = %d\n", 5257 buf->udp_ipv4_chksum); 5258 netdev_dbg(adapter->netdev, "udp_ipv6_chksum = %d\n", 5259 buf->udp_ipv6_chksum); 5260 netdev_dbg(adapter->netdev, "large_tx_ipv4 = %d\n", 5261 buf->large_tx_ipv4); 5262 netdev_dbg(adapter->netdev, "large_tx_ipv6 = %d\n", 5263 buf->large_tx_ipv6); 5264 netdev_dbg(adapter->netdev, "large_rx_ipv4 = %d\n", 5265 buf->large_rx_ipv4); 5266 netdev_dbg(adapter->netdev, "large_rx_ipv6 = %d\n", 5267 buf->large_rx_ipv6); 5268 netdev_dbg(adapter->netdev, "max_ipv4_hdr_sz = %d\n", 5269 buf->max_ipv4_header_size); 5270 netdev_dbg(adapter->netdev, "max_ipv6_hdr_sz = %d\n", 5271 buf->max_ipv6_header_size); 5272 netdev_dbg(adapter->netdev, "max_tcp_hdr_size = %d\n", 5273 buf->max_tcp_header_size); 5274 netdev_dbg(adapter->netdev, "max_udp_hdr_size = %d\n", 5275 buf->max_udp_header_size); 5276 netdev_dbg(adapter->netdev, "max_large_tx_size = %d\n", 5277 buf->max_large_tx_size); 5278 netdev_dbg(adapter->netdev, "max_large_rx_size = %d\n", 5279 buf->max_large_rx_size); 5280 netdev_dbg(adapter->netdev, "ipv6_ext_hdr = %d\n", 5281 buf->ipv6_extension_header); 5282 netdev_dbg(adapter->netdev, "tcp_pseudosum_req = %d\n", 5283 buf->tcp_pseudosum_req); 5284 netdev_dbg(adapter->netdev, "num_ipv6_ext_hd = %d\n", 5285 buf->num_ipv6_ext_headers); 5286 netdev_dbg(adapter->netdev, "off_ipv6_ext_hd = %d\n", 5287 buf->off_ipv6_ext_headers); 5288 5289 send_control_ip_offload(adapter); 5290 } 5291 5292 static const char *ibmvnic_fw_err_cause(u16 cause) 5293 { 5294 switch (cause) { 5295 case ADAPTER_PROBLEM: 5296 return "adapter problem"; 5297 case BUS_PROBLEM: 5298 return "bus problem"; 5299 case FW_PROBLEM: 5300 return "firmware problem"; 5301 case DD_PROBLEM: 5302 return "device driver problem"; 5303 case EEH_RECOVERY: 5304 return "EEH recovery"; 5305 case FW_UPDATED: 5306 return "firmware updated"; 5307 case LOW_MEMORY: 5308 return "low memory"; 5309 default: 5310 return "unknown"; 5311 } 5312 } 5313 5314 static void handle_error_indication(union ibmvnic_crq *crq, 5315 struct ibmvnic_adapter *adapter) 5316 { 5317 struct device *dev = &adapter->vdev->dev; 5318 u16 cause; 5319 5320 cause = be16_to_cpu(crq->error_indication.error_cause); 5321 5322 dev_warn_ratelimited(dev, 5323 "Firmware reports %serror, cause: %s. Starting recovery...\n", 5324 crq->error_indication.flags 5325 & IBMVNIC_FATAL_ERROR ? "FATAL " : "", 5326 ibmvnic_fw_err_cause(cause)); 5327 5328 if (crq->error_indication.flags & IBMVNIC_FATAL_ERROR) 5329 ibmvnic_reset(adapter, VNIC_RESET_FATAL); 5330 else 5331 ibmvnic_reset(adapter, VNIC_RESET_NON_FATAL); 5332 } 5333 5334 static int handle_change_mac_rsp(union ibmvnic_crq *crq, 5335 struct ibmvnic_adapter *adapter) 5336 { 5337 struct net_device *netdev = adapter->netdev; 5338 struct device *dev = &adapter->vdev->dev; 5339 long rc; 5340 5341 rc = crq->change_mac_addr_rsp.rc.code; 5342 if (rc) { 5343 dev_err(dev, "Error %ld in CHANGE_MAC_ADDR_RSP\n", rc); 5344 goto out; 5345 } 5346 /* crq->change_mac_addr.mac_addr is the requested one 5347 * crq->change_mac_addr_rsp.mac_addr is the returned valid one.
5348 */ 5349 eth_hw_addr_set(netdev, &crq->change_mac_addr_rsp.mac_addr[0]); 5350 ether_addr_copy(adapter->mac_addr, 5351 &crq->change_mac_addr_rsp.mac_addr[0]); 5352 out: 5353 complete(&adapter->fw_done); 5354 return rc; 5355 } 5356 5357 static void handle_request_cap_rsp(union ibmvnic_crq *crq, 5358 struct ibmvnic_adapter *adapter) 5359 { 5360 struct device *dev = &adapter->vdev->dev; 5361 u64 *req_value; 5362 char *name; 5363 5364 atomic_dec(&adapter->running_cap_crqs); 5365 netdev_dbg(adapter->netdev, "Outstanding request-caps: %d\n", 5366 atomic_read(&adapter->running_cap_crqs)); 5367 switch (be16_to_cpu(crq->request_capability_rsp.capability)) { 5368 case REQ_TX_QUEUES: 5369 req_value = &adapter->req_tx_queues; 5370 name = "tx"; 5371 break; 5372 case REQ_RX_QUEUES: 5373 req_value = &adapter->req_rx_queues; 5374 name = "rx"; 5375 break; 5376 case REQ_RX_ADD_QUEUES: 5377 req_value = &adapter->req_rx_add_queues; 5378 name = "rx_add"; 5379 break; 5380 case REQ_TX_ENTRIES_PER_SUBCRQ: 5381 req_value = &adapter->req_tx_entries_per_subcrq; 5382 name = "tx_entries_per_subcrq"; 5383 break; 5384 case REQ_RX_ADD_ENTRIES_PER_SUBCRQ: 5385 req_value = &adapter->req_rx_add_entries_per_subcrq; 5386 name = "rx_add_entries_per_subcrq"; 5387 break; 5388 case REQ_MTU: 5389 req_value = &adapter->req_mtu; 5390 name = "mtu"; 5391 break; 5392 case PROMISC_REQUESTED: 5393 req_value = &adapter->promisc; 5394 name = "promisc"; 5395 break; 5396 default: 5397 dev_err(dev, "Got invalid cap request rsp %d\n", 5398 be16_to_cpu(crq->request_capability.capability)); 5399 return; 5400 } 5401 5402 switch (crq->request_capability_rsp.rc.code) { 5403 case SUCCESS: 5404 break; 5405 case PARTIALSUCCESS: 5406 dev_info(dev, "req=%lld, rsp=%ld in %s queue, retrying.\n", 5407 *req_value, 5408 (long)be64_to_cpu(crq->request_capability_rsp.number), 5409 name); 5410 5411 if (be16_to_cpu(crq->request_capability_rsp.capability) == 5412 REQ_MTU) { 5413 pr_err("mtu of %llu is not supported. Reverting.\n", 5414 *req_value); 5415 *req_value = adapter->fallback.mtu; 5416 } else { 5417 *req_value = 5418 be64_to_cpu(crq->request_capability_rsp.number); 5419 } 5420 5421 send_request_cap(adapter, 1); 5422 return; 5423 default: 5424 dev_err(dev, "Error %d in request cap rsp\n", 5425 crq->request_capability_rsp.rc.code); 5426 return; 5427 } 5428 5429 /* Done receiving requested capabilities, query IP offload support */ 5430 if (atomic_read(&adapter->running_cap_crqs) == 0) 5431 send_query_ip_offload(adapter); 5432 } 5433 5434 static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq, 5435 struct ibmvnic_adapter *adapter) 5436 { 5437 struct device *dev = &adapter->vdev->dev; 5438 struct net_device *netdev = adapter->netdev; 5439 struct ibmvnic_login_rsp_buffer *login_rsp = adapter->login_rsp_buf; 5440 struct ibmvnic_login_buffer *login = adapter->login_buf; 5441 u64 *tx_handle_array; 5442 u64 *rx_handle_array; 5443 int num_tx_pools; 5444 int num_rx_pools; 5445 u64 *size_array; 5446 u32 rsp_len; 5447 int i; 5448 5449 /* CHECK: Test/set of login_pending does not need to be atomic 5450 * because only ibmvnic_tasklet tests/clears this. 5451 */ 5452 if (!adapter->login_pending) { 5453 netdev_warn(netdev, "Ignoring unexpected login response\n"); 5454 return 0; 5455 } 5456 adapter->login_pending = false; 5457 5458 /* If the number of queues requested can't be allocated by the 5459 * server, the login response will return with code 1. We will need 5460 * to resend the login buffer with fewer queues requested.
5461 */ 5462 if (login_rsp_crq->generic.rc.code) { 5463 adapter->init_done_rc = login_rsp_crq->generic.rc.code; 5464 complete(&adapter->init_done); 5465 return 0; 5466 } 5467 5468 if (adapter->failover_pending) { 5469 adapter->init_done_rc = -EAGAIN; 5470 netdev_dbg(netdev, "Failover pending, ignoring login response\n"); 5471 complete(&adapter->init_done); 5472 /* login response buffer will be released on reset */ 5473 return 0; 5474 } 5475 5476 netdev->mtu = adapter->req_mtu - ETH_HLEN; 5477 5478 netdev_dbg(adapter->netdev, "Login Response Buffer:\n"); 5479 for (i = 0; i < (adapter->login_rsp_buf_sz - 1) / 8 + 1; i++) { 5480 netdev_dbg(adapter->netdev, "%016lx\n", 5481 ((unsigned long *)(adapter->login_rsp_buf))[i]); 5482 } 5483 5484 /* Sanity checks */ 5485 if (login->num_txcomp_subcrqs != login_rsp->num_txsubm_subcrqs || 5486 (be32_to_cpu(login->num_rxcomp_subcrqs) * 5487 adapter->req_rx_add_queues != 5488 be32_to_cpu(login_rsp->num_rxadd_subcrqs))) { 5489 dev_err(dev, "FATAL: Inconsistent login and login rsp\n"); 5490 ibmvnic_reset(adapter, VNIC_RESET_FATAL); 5491 return -EIO; 5492 } 5493 5494 rsp_len = be32_to_cpu(login_rsp->len); 5495 if (be32_to_cpu(login->login_rsp_len) < rsp_len || 5496 rsp_len <= be32_to_cpu(login_rsp->off_txsubm_subcrqs) || 5497 rsp_len <= be32_to_cpu(login_rsp->off_rxadd_subcrqs) || 5498 rsp_len <= be32_to_cpu(login_rsp->off_rxadd_buff_size) || 5499 rsp_len <= be32_to_cpu(login_rsp->off_supp_tx_desc)) { 5500 /* This can happen if a login request times out and there are 5501 * 2 outstanding login requests sent, the LOGIN_RSP crq 5502 * could have been for the older login request. So we are 5503 * parsing the newer response buffer which may be incomplete 5504 */ 5505 dev_err(dev, "FATAL: Login rsp offsets/lengths invalid\n"); 5506 ibmvnic_reset(adapter, VNIC_RESET_FATAL); 5507 return -EIO; 5508 } 5509 5510 size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) + 5511 be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size)); 5512 /* variable buffer sizes are not supported, so just read the 5513 * first entry. 
5514 */ 5515 adapter->cur_rx_buf_sz = be64_to_cpu(size_array[0]); 5516 5517 num_tx_pools = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs); 5518 num_rx_pools = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs); 5519 5520 tx_handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) + 5521 be32_to_cpu(adapter->login_rsp_buf->off_txsubm_subcrqs)); 5522 rx_handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) + 5523 be32_to_cpu(adapter->login_rsp_buf->off_rxadd_subcrqs)); 5524 5525 for (i = 0; i < num_tx_pools; i++) 5526 adapter->tx_scrq[i]->handle = tx_handle_array[i]; 5527 5528 for (i = 0; i < num_rx_pools; i++) 5529 adapter->rx_scrq[i]->handle = rx_handle_array[i]; 5530 5531 adapter->num_active_tx_scrqs = num_tx_pools; 5532 adapter->num_active_rx_scrqs = num_rx_pools; 5533 release_login_rsp_buffer(adapter); 5534 release_login_buffer(adapter); 5535 complete(&adapter->init_done); 5536 5537 return 0; 5538 } 5539 5540 static void handle_request_unmap_rsp(union ibmvnic_crq *crq, 5541 struct ibmvnic_adapter *adapter) 5542 { 5543 struct device *dev = &adapter->vdev->dev; 5544 long rc; 5545 5546 rc = crq->request_unmap_rsp.rc.code; 5547 if (rc) 5548 dev_err(dev, "Error %ld in REQUEST_UNMAP_RSP\n", rc); 5549 } 5550 5551 static void handle_query_map_rsp(union ibmvnic_crq *crq, 5552 struct ibmvnic_adapter *adapter) 5553 { 5554 struct net_device *netdev = adapter->netdev; 5555 struct device *dev = &adapter->vdev->dev; 5556 long rc; 5557 5558 rc = crq->query_map_rsp.rc.code; 5559 if (rc) { 5560 dev_err(dev, "Error %ld in QUERY_MAP_RSP\n", rc); 5561 return; 5562 } 5563 netdev_dbg(netdev, "page_size = %d\ntot_pages = %u\nfree_pages = %u\n", 5564 crq->query_map_rsp.page_size, 5565 __be32_to_cpu(crq->query_map_rsp.tot_pages), 5566 __be32_to_cpu(crq->query_map_rsp.free_pages)); 5567 } 5568 5569 static void handle_query_cap_rsp(union ibmvnic_crq *crq, 5570 struct ibmvnic_adapter *adapter) 5571 { 5572 struct net_device *netdev = adapter->netdev; 5573 struct device *dev = &adapter->vdev->dev; 5574 long rc; 5575 5576 atomic_dec(&adapter->running_cap_crqs); 5577 netdev_dbg(netdev, "Outstanding queries: %d\n", 5578 atomic_read(&adapter->running_cap_crqs)); 5579 rc = crq->query_capability.rc.code; 5580 if (rc) { 5581 dev_err(dev, "Error %ld in QUERY_CAP_RSP\n", rc); 5582 goto out; 5583 } 5584 5585 switch (be16_to_cpu(crq->query_capability.capability)) { 5586 case MIN_TX_QUEUES: 5587 adapter->min_tx_queues = 5588 be64_to_cpu(crq->query_capability.number); 5589 netdev_dbg(netdev, "min_tx_queues = %lld\n", 5590 adapter->min_tx_queues); 5591 break; 5592 case MIN_RX_QUEUES: 5593 adapter->min_rx_queues = 5594 be64_to_cpu(crq->query_capability.number); 5595 netdev_dbg(netdev, "min_rx_queues = %lld\n", 5596 adapter->min_rx_queues); 5597 break; 5598 case MIN_RX_ADD_QUEUES: 5599 adapter->min_rx_add_queues = 5600 be64_to_cpu(crq->query_capability.number); 5601 netdev_dbg(netdev, "min_rx_add_queues = %lld\n", 5602 adapter->min_rx_add_queues); 5603 break; 5604 case MAX_TX_QUEUES: 5605 adapter->max_tx_queues = 5606 be64_to_cpu(crq->query_capability.number); 5607 netdev_dbg(netdev, "max_tx_queues = %lld\n", 5608 adapter->max_tx_queues); 5609 break; 5610 case MAX_RX_QUEUES: 5611 adapter->max_rx_queues = 5612 be64_to_cpu(crq->query_capability.number); 5613 netdev_dbg(netdev, "max_rx_queues = %lld\n", 5614 adapter->max_rx_queues); 5615 break; 5616 case MAX_RX_ADD_QUEUES: 5617 adapter->max_rx_add_queues = 5618 be64_to_cpu(crq->query_capability.number); 5619 netdev_dbg(netdev, "max_rx_add_queues = %lld\n", 5620 
adapter->max_rx_add_queues); 5621 break; 5622 case MIN_TX_ENTRIES_PER_SUBCRQ: 5623 adapter->min_tx_entries_per_subcrq = 5624 be64_to_cpu(crq->query_capability.number); 5625 netdev_dbg(netdev, "min_tx_entries_per_subcrq = %lld\n", 5626 adapter->min_tx_entries_per_subcrq); 5627 break; 5628 case MIN_RX_ADD_ENTRIES_PER_SUBCRQ: 5629 adapter->min_rx_add_entries_per_subcrq = 5630 be64_to_cpu(crq->query_capability.number); 5631 netdev_dbg(netdev, "min_rx_add_entrs_per_subcrq = %lld\n", 5632 adapter->min_rx_add_entries_per_subcrq); 5633 break; 5634 case MAX_TX_ENTRIES_PER_SUBCRQ: 5635 adapter->max_tx_entries_per_subcrq = 5636 be64_to_cpu(crq->query_capability.number); 5637 netdev_dbg(netdev, "max_tx_entries_per_subcrq = %lld\n", 5638 adapter->max_tx_entries_per_subcrq); 5639 break; 5640 case MAX_RX_ADD_ENTRIES_PER_SUBCRQ: 5641 adapter->max_rx_add_entries_per_subcrq = 5642 be64_to_cpu(crq->query_capability.number); 5643 netdev_dbg(netdev, "max_rx_add_entrs_per_subcrq = %lld\n", 5644 adapter->max_rx_add_entries_per_subcrq); 5645 break; 5646 case TCP_IP_OFFLOAD: 5647 adapter->tcp_ip_offload = 5648 be64_to_cpu(crq->query_capability.number); 5649 netdev_dbg(netdev, "tcp_ip_offload = %lld\n", 5650 adapter->tcp_ip_offload); 5651 break; 5652 case PROMISC_SUPPORTED: 5653 adapter->promisc_supported = 5654 be64_to_cpu(crq->query_capability.number); 5655 netdev_dbg(netdev, "promisc_supported = %lld\n", 5656 adapter->promisc_supported); 5657 break; 5658 case MIN_MTU: 5659 adapter->min_mtu = be64_to_cpu(crq->query_capability.number); 5660 netdev->min_mtu = adapter->min_mtu - ETH_HLEN; 5661 netdev_dbg(netdev, "min_mtu = %lld\n", adapter->min_mtu); 5662 break; 5663 case MAX_MTU: 5664 adapter->max_mtu = be64_to_cpu(crq->query_capability.number); 5665 netdev->max_mtu = adapter->max_mtu - ETH_HLEN; 5666 netdev_dbg(netdev, "max_mtu = %lld\n", adapter->max_mtu); 5667 break; 5668 case MAX_MULTICAST_FILTERS: 5669 adapter->max_multicast_filters = 5670 be64_to_cpu(crq->query_capability.number); 5671 netdev_dbg(netdev, "max_multicast_filters = %lld\n", 5672 adapter->max_multicast_filters); 5673 break; 5674 case VLAN_HEADER_INSERTION: 5675 adapter->vlan_header_insertion = 5676 be64_to_cpu(crq->query_capability.number); 5677 if (adapter->vlan_header_insertion) 5678 netdev->features |= NETIF_F_HW_VLAN_STAG_TX; 5679 netdev_dbg(netdev, "vlan_header_insertion = %lld\n", 5680 adapter->vlan_header_insertion); 5681 break; 5682 case RX_VLAN_HEADER_INSERTION: 5683 adapter->rx_vlan_header_insertion = 5684 be64_to_cpu(crq->query_capability.number); 5685 netdev_dbg(netdev, "rx_vlan_header_insertion = %lld\n", 5686 adapter->rx_vlan_header_insertion); 5687 break; 5688 case MAX_TX_SG_ENTRIES: 5689 adapter->max_tx_sg_entries = 5690 be64_to_cpu(crq->query_capability.number); 5691 netdev_dbg(netdev, "max_tx_sg_entries = %lld\n", 5692 adapter->max_tx_sg_entries); 5693 break; 5694 case RX_SG_SUPPORTED: 5695 adapter->rx_sg_supported = 5696 be64_to_cpu(crq->query_capability.number); 5697 netdev_dbg(netdev, "rx_sg_supported = %lld\n", 5698 adapter->rx_sg_supported); 5699 break; 5700 case OPT_TX_COMP_SUB_QUEUES: 5701 adapter->opt_tx_comp_sub_queues = 5702 be64_to_cpu(crq->query_capability.number); 5703 netdev_dbg(netdev, "opt_tx_comp_sub_queues = %lld\n", 5704 adapter->opt_tx_comp_sub_queues); 5705 break; 5706 case OPT_RX_COMP_QUEUES: 5707 adapter->opt_rx_comp_queues = 5708 be64_to_cpu(crq->query_capability.number); 5709 netdev_dbg(netdev, "opt_rx_comp_queues = %lld\n", 5710 adapter->opt_rx_comp_queues); 5711 break; 5712 case 
static int send_query_phys_parms(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;
	int rc;

	memset(&crq, 0, sizeof(crq));
	crq.query_phys_parms.first = IBMVNIC_CRQ_CMD;
	crq.query_phys_parms.cmd = QUERY_PHYS_PARMS;

	mutex_lock(&adapter->fw_lock);
	adapter->fw_done_rc = 0;
	reinit_completion(&adapter->fw_done);

	rc = ibmvnic_send_crq(adapter, &crq);
	if (rc) {
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}

	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
	if (rc) {
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}

	mutex_unlock(&adapter->fw_lock);
	return adapter->fw_done_rc ? -EIO : 0;
}
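
/* Translate the link speed reported by QUERY_PHYS_PARMS into the
 * generic ethtool SPEED_* values, cached in adapter->speed and
 * adapter->duplex for the driver's ethtool handlers to report. An
 * unrecognized value only warrants a warning while the carrier is up,
 * since the field carries no meaning when the link is down.
 */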
static int handle_query_phys_parms_rsp(union ibmvnic_crq *crq,
				       struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int rc;
	__be32 rspeed = cpu_to_be32(crq->query_phys_parms_rsp.speed);

	rc = crq->query_phys_parms_rsp.rc.code;
	if (rc) {
		netdev_err(netdev, "Error %d in QUERY_PHYS_PARMS\n", rc);
		return rc;
	}
	switch (rspeed) {
	case IBMVNIC_10MBPS:
		adapter->speed = SPEED_10;
		break;
	case IBMVNIC_100MBPS:
		adapter->speed = SPEED_100;
		break;
	case IBMVNIC_1GBPS:
		adapter->speed = SPEED_1000;
		break;
	case IBMVNIC_10GBPS:
		adapter->speed = SPEED_10000;
		break;
	case IBMVNIC_25GBPS:
		adapter->speed = SPEED_25000;
		break;
	case IBMVNIC_40GBPS:
		adapter->speed = SPEED_40000;
		break;
	case IBMVNIC_50GBPS:
		adapter->speed = SPEED_50000;
		break;
	case IBMVNIC_100GBPS:
		adapter->speed = SPEED_100000;
		break;
	case IBMVNIC_200GBPS:
		adapter->speed = SPEED_200000;
		break;
	default:
		if (netif_carrier_ok(netdev))
			netdev_warn(netdev, "Unknown speed 0x%08x\n", rspeed);
		adapter->speed = SPEED_UNKNOWN;
	}
	if (crq->query_phys_parms_rsp.flags1 & IBMVNIC_FULL_DUPLEX)
		adapter->duplex = DUPLEX_FULL;
	else if (crq->query_phys_parms_rsp.flags1 & IBMVNIC_HALF_DUPLEX)
		adapter->duplex = DUPLEX_HALF;
	else
		adapter->duplex = DUPLEX_UNKNOWN;

	return rc;
}
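
/* ibmvnic_handle_crq() is the main CRQ dispatcher, invoked from the CRQ
 * tasklet. The outer switch separates transport-level traffic (the
 * initialization handshake and XPORT events such as failover and
 * partition migration) from ordinary command responses; the inner
 * switch fans the command responses out to their individual handlers.
 */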
static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
			       struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_generic_crq *gen_crq = &crq->generic;
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	u64 *u64_crq = (u64 *)crq;
	long rc;

	netdev_dbg(netdev, "Handling CRQ: %016lx %016lx\n",
		   (unsigned long)cpu_to_be64(u64_crq[0]),
		   (unsigned long)cpu_to_be64(u64_crq[1]));
	switch (gen_crq->first) {
	case IBMVNIC_CRQ_INIT_RSP:
		switch (gen_crq->cmd) {
		case IBMVNIC_CRQ_INIT:
			dev_info(dev, "Partner initialized\n");
			adapter->from_passive_init = true;
			/* Discard any stale login responses from a previous
			 * reset. CHECK: should we clear even on INIT_COMPLETE?
			 */
			adapter->login_pending = false;

			if (adapter->state == VNIC_DOWN)
				rc = ibmvnic_reset(adapter, VNIC_RESET_PASSIVE_INIT);
			else
				rc = ibmvnic_reset(adapter, VNIC_RESET_FAILOVER);

			if (rc && rc != -EBUSY) {
				/* We were unable to schedule the failover
				 * reset either because the adapter was still
				 * probing (e.g. during kexec) or we could not
				 * allocate memory. Clear the failover_pending
				 * flag since no one else will. We ignore
				 * EBUSY because it means either a FAILOVER
				 * reset is already scheduled or the adapter
				 * is being removed.
				 */
				netdev_err(netdev,
					   "Error %ld scheduling failover reset\n",
					   rc);
				adapter->failover_pending = false;
			}

			if (!completion_done(&adapter->init_done)) {
				if (!adapter->init_done_rc)
					adapter->init_done_rc = -EAGAIN;
				complete(&adapter->init_done);
			}

			break;
		case IBMVNIC_CRQ_INIT_COMPLETE:
			dev_info(dev, "Partner initialization complete\n");
			adapter->crq.active = true;
			send_version_xchg(adapter);
			break;
		default:
			dev_err(dev, "Unknown crq cmd: %d\n", gen_crq->cmd);
		}
		return;
	case IBMVNIC_CRQ_XPORT_EVENT:
		netif_carrier_off(netdev);
		adapter->crq.active = false;
		/* terminate any thread waiting for a response
		 * from the device
		 */
		if (!completion_done(&adapter->fw_done)) {
			adapter->fw_done_rc = -EIO;
			complete(&adapter->fw_done);
		}

		/* if we got here during crq-init, retry crq-init */
		if (!completion_done(&adapter->init_done)) {
			adapter->init_done_rc = -EAGAIN;
			complete(&adapter->init_done);
		}

		if (!completion_done(&adapter->stats_done))
			complete(&adapter->stats_done);
		if (test_bit(0, &adapter->resetting))
			adapter->force_reset_recovery = true;
		if (gen_crq->cmd == IBMVNIC_PARTITION_MIGRATED) {
			dev_info(dev, "Migrated, re-enabling adapter\n");
			ibmvnic_reset(adapter, VNIC_RESET_MOBILITY);
		} else if (gen_crq->cmd == IBMVNIC_DEVICE_FAILOVER) {
			dev_info(dev, "Backing device failover detected\n");
			adapter->failover_pending = true;
		} else {
			/* The adapter lost the connection */
			dev_err(dev, "Virtual Adapter failed (rc=%d)\n",
				gen_crq->cmd);
			ibmvnic_reset(adapter, VNIC_RESET_FATAL);
		}
		return;
	case IBMVNIC_CRQ_CMD_RSP:
		break;
	default:
		dev_err(dev, "Got an invalid msg type 0x%02x\n",
			gen_crq->first);
		return;
	}

	switch (gen_crq->cmd) {
	case VERSION_EXCHANGE_RSP:
		rc = crq->version_exchange_rsp.rc.code;
		if (rc) {
			dev_err(dev, "Error %ld in VERSION_EXCHG_RSP\n", rc);
			break;
		}
		ibmvnic_version =
		    be16_to_cpu(crq->version_exchange_rsp.version);
		dev_info(dev, "Partner protocol version is %d\n",
			 ibmvnic_version);
		send_query_cap(adapter);
		break;
	case QUERY_CAPABILITY_RSP:
		handle_query_cap_rsp(crq, adapter);
		break;
	case QUERY_MAP_RSP:
		handle_query_map_rsp(crq, adapter);
		break;
	case REQUEST_MAP_RSP:
		adapter->fw_done_rc = crq->request_map_rsp.rc.code;
		complete(&adapter->fw_done);
		break;
	case REQUEST_UNMAP_RSP:
		handle_request_unmap_rsp(crq, adapter);
		break;
	case REQUEST_CAPABILITY_RSP:
		handle_request_cap_rsp(crq, adapter);
		break;
	case LOGIN_RSP:
		netdev_dbg(netdev, "Got Login Response\n");
		handle_login_rsp(crq, adapter);
		break;
	case LOGICAL_LINK_STATE_RSP:
		netdev_dbg(netdev,
			   "Got Logical Link State Response, state: %d rc: %d\n",
			   crq->logical_link_state_rsp.link_state,
			   crq->logical_link_state_rsp.rc.code);
		adapter->logical_link_state =
		    crq->logical_link_state_rsp.link_state;
		adapter->init_done_rc = crq->logical_link_state_rsp.rc.code;
		complete(&adapter->init_done);
		break;
	case LINK_STATE_INDICATION:
		netdev_dbg(netdev, "Got Logical Link State Indication\n");
		adapter->phys_link_state =
		    crq->link_state_indication.phys_link_state;
		adapter->logical_link_state =
		    crq->link_state_indication.logical_link_state;
		if (adapter->phys_link_state && adapter->logical_link_state)
			netif_carrier_on(netdev);
		else
			netif_carrier_off(netdev);
		break;
	case CHANGE_MAC_ADDR_RSP:
		netdev_dbg(netdev, "Got MAC address change Response\n");
		adapter->fw_done_rc = handle_change_mac_rsp(crq, adapter);
		break;
	case ERROR_INDICATION:
		netdev_dbg(netdev, "Got Error Indication\n");
		handle_error_indication(crq, adapter);
		break;
	case REQUEST_STATISTICS_RSP:
		netdev_dbg(netdev, "Got Statistics Response\n");
		complete(&adapter->stats_done);
		break;
	case QUERY_IP_OFFLOAD_RSP:
		netdev_dbg(netdev, "Got Query IP offload Response\n");
		handle_query_ip_offload_rsp(adapter);
		break;
	case MULTICAST_CTRL_RSP:
		netdev_dbg(netdev, "Got multicast control Response\n");
		break;
	case CONTROL_IP_OFFLOAD_RSP:
		netdev_dbg(netdev, "Got Control IP offload Response\n");
		dma_unmap_single(dev, adapter->ip_offload_ctrl_tok,
				 sizeof(adapter->ip_offload_ctrl),
				 DMA_TO_DEVICE);
		complete(&adapter->init_done);
		break;
	case COLLECT_FW_TRACE_RSP:
		netdev_dbg(netdev, "Got Collect firmware trace Response\n");
		complete(&adapter->fw_done);
		break;
	case GET_VPD_SIZE_RSP:
		handle_vpd_size_rsp(crq, adapter);
		break;
	case GET_VPD_RSP:
		handle_vpd_rsp(crq, adapter);
		break;
	case QUERY_PHYS_PARMS_RSP:
		adapter->fw_done_rc = handle_query_phys_parms_rsp(crq, adapter);
		complete(&adapter->fw_done);
		break;
	default:
		netdev_err(netdev, "Got an invalid cmd type 0x%02x\n",
			   gen_crq->cmd);
	}
}
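
/* The CRQ hard interrupt only schedules the tasklet; all message
 * processing happens in ibmvnic_tasklet() below, with the queue lock
 * held.
 */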
static irqreturn_t ibmvnic_interrupt(int irq, void *instance)
{
	struct ibmvnic_adapter *adapter = instance;

	tasklet_schedule(&adapter->tasklet);
	return IRQ_HANDLED;
}

static void ibmvnic_tasklet(struct tasklet_struct *t)
{
	struct ibmvnic_adapter *adapter = from_tasklet(adapter, t, tasklet);
	struct ibmvnic_crq_queue *queue = &adapter->crq;
	union ibmvnic_crq *crq;
	unsigned long flags;

	spin_lock_irqsave(&queue->lock, flags);

	/* Pull all the valid messages off the CRQ */
	while ((crq = ibmvnic_next_crq(adapter)) != NULL) {
		/* This barrier makes sure ibmvnic_next_crq()'s
		 * crq->generic.first & IBMVNIC_CRQ_CMD_RSP is loaded
		 * before ibmvnic_handle_crq()'s
		 * switch(gen_crq->first) and switch(gen_crq->cmd).
		 */
		dma_rmb();
		ibmvnic_handle_crq(crq, adapter);
		crq->generic.first = 0;
	}

	spin_unlock_irqrestore(&queue->lock, flags);
}
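
/* The CRQ management hcalls below (H_ENABLE_CRQ, H_FREE_CRQ, H_REG_CRQ)
 * may return busy indications while the hypervisor works on the request,
 * so each is retried until it either succeeds or fails outright.
 */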
static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct vio_dev *vdev = adapter->vdev;
	int rc;

	do {
		rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
	} while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));

	if (rc)
		dev_err(&vdev->dev, "Error enabling adapter (rc=%d)\n", rc);

	return rc;
}

static int ibmvnic_reset_crq(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct device *dev = &adapter->vdev->dev;
	struct vio_dev *vdev = adapter->vdev;
	int rc;

	/* Close the CRQ */
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	/* Clean out the queue */
	if (!crq->msgs)
		return -EINVAL;

	memset(crq->msgs, 0, PAGE_SIZE);
	crq->cur = 0;
	crq->active = false;

	/* And re-open it again */
	rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
				crq->msg_token, PAGE_SIZE);

	if (rc == H_CLOSED)
		/* Adapter is good, but other end is not ready */
		dev_warn(dev, "Partner adapter not ready\n");
	else if (rc != 0)
		dev_warn(dev, "Couldn't register crq (rc=%d)\n", rc);

	return rc;
}

static void release_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct vio_dev *vdev = adapter->vdev;
	long rc;

	if (!crq->msgs)
		return;

	netdev_dbg(adapter->netdev, "Releasing CRQ\n");
	free_irq(vdev->irq, adapter);
	tasklet_kill(&adapter->tasklet);
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	dma_unmap_single(&vdev->dev, crq->msg_token, PAGE_SIZE,
			 DMA_BIDIRECTIONAL);
	free_page((unsigned long)crq->msgs);
	crq->msgs = NULL;
	crq->active = false;
}
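
/* init_crq_queue() allocates the one-page CRQ message buffer, maps it
 * for DMA, registers it with the hypervisor, and wires up the CRQ
 * interrupt and tasklet. H_RESOURCE on registration usually means a
 * previous owner of the queue (for example, a kexec'd kernel) left it
 * registered, so a CRQ reset is attempted before giving up.
 */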
static int init_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct device *dev = &adapter->vdev->dev;
	struct vio_dev *vdev = adapter->vdev;
	int rc, retrc = -ENOMEM;

	if (crq->msgs)
		return 0;

	crq->msgs = (union ibmvnic_crq *)get_zeroed_page(GFP_KERNEL);
	/* Should we allocate more than one page? */

	if (!crq->msgs)
		return -ENOMEM;

	crq->size = PAGE_SIZE / sizeof(*crq->msgs);
	crq->msg_token = dma_map_single(dev, crq->msgs, PAGE_SIZE,
					DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, crq->msg_token))
		goto map_failed;

	rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
				crq->msg_token, PAGE_SIZE);

	if (rc == H_RESOURCE)
		/* maybe kexecing and resource is busy, try a reset */
		rc = ibmvnic_reset_crq(adapter);
	retrc = rc;

	if (rc == H_CLOSED) {
		dev_warn(dev, "Partner adapter not ready\n");
	} else if (rc) {
		dev_warn(dev, "Error %d opening adapter\n", rc);
		goto reg_crq_failed;
	}

	retrc = 0;

	tasklet_setup(&adapter->tasklet, (void *)ibmvnic_tasklet);

	netdev_dbg(adapter->netdev, "registering irq 0x%x\n", vdev->irq);
	snprintf(crq->name, sizeof(crq->name), "ibmvnic-%x",
		 adapter->vdev->unit_address);
	rc = request_irq(vdev->irq, ibmvnic_interrupt, 0, crq->name, adapter);
	if (rc) {
		dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n",
			vdev->irq, rc);
		goto req_irq_failed;
	}

	rc = vio_enable_interrupts(vdev);
	if (rc) {
		dev_err(dev, "Error %d enabling interrupts\n", rc);
		goto req_irq_failed;
	}

	crq->cur = 0;
	spin_lock_init(&crq->lock);

	/* process any CRQs that were queued before we enabled interrupts */
	tasklet_schedule(&adapter->tasklet);

	return retrc;

req_irq_failed:
	tasklet_kill(&adapter->tasklet);
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
reg_crq_failed:
	dma_unmap_single(dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
map_failed:
	free_page((unsigned long)crq->msgs);
	crq->msgs = NULL;
	return retrc;
}
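
/* ibmvnic_reset_init() drives the CRQ initialization handshake with the
 * server and then (re)builds the sub-CRQs. During a reset that did not
 * change the number of requested queues, the existing sub-CRQs are kept
 * and merely reset; otherwise they are released and allocated afresh.
 */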
static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter, bool reset)
{
	struct device *dev = &adapter->vdev->dev;
	unsigned long timeout = msecs_to_jiffies(20000);
	u64 old_num_rx_queues = adapter->req_rx_queues;
	u64 old_num_tx_queues = adapter->req_tx_queues;
	int rc;

	adapter->from_passive_init = false;

	rc = ibmvnic_send_crq_init(adapter);
	if (rc) {
		dev_err(dev, "Send crq init failed with error %d\n", rc);
		return rc;
	}

	if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
		dev_err(dev, "Initialization sequence timed out\n");
		return -ETIMEDOUT;
	}

	if (adapter->init_done_rc) {
		release_crq_queue(adapter);
		dev_err(dev, "CRQ-init failed, %d\n", adapter->init_done_rc);
		return adapter->init_done_rc;
	}

	if (adapter->from_passive_init) {
		adapter->state = VNIC_OPEN;
		adapter->from_passive_init = false;
		dev_err(dev, "CRQ-init failed, passive-init\n");
		return -EINVAL;
	}

	if (reset &&
	    test_bit(0, &adapter->resetting) && !adapter->wait_for_reset &&
	    adapter->reset_reason != VNIC_RESET_MOBILITY) {
		if (adapter->req_rx_queues != old_num_rx_queues ||
		    adapter->req_tx_queues != old_num_tx_queues) {
			release_sub_crqs(adapter, 0);
			rc = init_sub_crqs(adapter);
		} else {
			/* no need to reinitialize completely, but we do
			 * need to clean up transmits that were in flight
			 * when we processed the reset. Failure to do so
			 * will confound the upper layer, usually TCP, by
			 * creating the illusion of transmits that are
			 * awaiting completion.
			 */
			clean_tx_pools(adapter);

			rc = reset_sub_crq_queues(adapter);
		}
	} else {
		rc = init_sub_crqs(adapter);
	}

	if (rc) {
		dev_err(dev, "Initialization of sub crqs failed\n");
		release_crq_queue(adapter);
		return rc;
	}

	rc = init_sub_crq_irqs(adapter);
	if (rc) {
		dev_err(dev, "Failed to initialize sub crq irqs\n");
		release_crq_queue(adapter);
	}

	return rc;
}

static struct device_attribute dev_attr_failover;
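
/* ibmvnic_probe() allocates the net_device, reads the MAC address from
 * the VIO device tree attributes, and retries CRQ initialization for as
 * long as it fails with -EAGAIN (for example, because of transport
 * events racing with init). The netdev is registered even when
 * initialization fails, so that a later passive init from the server
 * can bring the device up.
 */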
static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
{
	struct ibmvnic_adapter *adapter;
	struct net_device *netdev;
	unsigned char *mac_addr_p;
	unsigned long flags;
	bool init_success;
	int rc;

	dev_dbg(&dev->dev, "entering ibmvnic_probe for UA 0x%x\n",
		dev->unit_address);

	mac_addr_p = (unsigned char *)vio_get_attribute(dev,
							VETH_MAC_ADDR, NULL);
	if (!mac_addr_p) {
		dev_err(&dev->dev,
			"(%s:%3.3d) ERROR: Can't find MAC_ADDR attribute\n",
			__FILE__, __LINE__);
		return 0;
	}

	netdev = alloc_etherdev_mq(sizeof(struct ibmvnic_adapter),
				   IBMVNIC_MAX_QUEUES);
	if (!netdev)
		return -ENOMEM;

	adapter = netdev_priv(netdev);
	adapter->state = VNIC_PROBING;
	dev_set_drvdata(&dev->dev, netdev);
	adapter->vdev = dev;
	adapter->netdev = netdev;
	adapter->login_pending = false;
	memset(&adapter->map_ids, 0, sizeof(adapter->map_ids));
	/* map_ids start at 1, so ensure map_id 0 is always "in-use" */
	bitmap_set(adapter->map_ids, 0, 1);

	ether_addr_copy(adapter->mac_addr, mac_addr_p);
	eth_hw_addr_set(netdev, adapter->mac_addr);
	netdev->irq = dev->irq;
	netdev->netdev_ops = &ibmvnic_netdev_ops;
	netdev->ethtool_ops = &ibmvnic_ethtool_ops;
	SET_NETDEV_DEV(netdev, &dev->dev);

	INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset);
	INIT_DELAYED_WORK(&adapter->ibmvnic_delayed_reset,
			  __ibmvnic_delayed_reset);
	INIT_LIST_HEAD(&adapter->rwi_list);
	spin_lock_init(&adapter->rwi_lock);
	spin_lock_init(&adapter->state_lock);
	mutex_init(&adapter->fw_lock);
	init_completion(&adapter->probe_done);
	init_completion(&adapter->init_done);
	init_completion(&adapter->fw_done);
	init_completion(&adapter->reset_done);
	init_completion(&adapter->stats_done);
	clear_bit(0, &adapter->resetting);
	adapter->prev_rx_buf_sz = 0;
	adapter->prev_mtu = 0;

	init_success = false;
	do {
		reinit_init_done(adapter);

		/* clear any failovers we got in the previous pass
		 * since we are reinitializing the CRQ
		 */
		adapter->failover_pending = false;

		/* If we had already initialized CRQ, we may have one or
		 * more resets queued already. Discard those and release
		 * the CRQ before initializing the CRQ again.
		 */
		release_crq_queue(adapter);

		/* Since we are still in PROBING state, __ibmvnic_reset()
		 * will not access the ->rwi_list and since we released CRQ,
		 * we won't get _new_ transport events. But there may be an
		 * ongoing ibmvnic_reset() call. So serialize access to
		 * rwi_list. If we win the race, ibmvnic_reset() could add
		 * a reset after we purged but that's OK - we just may end
		 * up with an extra reset (i.e. similar to having two or more
		 * resets in the queue at once).
		 */
		spin_lock_irqsave(&adapter->rwi_lock, flags);
		flush_reset_queue(adapter);
		spin_unlock_irqrestore(&adapter->rwi_lock, flags);

		rc = init_crq_queue(adapter);
		if (rc) {
			dev_err(&dev->dev, "Couldn't initialize crq. rc=%d\n",
				rc);
			goto ibmvnic_init_fail;
		}

		rc = ibmvnic_reset_init(adapter, false);
	} while (rc == -EAGAIN);

	/* We are ignoring the error from ibmvnic_reset_init() assuming that the
	 * partner is not ready. CRQ is not active. When the partner becomes
	 * ready, we will do the passive init reset.
	 */

	if (!rc)
		init_success = true;

	rc = init_stats_buffers(adapter);
	if (rc)
		goto ibmvnic_init_fail;

	rc = init_stats_token(adapter);
	if (rc)
		goto ibmvnic_stats_fail;

	rc = device_create_file(&dev->dev, &dev_attr_failover);
	if (rc)
		goto ibmvnic_dev_file_err;

	netif_carrier_off(netdev);

	if (init_success) {
		adapter->state = VNIC_PROBED;
		netdev->mtu = adapter->req_mtu - ETH_HLEN;
		netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
		netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
	} else {
		adapter->state = VNIC_DOWN;
	}

	adapter->wait_for_reset = false;
	adapter->last_reset_time = jiffies;

	rc = register_netdev(netdev);
	if (rc) {
		dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc);
		goto ibmvnic_register_fail;
	}
	dev_info(&dev->dev, "ibmvnic registered\n");

	rc = ibmvnic_cpu_notif_add(adapter);
	if (rc) {
		netdev_err(netdev, "Registering cpu notifier failed\n");
		goto cpu_notif_add_failed;
	}

	complete(&adapter->probe_done);

	return 0;

cpu_notif_add_failed:
	unregister_netdev(netdev);

ibmvnic_register_fail:
	device_remove_file(&dev->dev, &dev_attr_failover);

ibmvnic_dev_file_err:
	release_stats_token(adapter);

ibmvnic_stats_fail:
	release_stats_buffers(adapter);

ibmvnic_init_fail:
	release_sub_crqs(adapter, 1);
	release_crq_queue(adapter);

	/* cleanup worker thread after releasing CRQ so we don't get
	 * transport events (i.e. new work items for the worker thread).
	 */
	adapter->state = VNIC_REMOVING;
	complete(&adapter->probe_done);
	flush_work(&adapter->ibmvnic_reset);
	flush_delayed_work(&adapter->ibmvnic_delayed_reset);

	flush_reset_queue(adapter);

	mutex_destroy(&adapter->fw_lock);
	free_netdev(netdev);

	return rc;
}
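
/* ibmvnic_remove() must quiesce the reset worker before tearing anything
 * down: the state is moved to VNIC_REMOVING under both state_lock and
 * rwi_lock so that ibmvnic_reset() stops queueing new work, and the
 * pending work items are flushed before the queues and CRQ are released.
 */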
static void ibmvnic_remove(struct vio_dev *dev)
{
	struct net_device *netdev = dev_get_drvdata(&dev->dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	unsigned long flags;

	spin_lock_irqsave(&adapter->state_lock, flags);

	/* If ibmvnic_reset() is scheduling a reset, wait for it to
	 * finish. Then, set the state to REMOVING to prevent it from
	 * scheduling any more work and to have reset functions ignore
	 * any resets that have already been scheduled. Drop the lock
	 * after setting state, so __ibmvnic_reset(), which is called
	 * from the flush_work() below, can make progress.
	 */
	spin_lock(&adapter->rwi_lock);
	adapter->state = VNIC_REMOVING;
	spin_unlock(&adapter->rwi_lock);

	spin_unlock_irqrestore(&adapter->state_lock, flags);

	ibmvnic_cpu_notif_remove(adapter);

	flush_work(&adapter->ibmvnic_reset);
	flush_delayed_work(&adapter->ibmvnic_delayed_reset);

	rtnl_lock();
	unregister_netdevice(netdev);

	release_resources(adapter);
	release_rx_pools(adapter);
	release_tx_pools(adapter);
	release_sub_crqs(adapter, 1);
	release_crq_queue(adapter);

	release_stats_token(adapter);
	release_stats_buffers(adapter);

	adapter->state = VNIC_REMOVED;

	rtnl_unlock();
	mutex_destroy(&adapter->fw_lock);
	device_remove_file(&dev->dev, &dev_attr_failover);
	free_netdev(netdev);
	dev_set_drvdata(&dev->dev, NULL);
}

/* Writing "1" to the sysfs "failover" attribute asks the hypervisor to
 * fail the session over via H_VIOCTL; if the session token cannot be
 * retrieved or the hcall fails, fall back to scheduling a FAILOVER reset.
 */
static ssize_t failover_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	__be64 session_token;
	long rc;

	if (!sysfs_streq(buf, "1"))
		return -EINVAL;

	rc = plpar_hcall(H_VIOCTL, retbuf, adapter->vdev->unit_address,
			 H_GET_SESSION_TOKEN, 0, 0, 0);
	if (rc) {
		netdev_err(netdev, "Couldn't retrieve session token, rc %ld\n",
			   rc);
		goto last_resort;
	}

	session_token = (__be64)retbuf[0];
	netdev_dbg(netdev, "Initiating client failover, session id %llx\n",
		   be64_to_cpu(session_token));
	rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
				H_SESSION_ERR_DETECTED, session_token, 0, 0);
	if (rc) {
		netdev_err(netdev,
			   "H_VIOCTL initiated failover failed, rc %ld\n",
			   rc);
		goto last_resort;
	}

	return count;

last_resort:
	netdev_dbg(netdev, "Trying to send CRQ_CMD, the last resort\n");
	ibmvnic_reset(adapter, VNIC_RESET_FAILOVER);

	return count;
}
static DEVICE_ATTR_WO(failover);
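
/* ibmvnic_get_desired_dma() reports how much DMA-mappable space the
 * device wants from the platform: one page for the CRQ, the statistics
 * buffer, four pages per sub-CRQ, plus every long-term-mapped RX buffer.
 * Before probe finishes there is no netdev to inspect, so a default
 * entitlement is returned.
 */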
static unsigned long ibmvnic_get_desired_dma(struct vio_dev *vdev)
{
	struct net_device *netdev = dev_get_drvdata(&vdev->dev);
	struct ibmvnic_adapter *adapter;
	struct iommu_table *tbl;
	unsigned long ret = 0;
	int i;

	tbl = get_iommu_table_base(&vdev->dev);

	/* netdev inits at probe time along with the structures we need below */
	if (!netdev)
		return IOMMU_PAGE_ALIGN(IBMVNIC_IO_ENTITLEMENT_DEFAULT, tbl);

	adapter = netdev_priv(netdev);

	ret += PAGE_SIZE; /* the crq message queue */
	ret += IOMMU_PAGE_ALIGN(sizeof(struct ibmvnic_statistics), tbl);

	for (i = 0; i < adapter->req_tx_queues + adapter->req_rx_queues; i++)
		ret += 4 * PAGE_SIZE; /* the scrq message queue */

	for (i = 0; i < adapter->num_active_rx_pools; i++)
		ret += adapter->rx_pool[i].size *
		    IOMMU_PAGE_ALIGN(adapter->rx_pool[i].buff_size, tbl);

	return ret;
}

static int ibmvnic_resume(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	if (adapter->state != VNIC_OPEN)
		return 0;

	tasklet_schedule(&adapter->tasklet);

	return 0;
}

static const struct vio_device_id ibmvnic_device_table[] = {
	{"network", "IBM,vnic"},
	{"", "" }
};
MODULE_DEVICE_TABLE(vio, ibmvnic_device_table);

static const struct dev_pm_ops ibmvnic_pm_ops = {
	.resume = ibmvnic_resume
};

static struct vio_driver ibmvnic_driver = {
	.id_table = ibmvnic_device_table,
	.probe = ibmvnic_probe,
	.remove = ibmvnic_remove,
	.get_desired_dma = ibmvnic_get_desired_dma,
	.name = ibmvnic_driver_name,
	.pm = &ibmvnic_pm_ops,
};

/* module functions */
static int __init ibmvnic_module_init(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "net/ibmvnic:online",
				      ibmvnic_cpu_online,
				      ibmvnic_cpu_down_prep);
	if (ret < 0)
		goto out;
	ibmvnic_online = ret;
	ret = cpuhp_setup_state_multi(CPUHP_IBMVNIC_DEAD, "net/ibmvnic:dead",
				      NULL, ibmvnic_cpu_dead);
	if (ret)
		goto err_dead;

	ret = vio_register_driver(&ibmvnic_driver);
	if (ret)
		goto err_vio_register;

	pr_info("%s: %s %s\n", ibmvnic_driver_name, ibmvnic_driver_string,
		IBMVNIC_DRIVER_VERSION);

	return 0;
err_vio_register:
	cpuhp_remove_multi_state(CPUHP_IBMVNIC_DEAD);
err_dead:
	cpuhp_remove_multi_state(ibmvnic_online);
out:
	return ret;
}

static void __exit ibmvnic_module_exit(void)
{
	vio_unregister_driver(&ibmvnic_driver);
	cpuhp_remove_multi_state(CPUHP_IBMVNIC_DEAD);
	cpuhp_remove_multi_state(ibmvnic_online);
}

module_init(ibmvnic_module_init);
module_exit(ibmvnic_module_exit);