// SPDX-License-Identifier: GPL-2.0-or-later
/**************************************************************************/
/*                                                                        */
/* IBM System i and System p Virtual NIC Device Driver                   */
/* Copyright (C) 2014 IBM Corp.                                          */
/* Santiago Leon (santi_leon@yahoo.com)                                  */
/* Thomas Falcon (tlfalcon@linux.vnet.ibm.com)                           */
/* John Allen (jallen@linux.vnet.ibm.com)                                */
/*                                                                        */
/*                                                                        */
/* This module contains the implementation of a virtual ethernet device  */
/* for use with IBM i/p Series LPAR Linux. It utilizes the logical LAN   */
/* option of the RS/6000 Platform Architecture to interface with virtual */
/* ethernet NICs that are presented to the partition by the hypervisor.  */
/*                                                                        */
/* Messages are passed between the VNIC driver and the VNIC server using */
/* Command/Response Queues (CRQs) and sub CRQs (sCRQs). CRQs are used to */
/* issue and receive commands that initiate communication with the server */
/* on driver initialization. Sub CRQs (sCRQs) are similar to CRQs, but   */
/* are used by the driver to notify the server that a packet is          */
/* ready for transmission or that a buffer has been added to receive a   */
/* packet. Subsequently, sCRQs are used by the server to notify the      */
/* driver that a packet transmission has been completed or that a packet */
/* has been received and placed in a waiting buffer.                     */
/*                                                                        */
/* In lieu of a more conventional "on-the-fly" DMA mapping strategy in   */
/* which skbs are DMA mapped and immediately unmapped when the transmit  */
/* or receive has been completed, the VNIC driver is required to use     */
/* "long term mapping". This entails that large, contiguous DMA mapped   */
/* buffers are allocated on driver initialization and these buffers are  */
/* then continuously reused to pass skbs to and from the VNIC server.    */
/*                                                                        */
/**************************************************************************/

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/completion.h>
#include <linux/ioport.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/ethtool.h>
#include <linux/proc_fs.h>
#include <linux/if_arp.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kthread.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <net/net_namespace.h>
#include <asm/hvcall.h>
#include <linux/atomic.h>
#include <asm/vio.h>
#include <asm/xive.h>
#include <asm/iommu.h>
#include <linux/uaccess.h>
#include <asm/firmware.h>
#include <linux/workqueue.h>
#include <linux/if_vlan.h>
#include <linux/utsname.h>
#include <linux/cpu.h>

#include "ibmvnic.h"

static const char ibmvnic_driver_name[] = "ibmvnic";
static const char ibmvnic_driver_string[] = "IBM System i/p Virtual NIC Driver";

MODULE_AUTHOR("Santiago Leon");
MODULE_DESCRIPTION("IBM System i/p Virtual NIC Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(IBMVNIC_DRIVER_VERSION);

static int ibmvnic_version = IBMVNIC_INITIAL_VERSION;
static void release_sub_crqs(struct ibmvnic_adapter *, bool);
static int ibmvnic_reset_crq(struct ibmvnic_adapter *);
static int ibmvnic_send_crq_init(struct ibmvnic_adapter *);
static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *);
static int ibmvnic_send_crq(struct ibmvnic_adapter *, union ibmvnic_crq *);
static int send_subcrq_indirect(struct ibmvnic_adapter *, u64, u64, u64);
static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance);
static int enable_scrq_irq(struct ibmvnic_adapter *,
			   struct ibmvnic_sub_crq_queue *);
static int disable_scrq_irq(struct ibmvnic_adapter *,
			    struct ibmvnic_sub_crq_queue *);
static int pending_scrq(struct ibmvnic_adapter *,
			struct ibmvnic_sub_crq_queue *);
static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *,
					struct ibmvnic_sub_crq_queue *);
static int ibmvnic_poll(struct napi_struct *napi, int data);
static void send_query_map(struct ibmvnic_adapter *adapter);
static int send_request_map(struct ibmvnic_adapter *, dma_addr_t, u32, u8);
static int send_request_unmap(struct ibmvnic_adapter *, u8);
static int send_login(struct ibmvnic_adapter *adapter);
static void send_query_cap(struct ibmvnic_adapter *adapter);
static int init_sub_crqs(struct ibmvnic_adapter *);
static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter);
static int ibmvnic_reset_init(struct ibmvnic_adapter *, bool reset);
static void release_crq_queue(struct ibmvnic_adapter *);
static int __ibmvnic_set_mac(struct net_device *, u8 *);
static int init_crq_queue(struct ibmvnic_adapter *adapter);
static int send_query_phys_parms(struct ibmvnic_adapter *adapter);
static void ibmvnic_tx_scrq_clean_buffer(struct ibmvnic_adapter *adapter,
					 struct ibmvnic_sub_crq_queue *tx_scrq);
static void free_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb);
static void
ibmvnic_disable_irqs(struct ibmvnic_adapter *adapter); 117 118 struct ibmvnic_stat { 119 char name[ETH_GSTRING_LEN]; 120 int offset; 121 }; 122 123 #define IBMVNIC_STAT_OFF(stat) (offsetof(struct ibmvnic_adapter, stats) + \ 124 offsetof(struct ibmvnic_statistics, stat)) 125 #define IBMVNIC_GET_STAT(a, off) (*((u64 *)(((unsigned long)(a)) + (off)))) 126 127 static const struct ibmvnic_stat ibmvnic_stats[] = { 128 {"rx_packets", IBMVNIC_STAT_OFF(rx_packets)}, 129 {"rx_bytes", IBMVNIC_STAT_OFF(rx_bytes)}, 130 {"tx_packets", IBMVNIC_STAT_OFF(tx_packets)}, 131 {"tx_bytes", IBMVNIC_STAT_OFF(tx_bytes)}, 132 {"ucast_tx_packets", IBMVNIC_STAT_OFF(ucast_tx_packets)}, 133 {"ucast_rx_packets", IBMVNIC_STAT_OFF(ucast_rx_packets)}, 134 {"mcast_tx_packets", IBMVNIC_STAT_OFF(mcast_tx_packets)}, 135 {"mcast_rx_packets", IBMVNIC_STAT_OFF(mcast_rx_packets)}, 136 {"bcast_tx_packets", IBMVNIC_STAT_OFF(bcast_tx_packets)}, 137 {"bcast_rx_packets", IBMVNIC_STAT_OFF(bcast_rx_packets)}, 138 {"align_errors", IBMVNIC_STAT_OFF(align_errors)}, 139 {"fcs_errors", IBMVNIC_STAT_OFF(fcs_errors)}, 140 {"single_collision_frames", IBMVNIC_STAT_OFF(single_collision_frames)}, 141 {"multi_collision_frames", IBMVNIC_STAT_OFF(multi_collision_frames)}, 142 {"sqe_test_errors", IBMVNIC_STAT_OFF(sqe_test_errors)}, 143 {"deferred_tx", IBMVNIC_STAT_OFF(deferred_tx)}, 144 {"late_collisions", IBMVNIC_STAT_OFF(late_collisions)}, 145 {"excess_collisions", IBMVNIC_STAT_OFF(excess_collisions)}, 146 {"internal_mac_tx_errors", IBMVNIC_STAT_OFF(internal_mac_tx_errors)}, 147 {"carrier_sense", IBMVNIC_STAT_OFF(carrier_sense)}, 148 {"too_long_frames", IBMVNIC_STAT_OFF(too_long_frames)}, 149 {"internal_mac_rx_errors", IBMVNIC_STAT_OFF(internal_mac_rx_errors)}, 150 }; 151 152 static int send_crq_init_complete(struct ibmvnic_adapter *adapter) 153 { 154 union ibmvnic_crq crq; 155 156 memset(&crq, 0, sizeof(crq)); 157 crq.generic.first = IBMVNIC_CRQ_INIT_CMD; 158 crq.generic.cmd = IBMVNIC_CRQ_INIT_COMPLETE; 159 160 return ibmvnic_send_crq(adapter, &crq); 161 } 162 163 static int send_version_xchg(struct ibmvnic_adapter *adapter) 164 { 165 union ibmvnic_crq crq; 166 167 memset(&crq, 0, sizeof(crq)); 168 crq.version_exchange.first = IBMVNIC_CRQ_CMD; 169 crq.version_exchange.cmd = VERSION_EXCHANGE; 170 crq.version_exchange.version = cpu_to_be16(ibmvnic_version); 171 172 return ibmvnic_send_crq(adapter, &crq); 173 } 174 175 static void ibmvnic_clean_queue_affinity(struct ibmvnic_adapter *adapter, 176 struct ibmvnic_sub_crq_queue *queue) 177 { 178 if (!(queue && queue->irq)) 179 return; 180 181 cpumask_clear(queue->affinity_mask); 182 183 if (irq_set_affinity_and_hint(queue->irq, NULL)) 184 netdev_warn(adapter->netdev, 185 "%s: Clear affinity failed, queue addr = %p, IRQ = %d\n", 186 __func__, queue, queue->irq); 187 } 188 189 static void ibmvnic_clean_affinity(struct ibmvnic_adapter *adapter) 190 { 191 struct ibmvnic_sub_crq_queue **rxqs; 192 struct ibmvnic_sub_crq_queue **txqs; 193 int num_rxqs, num_txqs; 194 int rc, i; 195 196 rc = 0; 197 rxqs = adapter->rx_scrq; 198 txqs = adapter->tx_scrq; 199 num_txqs = adapter->num_active_tx_scrqs; 200 num_rxqs = adapter->num_active_rx_scrqs; 201 202 netdev_dbg(adapter->netdev, "%s: Cleaning irq affinity hints", __func__); 203 if (txqs) { 204 for (i = 0; i < num_txqs; i++) 205 ibmvnic_clean_queue_affinity(adapter, txqs[i]); 206 } 207 if (rxqs) { 208 for (i = 0; i < num_rxqs; i++) 209 ibmvnic_clean_queue_affinity(adapter, rxqs[i]); 210 } 211 } 212 213 static int ibmvnic_set_queue_affinity(struct ibmvnic_sub_crq_queue 
*queue, 214 unsigned int *cpu, int *stragglers, 215 int stride) 216 { 217 cpumask_var_t mask; 218 int i; 219 int rc = 0; 220 221 if (!(queue && queue->irq)) 222 return rc; 223 224 /* cpumask_var_t is either a pointer or array, allocation works here */ 225 if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) 226 return -ENOMEM; 227 228 /* while we have extra cpu give one extra to this irq */ 229 if (*stragglers) { 230 stride++; 231 (*stragglers)--; 232 } 233 /* atomic write is safer than writing bit by bit directly */ 234 for (i = 0; i < stride; i++) { 235 cpumask_set_cpu(*cpu, mask); 236 *cpu = cpumask_next_wrap(*cpu, cpu_online_mask, 237 nr_cpu_ids, false); 238 } 239 /* set queue affinity mask */ 240 cpumask_copy(queue->affinity_mask, mask); 241 rc = irq_set_affinity_and_hint(queue->irq, queue->affinity_mask); 242 free_cpumask_var(mask); 243 244 return rc; 245 } 246 247 /* assumes cpu read lock is held */ 248 static void ibmvnic_set_affinity(struct ibmvnic_adapter *adapter) 249 { 250 struct ibmvnic_sub_crq_queue **rxqs = adapter->rx_scrq; 251 struct ibmvnic_sub_crq_queue **txqs = adapter->tx_scrq; 252 struct ibmvnic_sub_crq_queue *queue; 253 int num_rxqs = adapter->num_active_rx_scrqs; 254 int num_txqs = adapter->num_active_tx_scrqs; 255 int total_queues, stride, stragglers, i; 256 unsigned int num_cpu, cpu; 257 int rc = 0; 258 259 netdev_dbg(adapter->netdev, "%s: Setting irq affinity hints", __func__); 260 if (!(adapter->rx_scrq && adapter->tx_scrq)) { 261 netdev_warn(adapter->netdev, 262 "%s: Set affinity failed, queues not allocated\n", 263 __func__); 264 return; 265 } 266 267 total_queues = num_rxqs + num_txqs; 268 num_cpu = num_online_cpus(); 269 /* number of cpu's assigned per irq */ 270 stride = max_t(int, num_cpu / total_queues, 1); 271 /* number of leftover cpu's */ 272 stragglers = num_cpu >= total_queues ? 
num_cpu % total_queues : 0; 273 /* next available cpu to assign irq to */ 274 cpu = cpumask_next(-1, cpu_online_mask); 275 276 for (i = 0; i < num_txqs; i++) { 277 queue = txqs[i]; 278 rc = ibmvnic_set_queue_affinity(queue, &cpu, &stragglers, 279 stride); 280 if (rc) 281 goto out; 282 } 283 284 for (i = 0; i < num_rxqs; i++) { 285 queue = rxqs[i]; 286 rc = ibmvnic_set_queue_affinity(queue, &cpu, &stragglers, 287 stride); 288 if (rc) 289 goto out; 290 } 291 292 out: 293 if (rc) { 294 netdev_warn(adapter->netdev, 295 "%s: Set affinity failed, queue addr = %p, IRQ = %d, rc = %d.\n", 296 __func__, queue, queue->irq, rc); 297 ibmvnic_clean_affinity(adapter); 298 } 299 } 300 301 static int ibmvnic_cpu_online(unsigned int cpu, struct hlist_node *node) 302 { 303 struct ibmvnic_adapter *adapter; 304 305 adapter = hlist_entry_safe(node, struct ibmvnic_adapter, node); 306 ibmvnic_set_affinity(adapter); 307 return 0; 308 } 309 310 static int ibmvnic_cpu_dead(unsigned int cpu, struct hlist_node *node) 311 { 312 struct ibmvnic_adapter *adapter; 313 314 adapter = hlist_entry_safe(node, struct ibmvnic_adapter, node_dead); 315 ibmvnic_set_affinity(adapter); 316 return 0; 317 } 318 319 static int ibmvnic_cpu_down_prep(unsigned int cpu, struct hlist_node *node) 320 { 321 struct ibmvnic_adapter *adapter; 322 323 adapter = hlist_entry_safe(node, struct ibmvnic_adapter, node); 324 ibmvnic_clean_affinity(adapter); 325 return 0; 326 } 327 328 static enum cpuhp_state ibmvnic_online; 329 330 static int ibmvnic_cpu_notif_add(struct ibmvnic_adapter *adapter) 331 { 332 int ret; 333 334 ret = cpuhp_state_add_instance_nocalls(ibmvnic_online, &adapter->node); 335 if (ret) 336 return ret; 337 ret = cpuhp_state_add_instance_nocalls(CPUHP_IBMVNIC_DEAD, 338 &adapter->node_dead); 339 if (!ret) 340 return ret; 341 cpuhp_state_remove_instance_nocalls(ibmvnic_online, &adapter->node); 342 return ret; 343 } 344 345 static void ibmvnic_cpu_notif_remove(struct ibmvnic_adapter *adapter) 346 { 347 cpuhp_state_remove_instance_nocalls(ibmvnic_online, &adapter->node); 348 cpuhp_state_remove_instance_nocalls(CPUHP_IBMVNIC_DEAD, 349 &adapter->node_dead); 350 } 351 352 static long h_reg_sub_crq(unsigned long unit_address, unsigned long token, 353 unsigned long length, unsigned long *number, 354 unsigned long *irq) 355 { 356 unsigned long retbuf[PLPAR_HCALL_BUFSIZE]; 357 long rc; 358 359 rc = plpar_hcall(H_REG_SUB_CRQ, retbuf, unit_address, token, length); 360 *number = retbuf[0]; 361 *irq = retbuf[1]; 362 363 return rc; 364 } 365 366 /** 367 * ibmvnic_wait_for_completion - Check device state and wait for completion 368 * @adapter: private device data 369 * @comp_done: completion structure to wait for 370 * @timeout: time to wait in milliseconds 371 * 372 * Wait for a completion signal or until the timeout limit is reached 373 * while checking that the device is still active. 
 */
static int ibmvnic_wait_for_completion(struct ibmvnic_adapter *adapter,
				       struct completion *comp_done,
				       unsigned long timeout)
{
	struct net_device *netdev;
	unsigned long div_timeout;
	u8 retry;

	netdev = adapter->netdev;
	retry = 5;
	div_timeout = msecs_to_jiffies(timeout / retry);
	while (true) {
		if (!adapter->crq.active) {
			netdev_err(netdev, "Device down!\n");
			return -ENODEV;
		}
		if (!retry--)
			break;
		if (wait_for_completion_timeout(comp_done, div_timeout))
			return 0;
	}
	netdev_err(netdev, "Operation timed out.\n");
	return -ETIMEDOUT;
}

/**
 * reuse_ltb() - Check if a long term buffer can be reused
 * @ltb:  The long term buffer to be checked
 * @size: The size of the long term buffer.
 *
 * An LTB can be reused unless its size has changed.
 *
 * Return: Return true if the LTB can be reused, false otherwise.
 */
static bool reuse_ltb(struct ibmvnic_long_term_buff *ltb, int size)
{
	return (ltb->buff && ltb->size == size);
}

/**
 * alloc_long_term_buff() - Allocate a long term buffer (LTB)
 *
 * @adapter: ibmvnic adapter associated with the LTB
 * @ltb:     container object for the LTB
 * @size:    size of the LTB
 *
 * Allocate an LTB of the specified size and notify VIOS.
 *
 * If the given @ltb already has the correct size, reuse it. Otherwise if
 * it's non-NULL, free it. Then allocate a new one of the correct size.
 * Notify the VIOS either way since we may now be working with a new VIOS.
 *
 * Allocating larger chunks of memory during resets, especially LPM or under
 * low memory situations, can cause resets to fail/timeout and the LPAR to
 * lose connectivity. So hold onto the LTB even if we fail to communicate
 * with the VIOS and reuse it on next open. Free LTB when adapter is closed.
 *
 * Return: 0 if we were able to allocate the LTB and notify the VIOS and
 *	   a negative value otherwise.
 */
static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb, int size)
{
	struct device *dev = &adapter->vdev->dev;
	u64 prev = 0;
	int rc;

	if (!reuse_ltb(ltb, size)) {
		dev_dbg(dev,
			"LTB size changed from 0x%llx to 0x%x, reallocating\n",
			ltb->size, size);
		prev = ltb->size;
		free_long_term_buff(adapter, ltb);
	}

	if (ltb->buff) {
		dev_dbg(dev, "Reusing LTB [map %d, size 0x%llx]\n",
			ltb->map_id, ltb->size);
	} else {
		ltb->buff = dma_alloc_coherent(dev, size, &ltb->addr,
					       GFP_KERNEL);
		if (!ltb->buff) {
			dev_err(dev, "Couldn't alloc long term buffer\n");
			return -ENOMEM;
		}
		ltb->size = size;

		ltb->map_id = find_first_zero_bit(adapter->map_ids,
						  MAX_MAP_ID);
		bitmap_set(adapter->map_ids, ltb->map_id, 1);

		dev_dbg(dev,
			"Allocated new LTB [map %d, size 0x%llx was 0x%llx]\n",
			ltb->map_id, ltb->size, prev);
	}

	/* Ensure ltb is zeroed - especially when reusing it.
	 */
	memset(ltb->buff, 0, ltb->size);

	mutex_lock(&adapter->fw_lock);
	adapter->fw_done_rc = 0;
	reinit_completion(&adapter->fw_done);

	rc = send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id);
	if (rc) {
		dev_err(dev, "send_request_map failed, rc = %d\n", rc);
		goto out;
	}

	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
	if (rc) {
		dev_err(dev, "LTB map request aborted or timed out, rc = %d\n",
			rc);
		goto out;
	}

	if (adapter->fw_done_rc) {
		dev_err(dev, "Couldn't map LTB, rc = %d\n",
			adapter->fw_done_rc);
		rc = -EIO;
		goto out;
	}
	rc = 0;
out:
	/* don't free LTB on communication error - see function header */
	mutex_unlock(&adapter->fw_lock);
	return rc;
}

static void free_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb)
{
	struct device *dev = &adapter->vdev->dev;

	if (!ltb->buff)
		return;

	/* VIOS automatically unmaps the long term buffer at remote
	 * end for the following resets:
	 * FAILOVER, MOBILITY, TIMEOUT.
	 */
	if (adapter->reset_reason != VNIC_RESET_FAILOVER &&
	    adapter->reset_reason != VNIC_RESET_MOBILITY &&
	    adapter->reset_reason != VNIC_RESET_TIMEOUT)
		send_request_unmap(adapter, ltb->map_id);

	dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);

	ltb->buff = NULL;
	/* mark this map_id free */
	bitmap_clear(adapter->map_ids, ltb->map_id, 1);
	ltb->map_id = 0;
}

/**
 * free_ltb_set - free the given set of long term buffers (LTBs)
 * @adapter: The ibmvnic adapter containing this ltb set
 * @ltb_set: The ltb_set to be freed
 *
 * Free the set of LTBs in the given set.
 */
static void free_ltb_set(struct ibmvnic_adapter *adapter,
			 struct ibmvnic_ltb_set *ltb_set)
{
	int i;

	for (i = 0; i < ltb_set->num_ltbs; i++)
		free_long_term_buff(adapter, &ltb_set->ltbs[i]);

	kfree(ltb_set->ltbs);
	ltb_set->ltbs = NULL;
	ltb_set->num_ltbs = 0;
}

/**
 * alloc_ltb_set() - Allocate a set of long term buffers (LTBs)
 *
 * @adapter:   ibmvnic adapter associated with the LTB
 * @ltb_set:   container object for the set of LTBs
 * @num_buffs: Number of buffers in the LTB
 * @buff_size: Size of each buffer in the LTB
 *
 * Allocate a set of LTBs to accommodate @num_buffs buffers of @buff_size
 * each. We currently cap the size of each LTB to IBMVNIC_ONE_LTB_SIZE. If
 * the new set of LTBs has fewer LTBs than the old set, free the excess LTBs.
 * If the new set needs more than the old set had, allocate the remaining
 * ones. Try to reuse as many LTBs as possible and avoid reallocation.
 *
 * Any changes to this allocation strategy must be reflected in
 * map_rxpool_buf_to_ltb() and map_txpool_buf_to_ltb().
566 */ 567 static int alloc_ltb_set(struct ibmvnic_adapter *adapter, 568 struct ibmvnic_ltb_set *ltb_set, int num_buffs, 569 int buff_size) 570 { 571 struct device *dev = &adapter->vdev->dev; 572 struct ibmvnic_ltb_set old_set; 573 struct ibmvnic_ltb_set new_set; 574 int rem_size; 575 int tot_size; /* size of all ltbs */ 576 int ltb_size; /* size of one ltb */ 577 int nltbs; 578 int rc; 579 int n; 580 int i; 581 582 dev_dbg(dev, "%s() num_buffs %d, buff_size %d\n", __func__, num_buffs, 583 buff_size); 584 585 ltb_size = rounddown(IBMVNIC_ONE_LTB_SIZE, buff_size); 586 tot_size = num_buffs * buff_size; 587 588 if (ltb_size > tot_size) 589 ltb_size = tot_size; 590 591 nltbs = tot_size / ltb_size; 592 if (tot_size % ltb_size) 593 nltbs++; 594 595 old_set = *ltb_set; 596 597 if (old_set.num_ltbs == nltbs) { 598 new_set = old_set; 599 } else { 600 int tmp = nltbs * sizeof(struct ibmvnic_long_term_buff); 601 602 new_set.ltbs = kzalloc(tmp, GFP_KERNEL); 603 if (!new_set.ltbs) 604 return -ENOMEM; 605 606 new_set.num_ltbs = nltbs; 607 608 /* Free any excess ltbs in old set */ 609 for (i = new_set.num_ltbs; i < old_set.num_ltbs; i++) 610 free_long_term_buff(adapter, &old_set.ltbs[i]); 611 612 /* Copy remaining ltbs to new set. All LTBs except the 613 * last one are of the same size. alloc_long_term_buff() 614 * will realloc if the size changes. 615 */ 616 n = min(old_set.num_ltbs, new_set.num_ltbs); 617 for (i = 0; i < n; i++) 618 new_set.ltbs[i] = old_set.ltbs[i]; 619 620 /* Any additional ltbs in new set will have NULL ltbs for 621 * now and will be allocated in alloc_long_term_buff(). 622 */ 623 624 /* We no longer need the old_set so free it. Note that we 625 * may have reused some ltbs from old set and freed excess 626 * ltbs above. So we only need to free the container now 627 * not the LTBs themselves. (i.e. dont free_ltb_set()!) 628 */ 629 kfree(old_set.ltbs); 630 old_set.ltbs = NULL; 631 old_set.num_ltbs = 0; 632 633 /* Install the new set. If allocations fail below, we will 634 * retry later and know what size LTBs we need. 635 */ 636 *ltb_set = new_set; 637 } 638 639 i = 0; 640 rem_size = tot_size; 641 while (rem_size) { 642 if (ltb_size > rem_size) 643 ltb_size = rem_size; 644 645 rem_size -= ltb_size; 646 647 rc = alloc_long_term_buff(adapter, &new_set.ltbs[i], ltb_size); 648 if (rc) 649 goto out; 650 i++; 651 } 652 653 WARN_ON(i != new_set.num_ltbs); 654 655 return 0; 656 out: 657 /* We may have allocated one/more LTBs before failing and we 658 * want to try and reuse on next reset. So don't free ltb set. 659 */ 660 return rc; 661 } 662 663 /** 664 * map_rxpool_buf_to_ltb - Map given rxpool buffer to offset in an LTB. 665 * @rxpool: The receive buffer pool containing buffer 666 * @bufidx: Index of buffer in rxpool 667 * @ltbp: (Output) pointer to the long term buffer containing the buffer 668 * @offset: (Output) offset of buffer in the LTB from @ltbp 669 * 670 * Map the given buffer identified by [rxpool, bufidx] to an LTB in the 671 * pool and its corresponding offset. Assume for now that each LTB is of 672 * different size but could possibly be optimized based on the allocation 673 * strategy in alloc_ltb_set(). 
 */
static void map_rxpool_buf_to_ltb(struct ibmvnic_rx_pool *rxpool,
				  unsigned int bufidx,
				  struct ibmvnic_long_term_buff **ltbp,
				  unsigned int *offset)
{
	struct ibmvnic_long_term_buff *ltb;
	int nbufs;	/* # of buffers in one ltb */
	int i;

	WARN_ON(bufidx >= rxpool->size);

	for (i = 0; i < rxpool->ltb_set.num_ltbs; i++) {
		ltb = &rxpool->ltb_set.ltbs[i];
		nbufs = ltb->size / rxpool->buff_size;
		if (bufidx < nbufs)
			break;
		bufidx -= nbufs;
	}

	*ltbp = ltb;
	*offset = bufidx * rxpool->buff_size;
}

/**
 * map_txpool_buf_to_ltb - Map given txpool buffer to offset in an LTB.
 * @txpool: The transmit buffer pool containing buffer
 * @bufidx: Index of buffer in txpool
 * @ltbp: (Output) pointer to the long term buffer (LTB) containing the buffer
 * @offset: (Output) offset of buffer in the LTB from @ltbp
 *
 * Map the given buffer identified by [txpool, bufidx] to an LTB in the
 * pool and its corresponding offset.
 */
static void map_txpool_buf_to_ltb(struct ibmvnic_tx_pool *txpool,
				  unsigned int bufidx,
				  struct ibmvnic_long_term_buff **ltbp,
				  unsigned int *offset)
{
	struct ibmvnic_long_term_buff *ltb;
	int nbufs;	/* # of buffers in one ltb */
	int i;

	WARN_ON_ONCE(bufidx >= txpool->num_buffers);

	for (i = 0; i < txpool->ltb_set.num_ltbs; i++) {
		ltb = &txpool->ltb_set.ltbs[i];
		nbufs = ltb->size / txpool->buf_size;
		if (bufidx < nbufs)
			break;
		bufidx -= nbufs;
	}

	*ltbp = ltb;
	*offset = bufidx * txpool->buf_size;
}

static void deactivate_rx_pools(struct ibmvnic_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_active_rx_pools; i++)
		adapter->rx_pool[i].active = 0;
}

static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
			      struct ibmvnic_rx_pool *pool)
{
	int count = pool->size - atomic_read(&pool->available);
	u64 handle = adapter->rx_scrq[pool->index]->handle;
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_ind_xmit_queue *ind_bufp;
	struct ibmvnic_sub_crq_queue *rx_scrq;
	struct ibmvnic_long_term_buff *ltb;
	union sub_crq *sub_crq;
	int buffers_added = 0;
	unsigned long lpar_rc;
	struct sk_buff *skb;
	unsigned int offset;
	dma_addr_t dma_addr;
	unsigned char *dst;
	int shift = 0;
	int bufidx;
	int i;

	if (!pool->active)
		return;

	rx_scrq = adapter->rx_scrq[pool->index];
	ind_bufp = &rx_scrq->ind_buf;

	/* netdev_alloc_skb() could have failed after we saved a few skbs
	 * in the indir_buf and we would not have sent them to VIOS yet.
	 * To account for them, start the loop at ind_bufp->index rather
	 * than 0. If we pushed all the skbs to VIOS, ind_bufp->index will
	 * be 0.
	 */
	for (i = ind_bufp->index; i < count; ++i) {
		bufidx = pool->free_map[pool->next_free];

		/* We may be reusing the skb from earlier resets. Allocate
		 * only if necessary. But since the LTB may have changed
		 * during reset (see init_rx_pools()), update LTB below
		 * even if reusing skb.
		 */
		skb = pool->rx_buff[bufidx].skb;
		if (!skb) {
			skb = netdev_alloc_skb(adapter->netdev,
					       pool->buff_size);
			if (!skb) {
				dev_err(dev, "Couldn't replenish rx buff\n");
				adapter->replenish_no_mem++;
				break;
			}
		}

		pool->free_map[pool->next_free] = IBMVNIC_INVALID_MAP;
		pool->next_free = (pool->next_free + 1) % pool->size;

		/* Copy the skb to the long term mapped DMA buffer */
		map_rxpool_buf_to_ltb(pool, bufidx, &ltb, &offset);
		dst = ltb->buff + offset;
		memset(dst, 0, pool->buff_size);
		dma_addr = ltb->addr + offset;

		/* add the skb to an rx_buff in the pool */
		pool->rx_buff[bufidx].data = dst;
		pool->rx_buff[bufidx].dma = dma_addr;
		pool->rx_buff[bufidx].skb = skb;
		pool->rx_buff[bufidx].pool_index = pool->index;
		pool->rx_buff[bufidx].size = pool->buff_size;

		/* queue the rx_buff for the next send_subcrq_indirect */
		sub_crq = &ind_bufp->indir_arr[ind_bufp->index++];
		memset(sub_crq, 0, sizeof(*sub_crq));
		sub_crq->rx_add.first = IBMVNIC_CRQ_CMD;
		sub_crq->rx_add.correlator =
		    cpu_to_be64((u64)&pool->rx_buff[bufidx]);
		sub_crq->rx_add.ioba = cpu_to_be32(dma_addr);
		sub_crq->rx_add.map_id = ltb->map_id;

		/* The length field of the sCRQ is defined to be 24 bits so the
		 * buffer size needs to be left shifted by a byte before it is
		 * converted to big endian to prevent the last byte from being
		 * truncated.
		 */
#ifdef __LITTLE_ENDIAN__
		shift = 8;
#endif
		sub_crq->rx_add.len = cpu_to_be32(pool->buff_size << shift);

		/* if send_subcrq_indirect queue is full, flush to VIOS */
		if (ind_bufp->index == IBMVNIC_MAX_IND_DESCS ||
		    i == count - 1) {
			lpar_rc =
				send_subcrq_indirect(adapter, handle,
						     (u64)ind_bufp->indir_dma,
						     (u64)ind_bufp->index);
			if (lpar_rc != H_SUCCESS)
				goto failure;
			buffers_added += ind_bufp->index;
			adapter->replenish_add_buff_success += ind_bufp->index;
			ind_bufp->index = 0;
		}
	}
	atomic_add(buffers_added, &pool->available);
	return;

failure:
	if (lpar_rc != H_PARAMETER && lpar_rc != H_CLOSED)
		dev_err_ratelimited(dev, "rx: replenish packet buffer failed\n");
	for (i = ind_bufp->index - 1; i >= 0; --i) {
		struct ibmvnic_rx_buff *rx_buff;

		pool->next_free = pool->next_free == 0 ?
				  pool->size - 1 : pool->next_free - 1;
		sub_crq = &ind_bufp->indir_arr[i];
		rx_buff = (struct ibmvnic_rx_buff *)
			  be64_to_cpu(sub_crq->rx_add.correlator);
		bufidx = (int)(rx_buff - pool->rx_buff);
		pool->free_map[pool->next_free] = bufidx;
		dev_kfree_skb_any(pool->rx_buff[bufidx].skb);
		pool->rx_buff[bufidx].skb = NULL;
	}
	adapter->replenish_add_buff_failure += ind_bufp->index;
	atomic_add(buffers_added, &pool->available);
	ind_bufp->index = 0;
	if (lpar_rc == H_CLOSED || adapter->failover_pending) {
		/* Disable buffer pool replenishment and report carrier off if
		 * queue is closed or pending failover.
		 * Firmware guarantees that a signal will be sent to the
		 * driver, triggering a reset.
866 */ 867 deactivate_rx_pools(adapter); 868 netif_carrier_off(adapter->netdev); 869 } 870 } 871 872 static void replenish_pools(struct ibmvnic_adapter *adapter) 873 { 874 int i; 875 876 adapter->replenish_task_cycles++; 877 for (i = 0; i < adapter->num_active_rx_pools; i++) { 878 if (adapter->rx_pool[i].active) 879 replenish_rx_pool(adapter, &adapter->rx_pool[i]); 880 } 881 882 netdev_dbg(adapter->netdev, "Replenished %d pools\n", i); 883 } 884 885 static void release_stats_buffers(struct ibmvnic_adapter *adapter) 886 { 887 kfree(adapter->tx_stats_buffers); 888 kfree(adapter->rx_stats_buffers); 889 adapter->tx_stats_buffers = NULL; 890 adapter->rx_stats_buffers = NULL; 891 } 892 893 static int init_stats_buffers(struct ibmvnic_adapter *adapter) 894 { 895 adapter->tx_stats_buffers = 896 kcalloc(IBMVNIC_MAX_QUEUES, 897 sizeof(struct ibmvnic_tx_queue_stats), 898 GFP_KERNEL); 899 if (!adapter->tx_stats_buffers) 900 return -ENOMEM; 901 902 adapter->rx_stats_buffers = 903 kcalloc(IBMVNIC_MAX_QUEUES, 904 sizeof(struct ibmvnic_rx_queue_stats), 905 GFP_KERNEL); 906 if (!adapter->rx_stats_buffers) 907 return -ENOMEM; 908 909 return 0; 910 } 911 912 static void release_stats_token(struct ibmvnic_adapter *adapter) 913 { 914 struct device *dev = &adapter->vdev->dev; 915 916 if (!adapter->stats_token) 917 return; 918 919 dma_unmap_single(dev, adapter->stats_token, 920 sizeof(struct ibmvnic_statistics), 921 DMA_FROM_DEVICE); 922 adapter->stats_token = 0; 923 } 924 925 static int init_stats_token(struct ibmvnic_adapter *adapter) 926 { 927 struct device *dev = &adapter->vdev->dev; 928 dma_addr_t stok; 929 int rc; 930 931 stok = dma_map_single(dev, &adapter->stats, 932 sizeof(struct ibmvnic_statistics), 933 DMA_FROM_DEVICE); 934 rc = dma_mapping_error(dev, stok); 935 if (rc) { 936 dev_err(dev, "Couldn't map stats buffer, rc = %d\n", rc); 937 return rc; 938 } 939 940 adapter->stats_token = stok; 941 netdev_dbg(adapter->netdev, "Stats token initialized (%llx)\n", stok); 942 return 0; 943 } 944 945 /** 946 * release_rx_pools() - Release any rx pools attached to @adapter. 947 * @adapter: ibmvnic adapter 948 * 949 * Safe to call this multiple times - even if no pools are attached. 950 */ 951 static void release_rx_pools(struct ibmvnic_adapter *adapter) 952 { 953 struct ibmvnic_rx_pool *rx_pool; 954 int i, j; 955 956 if (!adapter->rx_pool) 957 return; 958 959 for (i = 0; i < adapter->num_active_rx_pools; i++) { 960 rx_pool = &adapter->rx_pool[i]; 961 962 netdev_dbg(adapter->netdev, "Releasing rx_pool[%d]\n", i); 963 964 kfree(rx_pool->free_map); 965 966 free_ltb_set(adapter, &rx_pool->ltb_set); 967 968 if (!rx_pool->rx_buff) 969 continue; 970 971 for (j = 0; j < rx_pool->size; j++) { 972 if (rx_pool->rx_buff[j].skb) { 973 dev_kfree_skb_any(rx_pool->rx_buff[j].skb); 974 rx_pool->rx_buff[j].skb = NULL; 975 } 976 } 977 978 kfree(rx_pool->rx_buff); 979 } 980 981 kfree(adapter->rx_pool); 982 adapter->rx_pool = NULL; 983 adapter->num_active_rx_pools = 0; 984 adapter->prev_rx_pool_size = 0; 985 } 986 987 /** 988 * reuse_rx_pools() - Check if the existing rx pools can be reused. 989 * @adapter: ibmvnic adapter 990 * 991 * Check if the existing rx pools in the adapter can be reused. The 992 * pools can be reused if the pool parameters (number of pools, 993 * number of buffers in the pool and size of each buffer) have not 994 * changed. 995 * 996 * NOTE: This assumes that all pools have the same number of buffers 997 * which is the case currently. If that changes, we must fix this. 
998 * 999 * Return: true if the rx pools can be reused, false otherwise. 1000 */ 1001 static bool reuse_rx_pools(struct ibmvnic_adapter *adapter) 1002 { 1003 u64 old_num_pools, new_num_pools; 1004 u64 old_pool_size, new_pool_size; 1005 u64 old_buff_size, new_buff_size; 1006 1007 if (!adapter->rx_pool) 1008 return false; 1009 1010 old_num_pools = adapter->num_active_rx_pools; 1011 new_num_pools = adapter->req_rx_queues; 1012 1013 old_pool_size = adapter->prev_rx_pool_size; 1014 new_pool_size = adapter->req_rx_add_entries_per_subcrq; 1015 1016 old_buff_size = adapter->prev_rx_buf_sz; 1017 new_buff_size = adapter->cur_rx_buf_sz; 1018 1019 if (old_buff_size != new_buff_size || 1020 old_num_pools != new_num_pools || 1021 old_pool_size != new_pool_size) 1022 return false; 1023 1024 return true; 1025 } 1026 1027 /** 1028 * init_rx_pools(): Initialize the set of receiver pools in the adapter. 1029 * @netdev: net device associated with the vnic interface 1030 * 1031 * Initialize the set of receiver pools in the ibmvnic adapter associated 1032 * with the net_device @netdev. If possible, reuse the existing rx pools. 1033 * Otherwise free any existing pools and allocate a new set of pools 1034 * before initializing them. 1035 * 1036 * Return: 0 on success and negative value on error. 1037 */ 1038 static int init_rx_pools(struct net_device *netdev) 1039 { 1040 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 1041 struct device *dev = &adapter->vdev->dev; 1042 struct ibmvnic_rx_pool *rx_pool; 1043 u64 num_pools; 1044 u64 pool_size; /* # of buffers in one pool */ 1045 u64 buff_size; 1046 int i, j, rc; 1047 1048 pool_size = adapter->req_rx_add_entries_per_subcrq; 1049 num_pools = adapter->req_rx_queues; 1050 buff_size = adapter->cur_rx_buf_sz; 1051 1052 if (reuse_rx_pools(adapter)) { 1053 dev_dbg(dev, "Reusing rx pools\n"); 1054 goto update_ltb; 1055 } 1056 1057 /* Allocate/populate the pools. */ 1058 release_rx_pools(adapter); 1059 1060 adapter->rx_pool = kcalloc(num_pools, 1061 sizeof(struct ibmvnic_rx_pool), 1062 GFP_KERNEL); 1063 if (!adapter->rx_pool) { 1064 dev_err(dev, "Failed to allocate rx pools\n"); 1065 return -ENOMEM; 1066 } 1067 1068 /* Set num_active_rx_pools early. If we fail below after partial 1069 * allocation, release_rx_pools() will know how many to look for. 
1070 */ 1071 adapter->num_active_rx_pools = num_pools; 1072 1073 for (i = 0; i < num_pools; i++) { 1074 rx_pool = &adapter->rx_pool[i]; 1075 1076 netdev_dbg(adapter->netdev, 1077 "Initializing rx_pool[%d], %lld buffs, %lld bytes each\n", 1078 i, pool_size, buff_size); 1079 1080 rx_pool->size = pool_size; 1081 rx_pool->index = i; 1082 rx_pool->buff_size = ALIGN(buff_size, L1_CACHE_BYTES); 1083 1084 rx_pool->free_map = kcalloc(rx_pool->size, sizeof(int), 1085 GFP_KERNEL); 1086 if (!rx_pool->free_map) { 1087 dev_err(dev, "Couldn't alloc free_map %d\n", i); 1088 rc = -ENOMEM; 1089 goto out_release; 1090 } 1091 1092 rx_pool->rx_buff = kcalloc(rx_pool->size, 1093 sizeof(struct ibmvnic_rx_buff), 1094 GFP_KERNEL); 1095 if (!rx_pool->rx_buff) { 1096 dev_err(dev, "Couldn't alloc rx buffers\n"); 1097 rc = -ENOMEM; 1098 goto out_release; 1099 } 1100 } 1101 1102 adapter->prev_rx_pool_size = pool_size; 1103 adapter->prev_rx_buf_sz = adapter->cur_rx_buf_sz; 1104 1105 update_ltb: 1106 for (i = 0; i < num_pools; i++) { 1107 rx_pool = &adapter->rx_pool[i]; 1108 dev_dbg(dev, "Updating LTB for rx pool %d [%d, %d]\n", 1109 i, rx_pool->size, rx_pool->buff_size); 1110 1111 rc = alloc_ltb_set(adapter, &rx_pool->ltb_set, 1112 rx_pool->size, rx_pool->buff_size); 1113 if (rc) 1114 goto out; 1115 1116 for (j = 0; j < rx_pool->size; ++j) { 1117 struct ibmvnic_rx_buff *rx_buff; 1118 1119 rx_pool->free_map[j] = j; 1120 1121 /* NOTE: Don't clear rx_buff->skb here - will leak 1122 * memory! replenish_rx_pool() will reuse skbs or 1123 * allocate as necessary. 1124 */ 1125 rx_buff = &rx_pool->rx_buff[j]; 1126 rx_buff->dma = 0; 1127 rx_buff->data = 0; 1128 rx_buff->size = 0; 1129 rx_buff->pool_index = 0; 1130 } 1131 1132 /* Mark pool "empty" so replenish_rx_pools() will 1133 * update the LTB info for each buffer 1134 */ 1135 atomic_set(&rx_pool->available, 0); 1136 rx_pool->next_alloc = 0; 1137 rx_pool->next_free = 0; 1138 /* replenish_rx_pool() may have called deactivate_rx_pools() 1139 * on failover. Ensure pool is active now. 1140 */ 1141 rx_pool->active = 1; 1142 } 1143 return 0; 1144 out_release: 1145 release_rx_pools(adapter); 1146 out: 1147 /* We failed to allocate one or more LTBs or map them on the VIOS. 1148 * Hold onto the pools and any LTBs that we did allocate/map. 1149 */ 1150 return rc; 1151 } 1152 1153 static void release_vpd_data(struct ibmvnic_adapter *adapter) 1154 { 1155 if (!adapter->vpd) 1156 return; 1157 1158 kfree(adapter->vpd->buff); 1159 kfree(adapter->vpd); 1160 1161 adapter->vpd = NULL; 1162 } 1163 1164 static void release_one_tx_pool(struct ibmvnic_adapter *adapter, 1165 struct ibmvnic_tx_pool *tx_pool) 1166 { 1167 kfree(tx_pool->tx_buff); 1168 kfree(tx_pool->free_map); 1169 free_ltb_set(adapter, &tx_pool->ltb_set); 1170 } 1171 1172 /** 1173 * release_tx_pools() - Release any tx pools attached to @adapter. 1174 * @adapter: ibmvnic adapter 1175 * 1176 * Safe to call this multiple times - even if no pools are attached. 1177 */ 1178 static void release_tx_pools(struct ibmvnic_adapter *adapter) 1179 { 1180 int i; 1181 1182 /* init_tx_pools() ensures that ->tx_pool and ->tso_pool are 1183 * both NULL or both non-NULL. So we only need to check one. 
1184 */ 1185 if (!adapter->tx_pool) 1186 return; 1187 1188 for (i = 0; i < adapter->num_active_tx_pools; i++) { 1189 release_one_tx_pool(adapter, &adapter->tx_pool[i]); 1190 release_one_tx_pool(adapter, &adapter->tso_pool[i]); 1191 } 1192 1193 kfree(adapter->tx_pool); 1194 adapter->tx_pool = NULL; 1195 kfree(adapter->tso_pool); 1196 adapter->tso_pool = NULL; 1197 adapter->num_active_tx_pools = 0; 1198 adapter->prev_tx_pool_size = 0; 1199 } 1200 1201 static int init_one_tx_pool(struct net_device *netdev, 1202 struct ibmvnic_tx_pool *tx_pool, 1203 int pool_size, int buf_size) 1204 { 1205 int i; 1206 1207 tx_pool->tx_buff = kcalloc(pool_size, 1208 sizeof(struct ibmvnic_tx_buff), 1209 GFP_KERNEL); 1210 if (!tx_pool->tx_buff) 1211 return -ENOMEM; 1212 1213 tx_pool->free_map = kcalloc(pool_size, sizeof(int), GFP_KERNEL); 1214 if (!tx_pool->free_map) { 1215 kfree(tx_pool->tx_buff); 1216 tx_pool->tx_buff = NULL; 1217 return -ENOMEM; 1218 } 1219 1220 for (i = 0; i < pool_size; i++) 1221 tx_pool->free_map[i] = i; 1222 1223 tx_pool->consumer_index = 0; 1224 tx_pool->producer_index = 0; 1225 tx_pool->num_buffers = pool_size; 1226 tx_pool->buf_size = buf_size; 1227 1228 return 0; 1229 } 1230 1231 /** 1232 * reuse_tx_pools() - Check if the existing tx pools can be reused. 1233 * @adapter: ibmvnic adapter 1234 * 1235 * Check if the existing tx pools in the adapter can be reused. The 1236 * pools can be reused if the pool parameters (number of pools, 1237 * number of buffers in the pool and mtu) have not changed. 1238 * 1239 * NOTE: This assumes that all pools have the same number of buffers 1240 * which is the case currently. If that changes, we must fix this. 1241 * 1242 * Return: true if the tx pools can be reused, false otherwise. 1243 */ 1244 static bool reuse_tx_pools(struct ibmvnic_adapter *adapter) 1245 { 1246 u64 old_num_pools, new_num_pools; 1247 u64 old_pool_size, new_pool_size; 1248 u64 old_mtu, new_mtu; 1249 1250 if (!adapter->tx_pool) 1251 return false; 1252 1253 old_num_pools = adapter->num_active_tx_pools; 1254 new_num_pools = adapter->num_active_tx_scrqs; 1255 old_pool_size = adapter->prev_tx_pool_size; 1256 new_pool_size = adapter->req_tx_entries_per_subcrq; 1257 old_mtu = adapter->prev_mtu; 1258 new_mtu = adapter->req_mtu; 1259 1260 if (old_mtu != new_mtu || 1261 old_num_pools != new_num_pools || 1262 old_pool_size != new_pool_size) 1263 return false; 1264 1265 return true; 1266 } 1267 1268 /** 1269 * init_tx_pools(): Initialize the set of transmit pools in the adapter. 1270 * @netdev: net device associated with the vnic interface 1271 * 1272 * Initialize the set of transmit pools in the ibmvnic adapter associated 1273 * with the net_device @netdev. If possible, reuse the existing tx pools. 1274 * Otherwise free any existing pools and allocate a new set of pools 1275 * before initializing them. 1276 * 1277 * Return: 0 on success and negative value on error. 1278 */ 1279 static int init_tx_pools(struct net_device *netdev) 1280 { 1281 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 1282 struct device *dev = &adapter->vdev->dev; 1283 int num_pools; 1284 u64 pool_size; /* # of buffers in pool */ 1285 u64 buff_size; 1286 int i, j, rc; 1287 1288 num_pools = adapter->req_tx_queues; 1289 1290 /* We must notify the VIOS about the LTB on all resets - but we only 1291 * need to alloc/populate pools if either the number of buffers or 1292 * size of each buffer in the pool has changed. 
1293 */ 1294 if (reuse_tx_pools(adapter)) { 1295 netdev_dbg(netdev, "Reusing tx pools\n"); 1296 goto update_ltb; 1297 } 1298 1299 /* Allocate/populate the pools. */ 1300 release_tx_pools(adapter); 1301 1302 pool_size = adapter->req_tx_entries_per_subcrq; 1303 num_pools = adapter->num_active_tx_scrqs; 1304 1305 adapter->tx_pool = kcalloc(num_pools, 1306 sizeof(struct ibmvnic_tx_pool), GFP_KERNEL); 1307 if (!adapter->tx_pool) 1308 return -ENOMEM; 1309 1310 adapter->tso_pool = kcalloc(num_pools, 1311 sizeof(struct ibmvnic_tx_pool), GFP_KERNEL); 1312 /* To simplify release_tx_pools() ensure that ->tx_pool and 1313 * ->tso_pool are either both NULL or both non-NULL. 1314 */ 1315 if (!adapter->tso_pool) { 1316 kfree(adapter->tx_pool); 1317 adapter->tx_pool = NULL; 1318 return -ENOMEM; 1319 } 1320 1321 /* Set num_active_tx_pools early. If we fail below after partial 1322 * allocation, release_tx_pools() will know how many to look for. 1323 */ 1324 adapter->num_active_tx_pools = num_pools; 1325 1326 buff_size = adapter->req_mtu + VLAN_HLEN; 1327 buff_size = ALIGN(buff_size, L1_CACHE_BYTES); 1328 1329 for (i = 0; i < num_pools; i++) { 1330 dev_dbg(dev, "Init tx pool %d [%llu, %llu]\n", 1331 i, adapter->req_tx_entries_per_subcrq, buff_size); 1332 1333 rc = init_one_tx_pool(netdev, &adapter->tx_pool[i], 1334 pool_size, buff_size); 1335 if (rc) 1336 goto out_release; 1337 1338 rc = init_one_tx_pool(netdev, &adapter->tso_pool[i], 1339 IBMVNIC_TSO_BUFS, 1340 IBMVNIC_TSO_BUF_SZ); 1341 if (rc) 1342 goto out_release; 1343 } 1344 1345 adapter->prev_tx_pool_size = pool_size; 1346 adapter->prev_mtu = adapter->req_mtu; 1347 1348 update_ltb: 1349 /* NOTE: All tx_pools have the same number of buffers (which is 1350 * same as pool_size). All tso_pools have IBMVNIC_TSO_BUFS 1351 * buffers (see calls init_one_tx_pool() for these). 1352 * For consistency, we use tx_pool->num_buffers and 1353 * tso_pool->num_buffers below. 1354 */ 1355 rc = -1; 1356 for (i = 0; i < num_pools; i++) { 1357 struct ibmvnic_tx_pool *tso_pool; 1358 struct ibmvnic_tx_pool *tx_pool; 1359 1360 tx_pool = &adapter->tx_pool[i]; 1361 1362 dev_dbg(dev, "Updating LTB for tx pool %d [%d, %d]\n", 1363 i, tx_pool->num_buffers, tx_pool->buf_size); 1364 1365 rc = alloc_ltb_set(adapter, &tx_pool->ltb_set, 1366 tx_pool->num_buffers, tx_pool->buf_size); 1367 if (rc) 1368 goto out; 1369 1370 tx_pool->consumer_index = 0; 1371 tx_pool->producer_index = 0; 1372 1373 for (j = 0; j < tx_pool->num_buffers; j++) 1374 tx_pool->free_map[j] = j; 1375 1376 tso_pool = &adapter->tso_pool[i]; 1377 1378 dev_dbg(dev, "Updating LTB for tso pool %d [%d, %d]\n", 1379 i, tso_pool->num_buffers, tso_pool->buf_size); 1380 1381 rc = alloc_ltb_set(adapter, &tso_pool->ltb_set, 1382 tso_pool->num_buffers, tso_pool->buf_size); 1383 if (rc) 1384 goto out; 1385 1386 tso_pool->consumer_index = 0; 1387 tso_pool->producer_index = 0; 1388 1389 for (j = 0; j < tso_pool->num_buffers; j++) 1390 tso_pool->free_map[j] = j; 1391 } 1392 1393 return 0; 1394 out_release: 1395 release_tx_pools(adapter); 1396 out: 1397 /* We failed to allocate one or more LTBs or map them on the VIOS. 1398 * Hold onto the pools and any LTBs that we did allocate/map. 
1399 */ 1400 return rc; 1401 } 1402 1403 static void ibmvnic_napi_enable(struct ibmvnic_adapter *adapter) 1404 { 1405 int i; 1406 1407 if (adapter->napi_enabled) 1408 return; 1409 1410 for (i = 0; i < adapter->req_rx_queues; i++) 1411 napi_enable(&adapter->napi[i]); 1412 1413 adapter->napi_enabled = true; 1414 } 1415 1416 static void ibmvnic_napi_disable(struct ibmvnic_adapter *adapter) 1417 { 1418 int i; 1419 1420 if (!adapter->napi_enabled) 1421 return; 1422 1423 for (i = 0; i < adapter->req_rx_queues; i++) { 1424 netdev_dbg(adapter->netdev, "Disabling napi[%d]\n", i); 1425 napi_disable(&adapter->napi[i]); 1426 } 1427 1428 adapter->napi_enabled = false; 1429 } 1430 1431 static int init_napi(struct ibmvnic_adapter *adapter) 1432 { 1433 int i; 1434 1435 adapter->napi = kcalloc(adapter->req_rx_queues, 1436 sizeof(struct napi_struct), GFP_KERNEL); 1437 if (!adapter->napi) 1438 return -ENOMEM; 1439 1440 for (i = 0; i < adapter->req_rx_queues; i++) { 1441 netdev_dbg(adapter->netdev, "Adding napi[%d]\n", i); 1442 netif_napi_add(adapter->netdev, &adapter->napi[i], 1443 ibmvnic_poll); 1444 } 1445 1446 adapter->num_active_rx_napi = adapter->req_rx_queues; 1447 return 0; 1448 } 1449 1450 static void release_napi(struct ibmvnic_adapter *adapter) 1451 { 1452 int i; 1453 1454 if (!adapter->napi) 1455 return; 1456 1457 for (i = 0; i < adapter->num_active_rx_napi; i++) { 1458 netdev_dbg(adapter->netdev, "Releasing napi[%d]\n", i); 1459 netif_napi_del(&adapter->napi[i]); 1460 } 1461 1462 kfree(adapter->napi); 1463 adapter->napi = NULL; 1464 adapter->num_active_rx_napi = 0; 1465 adapter->napi_enabled = false; 1466 } 1467 1468 static const char *adapter_state_to_string(enum vnic_state state) 1469 { 1470 switch (state) { 1471 case VNIC_PROBING: 1472 return "PROBING"; 1473 case VNIC_PROBED: 1474 return "PROBED"; 1475 case VNIC_OPENING: 1476 return "OPENING"; 1477 case VNIC_OPEN: 1478 return "OPEN"; 1479 case VNIC_CLOSING: 1480 return "CLOSING"; 1481 case VNIC_CLOSED: 1482 return "CLOSED"; 1483 case VNIC_REMOVING: 1484 return "REMOVING"; 1485 case VNIC_REMOVED: 1486 return "REMOVED"; 1487 case VNIC_DOWN: 1488 return "DOWN"; 1489 } 1490 return "UNKNOWN"; 1491 } 1492 1493 static int ibmvnic_login(struct net_device *netdev) 1494 { 1495 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 1496 unsigned long timeout = msecs_to_jiffies(20000); 1497 int retry_count = 0; 1498 int retries = 10; 1499 bool retry; 1500 int rc; 1501 1502 do { 1503 retry = false; 1504 if (retry_count > retries) { 1505 netdev_warn(netdev, "Login attempts exceeded\n"); 1506 return -EACCES; 1507 } 1508 1509 adapter->init_done_rc = 0; 1510 reinit_completion(&adapter->init_done); 1511 rc = send_login(adapter); 1512 if (rc) 1513 return rc; 1514 1515 if (!wait_for_completion_timeout(&adapter->init_done, 1516 timeout)) { 1517 netdev_warn(netdev, "Login timed out, retrying...\n"); 1518 retry = true; 1519 adapter->init_done_rc = 0; 1520 retry_count++; 1521 continue; 1522 } 1523 1524 if (adapter->init_done_rc == ABORTED) { 1525 netdev_warn(netdev, "Login aborted, retrying...\n"); 1526 retry = true; 1527 adapter->init_done_rc = 0; 1528 retry_count++; 1529 /* FW or device may be busy, so 1530 * wait a bit before retrying login 1531 */ 1532 msleep(500); 1533 } else if (adapter->init_done_rc == PARTIALSUCCESS) { 1534 retry_count++; 1535 release_sub_crqs(adapter, 1); 1536 1537 retry = true; 1538 netdev_dbg(netdev, 1539 "Received partial success, retrying...\n"); 1540 adapter->init_done_rc = 0; 1541 reinit_completion(&adapter->init_done); 1542 
send_query_cap(adapter); 1543 if (!wait_for_completion_timeout(&adapter->init_done, 1544 timeout)) { 1545 netdev_warn(netdev, 1546 "Capabilities query timed out\n"); 1547 return -ETIMEDOUT; 1548 } 1549 1550 rc = init_sub_crqs(adapter); 1551 if (rc) { 1552 netdev_warn(netdev, 1553 "SCRQ initialization failed\n"); 1554 return rc; 1555 } 1556 1557 rc = init_sub_crq_irqs(adapter); 1558 if (rc) { 1559 netdev_warn(netdev, 1560 "SCRQ irq initialization failed\n"); 1561 return rc; 1562 } 1563 } else if (adapter->init_done_rc) { 1564 netdev_warn(netdev, "Adapter login failed, init_done_rc = %d\n", 1565 adapter->init_done_rc); 1566 return -EIO; 1567 } 1568 } while (retry); 1569 1570 __ibmvnic_set_mac(netdev, adapter->mac_addr); 1571 1572 netdev_dbg(netdev, "[S:%s] Login succeeded\n", adapter_state_to_string(adapter->state)); 1573 return 0; 1574 } 1575 1576 static void release_login_buffer(struct ibmvnic_adapter *adapter) 1577 { 1578 kfree(adapter->login_buf); 1579 adapter->login_buf = NULL; 1580 } 1581 1582 static void release_login_rsp_buffer(struct ibmvnic_adapter *adapter) 1583 { 1584 kfree(adapter->login_rsp_buf); 1585 adapter->login_rsp_buf = NULL; 1586 } 1587 1588 static void release_resources(struct ibmvnic_adapter *adapter) 1589 { 1590 release_vpd_data(adapter); 1591 1592 release_napi(adapter); 1593 release_login_buffer(adapter); 1594 release_login_rsp_buffer(adapter); 1595 } 1596 1597 static int set_link_state(struct ibmvnic_adapter *adapter, u8 link_state) 1598 { 1599 struct net_device *netdev = adapter->netdev; 1600 unsigned long timeout = msecs_to_jiffies(20000); 1601 union ibmvnic_crq crq; 1602 bool resend; 1603 int rc; 1604 1605 netdev_dbg(netdev, "setting link state %d\n", link_state); 1606 1607 memset(&crq, 0, sizeof(crq)); 1608 crq.logical_link_state.first = IBMVNIC_CRQ_CMD; 1609 crq.logical_link_state.cmd = LOGICAL_LINK_STATE; 1610 crq.logical_link_state.link_state = link_state; 1611 1612 do { 1613 resend = false; 1614 1615 reinit_completion(&adapter->init_done); 1616 rc = ibmvnic_send_crq(adapter, &crq); 1617 if (rc) { 1618 netdev_err(netdev, "Failed to set link state\n"); 1619 return rc; 1620 } 1621 1622 if (!wait_for_completion_timeout(&adapter->init_done, 1623 timeout)) { 1624 netdev_err(netdev, "timeout setting link state\n"); 1625 return -ETIMEDOUT; 1626 } 1627 1628 if (adapter->init_done_rc == PARTIALSUCCESS) { 1629 /* Partuial success, delay and re-send */ 1630 mdelay(1000); 1631 resend = true; 1632 } else if (adapter->init_done_rc) { 1633 netdev_warn(netdev, "Unable to set link state, rc=%d\n", 1634 adapter->init_done_rc); 1635 return adapter->init_done_rc; 1636 } 1637 } while (resend); 1638 1639 return 0; 1640 } 1641 1642 static int set_real_num_queues(struct net_device *netdev) 1643 { 1644 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 1645 int rc; 1646 1647 netdev_dbg(netdev, "Setting real tx/rx queues (%llx/%llx)\n", 1648 adapter->req_tx_queues, adapter->req_rx_queues); 1649 1650 rc = netif_set_real_num_tx_queues(netdev, adapter->req_tx_queues); 1651 if (rc) { 1652 netdev_err(netdev, "failed to set the number of tx queues\n"); 1653 return rc; 1654 } 1655 1656 rc = netif_set_real_num_rx_queues(netdev, adapter->req_rx_queues); 1657 if (rc) 1658 netdev_err(netdev, "failed to set the number of rx queues\n"); 1659 1660 return rc; 1661 } 1662 1663 static int ibmvnic_get_vpd(struct ibmvnic_adapter *adapter) 1664 { 1665 struct device *dev = &adapter->vdev->dev; 1666 union ibmvnic_crq crq; 1667 int len = 0; 1668 int rc; 1669 1670 if (adapter->vpd->buff) 1671 len = 
adapter->vpd->len; 1672 1673 mutex_lock(&adapter->fw_lock); 1674 adapter->fw_done_rc = 0; 1675 reinit_completion(&adapter->fw_done); 1676 1677 crq.get_vpd_size.first = IBMVNIC_CRQ_CMD; 1678 crq.get_vpd_size.cmd = GET_VPD_SIZE; 1679 rc = ibmvnic_send_crq(adapter, &crq); 1680 if (rc) { 1681 mutex_unlock(&adapter->fw_lock); 1682 return rc; 1683 } 1684 1685 rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000); 1686 if (rc) { 1687 dev_err(dev, "Could not retrieve VPD size, rc = %d\n", rc); 1688 mutex_unlock(&adapter->fw_lock); 1689 return rc; 1690 } 1691 mutex_unlock(&adapter->fw_lock); 1692 1693 if (!adapter->vpd->len) 1694 return -ENODATA; 1695 1696 if (!adapter->vpd->buff) 1697 adapter->vpd->buff = kzalloc(adapter->vpd->len, GFP_KERNEL); 1698 else if (adapter->vpd->len != len) 1699 adapter->vpd->buff = 1700 krealloc(adapter->vpd->buff, 1701 adapter->vpd->len, GFP_KERNEL); 1702 1703 if (!adapter->vpd->buff) { 1704 dev_err(dev, "Could allocate VPD buffer\n"); 1705 return -ENOMEM; 1706 } 1707 1708 adapter->vpd->dma_addr = 1709 dma_map_single(dev, adapter->vpd->buff, adapter->vpd->len, 1710 DMA_FROM_DEVICE); 1711 if (dma_mapping_error(dev, adapter->vpd->dma_addr)) { 1712 dev_err(dev, "Could not map VPD buffer\n"); 1713 kfree(adapter->vpd->buff); 1714 adapter->vpd->buff = NULL; 1715 return -ENOMEM; 1716 } 1717 1718 mutex_lock(&adapter->fw_lock); 1719 adapter->fw_done_rc = 0; 1720 reinit_completion(&adapter->fw_done); 1721 1722 crq.get_vpd.first = IBMVNIC_CRQ_CMD; 1723 crq.get_vpd.cmd = GET_VPD; 1724 crq.get_vpd.ioba = cpu_to_be32(adapter->vpd->dma_addr); 1725 crq.get_vpd.len = cpu_to_be32((u32)adapter->vpd->len); 1726 rc = ibmvnic_send_crq(adapter, &crq); 1727 if (rc) { 1728 kfree(adapter->vpd->buff); 1729 adapter->vpd->buff = NULL; 1730 mutex_unlock(&adapter->fw_lock); 1731 return rc; 1732 } 1733 1734 rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000); 1735 if (rc) { 1736 dev_err(dev, "Unable to retrieve VPD, rc = %d\n", rc); 1737 kfree(adapter->vpd->buff); 1738 adapter->vpd->buff = NULL; 1739 mutex_unlock(&adapter->fw_lock); 1740 return rc; 1741 } 1742 1743 mutex_unlock(&adapter->fw_lock); 1744 return 0; 1745 } 1746 1747 static int init_resources(struct ibmvnic_adapter *adapter) 1748 { 1749 struct net_device *netdev = adapter->netdev; 1750 int rc; 1751 1752 rc = set_real_num_queues(netdev); 1753 if (rc) 1754 return rc; 1755 1756 adapter->vpd = kzalloc(sizeof(*adapter->vpd), GFP_KERNEL); 1757 if (!adapter->vpd) 1758 return -ENOMEM; 1759 1760 /* Vital Product Data (VPD) */ 1761 rc = ibmvnic_get_vpd(adapter); 1762 if (rc) { 1763 netdev_err(netdev, "failed to initialize Vital Product Data (VPD)\n"); 1764 return rc; 1765 } 1766 1767 rc = init_napi(adapter); 1768 if (rc) 1769 return rc; 1770 1771 send_query_map(adapter); 1772 1773 rc = init_rx_pools(netdev); 1774 if (rc) 1775 return rc; 1776 1777 rc = init_tx_pools(netdev); 1778 return rc; 1779 } 1780 1781 static int __ibmvnic_open(struct net_device *netdev) 1782 { 1783 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 1784 enum vnic_state prev_state = adapter->state; 1785 int i, rc; 1786 1787 adapter->state = VNIC_OPENING; 1788 replenish_pools(adapter); 1789 ibmvnic_napi_enable(adapter); 1790 1791 /* We're ready to receive frames, enable the sub-crq interrupts and 1792 * set the logical link state to up 1793 */ 1794 for (i = 0; i < adapter->req_rx_queues; i++) { 1795 netdev_dbg(netdev, "Enabling rx_scrq[%d] irq\n", i); 1796 if (prev_state == VNIC_CLOSED) 1797 enable_irq(adapter->rx_scrq[i]->irq); 1798 
enable_scrq_irq(adapter, adapter->rx_scrq[i]); 1799 } 1800 1801 for (i = 0; i < adapter->req_tx_queues; i++) { 1802 netdev_dbg(netdev, "Enabling tx_scrq[%d] irq\n", i); 1803 if (prev_state == VNIC_CLOSED) 1804 enable_irq(adapter->tx_scrq[i]->irq); 1805 enable_scrq_irq(adapter, adapter->tx_scrq[i]); 1806 netdev_tx_reset_queue(netdev_get_tx_queue(netdev, i)); 1807 } 1808 1809 rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_UP); 1810 if (rc) { 1811 ibmvnic_napi_disable(adapter); 1812 ibmvnic_disable_irqs(adapter); 1813 return rc; 1814 } 1815 1816 adapter->tx_queues_active = true; 1817 1818 /* Since queues were stopped until now, there shouldn't be any 1819 * one in ibmvnic_complete_tx() or ibmvnic_xmit() so maybe we 1820 * don't need the synchronize_rcu()? Leaving it for consistency 1821 * with setting ->tx_queues_active = false. 1822 */ 1823 synchronize_rcu(); 1824 1825 netif_tx_start_all_queues(netdev); 1826 1827 if (prev_state == VNIC_CLOSED) { 1828 for (i = 0; i < adapter->req_rx_queues; i++) 1829 napi_schedule(&adapter->napi[i]); 1830 } 1831 1832 adapter->state = VNIC_OPEN; 1833 return rc; 1834 } 1835 1836 static int ibmvnic_open(struct net_device *netdev) 1837 { 1838 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 1839 int rc; 1840 1841 ASSERT_RTNL(); 1842 1843 /* If device failover is pending or we are about to reset, just set 1844 * device state and return. Device operation will be handled by reset 1845 * routine. 1846 * 1847 * It should be safe to overwrite the adapter->state here. Since 1848 * we hold the rtnl, either the reset has not actually started or 1849 * the rtnl got dropped during the set_link_state() in do_reset(). 1850 * In the former case, no one else is changing the state (again we 1851 * have the rtnl) and in the latter case, do_reset() will detect and 1852 * honor our setting below. 1853 */ 1854 if (adapter->failover_pending || (test_bit(0, &adapter->resetting))) { 1855 netdev_dbg(netdev, "[S:%s FOP:%d] Resetting, deferring open\n", 1856 adapter_state_to_string(adapter->state), 1857 adapter->failover_pending); 1858 adapter->state = VNIC_OPEN; 1859 rc = 0; 1860 goto out; 1861 } 1862 1863 if (adapter->state != VNIC_CLOSED) { 1864 rc = ibmvnic_login(netdev); 1865 if (rc) 1866 goto out; 1867 1868 rc = init_resources(adapter); 1869 if (rc) { 1870 netdev_err(netdev, "failed to initialize resources\n"); 1871 goto out; 1872 } 1873 } 1874 1875 rc = __ibmvnic_open(netdev); 1876 1877 out: 1878 /* If open failed and there is a pending failover or in-progress reset, 1879 * set device state and return. Device operation will be handled by 1880 * reset routine. See also comments above regarding rtnl. 
1881 */ 1882 if (rc && 1883 (adapter->failover_pending || (test_bit(0, &adapter->resetting)))) { 1884 adapter->state = VNIC_OPEN; 1885 rc = 0; 1886 } 1887 1888 if (rc) { 1889 release_resources(adapter); 1890 release_rx_pools(adapter); 1891 release_tx_pools(adapter); 1892 } 1893 1894 return rc; 1895 } 1896 1897 static void clean_rx_pools(struct ibmvnic_adapter *adapter) 1898 { 1899 struct ibmvnic_rx_pool *rx_pool; 1900 struct ibmvnic_rx_buff *rx_buff; 1901 u64 rx_entries; 1902 int rx_scrqs; 1903 int i, j; 1904 1905 if (!adapter->rx_pool) 1906 return; 1907 1908 rx_scrqs = adapter->num_active_rx_pools; 1909 rx_entries = adapter->req_rx_add_entries_per_subcrq; 1910 1911 /* Free any remaining skbs in the rx buffer pools */ 1912 for (i = 0; i < rx_scrqs; i++) { 1913 rx_pool = &adapter->rx_pool[i]; 1914 if (!rx_pool || !rx_pool->rx_buff) 1915 continue; 1916 1917 netdev_dbg(adapter->netdev, "Cleaning rx_pool[%d]\n", i); 1918 for (j = 0; j < rx_entries; j++) { 1919 rx_buff = &rx_pool->rx_buff[j]; 1920 if (rx_buff && rx_buff->skb) { 1921 dev_kfree_skb_any(rx_buff->skb); 1922 rx_buff->skb = NULL; 1923 } 1924 } 1925 } 1926 } 1927 1928 static void clean_one_tx_pool(struct ibmvnic_adapter *adapter, 1929 struct ibmvnic_tx_pool *tx_pool) 1930 { 1931 struct ibmvnic_tx_buff *tx_buff; 1932 u64 tx_entries; 1933 int i; 1934 1935 if (!tx_pool || !tx_pool->tx_buff) 1936 return; 1937 1938 tx_entries = tx_pool->num_buffers; 1939 1940 for (i = 0; i < tx_entries; i++) { 1941 tx_buff = &tx_pool->tx_buff[i]; 1942 if (tx_buff && tx_buff->skb) { 1943 dev_kfree_skb_any(tx_buff->skb); 1944 tx_buff->skb = NULL; 1945 } 1946 } 1947 } 1948 1949 static void clean_tx_pools(struct ibmvnic_adapter *adapter) 1950 { 1951 int tx_scrqs; 1952 int i; 1953 1954 if (!adapter->tx_pool || !adapter->tso_pool) 1955 return; 1956 1957 tx_scrqs = adapter->num_active_tx_pools; 1958 1959 /* Free any remaining skbs in the tx buffer pools */ 1960 for (i = 0; i < tx_scrqs; i++) { 1961 netdev_dbg(adapter->netdev, "Cleaning tx_pool[%d]\n", i); 1962 clean_one_tx_pool(adapter, &adapter->tx_pool[i]); 1963 clean_one_tx_pool(adapter, &adapter->tso_pool[i]); 1964 } 1965 } 1966 1967 static void ibmvnic_disable_irqs(struct ibmvnic_adapter *adapter) 1968 { 1969 struct net_device *netdev = adapter->netdev; 1970 int i; 1971 1972 if (adapter->tx_scrq) { 1973 for (i = 0; i < adapter->req_tx_queues; i++) 1974 if (adapter->tx_scrq[i]->irq) { 1975 netdev_dbg(netdev, 1976 "Disabling tx_scrq[%d] irq\n", i); 1977 disable_scrq_irq(adapter, adapter->tx_scrq[i]); 1978 disable_irq(adapter->tx_scrq[i]->irq); 1979 } 1980 } 1981 1982 if (adapter->rx_scrq) { 1983 for (i = 0; i < adapter->req_rx_queues; i++) { 1984 if (adapter->rx_scrq[i]->irq) { 1985 netdev_dbg(netdev, 1986 "Disabling rx_scrq[%d] irq\n", i); 1987 disable_scrq_irq(adapter, adapter->rx_scrq[i]); 1988 disable_irq(adapter->rx_scrq[i]->irq); 1989 } 1990 } 1991 } 1992 } 1993 1994 static void ibmvnic_cleanup(struct net_device *netdev) 1995 { 1996 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 1997 1998 /* ensure that transmissions are stopped if called by do_reset */ 1999 2000 adapter->tx_queues_active = false; 2001 2002 /* Ensure complete_tx() and ibmvnic_xmit() see ->tx_queues_active 2003 * update so they don't restart a queue after we stop it below. 
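 * This pairs with the rcu_read_lock() sections in those paths, which
 * re-check ->tx_queues_active before touching or waking a queue.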
2004 */ 2005 synchronize_rcu(); 2006 2007 if (test_bit(0, &adapter->resetting)) 2008 netif_tx_disable(netdev); 2009 else 2010 netif_tx_stop_all_queues(netdev); 2011 2012 ibmvnic_napi_disable(adapter); 2013 ibmvnic_disable_irqs(adapter); 2014 } 2015 2016 static int __ibmvnic_close(struct net_device *netdev) 2017 { 2018 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 2019 int rc = 0; 2020 2021 adapter->state = VNIC_CLOSING; 2022 rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN); 2023 adapter->state = VNIC_CLOSED; 2024 return rc; 2025 } 2026 2027 static int ibmvnic_close(struct net_device *netdev) 2028 { 2029 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 2030 int rc; 2031 2032 netdev_dbg(netdev, "[S:%s FOP:%d FRR:%d] Closing\n", 2033 adapter_state_to_string(adapter->state), 2034 adapter->failover_pending, 2035 adapter->force_reset_recovery); 2036 2037 /* If device failover is pending, just set device state and return. 2038 * Device operation will be handled by reset routine. 2039 */ 2040 if (adapter->failover_pending) { 2041 adapter->state = VNIC_CLOSED; 2042 return 0; 2043 } 2044 2045 rc = __ibmvnic_close(netdev); 2046 ibmvnic_cleanup(netdev); 2047 clean_rx_pools(adapter); 2048 clean_tx_pools(adapter); 2049 2050 return rc; 2051 } 2052 2053 /** 2054 * build_hdr_data - creates L2/L3/L4 header data buffer 2055 * @hdr_field: bitfield determining needed headers 2056 * @skb: socket buffer 2057 * @hdr_len: array of header lengths 2058 * @hdr_data: buffer to write the header to 2059 * 2060 * Reads hdr_field to determine which headers are needed by firmware. 2061 * Builds a buffer containing these headers. Saves individual header 2062 * lengths and total buffer length to be used to build descriptors. 2063 */ 2064 static int build_hdr_data(u8 hdr_field, struct sk_buff *skb, 2065 int *hdr_len, u8 *hdr_data) 2066 { 2067 int len = 0; 2068 u8 *hdr; 2069 2070 if (skb_vlan_tagged(skb) && !skb_vlan_tag_present(skb)) 2071 hdr_len[0] = sizeof(struct vlan_ethhdr); 2072 else 2073 hdr_len[0] = sizeof(struct ethhdr); 2074 2075 if (skb->protocol == htons(ETH_P_IP)) { 2076 hdr_len[1] = ip_hdr(skb)->ihl * 4; 2077 if (ip_hdr(skb)->protocol == IPPROTO_TCP) 2078 hdr_len[2] = tcp_hdrlen(skb); 2079 else if (ip_hdr(skb)->protocol == IPPROTO_UDP) 2080 hdr_len[2] = sizeof(struct udphdr); 2081 } else if (skb->protocol == htons(ETH_P_IPV6)) { 2082 hdr_len[1] = sizeof(struct ipv6hdr); 2083 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) 2084 hdr_len[2] = tcp_hdrlen(skb); 2085 else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP) 2086 hdr_len[2] = sizeof(struct udphdr); 2087 } else if (skb->protocol == htons(ETH_P_ARP)) { 2088 hdr_len[1] = arp_hdr_len(skb->dev); 2089 hdr_len[2] = 0; 2090 } 2091 2092 memset(hdr_data, 0, 120); 2093 if ((hdr_field >> 6) & 1) { 2094 hdr = skb_mac_header(skb); 2095 memcpy(hdr_data, hdr, hdr_len[0]); 2096 len += hdr_len[0]; 2097 } 2098 2099 if ((hdr_field >> 5) & 1) { 2100 hdr = skb_network_header(skb); 2101 memcpy(hdr_data + len, hdr, hdr_len[1]); 2102 len += hdr_len[1]; 2103 } 2104 2105 if ((hdr_field >> 4) & 1) { 2106 hdr = skb_transport_header(skb); 2107 memcpy(hdr_data + len, hdr, hdr_len[2]); 2108 len += hdr_len[2]; 2109 } 2110 return len; 2111 } 2112 2113 /** 2114 * create_hdr_descs - create header and header extension descriptors 2115 * @hdr_field: bitfield determining needed headers 2116 * @hdr_data: buffer containing header data 2117 * @len: length of data buffer 2118 * @hdr_len: array of individual header lengths 2119 * @scrq_arr: descriptor array 2120 * 2121 * Creates header and, 
if needed, header extension descriptors and 2122 * places them in a descriptor array, scrq_arr 2123 */ 2124 2125 static int create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len, 2126 union sub_crq *scrq_arr) 2127 { 2128 union sub_crq hdr_desc; 2129 int tmp_len = len; 2130 int num_descs = 0; 2131 u8 *data, *cur; 2132 int tmp; 2133 2134 while (tmp_len > 0) { 2135 cur = hdr_data + len - tmp_len; 2136 2137 memset(&hdr_desc, 0, sizeof(hdr_desc)); 2138 if (cur != hdr_data) { 2139 data = hdr_desc.hdr_ext.data; 2140 tmp = tmp_len > 29 ? 29 : tmp_len; 2141 hdr_desc.hdr_ext.first = IBMVNIC_CRQ_CMD; 2142 hdr_desc.hdr_ext.type = IBMVNIC_HDR_EXT_DESC; 2143 hdr_desc.hdr_ext.len = tmp; 2144 } else { 2145 data = hdr_desc.hdr.data; 2146 tmp = tmp_len > 24 ? 24 : tmp_len; 2147 hdr_desc.hdr.first = IBMVNIC_CRQ_CMD; 2148 hdr_desc.hdr.type = IBMVNIC_HDR_DESC; 2149 hdr_desc.hdr.len = tmp; 2150 hdr_desc.hdr.l2_len = (u8)hdr_len[0]; 2151 hdr_desc.hdr.l3_len = cpu_to_be16((u16)hdr_len[1]); 2152 hdr_desc.hdr.l4_len = (u8)hdr_len[2]; 2153 hdr_desc.hdr.flag = hdr_field << 1; 2154 } 2155 memcpy(data, cur, tmp); 2156 tmp_len -= tmp; 2157 *scrq_arr = hdr_desc; 2158 scrq_arr++; 2159 num_descs++; 2160 } 2161 2162 return num_descs; 2163 } 2164 2165 /** 2166 * build_hdr_descs_arr - build a header descriptor array 2167 * @skb: tx socket buffer 2168 * @indir_arr: indirect array 2169 * @num_entries: number of descriptors to be sent 2170 * @hdr_field: bit field determining which headers will be sent 2171 * 2172 * This function will build a TX descriptor array with applicable 2173 * L2/L3/L4 packet header descriptors to be sent by send_subcrq_indirect. 2174 */ 2175 2176 static void build_hdr_descs_arr(struct sk_buff *skb, 2177 union sub_crq *indir_arr, 2178 int *num_entries, u8 hdr_field) 2179 { 2180 int hdr_len[3] = {0, 0, 0}; 2181 u8 hdr_data[140] = {0}; 2182 int tot_len; 2183 2184 tot_len = build_hdr_data(hdr_field, skb, hdr_len, 2185 hdr_data); 2186 *num_entries += create_hdr_descs(hdr_field, hdr_data, tot_len, hdr_len, 2187 indir_arr + 1); 2188 } 2189 2190 static int ibmvnic_xmit_workarounds(struct sk_buff *skb, 2191 struct net_device *netdev) 2192 { 2193 /* For some backing devices, mishandling of small packets 2194 * can result in a loss of connection or TX stall. 
Device 2195 * architects recommend that no packet should be smaller 2196 * than the minimum MTU value provided to the driver, so 2197 * pad any packets to that length 2198 */ 2199 if (skb->len < netdev->min_mtu) 2200 return skb_put_padto(skb, netdev->min_mtu); 2201 2202 return 0; 2203 } 2204 2205 static void ibmvnic_tx_scrq_clean_buffer(struct ibmvnic_adapter *adapter, 2206 struct ibmvnic_sub_crq_queue *tx_scrq) 2207 { 2208 struct ibmvnic_ind_xmit_queue *ind_bufp; 2209 struct ibmvnic_tx_buff *tx_buff; 2210 struct ibmvnic_tx_pool *tx_pool; 2211 union sub_crq tx_scrq_entry; 2212 int queue_num; 2213 int entries; 2214 int index; 2215 int i; 2216 2217 ind_bufp = &tx_scrq->ind_buf; 2218 entries = (u64)ind_bufp->index; 2219 queue_num = tx_scrq->pool_index; 2220 2221 for (i = entries - 1; i >= 0; --i) { 2222 tx_scrq_entry = ind_bufp->indir_arr[i]; 2223 if (tx_scrq_entry.v1.type != IBMVNIC_TX_DESC) 2224 continue; 2225 index = be32_to_cpu(tx_scrq_entry.v1.correlator); 2226 if (index & IBMVNIC_TSO_POOL_MASK) { 2227 tx_pool = &adapter->tso_pool[queue_num]; 2228 index &= ~IBMVNIC_TSO_POOL_MASK; 2229 } else { 2230 tx_pool = &adapter->tx_pool[queue_num]; 2231 } 2232 tx_pool->free_map[tx_pool->consumer_index] = index; 2233 tx_pool->consumer_index = tx_pool->consumer_index == 0 ? 2234 tx_pool->num_buffers - 1 : 2235 tx_pool->consumer_index - 1; 2236 tx_buff = &tx_pool->tx_buff[index]; 2237 adapter->netdev->stats.tx_packets--; 2238 adapter->netdev->stats.tx_bytes -= tx_buff->skb->len; 2239 adapter->tx_stats_buffers[queue_num].packets--; 2240 adapter->tx_stats_buffers[queue_num].bytes -= 2241 tx_buff->skb->len; 2242 dev_kfree_skb_any(tx_buff->skb); 2243 tx_buff->skb = NULL; 2244 adapter->netdev->stats.tx_dropped++; 2245 } 2246 2247 ind_bufp->index = 0; 2248 2249 if (atomic_sub_return(entries, &tx_scrq->used) <= 2250 (adapter->req_tx_entries_per_subcrq / 2) && 2251 __netif_subqueue_stopped(adapter->netdev, queue_num)) { 2252 rcu_read_lock(); 2253 2254 if (adapter->tx_queues_active) { 2255 netif_wake_subqueue(adapter->netdev, queue_num); 2256 netdev_dbg(adapter->netdev, "Started queue %d\n", 2257 queue_num); 2258 } 2259 2260 rcu_read_unlock(); 2261 } 2262 } 2263 2264 static int ibmvnic_tx_scrq_flush(struct ibmvnic_adapter *adapter, 2265 struct ibmvnic_sub_crq_queue *tx_scrq) 2266 { 2267 struct ibmvnic_ind_xmit_queue *ind_bufp; 2268 u64 dma_addr; 2269 u64 entries; 2270 u64 handle; 2271 int rc; 2272 2273 ind_bufp = &tx_scrq->ind_buf; 2274 dma_addr = (u64)ind_bufp->indir_dma; 2275 entries = (u64)ind_bufp->index; 2276 handle = tx_scrq->handle; 2277 2278 if (!entries) 2279 return 0; 2280 rc = send_subcrq_indirect(adapter, handle, dma_addr, entries); 2281 if (rc) 2282 ibmvnic_tx_scrq_clean_buffer(adapter, tx_scrq); 2283 else 2284 ind_bufp->index = 0; 2285 return 0; 2286 } 2287 2288 static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) 2289 { 2290 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 2291 int queue_num = skb_get_queue_mapping(skb); 2292 u8 *hdrs = (u8 *)&adapter->tx_rx_desc_req; 2293 struct device *dev = &adapter->vdev->dev; 2294 struct ibmvnic_ind_xmit_queue *ind_bufp; 2295 struct ibmvnic_tx_buff *tx_buff = NULL; 2296 struct ibmvnic_sub_crq_queue *tx_scrq; 2297 struct ibmvnic_long_term_buff *ltb; 2298 struct ibmvnic_tx_pool *tx_pool; 2299 unsigned int tx_send_failed = 0; 2300 netdev_tx_t ret = NETDEV_TX_OK; 2301 unsigned int tx_map_failed = 0; 2302 union sub_crq indir_arr[16]; 2303 unsigned int tx_dropped = 0; 2304 unsigned int tx_packets = 0; 2305 unsigned int tx_bytes = 
0; 2306 dma_addr_t data_dma_addr; 2307 struct netdev_queue *txq; 2308 unsigned long lpar_rc; 2309 union sub_crq tx_crq; 2310 unsigned int offset; 2311 int num_entries = 1; 2312 unsigned char *dst; 2313 int bufidx = 0; 2314 u8 proto = 0; 2315 2316 /* If a reset is in progress, drop the packet since 2317 * the scrqs may get torn down. Otherwise use the 2318 * rcu to ensure reset waits for us to complete. 2319 */ 2320 rcu_read_lock(); 2321 if (!adapter->tx_queues_active) { 2322 dev_kfree_skb_any(skb); 2323 2324 tx_send_failed++; 2325 tx_dropped++; 2326 ret = NETDEV_TX_OK; 2327 goto out; 2328 } 2329 2330 tx_scrq = adapter->tx_scrq[queue_num]; 2331 txq = netdev_get_tx_queue(netdev, queue_num); 2332 ind_bufp = &tx_scrq->ind_buf; 2333 2334 if (ibmvnic_xmit_workarounds(skb, netdev)) { 2335 tx_dropped++; 2336 tx_send_failed++; 2337 ret = NETDEV_TX_OK; 2338 ibmvnic_tx_scrq_flush(adapter, tx_scrq); 2339 goto out; 2340 } 2341 2342 if (skb_is_gso(skb)) 2343 tx_pool = &adapter->tso_pool[queue_num]; 2344 else 2345 tx_pool = &adapter->tx_pool[queue_num]; 2346 2347 bufidx = tx_pool->free_map[tx_pool->consumer_index]; 2348 2349 if (bufidx == IBMVNIC_INVALID_MAP) { 2350 dev_kfree_skb_any(skb); 2351 tx_send_failed++; 2352 tx_dropped++; 2353 ibmvnic_tx_scrq_flush(adapter, tx_scrq); 2354 ret = NETDEV_TX_OK; 2355 goto out; 2356 } 2357 2358 tx_pool->free_map[tx_pool->consumer_index] = IBMVNIC_INVALID_MAP; 2359 2360 map_txpool_buf_to_ltb(tx_pool, bufidx, <b, &offset); 2361 2362 dst = ltb->buff + offset; 2363 memset(dst, 0, tx_pool->buf_size); 2364 data_dma_addr = ltb->addr + offset; 2365 2366 if (skb_shinfo(skb)->nr_frags) { 2367 int cur, i; 2368 2369 /* Copy the head */ 2370 skb_copy_from_linear_data(skb, dst, skb_headlen(skb)); 2371 cur = skb_headlen(skb); 2372 2373 /* Copy the frags */ 2374 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 2375 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 2376 2377 memcpy(dst + cur, skb_frag_address(frag), 2378 skb_frag_size(frag)); 2379 cur += skb_frag_size(frag); 2380 } 2381 } else { 2382 skb_copy_from_linear_data(skb, dst, skb->len); 2383 } 2384 2385 /* post changes to long_term_buff *dst before VIOS accessing it */ 2386 dma_wmb(); 2387 2388 tx_pool->consumer_index = 2389 (tx_pool->consumer_index + 1) % tx_pool->num_buffers; 2390 2391 tx_buff = &tx_pool->tx_buff[bufidx]; 2392 tx_buff->skb = skb; 2393 tx_buff->index = bufidx; 2394 tx_buff->pool_index = queue_num; 2395 2396 memset(&tx_crq, 0, sizeof(tx_crq)); 2397 tx_crq.v1.first = IBMVNIC_CRQ_CMD; 2398 tx_crq.v1.type = IBMVNIC_TX_DESC; 2399 tx_crq.v1.n_crq_elem = 1; 2400 tx_crq.v1.n_sge = 1; 2401 tx_crq.v1.flags1 = IBMVNIC_TX_COMP_NEEDED; 2402 2403 if (skb_is_gso(skb)) 2404 tx_crq.v1.correlator = 2405 cpu_to_be32(bufidx | IBMVNIC_TSO_POOL_MASK); 2406 else 2407 tx_crq.v1.correlator = cpu_to_be32(bufidx); 2408 tx_crq.v1.dma_reg = cpu_to_be16(ltb->map_id); 2409 tx_crq.v1.sge_len = cpu_to_be32(skb->len); 2410 tx_crq.v1.ioba = cpu_to_be64(data_dma_addr); 2411 2412 if (adapter->vlan_header_insertion && skb_vlan_tag_present(skb)) { 2413 tx_crq.v1.flags2 |= IBMVNIC_TX_VLAN_INSERT; 2414 tx_crq.v1.vlan_id = cpu_to_be16(skb->vlan_tci); 2415 } 2416 2417 if (skb->protocol == htons(ETH_P_IP)) { 2418 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV4; 2419 proto = ip_hdr(skb)->protocol; 2420 } else if (skb->protocol == htons(ETH_P_IPV6)) { 2421 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV6; 2422 proto = ipv6_hdr(skb)->nexthdr; 2423 } 2424 2425 if (proto == IPPROTO_TCP) 2426 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_TCP; 2427 else if (proto == 
IPPROTO_UDP) 2428 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_UDP; 2429 2430 if (skb->ip_summed == CHECKSUM_PARTIAL) { 2431 tx_crq.v1.flags1 |= IBMVNIC_TX_CHKSUM_OFFLOAD; 2432 hdrs += 2; 2433 } 2434 if (skb_is_gso(skb)) { 2435 tx_crq.v1.flags1 |= IBMVNIC_TX_LSO; 2436 tx_crq.v1.mss = cpu_to_be16(skb_shinfo(skb)->gso_size); 2437 hdrs += 2; 2438 } 2439 2440 if ((*hdrs >> 7) & 1) 2441 build_hdr_descs_arr(skb, indir_arr, &num_entries, *hdrs); 2442 2443 tx_crq.v1.n_crq_elem = num_entries; 2444 tx_buff->num_entries = num_entries; 2445 /* flush buffer if current entry can not fit */ 2446 if (num_entries + ind_bufp->index > IBMVNIC_MAX_IND_DESCS) { 2447 lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq); 2448 if (lpar_rc != H_SUCCESS) 2449 goto tx_flush_err; 2450 } 2451 2452 indir_arr[0] = tx_crq; 2453 memcpy(&ind_bufp->indir_arr[ind_bufp->index], &indir_arr[0], 2454 num_entries * sizeof(struct ibmvnic_generic_scrq)); 2455 ind_bufp->index += num_entries; 2456 if (__netdev_tx_sent_queue(txq, skb->len, 2457 netdev_xmit_more() && 2458 ind_bufp->index < IBMVNIC_MAX_IND_DESCS)) { 2459 lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq); 2460 if (lpar_rc != H_SUCCESS) 2461 goto tx_err; 2462 } 2463 2464 if (atomic_add_return(num_entries, &tx_scrq->used) 2465 >= adapter->req_tx_entries_per_subcrq) { 2466 netdev_dbg(netdev, "Stopping queue %d\n", queue_num); 2467 netif_stop_subqueue(netdev, queue_num); 2468 } 2469 2470 tx_packets++; 2471 tx_bytes += skb->len; 2472 txq_trans_cond_update(txq); 2473 ret = NETDEV_TX_OK; 2474 goto out; 2475 2476 tx_flush_err: 2477 dev_kfree_skb_any(skb); 2478 tx_buff->skb = NULL; 2479 tx_pool->consumer_index = tx_pool->consumer_index == 0 ? 2480 tx_pool->num_buffers - 1 : 2481 tx_pool->consumer_index - 1; 2482 tx_dropped++; 2483 tx_err: 2484 if (lpar_rc != H_CLOSED && lpar_rc != H_PARAMETER) 2485 dev_err_ratelimited(dev, "tx: send failed\n"); 2486 2487 if (lpar_rc == H_CLOSED || adapter->failover_pending) { 2488 /* Disable TX and report carrier off if queue is closed 2489 * or pending failover. 2490 * Firmware guarantees that a signal will be sent to the 2491 * driver, triggering a reset or some other action. 
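 * Until that reset completes, keep the queues stopped and the carrier
 * off so the stack stops handing us packets.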
2492 */ 2493 netif_tx_stop_all_queues(netdev); 2494 netif_carrier_off(netdev); 2495 } 2496 out: 2497 rcu_read_unlock(); 2498 netdev->stats.tx_dropped += tx_dropped; 2499 netdev->stats.tx_bytes += tx_bytes; 2500 netdev->stats.tx_packets += tx_packets; 2501 adapter->tx_send_failed += tx_send_failed; 2502 adapter->tx_map_failed += tx_map_failed; 2503 adapter->tx_stats_buffers[queue_num].packets += tx_packets; 2504 adapter->tx_stats_buffers[queue_num].bytes += tx_bytes; 2505 adapter->tx_stats_buffers[queue_num].dropped_packets += tx_dropped; 2506 2507 return ret; 2508 } 2509 2510 static void ibmvnic_set_multi(struct net_device *netdev) 2511 { 2512 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 2513 struct netdev_hw_addr *ha; 2514 union ibmvnic_crq crq; 2515 2516 memset(&crq, 0, sizeof(crq)); 2517 crq.request_capability.first = IBMVNIC_CRQ_CMD; 2518 crq.request_capability.cmd = REQUEST_CAPABILITY; 2519 2520 if (netdev->flags & IFF_PROMISC) { 2521 if (!adapter->promisc_supported) 2522 return; 2523 } else { 2524 if (netdev->flags & IFF_ALLMULTI) { 2525 /* Accept all multicast */ 2526 memset(&crq, 0, sizeof(crq)); 2527 crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD; 2528 crq.multicast_ctrl.cmd = MULTICAST_CTRL; 2529 crq.multicast_ctrl.flags = IBMVNIC_ENABLE_ALL; 2530 ibmvnic_send_crq(adapter, &crq); 2531 } else if (netdev_mc_empty(netdev)) { 2532 /* Reject all multicast */ 2533 memset(&crq, 0, sizeof(crq)); 2534 crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD; 2535 crq.multicast_ctrl.cmd = MULTICAST_CTRL; 2536 crq.multicast_ctrl.flags = IBMVNIC_DISABLE_ALL; 2537 ibmvnic_send_crq(adapter, &crq); 2538 } else { 2539 /* Accept one or more multicast(s) */ 2540 netdev_for_each_mc_addr(ha, netdev) { 2541 memset(&crq, 0, sizeof(crq)); 2542 crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD; 2543 crq.multicast_ctrl.cmd = MULTICAST_CTRL; 2544 crq.multicast_ctrl.flags = IBMVNIC_ENABLE_MC; 2545 ether_addr_copy(&crq.multicast_ctrl.mac_addr[0], 2546 ha->addr); 2547 ibmvnic_send_crq(adapter, &crq); 2548 } 2549 } 2550 } 2551 } 2552 2553 static int __ibmvnic_set_mac(struct net_device *netdev, u8 *dev_addr) 2554 { 2555 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 2556 union ibmvnic_crq crq; 2557 int rc; 2558 2559 if (!is_valid_ether_addr(dev_addr)) { 2560 rc = -EADDRNOTAVAIL; 2561 goto err; 2562 } 2563 2564 memset(&crq, 0, sizeof(crq)); 2565 crq.change_mac_addr.first = IBMVNIC_CRQ_CMD; 2566 crq.change_mac_addr.cmd = CHANGE_MAC_ADDR; 2567 ether_addr_copy(&crq.change_mac_addr.mac_addr[0], dev_addr); 2568 2569 mutex_lock(&adapter->fw_lock); 2570 adapter->fw_done_rc = 0; 2571 reinit_completion(&adapter->fw_done); 2572 2573 rc = ibmvnic_send_crq(adapter, &crq); 2574 if (rc) { 2575 rc = -EIO; 2576 mutex_unlock(&adapter->fw_lock); 2577 goto err; 2578 } 2579 2580 rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000); 2581 /* netdev->dev_addr is changed in handle_change_mac_rsp function */ 2582 if (rc || adapter->fw_done_rc) { 2583 rc = -EIO; 2584 mutex_unlock(&adapter->fw_lock); 2585 goto err; 2586 } 2587 mutex_unlock(&adapter->fw_lock); 2588 return 0; 2589 err: 2590 ether_addr_copy(adapter->mac_addr, netdev->dev_addr); 2591 return rc; 2592 } 2593 2594 static int ibmvnic_set_mac(struct net_device *netdev, void *p) 2595 { 2596 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 2597 struct sockaddr *addr = p; 2598 int rc; 2599 2600 rc = 0; 2601 if (!is_valid_ether_addr(addr->sa_data)) 2602 return -EADDRNOTAVAIL; 2603 2604 ether_addr_copy(adapter->mac_addr, addr->sa_data); 2605 if (adapter->state != 
VNIC_PROBED) 2606 rc = __ibmvnic_set_mac(netdev, addr->sa_data); 2607 2608 return rc; 2609 } 2610 2611 static const char *reset_reason_to_string(enum ibmvnic_reset_reason reason) 2612 { 2613 switch (reason) { 2614 case VNIC_RESET_FAILOVER: 2615 return "FAILOVER"; 2616 case VNIC_RESET_MOBILITY: 2617 return "MOBILITY"; 2618 case VNIC_RESET_FATAL: 2619 return "FATAL"; 2620 case VNIC_RESET_NON_FATAL: 2621 return "NON_FATAL"; 2622 case VNIC_RESET_TIMEOUT: 2623 return "TIMEOUT"; 2624 case VNIC_RESET_CHANGE_PARAM: 2625 return "CHANGE_PARAM"; 2626 case VNIC_RESET_PASSIVE_INIT: 2627 return "PASSIVE_INIT"; 2628 } 2629 return "UNKNOWN"; 2630 } 2631 2632 /* 2633 * Initialize the init_done completion and return code values. We 2634 * can get a transport event just after registering the CRQ and the 2635 * tasklet will use this to communicate the transport event. To ensure 2636 * we don't miss the notification/error, initialize these _before_ 2637 * registering the CRQ. 2638 */ 2639 static inline void reinit_init_done(struct ibmvnic_adapter *adapter) 2640 { 2641 reinit_completion(&adapter->init_done); 2642 adapter->init_done_rc = 0; 2643 } 2644 2645 /* 2646 * do_reset returns zero if we are able to keep processing reset events, or 2647 * non-zero if we hit a fatal error and must halt. 2648 */ 2649 static int do_reset(struct ibmvnic_adapter *adapter, 2650 struct ibmvnic_rwi *rwi, u32 reset_state) 2651 { 2652 struct net_device *netdev = adapter->netdev; 2653 u64 old_num_rx_queues, old_num_tx_queues; 2654 u64 old_num_rx_slots, old_num_tx_slots; 2655 int rc; 2656 2657 netdev_dbg(adapter->netdev, 2658 "[S:%s FOP:%d] Reset reason: %s, reset_state: %s\n", 2659 adapter_state_to_string(adapter->state), 2660 adapter->failover_pending, 2661 reset_reason_to_string(rwi->reset_reason), 2662 adapter_state_to_string(reset_state)); 2663 2664 adapter->reset_reason = rwi->reset_reason; 2665 /* requestor of VNIC_RESET_CHANGE_PARAM already has the rtnl lock */ 2666 if (!(adapter->reset_reason == VNIC_RESET_CHANGE_PARAM)) 2667 rtnl_lock(); 2668 2669 /* Now that we have the rtnl lock, clear any pending failover. 2670 * This will ensure ibmvnic_open() has either completed or will 2671 * block until failover is complete. 2672 */ 2673 if (rwi->reset_reason == VNIC_RESET_FAILOVER) 2674 adapter->failover_pending = false; 2675 2676 /* read the state and check (again) after getting rtnl */ 2677 reset_state = adapter->state; 2678 2679 if (reset_state == VNIC_REMOVING || reset_state == VNIC_REMOVED) { 2680 rc = -EBUSY; 2681 goto out; 2682 } 2683 2684 netif_carrier_off(netdev); 2685 2686 old_num_rx_queues = adapter->req_rx_queues; 2687 old_num_tx_queues = adapter->req_tx_queues; 2688 old_num_rx_slots = adapter->req_rx_add_entries_per_subcrq; 2689 old_num_tx_slots = adapter->req_tx_entries_per_subcrq; 2690 2691 ibmvnic_cleanup(netdev); 2692 2693 if (reset_state == VNIC_OPEN && 2694 adapter->reset_reason != VNIC_RESET_MOBILITY && 2695 adapter->reset_reason != VNIC_RESET_FAILOVER) { 2696 if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) { 2697 rc = __ibmvnic_close(netdev); 2698 if (rc) 2699 goto out; 2700 } else { 2701 adapter->state = VNIC_CLOSING; 2702 2703 /* Release the RTNL lock before link state change and 2704 * re-acquire after the link state change to allow 2705 * linkwatch_event to grab the RTNL lock and run during 2706 * a reset.
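 * Dropping rtnl opens a window in which ibmvnic_open() can run; the
 * adapter->state checks after re-acquiring the lock below handle that
 * case.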
2707 */ 2708 rtnl_unlock(); 2709 rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN); 2710 rtnl_lock(); 2711 if (rc) 2712 goto out; 2713 2714 if (adapter->state == VNIC_OPEN) { 2715 /* When we dropped rtnl, ibmvnic_open() got 2716 * it and noticed that we are resetting and 2717 * set the adapter state to OPEN. Update our 2718 * new "target" state, and resume the reset 2719 * from VNIC_CLOSING state. 2720 */ 2721 netdev_dbg(netdev, 2722 "Open changed state from %s, updating.\n", 2723 adapter_state_to_string(reset_state)); 2724 reset_state = VNIC_OPEN; 2725 adapter->state = VNIC_CLOSING; 2726 } 2727 2728 if (adapter->state != VNIC_CLOSING) { 2729 /* If someone else changed the adapter state 2730 * when we dropped the rtnl, fail the reset 2731 */ 2732 rc = -EAGAIN; 2733 goto out; 2734 } 2735 adapter->state = VNIC_CLOSED; 2736 } 2737 } 2738 2739 if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) { 2740 release_resources(adapter); 2741 release_sub_crqs(adapter, 1); 2742 release_crq_queue(adapter); 2743 } 2744 2745 if (adapter->reset_reason != VNIC_RESET_NON_FATAL) { 2746 /* remove the closed state so when we call open it appears 2747 * we are coming from the probed state. 2748 */ 2749 adapter->state = VNIC_PROBED; 2750 2751 reinit_init_done(adapter); 2752 2753 if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) { 2754 rc = init_crq_queue(adapter); 2755 } else if (adapter->reset_reason == VNIC_RESET_MOBILITY) { 2756 rc = ibmvnic_reenable_crq_queue(adapter); 2757 release_sub_crqs(adapter, 1); 2758 } else { 2759 rc = ibmvnic_reset_crq(adapter); 2760 if (rc == H_CLOSED || rc == H_SUCCESS) { 2761 rc = vio_enable_interrupts(adapter->vdev); 2762 if (rc) 2763 netdev_err(adapter->netdev, 2764 "Reset failed to enable interrupts. rc=%d\n", 2765 rc); 2766 } 2767 } 2768 2769 if (rc) { 2770 netdev_err(adapter->netdev, 2771 "Reset couldn't initialize crq. rc=%d\n", rc); 2772 goto out; 2773 } 2774 2775 rc = ibmvnic_reset_init(adapter, true); 2776 if (rc) 2777 goto out; 2778 2779 /* If the adapter was in PROBE or DOWN state prior to the reset, 2780 * exit here. 
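 * The device was never logged in or opened, so there is nothing
 * further to restore.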
2781 */ 2782 if (reset_state == VNIC_PROBED || reset_state == VNIC_DOWN) { 2783 rc = 0; 2784 goto out; 2785 } 2786 2787 rc = ibmvnic_login(netdev); 2788 if (rc) 2789 goto out; 2790 2791 if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) { 2792 rc = init_resources(adapter); 2793 if (rc) 2794 goto out; 2795 } else if (adapter->req_rx_queues != old_num_rx_queues || 2796 adapter->req_tx_queues != old_num_tx_queues || 2797 adapter->req_rx_add_entries_per_subcrq != 2798 old_num_rx_slots || 2799 adapter->req_tx_entries_per_subcrq != 2800 old_num_tx_slots || 2801 !adapter->rx_pool || 2802 !adapter->tso_pool || 2803 !adapter->tx_pool) { 2804 release_napi(adapter); 2805 release_vpd_data(adapter); 2806 2807 rc = init_resources(adapter); 2808 if (rc) 2809 goto out; 2810 2811 } else { 2812 rc = init_tx_pools(netdev); 2813 if (rc) { 2814 netdev_dbg(netdev, 2815 "init tx pools failed (%d)\n", 2816 rc); 2817 goto out; 2818 } 2819 2820 rc = init_rx_pools(netdev); 2821 if (rc) { 2822 netdev_dbg(netdev, 2823 "init rx pools failed (%d)\n", 2824 rc); 2825 goto out; 2826 } 2827 } 2828 ibmvnic_disable_irqs(adapter); 2829 } 2830 adapter->state = VNIC_CLOSED; 2831 2832 if (reset_state == VNIC_CLOSED) { 2833 rc = 0; 2834 goto out; 2835 } 2836 2837 rc = __ibmvnic_open(netdev); 2838 if (rc) { 2839 rc = IBMVNIC_OPEN_FAILED; 2840 goto out; 2841 } 2842 2843 /* refresh device's multicast list */ 2844 ibmvnic_set_multi(netdev); 2845 2846 if (adapter->reset_reason == VNIC_RESET_FAILOVER || 2847 adapter->reset_reason == VNIC_RESET_MOBILITY) 2848 __netdev_notify_peers(netdev); 2849 2850 rc = 0; 2851 2852 out: 2853 /* restore the adapter state if reset failed */ 2854 if (rc) 2855 adapter->state = reset_state; 2856 /* requestor of VNIC_RESET_CHANGE_PARAM should still hold the rtnl lock */ 2857 if (!(adapter->reset_reason == VNIC_RESET_CHANGE_PARAM)) 2858 rtnl_unlock(); 2859 2860 netdev_dbg(adapter->netdev, "[S:%s FOP:%d] Reset done, rc %d\n", 2861 adapter_state_to_string(adapter->state), 2862 adapter->failover_pending, rc); 2863 return rc; 2864 } 2865 2866 static int do_hard_reset(struct ibmvnic_adapter *adapter, 2867 struct ibmvnic_rwi *rwi, u32 reset_state) 2868 { 2869 struct net_device *netdev = adapter->netdev; 2870 int rc; 2871 2872 netdev_dbg(adapter->netdev, "Hard resetting driver (%s)\n", 2873 reset_reason_to_string(rwi->reset_reason)); 2874 2875 /* read the state and check (again) after getting rtnl */ 2876 reset_state = adapter->state; 2877 2878 if (reset_state == VNIC_REMOVING || reset_state == VNIC_REMOVED) { 2879 rc = -EBUSY; 2880 goto out; 2881 } 2882 2883 netif_carrier_off(netdev); 2884 adapter->reset_reason = rwi->reset_reason; 2885 2886 ibmvnic_cleanup(netdev); 2887 release_resources(adapter); 2888 release_sub_crqs(adapter, 0); 2889 release_crq_queue(adapter); 2890 2891 /* remove the closed state so when we call open it appears 2892 * we are coming from the probed state. 2893 */ 2894 adapter->state = VNIC_PROBED; 2895 2896 reinit_init_done(adapter); 2897 2898 rc = init_crq_queue(adapter); 2899 if (rc) { 2900 netdev_err(adapter->netdev, 2901 "Couldn't initialize crq. rc=%d\n", rc); 2902 goto out; 2903 } 2904 2905 rc = ibmvnic_reset_init(adapter, false); 2906 if (rc) 2907 goto out; 2908 2909 /* If the adapter was in PROBE or DOWN state prior to the reset, 2910 * exit here. 
2911 */ 2912 if (reset_state == VNIC_PROBED || reset_state == VNIC_DOWN) 2913 goto out; 2914 2915 rc = ibmvnic_login(netdev); 2916 if (rc) 2917 goto out; 2918 2919 rc = init_resources(adapter); 2920 if (rc) 2921 goto out; 2922 2923 ibmvnic_disable_irqs(adapter); 2924 adapter->state = VNIC_CLOSED; 2925 2926 if (reset_state == VNIC_CLOSED) 2927 goto out; 2928 2929 rc = __ibmvnic_open(netdev); 2930 if (rc) { 2931 rc = IBMVNIC_OPEN_FAILED; 2932 goto out; 2933 } 2934 2935 __netdev_notify_peers(netdev); 2936 out: 2937 /* restore adapter state if reset failed */ 2938 if (rc) 2939 adapter->state = reset_state; 2940 netdev_dbg(adapter->netdev, "[S:%s FOP:%d] Hard reset done, rc %d\n", 2941 adapter_state_to_string(adapter->state), 2942 adapter->failover_pending, rc); 2943 return rc; 2944 } 2945 2946 static struct ibmvnic_rwi *get_next_rwi(struct ibmvnic_adapter *adapter) 2947 { 2948 struct ibmvnic_rwi *rwi; 2949 unsigned long flags; 2950 2951 spin_lock_irqsave(&adapter->rwi_lock, flags); 2952 2953 if (!list_empty(&adapter->rwi_list)) { 2954 rwi = list_first_entry(&adapter->rwi_list, struct ibmvnic_rwi, 2955 list); 2956 list_del(&rwi->list); 2957 } else { 2958 rwi = NULL; 2959 } 2960 2961 spin_unlock_irqrestore(&adapter->rwi_lock, flags); 2962 return rwi; 2963 } 2964 2965 /** 2966 * do_passive_init - complete probing when partner device is detected. 2967 * @adapter: ibmvnic_adapter struct 2968 * 2969 * If the ibmvnic device does not have a partner device to communicate with at boot 2970 * and that partner device comes online at a later time, this function is called 2971 * to complete the initialization process of ibmvnic device. 2972 * Caller is expected to hold rtnl_lock(). 2973 * 2974 * Returns non-zero if sub-CRQs are not initialized properly leaving the device 2975 * in the down state. 2976 * Returns 0 upon success and the device is in PROBED state. 2977 */ 2978 2979 static int do_passive_init(struct ibmvnic_adapter *adapter) 2980 { 2981 unsigned long timeout = msecs_to_jiffies(30000); 2982 struct net_device *netdev = adapter->netdev; 2983 struct device *dev = &adapter->vdev->dev; 2984 int rc; 2985 2986 netdev_dbg(netdev, "Partner device found, probing.\n"); 2987 2988 adapter->state = VNIC_PROBING; 2989 reinit_completion(&adapter->init_done); 2990 adapter->init_done_rc = 0; 2991 adapter->crq.active = true; 2992 2993 rc = send_crq_init_complete(adapter); 2994 if (rc) 2995 goto out; 2996 2997 rc = send_version_xchg(adapter); 2998 if (rc) 2999 netdev_dbg(adapter->netdev, "send_version_xchg failed, rc=%d\n", rc); 3000 3001 if (!wait_for_completion_timeout(&adapter->init_done, timeout)) { 3002 dev_err(dev, "Initialization sequence timed out\n"); 3003 rc = -ETIMEDOUT; 3004 goto out; 3005 } 3006 3007 rc = init_sub_crqs(adapter); 3008 if (rc) { 3009 dev_err(dev, "Initialization of sub crqs failed, rc=%d\n", rc); 3010 goto out; 3011 } 3012 3013 rc = init_sub_crq_irqs(adapter); 3014 if (rc) { 3015 dev_err(dev, "Failed to initialize sub crq irqs, rc=%d\n", rc); 3016 goto init_failed; 3017 } 3018 3019 netdev->mtu = adapter->req_mtu - ETH_HLEN; 3020 netdev->min_mtu = adapter->min_mtu - ETH_HLEN; 3021 netdev->max_mtu = adapter->max_mtu - ETH_HLEN; 3022 3023 adapter->state = VNIC_PROBED; 3024 netdev_dbg(netdev, "Probed successfully. 
Waiting for signal from partner device.\n"); 3025 3026 return 0; 3027 3028 init_failed: 3029 release_sub_crqs(adapter, 1); 3030 out: 3031 adapter->state = VNIC_DOWN; 3032 return rc; 3033 } 3034 3035 static void __ibmvnic_reset(struct work_struct *work) 3036 { 3037 struct ibmvnic_adapter *adapter; 3038 unsigned int timeout = 5000; 3039 struct ibmvnic_rwi *tmprwi; 3040 bool saved_state = false; 3041 struct ibmvnic_rwi *rwi; 3042 unsigned long flags; 3043 struct device *dev; 3044 bool need_reset; 3045 int num_fails = 0; 3046 u32 reset_state; 3047 int rc = 0; 3048 3049 adapter = container_of(work, struct ibmvnic_adapter, ibmvnic_reset); 3050 dev = &adapter->vdev->dev; 3051 3052 /* Wait for ibmvnic_probe() to complete. If probe is taking too long 3053 * or if another reset is in progress, defer work for now. If probe 3054 * eventually fails it will flush and terminate our work. 3055 * 3056 * Three possibilities here: 3057 * 1. Adapter being removed - just return 3058 * 2. Timed out on probe or another reset in progress - delay the work 3059 * 3. Completed probe - perform any resets in queue 3060 */ 3061 if (adapter->state == VNIC_PROBING && 3062 !wait_for_completion_timeout(&adapter->probe_done, timeout)) { 3063 dev_err(dev, "Reset thread timed out on probe"); 3064 queue_delayed_work(system_long_wq, 3065 &adapter->ibmvnic_delayed_reset, 3066 IBMVNIC_RESET_DELAY); 3067 return; 3068 } 3069 3070 /* adapter is done with probe (i.e state is never VNIC_PROBING now) */ 3071 if (adapter->state == VNIC_REMOVING) 3072 return; 3073 3074 /* ->rwi_list is stable now (no one else is removing entries) */ 3075 3076 /* ibmvnic_probe() may have purged the reset queue after we were 3077 * scheduled to process a reset so there may be no resets to process. 3078 * Before setting the ->resetting bit though, we have to make sure 3079 * that there is in fact a reset to process. Otherwise we may race 3080 * with ibmvnic_open() and end up leaving the vnic down: 3081 * 3082 * __ibmvnic_reset() ibmvnic_open() 3083 * ----------------- -------------- 3084 * 3085 * set ->resetting bit 3086 * find ->resetting bit is set 3087 * set ->state to IBMVNIC_OPEN (i.e 3088 * assume reset will open device) 3089 * return 3090 * find reset queue empty 3091 * return 3092 * 3093 * Neither performed vnic login/open and vnic stays down 3094 * 3095 * If we hold the lock and conditionally set the bit, either we 3096 * or ibmvnic_open() will complete the open.
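 * Taking ->rwi_lock below makes the queue check and the ->resetting
 * test-and-set a single atomic step with respect to ibmvnic_reset()
 * queueing new work.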
3097 */ 3098 need_reset = false; 3099 spin_lock(&adapter->rwi_lock); 3100 if (!list_empty(&adapter->rwi_list)) { 3101 if (test_and_set_bit_lock(0, &adapter->resetting)) { 3102 queue_delayed_work(system_long_wq, 3103 &adapter->ibmvnic_delayed_reset, 3104 IBMVNIC_RESET_DELAY); 3105 } else { 3106 need_reset = true; 3107 } 3108 } 3109 spin_unlock(&adapter->rwi_lock); 3110 3111 if (!need_reset) 3112 return; 3113 3114 rwi = get_next_rwi(adapter); 3115 while (rwi) { 3116 spin_lock_irqsave(&adapter->state_lock, flags); 3117 3118 if (adapter->state == VNIC_REMOVING || 3119 adapter->state == VNIC_REMOVED) { 3120 spin_unlock_irqrestore(&adapter->state_lock, flags); 3121 kfree(rwi); 3122 rc = EBUSY; 3123 break; 3124 } 3125 3126 if (!saved_state) { 3127 reset_state = adapter->state; 3128 saved_state = true; 3129 } 3130 spin_unlock_irqrestore(&adapter->state_lock, flags); 3131 3132 if (rwi->reset_reason == VNIC_RESET_PASSIVE_INIT) { 3133 rtnl_lock(); 3134 rc = do_passive_init(adapter); 3135 rtnl_unlock(); 3136 if (!rc) 3137 netif_carrier_on(adapter->netdev); 3138 } else if (adapter->force_reset_recovery) { 3139 /* Since we are doing a hard reset now, clear the 3140 * failover_pending flag so we don't ignore any 3141 * future MOBILITY or other resets. 3142 */ 3143 adapter->failover_pending = false; 3144 3145 /* Transport event occurred during previous reset */ 3146 if (adapter->wait_for_reset) { 3147 /* Previous was CHANGE_PARAM; caller locked */ 3148 adapter->force_reset_recovery = false; 3149 rc = do_hard_reset(adapter, rwi, reset_state); 3150 } else { 3151 rtnl_lock(); 3152 adapter->force_reset_recovery = false; 3153 rc = do_hard_reset(adapter, rwi, reset_state); 3154 rtnl_unlock(); 3155 } 3156 if (rc) 3157 num_fails++; 3158 else 3159 num_fails = 0; 3160 3161 /* If auto-priority-failover is enabled we can get 3162 * back to back failovers during resets, resulting 3163 * in at least two failed resets (from high-priority 3164 * backing device to low-priority one and then back) 3165 * If resets continue to fail beyond that, give the 3166 * adapter some time to settle down before retrying. 3167 */ 3168 if (num_fails >= 3) { 3169 netdev_dbg(adapter->netdev, 3170 "[S:%s] Hard reset failed %d times, waiting 60 secs\n", 3171 adapter_state_to_string(adapter->state), 3172 num_fails); 3173 set_current_state(TASK_UNINTERRUPTIBLE); 3174 schedule_timeout(60 * HZ); 3175 } 3176 } else { 3177 rc = do_reset(adapter, rwi, reset_state); 3178 } 3179 tmprwi = rwi; 3180 adapter->last_reset_time = jiffies; 3181 3182 if (rc) 3183 netdev_dbg(adapter->netdev, "Reset failed, rc=%d\n", rc); 3184 3185 rwi = get_next_rwi(adapter); 3186 3187 /* 3188 * If there are no resets queued and the previous reset failed, 3189 * the adapter would be in an undefined state. So retry the 3190 * previous reset as a hard reset. 3191 * 3192 * Else, free the previous rwi and, if there is another reset 3193 * queued, process the new reset even if previous reset failed 3194 * (the previous reset could have failed because of a fail 3195 * over for instance, so process the fail over). 
3196 */ 3197 if (!rwi && rc) 3198 rwi = tmprwi; 3199 else 3200 kfree(tmprwi); 3201 3202 if (rwi && (rwi->reset_reason == VNIC_RESET_FAILOVER || 3203 rwi->reset_reason == VNIC_RESET_MOBILITY || rc)) 3204 adapter->force_reset_recovery = true; 3205 } 3206 3207 if (adapter->wait_for_reset) { 3208 adapter->reset_done_rc = rc; 3209 complete(&adapter->reset_done); 3210 } 3211 3212 clear_bit_unlock(0, &adapter->resetting); 3213 3214 netdev_dbg(adapter->netdev, 3215 "[S:%s FRR:%d WFR:%d] Done processing resets\n", 3216 adapter_state_to_string(adapter->state), 3217 adapter->force_reset_recovery, 3218 adapter->wait_for_reset); 3219 } 3220 3221 static void __ibmvnic_delayed_reset(struct work_struct *work) 3222 { 3223 struct ibmvnic_adapter *adapter; 3224 3225 adapter = container_of(work, struct ibmvnic_adapter, 3226 ibmvnic_delayed_reset.work); 3227 __ibmvnic_reset(&adapter->ibmvnic_reset); 3228 } 3229 3230 static void flush_reset_queue(struct ibmvnic_adapter *adapter) 3231 { 3232 struct list_head *entry, *tmp_entry; 3233 3234 if (!list_empty(&adapter->rwi_list)) { 3235 list_for_each_safe(entry, tmp_entry, &adapter->rwi_list) { 3236 list_del(entry); 3237 kfree(list_entry(entry, struct ibmvnic_rwi, list)); 3238 } 3239 } 3240 } 3241 3242 static int ibmvnic_reset(struct ibmvnic_adapter *adapter, 3243 enum ibmvnic_reset_reason reason) 3244 { 3245 struct net_device *netdev = adapter->netdev; 3246 struct ibmvnic_rwi *rwi, *tmp; 3247 unsigned long flags; 3248 int ret; 3249 3250 spin_lock_irqsave(&adapter->rwi_lock, flags); 3251 3252 /* If failover is pending don't schedule any other reset. 3253 * Instead let the failover complete. If there is already 3254 * a failover reset scheduled, we will detect and drop the 3255 * duplicate reset when walking the ->rwi_list below.
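 * The same walk also drops any reset whose reason is already queued,
 * since processing it twice would be redundant.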
3256 */ 3257 if (adapter->state == VNIC_REMOVING || 3258 adapter->state == VNIC_REMOVED || 3259 (adapter->failover_pending && reason != VNIC_RESET_FAILOVER)) { 3260 ret = EBUSY; 3261 netdev_dbg(netdev, "Adapter removing or pending failover, skipping reset\n"); 3262 goto err; 3263 } 3264 3265 list_for_each_entry(tmp, &adapter->rwi_list, list) { 3266 if (tmp->reset_reason == reason) { 3267 netdev_dbg(netdev, "Skipping matching reset, reason=%s\n", 3268 reset_reason_to_string(reason)); 3269 ret = EBUSY; 3270 goto err; 3271 } 3272 } 3273 3274 rwi = kzalloc(sizeof(*rwi), GFP_ATOMIC); 3275 if (!rwi) { 3276 ret = ENOMEM; 3277 goto err; 3278 } 3279 /* if we just received a transport event, 3280 * flush reset queue and process this reset 3281 */ 3282 if (adapter->force_reset_recovery) 3283 flush_reset_queue(adapter); 3284 3285 rwi->reset_reason = reason; 3286 list_add_tail(&rwi->list, &adapter->rwi_list); 3287 netdev_dbg(adapter->netdev, "Scheduling reset (reason %s)\n", 3288 reset_reason_to_string(reason)); 3289 queue_work(system_long_wq, &adapter->ibmvnic_reset); 3290 3291 ret = 0; 3292 err: 3293 /* ibmvnic_close() below can block, so drop the lock first */ 3294 spin_unlock_irqrestore(&adapter->rwi_lock, flags); 3295 3296 if (ret == ENOMEM) 3297 ibmvnic_close(netdev); 3298 3299 return -ret; 3300 } 3301 3302 static void ibmvnic_tx_timeout(struct net_device *dev, unsigned int txqueue) 3303 { 3304 struct ibmvnic_adapter *adapter = netdev_priv(dev); 3305 3306 if (test_bit(0, &adapter->resetting)) { 3307 netdev_err(adapter->netdev, 3308 "Adapter is resetting, skip timeout reset\n"); 3309 return; 3310 } 3311 /* No queuing up reset until at least 5 seconds (default watchdog val) 3312 * after last reset 3313 */ 3314 if (time_before(jiffies, (adapter->last_reset_time + dev->watchdog_timeo))) { 3315 netdev_dbg(dev, "Not yet time to tx timeout.\n"); 3316 return; 3317 } 3318 ibmvnic_reset(adapter, VNIC_RESET_TIMEOUT); 3319 } 3320 3321 static void remove_buff_from_pool(struct ibmvnic_adapter *adapter, 3322 struct ibmvnic_rx_buff *rx_buff) 3323 { 3324 struct ibmvnic_rx_pool *pool = &adapter->rx_pool[rx_buff->pool_index]; 3325 3326 rx_buff->skb = NULL; 3327 3328 pool->free_map[pool->next_alloc] = (int)(rx_buff - pool->rx_buff); 3329 pool->next_alloc = (pool->next_alloc + 1) % pool->size; 3330 3331 atomic_dec(&pool->available); 3332 } 3333 3334 static int ibmvnic_poll(struct napi_struct *napi, int budget) 3335 { 3336 struct ibmvnic_sub_crq_queue *rx_scrq; 3337 struct ibmvnic_adapter *adapter; 3338 struct net_device *netdev; 3339 int frames_processed; 3340 int scrq_num; 3341 3342 netdev = napi->dev; 3343 adapter = netdev_priv(netdev); 3344 scrq_num = (int)(napi - adapter->napi); 3345 frames_processed = 0; 3346 rx_scrq = adapter->rx_scrq[scrq_num]; 3347 3348 restart_poll: 3349 while (frames_processed < budget) { 3350 struct sk_buff *skb; 3351 struct ibmvnic_rx_buff *rx_buff; 3352 union sub_crq *next; 3353 u32 length; 3354 u16 offset; 3355 u8 flags = 0; 3356 3357 if (unlikely(test_bit(0, &adapter->resetting) && 3358 adapter->reset_reason != VNIC_RESET_NON_FATAL)) { 3359 enable_scrq_irq(adapter, rx_scrq); 3360 napi_complete_done(napi, frames_processed); 3361 return frames_processed; 3362 } 3363 3364 if (!pending_scrq(adapter, rx_scrq)) 3365 break; 3366 next = ibmvnic_next_scrq(adapter, rx_scrq); 3367 rx_buff = (struct ibmvnic_rx_buff *) 3368 be64_to_cpu(next->rx_comp.correlator); 3369 /* do error checking */ 3370 if (next->rx_comp.rc) { 3371 netdev_dbg(netdev, "rx buffer returned with rc %x\n", 3372 
be16_to_cpu(next->rx_comp.rc)); 3373 /* free the entry */ 3374 next->rx_comp.first = 0; 3375 dev_kfree_skb_any(rx_buff->skb); 3376 remove_buff_from_pool(adapter, rx_buff); 3377 continue; 3378 } else if (!rx_buff->skb) { 3379 /* free the entry */ 3380 next->rx_comp.first = 0; 3381 remove_buff_from_pool(adapter, rx_buff); 3382 continue; 3383 } 3384 3385 length = be32_to_cpu(next->rx_comp.len); 3386 offset = be16_to_cpu(next->rx_comp.off_frame_data); 3387 flags = next->rx_comp.flags; 3388 skb = rx_buff->skb; 3389 /* load long_term_buff before copying to skb */ 3390 dma_rmb(); 3391 skb_copy_to_linear_data(skb, rx_buff->data + offset, 3392 length); 3393 3394 /* VLAN Header has been stripped by the system firmware and 3395 * needs to be inserted by the driver 3396 */ 3397 if (adapter->rx_vlan_header_insertion && 3398 (flags & IBMVNIC_VLAN_STRIPPED)) 3399 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), 3400 ntohs(next->rx_comp.vlan_tci)); 3401 3402 /* free the entry */ 3403 next->rx_comp.first = 0; 3404 remove_buff_from_pool(adapter, rx_buff); 3405 3406 skb_put(skb, length); 3407 skb->protocol = eth_type_trans(skb, netdev); 3408 skb_record_rx_queue(skb, scrq_num); 3409 3410 if (flags & IBMVNIC_IP_CHKSUM_GOOD && 3411 flags & IBMVNIC_TCP_UDP_CHKSUM_GOOD) { 3412 skb->ip_summed = CHECKSUM_UNNECESSARY; 3413 } 3414 3415 length = skb->len; 3416 napi_gro_receive(napi, skb); /* send it up */ 3417 netdev->stats.rx_packets++; 3418 netdev->stats.rx_bytes += length; 3419 adapter->rx_stats_buffers[scrq_num].packets++; 3420 adapter->rx_stats_buffers[scrq_num].bytes += length; 3421 frames_processed++; 3422 } 3423 3424 if (adapter->state != VNIC_CLOSING && 3425 ((atomic_read(&adapter->rx_pool[scrq_num].available) < 3426 adapter->req_rx_add_entries_per_subcrq / 2) || 3427 frames_processed < budget)) 3428 replenish_rx_pool(adapter, &adapter->rx_pool[scrq_num]); 3429 if (frames_processed < budget) { 3430 if (napi_complete_done(napi, frames_processed)) { 3431 enable_scrq_irq(adapter, rx_scrq); 3432 if (pending_scrq(adapter, rx_scrq)) { 3433 if (napi_reschedule(napi)) { 3434 disable_scrq_irq(adapter, rx_scrq); 3435 goto restart_poll; 3436 } 3437 } 3438 } 3439 } 3440 return frames_processed; 3441 } 3442 3443 static int wait_for_reset(struct ibmvnic_adapter *adapter) 3444 { 3445 int rc, ret; 3446 3447 adapter->fallback.mtu = adapter->req_mtu; 3448 adapter->fallback.rx_queues = adapter->req_rx_queues; 3449 adapter->fallback.tx_queues = adapter->req_tx_queues; 3450 adapter->fallback.rx_entries = adapter->req_rx_add_entries_per_subcrq; 3451 adapter->fallback.tx_entries = adapter->req_tx_entries_per_subcrq; 3452 3453 reinit_completion(&adapter->reset_done); 3454 adapter->wait_for_reset = true; 3455 rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM); 3456 3457 if (rc) { 3458 ret = rc; 3459 goto out; 3460 } 3461 rc = ibmvnic_wait_for_completion(adapter, &adapter->reset_done, 60000); 3462 if (rc) { 3463 ret = -ENODEV; 3464 goto out; 3465 } 3466 3467 ret = 0; 3468 if (adapter->reset_done_rc) { 3469 ret = -EIO; 3470 adapter->desired.mtu = adapter->fallback.mtu; 3471 adapter->desired.rx_queues = adapter->fallback.rx_queues; 3472 adapter->desired.tx_queues = adapter->fallback.tx_queues; 3473 adapter->desired.rx_entries = adapter->fallback.rx_entries; 3474 adapter->desired.tx_entries = adapter->fallback.tx_entries; 3475 3476 reinit_completion(&adapter->reset_done); 3477 adapter->wait_for_reset = true; 3478 rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM); 3479 if (rc) { 3480 ret = rc; 3481 goto out; 3482 } 3483 rc = 
ibmvnic_wait_for_completion(adapter, &adapter->reset_done, 3484 60000); 3485 if (rc) { 3486 ret = -ENODEV; 3487 goto out; 3488 } 3489 } 3490 out: 3491 adapter->wait_for_reset = false; 3492 3493 return ret; 3494 } 3495 3496 static int ibmvnic_change_mtu(struct net_device *netdev, int new_mtu) 3497 { 3498 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 3499 3500 adapter->desired.mtu = new_mtu + ETH_HLEN; 3501 3502 return wait_for_reset(adapter); 3503 } 3504 3505 static netdev_features_t ibmvnic_features_check(struct sk_buff *skb, 3506 struct net_device *dev, 3507 netdev_features_t features) 3508 { 3509 /* Some backing hardware adapters can not 3510 * handle packets with a MSS less than 224 3511 * or with only one segment. 3512 */ 3513 if (skb_is_gso(skb)) { 3514 if (skb_shinfo(skb)->gso_size < 224 || 3515 skb_shinfo(skb)->gso_segs == 1) 3516 features &= ~NETIF_F_GSO_MASK; 3517 } 3518 3519 return features; 3520 } 3521 3522 static const struct net_device_ops ibmvnic_netdev_ops = { 3523 .ndo_open = ibmvnic_open, 3524 .ndo_stop = ibmvnic_close, 3525 .ndo_start_xmit = ibmvnic_xmit, 3526 .ndo_set_rx_mode = ibmvnic_set_multi, 3527 .ndo_set_mac_address = ibmvnic_set_mac, 3528 .ndo_validate_addr = eth_validate_addr, 3529 .ndo_tx_timeout = ibmvnic_tx_timeout, 3530 .ndo_change_mtu = ibmvnic_change_mtu, 3531 .ndo_features_check = ibmvnic_features_check, 3532 }; 3533 3534 /* ethtool functions */ 3535 3536 static int ibmvnic_get_link_ksettings(struct net_device *netdev, 3537 struct ethtool_link_ksettings *cmd) 3538 { 3539 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 3540 int rc; 3541 3542 rc = send_query_phys_parms(adapter); 3543 if (rc) { 3544 adapter->speed = SPEED_UNKNOWN; 3545 adapter->duplex = DUPLEX_UNKNOWN; 3546 } 3547 cmd->base.speed = adapter->speed; 3548 cmd->base.duplex = adapter->duplex; 3549 cmd->base.port = PORT_FIBRE; 3550 cmd->base.phy_address = 0; 3551 cmd->base.autoneg = AUTONEG_ENABLE; 3552 3553 return 0; 3554 } 3555 3556 static void ibmvnic_get_drvinfo(struct net_device *netdev, 3557 struct ethtool_drvinfo *info) 3558 { 3559 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 3560 3561 strscpy(info->driver, ibmvnic_driver_name, sizeof(info->driver)); 3562 strscpy(info->version, IBMVNIC_DRIVER_VERSION, sizeof(info->version)); 3563 strscpy(info->fw_version, adapter->fw_version, 3564 sizeof(info->fw_version)); 3565 } 3566 3567 static u32 ibmvnic_get_msglevel(struct net_device *netdev) 3568 { 3569 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 3570 3571 return adapter->msg_enable; 3572 } 3573 3574 static void ibmvnic_set_msglevel(struct net_device *netdev, u32 data) 3575 { 3576 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 3577 3578 adapter->msg_enable = data; 3579 } 3580 3581 static u32 ibmvnic_get_link(struct net_device *netdev) 3582 { 3583 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 3584 3585 /* Don't need to send a query because we request a logical link up at 3586 * init and then we wait for link state indications 3587 */ 3588 return adapter->logical_link_state; 3589 } 3590 3591 static void ibmvnic_get_ringparam(struct net_device *netdev, 3592 struct ethtool_ringparam *ring, 3593 struct kernel_ethtool_ringparam *kernel_ring, 3594 struct netlink_ext_ack *extack) 3595 { 3596 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 3597 3598 ring->rx_max_pending = adapter->max_rx_add_entries_per_subcrq; 3599 ring->tx_max_pending = adapter->max_tx_entries_per_subcrq; 3600 ring->rx_mini_max_pending = 0; 3601 ring->rx_jumbo_max_pending = 0; 3602 
ring->rx_pending = adapter->req_rx_add_entries_per_subcrq; 3603 ring->tx_pending = adapter->req_tx_entries_per_subcrq; 3604 ring->rx_mini_pending = 0; 3605 ring->rx_jumbo_pending = 0; 3606 } 3607 3608 static int ibmvnic_set_ringparam(struct net_device *netdev, 3609 struct ethtool_ringparam *ring, 3610 struct kernel_ethtool_ringparam *kernel_ring, 3611 struct netlink_ext_ack *extack) 3612 { 3613 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 3614 3615 if (ring->rx_pending > adapter->max_rx_add_entries_per_subcrq || 3616 ring->tx_pending > adapter->max_tx_entries_per_subcrq) { 3617 netdev_err(netdev, "Invalid request.\n"); 3618 netdev_err(netdev, "Max tx buffers = %llu\n", 3619 adapter->max_tx_entries_per_subcrq); 3620 netdev_err(netdev, "Max rx buffers = %llu\n", 3621 adapter->max_rx_add_entries_per_subcrq); 3622 return -EINVAL; 3623 } 3624 3625 adapter->desired.rx_entries = ring->rx_pending; 3626 adapter->desired.tx_entries = ring->tx_pending; 3627 3628 return wait_for_reset(adapter); 3629 } 3630 3631 static void ibmvnic_get_channels(struct net_device *netdev, 3632 struct ethtool_channels *channels) 3633 { 3634 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 3635 3636 channels->max_rx = adapter->max_rx_queues; 3637 channels->max_tx = adapter->max_tx_queues; 3638 channels->max_other = 0; 3639 channels->max_combined = 0; 3640 channels->rx_count = adapter->req_rx_queues; 3641 channels->tx_count = adapter->req_tx_queues; 3642 channels->other_count = 0; 3643 channels->combined_count = 0; 3644 } 3645 3646 static int ibmvnic_set_channels(struct net_device *netdev, 3647 struct ethtool_channels *channels) 3648 { 3649 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 3650 3651 adapter->desired.rx_queues = channels->rx_count; 3652 adapter->desired.tx_queues = channels->tx_count; 3653 3654 return wait_for_reset(adapter); 3655 } 3656 3657 static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data) 3658 { 3659 struct ibmvnic_adapter *adapter = netdev_priv(dev); 3660 int i; 3661 3662 if (stringset != ETH_SS_STATS) 3663 return; 3664 3665 for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++, data += ETH_GSTRING_LEN) 3666 memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN); 3667 3668 for (i = 0; i < adapter->req_tx_queues; i++) { 3669 snprintf(data, ETH_GSTRING_LEN, "tx%d_packets", i); 3670 data += ETH_GSTRING_LEN; 3671 3672 snprintf(data, ETH_GSTRING_LEN, "tx%d_bytes", i); 3673 data += ETH_GSTRING_LEN; 3674 3675 snprintf(data, ETH_GSTRING_LEN, "tx%d_dropped_packets", i); 3676 data += ETH_GSTRING_LEN; 3677 } 3678 3679 for (i = 0; i < adapter->req_rx_queues; i++) { 3680 snprintf(data, ETH_GSTRING_LEN, "rx%d_packets", i); 3681 data += ETH_GSTRING_LEN; 3682 3683 snprintf(data, ETH_GSTRING_LEN, "rx%d_bytes", i); 3684 data += ETH_GSTRING_LEN; 3685 3686 snprintf(data, ETH_GSTRING_LEN, "rx%d_interrupts", i); 3687 data += ETH_GSTRING_LEN; 3688 } 3689 } 3690 3691 static int ibmvnic_get_sset_count(struct net_device *dev, int sset) 3692 { 3693 struct ibmvnic_adapter *adapter = netdev_priv(dev); 3694 3695 switch (sset) { 3696 case ETH_SS_STATS: 3697 return ARRAY_SIZE(ibmvnic_stats) + 3698 adapter->req_tx_queues * NUM_TX_STATS + 3699 adapter->req_rx_queues * NUM_RX_STATS; 3700 default: 3701 return -EOPNOTSUPP; 3702 } 3703 } 3704 3705 static void ibmvnic_get_ethtool_stats(struct net_device *dev, 3706 struct ethtool_stats *stats, u64 *data) 3707 { 3708 struct ibmvnic_adapter *adapter = netdev_priv(dev); 3709 union ibmvnic_crq crq; 3710 int i, j; 3711 int rc; 3712 3713 memset(&crq, 0,
sizeof(crq)); 3714 crq.request_statistics.first = IBMVNIC_CRQ_CMD; 3715 crq.request_statistics.cmd = REQUEST_STATISTICS; 3716 crq.request_statistics.ioba = cpu_to_be32(adapter->stats_token); 3717 crq.request_statistics.len = 3718 cpu_to_be32(sizeof(struct ibmvnic_statistics)); 3719 3720 /* Wait for data to be written */ 3721 reinit_completion(&adapter->stats_done); 3722 rc = ibmvnic_send_crq(adapter, &crq); 3723 if (rc) 3724 return; 3725 rc = ibmvnic_wait_for_completion(adapter, &adapter->stats_done, 10000); 3726 if (rc) 3727 return; 3728 3729 for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++) 3730 data[i] = be64_to_cpu(IBMVNIC_GET_STAT 3731 (adapter, ibmvnic_stats[i].offset)); 3732 3733 for (j = 0; j < adapter->req_tx_queues; j++) { 3734 data[i] = adapter->tx_stats_buffers[j].packets; 3735 i++; 3736 data[i] = adapter->tx_stats_buffers[j].bytes; 3737 i++; 3738 data[i] = adapter->tx_stats_buffers[j].dropped_packets; 3739 i++; 3740 } 3741 3742 for (j = 0; j < adapter->req_rx_queues; j++) { 3743 data[i] = adapter->rx_stats_buffers[j].packets; 3744 i++; 3745 data[i] = adapter->rx_stats_buffers[j].bytes; 3746 i++; 3747 data[i] = adapter->rx_stats_buffers[j].interrupts; 3748 i++; 3749 } 3750 } 3751 3752 static const struct ethtool_ops ibmvnic_ethtool_ops = { 3753 .get_drvinfo = ibmvnic_get_drvinfo, 3754 .get_msglevel = ibmvnic_get_msglevel, 3755 .set_msglevel = ibmvnic_set_msglevel, 3756 .get_link = ibmvnic_get_link, 3757 .get_ringparam = ibmvnic_get_ringparam, 3758 .set_ringparam = ibmvnic_set_ringparam, 3759 .get_channels = ibmvnic_get_channels, 3760 .set_channels = ibmvnic_set_channels, 3761 .get_strings = ibmvnic_get_strings, 3762 .get_sset_count = ibmvnic_get_sset_count, 3763 .get_ethtool_stats = ibmvnic_get_ethtool_stats, 3764 .get_link_ksettings = ibmvnic_get_link_ksettings, 3765 }; 3766 3767 /* Routines for managing CRQs/sCRQs */ 3768 3769 static int reset_one_sub_crq_queue(struct ibmvnic_adapter *adapter, 3770 struct ibmvnic_sub_crq_queue *scrq) 3771 { 3772 int rc; 3773 3774 if (!scrq) { 3775 netdev_dbg(adapter->netdev, "Invalid scrq reset.\n"); 3776 return -EINVAL; 3777 } 3778 3779 if (scrq->irq) { 3780 free_irq(scrq->irq, scrq); 3781 irq_dispose_mapping(scrq->irq); 3782 scrq->irq = 0; 3783 } 3784 3785 if (scrq->msgs) { 3786 memset(scrq->msgs, 0, 4 * PAGE_SIZE); 3787 atomic_set(&scrq->used, 0); 3788 scrq->cur = 0; 3789 scrq->ind_buf.index = 0; 3790 } else { 3791 netdev_dbg(adapter->netdev, "Invalid scrq reset\n"); 3792 return -EINVAL; 3793 } 3794 3795 rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token, 3796 4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq); 3797 return rc; 3798 } 3799 3800 static int reset_sub_crq_queues(struct ibmvnic_adapter *adapter) 3801 { 3802 int i, rc; 3803 3804 if (!adapter->tx_scrq || !adapter->rx_scrq) 3805 return -EINVAL; 3806 3807 ibmvnic_clean_affinity(adapter); 3808 3809 for (i = 0; i < adapter->req_tx_queues; i++) { 3810 netdev_dbg(adapter->netdev, "Re-setting tx_scrq[%d]\n", i); 3811 rc = reset_one_sub_crq_queue(adapter, adapter->tx_scrq[i]); 3812 if (rc) 3813 return rc; 3814 } 3815 3816 for (i = 0; i < adapter->req_rx_queues; i++) { 3817 netdev_dbg(adapter->netdev, "Re-setting rx_scrq[%d]\n", i); 3818 rc = reset_one_sub_crq_queue(adapter, adapter->rx_scrq[i]); 3819 if (rc) 3820 return rc; 3821 } 3822 3823 return rc; 3824 } 3825 3826 static void release_sub_crq_queue(struct ibmvnic_adapter *adapter, 3827 struct ibmvnic_sub_crq_queue *scrq, 3828 bool do_h_free) 3829 { 3830 struct device *dev = &adapter->vdev->dev; 3831 long rc; 3832 3833 
	netdev_dbg(adapter->netdev, "Releasing sub-CRQ\n");

	if (do_h_free) {
		/* Close the sub-crqs */
		do {
			rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
						adapter->vdev->unit_address,
						scrq->crq_num);
		} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

		if (rc) {
			netdev_err(adapter->netdev,
				   "Failed to release sub-CRQ %16lx, rc = %ld\n",
				   scrq->crq_num, rc);
		}
	}

	dma_free_coherent(dev,
			  IBMVNIC_IND_ARR_SZ,
			  scrq->ind_buf.indir_arr,
			  scrq->ind_buf.indir_dma);

	dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
			 DMA_BIDIRECTIONAL);
	free_pages((unsigned long)scrq->msgs, 2);
	free_cpumask_var(scrq->affinity_mask);
	kfree(scrq);
}

static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
							*adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_sub_crq_queue *scrq;
	int rc;

	scrq = kzalloc(sizeof(*scrq), GFP_KERNEL);
	if (!scrq)
		return NULL;

	scrq->msgs =
		(union sub_crq *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 2);
	if (!scrq->msgs) {
		dev_warn(dev, "Couldn't allocate crq queue messages page\n");
		goto zero_page_failed;
	}
	if (!zalloc_cpumask_var(&scrq->affinity_mask, GFP_KERNEL))
		goto cpumask_alloc_failed;

	scrq->msg_token = dma_map_single(dev, scrq->msgs, 4 * PAGE_SIZE,
					 DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, scrq->msg_token)) {
		dev_warn(dev, "Couldn't map crq queue messages page\n");
		goto map_failed;
	}

	rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
			   4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);

	if (rc == H_RESOURCE)
		rc = ibmvnic_reset_crq(adapter);

	if (rc == H_CLOSED) {
		dev_warn(dev, "Partner adapter not ready, waiting.\n");
	} else if (rc) {
		dev_warn(dev, "Error %d registering sub-crq\n", rc);
		goto reg_failed;
	}

	scrq->adapter = adapter;
	scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs);
	scrq->ind_buf.index = 0;

	scrq->ind_buf.indir_arr =
		dma_alloc_coherent(dev,
				   IBMVNIC_IND_ARR_SZ,
				   &scrq->ind_buf.indir_dma,
				   GFP_KERNEL);

	if (!scrq->ind_buf.indir_arr)
		goto indir_failed;

	spin_lock_init(&scrq->lock);

	netdev_dbg(adapter->netdev,
		   "sub-crq initialized, num %lx, hw_irq=%lx, irq=%x\n",
		   scrq->crq_num, scrq->hw_irq, scrq->irq);

	return scrq;

indir_failed:
	do {
		rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
					adapter->vdev->unit_address,
					scrq->crq_num);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
reg_failed:
	dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
			 DMA_BIDIRECTIONAL);
map_failed:
	free_cpumask_var(scrq->affinity_mask);
cpumask_alloc_failed:
	free_pages((unsigned long)scrq->msgs, 2);
zero_page_failed:
	kfree(scrq);

	return NULL;
}

static void release_sub_crqs(struct ibmvnic_adapter *adapter, bool do_h_free)
{
	int i;

	ibmvnic_clean_affinity(adapter);
	if (adapter->tx_scrq) {
		for (i = 0; i < adapter->num_active_tx_scrqs; i++) {
			if (!adapter->tx_scrq[i])
				continue;

			netdev_dbg(adapter->netdev, "Releasing tx_scrq[%d]\n",
				   i);
			ibmvnic_tx_scrq_clean_buffer(adapter, adapter->tx_scrq[i]);
			if (adapter->tx_scrq[i]->irq) {
				free_irq(adapter->tx_scrq[i]->irq,
					 adapter->tx_scrq[i]);
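				/* Drop the Linux IRQ mapping set up in
				 * init_sub_crq_irqs() before the queue is freed.
				 */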
irq_dispose_mapping(adapter->tx_scrq[i]->irq); 3959 adapter->tx_scrq[i]->irq = 0; 3960 } 3961 3962 release_sub_crq_queue(adapter, adapter->tx_scrq[i], 3963 do_h_free); 3964 } 3965 3966 kfree(adapter->tx_scrq); 3967 adapter->tx_scrq = NULL; 3968 adapter->num_active_tx_scrqs = 0; 3969 } 3970 3971 if (adapter->rx_scrq) { 3972 for (i = 0; i < adapter->num_active_rx_scrqs; i++) { 3973 if (!adapter->rx_scrq[i]) 3974 continue; 3975 3976 netdev_dbg(adapter->netdev, "Releasing rx_scrq[%d]\n", 3977 i); 3978 if (adapter->rx_scrq[i]->irq) { 3979 free_irq(adapter->rx_scrq[i]->irq, 3980 adapter->rx_scrq[i]); 3981 irq_dispose_mapping(adapter->rx_scrq[i]->irq); 3982 adapter->rx_scrq[i]->irq = 0; 3983 } 3984 3985 release_sub_crq_queue(adapter, adapter->rx_scrq[i], 3986 do_h_free); 3987 } 3988 3989 kfree(adapter->rx_scrq); 3990 adapter->rx_scrq = NULL; 3991 adapter->num_active_rx_scrqs = 0; 3992 } 3993 } 3994 3995 static int disable_scrq_irq(struct ibmvnic_adapter *adapter, 3996 struct ibmvnic_sub_crq_queue *scrq) 3997 { 3998 struct device *dev = &adapter->vdev->dev; 3999 unsigned long rc; 4000 4001 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address, 4002 H_DISABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0); 4003 if (rc) 4004 dev_err(dev, "Couldn't disable scrq irq 0x%lx. rc=%ld\n", 4005 scrq->hw_irq, rc); 4006 return rc; 4007 } 4008 4009 /* We can not use the IRQ chip EOI handler because that has the 4010 * unintended effect of changing the interrupt priority. 4011 */ 4012 static void ibmvnic_xics_eoi(struct device *dev, struct ibmvnic_sub_crq_queue *scrq) 4013 { 4014 u64 val = 0xff000000 | scrq->hw_irq; 4015 unsigned long rc; 4016 4017 rc = plpar_hcall_norets(H_EOI, val); 4018 if (rc) 4019 dev_err(dev, "H_EOI FAILED irq 0x%llx. rc=%ld\n", val, rc); 4020 } 4021 4022 /* Due to a firmware bug, the hypervisor can send an interrupt to a 4023 * transmit or receive queue just prior to a partition migration. 4024 * Force an EOI after migration. 4025 */ 4026 static void ibmvnic_clear_pending_interrupt(struct device *dev, 4027 struct ibmvnic_sub_crq_queue *scrq) 4028 { 4029 if (!xive_enabled()) 4030 ibmvnic_xics_eoi(dev, scrq); 4031 } 4032 4033 static int enable_scrq_irq(struct ibmvnic_adapter *adapter, 4034 struct ibmvnic_sub_crq_queue *scrq) 4035 { 4036 struct device *dev = &adapter->vdev->dev; 4037 unsigned long rc; 4038 4039 if (scrq->hw_irq > 0x100000000ULL) { 4040 dev_err(dev, "bad hw_irq = %lx\n", scrq->hw_irq); 4041 return 1; 4042 } 4043 4044 if (test_bit(0, &adapter->resetting) && 4045 adapter->reset_reason == VNIC_RESET_MOBILITY) { 4046 ibmvnic_clear_pending_interrupt(dev, scrq); 4047 } 4048 4049 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address, 4050 H_ENABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0); 4051 if (rc) 4052 dev_err(dev, "Couldn't enable scrq irq 0x%lx. 
rc=%ld\n", 4053 scrq->hw_irq, rc); 4054 return rc; 4055 } 4056 4057 static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter, 4058 struct ibmvnic_sub_crq_queue *scrq) 4059 { 4060 struct device *dev = &adapter->vdev->dev; 4061 struct ibmvnic_tx_pool *tx_pool; 4062 struct ibmvnic_tx_buff *txbuff; 4063 struct netdev_queue *txq; 4064 union sub_crq *next; 4065 int index; 4066 int i; 4067 4068 restart_loop: 4069 while (pending_scrq(adapter, scrq)) { 4070 unsigned int pool = scrq->pool_index; 4071 int num_entries = 0; 4072 int total_bytes = 0; 4073 int num_packets = 0; 4074 4075 next = ibmvnic_next_scrq(adapter, scrq); 4076 for (i = 0; i < next->tx_comp.num_comps; i++) { 4077 index = be32_to_cpu(next->tx_comp.correlators[i]); 4078 if (index & IBMVNIC_TSO_POOL_MASK) { 4079 tx_pool = &adapter->tso_pool[pool]; 4080 index &= ~IBMVNIC_TSO_POOL_MASK; 4081 } else { 4082 tx_pool = &adapter->tx_pool[pool]; 4083 } 4084 4085 txbuff = &tx_pool->tx_buff[index]; 4086 num_packets++; 4087 num_entries += txbuff->num_entries; 4088 if (txbuff->skb) { 4089 total_bytes += txbuff->skb->len; 4090 if (next->tx_comp.rcs[i]) { 4091 dev_err(dev, "tx error %x\n", 4092 next->tx_comp.rcs[i]); 4093 dev_kfree_skb_irq(txbuff->skb); 4094 } else { 4095 dev_consume_skb_irq(txbuff->skb); 4096 } 4097 txbuff->skb = NULL; 4098 } else { 4099 netdev_warn(adapter->netdev, 4100 "TX completion received with NULL socket buffer\n"); 4101 } 4102 tx_pool->free_map[tx_pool->producer_index] = index; 4103 tx_pool->producer_index = 4104 (tx_pool->producer_index + 1) % 4105 tx_pool->num_buffers; 4106 } 4107 /* remove tx_comp scrq*/ 4108 next->tx_comp.first = 0; 4109 4110 txq = netdev_get_tx_queue(adapter->netdev, scrq->pool_index); 4111 netdev_tx_completed_queue(txq, num_packets, total_bytes); 4112 4113 if (atomic_sub_return(num_entries, &scrq->used) <= 4114 (adapter->req_tx_entries_per_subcrq / 2) && 4115 __netif_subqueue_stopped(adapter->netdev, 4116 scrq->pool_index)) { 4117 rcu_read_lock(); 4118 if (adapter->tx_queues_active) { 4119 netif_wake_subqueue(adapter->netdev, 4120 scrq->pool_index); 4121 netdev_dbg(adapter->netdev, 4122 "Started queue %d\n", 4123 scrq->pool_index); 4124 } 4125 rcu_read_unlock(); 4126 } 4127 } 4128 4129 enable_scrq_irq(adapter, scrq); 4130 4131 if (pending_scrq(adapter, scrq)) { 4132 disable_scrq_irq(adapter, scrq); 4133 goto restart_loop; 4134 } 4135 4136 return 0; 4137 } 4138 4139 static irqreturn_t ibmvnic_interrupt_tx(int irq, void *instance) 4140 { 4141 struct ibmvnic_sub_crq_queue *scrq = instance; 4142 struct ibmvnic_adapter *adapter = scrq->adapter; 4143 4144 disable_scrq_irq(adapter, scrq); 4145 ibmvnic_complete_tx(adapter, scrq); 4146 4147 return IRQ_HANDLED; 4148 } 4149 4150 static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance) 4151 { 4152 struct ibmvnic_sub_crq_queue *scrq = instance; 4153 struct ibmvnic_adapter *adapter = scrq->adapter; 4154 4155 /* When booting a kdump kernel we can hit pending interrupts 4156 * prior to completing driver initialization. 
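	 * Such interrupts are simply ignored until the adapter reaches the
	 * VNIC_OPEN state.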
4157 */ 4158 if (unlikely(adapter->state != VNIC_OPEN)) 4159 return IRQ_NONE; 4160 4161 adapter->rx_stats_buffers[scrq->scrq_num].interrupts++; 4162 4163 if (napi_schedule_prep(&adapter->napi[scrq->scrq_num])) { 4164 disable_scrq_irq(adapter, scrq); 4165 __napi_schedule(&adapter->napi[scrq->scrq_num]); 4166 } 4167 4168 return IRQ_HANDLED; 4169 } 4170 4171 static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter) 4172 { 4173 struct device *dev = &adapter->vdev->dev; 4174 struct ibmvnic_sub_crq_queue *scrq; 4175 int i = 0, j = 0; 4176 int rc = 0; 4177 4178 for (i = 0; i < adapter->req_tx_queues; i++) { 4179 netdev_dbg(adapter->netdev, "Initializing tx_scrq[%d] irq\n", 4180 i); 4181 scrq = adapter->tx_scrq[i]; 4182 scrq->irq = irq_create_mapping(NULL, scrq->hw_irq); 4183 4184 if (!scrq->irq) { 4185 rc = -EINVAL; 4186 dev_err(dev, "Error mapping irq\n"); 4187 goto req_tx_irq_failed; 4188 } 4189 4190 snprintf(scrq->name, sizeof(scrq->name), "ibmvnic-%x-tx%d", 4191 adapter->vdev->unit_address, i); 4192 rc = request_irq(scrq->irq, ibmvnic_interrupt_tx, 4193 0, scrq->name, scrq); 4194 4195 if (rc) { 4196 dev_err(dev, "Couldn't register tx irq 0x%x. rc=%d\n", 4197 scrq->irq, rc); 4198 irq_dispose_mapping(scrq->irq); 4199 goto req_tx_irq_failed; 4200 } 4201 } 4202 4203 for (i = 0; i < adapter->req_rx_queues; i++) { 4204 netdev_dbg(adapter->netdev, "Initializing rx_scrq[%d] irq\n", 4205 i); 4206 scrq = adapter->rx_scrq[i]; 4207 scrq->irq = irq_create_mapping(NULL, scrq->hw_irq); 4208 if (!scrq->irq) { 4209 rc = -EINVAL; 4210 dev_err(dev, "Error mapping irq\n"); 4211 goto req_rx_irq_failed; 4212 } 4213 snprintf(scrq->name, sizeof(scrq->name), "ibmvnic-%x-rx%d", 4214 adapter->vdev->unit_address, i); 4215 rc = request_irq(scrq->irq, ibmvnic_interrupt_rx, 4216 0, scrq->name, scrq); 4217 if (rc) { 4218 dev_err(dev, "Couldn't register rx irq 0x%x. 
rc=%d\n", 4219 scrq->irq, rc); 4220 irq_dispose_mapping(scrq->irq); 4221 goto req_rx_irq_failed; 4222 } 4223 } 4224 4225 cpus_read_lock(); 4226 ibmvnic_set_affinity(adapter); 4227 cpus_read_unlock(); 4228 4229 return rc; 4230 4231 req_rx_irq_failed: 4232 for (j = 0; j < i; j++) { 4233 free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]); 4234 irq_dispose_mapping(adapter->rx_scrq[j]->irq); 4235 } 4236 i = adapter->req_tx_queues; 4237 req_tx_irq_failed: 4238 for (j = 0; j < i; j++) { 4239 free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]); 4240 irq_dispose_mapping(adapter->tx_scrq[j]->irq); 4241 } 4242 release_sub_crqs(adapter, 1); 4243 return rc; 4244 } 4245 4246 static int init_sub_crqs(struct ibmvnic_adapter *adapter) 4247 { 4248 struct device *dev = &adapter->vdev->dev; 4249 struct ibmvnic_sub_crq_queue **allqueues; 4250 int registered_queues = 0; 4251 int total_queues; 4252 int more = 0; 4253 int i; 4254 4255 total_queues = adapter->req_tx_queues + adapter->req_rx_queues; 4256 4257 allqueues = kcalloc(total_queues, sizeof(*allqueues), GFP_KERNEL); 4258 if (!allqueues) 4259 return -ENOMEM; 4260 4261 for (i = 0; i < total_queues; i++) { 4262 allqueues[i] = init_sub_crq_queue(adapter); 4263 if (!allqueues[i]) { 4264 dev_warn(dev, "Couldn't allocate all sub-crqs\n"); 4265 break; 4266 } 4267 registered_queues++; 4268 } 4269 4270 /* Make sure we were able to register the minimum number of queues */ 4271 if (registered_queues < 4272 adapter->min_tx_queues + adapter->min_rx_queues) { 4273 dev_err(dev, "Fatal: Couldn't init min number of sub-crqs\n"); 4274 goto tx_failed; 4275 } 4276 4277 /* Distribute the failed allocated queues*/ 4278 for (i = 0; i < total_queues - registered_queues + more ; i++) { 4279 netdev_dbg(adapter->netdev, "Reducing number of queues\n"); 4280 switch (i % 3) { 4281 case 0: 4282 if (adapter->req_rx_queues > adapter->min_rx_queues) 4283 adapter->req_rx_queues--; 4284 else 4285 more++; 4286 break; 4287 case 1: 4288 if (adapter->req_tx_queues > adapter->min_tx_queues) 4289 adapter->req_tx_queues--; 4290 else 4291 more++; 4292 break; 4293 } 4294 } 4295 4296 adapter->tx_scrq = kcalloc(adapter->req_tx_queues, 4297 sizeof(*adapter->tx_scrq), GFP_KERNEL); 4298 if (!adapter->tx_scrq) 4299 goto tx_failed; 4300 4301 for (i = 0; i < adapter->req_tx_queues; i++) { 4302 adapter->tx_scrq[i] = allqueues[i]; 4303 adapter->tx_scrq[i]->pool_index = i; 4304 adapter->num_active_tx_scrqs++; 4305 } 4306 4307 adapter->rx_scrq = kcalloc(adapter->req_rx_queues, 4308 sizeof(*adapter->rx_scrq), GFP_KERNEL); 4309 if (!adapter->rx_scrq) 4310 goto rx_failed; 4311 4312 for (i = 0; i < adapter->req_rx_queues; i++) { 4313 adapter->rx_scrq[i] = allqueues[i + adapter->req_tx_queues]; 4314 adapter->rx_scrq[i]->scrq_num = i; 4315 adapter->num_active_rx_scrqs++; 4316 } 4317 4318 kfree(allqueues); 4319 return 0; 4320 4321 rx_failed: 4322 kfree(adapter->tx_scrq); 4323 adapter->tx_scrq = NULL; 4324 tx_failed: 4325 for (i = 0; i < registered_queues; i++) 4326 release_sub_crq_queue(adapter, allqueues[i], 1); 4327 kfree(allqueues); 4328 return -ENOMEM; 4329 } 4330 4331 static void send_request_cap(struct ibmvnic_adapter *adapter, int retry) 4332 { 4333 struct device *dev = &adapter->vdev->dev; 4334 union ibmvnic_crq crq; 4335 int max_entries; 4336 int cap_reqs; 4337 4338 /* We send out 6 or 7 REQUEST_CAPABILITY CRQs below (depending on 4339 * the PROMISC flag). Initialize this count upfront. 
When the tasklet 4340 * receives a response to all of these, it will send the next protocol 4341 * message (QUERY_IP_OFFLOAD). 4342 */ 4343 if (!(adapter->netdev->flags & IFF_PROMISC) || 4344 adapter->promisc_supported) 4345 cap_reqs = 7; 4346 else 4347 cap_reqs = 6; 4348 4349 if (!retry) { 4350 /* Sub-CRQ entries are 32 byte long */ 4351 int entries_page = 4 * PAGE_SIZE / (sizeof(u64) * 4); 4352 4353 atomic_set(&adapter->running_cap_crqs, cap_reqs); 4354 4355 if (adapter->min_tx_entries_per_subcrq > entries_page || 4356 adapter->min_rx_add_entries_per_subcrq > entries_page) { 4357 dev_err(dev, "Fatal, invalid entries per sub-crq\n"); 4358 return; 4359 } 4360 4361 if (adapter->desired.mtu) 4362 adapter->req_mtu = adapter->desired.mtu; 4363 else 4364 adapter->req_mtu = adapter->netdev->mtu + ETH_HLEN; 4365 4366 if (!adapter->desired.tx_entries) 4367 adapter->desired.tx_entries = 4368 adapter->max_tx_entries_per_subcrq; 4369 if (!adapter->desired.rx_entries) 4370 adapter->desired.rx_entries = 4371 adapter->max_rx_add_entries_per_subcrq; 4372 4373 max_entries = IBMVNIC_LTB_SET_SIZE / 4374 (adapter->req_mtu + IBMVNIC_BUFFER_HLEN); 4375 4376 if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) * 4377 adapter->desired.tx_entries > IBMVNIC_LTB_SET_SIZE) { 4378 adapter->desired.tx_entries = max_entries; 4379 } 4380 4381 if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) * 4382 adapter->desired.rx_entries > IBMVNIC_LTB_SET_SIZE) { 4383 adapter->desired.rx_entries = max_entries; 4384 } 4385 4386 if (adapter->desired.tx_entries) 4387 adapter->req_tx_entries_per_subcrq = 4388 adapter->desired.tx_entries; 4389 else 4390 adapter->req_tx_entries_per_subcrq = 4391 adapter->max_tx_entries_per_subcrq; 4392 4393 if (adapter->desired.rx_entries) 4394 adapter->req_rx_add_entries_per_subcrq = 4395 adapter->desired.rx_entries; 4396 else 4397 adapter->req_rx_add_entries_per_subcrq = 4398 adapter->max_rx_add_entries_per_subcrq; 4399 4400 if (adapter->desired.tx_queues) 4401 adapter->req_tx_queues = 4402 adapter->desired.tx_queues; 4403 else 4404 adapter->req_tx_queues = 4405 adapter->opt_tx_comp_sub_queues; 4406 4407 if (adapter->desired.rx_queues) 4408 adapter->req_rx_queues = 4409 adapter->desired.rx_queues; 4410 else 4411 adapter->req_rx_queues = 4412 adapter->opt_rx_comp_queues; 4413 4414 adapter->req_rx_add_queues = adapter->max_rx_add_queues; 4415 } else { 4416 atomic_add(cap_reqs, &adapter->running_cap_crqs); 4417 } 4418 memset(&crq, 0, sizeof(crq)); 4419 crq.request_capability.first = IBMVNIC_CRQ_CMD; 4420 crq.request_capability.cmd = REQUEST_CAPABILITY; 4421 4422 crq.request_capability.capability = cpu_to_be16(REQ_TX_QUEUES); 4423 crq.request_capability.number = cpu_to_be64(adapter->req_tx_queues); 4424 cap_reqs--; 4425 ibmvnic_send_crq(adapter, &crq); 4426 4427 crq.request_capability.capability = cpu_to_be16(REQ_RX_QUEUES); 4428 crq.request_capability.number = cpu_to_be64(adapter->req_rx_queues); 4429 cap_reqs--; 4430 ibmvnic_send_crq(adapter, &crq); 4431 4432 crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_QUEUES); 4433 crq.request_capability.number = cpu_to_be64(adapter->req_rx_add_queues); 4434 cap_reqs--; 4435 ibmvnic_send_crq(adapter, &crq); 4436 4437 crq.request_capability.capability = 4438 cpu_to_be16(REQ_TX_ENTRIES_PER_SUBCRQ); 4439 crq.request_capability.number = 4440 cpu_to_be64(adapter->req_tx_entries_per_subcrq); 4441 cap_reqs--; 4442 ibmvnic_send_crq(adapter, &crq); 4443 4444 crq.request_capability.capability = 4445 cpu_to_be16(REQ_RX_ADD_ENTRIES_PER_SUBCRQ); 4446 crq.request_capability.number 
= 4447 cpu_to_be64(adapter->req_rx_add_entries_per_subcrq); 4448 cap_reqs--; 4449 ibmvnic_send_crq(adapter, &crq); 4450 4451 crq.request_capability.capability = cpu_to_be16(REQ_MTU); 4452 crq.request_capability.number = cpu_to_be64(adapter->req_mtu); 4453 cap_reqs--; 4454 ibmvnic_send_crq(adapter, &crq); 4455 4456 if (adapter->netdev->flags & IFF_PROMISC) { 4457 if (adapter->promisc_supported) { 4458 crq.request_capability.capability = 4459 cpu_to_be16(PROMISC_REQUESTED); 4460 crq.request_capability.number = cpu_to_be64(1); 4461 cap_reqs--; 4462 ibmvnic_send_crq(adapter, &crq); 4463 } 4464 } else { 4465 crq.request_capability.capability = 4466 cpu_to_be16(PROMISC_REQUESTED); 4467 crq.request_capability.number = cpu_to_be64(0); 4468 cap_reqs--; 4469 ibmvnic_send_crq(adapter, &crq); 4470 } 4471 4472 /* Keep at end to catch any discrepancy between expected and actual 4473 * CRQs sent. 4474 */ 4475 WARN_ON(cap_reqs != 0); 4476 } 4477 4478 static int pending_scrq(struct ibmvnic_adapter *adapter, 4479 struct ibmvnic_sub_crq_queue *scrq) 4480 { 4481 union sub_crq *entry = &scrq->msgs[scrq->cur]; 4482 int rc; 4483 4484 rc = !!(entry->generic.first & IBMVNIC_CRQ_CMD_RSP); 4485 4486 /* Ensure that the SCRQ valid flag is loaded prior to loading the 4487 * contents of the SCRQ descriptor 4488 */ 4489 dma_rmb(); 4490 4491 return rc; 4492 } 4493 4494 static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *adapter, 4495 struct ibmvnic_sub_crq_queue *scrq) 4496 { 4497 union sub_crq *entry; 4498 unsigned long flags; 4499 4500 spin_lock_irqsave(&scrq->lock, flags); 4501 entry = &scrq->msgs[scrq->cur]; 4502 if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP) { 4503 if (++scrq->cur == scrq->size) 4504 scrq->cur = 0; 4505 } else { 4506 entry = NULL; 4507 } 4508 spin_unlock_irqrestore(&scrq->lock, flags); 4509 4510 /* Ensure that the SCRQ valid flag is loaded prior to loading the 4511 * contents of the SCRQ descriptor 4512 */ 4513 dma_rmb(); 4514 4515 return entry; 4516 } 4517 4518 static union ibmvnic_crq *ibmvnic_next_crq(struct ibmvnic_adapter *adapter) 4519 { 4520 struct ibmvnic_crq_queue *queue = &adapter->crq; 4521 union ibmvnic_crq *crq; 4522 4523 crq = &queue->msgs[queue->cur]; 4524 if (crq->generic.first & IBMVNIC_CRQ_CMD_RSP) { 4525 if (++queue->cur == queue->size) 4526 queue->cur = 0; 4527 } else { 4528 crq = NULL; 4529 } 4530 4531 return crq; 4532 } 4533 4534 static void print_subcrq_error(struct device *dev, int rc, const char *func) 4535 { 4536 switch (rc) { 4537 case H_PARAMETER: 4538 dev_warn_ratelimited(dev, 4539 "%s failed: Send request is malformed or adapter failover pending. (rc=%d)\n", 4540 func, rc); 4541 break; 4542 case H_CLOSED: 4543 dev_warn_ratelimited(dev, 4544 "%s failed: Backing queue closed. Adapter is down or failover pending. 
(rc=%d)\n", 4545 func, rc); 4546 break; 4547 default: 4548 dev_err_ratelimited(dev, "%s failed: (rc=%d)\n", func, rc); 4549 break; 4550 } 4551 } 4552 4553 static int send_subcrq_indirect(struct ibmvnic_adapter *adapter, 4554 u64 remote_handle, u64 ioba, u64 num_entries) 4555 { 4556 unsigned int ua = adapter->vdev->unit_address; 4557 struct device *dev = &adapter->vdev->dev; 4558 int rc; 4559 4560 /* Make sure the hypervisor sees the complete request */ 4561 dma_wmb(); 4562 rc = plpar_hcall_norets(H_SEND_SUB_CRQ_INDIRECT, ua, 4563 cpu_to_be64(remote_handle), 4564 ioba, num_entries); 4565 4566 if (rc) 4567 print_subcrq_error(dev, rc, __func__); 4568 4569 return rc; 4570 } 4571 4572 static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter, 4573 union ibmvnic_crq *crq) 4574 { 4575 unsigned int ua = adapter->vdev->unit_address; 4576 struct device *dev = &adapter->vdev->dev; 4577 u64 *u64_crq = (u64 *)crq; 4578 int rc; 4579 4580 netdev_dbg(adapter->netdev, "Sending CRQ: %016lx %016lx\n", 4581 (unsigned long)cpu_to_be64(u64_crq[0]), 4582 (unsigned long)cpu_to_be64(u64_crq[1])); 4583 4584 if (!adapter->crq.active && 4585 crq->generic.first != IBMVNIC_CRQ_INIT_CMD) { 4586 dev_warn(dev, "Invalid request detected while CRQ is inactive, possible device state change during reset\n"); 4587 return -EINVAL; 4588 } 4589 4590 /* Make sure the hypervisor sees the complete request */ 4591 dma_wmb(); 4592 4593 rc = plpar_hcall_norets(H_SEND_CRQ, ua, 4594 cpu_to_be64(u64_crq[0]), 4595 cpu_to_be64(u64_crq[1])); 4596 4597 if (rc) { 4598 if (rc == H_CLOSED) { 4599 dev_warn(dev, "CRQ Queue closed\n"); 4600 /* do not reset, report the fail, wait for passive init from server */ 4601 } 4602 4603 dev_warn(dev, "Send error (rc=%d)\n", rc); 4604 } 4605 4606 return rc; 4607 } 4608 4609 static int ibmvnic_send_crq_init(struct ibmvnic_adapter *adapter) 4610 { 4611 struct device *dev = &adapter->vdev->dev; 4612 union ibmvnic_crq crq; 4613 int retries = 100; 4614 int rc; 4615 4616 memset(&crq, 0, sizeof(crq)); 4617 crq.generic.first = IBMVNIC_CRQ_INIT_CMD; 4618 crq.generic.cmd = IBMVNIC_CRQ_INIT; 4619 netdev_dbg(adapter->netdev, "Sending CRQ init\n"); 4620 4621 do { 4622 rc = ibmvnic_send_crq(adapter, &crq); 4623 if (rc != H_CLOSED) 4624 break; 4625 retries--; 4626 msleep(50); 4627 4628 } while (retries > 0); 4629 4630 if (rc) { 4631 dev_err(dev, "Failed to send init request, rc = %d\n", rc); 4632 return rc; 4633 } 4634 4635 return 0; 4636 } 4637 4638 struct vnic_login_client_data { 4639 u8 type; 4640 __be16 len; 4641 char name[]; 4642 } __packed; 4643 4644 static int vnic_client_data_len(struct ibmvnic_adapter *adapter) 4645 { 4646 int len; 4647 4648 /* Calculate the amount of buffer space needed for the 4649 * vnic client data in the login buffer. There are four entries, 4650 * OS name, LPAR name, device name, and a null last entry. 
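	 * Each entry is a packed type/length header followed by the
	 * NUL-terminated name string.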
4651 */ 4652 len = 4 * sizeof(struct vnic_login_client_data); 4653 len += 6; /* "Linux" plus NULL */ 4654 len += strlen(utsname()->nodename) + 1; 4655 len += strlen(adapter->netdev->name) + 1; 4656 4657 return len; 4658 } 4659 4660 static void vnic_add_client_data(struct ibmvnic_adapter *adapter, 4661 struct vnic_login_client_data *vlcd) 4662 { 4663 const char *os_name = "Linux"; 4664 int len; 4665 4666 /* Type 1 - LPAR OS */ 4667 vlcd->type = 1; 4668 len = strlen(os_name) + 1; 4669 vlcd->len = cpu_to_be16(len); 4670 strscpy(vlcd->name, os_name, len); 4671 vlcd = (struct vnic_login_client_data *)(vlcd->name + len); 4672 4673 /* Type 2 - LPAR name */ 4674 vlcd->type = 2; 4675 len = strlen(utsname()->nodename) + 1; 4676 vlcd->len = cpu_to_be16(len); 4677 strscpy(vlcd->name, utsname()->nodename, len); 4678 vlcd = (struct vnic_login_client_data *)(vlcd->name + len); 4679 4680 /* Type 3 - device name */ 4681 vlcd->type = 3; 4682 len = strlen(adapter->netdev->name) + 1; 4683 vlcd->len = cpu_to_be16(len); 4684 strscpy(vlcd->name, adapter->netdev->name, len); 4685 } 4686 4687 static int send_login(struct ibmvnic_adapter *adapter) 4688 { 4689 struct ibmvnic_login_rsp_buffer *login_rsp_buffer; 4690 struct ibmvnic_login_buffer *login_buffer; 4691 struct device *dev = &adapter->vdev->dev; 4692 struct vnic_login_client_data *vlcd; 4693 dma_addr_t rsp_buffer_token; 4694 dma_addr_t buffer_token; 4695 size_t rsp_buffer_size; 4696 union ibmvnic_crq crq; 4697 int client_data_len; 4698 size_t buffer_size; 4699 __be64 *tx_list_p; 4700 __be64 *rx_list_p; 4701 int rc; 4702 int i; 4703 4704 if (!adapter->tx_scrq || !adapter->rx_scrq) { 4705 netdev_err(adapter->netdev, 4706 "RX or TX queues are not allocated, device login failed\n"); 4707 return -ENOMEM; 4708 } 4709 4710 release_login_buffer(adapter); 4711 release_login_rsp_buffer(adapter); 4712 4713 client_data_len = vnic_client_data_len(adapter); 4714 4715 buffer_size = 4716 sizeof(struct ibmvnic_login_buffer) + 4717 sizeof(u64) * (adapter->req_tx_queues + adapter->req_rx_queues) + 4718 client_data_len; 4719 4720 login_buffer = kzalloc(buffer_size, GFP_ATOMIC); 4721 if (!login_buffer) 4722 goto buf_alloc_failed; 4723 4724 buffer_token = dma_map_single(dev, login_buffer, buffer_size, 4725 DMA_TO_DEVICE); 4726 if (dma_mapping_error(dev, buffer_token)) { 4727 dev_err(dev, "Couldn't map login buffer\n"); 4728 goto buf_map_failed; 4729 } 4730 4731 rsp_buffer_size = sizeof(struct ibmvnic_login_rsp_buffer) + 4732 sizeof(u64) * adapter->req_tx_queues + 4733 sizeof(u64) * adapter->req_rx_queues + 4734 sizeof(u64) * adapter->req_rx_queues + 4735 sizeof(u8) * IBMVNIC_TX_DESC_VERSIONS; 4736 4737 login_rsp_buffer = kmalloc(rsp_buffer_size, GFP_ATOMIC); 4738 if (!login_rsp_buffer) 4739 goto buf_rsp_alloc_failed; 4740 4741 rsp_buffer_token = dma_map_single(dev, login_rsp_buffer, 4742 rsp_buffer_size, DMA_FROM_DEVICE); 4743 if (dma_mapping_error(dev, rsp_buffer_token)) { 4744 dev_err(dev, "Couldn't map login rsp buffer\n"); 4745 goto buf_rsp_map_failed; 4746 } 4747 4748 adapter->login_buf = login_buffer; 4749 adapter->login_buf_token = buffer_token; 4750 adapter->login_buf_sz = buffer_size; 4751 adapter->login_rsp_buf = login_rsp_buffer; 4752 adapter->login_rsp_buf_token = rsp_buffer_token; 4753 adapter->login_rsp_buf_sz = rsp_buffer_size; 4754 4755 login_buffer->len = cpu_to_be32(buffer_size); 4756 login_buffer->version = cpu_to_be32(INITIAL_VERSION_LB); 4757 login_buffer->num_txcomp_subcrqs = cpu_to_be32(adapter->req_tx_queues); 4758 login_buffer->off_txcomp_subcrqs = 4759 
cpu_to_be32(sizeof(struct ibmvnic_login_buffer)); 4760 login_buffer->num_rxcomp_subcrqs = cpu_to_be32(adapter->req_rx_queues); 4761 login_buffer->off_rxcomp_subcrqs = 4762 cpu_to_be32(sizeof(struct ibmvnic_login_buffer) + 4763 sizeof(u64) * adapter->req_tx_queues); 4764 login_buffer->login_rsp_ioba = cpu_to_be32(rsp_buffer_token); 4765 login_buffer->login_rsp_len = cpu_to_be32(rsp_buffer_size); 4766 4767 tx_list_p = (__be64 *)((char *)login_buffer + 4768 sizeof(struct ibmvnic_login_buffer)); 4769 rx_list_p = (__be64 *)((char *)login_buffer + 4770 sizeof(struct ibmvnic_login_buffer) + 4771 sizeof(u64) * adapter->req_tx_queues); 4772 4773 for (i = 0; i < adapter->req_tx_queues; i++) { 4774 if (adapter->tx_scrq[i]) { 4775 tx_list_p[i] = 4776 cpu_to_be64(adapter->tx_scrq[i]->crq_num); 4777 } 4778 } 4779 4780 for (i = 0; i < adapter->req_rx_queues; i++) { 4781 if (adapter->rx_scrq[i]) { 4782 rx_list_p[i] = 4783 cpu_to_be64(adapter->rx_scrq[i]->crq_num); 4784 } 4785 } 4786 4787 /* Insert vNIC login client data */ 4788 vlcd = (struct vnic_login_client_data *) 4789 ((char *)rx_list_p + (sizeof(u64) * adapter->req_rx_queues)); 4790 login_buffer->client_data_offset = 4791 cpu_to_be32((char *)vlcd - (char *)login_buffer); 4792 login_buffer->client_data_len = cpu_to_be32(client_data_len); 4793 4794 vnic_add_client_data(adapter, vlcd); 4795 4796 netdev_dbg(adapter->netdev, "Login Buffer:\n"); 4797 for (i = 0; i < (adapter->login_buf_sz - 1) / 8 + 1; i++) { 4798 netdev_dbg(adapter->netdev, "%016lx\n", 4799 ((unsigned long *)(adapter->login_buf))[i]); 4800 } 4801 4802 memset(&crq, 0, sizeof(crq)); 4803 crq.login.first = IBMVNIC_CRQ_CMD; 4804 crq.login.cmd = LOGIN; 4805 crq.login.ioba = cpu_to_be32(buffer_token); 4806 crq.login.len = cpu_to_be32(buffer_size); 4807 4808 adapter->login_pending = true; 4809 rc = ibmvnic_send_crq(adapter, &crq); 4810 if (rc) { 4811 adapter->login_pending = false; 4812 netdev_err(adapter->netdev, "Failed to send login, rc=%d\n", rc); 4813 goto buf_rsp_map_failed; 4814 } 4815 4816 return 0; 4817 4818 buf_rsp_map_failed: 4819 kfree(login_rsp_buffer); 4820 adapter->login_rsp_buf = NULL; 4821 buf_rsp_alloc_failed: 4822 dma_unmap_single(dev, buffer_token, buffer_size, DMA_TO_DEVICE); 4823 buf_map_failed: 4824 kfree(login_buffer); 4825 adapter->login_buf = NULL; 4826 buf_alloc_failed: 4827 return -ENOMEM; 4828 } 4829 4830 static int send_request_map(struct ibmvnic_adapter *adapter, dma_addr_t addr, 4831 u32 len, u8 map_id) 4832 { 4833 union ibmvnic_crq crq; 4834 4835 memset(&crq, 0, sizeof(crq)); 4836 crq.request_map.first = IBMVNIC_CRQ_CMD; 4837 crq.request_map.cmd = REQUEST_MAP; 4838 crq.request_map.map_id = map_id; 4839 crq.request_map.ioba = cpu_to_be32(addr); 4840 crq.request_map.len = cpu_to_be32(len); 4841 return ibmvnic_send_crq(adapter, &crq); 4842 } 4843 4844 static int send_request_unmap(struct ibmvnic_adapter *adapter, u8 map_id) 4845 { 4846 union ibmvnic_crq crq; 4847 4848 memset(&crq, 0, sizeof(crq)); 4849 crq.request_unmap.first = IBMVNIC_CRQ_CMD; 4850 crq.request_unmap.cmd = REQUEST_UNMAP; 4851 crq.request_unmap.map_id = map_id; 4852 return ibmvnic_send_crq(adapter, &crq); 4853 } 4854 4855 static void send_query_map(struct ibmvnic_adapter *adapter) 4856 { 4857 union ibmvnic_crq crq; 4858 4859 memset(&crq, 0, sizeof(crq)); 4860 crq.query_map.first = IBMVNIC_CRQ_CMD; 4861 crq.query_map.cmd = QUERY_MAP; 4862 ibmvnic_send_crq(adapter, &crq); 4863 } 4864 4865 /* Send a series of CRQs requesting various capabilities of the VNIC server */ 4866 static void 
send_query_cap(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;
	int cap_reqs;

	/* We send out 25 QUERY_CAPABILITY CRQs below. Initialize this count
	 * upfront. When the tasklet receives a response to all of these, it
	 * can send out the next protocol message (REQUEST_CAPABILITY).
	 */
	cap_reqs = 25;

	atomic_set(&adapter->running_cap_crqs, cap_reqs);

	memset(&crq, 0, sizeof(crq));
	crq.query_capability.first = IBMVNIC_CRQ_CMD;
	crq.query_capability.cmd = QUERY_CAPABILITY;

	crq.query_capability.capability = cpu_to_be16(MIN_TX_QUEUES);
	ibmvnic_send_crq(adapter, &crq);
	cap_reqs--;

	crq.query_capability.capability = cpu_to_be16(MIN_RX_QUEUES);
	ibmvnic_send_crq(adapter, &crq);
	cap_reqs--;

	crq.query_capability.capability = cpu_to_be16(MIN_RX_ADD_QUEUES);
	ibmvnic_send_crq(adapter, &crq);
	cap_reqs--;

	crq.query_capability.capability = cpu_to_be16(MAX_TX_QUEUES);
	ibmvnic_send_crq(adapter, &crq);
	cap_reqs--;

	crq.query_capability.capability = cpu_to_be16(MAX_RX_QUEUES);
	ibmvnic_send_crq(adapter, &crq);
	cap_reqs--;

	crq.query_capability.capability = cpu_to_be16(MAX_RX_ADD_QUEUES);
	ibmvnic_send_crq(adapter, &crq);
	cap_reqs--;

	crq.query_capability.capability =
		cpu_to_be16(MIN_TX_ENTRIES_PER_SUBCRQ);
	ibmvnic_send_crq(adapter, &crq);
	cap_reqs--;

	crq.query_capability.capability =
		cpu_to_be16(MIN_RX_ADD_ENTRIES_PER_SUBCRQ);
	ibmvnic_send_crq(adapter, &crq);
	cap_reqs--;

	crq.query_capability.capability =
		cpu_to_be16(MAX_TX_ENTRIES_PER_SUBCRQ);
	ibmvnic_send_crq(adapter, &crq);
	cap_reqs--;

	crq.query_capability.capability =
		cpu_to_be16(MAX_RX_ADD_ENTRIES_PER_SUBCRQ);
	ibmvnic_send_crq(adapter, &crq);
	cap_reqs--;

	crq.query_capability.capability = cpu_to_be16(TCP_IP_OFFLOAD);
	ibmvnic_send_crq(adapter, &crq);
	cap_reqs--;

	crq.query_capability.capability = cpu_to_be16(PROMISC_SUPPORTED);
	ibmvnic_send_crq(adapter, &crq);
	cap_reqs--;

	crq.query_capability.capability = cpu_to_be16(MIN_MTU);
	ibmvnic_send_crq(adapter, &crq);
	cap_reqs--;

	crq.query_capability.capability = cpu_to_be16(MAX_MTU);
	ibmvnic_send_crq(adapter, &crq);
	cap_reqs--;

	crq.query_capability.capability = cpu_to_be16(MAX_MULTICAST_FILTERS);
	ibmvnic_send_crq(adapter, &crq);
	cap_reqs--;

	crq.query_capability.capability = cpu_to_be16(VLAN_HEADER_INSERTION);
	ibmvnic_send_crq(adapter, &crq);
	cap_reqs--;

	crq.query_capability.capability = cpu_to_be16(RX_VLAN_HEADER_INSERTION);
	ibmvnic_send_crq(adapter, &crq);
	cap_reqs--;

	crq.query_capability.capability = cpu_to_be16(MAX_TX_SG_ENTRIES);
	ibmvnic_send_crq(adapter, &crq);
	cap_reqs--;

	crq.query_capability.capability = cpu_to_be16(RX_SG_SUPPORTED);
	ibmvnic_send_crq(adapter, &crq);
	cap_reqs--;

	crq.query_capability.capability = cpu_to_be16(OPT_TX_COMP_SUB_QUEUES);
	ibmvnic_send_crq(adapter, &crq);
	cap_reqs--;

	crq.query_capability.capability = cpu_to_be16(OPT_RX_COMP_QUEUES);
	ibmvnic_send_crq(adapter, &crq);
	cap_reqs--;

	crq.query_capability.capability =
		cpu_to_be16(OPT_RX_BUFADD_Q_PER_RX_COMP_Q);
	ibmvnic_send_crq(adapter, &crq);
	cap_reqs--;

	crq.query_capability.capability =
cpu_to_be16(OPT_TX_ENTRIES_PER_SUBCRQ); 4978 ibmvnic_send_crq(adapter, &crq); 4979 cap_reqs--; 4980 4981 crq.query_capability.capability = 4982 cpu_to_be16(OPT_RXBA_ENTRIES_PER_SUBCRQ); 4983 ibmvnic_send_crq(adapter, &crq); 4984 cap_reqs--; 4985 4986 crq.query_capability.capability = cpu_to_be16(TX_RX_DESC_REQ); 4987 4988 ibmvnic_send_crq(adapter, &crq); 4989 cap_reqs--; 4990 4991 /* Keep at end to catch any discrepancy between expected and actual 4992 * CRQs sent. 4993 */ 4994 WARN_ON(cap_reqs != 0); 4995 } 4996 4997 static void send_query_ip_offload(struct ibmvnic_adapter *adapter) 4998 { 4999 int buf_sz = sizeof(struct ibmvnic_query_ip_offload_buffer); 5000 struct device *dev = &adapter->vdev->dev; 5001 union ibmvnic_crq crq; 5002 5003 adapter->ip_offload_tok = 5004 dma_map_single(dev, 5005 &adapter->ip_offload_buf, 5006 buf_sz, 5007 DMA_FROM_DEVICE); 5008 5009 if (dma_mapping_error(dev, adapter->ip_offload_tok)) { 5010 if (!firmware_has_feature(FW_FEATURE_CMO)) 5011 dev_err(dev, "Couldn't map offload buffer\n"); 5012 return; 5013 } 5014 5015 memset(&crq, 0, sizeof(crq)); 5016 crq.query_ip_offload.first = IBMVNIC_CRQ_CMD; 5017 crq.query_ip_offload.cmd = QUERY_IP_OFFLOAD; 5018 crq.query_ip_offload.len = cpu_to_be32(buf_sz); 5019 crq.query_ip_offload.ioba = 5020 cpu_to_be32(adapter->ip_offload_tok); 5021 5022 ibmvnic_send_crq(adapter, &crq); 5023 } 5024 5025 static void send_control_ip_offload(struct ibmvnic_adapter *adapter) 5026 { 5027 struct ibmvnic_control_ip_offload_buffer *ctrl_buf = &adapter->ip_offload_ctrl; 5028 struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf; 5029 struct device *dev = &adapter->vdev->dev; 5030 netdev_features_t old_hw_features = 0; 5031 union ibmvnic_crq crq; 5032 5033 adapter->ip_offload_ctrl_tok = 5034 dma_map_single(dev, 5035 ctrl_buf, 5036 sizeof(adapter->ip_offload_ctrl), 5037 DMA_TO_DEVICE); 5038 5039 if (dma_mapping_error(dev, adapter->ip_offload_ctrl_tok)) { 5040 dev_err(dev, "Couldn't map ip offload control buffer\n"); 5041 return; 5042 } 5043 5044 ctrl_buf->len = cpu_to_be32(sizeof(adapter->ip_offload_ctrl)); 5045 ctrl_buf->version = cpu_to_be32(INITIAL_VERSION_IOB); 5046 ctrl_buf->ipv4_chksum = buf->ipv4_chksum; 5047 ctrl_buf->ipv6_chksum = buf->ipv6_chksum; 5048 ctrl_buf->tcp_ipv4_chksum = buf->tcp_ipv4_chksum; 5049 ctrl_buf->udp_ipv4_chksum = buf->udp_ipv4_chksum; 5050 ctrl_buf->tcp_ipv6_chksum = buf->tcp_ipv6_chksum; 5051 ctrl_buf->udp_ipv6_chksum = buf->udp_ipv6_chksum; 5052 ctrl_buf->large_tx_ipv4 = buf->large_tx_ipv4; 5053 ctrl_buf->large_tx_ipv6 = buf->large_tx_ipv6; 5054 5055 /* large_rx disabled for now, additional features needed */ 5056 ctrl_buf->large_rx_ipv4 = 0; 5057 ctrl_buf->large_rx_ipv6 = 0; 5058 5059 if (adapter->state != VNIC_PROBING) { 5060 old_hw_features = adapter->netdev->hw_features; 5061 adapter->netdev->hw_features = 0; 5062 } 5063 5064 adapter->netdev->hw_features = NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO; 5065 5066 if (buf->tcp_ipv4_chksum || buf->udp_ipv4_chksum) 5067 adapter->netdev->hw_features |= NETIF_F_IP_CSUM; 5068 5069 if (buf->tcp_ipv6_chksum || buf->udp_ipv6_chksum) 5070 adapter->netdev->hw_features |= NETIF_F_IPV6_CSUM; 5071 5072 if ((adapter->netdev->features & 5073 (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))) 5074 adapter->netdev->hw_features |= NETIF_F_RXCSUM; 5075 5076 if (buf->large_tx_ipv4) 5077 adapter->netdev->hw_features |= NETIF_F_TSO; 5078 if (buf->large_tx_ipv6) 5079 adapter->netdev->hw_features |= NETIF_F_TSO6; 5080 5081 if (adapter->state == VNIC_PROBING) { 5082 
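		/* Initial probe: no user-configured features exist yet, so
		 * enable everything the server advertises.
		 */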
		adapter->netdev->features |= adapter->netdev->hw_features;
	} else if (old_hw_features != adapter->netdev->hw_features) {
		netdev_features_t tmp = 0;

		/* disable features no longer supported */
		adapter->netdev->features &= adapter->netdev->hw_features;
		/* turn on features now supported if previously enabled */
		tmp = (old_hw_features ^ adapter->netdev->hw_features) &
		      adapter->netdev->hw_features;
		adapter->netdev->features |=
				tmp & adapter->netdev->wanted_features;
	}

	memset(&crq, 0, sizeof(crq));
	crq.control_ip_offload.first = IBMVNIC_CRQ_CMD;
	crq.control_ip_offload.cmd = CONTROL_IP_OFFLOAD;
	crq.control_ip_offload.len =
		cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
	crq.control_ip_offload.ioba = cpu_to_be32(adapter->ip_offload_ctrl_tok);
	ibmvnic_send_crq(adapter, &crq);
}

static void handle_vpd_size_rsp(union ibmvnic_crq *crq,
				struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;

	if (crq->get_vpd_size_rsp.rc.code) {
		dev_err(dev, "Error retrieving VPD size, rc=%x\n",
			crq->get_vpd_size_rsp.rc.code);
		complete(&adapter->fw_done);
		return;
	}

	adapter->vpd->len = be64_to_cpu(crq->get_vpd_size_rsp.len);
	complete(&adapter->fw_done);
}

static void handle_vpd_rsp(union ibmvnic_crq *crq,
			   struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	unsigned char *substr = NULL;
	u8 fw_level_len = 0;

	memset(adapter->fw_version, 0, 32);

	dma_unmap_single(dev, adapter->vpd->dma_addr, adapter->vpd->len,
			 DMA_FROM_DEVICE);

	if (crq->get_vpd_rsp.rc.code) {
		dev_err(dev, "Error retrieving VPD from device, rc=%x\n",
			crq->get_vpd_rsp.rc.code);
		goto complete;
	}

	/* get the position of the firmware version info
	 * located after the ASCII 'RM' substring in the buffer
	 */
	substr = strnstr(adapter->vpd->buff, "RM", adapter->vpd->len);
	if (!substr) {
		dev_info(dev, "Warning - No FW level has been provided in the VPD buffer by the VIOS Server\n");
		goto complete;
	}

	/* get length of firmware level ASCII substring */
	if ((substr + 2) < (adapter->vpd->buff + adapter->vpd->len)) {
		fw_level_len = *(substr + 2);
	} else {
		dev_info(dev, "FW level length field runs past the end of the VPD buffer\n");
		goto complete;
	}

	/* copy firmware version string from vpd into adapter */
	if ((substr + 3 + fw_level_len) <
	    (adapter->vpd->buff + adapter->vpd->len)) {
		strncpy((char *)adapter->fw_version, substr + 3, fw_level_len);
	} else {
		dev_info(dev, "FW version string runs past the end of the VPD buffer\n");
	}

complete:
	if (adapter->fw_version[0] == '\0')
		strscpy((char *)adapter->fw_version, "N/A", sizeof(adapter->fw_version));
	complete(&adapter->fw_done);
}

static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
	int i;

	dma_unmap_single(dev, adapter->ip_offload_tok,
			 sizeof(adapter->ip_offload_buf), DMA_FROM_DEVICE);

	netdev_dbg(adapter->netdev, "Query IP Offload Buffer:\n");
	for (i = 0; i < (sizeof(adapter->ip_offload_buf) - 1) / 8 + 1; i++)
		netdev_dbg(adapter->netdev, "%016lx\n",
			   ((unsigned long *)(buf))[i]);

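	/* Log each capability reported by the server before programming
	 * the control buffer in send_control_ip_offload().
	 */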
netdev_dbg(adapter->netdev, "ipv4_chksum = %d\n", buf->ipv4_chksum); 5184 netdev_dbg(adapter->netdev, "ipv6_chksum = %d\n", buf->ipv6_chksum); 5185 netdev_dbg(adapter->netdev, "tcp_ipv4_chksum = %d\n", 5186 buf->tcp_ipv4_chksum); 5187 netdev_dbg(adapter->netdev, "tcp_ipv6_chksum = %d\n", 5188 buf->tcp_ipv6_chksum); 5189 netdev_dbg(adapter->netdev, "udp_ipv4_chksum = %d\n", 5190 buf->udp_ipv4_chksum); 5191 netdev_dbg(adapter->netdev, "udp_ipv6_chksum = %d\n", 5192 buf->udp_ipv6_chksum); 5193 netdev_dbg(adapter->netdev, "large_tx_ipv4 = %d\n", 5194 buf->large_tx_ipv4); 5195 netdev_dbg(adapter->netdev, "large_tx_ipv6 = %d\n", 5196 buf->large_tx_ipv6); 5197 netdev_dbg(adapter->netdev, "large_rx_ipv4 = %d\n", 5198 buf->large_rx_ipv4); 5199 netdev_dbg(adapter->netdev, "large_rx_ipv6 = %d\n", 5200 buf->large_rx_ipv6); 5201 netdev_dbg(adapter->netdev, "max_ipv4_hdr_sz = %d\n", 5202 buf->max_ipv4_header_size); 5203 netdev_dbg(adapter->netdev, "max_ipv6_hdr_sz = %d\n", 5204 buf->max_ipv6_header_size); 5205 netdev_dbg(adapter->netdev, "max_tcp_hdr_size = %d\n", 5206 buf->max_tcp_header_size); 5207 netdev_dbg(adapter->netdev, "max_udp_hdr_size = %d\n", 5208 buf->max_udp_header_size); 5209 netdev_dbg(adapter->netdev, "max_large_tx_size = %d\n", 5210 buf->max_large_tx_size); 5211 netdev_dbg(adapter->netdev, "max_large_rx_size = %d\n", 5212 buf->max_large_rx_size); 5213 netdev_dbg(adapter->netdev, "ipv6_ext_hdr = %d\n", 5214 buf->ipv6_extension_header); 5215 netdev_dbg(adapter->netdev, "tcp_pseudosum_req = %d\n", 5216 buf->tcp_pseudosum_req); 5217 netdev_dbg(adapter->netdev, "num_ipv6_ext_hd = %d\n", 5218 buf->num_ipv6_ext_headers); 5219 netdev_dbg(adapter->netdev, "off_ipv6_ext_hd = %d\n", 5220 buf->off_ipv6_ext_headers); 5221 5222 send_control_ip_offload(adapter); 5223 } 5224 5225 static const char *ibmvnic_fw_err_cause(u16 cause) 5226 { 5227 switch (cause) { 5228 case ADAPTER_PROBLEM: 5229 return "adapter problem"; 5230 case BUS_PROBLEM: 5231 return "bus problem"; 5232 case FW_PROBLEM: 5233 return "firmware problem"; 5234 case DD_PROBLEM: 5235 return "device driver problem"; 5236 case EEH_RECOVERY: 5237 return "EEH recovery"; 5238 case FW_UPDATED: 5239 return "firmware updated"; 5240 case LOW_MEMORY: 5241 return "low Memory"; 5242 default: 5243 return "unknown"; 5244 } 5245 } 5246 5247 static void handle_error_indication(union ibmvnic_crq *crq, 5248 struct ibmvnic_adapter *adapter) 5249 { 5250 struct device *dev = &adapter->vdev->dev; 5251 u16 cause; 5252 5253 cause = be16_to_cpu(crq->error_indication.error_cause); 5254 5255 dev_warn_ratelimited(dev, 5256 "Firmware reports %serror, cause: %s. Starting recovery...\n", 5257 crq->error_indication.flags 5258 & IBMVNIC_FATAL_ERROR ? "FATAL " : "", 5259 ibmvnic_fw_err_cause(cause)); 5260 5261 if (crq->error_indication.flags & IBMVNIC_FATAL_ERROR) 5262 ibmvnic_reset(adapter, VNIC_RESET_FATAL); 5263 else 5264 ibmvnic_reset(adapter, VNIC_RESET_NON_FATAL); 5265 } 5266 5267 static int handle_change_mac_rsp(union ibmvnic_crq *crq, 5268 struct ibmvnic_adapter *adapter) 5269 { 5270 struct net_device *netdev = adapter->netdev; 5271 struct device *dev = &adapter->vdev->dev; 5272 long rc; 5273 5274 rc = crq->change_mac_addr_rsp.rc.code; 5275 if (rc) { 5276 dev_err(dev, "Error %ld in CHANGE_MAC_ADDR_RSP\n", rc); 5277 goto out; 5278 } 5279 /* crq->change_mac_addr.mac_addr is the requested one 5280 * crq->change_mac_addr_rsp.mac_addr is the returned valid one. 
5281 */ 5282 eth_hw_addr_set(netdev, &crq->change_mac_addr_rsp.mac_addr[0]); 5283 ether_addr_copy(adapter->mac_addr, 5284 &crq->change_mac_addr_rsp.mac_addr[0]); 5285 out: 5286 complete(&adapter->fw_done); 5287 return rc; 5288 } 5289 5290 static void handle_request_cap_rsp(union ibmvnic_crq *crq, 5291 struct ibmvnic_adapter *adapter) 5292 { 5293 struct device *dev = &adapter->vdev->dev; 5294 u64 *req_value; 5295 char *name; 5296 5297 atomic_dec(&adapter->running_cap_crqs); 5298 netdev_dbg(adapter->netdev, "Outstanding request-caps: %d\n", 5299 atomic_read(&adapter->running_cap_crqs)); 5300 switch (be16_to_cpu(crq->request_capability_rsp.capability)) { 5301 case REQ_TX_QUEUES: 5302 req_value = &adapter->req_tx_queues; 5303 name = "tx"; 5304 break; 5305 case REQ_RX_QUEUES: 5306 req_value = &adapter->req_rx_queues; 5307 name = "rx"; 5308 break; 5309 case REQ_RX_ADD_QUEUES: 5310 req_value = &adapter->req_rx_add_queues; 5311 name = "rx_add"; 5312 break; 5313 case REQ_TX_ENTRIES_PER_SUBCRQ: 5314 req_value = &adapter->req_tx_entries_per_subcrq; 5315 name = "tx_entries_per_subcrq"; 5316 break; 5317 case REQ_RX_ADD_ENTRIES_PER_SUBCRQ: 5318 req_value = &adapter->req_rx_add_entries_per_subcrq; 5319 name = "rx_add_entries_per_subcrq"; 5320 break; 5321 case REQ_MTU: 5322 req_value = &adapter->req_mtu; 5323 name = "mtu"; 5324 break; 5325 case PROMISC_REQUESTED: 5326 req_value = &adapter->promisc; 5327 name = "promisc"; 5328 break; 5329 default: 5330 dev_err(dev, "Got invalid cap request rsp %d\n", 5331 crq->request_capability.capability); 5332 return; 5333 } 5334 5335 switch (crq->request_capability_rsp.rc.code) { 5336 case SUCCESS: 5337 break; 5338 case PARTIALSUCCESS: 5339 dev_info(dev, "req=%lld, rsp=%ld in %s queue, retrying.\n", 5340 *req_value, 5341 (long)be64_to_cpu(crq->request_capability_rsp.number), 5342 name); 5343 5344 if (be16_to_cpu(crq->request_capability_rsp.capability) == 5345 REQ_MTU) { 5346 pr_err("mtu of %llu is not supported. Reverting.\n", 5347 *req_value); 5348 *req_value = adapter->fallback.mtu; 5349 } else { 5350 *req_value = 5351 be64_to_cpu(crq->request_capability_rsp.number); 5352 } 5353 5354 send_request_cap(adapter, 1); 5355 return; 5356 default: 5357 dev_err(dev, "Error %d in request cap rsp\n", 5358 crq->request_capability_rsp.rc.code); 5359 return; 5360 } 5361 5362 /* Done receiving requested capabilities, query IP offload support */ 5363 if (atomic_read(&adapter->running_cap_crqs) == 0) 5364 send_query_ip_offload(adapter); 5365 } 5366 5367 static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq, 5368 struct ibmvnic_adapter *adapter) 5369 { 5370 struct device *dev = &adapter->vdev->dev; 5371 struct net_device *netdev = adapter->netdev; 5372 struct ibmvnic_login_rsp_buffer *login_rsp = adapter->login_rsp_buf; 5373 struct ibmvnic_login_buffer *login = adapter->login_buf; 5374 u64 *tx_handle_array; 5375 u64 *rx_handle_array; 5376 int num_tx_pools; 5377 int num_rx_pools; 5378 u64 *size_array; 5379 int i; 5380 5381 /* CHECK: Test/set of login_pending does not need to be atomic 5382 * because only ibmvnic_tasklet tests/clears this. 
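	 * (It is set in send_login() just before the login CRQ is issued.)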
5383 */ 5384 if (!adapter->login_pending) { 5385 netdev_warn(netdev, "Ignoring unexpected login response\n"); 5386 return 0; 5387 } 5388 adapter->login_pending = false; 5389 5390 dma_unmap_single(dev, adapter->login_buf_token, adapter->login_buf_sz, 5391 DMA_TO_DEVICE); 5392 dma_unmap_single(dev, adapter->login_rsp_buf_token, 5393 adapter->login_rsp_buf_sz, DMA_FROM_DEVICE); 5394 5395 /* If the number of queues requested can't be allocated by the 5396 * server, the login response will return with code 1. We will need 5397 * to resend the login buffer with fewer queues requested. 5398 */ 5399 if (login_rsp_crq->generic.rc.code) { 5400 adapter->init_done_rc = login_rsp_crq->generic.rc.code; 5401 complete(&adapter->init_done); 5402 return 0; 5403 } 5404 5405 if (adapter->failover_pending) { 5406 adapter->init_done_rc = -EAGAIN; 5407 netdev_dbg(netdev, "Failover pending, ignoring login response\n"); 5408 complete(&adapter->init_done); 5409 /* login response buffer will be released on reset */ 5410 return 0; 5411 } 5412 5413 netdev->mtu = adapter->req_mtu - ETH_HLEN; 5414 5415 netdev_dbg(adapter->netdev, "Login Response Buffer:\n"); 5416 for (i = 0; i < (adapter->login_rsp_buf_sz - 1) / 8 + 1; i++) { 5417 netdev_dbg(adapter->netdev, "%016lx\n", 5418 ((unsigned long *)(adapter->login_rsp_buf))[i]); 5419 } 5420 5421 /* Sanity checks */ 5422 if (login->num_txcomp_subcrqs != login_rsp->num_txsubm_subcrqs || 5423 (be32_to_cpu(login->num_rxcomp_subcrqs) * 5424 adapter->req_rx_add_queues != 5425 be32_to_cpu(login_rsp->num_rxadd_subcrqs))) { 5426 dev_err(dev, "FATAL: Inconsistent login and login rsp\n"); 5427 ibmvnic_reset(adapter, VNIC_RESET_FATAL); 5428 return -EIO; 5429 } 5430 size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) + 5431 be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size)); 5432 /* variable buffer sizes are not supported, so just read the 5433 * first entry. 
5434 */ 5435 adapter->cur_rx_buf_sz = be64_to_cpu(size_array[0]); 5436 5437 num_tx_pools = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs); 5438 num_rx_pools = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs); 5439 5440 tx_handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) + 5441 be32_to_cpu(adapter->login_rsp_buf->off_txsubm_subcrqs)); 5442 rx_handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) + 5443 be32_to_cpu(adapter->login_rsp_buf->off_rxadd_subcrqs)); 5444 5445 for (i = 0; i < num_tx_pools; i++) 5446 adapter->tx_scrq[i]->handle = tx_handle_array[i]; 5447 5448 for (i = 0; i < num_rx_pools; i++) 5449 adapter->rx_scrq[i]->handle = rx_handle_array[i]; 5450 5451 adapter->num_active_tx_scrqs = num_tx_pools; 5452 adapter->num_active_rx_scrqs = num_rx_pools; 5453 release_login_rsp_buffer(adapter); 5454 release_login_buffer(adapter); 5455 complete(&adapter->init_done); 5456 5457 return 0; 5458 } 5459 5460 static void handle_request_unmap_rsp(union ibmvnic_crq *crq, 5461 struct ibmvnic_adapter *adapter) 5462 { 5463 struct device *dev = &adapter->vdev->dev; 5464 long rc; 5465 5466 rc = crq->request_unmap_rsp.rc.code; 5467 if (rc) 5468 dev_err(dev, "Error %ld in REQUEST_UNMAP_RSP\n", rc); 5469 } 5470 5471 static void handle_query_map_rsp(union ibmvnic_crq *crq, 5472 struct ibmvnic_adapter *adapter) 5473 { 5474 struct net_device *netdev = adapter->netdev; 5475 struct device *dev = &adapter->vdev->dev; 5476 long rc; 5477 5478 rc = crq->query_map_rsp.rc.code; 5479 if (rc) { 5480 dev_err(dev, "Error %ld in QUERY_MAP_RSP\n", rc); 5481 return; 5482 } 5483 netdev_dbg(netdev, "page_size = %d\ntot_pages = %u\nfree_pages = %u\n", 5484 crq->query_map_rsp.page_size, 5485 __be32_to_cpu(crq->query_map_rsp.tot_pages), 5486 __be32_to_cpu(crq->query_map_rsp.free_pages)); 5487 } 5488 5489 static void handle_query_cap_rsp(union ibmvnic_crq *crq, 5490 struct ibmvnic_adapter *adapter) 5491 { 5492 struct net_device *netdev = adapter->netdev; 5493 struct device *dev = &adapter->vdev->dev; 5494 long rc; 5495 5496 atomic_dec(&adapter->running_cap_crqs); 5497 netdev_dbg(netdev, "Outstanding queries: %d\n", 5498 atomic_read(&adapter->running_cap_crqs)); 5499 rc = crq->query_capability.rc.code; 5500 if (rc) { 5501 dev_err(dev, "Error %ld in QUERY_CAP_RSP\n", rc); 5502 goto out; 5503 } 5504 5505 switch (be16_to_cpu(crq->query_capability.capability)) { 5506 case MIN_TX_QUEUES: 5507 adapter->min_tx_queues = 5508 be64_to_cpu(crq->query_capability.number); 5509 netdev_dbg(netdev, "min_tx_queues = %lld\n", 5510 adapter->min_tx_queues); 5511 break; 5512 case MIN_RX_QUEUES: 5513 adapter->min_rx_queues = 5514 be64_to_cpu(crq->query_capability.number); 5515 netdev_dbg(netdev, "min_rx_queues = %lld\n", 5516 adapter->min_rx_queues); 5517 break; 5518 case MIN_RX_ADD_QUEUES: 5519 adapter->min_rx_add_queues = 5520 be64_to_cpu(crq->query_capability.number); 5521 netdev_dbg(netdev, "min_rx_add_queues = %lld\n", 5522 adapter->min_rx_add_queues); 5523 break; 5524 case MAX_TX_QUEUES: 5525 adapter->max_tx_queues = 5526 be64_to_cpu(crq->query_capability.number); 5527 netdev_dbg(netdev, "max_tx_queues = %lld\n", 5528 adapter->max_tx_queues); 5529 break; 5530 case MAX_RX_QUEUES: 5531 adapter->max_rx_queues = 5532 be64_to_cpu(crq->query_capability.number); 5533 netdev_dbg(netdev, "max_rx_queues = %lld\n", 5534 adapter->max_rx_queues); 5535 break; 5536 case MAX_RX_ADD_QUEUES: 5537 adapter->max_rx_add_queues = 5538 be64_to_cpu(crq->query_capability.number); 5539 netdev_dbg(netdev, "max_rx_add_queues = %lld\n", 5540 
adapter->max_rx_add_queues); 5541 break; 5542 case MIN_TX_ENTRIES_PER_SUBCRQ: 5543 adapter->min_tx_entries_per_subcrq = 5544 be64_to_cpu(crq->query_capability.number); 5545 netdev_dbg(netdev, "min_tx_entries_per_subcrq = %lld\n", 5546 adapter->min_tx_entries_per_subcrq); 5547 break; 5548 case MIN_RX_ADD_ENTRIES_PER_SUBCRQ: 5549 adapter->min_rx_add_entries_per_subcrq = 5550 be64_to_cpu(crq->query_capability.number); 5551 netdev_dbg(netdev, "min_rx_add_entrs_per_subcrq = %lld\n", 5552 adapter->min_rx_add_entries_per_subcrq); 5553 break; 5554 case MAX_TX_ENTRIES_PER_SUBCRQ: 5555 adapter->max_tx_entries_per_subcrq = 5556 be64_to_cpu(crq->query_capability.number); 5557 netdev_dbg(netdev, "max_tx_entries_per_subcrq = %lld\n", 5558 adapter->max_tx_entries_per_subcrq); 5559 break; 5560 case MAX_RX_ADD_ENTRIES_PER_SUBCRQ: 5561 adapter->max_rx_add_entries_per_subcrq = 5562 be64_to_cpu(crq->query_capability.number); 5563 netdev_dbg(netdev, "max_rx_add_entrs_per_subcrq = %lld\n", 5564 adapter->max_rx_add_entries_per_subcrq); 5565 break; 5566 case TCP_IP_OFFLOAD: 5567 adapter->tcp_ip_offload = 5568 be64_to_cpu(crq->query_capability.number); 5569 netdev_dbg(netdev, "tcp_ip_offload = %lld\n", 5570 adapter->tcp_ip_offload); 5571 break; 5572 case PROMISC_SUPPORTED: 5573 adapter->promisc_supported = 5574 be64_to_cpu(crq->query_capability.number); 5575 netdev_dbg(netdev, "promisc_supported = %lld\n", 5576 adapter->promisc_supported); 5577 break; 5578 case MIN_MTU: 5579 adapter->min_mtu = be64_to_cpu(crq->query_capability.number); 5580 netdev->min_mtu = adapter->min_mtu - ETH_HLEN; 5581 netdev_dbg(netdev, "min_mtu = %lld\n", adapter->min_mtu); 5582 break; 5583 case MAX_MTU: 5584 adapter->max_mtu = be64_to_cpu(crq->query_capability.number); 5585 netdev->max_mtu = adapter->max_mtu - ETH_HLEN; 5586 netdev_dbg(netdev, "max_mtu = %lld\n", adapter->max_mtu); 5587 break; 5588 case MAX_MULTICAST_FILTERS: 5589 adapter->max_multicast_filters = 5590 be64_to_cpu(crq->query_capability.number); 5591 netdev_dbg(netdev, "max_multicast_filters = %lld\n", 5592 adapter->max_multicast_filters); 5593 break; 5594 case VLAN_HEADER_INSERTION: 5595 adapter->vlan_header_insertion = 5596 be64_to_cpu(crq->query_capability.number); 5597 if (adapter->vlan_header_insertion) 5598 netdev->features |= NETIF_F_HW_VLAN_STAG_TX; 5599 netdev_dbg(netdev, "vlan_header_insertion = %lld\n", 5600 adapter->vlan_header_insertion); 5601 break; 5602 case RX_VLAN_HEADER_INSERTION: 5603 adapter->rx_vlan_header_insertion = 5604 be64_to_cpu(crq->query_capability.number); 5605 netdev_dbg(netdev, "rx_vlan_header_insertion = %lld\n", 5606 adapter->rx_vlan_header_insertion); 5607 break; 5608 case MAX_TX_SG_ENTRIES: 5609 adapter->max_tx_sg_entries = 5610 be64_to_cpu(crq->query_capability.number); 5611 netdev_dbg(netdev, "max_tx_sg_entries = %lld\n", 5612 adapter->max_tx_sg_entries); 5613 break; 5614 case RX_SG_SUPPORTED: 5615 adapter->rx_sg_supported = 5616 be64_to_cpu(crq->query_capability.number); 5617 netdev_dbg(netdev, "rx_sg_supported = %lld\n", 5618 adapter->rx_sg_supported); 5619 break; 5620 case OPT_TX_COMP_SUB_QUEUES: 5621 adapter->opt_tx_comp_sub_queues = 5622 be64_to_cpu(crq->query_capability.number); 5623 netdev_dbg(netdev, "opt_tx_comp_sub_queues = %lld\n", 5624 adapter->opt_tx_comp_sub_queues); 5625 break; 5626 case OPT_RX_COMP_QUEUES: 5627 adapter->opt_rx_comp_queues = 5628 be64_to_cpu(crq->query_capability.number); 5629 netdev_dbg(netdev, "opt_rx_comp_queues = %lld\n", 5630 adapter->opt_rx_comp_queues); 5631 break; 5632 case 
	OPT_RX_BUFADD_Q_PER_RX_COMP_Q:
		adapter->opt_rx_bufadd_q_per_rx_comp_q =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_rx_bufadd_q_per_rx_comp_q = %lld\n",
			   adapter->opt_rx_bufadd_q_per_rx_comp_q);
		break;
	case OPT_TX_ENTRIES_PER_SUBCRQ:
		adapter->opt_tx_entries_per_subcrq =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_tx_entries_per_subcrq = %lld\n",
			   adapter->opt_tx_entries_per_subcrq);
		break;
	case OPT_RXBA_ENTRIES_PER_SUBCRQ:
		adapter->opt_rxba_entries_per_subcrq =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_rxba_entries_per_subcrq = %lld\n",
			   adapter->opt_rxba_entries_per_subcrq);
		break;
	case TX_RX_DESC_REQ:
		adapter->tx_rx_desc_req = crq->query_capability.number;
		netdev_dbg(netdev, "tx_rx_desc_req = %llx\n",
			   adapter->tx_rx_desc_req);
		break;

	default:
		netdev_err(netdev, "Got invalid cap rsp %d\n",
			   be16_to_cpu(crq->query_capability.capability));
	}

out:
	if (atomic_read(&adapter->running_cap_crqs) == 0)
		send_request_cap(adapter, 0);
}

static int send_query_phys_parms(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;
	int rc;

	memset(&crq, 0, sizeof(crq));
	crq.query_phys_parms.first = IBMVNIC_CRQ_CMD;
	crq.query_phys_parms.cmd = QUERY_PHYS_PARMS;

	mutex_lock(&adapter->fw_lock);
	adapter->fw_done_rc = 0;
	reinit_completion(&adapter->fw_done);

	rc = ibmvnic_send_crq(adapter, &crq);
	if (rc) {
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}

	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
	if (rc) {
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}

	mutex_unlock(&adapter->fw_lock);
	return adapter->fw_done_rc ?
-EIO : 0; 5693 } 5694 5695 static int handle_query_phys_parms_rsp(union ibmvnic_crq *crq, 5696 struct ibmvnic_adapter *adapter) 5697 { 5698 struct net_device *netdev = adapter->netdev; 5699 int rc; 5700 __be32 rspeed = cpu_to_be32(crq->query_phys_parms_rsp.speed); 5701 5702 rc = crq->query_phys_parms_rsp.rc.code; 5703 if (rc) { 5704 netdev_err(netdev, "Error %d in QUERY_PHYS_PARMS\n", rc); 5705 return rc; 5706 } 5707 switch (rspeed) { 5708 case IBMVNIC_10MBPS: 5709 adapter->speed = SPEED_10; 5710 break; 5711 case IBMVNIC_100MBPS: 5712 adapter->speed = SPEED_100; 5713 break; 5714 case IBMVNIC_1GBPS: 5715 adapter->speed = SPEED_1000; 5716 break; 5717 case IBMVNIC_10GBPS: 5718 adapter->speed = SPEED_10000; 5719 break; 5720 case IBMVNIC_25GBPS: 5721 adapter->speed = SPEED_25000; 5722 break; 5723 case IBMVNIC_40GBPS: 5724 adapter->speed = SPEED_40000; 5725 break; 5726 case IBMVNIC_50GBPS: 5727 adapter->speed = SPEED_50000; 5728 break; 5729 case IBMVNIC_100GBPS: 5730 adapter->speed = SPEED_100000; 5731 break; 5732 case IBMVNIC_200GBPS: 5733 adapter->speed = SPEED_200000; 5734 break; 5735 default: 5736 if (netif_carrier_ok(netdev)) 5737 netdev_warn(netdev, "Unknown speed 0x%08x\n", rspeed); 5738 adapter->speed = SPEED_UNKNOWN; 5739 } 5740 if (crq->query_phys_parms_rsp.flags1 & IBMVNIC_FULL_DUPLEX) 5741 adapter->duplex = DUPLEX_FULL; 5742 else if (crq->query_phys_parms_rsp.flags1 & IBMVNIC_HALF_DUPLEX) 5743 adapter->duplex = DUPLEX_HALF; 5744 else 5745 adapter->duplex = DUPLEX_UNKNOWN; 5746 5747 return rc; 5748 } 5749 5750 static void ibmvnic_handle_crq(union ibmvnic_crq *crq, 5751 struct ibmvnic_adapter *adapter) 5752 { 5753 struct ibmvnic_generic_crq *gen_crq = &crq->generic; 5754 struct net_device *netdev = adapter->netdev; 5755 struct device *dev = &adapter->vdev->dev; 5756 u64 *u64_crq = (u64 *)crq; 5757 long rc; 5758 5759 netdev_dbg(netdev, "Handling CRQ: %016lx %016lx\n", 5760 (unsigned long)cpu_to_be64(u64_crq[0]), 5761 (unsigned long)cpu_to_be64(u64_crq[1])); 5762 switch (gen_crq->first) { 5763 case IBMVNIC_CRQ_INIT_RSP: 5764 switch (gen_crq->cmd) { 5765 case IBMVNIC_CRQ_INIT: 5766 dev_info(dev, "Partner initialized\n"); 5767 adapter->from_passive_init = true; 5768 /* Discard any stale login responses from prev reset. 5769 * CHECK: should we clear even on INIT_COMPLETE? 5770 */ 5771 adapter->login_pending = false; 5772 5773 if (adapter->state == VNIC_DOWN) 5774 rc = ibmvnic_reset(adapter, VNIC_RESET_PASSIVE_INIT); 5775 else 5776 rc = ibmvnic_reset(adapter, VNIC_RESET_FAILOVER); 5777 5778 if (rc && rc != -EBUSY) { 5779 /* We were unable to schedule the failover 5780 * reset either because the adapter was still 5781 * probing (eg: during kexec) or we could not 5782 * allocate memory. Clear the failover_pending 5783 * flag since no one else will. We ignore 5784 * EBUSY because it means either FAILOVER reset 5785 * is already scheduled or the adapter is 5786 * being removed. 
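			 * If rc is -EBUSY, the already-queued FAILOVER reset
			 * (or the removal path) will deal with the pending
			 * failover instead, so the flag is left untouched.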
5787 */ 5788 netdev_err(netdev, 5789 "Error %ld scheduling failover reset\n", 5790 rc); 5791 adapter->failover_pending = false; 5792 } 5793 5794 if (!completion_done(&adapter->init_done)) { 5795 if (!adapter->init_done_rc) 5796 adapter->init_done_rc = -EAGAIN; 5797 complete(&adapter->init_done); 5798 } 5799 5800 break; 5801 case IBMVNIC_CRQ_INIT_COMPLETE: 5802 dev_info(dev, "Partner initialization complete\n"); 5803 adapter->crq.active = true; 5804 send_version_xchg(adapter); 5805 break; 5806 default: 5807 dev_err(dev, "Unknown crq cmd: %d\n", gen_crq->cmd); 5808 } 5809 return; 5810 case IBMVNIC_CRQ_XPORT_EVENT: 5811 netif_carrier_off(netdev); 5812 adapter->crq.active = false; 5813 /* terminate any thread waiting for a response 5814 * from the device 5815 */ 5816 if (!completion_done(&adapter->fw_done)) { 5817 adapter->fw_done_rc = -EIO; 5818 complete(&adapter->fw_done); 5819 } 5820 5821 /* if we got here during crq-init, retry crq-init */ 5822 if (!completion_done(&adapter->init_done)) { 5823 adapter->init_done_rc = -EAGAIN; 5824 complete(&adapter->init_done); 5825 } 5826 5827 if (!completion_done(&adapter->stats_done)) 5828 complete(&adapter->stats_done); 5829 if (test_bit(0, &adapter->resetting)) 5830 adapter->force_reset_recovery = true; 5831 if (gen_crq->cmd == IBMVNIC_PARTITION_MIGRATED) { 5832 dev_info(dev, "Migrated, re-enabling adapter\n"); 5833 ibmvnic_reset(adapter, VNIC_RESET_MOBILITY); 5834 } else if (gen_crq->cmd == IBMVNIC_DEVICE_FAILOVER) { 5835 dev_info(dev, "Backing device failover detected\n"); 5836 adapter->failover_pending = true; 5837 } else { 5838 /* The adapter lost the connection */ 5839 dev_err(dev, "Virtual Adapter failed (rc=%d)\n", 5840 gen_crq->cmd); 5841 ibmvnic_reset(adapter, VNIC_RESET_FATAL); 5842 } 5843 return; 5844 case IBMVNIC_CRQ_CMD_RSP: 5845 break; 5846 default: 5847 dev_err(dev, "Got an invalid msg type 0x%02x\n", 5848 gen_crq->first); 5849 return; 5850 } 5851 5852 switch (gen_crq->cmd) { 5853 case VERSION_EXCHANGE_RSP: 5854 rc = crq->version_exchange_rsp.rc.code; 5855 if (rc) { 5856 dev_err(dev, "Error %ld in VERSION_EXCHG_RSP\n", rc); 5857 break; 5858 } 5859 ibmvnic_version = 5860 be16_to_cpu(crq->version_exchange_rsp.version); 5861 dev_info(dev, "Partner protocol version is %d\n", 5862 ibmvnic_version); 5863 send_query_cap(adapter); 5864 break; 5865 case QUERY_CAPABILITY_RSP: 5866 handle_query_cap_rsp(crq, adapter); 5867 break; 5868 case QUERY_MAP_RSP: 5869 handle_query_map_rsp(crq, adapter); 5870 break; 5871 case REQUEST_MAP_RSP: 5872 adapter->fw_done_rc = crq->request_map_rsp.rc.code; 5873 complete(&adapter->fw_done); 5874 break; 5875 case REQUEST_UNMAP_RSP: 5876 handle_request_unmap_rsp(crq, adapter); 5877 break; 5878 case REQUEST_CAPABILITY_RSP: 5879 handle_request_cap_rsp(crq, adapter); 5880 break; 5881 case LOGIN_RSP: 5882 netdev_dbg(netdev, "Got Login Response\n"); 5883 handle_login_rsp(crq, adapter); 5884 break; 5885 case LOGICAL_LINK_STATE_RSP: 5886 netdev_dbg(netdev, 5887 "Got Logical Link State Response, state: %d rc: %d\n", 5888 crq->logical_link_state_rsp.link_state, 5889 crq->logical_link_state_rsp.rc.code); 5890 adapter->logical_link_state = 5891 crq->logical_link_state_rsp.link_state; 5892 adapter->init_done_rc = crq->logical_link_state_rsp.rc.code; 5893 complete(&adapter->init_done); 5894 break; 5895 case LINK_STATE_INDICATION: 5896 netdev_dbg(netdev, "Got Logical Link State Indication\n"); 5897 adapter->phys_link_state = 5898 crq->link_state_indication.phys_link_state; 5899 adapter->logical_link_state = 5900 
crq->link_state_indication.logical_link_state; 5901 if (adapter->phys_link_state && adapter->logical_link_state) 5902 netif_carrier_on(netdev); 5903 else 5904 netif_carrier_off(netdev); 5905 break; 5906 case CHANGE_MAC_ADDR_RSP: 5907 netdev_dbg(netdev, "Got MAC address change Response\n"); 5908 adapter->fw_done_rc = handle_change_mac_rsp(crq, adapter); 5909 break; 5910 case ERROR_INDICATION: 5911 netdev_dbg(netdev, "Got Error Indication\n"); 5912 handle_error_indication(crq, adapter); 5913 break; 5914 case REQUEST_STATISTICS_RSP: 5915 netdev_dbg(netdev, "Got Statistics Response\n"); 5916 complete(&adapter->stats_done); 5917 break; 5918 case QUERY_IP_OFFLOAD_RSP: 5919 netdev_dbg(netdev, "Got Query IP offload Response\n"); 5920 handle_query_ip_offload_rsp(adapter); 5921 break; 5922 case MULTICAST_CTRL_RSP: 5923 netdev_dbg(netdev, "Got multicast control Response\n"); 5924 break; 5925 case CONTROL_IP_OFFLOAD_RSP: 5926 netdev_dbg(netdev, "Got Control IP offload Response\n"); 5927 dma_unmap_single(dev, adapter->ip_offload_ctrl_tok, 5928 sizeof(adapter->ip_offload_ctrl), 5929 DMA_TO_DEVICE); 5930 complete(&adapter->init_done); 5931 break; 5932 case COLLECT_FW_TRACE_RSP: 5933 netdev_dbg(netdev, "Got Collect firmware trace Response\n"); 5934 complete(&adapter->fw_done); 5935 break; 5936 case GET_VPD_SIZE_RSP: 5937 handle_vpd_size_rsp(crq, adapter); 5938 break; 5939 case GET_VPD_RSP: 5940 handle_vpd_rsp(crq, adapter); 5941 break; 5942 case QUERY_PHYS_PARMS_RSP: 5943 adapter->fw_done_rc = handle_query_phys_parms_rsp(crq, adapter); 5944 complete(&adapter->fw_done); 5945 break; 5946 default: 5947 netdev_err(netdev, "Got an invalid cmd type 0x%02x\n", 5948 gen_crq->cmd); 5949 } 5950 } 5951 5952 static irqreturn_t ibmvnic_interrupt(int irq, void *instance) 5953 { 5954 struct ibmvnic_adapter *adapter = instance; 5955 5956 tasklet_schedule(&adapter->tasklet); 5957 return IRQ_HANDLED; 5958 } 5959 5960 static void ibmvnic_tasklet(struct tasklet_struct *t) 5961 { 5962 struct ibmvnic_adapter *adapter = from_tasklet(adapter, t, tasklet); 5963 struct ibmvnic_crq_queue *queue = &adapter->crq; 5964 union ibmvnic_crq *crq; 5965 unsigned long flags; 5966 5967 spin_lock_irqsave(&queue->lock, flags); 5968 5969 /* Pull all the valid messages off the CRQ */ 5970 while ((crq = ibmvnic_next_crq(adapter)) != NULL) { 5971 /* This barrier makes sure ibmvnic_next_crq()'s 5972 * crq->generic.first & IBMVNIC_CRQ_CMD_RSP is loaded 5973 * before ibmvnic_handle_crq()'s 5974 * switch(gen_crq->first) and switch(gen_crq->cmd). 
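		 * Without it, reads of the rest of the CRQ entry could be
		 * reordered ahead of the check that the entry is valid.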
5975 */ 5976 dma_rmb(); 5977 ibmvnic_handle_crq(crq, adapter); 5978 crq->generic.first = 0; 5979 } 5980 5981 spin_unlock_irqrestore(&queue->lock, flags); 5982 } 5983 5984 static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *adapter) 5985 { 5986 struct vio_dev *vdev = adapter->vdev; 5987 int rc; 5988 5989 do { 5990 rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address); 5991 } while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc)); 5992 5993 if (rc) 5994 dev_err(&vdev->dev, "Error enabling adapter (rc=%d)\n", rc); 5995 5996 return rc; 5997 } 5998 5999 static int ibmvnic_reset_crq(struct ibmvnic_adapter *adapter) 6000 { 6001 struct ibmvnic_crq_queue *crq = &adapter->crq; 6002 struct device *dev = &adapter->vdev->dev; 6003 struct vio_dev *vdev = adapter->vdev; 6004 int rc; 6005 6006 /* Close the CRQ */ 6007 do { 6008 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address); 6009 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc)); 6010 6011 /* Clean out the queue */ 6012 if (!crq->msgs) 6013 return -EINVAL; 6014 6015 memset(crq->msgs, 0, PAGE_SIZE); 6016 crq->cur = 0; 6017 crq->active = false; 6018 6019 /* And re-open it again */ 6020 rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address, 6021 crq->msg_token, PAGE_SIZE); 6022 6023 if (rc == H_CLOSED) 6024 /* Adapter is good, but other end is not ready */ 6025 dev_warn(dev, "Partner adapter not ready\n"); 6026 else if (rc != 0) 6027 dev_warn(dev, "Couldn't register crq (rc=%d)\n", rc); 6028 6029 return rc; 6030 } 6031 6032 static void release_crq_queue(struct ibmvnic_adapter *adapter) 6033 { 6034 struct ibmvnic_crq_queue *crq = &adapter->crq; 6035 struct vio_dev *vdev = adapter->vdev; 6036 long rc; 6037 6038 if (!crq->msgs) 6039 return; 6040 6041 netdev_dbg(adapter->netdev, "Releasing CRQ\n"); 6042 free_irq(vdev->irq, adapter); 6043 tasklet_kill(&adapter->tasklet); 6044 do { 6045 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address); 6046 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc)); 6047 6048 dma_unmap_single(&vdev->dev, crq->msg_token, PAGE_SIZE, 6049 DMA_BIDIRECTIONAL); 6050 free_page((unsigned long)crq->msgs); 6051 crq->msgs = NULL; 6052 crq->active = false; 6053 } 6054 6055 static int init_crq_queue(struct ibmvnic_adapter *adapter) 6056 { 6057 struct ibmvnic_crq_queue *crq = &adapter->crq; 6058 struct device *dev = &adapter->vdev->dev; 6059 struct vio_dev *vdev = adapter->vdev; 6060 int rc, retrc = -ENOMEM; 6061 6062 if (crq->msgs) 6063 return 0; 6064 6065 crq->msgs = (union ibmvnic_crq *)get_zeroed_page(GFP_KERNEL); 6066 /* Should we allocate more than one page? */ 6067 6068 if (!crq->msgs) 6069 return -ENOMEM; 6070 6071 crq->size = PAGE_SIZE / sizeof(*crq->msgs); 6072 crq->msg_token = dma_map_single(dev, crq->msgs, PAGE_SIZE, 6073 DMA_BIDIRECTIONAL); 6074 if (dma_mapping_error(dev, crq->msg_token)) 6075 goto map_failed; 6076 6077 rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address, 6078 crq->msg_token, PAGE_SIZE); 6079 6080 if (rc == H_RESOURCE) 6081 /* maybe kexecing and resource is busy. 
try a reset */ 6082 rc = ibmvnic_reset_crq(adapter); 6083 retrc = rc; 6084 6085 if (rc == H_CLOSED) { 6086 dev_warn(dev, "Partner adapter not ready\n"); 6087 } else if (rc) { 6088 dev_warn(dev, "Error %d opening adapter\n", rc); 6089 goto reg_crq_failed; 6090 } 6091 6092 retrc = 0; 6093 6094 tasklet_setup(&adapter->tasklet, (void *)ibmvnic_tasklet); 6095 6096 netdev_dbg(adapter->netdev, "registering irq 0x%x\n", vdev->irq); 6097 snprintf(crq->name, sizeof(crq->name), "ibmvnic-%x", 6098 adapter->vdev->unit_address); 6099 rc = request_irq(vdev->irq, ibmvnic_interrupt, 0, crq->name, adapter); 6100 if (rc) { 6101 dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n", 6102 vdev->irq, rc); 6103 goto req_irq_failed; 6104 } 6105 6106 rc = vio_enable_interrupts(vdev); 6107 if (rc) { 6108 dev_err(dev, "Error %d enabling interrupts\n", rc); 6109 goto req_irq_failed; 6110 } 6111 6112 crq->cur = 0; 6113 spin_lock_init(&crq->lock); 6114 6115 /* process any CRQs that were queued before we enabled interrupts */ 6116 tasklet_schedule(&adapter->tasklet); 6117 6118 return retrc; 6119 6120 req_irq_failed: 6121 tasklet_kill(&adapter->tasklet); 6122 do { 6123 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address); 6124 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc)); 6125 reg_crq_failed: 6126 dma_unmap_single(dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL); 6127 map_failed: 6128 free_page((unsigned long)crq->msgs); 6129 crq->msgs = NULL; 6130 return retrc; 6131 } 6132 6133 static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter, bool reset) 6134 { 6135 struct device *dev = &adapter->vdev->dev; 6136 unsigned long timeout = msecs_to_jiffies(20000); 6137 u64 old_num_rx_queues = adapter->req_rx_queues; 6138 u64 old_num_tx_queues = adapter->req_tx_queues; 6139 int rc; 6140 6141 adapter->from_passive_init = false; 6142 6143 rc = ibmvnic_send_crq_init(adapter); 6144 if (rc) { 6145 dev_err(dev, "Send crq init failed with error %d\n", rc); 6146 return rc; 6147 } 6148 6149 if (!wait_for_completion_timeout(&adapter->init_done, timeout)) { 6150 dev_err(dev, "Initialization sequence timed out\n"); 6151 return -ETIMEDOUT; 6152 } 6153 6154 if (adapter->init_done_rc) { 6155 release_crq_queue(adapter); 6156 dev_err(dev, "CRQ-init failed, %d\n", adapter->init_done_rc); 6157 return adapter->init_done_rc; 6158 } 6159 6160 if (adapter->from_passive_init) { 6161 adapter->state = VNIC_OPEN; 6162 adapter->from_passive_init = false; 6163 dev_err(dev, "CRQ-init failed, passive-init\n"); 6164 return -EINVAL; 6165 } 6166 6167 if (reset && 6168 test_bit(0, &adapter->resetting) && !adapter->wait_for_reset && 6169 adapter->reset_reason != VNIC_RESET_MOBILITY) { 6170 if (adapter->req_rx_queues != old_num_rx_queues || 6171 adapter->req_tx_queues != old_num_tx_queues) { 6172 release_sub_crqs(adapter, 0); 6173 rc = init_sub_crqs(adapter); 6174 } else { 6175 /* no need to reinitialize completely, but we do 6176 * need to clean up transmits that were in flight 6177 * when we processed the reset. Failure to do so 6178 * will confound the upper layer, usually TCP, by 6179 * creating the illusion of transmits that are 6180 * awaiting completion. 
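			 * clean_tx_pools() below frees any skbs still held
			 * in the TX pools before the sub-CRQs are reset.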
			 */
			clean_tx_pools(adapter);

			rc = reset_sub_crq_queues(adapter);
		}
	} else {
		rc = init_sub_crqs(adapter);
	}

	if (rc) {
		dev_err(dev, "Initialization of sub crqs failed\n");
		release_crq_queue(adapter);
		return rc;
	}

	rc = init_sub_crq_irqs(adapter);
	if (rc) {
		dev_err(dev, "Failed to initialize sub crq irqs\n");
		release_crq_queue(adapter);
	}

	return rc;
}

static struct device_attribute dev_attr_failover;

static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
{
	struct ibmvnic_adapter *adapter;
	struct net_device *netdev;
	unsigned char *mac_addr_p;
	unsigned long flags;
	bool init_success;
	int rc;

	dev_dbg(&dev->dev, "entering ibmvnic_probe for UA 0x%x\n",
		dev->unit_address);

	mac_addr_p = (unsigned char *)vio_get_attribute(dev,
							VETH_MAC_ADDR, NULL);
	if (!mac_addr_p) {
		dev_err(&dev->dev,
			"(%s:%3.3d) ERROR: Can't find MAC_ADDR attribute\n",
			__FILE__, __LINE__);
		return 0;
	}

	netdev = alloc_etherdev_mq(sizeof(struct ibmvnic_adapter),
				   IBMVNIC_MAX_QUEUES);
	if (!netdev)
		return -ENOMEM;

	adapter = netdev_priv(netdev);
	adapter->state = VNIC_PROBING;
	dev_set_drvdata(&dev->dev, netdev);
	adapter->vdev = dev;
	adapter->netdev = netdev;
	adapter->login_pending = false;
	memset(&adapter->map_ids, 0, sizeof(adapter->map_ids));
	/* map_ids start at 1, so ensure map_id 0 is always "in-use" */
	bitmap_set(adapter->map_ids, 0, 1);

	ether_addr_copy(adapter->mac_addr, mac_addr_p);
	eth_hw_addr_set(netdev, adapter->mac_addr);
	netdev->irq = dev->irq;
	netdev->netdev_ops = &ibmvnic_netdev_ops;
	netdev->ethtool_ops = &ibmvnic_ethtool_ops;
	SET_NETDEV_DEV(netdev, &dev->dev);

	INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset);
	INIT_DELAYED_WORK(&adapter->ibmvnic_delayed_reset,
			  __ibmvnic_delayed_reset);
	INIT_LIST_HEAD(&adapter->rwi_list);
	spin_lock_init(&adapter->rwi_lock);
	spin_lock_init(&adapter->state_lock);
	mutex_init(&adapter->fw_lock);
	init_completion(&adapter->probe_done);
	init_completion(&adapter->init_done);
	init_completion(&adapter->fw_done);
	init_completion(&adapter->reset_done);
	init_completion(&adapter->stats_done);
	clear_bit(0, &adapter->resetting);
	adapter->prev_rx_buf_sz = 0;
	adapter->prev_mtu = 0;

	init_success = false;
	do {
		reinit_init_done(adapter);

		/* clear any failovers we got in the previous pass
		 * since we are reinitializing the CRQ
		 */
		adapter->failover_pending = false;

		/* If we had already initialized CRQ, we may have one or
		 * more resets queued already. Discard those and release
		 * the CRQ before initializing the CRQ again.
		 */
		release_crq_queue(adapter);

		/* Since we are still in PROBING state, __ibmvnic_reset()
		 * will not access the ->rwi_list and since we released CRQ,
		 * we won't get _new_ transport events. But there may be an
		 * ongoing ibmvnic_reset() call. So serialize access to
		 * rwi_list. If we win the race, ibmvnic_reset() could add
		 * a reset after we purged, but that's ok - we just may end
		 * up with an extra reset (i.e. similar to having two or more
		 * resets in the queue at once).
		 * CHECK.
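		 * The rwi_lock/flush_reset_queue() sequence below performs
		 * that purge.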
6290 */ 6291 spin_lock_irqsave(&adapter->rwi_lock, flags); 6292 flush_reset_queue(adapter); 6293 spin_unlock_irqrestore(&adapter->rwi_lock, flags); 6294 6295 rc = init_crq_queue(adapter); 6296 if (rc) { 6297 dev_err(&dev->dev, "Couldn't initialize crq. rc=%d\n", 6298 rc); 6299 goto ibmvnic_init_fail; 6300 } 6301 6302 rc = ibmvnic_reset_init(adapter, false); 6303 } while (rc == -EAGAIN); 6304 6305 /* We are ignoring the error from ibmvnic_reset_init() assuming that the 6306 * partner is not ready. CRQ is not active. When the partner becomes 6307 * ready, we will do the passive init reset. 6308 */ 6309 6310 if (!rc) 6311 init_success = true; 6312 6313 rc = init_stats_buffers(adapter); 6314 if (rc) 6315 goto ibmvnic_init_fail; 6316 6317 rc = init_stats_token(adapter); 6318 if (rc) 6319 goto ibmvnic_stats_fail; 6320 6321 rc = device_create_file(&dev->dev, &dev_attr_failover); 6322 if (rc) 6323 goto ibmvnic_dev_file_err; 6324 6325 netif_carrier_off(netdev); 6326 6327 if (init_success) { 6328 adapter->state = VNIC_PROBED; 6329 netdev->mtu = adapter->req_mtu - ETH_HLEN; 6330 netdev->min_mtu = adapter->min_mtu - ETH_HLEN; 6331 netdev->max_mtu = adapter->max_mtu - ETH_HLEN; 6332 } else { 6333 adapter->state = VNIC_DOWN; 6334 } 6335 6336 adapter->wait_for_reset = false; 6337 adapter->last_reset_time = jiffies; 6338 6339 rc = register_netdev(netdev); 6340 if (rc) { 6341 dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc); 6342 goto ibmvnic_register_fail; 6343 } 6344 dev_info(&dev->dev, "ibmvnic registered\n"); 6345 6346 rc = ibmvnic_cpu_notif_add(adapter); 6347 if (rc) { 6348 netdev_err(netdev, "Registering cpu notifier failed\n"); 6349 goto cpu_notif_add_failed; 6350 } 6351 6352 complete(&adapter->probe_done); 6353 6354 return 0; 6355 6356 cpu_notif_add_failed: 6357 unregister_netdev(netdev); 6358 6359 ibmvnic_register_fail: 6360 device_remove_file(&dev->dev, &dev_attr_failover); 6361 6362 ibmvnic_dev_file_err: 6363 release_stats_token(adapter); 6364 6365 ibmvnic_stats_fail: 6366 release_stats_buffers(adapter); 6367 6368 ibmvnic_init_fail: 6369 release_sub_crqs(adapter, 1); 6370 release_crq_queue(adapter); 6371 6372 /* cleanup worker thread after releasing CRQ so we don't get 6373 * transport events (i.e new work items for the worker thread). 6374 */ 6375 adapter->state = VNIC_REMOVING; 6376 complete(&adapter->probe_done); 6377 flush_work(&adapter->ibmvnic_reset); 6378 flush_delayed_work(&adapter->ibmvnic_delayed_reset); 6379 6380 flush_reset_queue(adapter); 6381 6382 mutex_destroy(&adapter->fw_lock); 6383 free_netdev(netdev); 6384 6385 return rc; 6386 } 6387 6388 static void ibmvnic_remove(struct vio_dev *dev) 6389 { 6390 struct net_device *netdev = dev_get_drvdata(&dev->dev); 6391 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 6392 unsigned long flags; 6393 6394 spin_lock_irqsave(&adapter->state_lock, flags); 6395 6396 /* If ibmvnic_reset() is scheduling a reset, wait for it to 6397 * finish. Then, set the state to REMOVING to prevent it from 6398 * scheduling any more work and to have reset functions ignore 6399 * any resets that have already been scheduled. Drop the lock 6400 * after setting state, so __ibmvnic_reset() which is called 6401 * from the flush_work() below, can make progress. 
6402 */ 6403 spin_lock(&adapter->rwi_lock); 6404 adapter->state = VNIC_REMOVING; 6405 spin_unlock(&adapter->rwi_lock); 6406 6407 spin_unlock_irqrestore(&adapter->state_lock, flags); 6408 6409 ibmvnic_cpu_notif_remove(adapter); 6410 6411 flush_work(&adapter->ibmvnic_reset); 6412 flush_delayed_work(&adapter->ibmvnic_delayed_reset); 6413 6414 rtnl_lock(); 6415 unregister_netdevice(netdev); 6416 6417 release_resources(adapter); 6418 release_rx_pools(adapter); 6419 release_tx_pools(adapter); 6420 release_sub_crqs(adapter, 1); 6421 release_crq_queue(adapter); 6422 6423 release_stats_token(adapter); 6424 release_stats_buffers(adapter); 6425 6426 adapter->state = VNIC_REMOVED; 6427 6428 rtnl_unlock(); 6429 mutex_destroy(&adapter->fw_lock); 6430 device_remove_file(&dev->dev, &dev_attr_failover); 6431 free_netdev(netdev); 6432 dev_set_drvdata(&dev->dev, NULL); 6433 } 6434 6435 static ssize_t failover_store(struct device *dev, struct device_attribute *attr, 6436 const char *buf, size_t count) 6437 { 6438 struct net_device *netdev = dev_get_drvdata(dev); 6439 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 6440 unsigned long retbuf[PLPAR_HCALL_BUFSIZE]; 6441 __be64 session_token; 6442 long rc; 6443 6444 if (!sysfs_streq(buf, "1")) 6445 return -EINVAL; 6446 6447 rc = plpar_hcall(H_VIOCTL, retbuf, adapter->vdev->unit_address, 6448 H_GET_SESSION_TOKEN, 0, 0, 0); 6449 if (rc) { 6450 netdev_err(netdev, "Couldn't retrieve session token, rc %ld\n", 6451 rc); 6452 goto last_resort; 6453 } 6454 6455 session_token = (__be64)retbuf[0]; 6456 netdev_dbg(netdev, "Initiating client failover, session id %llx\n", 6457 be64_to_cpu(session_token)); 6458 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address, 6459 H_SESSION_ERR_DETECTED, session_token, 0, 0); 6460 if (rc) { 6461 netdev_err(netdev, 6462 "H_VIOCTL initiated failover failed, rc %ld\n", 6463 rc); 6464 goto last_resort; 6465 } 6466 6467 return count; 6468 6469 last_resort: 6470 netdev_dbg(netdev, "Trying to send CRQ_CMD, the last resort\n"); 6471 ibmvnic_reset(adapter, VNIC_RESET_FAILOVER); 6472 6473 return count; 6474 } 6475 static DEVICE_ATTR_WO(failover); 6476 6477 static unsigned long ibmvnic_get_desired_dma(struct vio_dev *vdev) 6478 { 6479 struct net_device *netdev = dev_get_drvdata(&vdev->dev); 6480 struct ibmvnic_adapter *adapter; 6481 struct iommu_table *tbl; 6482 unsigned long ret = 0; 6483 int i; 6484 6485 tbl = get_iommu_table_base(&vdev->dev); 6486 6487 /* netdev inits at probe time along with the structures we need below*/ 6488 if (!netdev) 6489 return IOMMU_PAGE_ALIGN(IBMVNIC_IO_ENTITLEMENT_DEFAULT, tbl); 6490 6491 adapter = netdev_priv(netdev); 6492 6493 ret += PAGE_SIZE; /* the crq message queue */ 6494 ret += IOMMU_PAGE_ALIGN(sizeof(struct ibmvnic_statistics), tbl); 6495 6496 for (i = 0; i < adapter->req_tx_queues + adapter->req_rx_queues; i++) 6497 ret += 4 * PAGE_SIZE; /* the scrq message queue */ 6498 6499 for (i = 0; i < adapter->num_active_rx_pools; i++) 6500 ret += adapter->rx_pool[i].size * 6501 IOMMU_PAGE_ALIGN(adapter->rx_pool[i].buff_size, tbl); 6502 6503 return ret; 6504 } 6505 6506 static int ibmvnic_resume(struct device *dev) 6507 { 6508 struct net_device *netdev = dev_get_drvdata(dev); 6509 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 6510 6511 if (adapter->state != VNIC_OPEN) 6512 return 0; 6513 6514 tasklet_schedule(&adapter->tasklet); 6515 6516 return 0; 6517 } 6518 6519 static const struct vio_device_id ibmvnic_device_table[] = { 6520 {"network", "IBM,vnic"}, 6521 {"", "" } 6522 }; 6523 
MODULE_DEVICE_TABLE(vio, ibmvnic_device_table);

static const struct dev_pm_ops ibmvnic_pm_ops = {
	.resume = ibmvnic_resume
};

static struct vio_driver ibmvnic_driver = {
	.id_table	= ibmvnic_device_table,
	.probe		= ibmvnic_probe,
	.remove		= ibmvnic_remove,
	.get_desired_dma = ibmvnic_get_desired_dma,
	.name		= ibmvnic_driver_name,
	.pm		= &ibmvnic_pm_ops,
};

/* module functions */
static int __init ibmvnic_module_init(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "net/ibmvnic:online",
				      ibmvnic_cpu_online,
				      ibmvnic_cpu_down_prep);
	if (ret < 0)
		goto out;
	ibmvnic_online = ret;
	ret = cpuhp_setup_state_multi(CPUHP_IBMVNIC_DEAD, "net/ibmvnic:dead",
				      NULL, ibmvnic_cpu_dead);
	if (ret)
		goto err_dead;

	ret = vio_register_driver(&ibmvnic_driver);
	if (ret)
		goto err_vio_register;

	pr_info("%s: %s %s\n", ibmvnic_driver_name, ibmvnic_driver_string,
		IBMVNIC_DRIVER_VERSION);

	return 0;

err_vio_register:
	cpuhp_remove_multi_state(CPUHP_IBMVNIC_DEAD);
err_dead:
	cpuhp_remove_multi_state(ibmvnic_online);
out:
	return ret;
}

static void __exit ibmvnic_module_exit(void)
{
	vio_unregister_driver(&ibmvnic_driver);
	cpuhp_remove_multi_state(CPUHP_IBMVNIC_DEAD);
	cpuhp_remove_multi_state(ibmvnic_online);
}

module_init(ibmvnic_module_init);
module_exit(ibmvnic_module_exit);