1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /**************************************************************************/ 3 /* */ 4 /* IBM System i and System p Virtual NIC Device Driver */ 5 /* Copyright (C) 2014 IBM Corp. */ 6 /* Santiago Leon (santi_leon@yahoo.com) */ 7 /* Thomas Falcon (tlfalcon@linux.vnet.ibm.com) */ 8 /* John Allen (jallen@linux.vnet.ibm.com) */ 9 /* */ 10 /* */ 11 /* This module contains the implementation of a virtual ethernet device */ 12 /* for use with IBM i/p Series LPAR Linux. It utilizes the logical LAN */ 13 /* option of the RS/6000 Platform Architecture to interface with virtual */ 14 /* ethernet NICs that are presented to the partition by the hypervisor. */ 15 /* */ 16 /* Messages are passed between the VNIC driver and the VNIC server using */ 17 /* Command/Response Queues (CRQs) and sub CRQs (sCRQs). CRQs are used to */ 18 /* issue and receive commands that initiate communication with the server */ 19 /* on driver initialization. Sub CRQs (sCRQs) are similar to CRQs, but */ 20 /* are used by the driver to notify the server that a packet is */ 21 /* ready for transmission or that a buffer has been added to receive a */ 22 /* packet. Subsequently, sCRQs are used by the server to notify the */ 23 /* driver that a packet transmission has been completed or that a packet */ 24 /* has been received and placed in a waiting buffer. */ 25 /* */ 26 /* In lieu of a more conventional "on-the-fly" DMA mapping strategy in */ 27 /* which skbs are DMA mapped and immediately unmapped when the transmit */ 28 /* or receive has been completed, the VNIC driver is required to use */ 29 /* "long term mapping". This entails that large, continuous DMA mapped */ 30 /* buffers are allocated on driver initialization and these buffers are */ 31 /* then continuously reused to pass skbs to and from the VNIC server. 
*/ 32 /* */ 33 /**************************************************************************/ 34 35 #include <linux/module.h> 36 #include <linux/moduleparam.h> 37 #include <linux/types.h> 38 #include <linux/errno.h> 39 #include <linux/completion.h> 40 #include <linux/ioport.h> 41 #include <linux/dma-mapping.h> 42 #include <linux/kernel.h> 43 #include <linux/netdevice.h> 44 #include <linux/etherdevice.h> 45 #include <linux/skbuff.h> 46 #include <linux/init.h> 47 #include <linux/delay.h> 48 #include <linux/mm.h> 49 #include <linux/ethtool.h> 50 #include <linux/proc_fs.h> 51 #include <linux/if_arp.h> 52 #include <linux/in.h> 53 #include <linux/ip.h> 54 #include <linux/ipv6.h> 55 #include <linux/irq.h> 56 #include <linux/irqdomain.h> 57 #include <linux/kthread.h> 58 #include <linux/seq_file.h> 59 #include <linux/interrupt.h> 60 #include <net/net_namespace.h> 61 #include <asm/hvcall.h> 62 #include <linux/atomic.h> 63 #include <asm/vio.h> 64 #include <asm/xive.h> 65 #include <asm/iommu.h> 66 #include <linux/uaccess.h> 67 #include <asm/firmware.h> 68 #include <linux/workqueue.h> 69 #include <linux/if_vlan.h> 70 #include <linux/utsname.h> 71 #include <linux/cpu.h> 72 73 #include "ibmvnic.h" 74 75 static const char ibmvnic_driver_name[] = "ibmvnic"; 76 static const char ibmvnic_driver_string[] = "IBM System i/p Virtual NIC Driver"; 77 78 MODULE_AUTHOR("Santiago Leon"); 79 MODULE_DESCRIPTION("IBM System i/p Virtual NIC Driver"); 80 MODULE_LICENSE("GPL"); 81 MODULE_VERSION(IBMVNIC_DRIVER_VERSION); 82 83 static int ibmvnic_version = IBMVNIC_INITIAL_VERSION; 84 static void release_sub_crqs(struct ibmvnic_adapter *, bool); 85 static int ibmvnic_reset_crq(struct ibmvnic_adapter *); 86 static int ibmvnic_send_crq_init(struct ibmvnic_adapter *); 87 static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *); 88 static int ibmvnic_send_crq(struct ibmvnic_adapter *, union ibmvnic_crq *); 89 static int send_subcrq_indirect(struct ibmvnic_adapter *, u64, u64, u64); 90 static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance); 91 static int enable_scrq_irq(struct ibmvnic_adapter *, 92 struct ibmvnic_sub_crq_queue *); 93 static int disable_scrq_irq(struct ibmvnic_adapter *, 94 struct ibmvnic_sub_crq_queue *); 95 static int pending_scrq(struct ibmvnic_adapter *, 96 struct ibmvnic_sub_crq_queue *); 97 static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *, 98 struct ibmvnic_sub_crq_queue *); 99 static int ibmvnic_poll(struct napi_struct *napi, int data); 100 static void send_query_map(struct ibmvnic_adapter *adapter); 101 static int send_request_map(struct ibmvnic_adapter *, dma_addr_t, u32, u8); 102 static int send_request_unmap(struct ibmvnic_adapter *, u8); 103 static int send_login(struct ibmvnic_adapter *adapter); 104 static void send_query_cap(struct ibmvnic_adapter *adapter); 105 static int init_sub_crqs(struct ibmvnic_adapter *); 106 static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter); 107 static int ibmvnic_reset_init(struct ibmvnic_adapter *, bool reset); 108 static void release_crq_queue(struct ibmvnic_adapter *); 109 static int __ibmvnic_set_mac(struct net_device *, u8 *); 110 static int init_crq_queue(struct ibmvnic_adapter *adapter); 111 static int send_query_phys_parms(struct ibmvnic_adapter *adapter); 112 static void ibmvnic_tx_scrq_clean_buffer(struct ibmvnic_adapter *adapter, 113 struct ibmvnic_sub_crq_queue *tx_scrq); 114 static void free_long_term_buff(struct ibmvnic_adapter *adapter, 115 struct ibmvnic_long_term_buff *ltb); 116 static void 
ibmvnic_disable_irqs(struct ibmvnic_adapter *adapter); 117 118 struct ibmvnic_stat { 119 char name[ETH_GSTRING_LEN]; 120 int offset; 121 }; 122 123 #define IBMVNIC_STAT_OFF(stat) (offsetof(struct ibmvnic_adapter, stats) + \ 124 offsetof(struct ibmvnic_statistics, stat)) 125 #define IBMVNIC_GET_STAT(a, off) (*((u64 *)(((unsigned long)(a)) + (off)))) 126 127 static const struct ibmvnic_stat ibmvnic_stats[] = { 128 {"rx_packets", IBMVNIC_STAT_OFF(rx_packets)}, 129 {"rx_bytes", IBMVNIC_STAT_OFF(rx_bytes)}, 130 {"tx_packets", IBMVNIC_STAT_OFF(tx_packets)}, 131 {"tx_bytes", IBMVNIC_STAT_OFF(tx_bytes)}, 132 {"ucast_tx_packets", IBMVNIC_STAT_OFF(ucast_tx_packets)}, 133 {"ucast_rx_packets", IBMVNIC_STAT_OFF(ucast_rx_packets)}, 134 {"mcast_tx_packets", IBMVNIC_STAT_OFF(mcast_tx_packets)}, 135 {"mcast_rx_packets", IBMVNIC_STAT_OFF(mcast_rx_packets)}, 136 {"bcast_tx_packets", IBMVNIC_STAT_OFF(bcast_tx_packets)}, 137 {"bcast_rx_packets", IBMVNIC_STAT_OFF(bcast_rx_packets)}, 138 {"align_errors", IBMVNIC_STAT_OFF(align_errors)}, 139 {"fcs_errors", IBMVNIC_STAT_OFF(fcs_errors)}, 140 {"single_collision_frames", IBMVNIC_STAT_OFF(single_collision_frames)}, 141 {"multi_collision_frames", IBMVNIC_STAT_OFF(multi_collision_frames)}, 142 {"sqe_test_errors", IBMVNIC_STAT_OFF(sqe_test_errors)}, 143 {"deferred_tx", IBMVNIC_STAT_OFF(deferred_tx)}, 144 {"late_collisions", IBMVNIC_STAT_OFF(late_collisions)}, 145 {"excess_collisions", IBMVNIC_STAT_OFF(excess_collisions)}, 146 {"internal_mac_tx_errors", IBMVNIC_STAT_OFF(internal_mac_tx_errors)}, 147 {"carrier_sense", IBMVNIC_STAT_OFF(carrier_sense)}, 148 {"too_long_frames", IBMVNIC_STAT_OFF(too_long_frames)}, 149 {"internal_mac_rx_errors", IBMVNIC_STAT_OFF(internal_mac_rx_errors)}, 150 }; 151 152 static int send_crq_init_complete(struct ibmvnic_adapter *adapter) 153 { 154 union ibmvnic_crq crq; 155 156 memset(&crq, 0, sizeof(crq)); 157 crq.generic.first = IBMVNIC_CRQ_INIT_CMD; 158 crq.generic.cmd = IBMVNIC_CRQ_INIT_COMPLETE; 159 160 return ibmvnic_send_crq(adapter, &crq); 161 } 162 163 static int send_version_xchg(struct ibmvnic_adapter *adapter) 164 { 165 union ibmvnic_crq crq; 166 167 memset(&crq, 0, sizeof(crq)); 168 crq.version_exchange.first = IBMVNIC_CRQ_CMD; 169 crq.version_exchange.cmd = VERSION_EXCHANGE; 170 crq.version_exchange.version = cpu_to_be16(ibmvnic_version); 171 172 return ibmvnic_send_crq(adapter, &crq); 173 } 174 175 static void ibmvnic_clean_queue_affinity(struct ibmvnic_adapter *adapter, 176 struct ibmvnic_sub_crq_queue *queue) 177 { 178 if (!(queue && queue->irq)) 179 return; 180 181 cpumask_clear(queue->affinity_mask); 182 183 if (irq_set_affinity_and_hint(queue->irq, NULL)) 184 netdev_warn(adapter->netdev, 185 "%s: Clear affinity failed, queue addr = %p, IRQ = %d\n", 186 __func__, queue, queue->irq); 187 } 188 189 static void ibmvnic_clean_affinity(struct ibmvnic_adapter *adapter) 190 { 191 struct ibmvnic_sub_crq_queue **rxqs; 192 struct ibmvnic_sub_crq_queue **txqs; 193 int num_rxqs, num_txqs; 194 int rc, i; 195 196 rc = 0; 197 rxqs = adapter->rx_scrq; 198 txqs = adapter->tx_scrq; 199 num_txqs = adapter->num_active_tx_scrqs; 200 num_rxqs = adapter->num_active_rx_scrqs; 201 202 netdev_dbg(adapter->netdev, "%s: Cleaning irq affinity hints", __func__); 203 if (txqs) { 204 for (i = 0; i < num_txqs; i++) 205 ibmvnic_clean_queue_affinity(adapter, txqs[i]); 206 } 207 if (rxqs) { 208 for (i = 0; i < num_rxqs; i++) 209 ibmvnic_clean_queue_affinity(adapter, rxqs[i]); 210 } 211 } 212 213 static int ibmvnic_set_queue_affinity(struct ibmvnic_sub_crq_queue 
*queue, 214 unsigned int *cpu, int *stragglers, 215 int stride) 216 { 217 cpumask_var_t mask; 218 int i; 219 int rc = 0; 220 221 if (!(queue && queue->irq)) 222 return rc; 223 224 /* cpumask_var_t is either a pointer or array, allocation works here */ 225 if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) 226 return -ENOMEM; 227 228 /* while we have extra cpu give one extra to this irq */ 229 if (*stragglers) { 230 stride++; 231 (*stragglers)--; 232 } 233 /* atomic write is safer than writing bit by bit directly */ 234 for (i = 0; i < stride; i++) { 235 cpumask_set_cpu(*cpu, mask); 236 *cpu = cpumask_next_wrap(*cpu, cpu_online_mask, 237 nr_cpu_ids, false); 238 } 239 /* set queue affinity mask */ 240 cpumask_copy(queue->affinity_mask, mask); 241 rc = irq_set_affinity_and_hint(queue->irq, queue->affinity_mask); 242 free_cpumask_var(mask); 243 244 return rc; 245 } 246 247 /* assumes cpu read lock is held */ 248 static void ibmvnic_set_affinity(struct ibmvnic_adapter *adapter) 249 { 250 struct ibmvnic_sub_crq_queue **rxqs = adapter->rx_scrq; 251 struct ibmvnic_sub_crq_queue **txqs = adapter->tx_scrq; 252 struct ibmvnic_sub_crq_queue *queue; 253 int num_rxqs = adapter->num_active_rx_scrqs; 254 int num_txqs = adapter->num_active_tx_scrqs; 255 int total_queues, stride, stragglers, i; 256 unsigned int num_cpu, cpu; 257 int rc = 0; 258 259 netdev_dbg(adapter->netdev, "%s: Setting irq affinity hints", __func__); 260 if (!(adapter->rx_scrq && adapter->tx_scrq)) { 261 netdev_warn(adapter->netdev, 262 "%s: Set affinity failed, queues not allocated\n", 263 __func__); 264 return; 265 } 266 267 total_queues = num_rxqs + num_txqs; 268 num_cpu = num_online_cpus(); 269 /* number of cpu's assigned per irq */ 270 stride = max_t(int, num_cpu / total_queues, 1); 271 /* number of leftover cpu's */ 272 stragglers = num_cpu >= total_queues ? num_cpu % total_queues : 0; 273 /* next available cpu to assign irq to */ 274 cpu = cpumask_next(-1, cpu_online_mask); 275 276 for (i = 0; i < num_txqs; i++) { 277 queue = txqs[i]; 278 rc = ibmvnic_set_queue_affinity(queue, &cpu, &stragglers, 279 stride); 280 if (rc) 281 goto out; 282 } 283 284 for (i = 0; i < num_rxqs; i++) { 285 queue = rxqs[i]; 286 rc = ibmvnic_set_queue_affinity(queue, &cpu, &stragglers, 287 stride); 288 if (rc) 289 goto out; 290 } 291 292 out: 293 if (rc) { 294 netdev_warn(adapter->netdev, 295 "%s: Set affinity failed, queue addr = %p, IRQ = %d, rc = %d.\n", 296 __func__, queue, queue->irq, rc); 297 ibmvnic_clean_affinity(adapter); 298 } 299 } 300 301 static long h_reg_sub_crq(unsigned long unit_address, unsigned long token, 302 unsigned long length, unsigned long *number, 303 unsigned long *irq) 304 { 305 unsigned long retbuf[PLPAR_HCALL_BUFSIZE]; 306 long rc; 307 308 rc = plpar_hcall(H_REG_SUB_CRQ, retbuf, unit_address, token, length); 309 *number = retbuf[0]; 310 *irq = retbuf[1]; 311 312 return rc; 313 } 314 315 /** 316 * ibmvnic_wait_for_completion - Check device state and wait for completion 317 * @adapter: private device data 318 * @comp_done: completion structure to wait for 319 * @timeout: time to wait in milliseconds 320 * 321 * Wait for a completion signal or until the timeout limit is reached 322 * while checking that the device is still active. 
323 */ 324 static int ibmvnic_wait_for_completion(struct ibmvnic_adapter *adapter, 325 struct completion *comp_done, 326 unsigned long timeout) 327 { 328 struct net_device *netdev; 329 unsigned long div_timeout; 330 u8 retry; 331 332 netdev = adapter->netdev; 333 retry = 5; 334 div_timeout = msecs_to_jiffies(timeout / retry); 335 while (true) { 336 if (!adapter->crq.active) { 337 netdev_err(netdev, "Device down!\n"); 338 return -ENODEV; 339 } 340 if (!retry--) 341 break; 342 if (wait_for_completion_timeout(comp_done, div_timeout)) 343 return 0; 344 } 345 netdev_err(netdev, "Operation timed out.\n"); 346 return -ETIMEDOUT; 347 } 348 349 /** 350 * reuse_ltb() - Check if a long term buffer can be reused 351 * @ltb: The long term buffer to be checked 352 * @size: The size of the long term buffer. 353 * 354 * An LTB can be reused unless its size has changed. 355 * 356 * Return: Return true if the LTB can be reused, false otherwise. 357 */ 358 static bool reuse_ltb(struct ibmvnic_long_term_buff *ltb, int size) 359 { 360 return (ltb->buff && ltb->size == size); 361 } 362 363 /** 364 * alloc_long_term_buff() - Allocate a long term buffer (LTB) 365 * 366 * @adapter: ibmvnic adapter associated to the LTB 367 * @ltb: container object for the LTB 368 * @size: size of the LTB 369 * 370 * Allocate an LTB of the specified size and notify VIOS. 371 * 372 * If the given @ltb already has the correct size, reuse it. Otherwise if 373 * its non-NULL, free it. Then allocate a new one of the correct size. 374 * Notify the VIOS either way since we may now be working with a new VIOS. 375 * 376 * Allocating larger chunks of memory during resets, specially LPM or under 377 * low memory situations can cause resets to fail/timeout and for LPAR to 378 * lose connectivity. So hold onto the LTB even if we fail to communicate 379 * with the VIOS and reuse it on next open. Free LTB when adapter is closed. 380 * 381 * Return: 0 if we were able to allocate the LTB and notify the VIOS and 382 * a negative value otherwise. 383 */ 384 static int alloc_long_term_buff(struct ibmvnic_adapter *adapter, 385 struct ibmvnic_long_term_buff *ltb, int size) 386 { 387 struct device *dev = &adapter->vdev->dev; 388 u64 prev = 0; 389 int rc; 390 391 if (!reuse_ltb(ltb, size)) { 392 dev_dbg(dev, 393 "LTB size changed from 0x%llx to 0x%x, reallocating\n", 394 ltb->size, size); 395 prev = ltb->size; 396 free_long_term_buff(adapter, ltb); 397 } 398 399 if (ltb->buff) { 400 dev_dbg(dev, "Reusing LTB [map %d, size 0x%llx]\n", 401 ltb->map_id, ltb->size); 402 } else { 403 ltb->buff = dma_alloc_coherent(dev, size, <b->addr, 404 GFP_KERNEL); 405 if (!ltb->buff) { 406 dev_err(dev, "Couldn't alloc long term buffer\n"); 407 return -ENOMEM; 408 } 409 ltb->size = size; 410 411 ltb->map_id = find_first_zero_bit(adapter->map_ids, 412 MAX_MAP_ID); 413 bitmap_set(adapter->map_ids, ltb->map_id, 1); 414 415 dev_dbg(dev, 416 "Allocated new LTB [map %d, size 0x%llx was 0x%llx]\n", 417 ltb->map_id, ltb->size, prev); 418 } 419 420 /* Ensure ltb is zeroed - specially when reusing it. 
*/ 421 memset(ltb->buff, 0, ltb->size); 422 423 mutex_lock(&adapter->fw_lock); 424 adapter->fw_done_rc = 0; 425 reinit_completion(&adapter->fw_done); 426 427 rc = send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id); 428 if (rc) { 429 dev_err(dev, "send_request_map failed, rc = %d\n", rc); 430 goto out; 431 } 432 433 rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000); 434 if (rc) { 435 dev_err(dev, "LTB map request aborted or timed out, rc = %d\n", 436 rc); 437 goto out; 438 } 439 440 if (adapter->fw_done_rc) { 441 dev_err(dev, "Couldn't map LTB, rc = %d\n", 442 adapter->fw_done_rc); 443 rc = -EIO; 444 goto out; 445 } 446 rc = 0; 447 out: 448 /* don't free LTB on communication error - see function header */ 449 mutex_unlock(&adapter->fw_lock); 450 return rc; 451 } 452 453 static void free_long_term_buff(struct ibmvnic_adapter *adapter, 454 struct ibmvnic_long_term_buff *ltb) 455 { 456 struct device *dev = &adapter->vdev->dev; 457 458 if (!ltb->buff) 459 return; 460 461 /* VIOS automatically unmaps the long term buffer at remote 462 * end for the following resets: 463 * FAILOVER, MOBILITY, TIMEOUT. 464 */ 465 if (adapter->reset_reason != VNIC_RESET_FAILOVER && 466 adapter->reset_reason != VNIC_RESET_MOBILITY && 467 adapter->reset_reason != VNIC_RESET_TIMEOUT) 468 send_request_unmap(adapter, ltb->map_id); 469 470 dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr); 471 472 ltb->buff = NULL; 473 /* mark this map_id free */ 474 bitmap_clear(adapter->map_ids, ltb->map_id, 1); 475 ltb->map_id = 0; 476 } 477 478 /** 479 * free_ltb_set - free the given set of long term buffers (LTBS) 480 * @adapter: The ibmvnic adapter containing this ltb set 481 * @ltb_set: The ltb_set to be freed 482 * 483 * Free the set of LTBs in the given set. 484 */ 485 486 static void free_ltb_set(struct ibmvnic_adapter *adapter, 487 struct ibmvnic_ltb_set *ltb_set) 488 { 489 int i; 490 491 for (i = 0; i < ltb_set->num_ltbs; i++) 492 free_long_term_buff(adapter, <b_set->ltbs[i]); 493 494 kfree(ltb_set->ltbs); 495 ltb_set->ltbs = NULL; 496 ltb_set->num_ltbs = 0; 497 } 498 499 /** 500 * alloc_ltb_set() - Allocate a set of long term buffers (LTBs) 501 * 502 * @adapter: ibmvnic adapter associated to the LTB 503 * @ltb_set: container object for the set of LTBs 504 * @num_buffs: Number of buffers in the LTB 505 * @buff_size: Size of each buffer in the LTB 506 * 507 * Allocate a set of LTBs to accommodate @num_buffs buffers of @buff_size 508 * each. We currently cap size each LTB to IBMVNIC_ONE_LTB_SIZE. If the 509 * new set of LTBs have fewer LTBs than the old set, free the excess LTBs. 510 * If new set needs more than in old set, allocate the remaining ones. 511 * Try and reuse as many LTBs as possible and avoid reallocation. 512 * 513 * Any changes to this allocation strategy must be reflected in 514 * map_rxpool_buff_to_ltb() and map_txpool_buff_to_ltb(). 
515 */ 516 static int alloc_ltb_set(struct ibmvnic_adapter *adapter, 517 struct ibmvnic_ltb_set *ltb_set, int num_buffs, 518 int buff_size) 519 { 520 struct device *dev = &adapter->vdev->dev; 521 struct ibmvnic_ltb_set old_set; 522 struct ibmvnic_ltb_set new_set; 523 int rem_size; 524 int tot_size; /* size of all ltbs */ 525 int ltb_size; /* size of one ltb */ 526 int nltbs; 527 int rc; 528 int n; 529 int i; 530 531 dev_dbg(dev, "%s() num_buffs %d, buff_size %d\n", __func__, num_buffs, 532 buff_size); 533 534 ltb_size = rounddown(IBMVNIC_ONE_LTB_SIZE, buff_size); 535 tot_size = num_buffs * buff_size; 536 537 if (ltb_size > tot_size) 538 ltb_size = tot_size; 539 540 nltbs = tot_size / ltb_size; 541 if (tot_size % ltb_size) 542 nltbs++; 543 544 old_set = *ltb_set; 545 546 if (old_set.num_ltbs == nltbs) { 547 new_set = old_set; 548 } else { 549 int tmp = nltbs * sizeof(struct ibmvnic_long_term_buff); 550 551 new_set.ltbs = kzalloc(tmp, GFP_KERNEL); 552 if (!new_set.ltbs) 553 return -ENOMEM; 554 555 new_set.num_ltbs = nltbs; 556 557 /* Free any excess ltbs in old set */ 558 for (i = new_set.num_ltbs; i < old_set.num_ltbs; i++) 559 free_long_term_buff(adapter, &old_set.ltbs[i]); 560 561 /* Copy remaining ltbs to new set. All LTBs except the 562 * last one are of the same size. alloc_long_term_buff() 563 * will realloc if the size changes. 564 */ 565 n = min(old_set.num_ltbs, new_set.num_ltbs); 566 for (i = 0; i < n; i++) 567 new_set.ltbs[i] = old_set.ltbs[i]; 568 569 /* Any additional ltbs in new set will have NULL ltbs for 570 * now and will be allocated in alloc_long_term_buff(). 571 */ 572 573 /* We no longer need the old_set so free it. Note that we 574 * may have reused some ltbs from old set and freed excess 575 * ltbs above. So we only need to free the container now 576 * not the LTBs themselves. (i.e. dont free_ltb_set()!) 577 */ 578 kfree(old_set.ltbs); 579 old_set.ltbs = NULL; 580 old_set.num_ltbs = 0; 581 582 /* Install the new set. If allocations fail below, we will 583 * retry later and know what size LTBs we need. 584 */ 585 *ltb_set = new_set; 586 } 587 588 i = 0; 589 rem_size = tot_size; 590 while (rem_size) { 591 if (ltb_size > rem_size) 592 ltb_size = rem_size; 593 594 rem_size -= ltb_size; 595 596 rc = alloc_long_term_buff(adapter, &new_set.ltbs[i], ltb_size); 597 if (rc) 598 goto out; 599 i++; 600 } 601 602 WARN_ON(i != new_set.num_ltbs); 603 604 return 0; 605 out: 606 /* We may have allocated one/more LTBs before failing and we 607 * want to try and reuse on next reset. So don't free ltb set. 608 */ 609 return rc; 610 } 611 612 /** 613 * map_rxpool_buf_to_ltb - Map given rxpool buffer to offset in an LTB. 614 * @rxpool: The receive buffer pool containing buffer 615 * @bufidx: Index of buffer in rxpool 616 * @ltbp: (Output) pointer to the long term buffer containing the buffer 617 * @offset: (Output) offset of buffer in the LTB from @ltbp 618 * 619 * Map the given buffer identified by [rxpool, bufidx] to an LTB in the 620 * pool and its corresponding offset. Assume for now that each LTB is of 621 * different size but could possibly be optimized based on the allocation 622 * strategy in alloc_ltb_set(). 
623 */ 624 static void map_rxpool_buf_to_ltb(struct ibmvnic_rx_pool *rxpool, 625 unsigned int bufidx, 626 struct ibmvnic_long_term_buff **ltbp, 627 unsigned int *offset) 628 { 629 struct ibmvnic_long_term_buff *ltb; 630 int nbufs; /* # of buffers in one ltb */ 631 int i; 632 633 WARN_ON(bufidx >= rxpool->size); 634 635 for (i = 0; i < rxpool->ltb_set.num_ltbs; i++) { 636 ltb = &rxpool->ltb_set.ltbs[i]; 637 nbufs = ltb->size / rxpool->buff_size; 638 if (bufidx < nbufs) 639 break; 640 bufidx -= nbufs; 641 } 642 643 *ltbp = ltb; 644 *offset = bufidx * rxpool->buff_size; 645 } 646 647 /** 648 * map_txpool_buf_to_ltb - Map given txpool buffer to offset in an LTB. 649 * @txpool: The transmit buffer pool containing buffer 650 * @bufidx: Index of buffer in txpool 651 * @ltbp: (Output) pointer to the long term buffer (LTB) containing the buffer 652 * @offset: (Output) offset of buffer in the LTB from @ltbp 653 * 654 * Map the given buffer identified by [txpool, bufidx] to an LTB in the 655 * pool and its corresponding offset. 656 */ 657 static void map_txpool_buf_to_ltb(struct ibmvnic_tx_pool *txpool, 658 unsigned int bufidx, 659 struct ibmvnic_long_term_buff **ltbp, 660 unsigned int *offset) 661 { 662 struct ibmvnic_long_term_buff *ltb; 663 int nbufs; /* # of buffers in one ltb */ 664 int i; 665 666 WARN_ON_ONCE(bufidx >= txpool->num_buffers); 667 668 for (i = 0; i < txpool->ltb_set.num_ltbs; i++) { 669 ltb = &txpool->ltb_set.ltbs[i]; 670 nbufs = ltb->size / txpool->buf_size; 671 if (bufidx < nbufs) 672 break; 673 bufidx -= nbufs; 674 } 675 676 *ltbp = ltb; 677 *offset = bufidx * txpool->buf_size; 678 } 679 680 static void deactivate_rx_pools(struct ibmvnic_adapter *adapter) 681 { 682 int i; 683 684 for (i = 0; i < adapter->num_active_rx_pools; i++) 685 adapter->rx_pool[i].active = 0; 686 } 687 688 static void replenish_rx_pool(struct ibmvnic_adapter *adapter, 689 struct ibmvnic_rx_pool *pool) 690 { 691 int count = pool->size - atomic_read(&pool->available); 692 u64 handle = adapter->rx_scrq[pool->index]->handle; 693 struct device *dev = &adapter->vdev->dev; 694 struct ibmvnic_ind_xmit_queue *ind_bufp; 695 struct ibmvnic_sub_crq_queue *rx_scrq; 696 struct ibmvnic_long_term_buff *ltb; 697 union sub_crq *sub_crq; 698 int buffers_added = 0; 699 unsigned long lpar_rc; 700 struct sk_buff *skb; 701 unsigned int offset; 702 dma_addr_t dma_addr; 703 unsigned char *dst; 704 int shift = 0; 705 int bufidx; 706 int i; 707 708 if (!pool->active) 709 return; 710 711 rx_scrq = adapter->rx_scrq[pool->index]; 712 ind_bufp = &rx_scrq->ind_buf; 713 714 /* netdev_skb_alloc() could have failed after we saved a few skbs 715 * in the indir_buf and we would not have sent them to VIOS yet. 716 * To account for them, start the loop at ind_bufp->index rather 717 * than 0. If we pushed all the skbs to VIOS, ind_bufp->index will 718 * be 0. 719 */ 720 for (i = ind_bufp->index; i < count; ++i) { 721 bufidx = pool->free_map[pool->next_free]; 722 723 /* We maybe reusing the skb from earlier resets. Allocate 724 * only if necessary. But since the LTB may have changed 725 * during reset (see init_rx_pools()), update LTB below 726 * even if reusing skb. 
727 */ 728 skb = pool->rx_buff[bufidx].skb; 729 if (!skb) { 730 skb = netdev_alloc_skb(adapter->netdev, 731 pool->buff_size); 732 if (!skb) { 733 dev_err(dev, "Couldn't replenish rx buff\n"); 734 adapter->replenish_no_mem++; 735 break; 736 } 737 } 738 739 pool->free_map[pool->next_free] = IBMVNIC_INVALID_MAP; 740 pool->next_free = (pool->next_free + 1) % pool->size; 741 742 /* Copy the skb to the long term mapped DMA buffer */ 743 map_rxpool_buf_to_ltb(pool, bufidx, <b, &offset); 744 dst = ltb->buff + offset; 745 memset(dst, 0, pool->buff_size); 746 dma_addr = ltb->addr + offset; 747 748 /* add the skb to an rx_buff in the pool */ 749 pool->rx_buff[bufidx].data = dst; 750 pool->rx_buff[bufidx].dma = dma_addr; 751 pool->rx_buff[bufidx].skb = skb; 752 pool->rx_buff[bufidx].pool_index = pool->index; 753 pool->rx_buff[bufidx].size = pool->buff_size; 754 755 /* queue the rx_buff for the next send_subcrq_indirect */ 756 sub_crq = &ind_bufp->indir_arr[ind_bufp->index++]; 757 memset(sub_crq, 0, sizeof(*sub_crq)); 758 sub_crq->rx_add.first = IBMVNIC_CRQ_CMD; 759 sub_crq->rx_add.correlator = 760 cpu_to_be64((u64)&pool->rx_buff[bufidx]); 761 sub_crq->rx_add.ioba = cpu_to_be32(dma_addr); 762 sub_crq->rx_add.map_id = ltb->map_id; 763 764 /* The length field of the sCRQ is defined to be 24 bits so the 765 * buffer size needs to be left shifted by a byte before it is 766 * converted to big endian to prevent the last byte from being 767 * truncated. 768 */ 769 #ifdef __LITTLE_ENDIAN__ 770 shift = 8; 771 #endif 772 sub_crq->rx_add.len = cpu_to_be32(pool->buff_size << shift); 773 774 /* if send_subcrq_indirect queue is full, flush to VIOS */ 775 if (ind_bufp->index == IBMVNIC_MAX_IND_DESCS || 776 i == count - 1) { 777 lpar_rc = 778 send_subcrq_indirect(adapter, handle, 779 (u64)ind_bufp->indir_dma, 780 (u64)ind_bufp->index); 781 if (lpar_rc != H_SUCCESS) 782 goto failure; 783 buffers_added += ind_bufp->index; 784 adapter->replenish_add_buff_success += ind_bufp->index; 785 ind_bufp->index = 0; 786 } 787 } 788 atomic_add(buffers_added, &pool->available); 789 return; 790 791 failure: 792 if (lpar_rc != H_PARAMETER && lpar_rc != H_CLOSED) 793 dev_err_ratelimited(dev, "rx: replenish packet buffer failed\n"); 794 for (i = ind_bufp->index - 1; i >= 0; --i) { 795 struct ibmvnic_rx_buff *rx_buff; 796 797 pool->next_free = pool->next_free == 0 ? 798 pool->size - 1 : pool->next_free - 1; 799 sub_crq = &ind_bufp->indir_arr[i]; 800 rx_buff = (struct ibmvnic_rx_buff *) 801 be64_to_cpu(sub_crq->rx_add.correlator); 802 bufidx = (int)(rx_buff - pool->rx_buff); 803 pool->free_map[pool->next_free] = bufidx; 804 dev_kfree_skb_any(pool->rx_buff[bufidx].skb); 805 pool->rx_buff[bufidx].skb = NULL; 806 } 807 adapter->replenish_add_buff_failure += ind_bufp->index; 808 atomic_add(buffers_added, &pool->available); 809 ind_bufp->index = 0; 810 if (lpar_rc == H_CLOSED || adapter->failover_pending) { 811 /* Disable buffer pool replenishment and report carrier off if 812 * queue is closed or pending failover. 813 * Firmware guarantees that a signal will be sent to the 814 * driver, triggering a reset. 
815 */ 816 deactivate_rx_pools(adapter); 817 netif_carrier_off(adapter->netdev); 818 } 819 } 820 821 static void replenish_pools(struct ibmvnic_adapter *adapter) 822 { 823 int i; 824 825 adapter->replenish_task_cycles++; 826 for (i = 0; i < adapter->num_active_rx_pools; i++) { 827 if (adapter->rx_pool[i].active) 828 replenish_rx_pool(adapter, &adapter->rx_pool[i]); 829 } 830 831 netdev_dbg(adapter->netdev, "Replenished %d pools\n", i); 832 } 833 834 static void release_stats_buffers(struct ibmvnic_adapter *adapter) 835 { 836 kfree(adapter->tx_stats_buffers); 837 kfree(adapter->rx_stats_buffers); 838 adapter->tx_stats_buffers = NULL; 839 adapter->rx_stats_buffers = NULL; 840 } 841 842 static int init_stats_buffers(struct ibmvnic_adapter *adapter) 843 { 844 adapter->tx_stats_buffers = 845 kcalloc(IBMVNIC_MAX_QUEUES, 846 sizeof(struct ibmvnic_tx_queue_stats), 847 GFP_KERNEL); 848 if (!adapter->tx_stats_buffers) 849 return -ENOMEM; 850 851 adapter->rx_stats_buffers = 852 kcalloc(IBMVNIC_MAX_QUEUES, 853 sizeof(struct ibmvnic_rx_queue_stats), 854 GFP_KERNEL); 855 if (!adapter->rx_stats_buffers) 856 return -ENOMEM; 857 858 return 0; 859 } 860 861 static void release_stats_token(struct ibmvnic_adapter *adapter) 862 { 863 struct device *dev = &adapter->vdev->dev; 864 865 if (!adapter->stats_token) 866 return; 867 868 dma_unmap_single(dev, adapter->stats_token, 869 sizeof(struct ibmvnic_statistics), 870 DMA_FROM_DEVICE); 871 adapter->stats_token = 0; 872 } 873 874 static int init_stats_token(struct ibmvnic_adapter *adapter) 875 { 876 struct device *dev = &adapter->vdev->dev; 877 dma_addr_t stok; 878 int rc; 879 880 stok = dma_map_single(dev, &adapter->stats, 881 sizeof(struct ibmvnic_statistics), 882 DMA_FROM_DEVICE); 883 rc = dma_mapping_error(dev, stok); 884 if (rc) { 885 dev_err(dev, "Couldn't map stats buffer, rc = %d\n", rc); 886 return rc; 887 } 888 889 adapter->stats_token = stok; 890 netdev_dbg(adapter->netdev, "Stats token initialized (%llx)\n", stok); 891 return 0; 892 } 893 894 /** 895 * release_rx_pools() - Release any rx pools attached to @adapter. 896 * @adapter: ibmvnic adapter 897 * 898 * Safe to call this multiple times - even if no pools are attached. 899 */ 900 static void release_rx_pools(struct ibmvnic_adapter *adapter) 901 { 902 struct ibmvnic_rx_pool *rx_pool; 903 int i, j; 904 905 if (!adapter->rx_pool) 906 return; 907 908 for (i = 0; i < adapter->num_active_rx_pools; i++) { 909 rx_pool = &adapter->rx_pool[i]; 910 911 netdev_dbg(adapter->netdev, "Releasing rx_pool[%d]\n", i); 912 913 kfree(rx_pool->free_map); 914 915 free_ltb_set(adapter, &rx_pool->ltb_set); 916 917 if (!rx_pool->rx_buff) 918 continue; 919 920 for (j = 0; j < rx_pool->size; j++) { 921 if (rx_pool->rx_buff[j].skb) { 922 dev_kfree_skb_any(rx_pool->rx_buff[j].skb); 923 rx_pool->rx_buff[j].skb = NULL; 924 } 925 } 926 927 kfree(rx_pool->rx_buff); 928 } 929 930 kfree(adapter->rx_pool); 931 adapter->rx_pool = NULL; 932 adapter->num_active_rx_pools = 0; 933 adapter->prev_rx_pool_size = 0; 934 } 935 936 /** 937 * reuse_rx_pools() - Check if the existing rx pools can be reused. 938 * @adapter: ibmvnic adapter 939 * 940 * Check if the existing rx pools in the adapter can be reused. The 941 * pools can be reused if the pool parameters (number of pools, 942 * number of buffers in the pool and size of each buffer) have not 943 * changed. 944 * 945 * NOTE: This assumes that all pools have the same number of buffers 946 * which is the case currently. If that changes, we must fix this. 
947 * 948 * Return: true if the rx pools can be reused, false otherwise. 949 */ 950 static bool reuse_rx_pools(struct ibmvnic_adapter *adapter) 951 { 952 u64 old_num_pools, new_num_pools; 953 u64 old_pool_size, new_pool_size; 954 u64 old_buff_size, new_buff_size; 955 956 if (!adapter->rx_pool) 957 return false; 958 959 old_num_pools = adapter->num_active_rx_pools; 960 new_num_pools = adapter->req_rx_queues; 961 962 old_pool_size = adapter->prev_rx_pool_size; 963 new_pool_size = adapter->req_rx_add_entries_per_subcrq; 964 965 old_buff_size = adapter->prev_rx_buf_sz; 966 new_buff_size = adapter->cur_rx_buf_sz; 967 968 if (old_buff_size != new_buff_size || 969 old_num_pools != new_num_pools || 970 old_pool_size != new_pool_size) 971 return false; 972 973 return true; 974 } 975 976 /** 977 * init_rx_pools(): Initialize the set of receiver pools in the adapter. 978 * @netdev: net device associated with the vnic interface 979 * 980 * Initialize the set of receiver pools in the ibmvnic adapter associated 981 * with the net_device @netdev. If possible, reuse the existing rx pools. 982 * Otherwise free any existing pools and allocate a new set of pools 983 * before initializing them. 984 * 985 * Return: 0 on success and negative value on error. 986 */ 987 static int init_rx_pools(struct net_device *netdev) 988 { 989 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 990 struct device *dev = &adapter->vdev->dev; 991 struct ibmvnic_rx_pool *rx_pool; 992 u64 num_pools; 993 u64 pool_size; /* # of buffers in one pool */ 994 u64 buff_size; 995 int i, j, rc; 996 997 pool_size = adapter->req_rx_add_entries_per_subcrq; 998 num_pools = adapter->req_rx_queues; 999 buff_size = adapter->cur_rx_buf_sz; 1000 1001 if (reuse_rx_pools(adapter)) { 1002 dev_dbg(dev, "Reusing rx pools\n"); 1003 goto update_ltb; 1004 } 1005 1006 /* Allocate/populate the pools. */ 1007 release_rx_pools(adapter); 1008 1009 adapter->rx_pool = kcalloc(num_pools, 1010 sizeof(struct ibmvnic_rx_pool), 1011 GFP_KERNEL); 1012 if (!adapter->rx_pool) { 1013 dev_err(dev, "Failed to allocate rx pools\n"); 1014 return -ENOMEM; 1015 } 1016 1017 /* Set num_active_rx_pools early. If we fail below after partial 1018 * allocation, release_rx_pools() will know how many to look for. 
1019 */ 1020 adapter->num_active_rx_pools = num_pools; 1021 1022 for (i = 0; i < num_pools; i++) { 1023 rx_pool = &adapter->rx_pool[i]; 1024 1025 netdev_dbg(adapter->netdev, 1026 "Initializing rx_pool[%d], %lld buffs, %lld bytes each\n", 1027 i, pool_size, buff_size); 1028 1029 rx_pool->size = pool_size; 1030 rx_pool->index = i; 1031 rx_pool->buff_size = ALIGN(buff_size, L1_CACHE_BYTES); 1032 1033 rx_pool->free_map = kcalloc(rx_pool->size, sizeof(int), 1034 GFP_KERNEL); 1035 if (!rx_pool->free_map) { 1036 dev_err(dev, "Couldn't alloc free_map %d\n", i); 1037 rc = -ENOMEM; 1038 goto out_release; 1039 } 1040 1041 rx_pool->rx_buff = kcalloc(rx_pool->size, 1042 sizeof(struct ibmvnic_rx_buff), 1043 GFP_KERNEL); 1044 if (!rx_pool->rx_buff) { 1045 dev_err(dev, "Couldn't alloc rx buffers\n"); 1046 rc = -ENOMEM; 1047 goto out_release; 1048 } 1049 } 1050 1051 adapter->prev_rx_pool_size = pool_size; 1052 adapter->prev_rx_buf_sz = adapter->cur_rx_buf_sz; 1053 1054 update_ltb: 1055 for (i = 0; i < num_pools; i++) { 1056 rx_pool = &adapter->rx_pool[i]; 1057 dev_dbg(dev, "Updating LTB for rx pool %d [%d, %d]\n", 1058 i, rx_pool->size, rx_pool->buff_size); 1059 1060 rc = alloc_ltb_set(adapter, &rx_pool->ltb_set, 1061 rx_pool->size, rx_pool->buff_size); 1062 if (rc) 1063 goto out; 1064 1065 for (j = 0; j < rx_pool->size; ++j) { 1066 struct ibmvnic_rx_buff *rx_buff; 1067 1068 rx_pool->free_map[j] = j; 1069 1070 /* NOTE: Don't clear rx_buff->skb here - will leak 1071 * memory! replenish_rx_pool() will reuse skbs or 1072 * allocate as necessary. 1073 */ 1074 rx_buff = &rx_pool->rx_buff[j]; 1075 rx_buff->dma = 0; 1076 rx_buff->data = 0; 1077 rx_buff->size = 0; 1078 rx_buff->pool_index = 0; 1079 } 1080 1081 /* Mark pool "empty" so replenish_rx_pools() will 1082 * update the LTB info for each buffer 1083 */ 1084 atomic_set(&rx_pool->available, 0); 1085 rx_pool->next_alloc = 0; 1086 rx_pool->next_free = 0; 1087 /* replenish_rx_pool() may have called deactivate_rx_pools() 1088 * on failover. Ensure pool is active now. 1089 */ 1090 rx_pool->active = 1; 1091 } 1092 return 0; 1093 out_release: 1094 release_rx_pools(adapter); 1095 out: 1096 /* We failed to allocate one or more LTBs or map them on the VIOS. 1097 * Hold onto the pools and any LTBs that we did allocate/map. 1098 */ 1099 return rc; 1100 } 1101 1102 static void release_vpd_data(struct ibmvnic_adapter *adapter) 1103 { 1104 if (!adapter->vpd) 1105 return; 1106 1107 kfree(adapter->vpd->buff); 1108 kfree(adapter->vpd); 1109 1110 adapter->vpd = NULL; 1111 } 1112 1113 static void release_one_tx_pool(struct ibmvnic_adapter *adapter, 1114 struct ibmvnic_tx_pool *tx_pool) 1115 { 1116 kfree(tx_pool->tx_buff); 1117 kfree(tx_pool->free_map); 1118 free_ltb_set(adapter, &tx_pool->ltb_set); 1119 } 1120 1121 /** 1122 * release_tx_pools() - Release any tx pools attached to @adapter. 1123 * @adapter: ibmvnic adapter 1124 * 1125 * Safe to call this multiple times - even if no pools are attached. 1126 */ 1127 static void release_tx_pools(struct ibmvnic_adapter *adapter) 1128 { 1129 int i; 1130 1131 /* init_tx_pools() ensures that ->tx_pool and ->tso_pool are 1132 * both NULL or both non-NULL. So we only need to check one. 
1133 */ 1134 if (!adapter->tx_pool) 1135 return; 1136 1137 for (i = 0; i < adapter->num_active_tx_pools; i++) { 1138 release_one_tx_pool(adapter, &adapter->tx_pool[i]); 1139 release_one_tx_pool(adapter, &adapter->tso_pool[i]); 1140 } 1141 1142 kfree(adapter->tx_pool); 1143 adapter->tx_pool = NULL; 1144 kfree(adapter->tso_pool); 1145 adapter->tso_pool = NULL; 1146 adapter->num_active_tx_pools = 0; 1147 adapter->prev_tx_pool_size = 0; 1148 } 1149 1150 static int init_one_tx_pool(struct net_device *netdev, 1151 struct ibmvnic_tx_pool *tx_pool, 1152 int pool_size, int buf_size) 1153 { 1154 int i; 1155 1156 tx_pool->tx_buff = kcalloc(pool_size, 1157 sizeof(struct ibmvnic_tx_buff), 1158 GFP_KERNEL); 1159 if (!tx_pool->tx_buff) 1160 return -ENOMEM; 1161 1162 tx_pool->free_map = kcalloc(pool_size, sizeof(int), GFP_KERNEL); 1163 if (!tx_pool->free_map) { 1164 kfree(tx_pool->tx_buff); 1165 tx_pool->tx_buff = NULL; 1166 return -ENOMEM; 1167 } 1168 1169 for (i = 0; i < pool_size; i++) 1170 tx_pool->free_map[i] = i; 1171 1172 tx_pool->consumer_index = 0; 1173 tx_pool->producer_index = 0; 1174 tx_pool->num_buffers = pool_size; 1175 tx_pool->buf_size = buf_size; 1176 1177 return 0; 1178 } 1179 1180 /** 1181 * reuse_tx_pools() - Check if the existing tx pools can be reused. 1182 * @adapter: ibmvnic adapter 1183 * 1184 * Check if the existing tx pools in the adapter can be reused. The 1185 * pools can be reused if the pool parameters (number of pools, 1186 * number of buffers in the pool and mtu) have not changed. 1187 * 1188 * NOTE: This assumes that all pools have the same number of buffers 1189 * which is the case currently. If that changes, we must fix this. 1190 * 1191 * Return: true if the tx pools can be reused, false otherwise. 1192 */ 1193 static bool reuse_tx_pools(struct ibmvnic_adapter *adapter) 1194 { 1195 u64 old_num_pools, new_num_pools; 1196 u64 old_pool_size, new_pool_size; 1197 u64 old_mtu, new_mtu; 1198 1199 if (!adapter->tx_pool) 1200 return false; 1201 1202 old_num_pools = adapter->num_active_tx_pools; 1203 new_num_pools = adapter->num_active_tx_scrqs; 1204 old_pool_size = adapter->prev_tx_pool_size; 1205 new_pool_size = adapter->req_tx_entries_per_subcrq; 1206 old_mtu = adapter->prev_mtu; 1207 new_mtu = adapter->req_mtu; 1208 1209 if (old_mtu != new_mtu || 1210 old_num_pools != new_num_pools || 1211 old_pool_size != new_pool_size) 1212 return false; 1213 1214 return true; 1215 } 1216 1217 /** 1218 * init_tx_pools(): Initialize the set of transmit pools in the adapter. 1219 * @netdev: net device associated with the vnic interface 1220 * 1221 * Initialize the set of transmit pools in the ibmvnic adapter associated 1222 * with the net_device @netdev. If possible, reuse the existing tx pools. 1223 * Otherwise free any existing pools and allocate a new set of pools 1224 * before initializing them. 1225 * 1226 * Return: 0 on success and negative value on error. 1227 */ 1228 static int init_tx_pools(struct net_device *netdev) 1229 { 1230 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 1231 struct device *dev = &adapter->vdev->dev; 1232 int num_pools; 1233 u64 pool_size; /* # of buffers in pool */ 1234 u64 buff_size; 1235 int i, j, rc; 1236 1237 num_pools = adapter->req_tx_queues; 1238 1239 /* We must notify the VIOS about the LTB on all resets - but we only 1240 * need to alloc/populate pools if either the number of buffers or 1241 * size of each buffer in the pool has changed. 
1242 */ 1243 if (reuse_tx_pools(adapter)) { 1244 netdev_dbg(netdev, "Reusing tx pools\n"); 1245 goto update_ltb; 1246 } 1247 1248 /* Allocate/populate the pools. */ 1249 release_tx_pools(adapter); 1250 1251 pool_size = adapter->req_tx_entries_per_subcrq; 1252 num_pools = adapter->num_active_tx_scrqs; 1253 1254 adapter->tx_pool = kcalloc(num_pools, 1255 sizeof(struct ibmvnic_tx_pool), GFP_KERNEL); 1256 if (!adapter->tx_pool) 1257 return -ENOMEM; 1258 1259 adapter->tso_pool = kcalloc(num_pools, 1260 sizeof(struct ibmvnic_tx_pool), GFP_KERNEL); 1261 /* To simplify release_tx_pools() ensure that ->tx_pool and 1262 * ->tso_pool are either both NULL or both non-NULL. 1263 */ 1264 if (!adapter->tso_pool) { 1265 kfree(adapter->tx_pool); 1266 adapter->tx_pool = NULL; 1267 return -ENOMEM; 1268 } 1269 1270 /* Set num_active_tx_pools early. If we fail below after partial 1271 * allocation, release_tx_pools() will know how many to look for. 1272 */ 1273 adapter->num_active_tx_pools = num_pools; 1274 1275 buff_size = adapter->req_mtu + VLAN_HLEN; 1276 buff_size = ALIGN(buff_size, L1_CACHE_BYTES); 1277 1278 for (i = 0; i < num_pools; i++) { 1279 dev_dbg(dev, "Init tx pool %d [%llu, %llu]\n", 1280 i, adapter->req_tx_entries_per_subcrq, buff_size); 1281 1282 rc = init_one_tx_pool(netdev, &adapter->tx_pool[i], 1283 pool_size, buff_size); 1284 if (rc) 1285 goto out_release; 1286 1287 rc = init_one_tx_pool(netdev, &adapter->tso_pool[i], 1288 IBMVNIC_TSO_BUFS, 1289 IBMVNIC_TSO_BUF_SZ); 1290 if (rc) 1291 goto out_release; 1292 } 1293 1294 adapter->prev_tx_pool_size = pool_size; 1295 adapter->prev_mtu = adapter->req_mtu; 1296 1297 update_ltb: 1298 /* NOTE: All tx_pools have the same number of buffers (which is 1299 * same as pool_size). All tso_pools have IBMVNIC_TSO_BUFS 1300 * buffers (see calls init_one_tx_pool() for these). 1301 * For consistency, we use tx_pool->num_buffers and 1302 * tso_pool->num_buffers below. 1303 */ 1304 rc = -1; 1305 for (i = 0; i < num_pools; i++) { 1306 struct ibmvnic_tx_pool *tso_pool; 1307 struct ibmvnic_tx_pool *tx_pool; 1308 1309 tx_pool = &adapter->tx_pool[i]; 1310 1311 dev_dbg(dev, "Updating LTB for tx pool %d [%d, %d]\n", 1312 i, tx_pool->num_buffers, tx_pool->buf_size); 1313 1314 rc = alloc_ltb_set(adapter, &tx_pool->ltb_set, 1315 tx_pool->num_buffers, tx_pool->buf_size); 1316 if (rc) 1317 goto out; 1318 1319 tx_pool->consumer_index = 0; 1320 tx_pool->producer_index = 0; 1321 1322 for (j = 0; j < tx_pool->num_buffers; j++) 1323 tx_pool->free_map[j] = j; 1324 1325 tso_pool = &adapter->tso_pool[i]; 1326 1327 dev_dbg(dev, "Updating LTB for tso pool %d [%d, %d]\n", 1328 i, tso_pool->num_buffers, tso_pool->buf_size); 1329 1330 rc = alloc_ltb_set(adapter, &tso_pool->ltb_set, 1331 tso_pool->num_buffers, tso_pool->buf_size); 1332 if (rc) 1333 goto out; 1334 1335 tso_pool->consumer_index = 0; 1336 tso_pool->producer_index = 0; 1337 1338 for (j = 0; j < tso_pool->num_buffers; j++) 1339 tso_pool->free_map[j] = j; 1340 } 1341 1342 return 0; 1343 out_release: 1344 release_tx_pools(adapter); 1345 out: 1346 /* We failed to allocate one or more LTBs or map them on the VIOS. 1347 * Hold onto the pools and any LTBs that we did allocate/map. 
1348 */ 1349 return rc; 1350 } 1351 1352 static void ibmvnic_napi_enable(struct ibmvnic_adapter *adapter) 1353 { 1354 int i; 1355 1356 if (adapter->napi_enabled) 1357 return; 1358 1359 for (i = 0; i < adapter->req_rx_queues; i++) 1360 napi_enable(&adapter->napi[i]); 1361 1362 adapter->napi_enabled = true; 1363 } 1364 1365 static void ibmvnic_napi_disable(struct ibmvnic_adapter *adapter) 1366 { 1367 int i; 1368 1369 if (!adapter->napi_enabled) 1370 return; 1371 1372 for (i = 0; i < adapter->req_rx_queues; i++) { 1373 netdev_dbg(adapter->netdev, "Disabling napi[%d]\n", i); 1374 napi_disable(&adapter->napi[i]); 1375 } 1376 1377 adapter->napi_enabled = false; 1378 } 1379 1380 static int init_napi(struct ibmvnic_adapter *adapter) 1381 { 1382 int i; 1383 1384 adapter->napi = kcalloc(adapter->req_rx_queues, 1385 sizeof(struct napi_struct), GFP_KERNEL); 1386 if (!adapter->napi) 1387 return -ENOMEM; 1388 1389 for (i = 0; i < adapter->req_rx_queues; i++) { 1390 netdev_dbg(adapter->netdev, "Adding napi[%d]\n", i); 1391 netif_napi_add(adapter->netdev, &adapter->napi[i], 1392 ibmvnic_poll); 1393 } 1394 1395 adapter->num_active_rx_napi = adapter->req_rx_queues; 1396 return 0; 1397 } 1398 1399 static void release_napi(struct ibmvnic_adapter *adapter) 1400 { 1401 int i; 1402 1403 if (!adapter->napi) 1404 return; 1405 1406 for (i = 0; i < adapter->num_active_rx_napi; i++) { 1407 netdev_dbg(adapter->netdev, "Releasing napi[%d]\n", i); 1408 netif_napi_del(&adapter->napi[i]); 1409 } 1410 1411 kfree(adapter->napi); 1412 adapter->napi = NULL; 1413 adapter->num_active_rx_napi = 0; 1414 adapter->napi_enabled = false; 1415 } 1416 1417 static const char *adapter_state_to_string(enum vnic_state state) 1418 { 1419 switch (state) { 1420 case VNIC_PROBING: 1421 return "PROBING"; 1422 case VNIC_PROBED: 1423 return "PROBED"; 1424 case VNIC_OPENING: 1425 return "OPENING"; 1426 case VNIC_OPEN: 1427 return "OPEN"; 1428 case VNIC_CLOSING: 1429 return "CLOSING"; 1430 case VNIC_CLOSED: 1431 return "CLOSED"; 1432 case VNIC_REMOVING: 1433 return "REMOVING"; 1434 case VNIC_REMOVED: 1435 return "REMOVED"; 1436 case VNIC_DOWN: 1437 return "DOWN"; 1438 } 1439 return "UNKNOWN"; 1440 } 1441 1442 static int ibmvnic_login(struct net_device *netdev) 1443 { 1444 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 1445 unsigned long timeout = msecs_to_jiffies(20000); 1446 int retry_count = 0; 1447 int retries = 10; 1448 bool retry; 1449 int rc; 1450 1451 do { 1452 retry = false; 1453 if (retry_count > retries) { 1454 netdev_warn(netdev, "Login attempts exceeded\n"); 1455 return -EACCES; 1456 } 1457 1458 adapter->init_done_rc = 0; 1459 reinit_completion(&adapter->init_done); 1460 rc = send_login(adapter); 1461 if (rc) 1462 return rc; 1463 1464 if (!wait_for_completion_timeout(&adapter->init_done, 1465 timeout)) { 1466 netdev_warn(netdev, "Login timed out, retrying...\n"); 1467 retry = true; 1468 adapter->init_done_rc = 0; 1469 retry_count++; 1470 continue; 1471 } 1472 1473 if (adapter->init_done_rc == ABORTED) { 1474 netdev_warn(netdev, "Login aborted, retrying...\n"); 1475 retry = true; 1476 adapter->init_done_rc = 0; 1477 retry_count++; 1478 /* FW or device may be busy, so 1479 * wait a bit before retrying login 1480 */ 1481 msleep(500); 1482 } else if (adapter->init_done_rc == PARTIALSUCCESS) { 1483 retry_count++; 1484 release_sub_crqs(adapter, 1); 1485 1486 retry = true; 1487 netdev_dbg(netdev, 1488 "Received partial success, retrying...\n"); 1489 adapter->init_done_rc = 0; 1490 reinit_completion(&adapter->init_done); 1491 
send_query_cap(adapter); 1492 if (!wait_for_completion_timeout(&adapter->init_done, 1493 timeout)) { 1494 netdev_warn(netdev, 1495 "Capabilities query timed out\n"); 1496 return -ETIMEDOUT; 1497 } 1498 1499 rc = init_sub_crqs(adapter); 1500 if (rc) { 1501 netdev_warn(netdev, 1502 "SCRQ initialization failed\n"); 1503 return rc; 1504 } 1505 1506 rc = init_sub_crq_irqs(adapter); 1507 if (rc) { 1508 netdev_warn(netdev, 1509 "SCRQ irq initialization failed\n"); 1510 return rc; 1511 } 1512 } else if (adapter->init_done_rc) { 1513 netdev_warn(netdev, "Adapter login failed, init_done_rc = %d\n", 1514 adapter->init_done_rc); 1515 return -EIO; 1516 } 1517 } while (retry); 1518 1519 __ibmvnic_set_mac(netdev, adapter->mac_addr); 1520 1521 netdev_dbg(netdev, "[S:%s] Login succeeded\n", adapter_state_to_string(adapter->state)); 1522 return 0; 1523 } 1524 1525 static void release_login_buffer(struct ibmvnic_adapter *adapter) 1526 { 1527 kfree(adapter->login_buf); 1528 adapter->login_buf = NULL; 1529 } 1530 1531 static void release_login_rsp_buffer(struct ibmvnic_adapter *adapter) 1532 { 1533 kfree(adapter->login_rsp_buf); 1534 adapter->login_rsp_buf = NULL; 1535 } 1536 1537 static void release_resources(struct ibmvnic_adapter *adapter) 1538 { 1539 release_vpd_data(adapter); 1540 1541 release_napi(adapter); 1542 release_login_buffer(adapter); 1543 release_login_rsp_buffer(adapter); 1544 } 1545 1546 static int set_link_state(struct ibmvnic_adapter *adapter, u8 link_state) 1547 { 1548 struct net_device *netdev = adapter->netdev; 1549 unsigned long timeout = msecs_to_jiffies(20000); 1550 union ibmvnic_crq crq; 1551 bool resend; 1552 int rc; 1553 1554 netdev_dbg(netdev, "setting link state %d\n", link_state); 1555 1556 memset(&crq, 0, sizeof(crq)); 1557 crq.logical_link_state.first = IBMVNIC_CRQ_CMD; 1558 crq.logical_link_state.cmd = LOGICAL_LINK_STATE; 1559 crq.logical_link_state.link_state = link_state; 1560 1561 do { 1562 resend = false; 1563 1564 reinit_completion(&adapter->init_done); 1565 rc = ibmvnic_send_crq(adapter, &crq); 1566 if (rc) { 1567 netdev_err(netdev, "Failed to set link state\n"); 1568 return rc; 1569 } 1570 1571 if (!wait_for_completion_timeout(&adapter->init_done, 1572 timeout)) { 1573 netdev_err(netdev, "timeout setting link state\n"); 1574 return -ETIMEDOUT; 1575 } 1576 1577 if (adapter->init_done_rc == PARTIALSUCCESS) { 1578 /* Partuial success, delay and re-send */ 1579 mdelay(1000); 1580 resend = true; 1581 } else if (adapter->init_done_rc) { 1582 netdev_warn(netdev, "Unable to set link state, rc=%d\n", 1583 adapter->init_done_rc); 1584 return adapter->init_done_rc; 1585 } 1586 } while (resend); 1587 1588 return 0; 1589 } 1590 1591 static int set_real_num_queues(struct net_device *netdev) 1592 { 1593 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 1594 int rc; 1595 1596 netdev_dbg(netdev, "Setting real tx/rx queues (%llx/%llx)\n", 1597 adapter->req_tx_queues, adapter->req_rx_queues); 1598 1599 rc = netif_set_real_num_tx_queues(netdev, adapter->req_tx_queues); 1600 if (rc) { 1601 netdev_err(netdev, "failed to set the number of tx queues\n"); 1602 return rc; 1603 } 1604 1605 rc = netif_set_real_num_rx_queues(netdev, adapter->req_rx_queues); 1606 if (rc) 1607 netdev_err(netdev, "failed to set the number of rx queues\n"); 1608 1609 return rc; 1610 } 1611 1612 static int ibmvnic_get_vpd(struct ibmvnic_adapter *adapter) 1613 { 1614 struct device *dev = &adapter->vdev->dev; 1615 union ibmvnic_crq crq; 1616 int len = 0; 1617 int rc; 1618 1619 if (adapter->vpd->buff) 1620 len = 
adapter->vpd->len; 1621 1622 mutex_lock(&adapter->fw_lock); 1623 adapter->fw_done_rc = 0; 1624 reinit_completion(&adapter->fw_done); 1625 1626 crq.get_vpd_size.first = IBMVNIC_CRQ_CMD; 1627 crq.get_vpd_size.cmd = GET_VPD_SIZE; 1628 rc = ibmvnic_send_crq(adapter, &crq); 1629 if (rc) { 1630 mutex_unlock(&adapter->fw_lock); 1631 return rc; 1632 } 1633 1634 rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000); 1635 if (rc) { 1636 dev_err(dev, "Could not retrieve VPD size, rc = %d\n", rc); 1637 mutex_unlock(&adapter->fw_lock); 1638 return rc; 1639 } 1640 mutex_unlock(&adapter->fw_lock); 1641 1642 if (!adapter->vpd->len) 1643 return -ENODATA; 1644 1645 if (!adapter->vpd->buff) 1646 adapter->vpd->buff = kzalloc(adapter->vpd->len, GFP_KERNEL); 1647 else if (adapter->vpd->len != len) 1648 adapter->vpd->buff = 1649 krealloc(adapter->vpd->buff, 1650 adapter->vpd->len, GFP_KERNEL); 1651 1652 if (!adapter->vpd->buff) { 1653 dev_err(dev, "Could allocate VPD buffer\n"); 1654 return -ENOMEM; 1655 } 1656 1657 adapter->vpd->dma_addr = 1658 dma_map_single(dev, adapter->vpd->buff, adapter->vpd->len, 1659 DMA_FROM_DEVICE); 1660 if (dma_mapping_error(dev, adapter->vpd->dma_addr)) { 1661 dev_err(dev, "Could not map VPD buffer\n"); 1662 kfree(adapter->vpd->buff); 1663 adapter->vpd->buff = NULL; 1664 return -ENOMEM; 1665 } 1666 1667 mutex_lock(&adapter->fw_lock); 1668 adapter->fw_done_rc = 0; 1669 reinit_completion(&adapter->fw_done); 1670 1671 crq.get_vpd.first = IBMVNIC_CRQ_CMD; 1672 crq.get_vpd.cmd = GET_VPD; 1673 crq.get_vpd.ioba = cpu_to_be32(adapter->vpd->dma_addr); 1674 crq.get_vpd.len = cpu_to_be32((u32)adapter->vpd->len); 1675 rc = ibmvnic_send_crq(adapter, &crq); 1676 if (rc) { 1677 kfree(adapter->vpd->buff); 1678 adapter->vpd->buff = NULL; 1679 mutex_unlock(&adapter->fw_lock); 1680 return rc; 1681 } 1682 1683 rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000); 1684 if (rc) { 1685 dev_err(dev, "Unable to retrieve VPD, rc = %d\n", rc); 1686 kfree(adapter->vpd->buff); 1687 adapter->vpd->buff = NULL; 1688 mutex_unlock(&adapter->fw_lock); 1689 return rc; 1690 } 1691 1692 mutex_unlock(&adapter->fw_lock); 1693 return 0; 1694 } 1695 1696 static int init_resources(struct ibmvnic_adapter *adapter) 1697 { 1698 struct net_device *netdev = adapter->netdev; 1699 int rc; 1700 1701 rc = set_real_num_queues(netdev); 1702 if (rc) 1703 return rc; 1704 1705 adapter->vpd = kzalloc(sizeof(*adapter->vpd), GFP_KERNEL); 1706 if (!adapter->vpd) 1707 return -ENOMEM; 1708 1709 /* Vital Product Data (VPD) */ 1710 rc = ibmvnic_get_vpd(adapter); 1711 if (rc) { 1712 netdev_err(netdev, "failed to initialize Vital Product Data (VPD)\n"); 1713 return rc; 1714 } 1715 1716 rc = init_napi(adapter); 1717 if (rc) 1718 return rc; 1719 1720 send_query_map(adapter); 1721 1722 rc = init_rx_pools(netdev); 1723 if (rc) 1724 return rc; 1725 1726 rc = init_tx_pools(netdev); 1727 return rc; 1728 } 1729 1730 static int __ibmvnic_open(struct net_device *netdev) 1731 { 1732 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 1733 enum vnic_state prev_state = adapter->state; 1734 int i, rc; 1735 1736 adapter->state = VNIC_OPENING; 1737 replenish_pools(adapter); 1738 ibmvnic_napi_enable(adapter); 1739 1740 /* We're ready to receive frames, enable the sub-crq interrupts and 1741 * set the logical link state to up 1742 */ 1743 for (i = 0; i < adapter->req_rx_queues; i++) { 1744 netdev_dbg(netdev, "Enabling rx_scrq[%d] irq\n", i); 1745 if (prev_state == VNIC_CLOSED) 1746 enable_irq(adapter->rx_scrq[i]->irq); 1747 
enable_scrq_irq(adapter, adapter->rx_scrq[i]); 1748 } 1749 1750 for (i = 0; i < adapter->req_tx_queues; i++) { 1751 netdev_dbg(netdev, "Enabling tx_scrq[%d] irq\n", i); 1752 if (prev_state == VNIC_CLOSED) 1753 enable_irq(adapter->tx_scrq[i]->irq); 1754 enable_scrq_irq(adapter, adapter->tx_scrq[i]); 1755 netdev_tx_reset_queue(netdev_get_tx_queue(netdev, i)); 1756 } 1757 1758 rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_UP); 1759 if (rc) { 1760 ibmvnic_napi_disable(adapter); 1761 ibmvnic_disable_irqs(adapter); 1762 return rc; 1763 } 1764 1765 adapter->tx_queues_active = true; 1766 1767 /* Since queues were stopped until now, there shouldn't be any 1768 * one in ibmvnic_complete_tx() or ibmvnic_xmit() so maybe we 1769 * don't need the synchronize_rcu()? Leaving it for consistency 1770 * with setting ->tx_queues_active = false. 1771 */ 1772 synchronize_rcu(); 1773 1774 netif_tx_start_all_queues(netdev); 1775 1776 if (prev_state == VNIC_CLOSED) { 1777 for (i = 0; i < adapter->req_rx_queues; i++) 1778 napi_schedule(&adapter->napi[i]); 1779 } 1780 1781 adapter->state = VNIC_OPEN; 1782 return rc; 1783 } 1784 1785 static int ibmvnic_open(struct net_device *netdev) 1786 { 1787 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 1788 int rc; 1789 1790 ASSERT_RTNL(); 1791 1792 /* If device failover is pending or we are about to reset, just set 1793 * device state and return. Device operation will be handled by reset 1794 * routine. 1795 * 1796 * It should be safe to overwrite the adapter->state here. Since 1797 * we hold the rtnl, either the reset has not actually started or 1798 * the rtnl got dropped during the set_link_state() in do_reset(). 1799 * In the former case, no one else is changing the state (again we 1800 * have the rtnl) and in the latter case, do_reset() will detect and 1801 * honor our setting below. 1802 */ 1803 if (adapter->failover_pending || (test_bit(0, &adapter->resetting))) { 1804 netdev_dbg(netdev, "[S:%s FOP:%d] Resetting, deferring open\n", 1805 adapter_state_to_string(adapter->state), 1806 adapter->failover_pending); 1807 adapter->state = VNIC_OPEN; 1808 rc = 0; 1809 goto out; 1810 } 1811 1812 if (adapter->state != VNIC_CLOSED) { 1813 rc = ibmvnic_login(netdev); 1814 if (rc) 1815 goto out; 1816 1817 rc = init_resources(adapter); 1818 if (rc) { 1819 netdev_err(netdev, "failed to initialize resources\n"); 1820 goto out; 1821 } 1822 } 1823 1824 rc = __ibmvnic_open(netdev); 1825 1826 out: 1827 /* If open failed and there is a pending failover or in-progress reset, 1828 * set device state and return. Device operation will be handled by 1829 * reset routine. See also comments above regarding rtnl. 
1830 */ 1831 if (rc && 1832 (adapter->failover_pending || (test_bit(0, &adapter->resetting)))) { 1833 adapter->state = VNIC_OPEN; 1834 rc = 0; 1835 } 1836 1837 if (rc) { 1838 release_resources(adapter); 1839 release_rx_pools(adapter); 1840 release_tx_pools(adapter); 1841 } 1842 1843 return rc; 1844 } 1845 1846 static void clean_rx_pools(struct ibmvnic_adapter *adapter) 1847 { 1848 struct ibmvnic_rx_pool *rx_pool; 1849 struct ibmvnic_rx_buff *rx_buff; 1850 u64 rx_entries; 1851 int rx_scrqs; 1852 int i, j; 1853 1854 if (!adapter->rx_pool) 1855 return; 1856 1857 rx_scrqs = adapter->num_active_rx_pools; 1858 rx_entries = adapter->req_rx_add_entries_per_subcrq; 1859 1860 /* Free any remaining skbs in the rx buffer pools */ 1861 for (i = 0; i < rx_scrqs; i++) { 1862 rx_pool = &adapter->rx_pool[i]; 1863 if (!rx_pool || !rx_pool->rx_buff) 1864 continue; 1865 1866 netdev_dbg(adapter->netdev, "Cleaning rx_pool[%d]\n", i); 1867 for (j = 0; j < rx_entries; j++) { 1868 rx_buff = &rx_pool->rx_buff[j]; 1869 if (rx_buff && rx_buff->skb) { 1870 dev_kfree_skb_any(rx_buff->skb); 1871 rx_buff->skb = NULL; 1872 } 1873 } 1874 } 1875 } 1876 1877 static void clean_one_tx_pool(struct ibmvnic_adapter *adapter, 1878 struct ibmvnic_tx_pool *tx_pool) 1879 { 1880 struct ibmvnic_tx_buff *tx_buff; 1881 u64 tx_entries; 1882 int i; 1883 1884 if (!tx_pool || !tx_pool->tx_buff) 1885 return; 1886 1887 tx_entries = tx_pool->num_buffers; 1888 1889 for (i = 0; i < tx_entries; i++) { 1890 tx_buff = &tx_pool->tx_buff[i]; 1891 if (tx_buff && tx_buff->skb) { 1892 dev_kfree_skb_any(tx_buff->skb); 1893 tx_buff->skb = NULL; 1894 } 1895 } 1896 } 1897 1898 static void clean_tx_pools(struct ibmvnic_adapter *adapter) 1899 { 1900 int tx_scrqs; 1901 int i; 1902 1903 if (!adapter->tx_pool || !adapter->tso_pool) 1904 return; 1905 1906 tx_scrqs = adapter->num_active_tx_pools; 1907 1908 /* Free any remaining skbs in the tx buffer pools */ 1909 for (i = 0; i < tx_scrqs; i++) { 1910 netdev_dbg(adapter->netdev, "Cleaning tx_pool[%d]\n", i); 1911 clean_one_tx_pool(adapter, &adapter->tx_pool[i]); 1912 clean_one_tx_pool(adapter, &adapter->tso_pool[i]); 1913 } 1914 } 1915 1916 static void ibmvnic_disable_irqs(struct ibmvnic_adapter *adapter) 1917 { 1918 struct net_device *netdev = adapter->netdev; 1919 int i; 1920 1921 if (adapter->tx_scrq) { 1922 for (i = 0; i < adapter->req_tx_queues; i++) 1923 if (adapter->tx_scrq[i]->irq) { 1924 netdev_dbg(netdev, 1925 "Disabling tx_scrq[%d] irq\n", i); 1926 disable_scrq_irq(adapter, adapter->tx_scrq[i]); 1927 disable_irq(adapter->tx_scrq[i]->irq); 1928 } 1929 } 1930 1931 if (adapter->rx_scrq) { 1932 for (i = 0; i < adapter->req_rx_queues; i++) { 1933 if (adapter->rx_scrq[i]->irq) { 1934 netdev_dbg(netdev, 1935 "Disabling rx_scrq[%d] irq\n", i); 1936 disable_scrq_irq(adapter, adapter->rx_scrq[i]); 1937 disable_irq(adapter->rx_scrq[i]->irq); 1938 } 1939 } 1940 } 1941 } 1942 1943 static void ibmvnic_cleanup(struct net_device *netdev) 1944 { 1945 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 1946 1947 /* ensure that transmissions are stopped if called by do_reset */ 1948 1949 adapter->tx_queues_active = false; 1950 1951 /* Ensure complete_tx() and ibmvnic_xmit() see ->tx_queues_active 1952 * update so they don't restart a queue after we stop it below. 
1953 */ 1954 synchronize_rcu(); 1955 1956 if (test_bit(0, &adapter->resetting)) 1957 netif_tx_disable(netdev); 1958 else 1959 netif_tx_stop_all_queues(netdev); 1960 1961 ibmvnic_napi_disable(adapter); 1962 ibmvnic_disable_irqs(adapter); 1963 } 1964 1965 static int __ibmvnic_close(struct net_device *netdev) 1966 { 1967 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 1968 int rc = 0; 1969 1970 adapter->state = VNIC_CLOSING; 1971 rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN); 1972 adapter->state = VNIC_CLOSED; 1973 return rc; 1974 } 1975 1976 static int ibmvnic_close(struct net_device *netdev) 1977 { 1978 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 1979 int rc; 1980 1981 netdev_dbg(netdev, "[S:%s FOP:%d FRR:%d] Closing\n", 1982 adapter_state_to_string(adapter->state), 1983 adapter->failover_pending, 1984 adapter->force_reset_recovery); 1985 1986 /* If device failover is pending, just set device state and return. 1987 * Device operation will be handled by reset routine. 1988 */ 1989 if (adapter->failover_pending) { 1990 adapter->state = VNIC_CLOSED; 1991 return 0; 1992 } 1993 1994 rc = __ibmvnic_close(netdev); 1995 ibmvnic_cleanup(netdev); 1996 clean_rx_pools(adapter); 1997 clean_tx_pools(adapter); 1998 1999 return rc; 2000 } 2001 2002 /** 2003 * build_hdr_data - creates L2/L3/L4 header data buffer 2004 * @hdr_field: bitfield determining needed headers 2005 * @skb: socket buffer 2006 * @hdr_len: array of header lengths 2007 * @hdr_data: buffer to write the header to 2008 * 2009 * Reads hdr_field to determine which headers are needed by firmware. 2010 * Builds a buffer containing these headers. Saves individual header 2011 * lengths and total buffer length to be used to build descriptors. 2012 */ 2013 static int build_hdr_data(u8 hdr_field, struct sk_buff *skb, 2014 int *hdr_len, u8 *hdr_data) 2015 { 2016 int len = 0; 2017 u8 *hdr; 2018 2019 if (skb_vlan_tagged(skb) && !skb_vlan_tag_present(skb)) 2020 hdr_len[0] = sizeof(struct vlan_ethhdr); 2021 else 2022 hdr_len[0] = sizeof(struct ethhdr); 2023 2024 if (skb->protocol == htons(ETH_P_IP)) { 2025 hdr_len[1] = ip_hdr(skb)->ihl * 4; 2026 if (ip_hdr(skb)->protocol == IPPROTO_TCP) 2027 hdr_len[2] = tcp_hdrlen(skb); 2028 else if (ip_hdr(skb)->protocol == IPPROTO_UDP) 2029 hdr_len[2] = sizeof(struct udphdr); 2030 } else if (skb->protocol == htons(ETH_P_IPV6)) { 2031 hdr_len[1] = sizeof(struct ipv6hdr); 2032 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) 2033 hdr_len[2] = tcp_hdrlen(skb); 2034 else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP) 2035 hdr_len[2] = sizeof(struct udphdr); 2036 } else if (skb->protocol == htons(ETH_P_ARP)) { 2037 hdr_len[1] = arp_hdr_len(skb->dev); 2038 hdr_len[2] = 0; 2039 } 2040 2041 memset(hdr_data, 0, 120); 2042 if ((hdr_field >> 6) & 1) { 2043 hdr = skb_mac_header(skb); 2044 memcpy(hdr_data, hdr, hdr_len[0]); 2045 len += hdr_len[0]; 2046 } 2047 2048 if ((hdr_field >> 5) & 1) { 2049 hdr = skb_network_header(skb); 2050 memcpy(hdr_data + len, hdr, hdr_len[1]); 2051 len += hdr_len[1]; 2052 } 2053 2054 if ((hdr_field >> 4) & 1) { 2055 hdr = skb_transport_header(skb); 2056 memcpy(hdr_data + len, hdr, hdr_len[2]); 2057 len += hdr_len[2]; 2058 } 2059 return len; 2060 } 2061 2062 /** 2063 * create_hdr_descs - create header and header extension descriptors 2064 * @hdr_field: bitfield determining needed headers 2065 * @hdr_data: buffer containing header data 2066 * @len: length of data buffer 2067 * @hdr_len: array of individual header lengths 2068 * @scrq_arr: descriptor array 2069 * 2070 * Creates header and, 
if needed, header extension descriptors and 2071 * places them in a descriptor array, scrq_arr 2072 */ 2073 2074 static int create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len, 2075 union sub_crq *scrq_arr) 2076 { 2077 union sub_crq hdr_desc; 2078 int tmp_len = len; 2079 int num_descs = 0; 2080 u8 *data, *cur; 2081 int tmp; 2082 2083 while (tmp_len > 0) { 2084 cur = hdr_data + len - tmp_len; 2085 2086 memset(&hdr_desc, 0, sizeof(hdr_desc)); 2087 if (cur != hdr_data) { 2088 data = hdr_desc.hdr_ext.data; 2089 tmp = tmp_len > 29 ? 29 : tmp_len; 2090 hdr_desc.hdr_ext.first = IBMVNIC_CRQ_CMD; 2091 hdr_desc.hdr_ext.type = IBMVNIC_HDR_EXT_DESC; 2092 hdr_desc.hdr_ext.len = tmp; 2093 } else { 2094 data = hdr_desc.hdr.data; 2095 tmp = tmp_len > 24 ? 24 : tmp_len; 2096 hdr_desc.hdr.first = IBMVNIC_CRQ_CMD; 2097 hdr_desc.hdr.type = IBMVNIC_HDR_DESC; 2098 hdr_desc.hdr.len = tmp; 2099 hdr_desc.hdr.l2_len = (u8)hdr_len[0]; 2100 hdr_desc.hdr.l3_len = cpu_to_be16((u16)hdr_len[1]); 2101 hdr_desc.hdr.l4_len = (u8)hdr_len[2]; 2102 hdr_desc.hdr.flag = hdr_field << 1; 2103 } 2104 memcpy(data, cur, tmp); 2105 tmp_len -= tmp; 2106 *scrq_arr = hdr_desc; 2107 scrq_arr++; 2108 num_descs++; 2109 } 2110 2111 return num_descs; 2112 } 2113 2114 /** 2115 * build_hdr_descs_arr - build a header descriptor array 2116 * @skb: tx socket buffer 2117 * @indir_arr: indirect array 2118 * @num_entries: number of descriptors to be sent 2119 * @hdr_field: bit field determining which headers will be sent 2120 * 2121 * This function will build a TX descriptor array with applicable 2122 * L2/L3/L4 packet header descriptors to be sent by send_subcrq_indirect. 2123 */ 2124 2125 static void build_hdr_descs_arr(struct sk_buff *skb, 2126 union sub_crq *indir_arr, 2127 int *num_entries, u8 hdr_field) 2128 { 2129 int hdr_len[3] = {0, 0, 0}; 2130 u8 hdr_data[140] = {0}; 2131 int tot_len; 2132 2133 tot_len = build_hdr_data(hdr_field, skb, hdr_len, 2134 hdr_data); 2135 *num_entries += create_hdr_descs(hdr_field, hdr_data, tot_len, hdr_len, 2136 indir_arr + 1); 2137 } 2138 2139 static int ibmvnic_xmit_workarounds(struct sk_buff *skb, 2140 struct net_device *netdev) 2141 { 2142 /* For some backing devices, mishandling of small packets 2143 * can result in a loss of connection or TX stall. 
Device 2144 * architects recommend that no packet should be smaller 2145 * than the minimum MTU value provided to the driver, so 2146 * pad any packets to that length 2147 */ 2148 if (skb->len < netdev->min_mtu) 2149 return skb_put_padto(skb, netdev->min_mtu); 2150 2151 return 0; 2152 } 2153 2154 static void ibmvnic_tx_scrq_clean_buffer(struct ibmvnic_adapter *adapter, 2155 struct ibmvnic_sub_crq_queue *tx_scrq) 2156 { 2157 struct ibmvnic_ind_xmit_queue *ind_bufp; 2158 struct ibmvnic_tx_buff *tx_buff; 2159 struct ibmvnic_tx_pool *tx_pool; 2160 union sub_crq tx_scrq_entry; 2161 int queue_num; 2162 int entries; 2163 int index; 2164 int i; 2165 2166 ind_bufp = &tx_scrq->ind_buf; 2167 entries = (u64)ind_bufp->index; 2168 queue_num = tx_scrq->pool_index; 2169 2170 for (i = entries - 1; i >= 0; --i) { 2171 tx_scrq_entry = ind_bufp->indir_arr[i]; 2172 if (tx_scrq_entry.v1.type != IBMVNIC_TX_DESC) 2173 continue; 2174 index = be32_to_cpu(tx_scrq_entry.v1.correlator); 2175 if (index & IBMVNIC_TSO_POOL_MASK) { 2176 tx_pool = &adapter->tso_pool[queue_num]; 2177 index &= ~IBMVNIC_TSO_POOL_MASK; 2178 } else { 2179 tx_pool = &adapter->tx_pool[queue_num]; 2180 } 2181 tx_pool->free_map[tx_pool->consumer_index] = index; 2182 tx_pool->consumer_index = tx_pool->consumer_index == 0 ? 2183 tx_pool->num_buffers - 1 : 2184 tx_pool->consumer_index - 1; 2185 tx_buff = &tx_pool->tx_buff[index]; 2186 adapter->netdev->stats.tx_packets--; 2187 adapter->netdev->stats.tx_bytes -= tx_buff->skb->len; 2188 adapter->tx_stats_buffers[queue_num].packets--; 2189 adapter->tx_stats_buffers[queue_num].bytes -= 2190 tx_buff->skb->len; 2191 dev_kfree_skb_any(tx_buff->skb); 2192 tx_buff->skb = NULL; 2193 adapter->netdev->stats.tx_dropped++; 2194 } 2195 2196 ind_bufp->index = 0; 2197 2198 if (atomic_sub_return(entries, &tx_scrq->used) <= 2199 (adapter->req_tx_entries_per_subcrq / 2) && 2200 __netif_subqueue_stopped(adapter->netdev, queue_num)) { 2201 rcu_read_lock(); 2202 2203 if (adapter->tx_queues_active) { 2204 netif_wake_subqueue(adapter->netdev, queue_num); 2205 netdev_dbg(adapter->netdev, "Started queue %d\n", 2206 queue_num); 2207 } 2208 2209 rcu_read_unlock(); 2210 } 2211 } 2212 2213 static int ibmvnic_tx_scrq_flush(struct ibmvnic_adapter *adapter, 2214 struct ibmvnic_sub_crq_queue *tx_scrq) 2215 { 2216 struct ibmvnic_ind_xmit_queue *ind_bufp; 2217 u64 dma_addr; 2218 u64 entries; 2219 u64 handle; 2220 int rc; 2221 2222 ind_bufp = &tx_scrq->ind_buf; 2223 dma_addr = (u64)ind_bufp->indir_dma; 2224 entries = (u64)ind_bufp->index; 2225 handle = tx_scrq->handle; 2226 2227 if (!entries) 2228 return 0; 2229 rc = send_subcrq_indirect(adapter, handle, dma_addr, entries); 2230 if (rc) 2231 ibmvnic_tx_scrq_clean_buffer(adapter, tx_scrq); 2232 else 2233 ind_bufp->index = 0; 2234 return 0; 2235 } 2236 2237 static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) 2238 { 2239 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 2240 int queue_num = skb_get_queue_mapping(skb); 2241 u8 *hdrs = (u8 *)&adapter->tx_rx_desc_req; 2242 struct device *dev = &adapter->vdev->dev; 2243 struct ibmvnic_ind_xmit_queue *ind_bufp; 2244 struct ibmvnic_tx_buff *tx_buff = NULL; 2245 struct ibmvnic_sub_crq_queue *tx_scrq; 2246 struct ibmvnic_long_term_buff *ltb; 2247 struct ibmvnic_tx_pool *tx_pool; 2248 unsigned int tx_send_failed = 0; 2249 netdev_tx_t ret = NETDEV_TX_OK; 2250 unsigned int tx_map_failed = 0; 2251 union sub_crq indir_arr[16]; 2252 unsigned int tx_dropped = 0; 2253 unsigned int tx_packets = 0; 2254 unsigned int tx_bytes = 
0;
	dma_addr_t data_dma_addr;
	struct netdev_queue *txq;
	unsigned long lpar_rc;
	union sub_crq tx_crq;
	unsigned int offset;
	int num_entries = 1;
	unsigned char *dst;
	int bufidx = 0;
	u8 proto = 0;

	/* If a reset is in progress, drop the packet since
	 * the scrqs may get torn down. Otherwise use the
	 * rcu to ensure reset waits for us to complete.
	 */
	rcu_read_lock();
	if (!adapter->tx_queues_active) {
		dev_kfree_skb_any(skb);

		tx_send_failed++;
		tx_dropped++;
		ret = NETDEV_TX_OK;
		goto out;
	}

	tx_scrq = adapter->tx_scrq[queue_num];
	txq = netdev_get_tx_queue(netdev, queue_num);
	ind_bufp = &tx_scrq->ind_buf;

	if (ibmvnic_xmit_workarounds(skb, netdev)) {
		tx_dropped++;
		tx_send_failed++;
		ret = NETDEV_TX_OK;
		ibmvnic_tx_scrq_flush(adapter, tx_scrq);
		goto out;
	}

	if (skb_is_gso(skb))
		tx_pool = &adapter->tso_pool[queue_num];
	else
		tx_pool = &adapter->tx_pool[queue_num];

	bufidx = tx_pool->free_map[tx_pool->consumer_index];

	if (bufidx == IBMVNIC_INVALID_MAP) {
		dev_kfree_skb_any(skb);
		tx_send_failed++;
		tx_dropped++;
		ibmvnic_tx_scrq_flush(adapter, tx_scrq);
		ret = NETDEV_TX_OK;
		goto out;
	}

	tx_pool->free_map[tx_pool->consumer_index] = IBMVNIC_INVALID_MAP;

	map_txpool_buf_to_ltb(tx_pool, bufidx, &ltb, &offset);

	dst = ltb->buff + offset;
	memset(dst, 0, tx_pool->buf_size);
	data_dma_addr = ltb->addr + offset;

	if (skb_shinfo(skb)->nr_frags) {
		int cur, i;

		/* Copy the head */
		skb_copy_from_linear_data(skb, dst, skb_headlen(skb));
		cur = skb_headlen(skb);

		/* Copy the frags */
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			memcpy(dst + cur, skb_frag_address(frag),
			       skb_frag_size(frag));
			cur += skb_frag_size(frag);
		}
	} else {
		skb_copy_from_linear_data(skb, dst, skb->len);
	}

	/* post changes to long_term_buff *dst before VIOS accessing it */
	dma_wmb();

	tx_pool->consumer_index =
		(tx_pool->consumer_index + 1) % tx_pool->num_buffers;

	tx_buff = &tx_pool->tx_buff[bufidx];
	tx_buff->skb = skb;
	tx_buff->index = bufidx;
	tx_buff->pool_index = queue_num;

	memset(&tx_crq, 0, sizeof(tx_crq));
	tx_crq.v1.first = IBMVNIC_CRQ_CMD;
	tx_crq.v1.type = IBMVNIC_TX_DESC;
	tx_crq.v1.n_crq_elem = 1;
	tx_crq.v1.n_sge = 1;
	tx_crq.v1.flags1 = IBMVNIC_TX_COMP_NEEDED;

	if (skb_is_gso(skb))
		tx_crq.v1.correlator =
			cpu_to_be32(bufidx | IBMVNIC_TSO_POOL_MASK);
	else
		tx_crq.v1.correlator = cpu_to_be32(bufidx);
	tx_crq.v1.dma_reg = cpu_to_be16(ltb->map_id);
	tx_crq.v1.sge_len = cpu_to_be32(skb->len);
	tx_crq.v1.ioba = cpu_to_be64(data_dma_addr);

	if (adapter->vlan_header_insertion && skb_vlan_tag_present(skb)) {
		tx_crq.v1.flags2 |= IBMVNIC_TX_VLAN_INSERT;
		tx_crq.v1.vlan_id = cpu_to_be16(skb->vlan_tci);
	}

	if (skb->protocol == htons(ETH_P_IP)) {
		tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV4;
		proto = ip_hdr(skb)->protocol;
	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV6;
		proto = ipv6_hdr(skb)->nexthdr;
	}

	if (proto == IPPROTO_TCP)
		tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_TCP;
	else if (proto ==
IPPROTO_UDP) 2377 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_UDP; 2378 2379 if (skb->ip_summed == CHECKSUM_PARTIAL) { 2380 tx_crq.v1.flags1 |= IBMVNIC_TX_CHKSUM_OFFLOAD; 2381 hdrs += 2; 2382 } 2383 if (skb_is_gso(skb)) { 2384 tx_crq.v1.flags1 |= IBMVNIC_TX_LSO; 2385 tx_crq.v1.mss = cpu_to_be16(skb_shinfo(skb)->gso_size); 2386 hdrs += 2; 2387 } 2388 2389 if ((*hdrs >> 7) & 1) 2390 build_hdr_descs_arr(skb, indir_arr, &num_entries, *hdrs); 2391 2392 tx_crq.v1.n_crq_elem = num_entries; 2393 tx_buff->num_entries = num_entries; 2394 /* flush buffer if current entry can not fit */ 2395 if (num_entries + ind_bufp->index > IBMVNIC_MAX_IND_DESCS) { 2396 lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq); 2397 if (lpar_rc != H_SUCCESS) 2398 goto tx_flush_err; 2399 } 2400 2401 indir_arr[0] = tx_crq; 2402 memcpy(&ind_bufp->indir_arr[ind_bufp->index], &indir_arr[0], 2403 num_entries * sizeof(struct ibmvnic_generic_scrq)); 2404 ind_bufp->index += num_entries; 2405 if (__netdev_tx_sent_queue(txq, skb->len, 2406 netdev_xmit_more() && 2407 ind_bufp->index < IBMVNIC_MAX_IND_DESCS)) { 2408 lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq); 2409 if (lpar_rc != H_SUCCESS) 2410 goto tx_err; 2411 } 2412 2413 if (atomic_add_return(num_entries, &tx_scrq->used) 2414 >= adapter->req_tx_entries_per_subcrq) { 2415 netdev_dbg(netdev, "Stopping queue %d\n", queue_num); 2416 netif_stop_subqueue(netdev, queue_num); 2417 } 2418 2419 tx_packets++; 2420 tx_bytes += skb->len; 2421 txq_trans_cond_update(txq); 2422 ret = NETDEV_TX_OK; 2423 goto out; 2424 2425 tx_flush_err: 2426 dev_kfree_skb_any(skb); 2427 tx_buff->skb = NULL; 2428 tx_pool->consumer_index = tx_pool->consumer_index == 0 ? 2429 tx_pool->num_buffers - 1 : 2430 tx_pool->consumer_index - 1; 2431 tx_dropped++; 2432 tx_err: 2433 if (lpar_rc != H_CLOSED && lpar_rc != H_PARAMETER) 2434 dev_err_ratelimited(dev, "tx: send failed\n"); 2435 2436 if (lpar_rc == H_CLOSED || adapter->failover_pending) { 2437 /* Disable TX and report carrier off if queue is closed 2438 * or pending failover. 2439 * Firmware guarantees that a signal will be sent to the 2440 * driver, triggering a reset or some other action. 
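		 * (H_CLOSED from the send typically indicates the partner
		 * device has closed its side of the sub-CRQ.)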
2441 */ 2442 netif_tx_stop_all_queues(netdev); 2443 netif_carrier_off(netdev); 2444 } 2445 out: 2446 rcu_read_unlock(); 2447 netdev->stats.tx_dropped += tx_dropped; 2448 netdev->stats.tx_bytes += tx_bytes; 2449 netdev->stats.tx_packets += tx_packets; 2450 adapter->tx_send_failed += tx_send_failed; 2451 adapter->tx_map_failed += tx_map_failed; 2452 adapter->tx_stats_buffers[queue_num].packets += tx_packets; 2453 adapter->tx_stats_buffers[queue_num].bytes += tx_bytes; 2454 adapter->tx_stats_buffers[queue_num].dropped_packets += tx_dropped; 2455 2456 return ret; 2457 } 2458 2459 static void ibmvnic_set_multi(struct net_device *netdev) 2460 { 2461 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 2462 struct netdev_hw_addr *ha; 2463 union ibmvnic_crq crq; 2464 2465 memset(&crq, 0, sizeof(crq)); 2466 crq.request_capability.first = IBMVNIC_CRQ_CMD; 2467 crq.request_capability.cmd = REQUEST_CAPABILITY; 2468 2469 if (netdev->flags & IFF_PROMISC) { 2470 if (!adapter->promisc_supported) 2471 return; 2472 } else { 2473 if (netdev->flags & IFF_ALLMULTI) { 2474 /* Accept all multicast */ 2475 memset(&crq, 0, sizeof(crq)); 2476 crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD; 2477 crq.multicast_ctrl.cmd = MULTICAST_CTRL; 2478 crq.multicast_ctrl.flags = IBMVNIC_ENABLE_ALL; 2479 ibmvnic_send_crq(adapter, &crq); 2480 } else if (netdev_mc_empty(netdev)) { 2481 /* Reject all multicast */ 2482 memset(&crq, 0, sizeof(crq)); 2483 crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD; 2484 crq.multicast_ctrl.cmd = MULTICAST_CTRL; 2485 crq.multicast_ctrl.flags = IBMVNIC_DISABLE_ALL; 2486 ibmvnic_send_crq(adapter, &crq); 2487 } else { 2488 /* Accept one or more multicast(s) */ 2489 netdev_for_each_mc_addr(ha, netdev) { 2490 memset(&crq, 0, sizeof(crq)); 2491 crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD; 2492 crq.multicast_ctrl.cmd = MULTICAST_CTRL; 2493 crq.multicast_ctrl.flags = IBMVNIC_ENABLE_MC; 2494 ether_addr_copy(&crq.multicast_ctrl.mac_addr[0], 2495 ha->addr); 2496 ibmvnic_send_crq(adapter, &crq); 2497 } 2498 } 2499 } 2500 } 2501 2502 static int __ibmvnic_set_mac(struct net_device *netdev, u8 *dev_addr) 2503 { 2504 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 2505 union ibmvnic_crq crq; 2506 int rc; 2507 2508 if (!is_valid_ether_addr(dev_addr)) { 2509 rc = -EADDRNOTAVAIL; 2510 goto err; 2511 } 2512 2513 memset(&crq, 0, sizeof(crq)); 2514 crq.change_mac_addr.first = IBMVNIC_CRQ_CMD; 2515 crq.change_mac_addr.cmd = CHANGE_MAC_ADDR; 2516 ether_addr_copy(&crq.change_mac_addr.mac_addr[0], dev_addr); 2517 2518 mutex_lock(&adapter->fw_lock); 2519 adapter->fw_done_rc = 0; 2520 reinit_completion(&adapter->fw_done); 2521 2522 rc = ibmvnic_send_crq(adapter, &crq); 2523 if (rc) { 2524 rc = -EIO; 2525 mutex_unlock(&adapter->fw_lock); 2526 goto err; 2527 } 2528 2529 rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000); 2530 /* netdev->dev_addr is changed in handle_change_mac_rsp function */ 2531 if (rc || adapter->fw_done_rc) { 2532 rc = -EIO; 2533 mutex_unlock(&adapter->fw_lock); 2534 goto err; 2535 } 2536 mutex_unlock(&adapter->fw_lock); 2537 return 0; 2538 err: 2539 ether_addr_copy(adapter->mac_addr, netdev->dev_addr); 2540 return rc; 2541 } 2542 2543 static int ibmvnic_set_mac(struct net_device *netdev, void *p) 2544 { 2545 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 2546 struct sockaddr *addr = p; 2547 int rc; 2548 2549 rc = 0; 2550 if (!is_valid_ether_addr(addr->sa_data)) 2551 return -EADDRNOTAVAIL; 2552 2553 ether_addr_copy(adapter->mac_addr, addr->sa_data); 2554 if (adapter->state != 
VNIC_PROBED) 2555 rc = __ibmvnic_set_mac(netdev, addr->sa_data); 2556 2557 return rc; 2558 } 2559 2560 static const char *reset_reason_to_string(enum ibmvnic_reset_reason reason) 2561 { 2562 switch (reason) { 2563 case VNIC_RESET_FAILOVER: 2564 return "FAILOVER"; 2565 case VNIC_RESET_MOBILITY: 2566 return "MOBILITY"; 2567 case VNIC_RESET_FATAL: 2568 return "FATAL"; 2569 case VNIC_RESET_NON_FATAL: 2570 return "NON_FATAL"; 2571 case VNIC_RESET_TIMEOUT: 2572 return "TIMEOUT"; 2573 case VNIC_RESET_CHANGE_PARAM: 2574 return "CHANGE_PARAM"; 2575 case VNIC_RESET_PASSIVE_INIT: 2576 return "PASSIVE_INIT"; 2577 } 2578 return "UNKNOWN"; 2579 } 2580 2581 /* 2582 * Initialize the init_done completion and return code values. We 2583 * can get a transport event just after registering the CRQ and the 2584 * tasklet will use this to communicate the transport event. To ensure 2585 * we don't miss the notification/error, initialize these _before_ 2586 * regisering the CRQ. 2587 */ 2588 static inline void reinit_init_done(struct ibmvnic_adapter *adapter) 2589 { 2590 reinit_completion(&adapter->init_done); 2591 adapter->init_done_rc = 0; 2592 } 2593 2594 /* 2595 * do_reset returns zero if we are able to keep processing reset events, or 2596 * non-zero if we hit a fatal error and must halt. 2597 */ 2598 static int do_reset(struct ibmvnic_adapter *adapter, 2599 struct ibmvnic_rwi *rwi, u32 reset_state) 2600 { 2601 struct net_device *netdev = adapter->netdev; 2602 u64 old_num_rx_queues, old_num_tx_queues; 2603 u64 old_num_rx_slots, old_num_tx_slots; 2604 int rc; 2605 2606 netdev_dbg(adapter->netdev, 2607 "[S:%s FOP:%d] Reset reason: %s, reset_state: %s\n", 2608 adapter_state_to_string(adapter->state), 2609 adapter->failover_pending, 2610 reset_reason_to_string(rwi->reset_reason), 2611 adapter_state_to_string(reset_state)); 2612 2613 adapter->reset_reason = rwi->reset_reason; 2614 /* requestor of VNIC_RESET_CHANGE_PARAM already has the rtnl lock */ 2615 if (!(adapter->reset_reason == VNIC_RESET_CHANGE_PARAM)) 2616 rtnl_lock(); 2617 2618 /* Now that we have the rtnl lock, clear any pending failover. 2619 * This will ensure ibmvnic_open() has either completed or will 2620 * block until failover is complete. 2621 */ 2622 if (rwi->reset_reason == VNIC_RESET_FAILOVER) 2623 adapter->failover_pending = false; 2624 2625 /* read the state and check (again) after getting rtnl */ 2626 reset_state = adapter->state; 2627 2628 if (reset_state == VNIC_REMOVING || reset_state == VNIC_REMOVED) { 2629 rc = -EBUSY; 2630 goto out; 2631 } 2632 2633 netif_carrier_off(netdev); 2634 2635 old_num_rx_queues = adapter->req_rx_queues; 2636 old_num_tx_queues = adapter->req_tx_queues; 2637 old_num_rx_slots = adapter->req_rx_add_entries_per_subcrq; 2638 old_num_tx_slots = adapter->req_tx_entries_per_subcrq; 2639 2640 ibmvnic_cleanup(netdev); 2641 2642 if (reset_state == VNIC_OPEN && 2643 adapter->reset_reason != VNIC_RESET_MOBILITY && 2644 adapter->reset_reason != VNIC_RESET_FAILOVER) { 2645 if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) { 2646 rc = __ibmvnic_close(netdev); 2647 if (rc) 2648 goto out; 2649 } else { 2650 adapter->state = VNIC_CLOSING; 2651 2652 /* Release the RTNL lock before link state change and 2653 * re-acquire after the link state change to allow 2654 * linkwatch_event to grab the RTNL lock and run during 2655 * a reset. 
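			 * ibmvnic_open() may also run in that window; the
			 * VNIC_OPEN check below handles the state it sets.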
2656 */ 2657 rtnl_unlock(); 2658 rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN); 2659 rtnl_lock(); 2660 if (rc) 2661 goto out; 2662 2663 if (adapter->state == VNIC_OPEN) { 2664 /* When we dropped rtnl, ibmvnic_open() got 2665 * it and noticed that we are resetting and 2666 * set the adapter state to OPEN. Update our 2667 * new "target" state, and resume the reset 2668 * from VNIC_CLOSING state. 2669 */ 2670 netdev_dbg(netdev, 2671 "Open changed state from %s, updating.\n", 2672 adapter_state_to_string(reset_state)); 2673 reset_state = VNIC_OPEN; 2674 adapter->state = VNIC_CLOSING; 2675 } 2676 2677 if (adapter->state != VNIC_CLOSING) { 2678 /* If someone else changed the adapter state 2679 * when we dropped the rtnl, fail the reset 2680 */ 2681 rc = -EAGAIN; 2682 goto out; 2683 } 2684 adapter->state = VNIC_CLOSED; 2685 } 2686 } 2687 2688 if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) { 2689 release_resources(adapter); 2690 release_sub_crqs(adapter, 1); 2691 release_crq_queue(adapter); 2692 } 2693 2694 if (adapter->reset_reason != VNIC_RESET_NON_FATAL) { 2695 /* remove the closed state so when we call open it appears 2696 * we are coming from the probed state. 2697 */ 2698 adapter->state = VNIC_PROBED; 2699 2700 reinit_init_done(adapter); 2701 2702 if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) { 2703 rc = init_crq_queue(adapter); 2704 } else if (adapter->reset_reason == VNIC_RESET_MOBILITY) { 2705 rc = ibmvnic_reenable_crq_queue(adapter); 2706 release_sub_crqs(adapter, 1); 2707 } else { 2708 rc = ibmvnic_reset_crq(adapter); 2709 if (rc == H_CLOSED || rc == H_SUCCESS) { 2710 rc = vio_enable_interrupts(adapter->vdev); 2711 if (rc) 2712 netdev_err(adapter->netdev, 2713 "Reset failed to enable interrupts. rc=%d\n", 2714 rc); 2715 } 2716 } 2717 2718 if (rc) { 2719 netdev_err(adapter->netdev, 2720 "Reset couldn't initialize crq. rc=%d\n", rc); 2721 goto out; 2722 } 2723 2724 rc = ibmvnic_reset_init(adapter, true); 2725 if (rc) 2726 goto out; 2727 2728 /* If the adapter was in PROBE or DOWN state prior to the reset, 2729 * exit here. 
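		 * The device was not open before the reset, so there is
		 * nothing further to restore.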
2730 */ 2731 if (reset_state == VNIC_PROBED || reset_state == VNIC_DOWN) { 2732 rc = 0; 2733 goto out; 2734 } 2735 2736 rc = ibmvnic_login(netdev); 2737 if (rc) 2738 goto out; 2739 2740 if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) { 2741 rc = init_resources(adapter); 2742 if (rc) 2743 goto out; 2744 } else if (adapter->req_rx_queues != old_num_rx_queues || 2745 adapter->req_tx_queues != old_num_tx_queues || 2746 adapter->req_rx_add_entries_per_subcrq != 2747 old_num_rx_slots || 2748 adapter->req_tx_entries_per_subcrq != 2749 old_num_tx_slots || 2750 !adapter->rx_pool || 2751 !adapter->tso_pool || 2752 !adapter->tx_pool) { 2753 release_napi(adapter); 2754 release_vpd_data(adapter); 2755 2756 rc = init_resources(adapter); 2757 if (rc) 2758 goto out; 2759 2760 } else { 2761 rc = init_tx_pools(netdev); 2762 if (rc) { 2763 netdev_dbg(netdev, 2764 "init tx pools failed (%d)\n", 2765 rc); 2766 goto out; 2767 } 2768 2769 rc = init_rx_pools(netdev); 2770 if (rc) { 2771 netdev_dbg(netdev, 2772 "init rx pools failed (%d)\n", 2773 rc); 2774 goto out; 2775 } 2776 } 2777 ibmvnic_disable_irqs(adapter); 2778 } 2779 adapter->state = VNIC_CLOSED; 2780 2781 if (reset_state == VNIC_CLOSED) { 2782 rc = 0; 2783 goto out; 2784 } 2785 2786 rc = __ibmvnic_open(netdev); 2787 if (rc) { 2788 rc = IBMVNIC_OPEN_FAILED; 2789 goto out; 2790 } 2791 2792 /* refresh device's multicast list */ 2793 ibmvnic_set_multi(netdev); 2794 2795 if (adapter->reset_reason == VNIC_RESET_FAILOVER || 2796 adapter->reset_reason == VNIC_RESET_MOBILITY) 2797 __netdev_notify_peers(netdev); 2798 2799 rc = 0; 2800 2801 out: 2802 /* restore the adapter state if reset failed */ 2803 if (rc) 2804 adapter->state = reset_state; 2805 /* requestor of VNIC_RESET_CHANGE_PARAM should still hold the rtnl lock */ 2806 if (!(adapter->reset_reason == VNIC_RESET_CHANGE_PARAM)) 2807 rtnl_unlock(); 2808 2809 netdev_dbg(adapter->netdev, "[S:%s FOP:%d] Reset done, rc %d\n", 2810 adapter_state_to_string(adapter->state), 2811 adapter->failover_pending, rc); 2812 return rc; 2813 } 2814 2815 static int do_hard_reset(struct ibmvnic_adapter *adapter, 2816 struct ibmvnic_rwi *rwi, u32 reset_state) 2817 { 2818 struct net_device *netdev = adapter->netdev; 2819 int rc; 2820 2821 netdev_dbg(adapter->netdev, "Hard resetting driver (%s)\n", 2822 reset_reason_to_string(rwi->reset_reason)); 2823 2824 /* read the state and check (again) after getting rtnl */ 2825 reset_state = adapter->state; 2826 2827 if (reset_state == VNIC_REMOVING || reset_state == VNIC_REMOVED) { 2828 rc = -EBUSY; 2829 goto out; 2830 } 2831 2832 netif_carrier_off(netdev); 2833 adapter->reset_reason = rwi->reset_reason; 2834 2835 ibmvnic_cleanup(netdev); 2836 release_resources(adapter); 2837 release_sub_crqs(adapter, 0); 2838 release_crq_queue(adapter); 2839 2840 /* remove the closed state so when we call open it appears 2841 * we are coming from the probed state. 2842 */ 2843 adapter->state = VNIC_PROBED; 2844 2845 reinit_init_done(adapter); 2846 2847 rc = init_crq_queue(adapter); 2848 if (rc) { 2849 netdev_err(adapter->netdev, 2850 "Couldn't initialize crq. rc=%d\n", rc); 2851 goto out; 2852 } 2853 2854 rc = ibmvnic_reset_init(adapter, false); 2855 if (rc) 2856 goto out; 2857 2858 /* If the adapter was in PROBE or DOWN state prior to the reset, 2859 * exit here. 
2860 */ 2861 if (reset_state == VNIC_PROBED || reset_state == VNIC_DOWN) 2862 goto out; 2863 2864 rc = ibmvnic_login(netdev); 2865 if (rc) 2866 goto out; 2867 2868 rc = init_resources(adapter); 2869 if (rc) 2870 goto out; 2871 2872 ibmvnic_disable_irqs(adapter); 2873 adapter->state = VNIC_CLOSED; 2874 2875 if (reset_state == VNIC_CLOSED) 2876 goto out; 2877 2878 rc = __ibmvnic_open(netdev); 2879 if (rc) { 2880 rc = IBMVNIC_OPEN_FAILED; 2881 goto out; 2882 } 2883 2884 __netdev_notify_peers(netdev); 2885 out: 2886 /* restore adapter state if reset failed */ 2887 if (rc) 2888 adapter->state = reset_state; 2889 netdev_dbg(adapter->netdev, "[S:%s FOP:%d] Hard reset done, rc %d\n", 2890 adapter_state_to_string(adapter->state), 2891 adapter->failover_pending, rc); 2892 return rc; 2893 } 2894 2895 static struct ibmvnic_rwi *get_next_rwi(struct ibmvnic_adapter *adapter) 2896 { 2897 struct ibmvnic_rwi *rwi; 2898 unsigned long flags; 2899 2900 spin_lock_irqsave(&adapter->rwi_lock, flags); 2901 2902 if (!list_empty(&adapter->rwi_list)) { 2903 rwi = list_first_entry(&adapter->rwi_list, struct ibmvnic_rwi, 2904 list); 2905 list_del(&rwi->list); 2906 } else { 2907 rwi = NULL; 2908 } 2909 2910 spin_unlock_irqrestore(&adapter->rwi_lock, flags); 2911 return rwi; 2912 } 2913 2914 /** 2915 * do_passive_init - complete probing when partner device is detected. 2916 * @adapter: ibmvnic_adapter struct 2917 * 2918 * If the ibmvnic device does not have a partner device to communicate with at boot 2919 * and that partner device comes online at a later time, this function is called 2920 * to complete the initialization process of ibmvnic device. 2921 * Caller is expected to hold rtnl_lock(). 2922 * 2923 * Returns non-zero if sub-CRQs are not initialized properly leaving the device 2924 * in the down state. 2925 * Returns 0 upon success and the device is in PROBED state. 2926 */ 2927 2928 static int do_passive_init(struct ibmvnic_adapter *adapter) 2929 { 2930 unsigned long timeout = msecs_to_jiffies(30000); 2931 struct net_device *netdev = adapter->netdev; 2932 struct device *dev = &adapter->vdev->dev; 2933 int rc; 2934 2935 netdev_dbg(netdev, "Partner device found, probing.\n"); 2936 2937 adapter->state = VNIC_PROBING; 2938 reinit_completion(&adapter->init_done); 2939 adapter->init_done_rc = 0; 2940 adapter->crq.active = true; 2941 2942 rc = send_crq_init_complete(adapter); 2943 if (rc) 2944 goto out; 2945 2946 rc = send_version_xchg(adapter); 2947 if (rc) 2948 netdev_dbg(adapter->netdev, "send_version_xchg failed, rc=%d\n", rc); 2949 2950 if (!wait_for_completion_timeout(&adapter->init_done, timeout)) { 2951 dev_err(dev, "Initialization sequence timed out\n"); 2952 rc = -ETIMEDOUT; 2953 goto out; 2954 } 2955 2956 rc = init_sub_crqs(adapter); 2957 if (rc) { 2958 dev_err(dev, "Initialization of sub crqs failed, rc=%d\n", rc); 2959 goto out; 2960 } 2961 2962 rc = init_sub_crq_irqs(adapter); 2963 if (rc) { 2964 dev_err(dev, "Failed to initialize sub crq irqs\n, rc=%d", rc); 2965 goto init_failed; 2966 } 2967 2968 netdev->mtu = adapter->req_mtu - ETH_HLEN; 2969 netdev->min_mtu = adapter->min_mtu - ETH_HLEN; 2970 netdev->max_mtu = adapter->max_mtu - ETH_HLEN; 2971 2972 adapter->state = VNIC_PROBED; 2973 netdev_dbg(netdev, "Probed successfully. 
Waiting for signal from partner device.\n");

	return 0;

init_failed:
	release_sub_crqs(adapter, 1);
out:
	adapter->state = VNIC_DOWN;
	return rc;
}

static void __ibmvnic_reset(struct work_struct *work)
{
	struct ibmvnic_adapter *adapter;
	unsigned int timeout = 5000;
	struct ibmvnic_rwi *tmprwi;
	bool saved_state = false;
	struct ibmvnic_rwi *rwi;
	unsigned long flags;
	struct device *dev;
	bool need_reset;
	int num_fails = 0;
	u32 reset_state;
	int rc = 0;

	adapter = container_of(work, struct ibmvnic_adapter, ibmvnic_reset);
	dev = &adapter->vdev->dev;

	/* Wait for ibmvnic_probe() to complete. If probe is taking too long
	 * or if another reset is in progress, defer work for now. If probe
	 * eventually fails it will flush and terminate our work.
	 *
	 * Three possibilities here:
	 * 1. Adapter being removed - just return
	 * 2. Timed out on probe or another reset in progress - delay the work
	 * 3. Completed probe - perform any resets in queue
	 */
	if (adapter->state == VNIC_PROBING &&
	    !wait_for_completion_timeout(&adapter->probe_done, timeout)) {
		dev_err(dev, "Reset thread timed out on probe");
		queue_delayed_work(system_long_wq,
				   &adapter->ibmvnic_delayed_reset,
				   IBMVNIC_RESET_DELAY);
		return;
	}

	/* adapter is done with probe (i.e. state is never VNIC_PROBING now) */
	if (adapter->state == VNIC_REMOVING)
		return;

	/* ->rwi_list is stable now (no one else is removing entries) */

	/* ibmvnic_probe() may have purged the reset queue after we were
	 * scheduled to process a reset, so there may be no resets to process.
	 * Before setting the ->resetting bit though, we have to make sure
	 * that there is in fact a reset to process. Otherwise we may race
	 * with ibmvnic_open() and end up leaving the vnic down:
	 *
	 *	__ibmvnic_reset()		ibmvnic_open()
	 *	-----------------		--------------
	 *
	 *	set ->resetting bit
	 *					find ->resetting bit is set
	 *					set ->state to IBMVNIC_OPEN (i.e.
	 *					assume reset will open device)
	 *					return
	 *	find reset queue empty
	 *	return
	 *
	 *	Neither performed vnic login/open and vnic stays down
	 *
	 * If we hold the lock and conditionally set the bit, either we
	 * or ibmvnic_open() will complete the open.
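	 * Hence the queue check and test_and_set_bit_lock() below are both
	 * done while holding ->rwi_lock.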
3046 */ 3047 need_reset = false; 3048 spin_lock(&adapter->rwi_lock); 3049 if (!list_empty(&adapter->rwi_list)) { 3050 if (test_and_set_bit_lock(0, &adapter->resetting)) { 3051 queue_delayed_work(system_long_wq, 3052 &adapter->ibmvnic_delayed_reset, 3053 IBMVNIC_RESET_DELAY); 3054 } else { 3055 need_reset = true; 3056 } 3057 } 3058 spin_unlock(&adapter->rwi_lock); 3059 3060 if (!need_reset) 3061 return; 3062 3063 rwi = get_next_rwi(adapter); 3064 while (rwi) { 3065 spin_lock_irqsave(&adapter->state_lock, flags); 3066 3067 if (adapter->state == VNIC_REMOVING || 3068 adapter->state == VNIC_REMOVED) { 3069 spin_unlock_irqrestore(&adapter->state_lock, flags); 3070 kfree(rwi); 3071 rc = EBUSY; 3072 break; 3073 } 3074 3075 if (!saved_state) { 3076 reset_state = adapter->state; 3077 saved_state = true; 3078 } 3079 spin_unlock_irqrestore(&adapter->state_lock, flags); 3080 3081 if (rwi->reset_reason == VNIC_RESET_PASSIVE_INIT) { 3082 rtnl_lock(); 3083 rc = do_passive_init(adapter); 3084 rtnl_unlock(); 3085 if (!rc) 3086 netif_carrier_on(adapter->netdev); 3087 } else if (adapter->force_reset_recovery) { 3088 /* Since we are doing a hard reset now, clear the 3089 * failover_pending flag so we don't ignore any 3090 * future MOBILITY or other resets. 3091 */ 3092 adapter->failover_pending = false; 3093 3094 /* Transport event occurred during previous reset */ 3095 if (adapter->wait_for_reset) { 3096 /* Previous was CHANGE_PARAM; caller locked */ 3097 adapter->force_reset_recovery = false; 3098 rc = do_hard_reset(adapter, rwi, reset_state); 3099 } else { 3100 rtnl_lock(); 3101 adapter->force_reset_recovery = false; 3102 rc = do_hard_reset(adapter, rwi, reset_state); 3103 rtnl_unlock(); 3104 } 3105 if (rc) 3106 num_fails++; 3107 else 3108 num_fails = 0; 3109 3110 /* If auto-priority-failover is enabled we can get 3111 * back to back failovers during resets, resulting 3112 * in at least two failed resets (from high-priority 3113 * backing device to low-priority one and then back) 3114 * If resets continue to fail beyond that, give the 3115 * adapter some time to settle down before retrying. 3116 */ 3117 if (num_fails >= 3) { 3118 netdev_dbg(adapter->netdev, 3119 "[S:%s] Hard reset failed %d times, waiting 60 secs\n", 3120 adapter_state_to_string(adapter->state), 3121 num_fails); 3122 set_current_state(TASK_UNINTERRUPTIBLE); 3123 schedule_timeout(60 * HZ); 3124 } 3125 } else { 3126 rc = do_reset(adapter, rwi, reset_state); 3127 } 3128 tmprwi = rwi; 3129 adapter->last_reset_time = jiffies; 3130 3131 if (rc) 3132 netdev_dbg(adapter->netdev, "Reset failed, rc=%d\n", rc); 3133 3134 rwi = get_next_rwi(adapter); 3135 3136 /* 3137 * If there are no resets queued and the previous reset failed, 3138 * the adapter would be in an undefined state. So retry the 3139 * previous reset as a hard reset. 3140 * 3141 * Else, free the previous rwi and, if there is another reset 3142 * queued, process the new reset even if previous reset failed 3143 * (the previous reset could have failed because of a fail 3144 * over for instance, so process the fail over). 
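		 * Either way, tmprwi is consumed below: reused as the next
		 * rwi for the retry or freed.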
		 */
		if (!rwi && rc)
			rwi = tmprwi;
		else
			kfree(tmprwi);

		if (rwi && (rwi->reset_reason == VNIC_RESET_FAILOVER ||
			    rwi->reset_reason == VNIC_RESET_MOBILITY || rc))
			adapter->force_reset_recovery = true;
	}

	if (adapter->wait_for_reset) {
		adapter->reset_done_rc = rc;
		complete(&adapter->reset_done);
	}

	clear_bit_unlock(0, &adapter->resetting);

	netdev_dbg(adapter->netdev,
		   "[S:%s FRR:%d WFR:%d] Done processing resets\n",
		   adapter_state_to_string(adapter->state),
		   adapter->force_reset_recovery,
		   adapter->wait_for_reset);
}

static void __ibmvnic_delayed_reset(struct work_struct *work)
{
	struct ibmvnic_adapter *adapter;

	adapter = container_of(work, struct ibmvnic_adapter,
			       ibmvnic_delayed_reset.work);
	__ibmvnic_reset(&adapter->ibmvnic_reset);
}

static void flush_reset_queue(struct ibmvnic_adapter *adapter)
{
	struct list_head *entry, *tmp_entry;

	if (!list_empty(&adapter->rwi_list)) {
		list_for_each_safe(entry, tmp_entry, &adapter->rwi_list) {
			list_del(entry);
			kfree(list_entry(entry, struct ibmvnic_rwi, list));
		}
	}
}

static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
			 enum ibmvnic_reset_reason reason)
{
	struct net_device *netdev = adapter->netdev;
	struct ibmvnic_rwi *rwi, *tmp;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&adapter->rwi_lock, flags);

	/* If failover is pending, don't schedule any other reset.
	 * Instead let the failover complete. If there is already a
	 * failover reset scheduled, we will detect and drop the
	 * duplicate reset when walking the ->rwi_list below.
3205 */ 3206 if (adapter->state == VNIC_REMOVING || 3207 adapter->state == VNIC_REMOVED || 3208 (adapter->failover_pending && reason != VNIC_RESET_FAILOVER)) { 3209 ret = EBUSY; 3210 netdev_dbg(netdev, "Adapter removing or pending failover, skipping reset\n"); 3211 goto err; 3212 } 3213 3214 list_for_each_entry(tmp, &adapter->rwi_list, list) { 3215 if (tmp->reset_reason == reason) { 3216 netdev_dbg(netdev, "Skipping matching reset, reason=%s\n", 3217 reset_reason_to_string(reason)); 3218 ret = EBUSY; 3219 goto err; 3220 } 3221 } 3222 3223 rwi = kzalloc(sizeof(*rwi), GFP_ATOMIC); 3224 if (!rwi) { 3225 ret = ENOMEM; 3226 goto err; 3227 } 3228 /* if we just received a transport event, 3229 * flush reset queue and process this reset 3230 */ 3231 if (adapter->force_reset_recovery) 3232 flush_reset_queue(adapter); 3233 3234 rwi->reset_reason = reason; 3235 list_add_tail(&rwi->list, &adapter->rwi_list); 3236 netdev_dbg(adapter->netdev, "Scheduling reset (reason %s)\n", 3237 reset_reason_to_string(reason)); 3238 queue_work(system_long_wq, &adapter->ibmvnic_reset); 3239 3240 ret = 0; 3241 err: 3242 /* ibmvnic_close() below can block, so drop the lock first */ 3243 spin_unlock_irqrestore(&adapter->rwi_lock, flags); 3244 3245 if (ret == ENOMEM) 3246 ibmvnic_close(netdev); 3247 3248 return -ret; 3249 } 3250 3251 static void ibmvnic_tx_timeout(struct net_device *dev, unsigned int txqueue) 3252 { 3253 struct ibmvnic_adapter *adapter = netdev_priv(dev); 3254 3255 if (test_bit(0, &adapter->resetting)) { 3256 netdev_err(adapter->netdev, 3257 "Adapter is resetting, skip timeout reset\n"); 3258 return; 3259 } 3260 /* No queuing up reset until at least 5 seconds (default watchdog val) 3261 * after last reset 3262 */ 3263 if (time_before(jiffies, (adapter->last_reset_time + dev->watchdog_timeo))) { 3264 netdev_dbg(dev, "Not yet time to tx timeout.\n"); 3265 return; 3266 } 3267 ibmvnic_reset(adapter, VNIC_RESET_TIMEOUT); 3268 } 3269 3270 static void remove_buff_from_pool(struct ibmvnic_adapter *adapter, 3271 struct ibmvnic_rx_buff *rx_buff) 3272 { 3273 struct ibmvnic_rx_pool *pool = &adapter->rx_pool[rx_buff->pool_index]; 3274 3275 rx_buff->skb = NULL; 3276 3277 pool->free_map[pool->next_alloc] = (int)(rx_buff - pool->rx_buff); 3278 pool->next_alloc = (pool->next_alloc + 1) % pool->size; 3279 3280 atomic_dec(&pool->available); 3281 } 3282 3283 static int ibmvnic_poll(struct napi_struct *napi, int budget) 3284 { 3285 struct ibmvnic_sub_crq_queue *rx_scrq; 3286 struct ibmvnic_adapter *adapter; 3287 struct net_device *netdev; 3288 int frames_processed; 3289 int scrq_num; 3290 3291 netdev = napi->dev; 3292 adapter = netdev_priv(netdev); 3293 scrq_num = (int)(napi - adapter->napi); 3294 frames_processed = 0; 3295 rx_scrq = adapter->rx_scrq[scrq_num]; 3296 3297 restart_poll: 3298 while (frames_processed < budget) { 3299 struct sk_buff *skb; 3300 struct ibmvnic_rx_buff *rx_buff; 3301 union sub_crq *next; 3302 u32 length; 3303 u16 offset; 3304 u8 flags = 0; 3305 3306 if (unlikely(test_bit(0, &adapter->resetting) && 3307 adapter->reset_reason != VNIC_RESET_NON_FATAL)) { 3308 enable_scrq_irq(adapter, rx_scrq); 3309 napi_complete_done(napi, frames_processed); 3310 return frames_processed; 3311 } 3312 3313 if (!pending_scrq(adapter, rx_scrq)) 3314 break; 3315 next = ibmvnic_next_scrq(adapter, rx_scrq); 3316 rx_buff = (struct ibmvnic_rx_buff *) 3317 be64_to_cpu(next->rx_comp.correlator); 3318 /* do error checking */ 3319 if (next->rx_comp.rc) { 3320 netdev_dbg(netdev, "rx buffer returned with rc %x\n", 3321 
be16_to_cpu(next->rx_comp.rc)); 3322 /* free the entry */ 3323 next->rx_comp.first = 0; 3324 dev_kfree_skb_any(rx_buff->skb); 3325 remove_buff_from_pool(adapter, rx_buff); 3326 continue; 3327 } else if (!rx_buff->skb) { 3328 /* free the entry */ 3329 next->rx_comp.first = 0; 3330 remove_buff_from_pool(adapter, rx_buff); 3331 continue; 3332 } 3333 3334 length = be32_to_cpu(next->rx_comp.len); 3335 offset = be16_to_cpu(next->rx_comp.off_frame_data); 3336 flags = next->rx_comp.flags; 3337 skb = rx_buff->skb; 3338 /* load long_term_buff before copying to skb */ 3339 dma_rmb(); 3340 skb_copy_to_linear_data(skb, rx_buff->data + offset, 3341 length); 3342 3343 /* VLAN Header has been stripped by the system firmware and 3344 * needs to be inserted by the driver 3345 */ 3346 if (adapter->rx_vlan_header_insertion && 3347 (flags & IBMVNIC_VLAN_STRIPPED)) 3348 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), 3349 ntohs(next->rx_comp.vlan_tci)); 3350 3351 /* free the entry */ 3352 next->rx_comp.first = 0; 3353 remove_buff_from_pool(adapter, rx_buff); 3354 3355 skb_put(skb, length); 3356 skb->protocol = eth_type_trans(skb, netdev); 3357 skb_record_rx_queue(skb, scrq_num); 3358 3359 if (flags & IBMVNIC_IP_CHKSUM_GOOD && 3360 flags & IBMVNIC_TCP_UDP_CHKSUM_GOOD) { 3361 skb->ip_summed = CHECKSUM_UNNECESSARY; 3362 } 3363 3364 length = skb->len; 3365 napi_gro_receive(napi, skb); /* send it up */ 3366 netdev->stats.rx_packets++; 3367 netdev->stats.rx_bytes += length; 3368 adapter->rx_stats_buffers[scrq_num].packets++; 3369 adapter->rx_stats_buffers[scrq_num].bytes += length; 3370 frames_processed++; 3371 } 3372 3373 if (adapter->state != VNIC_CLOSING && 3374 ((atomic_read(&adapter->rx_pool[scrq_num].available) < 3375 adapter->req_rx_add_entries_per_subcrq / 2) || 3376 frames_processed < budget)) 3377 replenish_rx_pool(adapter, &adapter->rx_pool[scrq_num]); 3378 if (frames_processed < budget) { 3379 if (napi_complete_done(napi, frames_processed)) { 3380 enable_scrq_irq(adapter, rx_scrq); 3381 if (pending_scrq(adapter, rx_scrq)) { 3382 if (napi_reschedule(napi)) { 3383 disable_scrq_irq(adapter, rx_scrq); 3384 goto restart_poll; 3385 } 3386 } 3387 } 3388 } 3389 return frames_processed; 3390 } 3391 3392 static int wait_for_reset(struct ibmvnic_adapter *adapter) 3393 { 3394 int rc, ret; 3395 3396 adapter->fallback.mtu = adapter->req_mtu; 3397 adapter->fallback.rx_queues = adapter->req_rx_queues; 3398 adapter->fallback.tx_queues = adapter->req_tx_queues; 3399 adapter->fallback.rx_entries = adapter->req_rx_add_entries_per_subcrq; 3400 adapter->fallback.tx_entries = adapter->req_tx_entries_per_subcrq; 3401 3402 reinit_completion(&adapter->reset_done); 3403 adapter->wait_for_reset = true; 3404 rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM); 3405 3406 if (rc) { 3407 ret = rc; 3408 goto out; 3409 } 3410 rc = ibmvnic_wait_for_completion(adapter, &adapter->reset_done, 60000); 3411 if (rc) { 3412 ret = -ENODEV; 3413 goto out; 3414 } 3415 3416 ret = 0; 3417 if (adapter->reset_done_rc) { 3418 ret = -EIO; 3419 adapter->desired.mtu = adapter->fallback.mtu; 3420 adapter->desired.rx_queues = adapter->fallback.rx_queues; 3421 adapter->desired.tx_queues = adapter->fallback.tx_queues; 3422 adapter->desired.rx_entries = adapter->fallback.rx_entries; 3423 adapter->desired.tx_entries = adapter->fallback.tx_entries; 3424 3425 reinit_completion(&adapter->reset_done); 3426 adapter->wait_for_reset = true; 3427 rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM); 3428 if (rc) { 3429 ret = rc; 3430 goto out; 3431 } 3432 rc = 
ibmvnic_wait_for_completion(adapter, &adapter->reset_done, 3433 60000); 3434 if (rc) { 3435 ret = -ENODEV; 3436 goto out; 3437 } 3438 } 3439 out: 3440 adapter->wait_for_reset = false; 3441 3442 return ret; 3443 } 3444 3445 static int ibmvnic_change_mtu(struct net_device *netdev, int new_mtu) 3446 { 3447 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 3448 3449 adapter->desired.mtu = new_mtu + ETH_HLEN; 3450 3451 return wait_for_reset(adapter); 3452 } 3453 3454 static netdev_features_t ibmvnic_features_check(struct sk_buff *skb, 3455 struct net_device *dev, 3456 netdev_features_t features) 3457 { 3458 /* Some backing hardware adapters can not 3459 * handle packets with a MSS less than 224 3460 * or with only one segment. 3461 */ 3462 if (skb_is_gso(skb)) { 3463 if (skb_shinfo(skb)->gso_size < 224 || 3464 skb_shinfo(skb)->gso_segs == 1) 3465 features &= ~NETIF_F_GSO_MASK; 3466 } 3467 3468 return features; 3469 } 3470 3471 static const struct net_device_ops ibmvnic_netdev_ops = { 3472 .ndo_open = ibmvnic_open, 3473 .ndo_stop = ibmvnic_close, 3474 .ndo_start_xmit = ibmvnic_xmit, 3475 .ndo_set_rx_mode = ibmvnic_set_multi, 3476 .ndo_set_mac_address = ibmvnic_set_mac, 3477 .ndo_validate_addr = eth_validate_addr, 3478 .ndo_tx_timeout = ibmvnic_tx_timeout, 3479 .ndo_change_mtu = ibmvnic_change_mtu, 3480 .ndo_features_check = ibmvnic_features_check, 3481 }; 3482 3483 /* ethtool functions */ 3484 3485 static int ibmvnic_get_link_ksettings(struct net_device *netdev, 3486 struct ethtool_link_ksettings *cmd) 3487 { 3488 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 3489 int rc; 3490 3491 rc = send_query_phys_parms(adapter); 3492 if (rc) { 3493 adapter->speed = SPEED_UNKNOWN; 3494 adapter->duplex = DUPLEX_UNKNOWN; 3495 } 3496 cmd->base.speed = adapter->speed; 3497 cmd->base.duplex = adapter->duplex; 3498 cmd->base.port = PORT_FIBRE; 3499 cmd->base.phy_address = 0; 3500 cmd->base.autoneg = AUTONEG_ENABLE; 3501 3502 return 0; 3503 } 3504 3505 static void ibmvnic_get_drvinfo(struct net_device *netdev, 3506 struct ethtool_drvinfo *info) 3507 { 3508 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 3509 3510 strscpy(info->driver, ibmvnic_driver_name, sizeof(info->driver)); 3511 strscpy(info->version, IBMVNIC_DRIVER_VERSION, sizeof(info->version)); 3512 strscpy(info->fw_version, adapter->fw_version, 3513 sizeof(info->fw_version)); 3514 } 3515 3516 static u32 ibmvnic_get_msglevel(struct net_device *netdev) 3517 { 3518 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 3519 3520 return adapter->msg_enable; 3521 } 3522 3523 static void ibmvnic_set_msglevel(struct net_device *netdev, u32 data) 3524 { 3525 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 3526 3527 adapter->msg_enable = data; 3528 } 3529 3530 static u32 ibmvnic_get_link(struct net_device *netdev) 3531 { 3532 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 3533 3534 /* Don't need to send a query because we request a logical link up at 3535 * init and then we wait for link state indications 3536 */ 3537 return adapter->logical_link_state; 3538 } 3539 3540 static void ibmvnic_get_ringparam(struct net_device *netdev, 3541 struct ethtool_ringparam *ring, 3542 struct kernel_ethtool_ringparam *kernel_ring, 3543 struct netlink_ext_ack *extack) 3544 { 3545 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 3546 3547 ring->rx_max_pending = adapter->max_rx_add_entries_per_subcrq; 3548 ring->tx_max_pending = adapter->max_tx_entries_per_subcrq; 3549 ring->rx_mini_max_pending = 0; 3550 ring->rx_jumbo_max_pending = 0; 3551 
ring->rx_pending = adapter->req_rx_add_entries_per_subcrq; 3552 ring->tx_pending = adapter->req_tx_entries_per_subcrq; 3553 ring->rx_mini_pending = 0; 3554 ring->rx_jumbo_pending = 0; 3555 } 3556 3557 static int ibmvnic_set_ringparam(struct net_device *netdev, 3558 struct ethtool_ringparam *ring, 3559 struct kernel_ethtool_ringparam *kernel_ring, 3560 struct netlink_ext_ack *extack) 3561 { 3562 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 3563 3564 if (ring->rx_pending > adapter->max_rx_add_entries_per_subcrq || 3565 ring->tx_pending > adapter->max_tx_entries_per_subcrq) { 3566 netdev_err(netdev, "Invalid request.\n"); 3567 netdev_err(netdev, "Max tx buffers = %llu\n", 3568 adapter->max_rx_add_entries_per_subcrq); 3569 netdev_err(netdev, "Max rx buffers = %llu\n", 3570 adapter->max_tx_entries_per_subcrq); 3571 return -EINVAL; 3572 } 3573 3574 adapter->desired.rx_entries = ring->rx_pending; 3575 adapter->desired.tx_entries = ring->tx_pending; 3576 3577 return wait_for_reset(adapter); 3578 } 3579 3580 static void ibmvnic_get_channels(struct net_device *netdev, 3581 struct ethtool_channels *channels) 3582 { 3583 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 3584 3585 channels->max_rx = adapter->max_rx_queues; 3586 channels->max_tx = adapter->max_tx_queues; 3587 channels->max_other = 0; 3588 channels->max_combined = 0; 3589 channels->rx_count = adapter->req_rx_queues; 3590 channels->tx_count = adapter->req_tx_queues; 3591 channels->other_count = 0; 3592 channels->combined_count = 0; 3593 } 3594 3595 static int ibmvnic_set_channels(struct net_device *netdev, 3596 struct ethtool_channels *channels) 3597 { 3598 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 3599 3600 adapter->desired.rx_queues = channels->rx_count; 3601 adapter->desired.tx_queues = channels->tx_count; 3602 3603 return wait_for_reset(adapter); 3604 } 3605 3606 static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data) 3607 { 3608 struct ibmvnic_adapter *adapter = netdev_priv(dev); 3609 int i; 3610 3611 if (stringset != ETH_SS_STATS) 3612 return; 3613 3614 for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++, data += ETH_GSTRING_LEN) 3615 memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN); 3616 3617 for (i = 0; i < adapter->req_tx_queues; i++) { 3618 snprintf(data, ETH_GSTRING_LEN, "tx%d_packets", i); 3619 data += ETH_GSTRING_LEN; 3620 3621 snprintf(data, ETH_GSTRING_LEN, "tx%d_bytes", i); 3622 data += ETH_GSTRING_LEN; 3623 3624 snprintf(data, ETH_GSTRING_LEN, "tx%d_dropped_packets", i); 3625 data += ETH_GSTRING_LEN; 3626 } 3627 3628 for (i = 0; i < adapter->req_rx_queues; i++) { 3629 snprintf(data, ETH_GSTRING_LEN, "rx%d_packets", i); 3630 data += ETH_GSTRING_LEN; 3631 3632 snprintf(data, ETH_GSTRING_LEN, "rx%d_bytes", i); 3633 data += ETH_GSTRING_LEN; 3634 3635 snprintf(data, ETH_GSTRING_LEN, "rx%d_interrupts", i); 3636 data += ETH_GSTRING_LEN; 3637 } 3638 } 3639 3640 static int ibmvnic_get_sset_count(struct net_device *dev, int sset) 3641 { 3642 struct ibmvnic_adapter *adapter = netdev_priv(dev); 3643 3644 switch (sset) { 3645 case ETH_SS_STATS: 3646 return ARRAY_SIZE(ibmvnic_stats) + 3647 adapter->req_tx_queues * NUM_TX_STATS + 3648 adapter->req_rx_queues * NUM_RX_STATS; 3649 default: 3650 return -EOPNOTSUPP; 3651 } 3652 } 3653 3654 static void ibmvnic_get_ethtool_stats(struct net_device *dev, 3655 struct ethtool_stats *stats, u64 *data) 3656 { 3657 struct ibmvnic_adapter *adapter = netdev_priv(dev); 3658 union ibmvnic_crq crq; 3659 int i, j; 3660 int rc; 3661 3662 memset(&crq, 0, 
sizeof(crq)); 3663 crq.request_statistics.first = IBMVNIC_CRQ_CMD; 3664 crq.request_statistics.cmd = REQUEST_STATISTICS; 3665 crq.request_statistics.ioba = cpu_to_be32(adapter->stats_token); 3666 crq.request_statistics.len = 3667 cpu_to_be32(sizeof(struct ibmvnic_statistics)); 3668 3669 /* Wait for data to be written */ 3670 reinit_completion(&adapter->stats_done); 3671 rc = ibmvnic_send_crq(adapter, &crq); 3672 if (rc) 3673 return; 3674 rc = ibmvnic_wait_for_completion(adapter, &adapter->stats_done, 10000); 3675 if (rc) 3676 return; 3677 3678 for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++) 3679 data[i] = be64_to_cpu(IBMVNIC_GET_STAT 3680 (adapter, ibmvnic_stats[i].offset)); 3681 3682 for (j = 0; j < adapter->req_tx_queues; j++) { 3683 data[i] = adapter->tx_stats_buffers[j].packets; 3684 i++; 3685 data[i] = adapter->tx_stats_buffers[j].bytes; 3686 i++; 3687 data[i] = adapter->tx_stats_buffers[j].dropped_packets; 3688 i++; 3689 } 3690 3691 for (j = 0; j < adapter->req_rx_queues; j++) { 3692 data[i] = adapter->rx_stats_buffers[j].packets; 3693 i++; 3694 data[i] = adapter->rx_stats_buffers[j].bytes; 3695 i++; 3696 data[i] = adapter->rx_stats_buffers[j].interrupts; 3697 i++; 3698 } 3699 } 3700 3701 static const struct ethtool_ops ibmvnic_ethtool_ops = { 3702 .get_drvinfo = ibmvnic_get_drvinfo, 3703 .get_msglevel = ibmvnic_get_msglevel, 3704 .set_msglevel = ibmvnic_set_msglevel, 3705 .get_link = ibmvnic_get_link, 3706 .get_ringparam = ibmvnic_get_ringparam, 3707 .set_ringparam = ibmvnic_set_ringparam, 3708 .get_channels = ibmvnic_get_channels, 3709 .set_channels = ibmvnic_set_channels, 3710 .get_strings = ibmvnic_get_strings, 3711 .get_sset_count = ibmvnic_get_sset_count, 3712 .get_ethtool_stats = ibmvnic_get_ethtool_stats, 3713 .get_link_ksettings = ibmvnic_get_link_ksettings, 3714 }; 3715 3716 /* Routines for managing CRQs/sCRQs */ 3717 3718 static int reset_one_sub_crq_queue(struct ibmvnic_adapter *adapter, 3719 struct ibmvnic_sub_crq_queue *scrq) 3720 { 3721 int rc; 3722 3723 if (!scrq) { 3724 netdev_dbg(adapter->netdev, "Invalid scrq reset.\n"); 3725 return -EINVAL; 3726 } 3727 3728 if (scrq->irq) { 3729 free_irq(scrq->irq, scrq); 3730 irq_dispose_mapping(scrq->irq); 3731 scrq->irq = 0; 3732 } 3733 3734 if (scrq->msgs) { 3735 memset(scrq->msgs, 0, 4 * PAGE_SIZE); 3736 atomic_set(&scrq->used, 0); 3737 scrq->cur = 0; 3738 scrq->ind_buf.index = 0; 3739 } else { 3740 netdev_dbg(adapter->netdev, "Invalid scrq reset\n"); 3741 return -EINVAL; 3742 } 3743 3744 rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token, 3745 4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq); 3746 return rc; 3747 } 3748 3749 static int reset_sub_crq_queues(struct ibmvnic_adapter *adapter) 3750 { 3751 int i, rc; 3752 3753 if (!adapter->tx_scrq || !adapter->rx_scrq) 3754 return -EINVAL; 3755 3756 ibmvnic_clean_affinity(adapter); 3757 3758 for (i = 0; i < adapter->req_tx_queues; i++) { 3759 netdev_dbg(adapter->netdev, "Re-setting tx_scrq[%d]\n", i); 3760 rc = reset_one_sub_crq_queue(adapter, adapter->tx_scrq[i]); 3761 if (rc) 3762 return rc; 3763 } 3764 3765 for (i = 0; i < adapter->req_rx_queues; i++) { 3766 netdev_dbg(adapter->netdev, "Re-setting rx_scrq[%d]\n", i); 3767 rc = reset_one_sub_crq_queue(adapter, adapter->rx_scrq[i]); 3768 if (rc) 3769 return rc; 3770 } 3771 3772 return rc; 3773 } 3774 3775 static void release_sub_crq_queue(struct ibmvnic_adapter *adapter, 3776 struct ibmvnic_sub_crq_queue *scrq, 3777 bool do_h_free) 3778 { 3779 struct device *dev = &adapter->vdev->dev; 3780 long rc; 3781 3782 
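	/* H_FREE_SUB_CRQ can return H_BUSY or a long-busy code, so the
	 * release loop below retries until the hypervisor completes the free.
	 */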
netdev_dbg(adapter->netdev, "Releasing sub-CRQ\n"); 3783 3784 if (do_h_free) { 3785 /* Close the sub-crqs */ 3786 do { 3787 rc = plpar_hcall_norets(H_FREE_SUB_CRQ, 3788 adapter->vdev->unit_address, 3789 scrq->crq_num); 3790 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc)); 3791 3792 if (rc) { 3793 netdev_err(adapter->netdev, 3794 "Failed to release sub-CRQ %16lx, rc = %ld\n", 3795 scrq->crq_num, rc); 3796 } 3797 } 3798 3799 dma_free_coherent(dev, 3800 IBMVNIC_IND_ARR_SZ, 3801 scrq->ind_buf.indir_arr, 3802 scrq->ind_buf.indir_dma); 3803 3804 dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE, 3805 DMA_BIDIRECTIONAL); 3806 free_pages((unsigned long)scrq->msgs, 2); 3807 free_cpumask_var(scrq->affinity_mask); 3808 kfree(scrq); 3809 } 3810 3811 static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter 3812 *adapter) 3813 { 3814 struct device *dev = &adapter->vdev->dev; 3815 struct ibmvnic_sub_crq_queue *scrq; 3816 int rc; 3817 3818 scrq = kzalloc(sizeof(*scrq), GFP_KERNEL); 3819 if (!scrq) 3820 return NULL; 3821 3822 scrq->msgs = 3823 (union sub_crq *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 2); 3824 if (!scrq->msgs) { 3825 dev_warn(dev, "Couldn't allocate crq queue messages page\n"); 3826 goto zero_page_failed; 3827 } 3828 if (!zalloc_cpumask_var(&scrq->affinity_mask, GFP_KERNEL)) 3829 goto cpumask_alloc_failed; 3830 3831 scrq->msg_token = dma_map_single(dev, scrq->msgs, 4 * PAGE_SIZE, 3832 DMA_BIDIRECTIONAL); 3833 if (dma_mapping_error(dev, scrq->msg_token)) { 3834 dev_warn(dev, "Couldn't map crq queue messages page\n"); 3835 goto map_failed; 3836 } 3837 3838 rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token, 3839 4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq); 3840 3841 if (rc == H_RESOURCE) 3842 rc = ibmvnic_reset_crq(adapter); 3843 3844 if (rc == H_CLOSED) { 3845 dev_warn(dev, "Partner adapter not ready, waiting.\n"); 3846 } else if (rc) { 3847 dev_warn(dev, "Error %d registering sub-crq\n", rc); 3848 goto reg_failed; 3849 } 3850 3851 scrq->adapter = adapter; 3852 scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs); 3853 scrq->ind_buf.index = 0; 3854 3855 scrq->ind_buf.indir_arr = 3856 dma_alloc_coherent(dev, 3857 IBMVNIC_IND_ARR_SZ, 3858 &scrq->ind_buf.indir_dma, 3859 GFP_KERNEL); 3860 3861 if (!scrq->ind_buf.indir_arr) 3862 goto indir_failed; 3863 3864 spin_lock_init(&scrq->lock); 3865 3866 netdev_dbg(adapter->netdev, 3867 "sub-crq initialized, num %lx, hw_irq=%lx, irq=%x\n", 3868 scrq->crq_num, scrq->hw_irq, scrq->irq); 3869 3870 return scrq; 3871 3872 indir_failed: 3873 do { 3874 rc = plpar_hcall_norets(H_FREE_SUB_CRQ, 3875 adapter->vdev->unit_address, 3876 scrq->crq_num); 3877 } while (rc == H_BUSY || rc == H_IS_LONG_BUSY(rc)); 3878 reg_failed: 3879 dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE, 3880 DMA_BIDIRECTIONAL); 3881 map_failed: 3882 free_cpumask_var(scrq->affinity_mask); 3883 cpumask_alloc_failed: 3884 free_pages((unsigned long)scrq->msgs, 2); 3885 zero_page_failed: 3886 kfree(scrq); 3887 3888 return NULL; 3889 } 3890 3891 static void release_sub_crqs(struct ibmvnic_adapter *adapter, bool do_h_free) 3892 { 3893 int i; 3894 3895 ibmvnic_clean_affinity(adapter); 3896 if (adapter->tx_scrq) { 3897 for (i = 0; i < adapter->num_active_tx_scrqs; i++) { 3898 if (!adapter->tx_scrq[i]) 3899 continue; 3900 3901 netdev_dbg(adapter->netdev, "Releasing tx_scrq[%d]\n", 3902 i); 3903 ibmvnic_tx_scrq_clean_buffer(adapter, adapter->tx_scrq[i]); 3904 if (adapter->tx_scrq[i]->irq) { 3905 free_irq(adapter->tx_scrq[i]->irq, 3906 adapter->tx_scrq[i]); 3907 
irq_dispose_mapping(adapter->tx_scrq[i]->irq); 3908 adapter->tx_scrq[i]->irq = 0; 3909 } 3910 3911 release_sub_crq_queue(adapter, adapter->tx_scrq[i], 3912 do_h_free); 3913 } 3914 3915 kfree(adapter->tx_scrq); 3916 adapter->tx_scrq = NULL; 3917 adapter->num_active_tx_scrqs = 0; 3918 } 3919 3920 if (adapter->rx_scrq) { 3921 for (i = 0; i < adapter->num_active_rx_scrqs; i++) { 3922 if (!adapter->rx_scrq[i]) 3923 continue; 3924 3925 netdev_dbg(adapter->netdev, "Releasing rx_scrq[%d]\n", 3926 i); 3927 if (adapter->rx_scrq[i]->irq) { 3928 free_irq(adapter->rx_scrq[i]->irq, 3929 adapter->rx_scrq[i]); 3930 irq_dispose_mapping(adapter->rx_scrq[i]->irq); 3931 adapter->rx_scrq[i]->irq = 0; 3932 } 3933 3934 release_sub_crq_queue(adapter, adapter->rx_scrq[i], 3935 do_h_free); 3936 } 3937 3938 kfree(adapter->rx_scrq); 3939 adapter->rx_scrq = NULL; 3940 adapter->num_active_rx_scrqs = 0; 3941 } 3942 } 3943 3944 static int disable_scrq_irq(struct ibmvnic_adapter *adapter, 3945 struct ibmvnic_sub_crq_queue *scrq) 3946 { 3947 struct device *dev = &adapter->vdev->dev; 3948 unsigned long rc; 3949 3950 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address, 3951 H_DISABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0); 3952 if (rc) 3953 dev_err(dev, "Couldn't disable scrq irq 0x%lx. rc=%ld\n", 3954 scrq->hw_irq, rc); 3955 return rc; 3956 } 3957 3958 /* We can not use the IRQ chip EOI handler because that has the 3959 * unintended effect of changing the interrupt priority. 3960 */ 3961 static void ibmvnic_xics_eoi(struct device *dev, struct ibmvnic_sub_crq_queue *scrq) 3962 { 3963 u64 val = 0xff000000 | scrq->hw_irq; 3964 unsigned long rc; 3965 3966 rc = plpar_hcall_norets(H_EOI, val); 3967 if (rc) 3968 dev_err(dev, "H_EOI FAILED irq 0x%llx. rc=%ld\n", val, rc); 3969 } 3970 3971 /* Due to a firmware bug, the hypervisor can send an interrupt to a 3972 * transmit or receive queue just prior to a partition migration. 3973 * Force an EOI after migration. 3974 */ 3975 static void ibmvnic_clear_pending_interrupt(struct device *dev, 3976 struct ibmvnic_sub_crq_queue *scrq) 3977 { 3978 if (!xive_enabled()) 3979 ibmvnic_xics_eoi(dev, scrq); 3980 } 3981 3982 static int enable_scrq_irq(struct ibmvnic_adapter *adapter, 3983 struct ibmvnic_sub_crq_queue *scrq) 3984 { 3985 struct device *dev = &adapter->vdev->dev; 3986 unsigned long rc; 3987 3988 if (scrq->hw_irq > 0x100000000ULL) { 3989 dev_err(dev, "bad hw_irq = %lx\n", scrq->hw_irq); 3990 return 1; 3991 } 3992 3993 if (test_bit(0, &adapter->resetting) && 3994 adapter->reset_reason == VNIC_RESET_MOBILITY) { 3995 ibmvnic_clear_pending_interrupt(dev, scrq); 3996 } 3997 3998 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address, 3999 H_ENABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0); 4000 if (rc) 4001 dev_err(dev, "Couldn't enable scrq irq 0x%lx. 
rc=%ld\n", 4002 scrq->hw_irq, rc); 4003 return rc; 4004 } 4005 4006 static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter, 4007 struct ibmvnic_sub_crq_queue *scrq) 4008 { 4009 struct device *dev = &adapter->vdev->dev; 4010 struct ibmvnic_tx_pool *tx_pool; 4011 struct ibmvnic_tx_buff *txbuff; 4012 struct netdev_queue *txq; 4013 union sub_crq *next; 4014 int index; 4015 int i; 4016 4017 restart_loop: 4018 while (pending_scrq(adapter, scrq)) { 4019 unsigned int pool = scrq->pool_index; 4020 int num_entries = 0; 4021 int total_bytes = 0; 4022 int num_packets = 0; 4023 4024 next = ibmvnic_next_scrq(adapter, scrq); 4025 for (i = 0; i < next->tx_comp.num_comps; i++) { 4026 index = be32_to_cpu(next->tx_comp.correlators[i]); 4027 if (index & IBMVNIC_TSO_POOL_MASK) { 4028 tx_pool = &adapter->tso_pool[pool]; 4029 index &= ~IBMVNIC_TSO_POOL_MASK; 4030 } else { 4031 tx_pool = &adapter->tx_pool[pool]; 4032 } 4033 4034 txbuff = &tx_pool->tx_buff[index]; 4035 num_packets++; 4036 num_entries += txbuff->num_entries; 4037 if (txbuff->skb) { 4038 total_bytes += txbuff->skb->len; 4039 if (next->tx_comp.rcs[i]) { 4040 dev_err(dev, "tx error %x\n", 4041 next->tx_comp.rcs[i]); 4042 dev_kfree_skb_irq(txbuff->skb); 4043 } else { 4044 dev_consume_skb_irq(txbuff->skb); 4045 } 4046 txbuff->skb = NULL; 4047 } else { 4048 netdev_warn(adapter->netdev, 4049 "TX completion received with NULL socket buffer\n"); 4050 } 4051 tx_pool->free_map[tx_pool->producer_index] = index; 4052 tx_pool->producer_index = 4053 (tx_pool->producer_index + 1) % 4054 tx_pool->num_buffers; 4055 } 4056 /* remove tx_comp scrq*/ 4057 next->tx_comp.first = 0; 4058 4059 txq = netdev_get_tx_queue(adapter->netdev, scrq->pool_index); 4060 netdev_tx_completed_queue(txq, num_packets, total_bytes); 4061 4062 if (atomic_sub_return(num_entries, &scrq->used) <= 4063 (adapter->req_tx_entries_per_subcrq / 2) && 4064 __netif_subqueue_stopped(adapter->netdev, 4065 scrq->pool_index)) { 4066 rcu_read_lock(); 4067 if (adapter->tx_queues_active) { 4068 netif_wake_subqueue(adapter->netdev, 4069 scrq->pool_index); 4070 netdev_dbg(adapter->netdev, 4071 "Started queue %d\n", 4072 scrq->pool_index); 4073 } 4074 rcu_read_unlock(); 4075 } 4076 } 4077 4078 enable_scrq_irq(adapter, scrq); 4079 4080 if (pending_scrq(adapter, scrq)) { 4081 disable_scrq_irq(adapter, scrq); 4082 goto restart_loop; 4083 } 4084 4085 return 0; 4086 } 4087 4088 static irqreturn_t ibmvnic_interrupt_tx(int irq, void *instance) 4089 { 4090 struct ibmvnic_sub_crq_queue *scrq = instance; 4091 struct ibmvnic_adapter *adapter = scrq->adapter; 4092 4093 disable_scrq_irq(adapter, scrq); 4094 ibmvnic_complete_tx(adapter, scrq); 4095 4096 return IRQ_HANDLED; 4097 } 4098 4099 static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance) 4100 { 4101 struct ibmvnic_sub_crq_queue *scrq = instance; 4102 struct ibmvnic_adapter *adapter = scrq->adapter; 4103 4104 /* When booting a kdump kernel we can hit pending interrupts 4105 * prior to completing driver initialization. 
4106 */ 4107 if (unlikely(adapter->state != VNIC_OPEN)) 4108 return IRQ_NONE; 4109 4110 adapter->rx_stats_buffers[scrq->scrq_num].interrupts++; 4111 4112 if (napi_schedule_prep(&adapter->napi[scrq->scrq_num])) { 4113 disable_scrq_irq(adapter, scrq); 4114 __napi_schedule(&adapter->napi[scrq->scrq_num]); 4115 } 4116 4117 return IRQ_HANDLED; 4118 } 4119 4120 static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter) 4121 { 4122 struct device *dev = &adapter->vdev->dev; 4123 struct ibmvnic_sub_crq_queue *scrq; 4124 int i = 0, j = 0; 4125 int rc = 0; 4126 4127 for (i = 0; i < adapter->req_tx_queues; i++) { 4128 netdev_dbg(adapter->netdev, "Initializing tx_scrq[%d] irq\n", 4129 i); 4130 scrq = adapter->tx_scrq[i]; 4131 scrq->irq = irq_create_mapping(NULL, scrq->hw_irq); 4132 4133 if (!scrq->irq) { 4134 rc = -EINVAL; 4135 dev_err(dev, "Error mapping irq\n"); 4136 goto req_tx_irq_failed; 4137 } 4138 4139 snprintf(scrq->name, sizeof(scrq->name), "ibmvnic-%x-tx%d", 4140 adapter->vdev->unit_address, i); 4141 rc = request_irq(scrq->irq, ibmvnic_interrupt_tx, 4142 0, scrq->name, scrq); 4143 4144 if (rc) { 4145 dev_err(dev, "Couldn't register tx irq 0x%x. rc=%d\n", 4146 scrq->irq, rc); 4147 irq_dispose_mapping(scrq->irq); 4148 goto req_tx_irq_failed; 4149 } 4150 } 4151 4152 for (i = 0; i < adapter->req_rx_queues; i++) { 4153 netdev_dbg(adapter->netdev, "Initializing rx_scrq[%d] irq\n", 4154 i); 4155 scrq = adapter->rx_scrq[i]; 4156 scrq->irq = irq_create_mapping(NULL, scrq->hw_irq); 4157 if (!scrq->irq) { 4158 rc = -EINVAL; 4159 dev_err(dev, "Error mapping irq\n"); 4160 goto req_rx_irq_failed; 4161 } 4162 snprintf(scrq->name, sizeof(scrq->name), "ibmvnic-%x-rx%d", 4163 adapter->vdev->unit_address, i); 4164 rc = request_irq(scrq->irq, ibmvnic_interrupt_rx, 4165 0, scrq->name, scrq); 4166 if (rc) { 4167 dev_err(dev, "Couldn't register rx irq 0x%x. 
rc=%d\n", 4168 scrq->irq, rc); 4169 irq_dispose_mapping(scrq->irq); 4170 goto req_rx_irq_failed; 4171 } 4172 } 4173 4174 cpus_read_lock(); 4175 ibmvnic_set_affinity(adapter); 4176 cpus_read_unlock(); 4177 4178 return rc; 4179 4180 req_rx_irq_failed: 4181 for (j = 0; j < i; j++) { 4182 free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]); 4183 irq_dispose_mapping(adapter->rx_scrq[j]->irq); 4184 } 4185 i = adapter->req_tx_queues; 4186 req_tx_irq_failed: 4187 for (j = 0; j < i; j++) { 4188 free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]); 4189 irq_dispose_mapping(adapter->tx_scrq[j]->irq); 4190 } 4191 release_sub_crqs(adapter, 1); 4192 return rc; 4193 } 4194 4195 static int init_sub_crqs(struct ibmvnic_adapter *adapter) 4196 { 4197 struct device *dev = &adapter->vdev->dev; 4198 struct ibmvnic_sub_crq_queue **allqueues; 4199 int registered_queues = 0; 4200 int total_queues; 4201 int more = 0; 4202 int i; 4203 4204 total_queues = adapter->req_tx_queues + adapter->req_rx_queues; 4205 4206 allqueues = kcalloc(total_queues, sizeof(*allqueues), GFP_KERNEL); 4207 if (!allqueues) 4208 return -ENOMEM; 4209 4210 for (i = 0; i < total_queues; i++) { 4211 allqueues[i] = init_sub_crq_queue(adapter); 4212 if (!allqueues[i]) { 4213 dev_warn(dev, "Couldn't allocate all sub-crqs\n"); 4214 break; 4215 } 4216 registered_queues++; 4217 } 4218 4219 /* Make sure we were able to register the minimum number of queues */ 4220 if (registered_queues < 4221 adapter->min_tx_queues + adapter->min_rx_queues) { 4222 dev_err(dev, "Fatal: Couldn't init min number of sub-crqs\n"); 4223 goto tx_failed; 4224 } 4225 4226 /* Distribute the failed allocated queues*/ 4227 for (i = 0; i < total_queues - registered_queues + more ; i++) { 4228 netdev_dbg(adapter->netdev, "Reducing number of queues\n"); 4229 switch (i % 3) { 4230 case 0: 4231 if (adapter->req_rx_queues > adapter->min_rx_queues) 4232 adapter->req_rx_queues--; 4233 else 4234 more++; 4235 break; 4236 case 1: 4237 if (adapter->req_tx_queues > adapter->min_tx_queues) 4238 adapter->req_tx_queues--; 4239 else 4240 more++; 4241 break; 4242 } 4243 } 4244 4245 adapter->tx_scrq = kcalloc(adapter->req_tx_queues, 4246 sizeof(*adapter->tx_scrq), GFP_KERNEL); 4247 if (!adapter->tx_scrq) 4248 goto tx_failed; 4249 4250 for (i = 0; i < adapter->req_tx_queues; i++) { 4251 adapter->tx_scrq[i] = allqueues[i]; 4252 adapter->tx_scrq[i]->pool_index = i; 4253 adapter->num_active_tx_scrqs++; 4254 } 4255 4256 adapter->rx_scrq = kcalloc(adapter->req_rx_queues, 4257 sizeof(*adapter->rx_scrq), GFP_KERNEL); 4258 if (!adapter->rx_scrq) 4259 goto rx_failed; 4260 4261 for (i = 0; i < adapter->req_rx_queues; i++) { 4262 adapter->rx_scrq[i] = allqueues[i + adapter->req_tx_queues]; 4263 adapter->rx_scrq[i]->scrq_num = i; 4264 adapter->num_active_rx_scrqs++; 4265 } 4266 4267 kfree(allqueues); 4268 return 0; 4269 4270 rx_failed: 4271 kfree(adapter->tx_scrq); 4272 adapter->tx_scrq = NULL; 4273 tx_failed: 4274 for (i = 0; i < registered_queues; i++) 4275 release_sub_crq_queue(adapter, allqueues[i], 1); 4276 kfree(allqueues); 4277 return -ENOMEM; 4278 } 4279 4280 static void send_request_cap(struct ibmvnic_adapter *adapter, int retry) 4281 { 4282 struct device *dev = &adapter->vdev->dev; 4283 union ibmvnic_crq crq; 4284 int max_entries; 4285 int cap_reqs; 4286 4287 /* We send out 6 or 7 REQUEST_CAPABILITY CRQs below (depending on 4288 * the PROMISC flag). Initialize this count upfront. 
When the tasklet 4289 * receives a response to all of these, it will send the next protocol 4290 * message (QUERY_IP_OFFLOAD). 4291 */ 4292 if (!(adapter->netdev->flags & IFF_PROMISC) || 4293 adapter->promisc_supported) 4294 cap_reqs = 7; 4295 else 4296 cap_reqs = 6; 4297 4298 if (!retry) { 4299 /* Sub-CRQ entries are 32 byte long */ 4300 int entries_page = 4 * PAGE_SIZE / (sizeof(u64) * 4); 4301 4302 atomic_set(&adapter->running_cap_crqs, cap_reqs); 4303 4304 if (adapter->min_tx_entries_per_subcrq > entries_page || 4305 adapter->min_rx_add_entries_per_subcrq > entries_page) { 4306 dev_err(dev, "Fatal, invalid entries per sub-crq\n"); 4307 return; 4308 } 4309 4310 if (adapter->desired.mtu) 4311 adapter->req_mtu = adapter->desired.mtu; 4312 else 4313 adapter->req_mtu = adapter->netdev->mtu + ETH_HLEN; 4314 4315 if (!adapter->desired.tx_entries) 4316 adapter->desired.tx_entries = 4317 adapter->max_tx_entries_per_subcrq; 4318 if (!adapter->desired.rx_entries) 4319 adapter->desired.rx_entries = 4320 adapter->max_rx_add_entries_per_subcrq; 4321 4322 max_entries = IBMVNIC_LTB_SET_SIZE / 4323 (adapter->req_mtu + IBMVNIC_BUFFER_HLEN); 4324 4325 if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) * 4326 adapter->desired.tx_entries > IBMVNIC_LTB_SET_SIZE) { 4327 adapter->desired.tx_entries = max_entries; 4328 } 4329 4330 if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) * 4331 adapter->desired.rx_entries > IBMVNIC_LTB_SET_SIZE) { 4332 adapter->desired.rx_entries = max_entries; 4333 } 4334 4335 if (adapter->desired.tx_entries) 4336 adapter->req_tx_entries_per_subcrq = 4337 adapter->desired.tx_entries; 4338 else 4339 adapter->req_tx_entries_per_subcrq = 4340 adapter->max_tx_entries_per_subcrq; 4341 4342 if (adapter->desired.rx_entries) 4343 adapter->req_rx_add_entries_per_subcrq = 4344 adapter->desired.rx_entries; 4345 else 4346 adapter->req_rx_add_entries_per_subcrq = 4347 adapter->max_rx_add_entries_per_subcrq; 4348 4349 if (adapter->desired.tx_queues) 4350 adapter->req_tx_queues = 4351 adapter->desired.tx_queues; 4352 else 4353 adapter->req_tx_queues = 4354 adapter->opt_tx_comp_sub_queues; 4355 4356 if (adapter->desired.rx_queues) 4357 adapter->req_rx_queues = 4358 adapter->desired.rx_queues; 4359 else 4360 adapter->req_rx_queues = 4361 adapter->opt_rx_comp_queues; 4362 4363 adapter->req_rx_add_queues = adapter->max_rx_add_queues; 4364 } else { 4365 atomic_add(cap_reqs, &adapter->running_cap_crqs); 4366 } 4367 memset(&crq, 0, sizeof(crq)); 4368 crq.request_capability.first = IBMVNIC_CRQ_CMD; 4369 crq.request_capability.cmd = REQUEST_CAPABILITY; 4370 4371 crq.request_capability.capability = cpu_to_be16(REQ_TX_QUEUES); 4372 crq.request_capability.number = cpu_to_be64(adapter->req_tx_queues); 4373 cap_reqs--; 4374 ibmvnic_send_crq(adapter, &crq); 4375 4376 crq.request_capability.capability = cpu_to_be16(REQ_RX_QUEUES); 4377 crq.request_capability.number = cpu_to_be64(adapter->req_rx_queues); 4378 cap_reqs--; 4379 ibmvnic_send_crq(adapter, &crq); 4380 4381 crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_QUEUES); 4382 crq.request_capability.number = cpu_to_be64(adapter->req_rx_add_queues); 4383 cap_reqs--; 4384 ibmvnic_send_crq(adapter, &crq); 4385 4386 crq.request_capability.capability = 4387 cpu_to_be16(REQ_TX_ENTRIES_PER_SUBCRQ); 4388 crq.request_capability.number = 4389 cpu_to_be64(adapter->req_tx_entries_per_subcrq); 4390 cap_reqs--; 4391 ibmvnic_send_crq(adapter, &crq); 4392 4393 crq.request_capability.capability = 4394 cpu_to_be16(REQ_RX_ADD_ENTRIES_PER_SUBCRQ); 4395 crq.request_capability.number 
= 4396 cpu_to_be64(adapter->req_rx_add_entries_per_subcrq); 4397 cap_reqs--; 4398 ibmvnic_send_crq(adapter, &crq); 4399 4400 crq.request_capability.capability = cpu_to_be16(REQ_MTU); 4401 crq.request_capability.number = cpu_to_be64(adapter->req_mtu); 4402 cap_reqs--; 4403 ibmvnic_send_crq(adapter, &crq); 4404 4405 if (adapter->netdev->flags & IFF_PROMISC) { 4406 if (adapter->promisc_supported) { 4407 crq.request_capability.capability = 4408 cpu_to_be16(PROMISC_REQUESTED); 4409 crq.request_capability.number = cpu_to_be64(1); 4410 cap_reqs--; 4411 ibmvnic_send_crq(adapter, &crq); 4412 } 4413 } else { 4414 crq.request_capability.capability = 4415 cpu_to_be16(PROMISC_REQUESTED); 4416 crq.request_capability.number = cpu_to_be64(0); 4417 cap_reqs--; 4418 ibmvnic_send_crq(adapter, &crq); 4419 } 4420 4421 /* Keep at end to catch any discrepancy between expected and actual 4422 * CRQs sent. 4423 */ 4424 WARN_ON(cap_reqs != 0); 4425 } 4426 4427 static int pending_scrq(struct ibmvnic_adapter *adapter, 4428 struct ibmvnic_sub_crq_queue *scrq) 4429 { 4430 union sub_crq *entry = &scrq->msgs[scrq->cur]; 4431 int rc; 4432 4433 rc = !!(entry->generic.first & IBMVNIC_CRQ_CMD_RSP); 4434 4435 /* Ensure that the SCRQ valid flag is loaded prior to loading the 4436 * contents of the SCRQ descriptor 4437 */ 4438 dma_rmb(); 4439 4440 return rc; 4441 } 4442 4443 static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *adapter, 4444 struct ibmvnic_sub_crq_queue *scrq) 4445 { 4446 union sub_crq *entry; 4447 unsigned long flags; 4448 4449 spin_lock_irqsave(&scrq->lock, flags); 4450 entry = &scrq->msgs[scrq->cur]; 4451 if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP) { 4452 if (++scrq->cur == scrq->size) 4453 scrq->cur = 0; 4454 } else { 4455 entry = NULL; 4456 } 4457 spin_unlock_irqrestore(&scrq->lock, flags); 4458 4459 /* Ensure that the SCRQ valid flag is loaded prior to loading the 4460 * contents of the SCRQ descriptor 4461 */ 4462 dma_rmb(); 4463 4464 return entry; 4465 } 4466 4467 static union ibmvnic_crq *ibmvnic_next_crq(struct ibmvnic_adapter *adapter) 4468 { 4469 struct ibmvnic_crq_queue *queue = &adapter->crq; 4470 union ibmvnic_crq *crq; 4471 4472 crq = &queue->msgs[queue->cur]; 4473 if (crq->generic.first & IBMVNIC_CRQ_CMD_RSP) { 4474 if (++queue->cur == queue->size) 4475 queue->cur = 0; 4476 } else { 4477 crq = NULL; 4478 } 4479 4480 return crq; 4481 } 4482 4483 static void print_subcrq_error(struct device *dev, int rc, const char *func) 4484 { 4485 switch (rc) { 4486 case H_PARAMETER: 4487 dev_warn_ratelimited(dev, 4488 "%s failed: Send request is malformed or adapter failover pending. (rc=%d)\n", 4489 func, rc); 4490 break; 4491 case H_CLOSED: 4492 dev_warn_ratelimited(dev, 4493 "%s failed: Backing queue closed. Adapter is down or failover pending. 
(rc=%d)\n", 4494 func, rc); 4495 break; 4496 default: 4497 dev_err_ratelimited(dev, "%s failed: (rc=%d)\n", func, rc); 4498 break; 4499 } 4500 } 4501 4502 static int send_subcrq_indirect(struct ibmvnic_adapter *adapter, 4503 u64 remote_handle, u64 ioba, u64 num_entries) 4504 { 4505 unsigned int ua = adapter->vdev->unit_address; 4506 struct device *dev = &adapter->vdev->dev; 4507 int rc; 4508 4509 /* Make sure the hypervisor sees the complete request */ 4510 dma_wmb(); 4511 rc = plpar_hcall_norets(H_SEND_SUB_CRQ_INDIRECT, ua, 4512 cpu_to_be64(remote_handle), 4513 ioba, num_entries); 4514 4515 if (rc) 4516 print_subcrq_error(dev, rc, __func__); 4517 4518 return rc; 4519 } 4520 4521 static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter, 4522 union ibmvnic_crq *crq) 4523 { 4524 unsigned int ua = adapter->vdev->unit_address; 4525 struct device *dev = &adapter->vdev->dev; 4526 u64 *u64_crq = (u64 *)crq; 4527 int rc; 4528 4529 netdev_dbg(adapter->netdev, "Sending CRQ: %016lx %016lx\n", 4530 (unsigned long)cpu_to_be64(u64_crq[0]), 4531 (unsigned long)cpu_to_be64(u64_crq[1])); 4532 4533 if (!adapter->crq.active && 4534 crq->generic.first != IBMVNIC_CRQ_INIT_CMD) { 4535 dev_warn(dev, "Invalid request detected while CRQ is inactive, possible device state change during reset\n"); 4536 return -EINVAL; 4537 } 4538 4539 /* Make sure the hypervisor sees the complete request */ 4540 dma_wmb(); 4541 4542 rc = plpar_hcall_norets(H_SEND_CRQ, ua, 4543 cpu_to_be64(u64_crq[0]), 4544 cpu_to_be64(u64_crq[1])); 4545 4546 if (rc) { 4547 if (rc == H_CLOSED) { 4548 dev_warn(dev, "CRQ Queue closed\n"); 4549 /* do not reset, report the fail, wait for passive init from server */ 4550 } 4551 4552 dev_warn(dev, "Send error (rc=%d)\n", rc); 4553 } 4554 4555 return rc; 4556 } 4557 4558 static int ibmvnic_send_crq_init(struct ibmvnic_adapter *adapter) 4559 { 4560 struct device *dev = &adapter->vdev->dev; 4561 union ibmvnic_crq crq; 4562 int retries = 100; 4563 int rc; 4564 4565 memset(&crq, 0, sizeof(crq)); 4566 crq.generic.first = IBMVNIC_CRQ_INIT_CMD; 4567 crq.generic.cmd = IBMVNIC_CRQ_INIT; 4568 netdev_dbg(adapter->netdev, "Sending CRQ init\n"); 4569 4570 do { 4571 rc = ibmvnic_send_crq(adapter, &crq); 4572 if (rc != H_CLOSED) 4573 break; 4574 retries--; 4575 msleep(50); 4576 4577 } while (retries > 0); 4578 4579 if (rc) { 4580 dev_err(dev, "Failed to send init request, rc = %d\n", rc); 4581 return rc; 4582 } 4583 4584 return 0; 4585 } 4586 4587 struct vnic_login_client_data { 4588 u8 type; 4589 __be16 len; 4590 char name[]; 4591 } __packed; 4592 4593 static int vnic_client_data_len(struct ibmvnic_adapter *adapter) 4594 { 4595 int len; 4596 4597 /* Calculate the amount of buffer space needed for the 4598 * vnic client data in the login buffer. There are four entries, 4599 * OS name, LPAR name, device name, and a null last entry. 
4600 */ 4601 len = 4 * sizeof(struct vnic_login_client_data); 4602 len += 6; /* "Linux" plus NULL */ 4603 len += strlen(utsname()->nodename) + 1; 4604 len += strlen(adapter->netdev->name) + 1; 4605 4606 return len; 4607 } 4608 4609 static void vnic_add_client_data(struct ibmvnic_adapter *adapter, 4610 struct vnic_login_client_data *vlcd) 4611 { 4612 const char *os_name = "Linux"; 4613 int len; 4614 4615 /* Type 1 - LPAR OS */ 4616 vlcd->type = 1; 4617 len = strlen(os_name) + 1; 4618 vlcd->len = cpu_to_be16(len); 4619 strscpy(vlcd->name, os_name, len); 4620 vlcd = (struct vnic_login_client_data *)(vlcd->name + len); 4621 4622 /* Type 2 - LPAR name */ 4623 vlcd->type = 2; 4624 len = strlen(utsname()->nodename) + 1; 4625 vlcd->len = cpu_to_be16(len); 4626 strscpy(vlcd->name, utsname()->nodename, len); 4627 vlcd = (struct vnic_login_client_data *)(vlcd->name + len); 4628 4629 /* Type 3 - device name */ 4630 vlcd->type = 3; 4631 len = strlen(adapter->netdev->name) + 1; 4632 vlcd->len = cpu_to_be16(len); 4633 strscpy(vlcd->name, adapter->netdev->name, len); 4634 } 4635 4636 static int send_login(struct ibmvnic_adapter *adapter) 4637 { 4638 struct ibmvnic_login_rsp_buffer *login_rsp_buffer; 4639 struct ibmvnic_login_buffer *login_buffer; 4640 struct device *dev = &adapter->vdev->dev; 4641 struct vnic_login_client_data *vlcd; 4642 dma_addr_t rsp_buffer_token; 4643 dma_addr_t buffer_token; 4644 size_t rsp_buffer_size; 4645 union ibmvnic_crq crq; 4646 int client_data_len; 4647 size_t buffer_size; 4648 __be64 *tx_list_p; 4649 __be64 *rx_list_p; 4650 int rc; 4651 int i; 4652 4653 if (!adapter->tx_scrq || !adapter->rx_scrq) { 4654 netdev_err(adapter->netdev, 4655 "RX or TX queues are not allocated, device login failed\n"); 4656 return -ENOMEM; 4657 } 4658 4659 release_login_buffer(adapter); 4660 release_login_rsp_buffer(adapter); 4661 4662 client_data_len = vnic_client_data_len(adapter); 4663 4664 buffer_size = 4665 sizeof(struct ibmvnic_login_buffer) + 4666 sizeof(u64) * (adapter->req_tx_queues + adapter->req_rx_queues) + 4667 client_data_len; 4668 4669 login_buffer = kzalloc(buffer_size, GFP_ATOMIC); 4670 if (!login_buffer) 4671 goto buf_alloc_failed; 4672 4673 buffer_token = dma_map_single(dev, login_buffer, buffer_size, 4674 DMA_TO_DEVICE); 4675 if (dma_mapping_error(dev, buffer_token)) { 4676 dev_err(dev, "Couldn't map login buffer\n"); 4677 goto buf_map_failed; 4678 } 4679 4680 rsp_buffer_size = sizeof(struct ibmvnic_login_rsp_buffer) + 4681 sizeof(u64) * adapter->req_tx_queues + 4682 sizeof(u64) * adapter->req_rx_queues + 4683 sizeof(u64) * adapter->req_rx_queues + 4684 sizeof(u8) * IBMVNIC_TX_DESC_VERSIONS; 4685 4686 login_rsp_buffer = kmalloc(rsp_buffer_size, GFP_ATOMIC); 4687 if (!login_rsp_buffer) 4688 goto buf_rsp_alloc_failed; 4689 4690 rsp_buffer_token = dma_map_single(dev, login_rsp_buffer, 4691 rsp_buffer_size, DMA_FROM_DEVICE); 4692 if (dma_mapping_error(dev, rsp_buffer_token)) { 4693 dev_err(dev, "Couldn't map login rsp buffer\n"); 4694 goto buf_rsp_map_failed; 4695 } 4696 4697 adapter->login_buf = login_buffer; 4698 adapter->login_buf_token = buffer_token; 4699 adapter->login_buf_sz = buffer_size; 4700 adapter->login_rsp_buf = login_rsp_buffer; 4701 adapter->login_rsp_buf_token = rsp_buffer_token; 4702 adapter->login_rsp_buf_sz = rsp_buffer_size; 4703 4704 login_buffer->len = cpu_to_be32(buffer_size); 4705 login_buffer->version = cpu_to_be32(INITIAL_VERSION_LB); 4706 login_buffer->num_txcomp_subcrqs = cpu_to_be32(adapter->req_tx_queues); 4707 login_buffer->off_txcomp_subcrqs = 4708 
cpu_to_be32(sizeof(struct ibmvnic_login_buffer)); 4709 login_buffer->num_rxcomp_subcrqs = cpu_to_be32(adapter->req_rx_queues); 4710 login_buffer->off_rxcomp_subcrqs = 4711 cpu_to_be32(sizeof(struct ibmvnic_login_buffer) + 4712 sizeof(u64) * adapter->req_tx_queues); 4713 login_buffer->login_rsp_ioba = cpu_to_be32(rsp_buffer_token); 4714 login_buffer->login_rsp_len = cpu_to_be32(rsp_buffer_size); 4715 4716 tx_list_p = (__be64 *)((char *)login_buffer + 4717 sizeof(struct ibmvnic_login_buffer)); 4718 rx_list_p = (__be64 *)((char *)login_buffer + 4719 sizeof(struct ibmvnic_login_buffer) + 4720 sizeof(u64) * adapter->req_tx_queues); 4721 4722 for (i = 0; i < adapter->req_tx_queues; i++) { 4723 if (adapter->tx_scrq[i]) { 4724 tx_list_p[i] = 4725 cpu_to_be64(adapter->tx_scrq[i]->crq_num); 4726 } 4727 } 4728 4729 for (i = 0; i < adapter->req_rx_queues; i++) { 4730 if (adapter->rx_scrq[i]) { 4731 rx_list_p[i] = 4732 cpu_to_be64(adapter->rx_scrq[i]->crq_num); 4733 } 4734 } 4735 4736 /* Insert vNIC login client data */ 4737 vlcd = (struct vnic_login_client_data *) 4738 ((char *)rx_list_p + (sizeof(u64) * adapter->req_rx_queues)); 4739 login_buffer->client_data_offset = 4740 cpu_to_be32((char *)vlcd - (char *)login_buffer); 4741 login_buffer->client_data_len = cpu_to_be32(client_data_len); 4742 4743 vnic_add_client_data(adapter, vlcd); 4744 4745 netdev_dbg(adapter->netdev, "Login Buffer:\n"); 4746 for (i = 0; i < (adapter->login_buf_sz - 1) / 8 + 1; i++) { 4747 netdev_dbg(adapter->netdev, "%016lx\n", 4748 ((unsigned long *)(adapter->login_buf))[i]); 4749 } 4750 4751 memset(&crq, 0, sizeof(crq)); 4752 crq.login.first = IBMVNIC_CRQ_CMD; 4753 crq.login.cmd = LOGIN; 4754 crq.login.ioba = cpu_to_be32(buffer_token); 4755 crq.login.len = cpu_to_be32(buffer_size); 4756 4757 adapter->login_pending = true; 4758 rc = ibmvnic_send_crq(adapter, &crq); 4759 if (rc) { 4760 adapter->login_pending = false; 4761 netdev_err(adapter->netdev, "Failed to send login, rc=%d\n", rc); 4762 goto buf_rsp_map_failed; 4763 } 4764 4765 return 0; 4766 4767 buf_rsp_map_failed: 4768 kfree(login_rsp_buffer); 4769 adapter->login_rsp_buf = NULL; 4770 buf_rsp_alloc_failed: 4771 dma_unmap_single(dev, buffer_token, buffer_size, DMA_TO_DEVICE); 4772 buf_map_failed: 4773 kfree(login_buffer); 4774 adapter->login_buf = NULL; 4775 buf_alloc_failed: 4776 return -ENOMEM; 4777 } 4778 4779 static int send_request_map(struct ibmvnic_adapter *adapter, dma_addr_t addr, 4780 u32 len, u8 map_id) 4781 { 4782 union ibmvnic_crq crq; 4783 4784 memset(&crq, 0, sizeof(crq)); 4785 crq.request_map.first = IBMVNIC_CRQ_CMD; 4786 crq.request_map.cmd = REQUEST_MAP; 4787 crq.request_map.map_id = map_id; 4788 crq.request_map.ioba = cpu_to_be32(addr); 4789 crq.request_map.len = cpu_to_be32(len); 4790 return ibmvnic_send_crq(adapter, &crq); 4791 } 4792 4793 static int send_request_unmap(struct ibmvnic_adapter *adapter, u8 map_id) 4794 { 4795 union ibmvnic_crq crq; 4796 4797 memset(&crq, 0, sizeof(crq)); 4798 crq.request_unmap.first = IBMVNIC_CRQ_CMD; 4799 crq.request_unmap.cmd = REQUEST_UNMAP; 4800 crq.request_unmap.map_id = map_id; 4801 return ibmvnic_send_crq(adapter, &crq); 4802 } 4803 4804 static void send_query_map(struct ibmvnic_adapter *adapter) 4805 { 4806 union ibmvnic_crq crq; 4807 4808 memset(&crq, 0, sizeof(crq)); 4809 crq.query_map.first = IBMVNIC_CRQ_CMD; 4810 crq.query_map.cmd = QUERY_MAP; 4811 ibmvnic_send_crq(adapter, &crq); 4812 } 4813 4814 /* Send a series of CRQs requesting various capabilities of the VNIC server */ 4815 static void 
send_query_cap(struct ibmvnic_adapter *adapter) 4816 { 4817 union ibmvnic_crq crq; 4818 int cap_reqs; 4819 4820 /* We send out 25 QUERY_CAPABILITY CRQs below. Initialize this count 4821 * upfront. When the tasklet receives a response to all of these, it 4822 * can send out the next protocol messaage (REQUEST_CAPABILITY). 4823 */ 4824 cap_reqs = 25; 4825 4826 atomic_set(&adapter->running_cap_crqs, cap_reqs); 4827 4828 memset(&crq, 0, sizeof(crq)); 4829 crq.query_capability.first = IBMVNIC_CRQ_CMD; 4830 crq.query_capability.cmd = QUERY_CAPABILITY; 4831 4832 crq.query_capability.capability = cpu_to_be16(MIN_TX_QUEUES); 4833 ibmvnic_send_crq(adapter, &crq); 4834 cap_reqs--; 4835 4836 crq.query_capability.capability = cpu_to_be16(MIN_RX_QUEUES); 4837 ibmvnic_send_crq(adapter, &crq); 4838 cap_reqs--; 4839 4840 crq.query_capability.capability = cpu_to_be16(MIN_RX_ADD_QUEUES); 4841 ibmvnic_send_crq(adapter, &crq); 4842 cap_reqs--; 4843 4844 crq.query_capability.capability = cpu_to_be16(MAX_TX_QUEUES); 4845 ibmvnic_send_crq(adapter, &crq); 4846 cap_reqs--; 4847 4848 crq.query_capability.capability = cpu_to_be16(MAX_RX_QUEUES); 4849 ibmvnic_send_crq(adapter, &crq); 4850 cap_reqs--; 4851 4852 crq.query_capability.capability = cpu_to_be16(MAX_RX_ADD_QUEUES); 4853 ibmvnic_send_crq(adapter, &crq); 4854 cap_reqs--; 4855 4856 crq.query_capability.capability = 4857 cpu_to_be16(MIN_TX_ENTRIES_PER_SUBCRQ); 4858 ibmvnic_send_crq(adapter, &crq); 4859 cap_reqs--; 4860 4861 crq.query_capability.capability = 4862 cpu_to_be16(MIN_RX_ADD_ENTRIES_PER_SUBCRQ); 4863 ibmvnic_send_crq(adapter, &crq); 4864 cap_reqs--; 4865 4866 crq.query_capability.capability = 4867 cpu_to_be16(MAX_TX_ENTRIES_PER_SUBCRQ); 4868 ibmvnic_send_crq(adapter, &crq); 4869 cap_reqs--; 4870 4871 crq.query_capability.capability = 4872 cpu_to_be16(MAX_RX_ADD_ENTRIES_PER_SUBCRQ); 4873 ibmvnic_send_crq(adapter, &crq); 4874 cap_reqs--; 4875 4876 crq.query_capability.capability = cpu_to_be16(TCP_IP_OFFLOAD); 4877 ibmvnic_send_crq(adapter, &crq); 4878 cap_reqs--; 4879 4880 crq.query_capability.capability = cpu_to_be16(PROMISC_SUPPORTED); 4881 ibmvnic_send_crq(adapter, &crq); 4882 cap_reqs--; 4883 4884 crq.query_capability.capability = cpu_to_be16(MIN_MTU); 4885 ibmvnic_send_crq(adapter, &crq); 4886 cap_reqs--; 4887 4888 crq.query_capability.capability = cpu_to_be16(MAX_MTU); 4889 ibmvnic_send_crq(adapter, &crq); 4890 cap_reqs--; 4891 4892 crq.query_capability.capability = cpu_to_be16(MAX_MULTICAST_FILTERS); 4893 ibmvnic_send_crq(adapter, &crq); 4894 cap_reqs--; 4895 4896 crq.query_capability.capability = cpu_to_be16(VLAN_HEADER_INSERTION); 4897 ibmvnic_send_crq(adapter, &crq); 4898 cap_reqs--; 4899 4900 crq.query_capability.capability = cpu_to_be16(RX_VLAN_HEADER_INSERTION); 4901 ibmvnic_send_crq(adapter, &crq); 4902 cap_reqs--; 4903 4904 crq.query_capability.capability = cpu_to_be16(MAX_TX_SG_ENTRIES); 4905 ibmvnic_send_crq(adapter, &crq); 4906 cap_reqs--; 4907 4908 crq.query_capability.capability = cpu_to_be16(RX_SG_SUPPORTED); 4909 ibmvnic_send_crq(adapter, &crq); 4910 cap_reqs--; 4911 4912 crq.query_capability.capability = cpu_to_be16(OPT_TX_COMP_SUB_QUEUES); 4913 ibmvnic_send_crq(adapter, &crq); 4914 cap_reqs--; 4915 4916 crq.query_capability.capability = cpu_to_be16(OPT_RX_COMP_QUEUES); 4917 ibmvnic_send_crq(adapter, &crq); 4918 cap_reqs--; 4919 4920 crq.query_capability.capability = 4921 cpu_to_be16(OPT_RX_BUFADD_Q_PER_RX_COMP_Q); 4922 ibmvnic_send_crq(adapter, &crq); 4923 cap_reqs--; 4924 4925 crq.query_capability.capability = 4926 
cpu_to_be16(OPT_TX_ENTRIES_PER_SUBCRQ); 4927 ibmvnic_send_crq(adapter, &crq); 4928 cap_reqs--; 4929 4930 crq.query_capability.capability = 4931 cpu_to_be16(OPT_RXBA_ENTRIES_PER_SUBCRQ); 4932 ibmvnic_send_crq(adapter, &crq); 4933 cap_reqs--; 4934 4935 crq.query_capability.capability = cpu_to_be16(TX_RX_DESC_REQ); 4936 4937 ibmvnic_send_crq(adapter, &crq); 4938 cap_reqs--; 4939 4940 /* Keep at end to catch any discrepancy between expected and actual 4941 * CRQs sent. 4942 */ 4943 WARN_ON(cap_reqs != 0); 4944 } 4945 4946 static void send_query_ip_offload(struct ibmvnic_adapter *adapter) 4947 { 4948 int buf_sz = sizeof(struct ibmvnic_query_ip_offload_buffer); 4949 struct device *dev = &adapter->vdev->dev; 4950 union ibmvnic_crq crq; 4951 4952 adapter->ip_offload_tok = 4953 dma_map_single(dev, 4954 &adapter->ip_offload_buf, 4955 buf_sz, 4956 DMA_FROM_DEVICE); 4957 4958 if (dma_mapping_error(dev, adapter->ip_offload_tok)) { 4959 if (!firmware_has_feature(FW_FEATURE_CMO)) 4960 dev_err(dev, "Couldn't map offload buffer\n"); 4961 return; 4962 } 4963 4964 memset(&crq, 0, sizeof(crq)); 4965 crq.query_ip_offload.first = IBMVNIC_CRQ_CMD; 4966 crq.query_ip_offload.cmd = QUERY_IP_OFFLOAD; 4967 crq.query_ip_offload.len = cpu_to_be32(buf_sz); 4968 crq.query_ip_offload.ioba = 4969 cpu_to_be32(adapter->ip_offload_tok); 4970 4971 ibmvnic_send_crq(adapter, &crq); 4972 } 4973 4974 static void send_control_ip_offload(struct ibmvnic_adapter *adapter) 4975 { 4976 struct ibmvnic_control_ip_offload_buffer *ctrl_buf = &adapter->ip_offload_ctrl; 4977 struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf; 4978 struct device *dev = &adapter->vdev->dev; 4979 netdev_features_t old_hw_features = 0; 4980 union ibmvnic_crq crq; 4981 4982 adapter->ip_offload_ctrl_tok = 4983 dma_map_single(dev, 4984 ctrl_buf, 4985 sizeof(adapter->ip_offload_ctrl), 4986 DMA_TO_DEVICE); 4987 4988 if (dma_mapping_error(dev, adapter->ip_offload_ctrl_tok)) { 4989 dev_err(dev, "Couldn't map ip offload control buffer\n"); 4990 return; 4991 } 4992 4993 ctrl_buf->len = cpu_to_be32(sizeof(adapter->ip_offload_ctrl)); 4994 ctrl_buf->version = cpu_to_be32(INITIAL_VERSION_IOB); 4995 ctrl_buf->ipv4_chksum = buf->ipv4_chksum; 4996 ctrl_buf->ipv6_chksum = buf->ipv6_chksum; 4997 ctrl_buf->tcp_ipv4_chksum = buf->tcp_ipv4_chksum; 4998 ctrl_buf->udp_ipv4_chksum = buf->udp_ipv4_chksum; 4999 ctrl_buf->tcp_ipv6_chksum = buf->tcp_ipv6_chksum; 5000 ctrl_buf->udp_ipv6_chksum = buf->udp_ipv6_chksum; 5001 ctrl_buf->large_tx_ipv4 = buf->large_tx_ipv4; 5002 ctrl_buf->large_tx_ipv6 = buf->large_tx_ipv6; 5003 5004 /* large_rx disabled for now, additional features needed */ 5005 ctrl_buf->large_rx_ipv4 = 0; 5006 ctrl_buf->large_rx_ipv6 = 0; 5007 5008 if (adapter->state != VNIC_PROBING) { 5009 old_hw_features = adapter->netdev->hw_features; 5010 adapter->netdev->hw_features = 0; 5011 } 5012 5013 adapter->netdev->hw_features = NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO; 5014 5015 if (buf->tcp_ipv4_chksum || buf->udp_ipv4_chksum) 5016 adapter->netdev->hw_features |= NETIF_F_IP_CSUM; 5017 5018 if (buf->tcp_ipv6_chksum || buf->udp_ipv6_chksum) 5019 adapter->netdev->hw_features |= NETIF_F_IPV6_CSUM; 5020 5021 if ((adapter->netdev->features & 5022 (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))) 5023 adapter->netdev->hw_features |= NETIF_F_RXCSUM; 5024 5025 if (buf->large_tx_ipv4) 5026 adapter->netdev->hw_features |= NETIF_F_TSO; 5027 if (buf->large_tx_ipv6) 5028 adapter->netdev->hw_features |= NETIF_F_TSO6; 5029 5030 if (adapter->state == VNIC_PROBING) { 5031 
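		/* First probe: nothing has been negotiated with the stack
		 * yet, so expose everything the server reported as supported.
		 * Later calls take the else branch and reconcile the new
		 * hw_features with the features that were already enabled.
		 */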
adapter->netdev->features |= adapter->netdev->hw_features; 5032 } else if (old_hw_features != adapter->netdev->hw_features) { 5033 netdev_features_t tmp = 0; 5034 5035 /* disable features no longer supported */ 5036 adapter->netdev->features &= adapter->netdev->hw_features; 5037 /* turn on features now supported if previously enabled */ 5038 tmp = (old_hw_features ^ adapter->netdev->hw_features) & 5039 adapter->netdev->hw_features; 5040 adapter->netdev->features |= 5041 tmp & adapter->netdev->wanted_features; 5042 } 5043 5044 memset(&crq, 0, sizeof(crq)); 5045 crq.control_ip_offload.first = IBMVNIC_CRQ_CMD; 5046 crq.control_ip_offload.cmd = CONTROL_IP_OFFLOAD; 5047 crq.control_ip_offload.len = 5048 cpu_to_be32(sizeof(adapter->ip_offload_ctrl)); 5049 crq.control_ip_offload.ioba = cpu_to_be32(adapter->ip_offload_ctrl_tok); 5050 ibmvnic_send_crq(adapter, &crq); 5051 } 5052 5053 static void handle_vpd_size_rsp(union ibmvnic_crq *crq, 5054 struct ibmvnic_adapter *adapter) 5055 { 5056 struct device *dev = &adapter->vdev->dev; 5057 5058 if (crq->get_vpd_size_rsp.rc.code) { 5059 dev_err(dev, "Error retrieving VPD size, rc=%x\n", 5060 crq->get_vpd_size_rsp.rc.code); 5061 complete(&adapter->fw_done); 5062 return; 5063 } 5064 5065 adapter->vpd->len = be64_to_cpu(crq->get_vpd_size_rsp.len); 5066 complete(&adapter->fw_done); 5067 } 5068 5069 static void handle_vpd_rsp(union ibmvnic_crq *crq, 5070 struct ibmvnic_adapter *adapter) 5071 { 5072 struct device *dev = &adapter->vdev->dev; 5073 unsigned char *substr = NULL; 5074 u8 fw_level_len = 0; 5075 5076 memset(adapter->fw_version, 0, 32); 5077 5078 dma_unmap_single(dev, adapter->vpd->dma_addr, adapter->vpd->len, 5079 DMA_FROM_DEVICE); 5080 5081 if (crq->get_vpd_rsp.rc.code) { 5082 dev_err(dev, "Error retrieving VPD from device, rc=%x\n", 5083 crq->get_vpd_rsp.rc.code); 5084 goto complete; 5085 } 5086 5087 /* get the position of the firmware version info 5088 * located after the ASCII 'RM' substring in the buffer 5089 */ 5090 substr = strnstr(adapter->vpd->buff, "RM", adapter->vpd->len); 5091 if (!substr) { 5092 dev_info(dev, "Warning - No FW level has been provided in the VPD buffer by the VIOS Server\n"); 5093 goto complete; 5094 } 5095 5096 /* get length of firmware level ASCII substring */ 5097 if ((substr + 2) < (adapter->vpd->buff + adapter->vpd->len)) { 5098 fw_level_len = *(substr + 2); 5099 } else { 5100 dev_info(dev, "Length of FW substr extrapolated VDP buff\n"); 5101 goto complete; 5102 } 5103 5104 /* copy firmware version string from vpd into adapter */ 5105 if ((substr + 3 + fw_level_len) < 5106 (adapter->vpd->buff + adapter->vpd->len)) { 5107 strncpy((char *)adapter->fw_version, substr + 3, fw_level_len); 5108 } else { 5109 dev_info(dev, "FW substr extrapolated VPD buff\n"); 5110 } 5111 5112 complete: 5113 if (adapter->fw_version[0] == '\0') 5114 strscpy((char *)adapter->fw_version, "N/A", sizeof(adapter->fw_version)); 5115 complete(&adapter->fw_done); 5116 } 5117 5118 static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter) 5119 { 5120 struct device *dev = &adapter->vdev->dev; 5121 struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf; 5122 int i; 5123 5124 dma_unmap_single(dev, adapter->ip_offload_tok, 5125 sizeof(adapter->ip_offload_buf), DMA_FROM_DEVICE); 5126 5127 netdev_dbg(adapter->netdev, "Query IP Offload Buffer:\n"); 5128 for (i = 0; i < (sizeof(adapter->ip_offload_buf) - 1) / 8 + 1; i++) 5129 netdev_dbg(adapter->netdev, "%016lx\n", 5130 ((unsigned long *)(buf))[i]); 5131 5132 
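	/* Decode the individual offload capabilities; send_control_ip_offload()
	 * below selects which of these to enable on the netdev.
	 */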
netdev_dbg(adapter->netdev, "ipv4_chksum = %d\n", buf->ipv4_chksum); 5133 netdev_dbg(adapter->netdev, "ipv6_chksum = %d\n", buf->ipv6_chksum); 5134 netdev_dbg(adapter->netdev, "tcp_ipv4_chksum = %d\n", 5135 buf->tcp_ipv4_chksum); 5136 netdev_dbg(adapter->netdev, "tcp_ipv6_chksum = %d\n", 5137 buf->tcp_ipv6_chksum); 5138 netdev_dbg(adapter->netdev, "udp_ipv4_chksum = %d\n", 5139 buf->udp_ipv4_chksum); 5140 netdev_dbg(adapter->netdev, "udp_ipv6_chksum = %d\n", 5141 buf->udp_ipv6_chksum); 5142 netdev_dbg(adapter->netdev, "large_tx_ipv4 = %d\n", 5143 buf->large_tx_ipv4); 5144 netdev_dbg(adapter->netdev, "large_tx_ipv6 = %d\n", 5145 buf->large_tx_ipv6); 5146 netdev_dbg(adapter->netdev, "large_rx_ipv4 = %d\n", 5147 buf->large_rx_ipv4); 5148 netdev_dbg(adapter->netdev, "large_rx_ipv6 = %d\n", 5149 buf->large_rx_ipv6); 5150 netdev_dbg(adapter->netdev, "max_ipv4_hdr_sz = %d\n", 5151 buf->max_ipv4_header_size); 5152 netdev_dbg(adapter->netdev, "max_ipv6_hdr_sz = %d\n", 5153 buf->max_ipv6_header_size); 5154 netdev_dbg(adapter->netdev, "max_tcp_hdr_size = %d\n", 5155 buf->max_tcp_header_size); 5156 netdev_dbg(adapter->netdev, "max_udp_hdr_size = %d\n", 5157 buf->max_udp_header_size); 5158 netdev_dbg(adapter->netdev, "max_large_tx_size = %d\n", 5159 buf->max_large_tx_size); 5160 netdev_dbg(adapter->netdev, "max_large_rx_size = %d\n", 5161 buf->max_large_rx_size); 5162 netdev_dbg(adapter->netdev, "ipv6_ext_hdr = %d\n", 5163 buf->ipv6_extension_header); 5164 netdev_dbg(adapter->netdev, "tcp_pseudosum_req = %d\n", 5165 buf->tcp_pseudosum_req); 5166 netdev_dbg(adapter->netdev, "num_ipv6_ext_hd = %d\n", 5167 buf->num_ipv6_ext_headers); 5168 netdev_dbg(adapter->netdev, "off_ipv6_ext_hd = %d\n", 5169 buf->off_ipv6_ext_headers); 5170 5171 send_control_ip_offload(adapter); 5172 } 5173 5174 static const char *ibmvnic_fw_err_cause(u16 cause) 5175 { 5176 switch (cause) { 5177 case ADAPTER_PROBLEM: 5178 return "adapter problem"; 5179 case BUS_PROBLEM: 5180 return "bus problem"; 5181 case FW_PROBLEM: 5182 return "firmware problem"; 5183 case DD_PROBLEM: 5184 return "device driver problem"; 5185 case EEH_RECOVERY: 5186 return "EEH recovery"; 5187 case FW_UPDATED: 5188 return "firmware updated"; 5189 case LOW_MEMORY: 5190 return "low Memory"; 5191 default: 5192 return "unknown"; 5193 } 5194 } 5195 5196 static void handle_error_indication(union ibmvnic_crq *crq, 5197 struct ibmvnic_adapter *adapter) 5198 { 5199 struct device *dev = &adapter->vdev->dev; 5200 u16 cause; 5201 5202 cause = be16_to_cpu(crq->error_indication.error_cause); 5203 5204 dev_warn_ratelimited(dev, 5205 "Firmware reports %serror, cause: %s. Starting recovery...\n", 5206 crq->error_indication.flags 5207 & IBMVNIC_FATAL_ERROR ? "FATAL " : "", 5208 ibmvnic_fw_err_cause(cause)); 5209 5210 if (crq->error_indication.flags & IBMVNIC_FATAL_ERROR) 5211 ibmvnic_reset(adapter, VNIC_RESET_FATAL); 5212 else 5213 ibmvnic_reset(adapter, VNIC_RESET_NON_FATAL); 5214 } 5215 5216 static int handle_change_mac_rsp(union ibmvnic_crq *crq, 5217 struct ibmvnic_adapter *adapter) 5218 { 5219 struct net_device *netdev = adapter->netdev; 5220 struct device *dev = &adapter->vdev->dev; 5221 long rc; 5222 5223 rc = crq->change_mac_addr_rsp.rc.code; 5224 if (rc) { 5225 dev_err(dev, "Error %ld in CHANGE_MAC_ADDR_RSP\n", rc); 5226 goto out; 5227 } 5228 /* crq->change_mac_addr.mac_addr is the requested one 5229 * crq->change_mac_addr_rsp.mac_addr is the returned valid one. 
5230 */ 5231 eth_hw_addr_set(netdev, &crq->change_mac_addr_rsp.mac_addr[0]); 5232 ether_addr_copy(adapter->mac_addr, 5233 &crq->change_mac_addr_rsp.mac_addr[0]); 5234 out: 5235 complete(&adapter->fw_done); 5236 return rc; 5237 } 5238 5239 static void handle_request_cap_rsp(union ibmvnic_crq *crq, 5240 struct ibmvnic_adapter *adapter) 5241 { 5242 struct device *dev = &adapter->vdev->dev; 5243 u64 *req_value; 5244 char *name; 5245 5246 atomic_dec(&adapter->running_cap_crqs); 5247 netdev_dbg(adapter->netdev, "Outstanding request-caps: %d\n", 5248 atomic_read(&adapter->running_cap_crqs)); 5249 switch (be16_to_cpu(crq->request_capability_rsp.capability)) { 5250 case REQ_TX_QUEUES: 5251 req_value = &adapter->req_tx_queues; 5252 name = "tx"; 5253 break; 5254 case REQ_RX_QUEUES: 5255 req_value = &adapter->req_rx_queues; 5256 name = "rx"; 5257 break; 5258 case REQ_RX_ADD_QUEUES: 5259 req_value = &adapter->req_rx_add_queues; 5260 name = "rx_add"; 5261 break; 5262 case REQ_TX_ENTRIES_PER_SUBCRQ: 5263 req_value = &adapter->req_tx_entries_per_subcrq; 5264 name = "tx_entries_per_subcrq"; 5265 break; 5266 case REQ_RX_ADD_ENTRIES_PER_SUBCRQ: 5267 req_value = &adapter->req_rx_add_entries_per_subcrq; 5268 name = "rx_add_entries_per_subcrq"; 5269 break; 5270 case REQ_MTU: 5271 req_value = &adapter->req_mtu; 5272 name = "mtu"; 5273 break; 5274 case PROMISC_REQUESTED: 5275 req_value = &adapter->promisc; 5276 name = "promisc"; 5277 break; 5278 default: 5279 dev_err(dev, "Got invalid cap request rsp %d\n", 5280 crq->request_capability.capability); 5281 return; 5282 } 5283 5284 switch (crq->request_capability_rsp.rc.code) { 5285 case SUCCESS: 5286 break; 5287 case PARTIALSUCCESS: 5288 dev_info(dev, "req=%lld, rsp=%ld in %s queue, retrying.\n", 5289 *req_value, 5290 (long)be64_to_cpu(crq->request_capability_rsp.number), 5291 name); 5292 5293 if (be16_to_cpu(crq->request_capability_rsp.capability) == 5294 REQ_MTU) { 5295 pr_err("mtu of %llu is not supported. Reverting.\n", 5296 *req_value); 5297 *req_value = adapter->fallback.mtu; 5298 } else { 5299 *req_value = 5300 be64_to_cpu(crq->request_capability_rsp.number); 5301 } 5302 5303 send_request_cap(adapter, 1); 5304 return; 5305 default: 5306 dev_err(dev, "Error %d in request cap rsp\n", 5307 crq->request_capability_rsp.rc.code); 5308 return; 5309 } 5310 5311 /* Done receiving requested capabilities, query IP offload support */ 5312 if (atomic_read(&adapter->running_cap_crqs) == 0) 5313 send_query_ip_offload(adapter); 5314 } 5315 5316 static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq, 5317 struct ibmvnic_adapter *adapter) 5318 { 5319 struct device *dev = &adapter->vdev->dev; 5320 struct net_device *netdev = adapter->netdev; 5321 struct ibmvnic_login_rsp_buffer *login_rsp = adapter->login_rsp_buf; 5322 struct ibmvnic_login_buffer *login = adapter->login_buf; 5323 u64 *tx_handle_array; 5324 u64 *rx_handle_array; 5325 int num_tx_pools; 5326 int num_rx_pools; 5327 u64 *size_array; 5328 int i; 5329 5330 /* CHECK: Test/set of login_pending does not need to be atomic 5331 * because only ibmvnic_tasklet tests/clears this. 
5332 */ 5333 if (!adapter->login_pending) { 5334 netdev_warn(netdev, "Ignoring unexpected login response\n"); 5335 return 0; 5336 } 5337 adapter->login_pending = false; 5338 5339 dma_unmap_single(dev, adapter->login_buf_token, adapter->login_buf_sz, 5340 DMA_TO_DEVICE); 5341 dma_unmap_single(dev, adapter->login_rsp_buf_token, 5342 adapter->login_rsp_buf_sz, DMA_FROM_DEVICE); 5343 5344 /* If the number of queues requested can't be allocated by the 5345 * server, the login response will return with code 1. We will need 5346 * to resend the login buffer with fewer queues requested. 5347 */ 5348 if (login_rsp_crq->generic.rc.code) { 5349 adapter->init_done_rc = login_rsp_crq->generic.rc.code; 5350 complete(&adapter->init_done); 5351 return 0; 5352 } 5353 5354 if (adapter->failover_pending) { 5355 adapter->init_done_rc = -EAGAIN; 5356 netdev_dbg(netdev, "Failover pending, ignoring login response\n"); 5357 complete(&adapter->init_done); 5358 /* login response buffer will be released on reset */ 5359 return 0; 5360 } 5361 5362 netdev->mtu = adapter->req_mtu - ETH_HLEN; 5363 5364 netdev_dbg(adapter->netdev, "Login Response Buffer:\n"); 5365 for (i = 0; i < (adapter->login_rsp_buf_sz - 1) / 8 + 1; i++) { 5366 netdev_dbg(adapter->netdev, "%016lx\n", 5367 ((unsigned long *)(adapter->login_rsp_buf))[i]); 5368 } 5369 5370 /* Sanity checks */ 5371 if (login->num_txcomp_subcrqs != login_rsp->num_txsubm_subcrqs || 5372 (be32_to_cpu(login->num_rxcomp_subcrqs) * 5373 adapter->req_rx_add_queues != 5374 be32_to_cpu(login_rsp->num_rxadd_subcrqs))) { 5375 dev_err(dev, "FATAL: Inconsistent login and login rsp\n"); 5376 ibmvnic_reset(adapter, VNIC_RESET_FATAL); 5377 return -EIO; 5378 } 5379 size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) + 5380 be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size)); 5381 /* variable buffer sizes are not supported, so just read the 5382 * first entry. 
5383 */ 5384 adapter->cur_rx_buf_sz = be64_to_cpu(size_array[0]); 5385 5386 num_tx_pools = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs); 5387 num_rx_pools = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs); 5388 5389 tx_handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) + 5390 be32_to_cpu(adapter->login_rsp_buf->off_txsubm_subcrqs)); 5391 rx_handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) + 5392 be32_to_cpu(adapter->login_rsp_buf->off_rxadd_subcrqs)); 5393 5394 for (i = 0; i < num_tx_pools; i++) 5395 adapter->tx_scrq[i]->handle = tx_handle_array[i]; 5396 5397 for (i = 0; i < num_rx_pools; i++) 5398 adapter->rx_scrq[i]->handle = rx_handle_array[i]; 5399 5400 adapter->num_active_tx_scrqs = num_tx_pools; 5401 adapter->num_active_rx_scrqs = num_rx_pools; 5402 release_login_rsp_buffer(adapter); 5403 release_login_buffer(adapter); 5404 complete(&adapter->init_done); 5405 5406 return 0; 5407 } 5408 5409 static void handle_request_unmap_rsp(union ibmvnic_crq *crq, 5410 struct ibmvnic_adapter *adapter) 5411 { 5412 struct device *dev = &adapter->vdev->dev; 5413 long rc; 5414 5415 rc = crq->request_unmap_rsp.rc.code; 5416 if (rc) 5417 dev_err(dev, "Error %ld in REQUEST_UNMAP_RSP\n", rc); 5418 } 5419 5420 static void handle_query_map_rsp(union ibmvnic_crq *crq, 5421 struct ibmvnic_adapter *adapter) 5422 { 5423 struct net_device *netdev = adapter->netdev; 5424 struct device *dev = &adapter->vdev->dev; 5425 long rc; 5426 5427 rc = crq->query_map_rsp.rc.code; 5428 if (rc) { 5429 dev_err(dev, "Error %ld in QUERY_MAP_RSP\n", rc); 5430 return; 5431 } 5432 netdev_dbg(netdev, "page_size = %d\ntot_pages = %u\nfree_pages = %u\n", 5433 crq->query_map_rsp.page_size, 5434 __be32_to_cpu(crq->query_map_rsp.tot_pages), 5435 __be32_to_cpu(crq->query_map_rsp.free_pages)); 5436 } 5437 5438 static void handle_query_cap_rsp(union ibmvnic_crq *crq, 5439 struct ibmvnic_adapter *adapter) 5440 { 5441 struct net_device *netdev = adapter->netdev; 5442 struct device *dev = &adapter->vdev->dev; 5443 long rc; 5444 5445 atomic_dec(&adapter->running_cap_crqs); 5446 netdev_dbg(netdev, "Outstanding queries: %d\n", 5447 atomic_read(&adapter->running_cap_crqs)); 5448 rc = crq->query_capability.rc.code; 5449 if (rc) { 5450 dev_err(dev, "Error %ld in QUERY_CAP_RSP\n", rc); 5451 goto out; 5452 } 5453 5454 switch (be16_to_cpu(crq->query_capability.capability)) { 5455 case MIN_TX_QUEUES: 5456 adapter->min_tx_queues = 5457 be64_to_cpu(crq->query_capability.number); 5458 netdev_dbg(netdev, "min_tx_queues = %lld\n", 5459 adapter->min_tx_queues); 5460 break; 5461 case MIN_RX_QUEUES: 5462 adapter->min_rx_queues = 5463 be64_to_cpu(crq->query_capability.number); 5464 netdev_dbg(netdev, "min_rx_queues = %lld\n", 5465 adapter->min_rx_queues); 5466 break; 5467 case MIN_RX_ADD_QUEUES: 5468 adapter->min_rx_add_queues = 5469 be64_to_cpu(crq->query_capability.number); 5470 netdev_dbg(netdev, "min_rx_add_queues = %lld\n", 5471 adapter->min_rx_add_queues); 5472 break; 5473 case MAX_TX_QUEUES: 5474 adapter->max_tx_queues = 5475 be64_to_cpu(crq->query_capability.number); 5476 netdev_dbg(netdev, "max_tx_queues = %lld\n", 5477 adapter->max_tx_queues); 5478 break; 5479 case MAX_RX_QUEUES: 5480 adapter->max_rx_queues = 5481 be64_to_cpu(crq->query_capability.number); 5482 netdev_dbg(netdev, "max_rx_queues = %lld\n", 5483 adapter->max_rx_queues); 5484 break; 5485 case MAX_RX_ADD_QUEUES: 5486 adapter->max_rx_add_queues = 5487 be64_to_cpu(crq->query_capability.number); 5488 netdev_dbg(netdev, "max_rx_add_queues = %lld\n", 5489 
adapter->max_rx_add_queues); 5490 break; 5491 case MIN_TX_ENTRIES_PER_SUBCRQ: 5492 adapter->min_tx_entries_per_subcrq = 5493 be64_to_cpu(crq->query_capability.number); 5494 netdev_dbg(netdev, "min_tx_entries_per_subcrq = %lld\n", 5495 adapter->min_tx_entries_per_subcrq); 5496 break; 5497 case MIN_RX_ADD_ENTRIES_PER_SUBCRQ: 5498 adapter->min_rx_add_entries_per_subcrq = 5499 be64_to_cpu(crq->query_capability.number); 5500 netdev_dbg(netdev, "min_rx_add_entrs_per_subcrq = %lld\n", 5501 adapter->min_rx_add_entries_per_subcrq); 5502 break; 5503 case MAX_TX_ENTRIES_PER_SUBCRQ: 5504 adapter->max_tx_entries_per_subcrq = 5505 be64_to_cpu(crq->query_capability.number); 5506 netdev_dbg(netdev, "max_tx_entries_per_subcrq = %lld\n", 5507 adapter->max_tx_entries_per_subcrq); 5508 break; 5509 case MAX_RX_ADD_ENTRIES_PER_SUBCRQ: 5510 adapter->max_rx_add_entries_per_subcrq = 5511 be64_to_cpu(crq->query_capability.number); 5512 netdev_dbg(netdev, "max_rx_add_entrs_per_subcrq = %lld\n", 5513 adapter->max_rx_add_entries_per_subcrq); 5514 break; 5515 case TCP_IP_OFFLOAD: 5516 adapter->tcp_ip_offload = 5517 be64_to_cpu(crq->query_capability.number); 5518 netdev_dbg(netdev, "tcp_ip_offload = %lld\n", 5519 adapter->tcp_ip_offload); 5520 break; 5521 case PROMISC_SUPPORTED: 5522 adapter->promisc_supported = 5523 be64_to_cpu(crq->query_capability.number); 5524 netdev_dbg(netdev, "promisc_supported = %lld\n", 5525 adapter->promisc_supported); 5526 break; 5527 case MIN_MTU: 5528 adapter->min_mtu = be64_to_cpu(crq->query_capability.number); 5529 netdev->min_mtu = adapter->min_mtu - ETH_HLEN; 5530 netdev_dbg(netdev, "min_mtu = %lld\n", adapter->min_mtu); 5531 break; 5532 case MAX_MTU: 5533 adapter->max_mtu = be64_to_cpu(crq->query_capability.number); 5534 netdev->max_mtu = adapter->max_mtu - ETH_HLEN; 5535 netdev_dbg(netdev, "max_mtu = %lld\n", adapter->max_mtu); 5536 break; 5537 case MAX_MULTICAST_FILTERS: 5538 adapter->max_multicast_filters = 5539 be64_to_cpu(crq->query_capability.number); 5540 netdev_dbg(netdev, "max_multicast_filters = %lld\n", 5541 adapter->max_multicast_filters); 5542 break; 5543 case VLAN_HEADER_INSERTION: 5544 adapter->vlan_header_insertion = 5545 be64_to_cpu(crq->query_capability.number); 5546 if (adapter->vlan_header_insertion) 5547 netdev->features |= NETIF_F_HW_VLAN_STAG_TX; 5548 netdev_dbg(netdev, "vlan_header_insertion = %lld\n", 5549 adapter->vlan_header_insertion); 5550 break; 5551 case RX_VLAN_HEADER_INSERTION: 5552 adapter->rx_vlan_header_insertion = 5553 be64_to_cpu(crq->query_capability.number); 5554 netdev_dbg(netdev, "rx_vlan_header_insertion = %lld\n", 5555 adapter->rx_vlan_header_insertion); 5556 break; 5557 case MAX_TX_SG_ENTRIES: 5558 adapter->max_tx_sg_entries = 5559 be64_to_cpu(crq->query_capability.number); 5560 netdev_dbg(netdev, "max_tx_sg_entries = %lld\n", 5561 adapter->max_tx_sg_entries); 5562 break; 5563 case RX_SG_SUPPORTED: 5564 adapter->rx_sg_supported = 5565 be64_to_cpu(crq->query_capability.number); 5566 netdev_dbg(netdev, "rx_sg_supported = %lld\n", 5567 adapter->rx_sg_supported); 5568 break; 5569 case OPT_TX_COMP_SUB_QUEUES: 5570 adapter->opt_tx_comp_sub_queues = 5571 be64_to_cpu(crq->query_capability.number); 5572 netdev_dbg(netdev, "opt_tx_comp_sub_queues = %lld\n", 5573 adapter->opt_tx_comp_sub_queues); 5574 break; 5575 case OPT_RX_COMP_QUEUES: 5576 adapter->opt_rx_comp_queues = 5577 be64_to_cpu(crq->query_capability.number); 5578 netdev_dbg(netdev, "opt_rx_comp_queues = %lld\n", 5579 adapter->opt_rx_comp_queues); 5580 break; 5581 case 
OPT_RX_BUFADD_Q_PER_RX_COMP_Q: 5582 adapter->opt_rx_bufadd_q_per_rx_comp_q = 5583 be64_to_cpu(crq->query_capability.number); 5584 netdev_dbg(netdev, "opt_rx_bufadd_q_per_rx_comp_q = %lld\n", 5585 adapter->opt_rx_bufadd_q_per_rx_comp_q); 5586 break; 5587 case OPT_TX_ENTRIES_PER_SUBCRQ: 5588 adapter->opt_tx_entries_per_subcrq = 5589 be64_to_cpu(crq->query_capability.number); 5590 netdev_dbg(netdev, "opt_tx_entries_per_subcrq = %lld\n", 5591 adapter->opt_tx_entries_per_subcrq); 5592 break; 5593 case OPT_RXBA_ENTRIES_PER_SUBCRQ: 5594 adapter->opt_rxba_entries_per_subcrq = 5595 be64_to_cpu(crq->query_capability.number); 5596 netdev_dbg(netdev, "opt_rxba_entries_per_subcrq = %lld\n", 5597 adapter->opt_rxba_entries_per_subcrq); 5598 break; 5599 case TX_RX_DESC_REQ: 5600 adapter->tx_rx_desc_req = crq->query_capability.number; 5601 netdev_dbg(netdev, "tx_rx_desc_req = %llx\n", 5602 adapter->tx_rx_desc_req); 5603 break; 5604 5605 default: 5606 netdev_err(netdev, "Got invalid cap rsp %d\n", 5607 crq->query_capability.capability); 5608 } 5609 5610 out: 5611 if (atomic_read(&adapter->running_cap_crqs) == 0) 5612 send_request_cap(adapter, 0); 5613 } 5614 5615 static int send_query_phys_parms(struct ibmvnic_adapter *adapter) 5616 { 5617 union ibmvnic_crq crq; 5618 int rc; 5619 5620 memset(&crq, 0, sizeof(crq)); 5621 crq.query_phys_parms.first = IBMVNIC_CRQ_CMD; 5622 crq.query_phys_parms.cmd = QUERY_PHYS_PARMS; 5623 5624 mutex_lock(&adapter->fw_lock); 5625 adapter->fw_done_rc = 0; 5626 reinit_completion(&adapter->fw_done); 5627 5628 rc = ibmvnic_send_crq(adapter, &crq); 5629 if (rc) { 5630 mutex_unlock(&adapter->fw_lock); 5631 return rc; 5632 } 5633 5634 rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000); 5635 if (rc) { 5636 mutex_unlock(&adapter->fw_lock); 5637 return rc; 5638 } 5639 5640 mutex_unlock(&adapter->fw_lock); 5641 return adapter->fw_done_rc ? 
-EIO : 0; 5642 } 5643 5644 static int handle_query_phys_parms_rsp(union ibmvnic_crq *crq, 5645 struct ibmvnic_adapter *adapter) 5646 { 5647 struct net_device *netdev = adapter->netdev; 5648 int rc; 5649 __be32 rspeed = cpu_to_be32(crq->query_phys_parms_rsp.speed); 5650 5651 rc = crq->query_phys_parms_rsp.rc.code; 5652 if (rc) { 5653 netdev_err(netdev, "Error %d in QUERY_PHYS_PARMS\n", rc); 5654 return rc; 5655 } 5656 switch (rspeed) { 5657 case IBMVNIC_10MBPS: 5658 adapter->speed = SPEED_10; 5659 break; 5660 case IBMVNIC_100MBPS: 5661 adapter->speed = SPEED_100; 5662 break; 5663 case IBMVNIC_1GBPS: 5664 adapter->speed = SPEED_1000; 5665 break; 5666 case IBMVNIC_10GBPS: 5667 adapter->speed = SPEED_10000; 5668 break; 5669 case IBMVNIC_25GBPS: 5670 adapter->speed = SPEED_25000; 5671 break; 5672 case IBMVNIC_40GBPS: 5673 adapter->speed = SPEED_40000; 5674 break; 5675 case IBMVNIC_50GBPS: 5676 adapter->speed = SPEED_50000; 5677 break; 5678 case IBMVNIC_100GBPS: 5679 adapter->speed = SPEED_100000; 5680 break; 5681 case IBMVNIC_200GBPS: 5682 adapter->speed = SPEED_200000; 5683 break; 5684 default: 5685 if (netif_carrier_ok(netdev)) 5686 netdev_warn(netdev, "Unknown speed 0x%08x\n", rspeed); 5687 adapter->speed = SPEED_UNKNOWN; 5688 } 5689 if (crq->query_phys_parms_rsp.flags1 & IBMVNIC_FULL_DUPLEX) 5690 adapter->duplex = DUPLEX_FULL; 5691 else if (crq->query_phys_parms_rsp.flags1 & IBMVNIC_HALF_DUPLEX) 5692 adapter->duplex = DUPLEX_HALF; 5693 else 5694 adapter->duplex = DUPLEX_UNKNOWN; 5695 5696 return rc; 5697 } 5698 5699 static void ibmvnic_handle_crq(union ibmvnic_crq *crq, 5700 struct ibmvnic_adapter *adapter) 5701 { 5702 struct ibmvnic_generic_crq *gen_crq = &crq->generic; 5703 struct net_device *netdev = adapter->netdev; 5704 struct device *dev = &adapter->vdev->dev; 5705 u64 *u64_crq = (u64 *)crq; 5706 long rc; 5707 5708 netdev_dbg(netdev, "Handling CRQ: %016lx %016lx\n", 5709 (unsigned long)cpu_to_be64(u64_crq[0]), 5710 (unsigned long)cpu_to_be64(u64_crq[1])); 5711 switch (gen_crq->first) { 5712 case IBMVNIC_CRQ_INIT_RSP: 5713 switch (gen_crq->cmd) { 5714 case IBMVNIC_CRQ_INIT: 5715 dev_info(dev, "Partner initialized\n"); 5716 adapter->from_passive_init = true; 5717 /* Discard any stale login responses from prev reset. 5718 * CHECK: should we clear even on INIT_COMPLETE? 5719 */ 5720 adapter->login_pending = false; 5721 5722 if (adapter->state == VNIC_DOWN) 5723 rc = ibmvnic_reset(adapter, VNIC_RESET_PASSIVE_INIT); 5724 else 5725 rc = ibmvnic_reset(adapter, VNIC_RESET_FAILOVER); 5726 5727 if (rc && rc != -EBUSY) { 5728 /* We were unable to schedule the failover 5729 * reset either because the adapter was still 5730 * probing (eg: during kexec) or we could not 5731 * allocate memory. Clear the failover_pending 5732 * flag since no one else will. We ignore 5733 * EBUSY because it means either FAILOVER reset 5734 * is already scheduled or the adapter is 5735 * being removed. 
5736 */ 5737 netdev_err(netdev, 5738 "Error %ld scheduling failover reset\n", 5739 rc); 5740 adapter->failover_pending = false; 5741 } 5742 5743 if (!completion_done(&adapter->init_done)) { 5744 if (!adapter->init_done_rc) 5745 adapter->init_done_rc = -EAGAIN; 5746 complete(&adapter->init_done); 5747 } 5748 5749 break; 5750 case IBMVNIC_CRQ_INIT_COMPLETE: 5751 dev_info(dev, "Partner initialization complete\n"); 5752 adapter->crq.active = true; 5753 send_version_xchg(adapter); 5754 break; 5755 default: 5756 dev_err(dev, "Unknown crq cmd: %d\n", gen_crq->cmd); 5757 } 5758 return; 5759 case IBMVNIC_CRQ_XPORT_EVENT: 5760 netif_carrier_off(netdev); 5761 adapter->crq.active = false; 5762 /* terminate any thread waiting for a response 5763 * from the device 5764 */ 5765 if (!completion_done(&adapter->fw_done)) { 5766 adapter->fw_done_rc = -EIO; 5767 complete(&adapter->fw_done); 5768 } 5769 5770 /* if we got here during crq-init, retry crq-init */ 5771 if (!completion_done(&adapter->init_done)) { 5772 adapter->init_done_rc = -EAGAIN; 5773 complete(&adapter->init_done); 5774 } 5775 5776 if (!completion_done(&adapter->stats_done)) 5777 complete(&adapter->stats_done); 5778 if (test_bit(0, &adapter->resetting)) 5779 adapter->force_reset_recovery = true; 5780 if (gen_crq->cmd == IBMVNIC_PARTITION_MIGRATED) { 5781 dev_info(dev, "Migrated, re-enabling adapter\n"); 5782 ibmvnic_reset(adapter, VNIC_RESET_MOBILITY); 5783 } else if (gen_crq->cmd == IBMVNIC_DEVICE_FAILOVER) { 5784 dev_info(dev, "Backing device failover detected\n"); 5785 adapter->failover_pending = true; 5786 } else { 5787 /* The adapter lost the connection */ 5788 dev_err(dev, "Virtual Adapter failed (rc=%d)\n", 5789 gen_crq->cmd); 5790 ibmvnic_reset(adapter, VNIC_RESET_FATAL); 5791 } 5792 return; 5793 case IBMVNIC_CRQ_CMD_RSP: 5794 break; 5795 default: 5796 dev_err(dev, "Got an invalid msg type 0x%02x\n", 5797 gen_crq->first); 5798 return; 5799 } 5800 5801 switch (gen_crq->cmd) { 5802 case VERSION_EXCHANGE_RSP: 5803 rc = crq->version_exchange_rsp.rc.code; 5804 if (rc) { 5805 dev_err(dev, "Error %ld in VERSION_EXCHG_RSP\n", rc); 5806 break; 5807 } 5808 ibmvnic_version = 5809 be16_to_cpu(crq->version_exchange_rsp.version); 5810 dev_info(dev, "Partner protocol version is %d\n", 5811 ibmvnic_version); 5812 send_query_cap(adapter); 5813 break; 5814 case QUERY_CAPABILITY_RSP: 5815 handle_query_cap_rsp(crq, adapter); 5816 break; 5817 case QUERY_MAP_RSP: 5818 handle_query_map_rsp(crq, adapter); 5819 break; 5820 case REQUEST_MAP_RSP: 5821 adapter->fw_done_rc = crq->request_map_rsp.rc.code; 5822 complete(&adapter->fw_done); 5823 break; 5824 case REQUEST_UNMAP_RSP: 5825 handle_request_unmap_rsp(crq, adapter); 5826 break; 5827 case REQUEST_CAPABILITY_RSP: 5828 handle_request_cap_rsp(crq, adapter); 5829 break; 5830 case LOGIN_RSP: 5831 netdev_dbg(netdev, "Got Login Response\n"); 5832 handle_login_rsp(crq, adapter); 5833 break; 5834 case LOGICAL_LINK_STATE_RSP: 5835 netdev_dbg(netdev, 5836 "Got Logical Link State Response, state: %d rc: %d\n", 5837 crq->logical_link_state_rsp.link_state, 5838 crq->logical_link_state_rsp.rc.code); 5839 adapter->logical_link_state = 5840 crq->logical_link_state_rsp.link_state; 5841 adapter->init_done_rc = crq->logical_link_state_rsp.rc.code; 5842 complete(&adapter->init_done); 5843 break; 5844 case LINK_STATE_INDICATION: 5845 netdev_dbg(netdev, "Got Logical Link State Indication\n"); 5846 adapter->phys_link_state = 5847 crq->link_state_indication.phys_link_state; 5848 adapter->logical_link_state = 5849 
crq->link_state_indication.logical_link_state; 5850 if (adapter->phys_link_state && adapter->logical_link_state) 5851 netif_carrier_on(netdev); 5852 else 5853 netif_carrier_off(netdev); 5854 break; 5855 case CHANGE_MAC_ADDR_RSP: 5856 netdev_dbg(netdev, "Got MAC address change Response\n"); 5857 adapter->fw_done_rc = handle_change_mac_rsp(crq, adapter); 5858 break; 5859 case ERROR_INDICATION: 5860 netdev_dbg(netdev, "Got Error Indication\n"); 5861 handle_error_indication(crq, adapter); 5862 break; 5863 case REQUEST_STATISTICS_RSP: 5864 netdev_dbg(netdev, "Got Statistics Response\n"); 5865 complete(&adapter->stats_done); 5866 break; 5867 case QUERY_IP_OFFLOAD_RSP: 5868 netdev_dbg(netdev, "Got Query IP offload Response\n"); 5869 handle_query_ip_offload_rsp(adapter); 5870 break; 5871 case MULTICAST_CTRL_RSP: 5872 netdev_dbg(netdev, "Got multicast control Response\n"); 5873 break; 5874 case CONTROL_IP_OFFLOAD_RSP: 5875 netdev_dbg(netdev, "Got Control IP offload Response\n"); 5876 dma_unmap_single(dev, adapter->ip_offload_ctrl_tok, 5877 sizeof(adapter->ip_offload_ctrl), 5878 DMA_TO_DEVICE); 5879 complete(&adapter->init_done); 5880 break; 5881 case COLLECT_FW_TRACE_RSP: 5882 netdev_dbg(netdev, "Got Collect firmware trace Response\n"); 5883 complete(&adapter->fw_done); 5884 break; 5885 case GET_VPD_SIZE_RSP: 5886 handle_vpd_size_rsp(crq, adapter); 5887 break; 5888 case GET_VPD_RSP: 5889 handle_vpd_rsp(crq, adapter); 5890 break; 5891 case QUERY_PHYS_PARMS_RSP: 5892 adapter->fw_done_rc = handle_query_phys_parms_rsp(crq, adapter); 5893 complete(&adapter->fw_done); 5894 break; 5895 default: 5896 netdev_err(netdev, "Got an invalid cmd type 0x%02x\n", 5897 gen_crq->cmd); 5898 } 5899 } 5900 5901 static irqreturn_t ibmvnic_interrupt(int irq, void *instance) 5902 { 5903 struct ibmvnic_adapter *adapter = instance; 5904 5905 tasklet_schedule(&adapter->tasklet); 5906 return IRQ_HANDLED; 5907 } 5908 5909 static void ibmvnic_tasklet(struct tasklet_struct *t) 5910 { 5911 struct ibmvnic_adapter *adapter = from_tasklet(adapter, t, tasklet); 5912 struct ibmvnic_crq_queue *queue = &adapter->crq; 5913 union ibmvnic_crq *crq; 5914 unsigned long flags; 5915 5916 spin_lock_irqsave(&queue->lock, flags); 5917 5918 /* Pull all the valid messages off the CRQ */ 5919 while ((crq = ibmvnic_next_crq(adapter)) != NULL) { 5920 /* This barrier makes sure ibmvnic_next_crq()'s 5921 * crq->generic.first & IBMVNIC_CRQ_CMD_RSP is loaded 5922 * before ibmvnic_handle_crq()'s 5923 * switch(gen_crq->first) and switch(gen_crq->cmd). 
5924 */ 5925 dma_rmb(); 5926 ibmvnic_handle_crq(crq, adapter); 5927 crq->generic.first = 0; 5928 } 5929 5930 spin_unlock_irqrestore(&queue->lock, flags); 5931 } 5932 5933 static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *adapter) 5934 { 5935 struct vio_dev *vdev = adapter->vdev; 5936 int rc; 5937 5938 do { 5939 rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address); 5940 } while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc)); 5941 5942 if (rc) 5943 dev_err(&vdev->dev, "Error enabling adapter (rc=%d)\n", rc); 5944 5945 return rc; 5946 } 5947 5948 static int ibmvnic_reset_crq(struct ibmvnic_adapter *adapter) 5949 { 5950 struct ibmvnic_crq_queue *crq = &adapter->crq; 5951 struct device *dev = &adapter->vdev->dev; 5952 struct vio_dev *vdev = adapter->vdev; 5953 int rc; 5954 5955 /* Close the CRQ */ 5956 do { 5957 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address); 5958 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc)); 5959 5960 /* Clean out the queue */ 5961 if (!crq->msgs) 5962 return -EINVAL; 5963 5964 memset(crq->msgs, 0, PAGE_SIZE); 5965 crq->cur = 0; 5966 crq->active = false; 5967 5968 /* And re-open it again */ 5969 rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address, 5970 crq->msg_token, PAGE_SIZE); 5971 5972 if (rc == H_CLOSED) 5973 /* Adapter is good, but other end is not ready */ 5974 dev_warn(dev, "Partner adapter not ready\n"); 5975 else if (rc != 0) 5976 dev_warn(dev, "Couldn't register crq (rc=%d)\n", rc); 5977 5978 return rc; 5979 } 5980 5981 static void release_crq_queue(struct ibmvnic_adapter *adapter) 5982 { 5983 struct ibmvnic_crq_queue *crq = &adapter->crq; 5984 struct vio_dev *vdev = adapter->vdev; 5985 long rc; 5986 5987 if (!crq->msgs) 5988 return; 5989 5990 netdev_dbg(adapter->netdev, "Releasing CRQ\n"); 5991 free_irq(vdev->irq, adapter); 5992 tasklet_kill(&adapter->tasklet); 5993 do { 5994 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address); 5995 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc)); 5996 5997 dma_unmap_single(&vdev->dev, crq->msg_token, PAGE_SIZE, 5998 DMA_BIDIRECTIONAL); 5999 free_page((unsigned long)crq->msgs); 6000 crq->msgs = NULL; 6001 crq->active = false; 6002 } 6003 6004 static int init_crq_queue(struct ibmvnic_adapter *adapter) 6005 { 6006 struct ibmvnic_crq_queue *crq = &adapter->crq; 6007 struct device *dev = &adapter->vdev->dev; 6008 struct vio_dev *vdev = adapter->vdev; 6009 int rc, retrc = -ENOMEM; 6010 6011 if (crq->msgs) 6012 return 0; 6013 6014 crq->msgs = (union ibmvnic_crq *)get_zeroed_page(GFP_KERNEL); 6015 /* Should we allocate more than one page? */ 6016 6017 if (!crq->msgs) 6018 return -ENOMEM; 6019 6020 crq->size = PAGE_SIZE / sizeof(*crq->msgs); 6021 crq->msg_token = dma_map_single(dev, crq->msgs, PAGE_SIZE, 6022 DMA_BIDIRECTIONAL); 6023 if (dma_mapping_error(dev, crq->msg_token)) 6024 goto map_failed; 6025 6026 rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address, 6027 crq->msg_token, PAGE_SIZE); 6028 6029 if (rc == H_RESOURCE) 6030 /* maybe kexecing and resource is busy. 
try a reset */ 6031 rc = ibmvnic_reset_crq(adapter); 6032 retrc = rc; 6033 6034 if (rc == H_CLOSED) { 6035 dev_warn(dev, "Partner adapter not ready\n"); 6036 } else if (rc) { 6037 dev_warn(dev, "Error %d opening adapter\n", rc); 6038 goto reg_crq_failed; 6039 } 6040 6041 retrc = 0; 6042 6043 tasklet_setup(&adapter->tasklet, (void *)ibmvnic_tasklet); 6044 6045 netdev_dbg(adapter->netdev, "registering irq 0x%x\n", vdev->irq); 6046 snprintf(crq->name, sizeof(crq->name), "ibmvnic-%x", 6047 adapter->vdev->unit_address); 6048 rc = request_irq(vdev->irq, ibmvnic_interrupt, 0, crq->name, adapter); 6049 if (rc) { 6050 dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n", 6051 vdev->irq, rc); 6052 goto req_irq_failed; 6053 } 6054 6055 rc = vio_enable_interrupts(vdev); 6056 if (rc) { 6057 dev_err(dev, "Error %d enabling interrupts\n", rc); 6058 goto req_irq_failed; 6059 } 6060 6061 crq->cur = 0; 6062 spin_lock_init(&crq->lock); 6063 6064 /* process any CRQs that were queued before we enabled interrupts */ 6065 tasklet_schedule(&adapter->tasklet); 6066 6067 return retrc; 6068 6069 req_irq_failed: 6070 tasklet_kill(&adapter->tasklet); 6071 do { 6072 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address); 6073 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc)); 6074 reg_crq_failed: 6075 dma_unmap_single(dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL); 6076 map_failed: 6077 free_page((unsigned long)crq->msgs); 6078 crq->msgs = NULL; 6079 return retrc; 6080 } 6081 6082 static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter, bool reset) 6083 { 6084 struct device *dev = &adapter->vdev->dev; 6085 unsigned long timeout = msecs_to_jiffies(20000); 6086 u64 old_num_rx_queues = adapter->req_rx_queues; 6087 u64 old_num_tx_queues = adapter->req_tx_queues; 6088 int rc; 6089 6090 adapter->from_passive_init = false; 6091 6092 rc = ibmvnic_send_crq_init(adapter); 6093 if (rc) { 6094 dev_err(dev, "Send crq init failed with error %d\n", rc); 6095 return rc; 6096 } 6097 6098 if (!wait_for_completion_timeout(&adapter->init_done, timeout)) { 6099 dev_err(dev, "Initialization sequence timed out\n"); 6100 return -ETIMEDOUT; 6101 } 6102 6103 if (adapter->init_done_rc) { 6104 release_crq_queue(adapter); 6105 dev_err(dev, "CRQ-init failed, %d\n", adapter->init_done_rc); 6106 return adapter->init_done_rc; 6107 } 6108 6109 if (adapter->from_passive_init) { 6110 adapter->state = VNIC_OPEN; 6111 adapter->from_passive_init = false; 6112 dev_err(dev, "CRQ-init failed, passive-init\n"); 6113 return -EINVAL; 6114 } 6115 6116 if (reset && 6117 test_bit(0, &adapter->resetting) && !adapter->wait_for_reset && 6118 adapter->reset_reason != VNIC_RESET_MOBILITY) { 6119 if (adapter->req_rx_queues != old_num_rx_queues || 6120 adapter->req_tx_queues != old_num_tx_queues) { 6121 release_sub_crqs(adapter, 0); 6122 rc = init_sub_crqs(adapter); 6123 } else { 6124 /* no need to reinitialize completely, but we do 6125 * need to clean up transmits that were in flight 6126 * when we processed the reset. Failure to do so 6127 * will confound the upper layer, usually TCP, by 6128 * creating the illusion of transmits that are 6129 * awaiting completion. 
6130 */ 6131 clean_tx_pools(adapter); 6132 6133 rc = reset_sub_crq_queues(adapter); 6134 } 6135 } else { 6136 rc = init_sub_crqs(adapter); 6137 } 6138 6139 if (rc) { 6140 dev_err(dev, "Initialization of sub crqs failed\n"); 6141 release_crq_queue(adapter); 6142 return rc; 6143 } 6144 6145 rc = init_sub_crq_irqs(adapter); 6146 if (rc) { 6147 dev_err(dev, "Failed to initialize sub crq irqs\n"); 6148 release_crq_queue(adapter); 6149 } 6150 6151 return rc; 6152 } 6153 6154 static struct device_attribute dev_attr_failover; 6155 6156 static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id) 6157 { 6158 struct ibmvnic_adapter *adapter; 6159 struct net_device *netdev; 6160 unsigned char *mac_addr_p; 6161 unsigned long flags; 6162 bool init_success; 6163 int rc; 6164 6165 dev_dbg(&dev->dev, "entering ibmvnic_probe for UA 0x%x\n", 6166 dev->unit_address); 6167 6168 mac_addr_p = (unsigned char *)vio_get_attribute(dev, 6169 VETH_MAC_ADDR, NULL); 6170 if (!mac_addr_p) { 6171 dev_err(&dev->dev, 6172 "(%s:%3.3d) ERROR: Can't find MAC_ADDR attribute\n", 6173 __FILE__, __LINE__); 6174 return 0; 6175 } 6176 6177 netdev = alloc_etherdev_mq(sizeof(struct ibmvnic_adapter), 6178 IBMVNIC_MAX_QUEUES); 6179 if (!netdev) 6180 return -ENOMEM; 6181 6182 adapter = netdev_priv(netdev); 6183 adapter->state = VNIC_PROBING; 6184 dev_set_drvdata(&dev->dev, netdev); 6185 adapter->vdev = dev; 6186 adapter->netdev = netdev; 6187 adapter->login_pending = false; 6188 memset(&adapter->map_ids, 0, sizeof(adapter->map_ids)); 6189 /* map_ids start at 1, so ensure map_id 0 is always "in-use" */ 6190 bitmap_set(adapter->map_ids, 0, 1); 6191 6192 ether_addr_copy(adapter->mac_addr, mac_addr_p); 6193 eth_hw_addr_set(netdev, adapter->mac_addr); 6194 netdev->irq = dev->irq; 6195 netdev->netdev_ops = &ibmvnic_netdev_ops; 6196 netdev->ethtool_ops = &ibmvnic_ethtool_ops; 6197 SET_NETDEV_DEV(netdev, &dev->dev); 6198 6199 INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset); 6200 INIT_DELAYED_WORK(&adapter->ibmvnic_delayed_reset, 6201 __ibmvnic_delayed_reset); 6202 INIT_LIST_HEAD(&adapter->rwi_list); 6203 spin_lock_init(&adapter->rwi_lock); 6204 spin_lock_init(&adapter->state_lock); 6205 mutex_init(&adapter->fw_lock); 6206 init_completion(&adapter->probe_done); 6207 init_completion(&adapter->init_done); 6208 init_completion(&adapter->fw_done); 6209 init_completion(&adapter->reset_done); 6210 init_completion(&adapter->stats_done); 6211 clear_bit(0, &adapter->resetting); 6212 adapter->prev_rx_buf_sz = 0; 6213 adapter->prev_mtu = 0; 6214 6215 init_success = false; 6216 do { 6217 reinit_init_done(adapter); 6218 6219 /* clear any failovers we got in the previous pass 6220 * since we are reinitializing the CRQ 6221 */ 6222 adapter->failover_pending = false; 6223 6224 /* If we had already initialized CRQ, we may have one or 6225 * more resets queued already. Discard those and release 6226 * the CRQ before initializing the CRQ again. 6227 */ 6228 release_crq_queue(adapter); 6229 6230 /* Since we are still in PROBING state, __ibmvnic_reset() 6231 * will not access the ->rwi_list and since we released CRQ, 6232 * we won't get _new_ transport events. But there may be an 6233 * ongoing ibmvnic_reset() call. So serialize access to 6234 * rwi_list. If we win the race, ibmvnic_reset() could add 6235 * a reset after we purged but that's ok - we just may end 6236 * up with an extra reset (i.e. similar to having two or more 6237 * resets in the queue at once). 6238 * CHECK.
6239 */ 6240 spin_lock_irqsave(&adapter->rwi_lock, flags); 6241 flush_reset_queue(adapter); 6242 spin_unlock_irqrestore(&adapter->rwi_lock, flags); 6243 6244 rc = init_crq_queue(adapter); 6245 if (rc) { 6246 dev_err(&dev->dev, "Couldn't initialize crq. rc=%d\n", 6247 rc); 6248 goto ibmvnic_init_fail; 6249 } 6250 6251 rc = ibmvnic_reset_init(adapter, false); 6252 } while (rc == -EAGAIN); 6253 6254 /* We are ignoring the error from ibmvnic_reset_init() assuming that the 6255 * partner is not ready. CRQ is not active. When the partner becomes 6256 * ready, we will do the passive init reset. 6257 */ 6258 6259 if (!rc) 6260 init_success = true; 6261 6262 rc = init_stats_buffers(adapter); 6263 if (rc) 6264 goto ibmvnic_init_fail; 6265 6266 rc = init_stats_token(adapter); 6267 if (rc) 6268 goto ibmvnic_stats_fail; 6269 6270 rc = device_create_file(&dev->dev, &dev_attr_failover); 6271 if (rc) 6272 goto ibmvnic_dev_file_err; 6273 6274 netif_carrier_off(netdev); 6275 6276 if (init_success) { 6277 adapter->state = VNIC_PROBED; 6278 netdev->mtu = adapter->req_mtu - ETH_HLEN; 6279 netdev->min_mtu = adapter->min_mtu - ETH_HLEN; 6280 netdev->max_mtu = adapter->max_mtu - ETH_HLEN; 6281 } else { 6282 adapter->state = VNIC_DOWN; 6283 } 6284 6285 adapter->wait_for_reset = false; 6286 adapter->last_reset_time = jiffies; 6287 6288 rc = register_netdev(netdev); 6289 if (rc) { 6290 dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc); 6291 goto ibmvnic_register_fail; 6292 } 6293 dev_info(&dev->dev, "ibmvnic registered\n"); 6294 6295 complete(&adapter->probe_done); 6296 6297 return 0; 6298 6299 ibmvnic_register_fail: 6300 device_remove_file(&dev->dev, &dev_attr_failover); 6301 6302 ibmvnic_dev_file_err: 6303 release_stats_token(adapter); 6304 6305 ibmvnic_stats_fail: 6306 release_stats_buffers(adapter); 6307 6308 ibmvnic_init_fail: 6309 release_sub_crqs(adapter, 1); 6310 release_crq_queue(adapter); 6311 6312 /* cleanup worker thread after releasing CRQ so we don't get 6313 * transport events (i.e new work items for the worker thread). 6314 */ 6315 adapter->state = VNIC_REMOVING; 6316 complete(&adapter->probe_done); 6317 flush_work(&adapter->ibmvnic_reset); 6318 flush_delayed_work(&adapter->ibmvnic_delayed_reset); 6319 6320 flush_reset_queue(adapter); 6321 6322 mutex_destroy(&adapter->fw_lock); 6323 free_netdev(netdev); 6324 6325 return rc; 6326 } 6327 6328 static void ibmvnic_remove(struct vio_dev *dev) 6329 { 6330 struct net_device *netdev = dev_get_drvdata(&dev->dev); 6331 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 6332 unsigned long flags; 6333 6334 spin_lock_irqsave(&adapter->state_lock, flags); 6335 6336 /* If ibmvnic_reset() is scheduling a reset, wait for it to 6337 * finish. Then, set the state to REMOVING to prevent it from 6338 * scheduling any more work and to have reset functions ignore 6339 * any resets that have already been scheduled. Drop the lock 6340 * after setting state, so __ibmvnic_reset() which is called 6341 * from the flush_work() below, can make progress. 
6342 */ 6343 spin_lock(&adapter->rwi_lock); 6344 adapter->state = VNIC_REMOVING; 6345 spin_unlock(&adapter->rwi_lock); 6346 6347 spin_unlock_irqrestore(&adapter->state_lock, flags); 6348 6349 flush_work(&adapter->ibmvnic_reset); 6350 flush_delayed_work(&adapter->ibmvnic_delayed_reset); 6351 6352 rtnl_lock(); 6353 unregister_netdevice(netdev); 6354 6355 release_resources(adapter); 6356 release_rx_pools(adapter); 6357 release_tx_pools(adapter); 6358 release_sub_crqs(adapter, 1); 6359 release_crq_queue(adapter); 6360 6361 release_stats_token(adapter); 6362 release_stats_buffers(adapter); 6363 6364 adapter->state = VNIC_REMOVED; 6365 6366 rtnl_unlock(); 6367 mutex_destroy(&adapter->fw_lock); 6368 device_remove_file(&dev->dev, &dev_attr_failover); 6369 free_netdev(netdev); 6370 dev_set_drvdata(&dev->dev, NULL); 6371 } 6372 6373 static ssize_t failover_store(struct device *dev, struct device_attribute *attr, 6374 const char *buf, size_t count) 6375 { 6376 struct net_device *netdev = dev_get_drvdata(dev); 6377 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 6378 unsigned long retbuf[PLPAR_HCALL_BUFSIZE]; 6379 __be64 session_token; 6380 long rc; 6381 6382 if (!sysfs_streq(buf, "1")) 6383 return -EINVAL; 6384 6385 rc = plpar_hcall(H_VIOCTL, retbuf, adapter->vdev->unit_address, 6386 H_GET_SESSION_TOKEN, 0, 0, 0); 6387 if (rc) { 6388 netdev_err(netdev, "Couldn't retrieve session token, rc %ld\n", 6389 rc); 6390 goto last_resort; 6391 } 6392 6393 session_token = (__be64)retbuf[0]; 6394 netdev_dbg(netdev, "Initiating client failover, session id %llx\n", 6395 be64_to_cpu(session_token)); 6396 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address, 6397 H_SESSION_ERR_DETECTED, session_token, 0, 0); 6398 if (rc) { 6399 netdev_err(netdev, 6400 "H_VIOCTL initiated failover failed, rc %ld\n", 6401 rc); 6402 goto last_resort; 6403 } 6404 6405 return count; 6406 6407 last_resort: 6408 netdev_dbg(netdev, "Trying to send CRQ_CMD, the last resort\n"); 6409 ibmvnic_reset(adapter, VNIC_RESET_FAILOVER); 6410 6411 return count; 6412 } 6413 static DEVICE_ATTR_WO(failover); 6414 6415 static unsigned long ibmvnic_get_desired_dma(struct vio_dev *vdev) 6416 { 6417 struct net_device *netdev = dev_get_drvdata(&vdev->dev); 6418 struct ibmvnic_adapter *adapter; 6419 struct iommu_table *tbl; 6420 unsigned long ret = 0; 6421 int i; 6422 6423 tbl = get_iommu_table_base(&vdev->dev); 6424 6425 /* netdev inits at probe time along with the structures we need below*/ 6426 if (!netdev) 6427 return IOMMU_PAGE_ALIGN(IBMVNIC_IO_ENTITLEMENT_DEFAULT, tbl); 6428 6429 adapter = netdev_priv(netdev); 6430 6431 ret += PAGE_SIZE; /* the crq message queue */ 6432 ret += IOMMU_PAGE_ALIGN(sizeof(struct ibmvnic_statistics), tbl); 6433 6434 for (i = 0; i < adapter->req_tx_queues + adapter->req_rx_queues; i++) 6435 ret += 4 * PAGE_SIZE; /* the scrq message queue */ 6436 6437 for (i = 0; i < adapter->num_active_rx_pools; i++) 6438 ret += adapter->rx_pool[i].size * 6439 IOMMU_PAGE_ALIGN(adapter->rx_pool[i].buff_size, tbl); 6440 6441 return ret; 6442 } 6443 6444 static int ibmvnic_resume(struct device *dev) 6445 { 6446 struct net_device *netdev = dev_get_drvdata(dev); 6447 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 6448 6449 if (adapter->state != VNIC_OPEN) 6450 return 0; 6451 6452 tasklet_schedule(&adapter->tasklet); 6453 6454 return 0; 6455 } 6456 6457 static const struct vio_device_id ibmvnic_device_table[] = { 6458 {"network", "IBM,vnic"}, 6459 {"", "" } 6460 }; 6461 MODULE_DEVICE_TABLE(vio, ibmvnic_device_table); 6462 6463 
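/* Usage sketch (illustrative; the exact sysfs path is an assumption and
 * varies with the device's VIO unit address): the "failover" attribute
 * created by ibmvnic_probe() lets an administrator request a
 * client-initiated failover from userspace, e.g.
 *
 *     echo 1 > /sys/devices/vio/30000002/failover
 *
 * failover_store() first asks the hypervisor for a session token via
 * H_VIOCTL(H_GET_SESSION_TOKEN) and then signals H_SESSION_ERR_DETECTED
 * with it; only if those hcalls fail does it fall back to scheduling a
 * VNIC_RESET_FAILOVER reset through ibmvnic_reset().
 */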
static const struct dev_pm_ops ibmvnic_pm_ops = { 6464 .resume = ibmvnic_resume 6465 }; 6466 6467 static struct vio_driver ibmvnic_driver = { 6468 .id_table = ibmvnic_device_table, 6469 .probe = ibmvnic_probe, 6470 .remove = ibmvnic_remove, 6471 .get_desired_dma = ibmvnic_get_desired_dma, 6472 .name = ibmvnic_driver_name, 6473 .pm = &ibmvnic_pm_ops, 6474 }; 6475 6476 /* module functions */ 6477 static int __init ibmvnic_module_init(void) 6478 { 6479 pr_info("%s: %s %s\n", ibmvnic_driver_name, ibmvnic_driver_string, 6480 IBMVNIC_DRIVER_VERSION); 6481 6482 return vio_register_driver(&ibmvnic_driver); 6483 } 6484 6485 static void __exit ibmvnic_module_exit(void) 6486 { 6487 vio_unregister_driver(&ibmvnic_driver); 6488 } 6489 6490 module_init(ibmvnic_module_init); 6491 module_exit(ibmvnic_module_exit); 6492
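/* Module usage note (illustrative): ibmvnic_device_table above matches VIO
 * device-tree nodes of type "network" with compatible "IBM,vnic", so binding
 * normally only requires loading the module, e.g.
 *
 *     modprobe ibmvnic
 *
 * after which the VIO bus invokes ibmvnic_probe() for each matching virtual
 * NIC presented by the hypervisor.
 */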