1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /**************************************************************************/ 3 /* */ 4 /* IBM System i and System p Virtual NIC Device Driver */ 5 /* Copyright (C) 2014 IBM Corp. */ 6 /* Santiago Leon (santi_leon@yahoo.com) */ 7 /* Thomas Falcon (tlfalcon@linux.vnet.ibm.com) */ 8 /* John Allen (jallen@linux.vnet.ibm.com) */ 9 /* */ 10 /* */ 11 /* This module contains the implementation of a virtual ethernet device */ 12 /* for use with IBM i/p Series LPAR Linux. It utilizes the logical LAN */ 13 /* option of the RS/6000 Platform Architecture to interface with virtual */ 14 /* ethernet NICs that are presented to the partition by the hypervisor. */ 15 /* */ 16 /* Messages are passed between the VNIC driver and the VNIC server using */ 17 /* Command/Response Queues (CRQs) and sub CRQs (sCRQs). CRQs are used to */ 18 /* issue and receive commands that initiate communication with the server */ 19 /* on driver initialization. Sub CRQs (sCRQs) are similar to CRQs, but */ 20 /* are used by the driver to notify the server that a packet is */ 21 /* ready for transmission or that a buffer has been added to receive a */ 22 /* packet. Subsequently, sCRQs are used by the server to notify the */ 23 /* driver that a packet transmission has been completed or that a packet */ 24 /* has been received and placed in a waiting buffer. */ 25 /* */ 26 /* In lieu of a more conventional "on-the-fly" DMA mapping strategy in */ 27 /* which skbs are DMA mapped and immediately unmapped when the transmit */ 28 /* or receive has been completed, the VNIC driver is required to use */ 29 /* "long term mapping". This entails that large, continuous DMA mapped */ 30 /* buffers are allocated on driver initialization and these buffers are */ 31 /* then continuously reused to pass skbs to and from the VNIC server. 
*/ 32 /* */ 33 /**************************************************************************/ 34 35 #include <linux/module.h> 36 #include <linux/moduleparam.h> 37 #include <linux/types.h> 38 #include <linux/errno.h> 39 #include <linux/completion.h> 40 #include <linux/ioport.h> 41 #include <linux/dma-mapping.h> 42 #include <linux/kernel.h> 43 #include <linux/netdevice.h> 44 #include <linux/etherdevice.h> 45 #include <linux/skbuff.h> 46 #include <linux/init.h> 47 #include <linux/delay.h> 48 #include <linux/mm.h> 49 #include <linux/ethtool.h> 50 #include <linux/proc_fs.h> 51 #include <linux/if_arp.h> 52 #include <linux/in.h> 53 #include <linux/ip.h> 54 #include <linux/ipv6.h> 55 #include <linux/irq.h> 56 #include <linux/kthread.h> 57 #include <linux/seq_file.h> 58 #include <linux/interrupt.h> 59 #include <net/net_namespace.h> 60 #include <asm/hvcall.h> 61 #include <linux/atomic.h> 62 #include <asm/vio.h> 63 #include <asm/iommu.h> 64 #include <linux/uaccess.h> 65 #include <asm/firmware.h> 66 #include <linux/workqueue.h> 67 #include <linux/if_vlan.h> 68 #include <linux/utsname.h> 69 70 #include "ibmvnic.h" 71 72 static const char ibmvnic_driver_name[] = "ibmvnic"; 73 static const char ibmvnic_driver_string[] = "IBM System i/p Virtual NIC Driver"; 74 75 MODULE_AUTHOR("Santiago Leon"); 76 MODULE_DESCRIPTION("IBM System i/p Virtual NIC Driver"); 77 MODULE_LICENSE("GPL"); 78 MODULE_VERSION(IBMVNIC_DRIVER_VERSION); 79 80 static int ibmvnic_version = IBMVNIC_INITIAL_VERSION; 81 static void release_sub_crqs(struct ibmvnic_adapter *, bool); 82 static int ibmvnic_reset_crq(struct ibmvnic_adapter *); 83 static int ibmvnic_send_crq_init(struct ibmvnic_adapter *); 84 static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *); 85 static int ibmvnic_send_crq(struct ibmvnic_adapter *, union ibmvnic_crq *); 86 static int send_subcrq_indirect(struct ibmvnic_adapter *, u64, u64, u64); 87 static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance); 88 static int enable_scrq_irq(struct ibmvnic_adapter *, 89 struct ibmvnic_sub_crq_queue *); 90 static int disable_scrq_irq(struct ibmvnic_adapter *, 91 struct ibmvnic_sub_crq_queue *); 92 static int pending_scrq(struct ibmvnic_adapter *, 93 struct ibmvnic_sub_crq_queue *); 94 static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *, 95 struct ibmvnic_sub_crq_queue *); 96 static int ibmvnic_poll(struct napi_struct *napi, int data); 97 static void send_query_map(struct ibmvnic_adapter *adapter); 98 static int send_request_map(struct ibmvnic_adapter *, dma_addr_t, u32, u8); 99 static int send_request_unmap(struct ibmvnic_adapter *, u8); 100 static int send_login(struct ibmvnic_adapter *adapter); 101 static void send_query_cap(struct ibmvnic_adapter *adapter); 102 static int init_sub_crqs(struct ibmvnic_adapter *); 103 static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter); 104 static int ibmvnic_reset_init(struct ibmvnic_adapter *, bool reset); 105 static void release_crq_queue(struct ibmvnic_adapter *); 106 static int __ibmvnic_set_mac(struct net_device *, u8 *); 107 static int init_crq_queue(struct ibmvnic_adapter *adapter); 108 static int send_query_phys_parms(struct ibmvnic_adapter *adapter); 109 static void ibmvnic_tx_scrq_clean_buffer(struct ibmvnic_adapter *adapter, 110 struct ibmvnic_sub_crq_queue *tx_scrq); 111 static void free_long_term_buff(struct ibmvnic_adapter *adapter, 112 struct ibmvnic_long_term_buff *ltb); 113 114 struct ibmvnic_stat { 115 char name[ETH_GSTRING_LEN]; 116 int offset; 117 }; 118 119 #define IBMVNIC_STAT_OFF(stat) 
(offsetof(struct ibmvnic_adapter, stats) + \ 120 offsetof(struct ibmvnic_statistics, stat)) 121 #define IBMVNIC_GET_STAT(a, off) (*((u64 *)(((unsigned long)(a)) + (off)))) 122 123 static const struct ibmvnic_stat ibmvnic_stats[] = { 124 {"rx_packets", IBMVNIC_STAT_OFF(rx_packets)}, 125 {"rx_bytes", IBMVNIC_STAT_OFF(rx_bytes)}, 126 {"tx_packets", IBMVNIC_STAT_OFF(tx_packets)}, 127 {"tx_bytes", IBMVNIC_STAT_OFF(tx_bytes)}, 128 {"ucast_tx_packets", IBMVNIC_STAT_OFF(ucast_tx_packets)}, 129 {"ucast_rx_packets", IBMVNIC_STAT_OFF(ucast_rx_packets)}, 130 {"mcast_tx_packets", IBMVNIC_STAT_OFF(mcast_tx_packets)}, 131 {"mcast_rx_packets", IBMVNIC_STAT_OFF(mcast_rx_packets)}, 132 {"bcast_tx_packets", IBMVNIC_STAT_OFF(bcast_tx_packets)}, 133 {"bcast_rx_packets", IBMVNIC_STAT_OFF(bcast_rx_packets)}, 134 {"align_errors", IBMVNIC_STAT_OFF(align_errors)}, 135 {"fcs_errors", IBMVNIC_STAT_OFF(fcs_errors)}, 136 {"single_collision_frames", IBMVNIC_STAT_OFF(single_collision_frames)}, 137 {"multi_collision_frames", IBMVNIC_STAT_OFF(multi_collision_frames)}, 138 {"sqe_test_errors", IBMVNIC_STAT_OFF(sqe_test_errors)}, 139 {"deferred_tx", IBMVNIC_STAT_OFF(deferred_tx)}, 140 {"late_collisions", IBMVNIC_STAT_OFF(late_collisions)}, 141 {"excess_collisions", IBMVNIC_STAT_OFF(excess_collisions)}, 142 {"internal_mac_tx_errors", IBMVNIC_STAT_OFF(internal_mac_tx_errors)}, 143 {"carrier_sense", IBMVNIC_STAT_OFF(carrier_sense)}, 144 {"too_long_frames", IBMVNIC_STAT_OFF(too_long_frames)}, 145 {"internal_mac_rx_errors", IBMVNIC_STAT_OFF(internal_mac_rx_errors)}, 146 }; 147 148 static int send_crq_init_complete(struct ibmvnic_adapter *adapter) 149 { 150 union ibmvnic_crq crq; 151 152 memset(&crq, 0, sizeof(crq)); 153 crq.generic.first = IBMVNIC_CRQ_INIT_CMD; 154 crq.generic.cmd = IBMVNIC_CRQ_INIT_COMPLETE; 155 156 return ibmvnic_send_crq(adapter, &crq); 157 } 158 159 static int send_version_xchg(struct ibmvnic_adapter *adapter) 160 { 161 union ibmvnic_crq crq; 162 163 memset(&crq, 0, sizeof(crq)); 164 crq.version_exchange.first = IBMVNIC_CRQ_CMD; 165 crq.version_exchange.cmd = VERSION_EXCHANGE; 166 crq.version_exchange.version = cpu_to_be16(ibmvnic_version); 167 168 return ibmvnic_send_crq(adapter, &crq); 169 } 170 171 static long h_reg_sub_crq(unsigned long unit_address, unsigned long token, 172 unsigned long length, unsigned long *number, 173 unsigned long *irq) 174 { 175 unsigned long retbuf[PLPAR_HCALL_BUFSIZE]; 176 long rc; 177 178 rc = plpar_hcall(H_REG_SUB_CRQ, retbuf, unit_address, token, length); 179 *number = retbuf[0]; 180 *irq = retbuf[1]; 181 182 return rc; 183 } 184 185 /** 186 * ibmvnic_wait_for_completion - Check device state and wait for completion 187 * @adapter: private device data 188 * @comp_done: completion structure to wait for 189 * @timeout: time to wait in milliseconds 190 * 191 * Wait for a completion signal or until the timeout limit is reached 192 * while checking that the device is still active. 
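 *
 * Return: 0 if the completion arrived, -ENODEV if the CRQ went inactive
 * (device down), or -ETIMEDOUT if the timeout expired.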
 */
static int ibmvnic_wait_for_completion(struct ibmvnic_adapter *adapter,
				       struct completion *comp_done,
				       unsigned long timeout)
{
	struct net_device *netdev;
	unsigned long div_timeout;
	u8 retry;

	netdev = adapter->netdev;
	retry = 5;
	div_timeout = msecs_to_jiffies(timeout / retry);
	while (true) {
		if (!adapter->crq.active) {
			netdev_err(netdev, "Device down!\n");
			return -ENODEV;
		}
		if (!retry--)
			break;
		if (wait_for_completion_timeout(comp_done, div_timeout))
			return 0;
	}
	netdev_err(netdev, "Operation timed out.\n");
	return -ETIMEDOUT;
}

/**
 * reuse_ltb() - Check if a long term buffer can be reused
 * @ltb: The long term buffer to be checked
 * @size: The size of the long term buffer.
 *
 * An LTB can be reused unless its size has changed.
 *
 * Return: true if the LTB can be reused, false otherwise.
 */
static bool reuse_ltb(struct ibmvnic_long_term_buff *ltb, int size)
{
	return (ltb->buff && ltb->size == size);
}

/**
 * alloc_long_term_buff() - Allocate a long term buffer (LTB)
 *
 * @adapter: ibmvnic adapter associated to the LTB
 * @ltb: container object for the LTB
 * @size: size of the LTB
 *
 * Allocate an LTB of the specified size and notify VIOS.
 *
 * If the given @ltb already has the correct size, reuse it. Otherwise, if
 * it's non-NULL, free it. Then allocate a new one of the correct size.
 * Notify the VIOS either way since we may now be working with a new VIOS.
 *
 * Allocating larger chunks of memory during resets, especially during LPM
 * or under low-memory situations, can cause resets to fail/timeout and the
 * LPAR to lose connectivity. So hold onto the LTB even if we fail to
 * communicate with the VIOS and reuse it on the next open. Free the LTB
 * when the adapter is closed.
 *
 * Return: 0 if we were able to allocate the LTB and notify the VIOS and
 * a negative value otherwise.
 */
static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb, int size)
{
	struct device *dev = &adapter->vdev->dev;
	int rc;

	if (!reuse_ltb(ltb, size)) {
		dev_dbg(dev,
			"LTB size changed from 0x%llx to 0x%x, reallocating\n",
			ltb->size, size);
		free_long_term_buff(adapter, ltb);
	}

	if (ltb->buff) {
		dev_dbg(dev, "Reusing LTB [map %d, size 0x%llx]\n",
			ltb->map_id, ltb->size);
	} else {
		ltb->buff = dma_alloc_coherent(dev, size, &ltb->addr,
					       GFP_KERNEL);
		if (!ltb->buff) {
			dev_err(dev, "Couldn't alloc long term buffer\n");
			return -ENOMEM;
		}
		ltb->size = size;

		ltb->map_id = find_first_zero_bit(adapter->map_ids,
						  MAX_MAP_ID);
		bitmap_set(adapter->map_ids, ltb->map_id, 1);

		dev_dbg(dev,
			"Allocated new LTB [map %d, size 0x%llx]\n",
			ltb->map_id, ltb->size);
	}

	/* Ensure ltb is zeroed - especially when reusing it. */
	memset(ltb->buff, 0, ltb->size);

	mutex_lock(&adapter->fw_lock);
	adapter->fw_done_rc = 0;
	reinit_completion(&adapter->fw_done);

	rc = send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id);
	if (rc) {
		dev_err(dev, "send_request_map failed, rc = %d\n", rc);
		goto out;
	}

	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
	if (rc) {
		dev_err(dev, "LTB map request aborted or timed out, rc = %d\n",
			rc);
		goto out;
	}

	if (adapter->fw_done_rc) {
		dev_err(dev, "Couldn't map LTB, rc = %d\n",
			adapter->fw_done_rc);
		rc = -1;
		goto out;
	}
	rc = 0;
out:
	/* don't free LTB on communication error - see function header */
	mutex_unlock(&adapter->fw_lock);
	return rc;
}

static void free_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb)
{
	struct device *dev = &adapter->vdev->dev;

	if (!ltb->buff)
		return;

	/* VIOS automatically unmaps the long term buffer at remote
	 * end for the following resets:
	 * FAILOVER, MOBILITY, TIMEOUT.
	 */
	if (adapter->reset_reason != VNIC_RESET_FAILOVER &&
	    adapter->reset_reason != VNIC_RESET_MOBILITY &&
	    adapter->reset_reason != VNIC_RESET_TIMEOUT)
		send_request_unmap(adapter, ltb->map_id);

	dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);

	ltb->buff = NULL;
	/* mark this map_id free */
	bitmap_clear(adapter->map_ids, ltb->map_id, 1);
	ltb->map_id = 0;
}

static void deactivate_rx_pools(struct ibmvnic_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_active_rx_pools; i++)
		adapter->rx_pool[i].active = 0;
}

static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
			      struct ibmvnic_rx_pool *pool)
{
	int count = pool->size - atomic_read(&pool->available);
	u64 handle = adapter->rx_scrq[pool->index]->handle;
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_ind_xmit_queue *ind_bufp;
	struct ibmvnic_sub_crq_queue *rx_scrq;
	union sub_crq *sub_crq;
	int buffers_added = 0;
	unsigned long lpar_rc;
	struct sk_buff *skb;
	unsigned int offset;
	dma_addr_t dma_addr;
	unsigned char *dst;
	int shift = 0;
	int index;
	int i;

	if (!pool->active)
		return;

	rx_scrq = adapter->rx_scrq[pool->index];
	ind_bufp = &rx_scrq->ind_buf;

	/* netdev_alloc_skb() could have failed after we saved a few skbs
	 * in the indir_buf and we would not have sent them to VIOS yet.
	 * To account for them, start the loop at ind_bufp->index rather
	 * than 0. If we pushed all the skbs to VIOS, ind_bufp->index will
	 * be 0.
	 */
	for (i = ind_bufp->index; i < count; ++i) {
		index = pool->free_map[pool->next_free];

		/* We may be reusing the skb from earlier resets. Allocate
		 * only if necessary. But since the LTB may have changed
		 * during reset (see init_rx_pools()), update LTB below
		 * even if reusing skb.
392 */ 393 skb = pool->rx_buff[index].skb; 394 if (!skb) { 395 skb = netdev_alloc_skb(adapter->netdev, 396 pool->buff_size); 397 if (!skb) { 398 dev_err(dev, "Couldn't replenish rx buff\n"); 399 adapter->replenish_no_mem++; 400 break; 401 } 402 } 403 404 pool->free_map[pool->next_free] = IBMVNIC_INVALID_MAP; 405 pool->next_free = (pool->next_free + 1) % pool->size; 406 407 /* Copy the skb to the long term mapped DMA buffer */ 408 offset = index * pool->buff_size; 409 dst = pool->long_term_buff.buff + offset; 410 memset(dst, 0, pool->buff_size); 411 dma_addr = pool->long_term_buff.addr + offset; 412 413 /* add the skb to an rx_buff in the pool */ 414 pool->rx_buff[index].data = dst; 415 pool->rx_buff[index].dma = dma_addr; 416 pool->rx_buff[index].skb = skb; 417 pool->rx_buff[index].pool_index = pool->index; 418 pool->rx_buff[index].size = pool->buff_size; 419 420 /* queue the rx_buff for the next send_subcrq_indirect */ 421 sub_crq = &ind_bufp->indir_arr[ind_bufp->index++]; 422 memset(sub_crq, 0, sizeof(*sub_crq)); 423 sub_crq->rx_add.first = IBMVNIC_CRQ_CMD; 424 sub_crq->rx_add.correlator = 425 cpu_to_be64((u64)&pool->rx_buff[index]); 426 sub_crq->rx_add.ioba = cpu_to_be32(dma_addr); 427 sub_crq->rx_add.map_id = pool->long_term_buff.map_id; 428 429 /* The length field of the sCRQ is defined to be 24 bits so the 430 * buffer size needs to be left shifted by a byte before it is 431 * converted to big endian to prevent the last byte from being 432 * truncated. 433 */ 434 #ifdef __LITTLE_ENDIAN__ 435 shift = 8; 436 #endif 437 sub_crq->rx_add.len = cpu_to_be32(pool->buff_size << shift); 438 439 /* if send_subcrq_indirect queue is full, flush to VIOS */ 440 if (ind_bufp->index == IBMVNIC_MAX_IND_DESCS || 441 i == count - 1) { 442 lpar_rc = 443 send_subcrq_indirect(adapter, handle, 444 (u64)ind_bufp->indir_dma, 445 (u64)ind_bufp->index); 446 if (lpar_rc != H_SUCCESS) 447 goto failure; 448 buffers_added += ind_bufp->index; 449 adapter->replenish_add_buff_success += ind_bufp->index; 450 ind_bufp->index = 0; 451 } 452 } 453 atomic_add(buffers_added, &pool->available); 454 return; 455 456 failure: 457 if (lpar_rc != H_PARAMETER && lpar_rc != H_CLOSED) 458 dev_err_ratelimited(dev, "rx: replenish packet buffer failed\n"); 459 for (i = ind_bufp->index - 1; i >= 0; --i) { 460 struct ibmvnic_rx_buff *rx_buff; 461 462 pool->next_free = pool->next_free == 0 ? 463 pool->size - 1 : pool->next_free - 1; 464 sub_crq = &ind_bufp->indir_arr[i]; 465 rx_buff = (struct ibmvnic_rx_buff *) 466 be64_to_cpu(sub_crq->rx_add.correlator); 467 index = (int)(rx_buff - pool->rx_buff); 468 pool->free_map[pool->next_free] = index; 469 dev_kfree_skb_any(pool->rx_buff[index].skb); 470 pool->rx_buff[index].skb = NULL; 471 } 472 adapter->replenish_add_buff_failure += ind_bufp->index; 473 atomic_add(buffers_added, &pool->available); 474 ind_bufp->index = 0; 475 if (lpar_rc == H_CLOSED || adapter->failover_pending) { 476 /* Disable buffer pool replenishment and report carrier off if 477 * queue is closed or pending failover. 478 * Firmware guarantees that a signal will be sent to the 479 * driver, triggering a reset. 
480 */ 481 deactivate_rx_pools(adapter); 482 netif_carrier_off(adapter->netdev); 483 } 484 } 485 486 static void replenish_pools(struct ibmvnic_adapter *adapter) 487 { 488 int i; 489 490 adapter->replenish_task_cycles++; 491 for (i = 0; i < adapter->num_active_rx_pools; i++) { 492 if (adapter->rx_pool[i].active) 493 replenish_rx_pool(adapter, &adapter->rx_pool[i]); 494 } 495 496 netdev_dbg(adapter->netdev, "Replenished %d pools\n", i); 497 } 498 499 static void release_stats_buffers(struct ibmvnic_adapter *adapter) 500 { 501 kfree(adapter->tx_stats_buffers); 502 kfree(adapter->rx_stats_buffers); 503 adapter->tx_stats_buffers = NULL; 504 adapter->rx_stats_buffers = NULL; 505 } 506 507 static int init_stats_buffers(struct ibmvnic_adapter *adapter) 508 { 509 adapter->tx_stats_buffers = 510 kcalloc(IBMVNIC_MAX_QUEUES, 511 sizeof(struct ibmvnic_tx_queue_stats), 512 GFP_KERNEL); 513 if (!adapter->tx_stats_buffers) 514 return -ENOMEM; 515 516 adapter->rx_stats_buffers = 517 kcalloc(IBMVNIC_MAX_QUEUES, 518 sizeof(struct ibmvnic_rx_queue_stats), 519 GFP_KERNEL); 520 if (!adapter->rx_stats_buffers) 521 return -ENOMEM; 522 523 return 0; 524 } 525 526 static void release_stats_token(struct ibmvnic_adapter *adapter) 527 { 528 struct device *dev = &adapter->vdev->dev; 529 530 if (!adapter->stats_token) 531 return; 532 533 dma_unmap_single(dev, adapter->stats_token, 534 sizeof(struct ibmvnic_statistics), 535 DMA_FROM_DEVICE); 536 adapter->stats_token = 0; 537 } 538 539 static int init_stats_token(struct ibmvnic_adapter *adapter) 540 { 541 struct device *dev = &adapter->vdev->dev; 542 dma_addr_t stok; 543 544 stok = dma_map_single(dev, &adapter->stats, 545 sizeof(struct ibmvnic_statistics), 546 DMA_FROM_DEVICE); 547 if (dma_mapping_error(dev, stok)) { 548 dev_err(dev, "Couldn't map stats buffer\n"); 549 return -1; 550 } 551 552 adapter->stats_token = stok; 553 netdev_dbg(adapter->netdev, "Stats token initialized (%llx)\n", stok); 554 return 0; 555 } 556 557 /** 558 * release_rx_pools() - Release any rx pools attached to @adapter. 559 * @adapter: ibmvnic adapter 560 * 561 * Safe to call this multiple times - even if no pools are attached. 562 */ 563 static void release_rx_pools(struct ibmvnic_adapter *adapter) 564 { 565 struct ibmvnic_rx_pool *rx_pool; 566 int i, j; 567 568 if (!adapter->rx_pool) 569 return; 570 571 for (i = 0; i < adapter->num_active_rx_pools; i++) { 572 rx_pool = &adapter->rx_pool[i]; 573 574 netdev_dbg(adapter->netdev, "Releasing rx_pool[%d]\n", i); 575 576 kfree(rx_pool->free_map); 577 578 free_long_term_buff(adapter, &rx_pool->long_term_buff); 579 580 if (!rx_pool->rx_buff) 581 continue; 582 583 for (j = 0; j < rx_pool->size; j++) { 584 if (rx_pool->rx_buff[j].skb) { 585 dev_kfree_skb_any(rx_pool->rx_buff[j].skb); 586 rx_pool->rx_buff[j].skb = NULL; 587 } 588 } 589 590 kfree(rx_pool->rx_buff); 591 } 592 593 kfree(adapter->rx_pool); 594 adapter->rx_pool = NULL; 595 adapter->num_active_rx_pools = 0; 596 adapter->prev_rx_pool_size = 0; 597 } 598 599 /** 600 * reuse_rx_pools() - Check if the existing rx pools can be reused. 601 * @adapter: ibmvnic adapter 602 * 603 * Check if the existing rx pools in the adapter can be reused. The 604 * pools can be reused if the pool parameters (number of pools, 605 * number of buffers in the pool and size of each buffer) have not 606 * changed. 607 * 608 * NOTE: This assumes that all pools have the same number of buffers 609 * which is the case currently. If that changes, we must fix this. 
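 *
 * If the pool count or pool size differs from the new request, the old
 * values must still fall within the min/max limits currently advertised
 * by the server (see the range checks below).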
610 * 611 * Return: true if the rx pools can be reused, false otherwise. 612 */ 613 static bool reuse_rx_pools(struct ibmvnic_adapter *adapter) 614 { 615 u64 old_num_pools, new_num_pools; 616 u64 old_pool_size, new_pool_size; 617 u64 old_buff_size, new_buff_size; 618 619 if (!adapter->rx_pool) 620 return false; 621 622 old_num_pools = adapter->num_active_rx_pools; 623 new_num_pools = adapter->req_rx_queues; 624 625 old_pool_size = adapter->prev_rx_pool_size; 626 new_pool_size = adapter->req_rx_add_entries_per_subcrq; 627 628 old_buff_size = adapter->prev_rx_buf_sz; 629 new_buff_size = adapter->cur_rx_buf_sz; 630 631 /* Require buff size to be exactly same for now */ 632 if (old_buff_size != new_buff_size) 633 return false; 634 635 if (old_num_pools == new_num_pools && old_pool_size == new_pool_size) 636 return true; 637 638 if (old_num_pools < adapter->min_rx_queues || 639 old_num_pools > adapter->max_rx_queues || 640 old_pool_size < adapter->min_rx_add_entries_per_subcrq || 641 old_pool_size > adapter->max_rx_add_entries_per_subcrq) 642 return false; 643 644 return true; 645 } 646 647 /** 648 * init_rx_pools(): Initialize the set of receiver pools in the adapter. 649 * @netdev: net device associated with the vnic interface 650 * 651 * Initialize the set of receiver pools in the ibmvnic adapter associated 652 * with the net_device @netdev. If possible, reuse the existing rx pools. 653 * Otherwise free any existing pools and allocate a new set of pools 654 * before initializing them. 655 * 656 * Return: 0 on success and negative value on error. 657 */ 658 static int init_rx_pools(struct net_device *netdev) 659 { 660 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 661 struct device *dev = &adapter->vdev->dev; 662 struct ibmvnic_rx_pool *rx_pool; 663 u64 num_pools; 664 u64 pool_size; /* # of buffers in one pool */ 665 u64 buff_size; 666 int i, j; 667 668 pool_size = adapter->req_rx_add_entries_per_subcrq; 669 num_pools = adapter->req_rx_queues; 670 buff_size = adapter->cur_rx_buf_sz; 671 672 if (reuse_rx_pools(adapter)) { 673 dev_dbg(dev, "Reusing rx pools\n"); 674 goto update_ltb; 675 } 676 677 /* Allocate/populate the pools. */ 678 release_rx_pools(adapter); 679 680 adapter->rx_pool = kcalloc(num_pools, 681 sizeof(struct ibmvnic_rx_pool), 682 GFP_KERNEL); 683 if (!adapter->rx_pool) { 684 dev_err(dev, "Failed to allocate rx pools\n"); 685 return -1; 686 } 687 688 /* Set num_active_rx_pools early. If we fail below after partial 689 * allocation, release_rx_pools() will know how many to look for. 
690 */ 691 adapter->num_active_rx_pools = num_pools; 692 693 for (i = 0; i < num_pools; i++) { 694 rx_pool = &adapter->rx_pool[i]; 695 696 netdev_dbg(adapter->netdev, 697 "Initializing rx_pool[%d], %lld buffs, %lld bytes each\n", 698 i, pool_size, buff_size); 699 700 rx_pool->size = pool_size; 701 rx_pool->index = i; 702 rx_pool->buff_size = ALIGN(buff_size, L1_CACHE_BYTES); 703 704 rx_pool->free_map = kcalloc(rx_pool->size, sizeof(int), 705 GFP_KERNEL); 706 if (!rx_pool->free_map) { 707 dev_err(dev, "Couldn't alloc free_map %d\n", i); 708 goto out_release; 709 } 710 711 rx_pool->rx_buff = kcalloc(rx_pool->size, 712 sizeof(struct ibmvnic_rx_buff), 713 GFP_KERNEL); 714 if (!rx_pool->rx_buff) { 715 dev_err(dev, "Couldn't alloc rx buffers\n"); 716 goto out_release; 717 } 718 } 719 720 adapter->prev_rx_pool_size = pool_size; 721 adapter->prev_rx_buf_sz = adapter->cur_rx_buf_sz; 722 723 update_ltb: 724 for (i = 0; i < num_pools; i++) { 725 rx_pool = &adapter->rx_pool[i]; 726 dev_dbg(dev, "Updating LTB for rx pool %d [%d, %d]\n", 727 i, rx_pool->size, rx_pool->buff_size); 728 729 if (alloc_long_term_buff(adapter, &rx_pool->long_term_buff, 730 rx_pool->size * rx_pool->buff_size)) 731 goto out; 732 733 for (j = 0; j < rx_pool->size; ++j) { 734 struct ibmvnic_rx_buff *rx_buff; 735 736 rx_pool->free_map[j] = j; 737 738 /* NOTE: Don't clear rx_buff->skb here - will leak 739 * memory! replenish_rx_pool() will reuse skbs or 740 * allocate as necessary. 741 */ 742 rx_buff = &rx_pool->rx_buff[j]; 743 rx_buff->dma = 0; 744 rx_buff->data = 0; 745 rx_buff->size = 0; 746 rx_buff->pool_index = 0; 747 } 748 749 /* Mark pool "empty" so replenish_rx_pools() will 750 * update the LTB info for each buffer 751 */ 752 atomic_set(&rx_pool->available, 0); 753 rx_pool->next_alloc = 0; 754 rx_pool->next_free = 0; 755 /* replenish_rx_pool() may have called deactivate_rx_pools() 756 * on failover. Ensure pool is active now. 757 */ 758 rx_pool->active = 1; 759 } 760 return 0; 761 out_release: 762 release_rx_pools(adapter); 763 out: 764 /* We failed to allocate one or more LTBs or map them on the VIOS. 765 * Hold onto the pools and any LTBs that we did allocate/map. 766 */ 767 return -1; 768 } 769 770 static void release_vpd_data(struct ibmvnic_adapter *adapter) 771 { 772 if (!adapter->vpd) 773 return; 774 775 kfree(adapter->vpd->buff); 776 kfree(adapter->vpd); 777 778 adapter->vpd = NULL; 779 } 780 781 static void release_one_tx_pool(struct ibmvnic_adapter *adapter, 782 struct ibmvnic_tx_pool *tx_pool) 783 { 784 kfree(tx_pool->tx_buff); 785 kfree(tx_pool->free_map); 786 free_long_term_buff(adapter, &tx_pool->long_term_buff); 787 } 788 789 /** 790 * release_tx_pools() - Release any tx pools attached to @adapter. 791 * @adapter: ibmvnic adapter 792 * 793 * Safe to call this multiple times - even if no pools are attached. 794 */ 795 static void release_tx_pools(struct ibmvnic_adapter *adapter) 796 { 797 int i; 798 799 /* init_tx_pools() ensures that ->tx_pool and ->tso_pool are 800 * both NULL or both non-NULL. So we only need to check one. 
801 */ 802 if (!adapter->tx_pool) 803 return; 804 805 for (i = 0; i < adapter->num_active_tx_pools; i++) { 806 release_one_tx_pool(adapter, &adapter->tx_pool[i]); 807 release_one_tx_pool(adapter, &adapter->tso_pool[i]); 808 } 809 810 kfree(adapter->tx_pool); 811 adapter->tx_pool = NULL; 812 kfree(adapter->tso_pool); 813 adapter->tso_pool = NULL; 814 adapter->num_active_tx_pools = 0; 815 adapter->prev_tx_pool_size = 0; 816 } 817 818 static int init_one_tx_pool(struct net_device *netdev, 819 struct ibmvnic_tx_pool *tx_pool, 820 int pool_size, int buf_size) 821 { 822 int i; 823 824 tx_pool->tx_buff = kcalloc(pool_size, 825 sizeof(struct ibmvnic_tx_buff), 826 GFP_KERNEL); 827 if (!tx_pool->tx_buff) 828 return -1; 829 830 tx_pool->free_map = kcalloc(pool_size, sizeof(int), GFP_KERNEL); 831 if (!tx_pool->free_map) { 832 kfree(tx_pool->tx_buff); 833 tx_pool->tx_buff = NULL; 834 return -1; 835 } 836 837 for (i = 0; i < pool_size; i++) 838 tx_pool->free_map[i] = i; 839 840 tx_pool->consumer_index = 0; 841 tx_pool->producer_index = 0; 842 tx_pool->num_buffers = pool_size; 843 tx_pool->buf_size = buf_size; 844 845 return 0; 846 } 847 848 /** 849 * reuse_tx_pools() - Check if the existing tx pools can be reused. 850 * @adapter: ibmvnic adapter 851 * 852 * Check if the existing tx pools in the adapter can be reused. The 853 * pools can be reused if the pool parameters (number of pools, 854 * number of buffers in the pool and mtu) have not changed. 855 * 856 * NOTE: This assumes that all pools have the same number of buffers 857 * which is the case currently. If that changes, we must fix this. 858 * 859 * Return: true if the tx pools can be reused, false otherwise. 860 */ 861 static bool reuse_tx_pools(struct ibmvnic_adapter *adapter) 862 { 863 u64 old_num_pools, new_num_pools; 864 u64 old_pool_size, new_pool_size; 865 u64 old_mtu, new_mtu; 866 867 if (!adapter->tx_pool) 868 return false; 869 870 old_num_pools = adapter->num_active_tx_pools; 871 new_num_pools = adapter->num_active_tx_scrqs; 872 old_pool_size = adapter->prev_tx_pool_size; 873 new_pool_size = adapter->req_tx_entries_per_subcrq; 874 old_mtu = adapter->prev_mtu; 875 new_mtu = adapter->req_mtu; 876 877 /* Require MTU to be exactly same to reuse pools for now */ 878 if (old_mtu != new_mtu) 879 return false; 880 881 if (old_num_pools == new_num_pools && old_pool_size == new_pool_size) 882 return true; 883 884 if (old_num_pools < adapter->min_tx_queues || 885 old_num_pools > adapter->max_tx_queues || 886 old_pool_size < adapter->min_tx_entries_per_subcrq || 887 old_pool_size > adapter->max_tx_entries_per_subcrq) 888 return false; 889 890 return true; 891 } 892 893 /** 894 * init_tx_pools(): Initialize the set of transmit pools in the adapter. 895 * @netdev: net device associated with the vnic interface 896 * 897 * Initialize the set of transmit pools in the ibmvnic adapter associated 898 * with the net_device @netdev. If possible, reuse the existing tx pools. 899 * Otherwise free any existing pools and allocate a new set of pools 900 * before initializing them. 901 * 902 * Return: 0 on success and negative value on error. 
903 */ 904 static int init_tx_pools(struct net_device *netdev) 905 { 906 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 907 struct device *dev = &adapter->vdev->dev; 908 int num_pools; 909 u64 pool_size; /* # of buffers in pool */ 910 u64 buff_size; 911 int i, j, rc; 912 913 num_pools = adapter->req_tx_queues; 914 915 /* We must notify the VIOS about the LTB on all resets - but we only 916 * need to alloc/populate pools if either the number of buffers or 917 * size of each buffer in the pool has changed. 918 */ 919 if (reuse_tx_pools(adapter)) { 920 netdev_dbg(netdev, "Reusing tx pools\n"); 921 goto update_ltb; 922 } 923 924 /* Allocate/populate the pools. */ 925 release_tx_pools(adapter); 926 927 pool_size = adapter->req_tx_entries_per_subcrq; 928 num_pools = adapter->num_active_tx_scrqs; 929 930 adapter->tx_pool = kcalloc(num_pools, 931 sizeof(struct ibmvnic_tx_pool), GFP_KERNEL); 932 if (!adapter->tx_pool) 933 return -1; 934 935 adapter->tso_pool = kcalloc(num_pools, 936 sizeof(struct ibmvnic_tx_pool), GFP_KERNEL); 937 /* To simplify release_tx_pools() ensure that ->tx_pool and 938 * ->tso_pool are either both NULL or both non-NULL. 939 */ 940 if (!adapter->tso_pool) { 941 kfree(adapter->tx_pool); 942 adapter->tx_pool = NULL; 943 return -1; 944 } 945 946 /* Set num_active_tx_pools early. If we fail below after partial 947 * allocation, release_tx_pools() will know how many to look for. 948 */ 949 adapter->num_active_tx_pools = num_pools; 950 951 buff_size = adapter->req_mtu + VLAN_HLEN; 952 buff_size = ALIGN(buff_size, L1_CACHE_BYTES); 953 954 for (i = 0; i < num_pools; i++) { 955 dev_dbg(dev, "Init tx pool %d [%llu, %llu]\n", 956 i, adapter->req_tx_entries_per_subcrq, buff_size); 957 958 rc = init_one_tx_pool(netdev, &adapter->tx_pool[i], 959 pool_size, buff_size); 960 if (rc) 961 goto out_release; 962 963 rc = init_one_tx_pool(netdev, &adapter->tso_pool[i], 964 IBMVNIC_TSO_BUFS, 965 IBMVNIC_TSO_BUF_SZ); 966 if (rc) 967 goto out_release; 968 } 969 970 adapter->prev_tx_pool_size = pool_size; 971 adapter->prev_mtu = adapter->req_mtu; 972 973 update_ltb: 974 /* NOTE: All tx_pools have the same number of buffers (which is 975 * same as pool_size). All tso_pools have IBMVNIC_TSO_BUFS 976 * buffers (see calls init_one_tx_pool() for these). 977 * For consistency, we use tx_pool->num_buffers and 978 * tso_pool->num_buffers below. 
979 */ 980 rc = -1; 981 for (i = 0; i < num_pools; i++) { 982 struct ibmvnic_tx_pool *tso_pool; 983 struct ibmvnic_tx_pool *tx_pool; 984 u32 ltb_size; 985 986 tx_pool = &adapter->tx_pool[i]; 987 ltb_size = tx_pool->num_buffers * tx_pool->buf_size; 988 if (alloc_long_term_buff(adapter, &tx_pool->long_term_buff, 989 ltb_size)) 990 goto out; 991 992 dev_dbg(dev, "Updated LTB for tx pool %d [%p, %d, %d]\n", 993 i, tx_pool->long_term_buff.buff, 994 tx_pool->num_buffers, tx_pool->buf_size); 995 996 tx_pool->consumer_index = 0; 997 tx_pool->producer_index = 0; 998 999 for (j = 0; j < tx_pool->num_buffers; j++) 1000 tx_pool->free_map[j] = j; 1001 1002 tso_pool = &adapter->tso_pool[i]; 1003 ltb_size = tso_pool->num_buffers * tso_pool->buf_size; 1004 if (alloc_long_term_buff(adapter, &tso_pool->long_term_buff, 1005 ltb_size)) 1006 goto out; 1007 1008 dev_dbg(dev, "Updated LTB for tso pool %d [%p, %d, %d]\n", 1009 i, tso_pool->long_term_buff.buff, 1010 tso_pool->num_buffers, tso_pool->buf_size); 1011 1012 tso_pool->consumer_index = 0; 1013 tso_pool->producer_index = 0; 1014 1015 for (j = 0; j < tso_pool->num_buffers; j++) 1016 tso_pool->free_map[j] = j; 1017 } 1018 1019 return 0; 1020 out_release: 1021 release_tx_pools(adapter); 1022 out: 1023 /* We failed to allocate one or more LTBs or map them on the VIOS. 1024 * Hold onto the pools and any LTBs that we did allocate/map. 1025 */ 1026 return rc; 1027 } 1028 1029 static void ibmvnic_napi_enable(struct ibmvnic_adapter *adapter) 1030 { 1031 int i; 1032 1033 if (adapter->napi_enabled) 1034 return; 1035 1036 for (i = 0; i < adapter->req_rx_queues; i++) 1037 napi_enable(&adapter->napi[i]); 1038 1039 adapter->napi_enabled = true; 1040 } 1041 1042 static void ibmvnic_napi_disable(struct ibmvnic_adapter *adapter) 1043 { 1044 int i; 1045 1046 if (!adapter->napi_enabled) 1047 return; 1048 1049 for (i = 0; i < adapter->req_rx_queues; i++) { 1050 netdev_dbg(adapter->netdev, "Disabling napi[%d]\n", i); 1051 napi_disable(&adapter->napi[i]); 1052 } 1053 1054 adapter->napi_enabled = false; 1055 } 1056 1057 static int init_napi(struct ibmvnic_adapter *adapter) 1058 { 1059 int i; 1060 1061 adapter->napi = kcalloc(adapter->req_rx_queues, 1062 sizeof(struct napi_struct), GFP_KERNEL); 1063 if (!adapter->napi) 1064 return -ENOMEM; 1065 1066 for (i = 0; i < adapter->req_rx_queues; i++) { 1067 netdev_dbg(adapter->netdev, "Adding napi[%d]\n", i); 1068 netif_napi_add(adapter->netdev, &adapter->napi[i], 1069 ibmvnic_poll, NAPI_POLL_WEIGHT); 1070 } 1071 1072 adapter->num_active_rx_napi = adapter->req_rx_queues; 1073 return 0; 1074 } 1075 1076 static void release_napi(struct ibmvnic_adapter *adapter) 1077 { 1078 int i; 1079 1080 if (!adapter->napi) 1081 return; 1082 1083 for (i = 0; i < adapter->num_active_rx_napi; i++) { 1084 netdev_dbg(adapter->netdev, "Releasing napi[%d]\n", i); 1085 netif_napi_del(&adapter->napi[i]); 1086 } 1087 1088 kfree(adapter->napi); 1089 adapter->napi = NULL; 1090 adapter->num_active_rx_napi = 0; 1091 adapter->napi_enabled = false; 1092 } 1093 1094 static const char *adapter_state_to_string(enum vnic_state state) 1095 { 1096 switch (state) { 1097 case VNIC_PROBING: 1098 return "PROBING"; 1099 case VNIC_PROBED: 1100 return "PROBED"; 1101 case VNIC_OPENING: 1102 return "OPENING"; 1103 case VNIC_OPEN: 1104 return "OPEN"; 1105 case VNIC_CLOSING: 1106 return "CLOSING"; 1107 case VNIC_CLOSED: 1108 return "CLOSED"; 1109 case VNIC_REMOVING: 1110 return "REMOVING"; 1111 case VNIC_REMOVED: 1112 return "REMOVED"; 1113 case VNIC_DOWN: 1114 return "DOWN"; 1115 } 
1116 return "UNKNOWN"; 1117 } 1118 1119 static int ibmvnic_login(struct net_device *netdev) 1120 { 1121 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 1122 unsigned long timeout = msecs_to_jiffies(20000); 1123 int retry_count = 0; 1124 int retries = 10; 1125 bool retry; 1126 int rc; 1127 1128 do { 1129 retry = false; 1130 if (retry_count > retries) { 1131 netdev_warn(netdev, "Login attempts exceeded\n"); 1132 return -1; 1133 } 1134 1135 adapter->init_done_rc = 0; 1136 reinit_completion(&adapter->init_done); 1137 rc = send_login(adapter); 1138 if (rc) 1139 return rc; 1140 1141 if (!wait_for_completion_timeout(&adapter->init_done, 1142 timeout)) { 1143 netdev_warn(netdev, "Login timed out, retrying...\n"); 1144 retry = true; 1145 adapter->init_done_rc = 0; 1146 retry_count++; 1147 continue; 1148 } 1149 1150 if (adapter->init_done_rc == ABORTED) { 1151 netdev_warn(netdev, "Login aborted, retrying...\n"); 1152 retry = true; 1153 adapter->init_done_rc = 0; 1154 retry_count++; 1155 /* FW or device may be busy, so 1156 * wait a bit before retrying login 1157 */ 1158 msleep(500); 1159 } else if (adapter->init_done_rc == PARTIALSUCCESS) { 1160 retry_count++; 1161 release_sub_crqs(adapter, 1); 1162 1163 retry = true; 1164 netdev_dbg(netdev, 1165 "Received partial success, retrying...\n"); 1166 adapter->init_done_rc = 0; 1167 reinit_completion(&adapter->init_done); 1168 send_query_cap(adapter); 1169 if (!wait_for_completion_timeout(&adapter->init_done, 1170 timeout)) { 1171 netdev_warn(netdev, 1172 "Capabilities query timed out\n"); 1173 return -1; 1174 } 1175 1176 rc = init_sub_crqs(adapter); 1177 if (rc) { 1178 netdev_warn(netdev, 1179 "SCRQ initialization failed\n"); 1180 return -1; 1181 } 1182 1183 rc = init_sub_crq_irqs(adapter); 1184 if (rc) { 1185 netdev_warn(netdev, 1186 "SCRQ irq initialization failed\n"); 1187 return -1; 1188 } 1189 } else if (adapter->init_done_rc) { 1190 netdev_warn(netdev, "Adapter login failed\n"); 1191 return -1; 1192 } 1193 } while (retry); 1194 1195 __ibmvnic_set_mac(netdev, adapter->mac_addr); 1196 1197 netdev_dbg(netdev, "[S:%s] Login succeeded\n", adapter_state_to_string(adapter->state)); 1198 return 0; 1199 } 1200 1201 static void release_login_buffer(struct ibmvnic_adapter *adapter) 1202 { 1203 kfree(adapter->login_buf); 1204 adapter->login_buf = NULL; 1205 } 1206 1207 static void release_login_rsp_buffer(struct ibmvnic_adapter *adapter) 1208 { 1209 kfree(adapter->login_rsp_buf); 1210 adapter->login_rsp_buf = NULL; 1211 } 1212 1213 static void release_resources(struct ibmvnic_adapter *adapter) 1214 { 1215 release_vpd_data(adapter); 1216 1217 release_napi(adapter); 1218 release_login_buffer(adapter); 1219 release_login_rsp_buffer(adapter); 1220 } 1221 1222 static int set_link_state(struct ibmvnic_adapter *adapter, u8 link_state) 1223 { 1224 struct net_device *netdev = adapter->netdev; 1225 unsigned long timeout = msecs_to_jiffies(20000); 1226 union ibmvnic_crq crq; 1227 bool resend; 1228 int rc; 1229 1230 netdev_dbg(netdev, "setting link state %d\n", link_state); 1231 1232 memset(&crq, 0, sizeof(crq)); 1233 crq.logical_link_state.first = IBMVNIC_CRQ_CMD; 1234 crq.logical_link_state.cmd = LOGICAL_LINK_STATE; 1235 crq.logical_link_state.link_state = link_state; 1236 1237 do { 1238 resend = false; 1239 1240 reinit_completion(&adapter->init_done); 1241 rc = ibmvnic_send_crq(adapter, &crq); 1242 if (rc) { 1243 netdev_err(netdev, "Failed to set link state\n"); 1244 return rc; 1245 } 1246 1247 if (!wait_for_completion_timeout(&adapter->init_done, 1248 timeout)) 
{
			netdev_err(netdev, "timeout setting link state\n");
			return -1;
		}

		if (adapter->init_done_rc == PARTIALSUCCESS) {
			/* Partial success, delay and re-send */
			mdelay(1000);
			resend = true;
		} else if (adapter->init_done_rc) {
			netdev_warn(netdev, "Unable to set link state, rc=%d\n",
				    adapter->init_done_rc);
			return adapter->init_done_rc;
		}
	} while (resend);

	return 0;
}

static int set_real_num_queues(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int rc;

	netdev_dbg(netdev, "Setting real tx/rx queues (%llx/%llx)\n",
		   adapter->req_tx_queues, adapter->req_rx_queues);

	rc = netif_set_real_num_tx_queues(netdev, adapter->req_tx_queues);
	if (rc) {
		netdev_err(netdev, "failed to set the number of tx queues\n");
		return rc;
	}

	rc = netif_set_real_num_rx_queues(netdev, adapter->req_rx_queues);
	if (rc)
		netdev_err(netdev, "failed to set the number of rx queues\n");

	return rc;
}

static int ibmvnic_get_vpd(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	union ibmvnic_crq crq;
	int len = 0;
	int rc;

	if (adapter->vpd->buff)
		len = adapter->vpd->len;

	mutex_lock(&adapter->fw_lock);
	adapter->fw_done_rc = 0;
	reinit_completion(&adapter->fw_done);

	crq.get_vpd_size.first = IBMVNIC_CRQ_CMD;
	crq.get_vpd_size.cmd = GET_VPD_SIZE;
	rc = ibmvnic_send_crq(adapter, &crq);
	if (rc) {
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}

	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
	if (rc) {
		dev_err(dev, "Could not retrieve VPD size, rc = %d\n", rc);
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}
	mutex_unlock(&adapter->fw_lock);

	if (!adapter->vpd->len)
		return -ENODATA;

	if (!adapter->vpd->buff)
		adapter->vpd->buff = kzalloc(adapter->vpd->len, GFP_KERNEL);
	else if (adapter->vpd->len != len)
		adapter->vpd->buff =
			krealloc(adapter->vpd->buff,
				 adapter->vpd->len, GFP_KERNEL);

	if (!adapter->vpd->buff) {
		dev_err(dev, "Could not allocate VPD buffer\n");
		return -ENOMEM;
	}

	adapter->vpd->dma_addr =
		dma_map_single(dev, adapter->vpd->buff, adapter->vpd->len,
			       DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, adapter->vpd->dma_addr)) {
		dev_err(dev, "Could not map VPD buffer\n");
		kfree(adapter->vpd->buff);
		adapter->vpd->buff = NULL;
		return -ENOMEM;
	}

	mutex_lock(&adapter->fw_lock);
	adapter->fw_done_rc = 0;
	reinit_completion(&adapter->fw_done);

	crq.get_vpd.first = IBMVNIC_CRQ_CMD;
	crq.get_vpd.cmd = GET_VPD;
	crq.get_vpd.ioba = cpu_to_be32(adapter->vpd->dma_addr);
	crq.get_vpd.len = cpu_to_be32((u32)adapter->vpd->len);
	rc = ibmvnic_send_crq(adapter, &crq);
	if (rc) {
		kfree(adapter->vpd->buff);
		adapter->vpd->buff = NULL;
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}

	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
	if (rc) {
		dev_err(dev, "Unable to retrieve VPD, rc = %d\n", rc);
		kfree(adapter->vpd->buff);
		adapter->vpd->buff = NULL;
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}

	mutex_unlock(&adapter->fw_lock);
	return 0;
}

static int init_resources(struct ibmvnic_adapter
*adapter) 1373 { 1374 struct net_device *netdev = adapter->netdev; 1375 int rc; 1376 1377 rc = set_real_num_queues(netdev); 1378 if (rc) 1379 return rc; 1380 1381 adapter->vpd = kzalloc(sizeof(*adapter->vpd), GFP_KERNEL); 1382 if (!adapter->vpd) 1383 return -ENOMEM; 1384 1385 /* Vital Product Data (VPD) */ 1386 rc = ibmvnic_get_vpd(adapter); 1387 if (rc) { 1388 netdev_err(netdev, "failed to initialize Vital Product Data (VPD)\n"); 1389 return rc; 1390 } 1391 1392 rc = init_napi(adapter); 1393 if (rc) 1394 return rc; 1395 1396 send_query_map(adapter); 1397 1398 rc = init_rx_pools(netdev); 1399 if (rc) 1400 return rc; 1401 1402 rc = init_tx_pools(netdev); 1403 return rc; 1404 } 1405 1406 static int __ibmvnic_open(struct net_device *netdev) 1407 { 1408 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 1409 enum vnic_state prev_state = adapter->state; 1410 int i, rc; 1411 1412 adapter->state = VNIC_OPENING; 1413 replenish_pools(adapter); 1414 ibmvnic_napi_enable(adapter); 1415 1416 /* We're ready to receive frames, enable the sub-crq interrupts and 1417 * set the logical link state to up 1418 */ 1419 for (i = 0; i < adapter->req_rx_queues; i++) { 1420 netdev_dbg(netdev, "Enabling rx_scrq[%d] irq\n", i); 1421 if (prev_state == VNIC_CLOSED) 1422 enable_irq(adapter->rx_scrq[i]->irq); 1423 enable_scrq_irq(adapter, adapter->rx_scrq[i]); 1424 } 1425 1426 for (i = 0; i < adapter->req_tx_queues; i++) { 1427 netdev_dbg(netdev, "Enabling tx_scrq[%d] irq\n", i); 1428 if (prev_state == VNIC_CLOSED) 1429 enable_irq(adapter->tx_scrq[i]->irq); 1430 enable_scrq_irq(adapter, adapter->tx_scrq[i]); 1431 netdev_tx_reset_queue(netdev_get_tx_queue(netdev, i)); 1432 } 1433 1434 rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_UP); 1435 if (rc) { 1436 ibmvnic_napi_disable(adapter); 1437 release_resources(adapter); 1438 return rc; 1439 } 1440 1441 netif_tx_start_all_queues(netdev); 1442 1443 if (prev_state == VNIC_CLOSED) { 1444 for (i = 0; i < adapter->req_rx_queues; i++) 1445 napi_schedule(&adapter->napi[i]); 1446 } 1447 1448 adapter->state = VNIC_OPEN; 1449 return rc; 1450 } 1451 1452 static int ibmvnic_open(struct net_device *netdev) 1453 { 1454 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 1455 int rc; 1456 1457 ASSERT_RTNL(); 1458 1459 /* If device failover is pending or we are about to reset, just set 1460 * device state and return. Device operation will be handled by reset 1461 * routine. 1462 * 1463 * It should be safe to overwrite the adapter->state here. Since 1464 * we hold the rtnl, either the reset has not actually started or 1465 * the rtnl got dropped during the set_link_state() in do_reset(). 1466 * In the former case, no one else is changing the state (again we 1467 * have the rtnl) and in the latter case, do_reset() will detect and 1468 * honor our setting below. 
1469 */ 1470 if (adapter->failover_pending || (test_bit(0, &adapter->resetting))) { 1471 netdev_dbg(netdev, "[S:%s FOP:%d] Resetting, deferring open\n", 1472 adapter_state_to_string(adapter->state), 1473 adapter->failover_pending); 1474 adapter->state = VNIC_OPEN; 1475 rc = 0; 1476 goto out; 1477 } 1478 1479 if (adapter->state != VNIC_CLOSED) { 1480 rc = ibmvnic_login(netdev); 1481 if (rc) 1482 goto out; 1483 1484 rc = init_resources(adapter); 1485 if (rc) { 1486 netdev_err(netdev, "failed to initialize resources\n"); 1487 release_resources(adapter); 1488 release_rx_pools(adapter); 1489 release_tx_pools(adapter); 1490 goto out; 1491 } 1492 } 1493 1494 rc = __ibmvnic_open(netdev); 1495 1496 out: 1497 /* If open failed and there is a pending failover or in-progress reset, 1498 * set device state and return. Device operation will be handled by 1499 * reset routine. See also comments above regarding rtnl. 1500 */ 1501 if (rc && 1502 (adapter->failover_pending || (test_bit(0, &adapter->resetting)))) { 1503 adapter->state = VNIC_OPEN; 1504 rc = 0; 1505 } 1506 return rc; 1507 } 1508 1509 static void clean_rx_pools(struct ibmvnic_adapter *adapter) 1510 { 1511 struct ibmvnic_rx_pool *rx_pool; 1512 struct ibmvnic_rx_buff *rx_buff; 1513 u64 rx_entries; 1514 int rx_scrqs; 1515 int i, j; 1516 1517 if (!adapter->rx_pool) 1518 return; 1519 1520 rx_scrqs = adapter->num_active_rx_pools; 1521 rx_entries = adapter->req_rx_add_entries_per_subcrq; 1522 1523 /* Free any remaining skbs in the rx buffer pools */ 1524 for (i = 0; i < rx_scrqs; i++) { 1525 rx_pool = &adapter->rx_pool[i]; 1526 if (!rx_pool || !rx_pool->rx_buff) 1527 continue; 1528 1529 netdev_dbg(adapter->netdev, "Cleaning rx_pool[%d]\n", i); 1530 for (j = 0; j < rx_entries; j++) { 1531 rx_buff = &rx_pool->rx_buff[j]; 1532 if (rx_buff && rx_buff->skb) { 1533 dev_kfree_skb_any(rx_buff->skb); 1534 rx_buff->skb = NULL; 1535 } 1536 } 1537 } 1538 } 1539 1540 static void clean_one_tx_pool(struct ibmvnic_adapter *adapter, 1541 struct ibmvnic_tx_pool *tx_pool) 1542 { 1543 struct ibmvnic_tx_buff *tx_buff; 1544 u64 tx_entries; 1545 int i; 1546 1547 if (!tx_pool || !tx_pool->tx_buff) 1548 return; 1549 1550 tx_entries = tx_pool->num_buffers; 1551 1552 for (i = 0; i < tx_entries; i++) { 1553 tx_buff = &tx_pool->tx_buff[i]; 1554 if (tx_buff && tx_buff->skb) { 1555 dev_kfree_skb_any(tx_buff->skb); 1556 tx_buff->skb = NULL; 1557 } 1558 } 1559 } 1560 1561 static void clean_tx_pools(struct ibmvnic_adapter *adapter) 1562 { 1563 int tx_scrqs; 1564 int i; 1565 1566 if (!adapter->tx_pool || !adapter->tso_pool) 1567 return; 1568 1569 tx_scrqs = adapter->num_active_tx_pools; 1570 1571 /* Free any remaining skbs in the tx buffer pools */ 1572 for (i = 0; i < tx_scrqs; i++) { 1573 netdev_dbg(adapter->netdev, "Cleaning tx_pool[%d]\n", i); 1574 clean_one_tx_pool(adapter, &adapter->tx_pool[i]); 1575 clean_one_tx_pool(adapter, &adapter->tso_pool[i]); 1576 } 1577 } 1578 1579 static void ibmvnic_disable_irqs(struct ibmvnic_adapter *adapter) 1580 { 1581 struct net_device *netdev = adapter->netdev; 1582 int i; 1583 1584 if (adapter->tx_scrq) { 1585 for (i = 0; i < adapter->req_tx_queues; i++) 1586 if (adapter->tx_scrq[i]->irq) { 1587 netdev_dbg(netdev, 1588 "Disabling tx_scrq[%d] irq\n", i); 1589 disable_scrq_irq(adapter, adapter->tx_scrq[i]); 1590 disable_irq(adapter->tx_scrq[i]->irq); 1591 } 1592 } 1593 1594 if (adapter->rx_scrq) { 1595 for (i = 0; i < adapter->req_rx_queues; i++) { 1596 if (adapter->rx_scrq[i]->irq) { 1597 netdev_dbg(netdev, 1598 "Disabling rx_scrq[%d] irq\n", 
i); 1599 disable_scrq_irq(adapter, adapter->rx_scrq[i]); 1600 disable_irq(adapter->rx_scrq[i]->irq); 1601 } 1602 } 1603 } 1604 } 1605 1606 static void ibmvnic_cleanup(struct net_device *netdev) 1607 { 1608 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 1609 1610 /* ensure that transmissions are stopped if called by do_reset */ 1611 if (test_bit(0, &adapter->resetting)) 1612 netif_tx_disable(netdev); 1613 else 1614 netif_tx_stop_all_queues(netdev); 1615 1616 ibmvnic_napi_disable(adapter); 1617 ibmvnic_disable_irqs(adapter); 1618 } 1619 1620 static int __ibmvnic_close(struct net_device *netdev) 1621 { 1622 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 1623 int rc = 0; 1624 1625 adapter->state = VNIC_CLOSING; 1626 rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN); 1627 adapter->state = VNIC_CLOSED; 1628 return rc; 1629 } 1630 1631 static int ibmvnic_close(struct net_device *netdev) 1632 { 1633 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 1634 int rc; 1635 1636 netdev_dbg(netdev, "[S:%s FOP:%d FRR:%d] Closing\n", 1637 adapter_state_to_string(adapter->state), 1638 adapter->failover_pending, 1639 adapter->force_reset_recovery); 1640 1641 /* If device failover is pending, just set device state and return. 1642 * Device operation will be handled by reset routine. 1643 */ 1644 if (adapter->failover_pending) { 1645 adapter->state = VNIC_CLOSED; 1646 return 0; 1647 } 1648 1649 rc = __ibmvnic_close(netdev); 1650 ibmvnic_cleanup(netdev); 1651 clean_rx_pools(adapter); 1652 clean_tx_pools(adapter); 1653 1654 return rc; 1655 } 1656 1657 /** 1658 * build_hdr_data - creates L2/L3/L4 header data buffer 1659 * @hdr_field: bitfield determining needed headers 1660 * @skb: socket buffer 1661 * @hdr_len: array of header lengths 1662 * @hdr_data: buffer to write the header to 1663 * 1664 * Reads hdr_field to determine which headers are needed by firmware. 1665 * Builds a buffer containing these headers. Saves individual header 1666 * lengths and total buffer length to be used to build descriptors. 
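 *
 * Bits 6, 5 and 4 of @hdr_field select the L2, L3 and L4 headers
 * respectively, matching the (hdr_field >> 6/5/4) & 1 checks below.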
1667 */ 1668 static int build_hdr_data(u8 hdr_field, struct sk_buff *skb, 1669 int *hdr_len, u8 *hdr_data) 1670 { 1671 int len = 0; 1672 u8 *hdr; 1673 1674 if (skb_vlan_tagged(skb) && !skb_vlan_tag_present(skb)) 1675 hdr_len[0] = sizeof(struct vlan_ethhdr); 1676 else 1677 hdr_len[0] = sizeof(struct ethhdr); 1678 1679 if (skb->protocol == htons(ETH_P_IP)) { 1680 hdr_len[1] = ip_hdr(skb)->ihl * 4; 1681 if (ip_hdr(skb)->protocol == IPPROTO_TCP) 1682 hdr_len[2] = tcp_hdrlen(skb); 1683 else if (ip_hdr(skb)->protocol == IPPROTO_UDP) 1684 hdr_len[2] = sizeof(struct udphdr); 1685 } else if (skb->protocol == htons(ETH_P_IPV6)) { 1686 hdr_len[1] = sizeof(struct ipv6hdr); 1687 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) 1688 hdr_len[2] = tcp_hdrlen(skb); 1689 else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP) 1690 hdr_len[2] = sizeof(struct udphdr); 1691 } else if (skb->protocol == htons(ETH_P_ARP)) { 1692 hdr_len[1] = arp_hdr_len(skb->dev); 1693 hdr_len[2] = 0; 1694 } 1695 1696 memset(hdr_data, 0, 120); 1697 if ((hdr_field >> 6) & 1) { 1698 hdr = skb_mac_header(skb); 1699 memcpy(hdr_data, hdr, hdr_len[0]); 1700 len += hdr_len[0]; 1701 } 1702 1703 if ((hdr_field >> 5) & 1) { 1704 hdr = skb_network_header(skb); 1705 memcpy(hdr_data + len, hdr, hdr_len[1]); 1706 len += hdr_len[1]; 1707 } 1708 1709 if ((hdr_field >> 4) & 1) { 1710 hdr = skb_transport_header(skb); 1711 memcpy(hdr_data + len, hdr, hdr_len[2]); 1712 len += hdr_len[2]; 1713 } 1714 return len; 1715 } 1716 1717 /** 1718 * create_hdr_descs - create header and header extension descriptors 1719 * @hdr_field: bitfield determining needed headers 1720 * @hdr_data: buffer containing header data 1721 * @len: length of data buffer 1722 * @hdr_len: array of individual header lengths 1723 * @scrq_arr: descriptor array 1724 * 1725 * Creates header and, if needed, header extension descriptors and 1726 * places them in a descriptor array, scrq_arr 1727 */ 1728 1729 static int create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len, 1730 union sub_crq *scrq_arr) 1731 { 1732 union sub_crq hdr_desc; 1733 int tmp_len = len; 1734 int num_descs = 0; 1735 u8 *data, *cur; 1736 int tmp; 1737 1738 while (tmp_len > 0) { 1739 cur = hdr_data + len - tmp_len; 1740 1741 memset(&hdr_desc, 0, sizeof(hdr_desc)); 1742 if (cur != hdr_data) { 1743 data = hdr_desc.hdr_ext.data; 1744 tmp = tmp_len > 29 ? 29 : tmp_len; 1745 hdr_desc.hdr_ext.first = IBMVNIC_CRQ_CMD; 1746 hdr_desc.hdr_ext.type = IBMVNIC_HDR_EXT_DESC; 1747 hdr_desc.hdr_ext.len = tmp; 1748 } else { 1749 data = hdr_desc.hdr.data; 1750 tmp = tmp_len > 24 ? 24 : tmp_len; 1751 hdr_desc.hdr.first = IBMVNIC_CRQ_CMD; 1752 hdr_desc.hdr.type = IBMVNIC_HDR_DESC; 1753 hdr_desc.hdr.len = tmp; 1754 hdr_desc.hdr.l2_len = (u8)hdr_len[0]; 1755 hdr_desc.hdr.l3_len = cpu_to_be16((u16)hdr_len[1]); 1756 hdr_desc.hdr.l4_len = (u8)hdr_len[2]; 1757 hdr_desc.hdr.flag = hdr_field << 1; 1758 } 1759 memcpy(data, cur, tmp); 1760 tmp_len -= tmp; 1761 *scrq_arr = hdr_desc; 1762 scrq_arr++; 1763 num_descs++; 1764 } 1765 1766 return num_descs; 1767 } 1768 1769 /** 1770 * build_hdr_descs_arr - build a header descriptor array 1771 * @skb: tx socket buffer 1772 * @indir_arr: indirect array 1773 * @num_entries: number of descriptors to be sent 1774 * @hdr_field: bit field determining which headers will be sent 1775 * 1776 * This function will build a TX descriptor array with applicable 1777 * L2/L3/L4 packet header descriptors to be sent by send_subcrq_indirect. 
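 *
 * Header descriptors are written starting at indir_arr[1]; slot 0 is left
 * for the main TX descriptor that ibmvnic_xmit() builds.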
1778 */ 1779 1780 static void build_hdr_descs_arr(struct sk_buff *skb, 1781 union sub_crq *indir_arr, 1782 int *num_entries, u8 hdr_field) 1783 { 1784 int hdr_len[3] = {0, 0, 0}; 1785 u8 hdr_data[140] = {0}; 1786 int tot_len; 1787 1788 tot_len = build_hdr_data(hdr_field, skb, hdr_len, 1789 hdr_data); 1790 *num_entries += create_hdr_descs(hdr_field, hdr_data, tot_len, hdr_len, 1791 indir_arr + 1); 1792 } 1793 1794 static int ibmvnic_xmit_workarounds(struct sk_buff *skb, 1795 struct net_device *netdev) 1796 { 1797 /* For some backing devices, mishandling of small packets 1798 * can result in a loss of connection or TX stall. Device 1799 * architects recommend that no packet should be smaller 1800 * than the minimum MTU value provided to the driver, so 1801 * pad any packets to that length 1802 */ 1803 if (skb->len < netdev->min_mtu) 1804 return skb_put_padto(skb, netdev->min_mtu); 1805 1806 return 0; 1807 } 1808 1809 static void ibmvnic_tx_scrq_clean_buffer(struct ibmvnic_adapter *adapter, 1810 struct ibmvnic_sub_crq_queue *tx_scrq) 1811 { 1812 struct ibmvnic_ind_xmit_queue *ind_bufp; 1813 struct ibmvnic_tx_buff *tx_buff; 1814 struct ibmvnic_tx_pool *tx_pool; 1815 union sub_crq tx_scrq_entry; 1816 int queue_num; 1817 int entries; 1818 int index; 1819 int i; 1820 1821 ind_bufp = &tx_scrq->ind_buf; 1822 entries = (u64)ind_bufp->index; 1823 queue_num = tx_scrq->pool_index; 1824 1825 for (i = entries - 1; i >= 0; --i) { 1826 tx_scrq_entry = ind_bufp->indir_arr[i]; 1827 if (tx_scrq_entry.v1.type != IBMVNIC_TX_DESC) 1828 continue; 1829 index = be32_to_cpu(tx_scrq_entry.v1.correlator); 1830 if (index & IBMVNIC_TSO_POOL_MASK) { 1831 tx_pool = &adapter->tso_pool[queue_num]; 1832 index &= ~IBMVNIC_TSO_POOL_MASK; 1833 } else { 1834 tx_pool = &adapter->tx_pool[queue_num]; 1835 } 1836 tx_pool->free_map[tx_pool->consumer_index] = index; 1837 tx_pool->consumer_index = tx_pool->consumer_index == 0 ? 
1838 tx_pool->num_buffers - 1 : 1839 tx_pool->consumer_index - 1; 1840 tx_buff = &tx_pool->tx_buff[index]; 1841 adapter->netdev->stats.tx_packets--; 1842 adapter->netdev->stats.tx_bytes -= tx_buff->skb->len; 1843 adapter->tx_stats_buffers[queue_num].packets--; 1844 adapter->tx_stats_buffers[queue_num].bytes -= 1845 tx_buff->skb->len; 1846 dev_kfree_skb_any(tx_buff->skb); 1847 tx_buff->skb = NULL; 1848 adapter->netdev->stats.tx_dropped++; 1849 } 1850 ind_bufp->index = 0; 1851 if (atomic_sub_return(entries, &tx_scrq->used) <= 1852 (adapter->req_tx_entries_per_subcrq / 2) && 1853 __netif_subqueue_stopped(adapter->netdev, queue_num) && 1854 !test_bit(0, &adapter->resetting)) { 1855 netif_wake_subqueue(adapter->netdev, queue_num); 1856 netdev_dbg(adapter->netdev, "Started queue %d\n", 1857 queue_num); 1858 } 1859 } 1860 1861 static int ibmvnic_tx_scrq_flush(struct ibmvnic_adapter *adapter, 1862 struct ibmvnic_sub_crq_queue *tx_scrq) 1863 { 1864 struct ibmvnic_ind_xmit_queue *ind_bufp; 1865 u64 dma_addr; 1866 u64 entries; 1867 u64 handle; 1868 int rc; 1869 1870 ind_bufp = &tx_scrq->ind_buf; 1871 dma_addr = (u64)ind_bufp->indir_dma; 1872 entries = (u64)ind_bufp->index; 1873 handle = tx_scrq->handle; 1874 1875 if (!entries) 1876 return 0; 1877 rc = send_subcrq_indirect(adapter, handle, dma_addr, entries); 1878 if (rc) 1879 ibmvnic_tx_scrq_clean_buffer(adapter, tx_scrq); 1880 else 1881 ind_bufp->index = 0; 1882 return 0; 1883 } 1884 1885 static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) 1886 { 1887 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 1888 int queue_num = skb_get_queue_mapping(skb); 1889 u8 *hdrs = (u8 *)&adapter->tx_rx_desc_req; 1890 struct device *dev = &adapter->vdev->dev; 1891 struct ibmvnic_ind_xmit_queue *ind_bufp; 1892 struct ibmvnic_tx_buff *tx_buff = NULL; 1893 struct ibmvnic_sub_crq_queue *tx_scrq; 1894 struct ibmvnic_tx_pool *tx_pool; 1895 unsigned int tx_send_failed = 0; 1896 netdev_tx_t ret = NETDEV_TX_OK; 1897 unsigned int tx_map_failed = 0; 1898 union sub_crq indir_arr[16]; 1899 unsigned int tx_dropped = 0; 1900 unsigned int tx_packets = 0; 1901 unsigned int tx_bytes = 0; 1902 dma_addr_t data_dma_addr; 1903 struct netdev_queue *txq; 1904 unsigned long lpar_rc; 1905 union sub_crq tx_crq; 1906 unsigned int offset; 1907 int num_entries = 1; 1908 unsigned char *dst; 1909 int index = 0; 1910 u8 proto = 0; 1911 1912 tx_scrq = adapter->tx_scrq[queue_num]; 1913 txq = netdev_get_tx_queue(netdev, queue_num); 1914 ind_bufp = &tx_scrq->ind_buf; 1915 1916 if (test_bit(0, &adapter->resetting)) { 1917 dev_kfree_skb_any(skb); 1918 1919 tx_send_failed++; 1920 tx_dropped++; 1921 ret = NETDEV_TX_OK; 1922 goto out; 1923 } 1924 1925 if (ibmvnic_xmit_workarounds(skb, netdev)) { 1926 tx_dropped++; 1927 tx_send_failed++; 1928 ret = NETDEV_TX_OK; 1929 ibmvnic_tx_scrq_flush(adapter, tx_scrq); 1930 goto out; 1931 } 1932 if (skb_is_gso(skb)) 1933 tx_pool = &adapter->tso_pool[queue_num]; 1934 else 1935 tx_pool = &adapter->tx_pool[queue_num]; 1936 1937 index = tx_pool->free_map[tx_pool->consumer_index]; 1938 1939 if (index == IBMVNIC_INVALID_MAP) { 1940 dev_kfree_skb_any(skb); 1941 tx_send_failed++; 1942 tx_dropped++; 1943 ibmvnic_tx_scrq_flush(adapter, tx_scrq); 1944 ret = NETDEV_TX_OK; 1945 goto out; 1946 } 1947 1948 tx_pool->free_map[tx_pool->consumer_index] = IBMVNIC_INVALID_MAP; 1949 1950 offset = index * tx_pool->buf_size; 1951 dst = tx_pool->long_term_buff.buff + offset; 1952 memset(dst, 0, tx_pool->buf_size); 1953 data_dma_addr = tx_pool->long_term_buff.addr 
+ offset; 1954 1955 if (skb_shinfo(skb)->nr_frags) { 1956 int cur, i; 1957 1958 /* Copy the head */ 1959 skb_copy_from_linear_data(skb, dst, skb_headlen(skb)); 1960 cur = skb_headlen(skb); 1961 1962 /* Copy the frags */ 1963 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 1964 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 1965 1966 memcpy(dst + cur, skb_frag_address(frag), 1967 skb_frag_size(frag)); 1968 cur += skb_frag_size(frag); 1969 } 1970 } else { 1971 skb_copy_from_linear_data(skb, dst, skb->len); 1972 } 1973 1974 /* post changes to long_term_buff *dst before VIOS accessing it */ 1975 dma_wmb(); 1976 1977 tx_pool->consumer_index = 1978 (tx_pool->consumer_index + 1) % tx_pool->num_buffers; 1979 1980 tx_buff = &tx_pool->tx_buff[index]; 1981 tx_buff->skb = skb; 1982 tx_buff->index = index; 1983 tx_buff->pool_index = queue_num; 1984 1985 memset(&tx_crq, 0, sizeof(tx_crq)); 1986 tx_crq.v1.first = IBMVNIC_CRQ_CMD; 1987 tx_crq.v1.type = IBMVNIC_TX_DESC; 1988 tx_crq.v1.n_crq_elem = 1; 1989 tx_crq.v1.n_sge = 1; 1990 tx_crq.v1.flags1 = IBMVNIC_TX_COMP_NEEDED; 1991 1992 if (skb_is_gso(skb)) 1993 tx_crq.v1.correlator = 1994 cpu_to_be32(index | IBMVNIC_TSO_POOL_MASK); 1995 else 1996 tx_crq.v1.correlator = cpu_to_be32(index); 1997 tx_crq.v1.dma_reg = cpu_to_be16(tx_pool->long_term_buff.map_id); 1998 tx_crq.v1.sge_len = cpu_to_be32(skb->len); 1999 tx_crq.v1.ioba = cpu_to_be64(data_dma_addr); 2000 2001 if (adapter->vlan_header_insertion && skb_vlan_tag_present(skb)) { 2002 tx_crq.v1.flags2 |= IBMVNIC_TX_VLAN_INSERT; 2003 tx_crq.v1.vlan_id = cpu_to_be16(skb->vlan_tci); 2004 } 2005 2006 if (skb->protocol == htons(ETH_P_IP)) { 2007 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV4; 2008 proto = ip_hdr(skb)->protocol; 2009 } else if (skb->protocol == htons(ETH_P_IPV6)) { 2010 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV6; 2011 proto = ipv6_hdr(skb)->nexthdr; 2012 } 2013 2014 if (proto == IPPROTO_TCP) 2015 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_TCP; 2016 else if (proto == IPPROTO_UDP) 2017 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_UDP; 2018 2019 if (skb->ip_summed == CHECKSUM_PARTIAL) { 2020 tx_crq.v1.flags1 |= IBMVNIC_TX_CHKSUM_OFFLOAD; 2021 hdrs += 2; 2022 } 2023 if (skb_is_gso(skb)) { 2024 tx_crq.v1.flags1 |= IBMVNIC_TX_LSO; 2025 tx_crq.v1.mss = cpu_to_be16(skb_shinfo(skb)->gso_size); 2026 hdrs += 2; 2027 } 2028 2029 if ((*hdrs >> 7) & 1) 2030 build_hdr_descs_arr(skb, indir_arr, &num_entries, *hdrs); 2031 2032 tx_crq.v1.n_crq_elem = num_entries; 2033 tx_buff->num_entries = num_entries; 2034 /* flush buffer if current entry can not fit */ 2035 if (num_entries + ind_bufp->index > IBMVNIC_MAX_IND_DESCS) { 2036 lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq); 2037 if (lpar_rc != H_SUCCESS) 2038 goto tx_flush_err; 2039 } 2040 2041 indir_arr[0] = tx_crq; 2042 memcpy(&ind_bufp->indir_arr[ind_bufp->index], &indir_arr[0], 2043 num_entries * sizeof(struct ibmvnic_generic_scrq)); 2044 ind_bufp->index += num_entries; 2045 if (__netdev_tx_sent_queue(txq, skb->len, 2046 netdev_xmit_more() && 2047 ind_bufp->index < IBMVNIC_MAX_IND_DESCS)) { 2048 lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq); 2049 if (lpar_rc != H_SUCCESS) 2050 goto tx_err; 2051 } 2052 2053 if (atomic_add_return(num_entries, &tx_scrq->used) 2054 >= adapter->req_tx_entries_per_subcrq) { 2055 netdev_dbg(netdev, "Stopping queue %d\n", queue_num); 2056 netif_stop_subqueue(netdev, queue_num); 2057 } 2058 2059 tx_packets++; 2060 tx_bytes += skb->len; 2061 txq_trans_cond_update(txq); 2062 ret = NETDEV_TX_OK; 2063 goto out; 2064 2065 tx_flush_err: 2066 
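/* indirect-descriptor flush failed: drop this skb, roll the consumer index back one slot so the tx buffer can be reused, then fall through to the generic tx error handling below */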
dev_kfree_skb_any(skb); 2067 tx_buff->skb = NULL; 2068 tx_pool->consumer_index = tx_pool->consumer_index == 0 ? 2069 tx_pool->num_buffers - 1 : 2070 tx_pool->consumer_index - 1; 2071 tx_dropped++; 2072 tx_err: 2073 if (lpar_rc != H_CLOSED && lpar_rc != H_PARAMETER) 2074 dev_err_ratelimited(dev, "tx: send failed\n"); 2075 2076 if (lpar_rc == H_CLOSED || adapter->failover_pending) { 2077 /* Disable TX and report carrier off if queue is closed 2078 * or pending failover. 2079 * Firmware guarantees that a signal will be sent to the 2080 * driver, triggering a reset or some other action. 2081 */ 2082 netif_tx_stop_all_queues(netdev); 2083 netif_carrier_off(netdev); 2084 } 2085 out: 2086 netdev->stats.tx_dropped += tx_dropped; 2087 netdev->stats.tx_bytes += tx_bytes; 2088 netdev->stats.tx_packets += tx_packets; 2089 adapter->tx_send_failed += tx_send_failed; 2090 adapter->tx_map_failed += tx_map_failed; 2091 adapter->tx_stats_buffers[queue_num].packets += tx_packets; 2092 adapter->tx_stats_buffers[queue_num].bytes += tx_bytes; 2093 adapter->tx_stats_buffers[queue_num].dropped_packets += tx_dropped; 2094 2095 return ret; 2096 } 2097 2098 static void ibmvnic_set_multi(struct net_device *netdev) 2099 { 2100 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 2101 struct netdev_hw_addr *ha; 2102 union ibmvnic_crq crq; 2103 2104 memset(&crq, 0, sizeof(crq)); 2105 crq.request_capability.first = IBMVNIC_CRQ_CMD; 2106 crq.request_capability.cmd = REQUEST_CAPABILITY; 2107 2108 if (netdev->flags & IFF_PROMISC) { 2109 if (!adapter->promisc_supported) 2110 return; 2111 } else { 2112 if (netdev->flags & IFF_ALLMULTI) { 2113 /* Accept all multicast */ 2114 memset(&crq, 0, sizeof(crq)); 2115 crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD; 2116 crq.multicast_ctrl.cmd = MULTICAST_CTRL; 2117 crq.multicast_ctrl.flags = IBMVNIC_ENABLE_ALL; 2118 ibmvnic_send_crq(adapter, &crq); 2119 } else if (netdev_mc_empty(netdev)) { 2120 /* Reject all multicast */ 2121 memset(&crq, 0, sizeof(crq)); 2122 crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD; 2123 crq.multicast_ctrl.cmd = MULTICAST_CTRL; 2124 crq.multicast_ctrl.flags = IBMVNIC_DISABLE_ALL; 2125 ibmvnic_send_crq(adapter, &crq); 2126 } else { 2127 /* Accept one or more multicast(s) */ 2128 netdev_for_each_mc_addr(ha, netdev) { 2129 memset(&crq, 0, sizeof(crq)); 2130 crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD; 2131 crq.multicast_ctrl.cmd = MULTICAST_CTRL; 2132 crq.multicast_ctrl.flags = IBMVNIC_ENABLE_MC; 2133 ether_addr_copy(&crq.multicast_ctrl.mac_addr[0], 2134 ha->addr); 2135 ibmvnic_send_crq(adapter, &crq); 2136 } 2137 } 2138 } 2139 } 2140 2141 static int __ibmvnic_set_mac(struct net_device *netdev, u8 *dev_addr) 2142 { 2143 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 2144 union ibmvnic_crq crq; 2145 int rc; 2146 2147 if (!is_valid_ether_addr(dev_addr)) { 2148 rc = -EADDRNOTAVAIL; 2149 goto err; 2150 } 2151 2152 memset(&crq, 0, sizeof(crq)); 2153 crq.change_mac_addr.first = IBMVNIC_CRQ_CMD; 2154 crq.change_mac_addr.cmd = CHANGE_MAC_ADDR; 2155 ether_addr_copy(&crq.change_mac_addr.mac_addr[0], dev_addr); 2156 2157 mutex_lock(&adapter->fw_lock); 2158 adapter->fw_done_rc = 0; 2159 reinit_completion(&adapter->fw_done); 2160 2161 rc = ibmvnic_send_crq(adapter, &crq); 2162 if (rc) { 2163 rc = -EIO; 2164 mutex_unlock(&adapter->fw_lock); 2165 goto err; 2166 } 2167 2168 rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000); 2169 /* netdev->dev_addr is changed in handle_change_mac_rsp function */ 2170 if (rc || adapter->fw_done_rc) { 2171 rc = -EIO; 2172 
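/* the completion wait timed out or firmware rejected the MAC change; release fw_lock and restore the previous address via the err path */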
mutex_unlock(&adapter->fw_lock); 2173 goto err; 2174 } 2175 mutex_unlock(&adapter->fw_lock); 2176 return 0; 2177 err: 2178 ether_addr_copy(adapter->mac_addr, netdev->dev_addr); 2179 return rc; 2180 } 2181 2182 static int ibmvnic_set_mac(struct net_device *netdev, void *p) 2183 { 2184 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 2185 struct sockaddr *addr = p; 2186 int rc; 2187 2188 rc = 0; 2189 if (!is_valid_ether_addr(addr->sa_data)) 2190 return -EADDRNOTAVAIL; 2191 2192 ether_addr_copy(adapter->mac_addr, addr->sa_data); 2193 if (adapter->state != VNIC_PROBED) 2194 rc = __ibmvnic_set_mac(netdev, addr->sa_data); 2195 2196 return rc; 2197 } 2198 2199 static const char *reset_reason_to_string(enum ibmvnic_reset_reason reason) 2200 { 2201 switch (reason) { 2202 case VNIC_RESET_FAILOVER: 2203 return "FAILOVER"; 2204 case VNIC_RESET_MOBILITY: 2205 return "MOBILITY"; 2206 case VNIC_RESET_FATAL: 2207 return "FATAL"; 2208 case VNIC_RESET_NON_FATAL: 2209 return "NON_FATAL"; 2210 case VNIC_RESET_TIMEOUT: 2211 return "TIMEOUT"; 2212 case VNIC_RESET_CHANGE_PARAM: 2213 return "CHANGE_PARAM"; 2214 case VNIC_RESET_PASSIVE_INIT: 2215 return "PASSIVE_INIT"; 2216 } 2217 return "UNKNOWN"; 2218 } 2219 2220 /* 2221 * do_reset returns zero if we are able to keep processing reset events, or 2222 * non-zero if we hit a fatal error and must halt. 2223 */ 2224 static int do_reset(struct ibmvnic_adapter *adapter, 2225 struct ibmvnic_rwi *rwi, u32 reset_state) 2226 { 2227 struct net_device *netdev = adapter->netdev; 2228 u64 old_num_rx_queues, old_num_tx_queues; 2229 u64 old_num_rx_slots, old_num_tx_slots; 2230 int rc; 2231 2232 netdev_dbg(adapter->netdev, 2233 "[S:%s FOP:%d] Reset reason: %s, reset_state: %s\n", 2234 adapter_state_to_string(adapter->state), 2235 adapter->failover_pending, 2236 reset_reason_to_string(rwi->reset_reason), 2237 adapter_state_to_string(reset_state)); 2238 2239 adapter->reset_reason = rwi->reset_reason; 2240 /* requestor of VNIC_RESET_CHANGE_PARAM already has the rtnl lock */ 2241 if (!(adapter->reset_reason == VNIC_RESET_CHANGE_PARAM)) 2242 rtnl_lock(); 2243 2244 /* Now that we have the rtnl lock, clear any pending failover. 2245 * This will ensure ibmvnic_open() has either completed or will 2246 * block until failover is complete. 2247 */ 2248 if (rwi->reset_reason == VNIC_RESET_FAILOVER) 2249 adapter->failover_pending = false; 2250 2251 /* read the state and check (again) after getting rtnl */ 2252 reset_state = adapter->state; 2253 2254 if (reset_state == VNIC_REMOVING || reset_state == VNIC_REMOVED) { 2255 rc = -EBUSY; 2256 goto out; 2257 } 2258 2259 netif_carrier_off(netdev); 2260 2261 old_num_rx_queues = adapter->req_rx_queues; 2262 old_num_tx_queues = adapter->req_tx_queues; 2263 old_num_rx_slots = adapter->req_rx_add_entries_per_subcrq; 2264 old_num_tx_slots = adapter->req_tx_entries_per_subcrq; 2265 2266 ibmvnic_cleanup(netdev); 2267 2268 if (reset_state == VNIC_OPEN && 2269 adapter->reset_reason != VNIC_RESET_MOBILITY && 2270 adapter->reset_reason != VNIC_RESET_FAILOVER) { 2271 if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) { 2272 rc = __ibmvnic_close(netdev); 2273 if (rc) 2274 goto out; 2275 } else { 2276 adapter->state = VNIC_CLOSING; 2277 2278 /* Release the RTNL lock before link state change and 2279 * re-acquire after the link state change to allow 2280 * linkwatch_event to grab the RTNL lock and run during 2281 * a reset. 
2282 */ 2283 rtnl_unlock(); 2284 rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN); 2285 rtnl_lock(); 2286 if (rc) 2287 goto out; 2288 2289 if (adapter->state == VNIC_OPEN) { 2290 /* When we dropped rtnl, ibmvnic_open() got 2291 * it and noticed that we are resetting and 2292 * set the adapter state to OPEN. Update our 2293 * new "target" state, and resume the reset 2294 * from VNIC_CLOSING state. 2295 */ 2296 netdev_dbg(netdev, 2297 "Open changed state from %s, updating.\n", 2298 adapter_state_to_string(reset_state)); 2299 reset_state = VNIC_OPEN; 2300 adapter->state = VNIC_CLOSING; 2301 } 2302 2303 if (adapter->state != VNIC_CLOSING) { 2304 /* If someone else changed the adapter state 2305 * when we dropped the rtnl, fail the reset 2306 */ 2307 rc = -1; 2308 goto out; 2309 } 2310 adapter->state = VNIC_CLOSED; 2311 } 2312 } 2313 2314 if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) { 2315 release_resources(adapter); 2316 release_sub_crqs(adapter, 1); 2317 release_crq_queue(adapter); 2318 } 2319 2320 if (adapter->reset_reason != VNIC_RESET_NON_FATAL) { 2321 /* remove the closed state so when we call open it appears 2322 * we are coming from the probed state. 2323 */ 2324 adapter->state = VNIC_PROBED; 2325 2326 if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) { 2327 rc = init_crq_queue(adapter); 2328 } else if (adapter->reset_reason == VNIC_RESET_MOBILITY) { 2329 rc = ibmvnic_reenable_crq_queue(adapter); 2330 release_sub_crqs(adapter, 1); 2331 } else { 2332 rc = ibmvnic_reset_crq(adapter); 2333 if (rc == H_CLOSED || rc == H_SUCCESS) { 2334 rc = vio_enable_interrupts(adapter->vdev); 2335 if (rc) 2336 netdev_err(adapter->netdev, 2337 "Reset failed to enable interrupts. rc=%d\n", 2338 rc); 2339 } 2340 } 2341 2342 if (rc) { 2343 netdev_err(adapter->netdev, 2344 "Reset couldn't initialize crq. rc=%d\n", rc); 2345 goto out; 2346 } 2347 2348 rc = ibmvnic_reset_init(adapter, true); 2349 if (rc) { 2350 rc = IBMVNIC_INIT_FAILED; 2351 goto out; 2352 } 2353 2354 /* If the adapter was in PROBE or DOWN state prior to the reset, 2355 * exit here. 
2356 */ 2357 if (reset_state == VNIC_PROBED || reset_state == VNIC_DOWN) { 2358 rc = 0; 2359 goto out; 2360 } 2361 2362 rc = ibmvnic_login(netdev); 2363 if (rc) 2364 goto out; 2365 2366 if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) { 2367 rc = init_resources(adapter); 2368 if (rc) 2369 goto out; 2370 } else if (adapter->req_rx_queues != old_num_rx_queues || 2371 adapter->req_tx_queues != old_num_tx_queues || 2372 adapter->req_rx_add_entries_per_subcrq != 2373 old_num_rx_slots || 2374 adapter->req_tx_entries_per_subcrq != 2375 old_num_tx_slots || 2376 !adapter->rx_pool || 2377 !adapter->tso_pool || 2378 !adapter->tx_pool) { 2379 release_napi(adapter); 2380 release_vpd_data(adapter); 2381 2382 rc = init_resources(adapter); 2383 if (rc) 2384 goto out; 2385 2386 } else { 2387 rc = init_tx_pools(netdev); 2388 if (rc) { 2389 netdev_dbg(netdev, 2390 "init tx pools failed (%d)\n", 2391 rc); 2392 goto out; 2393 } 2394 2395 rc = init_rx_pools(netdev); 2396 if (rc) { 2397 netdev_dbg(netdev, 2398 "init rx pools failed (%d)\n", 2399 rc); 2400 goto out; 2401 } 2402 } 2403 ibmvnic_disable_irqs(adapter); 2404 } 2405 adapter->state = VNIC_CLOSED; 2406 2407 if (reset_state == VNIC_CLOSED) { 2408 rc = 0; 2409 goto out; 2410 } 2411 2412 rc = __ibmvnic_open(netdev); 2413 if (rc) { 2414 rc = IBMVNIC_OPEN_FAILED; 2415 goto out; 2416 } 2417 2418 /* refresh device's multicast list */ 2419 ibmvnic_set_multi(netdev); 2420 2421 if (adapter->reset_reason == VNIC_RESET_FAILOVER || 2422 adapter->reset_reason == VNIC_RESET_MOBILITY) 2423 __netdev_notify_peers(netdev); 2424 2425 rc = 0; 2426 2427 out: 2428 /* restore the adapter state if reset failed */ 2429 if (rc) 2430 adapter->state = reset_state; 2431 /* requestor of VNIC_RESET_CHANGE_PARAM should still hold the rtnl lock */ 2432 if (!(adapter->reset_reason == VNIC_RESET_CHANGE_PARAM)) 2433 rtnl_unlock(); 2434 2435 netdev_dbg(adapter->netdev, "[S:%s FOP:%d] Reset done, rc %d\n", 2436 adapter_state_to_string(adapter->state), 2437 adapter->failover_pending, rc); 2438 return rc; 2439 } 2440 2441 static int do_hard_reset(struct ibmvnic_adapter *adapter, 2442 struct ibmvnic_rwi *rwi, u32 reset_state) 2443 { 2444 struct net_device *netdev = adapter->netdev; 2445 int rc; 2446 2447 netdev_dbg(adapter->netdev, "Hard resetting driver (%s)\n", 2448 reset_reason_to_string(rwi->reset_reason)); 2449 2450 /* read the state and check (again) after getting rtnl */ 2451 reset_state = adapter->state; 2452 2453 if (reset_state == VNIC_REMOVING || reset_state == VNIC_REMOVED) { 2454 rc = -EBUSY; 2455 goto out; 2456 } 2457 2458 netif_carrier_off(netdev); 2459 adapter->reset_reason = rwi->reset_reason; 2460 2461 ibmvnic_cleanup(netdev); 2462 release_resources(adapter); 2463 release_sub_crqs(adapter, 0); 2464 release_crq_queue(adapter); 2465 2466 /* remove the closed state so when we call open it appears 2467 * we are coming from the probed state. 2468 */ 2469 adapter->state = VNIC_PROBED; 2470 2471 reinit_completion(&adapter->init_done); 2472 rc = init_crq_queue(adapter); 2473 if (rc) { 2474 netdev_err(adapter->netdev, 2475 "Couldn't initialize crq. rc=%d\n", rc); 2476 goto out; 2477 } 2478 2479 rc = ibmvnic_reset_init(adapter, false); 2480 if (rc) 2481 goto out; 2482 2483 /* If the adapter was in PROBE or DOWN state prior to the reset, 2484 * exit here. 
2485 */ 2486 if (reset_state == VNIC_PROBED || reset_state == VNIC_DOWN) 2487 goto out; 2488 2489 rc = ibmvnic_login(netdev); 2490 if (rc) 2491 goto out; 2492 2493 rc = init_resources(adapter); 2494 if (rc) 2495 goto out; 2496 2497 ibmvnic_disable_irqs(adapter); 2498 adapter->state = VNIC_CLOSED; 2499 2500 if (reset_state == VNIC_CLOSED) 2501 goto out; 2502 2503 rc = __ibmvnic_open(netdev); 2504 if (rc) { 2505 rc = IBMVNIC_OPEN_FAILED; 2506 goto out; 2507 } 2508 2509 __netdev_notify_peers(netdev); 2510 out: 2511 /* restore adapter state if reset failed */ 2512 if (rc) 2513 adapter->state = reset_state; 2514 netdev_dbg(adapter->netdev, "[S:%s FOP:%d] Hard reset done, rc %d\n", 2515 adapter_state_to_string(adapter->state), 2516 adapter->failover_pending, rc); 2517 return rc; 2518 } 2519 2520 static struct ibmvnic_rwi *get_next_rwi(struct ibmvnic_adapter *adapter) 2521 { 2522 struct ibmvnic_rwi *rwi; 2523 unsigned long flags; 2524 2525 spin_lock_irqsave(&adapter->rwi_lock, flags); 2526 2527 if (!list_empty(&adapter->rwi_list)) { 2528 rwi = list_first_entry(&adapter->rwi_list, struct ibmvnic_rwi, 2529 list); 2530 list_del(&rwi->list); 2531 } else { 2532 rwi = NULL; 2533 } 2534 2535 spin_unlock_irqrestore(&adapter->rwi_lock, flags); 2536 return rwi; 2537 } 2538 2539 /** 2540 * do_passive_init - complete probing when partner device is detected. 2541 * @adapter: ibmvnic_adapter struct 2542 * 2543 * If the ibmvnic device does not have a partner device to communicate with at boot 2544 * and that partner device comes online at a later time, this function is called 2545 * to complete the initialization process of ibmvnic device. 2546 * Caller is expected to hold rtnl_lock(). 2547 * 2548 * Returns non-zero if sub-CRQs are not initialized properly leaving the device 2549 * in the down state. 2550 * Returns 0 upon success and the device is in PROBED state. 2551 */ 2552 2553 static int do_passive_init(struct ibmvnic_adapter *adapter) 2554 { 2555 unsigned long timeout = msecs_to_jiffies(30000); 2556 struct net_device *netdev = adapter->netdev; 2557 struct device *dev = &adapter->vdev->dev; 2558 int rc; 2559 2560 netdev_dbg(netdev, "Partner device found, probing.\n"); 2561 2562 adapter->state = VNIC_PROBING; 2563 reinit_completion(&adapter->init_done); 2564 adapter->init_done_rc = 0; 2565 adapter->crq.active = true; 2566 2567 rc = send_crq_init_complete(adapter); 2568 if (rc) 2569 goto out; 2570 2571 rc = send_version_xchg(adapter); 2572 if (rc) 2573 netdev_dbg(adapter->netdev, "send_version_xchg failed, rc=%d\n", rc); 2574 2575 if (!wait_for_completion_timeout(&adapter->init_done, timeout)) { 2576 dev_err(dev, "Initialization sequence timed out\n"); 2577 rc = -ETIMEDOUT; 2578 goto out; 2579 } 2580 2581 rc = init_sub_crqs(adapter); 2582 if (rc) { 2583 dev_err(dev, "Initialization of sub crqs failed, rc=%d\n", rc); 2584 goto out; 2585 } 2586 2587 rc = init_sub_crq_irqs(adapter); 2588 if (rc) { 2589 dev_err(dev, "Failed to initialize sub crq irqs\n, rc=%d", rc); 2590 goto init_failed; 2591 } 2592 2593 netdev->mtu = adapter->req_mtu - ETH_HLEN; 2594 netdev->min_mtu = adapter->min_mtu - ETH_HLEN; 2595 netdev->max_mtu = adapter->max_mtu - ETH_HLEN; 2596 2597 adapter->state = VNIC_PROBED; 2598 netdev_dbg(netdev, "Probed successfully. 
Waiting for signal from partner device.\n"); 2599 2600 return 0; 2601 2602 init_failed: 2603 release_sub_crqs(adapter, 1); 2604 out: 2605 adapter->state = VNIC_DOWN; 2606 return rc; 2607 } 2608 2609 static void __ibmvnic_reset(struct work_struct *work) 2610 { 2611 struct ibmvnic_adapter *adapter; 2612 bool saved_state = false; 2613 struct ibmvnic_rwi *tmprwi; 2614 struct ibmvnic_rwi *rwi; 2615 unsigned long flags; 2616 u32 reset_state; 2617 int rc = 0; 2618 2619 adapter = container_of(work, struct ibmvnic_adapter, ibmvnic_reset); 2620 2621 if (test_and_set_bit_lock(0, &adapter->resetting)) { 2622 queue_delayed_work(system_long_wq, 2623 &adapter->ibmvnic_delayed_reset, 2624 IBMVNIC_RESET_DELAY); 2625 return; 2626 } 2627 2628 rwi = get_next_rwi(adapter); 2629 while (rwi) { 2630 spin_lock_irqsave(&adapter->state_lock, flags); 2631 2632 if (adapter->state == VNIC_REMOVING || 2633 adapter->state == VNIC_REMOVED) { 2634 spin_unlock_irqrestore(&adapter->state_lock, flags); 2635 kfree(rwi); 2636 rc = EBUSY; 2637 break; 2638 } 2639 2640 if (!saved_state) { 2641 reset_state = adapter->state; 2642 saved_state = true; 2643 } 2644 spin_unlock_irqrestore(&adapter->state_lock, flags); 2645 2646 if (rwi->reset_reason == VNIC_RESET_PASSIVE_INIT) { 2647 rtnl_lock(); 2648 rc = do_passive_init(adapter); 2649 rtnl_unlock(); 2650 if (!rc) 2651 netif_carrier_on(adapter->netdev); 2652 } else if (adapter->force_reset_recovery) { 2653 /* Since we are doing a hard reset now, clear the 2654 * failover_pending flag so we don't ignore any 2655 * future MOBILITY or other resets. 2656 */ 2657 adapter->failover_pending = false; 2658 2659 /* Transport event occurred during previous reset */ 2660 if (adapter->wait_for_reset) { 2661 /* Previous was CHANGE_PARAM; caller locked */ 2662 adapter->force_reset_recovery = false; 2663 rc = do_hard_reset(adapter, rwi, reset_state); 2664 } else { 2665 rtnl_lock(); 2666 adapter->force_reset_recovery = false; 2667 rc = do_hard_reset(adapter, rwi, reset_state); 2668 rtnl_unlock(); 2669 } 2670 if (rc) { 2671 /* give backing device time to settle down */ 2672 netdev_dbg(adapter->netdev, 2673 "[S:%s] Hard reset failed, waiting 60 secs\n", 2674 adapter_state_to_string(adapter->state)); 2675 set_current_state(TASK_UNINTERRUPTIBLE); 2676 schedule_timeout(60 * HZ); 2677 } 2678 } else { 2679 rc = do_reset(adapter, rwi, reset_state); 2680 } 2681 tmprwi = rwi; 2682 adapter->last_reset_time = jiffies; 2683 2684 if (rc) 2685 netdev_dbg(adapter->netdev, "Reset failed, rc=%d\n", rc); 2686 2687 rwi = get_next_rwi(adapter); 2688 2689 /* 2690 * If there is another reset queued, free the previous rwi 2691 * and process the new reset even if previous reset failed 2692 * (the previous reset could have failed because of a fail 2693 * over for instance, so process the fail over). 2694 * 2695 * If there are no resets queued and the previous reset failed, 2696 * the adapter would be in an undefined state. So retry the 2697 * previous reset as a hard reset. 
2698 */ 2699 if (rwi) 2700 kfree(tmprwi); 2701 else if (rc) 2702 rwi = tmprwi; 2703 2704 if (rwi && (rwi->reset_reason == VNIC_RESET_FAILOVER || 2705 rwi->reset_reason == VNIC_RESET_MOBILITY || rc)) 2706 adapter->force_reset_recovery = true; 2707 } 2708 2709 if (adapter->wait_for_reset) { 2710 adapter->reset_done_rc = rc; 2711 complete(&adapter->reset_done); 2712 } 2713 2714 clear_bit_unlock(0, &adapter->resetting); 2715 2716 netdev_dbg(adapter->netdev, 2717 "[S:%s FRR:%d WFR:%d] Done processing resets\n", 2718 adapter_state_to_string(adapter->state), 2719 adapter->force_reset_recovery, 2720 adapter->wait_for_reset); 2721 } 2722 2723 static void __ibmvnic_delayed_reset(struct work_struct *work) 2724 { 2725 struct ibmvnic_adapter *adapter; 2726 2727 adapter = container_of(work, struct ibmvnic_adapter, 2728 ibmvnic_delayed_reset.work); 2729 __ibmvnic_reset(&adapter->ibmvnic_reset); 2730 } 2731 2732 static int ibmvnic_reset(struct ibmvnic_adapter *adapter, 2733 enum ibmvnic_reset_reason reason) 2734 { 2735 struct list_head *entry, *tmp_entry; 2736 struct ibmvnic_rwi *rwi, *tmp; 2737 struct net_device *netdev = adapter->netdev; 2738 unsigned long flags; 2739 int ret; 2740 2741 spin_lock_irqsave(&adapter->rwi_lock, flags); 2742 2743 /* If failover is pending don't schedule any other reset. 2744 * Instead let the failover complete. If there is already a 2745 * a failover reset scheduled, we will detect and drop the 2746 * duplicate reset when walking the ->rwi_list below. 2747 */ 2748 if (adapter->state == VNIC_REMOVING || 2749 adapter->state == VNIC_REMOVED || 2750 (adapter->failover_pending && reason != VNIC_RESET_FAILOVER)) { 2751 ret = EBUSY; 2752 netdev_dbg(netdev, "Adapter removing or pending failover, skipping reset\n"); 2753 goto err; 2754 } 2755 2756 if (adapter->state == VNIC_PROBING) { 2757 netdev_warn(netdev, "Adapter reset during probe\n"); 2758 adapter->init_done_rc = -EAGAIN; 2759 ret = EAGAIN; 2760 goto err; 2761 } 2762 2763 list_for_each_entry(tmp, &adapter->rwi_list, list) { 2764 if (tmp->reset_reason == reason) { 2765 netdev_dbg(netdev, "Skipping matching reset, reason=%s\n", 2766 reset_reason_to_string(reason)); 2767 ret = EBUSY; 2768 goto err; 2769 } 2770 } 2771 2772 rwi = kzalloc(sizeof(*rwi), GFP_ATOMIC); 2773 if (!rwi) { 2774 ret = ENOMEM; 2775 goto err; 2776 } 2777 /* if we just received a transport event, 2778 * flush reset queue and process this reset 2779 */ 2780 if (adapter->force_reset_recovery && !list_empty(&adapter->rwi_list)) { 2781 list_for_each_safe(entry, tmp_entry, &adapter->rwi_list) 2782 list_del(entry); 2783 } 2784 rwi->reset_reason = reason; 2785 list_add_tail(&rwi->list, &adapter->rwi_list); 2786 netdev_dbg(adapter->netdev, "Scheduling reset (reason %s)\n", 2787 reset_reason_to_string(reason)); 2788 queue_work(system_long_wq, &adapter->ibmvnic_reset); 2789 2790 ret = 0; 2791 err: 2792 /* ibmvnic_close() below can block, so drop the lock first */ 2793 spin_unlock_irqrestore(&adapter->rwi_lock, flags); 2794 2795 if (ret == ENOMEM) 2796 ibmvnic_close(netdev); 2797 2798 return -ret; 2799 } 2800 2801 static void ibmvnic_tx_timeout(struct net_device *dev, unsigned int txqueue) 2802 { 2803 struct ibmvnic_adapter *adapter = netdev_priv(dev); 2804 2805 if (test_bit(0, &adapter->resetting)) { 2806 netdev_err(adapter->netdev, 2807 "Adapter is resetting, skip timeout reset\n"); 2808 return; 2809 } 2810 /* No queuing up reset until at least 5 seconds (default watchdog val) 2811 * after last reset 2812 */ 2813 if (time_before(jiffies, (adapter->last_reset_time + 
dev->watchdog_timeo))) { 2814 netdev_dbg(dev, "Not yet time to tx timeout.\n"); 2815 return; 2816 } 2817 ibmvnic_reset(adapter, VNIC_RESET_TIMEOUT); 2818 } 2819 2820 static void remove_buff_from_pool(struct ibmvnic_adapter *adapter, 2821 struct ibmvnic_rx_buff *rx_buff) 2822 { 2823 struct ibmvnic_rx_pool *pool = &adapter->rx_pool[rx_buff->pool_index]; 2824 2825 rx_buff->skb = NULL; 2826 2827 pool->free_map[pool->next_alloc] = (int)(rx_buff - pool->rx_buff); 2828 pool->next_alloc = (pool->next_alloc + 1) % pool->size; 2829 2830 atomic_dec(&pool->available); 2831 } 2832 2833 static int ibmvnic_poll(struct napi_struct *napi, int budget) 2834 { 2835 struct ibmvnic_sub_crq_queue *rx_scrq; 2836 struct ibmvnic_adapter *adapter; 2837 struct net_device *netdev; 2838 int frames_processed; 2839 int scrq_num; 2840 2841 netdev = napi->dev; 2842 adapter = netdev_priv(netdev); 2843 scrq_num = (int)(napi - adapter->napi); 2844 frames_processed = 0; 2845 rx_scrq = adapter->rx_scrq[scrq_num]; 2846 2847 restart_poll: 2848 while (frames_processed < budget) { 2849 struct sk_buff *skb; 2850 struct ibmvnic_rx_buff *rx_buff; 2851 union sub_crq *next; 2852 u32 length; 2853 u16 offset; 2854 u8 flags = 0; 2855 2856 if (unlikely(test_bit(0, &adapter->resetting) && 2857 adapter->reset_reason != VNIC_RESET_NON_FATAL)) { 2858 enable_scrq_irq(adapter, rx_scrq); 2859 napi_complete_done(napi, frames_processed); 2860 return frames_processed; 2861 } 2862 2863 if (!pending_scrq(adapter, rx_scrq)) 2864 break; 2865 next = ibmvnic_next_scrq(adapter, rx_scrq); 2866 rx_buff = (struct ibmvnic_rx_buff *) 2867 be64_to_cpu(next->rx_comp.correlator); 2868 /* do error checking */ 2869 if (next->rx_comp.rc) { 2870 netdev_dbg(netdev, "rx buffer returned with rc %x\n", 2871 be16_to_cpu(next->rx_comp.rc)); 2872 /* free the entry */ 2873 next->rx_comp.first = 0; 2874 dev_kfree_skb_any(rx_buff->skb); 2875 remove_buff_from_pool(adapter, rx_buff); 2876 continue; 2877 } else if (!rx_buff->skb) { 2878 /* free the entry */ 2879 next->rx_comp.first = 0; 2880 remove_buff_from_pool(adapter, rx_buff); 2881 continue; 2882 } 2883 2884 length = be32_to_cpu(next->rx_comp.len); 2885 offset = be16_to_cpu(next->rx_comp.off_frame_data); 2886 flags = next->rx_comp.flags; 2887 skb = rx_buff->skb; 2888 /* load long_term_buff before copying to skb */ 2889 dma_rmb(); 2890 skb_copy_to_linear_data(skb, rx_buff->data + offset, 2891 length); 2892 2893 /* VLAN Header has been stripped by the system firmware and 2894 * needs to be inserted by the driver 2895 */ 2896 if (adapter->rx_vlan_header_insertion && 2897 (flags & IBMVNIC_VLAN_STRIPPED)) 2898 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), 2899 ntohs(next->rx_comp.vlan_tci)); 2900 2901 /* free the entry */ 2902 next->rx_comp.first = 0; 2903 remove_buff_from_pool(adapter, rx_buff); 2904 2905 skb_put(skb, length); 2906 skb->protocol = eth_type_trans(skb, netdev); 2907 skb_record_rx_queue(skb, scrq_num); 2908 2909 if (flags & IBMVNIC_IP_CHKSUM_GOOD && 2910 flags & IBMVNIC_TCP_UDP_CHKSUM_GOOD) { 2911 skb->ip_summed = CHECKSUM_UNNECESSARY; 2912 } 2913 2914 length = skb->len; 2915 napi_gro_receive(napi, skb); /* send it up */ 2916 netdev->stats.rx_packets++; 2917 netdev->stats.rx_bytes += length; 2918 adapter->rx_stats_buffers[scrq_num].packets++; 2919 adapter->rx_stats_buffers[scrq_num].bytes += length; 2920 frames_processed++; 2921 } 2922 2923 if (adapter->state != VNIC_CLOSING && 2924 ((atomic_read(&adapter->rx_pool[scrq_num].available) < 2925 adapter->req_rx_add_entries_per_subcrq / 2) || 2926 frames_processed < 
budget)) 2927 replenish_rx_pool(adapter, &adapter->rx_pool[scrq_num]); 2928 if (frames_processed < budget) { 2929 if (napi_complete_done(napi, frames_processed)) { 2930 enable_scrq_irq(adapter, rx_scrq); 2931 if (pending_scrq(adapter, rx_scrq)) { 2932 if (napi_reschedule(napi)) { 2933 disable_scrq_irq(adapter, rx_scrq); 2934 goto restart_poll; 2935 } 2936 } 2937 } 2938 } 2939 return frames_processed; 2940 } 2941 2942 static int wait_for_reset(struct ibmvnic_adapter *adapter) 2943 { 2944 int rc, ret; 2945 2946 adapter->fallback.mtu = adapter->req_mtu; 2947 adapter->fallback.rx_queues = adapter->req_rx_queues; 2948 adapter->fallback.tx_queues = adapter->req_tx_queues; 2949 adapter->fallback.rx_entries = adapter->req_rx_add_entries_per_subcrq; 2950 adapter->fallback.tx_entries = adapter->req_tx_entries_per_subcrq; 2951 2952 reinit_completion(&adapter->reset_done); 2953 adapter->wait_for_reset = true; 2954 rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM); 2955 2956 if (rc) { 2957 ret = rc; 2958 goto out; 2959 } 2960 rc = ibmvnic_wait_for_completion(adapter, &adapter->reset_done, 60000); 2961 if (rc) { 2962 ret = -ENODEV; 2963 goto out; 2964 } 2965 2966 ret = 0; 2967 if (adapter->reset_done_rc) { 2968 ret = -EIO; 2969 adapter->desired.mtu = adapter->fallback.mtu; 2970 adapter->desired.rx_queues = adapter->fallback.rx_queues; 2971 adapter->desired.tx_queues = adapter->fallback.tx_queues; 2972 adapter->desired.rx_entries = adapter->fallback.rx_entries; 2973 adapter->desired.tx_entries = adapter->fallback.tx_entries; 2974 2975 reinit_completion(&adapter->reset_done); 2976 adapter->wait_for_reset = true; 2977 rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM); 2978 if (rc) { 2979 ret = rc; 2980 goto out; 2981 } 2982 rc = ibmvnic_wait_for_completion(adapter, &adapter->reset_done, 2983 60000); 2984 if (rc) { 2985 ret = -ENODEV; 2986 goto out; 2987 } 2988 } 2989 out: 2990 adapter->wait_for_reset = false; 2991 2992 return ret; 2993 } 2994 2995 static int ibmvnic_change_mtu(struct net_device *netdev, int new_mtu) 2996 { 2997 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 2998 2999 adapter->desired.mtu = new_mtu + ETH_HLEN; 3000 3001 return wait_for_reset(adapter); 3002 } 3003 3004 static netdev_features_t ibmvnic_features_check(struct sk_buff *skb, 3005 struct net_device *dev, 3006 netdev_features_t features) 3007 { 3008 /* Some backing hardware adapters can not 3009 * handle packets with a MSS less than 224 3010 * or with only one segment. 
3011 */ 3012 if (skb_is_gso(skb)) { 3013 if (skb_shinfo(skb)->gso_size < 224 || 3014 skb_shinfo(skb)->gso_segs == 1) 3015 features &= ~NETIF_F_GSO_MASK; 3016 } 3017 3018 return features; 3019 } 3020 3021 static const struct net_device_ops ibmvnic_netdev_ops = { 3022 .ndo_open = ibmvnic_open, 3023 .ndo_stop = ibmvnic_close, 3024 .ndo_start_xmit = ibmvnic_xmit, 3025 .ndo_set_rx_mode = ibmvnic_set_multi, 3026 .ndo_set_mac_address = ibmvnic_set_mac, 3027 .ndo_validate_addr = eth_validate_addr, 3028 .ndo_tx_timeout = ibmvnic_tx_timeout, 3029 .ndo_change_mtu = ibmvnic_change_mtu, 3030 .ndo_features_check = ibmvnic_features_check, 3031 }; 3032 3033 /* ethtool functions */ 3034 3035 static int ibmvnic_get_link_ksettings(struct net_device *netdev, 3036 struct ethtool_link_ksettings *cmd) 3037 { 3038 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 3039 int rc; 3040 3041 rc = send_query_phys_parms(adapter); 3042 if (rc) { 3043 adapter->speed = SPEED_UNKNOWN; 3044 adapter->duplex = DUPLEX_UNKNOWN; 3045 } 3046 cmd->base.speed = adapter->speed; 3047 cmd->base.duplex = adapter->duplex; 3048 cmd->base.port = PORT_FIBRE; 3049 cmd->base.phy_address = 0; 3050 cmd->base.autoneg = AUTONEG_ENABLE; 3051 3052 return 0; 3053 } 3054 3055 static void ibmvnic_get_drvinfo(struct net_device *netdev, 3056 struct ethtool_drvinfo *info) 3057 { 3058 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 3059 3060 strscpy(info->driver, ibmvnic_driver_name, sizeof(info->driver)); 3061 strscpy(info->version, IBMVNIC_DRIVER_VERSION, sizeof(info->version)); 3062 strscpy(info->fw_version, adapter->fw_version, 3063 sizeof(info->fw_version)); 3064 } 3065 3066 static u32 ibmvnic_get_msglevel(struct net_device *netdev) 3067 { 3068 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 3069 3070 return adapter->msg_enable; 3071 } 3072 3073 static void ibmvnic_set_msglevel(struct net_device *netdev, u32 data) 3074 { 3075 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 3076 3077 adapter->msg_enable = data; 3078 } 3079 3080 static u32 ibmvnic_get_link(struct net_device *netdev) 3081 { 3082 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 3083 3084 /* Don't need to send a query because we request a logical link up at 3085 * init and then we wait for link state indications 3086 */ 3087 return adapter->logical_link_state; 3088 } 3089 3090 static void ibmvnic_get_ringparam(struct net_device *netdev, 3091 struct ethtool_ringparam *ring, 3092 struct kernel_ethtool_ringparam *kernel_ring, 3093 struct netlink_ext_ack *extack) 3094 { 3095 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 3096 3097 if (adapter->priv_flags & IBMVNIC_USE_SERVER_MAXES) { 3098 ring->rx_max_pending = adapter->max_rx_add_entries_per_subcrq; 3099 ring->tx_max_pending = adapter->max_tx_entries_per_subcrq; 3100 } else { 3101 ring->rx_max_pending = IBMVNIC_MAX_QUEUE_SZ; 3102 ring->tx_max_pending = IBMVNIC_MAX_QUEUE_SZ; 3103 } 3104 ring->rx_mini_max_pending = 0; 3105 ring->rx_jumbo_max_pending = 0; 3106 ring->rx_pending = adapter->req_rx_add_entries_per_subcrq; 3107 ring->tx_pending = adapter->req_tx_entries_per_subcrq; 3108 ring->rx_mini_pending = 0; 3109 ring->rx_jumbo_pending = 0; 3110 } 3111 3112 static int ibmvnic_set_ringparam(struct net_device *netdev, 3113 struct ethtool_ringparam *ring, 3114 struct kernel_ethtool_ringparam *kernel_ring, 3115 struct netlink_ext_ack *extack) 3116 { 3117 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 3118 int ret; 3119 3120 ret = 0; 3121 adapter->desired.rx_entries = ring->rx_pending; 3122 
adapter->desired.tx_entries = ring->tx_pending; 3123 3124 ret = wait_for_reset(adapter); 3125 3126 if (!ret && 3127 (adapter->req_rx_add_entries_per_subcrq != ring->rx_pending || 3128 adapter->req_tx_entries_per_subcrq != ring->tx_pending)) 3129 netdev_info(netdev, 3130 "Could not match full ringsize request. Requested: RX %d, TX %d; Allowed: RX %llu, TX %llu\n", 3131 ring->rx_pending, ring->tx_pending, 3132 adapter->req_rx_add_entries_per_subcrq, 3133 adapter->req_tx_entries_per_subcrq); 3134 return ret; 3135 } 3136 3137 static void ibmvnic_get_channels(struct net_device *netdev, 3138 struct ethtool_channels *channels) 3139 { 3140 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 3141 3142 if (adapter->priv_flags & IBMVNIC_USE_SERVER_MAXES) { 3143 channels->max_rx = adapter->max_rx_queues; 3144 channels->max_tx = adapter->max_tx_queues; 3145 } else { 3146 channels->max_rx = IBMVNIC_MAX_QUEUES; 3147 channels->max_tx = IBMVNIC_MAX_QUEUES; 3148 } 3149 3150 channels->max_other = 0; 3151 channels->max_combined = 0; 3152 channels->rx_count = adapter->req_rx_queues; 3153 channels->tx_count = adapter->req_tx_queues; 3154 channels->other_count = 0; 3155 channels->combined_count = 0; 3156 } 3157 3158 static int ibmvnic_set_channels(struct net_device *netdev, 3159 struct ethtool_channels *channels) 3160 { 3161 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 3162 int ret; 3163 3164 ret = 0; 3165 adapter->desired.rx_queues = channels->rx_count; 3166 adapter->desired.tx_queues = channels->tx_count; 3167 3168 ret = wait_for_reset(adapter); 3169 3170 if (!ret && 3171 (adapter->req_rx_queues != channels->rx_count || 3172 adapter->req_tx_queues != channels->tx_count)) 3173 netdev_info(netdev, 3174 "Could not match full channels request. Requested: RX %d, TX %d; Allowed: RX %llu, TX %llu\n", 3175 channels->rx_count, channels->tx_count, 3176 adapter->req_rx_queues, adapter->req_tx_queues); 3177 return ret; 3178 } 3179 3180 static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data) 3181 { 3182 struct ibmvnic_adapter *adapter = netdev_priv(dev); 3183 int i; 3184 3185 switch (stringset) { 3186 case ETH_SS_STATS: 3187 for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); 3188 i++, data += ETH_GSTRING_LEN) 3189 memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN); 3190 3191 for (i = 0; i < adapter->req_tx_queues; i++) { 3192 snprintf(data, ETH_GSTRING_LEN, "tx%d_packets", i); 3193 data += ETH_GSTRING_LEN; 3194 3195 snprintf(data, ETH_GSTRING_LEN, "tx%d_bytes", i); 3196 data += ETH_GSTRING_LEN; 3197 3198 snprintf(data, ETH_GSTRING_LEN, 3199 "tx%d_dropped_packets", i); 3200 data += ETH_GSTRING_LEN; 3201 } 3202 3203 for (i = 0; i < adapter->req_rx_queues; i++) { 3204 snprintf(data, ETH_GSTRING_LEN, "rx%d_packets", i); 3205 data += ETH_GSTRING_LEN; 3206 3207 snprintf(data, ETH_GSTRING_LEN, "rx%d_bytes", i); 3208 data += ETH_GSTRING_LEN; 3209 3210 snprintf(data, ETH_GSTRING_LEN, "rx%d_interrupts", i); 3211 data += ETH_GSTRING_LEN; 3212 } 3213 break; 3214 3215 case ETH_SS_PRIV_FLAGS: 3216 for (i = 0; i < ARRAY_SIZE(ibmvnic_priv_flags); i++) 3217 strcpy(data + i * ETH_GSTRING_LEN, 3218 ibmvnic_priv_flags[i]); 3219 break; 3220 default: 3221 return; 3222 } 3223 } 3224 3225 static int ibmvnic_get_sset_count(struct net_device *dev, int sset) 3226 { 3227 struct ibmvnic_adapter *adapter = netdev_priv(dev); 3228 3229 switch (sset) { 3230 case ETH_SS_STATS: 3231 return ARRAY_SIZE(ibmvnic_stats) + 3232 adapter->req_tx_queues * NUM_TX_STATS + 3233 adapter->req_rx_queues * NUM_RX_STATS; 3234 case 
ETH_SS_PRIV_FLAGS: 3235 return ARRAY_SIZE(ibmvnic_priv_flags); 3236 default: 3237 return -EOPNOTSUPP; 3238 } 3239 } 3240 3241 static void ibmvnic_get_ethtool_stats(struct net_device *dev, 3242 struct ethtool_stats *stats, u64 *data) 3243 { 3244 struct ibmvnic_adapter *adapter = netdev_priv(dev); 3245 union ibmvnic_crq crq; 3246 int i, j; 3247 int rc; 3248 3249 memset(&crq, 0, sizeof(crq)); 3250 crq.request_statistics.first = IBMVNIC_CRQ_CMD; 3251 crq.request_statistics.cmd = REQUEST_STATISTICS; 3252 crq.request_statistics.ioba = cpu_to_be32(adapter->stats_token); 3253 crq.request_statistics.len = 3254 cpu_to_be32(sizeof(struct ibmvnic_statistics)); 3255 3256 /* Wait for data to be written */ 3257 reinit_completion(&adapter->stats_done); 3258 rc = ibmvnic_send_crq(adapter, &crq); 3259 if (rc) 3260 return; 3261 rc = ibmvnic_wait_for_completion(adapter, &adapter->stats_done, 10000); 3262 if (rc) 3263 return; 3264 3265 for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++) 3266 data[i] = be64_to_cpu(IBMVNIC_GET_STAT 3267 (adapter, ibmvnic_stats[i].offset)); 3268 3269 for (j = 0; j < adapter->req_tx_queues; j++) { 3270 data[i] = adapter->tx_stats_buffers[j].packets; 3271 i++; 3272 data[i] = adapter->tx_stats_buffers[j].bytes; 3273 i++; 3274 data[i] = adapter->tx_stats_buffers[j].dropped_packets; 3275 i++; 3276 } 3277 3278 for (j = 0; j < adapter->req_rx_queues; j++) { 3279 data[i] = adapter->rx_stats_buffers[j].packets; 3280 i++; 3281 data[i] = adapter->rx_stats_buffers[j].bytes; 3282 i++; 3283 data[i] = adapter->rx_stats_buffers[j].interrupts; 3284 i++; 3285 } 3286 } 3287 3288 static u32 ibmvnic_get_priv_flags(struct net_device *netdev) 3289 { 3290 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 3291 3292 return adapter->priv_flags; 3293 } 3294 3295 static int ibmvnic_set_priv_flags(struct net_device *netdev, u32 flags) 3296 { 3297 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 3298 bool which_maxes = !!(flags & IBMVNIC_USE_SERVER_MAXES); 3299 3300 if (which_maxes) 3301 adapter->priv_flags |= IBMVNIC_USE_SERVER_MAXES; 3302 else 3303 adapter->priv_flags &= ~IBMVNIC_USE_SERVER_MAXES; 3304 3305 return 0; 3306 } 3307 3308 static const struct ethtool_ops ibmvnic_ethtool_ops = { 3309 .get_drvinfo = ibmvnic_get_drvinfo, 3310 .get_msglevel = ibmvnic_get_msglevel, 3311 .set_msglevel = ibmvnic_set_msglevel, 3312 .get_link = ibmvnic_get_link, 3313 .get_ringparam = ibmvnic_get_ringparam, 3314 .set_ringparam = ibmvnic_set_ringparam, 3315 .get_channels = ibmvnic_get_channels, 3316 .set_channels = ibmvnic_set_channels, 3317 .get_strings = ibmvnic_get_strings, 3318 .get_sset_count = ibmvnic_get_sset_count, 3319 .get_ethtool_stats = ibmvnic_get_ethtool_stats, 3320 .get_link_ksettings = ibmvnic_get_link_ksettings, 3321 .get_priv_flags = ibmvnic_get_priv_flags, 3322 .set_priv_flags = ibmvnic_set_priv_flags, 3323 }; 3324 3325 /* Routines for managing CRQs/sCRQs */ 3326 3327 static int reset_one_sub_crq_queue(struct ibmvnic_adapter *adapter, 3328 struct ibmvnic_sub_crq_queue *scrq) 3329 { 3330 int rc; 3331 3332 if (!scrq) { 3333 netdev_dbg(adapter->netdev, "Invalid scrq reset.\n"); 3334 return -EINVAL; 3335 } 3336 3337 if (scrq->irq) { 3338 free_irq(scrq->irq, scrq); 3339 irq_dispose_mapping(scrq->irq); 3340 scrq->irq = 0; 3341 } 3342 3343 if (scrq->msgs) { 3344 memset(scrq->msgs, 0, 4 * PAGE_SIZE); 3345 atomic_set(&scrq->used, 0); 3346 scrq->cur = 0; 3347 scrq->ind_buf.index = 0; 3348 } else { 3349 netdev_dbg(adapter->netdev, "Invalid scrq reset\n"); 3350 return -EINVAL; 3351 } 3352 3353 rc = 
h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token, 3354 4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq); 3355 return rc; 3356 } 3357 3358 static int reset_sub_crq_queues(struct ibmvnic_adapter *adapter) 3359 { 3360 int i, rc; 3361 3362 if (!adapter->tx_scrq || !adapter->rx_scrq) 3363 return -EINVAL; 3364 3365 for (i = 0; i < adapter->req_tx_queues; i++) { 3366 netdev_dbg(adapter->netdev, "Re-setting tx_scrq[%d]\n", i); 3367 rc = reset_one_sub_crq_queue(adapter, adapter->tx_scrq[i]); 3368 if (rc) 3369 return rc; 3370 } 3371 3372 for (i = 0; i < adapter->req_rx_queues; i++) { 3373 netdev_dbg(adapter->netdev, "Re-setting rx_scrq[%d]\n", i); 3374 rc = reset_one_sub_crq_queue(adapter, adapter->rx_scrq[i]); 3375 if (rc) 3376 return rc; 3377 } 3378 3379 return rc; 3380 } 3381 3382 static void release_sub_crq_queue(struct ibmvnic_adapter *adapter, 3383 struct ibmvnic_sub_crq_queue *scrq, 3384 bool do_h_free) 3385 { 3386 struct device *dev = &adapter->vdev->dev; 3387 long rc; 3388 3389 netdev_dbg(adapter->netdev, "Releasing sub-CRQ\n"); 3390 3391 if (do_h_free) { 3392 /* Close the sub-crqs */ 3393 do { 3394 rc = plpar_hcall_norets(H_FREE_SUB_CRQ, 3395 adapter->vdev->unit_address, 3396 scrq->crq_num); 3397 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc)); 3398 3399 if (rc) { 3400 netdev_err(adapter->netdev, 3401 "Failed to release sub-CRQ %16lx, rc = %ld\n", 3402 scrq->crq_num, rc); 3403 } 3404 } 3405 3406 dma_free_coherent(dev, 3407 IBMVNIC_IND_ARR_SZ, 3408 scrq->ind_buf.indir_arr, 3409 scrq->ind_buf.indir_dma); 3410 3411 dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE, 3412 DMA_BIDIRECTIONAL); 3413 free_pages((unsigned long)scrq->msgs, 2); 3414 kfree(scrq); 3415 } 3416 3417 static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter 3418 *adapter) 3419 { 3420 struct device *dev = &adapter->vdev->dev; 3421 struct ibmvnic_sub_crq_queue *scrq; 3422 int rc; 3423 3424 scrq = kzalloc(sizeof(*scrq), GFP_KERNEL); 3425 if (!scrq) 3426 return NULL; 3427 3428 scrq->msgs = 3429 (union sub_crq *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 2); 3430 if (!scrq->msgs) { 3431 dev_warn(dev, "Couldn't allocate crq queue messages page\n"); 3432 goto zero_page_failed; 3433 } 3434 3435 scrq->msg_token = dma_map_single(dev, scrq->msgs, 4 * PAGE_SIZE, 3436 DMA_BIDIRECTIONAL); 3437 if (dma_mapping_error(dev, scrq->msg_token)) { 3438 dev_warn(dev, "Couldn't map crq queue messages page\n"); 3439 goto map_failed; 3440 } 3441 3442 rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token, 3443 4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq); 3444 3445 if (rc == H_RESOURCE) 3446 rc = ibmvnic_reset_crq(adapter); 3447 3448 if (rc == H_CLOSED) { 3449 dev_warn(dev, "Partner adapter not ready, waiting.\n"); 3450 } else if (rc) { 3451 dev_warn(dev, "Error %d registering sub-crq\n", rc); 3452 goto reg_failed; 3453 } 3454 3455 scrq->adapter = adapter; 3456 scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs); 3457 scrq->ind_buf.index = 0; 3458 3459 scrq->ind_buf.indir_arr = 3460 dma_alloc_coherent(dev, 3461 IBMVNIC_IND_ARR_SZ, 3462 &scrq->ind_buf.indir_dma, 3463 GFP_KERNEL); 3464 3465 if (!scrq->ind_buf.indir_arr) 3466 goto indir_failed; 3467 3468 spin_lock_init(&scrq->lock); 3469 3470 netdev_dbg(adapter->netdev, 3471 "sub-crq initialized, num %lx, hw_irq=%lx, irq=%x\n", 3472 scrq->crq_num, scrq->hw_irq, scrq->irq); 3473 3474 return scrq; 3475 3476 indir_failed: 3477 do { 3478 rc = plpar_hcall_norets(H_FREE_SUB_CRQ, 3479 adapter->vdev->unit_address, 3480 scrq->crq_num); 3481 } while (rc == H_BUSY || rc == 
H_IS_LONG_BUSY(rc)); 3482 reg_failed: 3483 dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE, 3484 DMA_BIDIRECTIONAL); 3485 map_failed: 3486 free_pages((unsigned long)scrq->msgs, 2); 3487 zero_page_failed: 3488 kfree(scrq); 3489 3490 return NULL; 3491 } 3492 3493 static void release_sub_crqs(struct ibmvnic_adapter *adapter, bool do_h_free) 3494 { 3495 int i; 3496 3497 if (adapter->tx_scrq) { 3498 for (i = 0; i < adapter->num_active_tx_scrqs; i++) { 3499 if (!adapter->tx_scrq[i]) 3500 continue; 3501 3502 netdev_dbg(adapter->netdev, "Releasing tx_scrq[%d]\n", 3503 i); 3504 ibmvnic_tx_scrq_clean_buffer(adapter, adapter->tx_scrq[i]); 3505 if (adapter->tx_scrq[i]->irq) { 3506 free_irq(adapter->tx_scrq[i]->irq, 3507 adapter->tx_scrq[i]); 3508 irq_dispose_mapping(adapter->tx_scrq[i]->irq); 3509 adapter->tx_scrq[i]->irq = 0; 3510 } 3511 3512 release_sub_crq_queue(adapter, adapter->tx_scrq[i], 3513 do_h_free); 3514 } 3515 3516 kfree(adapter->tx_scrq); 3517 adapter->tx_scrq = NULL; 3518 adapter->num_active_tx_scrqs = 0; 3519 } 3520 3521 if (adapter->rx_scrq) { 3522 for (i = 0; i < adapter->num_active_rx_scrqs; i++) { 3523 if (!adapter->rx_scrq[i]) 3524 continue; 3525 3526 netdev_dbg(adapter->netdev, "Releasing rx_scrq[%d]\n", 3527 i); 3528 if (adapter->rx_scrq[i]->irq) { 3529 free_irq(adapter->rx_scrq[i]->irq, 3530 adapter->rx_scrq[i]); 3531 irq_dispose_mapping(adapter->rx_scrq[i]->irq); 3532 adapter->rx_scrq[i]->irq = 0; 3533 } 3534 3535 release_sub_crq_queue(adapter, adapter->rx_scrq[i], 3536 do_h_free); 3537 } 3538 3539 kfree(adapter->rx_scrq); 3540 adapter->rx_scrq = NULL; 3541 adapter->num_active_rx_scrqs = 0; 3542 } 3543 } 3544 3545 static int disable_scrq_irq(struct ibmvnic_adapter *adapter, 3546 struct ibmvnic_sub_crq_queue *scrq) 3547 { 3548 struct device *dev = &adapter->vdev->dev; 3549 unsigned long rc; 3550 3551 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address, 3552 H_DISABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0); 3553 if (rc) 3554 dev_err(dev, "Couldn't disable scrq irq 0x%lx. rc=%ld\n", 3555 scrq->hw_irq, rc); 3556 return rc; 3557 } 3558 3559 static int enable_scrq_irq(struct ibmvnic_adapter *adapter, 3560 struct ibmvnic_sub_crq_queue *scrq) 3561 { 3562 struct device *dev = &adapter->vdev->dev; 3563 unsigned long rc; 3564 3565 if (scrq->hw_irq > 0x100000000ULL) { 3566 dev_err(dev, "bad hw_irq = %lx\n", scrq->hw_irq); 3567 return 1; 3568 } 3569 3570 if (test_bit(0, &adapter->resetting) && 3571 adapter->reset_reason == VNIC_RESET_MOBILITY) { 3572 u64 val = (0xff000000) | scrq->hw_irq; 3573 3574 rc = plpar_hcall_norets(H_EOI, val); 3575 /* H_EOI would fail with rc = H_FUNCTION when running 3576 * in XIVE mode which is expected, but not an error. 3577 */ 3578 if (rc && (rc != H_FUNCTION)) 3579 dev_err(dev, "H_EOI FAILED irq 0x%llx. rc=%ld\n", 3580 val, rc); 3581 } 3582 3583 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address, 3584 H_ENABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0); 3585 if (rc) 3586 dev_err(dev, "Couldn't enable scrq irq 0x%lx. 
rc=%ld\n", 3587 scrq->hw_irq, rc); 3588 return rc; 3589 } 3590 3591 static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter, 3592 struct ibmvnic_sub_crq_queue *scrq) 3593 { 3594 struct device *dev = &adapter->vdev->dev; 3595 struct ibmvnic_tx_pool *tx_pool; 3596 struct ibmvnic_tx_buff *txbuff; 3597 struct netdev_queue *txq; 3598 union sub_crq *next; 3599 int index; 3600 int i; 3601 3602 restart_loop: 3603 while (pending_scrq(adapter, scrq)) { 3604 unsigned int pool = scrq->pool_index; 3605 int num_entries = 0; 3606 int total_bytes = 0; 3607 int num_packets = 0; 3608 3609 next = ibmvnic_next_scrq(adapter, scrq); 3610 for (i = 0; i < next->tx_comp.num_comps; i++) { 3611 index = be32_to_cpu(next->tx_comp.correlators[i]); 3612 if (index & IBMVNIC_TSO_POOL_MASK) { 3613 tx_pool = &adapter->tso_pool[pool]; 3614 index &= ~IBMVNIC_TSO_POOL_MASK; 3615 } else { 3616 tx_pool = &adapter->tx_pool[pool]; 3617 } 3618 3619 txbuff = &tx_pool->tx_buff[index]; 3620 num_packets++; 3621 num_entries += txbuff->num_entries; 3622 if (txbuff->skb) { 3623 total_bytes += txbuff->skb->len; 3624 if (next->tx_comp.rcs[i]) { 3625 dev_err(dev, "tx error %x\n", 3626 next->tx_comp.rcs[i]); 3627 dev_kfree_skb_irq(txbuff->skb); 3628 } else { 3629 dev_consume_skb_irq(txbuff->skb); 3630 } 3631 txbuff->skb = NULL; 3632 } else { 3633 netdev_warn(adapter->netdev, 3634 "TX completion received with NULL socket buffer\n"); 3635 } 3636 tx_pool->free_map[tx_pool->producer_index] = index; 3637 tx_pool->producer_index = 3638 (tx_pool->producer_index + 1) % 3639 tx_pool->num_buffers; 3640 } 3641 /* remove tx_comp scrq*/ 3642 next->tx_comp.first = 0; 3643 3644 txq = netdev_get_tx_queue(adapter->netdev, scrq->pool_index); 3645 netdev_tx_completed_queue(txq, num_packets, total_bytes); 3646 3647 if (atomic_sub_return(num_entries, &scrq->used) <= 3648 (adapter->req_tx_entries_per_subcrq / 2) && 3649 __netif_subqueue_stopped(adapter->netdev, 3650 scrq->pool_index)) { 3651 netif_wake_subqueue(adapter->netdev, scrq->pool_index); 3652 netdev_dbg(adapter->netdev, "Started queue %d\n", 3653 scrq->pool_index); 3654 } 3655 } 3656 3657 enable_scrq_irq(adapter, scrq); 3658 3659 if (pending_scrq(adapter, scrq)) { 3660 disable_scrq_irq(adapter, scrq); 3661 goto restart_loop; 3662 } 3663 3664 return 0; 3665 } 3666 3667 static irqreturn_t ibmvnic_interrupt_tx(int irq, void *instance) 3668 { 3669 struct ibmvnic_sub_crq_queue *scrq = instance; 3670 struct ibmvnic_adapter *adapter = scrq->adapter; 3671 3672 disable_scrq_irq(adapter, scrq); 3673 ibmvnic_complete_tx(adapter, scrq); 3674 3675 return IRQ_HANDLED; 3676 } 3677 3678 static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance) 3679 { 3680 struct ibmvnic_sub_crq_queue *scrq = instance; 3681 struct ibmvnic_adapter *adapter = scrq->adapter; 3682 3683 /* When booting a kdump kernel we can hit pending interrupts 3684 * prior to completing driver initialization. 
3685 */ 3686 if (unlikely(adapter->state != VNIC_OPEN)) 3687 return IRQ_NONE; 3688 3689 adapter->rx_stats_buffers[scrq->scrq_num].interrupts++; 3690 3691 if (napi_schedule_prep(&adapter->napi[scrq->scrq_num])) { 3692 disable_scrq_irq(adapter, scrq); 3693 __napi_schedule(&adapter->napi[scrq->scrq_num]); 3694 } 3695 3696 return IRQ_HANDLED; 3697 } 3698 3699 static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter) 3700 { 3701 struct device *dev = &adapter->vdev->dev; 3702 struct ibmvnic_sub_crq_queue *scrq; 3703 int i = 0, j = 0; 3704 int rc = 0; 3705 3706 for (i = 0; i < adapter->req_tx_queues; i++) { 3707 netdev_dbg(adapter->netdev, "Initializing tx_scrq[%d] irq\n", 3708 i); 3709 scrq = adapter->tx_scrq[i]; 3710 scrq->irq = irq_create_mapping(NULL, scrq->hw_irq); 3711 3712 if (!scrq->irq) { 3713 rc = -EINVAL; 3714 dev_err(dev, "Error mapping irq\n"); 3715 goto req_tx_irq_failed; 3716 } 3717 3718 snprintf(scrq->name, sizeof(scrq->name), "ibmvnic-%x-tx%d", 3719 adapter->vdev->unit_address, i); 3720 rc = request_irq(scrq->irq, ibmvnic_interrupt_tx, 3721 0, scrq->name, scrq); 3722 3723 if (rc) { 3724 dev_err(dev, "Couldn't register tx irq 0x%x. rc=%d\n", 3725 scrq->irq, rc); 3726 irq_dispose_mapping(scrq->irq); 3727 goto req_tx_irq_failed; 3728 } 3729 } 3730 3731 for (i = 0; i < adapter->req_rx_queues; i++) { 3732 netdev_dbg(adapter->netdev, "Initializing rx_scrq[%d] irq\n", 3733 i); 3734 scrq = adapter->rx_scrq[i]; 3735 scrq->irq = irq_create_mapping(NULL, scrq->hw_irq); 3736 if (!scrq->irq) { 3737 rc = -EINVAL; 3738 dev_err(dev, "Error mapping irq\n"); 3739 goto req_rx_irq_failed; 3740 } 3741 snprintf(scrq->name, sizeof(scrq->name), "ibmvnic-%x-rx%d", 3742 adapter->vdev->unit_address, i); 3743 rc = request_irq(scrq->irq, ibmvnic_interrupt_rx, 3744 0, scrq->name, scrq); 3745 if (rc) { 3746 dev_err(dev, "Couldn't register rx irq 0x%x. 
rc=%d\n", 3747 scrq->irq, rc); 3748 irq_dispose_mapping(scrq->irq); 3749 goto req_rx_irq_failed; 3750 } 3751 } 3752 return rc; 3753 3754 req_rx_irq_failed: 3755 for (j = 0; j < i; j++) { 3756 free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]); 3757 irq_dispose_mapping(adapter->rx_scrq[j]->irq); 3758 } 3759 i = adapter->req_tx_queues; 3760 req_tx_irq_failed: 3761 for (j = 0; j < i; j++) { 3762 free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]); 3763 irq_dispose_mapping(adapter->tx_scrq[j]->irq); 3764 } 3765 release_sub_crqs(adapter, 1); 3766 return rc; 3767 } 3768 3769 static int init_sub_crqs(struct ibmvnic_adapter *adapter) 3770 { 3771 struct device *dev = &adapter->vdev->dev; 3772 struct ibmvnic_sub_crq_queue **allqueues; 3773 int registered_queues = 0; 3774 int total_queues; 3775 int more = 0; 3776 int i; 3777 3778 total_queues = adapter->req_tx_queues + adapter->req_rx_queues; 3779 3780 allqueues = kcalloc(total_queues, sizeof(*allqueues), GFP_KERNEL); 3781 if (!allqueues) 3782 return -1; 3783 3784 for (i = 0; i < total_queues; i++) { 3785 allqueues[i] = init_sub_crq_queue(adapter); 3786 if (!allqueues[i]) { 3787 dev_warn(dev, "Couldn't allocate all sub-crqs\n"); 3788 break; 3789 } 3790 registered_queues++; 3791 } 3792 3793 /* Make sure we were able to register the minimum number of queues */ 3794 if (registered_queues < 3795 adapter->min_tx_queues + adapter->min_rx_queues) { 3796 dev_err(dev, "Fatal: Couldn't init min number of sub-crqs\n"); 3797 goto tx_failed; 3798 } 3799 3800 /* Distribute the failed allocated queues*/ 3801 for (i = 0; i < total_queues - registered_queues + more ; i++) { 3802 netdev_dbg(adapter->netdev, "Reducing number of queues\n"); 3803 switch (i % 3) { 3804 case 0: 3805 if (adapter->req_rx_queues > adapter->min_rx_queues) 3806 adapter->req_rx_queues--; 3807 else 3808 more++; 3809 break; 3810 case 1: 3811 if (adapter->req_tx_queues > adapter->min_tx_queues) 3812 adapter->req_tx_queues--; 3813 else 3814 more++; 3815 break; 3816 } 3817 } 3818 3819 adapter->tx_scrq = kcalloc(adapter->req_tx_queues, 3820 sizeof(*adapter->tx_scrq), GFP_KERNEL); 3821 if (!adapter->tx_scrq) 3822 goto tx_failed; 3823 3824 for (i = 0; i < adapter->req_tx_queues; i++) { 3825 adapter->tx_scrq[i] = allqueues[i]; 3826 adapter->tx_scrq[i]->pool_index = i; 3827 adapter->num_active_tx_scrqs++; 3828 } 3829 3830 adapter->rx_scrq = kcalloc(adapter->req_rx_queues, 3831 sizeof(*adapter->rx_scrq), GFP_KERNEL); 3832 if (!adapter->rx_scrq) 3833 goto rx_failed; 3834 3835 for (i = 0; i < adapter->req_rx_queues; i++) { 3836 adapter->rx_scrq[i] = allqueues[i + adapter->req_tx_queues]; 3837 adapter->rx_scrq[i]->scrq_num = i; 3838 adapter->num_active_rx_scrqs++; 3839 } 3840 3841 kfree(allqueues); 3842 return 0; 3843 3844 rx_failed: 3845 kfree(adapter->tx_scrq); 3846 adapter->tx_scrq = NULL; 3847 tx_failed: 3848 for (i = 0; i < registered_queues; i++) 3849 release_sub_crq_queue(adapter, allqueues[i], 1); 3850 kfree(allqueues); 3851 return -1; 3852 } 3853 3854 static void send_request_cap(struct ibmvnic_adapter *adapter, int retry) 3855 { 3856 struct device *dev = &adapter->vdev->dev; 3857 union ibmvnic_crq crq; 3858 int max_entries; 3859 3860 if (!retry) { 3861 /* Sub-CRQ entries are 32 byte long */ 3862 int entries_page = 4 * PAGE_SIZE / (sizeof(u64) * 4); 3863 3864 if (adapter->min_tx_entries_per_subcrq > entries_page || 3865 adapter->min_rx_add_entries_per_subcrq > entries_page) { 3866 dev_err(dev, "Fatal, invalid entries per sub-crq\n"); 3867 return; 3868 } 3869 3870 if (adapter->desired.mtu) 
3871 adapter->req_mtu = adapter->desired.mtu; 3872 else 3873 adapter->req_mtu = adapter->netdev->mtu + ETH_HLEN; 3874 3875 if (!adapter->desired.tx_entries) 3876 adapter->desired.tx_entries = 3877 adapter->max_tx_entries_per_subcrq; 3878 if (!adapter->desired.rx_entries) 3879 adapter->desired.rx_entries = 3880 adapter->max_rx_add_entries_per_subcrq; 3881 3882 max_entries = IBMVNIC_MAX_LTB_SIZE / 3883 (adapter->req_mtu + IBMVNIC_BUFFER_HLEN); 3884 3885 if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) * 3886 adapter->desired.tx_entries > IBMVNIC_MAX_LTB_SIZE) { 3887 adapter->desired.tx_entries = max_entries; 3888 } 3889 3890 if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) * 3891 adapter->desired.rx_entries > IBMVNIC_MAX_LTB_SIZE) { 3892 adapter->desired.rx_entries = max_entries; 3893 } 3894 3895 if (adapter->desired.tx_entries) 3896 adapter->req_tx_entries_per_subcrq = 3897 adapter->desired.tx_entries; 3898 else 3899 adapter->req_tx_entries_per_subcrq = 3900 adapter->max_tx_entries_per_subcrq; 3901 3902 if (adapter->desired.rx_entries) 3903 adapter->req_rx_add_entries_per_subcrq = 3904 adapter->desired.rx_entries; 3905 else 3906 adapter->req_rx_add_entries_per_subcrq = 3907 adapter->max_rx_add_entries_per_subcrq; 3908 3909 if (adapter->desired.tx_queues) 3910 adapter->req_tx_queues = 3911 adapter->desired.tx_queues; 3912 else 3913 adapter->req_tx_queues = 3914 adapter->opt_tx_comp_sub_queues; 3915 3916 if (adapter->desired.rx_queues) 3917 adapter->req_rx_queues = 3918 adapter->desired.rx_queues; 3919 else 3920 adapter->req_rx_queues = 3921 adapter->opt_rx_comp_queues; 3922 3923 adapter->req_rx_add_queues = adapter->max_rx_add_queues; 3924 } 3925 3926 memset(&crq, 0, sizeof(crq)); 3927 crq.request_capability.first = IBMVNIC_CRQ_CMD; 3928 crq.request_capability.cmd = REQUEST_CAPABILITY; 3929 3930 crq.request_capability.capability = cpu_to_be16(REQ_TX_QUEUES); 3931 crq.request_capability.number = cpu_to_be64(adapter->req_tx_queues); 3932 atomic_inc(&adapter->running_cap_crqs); 3933 ibmvnic_send_crq(adapter, &crq); 3934 3935 crq.request_capability.capability = cpu_to_be16(REQ_RX_QUEUES); 3936 crq.request_capability.number = cpu_to_be64(adapter->req_rx_queues); 3937 atomic_inc(&adapter->running_cap_crqs); 3938 ibmvnic_send_crq(adapter, &crq); 3939 3940 crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_QUEUES); 3941 crq.request_capability.number = cpu_to_be64(adapter->req_rx_add_queues); 3942 atomic_inc(&adapter->running_cap_crqs); 3943 ibmvnic_send_crq(adapter, &crq); 3944 3945 crq.request_capability.capability = 3946 cpu_to_be16(REQ_TX_ENTRIES_PER_SUBCRQ); 3947 crq.request_capability.number = 3948 cpu_to_be64(adapter->req_tx_entries_per_subcrq); 3949 atomic_inc(&adapter->running_cap_crqs); 3950 ibmvnic_send_crq(adapter, &crq); 3951 3952 crq.request_capability.capability = 3953 cpu_to_be16(REQ_RX_ADD_ENTRIES_PER_SUBCRQ); 3954 crq.request_capability.number = 3955 cpu_to_be64(adapter->req_rx_add_entries_per_subcrq); 3956 atomic_inc(&adapter->running_cap_crqs); 3957 ibmvnic_send_crq(adapter, &crq); 3958 3959 crq.request_capability.capability = cpu_to_be16(REQ_MTU); 3960 crq.request_capability.number = cpu_to_be64(adapter->req_mtu); 3961 atomic_inc(&adapter->running_cap_crqs); 3962 ibmvnic_send_crq(adapter, &crq); 3963 3964 if (adapter->netdev->flags & IFF_PROMISC) { 3965 if (adapter->promisc_supported) { 3966 crq.request_capability.capability = 3967 cpu_to_be16(PROMISC_REQUESTED); 3968 crq.request_capability.number = cpu_to_be64(1); 3969 atomic_inc(&adapter->running_cap_crqs); 3970 
ibmvnic_send_crq(adapter, &crq); 3971 } 3972 } else { 3973 crq.request_capability.capability = 3974 cpu_to_be16(PROMISC_REQUESTED); 3975 crq.request_capability.number = cpu_to_be64(0); 3976 atomic_inc(&adapter->running_cap_crqs); 3977 ibmvnic_send_crq(adapter, &crq); 3978 } 3979 } 3980 3981 static int pending_scrq(struct ibmvnic_adapter *adapter, 3982 struct ibmvnic_sub_crq_queue *scrq) 3983 { 3984 union sub_crq *entry = &scrq->msgs[scrq->cur]; 3985 int rc; 3986 3987 rc = !!(entry->generic.first & IBMVNIC_CRQ_CMD_RSP); 3988 3989 /* Ensure that the SCRQ valid flag is loaded prior to loading the 3990 * contents of the SCRQ descriptor 3991 */ 3992 dma_rmb(); 3993 3994 return rc; 3995 } 3996 3997 static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *adapter, 3998 struct ibmvnic_sub_crq_queue *scrq) 3999 { 4000 union sub_crq *entry; 4001 unsigned long flags; 4002 4003 spin_lock_irqsave(&scrq->lock, flags); 4004 entry = &scrq->msgs[scrq->cur]; 4005 if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP) { 4006 if (++scrq->cur == scrq->size) 4007 scrq->cur = 0; 4008 } else { 4009 entry = NULL; 4010 } 4011 spin_unlock_irqrestore(&scrq->lock, flags); 4012 4013 /* Ensure that the SCRQ valid flag is loaded prior to loading the 4014 * contents of the SCRQ descriptor 4015 */ 4016 dma_rmb(); 4017 4018 return entry; 4019 } 4020 4021 static union ibmvnic_crq *ibmvnic_next_crq(struct ibmvnic_adapter *adapter) 4022 { 4023 struct ibmvnic_crq_queue *queue = &adapter->crq; 4024 union ibmvnic_crq *crq; 4025 4026 crq = &queue->msgs[queue->cur]; 4027 if (crq->generic.first & IBMVNIC_CRQ_CMD_RSP) { 4028 if (++queue->cur == queue->size) 4029 queue->cur = 0; 4030 } else { 4031 crq = NULL; 4032 } 4033 4034 return crq; 4035 } 4036 4037 static void print_subcrq_error(struct device *dev, int rc, const char *func) 4038 { 4039 switch (rc) { 4040 case H_PARAMETER: 4041 dev_warn_ratelimited(dev, 4042 "%s failed: Send request is malformed or adapter failover pending. (rc=%d)\n", 4043 func, rc); 4044 break; 4045 case H_CLOSED: 4046 dev_warn_ratelimited(dev, 4047 "%s failed: Backing queue closed. Adapter is down or failover pending. 
(rc=%d)\n", 4048 func, rc); 4049 break; 4050 default: 4051 dev_err_ratelimited(dev, "%s failed: (rc=%d)\n", func, rc); 4052 break; 4053 } 4054 } 4055 4056 static int send_subcrq_indirect(struct ibmvnic_adapter *adapter, 4057 u64 remote_handle, u64 ioba, u64 num_entries) 4058 { 4059 unsigned int ua = adapter->vdev->unit_address; 4060 struct device *dev = &adapter->vdev->dev; 4061 int rc; 4062 4063 /* Make sure the hypervisor sees the complete request */ 4064 dma_wmb(); 4065 rc = plpar_hcall_norets(H_SEND_SUB_CRQ_INDIRECT, ua, 4066 cpu_to_be64(remote_handle), 4067 ioba, num_entries); 4068 4069 if (rc) 4070 print_subcrq_error(dev, rc, __func__); 4071 4072 return rc; 4073 } 4074 4075 static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter, 4076 union ibmvnic_crq *crq) 4077 { 4078 unsigned int ua = adapter->vdev->unit_address; 4079 struct device *dev = &adapter->vdev->dev; 4080 u64 *u64_crq = (u64 *)crq; 4081 int rc; 4082 4083 netdev_dbg(adapter->netdev, "Sending CRQ: %016lx %016lx\n", 4084 (unsigned long)cpu_to_be64(u64_crq[0]), 4085 (unsigned long)cpu_to_be64(u64_crq[1])); 4086 4087 if (!adapter->crq.active && 4088 crq->generic.first != IBMVNIC_CRQ_INIT_CMD) { 4089 dev_warn(dev, "Invalid request detected while CRQ is inactive, possible device state change during reset\n"); 4090 return -EINVAL; 4091 } 4092 4093 /* Make sure the hypervisor sees the complete request */ 4094 dma_wmb(); 4095 4096 rc = plpar_hcall_norets(H_SEND_CRQ, ua, 4097 cpu_to_be64(u64_crq[0]), 4098 cpu_to_be64(u64_crq[1])); 4099 4100 if (rc) { 4101 if (rc == H_CLOSED) { 4102 dev_warn(dev, "CRQ Queue closed\n"); 4103 /* do not reset, report the fail, wait for passive init from server */ 4104 } 4105 4106 dev_warn(dev, "Send error (rc=%d)\n", rc); 4107 } 4108 4109 return rc; 4110 } 4111 4112 static int ibmvnic_send_crq_init(struct ibmvnic_adapter *adapter) 4113 { 4114 struct device *dev = &adapter->vdev->dev; 4115 union ibmvnic_crq crq; 4116 int retries = 100; 4117 int rc; 4118 4119 memset(&crq, 0, sizeof(crq)); 4120 crq.generic.first = IBMVNIC_CRQ_INIT_CMD; 4121 crq.generic.cmd = IBMVNIC_CRQ_INIT; 4122 netdev_dbg(adapter->netdev, "Sending CRQ init\n"); 4123 4124 do { 4125 rc = ibmvnic_send_crq(adapter, &crq); 4126 if (rc != H_CLOSED) 4127 break; 4128 retries--; 4129 msleep(50); 4130 4131 } while (retries > 0); 4132 4133 if (rc) { 4134 dev_err(dev, "Failed to send init request, rc = %d\n", rc); 4135 return rc; 4136 } 4137 4138 return 0; 4139 } 4140 4141 struct vnic_login_client_data { 4142 u8 type; 4143 __be16 len; 4144 char name[]; 4145 } __packed; 4146 4147 static int vnic_client_data_len(struct ibmvnic_adapter *adapter) 4148 { 4149 int len; 4150 4151 /* Calculate the amount of buffer space needed for the 4152 * vnic client data in the login buffer. There are four entries, 4153 * OS name, LPAR name, device name, and a null last entry. 
4154 */ 4155 len = 4 * sizeof(struct vnic_login_client_data); 4156 len += 6; /* "Linux" plus NULL */ 4157 len += strlen(utsname()->nodename) + 1; 4158 len += strlen(adapter->netdev->name) + 1; 4159 4160 return len; 4161 } 4162 4163 static void vnic_add_client_data(struct ibmvnic_adapter *adapter, 4164 struct vnic_login_client_data *vlcd) 4165 { 4166 const char *os_name = "Linux"; 4167 int len; 4168 4169 /* Type 1 - LPAR OS */ 4170 vlcd->type = 1; 4171 len = strlen(os_name) + 1; 4172 vlcd->len = cpu_to_be16(len); 4173 strscpy(vlcd->name, os_name, len); 4174 vlcd = (struct vnic_login_client_data *)(vlcd->name + len); 4175 4176 /* Type 2 - LPAR name */ 4177 vlcd->type = 2; 4178 len = strlen(utsname()->nodename) + 1; 4179 vlcd->len = cpu_to_be16(len); 4180 strscpy(vlcd->name, utsname()->nodename, len); 4181 vlcd = (struct vnic_login_client_data *)(vlcd->name + len); 4182 4183 /* Type 3 - device name */ 4184 vlcd->type = 3; 4185 len = strlen(adapter->netdev->name) + 1; 4186 vlcd->len = cpu_to_be16(len); 4187 strscpy(vlcd->name, adapter->netdev->name, len); 4188 } 4189 4190 static int send_login(struct ibmvnic_adapter *adapter) 4191 { 4192 struct ibmvnic_login_rsp_buffer *login_rsp_buffer; 4193 struct ibmvnic_login_buffer *login_buffer; 4194 struct device *dev = &adapter->vdev->dev; 4195 struct vnic_login_client_data *vlcd; 4196 dma_addr_t rsp_buffer_token; 4197 dma_addr_t buffer_token; 4198 size_t rsp_buffer_size; 4199 union ibmvnic_crq crq; 4200 int client_data_len; 4201 size_t buffer_size; 4202 __be64 *tx_list_p; 4203 __be64 *rx_list_p; 4204 int rc; 4205 int i; 4206 4207 if (!adapter->tx_scrq || !adapter->rx_scrq) { 4208 netdev_err(adapter->netdev, 4209 "RX or TX queues are not allocated, device login failed\n"); 4210 return -1; 4211 } 4212 4213 release_login_buffer(adapter); 4214 release_login_rsp_buffer(adapter); 4215 4216 client_data_len = vnic_client_data_len(adapter); 4217 4218 buffer_size = 4219 sizeof(struct ibmvnic_login_buffer) + 4220 sizeof(u64) * (adapter->req_tx_queues + adapter->req_rx_queues) + 4221 client_data_len; 4222 4223 login_buffer = kzalloc(buffer_size, GFP_ATOMIC); 4224 if (!login_buffer) 4225 goto buf_alloc_failed; 4226 4227 buffer_token = dma_map_single(dev, login_buffer, buffer_size, 4228 DMA_TO_DEVICE); 4229 if (dma_mapping_error(dev, buffer_token)) { 4230 dev_err(dev, "Couldn't map login buffer\n"); 4231 goto buf_map_failed; 4232 } 4233 4234 rsp_buffer_size = sizeof(struct ibmvnic_login_rsp_buffer) + 4235 sizeof(u64) * adapter->req_tx_queues + 4236 sizeof(u64) * adapter->req_rx_queues + 4237 sizeof(u64) * adapter->req_rx_queues + 4238 sizeof(u8) * IBMVNIC_TX_DESC_VERSIONS; 4239 4240 login_rsp_buffer = kmalloc(rsp_buffer_size, GFP_ATOMIC); 4241 if (!login_rsp_buffer) 4242 goto buf_rsp_alloc_failed; 4243 4244 rsp_buffer_token = dma_map_single(dev, login_rsp_buffer, 4245 rsp_buffer_size, DMA_FROM_DEVICE); 4246 if (dma_mapping_error(dev, rsp_buffer_token)) { 4247 dev_err(dev, "Couldn't map login rsp buffer\n"); 4248 goto buf_rsp_map_failed; 4249 } 4250 4251 adapter->login_buf = login_buffer; 4252 adapter->login_buf_token = buffer_token; 4253 adapter->login_buf_sz = buffer_size; 4254 adapter->login_rsp_buf = login_rsp_buffer; 4255 adapter->login_rsp_buf_token = rsp_buffer_token; 4256 adapter->login_rsp_buf_sz = rsp_buffer_size; 4257 4258 login_buffer->len = cpu_to_be32(buffer_size); 4259 login_buffer->version = cpu_to_be32(INITIAL_VERSION_LB); 4260 login_buffer->num_txcomp_subcrqs = cpu_to_be32(adapter->req_tx_queues); 4261 login_buffer->off_txcomp_subcrqs = 4262 
cpu_to_be32(sizeof(struct ibmvnic_login_buffer)); 4263 login_buffer->num_rxcomp_subcrqs = cpu_to_be32(adapter->req_rx_queues); 4264 login_buffer->off_rxcomp_subcrqs = 4265 cpu_to_be32(sizeof(struct ibmvnic_login_buffer) + 4266 sizeof(u64) * adapter->req_tx_queues); 4267 login_buffer->login_rsp_ioba = cpu_to_be32(rsp_buffer_token); 4268 login_buffer->login_rsp_len = cpu_to_be32(rsp_buffer_size); 4269 4270 tx_list_p = (__be64 *)((char *)login_buffer + 4271 sizeof(struct ibmvnic_login_buffer)); 4272 rx_list_p = (__be64 *)((char *)login_buffer + 4273 sizeof(struct ibmvnic_login_buffer) + 4274 sizeof(u64) * adapter->req_tx_queues); 4275 4276 for (i = 0; i < adapter->req_tx_queues; i++) { 4277 if (adapter->tx_scrq[i]) { 4278 tx_list_p[i] = 4279 cpu_to_be64(adapter->tx_scrq[i]->crq_num); 4280 } 4281 } 4282 4283 for (i = 0; i < adapter->req_rx_queues; i++) { 4284 if (adapter->rx_scrq[i]) { 4285 rx_list_p[i] = 4286 cpu_to_be64(adapter->rx_scrq[i]->crq_num); 4287 } 4288 } 4289 4290 /* Insert vNIC login client data */ 4291 vlcd = (struct vnic_login_client_data *) 4292 ((char *)rx_list_p + (sizeof(u64) * adapter->req_rx_queues)); 4293 login_buffer->client_data_offset = 4294 cpu_to_be32((char *)vlcd - (char *)login_buffer); 4295 login_buffer->client_data_len = cpu_to_be32(client_data_len); 4296 4297 vnic_add_client_data(adapter, vlcd); 4298 4299 netdev_dbg(adapter->netdev, "Login Buffer:\n"); 4300 for (i = 0; i < (adapter->login_buf_sz - 1) / 8 + 1; i++) { 4301 netdev_dbg(adapter->netdev, "%016lx\n", 4302 ((unsigned long *)(adapter->login_buf))[i]); 4303 } 4304 4305 memset(&crq, 0, sizeof(crq)); 4306 crq.login.first = IBMVNIC_CRQ_CMD; 4307 crq.login.cmd = LOGIN; 4308 crq.login.ioba = cpu_to_be32(buffer_token); 4309 crq.login.len = cpu_to_be32(buffer_size); 4310 4311 adapter->login_pending = true; 4312 rc = ibmvnic_send_crq(adapter, &crq); 4313 if (rc) { 4314 adapter->login_pending = false; 4315 netdev_err(adapter->netdev, "Failed to send login, rc=%d\n", rc); 4316 goto buf_rsp_map_failed; 4317 } 4318 4319 return 0; 4320 4321 buf_rsp_map_failed: 4322 kfree(login_rsp_buffer); 4323 adapter->login_rsp_buf = NULL; 4324 buf_rsp_alloc_failed: 4325 dma_unmap_single(dev, buffer_token, buffer_size, DMA_TO_DEVICE); 4326 buf_map_failed: 4327 kfree(login_buffer); 4328 adapter->login_buf = NULL; 4329 buf_alloc_failed: 4330 return -1; 4331 } 4332 4333 static int send_request_map(struct ibmvnic_adapter *adapter, dma_addr_t addr, 4334 u32 len, u8 map_id) 4335 { 4336 union ibmvnic_crq crq; 4337 4338 memset(&crq, 0, sizeof(crq)); 4339 crq.request_map.first = IBMVNIC_CRQ_CMD; 4340 crq.request_map.cmd = REQUEST_MAP; 4341 crq.request_map.map_id = map_id; 4342 crq.request_map.ioba = cpu_to_be32(addr); 4343 crq.request_map.len = cpu_to_be32(len); 4344 return ibmvnic_send_crq(adapter, &crq); 4345 } 4346 4347 static int send_request_unmap(struct ibmvnic_adapter *adapter, u8 map_id) 4348 { 4349 union ibmvnic_crq crq; 4350 4351 memset(&crq, 0, sizeof(crq)); 4352 crq.request_unmap.first = IBMVNIC_CRQ_CMD; 4353 crq.request_unmap.cmd = REQUEST_UNMAP; 4354 crq.request_unmap.map_id = map_id; 4355 return ibmvnic_send_crq(adapter, &crq); 4356 } 4357 4358 static void send_query_map(struct ibmvnic_adapter *adapter) 4359 { 4360 union ibmvnic_crq crq; 4361 4362 memset(&crq, 0, sizeof(crq)); 4363 crq.query_map.first = IBMVNIC_CRQ_CMD; 4364 crq.query_map.cmd = QUERY_MAP; 4365 ibmvnic_send_crq(adapter, &crq); 4366 } 4367 4368 /* Send a series of CRQs requesting various capabilities of the VNIC server */ 4369 static void send_query_cap(struct 
ibmvnic_adapter *adapter) 4370 { 4371 union ibmvnic_crq crq; 4372 4373 atomic_set(&adapter->running_cap_crqs, 0); 4374 memset(&crq, 0, sizeof(crq)); 4375 crq.query_capability.first = IBMVNIC_CRQ_CMD; 4376 crq.query_capability.cmd = QUERY_CAPABILITY; 4377 4378 crq.query_capability.capability = cpu_to_be16(MIN_TX_QUEUES); 4379 atomic_inc(&adapter->running_cap_crqs); 4380 ibmvnic_send_crq(adapter, &crq); 4381 4382 crq.query_capability.capability = cpu_to_be16(MIN_RX_QUEUES); 4383 atomic_inc(&adapter->running_cap_crqs); 4384 ibmvnic_send_crq(adapter, &crq); 4385 4386 crq.query_capability.capability = cpu_to_be16(MIN_RX_ADD_QUEUES); 4387 atomic_inc(&adapter->running_cap_crqs); 4388 ibmvnic_send_crq(adapter, &crq); 4389 4390 crq.query_capability.capability = cpu_to_be16(MAX_TX_QUEUES); 4391 atomic_inc(&adapter->running_cap_crqs); 4392 ibmvnic_send_crq(adapter, &crq); 4393 4394 crq.query_capability.capability = cpu_to_be16(MAX_RX_QUEUES); 4395 atomic_inc(&adapter->running_cap_crqs); 4396 ibmvnic_send_crq(adapter, &crq); 4397 4398 crq.query_capability.capability = cpu_to_be16(MAX_RX_ADD_QUEUES); 4399 atomic_inc(&adapter->running_cap_crqs); 4400 ibmvnic_send_crq(adapter, &crq); 4401 4402 crq.query_capability.capability = 4403 cpu_to_be16(MIN_TX_ENTRIES_PER_SUBCRQ); 4404 atomic_inc(&adapter->running_cap_crqs); 4405 ibmvnic_send_crq(adapter, &crq); 4406 4407 crq.query_capability.capability = 4408 cpu_to_be16(MIN_RX_ADD_ENTRIES_PER_SUBCRQ); 4409 atomic_inc(&adapter->running_cap_crqs); 4410 ibmvnic_send_crq(adapter, &crq); 4411 4412 crq.query_capability.capability = 4413 cpu_to_be16(MAX_TX_ENTRIES_PER_SUBCRQ); 4414 atomic_inc(&adapter->running_cap_crqs); 4415 ibmvnic_send_crq(adapter, &crq); 4416 4417 crq.query_capability.capability = 4418 cpu_to_be16(MAX_RX_ADD_ENTRIES_PER_SUBCRQ); 4419 atomic_inc(&adapter->running_cap_crqs); 4420 ibmvnic_send_crq(adapter, &crq); 4421 4422 crq.query_capability.capability = cpu_to_be16(TCP_IP_OFFLOAD); 4423 atomic_inc(&adapter->running_cap_crqs); 4424 ibmvnic_send_crq(adapter, &crq); 4425 4426 crq.query_capability.capability = cpu_to_be16(PROMISC_SUPPORTED); 4427 atomic_inc(&adapter->running_cap_crqs); 4428 ibmvnic_send_crq(adapter, &crq); 4429 4430 crq.query_capability.capability = cpu_to_be16(MIN_MTU); 4431 atomic_inc(&adapter->running_cap_crqs); 4432 ibmvnic_send_crq(adapter, &crq); 4433 4434 crq.query_capability.capability = cpu_to_be16(MAX_MTU); 4435 atomic_inc(&adapter->running_cap_crqs); 4436 ibmvnic_send_crq(adapter, &crq); 4437 4438 crq.query_capability.capability = cpu_to_be16(MAX_MULTICAST_FILTERS); 4439 atomic_inc(&adapter->running_cap_crqs); 4440 ibmvnic_send_crq(adapter, &crq); 4441 4442 crq.query_capability.capability = cpu_to_be16(VLAN_HEADER_INSERTION); 4443 atomic_inc(&adapter->running_cap_crqs); 4444 ibmvnic_send_crq(adapter, &crq); 4445 4446 crq.query_capability.capability = cpu_to_be16(RX_VLAN_HEADER_INSERTION); 4447 atomic_inc(&adapter->running_cap_crqs); 4448 ibmvnic_send_crq(adapter, &crq); 4449 4450 crq.query_capability.capability = cpu_to_be16(MAX_TX_SG_ENTRIES); 4451 atomic_inc(&adapter->running_cap_crqs); 4452 ibmvnic_send_crq(adapter, &crq); 4453 4454 crq.query_capability.capability = cpu_to_be16(RX_SG_SUPPORTED); 4455 atomic_inc(&adapter->running_cap_crqs); 4456 ibmvnic_send_crq(adapter, &crq); 4457 4458 crq.query_capability.capability = cpu_to_be16(OPT_TX_COMP_SUB_QUEUES); 4459 atomic_inc(&adapter->running_cap_crqs); 4460 ibmvnic_send_crq(adapter, &crq); 4461 4462 crq.query_capability.capability = cpu_to_be16(OPT_RX_COMP_QUEUES); 4463 
atomic_inc(&adapter->running_cap_crqs); 4464 ibmvnic_send_crq(adapter, &crq); 4465 4466 crq.query_capability.capability = 4467 cpu_to_be16(OPT_RX_BUFADD_Q_PER_RX_COMP_Q); 4468 atomic_inc(&adapter->running_cap_crqs); 4469 ibmvnic_send_crq(adapter, &crq); 4470 4471 crq.query_capability.capability = 4472 cpu_to_be16(OPT_TX_ENTRIES_PER_SUBCRQ); 4473 atomic_inc(&adapter->running_cap_crqs); 4474 ibmvnic_send_crq(adapter, &crq); 4475 4476 crq.query_capability.capability = 4477 cpu_to_be16(OPT_RXBA_ENTRIES_PER_SUBCRQ); 4478 atomic_inc(&adapter->running_cap_crqs); 4479 ibmvnic_send_crq(adapter, &crq); 4480 4481 crq.query_capability.capability = cpu_to_be16(TX_RX_DESC_REQ); 4482 atomic_inc(&adapter->running_cap_crqs); 4483 ibmvnic_send_crq(adapter, &crq); 4484 } 4485 4486 static void send_query_ip_offload(struct ibmvnic_adapter *adapter) 4487 { 4488 int buf_sz = sizeof(struct ibmvnic_query_ip_offload_buffer); 4489 struct device *dev = &adapter->vdev->dev; 4490 union ibmvnic_crq crq; 4491 4492 adapter->ip_offload_tok = 4493 dma_map_single(dev, 4494 &adapter->ip_offload_buf, 4495 buf_sz, 4496 DMA_FROM_DEVICE); 4497 4498 if (dma_mapping_error(dev, adapter->ip_offload_tok)) { 4499 if (!firmware_has_feature(FW_FEATURE_CMO)) 4500 dev_err(dev, "Couldn't map offload buffer\n"); 4501 return; 4502 } 4503 4504 memset(&crq, 0, sizeof(crq)); 4505 crq.query_ip_offload.first = IBMVNIC_CRQ_CMD; 4506 crq.query_ip_offload.cmd = QUERY_IP_OFFLOAD; 4507 crq.query_ip_offload.len = cpu_to_be32(buf_sz); 4508 crq.query_ip_offload.ioba = 4509 cpu_to_be32(adapter->ip_offload_tok); 4510 4511 ibmvnic_send_crq(adapter, &crq); 4512 } 4513 4514 static void send_control_ip_offload(struct ibmvnic_adapter *adapter) 4515 { 4516 struct ibmvnic_control_ip_offload_buffer *ctrl_buf = &adapter->ip_offload_ctrl; 4517 struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf; 4518 struct device *dev = &adapter->vdev->dev; 4519 netdev_features_t old_hw_features = 0; 4520 union ibmvnic_crq crq; 4521 4522 adapter->ip_offload_ctrl_tok = 4523 dma_map_single(dev, 4524 ctrl_buf, 4525 sizeof(adapter->ip_offload_ctrl), 4526 DMA_TO_DEVICE); 4527 4528 if (dma_mapping_error(dev, adapter->ip_offload_ctrl_tok)) { 4529 dev_err(dev, "Couldn't map ip offload control buffer\n"); 4530 return; 4531 } 4532 4533 ctrl_buf->len = cpu_to_be32(sizeof(adapter->ip_offload_ctrl)); 4534 ctrl_buf->version = cpu_to_be32(INITIAL_VERSION_IOB); 4535 ctrl_buf->ipv4_chksum = buf->ipv4_chksum; 4536 ctrl_buf->ipv6_chksum = buf->ipv6_chksum; 4537 ctrl_buf->tcp_ipv4_chksum = buf->tcp_ipv4_chksum; 4538 ctrl_buf->udp_ipv4_chksum = buf->udp_ipv4_chksum; 4539 ctrl_buf->tcp_ipv6_chksum = buf->tcp_ipv6_chksum; 4540 ctrl_buf->udp_ipv6_chksum = buf->udp_ipv6_chksum; 4541 ctrl_buf->large_tx_ipv4 = buf->large_tx_ipv4; 4542 ctrl_buf->large_tx_ipv6 = buf->large_tx_ipv6; 4543 4544 /* large_rx disabled for now, additional features needed */ 4545 ctrl_buf->large_rx_ipv4 = 0; 4546 ctrl_buf->large_rx_ipv6 = 0; 4547 4548 if (adapter->state != VNIC_PROBING) { 4549 old_hw_features = adapter->netdev->hw_features; 4550 adapter->netdev->hw_features = 0; 4551 } 4552 4553 adapter->netdev->hw_features = NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO; 4554 4555 if (buf->tcp_ipv4_chksum || buf->udp_ipv4_chksum) 4556 adapter->netdev->hw_features |= NETIF_F_IP_CSUM; 4557 4558 if (buf->tcp_ipv6_chksum || buf->udp_ipv6_chksum) 4559 adapter->netdev->hw_features |= NETIF_F_IPV6_CSUM; 4560 4561 if ((adapter->netdev->features & 4562 (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))) 4563 adapter->netdev->hw_features |= 
NETIF_F_RXCSUM; 4564 4565 if (buf->large_tx_ipv4) 4566 adapter->netdev->hw_features |= NETIF_F_TSO; 4567 if (buf->large_tx_ipv6) 4568 adapter->netdev->hw_features |= NETIF_F_TSO6; 4569 4570 if (adapter->state == VNIC_PROBING) { 4571 adapter->netdev->features |= adapter->netdev->hw_features; 4572 } else if (old_hw_features != adapter->netdev->hw_features) { 4573 netdev_features_t tmp = 0; 4574 4575 /* disable features no longer supported */ 4576 adapter->netdev->features &= adapter->netdev->hw_features; 4577 /* turn on features now supported if previously enabled */ 4578 tmp = (old_hw_features ^ adapter->netdev->hw_features) & 4579 adapter->netdev->hw_features; 4580 adapter->netdev->features |= 4581 tmp & adapter->netdev->wanted_features; 4582 } 4583 4584 memset(&crq, 0, sizeof(crq)); 4585 crq.control_ip_offload.first = IBMVNIC_CRQ_CMD; 4586 crq.control_ip_offload.cmd = CONTROL_IP_OFFLOAD; 4587 crq.control_ip_offload.len = 4588 cpu_to_be32(sizeof(adapter->ip_offload_ctrl)); 4589 crq.control_ip_offload.ioba = cpu_to_be32(adapter->ip_offload_ctrl_tok); 4590 ibmvnic_send_crq(adapter, &crq); 4591 } 4592 4593 static void handle_vpd_size_rsp(union ibmvnic_crq *crq, 4594 struct ibmvnic_adapter *adapter) 4595 { 4596 struct device *dev = &adapter->vdev->dev; 4597 4598 if (crq->get_vpd_size_rsp.rc.code) { 4599 dev_err(dev, "Error retrieving VPD size, rc=%x\n", 4600 crq->get_vpd_size_rsp.rc.code); 4601 complete(&adapter->fw_done); 4602 return; 4603 } 4604 4605 adapter->vpd->len = be64_to_cpu(crq->get_vpd_size_rsp.len); 4606 complete(&adapter->fw_done); 4607 } 4608 4609 static void handle_vpd_rsp(union ibmvnic_crq *crq, 4610 struct ibmvnic_adapter *adapter) 4611 { 4612 struct device *dev = &adapter->vdev->dev; 4613 unsigned char *substr = NULL; 4614 u8 fw_level_len = 0; 4615 4616 memset(adapter->fw_version, 0, 32); 4617 4618 dma_unmap_single(dev, adapter->vpd->dma_addr, adapter->vpd->len, 4619 DMA_FROM_DEVICE); 4620 4621 if (crq->get_vpd_rsp.rc.code) { 4622 dev_err(dev, "Error retrieving VPD from device, rc=%x\n", 4623 crq->get_vpd_rsp.rc.code); 4624 goto complete; 4625 } 4626 4627 /* get the position of the firmware version info 4628 * located after the ASCII 'RM' substring in the buffer 4629 */ 4630 substr = strnstr(adapter->vpd->buff, "RM", adapter->vpd->len); 4631 if (!substr) { 4632 dev_info(dev, "Warning - No FW level has been provided in the VPD buffer by the VIOS Server\n"); 4633 goto complete; 4634 } 4635 4636 /* get length of firmware level ASCII substring */ 4637 if ((substr + 2) < (adapter->vpd->buff + adapter->vpd->len)) { 4638 fw_level_len = *(substr + 2); 4639 } else { 4640 dev_info(dev, "Length of FW substr extends beyond VPD buff\n"); 4641 goto complete; 4642 } 4643 4644 /* copy firmware version string from vpd into adapter */ 4645 if ((substr + 3 + fw_level_len) < 4646 (adapter->vpd->buff + adapter->vpd->len)) { 4647 strncpy((char *)adapter->fw_version, substr + 3, fw_level_len); 4648 } else { 4649 dev_info(dev, "FW substr extends beyond VPD buff\n"); 4650 } 4651 4652 complete: 4653 if (adapter->fw_version[0] == '\0') 4654 strscpy((char *)adapter->fw_version, "N/A", sizeof(adapter->fw_version)); 4655 complete(&adapter->fw_done); 4656 } 4657 4658 static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter) 4659 { 4660 struct device *dev = &adapter->vdev->dev; 4661 struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf; 4662 int i; 4663 4664 dma_unmap_single(dev, adapter->ip_offload_tok, 4665 sizeof(adapter->ip_offload_buf), DMA_FROM_DEVICE); 4666 4667
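/* Dump the raw offload buffer and each capability reported by the VNIC server for debugging */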
netdev_dbg(adapter->netdev, "Query IP Offload Buffer:\n"); 4668 for (i = 0; i < (sizeof(adapter->ip_offload_buf) - 1) / 8 + 1; i++) 4669 netdev_dbg(adapter->netdev, "%016lx\n", 4670 ((unsigned long *)(buf))[i]); 4671 4672 netdev_dbg(adapter->netdev, "ipv4_chksum = %d\n", buf->ipv4_chksum); 4673 netdev_dbg(adapter->netdev, "ipv6_chksum = %d\n", buf->ipv6_chksum); 4674 netdev_dbg(adapter->netdev, "tcp_ipv4_chksum = %d\n", 4675 buf->tcp_ipv4_chksum); 4676 netdev_dbg(adapter->netdev, "tcp_ipv6_chksum = %d\n", 4677 buf->tcp_ipv6_chksum); 4678 netdev_dbg(adapter->netdev, "udp_ipv4_chksum = %d\n", 4679 buf->udp_ipv4_chksum); 4680 netdev_dbg(adapter->netdev, "udp_ipv6_chksum = %d\n", 4681 buf->udp_ipv6_chksum); 4682 netdev_dbg(adapter->netdev, "large_tx_ipv4 = %d\n", 4683 buf->large_tx_ipv4); 4684 netdev_dbg(adapter->netdev, "large_tx_ipv6 = %d\n", 4685 buf->large_tx_ipv6); 4686 netdev_dbg(adapter->netdev, "large_rx_ipv4 = %d\n", 4687 buf->large_rx_ipv4); 4688 netdev_dbg(adapter->netdev, "large_rx_ipv6 = %d\n", 4689 buf->large_rx_ipv6); 4690 netdev_dbg(adapter->netdev, "max_ipv4_hdr_sz = %d\n", 4691 buf->max_ipv4_header_size); 4692 netdev_dbg(adapter->netdev, "max_ipv6_hdr_sz = %d\n", 4693 buf->max_ipv6_header_size); 4694 netdev_dbg(adapter->netdev, "max_tcp_hdr_size = %d\n", 4695 buf->max_tcp_header_size); 4696 netdev_dbg(adapter->netdev, "max_udp_hdr_size = %d\n", 4697 buf->max_udp_header_size); 4698 netdev_dbg(adapter->netdev, "max_large_tx_size = %d\n", 4699 buf->max_large_tx_size); 4700 netdev_dbg(adapter->netdev, "max_large_rx_size = %d\n", 4701 buf->max_large_rx_size); 4702 netdev_dbg(adapter->netdev, "ipv6_ext_hdr = %d\n", 4703 buf->ipv6_extension_header); 4704 netdev_dbg(adapter->netdev, "tcp_pseudosum_req = %d\n", 4705 buf->tcp_pseudosum_req); 4706 netdev_dbg(adapter->netdev, "num_ipv6_ext_hd = %d\n", 4707 buf->num_ipv6_ext_headers); 4708 netdev_dbg(adapter->netdev, "off_ipv6_ext_hd = %d\n", 4709 buf->off_ipv6_ext_headers); 4710 4711 send_control_ip_offload(adapter); 4712 } 4713 4714 static const char *ibmvnic_fw_err_cause(u16 cause) 4715 { 4716 switch (cause) { 4717 case ADAPTER_PROBLEM: 4718 return "adapter problem"; 4719 case BUS_PROBLEM: 4720 return "bus problem"; 4721 case FW_PROBLEM: 4722 return "firmware problem"; 4723 case DD_PROBLEM: 4724 return "device driver problem"; 4725 case EEH_RECOVERY: 4726 return "EEH recovery"; 4727 case FW_UPDATED: 4728 return "firmware updated"; 4729 case LOW_MEMORY: 4730 return "low Memory"; 4731 default: 4732 return "unknown"; 4733 } 4734 } 4735 4736 static void handle_error_indication(union ibmvnic_crq *crq, 4737 struct ibmvnic_adapter *adapter) 4738 { 4739 struct device *dev = &adapter->vdev->dev; 4740 u16 cause; 4741 4742 cause = be16_to_cpu(crq->error_indication.error_cause); 4743 4744 dev_warn_ratelimited(dev, 4745 "Firmware reports %serror, cause: %s. Starting recovery...\n", 4746 crq->error_indication.flags 4747 & IBMVNIC_FATAL_ERROR ? 
"FATAL " : "", 4748 ibmvnic_fw_err_cause(cause)); 4749 4750 if (crq->error_indication.flags & IBMVNIC_FATAL_ERROR) 4751 ibmvnic_reset(adapter, VNIC_RESET_FATAL); 4752 else 4753 ibmvnic_reset(adapter, VNIC_RESET_NON_FATAL); 4754 } 4755 4756 static int handle_change_mac_rsp(union ibmvnic_crq *crq, 4757 struct ibmvnic_adapter *adapter) 4758 { 4759 struct net_device *netdev = adapter->netdev; 4760 struct device *dev = &adapter->vdev->dev; 4761 long rc; 4762 4763 rc = crq->change_mac_addr_rsp.rc.code; 4764 if (rc) { 4765 dev_err(dev, "Error %ld in CHANGE_MAC_ADDR_RSP\n", rc); 4766 goto out; 4767 } 4768 /* crq->change_mac_addr.mac_addr is the requested one 4769 * crq->change_mac_addr_rsp.mac_addr is the returned valid one. 4770 */ 4771 eth_hw_addr_set(netdev, &crq->change_mac_addr_rsp.mac_addr[0]); 4772 ether_addr_copy(adapter->mac_addr, 4773 &crq->change_mac_addr_rsp.mac_addr[0]); 4774 out: 4775 complete(&adapter->fw_done); 4776 return rc; 4777 } 4778 4779 static void handle_request_cap_rsp(union ibmvnic_crq *crq, 4780 struct ibmvnic_adapter *adapter) 4781 { 4782 struct device *dev = &adapter->vdev->dev; 4783 u64 *req_value; 4784 char *name; 4785 4786 atomic_dec(&adapter->running_cap_crqs); 4787 switch (be16_to_cpu(crq->request_capability_rsp.capability)) { 4788 case REQ_TX_QUEUES: 4789 req_value = &adapter->req_tx_queues; 4790 name = "tx"; 4791 break; 4792 case REQ_RX_QUEUES: 4793 req_value = &adapter->req_rx_queues; 4794 name = "rx"; 4795 break; 4796 case REQ_RX_ADD_QUEUES: 4797 req_value = &adapter->req_rx_add_queues; 4798 name = "rx_add"; 4799 break; 4800 case REQ_TX_ENTRIES_PER_SUBCRQ: 4801 req_value = &adapter->req_tx_entries_per_subcrq; 4802 name = "tx_entries_per_subcrq"; 4803 break; 4804 case REQ_RX_ADD_ENTRIES_PER_SUBCRQ: 4805 req_value = &adapter->req_rx_add_entries_per_subcrq; 4806 name = "rx_add_entries_per_subcrq"; 4807 break; 4808 case REQ_MTU: 4809 req_value = &adapter->req_mtu; 4810 name = "mtu"; 4811 break; 4812 case PROMISC_REQUESTED: 4813 req_value = &adapter->promisc; 4814 name = "promisc"; 4815 break; 4816 default: 4817 dev_err(dev, "Got invalid cap request rsp %d\n", 4818 crq->request_capability.capability); 4819 return; 4820 } 4821 4822 switch (crq->request_capability_rsp.rc.code) { 4823 case SUCCESS: 4824 break; 4825 case PARTIALSUCCESS: 4826 dev_info(dev, "req=%lld, rsp=%ld in %s queue, retrying.\n", 4827 *req_value, 4828 (long)be64_to_cpu(crq->request_capability_rsp.number), 4829 name); 4830 4831 if (be16_to_cpu(crq->request_capability_rsp.capability) == 4832 REQ_MTU) { 4833 pr_err("mtu of %llu is not supported. 
Reverting.\n", 4834 *req_value); 4835 *req_value = adapter->fallback.mtu; 4836 } else { 4837 *req_value = 4838 be64_to_cpu(crq->request_capability_rsp.number); 4839 } 4840 4841 send_request_cap(adapter, 1); 4842 return; 4843 default: 4844 dev_err(dev, "Error %d in request cap rsp\n", 4845 crq->request_capability_rsp.rc.code); 4846 return; 4847 } 4848 4849 /* Done receiving requested capabilities, query IP offload support */ 4850 if (atomic_read(&adapter->running_cap_crqs) == 0) { 4851 adapter->wait_capability = false; 4852 send_query_ip_offload(adapter); 4853 } 4854 } 4855 4856 static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq, 4857 struct ibmvnic_adapter *adapter) 4858 { 4859 struct device *dev = &adapter->vdev->dev; 4860 struct net_device *netdev = adapter->netdev; 4861 struct ibmvnic_login_rsp_buffer *login_rsp = adapter->login_rsp_buf; 4862 struct ibmvnic_login_buffer *login = adapter->login_buf; 4863 u64 *tx_handle_array; 4864 u64 *rx_handle_array; 4865 int num_tx_pools; 4866 int num_rx_pools; 4867 u64 *size_array; 4868 int i; 4869 4870 /* CHECK: Test/set of login_pending does not need to be atomic 4871 * because only ibmvnic_tasklet tests/clears this. 4872 */ 4873 if (!adapter->login_pending) { 4874 netdev_warn(netdev, "Ignoring unexpected login response\n"); 4875 return 0; 4876 } 4877 adapter->login_pending = false; 4878 4879 dma_unmap_single(dev, adapter->login_buf_token, adapter->login_buf_sz, 4880 DMA_TO_DEVICE); 4881 dma_unmap_single(dev, adapter->login_rsp_buf_token, 4882 adapter->login_rsp_buf_sz, DMA_FROM_DEVICE); 4883 4884 /* If the number of queues requested can't be allocated by the 4885 * server, the login response will return with code 1. We will need 4886 * to resend the login buffer with fewer queues requested. 4887 */ 4888 if (login_rsp_crq->generic.rc.code) { 4889 adapter->init_done_rc = login_rsp_crq->generic.rc.code; 4890 complete(&adapter->init_done); 4891 return 0; 4892 } 4893 4894 if (adapter->failover_pending) { 4895 adapter->init_done_rc = -EAGAIN; 4896 netdev_dbg(netdev, "Failover pending, ignoring login response\n"); 4897 complete(&adapter->init_done); 4898 /* login response buffer will be released on reset */ 4899 return 0; 4900 } 4901 4902 netdev->mtu = adapter->req_mtu - ETH_HLEN; 4903 4904 netdev_dbg(adapter->netdev, "Login Response Buffer:\n"); 4905 for (i = 0; i < (adapter->login_rsp_buf_sz - 1) / 8 + 1; i++) { 4906 netdev_dbg(adapter->netdev, "%016lx\n", 4907 ((unsigned long *)(adapter->login_rsp_buf))[i]); 4908 } 4909 4910 /* Sanity checks */ 4911 if (login->num_txcomp_subcrqs != login_rsp->num_txsubm_subcrqs || 4912 (be32_to_cpu(login->num_rxcomp_subcrqs) * 4913 adapter->req_rx_add_queues != 4914 be32_to_cpu(login_rsp->num_rxadd_subcrqs))) { 4915 dev_err(dev, "FATAL: Inconsistent login and login rsp\n"); 4916 ibmvnic_reset(adapter, VNIC_RESET_FATAL); 4917 return -EIO; 4918 } 4919 size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) + 4920 be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size)); 4921 /* variable buffer sizes are not supported, so just read the 4922 * first entry. 
4923 */ 4924 adapter->cur_rx_buf_sz = be64_to_cpu(size_array[0]); 4925 4926 num_tx_pools = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs); 4927 num_rx_pools = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs); 4928 4929 tx_handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) + 4930 be32_to_cpu(adapter->login_rsp_buf->off_txsubm_subcrqs)); 4931 rx_handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) + 4932 be32_to_cpu(adapter->login_rsp_buf->off_rxadd_subcrqs)); 4933 4934 for (i = 0; i < num_tx_pools; i++) 4935 adapter->tx_scrq[i]->handle = tx_handle_array[i]; 4936 4937 for (i = 0; i < num_rx_pools; i++) 4938 adapter->rx_scrq[i]->handle = rx_handle_array[i]; 4939 4940 adapter->num_active_tx_scrqs = num_tx_pools; 4941 adapter->num_active_rx_scrqs = num_rx_pools; 4942 release_login_rsp_buffer(adapter); 4943 release_login_buffer(adapter); 4944 complete(&adapter->init_done); 4945 4946 return 0; 4947 } 4948 4949 static void handle_request_unmap_rsp(union ibmvnic_crq *crq, 4950 struct ibmvnic_adapter *adapter) 4951 { 4952 struct device *dev = &adapter->vdev->dev; 4953 long rc; 4954 4955 rc = crq->request_unmap_rsp.rc.code; 4956 if (rc) 4957 dev_err(dev, "Error %ld in REQUEST_UNMAP_RSP\n", rc); 4958 } 4959 4960 static void handle_query_map_rsp(union ibmvnic_crq *crq, 4961 struct ibmvnic_adapter *adapter) 4962 { 4963 struct net_device *netdev = adapter->netdev; 4964 struct device *dev = &adapter->vdev->dev; 4965 long rc; 4966 4967 rc = crq->query_map_rsp.rc.code; 4968 if (rc) { 4969 dev_err(dev, "Error %ld in QUERY_MAP_RSP\n", rc); 4970 return; 4971 } 4972 netdev_dbg(netdev, "page_size = %d\ntot_pages = %u\nfree_pages = %u\n", 4973 crq->query_map_rsp.page_size, 4974 __be32_to_cpu(crq->query_map_rsp.tot_pages), 4975 __be32_to_cpu(crq->query_map_rsp.free_pages)); 4976 } 4977 4978 static void handle_query_cap_rsp(union ibmvnic_crq *crq, 4979 struct ibmvnic_adapter *adapter) 4980 { 4981 struct net_device *netdev = adapter->netdev; 4982 struct device *dev = &adapter->vdev->dev; 4983 long rc; 4984 4985 atomic_dec(&adapter->running_cap_crqs); 4986 netdev_dbg(netdev, "Outstanding queries: %d\n", 4987 atomic_read(&adapter->running_cap_crqs)); 4988 rc = crq->query_capability.rc.code; 4989 if (rc) { 4990 dev_err(dev, "Error %ld in QUERY_CAP_RSP\n", rc); 4991 goto out; 4992 } 4993 4994 switch (be16_to_cpu(crq->query_capability.capability)) { 4995 case MIN_TX_QUEUES: 4996 adapter->min_tx_queues = 4997 be64_to_cpu(crq->query_capability.number); 4998 netdev_dbg(netdev, "min_tx_queues = %lld\n", 4999 adapter->min_tx_queues); 5000 break; 5001 case MIN_RX_QUEUES: 5002 adapter->min_rx_queues = 5003 be64_to_cpu(crq->query_capability.number); 5004 netdev_dbg(netdev, "min_rx_queues = %lld\n", 5005 adapter->min_rx_queues); 5006 break; 5007 case MIN_RX_ADD_QUEUES: 5008 adapter->min_rx_add_queues = 5009 be64_to_cpu(crq->query_capability.number); 5010 netdev_dbg(netdev, "min_rx_add_queues = %lld\n", 5011 adapter->min_rx_add_queues); 5012 break; 5013 case MAX_TX_QUEUES: 5014 adapter->max_tx_queues = 5015 be64_to_cpu(crq->query_capability.number); 5016 netdev_dbg(netdev, "max_tx_queues = %lld\n", 5017 adapter->max_tx_queues); 5018 break; 5019 case MAX_RX_QUEUES: 5020 adapter->max_rx_queues = 5021 be64_to_cpu(crq->query_capability.number); 5022 netdev_dbg(netdev, "max_rx_queues = %lld\n", 5023 adapter->max_rx_queues); 5024 break; 5025 case MAX_RX_ADD_QUEUES: 5026 adapter->max_rx_add_queues = 5027 be64_to_cpu(crq->query_capability.number); 5028 netdev_dbg(netdev, "max_rx_add_queues = %lld\n", 5029 
adapter->max_rx_add_queues); 5030 break; 5031 case MIN_TX_ENTRIES_PER_SUBCRQ: 5032 adapter->min_tx_entries_per_subcrq = 5033 be64_to_cpu(crq->query_capability.number); 5034 netdev_dbg(netdev, "min_tx_entries_per_subcrq = %lld\n", 5035 adapter->min_tx_entries_per_subcrq); 5036 break; 5037 case MIN_RX_ADD_ENTRIES_PER_SUBCRQ: 5038 adapter->min_rx_add_entries_per_subcrq = 5039 be64_to_cpu(crq->query_capability.number); 5040 netdev_dbg(netdev, "min_rx_add_entrs_per_subcrq = %lld\n", 5041 adapter->min_rx_add_entries_per_subcrq); 5042 break; 5043 case MAX_TX_ENTRIES_PER_SUBCRQ: 5044 adapter->max_tx_entries_per_subcrq = 5045 be64_to_cpu(crq->query_capability.number); 5046 netdev_dbg(netdev, "max_tx_entries_per_subcrq = %lld\n", 5047 adapter->max_tx_entries_per_subcrq); 5048 break; 5049 case MAX_RX_ADD_ENTRIES_PER_SUBCRQ: 5050 adapter->max_rx_add_entries_per_subcrq = 5051 be64_to_cpu(crq->query_capability.number); 5052 netdev_dbg(netdev, "max_rx_add_entrs_per_subcrq = %lld\n", 5053 adapter->max_rx_add_entries_per_subcrq); 5054 break; 5055 case TCP_IP_OFFLOAD: 5056 adapter->tcp_ip_offload = 5057 be64_to_cpu(crq->query_capability.number); 5058 netdev_dbg(netdev, "tcp_ip_offload = %lld\n", 5059 adapter->tcp_ip_offload); 5060 break; 5061 case PROMISC_SUPPORTED: 5062 adapter->promisc_supported = 5063 be64_to_cpu(crq->query_capability.number); 5064 netdev_dbg(netdev, "promisc_supported = %lld\n", 5065 adapter->promisc_supported); 5066 break; 5067 case MIN_MTU: 5068 adapter->min_mtu = be64_to_cpu(crq->query_capability.number); 5069 netdev->min_mtu = adapter->min_mtu - ETH_HLEN; 5070 netdev_dbg(netdev, "min_mtu = %lld\n", adapter->min_mtu); 5071 break; 5072 case MAX_MTU: 5073 adapter->max_mtu = be64_to_cpu(crq->query_capability.number); 5074 netdev->max_mtu = adapter->max_mtu - ETH_HLEN; 5075 netdev_dbg(netdev, "max_mtu = %lld\n", adapter->max_mtu); 5076 break; 5077 case MAX_MULTICAST_FILTERS: 5078 adapter->max_multicast_filters = 5079 be64_to_cpu(crq->query_capability.number); 5080 netdev_dbg(netdev, "max_multicast_filters = %lld\n", 5081 adapter->max_multicast_filters); 5082 break; 5083 case VLAN_HEADER_INSERTION: 5084 adapter->vlan_header_insertion = 5085 be64_to_cpu(crq->query_capability.number); 5086 if (adapter->vlan_header_insertion) 5087 netdev->features |= NETIF_F_HW_VLAN_STAG_TX; 5088 netdev_dbg(netdev, "vlan_header_insertion = %lld\n", 5089 adapter->vlan_header_insertion); 5090 break; 5091 case RX_VLAN_HEADER_INSERTION: 5092 adapter->rx_vlan_header_insertion = 5093 be64_to_cpu(crq->query_capability.number); 5094 netdev_dbg(netdev, "rx_vlan_header_insertion = %lld\n", 5095 adapter->rx_vlan_header_insertion); 5096 break; 5097 case MAX_TX_SG_ENTRIES: 5098 adapter->max_tx_sg_entries = 5099 be64_to_cpu(crq->query_capability.number); 5100 netdev_dbg(netdev, "max_tx_sg_entries = %lld\n", 5101 adapter->max_tx_sg_entries); 5102 break; 5103 case RX_SG_SUPPORTED: 5104 adapter->rx_sg_supported = 5105 be64_to_cpu(crq->query_capability.number); 5106 netdev_dbg(netdev, "rx_sg_supported = %lld\n", 5107 adapter->rx_sg_supported); 5108 break; 5109 case OPT_TX_COMP_SUB_QUEUES: 5110 adapter->opt_tx_comp_sub_queues = 5111 be64_to_cpu(crq->query_capability.number); 5112 netdev_dbg(netdev, "opt_tx_comp_sub_queues = %lld\n", 5113 adapter->opt_tx_comp_sub_queues); 5114 break; 5115 case OPT_RX_COMP_QUEUES: 5116 adapter->opt_rx_comp_queues = 5117 be64_to_cpu(crq->query_capability.number); 5118 netdev_dbg(netdev, "opt_rx_comp_queues = %lld\n", 5119 adapter->opt_rx_comp_queues); 5120 break; 5121 case 
OPT_RX_BUFADD_Q_PER_RX_COMP_Q: 5122 adapter->opt_rx_bufadd_q_per_rx_comp_q = 5123 be64_to_cpu(crq->query_capability.number); 5124 netdev_dbg(netdev, "opt_rx_bufadd_q_per_rx_comp_q = %lld\n", 5125 adapter->opt_rx_bufadd_q_per_rx_comp_q); 5126 break; 5127 case OPT_TX_ENTRIES_PER_SUBCRQ: 5128 adapter->opt_tx_entries_per_subcrq = 5129 be64_to_cpu(crq->query_capability.number); 5130 netdev_dbg(netdev, "opt_tx_entries_per_subcrq = %lld\n", 5131 adapter->opt_tx_entries_per_subcrq); 5132 break; 5133 case OPT_RXBA_ENTRIES_PER_SUBCRQ: 5134 adapter->opt_rxba_entries_per_subcrq = 5135 be64_to_cpu(crq->query_capability.number); 5136 netdev_dbg(netdev, "opt_rxba_entries_per_subcrq = %lld\n", 5137 adapter->opt_rxba_entries_per_subcrq); 5138 break; 5139 case TX_RX_DESC_REQ: 5140 adapter->tx_rx_desc_req = crq->query_capability.number; 5141 netdev_dbg(netdev, "tx_rx_desc_req = %llx\n", 5142 adapter->tx_rx_desc_req); 5143 break; 5144 5145 default: 5146 netdev_err(netdev, "Got invalid cap rsp %d\n", 5147 crq->query_capability.capability); 5148 } 5149 5150 out: 5151 if (atomic_read(&adapter->running_cap_crqs) == 0) { 5152 adapter->wait_capability = false; 5153 send_request_cap(adapter, 0); 5154 } 5155 } 5156 5157 static int send_query_phys_parms(struct ibmvnic_adapter *adapter) 5158 { 5159 union ibmvnic_crq crq; 5160 int rc; 5161 5162 memset(&crq, 0, sizeof(crq)); 5163 crq.query_phys_parms.first = IBMVNIC_CRQ_CMD; 5164 crq.query_phys_parms.cmd = QUERY_PHYS_PARMS; 5165 5166 mutex_lock(&adapter->fw_lock); 5167 adapter->fw_done_rc = 0; 5168 reinit_completion(&adapter->fw_done); 5169 5170 rc = ibmvnic_send_crq(adapter, &crq); 5171 if (rc) { 5172 mutex_unlock(&adapter->fw_lock); 5173 return rc; 5174 } 5175 5176 rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000); 5177 if (rc) { 5178 mutex_unlock(&adapter->fw_lock); 5179 return rc; 5180 } 5181 5182 mutex_unlock(&adapter->fw_lock); 5183 return adapter->fw_done_rc ? 
-EIO : 0; 5184 } 5185 5186 static int handle_query_phys_parms_rsp(union ibmvnic_crq *crq, 5187 struct ibmvnic_adapter *adapter) 5188 { 5189 struct net_device *netdev = adapter->netdev; 5190 int rc; 5191 __be32 rspeed = cpu_to_be32(crq->query_phys_parms_rsp.speed); 5192 5193 rc = crq->query_phys_parms_rsp.rc.code; 5194 if (rc) { 5195 netdev_err(netdev, "Error %d in QUERY_PHYS_PARMS\n", rc); 5196 return rc; 5197 } 5198 switch (rspeed) { 5199 case IBMVNIC_10MBPS: 5200 adapter->speed = SPEED_10; 5201 break; 5202 case IBMVNIC_100MBPS: 5203 adapter->speed = SPEED_100; 5204 break; 5205 case IBMVNIC_1GBPS: 5206 adapter->speed = SPEED_1000; 5207 break; 5208 case IBMVNIC_10GBPS: 5209 adapter->speed = SPEED_10000; 5210 break; 5211 case IBMVNIC_25GBPS: 5212 adapter->speed = SPEED_25000; 5213 break; 5214 case IBMVNIC_40GBPS: 5215 adapter->speed = SPEED_40000; 5216 break; 5217 case IBMVNIC_50GBPS: 5218 adapter->speed = SPEED_50000; 5219 break; 5220 case IBMVNIC_100GBPS: 5221 adapter->speed = SPEED_100000; 5222 break; 5223 case IBMVNIC_200GBPS: 5224 adapter->speed = SPEED_200000; 5225 break; 5226 default: 5227 if (netif_carrier_ok(netdev)) 5228 netdev_warn(netdev, "Unknown speed 0x%08x\n", rspeed); 5229 adapter->speed = SPEED_UNKNOWN; 5230 } 5231 if (crq->query_phys_parms_rsp.flags1 & IBMVNIC_FULL_DUPLEX) 5232 adapter->duplex = DUPLEX_FULL; 5233 else if (crq->query_phys_parms_rsp.flags1 & IBMVNIC_HALF_DUPLEX) 5234 adapter->duplex = DUPLEX_HALF; 5235 else 5236 adapter->duplex = DUPLEX_UNKNOWN; 5237 5238 return rc; 5239 } 5240 5241 static void ibmvnic_handle_crq(union ibmvnic_crq *crq, 5242 struct ibmvnic_adapter *adapter) 5243 { 5244 struct ibmvnic_generic_crq *gen_crq = &crq->generic; 5245 struct net_device *netdev = adapter->netdev; 5246 struct device *dev = &adapter->vdev->dev; 5247 u64 *u64_crq = (u64 *)crq; 5248 long rc; 5249 5250 netdev_dbg(netdev, "Handling CRQ: %016lx %016lx\n", 5251 (unsigned long)cpu_to_be64(u64_crq[0]), 5252 (unsigned long)cpu_to_be64(u64_crq[1])); 5253 switch (gen_crq->first) { 5254 case IBMVNIC_CRQ_INIT_RSP: 5255 switch (gen_crq->cmd) { 5256 case IBMVNIC_CRQ_INIT: 5257 dev_info(dev, "Partner initialized\n"); 5258 adapter->from_passive_init = true; 5259 /* Discard any stale login responses from prev reset. 5260 * CHECK: should we clear even on INIT_COMPLETE? 5261 */ 5262 adapter->login_pending = false; 5263 5264 if (adapter->state == VNIC_DOWN) 5265 rc = ibmvnic_reset(adapter, VNIC_RESET_PASSIVE_INIT); 5266 else 5267 rc = ibmvnic_reset(adapter, VNIC_RESET_FAILOVER); 5268 5269 if (rc && rc != -EBUSY) { 5270 /* We were unable to schedule the failover 5271 * reset either because the adapter was still 5272 * probing (eg: during kexec) or we could not 5273 * allocate memory. Clear the failover_pending 5274 * flag since no one else will. We ignore 5275 * EBUSY because it means either FAILOVER reset 5276 * is already scheduled or the adapter is 5277 * being removed. 
5278 */ 5279 netdev_err(netdev, 5280 "Error %ld scheduling failover reset\n", 5281 rc); 5282 adapter->failover_pending = false; 5283 } 5284 5285 if (!completion_done(&adapter->init_done)) { 5286 complete(&adapter->init_done); 5287 if (!adapter->init_done_rc) 5288 adapter->init_done_rc = -EAGAIN; 5289 } 5290 5291 break; 5292 case IBMVNIC_CRQ_INIT_COMPLETE: 5293 dev_info(dev, "Partner initialization complete\n"); 5294 adapter->crq.active = true; 5295 send_version_xchg(adapter); 5296 break; 5297 default: 5298 dev_err(dev, "Unknown crq cmd: %d\n", gen_crq->cmd); 5299 } 5300 return; 5301 case IBMVNIC_CRQ_XPORT_EVENT: 5302 netif_carrier_off(netdev); 5303 adapter->crq.active = false; 5304 /* terminate any thread waiting for a response 5305 * from the device 5306 */ 5307 if (!completion_done(&adapter->fw_done)) { 5308 adapter->fw_done_rc = -EIO; 5309 complete(&adapter->fw_done); 5310 } 5311 if (!completion_done(&adapter->stats_done)) 5312 complete(&adapter->stats_done); 5313 if (test_bit(0, &adapter->resetting)) 5314 adapter->force_reset_recovery = true; 5315 if (gen_crq->cmd == IBMVNIC_PARTITION_MIGRATED) { 5316 dev_info(dev, "Migrated, re-enabling adapter\n"); 5317 ibmvnic_reset(adapter, VNIC_RESET_MOBILITY); 5318 } else if (gen_crq->cmd == IBMVNIC_DEVICE_FAILOVER) { 5319 dev_info(dev, "Backing device failover detected\n"); 5320 adapter->failover_pending = true; 5321 } else { 5322 /* The adapter lost the connection */ 5323 dev_err(dev, "Virtual Adapter failed (rc=%d)\n", 5324 gen_crq->cmd); 5325 ibmvnic_reset(adapter, VNIC_RESET_FATAL); 5326 } 5327 return; 5328 case IBMVNIC_CRQ_CMD_RSP: 5329 break; 5330 default: 5331 dev_err(dev, "Got an invalid msg type 0x%02x\n", 5332 gen_crq->first); 5333 return; 5334 } 5335 5336 switch (gen_crq->cmd) { 5337 case VERSION_EXCHANGE_RSP: 5338 rc = crq->version_exchange_rsp.rc.code; 5339 if (rc) { 5340 dev_err(dev, "Error %ld in VERSION_EXCHG_RSP\n", rc); 5341 break; 5342 } 5343 ibmvnic_version = 5344 be16_to_cpu(crq->version_exchange_rsp.version); 5345 dev_info(dev, "Partner protocol version is %d\n", 5346 ibmvnic_version); 5347 send_query_cap(adapter); 5348 break; 5349 case QUERY_CAPABILITY_RSP: 5350 handle_query_cap_rsp(crq, adapter); 5351 break; 5352 case QUERY_MAP_RSP: 5353 handle_query_map_rsp(crq, adapter); 5354 break; 5355 case REQUEST_MAP_RSP: 5356 adapter->fw_done_rc = crq->request_map_rsp.rc.code; 5357 complete(&adapter->fw_done); 5358 break; 5359 case REQUEST_UNMAP_RSP: 5360 handle_request_unmap_rsp(crq, adapter); 5361 break; 5362 case REQUEST_CAPABILITY_RSP: 5363 handle_request_cap_rsp(crq, adapter); 5364 break; 5365 case LOGIN_RSP: 5366 netdev_dbg(netdev, "Got Login Response\n"); 5367 handle_login_rsp(crq, adapter); 5368 break; 5369 case LOGICAL_LINK_STATE_RSP: 5370 netdev_dbg(netdev, 5371 "Got Logical Link State Response, state: %d rc: %d\n", 5372 crq->logical_link_state_rsp.link_state, 5373 crq->logical_link_state_rsp.rc.code); 5374 adapter->logical_link_state = 5375 crq->logical_link_state_rsp.link_state; 5376 adapter->init_done_rc = crq->logical_link_state_rsp.rc.code; 5377 complete(&adapter->init_done); 5378 break; 5379 case LINK_STATE_INDICATION: 5380 netdev_dbg(netdev, "Got Logical Link State Indication\n"); 5381 adapter->phys_link_state = 5382 crq->link_state_indication.phys_link_state; 5383 adapter->logical_link_state = 5384 crq->link_state_indication.logical_link_state; 5385 if (adapter->phys_link_state && adapter->logical_link_state) 5386 netif_carrier_on(netdev); 5387 else 5388 netif_carrier_off(netdev); 5389 break; 5390 case 
CHANGE_MAC_ADDR_RSP: 5391 netdev_dbg(netdev, "Got MAC address change Response\n"); 5392 adapter->fw_done_rc = handle_change_mac_rsp(crq, adapter); 5393 break; 5394 case ERROR_INDICATION: 5395 netdev_dbg(netdev, "Got Error Indication\n"); 5396 handle_error_indication(crq, adapter); 5397 break; 5398 case REQUEST_STATISTICS_RSP: 5399 netdev_dbg(netdev, "Got Statistics Response\n"); 5400 complete(&adapter->stats_done); 5401 break; 5402 case QUERY_IP_OFFLOAD_RSP: 5403 netdev_dbg(netdev, "Got Query IP offload Response\n"); 5404 handle_query_ip_offload_rsp(adapter); 5405 break; 5406 case MULTICAST_CTRL_RSP: 5407 netdev_dbg(netdev, "Got multicast control Response\n"); 5408 break; 5409 case CONTROL_IP_OFFLOAD_RSP: 5410 netdev_dbg(netdev, "Got Control IP offload Response\n"); 5411 dma_unmap_single(dev, adapter->ip_offload_ctrl_tok, 5412 sizeof(adapter->ip_offload_ctrl), 5413 DMA_TO_DEVICE); 5414 complete(&adapter->init_done); 5415 break; 5416 case COLLECT_FW_TRACE_RSP: 5417 netdev_dbg(netdev, "Got Collect firmware trace Response\n"); 5418 complete(&adapter->fw_done); 5419 break; 5420 case GET_VPD_SIZE_RSP: 5421 handle_vpd_size_rsp(crq, adapter); 5422 break; 5423 case GET_VPD_RSP: 5424 handle_vpd_rsp(crq, adapter); 5425 break; 5426 case QUERY_PHYS_PARMS_RSP: 5427 adapter->fw_done_rc = handle_query_phys_parms_rsp(crq, adapter); 5428 complete(&adapter->fw_done); 5429 break; 5430 default: 5431 netdev_err(netdev, "Got an invalid cmd type 0x%02x\n", 5432 gen_crq->cmd); 5433 } 5434 } 5435 5436 static irqreturn_t ibmvnic_interrupt(int irq, void *instance) 5437 { 5438 struct ibmvnic_adapter *adapter = instance; 5439 5440 tasklet_schedule(&adapter->tasklet); 5441 return IRQ_HANDLED; 5442 } 5443 5444 static void ibmvnic_tasklet(struct tasklet_struct *t) 5445 { 5446 struct ibmvnic_adapter *adapter = from_tasklet(adapter, t, tasklet); 5447 struct ibmvnic_crq_queue *queue = &adapter->crq; 5448 union ibmvnic_crq *crq; 5449 unsigned long flags; 5450 bool done = false; 5451 5452 spin_lock_irqsave(&queue->lock, flags); 5453 while (!done) { 5454 /* Pull all the valid messages off the CRQ */ 5455 while ((crq = ibmvnic_next_crq(adapter)) != NULL) { 5456 /* This barrier makes sure ibmvnic_next_crq()'s 5457 * crq->generic.first & IBMVNIC_CRQ_CMD_RSP is loaded 5458 * before ibmvnic_handle_crq()'s 5459 * switch(gen_crq->first) and switch(gen_crq->cmd). 
5460 */ 5461 dma_rmb(); 5462 ibmvnic_handle_crq(crq, adapter); 5463 crq->generic.first = 0; 5464 } 5465 5466 /* remain in tasklet until all 5467 * capabilities responses are received 5468 */ 5469 if (!adapter->wait_capability) 5470 done = true; 5471 } 5472 /* if capabilities CRQ's were sent in this tasklet, the following 5473 * tasklet must wait until all responses are received 5474 */ 5475 if (atomic_read(&adapter->running_cap_crqs) != 0) 5476 adapter->wait_capability = true; 5477 spin_unlock_irqrestore(&queue->lock, flags); 5478 } 5479 5480 static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *adapter) 5481 { 5482 struct vio_dev *vdev = adapter->vdev; 5483 int rc; 5484 5485 do { 5486 rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address); 5487 } while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc)); 5488 5489 if (rc) 5490 dev_err(&vdev->dev, "Error enabling adapter (rc=%d)\n", rc); 5491 5492 return rc; 5493 } 5494 5495 static int ibmvnic_reset_crq(struct ibmvnic_adapter *adapter) 5496 { 5497 struct ibmvnic_crq_queue *crq = &adapter->crq; 5498 struct device *dev = &adapter->vdev->dev; 5499 struct vio_dev *vdev = adapter->vdev; 5500 int rc; 5501 5502 /* Close the CRQ */ 5503 do { 5504 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address); 5505 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc)); 5506 5507 /* Clean out the queue */ 5508 if (!crq->msgs) 5509 return -EINVAL; 5510 5511 memset(crq->msgs, 0, PAGE_SIZE); 5512 crq->cur = 0; 5513 crq->active = false; 5514 5515 /* And re-open it again */ 5516 rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address, 5517 crq->msg_token, PAGE_SIZE); 5518 5519 if (rc == H_CLOSED) 5520 /* Adapter is good, but other end is not ready */ 5521 dev_warn(dev, "Partner adapter not ready\n"); 5522 else if (rc != 0) 5523 dev_warn(dev, "Couldn't register crq (rc=%d)\n", rc); 5524 5525 return rc; 5526 } 5527 5528 static void release_crq_queue(struct ibmvnic_adapter *adapter) 5529 { 5530 struct ibmvnic_crq_queue *crq = &adapter->crq; 5531 struct vio_dev *vdev = adapter->vdev; 5532 long rc; 5533 5534 if (!crq->msgs) 5535 return; 5536 5537 netdev_dbg(adapter->netdev, "Releasing CRQ\n"); 5538 free_irq(vdev->irq, adapter); 5539 tasklet_kill(&adapter->tasklet); 5540 do { 5541 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address); 5542 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc)); 5543 5544 dma_unmap_single(&vdev->dev, crq->msg_token, PAGE_SIZE, 5545 DMA_BIDIRECTIONAL); 5546 free_page((unsigned long)crq->msgs); 5547 crq->msgs = NULL; 5548 crq->active = false; 5549 } 5550 5551 static int init_crq_queue(struct ibmvnic_adapter *adapter) 5552 { 5553 struct ibmvnic_crq_queue *crq = &adapter->crq; 5554 struct device *dev = &adapter->vdev->dev; 5555 struct vio_dev *vdev = adapter->vdev; 5556 int rc, retrc = -ENOMEM; 5557 5558 if (crq->msgs) 5559 return 0; 5560 5561 crq->msgs = (union ibmvnic_crq *)get_zeroed_page(GFP_KERNEL); 5562 /* Should we allocate more than one page? */ 5563 5564 if (!crq->msgs) 5565 return -ENOMEM; 5566 5567 crq->size = PAGE_SIZE / sizeof(*crq->msgs); 5568 crq->msg_token = dma_map_single(dev, crq->msgs, PAGE_SIZE, 5569 DMA_BIDIRECTIONAL); 5570 if (dma_mapping_error(dev, crq->msg_token)) 5571 goto map_failed; 5572 5573 rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address, 5574 crq->msg_token, PAGE_SIZE); 5575 5576 if (rc == H_RESOURCE) 5577 /* maybe kexecing and resource is busy. 
try a reset */ 5578 rc = ibmvnic_reset_crq(adapter); 5579 retrc = rc; 5580 5581 if (rc == H_CLOSED) { 5582 dev_warn(dev, "Partner adapter not ready\n"); 5583 } else if (rc) { 5584 dev_warn(dev, "Error %d opening adapter\n", rc); 5585 goto reg_crq_failed; 5586 } 5587 5588 retrc = 0; 5589 5590 tasklet_setup(&adapter->tasklet, (void *)ibmvnic_tasklet); 5591 5592 netdev_dbg(adapter->netdev, "registering irq 0x%x\n", vdev->irq); 5593 snprintf(crq->name, sizeof(crq->name), "ibmvnic-%x", 5594 adapter->vdev->unit_address); 5595 rc = request_irq(vdev->irq, ibmvnic_interrupt, 0, crq->name, adapter); 5596 if (rc) { 5597 dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n", 5598 vdev->irq, rc); 5599 goto req_irq_failed; 5600 } 5601 5602 rc = vio_enable_interrupts(vdev); 5603 if (rc) { 5604 dev_err(dev, "Error %d enabling interrupts\n", rc); 5605 goto req_irq_failed; 5606 } 5607 5608 crq->cur = 0; 5609 spin_lock_init(&crq->lock); 5610 5611 /* process any CRQs that were queued before we enabled interrupts */ 5612 tasklet_schedule(&adapter->tasklet); 5613 5614 return retrc; 5615 5616 req_irq_failed: 5617 tasklet_kill(&adapter->tasklet); 5618 do { 5619 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address); 5620 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc)); 5621 reg_crq_failed: 5622 dma_unmap_single(dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL); 5623 map_failed: 5624 free_page((unsigned long)crq->msgs); 5625 crq->msgs = NULL; 5626 return retrc; 5627 } 5628 5629 static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter, bool reset) 5630 { 5631 struct device *dev = &adapter->vdev->dev; 5632 unsigned long timeout = msecs_to_jiffies(20000); 5633 u64 old_num_rx_queues = adapter->req_rx_queues; 5634 u64 old_num_tx_queues = adapter->req_tx_queues; 5635 int rc; 5636 5637 adapter->from_passive_init = false; 5638 5639 if (reset) 5640 reinit_completion(&adapter->init_done); 5641 5642 adapter->init_done_rc = 0; 5643 rc = ibmvnic_send_crq_init(adapter); 5644 if (rc) { 5645 dev_err(dev, "Send crq init failed with error %d\n", rc); 5646 return rc; 5647 } 5648 5649 if (!wait_for_completion_timeout(&adapter->init_done, timeout)) { 5650 dev_err(dev, "Initialization sequence timed out\n"); 5651 return -1; 5652 } 5653 5654 if (adapter->init_done_rc) { 5655 release_crq_queue(adapter); 5656 return adapter->init_done_rc; 5657 } 5658 5659 if (adapter->from_passive_init) { 5660 adapter->state = VNIC_OPEN; 5661 adapter->from_passive_init = false; 5662 return -1; 5663 } 5664 5665 if (reset && 5666 test_bit(0, &adapter->resetting) && !adapter->wait_for_reset && 5667 adapter->reset_reason != VNIC_RESET_MOBILITY) { 5668 if (adapter->req_rx_queues != old_num_rx_queues || 5669 adapter->req_tx_queues != old_num_tx_queues) { 5670 release_sub_crqs(adapter, 0); 5671 rc = init_sub_crqs(adapter); 5672 } else { 5673 rc = reset_sub_crq_queues(adapter); 5674 } 5675 } else { 5676 rc = init_sub_crqs(adapter); 5677 } 5678 5679 if (rc) { 5680 dev_err(dev, "Initialization of sub crqs failed\n"); 5681 release_crq_queue(adapter); 5682 return rc; 5683 } 5684 5685 rc = init_sub_crq_irqs(adapter); 5686 if (rc) { 5687 dev_err(dev, "Failed to initialize sub crq irqs\n"); 5688 release_crq_queue(adapter); 5689 } 5690 5691 return rc; 5692 } 5693 5694 static struct device_attribute dev_attr_failover; 5695 5696 static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id) 5697 { 5698 struct ibmvnic_adapter *adapter; 5699 struct net_device *netdev; 5700 unsigned char *mac_addr_p; 5701 bool init_success; 5702 int rc; 5703 5704 
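/* The VIO device must supply a MAC address attribute; without it, probe bails out before allocating the netdev */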
static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
{
	struct ibmvnic_adapter *adapter;
	struct net_device *netdev;
	unsigned char *mac_addr_p;
	bool init_success;
	int rc;

	dev_dbg(&dev->dev, "entering ibmvnic_probe for UA 0x%x\n",
		dev->unit_address);

	mac_addr_p = (unsigned char *)vio_get_attribute(dev,
							VETH_MAC_ADDR, NULL);
	if (!mac_addr_p) {
		dev_err(&dev->dev,
			"(%s:%3.3d) ERROR: Can't find MAC_ADDR attribute\n",
			__FILE__, __LINE__);
		return 0;
	}

	netdev = alloc_etherdev_mq(sizeof(struct ibmvnic_adapter),
				   IBMVNIC_MAX_QUEUES);
	if (!netdev)
		return -ENOMEM;

	adapter = netdev_priv(netdev);
	adapter->state = VNIC_PROBING;
	dev_set_drvdata(&dev->dev, netdev);
	adapter->vdev = dev;
	adapter->netdev = netdev;
	adapter->login_pending = false;
	memset(&adapter->map_ids, 0, sizeof(adapter->map_ids));
	/* map_ids start at 1, so ensure map_id 0 is always "in-use" */
	bitmap_set(adapter->map_ids, 0, 1);

	ether_addr_copy(adapter->mac_addr, mac_addr_p);
	eth_hw_addr_set(netdev, adapter->mac_addr);
	netdev->irq = dev->irq;
	netdev->netdev_ops = &ibmvnic_netdev_ops;
	netdev->ethtool_ops = &ibmvnic_ethtool_ops;
	SET_NETDEV_DEV(netdev, &dev->dev);

	INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset);
	INIT_DELAYED_WORK(&adapter->ibmvnic_delayed_reset,
			  __ibmvnic_delayed_reset);
	INIT_LIST_HEAD(&adapter->rwi_list);
	spin_lock_init(&adapter->rwi_lock);
	spin_lock_init(&adapter->state_lock);
	mutex_init(&adapter->fw_lock);
	init_completion(&adapter->init_done);
	init_completion(&adapter->fw_done);
	init_completion(&adapter->reset_done);
	init_completion(&adapter->stats_done);
	clear_bit(0, &adapter->resetting);
	adapter->prev_rx_buf_sz = 0;
	adapter->prev_mtu = 0;

	init_success = false;
	do {
		rc = init_crq_queue(adapter);
		if (rc) {
			dev_err(&dev->dev, "Couldn't initialize crq. rc=%d\n",
				rc);
			goto ibmvnic_init_fail;
		}

		rc = ibmvnic_reset_init(adapter, false);
	} while (rc == -EAGAIN);

	/* We ignore any error from ibmvnic_reset_init() here, assuming the
	 * partner is simply not ready and the CRQ is not active.  When the
	 * partner becomes ready, the passive init reset will finish the
	 * initialization.
	 */

	if (!rc)
		init_success = true;

	rc = init_stats_buffers(adapter);
	if (rc)
		goto ibmvnic_init_fail;

	rc = init_stats_token(adapter);
	if (rc)
		goto ibmvnic_stats_fail;

	rc = device_create_file(&dev->dev, &dev_attr_failover);
	if (rc)
		goto ibmvnic_dev_file_err;

	netif_carrier_off(netdev);
	rc = register_netdev(netdev);
	if (rc) {
		dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc);
		goto ibmvnic_register_fail;
	}
	dev_info(&dev->dev, "ibmvnic registered\n");

	if (init_success) {
		adapter->state = VNIC_PROBED;
		netdev->mtu = adapter->req_mtu - ETH_HLEN;
		netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
		netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
	} else {
		adapter->state = VNIC_DOWN;
	}

	adapter->wait_for_reset = false;
	adapter->last_reset_time = jiffies;
	return 0;

ibmvnic_register_fail:
	device_remove_file(&dev->dev, &dev_attr_failover);

ibmvnic_dev_file_err:
	release_stats_token(adapter);

ibmvnic_stats_fail:
	release_stats_buffers(adapter);

ibmvnic_init_fail:
	release_sub_crqs(adapter, 1);
	release_crq_queue(adapter);
	mutex_destroy(&adapter->fw_lock);
	free_netdev(netdev);

	return rc;
}

static void ibmvnic_remove(struct vio_dev *dev)
{
	struct net_device *netdev = dev_get_drvdata(&dev->dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	unsigned long flags;

	spin_lock_irqsave(&adapter->state_lock, flags);

	/* If ibmvnic_reset() is scheduling a reset, wait for it to
	 * finish. Then, set the state to REMOVING to prevent it from
	 * scheduling any more work and to have reset functions ignore
	 * any resets that have already been scheduled. Drop the lock
	 * after setting state, so __ibmvnic_reset(), which is called
	 * from the flush_work() below, can make progress.
	 */
	spin_lock(&adapter->rwi_lock);
	adapter->state = VNIC_REMOVING;
	spin_unlock(&adapter->rwi_lock);

	spin_unlock_irqrestore(&adapter->state_lock, flags);

	flush_work(&adapter->ibmvnic_reset);
	flush_delayed_work(&adapter->ibmvnic_delayed_reset);

	rtnl_lock();
	unregister_netdevice(netdev);

	release_resources(adapter);
	release_rx_pools(adapter);
	release_tx_pools(adapter);
	release_sub_crqs(adapter, 1);
	release_crq_queue(adapter);

	release_stats_token(adapter);
	release_stats_buffers(adapter);

	adapter->state = VNIC_REMOVED;

	rtnl_unlock();
	mutex_destroy(&adapter->fw_lock);
	device_remove_file(&dev->dev, &dev_attr_failover);
	free_netdev(netdev);
	dev_set_drvdata(&dev->dev, NULL);
}

static ssize_t failover_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	__be64 session_token;
	long rc;

	if (!sysfs_streq(buf, "1"))
		return -EINVAL;

	rc = plpar_hcall(H_VIOCTL, retbuf, adapter->vdev->unit_address,
			 H_GET_SESSION_TOKEN, 0, 0, 0);
	if (rc) {
		netdev_err(netdev, "Couldn't retrieve session token, rc %ld\n",
			   rc);
		goto last_resort;
	}

	session_token = (__be64)retbuf[0];
	netdev_dbg(netdev, "Initiating client failover, session id %llx\n",
		   be64_to_cpu(session_token));
	rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
				H_SESSION_ERR_DETECTED, session_token, 0, 0);
	if (rc)
		netdev_err(netdev,
			   "H_VIOCTL initiated failover failed, rc %ld\n",
			   rc);

last_resort:
	netdev_dbg(netdev, "Trying to send CRQ_CMD, the last resort\n");
	ibmvnic_reset(adapter, VNIC_RESET_FAILOVER);

	return count;
}

static DEVICE_ATTR_WO(failover);

static unsigned long ibmvnic_get_desired_dma(struct vio_dev *vdev)
{
	struct net_device *netdev = dev_get_drvdata(&vdev->dev);
	struct ibmvnic_adapter *adapter;
	struct iommu_table *tbl;
	unsigned long ret = 0;
	int i;

	tbl = get_iommu_table_base(&vdev->dev);

	/* netdev inits at probe time along with the structures we need below */
	if (!netdev)
		return IOMMU_PAGE_ALIGN(IBMVNIC_IO_ENTITLEMENT_DEFAULT, tbl);

	adapter = netdev_priv(netdev);

	ret += PAGE_SIZE; /* the crq message queue */
	ret += IOMMU_PAGE_ALIGN(sizeof(struct ibmvnic_statistics), tbl);

	for (i = 0; i < adapter->req_tx_queues + adapter->req_rx_queues; i++)
		ret += 4 * PAGE_SIZE; /* the scrq message queue */

	for (i = 0; i < adapter->num_active_rx_pools; i++)
		ret += adapter->rx_pool[i].size *
			IOMMU_PAGE_ALIGN(adapter->rx_pool[i].buff_size, tbl);

	return ret;
}

static int ibmvnic_resume(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	if (adapter->state != VNIC_OPEN)
		return 0;

	tasklet_schedule(&adapter->tasklet);

	return 0;
}

static const struct vio_device_id ibmvnic_device_table[] = {
	{"network", "IBM,vnic"},
	{"", "" }
};
MODULE_DEVICE_TABLE(vio, ibmvnic_device_table);

static const struct dev_pm_ops ibmvnic_pm_ops = {
	.resume = ibmvnic_resume
};

static struct vio_driver ibmvnic_driver = {
	.id_table	= ibmvnic_device_table,
	.probe		= ibmvnic_probe,
	.remove		= ibmvnic_remove,
	.get_desired_dma = ibmvnic_get_desired_dma,
	.name		= ibmvnic_driver_name,
	.pm		= &ibmvnic_pm_ops,
};

/* module functions */
static int __init ibmvnic_module_init(void)
{
	pr_info("%s: %s %s\n", ibmvnic_driver_name, ibmvnic_driver_string,
		IBMVNIC_DRIVER_VERSION);

	return vio_register_driver(&ibmvnic_driver);
}

static void __exit ibmvnic_module_exit(void)
{
	vio_unregister_driver(&ibmvnic_driver);
}

module_init(ibmvnic_module_init);
module_exit(ibmvnic_module_exit);