// SPDX-License-Identifier: GPL-2.0-or-later
/**************************************************************************/
/* */
/* IBM System i and System p Virtual NIC Device Driver */
/* Copyright (C) 2014 IBM Corp. */
/* Santiago Leon (santi_leon@yahoo.com) */
/* Thomas Falcon (tlfalcon@linux.vnet.ibm.com) */
/* John Allen (jallen@linux.vnet.ibm.com) */
/* */
/* */
/* This module contains the implementation of a virtual ethernet device */
/* for use with IBM i/p Series LPAR Linux. It utilizes the logical LAN */
/* option of the RS/6000 Platform Architecture to interface with virtual */
/* ethernet NICs that are presented to the partition by the hypervisor. */
/* */
/* Messages are passed between the VNIC driver and the VNIC server using */
/* Command/Response Queues (CRQs) and sub CRQs (sCRQs). CRQs are used to */
/* issue and receive commands that initiate communication with the server */
/* on driver initialization. Sub CRQs (sCRQs) are similar to CRQs, but */
/* are used by the driver to notify the server that a packet is */
/* ready for transmission or that a buffer has been added to receive a */
/* packet. Subsequently, sCRQs are used by the server to notify the */
/* driver that a packet transmission has been completed or that a packet */
/* has been received and placed in a waiting buffer. */
/* */
/* In lieu of a more conventional "on-the-fly" DMA mapping strategy in */
/* which skbs are DMA mapped and immediately unmapped when the transmit */
/* or receive has been completed, the VNIC driver is required to use */
/* "long term mapping". This entails that large, contiguous DMA mapped */
/* buffers are allocated on driver initialization and these buffers are */
/* then continuously reused to pass skbs to and from the VNIC server.
*/ 32 /* */ 33 /**************************************************************************/ 34 35 #include <linux/module.h> 36 #include <linux/moduleparam.h> 37 #include <linux/types.h> 38 #include <linux/errno.h> 39 #include <linux/completion.h> 40 #include <linux/ioport.h> 41 #include <linux/dma-mapping.h> 42 #include <linux/kernel.h> 43 #include <linux/netdevice.h> 44 #include <linux/etherdevice.h> 45 #include <linux/skbuff.h> 46 #include <linux/init.h> 47 #include <linux/delay.h> 48 #include <linux/mm.h> 49 #include <linux/ethtool.h> 50 #include <linux/proc_fs.h> 51 #include <linux/if_arp.h> 52 #include <linux/in.h> 53 #include <linux/ip.h> 54 #include <linux/ipv6.h> 55 #include <linux/irq.h> 56 #include <linux/kthread.h> 57 #include <linux/seq_file.h> 58 #include <linux/interrupt.h> 59 #include <net/net_namespace.h> 60 #include <asm/hvcall.h> 61 #include <linux/atomic.h> 62 #include <asm/vio.h> 63 #include <asm/xive.h> 64 #include <asm/iommu.h> 65 #include <linux/uaccess.h> 66 #include <asm/firmware.h> 67 #include <linux/workqueue.h> 68 #include <linux/if_vlan.h> 69 #include <linux/utsname.h> 70 71 #include "ibmvnic.h" 72 73 static const char ibmvnic_driver_name[] = "ibmvnic"; 74 static const char ibmvnic_driver_string[] = "IBM System i/p Virtual NIC Driver"; 75 76 MODULE_AUTHOR("Santiago Leon"); 77 MODULE_DESCRIPTION("IBM System i/p Virtual NIC Driver"); 78 MODULE_LICENSE("GPL"); 79 MODULE_VERSION(IBMVNIC_DRIVER_VERSION); 80 81 static int ibmvnic_version = IBMVNIC_INITIAL_VERSION; 82 static void release_sub_crqs(struct ibmvnic_adapter *, bool); 83 static int ibmvnic_reset_crq(struct ibmvnic_adapter *); 84 static int ibmvnic_send_crq_init(struct ibmvnic_adapter *); 85 static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *); 86 static int ibmvnic_send_crq(struct ibmvnic_adapter *, union ibmvnic_crq *); 87 static int send_subcrq_indirect(struct ibmvnic_adapter *, u64, u64, u64); 88 static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance); 89 static int enable_scrq_irq(struct ibmvnic_adapter *, 90 struct ibmvnic_sub_crq_queue *); 91 static int disable_scrq_irq(struct ibmvnic_adapter *, 92 struct ibmvnic_sub_crq_queue *); 93 static int pending_scrq(struct ibmvnic_adapter *, 94 struct ibmvnic_sub_crq_queue *); 95 static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *, 96 struct ibmvnic_sub_crq_queue *); 97 static int ibmvnic_poll(struct napi_struct *napi, int data); 98 static void send_query_map(struct ibmvnic_adapter *adapter); 99 static int send_request_map(struct ibmvnic_adapter *, dma_addr_t, u32, u8); 100 static int send_request_unmap(struct ibmvnic_adapter *, u8); 101 static int send_login(struct ibmvnic_adapter *adapter); 102 static void send_query_cap(struct ibmvnic_adapter *adapter); 103 static int init_sub_crqs(struct ibmvnic_adapter *); 104 static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter); 105 static int ibmvnic_reset_init(struct ibmvnic_adapter *, bool reset); 106 static void release_crq_queue(struct ibmvnic_adapter *); 107 static int __ibmvnic_set_mac(struct net_device *, u8 *); 108 static int init_crq_queue(struct ibmvnic_adapter *adapter); 109 static int send_query_phys_parms(struct ibmvnic_adapter *adapter); 110 static void ibmvnic_tx_scrq_clean_buffer(struct ibmvnic_adapter *adapter, 111 struct ibmvnic_sub_crq_queue *tx_scrq); 112 static void free_long_term_buff(struct ibmvnic_adapter *adapter, 113 struct ibmvnic_long_term_buff *ltb); 114 static void ibmvnic_disable_irqs(struct ibmvnic_adapter *adapter); 115 116 struct ibmvnic_stat { 
117 char name[ETH_GSTRING_LEN]; 118 int offset; 119 }; 120 121 #define IBMVNIC_STAT_OFF(stat) (offsetof(struct ibmvnic_adapter, stats) + \ 122 offsetof(struct ibmvnic_statistics, stat)) 123 #define IBMVNIC_GET_STAT(a, off) (*((u64 *)(((unsigned long)(a)) + (off)))) 124 125 static const struct ibmvnic_stat ibmvnic_stats[] = { 126 {"rx_packets", IBMVNIC_STAT_OFF(rx_packets)}, 127 {"rx_bytes", IBMVNIC_STAT_OFF(rx_bytes)}, 128 {"tx_packets", IBMVNIC_STAT_OFF(tx_packets)}, 129 {"tx_bytes", IBMVNIC_STAT_OFF(tx_bytes)}, 130 {"ucast_tx_packets", IBMVNIC_STAT_OFF(ucast_tx_packets)}, 131 {"ucast_rx_packets", IBMVNIC_STAT_OFF(ucast_rx_packets)}, 132 {"mcast_tx_packets", IBMVNIC_STAT_OFF(mcast_tx_packets)}, 133 {"mcast_rx_packets", IBMVNIC_STAT_OFF(mcast_rx_packets)}, 134 {"bcast_tx_packets", IBMVNIC_STAT_OFF(bcast_tx_packets)}, 135 {"bcast_rx_packets", IBMVNIC_STAT_OFF(bcast_rx_packets)}, 136 {"align_errors", IBMVNIC_STAT_OFF(align_errors)}, 137 {"fcs_errors", IBMVNIC_STAT_OFF(fcs_errors)}, 138 {"single_collision_frames", IBMVNIC_STAT_OFF(single_collision_frames)}, 139 {"multi_collision_frames", IBMVNIC_STAT_OFF(multi_collision_frames)}, 140 {"sqe_test_errors", IBMVNIC_STAT_OFF(sqe_test_errors)}, 141 {"deferred_tx", IBMVNIC_STAT_OFF(deferred_tx)}, 142 {"late_collisions", IBMVNIC_STAT_OFF(late_collisions)}, 143 {"excess_collisions", IBMVNIC_STAT_OFF(excess_collisions)}, 144 {"internal_mac_tx_errors", IBMVNIC_STAT_OFF(internal_mac_tx_errors)}, 145 {"carrier_sense", IBMVNIC_STAT_OFF(carrier_sense)}, 146 {"too_long_frames", IBMVNIC_STAT_OFF(too_long_frames)}, 147 {"internal_mac_rx_errors", IBMVNIC_STAT_OFF(internal_mac_rx_errors)}, 148 }; 149 150 static int send_crq_init_complete(struct ibmvnic_adapter *adapter) 151 { 152 union ibmvnic_crq crq; 153 154 memset(&crq, 0, sizeof(crq)); 155 crq.generic.first = IBMVNIC_CRQ_INIT_CMD; 156 crq.generic.cmd = IBMVNIC_CRQ_INIT_COMPLETE; 157 158 return ibmvnic_send_crq(adapter, &crq); 159 } 160 161 static int send_version_xchg(struct ibmvnic_adapter *adapter) 162 { 163 union ibmvnic_crq crq; 164 165 memset(&crq, 0, sizeof(crq)); 166 crq.version_exchange.first = IBMVNIC_CRQ_CMD; 167 crq.version_exchange.cmd = VERSION_EXCHANGE; 168 crq.version_exchange.version = cpu_to_be16(ibmvnic_version); 169 170 return ibmvnic_send_crq(adapter, &crq); 171 } 172 173 static long h_reg_sub_crq(unsigned long unit_address, unsigned long token, 174 unsigned long length, unsigned long *number, 175 unsigned long *irq) 176 { 177 unsigned long retbuf[PLPAR_HCALL_BUFSIZE]; 178 long rc; 179 180 rc = plpar_hcall(H_REG_SUB_CRQ, retbuf, unit_address, token, length); 181 *number = retbuf[0]; 182 *irq = retbuf[1]; 183 184 return rc; 185 } 186 187 /** 188 * ibmvnic_wait_for_completion - Check device state and wait for completion 189 * @adapter: private device data 190 * @comp_done: completion structure to wait for 191 * @timeout: time to wait in milliseconds 192 * 193 * Wait for a completion signal or until the timeout limit is reached 194 * while checking that the device is still active. 
 */
static int ibmvnic_wait_for_completion(struct ibmvnic_adapter *adapter,
				       struct completion *comp_done,
				       unsigned long timeout)
{
	struct net_device *netdev;
	unsigned long div_timeout;
	u8 retry;

	netdev = adapter->netdev;
	retry = 5;
	div_timeout = msecs_to_jiffies(timeout / retry);
	while (true) {
		if (!adapter->crq.active) {
			netdev_err(netdev, "Device down!\n");
			return -ENODEV;
		}
		if (!retry--)
			break;
		if (wait_for_completion_timeout(comp_done, div_timeout))
			return 0;
	}
	netdev_err(netdev, "Operation timed out.\n");
	return -ETIMEDOUT;
}

/**
 * reuse_ltb() - Check if a long term buffer can be reused
 * @ltb: The long term buffer to be checked
 * @size: The size of the long term buffer.
 *
 * An LTB can be reused unless its size has changed.
 *
 * Return: Return true if the LTB can be reused, false otherwise.
 */
static bool reuse_ltb(struct ibmvnic_long_term_buff *ltb, int size)
{
	return (ltb->buff && ltb->size == size);
}

/**
 * alloc_long_term_buff() - Allocate a long term buffer (LTB)
 *
 * @adapter: ibmvnic adapter associated to the LTB
 * @ltb: container object for the LTB
 * @size: size of the LTB
 *
 * Allocate an LTB of the specified size and notify VIOS.
 *
 * If the given @ltb already has the correct size, reuse it. Otherwise if
 * it is non-NULL, free it. Then allocate a new one of the correct size.
 * Notify the VIOS either way since we may now be working with a new VIOS.
 *
 * Allocating larger chunks of memory during resets, especially LPM or under
 * low memory situations can cause resets to fail/timeout and for LPAR to
 * lose connectivity. So hold onto the LTB even if we fail to communicate
 * with the VIOS and reuse it on next open. Free LTB when adapter is closed.
 *
 * Return: 0 if we were able to allocate the LTB and notify the VIOS and
 *	   a negative value otherwise.
 */
static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb, int size)
{
	struct device *dev = &adapter->vdev->dev;
	int rc;

	if (!reuse_ltb(ltb, size)) {
		dev_dbg(dev,
			"LTB size changed from 0x%llx to 0x%x, reallocating\n",
			ltb->size, size);
		free_long_term_buff(adapter, ltb);
	}

	if (ltb->buff) {
		dev_dbg(dev, "Reusing LTB [map %d, size 0x%llx]\n",
			ltb->map_id, ltb->size);
	} else {
		ltb->buff = dma_alloc_coherent(dev, size, &ltb->addr,
					       GFP_KERNEL);
		if (!ltb->buff) {
			dev_err(dev, "Couldn't alloc long term buffer\n");
			return -ENOMEM;
		}
		ltb->size = size;

		ltb->map_id = find_first_zero_bit(adapter->map_ids,
						  MAX_MAP_ID);
		bitmap_set(adapter->map_ids, ltb->map_id, 1);

		dev_dbg(dev,
			"Allocated new LTB [map %d, size 0x%llx]\n",
			ltb->map_id, ltb->size);
	}

	/* Ensure ltb is zeroed - especially when reusing it. */
	memset(ltb->buff, 0, ltb->size);

	mutex_lock(&adapter->fw_lock);
	adapter->fw_done_rc = 0;
	reinit_completion(&adapter->fw_done);

	rc = send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id);
	if (rc) {
		dev_err(dev, "send_request_map failed, rc = %d\n", rc);
		goto out;
	}

	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
	if (rc) {
		dev_err(dev, "LTB map request aborted or timed out, rc = %d\n",
			rc);
		goto out;
	}

	if (adapter->fw_done_rc) {
		dev_err(dev, "Couldn't map LTB, rc = %d\n",
			adapter->fw_done_rc);
		rc = -EIO;
		goto out;
	}
	rc = 0;
out:
	/* don't free LTB on communication error - see function header */
	mutex_unlock(&adapter->fw_lock);
	return rc;
}

static void free_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb)
{
	struct device *dev = &adapter->vdev->dev;

	if (!ltb->buff)
		return;

	/* VIOS automatically unmaps the long term buffer at remote
	 * end for the following resets:
	 * FAILOVER, MOBILITY, TIMEOUT.
	 */
	if (adapter->reset_reason != VNIC_RESET_FAILOVER &&
	    adapter->reset_reason != VNIC_RESET_MOBILITY &&
	    adapter->reset_reason != VNIC_RESET_TIMEOUT)
		send_request_unmap(adapter, ltb->map_id);

	dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);

	ltb->buff = NULL;
	/* mark this map_id free */
	bitmap_clear(adapter->map_ids, ltb->map_id, 1);
	ltb->map_id = 0;
}

/**
 * map_rxpool_buf_to_ltb - Map given rxpool buffer to offset in an LTB.
 * @rxpool: The receive buffer pool containing buffer
 * @bufidx: Index of buffer in rxpool
 * @ltbp: (Output) pointer to the long term buffer containing the buffer
 * @offset: (Output) offset of buffer in the LTB from @ltbp
 *
 * Map the given buffer identified by [rxpool, bufidx] to an LTB in the
 * pool and its corresponding offset.
 */
static void map_rxpool_buf_to_ltb(struct ibmvnic_rx_pool *rxpool,
				  unsigned int bufidx,
				  struct ibmvnic_long_term_buff **ltbp,
				  unsigned int *offset)
{
	*ltbp = &rxpool->long_term_buff;
	*offset = bufidx * rxpool->buff_size;
}

static void deactivate_rx_pools(struct ibmvnic_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_active_rx_pools; i++)
		adapter->rx_pool[i].active = 0;
}

static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
			      struct ibmvnic_rx_pool *pool)
{
	int count = pool->size - atomic_read(&pool->available);
	u64 handle = adapter->rx_scrq[pool->index]->handle;
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_ind_xmit_queue *ind_bufp;
	struct ibmvnic_sub_crq_queue *rx_scrq;
	struct ibmvnic_long_term_buff *ltb;
	union sub_crq *sub_crq;
	int buffers_added = 0;
	unsigned long lpar_rc;
	struct sk_buff *skb;
	unsigned int offset;
	dma_addr_t dma_addr;
	unsigned char *dst;
	int shift = 0;
	int bufidx;
	int i;

	if (!pool->active)
		return;

	rx_scrq = adapter->rx_scrq[pool->index];
	ind_bufp = &rx_scrq->ind_buf;

	/* netdev_alloc_skb() could have failed after we saved a few skbs
	 * in the indir_buf and we would not have sent them to VIOS yet.
	 * To account for them, start the loop at ind_bufp->index rather
	 * than 0. If we pushed all the skbs to VIOS, ind_bufp->index will
	 * be 0.
	 */
	for (i = ind_bufp->index; i < count; ++i) {
		bufidx = pool->free_map[pool->next_free];

		/* We may be reusing the skb from earlier resets. Allocate
		 * only if necessary. But since the LTB may have changed
		 * during reset (see init_rx_pools()), update LTB below
		 * even if reusing skb.
		 */
		skb = pool->rx_buff[bufidx].skb;
		if (!skb) {
			skb = netdev_alloc_skb(adapter->netdev,
					       pool->buff_size);
			if (!skb) {
				dev_err(dev, "Couldn't replenish rx buff\n");
				adapter->replenish_no_mem++;
				break;
			}
		}

		pool->free_map[pool->next_free] = IBMVNIC_INVALID_MAP;
		pool->next_free = (pool->next_free + 1) % pool->size;

		/* Copy the skb to the long term mapped DMA buffer */
		map_rxpool_buf_to_ltb(pool, bufidx, &ltb, &offset);
		dst = ltb->buff + offset;
		memset(dst, 0, pool->buff_size);
		dma_addr = ltb->addr + offset;

		/* add the skb to an rx_buff in the pool */
		pool->rx_buff[bufidx].data = dst;
		pool->rx_buff[bufidx].dma = dma_addr;
		pool->rx_buff[bufidx].skb = skb;
		pool->rx_buff[bufidx].pool_index = pool->index;
		pool->rx_buff[bufidx].size = pool->buff_size;

		/* queue the rx_buff for the next send_subcrq_indirect */
		sub_crq = &ind_bufp->indir_arr[ind_bufp->index++];
		memset(sub_crq, 0, sizeof(*sub_crq));
		sub_crq->rx_add.first = IBMVNIC_CRQ_CMD;
		sub_crq->rx_add.correlator =
		    cpu_to_be64((u64)&pool->rx_buff[bufidx]);
		sub_crq->rx_add.ioba = cpu_to_be32(dma_addr);
		sub_crq->rx_add.map_id = ltb->map_id;

		/* The length field of the sCRQ is defined to be 24 bits so the
		 * buffer size needs to be left shifted by a byte before it is
		 * converted to big endian to prevent the last byte from being
		 * truncated.
		 */
#ifdef __LITTLE_ENDIAN__
		shift = 8;
#endif
		sub_crq->rx_add.len = cpu_to_be32(pool->buff_size << shift);

		/* if send_subcrq_indirect queue is full, flush to VIOS */
		if (ind_bufp->index == IBMVNIC_MAX_IND_DESCS ||
		    i == count - 1) {
			lpar_rc =
				send_subcrq_indirect(adapter, handle,
						     (u64)ind_bufp->indir_dma,
						     (u64)ind_bufp->index);
			if (lpar_rc != H_SUCCESS)
				goto failure;
			buffers_added += ind_bufp->index;
			adapter->replenish_add_buff_success += ind_bufp->index;
			ind_bufp->index = 0;
		}
	}
	atomic_add(buffers_added, &pool->available);
	return;

failure:
	if (lpar_rc != H_PARAMETER && lpar_rc != H_CLOSED)
		dev_err_ratelimited(dev, "rx: replenish packet buffer failed\n");
	for (i = ind_bufp->index - 1; i >= 0; --i) {
		struct ibmvnic_rx_buff *rx_buff;

		pool->next_free = pool->next_free == 0 ?
				  pool->size - 1 : pool->next_free - 1;
		sub_crq = &ind_bufp->indir_arr[i];
		rx_buff = (struct ibmvnic_rx_buff *)
				be64_to_cpu(sub_crq->rx_add.correlator);
		bufidx = (int)(rx_buff - pool->rx_buff);
		pool->free_map[pool->next_free] = bufidx;
		dev_kfree_skb_any(pool->rx_buff[bufidx].skb);
		pool->rx_buff[bufidx].skb = NULL;
	}
	adapter->replenish_add_buff_failure += ind_bufp->index;
	atomic_add(buffers_added, &pool->available);
	ind_bufp->index = 0;
	if (lpar_rc == H_CLOSED || adapter->failover_pending) {
		/* Disable buffer pool replenishment and report carrier off if
		 * queue is closed or pending failover.
		 * Firmware guarantees that a signal will be sent to the
		 * driver, triggering a reset.
502 */ 503 deactivate_rx_pools(adapter); 504 netif_carrier_off(adapter->netdev); 505 } 506 } 507 508 static void replenish_pools(struct ibmvnic_adapter *adapter) 509 { 510 int i; 511 512 adapter->replenish_task_cycles++; 513 for (i = 0; i < adapter->num_active_rx_pools; i++) { 514 if (adapter->rx_pool[i].active) 515 replenish_rx_pool(adapter, &adapter->rx_pool[i]); 516 } 517 518 netdev_dbg(adapter->netdev, "Replenished %d pools\n", i); 519 } 520 521 static void release_stats_buffers(struct ibmvnic_adapter *adapter) 522 { 523 kfree(adapter->tx_stats_buffers); 524 kfree(adapter->rx_stats_buffers); 525 adapter->tx_stats_buffers = NULL; 526 adapter->rx_stats_buffers = NULL; 527 } 528 529 static int init_stats_buffers(struct ibmvnic_adapter *adapter) 530 { 531 adapter->tx_stats_buffers = 532 kcalloc(IBMVNIC_MAX_QUEUES, 533 sizeof(struct ibmvnic_tx_queue_stats), 534 GFP_KERNEL); 535 if (!adapter->tx_stats_buffers) 536 return -ENOMEM; 537 538 adapter->rx_stats_buffers = 539 kcalloc(IBMVNIC_MAX_QUEUES, 540 sizeof(struct ibmvnic_rx_queue_stats), 541 GFP_KERNEL); 542 if (!adapter->rx_stats_buffers) 543 return -ENOMEM; 544 545 return 0; 546 } 547 548 static void release_stats_token(struct ibmvnic_adapter *adapter) 549 { 550 struct device *dev = &adapter->vdev->dev; 551 552 if (!adapter->stats_token) 553 return; 554 555 dma_unmap_single(dev, adapter->stats_token, 556 sizeof(struct ibmvnic_statistics), 557 DMA_FROM_DEVICE); 558 adapter->stats_token = 0; 559 } 560 561 static int init_stats_token(struct ibmvnic_adapter *adapter) 562 { 563 struct device *dev = &adapter->vdev->dev; 564 dma_addr_t stok; 565 int rc; 566 567 stok = dma_map_single(dev, &adapter->stats, 568 sizeof(struct ibmvnic_statistics), 569 DMA_FROM_DEVICE); 570 rc = dma_mapping_error(dev, stok); 571 if (rc) { 572 dev_err(dev, "Couldn't map stats buffer, rc = %d\n", rc); 573 return rc; 574 } 575 576 adapter->stats_token = stok; 577 netdev_dbg(adapter->netdev, "Stats token initialized (%llx)\n", stok); 578 return 0; 579 } 580 581 /** 582 * release_rx_pools() - Release any rx pools attached to @adapter. 583 * @adapter: ibmvnic adapter 584 * 585 * Safe to call this multiple times - even if no pools are attached. 586 */ 587 static void release_rx_pools(struct ibmvnic_adapter *adapter) 588 { 589 struct ibmvnic_rx_pool *rx_pool; 590 int i, j; 591 592 if (!adapter->rx_pool) 593 return; 594 595 for (i = 0; i < adapter->num_active_rx_pools; i++) { 596 rx_pool = &adapter->rx_pool[i]; 597 598 netdev_dbg(adapter->netdev, "Releasing rx_pool[%d]\n", i); 599 600 kfree(rx_pool->free_map); 601 602 free_long_term_buff(adapter, &rx_pool->long_term_buff); 603 604 if (!rx_pool->rx_buff) 605 continue; 606 607 for (j = 0; j < rx_pool->size; j++) { 608 if (rx_pool->rx_buff[j].skb) { 609 dev_kfree_skb_any(rx_pool->rx_buff[j].skb); 610 rx_pool->rx_buff[j].skb = NULL; 611 } 612 } 613 614 kfree(rx_pool->rx_buff); 615 } 616 617 kfree(adapter->rx_pool); 618 adapter->rx_pool = NULL; 619 adapter->num_active_rx_pools = 0; 620 adapter->prev_rx_pool_size = 0; 621 } 622 623 /** 624 * reuse_rx_pools() - Check if the existing rx pools can be reused. 625 * @adapter: ibmvnic adapter 626 * 627 * Check if the existing rx pools in the adapter can be reused. The 628 * pools can be reused if the pool parameters (number of pools, 629 * number of buffers in the pool and size of each buffer) have not 630 * changed. 631 * 632 * NOTE: This assumes that all pools have the same number of buffers 633 * which is the case currently. If that changes, we must fix this. 
634 * 635 * Return: true if the rx pools can be reused, false otherwise. 636 */ 637 static bool reuse_rx_pools(struct ibmvnic_adapter *adapter) 638 { 639 u64 old_num_pools, new_num_pools; 640 u64 old_pool_size, new_pool_size; 641 u64 old_buff_size, new_buff_size; 642 643 if (!adapter->rx_pool) 644 return false; 645 646 old_num_pools = adapter->num_active_rx_pools; 647 new_num_pools = adapter->req_rx_queues; 648 649 old_pool_size = adapter->prev_rx_pool_size; 650 new_pool_size = adapter->req_rx_add_entries_per_subcrq; 651 652 old_buff_size = adapter->prev_rx_buf_sz; 653 new_buff_size = adapter->cur_rx_buf_sz; 654 655 if (old_buff_size != new_buff_size || 656 old_num_pools != new_num_pools || 657 old_pool_size != new_pool_size) 658 return false; 659 660 return true; 661 } 662 663 /** 664 * init_rx_pools(): Initialize the set of receiver pools in the adapter. 665 * @netdev: net device associated with the vnic interface 666 * 667 * Initialize the set of receiver pools in the ibmvnic adapter associated 668 * with the net_device @netdev. If possible, reuse the existing rx pools. 669 * Otherwise free any existing pools and allocate a new set of pools 670 * before initializing them. 671 * 672 * Return: 0 on success and negative value on error. 673 */ 674 static int init_rx_pools(struct net_device *netdev) 675 { 676 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 677 struct device *dev = &adapter->vdev->dev; 678 struct ibmvnic_rx_pool *rx_pool; 679 u64 num_pools; 680 u64 pool_size; /* # of buffers in one pool */ 681 u64 buff_size; 682 int i, j, rc; 683 684 pool_size = adapter->req_rx_add_entries_per_subcrq; 685 num_pools = adapter->req_rx_queues; 686 buff_size = adapter->cur_rx_buf_sz; 687 688 if (reuse_rx_pools(adapter)) { 689 dev_dbg(dev, "Reusing rx pools\n"); 690 goto update_ltb; 691 } 692 693 /* Allocate/populate the pools. */ 694 release_rx_pools(adapter); 695 696 adapter->rx_pool = kcalloc(num_pools, 697 sizeof(struct ibmvnic_rx_pool), 698 GFP_KERNEL); 699 if (!adapter->rx_pool) { 700 dev_err(dev, "Failed to allocate rx pools\n"); 701 return -ENOMEM; 702 } 703 704 /* Set num_active_rx_pools early. If we fail below after partial 705 * allocation, release_rx_pools() will know how many to look for. 
706 */ 707 adapter->num_active_rx_pools = num_pools; 708 709 for (i = 0; i < num_pools; i++) { 710 rx_pool = &adapter->rx_pool[i]; 711 712 netdev_dbg(adapter->netdev, 713 "Initializing rx_pool[%d], %lld buffs, %lld bytes each\n", 714 i, pool_size, buff_size); 715 716 rx_pool->size = pool_size; 717 rx_pool->index = i; 718 rx_pool->buff_size = ALIGN(buff_size, L1_CACHE_BYTES); 719 720 rx_pool->free_map = kcalloc(rx_pool->size, sizeof(int), 721 GFP_KERNEL); 722 if (!rx_pool->free_map) { 723 dev_err(dev, "Couldn't alloc free_map %d\n", i); 724 rc = -ENOMEM; 725 goto out_release; 726 } 727 728 rx_pool->rx_buff = kcalloc(rx_pool->size, 729 sizeof(struct ibmvnic_rx_buff), 730 GFP_KERNEL); 731 if (!rx_pool->rx_buff) { 732 dev_err(dev, "Couldn't alloc rx buffers\n"); 733 rc = -ENOMEM; 734 goto out_release; 735 } 736 } 737 738 adapter->prev_rx_pool_size = pool_size; 739 adapter->prev_rx_buf_sz = adapter->cur_rx_buf_sz; 740 741 update_ltb: 742 for (i = 0; i < num_pools; i++) { 743 rx_pool = &adapter->rx_pool[i]; 744 dev_dbg(dev, "Updating LTB for rx pool %d [%d, %d]\n", 745 i, rx_pool->size, rx_pool->buff_size); 746 747 rc = alloc_long_term_buff(adapter, &rx_pool->long_term_buff, 748 rx_pool->size * rx_pool->buff_size); 749 if (rc) 750 goto out; 751 752 for (j = 0; j < rx_pool->size; ++j) { 753 struct ibmvnic_rx_buff *rx_buff; 754 755 rx_pool->free_map[j] = j; 756 757 /* NOTE: Don't clear rx_buff->skb here - will leak 758 * memory! replenish_rx_pool() will reuse skbs or 759 * allocate as necessary. 760 */ 761 rx_buff = &rx_pool->rx_buff[j]; 762 rx_buff->dma = 0; 763 rx_buff->data = 0; 764 rx_buff->size = 0; 765 rx_buff->pool_index = 0; 766 } 767 768 /* Mark pool "empty" so replenish_rx_pools() will 769 * update the LTB info for each buffer 770 */ 771 atomic_set(&rx_pool->available, 0); 772 rx_pool->next_alloc = 0; 773 rx_pool->next_free = 0; 774 /* replenish_rx_pool() may have called deactivate_rx_pools() 775 * on failover. Ensure pool is active now. 776 */ 777 rx_pool->active = 1; 778 } 779 return 0; 780 out_release: 781 release_rx_pools(adapter); 782 out: 783 /* We failed to allocate one or more LTBs or map them on the VIOS. 784 * Hold onto the pools and any LTBs that we did allocate/map. 785 */ 786 return rc; 787 } 788 789 static void release_vpd_data(struct ibmvnic_adapter *adapter) 790 { 791 if (!adapter->vpd) 792 return; 793 794 kfree(adapter->vpd->buff); 795 kfree(adapter->vpd); 796 797 adapter->vpd = NULL; 798 } 799 800 static void release_one_tx_pool(struct ibmvnic_adapter *adapter, 801 struct ibmvnic_tx_pool *tx_pool) 802 { 803 kfree(tx_pool->tx_buff); 804 kfree(tx_pool->free_map); 805 free_long_term_buff(adapter, &tx_pool->long_term_buff); 806 } 807 808 /** 809 * release_tx_pools() - Release any tx pools attached to @adapter. 810 * @adapter: ibmvnic adapter 811 * 812 * Safe to call this multiple times - even if no pools are attached. 813 */ 814 static void release_tx_pools(struct ibmvnic_adapter *adapter) 815 { 816 int i; 817 818 /* init_tx_pools() ensures that ->tx_pool and ->tso_pool are 819 * both NULL or both non-NULL. So we only need to check one. 
820 */ 821 if (!adapter->tx_pool) 822 return; 823 824 for (i = 0; i < adapter->num_active_tx_pools; i++) { 825 release_one_tx_pool(adapter, &adapter->tx_pool[i]); 826 release_one_tx_pool(adapter, &adapter->tso_pool[i]); 827 } 828 829 kfree(adapter->tx_pool); 830 adapter->tx_pool = NULL; 831 kfree(adapter->tso_pool); 832 adapter->tso_pool = NULL; 833 adapter->num_active_tx_pools = 0; 834 adapter->prev_tx_pool_size = 0; 835 } 836 837 static int init_one_tx_pool(struct net_device *netdev, 838 struct ibmvnic_tx_pool *tx_pool, 839 int pool_size, int buf_size) 840 { 841 int i; 842 843 tx_pool->tx_buff = kcalloc(pool_size, 844 sizeof(struct ibmvnic_tx_buff), 845 GFP_KERNEL); 846 if (!tx_pool->tx_buff) 847 return -ENOMEM; 848 849 tx_pool->free_map = kcalloc(pool_size, sizeof(int), GFP_KERNEL); 850 if (!tx_pool->free_map) { 851 kfree(tx_pool->tx_buff); 852 tx_pool->tx_buff = NULL; 853 return -ENOMEM; 854 } 855 856 for (i = 0; i < pool_size; i++) 857 tx_pool->free_map[i] = i; 858 859 tx_pool->consumer_index = 0; 860 tx_pool->producer_index = 0; 861 tx_pool->num_buffers = pool_size; 862 tx_pool->buf_size = buf_size; 863 864 return 0; 865 } 866 867 /** 868 * reuse_tx_pools() - Check if the existing tx pools can be reused. 869 * @adapter: ibmvnic adapter 870 * 871 * Check if the existing tx pools in the adapter can be reused. The 872 * pools can be reused if the pool parameters (number of pools, 873 * number of buffers in the pool and mtu) have not changed. 874 * 875 * NOTE: This assumes that all pools have the same number of buffers 876 * which is the case currently. If that changes, we must fix this. 877 * 878 * Return: true if the tx pools can be reused, false otherwise. 879 */ 880 static bool reuse_tx_pools(struct ibmvnic_adapter *adapter) 881 { 882 u64 old_num_pools, new_num_pools; 883 u64 old_pool_size, new_pool_size; 884 u64 old_mtu, new_mtu; 885 886 if (!adapter->tx_pool) 887 return false; 888 889 old_num_pools = adapter->num_active_tx_pools; 890 new_num_pools = adapter->num_active_tx_scrqs; 891 old_pool_size = adapter->prev_tx_pool_size; 892 new_pool_size = adapter->req_tx_entries_per_subcrq; 893 old_mtu = adapter->prev_mtu; 894 new_mtu = adapter->req_mtu; 895 896 if (old_mtu != new_mtu || 897 old_num_pools != new_num_pools || 898 old_pool_size != new_pool_size) 899 return false; 900 901 return true; 902 } 903 904 /** 905 * init_tx_pools(): Initialize the set of transmit pools in the adapter. 906 * @netdev: net device associated with the vnic interface 907 * 908 * Initialize the set of transmit pools in the ibmvnic adapter associated 909 * with the net_device @netdev. If possible, reuse the existing tx pools. 910 * Otherwise free any existing pools and allocate a new set of pools 911 * before initializing them. 912 * 913 * Return: 0 on success and negative value on error. 914 */ 915 static int init_tx_pools(struct net_device *netdev) 916 { 917 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 918 struct device *dev = &adapter->vdev->dev; 919 int num_pools; 920 u64 pool_size; /* # of buffers in pool */ 921 u64 buff_size; 922 int i, j, rc; 923 924 num_pools = adapter->req_tx_queues; 925 926 /* We must notify the VIOS about the LTB on all resets - but we only 927 * need to alloc/populate pools if either the number of buffers or 928 * size of each buffer in the pool has changed. 929 */ 930 if (reuse_tx_pools(adapter)) { 931 netdev_dbg(netdev, "Reusing tx pools\n"); 932 goto update_ltb; 933 } 934 935 /* Allocate/populate the pools. 
*/ 936 release_tx_pools(adapter); 937 938 pool_size = adapter->req_tx_entries_per_subcrq; 939 num_pools = adapter->num_active_tx_scrqs; 940 941 adapter->tx_pool = kcalloc(num_pools, 942 sizeof(struct ibmvnic_tx_pool), GFP_KERNEL); 943 if (!adapter->tx_pool) 944 return -ENOMEM; 945 946 adapter->tso_pool = kcalloc(num_pools, 947 sizeof(struct ibmvnic_tx_pool), GFP_KERNEL); 948 /* To simplify release_tx_pools() ensure that ->tx_pool and 949 * ->tso_pool are either both NULL or both non-NULL. 950 */ 951 if (!adapter->tso_pool) { 952 kfree(adapter->tx_pool); 953 adapter->tx_pool = NULL; 954 return -ENOMEM; 955 } 956 957 /* Set num_active_tx_pools early. If we fail below after partial 958 * allocation, release_tx_pools() will know how many to look for. 959 */ 960 adapter->num_active_tx_pools = num_pools; 961 962 buff_size = adapter->req_mtu + VLAN_HLEN; 963 buff_size = ALIGN(buff_size, L1_CACHE_BYTES); 964 965 for (i = 0; i < num_pools; i++) { 966 dev_dbg(dev, "Init tx pool %d [%llu, %llu]\n", 967 i, adapter->req_tx_entries_per_subcrq, buff_size); 968 969 rc = init_one_tx_pool(netdev, &adapter->tx_pool[i], 970 pool_size, buff_size); 971 if (rc) 972 goto out_release; 973 974 rc = init_one_tx_pool(netdev, &adapter->tso_pool[i], 975 IBMVNIC_TSO_BUFS, 976 IBMVNIC_TSO_BUF_SZ); 977 if (rc) 978 goto out_release; 979 } 980 981 adapter->prev_tx_pool_size = pool_size; 982 adapter->prev_mtu = adapter->req_mtu; 983 984 update_ltb: 985 /* NOTE: All tx_pools have the same number of buffers (which is 986 * same as pool_size). All tso_pools have IBMVNIC_TSO_BUFS 987 * buffers (see calls init_one_tx_pool() for these). 988 * For consistency, we use tx_pool->num_buffers and 989 * tso_pool->num_buffers below. 990 */ 991 rc = -1; 992 for (i = 0; i < num_pools; i++) { 993 struct ibmvnic_tx_pool *tso_pool; 994 struct ibmvnic_tx_pool *tx_pool; 995 u32 ltb_size; 996 997 tx_pool = &adapter->tx_pool[i]; 998 ltb_size = tx_pool->num_buffers * tx_pool->buf_size; 999 if (alloc_long_term_buff(adapter, &tx_pool->long_term_buff, 1000 ltb_size)) 1001 goto out; 1002 1003 dev_dbg(dev, "Updated LTB for tx pool %d [%p, %d, %d]\n", 1004 i, tx_pool->long_term_buff.buff, 1005 tx_pool->num_buffers, tx_pool->buf_size); 1006 1007 tx_pool->consumer_index = 0; 1008 tx_pool->producer_index = 0; 1009 1010 for (j = 0; j < tx_pool->num_buffers; j++) 1011 tx_pool->free_map[j] = j; 1012 1013 tso_pool = &adapter->tso_pool[i]; 1014 ltb_size = tso_pool->num_buffers * tso_pool->buf_size; 1015 if (alloc_long_term_buff(adapter, &tso_pool->long_term_buff, 1016 ltb_size)) 1017 goto out; 1018 1019 dev_dbg(dev, "Updated LTB for tso pool %d [%p, %d, %d]\n", 1020 i, tso_pool->long_term_buff.buff, 1021 tso_pool->num_buffers, tso_pool->buf_size); 1022 1023 tso_pool->consumer_index = 0; 1024 tso_pool->producer_index = 0; 1025 1026 for (j = 0; j < tso_pool->num_buffers; j++) 1027 tso_pool->free_map[j] = j; 1028 } 1029 1030 return 0; 1031 out_release: 1032 release_tx_pools(adapter); 1033 out: 1034 /* We failed to allocate one or more LTBs or map them on the VIOS. 1035 * Hold onto the pools and any LTBs that we did allocate/map. 
1036 */ 1037 return rc; 1038 } 1039 1040 static void ibmvnic_napi_enable(struct ibmvnic_adapter *adapter) 1041 { 1042 int i; 1043 1044 if (adapter->napi_enabled) 1045 return; 1046 1047 for (i = 0; i < adapter->req_rx_queues; i++) 1048 napi_enable(&adapter->napi[i]); 1049 1050 adapter->napi_enabled = true; 1051 } 1052 1053 static void ibmvnic_napi_disable(struct ibmvnic_adapter *adapter) 1054 { 1055 int i; 1056 1057 if (!adapter->napi_enabled) 1058 return; 1059 1060 for (i = 0; i < adapter->req_rx_queues; i++) { 1061 netdev_dbg(adapter->netdev, "Disabling napi[%d]\n", i); 1062 napi_disable(&adapter->napi[i]); 1063 } 1064 1065 adapter->napi_enabled = false; 1066 } 1067 1068 static int init_napi(struct ibmvnic_adapter *adapter) 1069 { 1070 int i; 1071 1072 adapter->napi = kcalloc(adapter->req_rx_queues, 1073 sizeof(struct napi_struct), GFP_KERNEL); 1074 if (!adapter->napi) 1075 return -ENOMEM; 1076 1077 for (i = 0; i < adapter->req_rx_queues; i++) { 1078 netdev_dbg(adapter->netdev, "Adding napi[%d]\n", i); 1079 netif_napi_add(adapter->netdev, &adapter->napi[i], 1080 ibmvnic_poll, NAPI_POLL_WEIGHT); 1081 } 1082 1083 adapter->num_active_rx_napi = adapter->req_rx_queues; 1084 return 0; 1085 } 1086 1087 static void release_napi(struct ibmvnic_adapter *adapter) 1088 { 1089 int i; 1090 1091 if (!adapter->napi) 1092 return; 1093 1094 for (i = 0; i < adapter->num_active_rx_napi; i++) { 1095 netdev_dbg(adapter->netdev, "Releasing napi[%d]\n", i); 1096 netif_napi_del(&adapter->napi[i]); 1097 } 1098 1099 kfree(adapter->napi); 1100 adapter->napi = NULL; 1101 adapter->num_active_rx_napi = 0; 1102 adapter->napi_enabled = false; 1103 } 1104 1105 static const char *adapter_state_to_string(enum vnic_state state) 1106 { 1107 switch (state) { 1108 case VNIC_PROBING: 1109 return "PROBING"; 1110 case VNIC_PROBED: 1111 return "PROBED"; 1112 case VNIC_OPENING: 1113 return "OPENING"; 1114 case VNIC_OPEN: 1115 return "OPEN"; 1116 case VNIC_CLOSING: 1117 return "CLOSING"; 1118 case VNIC_CLOSED: 1119 return "CLOSED"; 1120 case VNIC_REMOVING: 1121 return "REMOVING"; 1122 case VNIC_REMOVED: 1123 return "REMOVED"; 1124 case VNIC_DOWN: 1125 return "DOWN"; 1126 } 1127 return "UNKNOWN"; 1128 } 1129 1130 static int ibmvnic_login(struct net_device *netdev) 1131 { 1132 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 1133 unsigned long timeout = msecs_to_jiffies(20000); 1134 int retry_count = 0; 1135 int retries = 10; 1136 bool retry; 1137 int rc; 1138 1139 do { 1140 retry = false; 1141 if (retry_count > retries) { 1142 netdev_warn(netdev, "Login attempts exceeded\n"); 1143 return -EACCES; 1144 } 1145 1146 adapter->init_done_rc = 0; 1147 reinit_completion(&adapter->init_done); 1148 rc = send_login(adapter); 1149 if (rc) 1150 return rc; 1151 1152 if (!wait_for_completion_timeout(&adapter->init_done, 1153 timeout)) { 1154 netdev_warn(netdev, "Login timed out, retrying...\n"); 1155 retry = true; 1156 adapter->init_done_rc = 0; 1157 retry_count++; 1158 continue; 1159 } 1160 1161 if (adapter->init_done_rc == ABORTED) { 1162 netdev_warn(netdev, "Login aborted, retrying...\n"); 1163 retry = true; 1164 adapter->init_done_rc = 0; 1165 retry_count++; 1166 /* FW or device may be busy, so 1167 * wait a bit before retrying login 1168 */ 1169 msleep(500); 1170 } else if (adapter->init_done_rc == PARTIALSUCCESS) { 1171 retry_count++; 1172 release_sub_crqs(adapter, 1); 1173 1174 retry = true; 1175 netdev_dbg(netdev, 1176 "Received partial success, retrying...\n"); 1177 adapter->init_done_rc = 0; 1178 
			reinit_completion(&adapter->init_done);
			send_query_cap(adapter);
			if (!wait_for_completion_timeout(&adapter->init_done,
							 timeout)) {
				netdev_warn(netdev,
					    "Capabilities query timed out\n");
				return -ETIMEDOUT;
			}

			rc = init_sub_crqs(adapter);
			if (rc) {
				netdev_warn(netdev,
					    "SCRQ initialization failed\n");
				return rc;
			}

			rc = init_sub_crq_irqs(adapter);
			if (rc) {
				netdev_warn(netdev,
					    "SCRQ irq initialization failed\n");
				return rc;
			}
		} else if (adapter->init_done_rc) {
			netdev_warn(netdev, "Adapter login failed, init_done_rc = %d\n",
				    adapter->init_done_rc);
			return -EIO;
		}
	} while (retry);

	__ibmvnic_set_mac(netdev, adapter->mac_addr);

	netdev_dbg(netdev, "[S:%s] Login succeeded\n", adapter_state_to_string(adapter->state));
	return 0;
}

static void release_login_buffer(struct ibmvnic_adapter *adapter)
{
	kfree(adapter->login_buf);
	adapter->login_buf = NULL;
}

static void release_login_rsp_buffer(struct ibmvnic_adapter *adapter)
{
	kfree(adapter->login_rsp_buf);
	adapter->login_rsp_buf = NULL;
}

static void release_resources(struct ibmvnic_adapter *adapter)
{
	release_vpd_data(adapter);

	release_napi(adapter);
	release_login_buffer(adapter);
	release_login_rsp_buffer(adapter);
}

static int set_link_state(struct ibmvnic_adapter *adapter, u8 link_state)
{
	struct net_device *netdev = adapter->netdev;
	unsigned long timeout = msecs_to_jiffies(20000);
	union ibmvnic_crq crq;
	bool resend;
	int rc;

	netdev_dbg(netdev, "setting link state %d\n", link_state);

	memset(&crq, 0, sizeof(crq));
	crq.logical_link_state.first = IBMVNIC_CRQ_CMD;
	crq.logical_link_state.cmd = LOGICAL_LINK_STATE;
	crq.logical_link_state.link_state = link_state;

	do {
		resend = false;

		reinit_completion(&adapter->init_done);
		rc = ibmvnic_send_crq(adapter, &crq);
		if (rc) {
			netdev_err(netdev, "Failed to set link state\n");
			return rc;
		}

		if (!wait_for_completion_timeout(&adapter->init_done,
						 timeout)) {
			netdev_err(netdev, "timeout setting link state\n");
			return -ETIMEDOUT;
		}

		if (adapter->init_done_rc == PARTIALSUCCESS) {
			/* Partial success, delay and re-send */
			mdelay(1000);
			resend = true;
		} else if (adapter->init_done_rc) {
			netdev_warn(netdev, "Unable to set link state, rc=%d\n",
				    adapter->init_done_rc);
			return adapter->init_done_rc;
		}
	} while (resend);

	return 0;
}

static int set_real_num_queues(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int rc;

	netdev_dbg(netdev, "Setting real tx/rx queues (%llx/%llx)\n",
		   adapter->req_tx_queues, adapter->req_rx_queues);

	rc = netif_set_real_num_tx_queues(netdev, adapter->req_tx_queues);
	if (rc) {
		netdev_err(netdev, "failed to set the number of tx queues\n");
		return rc;
	}

	rc = netif_set_real_num_rx_queues(netdev, adapter->req_rx_queues);
	if (rc)
		netdev_err(netdev, "failed to set the number of rx queues\n");

	return rc;
}

static int ibmvnic_get_vpd(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	union ibmvnic_crq crq;
	int len = 0;
	int rc;

	if (adapter->vpd->buff)
		len = adapter->vpd->len;

	mutex_lock(&adapter->fw_lock);
	adapter->fw_done_rc = 0;
	reinit_completion(&adapter->fw_done);

	crq.get_vpd_size.first = IBMVNIC_CRQ_CMD;
	crq.get_vpd_size.cmd = GET_VPD_SIZE;
	rc = ibmvnic_send_crq(adapter, &crq);
	if (rc) {
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}

	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
	if (rc) {
		dev_err(dev, "Could not retrieve VPD size, rc = %d\n", rc);
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}
	mutex_unlock(&adapter->fw_lock);

	if (!adapter->vpd->len)
		return -ENODATA;

	if (!adapter->vpd->buff)
		adapter->vpd->buff = kzalloc(adapter->vpd->len, GFP_KERNEL);
	else if (adapter->vpd->len != len)
		adapter->vpd->buff =
			krealloc(adapter->vpd->buff,
				 adapter->vpd->len, GFP_KERNEL);

	if (!adapter->vpd->buff) {
		dev_err(dev, "Could not allocate VPD buffer\n");
		return -ENOMEM;
	}

	adapter->vpd->dma_addr =
		dma_map_single(dev, adapter->vpd->buff, adapter->vpd->len,
			       DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, adapter->vpd->dma_addr)) {
		dev_err(dev, "Could not map VPD buffer\n");
		kfree(adapter->vpd->buff);
		adapter->vpd->buff = NULL;
		return -ENOMEM;
	}

	mutex_lock(&adapter->fw_lock);
	adapter->fw_done_rc = 0;
	reinit_completion(&adapter->fw_done);

	crq.get_vpd.first = IBMVNIC_CRQ_CMD;
	crq.get_vpd.cmd = GET_VPD;
	crq.get_vpd.ioba = cpu_to_be32(adapter->vpd->dma_addr);
	crq.get_vpd.len = cpu_to_be32((u32)adapter->vpd->len);
	rc = ibmvnic_send_crq(adapter, &crq);
	if (rc) {
		kfree(adapter->vpd->buff);
		adapter->vpd->buff = NULL;
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}

	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
	if (rc) {
		dev_err(dev, "Unable to retrieve VPD, rc = %d\n", rc);
		kfree(adapter->vpd->buff);
		adapter->vpd->buff = NULL;
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}

	mutex_unlock(&adapter->fw_lock);
	return 0;
}

static int init_resources(struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int rc;

	rc = set_real_num_queues(netdev);
	if (rc)
		return rc;

	adapter->vpd = kzalloc(sizeof(*adapter->vpd), GFP_KERNEL);
	if (!adapter->vpd)
		return -ENOMEM;

	/* Vital Product Data (VPD) */
	rc = ibmvnic_get_vpd(adapter);
	if (rc) {
		netdev_err(netdev, "failed to initialize Vital Product Data (VPD)\n");
		return rc;
	}

	rc = init_napi(adapter);
	if (rc)
		return rc;

	send_query_map(adapter);

	rc = init_rx_pools(netdev);
	if (rc)
		return rc;

	rc = init_tx_pools(netdev);
	return rc;
}

static int __ibmvnic_open(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	enum vnic_state prev_state = adapter->state;
	int i, rc;

	adapter->state = VNIC_OPENING;
	replenish_pools(adapter);
	ibmvnic_napi_enable(adapter);

	/* We're ready to receive frames, enable the sub-crq interrupts and
	 * set the logical link state to up
	 */
	for (i = 0; i < adapter->req_rx_queues; i++) {
		netdev_dbg(netdev, "Enabling rx_scrq[%d] irq\n", i);
		if (prev_state == VNIC_CLOSED)
enable_irq(adapter->rx_scrq[i]->irq); 1435 enable_scrq_irq(adapter, adapter->rx_scrq[i]); 1436 } 1437 1438 for (i = 0; i < adapter->req_tx_queues; i++) { 1439 netdev_dbg(netdev, "Enabling tx_scrq[%d] irq\n", i); 1440 if (prev_state == VNIC_CLOSED) 1441 enable_irq(adapter->tx_scrq[i]->irq); 1442 enable_scrq_irq(adapter, adapter->tx_scrq[i]); 1443 netdev_tx_reset_queue(netdev_get_tx_queue(netdev, i)); 1444 } 1445 1446 rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_UP); 1447 if (rc) { 1448 ibmvnic_napi_disable(adapter); 1449 ibmvnic_disable_irqs(adapter); 1450 return rc; 1451 } 1452 1453 adapter->tx_queues_active = true; 1454 1455 /* Since queues were stopped until now, there shouldn't be any 1456 * one in ibmvnic_complete_tx() or ibmvnic_xmit() so maybe we 1457 * don't need the synchronize_rcu()? Leaving it for consistency 1458 * with setting ->tx_queues_active = false. 1459 */ 1460 synchronize_rcu(); 1461 1462 netif_tx_start_all_queues(netdev); 1463 1464 if (prev_state == VNIC_CLOSED) { 1465 for (i = 0; i < adapter->req_rx_queues; i++) 1466 napi_schedule(&adapter->napi[i]); 1467 } 1468 1469 adapter->state = VNIC_OPEN; 1470 return rc; 1471 } 1472 1473 static int ibmvnic_open(struct net_device *netdev) 1474 { 1475 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 1476 int rc; 1477 1478 ASSERT_RTNL(); 1479 1480 /* If device failover is pending or we are about to reset, just set 1481 * device state and return. Device operation will be handled by reset 1482 * routine. 1483 * 1484 * It should be safe to overwrite the adapter->state here. Since 1485 * we hold the rtnl, either the reset has not actually started or 1486 * the rtnl got dropped during the set_link_state() in do_reset(). 1487 * In the former case, no one else is changing the state (again we 1488 * have the rtnl) and in the latter case, do_reset() will detect and 1489 * honor our setting below. 1490 */ 1491 if (adapter->failover_pending || (test_bit(0, &adapter->resetting))) { 1492 netdev_dbg(netdev, "[S:%s FOP:%d] Resetting, deferring open\n", 1493 adapter_state_to_string(adapter->state), 1494 adapter->failover_pending); 1495 adapter->state = VNIC_OPEN; 1496 rc = 0; 1497 goto out; 1498 } 1499 1500 if (adapter->state != VNIC_CLOSED) { 1501 rc = ibmvnic_login(netdev); 1502 if (rc) 1503 goto out; 1504 1505 rc = init_resources(adapter); 1506 if (rc) { 1507 netdev_err(netdev, "failed to initialize resources\n"); 1508 goto out; 1509 } 1510 } 1511 1512 rc = __ibmvnic_open(netdev); 1513 1514 out: 1515 /* If open failed and there is a pending failover or in-progress reset, 1516 * set device state and return. Device operation will be handled by 1517 * reset routine. See also comments above regarding rtnl. 
1518 */ 1519 if (rc && 1520 (adapter->failover_pending || (test_bit(0, &adapter->resetting)))) { 1521 adapter->state = VNIC_OPEN; 1522 rc = 0; 1523 } 1524 1525 if (rc) { 1526 release_resources(adapter); 1527 release_rx_pools(adapter); 1528 release_tx_pools(adapter); 1529 } 1530 1531 return rc; 1532 } 1533 1534 static void clean_rx_pools(struct ibmvnic_adapter *adapter) 1535 { 1536 struct ibmvnic_rx_pool *rx_pool; 1537 struct ibmvnic_rx_buff *rx_buff; 1538 u64 rx_entries; 1539 int rx_scrqs; 1540 int i, j; 1541 1542 if (!adapter->rx_pool) 1543 return; 1544 1545 rx_scrqs = adapter->num_active_rx_pools; 1546 rx_entries = adapter->req_rx_add_entries_per_subcrq; 1547 1548 /* Free any remaining skbs in the rx buffer pools */ 1549 for (i = 0; i < rx_scrqs; i++) { 1550 rx_pool = &adapter->rx_pool[i]; 1551 if (!rx_pool || !rx_pool->rx_buff) 1552 continue; 1553 1554 netdev_dbg(adapter->netdev, "Cleaning rx_pool[%d]\n", i); 1555 for (j = 0; j < rx_entries; j++) { 1556 rx_buff = &rx_pool->rx_buff[j]; 1557 if (rx_buff && rx_buff->skb) { 1558 dev_kfree_skb_any(rx_buff->skb); 1559 rx_buff->skb = NULL; 1560 } 1561 } 1562 } 1563 } 1564 1565 static void clean_one_tx_pool(struct ibmvnic_adapter *adapter, 1566 struct ibmvnic_tx_pool *tx_pool) 1567 { 1568 struct ibmvnic_tx_buff *tx_buff; 1569 u64 tx_entries; 1570 int i; 1571 1572 if (!tx_pool || !tx_pool->tx_buff) 1573 return; 1574 1575 tx_entries = tx_pool->num_buffers; 1576 1577 for (i = 0; i < tx_entries; i++) { 1578 tx_buff = &tx_pool->tx_buff[i]; 1579 if (tx_buff && tx_buff->skb) { 1580 dev_kfree_skb_any(tx_buff->skb); 1581 tx_buff->skb = NULL; 1582 } 1583 } 1584 } 1585 1586 static void clean_tx_pools(struct ibmvnic_adapter *adapter) 1587 { 1588 int tx_scrqs; 1589 int i; 1590 1591 if (!adapter->tx_pool || !adapter->tso_pool) 1592 return; 1593 1594 tx_scrqs = adapter->num_active_tx_pools; 1595 1596 /* Free any remaining skbs in the tx buffer pools */ 1597 for (i = 0; i < tx_scrqs; i++) { 1598 netdev_dbg(adapter->netdev, "Cleaning tx_pool[%d]\n", i); 1599 clean_one_tx_pool(adapter, &adapter->tx_pool[i]); 1600 clean_one_tx_pool(adapter, &adapter->tso_pool[i]); 1601 } 1602 } 1603 1604 static void ibmvnic_disable_irqs(struct ibmvnic_adapter *adapter) 1605 { 1606 struct net_device *netdev = adapter->netdev; 1607 int i; 1608 1609 if (adapter->tx_scrq) { 1610 for (i = 0; i < adapter->req_tx_queues; i++) 1611 if (adapter->tx_scrq[i]->irq) { 1612 netdev_dbg(netdev, 1613 "Disabling tx_scrq[%d] irq\n", i); 1614 disable_scrq_irq(adapter, adapter->tx_scrq[i]); 1615 disable_irq(adapter->tx_scrq[i]->irq); 1616 } 1617 } 1618 1619 if (adapter->rx_scrq) { 1620 for (i = 0; i < adapter->req_rx_queues; i++) { 1621 if (adapter->rx_scrq[i]->irq) { 1622 netdev_dbg(netdev, 1623 "Disabling rx_scrq[%d] irq\n", i); 1624 disable_scrq_irq(adapter, adapter->rx_scrq[i]); 1625 disable_irq(adapter->rx_scrq[i]->irq); 1626 } 1627 } 1628 } 1629 } 1630 1631 static void ibmvnic_cleanup(struct net_device *netdev) 1632 { 1633 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 1634 1635 /* ensure that transmissions are stopped if called by do_reset */ 1636 1637 adapter->tx_queues_active = false; 1638 1639 /* Ensure complete_tx() and ibmvnic_xmit() see ->tx_queues_active 1640 * update so they don't restart a queue after we stop it below. 
1641 */ 1642 synchronize_rcu(); 1643 1644 if (test_bit(0, &adapter->resetting)) 1645 netif_tx_disable(netdev); 1646 else 1647 netif_tx_stop_all_queues(netdev); 1648 1649 ibmvnic_napi_disable(adapter); 1650 ibmvnic_disable_irqs(adapter); 1651 } 1652 1653 static int __ibmvnic_close(struct net_device *netdev) 1654 { 1655 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 1656 int rc = 0; 1657 1658 adapter->state = VNIC_CLOSING; 1659 rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN); 1660 adapter->state = VNIC_CLOSED; 1661 return rc; 1662 } 1663 1664 static int ibmvnic_close(struct net_device *netdev) 1665 { 1666 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 1667 int rc; 1668 1669 netdev_dbg(netdev, "[S:%s FOP:%d FRR:%d] Closing\n", 1670 adapter_state_to_string(adapter->state), 1671 adapter->failover_pending, 1672 adapter->force_reset_recovery); 1673 1674 /* If device failover is pending, just set device state and return. 1675 * Device operation will be handled by reset routine. 1676 */ 1677 if (adapter->failover_pending) { 1678 adapter->state = VNIC_CLOSED; 1679 return 0; 1680 } 1681 1682 rc = __ibmvnic_close(netdev); 1683 ibmvnic_cleanup(netdev); 1684 clean_rx_pools(adapter); 1685 clean_tx_pools(adapter); 1686 1687 return rc; 1688 } 1689 1690 /** 1691 * build_hdr_data - creates L2/L3/L4 header data buffer 1692 * @hdr_field: bitfield determining needed headers 1693 * @skb: socket buffer 1694 * @hdr_len: array of header lengths 1695 * @hdr_data: buffer to write the header to 1696 * 1697 * Reads hdr_field to determine which headers are needed by firmware. 1698 * Builds a buffer containing these headers. Saves individual header 1699 * lengths and total buffer length to be used to build descriptors. 1700 */ 1701 static int build_hdr_data(u8 hdr_field, struct sk_buff *skb, 1702 int *hdr_len, u8 *hdr_data) 1703 { 1704 int len = 0; 1705 u8 *hdr; 1706 1707 if (skb_vlan_tagged(skb) && !skb_vlan_tag_present(skb)) 1708 hdr_len[0] = sizeof(struct vlan_ethhdr); 1709 else 1710 hdr_len[0] = sizeof(struct ethhdr); 1711 1712 if (skb->protocol == htons(ETH_P_IP)) { 1713 hdr_len[1] = ip_hdr(skb)->ihl * 4; 1714 if (ip_hdr(skb)->protocol == IPPROTO_TCP) 1715 hdr_len[2] = tcp_hdrlen(skb); 1716 else if (ip_hdr(skb)->protocol == IPPROTO_UDP) 1717 hdr_len[2] = sizeof(struct udphdr); 1718 } else if (skb->protocol == htons(ETH_P_IPV6)) { 1719 hdr_len[1] = sizeof(struct ipv6hdr); 1720 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) 1721 hdr_len[2] = tcp_hdrlen(skb); 1722 else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP) 1723 hdr_len[2] = sizeof(struct udphdr); 1724 } else if (skb->protocol == htons(ETH_P_ARP)) { 1725 hdr_len[1] = arp_hdr_len(skb->dev); 1726 hdr_len[2] = 0; 1727 } 1728 1729 memset(hdr_data, 0, 120); 1730 if ((hdr_field >> 6) & 1) { 1731 hdr = skb_mac_header(skb); 1732 memcpy(hdr_data, hdr, hdr_len[0]); 1733 len += hdr_len[0]; 1734 } 1735 1736 if ((hdr_field >> 5) & 1) { 1737 hdr = skb_network_header(skb); 1738 memcpy(hdr_data + len, hdr, hdr_len[1]); 1739 len += hdr_len[1]; 1740 } 1741 1742 if ((hdr_field >> 4) & 1) { 1743 hdr = skb_transport_header(skb); 1744 memcpy(hdr_data + len, hdr, hdr_len[2]); 1745 len += hdr_len[2]; 1746 } 1747 return len; 1748 } 1749 1750 /** 1751 * create_hdr_descs - create header and header extension descriptors 1752 * @hdr_field: bitfield determining needed headers 1753 * @hdr_data: buffer containing header data 1754 * @len: length of data buffer 1755 * @hdr_len: array of individual header lengths 1756 * @scrq_arr: descriptor array 1757 * 1758 * Creates header and, 
if needed, header extension descriptors and 1759 * places them in a descriptor array, scrq_arr 1760 */ 1761 1762 static int create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len, 1763 union sub_crq *scrq_arr) 1764 { 1765 union sub_crq hdr_desc; 1766 int tmp_len = len; 1767 int num_descs = 0; 1768 u8 *data, *cur; 1769 int tmp; 1770 1771 while (tmp_len > 0) { 1772 cur = hdr_data + len - tmp_len; 1773 1774 memset(&hdr_desc, 0, sizeof(hdr_desc)); 1775 if (cur != hdr_data) { 1776 data = hdr_desc.hdr_ext.data; 1777 tmp = tmp_len > 29 ? 29 : tmp_len; 1778 hdr_desc.hdr_ext.first = IBMVNIC_CRQ_CMD; 1779 hdr_desc.hdr_ext.type = IBMVNIC_HDR_EXT_DESC; 1780 hdr_desc.hdr_ext.len = tmp; 1781 } else { 1782 data = hdr_desc.hdr.data; 1783 tmp = tmp_len > 24 ? 24 : tmp_len; 1784 hdr_desc.hdr.first = IBMVNIC_CRQ_CMD; 1785 hdr_desc.hdr.type = IBMVNIC_HDR_DESC; 1786 hdr_desc.hdr.len = tmp; 1787 hdr_desc.hdr.l2_len = (u8)hdr_len[0]; 1788 hdr_desc.hdr.l3_len = cpu_to_be16((u16)hdr_len[1]); 1789 hdr_desc.hdr.l4_len = (u8)hdr_len[2]; 1790 hdr_desc.hdr.flag = hdr_field << 1; 1791 } 1792 memcpy(data, cur, tmp); 1793 tmp_len -= tmp; 1794 *scrq_arr = hdr_desc; 1795 scrq_arr++; 1796 num_descs++; 1797 } 1798 1799 return num_descs; 1800 } 1801 1802 /** 1803 * build_hdr_descs_arr - build a header descriptor array 1804 * @skb: tx socket buffer 1805 * @indir_arr: indirect array 1806 * @num_entries: number of descriptors to be sent 1807 * @hdr_field: bit field determining which headers will be sent 1808 * 1809 * This function will build a TX descriptor array with applicable 1810 * L2/L3/L4 packet header descriptors to be sent by send_subcrq_indirect. 1811 */ 1812 1813 static void build_hdr_descs_arr(struct sk_buff *skb, 1814 union sub_crq *indir_arr, 1815 int *num_entries, u8 hdr_field) 1816 { 1817 int hdr_len[3] = {0, 0, 0}; 1818 u8 hdr_data[140] = {0}; 1819 int tot_len; 1820 1821 tot_len = build_hdr_data(hdr_field, skb, hdr_len, 1822 hdr_data); 1823 *num_entries += create_hdr_descs(hdr_field, hdr_data, tot_len, hdr_len, 1824 indir_arr + 1); 1825 } 1826 1827 static int ibmvnic_xmit_workarounds(struct sk_buff *skb, 1828 struct net_device *netdev) 1829 { 1830 /* For some backing devices, mishandling of small packets 1831 * can result in a loss of connection or TX stall. 
Device 1832 * architects recommend that no packet should be smaller 1833 * than the minimum MTU value provided to the driver, so 1834 * pad any packets to that length 1835 */ 1836 if (skb->len < netdev->min_mtu) 1837 return skb_put_padto(skb, netdev->min_mtu); 1838 1839 return 0; 1840 } 1841 1842 static void ibmvnic_tx_scrq_clean_buffer(struct ibmvnic_adapter *adapter, 1843 struct ibmvnic_sub_crq_queue *tx_scrq) 1844 { 1845 struct ibmvnic_ind_xmit_queue *ind_bufp; 1846 struct ibmvnic_tx_buff *tx_buff; 1847 struct ibmvnic_tx_pool *tx_pool; 1848 union sub_crq tx_scrq_entry; 1849 int queue_num; 1850 int entries; 1851 int index; 1852 int i; 1853 1854 ind_bufp = &tx_scrq->ind_buf; 1855 entries = (u64)ind_bufp->index; 1856 queue_num = tx_scrq->pool_index; 1857 1858 for (i = entries - 1; i >= 0; --i) { 1859 tx_scrq_entry = ind_bufp->indir_arr[i]; 1860 if (tx_scrq_entry.v1.type != IBMVNIC_TX_DESC) 1861 continue; 1862 index = be32_to_cpu(tx_scrq_entry.v1.correlator); 1863 if (index & IBMVNIC_TSO_POOL_MASK) { 1864 tx_pool = &adapter->tso_pool[queue_num]; 1865 index &= ~IBMVNIC_TSO_POOL_MASK; 1866 } else { 1867 tx_pool = &adapter->tx_pool[queue_num]; 1868 } 1869 tx_pool->free_map[tx_pool->consumer_index] = index; 1870 tx_pool->consumer_index = tx_pool->consumer_index == 0 ? 1871 tx_pool->num_buffers - 1 : 1872 tx_pool->consumer_index - 1; 1873 tx_buff = &tx_pool->tx_buff[index]; 1874 adapter->netdev->stats.tx_packets--; 1875 adapter->netdev->stats.tx_bytes -= tx_buff->skb->len; 1876 adapter->tx_stats_buffers[queue_num].packets--; 1877 adapter->tx_stats_buffers[queue_num].bytes -= 1878 tx_buff->skb->len; 1879 dev_kfree_skb_any(tx_buff->skb); 1880 tx_buff->skb = NULL; 1881 adapter->netdev->stats.tx_dropped++; 1882 } 1883 1884 ind_bufp->index = 0; 1885 1886 if (atomic_sub_return(entries, &tx_scrq->used) <= 1887 (adapter->req_tx_entries_per_subcrq / 2) && 1888 __netif_subqueue_stopped(adapter->netdev, queue_num)) { 1889 rcu_read_lock(); 1890 1891 if (adapter->tx_queues_active) { 1892 netif_wake_subqueue(adapter->netdev, queue_num); 1893 netdev_dbg(adapter->netdev, "Started queue %d\n", 1894 queue_num); 1895 } 1896 1897 rcu_read_unlock(); 1898 } 1899 } 1900 1901 static int ibmvnic_tx_scrq_flush(struct ibmvnic_adapter *adapter, 1902 struct ibmvnic_sub_crq_queue *tx_scrq) 1903 { 1904 struct ibmvnic_ind_xmit_queue *ind_bufp; 1905 u64 dma_addr; 1906 u64 entries; 1907 u64 handle; 1908 int rc; 1909 1910 ind_bufp = &tx_scrq->ind_buf; 1911 dma_addr = (u64)ind_bufp->indir_dma; 1912 entries = (u64)ind_bufp->index; 1913 handle = tx_scrq->handle; 1914 1915 if (!entries) 1916 return 0; 1917 rc = send_subcrq_indirect(adapter, handle, dma_addr, entries); 1918 if (rc) 1919 ibmvnic_tx_scrq_clean_buffer(adapter, tx_scrq); 1920 else 1921 ind_bufp->index = 0; 1922 return 0; 1923 } 1924 1925 static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) 1926 { 1927 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 1928 int queue_num = skb_get_queue_mapping(skb); 1929 u8 *hdrs = (u8 *)&adapter->tx_rx_desc_req; 1930 struct device *dev = &adapter->vdev->dev; 1931 struct ibmvnic_ind_xmit_queue *ind_bufp; 1932 struct ibmvnic_tx_buff *tx_buff = NULL; 1933 struct ibmvnic_sub_crq_queue *tx_scrq; 1934 struct ibmvnic_tx_pool *tx_pool; 1935 unsigned int tx_send_failed = 0; 1936 netdev_tx_t ret = NETDEV_TX_OK; 1937 unsigned int tx_map_failed = 0; 1938 union sub_crq indir_arr[16]; 1939 unsigned int tx_dropped = 0; 1940 unsigned int tx_packets = 0; 1941 unsigned int tx_bytes = 0; 1942 dma_addr_t data_dma_addr; 1943 
struct netdev_queue *txq; 1944 unsigned long lpar_rc; 1945 union sub_crq tx_crq; 1946 unsigned int offset; 1947 int num_entries = 1; 1948 unsigned char *dst; 1949 int bufidx = 0; 1950 u8 proto = 0; 1951 1952 /* If a reset is in progress, drop the packet since 1953 * the scrqs may get torn down. Otherwise use the 1954 * rcu to ensure reset waits for us to complete. 1955 */ 1956 rcu_read_lock(); 1957 if (!adapter->tx_queues_active) { 1958 dev_kfree_skb_any(skb); 1959 1960 tx_send_failed++; 1961 tx_dropped++; 1962 ret = NETDEV_TX_OK; 1963 goto out; 1964 } 1965 1966 tx_scrq = adapter->tx_scrq[queue_num]; 1967 txq = netdev_get_tx_queue(netdev, queue_num); 1968 ind_bufp = &tx_scrq->ind_buf; 1969 1970 if (ibmvnic_xmit_workarounds(skb, netdev)) { 1971 tx_dropped++; 1972 tx_send_failed++; 1973 ret = NETDEV_TX_OK; 1974 ibmvnic_tx_scrq_flush(adapter, tx_scrq); 1975 goto out; 1976 } 1977 1978 if (skb_is_gso(skb)) 1979 tx_pool = &adapter->tso_pool[queue_num]; 1980 else 1981 tx_pool = &adapter->tx_pool[queue_num]; 1982 1983 bufidx = tx_pool->free_map[tx_pool->consumer_index]; 1984 1985 if (bufidx == IBMVNIC_INVALID_MAP) { 1986 dev_kfree_skb_any(skb); 1987 tx_send_failed++; 1988 tx_dropped++; 1989 ibmvnic_tx_scrq_flush(adapter, tx_scrq); 1990 ret = NETDEV_TX_OK; 1991 goto out; 1992 } 1993 1994 tx_pool->free_map[tx_pool->consumer_index] = IBMVNIC_INVALID_MAP; 1995 1996 offset = bufidx * tx_pool->buf_size; 1997 dst = tx_pool->long_term_buff.buff + offset; 1998 memset(dst, 0, tx_pool->buf_size); 1999 data_dma_addr = tx_pool->long_term_buff.addr + offset; 2000 2001 if (skb_shinfo(skb)->nr_frags) { 2002 int cur, i; 2003 2004 /* Copy the head */ 2005 skb_copy_from_linear_data(skb, dst, skb_headlen(skb)); 2006 cur = skb_headlen(skb); 2007 2008 /* Copy the frags */ 2009 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 2010 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 2011 2012 memcpy(dst + cur, skb_frag_address(frag), 2013 skb_frag_size(frag)); 2014 cur += skb_frag_size(frag); 2015 } 2016 } else { 2017 skb_copy_from_linear_data(skb, dst, skb->len); 2018 } 2019 2020 /* post changes to long_term_buff *dst before VIOS accessing it */ 2021 dma_wmb(); 2022 2023 tx_pool->consumer_index = 2024 (tx_pool->consumer_index + 1) % tx_pool->num_buffers; 2025 2026 tx_buff = &tx_pool->tx_buff[bufidx]; 2027 tx_buff->skb = skb; 2028 tx_buff->index = bufidx; 2029 tx_buff->pool_index = queue_num; 2030 2031 memset(&tx_crq, 0, sizeof(tx_crq)); 2032 tx_crq.v1.first = IBMVNIC_CRQ_CMD; 2033 tx_crq.v1.type = IBMVNIC_TX_DESC; 2034 tx_crq.v1.n_crq_elem = 1; 2035 tx_crq.v1.n_sge = 1; 2036 tx_crq.v1.flags1 = IBMVNIC_TX_COMP_NEEDED; 2037 2038 if (skb_is_gso(skb)) 2039 tx_crq.v1.correlator = 2040 cpu_to_be32(bufidx | IBMVNIC_TSO_POOL_MASK); 2041 else 2042 tx_crq.v1.correlator = cpu_to_be32(bufidx); 2043 tx_crq.v1.dma_reg = cpu_to_be16(tx_pool->long_term_buff.map_id); 2044 tx_crq.v1.sge_len = cpu_to_be32(skb->len); 2045 tx_crq.v1.ioba = cpu_to_be64(data_dma_addr); 2046 2047 if (adapter->vlan_header_insertion && skb_vlan_tag_present(skb)) { 2048 tx_crq.v1.flags2 |= IBMVNIC_TX_VLAN_INSERT; 2049 tx_crq.v1.vlan_id = cpu_to_be16(skb->vlan_tci); 2050 } 2051 2052 if (skb->protocol == htons(ETH_P_IP)) { 2053 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV4; 2054 proto = ip_hdr(skb)->protocol; 2055 } else if (skb->protocol == htons(ETH_P_IPV6)) { 2056 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV6; 2057 proto = ipv6_hdr(skb)->nexthdr; 2058 } 2059 2060 if (proto == IPPROTO_TCP) 2061 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_TCP; 2062 else if (proto == IPPROTO_UDP) 
2063 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_UDP; 2064 2065 if (skb->ip_summed == CHECKSUM_PARTIAL) { 2066 tx_crq.v1.flags1 |= IBMVNIC_TX_CHKSUM_OFFLOAD; 2067 hdrs += 2; 2068 } 2069 if (skb_is_gso(skb)) { 2070 tx_crq.v1.flags1 |= IBMVNIC_TX_LSO; 2071 tx_crq.v1.mss = cpu_to_be16(skb_shinfo(skb)->gso_size); 2072 hdrs += 2; 2073 } 2074 2075 if ((*hdrs >> 7) & 1) 2076 build_hdr_descs_arr(skb, indir_arr, &num_entries, *hdrs); 2077 2078 tx_crq.v1.n_crq_elem = num_entries; 2079 tx_buff->num_entries = num_entries; 2080 /* flush buffer if current entry can not fit */ 2081 if (num_entries + ind_bufp->index > IBMVNIC_MAX_IND_DESCS) { 2082 lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq); 2083 if (lpar_rc != H_SUCCESS) 2084 goto tx_flush_err; 2085 } 2086 2087 indir_arr[0] = tx_crq; 2088 memcpy(&ind_bufp->indir_arr[ind_bufp->index], &indir_arr[0], 2089 num_entries * sizeof(struct ibmvnic_generic_scrq)); 2090 ind_bufp->index += num_entries; 2091 if (__netdev_tx_sent_queue(txq, skb->len, 2092 netdev_xmit_more() && 2093 ind_bufp->index < IBMVNIC_MAX_IND_DESCS)) { 2094 lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq); 2095 if (lpar_rc != H_SUCCESS) 2096 goto tx_err; 2097 } 2098 2099 if (atomic_add_return(num_entries, &tx_scrq->used) 2100 >= adapter->req_tx_entries_per_subcrq) { 2101 netdev_dbg(netdev, "Stopping queue %d\n", queue_num); 2102 netif_stop_subqueue(netdev, queue_num); 2103 } 2104 2105 tx_packets++; 2106 tx_bytes += skb->len; 2107 txq_trans_cond_update(txq); 2108 ret = NETDEV_TX_OK; 2109 goto out; 2110 2111 tx_flush_err: 2112 dev_kfree_skb_any(skb); 2113 tx_buff->skb = NULL; 2114 tx_pool->consumer_index = tx_pool->consumer_index == 0 ? 2115 tx_pool->num_buffers - 1 : 2116 tx_pool->consumer_index - 1; 2117 tx_dropped++; 2118 tx_err: 2119 if (lpar_rc != H_CLOSED && lpar_rc != H_PARAMETER) 2120 dev_err_ratelimited(dev, "tx: send failed\n"); 2121 2122 if (lpar_rc == H_CLOSED || adapter->failover_pending) { 2123 /* Disable TX and report carrier off if queue is closed 2124 * or pending failover. 2125 * Firmware guarantees that a signal will be sent to the 2126 * driver, triggering a reset or some other action. 
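		 * The stopped queues are expected to be re-enabled by the
		 * reset path once __ibmvnic_open() brings the adapter back up.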
2127 */ 2128 netif_tx_stop_all_queues(netdev); 2129 netif_carrier_off(netdev); 2130 } 2131 out: 2132 rcu_read_unlock(); 2133 netdev->stats.tx_dropped += tx_dropped; 2134 netdev->stats.tx_bytes += tx_bytes; 2135 netdev->stats.tx_packets += tx_packets; 2136 adapter->tx_send_failed += tx_send_failed; 2137 adapter->tx_map_failed += tx_map_failed; 2138 adapter->tx_stats_buffers[queue_num].packets += tx_packets; 2139 adapter->tx_stats_buffers[queue_num].bytes += tx_bytes; 2140 adapter->tx_stats_buffers[queue_num].dropped_packets += tx_dropped; 2141 2142 return ret; 2143 } 2144 2145 static void ibmvnic_set_multi(struct net_device *netdev) 2146 { 2147 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 2148 struct netdev_hw_addr *ha; 2149 union ibmvnic_crq crq; 2150 2151 memset(&crq, 0, sizeof(crq)); 2152 crq.request_capability.first = IBMVNIC_CRQ_CMD; 2153 crq.request_capability.cmd = REQUEST_CAPABILITY; 2154 2155 if (netdev->flags & IFF_PROMISC) { 2156 if (!adapter->promisc_supported) 2157 return; 2158 } else { 2159 if (netdev->flags & IFF_ALLMULTI) { 2160 /* Accept all multicast */ 2161 memset(&crq, 0, sizeof(crq)); 2162 crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD; 2163 crq.multicast_ctrl.cmd = MULTICAST_CTRL; 2164 crq.multicast_ctrl.flags = IBMVNIC_ENABLE_ALL; 2165 ibmvnic_send_crq(adapter, &crq); 2166 } else if (netdev_mc_empty(netdev)) { 2167 /* Reject all multicast */ 2168 memset(&crq, 0, sizeof(crq)); 2169 crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD; 2170 crq.multicast_ctrl.cmd = MULTICAST_CTRL; 2171 crq.multicast_ctrl.flags = IBMVNIC_DISABLE_ALL; 2172 ibmvnic_send_crq(adapter, &crq); 2173 } else { 2174 /* Accept one or more multicast(s) */ 2175 netdev_for_each_mc_addr(ha, netdev) { 2176 memset(&crq, 0, sizeof(crq)); 2177 crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD; 2178 crq.multicast_ctrl.cmd = MULTICAST_CTRL; 2179 crq.multicast_ctrl.flags = IBMVNIC_ENABLE_MC; 2180 ether_addr_copy(&crq.multicast_ctrl.mac_addr[0], 2181 ha->addr); 2182 ibmvnic_send_crq(adapter, &crq); 2183 } 2184 } 2185 } 2186 } 2187 2188 static int __ibmvnic_set_mac(struct net_device *netdev, u8 *dev_addr) 2189 { 2190 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 2191 union ibmvnic_crq crq; 2192 int rc; 2193 2194 if (!is_valid_ether_addr(dev_addr)) { 2195 rc = -EADDRNOTAVAIL; 2196 goto err; 2197 } 2198 2199 memset(&crq, 0, sizeof(crq)); 2200 crq.change_mac_addr.first = IBMVNIC_CRQ_CMD; 2201 crq.change_mac_addr.cmd = CHANGE_MAC_ADDR; 2202 ether_addr_copy(&crq.change_mac_addr.mac_addr[0], dev_addr); 2203 2204 mutex_lock(&adapter->fw_lock); 2205 adapter->fw_done_rc = 0; 2206 reinit_completion(&adapter->fw_done); 2207 2208 rc = ibmvnic_send_crq(adapter, &crq); 2209 if (rc) { 2210 rc = -EIO; 2211 mutex_unlock(&adapter->fw_lock); 2212 goto err; 2213 } 2214 2215 rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000); 2216 /* netdev->dev_addr is changed in handle_change_mac_rsp function */ 2217 if (rc || adapter->fw_done_rc) { 2218 rc = -EIO; 2219 mutex_unlock(&adapter->fw_lock); 2220 goto err; 2221 } 2222 mutex_unlock(&adapter->fw_lock); 2223 return 0; 2224 err: 2225 ether_addr_copy(adapter->mac_addr, netdev->dev_addr); 2226 return rc; 2227 } 2228 2229 static int ibmvnic_set_mac(struct net_device *netdev, void *p) 2230 { 2231 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 2232 struct sockaddr *addr = p; 2233 int rc; 2234 2235 rc = 0; 2236 if (!is_valid_ether_addr(addr->sa_data)) 2237 return -EADDRNOTAVAIL; 2238 2239 ether_addr_copy(adapter->mac_addr, addr->sa_data); 2240 if (adapter->state != 
VNIC_PROBED)
		rc = __ibmvnic_set_mac(netdev, addr->sa_data);

	return rc;
}

static const char *reset_reason_to_string(enum ibmvnic_reset_reason reason)
{
	switch (reason) {
	case VNIC_RESET_FAILOVER:
		return "FAILOVER";
	case VNIC_RESET_MOBILITY:
		return "MOBILITY";
	case VNIC_RESET_FATAL:
		return "FATAL";
	case VNIC_RESET_NON_FATAL:
		return "NON_FATAL";
	case VNIC_RESET_TIMEOUT:
		return "TIMEOUT";
	case VNIC_RESET_CHANGE_PARAM:
		return "CHANGE_PARAM";
	case VNIC_RESET_PASSIVE_INIT:
		return "PASSIVE_INIT";
	}
	return "UNKNOWN";
}

/*
 * Initialize the init_done completion and return code values. We
 * can get a transport event just after registering the CRQ and the
 * tasklet will use this to communicate the transport event. To ensure
 * we don't miss the notification/error, initialize these _before_
 * registering the CRQ.
 */
static inline void reinit_init_done(struct ibmvnic_adapter *adapter)
{
	reinit_completion(&adapter->init_done);
	adapter->init_done_rc = 0;
}

/*
 * do_reset returns zero if we are able to keep processing reset events, or
 * non-zero if we hit a fatal error and must halt.
 */
static int do_reset(struct ibmvnic_adapter *adapter,
		    struct ibmvnic_rwi *rwi, u32 reset_state)
{
	struct net_device *netdev = adapter->netdev;
	u64 old_num_rx_queues, old_num_tx_queues;
	u64 old_num_rx_slots, old_num_tx_slots;
	int rc;

	netdev_dbg(adapter->netdev,
		   "[S:%s FOP:%d] Reset reason: %s, reset_state: %s\n",
		   adapter_state_to_string(adapter->state),
		   adapter->failover_pending,
		   reset_reason_to_string(rwi->reset_reason),
		   adapter_state_to_string(reset_state));

	adapter->reset_reason = rwi->reset_reason;
	/* requestor of VNIC_RESET_CHANGE_PARAM already has the rtnl lock */
	if (!(adapter->reset_reason == VNIC_RESET_CHANGE_PARAM))
		rtnl_lock();

	/* Now that we have the rtnl lock, clear any pending failover.
	 * This will ensure ibmvnic_open() has either completed or will
	 * block until failover is complete.
	 */
	if (rwi->reset_reason == VNIC_RESET_FAILOVER)
		adapter->failover_pending = false;

	/* read the state and check (again) after getting rtnl */
	reset_state = adapter->state;

	if (reset_state == VNIC_REMOVING || reset_state == VNIC_REMOVED) {
		rc = -EBUSY;
		goto out;
	}

	netif_carrier_off(netdev);

	old_num_rx_queues = adapter->req_rx_queues;
	old_num_tx_queues = adapter->req_tx_queues;
	old_num_rx_slots = adapter->req_rx_add_entries_per_subcrq;
	old_num_tx_slots = adapter->req_tx_entries_per_subcrq;

	ibmvnic_cleanup(netdev);

	if (reset_state == VNIC_OPEN &&
	    adapter->reset_reason != VNIC_RESET_MOBILITY &&
	    adapter->reset_reason != VNIC_RESET_FAILOVER) {
		if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) {
			rc = __ibmvnic_close(netdev);
			if (rc)
				goto out;
		} else {
			adapter->state = VNIC_CLOSING;

			/* Release the RTNL lock before link state change and
			 * re-acquire after the link state change to allow
			 * linkwatch_event to grab the RTNL lock and run during
			 * a reset.
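			 * (The link state change below waits on a response
			 * from the VNIC server, so holding rtnl across it
			 * could block linkwatch for the length of that wait.)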
2342 */ 2343 rtnl_unlock(); 2344 rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN); 2345 rtnl_lock(); 2346 if (rc) 2347 goto out; 2348 2349 if (adapter->state == VNIC_OPEN) { 2350 /* When we dropped rtnl, ibmvnic_open() got 2351 * it and noticed that we are resetting and 2352 * set the adapter state to OPEN. Update our 2353 * new "target" state, and resume the reset 2354 * from VNIC_CLOSING state. 2355 */ 2356 netdev_dbg(netdev, 2357 "Open changed state from %s, updating.\n", 2358 adapter_state_to_string(reset_state)); 2359 reset_state = VNIC_OPEN; 2360 adapter->state = VNIC_CLOSING; 2361 } 2362 2363 if (adapter->state != VNIC_CLOSING) { 2364 /* If someone else changed the adapter state 2365 * when we dropped the rtnl, fail the reset 2366 */ 2367 rc = -EAGAIN; 2368 goto out; 2369 } 2370 adapter->state = VNIC_CLOSED; 2371 } 2372 } 2373 2374 if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) { 2375 release_resources(adapter); 2376 release_sub_crqs(adapter, 1); 2377 release_crq_queue(adapter); 2378 } 2379 2380 if (adapter->reset_reason != VNIC_RESET_NON_FATAL) { 2381 /* remove the closed state so when we call open it appears 2382 * we are coming from the probed state. 2383 */ 2384 adapter->state = VNIC_PROBED; 2385 2386 reinit_init_done(adapter); 2387 2388 if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) { 2389 rc = init_crq_queue(adapter); 2390 } else if (adapter->reset_reason == VNIC_RESET_MOBILITY) { 2391 rc = ibmvnic_reenable_crq_queue(adapter); 2392 release_sub_crqs(adapter, 1); 2393 } else { 2394 rc = ibmvnic_reset_crq(adapter); 2395 if (rc == H_CLOSED || rc == H_SUCCESS) { 2396 rc = vio_enable_interrupts(adapter->vdev); 2397 if (rc) 2398 netdev_err(adapter->netdev, 2399 "Reset failed to enable interrupts. rc=%d\n", 2400 rc); 2401 } 2402 } 2403 2404 if (rc) { 2405 netdev_err(adapter->netdev, 2406 "Reset couldn't initialize crq. rc=%d\n", rc); 2407 goto out; 2408 } 2409 2410 rc = ibmvnic_reset_init(adapter, true); 2411 if (rc) 2412 goto out; 2413 2414 /* If the adapter was in PROBE or DOWN state prior to the reset, 2415 * exit here. 
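		 * The CRQ has already been reinitialized above; a later
		 * ibmvnic_open() call is expected to handle the login and
		 * resource setup when the device is brought up.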
2416 */ 2417 if (reset_state == VNIC_PROBED || reset_state == VNIC_DOWN) { 2418 rc = 0; 2419 goto out; 2420 } 2421 2422 rc = ibmvnic_login(netdev); 2423 if (rc) 2424 goto out; 2425 2426 if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) { 2427 rc = init_resources(adapter); 2428 if (rc) 2429 goto out; 2430 } else if (adapter->req_rx_queues != old_num_rx_queues || 2431 adapter->req_tx_queues != old_num_tx_queues || 2432 adapter->req_rx_add_entries_per_subcrq != 2433 old_num_rx_slots || 2434 adapter->req_tx_entries_per_subcrq != 2435 old_num_tx_slots || 2436 !adapter->rx_pool || 2437 !adapter->tso_pool || 2438 !adapter->tx_pool) { 2439 release_napi(adapter); 2440 release_vpd_data(adapter); 2441 2442 rc = init_resources(adapter); 2443 if (rc) 2444 goto out; 2445 2446 } else { 2447 rc = init_tx_pools(netdev); 2448 if (rc) { 2449 netdev_dbg(netdev, 2450 "init tx pools failed (%d)\n", 2451 rc); 2452 goto out; 2453 } 2454 2455 rc = init_rx_pools(netdev); 2456 if (rc) { 2457 netdev_dbg(netdev, 2458 "init rx pools failed (%d)\n", 2459 rc); 2460 goto out; 2461 } 2462 } 2463 ibmvnic_disable_irqs(adapter); 2464 } 2465 adapter->state = VNIC_CLOSED; 2466 2467 if (reset_state == VNIC_CLOSED) { 2468 rc = 0; 2469 goto out; 2470 } 2471 2472 rc = __ibmvnic_open(netdev); 2473 if (rc) { 2474 rc = IBMVNIC_OPEN_FAILED; 2475 goto out; 2476 } 2477 2478 /* refresh device's multicast list */ 2479 ibmvnic_set_multi(netdev); 2480 2481 if (adapter->reset_reason == VNIC_RESET_FAILOVER || 2482 adapter->reset_reason == VNIC_RESET_MOBILITY) 2483 __netdev_notify_peers(netdev); 2484 2485 rc = 0; 2486 2487 out: 2488 /* restore the adapter state if reset failed */ 2489 if (rc) 2490 adapter->state = reset_state; 2491 /* requestor of VNIC_RESET_CHANGE_PARAM should still hold the rtnl lock */ 2492 if (!(adapter->reset_reason == VNIC_RESET_CHANGE_PARAM)) 2493 rtnl_unlock(); 2494 2495 netdev_dbg(adapter->netdev, "[S:%s FOP:%d] Reset done, rc %d\n", 2496 adapter_state_to_string(adapter->state), 2497 adapter->failover_pending, rc); 2498 return rc; 2499 } 2500 2501 static int do_hard_reset(struct ibmvnic_adapter *adapter, 2502 struct ibmvnic_rwi *rwi, u32 reset_state) 2503 { 2504 struct net_device *netdev = adapter->netdev; 2505 int rc; 2506 2507 netdev_dbg(adapter->netdev, "Hard resetting driver (%s)\n", 2508 reset_reason_to_string(rwi->reset_reason)); 2509 2510 /* read the state and check (again) after getting rtnl */ 2511 reset_state = adapter->state; 2512 2513 if (reset_state == VNIC_REMOVING || reset_state == VNIC_REMOVED) { 2514 rc = -EBUSY; 2515 goto out; 2516 } 2517 2518 netif_carrier_off(netdev); 2519 adapter->reset_reason = rwi->reset_reason; 2520 2521 ibmvnic_cleanup(netdev); 2522 release_resources(adapter); 2523 release_sub_crqs(adapter, 0); 2524 release_crq_queue(adapter); 2525 2526 /* remove the closed state so when we call open it appears 2527 * we are coming from the probed state. 2528 */ 2529 adapter->state = VNIC_PROBED; 2530 2531 reinit_init_done(adapter); 2532 2533 rc = init_crq_queue(adapter); 2534 if (rc) { 2535 netdev_err(adapter->netdev, 2536 "Couldn't initialize crq. rc=%d\n", rc); 2537 goto out; 2538 } 2539 2540 rc = ibmvnic_reset_init(adapter, false); 2541 if (rc) 2542 goto out; 2543 2544 /* If the adapter was in PROBE or DOWN state prior to the reset, 2545 * exit here. 
2546 */ 2547 if (reset_state == VNIC_PROBED || reset_state == VNIC_DOWN) 2548 goto out; 2549 2550 rc = ibmvnic_login(netdev); 2551 if (rc) 2552 goto out; 2553 2554 rc = init_resources(adapter); 2555 if (rc) 2556 goto out; 2557 2558 ibmvnic_disable_irqs(adapter); 2559 adapter->state = VNIC_CLOSED; 2560 2561 if (reset_state == VNIC_CLOSED) 2562 goto out; 2563 2564 rc = __ibmvnic_open(netdev); 2565 if (rc) { 2566 rc = IBMVNIC_OPEN_FAILED; 2567 goto out; 2568 } 2569 2570 __netdev_notify_peers(netdev); 2571 out: 2572 /* restore adapter state if reset failed */ 2573 if (rc) 2574 adapter->state = reset_state; 2575 netdev_dbg(adapter->netdev, "[S:%s FOP:%d] Hard reset done, rc %d\n", 2576 adapter_state_to_string(adapter->state), 2577 adapter->failover_pending, rc); 2578 return rc; 2579 } 2580 2581 static struct ibmvnic_rwi *get_next_rwi(struct ibmvnic_adapter *adapter) 2582 { 2583 struct ibmvnic_rwi *rwi; 2584 unsigned long flags; 2585 2586 spin_lock_irqsave(&adapter->rwi_lock, flags); 2587 2588 if (!list_empty(&adapter->rwi_list)) { 2589 rwi = list_first_entry(&adapter->rwi_list, struct ibmvnic_rwi, 2590 list); 2591 list_del(&rwi->list); 2592 } else { 2593 rwi = NULL; 2594 } 2595 2596 spin_unlock_irqrestore(&adapter->rwi_lock, flags); 2597 return rwi; 2598 } 2599 2600 /** 2601 * do_passive_init - complete probing when partner device is detected. 2602 * @adapter: ibmvnic_adapter struct 2603 * 2604 * If the ibmvnic device does not have a partner device to communicate with at boot 2605 * and that partner device comes online at a later time, this function is called 2606 * to complete the initialization process of ibmvnic device. 2607 * Caller is expected to hold rtnl_lock(). 2608 * 2609 * Returns non-zero if sub-CRQs are not initialized properly leaving the device 2610 * in the down state. 2611 * Returns 0 upon success and the device is in PROBED state. 2612 */ 2613 2614 static int do_passive_init(struct ibmvnic_adapter *adapter) 2615 { 2616 unsigned long timeout = msecs_to_jiffies(30000); 2617 struct net_device *netdev = adapter->netdev; 2618 struct device *dev = &adapter->vdev->dev; 2619 int rc; 2620 2621 netdev_dbg(netdev, "Partner device found, probing.\n"); 2622 2623 adapter->state = VNIC_PROBING; 2624 reinit_completion(&adapter->init_done); 2625 adapter->init_done_rc = 0; 2626 adapter->crq.active = true; 2627 2628 rc = send_crq_init_complete(adapter); 2629 if (rc) 2630 goto out; 2631 2632 rc = send_version_xchg(adapter); 2633 if (rc) 2634 netdev_dbg(adapter->netdev, "send_version_xchg failed, rc=%d\n", rc); 2635 2636 if (!wait_for_completion_timeout(&adapter->init_done, timeout)) { 2637 dev_err(dev, "Initialization sequence timed out\n"); 2638 rc = -ETIMEDOUT; 2639 goto out; 2640 } 2641 2642 rc = init_sub_crqs(adapter); 2643 if (rc) { 2644 dev_err(dev, "Initialization of sub crqs failed, rc=%d\n", rc); 2645 goto out; 2646 } 2647 2648 rc = init_sub_crq_irqs(adapter); 2649 if (rc) { 2650 dev_err(dev, "Failed to initialize sub crq irqs\n, rc=%d", rc); 2651 goto init_failed; 2652 } 2653 2654 netdev->mtu = adapter->req_mtu - ETH_HLEN; 2655 netdev->min_mtu = adapter->min_mtu - ETH_HLEN; 2656 netdev->max_mtu = adapter->max_mtu - ETH_HLEN; 2657 2658 adapter->state = VNIC_PROBED; 2659 netdev_dbg(netdev, "Probed successfully. 
Waiting for signal from partner device.\n");

	return 0;

init_failed:
	release_sub_crqs(adapter, 1);
out:
	adapter->state = VNIC_DOWN;
	return rc;
}

static void __ibmvnic_reset(struct work_struct *work)
{
	struct ibmvnic_adapter *adapter;
	unsigned int timeout = 5000;
	struct ibmvnic_rwi *tmprwi;
	bool saved_state = false;
	struct ibmvnic_rwi *rwi;
	unsigned long flags;
	struct device *dev;
	bool need_reset;
	int num_fails = 0;
	u32 reset_state;
	int rc = 0;

	adapter = container_of(work, struct ibmvnic_adapter, ibmvnic_reset);
	dev = &adapter->vdev->dev;

	/* Wait for ibmvnic_probe() to complete. If probe is taking too long
	 * or if another reset is in progress, defer work for now. If probe
	 * eventually fails it will flush and terminate our work.
	 *
	 * Three possibilities here:
	 * 1. Adapter being removed - just return
	 * 2. Timed out on probe or another reset in progress - delay the work
	 * 3. Completed probe - perform any resets in queue
	 */
	if (adapter->state == VNIC_PROBING &&
	    !wait_for_completion_timeout(&adapter->probe_done, timeout)) {
		dev_err(dev, "Reset thread timed out on probe");
		queue_delayed_work(system_long_wq,
				   &adapter->ibmvnic_delayed_reset,
				   IBMVNIC_RESET_DELAY);
		return;
	}

	/* adapter is done with probe (i.e. state is never VNIC_PROBING now) */
	if (adapter->state == VNIC_REMOVING)
		return;

	/* ->rwi_list is stable now (no one else is removing entries) */

	/* ibmvnic_probe() may have purged the reset queue after we were
	 * scheduled to process a reset, so there may be no resets to process.
	 * Before setting the ->resetting bit though, we have to make sure
	 * that there is in fact a reset to process. Otherwise we may race
	 * with ibmvnic_open() and end up leaving the vnic down:
	 *
	 *	__ibmvnic_reset()	ibmvnic_open()
	 *	-----------------	--------------
	 *
	 *  set ->resetting bit
	 *				find ->resetting bit is set
	 *				set ->state to IBMVNIC_OPEN (i.e.
	 *				assume reset will open device)
	 *				return
	 *  find reset queue empty
	 *  return
	 *
	 *  Neither performed vnic login/open and vnic stays down
	 *
	 *  If we hold the lock and conditionally set the bit, either we
	 *  or ibmvnic_open() will complete the open.
2732 */ 2733 need_reset = false; 2734 spin_lock(&adapter->rwi_lock); 2735 if (!list_empty(&adapter->rwi_list)) { 2736 if (test_and_set_bit_lock(0, &adapter->resetting)) { 2737 queue_delayed_work(system_long_wq, 2738 &adapter->ibmvnic_delayed_reset, 2739 IBMVNIC_RESET_DELAY); 2740 } else { 2741 need_reset = true; 2742 } 2743 } 2744 spin_unlock(&adapter->rwi_lock); 2745 2746 if (!need_reset) 2747 return; 2748 2749 rwi = get_next_rwi(adapter); 2750 while (rwi) { 2751 spin_lock_irqsave(&adapter->state_lock, flags); 2752 2753 if (adapter->state == VNIC_REMOVING || 2754 adapter->state == VNIC_REMOVED) { 2755 spin_unlock_irqrestore(&adapter->state_lock, flags); 2756 kfree(rwi); 2757 rc = EBUSY; 2758 break; 2759 } 2760 2761 if (!saved_state) { 2762 reset_state = adapter->state; 2763 saved_state = true; 2764 } 2765 spin_unlock_irqrestore(&adapter->state_lock, flags); 2766 2767 if (rwi->reset_reason == VNIC_RESET_PASSIVE_INIT) { 2768 rtnl_lock(); 2769 rc = do_passive_init(adapter); 2770 rtnl_unlock(); 2771 if (!rc) 2772 netif_carrier_on(adapter->netdev); 2773 } else if (adapter->force_reset_recovery) { 2774 /* Since we are doing a hard reset now, clear the 2775 * failover_pending flag so we don't ignore any 2776 * future MOBILITY or other resets. 2777 */ 2778 adapter->failover_pending = false; 2779 2780 /* Transport event occurred during previous reset */ 2781 if (adapter->wait_for_reset) { 2782 /* Previous was CHANGE_PARAM; caller locked */ 2783 adapter->force_reset_recovery = false; 2784 rc = do_hard_reset(adapter, rwi, reset_state); 2785 } else { 2786 rtnl_lock(); 2787 adapter->force_reset_recovery = false; 2788 rc = do_hard_reset(adapter, rwi, reset_state); 2789 rtnl_unlock(); 2790 } 2791 if (rc) 2792 num_fails++; 2793 else 2794 num_fails = 0; 2795 2796 /* If auto-priority-failover is enabled we can get 2797 * back to back failovers during resets, resulting 2798 * in at least two failed resets (from high-priority 2799 * backing device to low-priority one and then back) 2800 * If resets continue to fail beyond that, give the 2801 * adapter some time to settle down before retrying. 2802 */ 2803 if (num_fails >= 3) { 2804 netdev_dbg(adapter->netdev, 2805 "[S:%s] Hard reset failed %d times, waiting 60 secs\n", 2806 adapter_state_to_string(adapter->state), 2807 num_fails); 2808 set_current_state(TASK_UNINTERRUPTIBLE); 2809 schedule_timeout(60 * HZ); 2810 } 2811 } else { 2812 rc = do_reset(adapter, rwi, reset_state); 2813 } 2814 tmprwi = rwi; 2815 adapter->last_reset_time = jiffies; 2816 2817 if (rc) 2818 netdev_dbg(adapter->netdev, "Reset failed, rc=%d\n", rc); 2819 2820 rwi = get_next_rwi(adapter); 2821 2822 /* 2823 * If there is another reset queued, free the previous rwi 2824 * and process the new reset even if previous reset failed 2825 * (the previous reset could have failed because of a fail 2826 * over for instance, so process the fail over). 2827 * 2828 * If there are no resets queued and the previous reset failed, 2829 * the adapter would be in an undefined state. So retry the 2830 * previous reset as a hard reset. 
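		 * (force_reset_recovery is set at the bottom of this loop in
		 * that case, so the retried rwi takes the do_hard_reset()
		 * path on the next iteration.)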
		 */
		if (rwi)
			kfree(tmprwi);
		else if (rc)
			rwi = tmprwi;

		if (rwi && (rwi->reset_reason == VNIC_RESET_FAILOVER ||
			    rwi->reset_reason == VNIC_RESET_MOBILITY || rc))
			adapter->force_reset_recovery = true;
	}

	if (adapter->wait_for_reset) {
		adapter->reset_done_rc = rc;
		complete(&adapter->reset_done);
	}

	clear_bit_unlock(0, &adapter->resetting);

	netdev_dbg(adapter->netdev,
		   "[S:%s FRR:%d WFR:%d] Done processing resets\n",
		   adapter_state_to_string(adapter->state),
		   adapter->force_reset_recovery,
		   adapter->wait_for_reset);
}

static void __ibmvnic_delayed_reset(struct work_struct *work)
{
	struct ibmvnic_adapter *adapter;

	adapter = container_of(work, struct ibmvnic_adapter,
			       ibmvnic_delayed_reset.work);
	__ibmvnic_reset(&adapter->ibmvnic_reset);
}

static void flush_reset_queue(struct ibmvnic_adapter *adapter)
{
	struct list_head *entry, *tmp_entry;

	if (!list_empty(&adapter->rwi_list)) {
		list_for_each_safe(entry, tmp_entry, &adapter->rwi_list) {
			list_del(entry);
			kfree(list_entry(entry, struct ibmvnic_rwi, list));
		}
	}
}

static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
			 enum ibmvnic_reset_reason reason)
{
	struct net_device *netdev = adapter->netdev;
	struct ibmvnic_rwi *rwi, *tmp;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&adapter->rwi_lock, flags);

	/* If failover is pending, don't schedule any other reset.
	 * Instead let the failover complete. If there is already a
	 * failover reset scheduled, we will detect and drop the
	 * duplicate reset when walking the ->rwi_list below.
2891 */ 2892 if (adapter->state == VNIC_REMOVING || 2893 adapter->state == VNIC_REMOVED || 2894 (adapter->failover_pending && reason != VNIC_RESET_FAILOVER)) { 2895 ret = EBUSY; 2896 netdev_dbg(netdev, "Adapter removing or pending failover, skipping reset\n"); 2897 goto err; 2898 } 2899 2900 list_for_each_entry(tmp, &adapter->rwi_list, list) { 2901 if (tmp->reset_reason == reason) { 2902 netdev_dbg(netdev, "Skipping matching reset, reason=%s\n", 2903 reset_reason_to_string(reason)); 2904 ret = EBUSY; 2905 goto err; 2906 } 2907 } 2908 2909 rwi = kzalloc(sizeof(*rwi), GFP_ATOMIC); 2910 if (!rwi) { 2911 ret = ENOMEM; 2912 goto err; 2913 } 2914 /* if we just received a transport event, 2915 * flush reset queue and process this reset 2916 */ 2917 if (adapter->force_reset_recovery) 2918 flush_reset_queue(adapter); 2919 2920 rwi->reset_reason = reason; 2921 list_add_tail(&rwi->list, &adapter->rwi_list); 2922 netdev_dbg(adapter->netdev, "Scheduling reset (reason %s)\n", 2923 reset_reason_to_string(reason)); 2924 queue_work(system_long_wq, &adapter->ibmvnic_reset); 2925 2926 ret = 0; 2927 err: 2928 /* ibmvnic_close() below can block, so drop the lock first */ 2929 spin_unlock_irqrestore(&adapter->rwi_lock, flags); 2930 2931 if (ret == ENOMEM) 2932 ibmvnic_close(netdev); 2933 2934 return -ret; 2935 } 2936 2937 static void ibmvnic_tx_timeout(struct net_device *dev, unsigned int txqueue) 2938 { 2939 struct ibmvnic_adapter *adapter = netdev_priv(dev); 2940 2941 if (test_bit(0, &adapter->resetting)) { 2942 netdev_err(adapter->netdev, 2943 "Adapter is resetting, skip timeout reset\n"); 2944 return; 2945 } 2946 /* No queuing up reset until at least 5 seconds (default watchdog val) 2947 * after last reset 2948 */ 2949 if (time_before(jiffies, (adapter->last_reset_time + dev->watchdog_timeo))) { 2950 netdev_dbg(dev, "Not yet time to tx timeout.\n"); 2951 return; 2952 } 2953 ibmvnic_reset(adapter, VNIC_RESET_TIMEOUT); 2954 } 2955 2956 static void remove_buff_from_pool(struct ibmvnic_adapter *adapter, 2957 struct ibmvnic_rx_buff *rx_buff) 2958 { 2959 struct ibmvnic_rx_pool *pool = &adapter->rx_pool[rx_buff->pool_index]; 2960 2961 rx_buff->skb = NULL; 2962 2963 pool->free_map[pool->next_alloc] = (int)(rx_buff - pool->rx_buff); 2964 pool->next_alloc = (pool->next_alloc + 1) % pool->size; 2965 2966 atomic_dec(&pool->available); 2967 } 2968 2969 static int ibmvnic_poll(struct napi_struct *napi, int budget) 2970 { 2971 struct ibmvnic_sub_crq_queue *rx_scrq; 2972 struct ibmvnic_adapter *adapter; 2973 struct net_device *netdev; 2974 int frames_processed; 2975 int scrq_num; 2976 2977 netdev = napi->dev; 2978 adapter = netdev_priv(netdev); 2979 scrq_num = (int)(napi - adapter->napi); 2980 frames_processed = 0; 2981 rx_scrq = adapter->rx_scrq[scrq_num]; 2982 2983 restart_poll: 2984 while (frames_processed < budget) { 2985 struct sk_buff *skb; 2986 struct ibmvnic_rx_buff *rx_buff; 2987 union sub_crq *next; 2988 u32 length; 2989 u16 offset; 2990 u8 flags = 0; 2991 2992 if (unlikely(test_bit(0, &adapter->resetting) && 2993 adapter->reset_reason != VNIC_RESET_NON_FATAL)) { 2994 enable_scrq_irq(adapter, rx_scrq); 2995 napi_complete_done(napi, frames_processed); 2996 return frames_processed; 2997 } 2998 2999 if (!pending_scrq(adapter, rx_scrq)) 3000 break; 3001 next = ibmvnic_next_scrq(adapter, rx_scrq); 3002 rx_buff = (struct ibmvnic_rx_buff *) 3003 be64_to_cpu(next->rx_comp.correlator); 3004 /* do error checking */ 3005 if (next->rx_comp.rc) { 3006 netdev_dbg(netdev, "rx buffer returned with rc %x\n", 3007 
be16_to_cpu(next->rx_comp.rc)); 3008 /* free the entry */ 3009 next->rx_comp.first = 0; 3010 dev_kfree_skb_any(rx_buff->skb); 3011 remove_buff_from_pool(adapter, rx_buff); 3012 continue; 3013 } else if (!rx_buff->skb) { 3014 /* free the entry */ 3015 next->rx_comp.first = 0; 3016 remove_buff_from_pool(adapter, rx_buff); 3017 continue; 3018 } 3019 3020 length = be32_to_cpu(next->rx_comp.len); 3021 offset = be16_to_cpu(next->rx_comp.off_frame_data); 3022 flags = next->rx_comp.flags; 3023 skb = rx_buff->skb; 3024 /* load long_term_buff before copying to skb */ 3025 dma_rmb(); 3026 skb_copy_to_linear_data(skb, rx_buff->data + offset, 3027 length); 3028 3029 /* VLAN Header has been stripped by the system firmware and 3030 * needs to be inserted by the driver 3031 */ 3032 if (adapter->rx_vlan_header_insertion && 3033 (flags & IBMVNIC_VLAN_STRIPPED)) 3034 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), 3035 ntohs(next->rx_comp.vlan_tci)); 3036 3037 /* free the entry */ 3038 next->rx_comp.first = 0; 3039 remove_buff_from_pool(adapter, rx_buff); 3040 3041 skb_put(skb, length); 3042 skb->protocol = eth_type_trans(skb, netdev); 3043 skb_record_rx_queue(skb, scrq_num); 3044 3045 if (flags & IBMVNIC_IP_CHKSUM_GOOD && 3046 flags & IBMVNIC_TCP_UDP_CHKSUM_GOOD) { 3047 skb->ip_summed = CHECKSUM_UNNECESSARY; 3048 } 3049 3050 length = skb->len; 3051 napi_gro_receive(napi, skb); /* send it up */ 3052 netdev->stats.rx_packets++; 3053 netdev->stats.rx_bytes += length; 3054 adapter->rx_stats_buffers[scrq_num].packets++; 3055 adapter->rx_stats_buffers[scrq_num].bytes += length; 3056 frames_processed++; 3057 } 3058 3059 if (adapter->state != VNIC_CLOSING && 3060 ((atomic_read(&adapter->rx_pool[scrq_num].available) < 3061 adapter->req_rx_add_entries_per_subcrq / 2) || 3062 frames_processed < budget)) 3063 replenish_rx_pool(adapter, &adapter->rx_pool[scrq_num]); 3064 if (frames_processed < budget) { 3065 if (napi_complete_done(napi, frames_processed)) { 3066 enable_scrq_irq(adapter, rx_scrq); 3067 if (pending_scrq(adapter, rx_scrq)) { 3068 if (napi_reschedule(napi)) { 3069 disable_scrq_irq(adapter, rx_scrq); 3070 goto restart_poll; 3071 } 3072 } 3073 } 3074 } 3075 return frames_processed; 3076 } 3077 3078 static int wait_for_reset(struct ibmvnic_adapter *adapter) 3079 { 3080 int rc, ret; 3081 3082 adapter->fallback.mtu = adapter->req_mtu; 3083 adapter->fallback.rx_queues = adapter->req_rx_queues; 3084 adapter->fallback.tx_queues = adapter->req_tx_queues; 3085 adapter->fallback.rx_entries = adapter->req_rx_add_entries_per_subcrq; 3086 adapter->fallback.tx_entries = adapter->req_tx_entries_per_subcrq; 3087 3088 reinit_completion(&adapter->reset_done); 3089 adapter->wait_for_reset = true; 3090 rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM); 3091 3092 if (rc) { 3093 ret = rc; 3094 goto out; 3095 } 3096 rc = ibmvnic_wait_for_completion(adapter, &adapter->reset_done, 60000); 3097 if (rc) { 3098 ret = -ENODEV; 3099 goto out; 3100 } 3101 3102 ret = 0; 3103 if (adapter->reset_done_rc) { 3104 ret = -EIO; 3105 adapter->desired.mtu = adapter->fallback.mtu; 3106 adapter->desired.rx_queues = adapter->fallback.rx_queues; 3107 adapter->desired.tx_queues = adapter->fallback.tx_queues; 3108 adapter->desired.rx_entries = adapter->fallback.rx_entries; 3109 adapter->desired.tx_entries = adapter->fallback.tx_entries; 3110 3111 reinit_completion(&adapter->reset_done); 3112 adapter->wait_for_reset = true; 3113 rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM); 3114 if (rc) { 3115 ret = rc; 3116 goto out; 3117 } 3118 rc = 
ibmvnic_wait_for_completion(adapter, &adapter->reset_done, 3119 60000); 3120 if (rc) { 3121 ret = -ENODEV; 3122 goto out; 3123 } 3124 } 3125 out: 3126 adapter->wait_for_reset = false; 3127 3128 return ret; 3129 } 3130 3131 static int ibmvnic_change_mtu(struct net_device *netdev, int new_mtu) 3132 { 3133 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 3134 3135 adapter->desired.mtu = new_mtu + ETH_HLEN; 3136 3137 return wait_for_reset(adapter); 3138 } 3139 3140 static netdev_features_t ibmvnic_features_check(struct sk_buff *skb, 3141 struct net_device *dev, 3142 netdev_features_t features) 3143 { 3144 /* Some backing hardware adapters can not 3145 * handle packets with a MSS less than 224 3146 * or with only one segment. 3147 */ 3148 if (skb_is_gso(skb)) { 3149 if (skb_shinfo(skb)->gso_size < 224 || 3150 skb_shinfo(skb)->gso_segs == 1) 3151 features &= ~NETIF_F_GSO_MASK; 3152 } 3153 3154 return features; 3155 } 3156 3157 static const struct net_device_ops ibmvnic_netdev_ops = { 3158 .ndo_open = ibmvnic_open, 3159 .ndo_stop = ibmvnic_close, 3160 .ndo_start_xmit = ibmvnic_xmit, 3161 .ndo_set_rx_mode = ibmvnic_set_multi, 3162 .ndo_set_mac_address = ibmvnic_set_mac, 3163 .ndo_validate_addr = eth_validate_addr, 3164 .ndo_tx_timeout = ibmvnic_tx_timeout, 3165 .ndo_change_mtu = ibmvnic_change_mtu, 3166 .ndo_features_check = ibmvnic_features_check, 3167 }; 3168 3169 /* ethtool functions */ 3170 3171 static int ibmvnic_get_link_ksettings(struct net_device *netdev, 3172 struct ethtool_link_ksettings *cmd) 3173 { 3174 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 3175 int rc; 3176 3177 rc = send_query_phys_parms(adapter); 3178 if (rc) { 3179 adapter->speed = SPEED_UNKNOWN; 3180 adapter->duplex = DUPLEX_UNKNOWN; 3181 } 3182 cmd->base.speed = adapter->speed; 3183 cmd->base.duplex = adapter->duplex; 3184 cmd->base.port = PORT_FIBRE; 3185 cmd->base.phy_address = 0; 3186 cmd->base.autoneg = AUTONEG_ENABLE; 3187 3188 return 0; 3189 } 3190 3191 static void ibmvnic_get_drvinfo(struct net_device *netdev, 3192 struct ethtool_drvinfo *info) 3193 { 3194 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 3195 3196 strscpy(info->driver, ibmvnic_driver_name, sizeof(info->driver)); 3197 strscpy(info->version, IBMVNIC_DRIVER_VERSION, sizeof(info->version)); 3198 strscpy(info->fw_version, adapter->fw_version, 3199 sizeof(info->fw_version)); 3200 } 3201 3202 static u32 ibmvnic_get_msglevel(struct net_device *netdev) 3203 { 3204 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 3205 3206 return adapter->msg_enable; 3207 } 3208 3209 static void ibmvnic_set_msglevel(struct net_device *netdev, u32 data) 3210 { 3211 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 3212 3213 adapter->msg_enable = data; 3214 } 3215 3216 static u32 ibmvnic_get_link(struct net_device *netdev) 3217 { 3218 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 3219 3220 /* Don't need to send a query because we request a logical link up at 3221 * init and then we wait for link state indications 3222 */ 3223 return adapter->logical_link_state; 3224 } 3225 3226 static void ibmvnic_get_ringparam(struct net_device *netdev, 3227 struct ethtool_ringparam *ring, 3228 struct kernel_ethtool_ringparam *kernel_ring, 3229 struct netlink_ext_ack *extack) 3230 { 3231 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 3232 3233 if (adapter->priv_flags & IBMVNIC_USE_SERVER_MAXES) { 3234 ring->rx_max_pending = adapter->max_rx_add_entries_per_subcrq; 3235 ring->tx_max_pending = adapter->max_tx_entries_per_subcrq; 3236 } else { 3237 
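		/* Without IBMVNIC_USE_SERVER_MAXES, advertise the driver's
		 * fixed IBMVNIC_MAX_QUEUE_SZ limit rather than the
		 * server-provided maximums.
		 */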
ring->rx_max_pending = IBMVNIC_MAX_QUEUE_SZ; 3238 ring->tx_max_pending = IBMVNIC_MAX_QUEUE_SZ; 3239 } 3240 ring->rx_mini_max_pending = 0; 3241 ring->rx_jumbo_max_pending = 0; 3242 ring->rx_pending = adapter->req_rx_add_entries_per_subcrq; 3243 ring->tx_pending = adapter->req_tx_entries_per_subcrq; 3244 ring->rx_mini_pending = 0; 3245 ring->rx_jumbo_pending = 0; 3246 } 3247 3248 static int ibmvnic_set_ringparam(struct net_device *netdev, 3249 struct ethtool_ringparam *ring, 3250 struct kernel_ethtool_ringparam *kernel_ring, 3251 struct netlink_ext_ack *extack) 3252 { 3253 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 3254 int ret; 3255 3256 ret = 0; 3257 adapter->desired.rx_entries = ring->rx_pending; 3258 adapter->desired.tx_entries = ring->tx_pending; 3259 3260 ret = wait_for_reset(adapter); 3261 3262 if (!ret && 3263 (adapter->req_rx_add_entries_per_subcrq != ring->rx_pending || 3264 adapter->req_tx_entries_per_subcrq != ring->tx_pending)) 3265 netdev_info(netdev, 3266 "Could not match full ringsize request. Requested: RX %d, TX %d; Allowed: RX %llu, TX %llu\n", 3267 ring->rx_pending, ring->tx_pending, 3268 adapter->req_rx_add_entries_per_subcrq, 3269 adapter->req_tx_entries_per_subcrq); 3270 return ret; 3271 } 3272 3273 static void ibmvnic_get_channels(struct net_device *netdev, 3274 struct ethtool_channels *channels) 3275 { 3276 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 3277 3278 if (adapter->priv_flags & IBMVNIC_USE_SERVER_MAXES) { 3279 channels->max_rx = adapter->max_rx_queues; 3280 channels->max_tx = adapter->max_tx_queues; 3281 } else { 3282 channels->max_rx = IBMVNIC_MAX_QUEUES; 3283 channels->max_tx = IBMVNIC_MAX_QUEUES; 3284 } 3285 3286 channels->max_other = 0; 3287 channels->max_combined = 0; 3288 channels->rx_count = adapter->req_rx_queues; 3289 channels->tx_count = adapter->req_tx_queues; 3290 channels->other_count = 0; 3291 channels->combined_count = 0; 3292 } 3293 3294 static int ibmvnic_set_channels(struct net_device *netdev, 3295 struct ethtool_channels *channels) 3296 { 3297 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 3298 int ret; 3299 3300 ret = 0; 3301 adapter->desired.rx_queues = channels->rx_count; 3302 adapter->desired.tx_queues = channels->tx_count; 3303 3304 ret = wait_for_reset(adapter); 3305 3306 if (!ret && 3307 (adapter->req_rx_queues != channels->rx_count || 3308 adapter->req_tx_queues != channels->tx_count)) 3309 netdev_info(netdev, 3310 "Could not match full channels request. 
Requested: RX %d, TX %d; Allowed: RX %llu, TX %llu\n", 3311 channels->rx_count, channels->tx_count, 3312 adapter->req_rx_queues, adapter->req_tx_queues); 3313 return ret; 3314 } 3315 3316 static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data) 3317 { 3318 struct ibmvnic_adapter *adapter = netdev_priv(dev); 3319 int i; 3320 3321 switch (stringset) { 3322 case ETH_SS_STATS: 3323 for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); 3324 i++, data += ETH_GSTRING_LEN) 3325 memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN); 3326 3327 for (i = 0; i < adapter->req_tx_queues; i++) { 3328 snprintf(data, ETH_GSTRING_LEN, "tx%d_packets", i); 3329 data += ETH_GSTRING_LEN; 3330 3331 snprintf(data, ETH_GSTRING_LEN, "tx%d_bytes", i); 3332 data += ETH_GSTRING_LEN; 3333 3334 snprintf(data, ETH_GSTRING_LEN, 3335 "tx%d_dropped_packets", i); 3336 data += ETH_GSTRING_LEN; 3337 } 3338 3339 for (i = 0; i < adapter->req_rx_queues; i++) { 3340 snprintf(data, ETH_GSTRING_LEN, "rx%d_packets", i); 3341 data += ETH_GSTRING_LEN; 3342 3343 snprintf(data, ETH_GSTRING_LEN, "rx%d_bytes", i); 3344 data += ETH_GSTRING_LEN; 3345 3346 snprintf(data, ETH_GSTRING_LEN, "rx%d_interrupts", i); 3347 data += ETH_GSTRING_LEN; 3348 } 3349 break; 3350 3351 case ETH_SS_PRIV_FLAGS: 3352 for (i = 0; i < ARRAY_SIZE(ibmvnic_priv_flags); i++) 3353 strcpy(data + i * ETH_GSTRING_LEN, 3354 ibmvnic_priv_flags[i]); 3355 break; 3356 default: 3357 return; 3358 } 3359 } 3360 3361 static int ibmvnic_get_sset_count(struct net_device *dev, int sset) 3362 { 3363 struct ibmvnic_adapter *adapter = netdev_priv(dev); 3364 3365 switch (sset) { 3366 case ETH_SS_STATS: 3367 return ARRAY_SIZE(ibmvnic_stats) + 3368 adapter->req_tx_queues * NUM_TX_STATS + 3369 adapter->req_rx_queues * NUM_RX_STATS; 3370 case ETH_SS_PRIV_FLAGS: 3371 return ARRAY_SIZE(ibmvnic_priv_flags); 3372 default: 3373 return -EOPNOTSUPP; 3374 } 3375 } 3376 3377 static void ibmvnic_get_ethtool_stats(struct net_device *dev, 3378 struct ethtool_stats *stats, u64 *data) 3379 { 3380 struct ibmvnic_adapter *adapter = netdev_priv(dev); 3381 union ibmvnic_crq crq; 3382 int i, j; 3383 int rc; 3384 3385 memset(&crq, 0, sizeof(crq)); 3386 crq.request_statistics.first = IBMVNIC_CRQ_CMD; 3387 crq.request_statistics.cmd = REQUEST_STATISTICS; 3388 crq.request_statistics.ioba = cpu_to_be32(adapter->stats_token); 3389 crq.request_statistics.len = 3390 cpu_to_be32(sizeof(struct ibmvnic_statistics)); 3391 3392 /* Wait for data to be written */ 3393 reinit_completion(&adapter->stats_done); 3394 rc = ibmvnic_send_crq(adapter, &crq); 3395 if (rc) 3396 return; 3397 rc = ibmvnic_wait_for_completion(adapter, &adapter->stats_done, 10000); 3398 if (rc) 3399 return; 3400 3401 for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++) 3402 data[i] = be64_to_cpu(IBMVNIC_GET_STAT 3403 (adapter, ibmvnic_stats[i].offset)); 3404 3405 for (j = 0; j < adapter->req_tx_queues; j++) { 3406 data[i] = adapter->tx_stats_buffers[j].packets; 3407 i++; 3408 data[i] = adapter->tx_stats_buffers[j].bytes; 3409 i++; 3410 data[i] = adapter->tx_stats_buffers[j].dropped_packets; 3411 i++; 3412 } 3413 3414 for (j = 0; j < adapter->req_rx_queues; j++) { 3415 data[i] = adapter->rx_stats_buffers[j].packets; 3416 i++; 3417 data[i] = adapter->rx_stats_buffers[j].bytes; 3418 i++; 3419 data[i] = adapter->rx_stats_buffers[j].interrupts; 3420 i++; 3421 } 3422 } 3423 3424 static u32 ibmvnic_get_priv_flags(struct net_device *netdev) 3425 { 3426 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 3427 3428 return adapter->priv_flags; 3429 } 3430 
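/* Illustrative note rather than driver code: the single private flag
 * selects whether the ethtool ring/channel maximums above report the
 * server-advertised values or the driver's IBMVNIC_MAX_* constants.
 * Assuming the flag string defined in ibmvnic_priv_flags[] is
 * "use-server-maxes" and the interface is eth0, it can be toggled with:
 *
 *   ethtool --set-priv-flags eth0 use-server-maxes on
 */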
3431 static int ibmvnic_set_priv_flags(struct net_device *netdev, u32 flags) 3432 { 3433 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 3434 bool which_maxes = !!(flags & IBMVNIC_USE_SERVER_MAXES); 3435 3436 if (which_maxes) 3437 adapter->priv_flags |= IBMVNIC_USE_SERVER_MAXES; 3438 else 3439 adapter->priv_flags &= ~IBMVNIC_USE_SERVER_MAXES; 3440 3441 return 0; 3442 } 3443 3444 static const struct ethtool_ops ibmvnic_ethtool_ops = { 3445 .get_drvinfo = ibmvnic_get_drvinfo, 3446 .get_msglevel = ibmvnic_get_msglevel, 3447 .set_msglevel = ibmvnic_set_msglevel, 3448 .get_link = ibmvnic_get_link, 3449 .get_ringparam = ibmvnic_get_ringparam, 3450 .set_ringparam = ibmvnic_set_ringparam, 3451 .get_channels = ibmvnic_get_channels, 3452 .set_channels = ibmvnic_set_channels, 3453 .get_strings = ibmvnic_get_strings, 3454 .get_sset_count = ibmvnic_get_sset_count, 3455 .get_ethtool_stats = ibmvnic_get_ethtool_stats, 3456 .get_link_ksettings = ibmvnic_get_link_ksettings, 3457 .get_priv_flags = ibmvnic_get_priv_flags, 3458 .set_priv_flags = ibmvnic_set_priv_flags, 3459 }; 3460 3461 /* Routines for managing CRQs/sCRQs */ 3462 3463 static int reset_one_sub_crq_queue(struct ibmvnic_adapter *adapter, 3464 struct ibmvnic_sub_crq_queue *scrq) 3465 { 3466 int rc; 3467 3468 if (!scrq) { 3469 netdev_dbg(adapter->netdev, "Invalid scrq reset.\n"); 3470 return -EINVAL; 3471 } 3472 3473 if (scrq->irq) { 3474 free_irq(scrq->irq, scrq); 3475 irq_dispose_mapping(scrq->irq); 3476 scrq->irq = 0; 3477 } 3478 3479 if (scrq->msgs) { 3480 memset(scrq->msgs, 0, 4 * PAGE_SIZE); 3481 atomic_set(&scrq->used, 0); 3482 scrq->cur = 0; 3483 scrq->ind_buf.index = 0; 3484 } else { 3485 netdev_dbg(adapter->netdev, "Invalid scrq reset\n"); 3486 return -EINVAL; 3487 } 3488 3489 rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token, 3490 4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq); 3491 return rc; 3492 } 3493 3494 static int reset_sub_crq_queues(struct ibmvnic_adapter *adapter) 3495 { 3496 int i, rc; 3497 3498 if (!adapter->tx_scrq || !adapter->rx_scrq) 3499 return -EINVAL; 3500 3501 for (i = 0; i < adapter->req_tx_queues; i++) { 3502 netdev_dbg(adapter->netdev, "Re-setting tx_scrq[%d]\n", i); 3503 rc = reset_one_sub_crq_queue(adapter, adapter->tx_scrq[i]); 3504 if (rc) 3505 return rc; 3506 } 3507 3508 for (i = 0; i < adapter->req_rx_queues; i++) { 3509 netdev_dbg(adapter->netdev, "Re-setting rx_scrq[%d]\n", i); 3510 rc = reset_one_sub_crq_queue(adapter, adapter->rx_scrq[i]); 3511 if (rc) 3512 return rc; 3513 } 3514 3515 return rc; 3516 } 3517 3518 static void release_sub_crq_queue(struct ibmvnic_adapter *adapter, 3519 struct ibmvnic_sub_crq_queue *scrq, 3520 bool do_h_free) 3521 { 3522 struct device *dev = &adapter->vdev->dev; 3523 long rc; 3524 3525 netdev_dbg(adapter->netdev, "Releasing sub-CRQ\n"); 3526 3527 if (do_h_free) { 3528 /* Close the sub-crqs */ 3529 do { 3530 rc = plpar_hcall_norets(H_FREE_SUB_CRQ, 3531 adapter->vdev->unit_address, 3532 scrq->crq_num); 3533 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc)); 3534 3535 if (rc) { 3536 netdev_err(adapter->netdev, 3537 "Failed to release sub-CRQ %16lx, rc = %ld\n", 3538 scrq->crq_num, rc); 3539 } 3540 } 3541 3542 dma_free_coherent(dev, 3543 IBMVNIC_IND_ARR_SZ, 3544 scrq->ind_buf.indir_arr, 3545 scrq->ind_buf.indir_dma); 3546 3547 dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE, 3548 DMA_BIDIRECTIONAL); 3549 free_pages((unsigned long)scrq->msgs, 2); 3550 kfree(scrq); 3551 } 3552 3553 static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter 
3554 *adapter) 3555 { 3556 struct device *dev = &adapter->vdev->dev; 3557 struct ibmvnic_sub_crq_queue *scrq; 3558 int rc; 3559 3560 scrq = kzalloc(sizeof(*scrq), GFP_KERNEL); 3561 if (!scrq) 3562 return NULL; 3563 3564 scrq->msgs = 3565 (union sub_crq *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 2); 3566 if (!scrq->msgs) { 3567 dev_warn(dev, "Couldn't allocate crq queue messages page\n"); 3568 goto zero_page_failed; 3569 } 3570 3571 scrq->msg_token = dma_map_single(dev, scrq->msgs, 4 * PAGE_SIZE, 3572 DMA_BIDIRECTIONAL); 3573 if (dma_mapping_error(dev, scrq->msg_token)) { 3574 dev_warn(dev, "Couldn't map crq queue messages page\n"); 3575 goto map_failed; 3576 } 3577 3578 rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token, 3579 4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq); 3580 3581 if (rc == H_RESOURCE) 3582 rc = ibmvnic_reset_crq(adapter); 3583 3584 if (rc == H_CLOSED) { 3585 dev_warn(dev, "Partner adapter not ready, waiting.\n"); 3586 } else if (rc) { 3587 dev_warn(dev, "Error %d registering sub-crq\n", rc); 3588 goto reg_failed; 3589 } 3590 3591 scrq->adapter = adapter; 3592 scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs); 3593 scrq->ind_buf.index = 0; 3594 3595 scrq->ind_buf.indir_arr = 3596 dma_alloc_coherent(dev, 3597 IBMVNIC_IND_ARR_SZ, 3598 &scrq->ind_buf.indir_dma, 3599 GFP_KERNEL); 3600 3601 if (!scrq->ind_buf.indir_arr) 3602 goto indir_failed; 3603 3604 spin_lock_init(&scrq->lock); 3605 3606 netdev_dbg(adapter->netdev, 3607 "sub-crq initialized, num %lx, hw_irq=%lx, irq=%x\n", 3608 scrq->crq_num, scrq->hw_irq, scrq->irq); 3609 3610 return scrq; 3611 3612 indir_failed: 3613 do { 3614 rc = plpar_hcall_norets(H_FREE_SUB_CRQ, 3615 adapter->vdev->unit_address, 3616 scrq->crq_num); 3617 } while (rc == H_BUSY || rc == H_IS_LONG_BUSY(rc)); 3618 reg_failed: 3619 dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE, 3620 DMA_BIDIRECTIONAL); 3621 map_failed: 3622 free_pages((unsigned long)scrq->msgs, 2); 3623 zero_page_failed: 3624 kfree(scrq); 3625 3626 return NULL; 3627 } 3628 3629 static void release_sub_crqs(struct ibmvnic_adapter *adapter, bool do_h_free) 3630 { 3631 int i; 3632 3633 if (adapter->tx_scrq) { 3634 for (i = 0; i < adapter->num_active_tx_scrqs; i++) { 3635 if (!adapter->tx_scrq[i]) 3636 continue; 3637 3638 netdev_dbg(adapter->netdev, "Releasing tx_scrq[%d]\n", 3639 i); 3640 ibmvnic_tx_scrq_clean_buffer(adapter, adapter->tx_scrq[i]); 3641 if (adapter->tx_scrq[i]->irq) { 3642 free_irq(adapter->tx_scrq[i]->irq, 3643 adapter->tx_scrq[i]); 3644 irq_dispose_mapping(adapter->tx_scrq[i]->irq); 3645 adapter->tx_scrq[i]->irq = 0; 3646 } 3647 3648 release_sub_crq_queue(adapter, adapter->tx_scrq[i], 3649 do_h_free); 3650 } 3651 3652 kfree(adapter->tx_scrq); 3653 adapter->tx_scrq = NULL; 3654 adapter->num_active_tx_scrqs = 0; 3655 } 3656 3657 if (adapter->rx_scrq) { 3658 for (i = 0; i < adapter->num_active_rx_scrqs; i++) { 3659 if (!adapter->rx_scrq[i]) 3660 continue; 3661 3662 netdev_dbg(adapter->netdev, "Releasing rx_scrq[%d]\n", 3663 i); 3664 if (adapter->rx_scrq[i]->irq) { 3665 free_irq(adapter->rx_scrq[i]->irq, 3666 adapter->rx_scrq[i]); 3667 irq_dispose_mapping(adapter->rx_scrq[i]->irq); 3668 adapter->rx_scrq[i]->irq = 0; 3669 } 3670 3671 release_sub_crq_queue(adapter, adapter->rx_scrq[i], 3672 do_h_free); 3673 } 3674 3675 kfree(adapter->rx_scrq); 3676 adapter->rx_scrq = NULL; 3677 adapter->num_active_rx_scrqs = 0; 3678 } 3679 } 3680 3681 static int disable_scrq_irq(struct ibmvnic_adapter *adapter, 3682 struct ibmvnic_sub_crq_queue *scrq) 3683 { 3684 struct device 
*dev = &adapter->vdev->dev; 3685 unsigned long rc; 3686 3687 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address, 3688 H_DISABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0); 3689 if (rc) 3690 dev_err(dev, "Couldn't disable scrq irq 0x%lx. rc=%ld\n", 3691 scrq->hw_irq, rc); 3692 return rc; 3693 } 3694 3695 /* We can not use the IRQ chip EOI handler because that has the 3696 * unintended effect of changing the interrupt priority. 3697 */ 3698 static void ibmvnic_xics_eoi(struct device *dev, struct ibmvnic_sub_crq_queue *scrq) 3699 { 3700 u64 val = 0xff000000 | scrq->hw_irq; 3701 unsigned long rc; 3702 3703 rc = plpar_hcall_norets(H_EOI, val); 3704 if (rc) 3705 dev_err(dev, "H_EOI FAILED irq 0x%llx. rc=%ld\n", val, rc); 3706 } 3707 3708 /* Due to a firmware bug, the hypervisor can send an interrupt to a 3709 * transmit or receive queue just prior to a partition migration. 3710 * Force an EOI after migration. 3711 */ 3712 static void ibmvnic_clear_pending_interrupt(struct device *dev, 3713 struct ibmvnic_sub_crq_queue *scrq) 3714 { 3715 if (!xive_enabled()) 3716 ibmvnic_xics_eoi(dev, scrq); 3717 } 3718 3719 static int enable_scrq_irq(struct ibmvnic_adapter *adapter, 3720 struct ibmvnic_sub_crq_queue *scrq) 3721 { 3722 struct device *dev = &adapter->vdev->dev; 3723 unsigned long rc; 3724 3725 if (scrq->hw_irq > 0x100000000ULL) { 3726 dev_err(dev, "bad hw_irq = %lx\n", scrq->hw_irq); 3727 return 1; 3728 } 3729 3730 if (test_bit(0, &adapter->resetting) && 3731 adapter->reset_reason == VNIC_RESET_MOBILITY) { 3732 ibmvnic_clear_pending_interrupt(dev, scrq); 3733 } 3734 3735 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address, 3736 H_ENABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0); 3737 if (rc) 3738 dev_err(dev, "Couldn't enable scrq irq 0x%lx. rc=%ld\n", 3739 scrq->hw_irq, rc); 3740 return rc; 3741 } 3742 3743 static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter, 3744 struct ibmvnic_sub_crq_queue *scrq) 3745 { 3746 struct device *dev = &adapter->vdev->dev; 3747 struct ibmvnic_tx_pool *tx_pool; 3748 struct ibmvnic_tx_buff *txbuff; 3749 struct netdev_queue *txq; 3750 union sub_crq *next; 3751 int index; 3752 int i; 3753 3754 restart_loop: 3755 while (pending_scrq(adapter, scrq)) { 3756 unsigned int pool = scrq->pool_index; 3757 int num_entries = 0; 3758 int total_bytes = 0; 3759 int num_packets = 0; 3760 3761 next = ibmvnic_next_scrq(adapter, scrq); 3762 for (i = 0; i < next->tx_comp.num_comps; i++) { 3763 index = be32_to_cpu(next->tx_comp.correlators[i]); 3764 if (index & IBMVNIC_TSO_POOL_MASK) { 3765 tx_pool = &adapter->tso_pool[pool]; 3766 index &= ~IBMVNIC_TSO_POOL_MASK; 3767 } else { 3768 tx_pool = &adapter->tx_pool[pool]; 3769 } 3770 3771 txbuff = &tx_pool->tx_buff[index]; 3772 num_packets++; 3773 num_entries += txbuff->num_entries; 3774 if (txbuff->skb) { 3775 total_bytes += txbuff->skb->len; 3776 if (next->tx_comp.rcs[i]) { 3777 dev_err(dev, "tx error %x\n", 3778 next->tx_comp.rcs[i]); 3779 dev_kfree_skb_irq(txbuff->skb); 3780 } else { 3781 dev_consume_skb_irq(txbuff->skb); 3782 } 3783 txbuff->skb = NULL; 3784 } else { 3785 netdev_warn(adapter->netdev, 3786 "TX completion received with NULL socket buffer\n"); 3787 } 3788 tx_pool->free_map[tx_pool->producer_index] = index; 3789 tx_pool->producer_index = 3790 (tx_pool->producer_index + 1) % 3791 tx_pool->num_buffers; 3792 } 3793 /* remove tx_comp scrq*/ 3794 next->tx_comp.first = 0; 3795 3796 txq = netdev_get_tx_queue(adapter->netdev, scrq->pool_index); 3797 netdev_tx_completed_queue(txq, num_packets, total_bytes); 3798 3799 if 
(atomic_sub_return(num_entries, &scrq->used) <= 3800 (adapter->req_tx_entries_per_subcrq / 2) && 3801 __netif_subqueue_stopped(adapter->netdev, 3802 scrq->pool_index)) { 3803 rcu_read_lock(); 3804 if (adapter->tx_queues_active) { 3805 netif_wake_subqueue(adapter->netdev, 3806 scrq->pool_index); 3807 netdev_dbg(adapter->netdev, 3808 "Started queue %d\n", 3809 scrq->pool_index); 3810 } 3811 rcu_read_unlock(); 3812 } 3813 } 3814 3815 enable_scrq_irq(adapter, scrq); 3816 3817 if (pending_scrq(adapter, scrq)) { 3818 disable_scrq_irq(adapter, scrq); 3819 goto restart_loop; 3820 } 3821 3822 return 0; 3823 } 3824 3825 static irqreturn_t ibmvnic_interrupt_tx(int irq, void *instance) 3826 { 3827 struct ibmvnic_sub_crq_queue *scrq = instance; 3828 struct ibmvnic_adapter *adapter = scrq->adapter; 3829 3830 disable_scrq_irq(adapter, scrq); 3831 ibmvnic_complete_tx(adapter, scrq); 3832 3833 return IRQ_HANDLED; 3834 } 3835 3836 static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance) 3837 { 3838 struct ibmvnic_sub_crq_queue *scrq = instance; 3839 struct ibmvnic_adapter *adapter = scrq->adapter; 3840 3841 /* When booting a kdump kernel we can hit pending interrupts 3842 * prior to completing driver initialization. 3843 */ 3844 if (unlikely(adapter->state != VNIC_OPEN)) 3845 return IRQ_NONE; 3846 3847 adapter->rx_stats_buffers[scrq->scrq_num].interrupts++; 3848 3849 if (napi_schedule_prep(&adapter->napi[scrq->scrq_num])) { 3850 disable_scrq_irq(adapter, scrq); 3851 __napi_schedule(&adapter->napi[scrq->scrq_num]); 3852 } 3853 3854 return IRQ_HANDLED; 3855 } 3856 3857 static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter) 3858 { 3859 struct device *dev = &adapter->vdev->dev; 3860 struct ibmvnic_sub_crq_queue *scrq; 3861 int i = 0, j = 0; 3862 int rc = 0; 3863 3864 for (i = 0; i < adapter->req_tx_queues; i++) { 3865 netdev_dbg(adapter->netdev, "Initializing tx_scrq[%d] irq\n", 3866 i); 3867 scrq = adapter->tx_scrq[i]; 3868 scrq->irq = irq_create_mapping(NULL, scrq->hw_irq); 3869 3870 if (!scrq->irq) { 3871 rc = -EINVAL; 3872 dev_err(dev, "Error mapping irq\n"); 3873 goto req_tx_irq_failed; 3874 } 3875 3876 snprintf(scrq->name, sizeof(scrq->name), "ibmvnic-%x-tx%d", 3877 adapter->vdev->unit_address, i); 3878 rc = request_irq(scrq->irq, ibmvnic_interrupt_tx, 3879 0, scrq->name, scrq); 3880 3881 if (rc) { 3882 dev_err(dev, "Couldn't register tx irq 0x%x. rc=%d\n", 3883 scrq->irq, rc); 3884 irq_dispose_mapping(scrq->irq); 3885 goto req_tx_irq_failed; 3886 } 3887 } 3888 3889 for (i = 0; i < adapter->req_rx_queues; i++) { 3890 netdev_dbg(adapter->netdev, "Initializing rx_scrq[%d] irq\n", 3891 i); 3892 scrq = adapter->rx_scrq[i]; 3893 scrq->irq = irq_create_mapping(NULL, scrq->hw_irq); 3894 if (!scrq->irq) { 3895 rc = -EINVAL; 3896 dev_err(dev, "Error mapping irq\n"); 3897 goto req_rx_irq_failed; 3898 } 3899 snprintf(scrq->name, sizeof(scrq->name), "ibmvnic-%x-rx%d", 3900 adapter->vdev->unit_address, i); 3901 rc = request_irq(scrq->irq, ibmvnic_interrupt_rx, 3902 0, scrq->name, scrq); 3903 if (rc) { 3904 dev_err(dev, "Couldn't register rx irq 0x%x. 
rc=%d\n", 3905 scrq->irq, rc); 3906 irq_dispose_mapping(scrq->irq); 3907 goto req_rx_irq_failed; 3908 } 3909 } 3910 return rc; 3911 3912 req_rx_irq_failed: 3913 for (j = 0; j < i; j++) { 3914 free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]); 3915 irq_dispose_mapping(adapter->rx_scrq[j]->irq); 3916 } 3917 i = adapter->req_tx_queues; 3918 req_tx_irq_failed: 3919 for (j = 0; j < i; j++) { 3920 free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]); 3921 irq_dispose_mapping(adapter->tx_scrq[j]->irq); 3922 } 3923 release_sub_crqs(adapter, 1); 3924 return rc; 3925 } 3926 3927 static int init_sub_crqs(struct ibmvnic_adapter *adapter) 3928 { 3929 struct device *dev = &adapter->vdev->dev; 3930 struct ibmvnic_sub_crq_queue **allqueues; 3931 int registered_queues = 0; 3932 int total_queues; 3933 int more = 0; 3934 int i; 3935 3936 total_queues = adapter->req_tx_queues + adapter->req_rx_queues; 3937 3938 allqueues = kcalloc(total_queues, sizeof(*allqueues), GFP_KERNEL); 3939 if (!allqueues) 3940 return -ENOMEM; 3941 3942 for (i = 0; i < total_queues; i++) { 3943 allqueues[i] = init_sub_crq_queue(adapter); 3944 if (!allqueues[i]) { 3945 dev_warn(dev, "Couldn't allocate all sub-crqs\n"); 3946 break; 3947 } 3948 registered_queues++; 3949 } 3950 3951 /* Make sure we were able to register the minimum number of queues */ 3952 if (registered_queues < 3953 adapter->min_tx_queues + adapter->min_rx_queues) { 3954 dev_err(dev, "Fatal: Couldn't init min number of sub-crqs\n"); 3955 goto tx_failed; 3956 } 3957 3958 /* Distribute the failed allocated queues*/ 3959 for (i = 0; i < total_queues - registered_queues + more ; i++) { 3960 netdev_dbg(adapter->netdev, "Reducing number of queues\n"); 3961 switch (i % 3) { 3962 case 0: 3963 if (adapter->req_rx_queues > adapter->min_rx_queues) 3964 adapter->req_rx_queues--; 3965 else 3966 more++; 3967 break; 3968 case 1: 3969 if (adapter->req_tx_queues > adapter->min_tx_queues) 3970 adapter->req_tx_queues--; 3971 else 3972 more++; 3973 break; 3974 } 3975 } 3976 3977 adapter->tx_scrq = kcalloc(adapter->req_tx_queues, 3978 sizeof(*adapter->tx_scrq), GFP_KERNEL); 3979 if (!adapter->tx_scrq) 3980 goto tx_failed; 3981 3982 for (i = 0; i < adapter->req_tx_queues; i++) { 3983 adapter->tx_scrq[i] = allqueues[i]; 3984 adapter->tx_scrq[i]->pool_index = i; 3985 adapter->num_active_tx_scrqs++; 3986 } 3987 3988 adapter->rx_scrq = kcalloc(adapter->req_rx_queues, 3989 sizeof(*adapter->rx_scrq), GFP_KERNEL); 3990 if (!adapter->rx_scrq) 3991 goto rx_failed; 3992 3993 for (i = 0; i < adapter->req_rx_queues; i++) { 3994 adapter->rx_scrq[i] = allqueues[i + adapter->req_tx_queues]; 3995 adapter->rx_scrq[i]->scrq_num = i; 3996 adapter->num_active_rx_scrqs++; 3997 } 3998 3999 kfree(allqueues); 4000 return 0; 4001 4002 rx_failed: 4003 kfree(adapter->tx_scrq); 4004 adapter->tx_scrq = NULL; 4005 tx_failed: 4006 for (i = 0; i < registered_queues; i++) 4007 release_sub_crq_queue(adapter, allqueues[i], 1); 4008 kfree(allqueues); 4009 return -ENOMEM; 4010 } 4011 4012 static void send_request_cap(struct ibmvnic_adapter *adapter, int retry) 4013 { 4014 struct device *dev = &adapter->vdev->dev; 4015 union ibmvnic_crq crq; 4016 int max_entries; 4017 int cap_reqs; 4018 4019 /* We send out 6 or 7 REQUEST_CAPABILITY CRQs below (depending on 4020 * the PROMISC flag). Initialize this count upfront. When the tasklet 4021 * receives a response to all of these, it will send the next protocol 4022 * message (QUERY_IP_OFFLOAD). 
4023 */ 4024 if (!(adapter->netdev->flags & IFF_PROMISC) || 4025 adapter->promisc_supported) 4026 cap_reqs = 7; 4027 else 4028 cap_reqs = 6; 4029 4030 if (!retry) { 4031 /* Sub-CRQ entries are 32 byte long */ 4032 int entries_page = 4 * PAGE_SIZE / (sizeof(u64) * 4); 4033 4034 atomic_set(&adapter->running_cap_crqs, cap_reqs); 4035 4036 if (adapter->min_tx_entries_per_subcrq > entries_page || 4037 adapter->min_rx_add_entries_per_subcrq > entries_page) { 4038 dev_err(dev, "Fatal, invalid entries per sub-crq\n"); 4039 return; 4040 } 4041 4042 if (adapter->desired.mtu) 4043 adapter->req_mtu = adapter->desired.mtu; 4044 else 4045 adapter->req_mtu = adapter->netdev->mtu + ETH_HLEN; 4046 4047 if (!adapter->desired.tx_entries) 4048 adapter->desired.tx_entries = 4049 adapter->max_tx_entries_per_subcrq; 4050 if (!adapter->desired.rx_entries) 4051 adapter->desired.rx_entries = 4052 adapter->max_rx_add_entries_per_subcrq; 4053 4054 max_entries = IBMVNIC_MAX_LTB_SIZE / 4055 (adapter->req_mtu + IBMVNIC_BUFFER_HLEN); 4056 4057 if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) * 4058 adapter->desired.tx_entries > IBMVNIC_MAX_LTB_SIZE) { 4059 adapter->desired.tx_entries = max_entries; 4060 } 4061 4062 if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) * 4063 adapter->desired.rx_entries > IBMVNIC_MAX_LTB_SIZE) { 4064 adapter->desired.rx_entries = max_entries; 4065 } 4066 4067 if (adapter->desired.tx_entries) 4068 adapter->req_tx_entries_per_subcrq = 4069 adapter->desired.tx_entries; 4070 else 4071 adapter->req_tx_entries_per_subcrq = 4072 adapter->max_tx_entries_per_subcrq; 4073 4074 if (adapter->desired.rx_entries) 4075 adapter->req_rx_add_entries_per_subcrq = 4076 adapter->desired.rx_entries; 4077 else 4078 adapter->req_rx_add_entries_per_subcrq = 4079 adapter->max_rx_add_entries_per_subcrq; 4080 4081 if (adapter->desired.tx_queues) 4082 adapter->req_tx_queues = 4083 adapter->desired.tx_queues; 4084 else 4085 adapter->req_tx_queues = 4086 adapter->opt_tx_comp_sub_queues; 4087 4088 if (adapter->desired.rx_queues) 4089 adapter->req_rx_queues = 4090 adapter->desired.rx_queues; 4091 else 4092 adapter->req_rx_queues = 4093 adapter->opt_rx_comp_queues; 4094 4095 adapter->req_rx_add_queues = adapter->max_rx_add_queues; 4096 } else { 4097 atomic_add(cap_reqs, &adapter->running_cap_crqs); 4098 } 4099 memset(&crq, 0, sizeof(crq)); 4100 crq.request_capability.first = IBMVNIC_CRQ_CMD; 4101 crq.request_capability.cmd = REQUEST_CAPABILITY; 4102 4103 crq.request_capability.capability = cpu_to_be16(REQ_TX_QUEUES); 4104 crq.request_capability.number = cpu_to_be64(adapter->req_tx_queues); 4105 cap_reqs--; 4106 ibmvnic_send_crq(adapter, &crq); 4107 4108 crq.request_capability.capability = cpu_to_be16(REQ_RX_QUEUES); 4109 crq.request_capability.number = cpu_to_be64(adapter->req_rx_queues); 4110 cap_reqs--; 4111 ibmvnic_send_crq(adapter, &crq); 4112 4113 crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_QUEUES); 4114 crq.request_capability.number = cpu_to_be64(adapter->req_rx_add_queues); 4115 cap_reqs--; 4116 ibmvnic_send_crq(adapter, &crq); 4117 4118 crq.request_capability.capability = 4119 cpu_to_be16(REQ_TX_ENTRIES_PER_SUBCRQ); 4120 crq.request_capability.number = 4121 cpu_to_be64(adapter->req_tx_entries_per_subcrq); 4122 cap_reqs--; 4123 ibmvnic_send_crq(adapter, &crq); 4124 4125 crq.request_capability.capability = 4126 cpu_to_be16(REQ_RX_ADD_ENTRIES_PER_SUBCRQ); 4127 crq.request_capability.number = 4128 cpu_to_be64(adapter->req_rx_add_entries_per_subcrq); 4129 cap_reqs--; 4130 ibmvnic_send_crq(adapter, &crq); 4131 4132 
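/* The remaining REQUEST_CAPABILITY CRQs below (REQ_MTU and, depending on
 * IFF_PROMISC and promisc_supported, PROMISC_REQUESTED) follow the same
 * pattern: fill in the capability id and the requested value, decrement
 * cap_reqs and send the CRQ. The WARN_ON() at the end of this function
 * checks that exactly the expected number of requests was sent.
 */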
crq.request_capability.capability = cpu_to_be16(REQ_MTU); 4133 crq.request_capability.number = cpu_to_be64(adapter->req_mtu); 4134 cap_reqs--; 4135 ibmvnic_send_crq(adapter, &crq); 4136 4137 if (adapter->netdev->flags & IFF_PROMISC) { 4138 if (adapter->promisc_supported) { 4139 crq.request_capability.capability = 4140 cpu_to_be16(PROMISC_REQUESTED); 4141 crq.request_capability.number = cpu_to_be64(1); 4142 cap_reqs--; 4143 ibmvnic_send_crq(adapter, &crq); 4144 } 4145 } else { 4146 crq.request_capability.capability = 4147 cpu_to_be16(PROMISC_REQUESTED); 4148 crq.request_capability.number = cpu_to_be64(0); 4149 cap_reqs--; 4150 ibmvnic_send_crq(adapter, &crq); 4151 } 4152 4153 /* Keep at end to catch any discrepancy between expected and actual 4154 * CRQs sent. 4155 */ 4156 WARN_ON(cap_reqs != 0); 4157 } 4158 4159 static int pending_scrq(struct ibmvnic_adapter *adapter, 4160 struct ibmvnic_sub_crq_queue *scrq) 4161 { 4162 union sub_crq *entry = &scrq->msgs[scrq->cur]; 4163 int rc; 4164 4165 rc = !!(entry->generic.first & IBMVNIC_CRQ_CMD_RSP); 4166 4167 /* Ensure that the SCRQ valid flag is loaded prior to loading the 4168 * contents of the SCRQ descriptor 4169 */ 4170 dma_rmb(); 4171 4172 return rc; 4173 } 4174 4175 static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *adapter, 4176 struct ibmvnic_sub_crq_queue *scrq) 4177 { 4178 union sub_crq *entry; 4179 unsigned long flags; 4180 4181 spin_lock_irqsave(&scrq->lock, flags); 4182 entry = &scrq->msgs[scrq->cur]; 4183 if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP) { 4184 if (++scrq->cur == scrq->size) 4185 scrq->cur = 0; 4186 } else { 4187 entry = NULL; 4188 } 4189 spin_unlock_irqrestore(&scrq->lock, flags); 4190 4191 /* Ensure that the SCRQ valid flag is loaded prior to loading the 4192 * contents of the SCRQ descriptor 4193 */ 4194 dma_rmb(); 4195 4196 return entry; 4197 } 4198 4199 static union ibmvnic_crq *ibmvnic_next_crq(struct ibmvnic_adapter *adapter) 4200 { 4201 struct ibmvnic_crq_queue *queue = &adapter->crq; 4202 union ibmvnic_crq *crq; 4203 4204 crq = &queue->msgs[queue->cur]; 4205 if (crq->generic.first & IBMVNIC_CRQ_CMD_RSP) { 4206 if (++queue->cur == queue->size) 4207 queue->cur = 0; 4208 } else { 4209 crq = NULL; 4210 } 4211 4212 return crq; 4213 } 4214 4215 static void print_subcrq_error(struct device *dev, int rc, const char *func) 4216 { 4217 switch (rc) { 4218 case H_PARAMETER: 4219 dev_warn_ratelimited(dev, 4220 "%s failed: Send request is malformed or adapter failover pending. (rc=%d)\n", 4221 func, rc); 4222 break; 4223 case H_CLOSED: 4224 dev_warn_ratelimited(dev, 4225 "%s failed: Backing queue closed. Adapter is down or failover pending. 
(rc=%d)\n", 4226 func, rc); 4227 break; 4228 default: 4229 dev_err_ratelimited(dev, "%s failed: (rc=%d)\n", func, rc); 4230 break; 4231 } 4232 } 4233 4234 static int send_subcrq_indirect(struct ibmvnic_adapter *adapter, 4235 u64 remote_handle, u64 ioba, u64 num_entries) 4236 { 4237 unsigned int ua = adapter->vdev->unit_address; 4238 struct device *dev = &adapter->vdev->dev; 4239 int rc; 4240 4241 /* Make sure the hypervisor sees the complete request */ 4242 dma_wmb(); 4243 rc = plpar_hcall_norets(H_SEND_SUB_CRQ_INDIRECT, ua, 4244 cpu_to_be64(remote_handle), 4245 ioba, num_entries); 4246 4247 if (rc) 4248 print_subcrq_error(dev, rc, __func__); 4249 4250 return rc; 4251 } 4252 4253 static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter, 4254 union ibmvnic_crq *crq) 4255 { 4256 unsigned int ua = adapter->vdev->unit_address; 4257 struct device *dev = &adapter->vdev->dev; 4258 u64 *u64_crq = (u64 *)crq; 4259 int rc; 4260 4261 netdev_dbg(adapter->netdev, "Sending CRQ: %016lx %016lx\n", 4262 (unsigned long)cpu_to_be64(u64_crq[0]), 4263 (unsigned long)cpu_to_be64(u64_crq[1])); 4264 4265 if (!adapter->crq.active && 4266 crq->generic.first != IBMVNIC_CRQ_INIT_CMD) { 4267 dev_warn(dev, "Invalid request detected while CRQ is inactive, possible device state change during reset\n"); 4268 return -EINVAL; 4269 } 4270 4271 /* Make sure the hypervisor sees the complete request */ 4272 dma_wmb(); 4273 4274 rc = plpar_hcall_norets(H_SEND_CRQ, ua, 4275 cpu_to_be64(u64_crq[0]), 4276 cpu_to_be64(u64_crq[1])); 4277 4278 if (rc) { 4279 if (rc == H_CLOSED) { 4280 dev_warn(dev, "CRQ Queue closed\n"); 4281 /* do not reset, report the fail, wait for passive init from server */ 4282 } 4283 4284 dev_warn(dev, "Send error (rc=%d)\n", rc); 4285 } 4286 4287 return rc; 4288 } 4289 4290 static int ibmvnic_send_crq_init(struct ibmvnic_adapter *adapter) 4291 { 4292 struct device *dev = &adapter->vdev->dev; 4293 union ibmvnic_crq crq; 4294 int retries = 100; 4295 int rc; 4296 4297 memset(&crq, 0, sizeof(crq)); 4298 crq.generic.first = IBMVNIC_CRQ_INIT_CMD; 4299 crq.generic.cmd = IBMVNIC_CRQ_INIT; 4300 netdev_dbg(adapter->netdev, "Sending CRQ init\n"); 4301 4302 do { 4303 rc = ibmvnic_send_crq(adapter, &crq); 4304 if (rc != H_CLOSED) 4305 break; 4306 retries--; 4307 msleep(50); 4308 4309 } while (retries > 0); 4310 4311 if (rc) { 4312 dev_err(dev, "Failed to send init request, rc = %d\n", rc); 4313 return rc; 4314 } 4315 4316 return 0; 4317 } 4318 4319 struct vnic_login_client_data { 4320 u8 type; 4321 __be16 len; 4322 char name[]; 4323 } __packed; 4324 4325 static int vnic_client_data_len(struct ibmvnic_adapter *adapter) 4326 { 4327 int len; 4328 4329 /* Calculate the amount of buffer space needed for the 4330 * vnic client data in the login buffer. There are four entries, 4331 * OS name, LPAR name, device name, and a null last entry. 
4332 */ 4333 len = 4 * sizeof(struct vnic_login_client_data); 4334 len += 6; /* "Linux" plus NULL */ 4335 len += strlen(utsname()->nodename) + 1; 4336 len += strlen(adapter->netdev->name) + 1; 4337 4338 return len; 4339 } 4340 4341 static void vnic_add_client_data(struct ibmvnic_adapter *adapter, 4342 struct vnic_login_client_data *vlcd) 4343 { 4344 const char *os_name = "Linux"; 4345 int len; 4346 4347 /* Type 1 - LPAR OS */ 4348 vlcd->type = 1; 4349 len = strlen(os_name) + 1; 4350 vlcd->len = cpu_to_be16(len); 4351 strscpy(vlcd->name, os_name, len); 4352 vlcd = (struct vnic_login_client_data *)(vlcd->name + len); 4353 4354 /* Type 2 - LPAR name */ 4355 vlcd->type = 2; 4356 len = strlen(utsname()->nodename) + 1; 4357 vlcd->len = cpu_to_be16(len); 4358 strscpy(vlcd->name, utsname()->nodename, len); 4359 vlcd = (struct vnic_login_client_data *)(vlcd->name + len); 4360 4361 /* Type 3 - device name */ 4362 vlcd->type = 3; 4363 len = strlen(adapter->netdev->name) + 1; 4364 vlcd->len = cpu_to_be16(len); 4365 strscpy(vlcd->name, adapter->netdev->name, len); 4366 } 4367 4368 static int send_login(struct ibmvnic_adapter *adapter) 4369 { 4370 struct ibmvnic_login_rsp_buffer *login_rsp_buffer; 4371 struct ibmvnic_login_buffer *login_buffer; 4372 struct device *dev = &adapter->vdev->dev; 4373 struct vnic_login_client_data *vlcd; 4374 dma_addr_t rsp_buffer_token; 4375 dma_addr_t buffer_token; 4376 size_t rsp_buffer_size; 4377 union ibmvnic_crq crq; 4378 int client_data_len; 4379 size_t buffer_size; 4380 __be64 *tx_list_p; 4381 __be64 *rx_list_p; 4382 int rc; 4383 int i; 4384 4385 if (!adapter->tx_scrq || !adapter->rx_scrq) { 4386 netdev_err(adapter->netdev, 4387 "RX or TX queues are not allocated, device login failed\n"); 4388 return -ENOMEM; 4389 } 4390 4391 release_login_buffer(adapter); 4392 release_login_rsp_buffer(adapter); 4393 4394 client_data_len = vnic_client_data_len(adapter); 4395 4396 buffer_size = 4397 sizeof(struct ibmvnic_login_buffer) + 4398 sizeof(u64) * (adapter->req_tx_queues + adapter->req_rx_queues) + 4399 client_data_len; 4400 4401 login_buffer = kzalloc(buffer_size, GFP_ATOMIC); 4402 if (!login_buffer) 4403 goto buf_alloc_failed; 4404 4405 buffer_token = dma_map_single(dev, login_buffer, buffer_size, 4406 DMA_TO_DEVICE); 4407 if (dma_mapping_error(dev, buffer_token)) { 4408 dev_err(dev, "Couldn't map login buffer\n"); 4409 goto buf_map_failed; 4410 } 4411 4412 rsp_buffer_size = sizeof(struct ibmvnic_login_rsp_buffer) + 4413 sizeof(u64) * adapter->req_tx_queues + 4414 sizeof(u64) * adapter->req_rx_queues + 4415 sizeof(u64) * adapter->req_rx_queues + 4416 sizeof(u8) * IBMVNIC_TX_DESC_VERSIONS; 4417 4418 login_rsp_buffer = kmalloc(rsp_buffer_size, GFP_ATOMIC); 4419 if (!login_rsp_buffer) 4420 goto buf_rsp_alloc_failed; 4421 4422 rsp_buffer_token = dma_map_single(dev, login_rsp_buffer, 4423 rsp_buffer_size, DMA_FROM_DEVICE); 4424 if (dma_mapping_error(dev, rsp_buffer_token)) { 4425 dev_err(dev, "Couldn't map login rsp buffer\n"); 4426 goto buf_rsp_map_failed; 4427 } 4428 4429 adapter->login_buf = login_buffer; 4430 adapter->login_buf_token = buffer_token; 4431 adapter->login_buf_sz = buffer_size; 4432 adapter->login_rsp_buf = login_rsp_buffer; 4433 adapter->login_rsp_buf_token = rsp_buffer_token; 4434 adapter->login_rsp_buf_sz = rsp_buffer_size; 4435 4436 login_buffer->len = cpu_to_be32(buffer_size); 4437 login_buffer->version = cpu_to_be32(INITIAL_VERSION_LB); 4438 login_buffer->num_txcomp_subcrqs = cpu_to_be32(adapter->req_tx_queues); 4439 login_buffer->off_txcomp_subcrqs = 4440 
cpu_to_be32(sizeof(struct ibmvnic_login_buffer)); 4441 login_buffer->num_rxcomp_subcrqs = cpu_to_be32(adapter->req_rx_queues); 4442 login_buffer->off_rxcomp_subcrqs = 4443 cpu_to_be32(sizeof(struct ibmvnic_login_buffer) + 4444 sizeof(u64) * adapter->req_tx_queues); 4445 login_buffer->login_rsp_ioba = cpu_to_be32(rsp_buffer_token); 4446 login_buffer->login_rsp_len = cpu_to_be32(rsp_buffer_size); 4447 4448 tx_list_p = (__be64 *)((char *)login_buffer + 4449 sizeof(struct ibmvnic_login_buffer)); 4450 rx_list_p = (__be64 *)((char *)login_buffer + 4451 sizeof(struct ibmvnic_login_buffer) + 4452 sizeof(u64) * adapter->req_tx_queues); 4453 4454 for (i = 0; i < adapter->req_tx_queues; i++) { 4455 if (adapter->tx_scrq[i]) { 4456 tx_list_p[i] = 4457 cpu_to_be64(adapter->tx_scrq[i]->crq_num); 4458 } 4459 } 4460 4461 for (i = 0; i < adapter->req_rx_queues; i++) { 4462 if (adapter->rx_scrq[i]) { 4463 rx_list_p[i] = 4464 cpu_to_be64(adapter->rx_scrq[i]->crq_num); 4465 } 4466 } 4467 4468 /* Insert vNIC login client data */ 4469 vlcd = (struct vnic_login_client_data *) 4470 ((char *)rx_list_p + (sizeof(u64) * adapter->req_rx_queues)); 4471 login_buffer->client_data_offset = 4472 cpu_to_be32((char *)vlcd - (char *)login_buffer); 4473 login_buffer->client_data_len = cpu_to_be32(client_data_len); 4474 4475 vnic_add_client_data(adapter, vlcd); 4476 4477 netdev_dbg(adapter->netdev, "Login Buffer:\n"); 4478 for (i = 0; i < (adapter->login_buf_sz - 1) / 8 + 1; i++) { 4479 netdev_dbg(adapter->netdev, "%016lx\n", 4480 ((unsigned long *)(adapter->login_buf))[i]); 4481 } 4482 4483 memset(&crq, 0, sizeof(crq)); 4484 crq.login.first = IBMVNIC_CRQ_CMD; 4485 crq.login.cmd = LOGIN; 4486 crq.login.ioba = cpu_to_be32(buffer_token); 4487 crq.login.len = cpu_to_be32(buffer_size); 4488 4489 adapter->login_pending = true; 4490 rc = ibmvnic_send_crq(adapter, &crq); 4491 if (rc) { 4492 adapter->login_pending = false; 4493 netdev_err(adapter->netdev, "Failed to send login, rc=%d\n", rc); 4494 goto buf_rsp_map_failed; 4495 } 4496 4497 return 0; 4498 4499 buf_rsp_map_failed: 4500 kfree(login_rsp_buffer); 4501 adapter->login_rsp_buf = NULL; 4502 buf_rsp_alloc_failed: 4503 dma_unmap_single(dev, buffer_token, buffer_size, DMA_TO_DEVICE); 4504 buf_map_failed: 4505 kfree(login_buffer); 4506 adapter->login_buf = NULL; 4507 buf_alloc_failed: 4508 return -ENOMEM; 4509 } 4510 4511 static int send_request_map(struct ibmvnic_adapter *adapter, dma_addr_t addr, 4512 u32 len, u8 map_id) 4513 { 4514 union ibmvnic_crq crq; 4515 4516 memset(&crq, 0, sizeof(crq)); 4517 crq.request_map.first = IBMVNIC_CRQ_CMD; 4518 crq.request_map.cmd = REQUEST_MAP; 4519 crq.request_map.map_id = map_id; 4520 crq.request_map.ioba = cpu_to_be32(addr); 4521 crq.request_map.len = cpu_to_be32(len); 4522 return ibmvnic_send_crq(adapter, &crq); 4523 } 4524 4525 static int send_request_unmap(struct ibmvnic_adapter *adapter, u8 map_id) 4526 { 4527 union ibmvnic_crq crq; 4528 4529 memset(&crq, 0, sizeof(crq)); 4530 crq.request_unmap.first = IBMVNIC_CRQ_CMD; 4531 crq.request_unmap.cmd = REQUEST_UNMAP; 4532 crq.request_unmap.map_id = map_id; 4533 return ibmvnic_send_crq(adapter, &crq); 4534 } 4535 4536 static void send_query_map(struct ibmvnic_adapter *adapter) 4537 { 4538 union ibmvnic_crq crq; 4539 4540 memset(&crq, 0, sizeof(crq)); 4541 crq.query_map.first = IBMVNIC_CRQ_CMD; 4542 crq.query_map.cmd = QUERY_MAP; 4543 ibmvnic_send_crq(adapter, &crq); 4544 } 4545 4546 /* Send a series of CRQs requesting various capabilities of the VNIC server */ 4547 static void 
send_query_cap(struct ibmvnic_adapter *adapter) 4548 { 4549 union ibmvnic_crq crq; 4550 int cap_reqs; 4551 4552 /* We send out 25 QUERY_CAPABILITY CRQs below. Initialize this count 4553 * upfront. When the tasklet receives a response to all of these, it 4554 * can send out the next protocol message (REQUEST_CAPABILITY). 4555 */ 4556 cap_reqs = 25; 4557 4558 atomic_set(&adapter->running_cap_crqs, cap_reqs); 4559 4560 memset(&crq, 0, sizeof(crq)); 4561 crq.query_capability.first = IBMVNIC_CRQ_CMD; 4562 crq.query_capability.cmd = QUERY_CAPABILITY; 4563 4564 crq.query_capability.capability = cpu_to_be16(MIN_TX_QUEUES); 4565 ibmvnic_send_crq(adapter, &crq); 4566 cap_reqs--; 4567 4568 crq.query_capability.capability = cpu_to_be16(MIN_RX_QUEUES); 4569 ibmvnic_send_crq(adapter, &crq); 4570 cap_reqs--; 4571 4572 crq.query_capability.capability = cpu_to_be16(MIN_RX_ADD_QUEUES); 4573 ibmvnic_send_crq(adapter, &crq); 4574 cap_reqs--; 4575 4576 crq.query_capability.capability = cpu_to_be16(MAX_TX_QUEUES); 4577 ibmvnic_send_crq(adapter, &crq); 4578 cap_reqs--; 4579 4580 crq.query_capability.capability = cpu_to_be16(MAX_RX_QUEUES); 4581 ibmvnic_send_crq(adapter, &crq); 4582 cap_reqs--; 4583 4584 crq.query_capability.capability = cpu_to_be16(MAX_RX_ADD_QUEUES); 4585 ibmvnic_send_crq(adapter, &crq); 4586 cap_reqs--; 4587 4588 crq.query_capability.capability = 4589 cpu_to_be16(MIN_TX_ENTRIES_PER_SUBCRQ); 4590 ibmvnic_send_crq(adapter, &crq); 4591 cap_reqs--; 4592 4593 crq.query_capability.capability = 4594 cpu_to_be16(MIN_RX_ADD_ENTRIES_PER_SUBCRQ); 4595 ibmvnic_send_crq(adapter, &crq); 4596 cap_reqs--; 4597 4598 crq.query_capability.capability = 4599 cpu_to_be16(MAX_TX_ENTRIES_PER_SUBCRQ); 4600 ibmvnic_send_crq(adapter, &crq); 4601 cap_reqs--; 4602 4603 crq.query_capability.capability = 4604 cpu_to_be16(MAX_RX_ADD_ENTRIES_PER_SUBCRQ); 4605 ibmvnic_send_crq(adapter, &crq); 4606 cap_reqs--; 4607 4608 crq.query_capability.capability = cpu_to_be16(TCP_IP_OFFLOAD); 4609 ibmvnic_send_crq(adapter, &crq); 4610 cap_reqs--; 4611 4612 crq.query_capability.capability = cpu_to_be16(PROMISC_SUPPORTED); 4613 ibmvnic_send_crq(adapter, &crq); 4614 cap_reqs--; 4615 4616 crq.query_capability.capability = cpu_to_be16(MIN_MTU); 4617 ibmvnic_send_crq(adapter, &crq); 4618 cap_reqs--; 4619 4620 crq.query_capability.capability = cpu_to_be16(MAX_MTU); 4621 ibmvnic_send_crq(adapter, &crq); 4622 cap_reqs--; 4623 4624 crq.query_capability.capability = cpu_to_be16(MAX_MULTICAST_FILTERS); 4625 ibmvnic_send_crq(adapter, &crq); 4626 cap_reqs--; 4627 4628 crq.query_capability.capability = cpu_to_be16(VLAN_HEADER_INSERTION); 4629 ibmvnic_send_crq(adapter, &crq); 4630 cap_reqs--; 4631 4632 crq.query_capability.capability = cpu_to_be16(RX_VLAN_HEADER_INSERTION); 4633 ibmvnic_send_crq(adapter, &crq); 4634 cap_reqs--; 4635 4636 crq.query_capability.capability = cpu_to_be16(MAX_TX_SG_ENTRIES); 4637 ibmvnic_send_crq(adapter, &crq); 4638 cap_reqs--; 4639 4640 crq.query_capability.capability = cpu_to_be16(RX_SG_SUPPORTED); 4641 ibmvnic_send_crq(adapter, &crq); 4642 cap_reqs--; 4643 4644 crq.query_capability.capability = cpu_to_be16(OPT_TX_COMP_SUB_QUEUES); 4645 ibmvnic_send_crq(adapter, &crq); 4646 cap_reqs--; 4647 4648 crq.query_capability.capability = cpu_to_be16(OPT_RX_COMP_QUEUES); 4649 ibmvnic_send_crq(adapter, &crq); 4650 cap_reqs--; 4651 4652 crq.query_capability.capability = 4653 cpu_to_be16(OPT_RX_BUFADD_Q_PER_RX_COMP_Q); 4654 ibmvnic_send_crq(adapter, &crq); 4655 cap_reqs--; 4656 4657 crq.query_capability.capability = 4658
cpu_to_be16(OPT_TX_ENTRIES_PER_SUBCRQ); 4659 ibmvnic_send_crq(adapter, &crq); 4660 cap_reqs--; 4661 4662 crq.query_capability.capability = 4663 cpu_to_be16(OPT_RXBA_ENTRIES_PER_SUBCRQ); 4664 ibmvnic_send_crq(adapter, &crq); 4665 cap_reqs--; 4666 4667 crq.query_capability.capability = cpu_to_be16(TX_RX_DESC_REQ); 4668 4669 ibmvnic_send_crq(adapter, &crq); 4670 cap_reqs--; 4671 4672 /* Keep at end to catch any discrepancy between expected and actual 4673 * CRQs sent. 4674 */ 4675 WARN_ON(cap_reqs != 0); 4676 } 4677 4678 static void send_query_ip_offload(struct ibmvnic_adapter *adapter) 4679 { 4680 int buf_sz = sizeof(struct ibmvnic_query_ip_offload_buffer); 4681 struct device *dev = &adapter->vdev->dev; 4682 union ibmvnic_crq crq; 4683 4684 adapter->ip_offload_tok = 4685 dma_map_single(dev, 4686 &adapter->ip_offload_buf, 4687 buf_sz, 4688 DMA_FROM_DEVICE); 4689 4690 if (dma_mapping_error(dev, adapter->ip_offload_tok)) { 4691 if (!firmware_has_feature(FW_FEATURE_CMO)) 4692 dev_err(dev, "Couldn't map offload buffer\n"); 4693 return; 4694 } 4695 4696 memset(&crq, 0, sizeof(crq)); 4697 crq.query_ip_offload.first = IBMVNIC_CRQ_CMD; 4698 crq.query_ip_offload.cmd = QUERY_IP_OFFLOAD; 4699 crq.query_ip_offload.len = cpu_to_be32(buf_sz); 4700 crq.query_ip_offload.ioba = 4701 cpu_to_be32(adapter->ip_offload_tok); 4702 4703 ibmvnic_send_crq(adapter, &crq); 4704 } 4705 4706 static void send_control_ip_offload(struct ibmvnic_adapter *adapter) 4707 { 4708 struct ibmvnic_control_ip_offload_buffer *ctrl_buf = &adapter->ip_offload_ctrl; 4709 struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf; 4710 struct device *dev = &adapter->vdev->dev; 4711 netdev_features_t old_hw_features = 0; 4712 union ibmvnic_crq crq; 4713 4714 adapter->ip_offload_ctrl_tok = 4715 dma_map_single(dev, 4716 ctrl_buf, 4717 sizeof(adapter->ip_offload_ctrl), 4718 DMA_TO_DEVICE); 4719 4720 if (dma_mapping_error(dev, adapter->ip_offload_ctrl_tok)) { 4721 dev_err(dev, "Couldn't map ip offload control buffer\n"); 4722 return; 4723 } 4724 4725 ctrl_buf->len = cpu_to_be32(sizeof(adapter->ip_offload_ctrl)); 4726 ctrl_buf->version = cpu_to_be32(INITIAL_VERSION_IOB); 4727 ctrl_buf->ipv4_chksum = buf->ipv4_chksum; 4728 ctrl_buf->ipv6_chksum = buf->ipv6_chksum; 4729 ctrl_buf->tcp_ipv4_chksum = buf->tcp_ipv4_chksum; 4730 ctrl_buf->udp_ipv4_chksum = buf->udp_ipv4_chksum; 4731 ctrl_buf->tcp_ipv6_chksum = buf->tcp_ipv6_chksum; 4732 ctrl_buf->udp_ipv6_chksum = buf->udp_ipv6_chksum; 4733 ctrl_buf->large_tx_ipv4 = buf->large_tx_ipv4; 4734 ctrl_buf->large_tx_ipv6 = buf->large_tx_ipv6; 4735 4736 /* large_rx disabled for now, additional features needed */ 4737 ctrl_buf->large_rx_ipv4 = 0; 4738 ctrl_buf->large_rx_ipv6 = 0; 4739 4740 if (adapter->state != VNIC_PROBING) { 4741 old_hw_features = adapter->netdev->hw_features; 4742 adapter->netdev->hw_features = 0; 4743 } 4744 4745 adapter->netdev->hw_features = NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO; 4746 4747 if (buf->tcp_ipv4_chksum || buf->udp_ipv4_chksum) 4748 adapter->netdev->hw_features |= NETIF_F_IP_CSUM; 4749 4750 if (buf->tcp_ipv6_chksum || buf->udp_ipv6_chksum) 4751 adapter->netdev->hw_features |= NETIF_F_IPV6_CSUM; 4752 4753 if ((adapter->netdev->features & 4754 (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))) 4755 adapter->netdev->hw_features |= NETIF_F_RXCSUM; 4756 4757 if (buf->large_tx_ipv4) 4758 adapter->netdev->hw_features |= NETIF_F_TSO; 4759 if (buf->large_tx_ipv6) 4760 adapter->netdev->hw_features |= NETIF_F_TSO6; 4761 4762 if (adapter->state == VNIC_PROBING) { 4763 
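/* Initial probe: advertise every feature the server reported as supported */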
adapter->netdev->features |= adapter->netdev->hw_features; 4764 } else if (old_hw_features != adapter->netdev->hw_features) { 4765 netdev_features_t tmp = 0; 4766 4767 /* disable features no longer supported */ 4768 adapter->netdev->features &= adapter->netdev->hw_features; 4769 /* turn on features now supported if previously enabled */ 4770 tmp = (old_hw_features ^ adapter->netdev->hw_features) & 4771 adapter->netdev->hw_features; 4772 adapter->netdev->features |= 4773 tmp & adapter->netdev->wanted_features; 4774 } 4775 4776 memset(&crq, 0, sizeof(crq)); 4777 crq.control_ip_offload.first = IBMVNIC_CRQ_CMD; 4778 crq.control_ip_offload.cmd = CONTROL_IP_OFFLOAD; 4779 crq.control_ip_offload.len = 4780 cpu_to_be32(sizeof(adapter->ip_offload_ctrl)); 4781 crq.control_ip_offload.ioba = cpu_to_be32(adapter->ip_offload_ctrl_tok); 4782 ibmvnic_send_crq(adapter, &crq); 4783 } 4784 4785 static void handle_vpd_size_rsp(union ibmvnic_crq *crq, 4786 struct ibmvnic_adapter *adapter) 4787 { 4788 struct device *dev = &adapter->vdev->dev; 4789 4790 if (crq->get_vpd_size_rsp.rc.code) { 4791 dev_err(dev, "Error retrieving VPD size, rc=%x\n", 4792 crq->get_vpd_size_rsp.rc.code); 4793 complete(&adapter->fw_done); 4794 return; 4795 } 4796 4797 adapter->vpd->len = be64_to_cpu(crq->get_vpd_size_rsp.len); 4798 complete(&adapter->fw_done); 4799 } 4800 4801 static void handle_vpd_rsp(union ibmvnic_crq *crq, 4802 struct ibmvnic_adapter *adapter) 4803 { 4804 struct device *dev = &adapter->vdev->dev; 4805 unsigned char *substr = NULL; 4806 u8 fw_level_len = 0; 4807 4808 memset(adapter->fw_version, 0, 32); 4809 4810 dma_unmap_single(dev, adapter->vpd->dma_addr, adapter->vpd->len, 4811 DMA_FROM_DEVICE); 4812 4813 if (crq->get_vpd_rsp.rc.code) { 4814 dev_err(dev, "Error retrieving VPD from device, rc=%x\n", 4815 crq->get_vpd_rsp.rc.code); 4816 goto complete; 4817 } 4818 4819 /* get the position of the firmware version info 4820 * located after the ASCII 'RM' substring in the buffer 4821 */ 4822 substr = strnstr(adapter->vpd->buff, "RM", adapter->vpd->len); 4823 if (!substr) { 4824 dev_info(dev, "Warning - No FW level has been provided in the VPD buffer by the VIOS Server\n"); 4825 goto complete; 4826 } 4827 4828 /* get length of firmware level ASCII substring */ 4829 if ((substr + 2) < (adapter->vpd->buff + adapter->vpd->len)) { 4830 fw_level_len = *(substr + 2); 4831 } else { 4832 dev_info(dev, "Length of FW substr extends beyond end of VPD buff\n"); 4833 goto complete; 4834 } 4835 4836 /* copy firmware version string from vpd into adapter */ 4837 if ((substr + 3 + fw_level_len) < 4838 (adapter->vpd->buff + adapter->vpd->len)) { 4839 strncpy((char *)adapter->fw_version, substr + 3, fw_level_len); 4840 } else { 4841 dev_info(dev, "FW substr extends beyond end of VPD buff\n"); 4842 } 4843 4844 complete: 4845 if (adapter->fw_version[0] == '\0') 4846 strscpy((char *)adapter->fw_version, "N/A", sizeof(adapter->fw_version)); 4847 complete(&adapter->fw_done); 4848 } 4849 4850 static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter) 4851 { 4852 struct device *dev = &adapter->vdev->dev; 4853 struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf; 4854 int i; 4855 4856 dma_unmap_single(dev, adapter->ip_offload_tok, 4857 sizeof(adapter->ip_offload_buf), DMA_FROM_DEVICE); 4858 4859 netdev_dbg(adapter->netdev, "Query IP Offload Buffer:\n"); 4860 for (i = 0; i < (sizeof(adapter->ip_offload_buf) - 1) / 8 + 1; i++) 4861 netdev_dbg(adapter->netdev, "%016lx\n", 4862 ((unsigned long *)(buf))[i]); 4863 4864
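/* Log each reported offload capability individually; these fields
 * determine which netdev features are enabled in send_control_ip_offload().
 */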
netdev_dbg(adapter->netdev, "ipv4_chksum = %d\n", buf->ipv4_chksum); 4865 netdev_dbg(adapter->netdev, "ipv6_chksum = %d\n", buf->ipv6_chksum); 4866 netdev_dbg(adapter->netdev, "tcp_ipv4_chksum = %d\n", 4867 buf->tcp_ipv4_chksum); 4868 netdev_dbg(adapter->netdev, "tcp_ipv6_chksum = %d\n", 4869 buf->tcp_ipv6_chksum); 4870 netdev_dbg(adapter->netdev, "udp_ipv4_chksum = %d\n", 4871 buf->udp_ipv4_chksum); 4872 netdev_dbg(adapter->netdev, "udp_ipv6_chksum = %d\n", 4873 buf->udp_ipv6_chksum); 4874 netdev_dbg(adapter->netdev, "large_tx_ipv4 = %d\n", 4875 buf->large_tx_ipv4); 4876 netdev_dbg(adapter->netdev, "large_tx_ipv6 = %d\n", 4877 buf->large_tx_ipv6); 4878 netdev_dbg(adapter->netdev, "large_rx_ipv4 = %d\n", 4879 buf->large_rx_ipv4); 4880 netdev_dbg(adapter->netdev, "large_rx_ipv6 = %d\n", 4881 buf->large_rx_ipv6); 4882 netdev_dbg(adapter->netdev, "max_ipv4_hdr_sz = %d\n", 4883 buf->max_ipv4_header_size); 4884 netdev_dbg(adapter->netdev, "max_ipv6_hdr_sz = %d\n", 4885 buf->max_ipv6_header_size); 4886 netdev_dbg(adapter->netdev, "max_tcp_hdr_size = %d\n", 4887 buf->max_tcp_header_size); 4888 netdev_dbg(adapter->netdev, "max_udp_hdr_size = %d\n", 4889 buf->max_udp_header_size); 4890 netdev_dbg(adapter->netdev, "max_large_tx_size = %d\n", 4891 buf->max_large_tx_size); 4892 netdev_dbg(adapter->netdev, "max_large_rx_size = %d\n", 4893 buf->max_large_rx_size); 4894 netdev_dbg(adapter->netdev, "ipv6_ext_hdr = %d\n", 4895 buf->ipv6_extension_header); 4896 netdev_dbg(adapter->netdev, "tcp_pseudosum_req = %d\n", 4897 buf->tcp_pseudosum_req); 4898 netdev_dbg(adapter->netdev, "num_ipv6_ext_hd = %d\n", 4899 buf->num_ipv6_ext_headers); 4900 netdev_dbg(adapter->netdev, "off_ipv6_ext_hd = %d\n", 4901 buf->off_ipv6_ext_headers); 4902 4903 send_control_ip_offload(adapter); 4904 } 4905 4906 static const char *ibmvnic_fw_err_cause(u16 cause) 4907 { 4908 switch (cause) { 4909 case ADAPTER_PROBLEM: 4910 return "adapter problem"; 4911 case BUS_PROBLEM: 4912 return "bus problem"; 4913 case FW_PROBLEM: 4914 return "firmware problem"; 4915 case DD_PROBLEM: 4916 return "device driver problem"; 4917 case EEH_RECOVERY: 4918 return "EEH recovery"; 4919 case FW_UPDATED: 4920 return "firmware updated"; 4921 case LOW_MEMORY: 4922 return "low Memory"; 4923 default: 4924 return "unknown"; 4925 } 4926 } 4927 4928 static void handle_error_indication(union ibmvnic_crq *crq, 4929 struct ibmvnic_adapter *adapter) 4930 { 4931 struct device *dev = &adapter->vdev->dev; 4932 u16 cause; 4933 4934 cause = be16_to_cpu(crq->error_indication.error_cause); 4935 4936 dev_warn_ratelimited(dev, 4937 "Firmware reports %serror, cause: %s. Starting recovery...\n", 4938 crq->error_indication.flags 4939 & IBMVNIC_FATAL_ERROR ? "FATAL " : "", 4940 ibmvnic_fw_err_cause(cause)); 4941 4942 if (crq->error_indication.flags & IBMVNIC_FATAL_ERROR) 4943 ibmvnic_reset(adapter, VNIC_RESET_FATAL); 4944 else 4945 ibmvnic_reset(adapter, VNIC_RESET_NON_FATAL); 4946 } 4947 4948 static int handle_change_mac_rsp(union ibmvnic_crq *crq, 4949 struct ibmvnic_adapter *adapter) 4950 { 4951 struct net_device *netdev = adapter->netdev; 4952 struct device *dev = &adapter->vdev->dev; 4953 long rc; 4954 4955 rc = crq->change_mac_addr_rsp.rc.code; 4956 if (rc) { 4957 dev_err(dev, "Error %ld in CHANGE_MAC_ADDR_RSP\n", rc); 4958 goto out; 4959 } 4960 /* crq->change_mac_addr.mac_addr is the requested one 4961 * crq->change_mac_addr_rsp.mac_addr is the returned valid one. 
4962 */ 4963 eth_hw_addr_set(netdev, &crq->change_mac_addr_rsp.mac_addr[0]); 4964 ether_addr_copy(adapter->mac_addr, 4965 &crq->change_mac_addr_rsp.mac_addr[0]); 4966 out: 4967 complete(&adapter->fw_done); 4968 return rc; 4969 } 4970 4971 static void handle_request_cap_rsp(union ibmvnic_crq *crq, 4972 struct ibmvnic_adapter *adapter) 4973 { 4974 struct device *dev = &adapter->vdev->dev; 4975 u64 *req_value; 4976 char *name; 4977 4978 atomic_dec(&adapter->running_cap_crqs); 4979 netdev_dbg(adapter->netdev, "Outstanding request-caps: %d\n", 4980 atomic_read(&adapter->running_cap_crqs)); 4981 switch (be16_to_cpu(crq->request_capability_rsp.capability)) { 4982 case REQ_TX_QUEUES: 4983 req_value = &adapter->req_tx_queues; 4984 name = "tx"; 4985 break; 4986 case REQ_RX_QUEUES: 4987 req_value = &adapter->req_rx_queues; 4988 name = "rx"; 4989 break; 4990 case REQ_RX_ADD_QUEUES: 4991 req_value = &adapter->req_rx_add_queues; 4992 name = "rx_add"; 4993 break; 4994 case REQ_TX_ENTRIES_PER_SUBCRQ: 4995 req_value = &adapter->req_tx_entries_per_subcrq; 4996 name = "tx_entries_per_subcrq"; 4997 break; 4998 case REQ_RX_ADD_ENTRIES_PER_SUBCRQ: 4999 req_value = &adapter->req_rx_add_entries_per_subcrq; 5000 name = "rx_add_entries_per_subcrq"; 5001 break; 5002 case REQ_MTU: 5003 req_value = &adapter->req_mtu; 5004 name = "mtu"; 5005 break; 5006 case PROMISC_REQUESTED: 5007 req_value = &adapter->promisc; 5008 name = "promisc"; 5009 break; 5010 default: 5011 dev_err(dev, "Got invalid cap request rsp %d\n", 5012 crq->request_capability.capability); 5013 return; 5014 } 5015 5016 switch (crq->request_capability_rsp.rc.code) { 5017 case SUCCESS: 5018 break; 5019 case PARTIALSUCCESS: 5020 dev_info(dev, "req=%lld, rsp=%ld in %s queue, retrying.\n", 5021 *req_value, 5022 (long)be64_to_cpu(crq->request_capability_rsp.number), 5023 name); 5024 5025 if (be16_to_cpu(crq->request_capability_rsp.capability) == 5026 REQ_MTU) { 5027 pr_err("mtu of %llu is not supported. Reverting.\n", 5028 *req_value); 5029 *req_value = adapter->fallback.mtu; 5030 } else { 5031 *req_value = 5032 be64_to_cpu(crq->request_capability_rsp.number); 5033 } 5034 5035 send_request_cap(adapter, 1); 5036 return; 5037 default: 5038 dev_err(dev, "Error %d in request cap rsp\n", 5039 crq->request_capability_rsp.rc.code); 5040 return; 5041 } 5042 5043 /* Done receiving requested capabilities, query IP offload support */ 5044 if (atomic_read(&adapter->running_cap_crqs) == 0) 5045 send_query_ip_offload(adapter); 5046 } 5047 5048 static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq, 5049 struct ibmvnic_adapter *adapter) 5050 { 5051 struct device *dev = &adapter->vdev->dev; 5052 struct net_device *netdev = adapter->netdev; 5053 struct ibmvnic_login_rsp_buffer *login_rsp = adapter->login_rsp_buf; 5054 struct ibmvnic_login_buffer *login = adapter->login_buf; 5055 u64 *tx_handle_array; 5056 u64 *rx_handle_array; 5057 int num_tx_pools; 5058 int num_rx_pools; 5059 u64 *size_array; 5060 int i; 5061 5062 /* CHECK: Test/set of login_pending does not need to be atomic 5063 * because only ibmvnic_tasklet tests/clears this. 
5064 */ 5065 if (!adapter->login_pending) { 5066 netdev_warn(netdev, "Ignoring unexpected login response\n"); 5067 return 0; 5068 } 5069 adapter->login_pending = false; 5070 5071 dma_unmap_single(dev, adapter->login_buf_token, adapter->login_buf_sz, 5072 DMA_TO_DEVICE); 5073 dma_unmap_single(dev, adapter->login_rsp_buf_token, 5074 adapter->login_rsp_buf_sz, DMA_FROM_DEVICE); 5075 5076 /* If the number of queues requested can't be allocated by the 5077 * server, the login response will return with code 1. We will need 5078 * to resend the login buffer with fewer queues requested. 5079 */ 5080 if (login_rsp_crq->generic.rc.code) { 5081 adapter->init_done_rc = login_rsp_crq->generic.rc.code; 5082 complete(&adapter->init_done); 5083 return 0; 5084 } 5085 5086 if (adapter->failover_pending) { 5087 adapter->init_done_rc = -EAGAIN; 5088 netdev_dbg(netdev, "Failover pending, ignoring login response\n"); 5089 complete(&adapter->init_done); 5090 /* login response buffer will be released on reset */ 5091 return 0; 5092 } 5093 5094 netdev->mtu = adapter->req_mtu - ETH_HLEN; 5095 5096 netdev_dbg(adapter->netdev, "Login Response Buffer:\n"); 5097 for (i = 0; i < (adapter->login_rsp_buf_sz - 1) / 8 + 1; i++) { 5098 netdev_dbg(adapter->netdev, "%016lx\n", 5099 ((unsigned long *)(adapter->login_rsp_buf))[i]); 5100 } 5101 5102 /* Sanity checks */ 5103 if (login->num_txcomp_subcrqs != login_rsp->num_txsubm_subcrqs || 5104 (be32_to_cpu(login->num_rxcomp_subcrqs) * 5105 adapter->req_rx_add_queues != 5106 be32_to_cpu(login_rsp->num_rxadd_subcrqs))) { 5107 dev_err(dev, "FATAL: Inconsistent login and login rsp\n"); 5108 ibmvnic_reset(adapter, VNIC_RESET_FATAL); 5109 return -EIO; 5110 } 5111 size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) + 5112 be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size)); 5113 /* variable buffer sizes are not supported, so just read the 5114 * first entry. 
5115 */ 5116 adapter->cur_rx_buf_sz = be64_to_cpu(size_array[0]); 5117 5118 num_tx_pools = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs); 5119 num_rx_pools = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs); 5120 5121 tx_handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) + 5122 be32_to_cpu(adapter->login_rsp_buf->off_txsubm_subcrqs)); 5123 rx_handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) + 5124 be32_to_cpu(adapter->login_rsp_buf->off_rxadd_subcrqs)); 5125 5126 for (i = 0; i < num_tx_pools; i++) 5127 adapter->tx_scrq[i]->handle = tx_handle_array[i]; 5128 5129 for (i = 0; i < num_rx_pools; i++) 5130 adapter->rx_scrq[i]->handle = rx_handle_array[i]; 5131 5132 adapter->num_active_tx_scrqs = num_tx_pools; 5133 adapter->num_active_rx_scrqs = num_rx_pools; 5134 release_login_rsp_buffer(adapter); 5135 release_login_buffer(adapter); 5136 complete(&adapter->init_done); 5137 5138 return 0; 5139 } 5140 5141 static void handle_request_unmap_rsp(union ibmvnic_crq *crq, 5142 struct ibmvnic_adapter *adapter) 5143 { 5144 struct device *dev = &adapter->vdev->dev; 5145 long rc; 5146 5147 rc = crq->request_unmap_rsp.rc.code; 5148 if (rc) 5149 dev_err(dev, "Error %ld in REQUEST_UNMAP_RSP\n", rc); 5150 } 5151 5152 static void handle_query_map_rsp(union ibmvnic_crq *crq, 5153 struct ibmvnic_adapter *adapter) 5154 { 5155 struct net_device *netdev = adapter->netdev; 5156 struct device *dev = &adapter->vdev->dev; 5157 long rc; 5158 5159 rc = crq->query_map_rsp.rc.code; 5160 if (rc) { 5161 dev_err(dev, "Error %ld in QUERY_MAP_RSP\n", rc); 5162 return; 5163 } 5164 netdev_dbg(netdev, "page_size = %d\ntot_pages = %u\nfree_pages = %u\n", 5165 crq->query_map_rsp.page_size, 5166 __be32_to_cpu(crq->query_map_rsp.tot_pages), 5167 __be32_to_cpu(crq->query_map_rsp.free_pages)); 5168 } 5169 5170 static void handle_query_cap_rsp(union ibmvnic_crq *crq, 5171 struct ibmvnic_adapter *adapter) 5172 { 5173 struct net_device *netdev = adapter->netdev; 5174 struct device *dev = &adapter->vdev->dev; 5175 long rc; 5176 5177 atomic_dec(&adapter->running_cap_crqs); 5178 netdev_dbg(netdev, "Outstanding queries: %d\n", 5179 atomic_read(&adapter->running_cap_crqs)); 5180 rc = crq->query_capability.rc.code; 5181 if (rc) { 5182 dev_err(dev, "Error %ld in QUERY_CAP_RSP\n", rc); 5183 goto out; 5184 } 5185 5186 switch (be16_to_cpu(crq->query_capability.capability)) { 5187 case MIN_TX_QUEUES: 5188 adapter->min_tx_queues = 5189 be64_to_cpu(crq->query_capability.number); 5190 netdev_dbg(netdev, "min_tx_queues = %lld\n", 5191 adapter->min_tx_queues); 5192 break; 5193 case MIN_RX_QUEUES: 5194 adapter->min_rx_queues = 5195 be64_to_cpu(crq->query_capability.number); 5196 netdev_dbg(netdev, "min_rx_queues = %lld\n", 5197 adapter->min_rx_queues); 5198 break; 5199 case MIN_RX_ADD_QUEUES: 5200 adapter->min_rx_add_queues = 5201 be64_to_cpu(crq->query_capability.number); 5202 netdev_dbg(netdev, "min_rx_add_queues = %lld\n", 5203 adapter->min_rx_add_queues); 5204 break; 5205 case MAX_TX_QUEUES: 5206 adapter->max_tx_queues = 5207 be64_to_cpu(crq->query_capability.number); 5208 netdev_dbg(netdev, "max_tx_queues = %lld\n", 5209 adapter->max_tx_queues); 5210 break; 5211 case MAX_RX_QUEUES: 5212 adapter->max_rx_queues = 5213 be64_to_cpu(crq->query_capability.number); 5214 netdev_dbg(netdev, "max_rx_queues = %lld\n", 5215 adapter->max_rx_queues); 5216 break; 5217 case MAX_RX_ADD_QUEUES: 5218 adapter->max_rx_add_queues = 5219 be64_to_cpu(crq->query_capability.number); 5220 netdev_dbg(netdev, "max_rx_add_queues = %lld\n", 5221 
adapter->max_rx_add_queues); 5222 break; 5223 case MIN_TX_ENTRIES_PER_SUBCRQ: 5224 adapter->min_tx_entries_per_subcrq = 5225 be64_to_cpu(crq->query_capability.number); 5226 netdev_dbg(netdev, "min_tx_entries_per_subcrq = %lld\n", 5227 adapter->min_tx_entries_per_subcrq); 5228 break; 5229 case MIN_RX_ADD_ENTRIES_PER_SUBCRQ: 5230 adapter->min_rx_add_entries_per_subcrq = 5231 be64_to_cpu(crq->query_capability.number); 5232 netdev_dbg(netdev, "min_rx_add_entrs_per_subcrq = %lld\n", 5233 adapter->min_rx_add_entries_per_subcrq); 5234 break; 5235 case MAX_TX_ENTRIES_PER_SUBCRQ: 5236 adapter->max_tx_entries_per_subcrq = 5237 be64_to_cpu(crq->query_capability.number); 5238 netdev_dbg(netdev, "max_tx_entries_per_subcrq = %lld\n", 5239 adapter->max_tx_entries_per_subcrq); 5240 break; 5241 case MAX_RX_ADD_ENTRIES_PER_SUBCRQ: 5242 adapter->max_rx_add_entries_per_subcrq = 5243 be64_to_cpu(crq->query_capability.number); 5244 netdev_dbg(netdev, "max_rx_add_entrs_per_subcrq = %lld\n", 5245 adapter->max_rx_add_entries_per_subcrq); 5246 break; 5247 case TCP_IP_OFFLOAD: 5248 adapter->tcp_ip_offload = 5249 be64_to_cpu(crq->query_capability.number); 5250 netdev_dbg(netdev, "tcp_ip_offload = %lld\n", 5251 adapter->tcp_ip_offload); 5252 break; 5253 case PROMISC_SUPPORTED: 5254 adapter->promisc_supported = 5255 be64_to_cpu(crq->query_capability.number); 5256 netdev_dbg(netdev, "promisc_supported = %lld\n", 5257 adapter->promisc_supported); 5258 break; 5259 case MIN_MTU: 5260 adapter->min_mtu = be64_to_cpu(crq->query_capability.number); 5261 netdev->min_mtu = adapter->min_mtu - ETH_HLEN; 5262 netdev_dbg(netdev, "min_mtu = %lld\n", adapter->min_mtu); 5263 break; 5264 case MAX_MTU: 5265 adapter->max_mtu = be64_to_cpu(crq->query_capability.number); 5266 netdev->max_mtu = adapter->max_mtu - ETH_HLEN; 5267 netdev_dbg(netdev, "max_mtu = %lld\n", adapter->max_mtu); 5268 break; 5269 case MAX_MULTICAST_FILTERS: 5270 adapter->max_multicast_filters = 5271 be64_to_cpu(crq->query_capability.number); 5272 netdev_dbg(netdev, "max_multicast_filters = %lld\n", 5273 adapter->max_multicast_filters); 5274 break; 5275 case VLAN_HEADER_INSERTION: 5276 adapter->vlan_header_insertion = 5277 be64_to_cpu(crq->query_capability.number); 5278 if (adapter->vlan_header_insertion) 5279 netdev->features |= NETIF_F_HW_VLAN_STAG_TX; 5280 netdev_dbg(netdev, "vlan_header_insertion = %lld\n", 5281 adapter->vlan_header_insertion); 5282 break; 5283 case RX_VLAN_HEADER_INSERTION: 5284 adapter->rx_vlan_header_insertion = 5285 be64_to_cpu(crq->query_capability.number); 5286 netdev_dbg(netdev, "rx_vlan_header_insertion = %lld\n", 5287 adapter->rx_vlan_header_insertion); 5288 break; 5289 case MAX_TX_SG_ENTRIES: 5290 adapter->max_tx_sg_entries = 5291 be64_to_cpu(crq->query_capability.number); 5292 netdev_dbg(netdev, "max_tx_sg_entries = %lld\n", 5293 adapter->max_tx_sg_entries); 5294 break; 5295 case RX_SG_SUPPORTED: 5296 adapter->rx_sg_supported = 5297 be64_to_cpu(crq->query_capability.number); 5298 netdev_dbg(netdev, "rx_sg_supported = %lld\n", 5299 adapter->rx_sg_supported); 5300 break; 5301 case OPT_TX_COMP_SUB_QUEUES: 5302 adapter->opt_tx_comp_sub_queues = 5303 be64_to_cpu(crq->query_capability.number); 5304 netdev_dbg(netdev, "opt_tx_comp_sub_queues = %lld\n", 5305 adapter->opt_tx_comp_sub_queues); 5306 break; 5307 case OPT_RX_COMP_QUEUES: 5308 adapter->opt_rx_comp_queues = 5309 be64_to_cpu(crq->query_capability.number); 5310 netdev_dbg(netdev, "opt_rx_comp_queues = %lld\n", 5311 adapter->opt_rx_comp_queues); 5312 break; 5313 case 
OPT_RX_BUFADD_Q_PER_RX_COMP_Q: 5314 adapter->opt_rx_bufadd_q_per_rx_comp_q = 5315 be64_to_cpu(crq->query_capability.number); 5316 netdev_dbg(netdev, "opt_rx_bufadd_q_per_rx_comp_q = %lld\n", 5317 adapter->opt_rx_bufadd_q_per_rx_comp_q); 5318 break; 5319 case OPT_TX_ENTRIES_PER_SUBCRQ: 5320 adapter->opt_tx_entries_per_subcrq = 5321 be64_to_cpu(crq->query_capability.number); 5322 netdev_dbg(netdev, "opt_tx_entries_per_subcrq = %lld\n", 5323 adapter->opt_tx_entries_per_subcrq); 5324 break; 5325 case OPT_RXBA_ENTRIES_PER_SUBCRQ: 5326 adapter->opt_rxba_entries_per_subcrq = 5327 be64_to_cpu(crq->query_capability.number); 5328 netdev_dbg(netdev, "opt_rxba_entries_per_subcrq = %lld\n", 5329 adapter->opt_rxba_entries_per_subcrq); 5330 break; 5331 case TX_RX_DESC_REQ: 5332 adapter->tx_rx_desc_req = crq->query_capability.number; 5333 netdev_dbg(netdev, "tx_rx_desc_req = %llx\n", 5334 adapter->tx_rx_desc_req); 5335 break; 5336 5337 default: 5338 netdev_err(netdev, "Got invalid cap rsp %d\n", 5339 crq->query_capability.capability); 5340 } 5341 5342 out: 5343 if (atomic_read(&adapter->running_cap_crqs) == 0) 5344 send_request_cap(adapter, 0); 5345 } 5346 5347 static int send_query_phys_parms(struct ibmvnic_adapter *adapter) 5348 { 5349 union ibmvnic_crq crq; 5350 int rc; 5351 5352 memset(&crq, 0, sizeof(crq)); 5353 crq.query_phys_parms.first = IBMVNIC_CRQ_CMD; 5354 crq.query_phys_parms.cmd = QUERY_PHYS_PARMS; 5355 5356 mutex_lock(&adapter->fw_lock); 5357 adapter->fw_done_rc = 0; 5358 reinit_completion(&adapter->fw_done); 5359 5360 rc = ibmvnic_send_crq(adapter, &crq); 5361 if (rc) { 5362 mutex_unlock(&adapter->fw_lock); 5363 return rc; 5364 } 5365 5366 rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000); 5367 if (rc) { 5368 mutex_unlock(&adapter->fw_lock); 5369 return rc; 5370 } 5371 5372 mutex_unlock(&adapter->fw_lock); 5373 return adapter->fw_done_rc ? 
-EIO : 0; 5374 } 5375 5376 static int handle_query_phys_parms_rsp(union ibmvnic_crq *crq, 5377 struct ibmvnic_adapter *adapter) 5378 { 5379 struct net_device *netdev = adapter->netdev; 5380 int rc; 5381 __be32 rspeed = cpu_to_be32(crq->query_phys_parms_rsp.speed); 5382 5383 rc = crq->query_phys_parms_rsp.rc.code; 5384 if (rc) { 5385 netdev_err(netdev, "Error %d in QUERY_PHYS_PARMS\n", rc); 5386 return rc; 5387 } 5388 switch (rspeed) { 5389 case IBMVNIC_10MBPS: 5390 adapter->speed = SPEED_10; 5391 break; 5392 case IBMVNIC_100MBPS: 5393 adapter->speed = SPEED_100; 5394 break; 5395 case IBMVNIC_1GBPS: 5396 adapter->speed = SPEED_1000; 5397 break; 5398 case IBMVNIC_10GBPS: 5399 adapter->speed = SPEED_10000; 5400 break; 5401 case IBMVNIC_25GBPS: 5402 adapter->speed = SPEED_25000; 5403 break; 5404 case IBMVNIC_40GBPS: 5405 adapter->speed = SPEED_40000; 5406 break; 5407 case IBMVNIC_50GBPS: 5408 adapter->speed = SPEED_50000; 5409 break; 5410 case IBMVNIC_100GBPS: 5411 adapter->speed = SPEED_100000; 5412 break; 5413 case IBMVNIC_200GBPS: 5414 adapter->speed = SPEED_200000; 5415 break; 5416 default: 5417 if (netif_carrier_ok(netdev)) 5418 netdev_warn(netdev, "Unknown speed 0x%08x\n", rspeed); 5419 adapter->speed = SPEED_UNKNOWN; 5420 } 5421 if (crq->query_phys_parms_rsp.flags1 & IBMVNIC_FULL_DUPLEX) 5422 adapter->duplex = DUPLEX_FULL; 5423 else if (crq->query_phys_parms_rsp.flags1 & IBMVNIC_HALF_DUPLEX) 5424 adapter->duplex = DUPLEX_HALF; 5425 else 5426 adapter->duplex = DUPLEX_UNKNOWN; 5427 5428 return rc; 5429 } 5430 5431 static void ibmvnic_handle_crq(union ibmvnic_crq *crq, 5432 struct ibmvnic_adapter *adapter) 5433 { 5434 struct ibmvnic_generic_crq *gen_crq = &crq->generic; 5435 struct net_device *netdev = adapter->netdev; 5436 struct device *dev = &adapter->vdev->dev; 5437 u64 *u64_crq = (u64 *)crq; 5438 long rc; 5439 5440 netdev_dbg(netdev, "Handling CRQ: %016lx %016lx\n", 5441 (unsigned long)cpu_to_be64(u64_crq[0]), 5442 (unsigned long)cpu_to_be64(u64_crq[1])); 5443 switch (gen_crq->first) { 5444 case IBMVNIC_CRQ_INIT_RSP: 5445 switch (gen_crq->cmd) { 5446 case IBMVNIC_CRQ_INIT: 5447 dev_info(dev, "Partner initialized\n"); 5448 adapter->from_passive_init = true; 5449 /* Discard any stale login responses from prev reset. 5450 * CHECK: should we clear even on INIT_COMPLETE? 5451 */ 5452 adapter->login_pending = false; 5453 5454 if (adapter->state == VNIC_DOWN) 5455 rc = ibmvnic_reset(adapter, VNIC_RESET_PASSIVE_INIT); 5456 else 5457 rc = ibmvnic_reset(adapter, VNIC_RESET_FAILOVER); 5458 5459 if (rc && rc != -EBUSY) { 5460 /* We were unable to schedule the failover 5461 * reset either because the adapter was still 5462 * probing (eg: during kexec) or we could not 5463 * allocate memory. Clear the failover_pending 5464 * flag since no one else will. We ignore 5465 * EBUSY because it means either FAILOVER reset 5466 * is already scheduled or the adapter is 5467 * being removed. 
5468 */ 5469 netdev_err(netdev, 5470 "Error %ld scheduling failover reset\n", 5471 rc); 5472 adapter->failover_pending = false; 5473 } 5474 5475 if (!completion_done(&adapter->init_done)) { 5476 if (!adapter->init_done_rc) 5477 adapter->init_done_rc = -EAGAIN; 5478 complete(&adapter->init_done); 5479 } 5480 5481 break; 5482 case IBMVNIC_CRQ_INIT_COMPLETE: 5483 dev_info(dev, "Partner initialization complete\n"); 5484 adapter->crq.active = true; 5485 send_version_xchg(adapter); 5486 break; 5487 default: 5488 dev_err(dev, "Unknown crq cmd: %d\n", gen_crq->cmd); 5489 } 5490 return; 5491 case IBMVNIC_CRQ_XPORT_EVENT: 5492 netif_carrier_off(netdev); 5493 adapter->crq.active = false; 5494 /* terminate any thread waiting for a response 5495 * from the device 5496 */ 5497 if (!completion_done(&adapter->fw_done)) { 5498 adapter->fw_done_rc = -EIO; 5499 complete(&adapter->fw_done); 5500 } 5501 5502 /* if we got here during crq-init, retry crq-init */ 5503 if (!completion_done(&adapter->init_done)) { 5504 adapter->init_done_rc = -EAGAIN; 5505 complete(&adapter->init_done); 5506 } 5507 5508 if (!completion_done(&adapter->stats_done)) 5509 complete(&adapter->stats_done); 5510 if (test_bit(0, &adapter->resetting)) 5511 adapter->force_reset_recovery = true; 5512 if (gen_crq->cmd == IBMVNIC_PARTITION_MIGRATED) { 5513 dev_info(dev, "Migrated, re-enabling adapter\n"); 5514 ibmvnic_reset(adapter, VNIC_RESET_MOBILITY); 5515 } else if (gen_crq->cmd == IBMVNIC_DEVICE_FAILOVER) { 5516 dev_info(dev, "Backing device failover detected\n"); 5517 adapter->failover_pending = true; 5518 } else { 5519 /* The adapter lost the connection */ 5520 dev_err(dev, "Virtual Adapter failed (rc=%d)\n", 5521 gen_crq->cmd); 5522 ibmvnic_reset(adapter, VNIC_RESET_FATAL); 5523 } 5524 return; 5525 case IBMVNIC_CRQ_CMD_RSP: 5526 break; 5527 default: 5528 dev_err(dev, "Got an invalid msg type 0x%02x\n", 5529 gen_crq->first); 5530 return; 5531 } 5532 5533 switch (gen_crq->cmd) { 5534 case VERSION_EXCHANGE_RSP: 5535 rc = crq->version_exchange_rsp.rc.code; 5536 if (rc) { 5537 dev_err(dev, "Error %ld in VERSION_EXCHG_RSP\n", rc); 5538 break; 5539 } 5540 ibmvnic_version = 5541 be16_to_cpu(crq->version_exchange_rsp.version); 5542 dev_info(dev, "Partner protocol version is %d\n", 5543 ibmvnic_version); 5544 send_query_cap(adapter); 5545 break; 5546 case QUERY_CAPABILITY_RSP: 5547 handle_query_cap_rsp(crq, adapter); 5548 break; 5549 case QUERY_MAP_RSP: 5550 handle_query_map_rsp(crq, adapter); 5551 break; 5552 case REQUEST_MAP_RSP: 5553 adapter->fw_done_rc = crq->request_map_rsp.rc.code; 5554 complete(&adapter->fw_done); 5555 break; 5556 case REQUEST_UNMAP_RSP: 5557 handle_request_unmap_rsp(crq, adapter); 5558 break; 5559 case REQUEST_CAPABILITY_RSP: 5560 handle_request_cap_rsp(crq, adapter); 5561 break; 5562 case LOGIN_RSP: 5563 netdev_dbg(netdev, "Got Login Response\n"); 5564 handle_login_rsp(crq, adapter); 5565 break; 5566 case LOGICAL_LINK_STATE_RSP: 5567 netdev_dbg(netdev, 5568 "Got Logical Link State Response, state: %d rc: %d\n", 5569 crq->logical_link_state_rsp.link_state, 5570 crq->logical_link_state_rsp.rc.code); 5571 adapter->logical_link_state = 5572 crq->logical_link_state_rsp.link_state; 5573 adapter->init_done_rc = crq->logical_link_state_rsp.rc.code; 5574 complete(&adapter->init_done); 5575 break; 5576 case LINK_STATE_INDICATION: 5577 netdev_dbg(netdev, "Got Logical Link State Indication\n"); 5578 adapter->phys_link_state = 5579 crq->link_state_indication.phys_link_state; 5580 adapter->logical_link_state = 5581 
crq->link_state_indication.logical_link_state; 5582 if (adapter->phys_link_state && adapter->logical_link_state) 5583 netif_carrier_on(netdev); 5584 else 5585 netif_carrier_off(netdev); 5586 break; 5587 case CHANGE_MAC_ADDR_RSP: 5588 netdev_dbg(netdev, "Got MAC address change Response\n"); 5589 adapter->fw_done_rc = handle_change_mac_rsp(crq, adapter); 5590 break; 5591 case ERROR_INDICATION: 5592 netdev_dbg(netdev, "Got Error Indication\n"); 5593 handle_error_indication(crq, adapter); 5594 break; 5595 case REQUEST_STATISTICS_RSP: 5596 netdev_dbg(netdev, "Got Statistics Response\n"); 5597 complete(&adapter->stats_done); 5598 break; 5599 case QUERY_IP_OFFLOAD_RSP: 5600 netdev_dbg(netdev, "Got Query IP offload Response\n"); 5601 handle_query_ip_offload_rsp(adapter); 5602 break; 5603 case MULTICAST_CTRL_RSP: 5604 netdev_dbg(netdev, "Got multicast control Response\n"); 5605 break; 5606 case CONTROL_IP_OFFLOAD_RSP: 5607 netdev_dbg(netdev, "Got Control IP offload Response\n"); 5608 dma_unmap_single(dev, adapter->ip_offload_ctrl_tok, 5609 sizeof(adapter->ip_offload_ctrl), 5610 DMA_TO_DEVICE); 5611 complete(&adapter->init_done); 5612 break; 5613 case COLLECT_FW_TRACE_RSP: 5614 netdev_dbg(netdev, "Got Collect firmware trace Response\n"); 5615 complete(&adapter->fw_done); 5616 break; 5617 case GET_VPD_SIZE_RSP: 5618 handle_vpd_size_rsp(crq, adapter); 5619 break; 5620 case GET_VPD_RSP: 5621 handle_vpd_rsp(crq, adapter); 5622 break; 5623 case QUERY_PHYS_PARMS_RSP: 5624 adapter->fw_done_rc = handle_query_phys_parms_rsp(crq, adapter); 5625 complete(&adapter->fw_done); 5626 break; 5627 default: 5628 netdev_err(netdev, "Got an invalid cmd type 0x%02x\n", 5629 gen_crq->cmd); 5630 } 5631 } 5632 5633 static irqreturn_t ibmvnic_interrupt(int irq, void *instance) 5634 { 5635 struct ibmvnic_adapter *adapter = instance; 5636 5637 tasklet_schedule(&adapter->tasklet); 5638 return IRQ_HANDLED; 5639 } 5640 5641 static void ibmvnic_tasklet(struct tasklet_struct *t) 5642 { 5643 struct ibmvnic_adapter *adapter = from_tasklet(adapter, t, tasklet); 5644 struct ibmvnic_crq_queue *queue = &adapter->crq; 5645 union ibmvnic_crq *crq; 5646 unsigned long flags; 5647 5648 spin_lock_irqsave(&queue->lock, flags); 5649 5650 /* Pull all the valid messages off the CRQ */ 5651 while ((crq = ibmvnic_next_crq(adapter)) != NULL) { 5652 /* This barrier makes sure ibmvnic_next_crq()'s 5653 * crq->generic.first & IBMVNIC_CRQ_CMD_RSP is loaded 5654 * before ibmvnic_handle_crq()'s 5655 * switch(gen_crq->first) and switch(gen_crq->cmd). 
5656 */ 5657 dma_rmb(); 5658 ibmvnic_handle_crq(crq, adapter); 5659 crq->generic.first = 0; 5660 } 5661 5662 spin_unlock_irqrestore(&queue->lock, flags); 5663 } 5664 5665 static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *adapter) 5666 { 5667 struct vio_dev *vdev = adapter->vdev; 5668 int rc; 5669 5670 do { 5671 rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address); 5672 } while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc)); 5673 5674 if (rc) 5675 dev_err(&vdev->dev, "Error enabling adapter (rc=%d)\n", rc); 5676 5677 return rc; 5678 } 5679 5680 static int ibmvnic_reset_crq(struct ibmvnic_adapter *adapter) 5681 { 5682 struct ibmvnic_crq_queue *crq = &adapter->crq; 5683 struct device *dev = &adapter->vdev->dev; 5684 struct vio_dev *vdev = adapter->vdev; 5685 int rc; 5686 5687 /* Close the CRQ */ 5688 do { 5689 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address); 5690 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc)); 5691 5692 /* Clean out the queue */ 5693 if (!crq->msgs) 5694 return -EINVAL; 5695 5696 memset(crq->msgs, 0, PAGE_SIZE); 5697 crq->cur = 0; 5698 crq->active = false; 5699 5700 /* And re-open it again */ 5701 rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address, 5702 crq->msg_token, PAGE_SIZE); 5703 5704 if (rc == H_CLOSED) 5705 /* Adapter is good, but other end is not ready */ 5706 dev_warn(dev, "Partner adapter not ready\n"); 5707 else if (rc != 0) 5708 dev_warn(dev, "Couldn't register crq (rc=%d)\n", rc); 5709 5710 return rc; 5711 } 5712 5713 static void release_crq_queue(struct ibmvnic_adapter *adapter) 5714 { 5715 struct ibmvnic_crq_queue *crq = &adapter->crq; 5716 struct vio_dev *vdev = adapter->vdev; 5717 long rc; 5718 5719 if (!crq->msgs) 5720 return; 5721 5722 netdev_dbg(adapter->netdev, "Releasing CRQ\n"); 5723 free_irq(vdev->irq, adapter); 5724 tasklet_kill(&adapter->tasklet); 5725 do { 5726 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address); 5727 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc)); 5728 5729 dma_unmap_single(&vdev->dev, crq->msg_token, PAGE_SIZE, 5730 DMA_BIDIRECTIONAL); 5731 free_page((unsigned long)crq->msgs); 5732 crq->msgs = NULL; 5733 crq->active = false; 5734 } 5735 5736 static int init_crq_queue(struct ibmvnic_adapter *adapter) 5737 { 5738 struct ibmvnic_crq_queue *crq = &adapter->crq; 5739 struct device *dev = &adapter->vdev->dev; 5740 struct vio_dev *vdev = adapter->vdev; 5741 int rc, retrc = -ENOMEM; 5742 5743 if (crq->msgs) 5744 return 0; 5745 5746 crq->msgs = (union ibmvnic_crq *)get_zeroed_page(GFP_KERNEL); 5747 /* Should we allocate more than one page? */ 5748 5749 if (!crq->msgs) 5750 return -ENOMEM; 5751 5752 crq->size = PAGE_SIZE / sizeof(*crq->msgs); 5753 crq->msg_token = dma_map_single(dev, crq->msgs, PAGE_SIZE, 5754 DMA_BIDIRECTIONAL); 5755 if (dma_mapping_error(dev, crq->msg_token)) 5756 goto map_failed; 5757 5758 rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address, 5759 crq->msg_token, PAGE_SIZE); 5760 5761 if (rc == H_RESOURCE) 5762 /* maybe kexecing and resource is busy. 
static int init_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct device *dev = &adapter->vdev->dev;
	struct vio_dev *vdev = adapter->vdev;
	int rc, retrc = -ENOMEM;

	if (crq->msgs)
		return 0;

	crq->msgs = (union ibmvnic_crq *)get_zeroed_page(GFP_KERNEL);
	/* Should we allocate more than one page? */

	if (!crq->msgs)
		return -ENOMEM;

	crq->size = PAGE_SIZE / sizeof(*crq->msgs);
	crq->msg_token = dma_map_single(dev, crq->msgs, PAGE_SIZE,
					DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, crq->msg_token))
		goto map_failed;

	rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
				crq->msg_token, PAGE_SIZE);

	if (rc == H_RESOURCE)
		/* maybe kexecing and resource is busy. try a reset */
		rc = ibmvnic_reset_crq(adapter);
	retrc = rc;

	if (rc == H_CLOSED) {
		dev_warn(dev, "Partner adapter not ready\n");
	} else if (rc) {
		dev_warn(dev, "Error %d opening adapter\n", rc);
		goto reg_crq_failed;
	}

	retrc = 0;

	tasklet_setup(&adapter->tasklet, (void *)ibmvnic_tasklet);

	netdev_dbg(adapter->netdev, "registering irq 0x%x\n", vdev->irq);
	snprintf(crq->name, sizeof(crq->name), "ibmvnic-%x",
		 adapter->vdev->unit_address);
	rc = request_irq(vdev->irq, ibmvnic_interrupt, 0, crq->name, adapter);
	if (rc) {
		dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n",
			vdev->irq, rc);
		goto req_irq_failed;
	}

	rc = vio_enable_interrupts(vdev);
	if (rc) {
		dev_err(dev, "Error %d enabling interrupts\n", rc);
		goto req_irq_failed;
	}

	crq->cur = 0;
	spin_lock_init(&crq->lock);

	/* process any CRQs that were queued before we enabled interrupts */
	tasklet_schedule(&adapter->tasklet);

	return retrc;

req_irq_failed:
	tasklet_kill(&adapter->tasklet);
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
reg_crq_failed:
	dma_unmap_single(dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
map_failed:
	free_page((unsigned long)crq->msgs);
	crq->msgs = NULL;
	return retrc;
}

static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter, bool reset)
{
	struct device *dev = &adapter->vdev->dev;
	unsigned long timeout = msecs_to_jiffies(20000);
	u64 old_num_rx_queues = adapter->req_rx_queues;
	u64 old_num_tx_queues = adapter->req_tx_queues;
	int rc;

	adapter->from_passive_init = false;

	rc = ibmvnic_send_crq_init(adapter);
	if (rc) {
		dev_err(dev, "Send crq init failed with error %d\n", rc);
		return rc;
	}

	if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
		dev_err(dev, "Initialization sequence timed out\n");
		return -ETIMEDOUT;
	}

	if (adapter->init_done_rc) {
		release_crq_queue(adapter);
		dev_err(dev, "CRQ-init failed, %d\n", adapter->init_done_rc);
		return adapter->init_done_rc;
	}

	if (adapter->from_passive_init) {
		adapter->state = VNIC_OPEN;
		adapter->from_passive_init = false;
		dev_err(dev, "CRQ-init failed, passive-init\n");
		return -EINVAL;
	}

	if (reset &&
	    test_bit(0, &adapter->resetting) && !adapter->wait_for_reset &&
	    adapter->reset_reason != VNIC_RESET_MOBILITY) {
		if (adapter->req_rx_queues != old_num_rx_queues ||
		    adapter->req_tx_queues != old_num_tx_queues) {
			release_sub_crqs(adapter, 0);
			rc = init_sub_crqs(adapter);
		} else {
			rc = reset_sub_crq_queues(adapter);
		}
	} else {
		rc = init_sub_crqs(adapter);
	}

	if (rc) {
		dev_err(dev, "Initialization of sub crqs failed\n");
		release_crq_queue(adapter);
		return rc;
	}

	rc = init_sub_crq_irqs(adapter);
	if (rc) {
		dev_err(dev, "Failed to initialize sub crq irqs\n");
		release_crq_queue(adapter);
	}

	return rc;
}

static struct device_attribute dev_attr_failover;

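/* Probe a VNIC device: read the MAC address supplied by firmware, allocate
 * the net_device and adapter state, bring up the CRQ (retrying while the
 * initial handshake reports -EAGAIN) and finally register the netdev.  If
 * the handshake does not complete because the partner is not ready, the
 * device is still registered in the VNIC_DOWN state and finishes coming up
 * later through a passive-init reset.
 */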
static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
{
	struct ibmvnic_adapter *adapter;
	struct net_device *netdev;
	unsigned char *mac_addr_p;
	unsigned long flags;
	bool init_success;
	int rc;

	dev_dbg(&dev->dev, "entering ibmvnic_probe for UA 0x%x\n",
		dev->unit_address);

	mac_addr_p = (unsigned char *)vio_get_attribute(dev,
							VETH_MAC_ADDR, NULL);
	if (!mac_addr_p) {
		dev_err(&dev->dev,
			"(%s:%3.3d) ERROR: Can't find MAC_ADDR attribute\n",
			__FILE__, __LINE__);
		return 0;
	}

	netdev = alloc_etherdev_mq(sizeof(struct ibmvnic_adapter),
				   IBMVNIC_MAX_QUEUES);
	if (!netdev)
		return -ENOMEM;

	adapter = netdev_priv(netdev);
	adapter->state = VNIC_PROBING;
	dev_set_drvdata(&dev->dev, netdev);
	adapter->vdev = dev;
	adapter->netdev = netdev;
	adapter->login_pending = false;
	memset(&adapter->map_ids, 0, sizeof(adapter->map_ids));
	/* map_ids start at 1, so ensure map_id 0 is always "in-use" */
	bitmap_set(adapter->map_ids, 0, 1);

	ether_addr_copy(adapter->mac_addr, mac_addr_p);
	eth_hw_addr_set(netdev, adapter->mac_addr);
	netdev->irq = dev->irq;
	netdev->netdev_ops = &ibmvnic_netdev_ops;
	netdev->ethtool_ops = &ibmvnic_ethtool_ops;
	SET_NETDEV_DEV(netdev, &dev->dev);

	INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset);
	INIT_DELAYED_WORK(&adapter->ibmvnic_delayed_reset,
			  __ibmvnic_delayed_reset);
	INIT_LIST_HEAD(&adapter->rwi_list);
	spin_lock_init(&adapter->rwi_lock);
	spin_lock_init(&adapter->state_lock);
	mutex_init(&adapter->fw_lock);
	init_completion(&adapter->probe_done);
	init_completion(&adapter->init_done);
	init_completion(&adapter->fw_done);
	init_completion(&adapter->reset_done);
	init_completion(&adapter->stats_done);
	clear_bit(0, &adapter->resetting);
	adapter->prev_rx_buf_sz = 0;
	adapter->prev_mtu = 0;

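	/* Bring up the CRQ and perform the initial handshake with the VNIC
	 * server.  Each pass releases any previously initialized CRQ and
	 * purges queued resets before trying again; the loop repeats for as
	 * long as ibmvnic_reset_init() asks for a retry with -EAGAIN.
	 */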
	init_success = false;
	do {
		reinit_init_done(adapter);

		/* clear any failovers we got in the previous pass
		 * since we are reinitializing the CRQ
		 */
		adapter->failover_pending = false;

		/* If we had already initialized CRQ, we may have one or
		 * more resets queued already. Discard those and release
		 * the CRQ before initializing the CRQ again.
		 */
		release_crq_queue(adapter);

		/* Since we are still in PROBING state, __ibmvnic_reset()
		 * will not access the ->rwi_list and since we released CRQ,
		 * we won't get _new_ transport events. But there may be an
		 * ongoing ibmvnic_reset() call. So serialize access to
		 * rwi_list. If we win the race, ibmvnic_reset() could add
		 * a reset after we purged, but that's OK - we just may end
		 * up with an extra reset (i.e. similar to having two or more
		 * resets in the queue at once).
		 */
		spin_lock_irqsave(&adapter->rwi_lock, flags);
		flush_reset_queue(adapter);
		spin_unlock_irqrestore(&adapter->rwi_lock, flags);

		rc = init_crq_queue(adapter);
		if (rc) {
			dev_err(&dev->dev, "Couldn't initialize crq. rc=%d\n",
				rc);
			goto ibmvnic_init_fail;
		}

		rc = ibmvnic_reset_init(adapter, false);
	} while (rc == -EAGAIN);

	/* We are ignoring the error from ibmvnic_reset_init() assuming that the
	 * partner is not ready. CRQ is not active. When the partner becomes
	 * ready, we will do the passive init reset.
	 */

	if (!rc)
		init_success = true;

	rc = init_stats_buffers(adapter);
	if (rc)
		goto ibmvnic_init_fail;

	rc = init_stats_token(adapter);
	if (rc)
		goto ibmvnic_stats_fail;

	rc = device_create_file(&dev->dev, &dev_attr_failover);
	if (rc)
		goto ibmvnic_dev_file_err;

	netif_carrier_off(netdev);

	if (init_success) {
		adapter->state = VNIC_PROBED;
		netdev->mtu = adapter->req_mtu - ETH_HLEN;
		netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
		netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
	} else {
		adapter->state = VNIC_DOWN;
	}

	adapter->wait_for_reset = false;
	adapter->last_reset_time = jiffies;

	rc = register_netdev(netdev);
	if (rc) {
		dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc);
		goto ibmvnic_register_fail;
	}
	dev_info(&dev->dev, "ibmvnic registered\n");

	complete(&adapter->probe_done);

	return 0;

ibmvnic_register_fail:
	device_remove_file(&dev->dev, &dev_attr_failover);

ibmvnic_dev_file_err:
	release_stats_token(adapter);

ibmvnic_stats_fail:
	release_stats_buffers(adapter);

ibmvnic_init_fail:
	release_sub_crqs(adapter, 1);
	release_crq_queue(adapter);

	/* cleanup worker thread after releasing CRQ so we don't get
	 * transport events (i.e. new work items for the worker thread).
	 */
	adapter->state = VNIC_REMOVING;
	complete(&adapter->probe_done);
	flush_work(&adapter->ibmvnic_reset);
	flush_delayed_work(&adapter->ibmvnic_delayed_reset);

	flush_reset_queue(adapter);

	mutex_destroy(&adapter->fw_lock);
	free_netdev(netdev);

	return rc;
}

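/* Tear the device down: mark the adapter VNIC_REMOVING (under state_lock and
 * rwi_lock so no further resets can be scheduled), flush the reset workers,
 * then unregister the netdev and release queues, pools and statistics
 * buffers under the RTNL lock.
 */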
static void ibmvnic_remove(struct vio_dev *dev)
{
	struct net_device *netdev = dev_get_drvdata(&dev->dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	unsigned long flags;

	spin_lock_irqsave(&adapter->state_lock, flags);

	/* If ibmvnic_reset() is scheduling a reset, wait for it to
	 * finish. Then, set the state to REMOVING to prevent it from
	 * scheduling any more work and to have reset functions ignore
	 * any resets that have already been scheduled. Drop the lock
	 * after setting state, so __ibmvnic_reset(), which is called
	 * from the flush_work() below, can make progress.
	 */
	spin_lock(&adapter->rwi_lock);
	adapter->state = VNIC_REMOVING;
	spin_unlock(&adapter->rwi_lock);

	spin_unlock_irqrestore(&adapter->state_lock, flags);

	flush_work(&adapter->ibmvnic_reset);
	flush_delayed_work(&adapter->ibmvnic_delayed_reset);

	rtnl_lock();
	unregister_netdevice(netdev);

	release_resources(adapter);
	release_rx_pools(adapter);
	release_tx_pools(adapter);
	release_sub_crqs(adapter, 1);
	release_crq_queue(adapter);

	release_stats_token(adapter);
	release_stats_buffers(adapter);

	adapter->state = VNIC_REMOVED;

	rtnl_unlock();
	mutex_destroy(&adapter->fw_lock);
	device_remove_file(&dev->dev, &dev_attr_failover);
	free_netdev(netdev);
	dev_set_drvdata(&dev->dev, NULL);
}

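/* Writing "1" to the "failover" sysfs attribute created in ibmvnic_probe()
 * (typically exposed under /sys/bus/vio/devices/<unit-address>/failover)
 * initiates a client failover: the session token is fetched with
 * H_GET_SESSION_TOKEN and reported back through H_SESSION_ERR_DETECTED.
 * If either hcall fails, a FAILOVER reset is scheduled as a last resort.
 */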
static ssize_t failover_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	__be64 session_token;
	long rc;

	if (!sysfs_streq(buf, "1"))
		return -EINVAL;

	rc = plpar_hcall(H_VIOCTL, retbuf, adapter->vdev->unit_address,
			 H_GET_SESSION_TOKEN, 0, 0, 0);
	if (rc) {
		netdev_err(netdev, "Couldn't retrieve session token, rc %ld\n",
			   rc);
		goto last_resort;
	}

	session_token = (__be64)retbuf[0];
	netdev_dbg(netdev, "Initiating client failover, session id %llx\n",
		   be64_to_cpu(session_token));
	rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
				H_SESSION_ERR_DETECTED, session_token, 0, 0);
	if (rc) {
		netdev_err(netdev,
			   "H_VIOCTL initiated failover failed, rc %ld\n",
			   rc);
		goto last_resort;
	}

	return count;

last_resort:
	netdev_dbg(netdev, "Trying to send CRQ_CMD, the last resort\n");
	ibmvnic_reset(adapter, VNIC_RESET_FAILOVER);

	return count;
}
static DEVICE_ATTR_WO(failover);

static unsigned long ibmvnic_get_desired_dma(struct vio_dev *vdev)
{
	struct net_device *netdev = dev_get_drvdata(&vdev->dev);
	struct ibmvnic_adapter *adapter;
	struct iommu_table *tbl;
	unsigned long ret = 0;
	int i;

	tbl = get_iommu_table_base(&vdev->dev);

	/* netdev inits at probe time along with the structures we need below */
	if (!netdev)
		return IOMMU_PAGE_ALIGN(IBMVNIC_IO_ENTITLEMENT_DEFAULT, tbl);

	adapter = netdev_priv(netdev);

	ret += PAGE_SIZE; /* the crq message queue */
	ret += IOMMU_PAGE_ALIGN(sizeof(struct ibmvnic_statistics), tbl);

	for (i = 0; i < adapter->req_tx_queues + adapter->req_rx_queues; i++)
		ret += 4 * PAGE_SIZE; /* the scrq message queue */

	for (i = 0; i < adapter->num_active_rx_pools; i++)
		ret += adapter->rx_pool[i].size *
		       IOMMU_PAGE_ALIGN(adapter->rx_pool[i].buff_size, tbl);

	return ret;
}

static int ibmvnic_resume(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	if (adapter->state != VNIC_OPEN)
		return 0;

	tasklet_schedule(&adapter->tasklet);

	return 0;
}

static const struct vio_device_id ibmvnic_device_table[] = {
	{"network", "IBM,vnic"},
	{"", "" }
};
MODULE_DEVICE_TABLE(vio, ibmvnic_device_table);

static const struct dev_pm_ops ibmvnic_pm_ops = {
	.resume = ibmvnic_resume
};

static struct vio_driver ibmvnic_driver = {
	.id_table	= ibmvnic_device_table,
	.probe		= ibmvnic_probe,
	.remove		= ibmvnic_remove,
	.get_desired_dma = ibmvnic_get_desired_dma,
	.name		= ibmvnic_driver_name,
	.pm		= &ibmvnic_pm_ops,
};

/* module functions */
static int __init ibmvnic_module_init(void)
{
	pr_info("%s: %s %s\n", ibmvnic_driver_name, ibmvnic_driver_string,
		IBMVNIC_DRIVER_VERSION);

	return vio_register_driver(&ibmvnic_driver);
}

static void __exit ibmvnic_module_exit(void)
{
	vio_unregister_driver(&ibmvnic_driver);
}

module_init(ibmvnic_module_init);
module_exit(ibmvnic_module_exit);