// SPDX-License-Identifier: GPL-2.0-or-later
/**************************************************************************/
/*                                                                        */
/*  IBM System i and System p Virtual NIC Device Driver                   */
/*  Copyright (C) 2014 IBM Corp.                                          */
/*  Santiago Leon (santi_leon@yahoo.com)                                  */
/*  Thomas Falcon (tlfalcon@linux.vnet.ibm.com)                           */
/*  John Allen (jallen@linux.vnet.ibm.com)                                */
/*                                                                        */
/*                                                                        */
/* This module contains the implementation of a virtual ethernet device  */
/* for use with IBM i/p Series LPAR Linux. It utilizes the logical LAN    */
/* option of the RS/6000 Platform Architecture to interface with virtual */
/* ethernet NICs that are presented to the partition by the hypervisor.   */
/*                                                                        */
/* Messages are passed between the VNIC driver and the VNIC server using */
/* Command/Response Queues (CRQs) and sub CRQs (sCRQs). CRQs are used to  */
/* issue and receive commands that initiate communication with the server */
/* on driver initialization. Sub CRQs (sCRQs) are similar to CRQs, but    */
/* are used by the driver to notify the server that a packet is           */
/* ready for transmission or that a buffer has been added to receive a    */
/* packet. Subsequently, sCRQs are used by the server to notify the       */
/* driver that a packet transmission has been completed or that a packet  */
/* has been received and placed in a waiting buffer.                      */
/*                                                                        */
/* In lieu of a more conventional "on-the-fly" DMA mapping strategy in    */
/* which skbs are DMA mapped and immediately unmapped when the transmit   */
/* or receive has been completed, the VNIC driver is required to use      */
/* "long term mapping". This entails that large, contiguous DMA mapped    */
/* buffers are allocated on driver initialization and these buffers are   */
/* then continuously reused to pass skbs to and from the VNIC server.     */
/*                                                                        */
/**************************************************************************/

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/completion.h>
#include <linux/ioport.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/ethtool.h>
#include <linux/proc_fs.h>
#include <linux/if_arp.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/irq.h>
#include <linux/kthread.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <net/net_namespace.h>
#include <asm/hvcall.h>
#include <linux/atomic.h>
#include <asm/vio.h>
#include <asm/xive.h>
#include <asm/iommu.h>
#include <linux/uaccess.h>
#include <asm/firmware.h>
#include <linux/workqueue.h>
#include <linux/if_vlan.h>
#include <linux/utsname.h>

#include "ibmvnic.h"

static const char ibmvnic_driver_name[] = "ibmvnic";
static const char ibmvnic_driver_string[] = "IBM System i/p Virtual NIC Driver";

MODULE_AUTHOR("Santiago Leon");
MODULE_DESCRIPTION("IBM System i/p Virtual NIC Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(IBMVNIC_DRIVER_VERSION);

static int ibmvnic_version = IBMVNIC_INITIAL_VERSION;
static void release_sub_crqs(struct ibmvnic_adapter *, bool);
static int ibmvnic_reset_crq(struct ibmvnic_adapter *);
static int ibmvnic_send_crq_init(struct ibmvnic_adapter *);
static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *);
static int ibmvnic_send_crq(struct ibmvnic_adapter *, union ibmvnic_crq *);
static int send_subcrq_indirect(struct ibmvnic_adapter *, u64, u64, u64);
static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance);
static int enable_scrq_irq(struct ibmvnic_adapter *,
                           struct ibmvnic_sub_crq_queue *);
static int disable_scrq_irq(struct ibmvnic_adapter *,
                            struct ibmvnic_sub_crq_queue *);
static int pending_scrq(struct ibmvnic_adapter *,
                        struct ibmvnic_sub_crq_queue *);
static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *,
                                        struct ibmvnic_sub_crq_queue *);
static int ibmvnic_poll(struct napi_struct *napi, int data);
static void send_query_map(struct ibmvnic_adapter *adapter);
static int send_request_map(struct ibmvnic_adapter *, dma_addr_t, u32, u8);
static int send_request_unmap(struct ibmvnic_adapter *, u8);
static int send_login(struct ibmvnic_adapter *adapter);
static void send_query_cap(struct ibmvnic_adapter *adapter);
static int init_sub_crqs(struct ibmvnic_adapter *);
static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter);
static int ibmvnic_reset_init(struct ibmvnic_adapter *, bool reset);
static void release_crq_queue(struct ibmvnic_adapter *);
static int __ibmvnic_set_mac(struct net_device *, u8 *);
static int init_crq_queue(struct ibmvnic_adapter *adapter);
static int send_query_phys_parms(struct ibmvnic_adapter *adapter);
static void ibmvnic_tx_scrq_clean_buffer(struct ibmvnic_adapter *adapter,
                                         struct ibmvnic_sub_crq_queue *tx_scrq);
static void free_long_term_buff(struct ibmvnic_adapter *adapter,
                                struct ibmvnic_long_term_buff *ltb);
static void ibmvnic_disable_irqs(struct ibmvnic_adapter *adapter);

struct ibmvnic_stat {
        char name[ETH_GSTRING_LEN];
        int offset;
};

#define IBMVNIC_STAT_OFF(stat) (offsetof(struct ibmvnic_adapter, stats) + \
                             offsetof(struct ibmvnic_statistics, stat))
#define IBMVNIC_GET_STAT(a, off) (*((u64 *)(((unsigned long)(a)) + (off))))
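
/* Illustrative use of the two macros above: the ethtool stats callback
 * walks ibmvnic_stats[] and, for each entry, reads the counter with
 * roughly
 *
 *      u64 val = IBMVNIC_GET_STAT(adapter, ibmvnic_stats[i].offset);
 *
 * i.e. the byte offset of each counter relative to the adapter struct is
 * precomputed at build time and the value is fetched with a single
 * dereference.
 */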

static const struct ibmvnic_stat ibmvnic_stats[] = {
        {"rx_packets", IBMVNIC_STAT_OFF(rx_packets)},
        {"rx_bytes", IBMVNIC_STAT_OFF(rx_bytes)},
        {"tx_packets", IBMVNIC_STAT_OFF(tx_packets)},
        {"tx_bytes", IBMVNIC_STAT_OFF(tx_bytes)},
        {"ucast_tx_packets", IBMVNIC_STAT_OFF(ucast_tx_packets)},
        {"ucast_rx_packets", IBMVNIC_STAT_OFF(ucast_rx_packets)},
        {"mcast_tx_packets", IBMVNIC_STAT_OFF(mcast_tx_packets)},
        {"mcast_rx_packets", IBMVNIC_STAT_OFF(mcast_rx_packets)},
        {"bcast_tx_packets", IBMVNIC_STAT_OFF(bcast_tx_packets)},
        {"bcast_rx_packets", IBMVNIC_STAT_OFF(bcast_rx_packets)},
        {"align_errors", IBMVNIC_STAT_OFF(align_errors)},
        {"fcs_errors", IBMVNIC_STAT_OFF(fcs_errors)},
        {"single_collision_frames", IBMVNIC_STAT_OFF(single_collision_frames)},
        {"multi_collision_frames", IBMVNIC_STAT_OFF(multi_collision_frames)},
        {"sqe_test_errors", IBMVNIC_STAT_OFF(sqe_test_errors)},
        {"deferred_tx", IBMVNIC_STAT_OFF(deferred_tx)},
        {"late_collisions", IBMVNIC_STAT_OFF(late_collisions)},
        {"excess_collisions", IBMVNIC_STAT_OFF(excess_collisions)},
        {"internal_mac_tx_errors", IBMVNIC_STAT_OFF(internal_mac_tx_errors)},
        {"carrier_sense", IBMVNIC_STAT_OFF(carrier_sense)},
        {"too_long_frames", IBMVNIC_STAT_OFF(too_long_frames)},
        {"internal_mac_rx_errors", IBMVNIC_STAT_OFF(internal_mac_rx_errors)},
};

static int send_crq_init_complete(struct ibmvnic_adapter *adapter)
{
        union ibmvnic_crq crq;

        memset(&crq, 0, sizeof(crq));
        crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
        crq.generic.cmd = IBMVNIC_CRQ_INIT_COMPLETE;

        return ibmvnic_send_crq(adapter, &crq);
}

static int send_version_xchg(struct ibmvnic_adapter *adapter)
{
        union ibmvnic_crq crq;

        memset(&crq, 0, sizeof(crq));
        crq.version_exchange.first = IBMVNIC_CRQ_CMD;
        crq.version_exchange.cmd = VERSION_EXCHANGE;
        crq.version_exchange.version = cpu_to_be16(ibmvnic_version);

        return ibmvnic_send_crq(adapter, &crq);
}
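
/* Thin wrapper around the H_REG_SUB_CRQ hypervisor call. On success the
 * hypervisor returns the new sub-CRQ's identifier and its assigned
 * interrupt source in the first two return-buffer slots, which are
 * passed back through *number and *irq.
 */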
static long h_reg_sub_crq(unsigned long unit_address, unsigned long token,
                          unsigned long length, unsigned long *number,
                          unsigned long *irq)
{
        unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
        long rc;

        rc = plpar_hcall(H_REG_SUB_CRQ, retbuf, unit_address, token, length);
        *number = retbuf[0];
        *irq = retbuf[1];

        return rc;
}

/**
 * ibmvnic_wait_for_completion - Check device state and wait for completion
 * @adapter: private device data
 * @comp_done: completion structure to wait for
 * @timeout: time to wait in milliseconds
 *
 * Wait for a completion signal or until the timeout limit is reached
 * while checking that the device is still active.
 */
static int ibmvnic_wait_for_completion(struct ibmvnic_adapter *adapter,
                                       struct completion *comp_done,
                                       unsigned long timeout)
{
        struct net_device *netdev;
        unsigned long div_timeout;
        u8 retry;

        netdev = adapter->netdev;
        retry = 5;
        div_timeout = msecs_to_jiffies(timeout / retry);
        while (true) {
                if (!adapter->crq.active) {
                        netdev_err(netdev, "Device down!\n");
                        return -ENODEV;
                }
                if (!retry--)
                        break;
                if (wait_for_completion_timeout(comp_done, div_timeout))
                        return 0;
        }
        netdev_err(netdev, "Operation timed out.\n");
        return -ETIMEDOUT;
}
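
/* The wait above is split into five equal slices so that a dead CRQ is
 * noticed early: with the 10000 ms timeout used by the callers below,
 * the device's CRQ state is re-checked roughly every 2 seconds instead
 * of only after the full timeout has expired.
 */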

/**
 * reuse_ltb() - Check if a long term buffer can be reused
 * @ltb:  The long term buffer to be checked
 * @size: The size of the long term buffer.
 *
 * An LTB can be reused unless its size has changed.
 *
 * Return: Return true if the LTB can be reused, false otherwise.
 */
static bool reuse_ltb(struct ibmvnic_long_term_buff *ltb, int size)
{
        return (ltb->buff && ltb->size == size);
}

/**
 * alloc_long_term_buff() - Allocate a long term buffer (LTB)
 *
 * @adapter: ibmvnic adapter associated to the LTB
 * @ltb:     container object for the LTB
 * @size:    size of the LTB
 *
 * Allocate an LTB of the specified size and notify VIOS.
 *
 * If the given @ltb already has the correct size, reuse it. Otherwise, if
 * it's non-NULL, free it. Then allocate a new one of the correct size.
 * Notify the VIOS either way since we may now be working with a new VIOS.
 *
 * Allocating larger chunks of memory during resets, especially during LPM
 * or under low memory situations, can cause resets to fail/timeout and
 * for the LPAR to lose connectivity. So hold onto the LTB even if we fail
 * to communicate with the VIOS and reuse it on next open. Free the LTB
 * when the adapter is closed.
 *
 * Return: 0 if we were able to allocate the LTB and notify the VIOS and
 *         a negative value otherwise.
 */
static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
                                struct ibmvnic_long_term_buff *ltb, int size)
{
        struct device *dev = &adapter->vdev->dev;
        int rc;

        if (!reuse_ltb(ltb, size)) {
                dev_dbg(dev,
                        "LTB size changed from 0x%llx to 0x%x, reallocating\n",
                        ltb->size, size);
                free_long_term_buff(adapter, ltb);
        }

        if (ltb->buff) {
                dev_dbg(dev, "Reusing LTB [map %d, size 0x%llx]\n",
                        ltb->map_id, ltb->size);
        } else {
                ltb->buff = dma_alloc_coherent(dev, size, &ltb->addr,
                                               GFP_KERNEL);
                if (!ltb->buff) {
                        dev_err(dev, "Couldn't alloc long term buffer\n");
                        return -ENOMEM;
                }
                ltb->size = size;

                ltb->map_id = find_first_zero_bit(adapter->map_ids,
                                                  MAX_MAP_ID);
                bitmap_set(adapter->map_ids, ltb->map_id, 1);

                dev_dbg(dev,
                        "Allocated new LTB [map %d, size 0x%llx]\n",
                        ltb->map_id, ltb->size);
        }

        /* Ensure ltb is zeroed - especially when reusing it. */
        memset(ltb->buff, 0, ltb->size);

        mutex_lock(&adapter->fw_lock);
        adapter->fw_done_rc = 0;
        reinit_completion(&adapter->fw_done);

        rc = send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id);
        if (rc) {
                dev_err(dev, "send_request_map failed, rc = %d\n", rc);
                goto out;
        }

        rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
        if (rc) {
                dev_err(dev, "LTB map request aborted or timed out, rc = %d\n",
                        rc);
                goto out;
        }

        if (adapter->fw_done_rc) {
                dev_err(dev, "Couldn't map LTB, rc = %d\n",
                        adapter->fw_done_rc);
                rc = -EIO;
                goto out;
        }
        rc = 0;
out:
        /* don't free LTB on communication error - see function header */
        mutex_unlock(&adapter->fw_lock);
        return rc;
}

static void free_long_term_buff(struct ibmvnic_adapter *adapter,
                                struct ibmvnic_long_term_buff *ltb)
{
        struct device *dev = &adapter->vdev->dev;

        if (!ltb->buff)
                return;

        /* VIOS automatically unmaps the long term buffer at remote
         * end for the following resets:
         * FAILOVER, MOBILITY, TIMEOUT.
         */
        if (adapter->reset_reason != VNIC_RESET_FAILOVER &&
            adapter->reset_reason != VNIC_RESET_MOBILITY &&
            adapter->reset_reason != VNIC_RESET_TIMEOUT)
                send_request_unmap(adapter, ltb->map_id);

        dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);

        ltb->buff = NULL;
        /* mark this map_id free */
        bitmap_clear(adapter->map_ids, ltb->map_id, 1);
        ltb->map_id = 0;
}

static void free_ltb_set(struct ibmvnic_adapter *adapter,
                         struct ibmvnic_ltb_set *ltb_set)
{
        int i;

        for (i = 0; i < ltb_set->num_ltbs; i++)
                free_long_term_buff(adapter, &ltb_set->ltbs[i]);

        kfree(ltb_set->ltbs);
        ltb_set->ltbs = NULL;
        ltb_set->num_ltbs = 0;
}

static int alloc_ltb_set(struct ibmvnic_adapter *adapter,
                         struct ibmvnic_ltb_set *ltb_set, int num_buffs,
                         int buff_size)
{
        struct ibmvnic_long_term_buff *ltb;
        int ltb_size;
        int size;

        size = sizeof(struct ibmvnic_long_term_buff);

        ltb_set->ltbs = kmalloc(size, GFP_KERNEL);
        if (!ltb_set->ltbs)
                return -ENOMEM;

        ltb_set->num_ltbs = 1;
        ltb = &ltb_set->ltbs[0];

        ltb_size = num_buffs * buff_size;

        return alloc_long_term_buff(adapter, ltb, ltb_size);
}
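
/* For now an LTB "set" always contains exactly one LTB sized to hold the
 * whole pool (num_buffs * buff_size). The rx-side helper
 * map_rxpool_buf_to_ltb() below relies on this when it resolves every
 * buffer to ltbs[0] at offset bufidx * buff_size.
 */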

/**
 * map_rxpool_buf_to_ltb - Map given rxpool buffer to offset in an LTB.
 * @rxpool: The receive buffer pool containing buffer
 * @bufidx: Index of buffer in rxpool
 * @ltbp: (Output) pointer to the long term buffer containing the buffer
 * @offset: (Output) offset of buffer in the LTB from @ltbp
 *
 * Map the given buffer identified by [rxpool, bufidx] to an LTB in the
 * pool and its corresponding offset.
 */
static void map_rxpool_buf_to_ltb(struct ibmvnic_rx_pool *rxpool,
                                  unsigned int bufidx,
                                  struct ibmvnic_long_term_buff **ltbp,
                                  unsigned int *offset)
{
        *ltbp = &rxpool->ltb_set.ltbs[0];
        *offset = bufidx * rxpool->buff_size;
}

/**
 * map_txpool_buf_to_ltb - Map given txpool buffer to offset in an LTB.
 * @txpool: The transmit buffer pool containing buffer
 * @bufidx: Index of buffer in txpool
 * @ltbp: (Output) pointer to the long term buffer (LTB) containing the buffer
 * @offset: (Output) offset of buffer in the LTB from @ltbp
 *
 * Map the given buffer identified by [txpool, bufidx] to an LTB in the
 * pool and its corresponding offset.
 */
static void map_txpool_buf_to_ltb(struct ibmvnic_tx_pool *txpool,
                                  unsigned int bufidx,
                                  struct ibmvnic_long_term_buff **ltbp,
                                  unsigned int *offset)
{
        *ltbp = &txpool->long_term_buff;
        *offset = bufidx * txpool->buf_size;
}

static void deactivate_rx_pools(struct ibmvnic_adapter *adapter)
{
        int i;

        for (i = 0; i < adapter->num_active_rx_pools; i++)
                adapter->rx_pool[i].active = 0;
}

static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
                              struct ibmvnic_rx_pool *pool)
{
        int count = pool->size - atomic_read(&pool->available);
        u64 handle = adapter->rx_scrq[pool->index]->handle;
        struct device *dev = &adapter->vdev->dev;
        struct ibmvnic_ind_xmit_queue *ind_bufp;
        struct ibmvnic_sub_crq_queue *rx_scrq;
        struct ibmvnic_long_term_buff *ltb;
        union sub_crq *sub_crq;
        int buffers_added = 0;
        unsigned long lpar_rc;
        struct sk_buff *skb;
        unsigned int offset;
        dma_addr_t dma_addr;
        unsigned char *dst;
        int shift = 0;
        int bufidx;
        int i;

        if (!pool->active)
                return;

        rx_scrq = adapter->rx_scrq[pool->index];
        ind_bufp = &rx_scrq->ind_buf;

        /* netdev_alloc_skb() could have failed after we saved a few skbs
         * in the indir_buf and we would not have sent them to VIOS yet.
         * To account for them, start the loop at ind_bufp->index rather
         * than 0. If we pushed all the skbs to VIOS, ind_bufp->index will
         * be 0.
         */
        for (i = ind_bufp->index; i < count; ++i) {
                bufidx = pool->free_map[pool->next_free];

                /* We may be reusing the skb from earlier resets. Allocate
                 * only if necessary. But since the LTB may have changed
                 * during reset (see init_rx_pools()), update LTB below
                 * even if reusing skb.
                 */
                skb = pool->rx_buff[bufidx].skb;
                if (!skb) {
                        skb = netdev_alloc_skb(adapter->netdev,
                                               pool->buff_size);
                        if (!skb) {
                                dev_err(dev, "Couldn't replenish rx buff\n");
                                adapter->replenish_no_mem++;
                                break;
                        }
                }

                pool->free_map[pool->next_free] = IBMVNIC_INVALID_MAP;
                pool->next_free = (pool->next_free + 1) % pool->size;

                /* Copy the skb to the long term mapped DMA buffer */
                map_rxpool_buf_to_ltb(pool, bufidx, &ltb, &offset);
                dst = ltb->buff + offset;
                memset(dst, 0, pool->buff_size);
                dma_addr = ltb->addr + offset;

                /* add the skb to an rx_buff in the pool */
                pool->rx_buff[bufidx].data = dst;
                pool->rx_buff[bufidx].dma = dma_addr;
                pool->rx_buff[bufidx].skb = skb;
                pool->rx_buff[bufidx].pool_index = pool->index;
                pool->rx_buff[bufidx].size = pool->buff_size;

                /* queue the rx_buff for the next send_subcrq_indirect */
                sub_crq = &ind_bufp->indir_arr[ind_bufp->index++];
                memset(sub_crq, 0, sizeof(*sub_crq));
                sub_crq->rx_add.first = IBMVNIC_CRQ_CMD;
                sub_crq->rx_add.correlator =
                    cpu_to_be64((u64)&pool->rx_buff[bufidx]);
                sub_crq->rx_add.ioba = cpu_to_be32(dma_addr);
                sub_crq->rx_add.map_id = ltb->map_id;

                /* The length field of the sCRQ is defined to be 24 bits so the
                 * buffer size needs to be left shifted by a byte before it is
                 * converted to big endian to prevent the last byte from being
                 * truncated.
                 */
#ifdef __LITTLE_ENDIAN__
                shift = 8;
#endif
                sub_crq->rx_add.len = cpu_to_be32(pool->buff_size << shift);
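
                /* For example, a 0x2328 (9000) byte buffer is stored as
                 * 0x232800; its big-endian encoding then carries the value
                 * in the first three bytes of the field, matching the
                 * 24-bit length the device expects. Without the shift, the
                 * low byte (0x28) would fall outside those three bytes.
                 */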

                /* if send_subcrq_indirect queue is full, flush to VIOS */
                if (ind_bufp->index == IBMVNIC_MAX_IND_DESCS ||
                    i == count - 1) {
                        lpar_rc =
                                send_subcrq_indirect(adapter, handle,
                                                     (u64)ind_bufp->indir_dma,
                                                     (u64)ind_bufp->index);
                        if (lpar_rc != H_SUCCESS)
                                goto failure;
                        buffers_added += ind_bufp->index;
                        adapter->replenish_add_buff_success += ind_bufp->index;
                        ind_bufp->index = 0;
                }
        }
        atomic_add(buffers_added, &pool->available);
        return;

failure:
        if (lpar_rc != H_PARAMETER && lpar_rc != H_CLOSED)
                dev_err_ratelimited(dev, "rx: replenish packet buffer failed\n");
        for (i = ind_bufp->index - 1; i >= 0; --i) {
                struct ibmvnic_rx_buff *rx_buff;

                pool->next_free = pool->next_free == 0 ?
                                  pool->size - 1 : pool->next_free - 1;
                sub_crq = &ind_bufp->indir_arr[i];
                rx_buff = (struct ibmvnic_rx_buff *)
                          be64_to_cpu(sub_crq->rx_add.correlator);
                bufidx = (int)(rx_buff - pool->rx_buff);
                pool->free_map[pool->next_free] = bufidx;
                dev_kfree_skb_any(pool->rx_buff[bufidx].skb);
                pool->rx_buff[bufidx].skb = NULL;
        }
        adapter->replenish_add_buff_failure += ind_bufp->index;
        atomic_add(buffers_added, &pool->available);
        ind_bufp->index = 0;
        if (lpar_rc == H_CLOSED || adapter->failover_pending) {
                /* Disable buffer pool replenishment and report carrier off if
                 * queue is closed or pending failover.
                 * Firmware guarantees that a signal will be sent to the
                 * driver, triggering a reset.
                 */
                deactivate_rx_pools(adapter);
                netif_carrier_off(adapter->netdev);
        }
}
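
/* The failure path above unwinds the descriptors still staged in the
 * indirect buffer: it walks them in reverse, rewinds next_free, returns
 * each buffer index to free_map and frees its skb, leaving the pool in a
 * consistent state for the next replenish attempt.
 */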

static void replenish_pools(struct ibmvnic_adapter *adapter)
{
        int i;

        adapter->replenish_task_cycles++;
        for (i = 0; i < adapter->num_active_rx_pools; i++) {
                if (adapter->rx_pool[i].active)
                        replenish_rx_pool(adapter, &adapter->rx_pool[i]);
        }

        netdev_dbg(adapter->netdev, "Replenished %d pools\n", i);
}

static void release_stats_buffers(struct ibmvnic_adapter *adapter)
{
        kfree(adapter->tx_stats_buffers);
        kfree(adapter->rx_stats_buffers);
        adapter->tx_stats_buffers = NULL;
        adapter->rx_stats_buffers = NULL;
}

static int init_stats_buffers(struct ibmvnic_adapter *adapter)
{
        adapter->tx_stats_buffers =
                                kcalloc(IBMVNIC_MAX_QUEUES,
                                        sizeof(struct ibmvnic_tx_queue_stats),
                                        GFP_KERNEL);
        if (!adapter->tx_stats_buffers)
                return -ENOMEM;

        adapter->rx_stats_buffers =
                                kcalloc(IBMVNIC_MAX_QUEUES,
                                        sizeof(struct ibmvnic_rx_queue_stats),
                                        GFP_KERNEL);
        if (!adapter->rx_stats_buffers)
                return -ENOMEM;

        return 0;
}

static void release_stats_token(struct ibmvnic_adapter *adapter)
{
        struct device *dev = &adapter->vdev->dev;

        if (!adapter->stats_token)
                return;

        dma_unmap_single(dev, adapter->stats_token,
                         sizeof(struct ibmvnic_statistics),
                         DMA_FROM_DEVICE);
        adapter->stats_token = 0;
}

static int init_stats_token(struct ibmvnic_adapter *adapter)
{
        struct device *dev = &adapter->vdev->dev;
        dma_addr_t stok;
        int rc;

        stok = dma_map_single(dev, &adapter->stats,
                              sizeof(struct ibmvnic_statistics),
                              DMA_FROM_DEVICE);
        rc = dma_mapping_error(dev, stok);
        if (rc) {
                dev_err(dev, "Couldn't map stats buffer, rc = %d\n", rc);
                return rc;
        }

        adapter->stats_token = stok;
        netdev_dbg(adapter->netdev, "Stats token initialized (%llx)\n", stok);
        return 0;
}

/**
 * release_rx_pools() - Release any rx pools attached to @adapter.
 * @adapter: ibmvnic adapter
 *
 * Safe to call this multiple times - even if no pools are attached.
 */
static void release_rx_pools(struct ibmvnic_adapter *adapter)
{
        struct ibmvnic_rx_pool *rx_pool;
        int i, j;

        if (!adapter->rx_pool)
                return;

        for (i = 0; i < adapter->num_active_rx_pools; i++) {
                rx_pool = &adapter->rx_pool[i];

                netdev_dbg(adapter->netdev, "Releasing rx_pool[%d]\n", i);

                kfree(rx_pool->free_map);

                free_ltb_set(adapter, &rx_pool->ltb_set);

                if (!rx_pool->rx_buff)
                        continue;

                for (j = 0; j < rx_pool->size; j++) {
                        if (rx_pool->rx_buff[j].skb) {
                                dev_kfree_skb_any(rx_pool->rx_buff[j].skb);
                                rx_pool->rx_buff[j].skb = NULL;
                        }
                }

                kfree(rx_pool->rx_buff);
        }

        kfree(adapter->rx_pool);
        adapter->rx_pool = NULL;
        adapter->num_active_rx_pools = 0;
        adapter->prev_rx_pool_size = 0;
}

/**
 * reuse_rx_pools() - Check if the existing rx pools can be reused.
 * @adapter: ibmvnic adapter
 *
 * Check if the existing rx pools in the adapter can be reused. The
 * pools can be reused if the pool parameters (number of pools,
 * number of buffers in the pool and size of each buffer) have not
 * changed.
 *
 * NOTE: This assumes that all pools have the same number of buffers
 *       which is the case currently. If that changes, we must fix this.
 *
 * Return: true if the rx pools can be reused, false otherwise.
 */
static bool reuse_rx_pools(struct ibmvnic_adapter *adapter)
{
        u64 old_num_pools, new_num_pools;
        u64 old_pool_size, new_pool_size;
        u64 old_buff_size, new_buff_size;

        if (!adapter->rx_pool)
                return false;

        old_num_pools = adapter->num_active_rx_pools;
        new_num_pools = adapter->req_rx_queues;

        old_pool_size = adapter->prev_rx_pool_size;
        new_pool_size = adapter->req_rx_add_entries_per_subcrq;

        old_buff_size = adapter->prev_rx_buf_sz;
        new_buff_size = adapter->cur_rx_buf_sz;

        if (old_buff_size != new_buff_size ||
            old_num_pools != new_num_pools ||
            old_pool_size != new_pool_size)
                return false;

        return true;
}
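
/* In practice this means a plain close/open with unchanged capabilities
 * reuses the pools, while anything that renegotiates the queue count,
 * ring size or rx buffer size (an MTU change, for instance) sends
 * init_rx_pools() below down the full reallocation path.
 */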

/**
 * init_rx_pools(): Initialize the set of receiver pools in the adapter.
 * @netdev: net device associated with the vnic interface
 *
 * Initialize the set of receiver pools in the ibmvnic adapter associated
 * with the net_device @netdev. If possible, reuse the existing rx pools.
 * Otherwise free any existing pools and allocate a new set of pools
 * before initializing them.
 *
 * Return: 0 on success and negative value on error.
 */
static int init_rx_pools(struct net_device *netdev)
{
        struct ibmvnic_adapter *adapter = netdev_priv(netdev);
        struct device *dev = &adapter->vdev->dev;
        struct ibmvnic_rx_pool *rx_pool;
        u64 num_pools;
        u64 pool_size;          /* # of buffers in one pool */
        u64 buff_size;
        int i, j, rc;

        pool_size = adapter->req_rx_add_entries_per_subcrq;
        num_pools = adapter->req_rx_queues;
        buff_size = adapter->cur_rx_buf_sz;

        if (reuse_rx_pools(adapter)) {
                dev_dbg(dev, "Reusing rx pools\n");
                goto update_ltb;
        }

        /* Allocate/populate the pools. */
        release_rx_pools(adapter);

        adapter->rx_pool = kcalloc(num_pools,
                                   sizeof(struct ibmvnic_rx_pool),
                                   GFP_KERNEL);
        if (!adapter->rx_pool) {
                dev_err(dev, "Failed to allocate rx pools\n");
                return -ENOMEM;
        }

        /* Set num_active_rx_pools early. If we fail below after partial
         * allocation, release_rx_pools() will know how many to look for.
         */
        adapter->num_active_rx_pools = num_pools;

        for (i = 0; i < num_pools; i++) {
                rx_pool = &adapter->rx_pool[i];

                netdev_dbg(adapter->netdev,
                           "Initializing rx_pool[%d], %lld buffs, %lld bytes each\n",
                           i, pool_size, buff_size);

                rx_pool->size = pool_size;
                rx_pool->index = i;
                rx_pool->buff_size = ALIGN(buff_size, L1_CACHE_BYTES);

                rx_pool->free_map = kcalloc(rx_pool->size, sizeof(int),
                                            GFP_KERNEL);
                if (!rx_pool->free_map) {
                        dev_err(dev, "Couldn't alloc free_map %d\n", i);
                        rc = -ENOMEM;
                        goto out_release;
                }

                rx_pool->rx_buff = kcalloc(rx_pool->size,
                                           sizeof(struct ibmvnic_rx_buff),
                                           GFP_KERNEL);
                if (!rx_pool->rx_buff) {
                        dev_err(dev, "Couldn't alloc rx buffers\n");
                        rc = -ENOMEM;
                        goto out_release;
                }
        }

        adapter->prev_rx_pool_size = pool_size;
        adapter->prev_rx_buf_sz = adapter->cur_rx_buf_sz;

update_ltb:
        for (i = 0; i < num_pools; i++) {
                rx_pool = &adapter->rx_pool[i];
                dev_dbg(dev, "Updating LTB for rx pool %d [%d, %d]\n",
                        i, rx_pool->size, rx_pool->buff_size);

                rc = alloc_ltb_set(adapter, &rx_pool->ltb_set,
                                   rx_pool->size, rx_pool->buff_size);
                if (rc)
                        goto out;

                for (j = 0; j < rx_pool->size; ++j) {
                        struct ibmvnic_rx_buff *rx_buff;

                        rx_pool->free_map[j] = j;

                        /* NOTE: Don't clear rx_buff->skb here - will leak
                         * memory! replenish_rx_pool() will reuse skbs or
                         * allocate as necessary.
                         */
                        rx_buff = &rx_pool->rx_buff[j];
                        rx_buff->dma = 0;
                        rx_buff->data = 0;
                        rx_buff->size = 0;
                        rx_buff->pool_index = 0;
                }

                /* Mark pool "empty" so replenish_rx_pool() will
                 * update the LTB info for each buffer
                 */
                atomic_set(&rx_pool->available, 0);
                rx_pool->next_alloc = 0;
                rx_pool->next_free = 0;
                /* replenish_rx_pool() may have called deactivate_rx_pools()
                 * on failover. Ensure pool is active now.
                 */
                rx_pool->active = 1;
        }
        return 0;
out_release:
        release_rx_pools(adapter);
out:
        /* We failed to allocate one or more LTBs or map them on the VIOS.
         * Hold onto the pools and any LTBs that we did allocate/map.
         */
        return rc;
}

static void release_vpd_data(struct ibmvnic_adapter *adapter)
{
        if (!adapter->vpd)
                return;

        kfree(adapter->vpd->buff);
        kfree(adapter->vpd);

        adapter->vpd = NULL;
}

static void release_one_tx_pool(struct ibmvnic_adapter *adapter,
                                struct ibmvnic_tx_pool *tx_pool)
{
        kfree(tx_pool->tx_buff);
        kfree(tx_pool->free_map);
        free_long_term_buff(adapter, &tx_pool->long_term_buff);
}

/**
 * release_tx_pools() - Release any tx pools attached to @adapter.
 * @adapter: ibmvnic adapter
 *
 * Safe to call this multiple times - even if no pools are attached.
 */
static void release_tx_pools(struct ibmvnic_adapter *adapter)
{
        int i;

        /* init_tx_pools() ensures that ->tx_pool and ->tso_pool are
         * both NULL or both non-NULL. So we only need to check one.
         */
        if (!adapter->tx_pool)
                return;

        for (i = 0; i < adapter->num_active_tx_pools; i++) {
                release_one_tx_pool(adapter, &adapter->tx_pool[i]);
                release_one_tx_pool(adapter, &adapter->tso_pool[i]);
        }

        kfree(adapter->tx_pool);
        adapter->tx_pool = NULL;
        kfree(adapter->tso_pool);
        adapter->tso_pool = NULL;
        adapter->num_active_tx_pools = 0;
        adapter->prev_tx_pool_size = 0;
}

static int init_one_tx_pool(struct net_device *netdev,
                            struct ibmvnic_tx_pool *tx_pool,
                            int pool_size, int buf_size)
{
        int i;

        tx_pool->tx_buff = kcalloc(pool_size,
                                   sizeof(struct ibmvnic_tx_buff),
                                   GFP_KERNEL);
        if (!tx_pool->tx_buff)
                return -ENOMEM;

        tx_pool->free_map = kcalloc(pool_size, sizeof(int), GFP_KERNEL);
        if (!tx_pool->free_map) {
                kfree(tx_pool->tx_buff);
                tx_pool->tx_buff = NULL;
                return -ENOMEM;
        }

        for (i = 0; i < pool_size; i++)
                tx_pool->free_map[i] = i;

        tx_pool->consumer_index = 0;
        tx_pool->producer_index = 0;
        tx_pool->num_buffers = pool_size;
        tx_pool->buf_size = buf_size;

        return 0;
}

/**
 * reuse_tx_pools() - Check if the existing tx pools can be reused.
 * @adapter: ibmvnic adapter
 *
 * Check if the existing tx pools in the adapter can be reused. The
 * pools can be reused if the pool parameters (number of pools,
 * number of buffers in the pool and mtu) have not changed.
 *
 * NOTE: This assumes that all pools have the same number of buffers
 *       which is the case currently. If that changes, we must fix this.
 *
 * Return: true if the tx pools can be reused, false otherwise.
 */
static bool reuse_tx_pools(struct ibmvnic_adapter *adapter)
{
        u64 old_num_pools, new_num_pools;
        u64 old_pool_size, new_pool_size;
        u64 old_mtu, new_mtu;

        if (!adapter->tx_pool)
                return false;

        old_num_pools = adapter->num_active_tx_pools;
        new_num_pools = adapter->num_active_tx_scrqs;
        old_pool_size = adapter->prev_tx_pool_size;
        new_pool_size = adapter->req_tx_entries_per_subcrq;
        old_mtu = adapter->prev_mtu;
        new_mtu = adapter->req_mtu;

        if (old_mtu != new_mtu ||
            old_num_pools != new_num_pools ||
            old_pool_size != new_pool_size)
                return false;

        return true;
}
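
/* As with the rx side, a tx pool rebuild is forced whenever the pool
 * geometry changes. Note that the tx buffer size is derived from req_mtu
 * (see the buff_size computation in init_tx_pools() below), so an MTU
 * change always invalidates the existing tx pools.
 */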

/**
 * init_tx_pools(): Initialize the set of transmit pools in the adapter.
 * @netdev: net device associated with the vnic interface
 *
 * Initialize the set of transmit pools in the ibmvnic adapter associated
 * with the net_device @netdev. If possible, reuse the existing tx pools.
 * Otherwise free any existing pools and allocate a new set of pools
 * before initializing them.
 *
 * Return: 0 on success and negative value on error.
 */
static int init_tx_pools(struct net_device *netdev)
{
        struct ibmvnic_adapter *adapter = netdev_priv(netdev);
        struct device *dev = &adapter->vdev->dev;
        int num_pools;
        u64 pool_size;          /* # of buffers in pool */
        u64 buff_size;
        int i, j, rc;

        num_pools = adapter->req_tx_queues;

        /* We must notify the VIOS about the LTB on all resets - but we only
         * need to alloc/populate pools if either the number of buffers or
         * size of each buffer in the pool has changed.
         */
        if (reuse_tx_pools(adapter)) {
                netdev_dbg(netdev, "Reusing tx pools\n");
                goto update_ltb;
        }

        /* Allocate/populate the pools. */
        release_tx_pools(adapter);

        pool_size = adapter->req_tx_entries_per_subcrq;
        num_pools = adapter->num_active_tx_scrqs;

        adapter->tx_pool = kcalloc(num_pools,
                                   sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
        if (!adapter->tx_pool)
                return -ENOMEM;

        adapter->tso_pool = kcalloc(num_pools,
                                    sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
        /* To simplify release_tx_pools() ensure that ->tx_pool and
         * ->tso_pool are either both NULL or both non-NULL.
         */
        if (!adapter->tso_pool) {
                kfree(adapter->tx_pool);
                adapter->tx_pool = NULL;
                return -ENOMEM;
        }

        /* Set num_active_tx_pools early. If we fail below after partial
         * allocation, release_tx_pools() will know how many to look for.
         */
        adapter->num_active_tx_pools = num_pools;

        buff_size = adapter->req_mtu + VLAN_HLEN;
        buff_size = ALIGN(buff_size, L1_CACHE_BYTES);

        for (i = 0; i < num_pools; i++) {
                dev_dbg(dev, "Init tx pool %d [%llu, %llu]\n",
                        i, adapter->req_tx_entries_per_subcrq, buff_size);

                rc = init_one_tx_pool(netdev, &adapter->tx_pool[i],
                                      pool_size, buff_size);
                if (rc)
                        goto out_release;

                rc = init_one_tx_pool(netdev, &adapter->tso_pool[i],
                                      IBMVNIC_TSO_BUFS,
                                      IBMVNIC_TSO_BUF_SZ);
                if (rc)
                        goto out_release;
        }

        adapter->prev_tx_pool_size = pool_size;
        adapter->prev_mtu = adapter->req_mtu;

update_ltb:
        /* NOTE: All tx_pools have the same number of buffers (which is
         * same as pool_size). All tso_pools have IBMVNIC_TSO_BUFS
         * buffers (see the calls to init_one_tx_pool() for these).
         * For consistency, we use tx_pool->num_buffers and
         * tso_pool->num_buffers below.
         */
        rc = -1;
        for (i = 0; i < num_pools; i++) {
                struct ibmvnic_tx_pool *tso_pool;
                struct ibmvnic_tx_pool *tx_pool;
                u32 ltb_size;

                tx_pool = &adapter->tx_pool[i];
                ltb_size = tx_pool->num_buffers * tx_pool->buf_size;
                if (alloc_long_term_buff(adapter, &tx_pool->long_term_buff,
                                         ltb_size))
                        goto out;

                dev_dbg(dev, "Updated LTB for tx pool %d [%p, %d, %d]\n",
                        i, tx_pool->long_term_buff.buff,
                        tx_pool->num_buffers, tx_pool->buf_size);

                tx_pool->consumer_index = 0;
                tx_pool->producer_index = 0;

                for (j = 0; j < tx_pool->num_buffers; j++)
                        tx_pool->free_map[j] = j;

                tso_pool = &adapter->tso_pool[i];
                ltb_size = tso_pool->num_buffers * tso_pool->buf_size;
                if (alloc_long_term_buff(adapter, &tso_pool->long_term_buff,
                                         ltb_size))
                        goto out;

                dev_dbg(dev, "Updated LTB for tso pool %d [%p, %d, %d]\n",
                        i, tso_pool->long_term_buff.buff,
                        tso_pool->num_buffers, tso_pool->buf_size);

                tso_pool->consumer_index = 0;
                tso_pool->producer_index = 0;

                for (j = 0; j < tso_pool->num_buffers; j++)
                        tso_pool->free_map[j] = j;
        }

        return 0;
out_release:
        release_tx_pools(adapter);
out:
        /* We failed to allocate one or more LTBs or map them on the VIOS.
         * Hold onto the pools and any LTBs that we did allocate/map.
         */
        return rc;
}
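
/* Note that init_tx_pools() above gives every tx queue two pools: one
 * with MTU-sized buffers and a companion tso_pool with IBMVNIC_TSO_BUFS
 * buffers of IBMVNIC_TSO_BUF_SZ bytes each, kept separate so that large
 * TSO frames do not consume the regular buffers.
 */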

static void ibmvnic_napi_enable(struct ibmvnic_adapter *adapter)
{
        int i;

        if (adapter->napi_enabled)
                return;

        for (i = 0; i < adapter->req_rx_queues; i++)
                napi_enable(&adapter->napi[i]);

        adapter->napi_enabled = true;
}

static void ibmvnic_napi_disable(struct ibmvnic_adapter *adapter)
{
        int i;

        if (!adapter->napi_enabled)
                return;

        for (i = 0; i < adapter->req_rx_queues; i++) {
                netdev_dbg(adapter->netdev, "Disabling napi[%d]\n", i);
                napi_disable(&adapter->napi[i]);
        }

        adapter->napi_enabled = false;
}

static int init_napi(struct ibmvnic_adapter *adapter)
{
        int i;

        adapter->napi = kcalloc(adapter->req_rx_queues,
                                sizeof(struct napi_struct), GFP_KERNEL);
        if (!adapter->napi)
                return -ENOMEM;

        for (i = 0; i < adapter->req_rx_queues; i++) {
                netdev_dbg(adapter->netdev, "Adding napi[%d]\n", i);
                netif_napi_add(adapter->netdev, &adapter->napi[i],
                               ibmvnic_poll, NAPI_POLL_WEIGHT);
        }

        adapter->num_active_rx_napi = adapter->req_rx_queues;
        return 0;
}

static void release_napi(struct ibmvnic_adapter *adapter)
{
        int i;

        if (!adapter->napi)
                return;

        for (i = 0; i < adapter->num_active_rx_napi; i++) {
                netdev_dbg(adapter->netdev, "Releasing napi[%d]\n", i);
                netif_napi_del(&adapter->napi[i]);
        }

        kfree(adapter->napi);
        adapter->napi = NULL;
        adapter->num_active_rx_napi = 0;
        adapter->napi_enabled = false;
}

static const char *adapter_state_to_string(enum vnic_state state)
{
        switch (state) {
        case VNIC_PROBING:
                return "PROBING";
        case VNIC_PROBED:
                return "PROBED";
        case VNIC_OPENING:
                return "OPENING";
        case VNIC_OPEN:
                return "OPEN";
        case VNIC_CLOSING:
                return "CLOSING";
        case VNIC_CLOSED:
                return "CLOSED";
        case VNIC_REMOVING:
                return "REMOVING";
        case VNIC_REMOVED:
                return "REMOVED";
        case VNIC_DOWN:
                return "DOWN";
        }
        return "UNKNOWN";
}
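
/* Login can fail transiently in several ways: a timeout or an ABORTED
 * reply is simply retried (after a short delay for ABORTED), while a
 * PARTIALSUCCESS reply means the server could not honor the requested
 * capabilities, so the sub-CRQs are released and renegotiated before the
 * next attempt. Up to 10 retries are made before giving up.
 */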
static int ibmvnic_login(struct net_device *netdev)
{
        struct ibmvnic_adapter *adapter = netdev_priv(netdev);
        unsigned long timeout = msecs_to_jiffies(20000);
        int retry_count = 0;
        int retries = 10;
        bool retry;
        int rc;

        do {
                retry = false;
                if (retry_count > retries) {
                        netdev_warn(netdev, "Login attempts exceeded\n");
                        return -EACCES;
                }

                adapter->init_done_rc = 0;
                reinit_completion(&adapter->init_done);
                rc = send_login(adapter);
                if (rc)
                        return rc;

                if (!wait_for_completion_timeout(&adapter->init_done,
                                                 timeout)) {
                        netdev_warn(netdev, "Login timed out, retrying...\n");
                        retry = true;
                        adapter->init_done_rc = 0;
                        retry_count++;
                        continue;
                }

                if (adapter->init_done_rc == ABORTED) {
                        netdev_warn(netdev, "Login aborted, retrying...\n");
                        retry = true;
                        adapter->init_done_rc = 0;
                        retry_count++;
                        /* FW or device may be busy, so
                         * wait a bit before retrying login
                         */
                        msleep(500);
                } else if (adapter->init_done_rc == PARTIALSUCCESS) {
                        retry_count++;
                        release_sub_crqs(adapter, 1);

                        retry = true;
                        netdev_dbg(netdev,
                                   "Received partial success, retrying...\n");
                        adapter->init_done_rc = 0;
                        reinit_completion(&adapter->init_done);
                        send_query_cap(adapter);
                        if (!wait_for_completion_timeout(&adapter->init_done,
                                                         timeout)) {
                                netdev_warn(netdev,
                                            "Capabilities query timed out\n");
                                return -ETIMEDOUT;
                        }

                        rc = init_sub_crqs(adapter);
                        if (rc) {
                                netdev_warn(netdev,
                                            "SCRQ initialization failed\n");
                                return rc;
                        }

                        rc = init_sub_crq_irqs(adapter);
                        if (rc) {
                                netdev_warn(netdev,
                                            "SCRQ irq initialization failed\n");
                                return rc;
                        }
                } else if (adapter->init_done_rc) {
                        netdev_warn(netdev, "Adapter login failed, init_done_rc = %d\n",
                                    adapter->init_done_rc);
                        return -EIO;
                }
        } while (retry);

        __ibmvnic_set_mac(netdev, adapter->mac_addr);

        netdev_dbg(netdev, "[S:%s] Login succeeded\n", adapter_state_to_string(adapter->state));
        return 0;
}

static void release_login_buffer(struct ibmvnic_adapter *adapter)
{
        kfree(adapter->login_buf);
        adapter->login_buf = NULL;
}

static void release_login_rsp_buffer(struct ibmvnic_adapter *adapter)
{
        kfree(adapter->login_rsp_buf);
        adapter->login_rsp_buf = NULL;
}

static void release_resources(struct ibmvnic_adapter *adapter)
{
        release_vpd_data(adapter);

        release_napi(adapter);
        release_login_buffer(adapter);
        release_login_rsp_buffer(adapter);
}

static int set_link_state(struct ibmvnic_adapter *adapter, u8 link_state)
{
        struct net_device *netdev = adapter->netdev;
        unsigned long timeout = msecs_to_jiffies(20000);
        union ibmvnic_crq crq;
        bool resend;
        int rc;

        netdev_dbg(netdev, "setting link state %d\n", link_state);

        memset(&crq, 0, sizeof(crq));
        crq.logical_link_state.first = IBMVNIC_CRQ_CMD;
        crq.logical_link_state.cmd = LOGICAL_LINK_STATE;
        crq.logical_link_state.link_state = link_state;

        do {
                resend = false;

                reinit_completion(&adapter->init_done);
                rc = ibmvnic_send_crq(adapter, &crq);
                if (rc) {
                        netdev_err(netdev, "Failed to set link state\n");
                        return rc;
                }

                if (!wait_for_completion_timeout(&adapter->init_done,
                                                 timeout)) {
                        netdev_err(netdev, "timeout setting link state\n");
                        return -ETIMEDOUT;
                }

                if (adapter->init_done_rc == PARTIALSUCCESS) {
                        /* Partial success, delay and re-send */
                        mdelay(1000);
                        resend = true;
                } else if (adapter->init_done_rc) {
                        netdev_warn(netdev, "Unable to set link state, rc=%d\n",
                                    adapter->init_done_rc);
                        return adapter->init_done_rc;
                }
        } while (resend);

        return 0;
}

static int set_real_num_queues(struct net_device *netdev)
{
        struct ibmvnic_adapter *adapter = netdev_priv(netdev);
        int rc;

        netdev_dbg(netdev, "Setting real tx/rx queues (%llx/%llx)\n",
                   adapter->req_tx_queues, adapter->req_rx_queues);

        rc = netif_set_real_num_tx_queues(netdev, adapter->req_tx_queues);
        if (rc) {
                netdev_err(netdev, "failed to set the number of tx queues\n");
                return rc;
        }

        rc = netif_set_real_num_rx_queues(netdev, adapter->req_rx_queues);
        if (rc)
                netdev_err(netdev, "failed to set the number of rx queues\n");

        return rc;
}
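
/* Retrieving VPD is a two-step exchange: GET_VPD_SIZE asks the device
 * for the data length (the CRQ handler stores it in adapter->vpd->len),
 * then a DMA-mapped buffer of that size is offered with GET_VPD and the
 * device writes the VPD data into it.
 */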
static int ibmvnic_get_vpd(struct ibmvnic_adapter *adapter)
{
        struct device *dev = &adapter->vdev->dev;
        union ibmvnic_crq crq;
        int len = 0;
        int rc;

        if (adapter->vpd->buff)
                len = adapter->vpd->len;

        mutex_lock(&adapter->fw_lock);
        adapter->fw_done_rc = 0;
        reinit_completion(&adapter->fw_done);

        crq.get_vpd_size.first = IBMVNIC_CRQ_CMD;
        crq.get_vpd_size.cmd = GET_VPD_SIZE;
        rc = ibmvnic_send_crq(adapter, &crq);
        if (rc) {
                mutex_unlock(&adapter->fw_lock);
                return rc;
        }

        rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
        if (rc) {
                dev_err(dev, "Could not retrieve VPD size, rc = %d\n", rc);
                mutex_unlock(&adapter->fw_lock);
                return rc;
        }
        mutex_unlock(&adapter->fw_lock);

        if (!adapter->vpd->len)
                return -ENODATA;

        if (!adapter->vpd->buff)
                adapter->vpd->buff = kzalloc(adapter->vpd->len, GFP_KERNEL);
        else if (adapter->vpd->len != len)
                adapter->vpd->buff =
                        krealloc(adapter->vpd->buff,
                                 adapter->vpd->len, GFP_KERNEL);

        if (!adapter->vpd->buff) {
                dev_err(dev, "Could not allocate VPD buffer\n");
                return -ENOMEM;
        }

        adapter->vpd->dma_addr =
                dma_map_single(dev, adapter->vpd->buff, adapter->vpd->len,
                               DMA_FROM_DEVICE);
        if (dma_mapping_error(dev, adapter->vpd->dma_addr)) {
                dev_err(dev, "Could not map VPD buffer\n");
                kfree(adapter->vpd->buff);
                adapter->vpd->buff = NULL;
                return -ENOMEM;
        }

        mutex_lock(&adapter->fw_lock);
        adapter->fw_done_rc = 0;
        reinit_completion(&adapter->fw_done);

        crq.get_vpd.first = IBMVNIC_CRQ_CMD;
        crq.get_vpd.cmd = GET_VPD;
        crq.get_vpd.ioba = cpu_to_be32(adapter->vpd->dma_addr);
        crq.get_vpd.len = cpu_to_be32((u32)adapter->vpd->len);
        rc = ibmvnic_send_crq(adapter, &crq);
        if (rc) {
                kfree(adapter->vpd->buff);
                adapter->vpd->buff = NULL;
                mutex_unlock(&adapter->fw_lock);
                return rc;
        }

        rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
        if (rc) {
                dev_err(dev, "Unable to retrieve VPD, rc = %d\n", rc);
                kfree(adapter->vpd->buff);
                adapter->vpd->buff = NULL;
                mutex_unlock(&adapter->fw_lock);
                return rc;
        }

        mutex_unlock(&adapter->fw_lock);
        return 0;
}

static int init_resources(struct ibmvnic_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        int rc;

        rc = set_real_num_queues(netdev);
        if (rc)
                return rc;

        adapter->vpd = kzalloc(sizeof(*adapter->vpd), GFP_KERNEL);
        if (!adapter->vpd)
                return -ENOMEM;

        /* Vital Product Data (VPD) */
        rc = ibmvnic_get_vpd(adapter);
        if (rc) {
                netdev_err(netdev, "failed to initialize Vital Product Data (VPD)\n");
                return rc;
        }

        rc = init_napi(adapter);
        if (rc)
                return rc;

        send_query_map(adapter);

        rc = init_rx_pools(netdev);
        if (rc)
                return rc;

        rc = init_tx_pools(netdev);
        return rc;
}
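
/* When reopening a device that was previously closed (prev_state ==
 * VNIC_CLOSED), the Linux-level irqs were left disabled by the earlier
 * close (see ibmvnic_disable_irqs()), so they are re-enabled below
 * before the sub-CRQ level interrupt is unmasked with enable_scrq_irq().
 */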
static int __ibmvnic_open(struct net_device *netdev)
{
        struct ibmvnic_adapter *adapter = netdev_priv(netdev);
        enum vnic_state prev_state = adapter->state;
        int i, rc;

        adapter->state = VNIC_OPENING;
        replenish_pools(adapter);
        ibmvnic_napi_enable(adapter);

        /* We're ready to receive frames, enable the sub-crq interrupts and
         * set the logical link state to up
         */
        for (i = 0; i < adapter->req_rx_queues; i++) {
                netdev_dbg(netdev, "Enabling rx_scrq[%d] irq\n", i);
                if (prev_state == VNIC_CLOSED)
                        enable_irq(adapter->rx_scrq[i]->irq);
                enable_scrq_irq(adapter, adapter->rx_scrq[i]);
        }

        for (i = 0; i < adapter->req_tx_queues; i++) {
                netdev_dbg(netdev, "Enabling tx_scrq[%d] irq\n", i);
                if (prev_state == VNIC_CLOSED)
                        enable_irq(adapter->tx_scrq[i]->irq);
                enable_scrq_irq(adapter, adapter->tx_scrq[i]);
                netdev_tx_reset_queue(netdev_get_tx_queue(netdev, i));
        }

        rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_UP);
        if (rc) {
                ibmvnic_napi_disable(adapter);
                ibmvnic_disable_irqs(adapter);
                return rc;
        }

        adapter->tx_queues_active = true;

        /* Since queues were stopped until now, there shouldn't be any
         * one in ibmvnic_complete_tx() or ibmvnic_xmit() so maybe we
         * don't need the synchronize_rcu()? Leaving it for consistency
         * with setting ->tx_queues_active = false.
         */
        synchronize_rcu();

        netif_tx_start_all_queues(netdev);

        if (prev_state == VNIC_CLOSED) {
                for (i = 0; i < adapter->req_rx_queues; i++)
                        napi_schedule(&adapter->napi[i]);
        }

        adapter->state = VNIC_OPEN;
        return rc;
}

static int ibmvnic_open(struct net_device *netdev)
{
        struct ibmvnic_adapter *adapter = netdev_priv(netdev);
        int rc;

        ASSERT_RTNL();

        /* If device failover is pending or we are about to reset, just set
         * device state and return. Device operation will be handled by reset
         * routine.
         *
         * It should be safe to overwrite the adapter->state here. Since
         * we hold the rtnl, either the reset has not actually started or
         * the rtnl got dropped during the set_link_state() in do_reset().
         * In the former case, no one else is changing the state (again we
         * have the rtnl) and in the latter case, do_reset() will detect and
         * honor our setting below.
         */
        if (adapter->failover_pending || (test_bit(0, &adapter->resetting))) {
                netdev_dbg(netdev, "[S:%s FOP:%d] Resetting, deferring open\n",
                           adapter_state_to_string(adapter->state),
                           adapter->failover_pending);
                adapter->state = VNIC_OPEN;
                rc = 0;
                goto out;
        }

        if (adapter->state != VNIC_CLOSED) {
                rc = ibmvnic_login(netdev);
                if (rc)
                        goto out;

                rc = init_resources(adapter);
                if (rc) {
                        netdev_err(netdev, "failed to initialize resources\n");
                        goto out;
                }
        }

        rc = __ibmvnic_open(netdev);

out:
        /* If open failed and there is a pending failover or in-progress reset,
         * set device state and return. Device operation will be handled by
         * reset routine. See also comments above regarding rtnl.
         */
        if (rc &&
            (adapter->failover_pending || (test_bit(0, &adapter->resetting)))) {
                adapter->state = VNIC_OPEN;
                rc = 0;
        }

        if (rc) {
                release_resources(adapter);
                release_rx_pools(adapter);
                release_tx_pools(adapter);
        }

        return rc;
}

static void clean_rx_pools(struct ibmvnic_adapter *adapter)
{
        struct ibmvnic_rx_pool *rx_pool;
        struct ibmvnic_rx_buff *rx_buff;
        u64 rx_entries;
        int rx_scrqs;
        int i, j;

        if (!adapter->rx_pool)
                return;

        rx_scrqs = adapter->num_active_rx_pools;
        rx_entries = adapter->req_rx_add_entries_per_subcrq;

        /* Free any remaining skbs in the rx buffer pools */
        for (i = 0; i < rx_scrqs; i++) {
                rx_pool = &adapter->rx_pool[i];
                if (!rx_pool || !rx_pool->rx_buff)
                        continue;

                netdev_dbg(adapter->netdev, "Cleaning rx_pool[%d]\n", i);
                for (j = 0; j < rx_entries; j++) {
                        rx_buff = &rx_pool->rx_buff[j];
                        if (rx_buff && rx_buff->skb) {
                                dev_kfree_skb_any(rx_buff->skb);
                                rx_buff->skb = NULL;
                        }
                }
        }
}

static void clean_one_tx_pool(struct ibmvnic_adapter *adapter,
                              struct ibmvnic_tx_pool *tx_pool)
{
        struct ibmvnic_tx_buff *tx_buff;
        u64 tx_entries;
        int i;

        if (!tx_pool || !tx_pool->tx_buff)
                return;

        tx_entries = tx_pool->num_buffers;

        for (i = 0; i < tx_entries; i++) {
                tx_buff = &tx_pool->tx_buff[i];
                if (tx_buff && tx_buff->skb) {
                        dev_kfree_skb_any(tx_buff->skb);
                        tx_buff->skb = NULL;
                }
        }
}

static void clean_tx_pools(struct ibmvnic_adapter *adapter)
{
        int tx_scrqs;
        int i;

        if (!adapter->tx_pool || !adapter->tso_pool)
                return;

        tx_scrqs = adapter->num_active_tx_pools;

        /* Free any remaining skbs in the tx buffer pools */
        for (i = 0; i < tx_scrqs; i++) {
                netdev_dbg(adapter->netdev, "Cleaning tx_pool[%d]\n", i);
                clean_one_tx_pool(adapter, &adapter->tx_pool[i]);
                clean_one_tx_pool(adapter, &adapter->tso_pool[i]);
        }
}
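
/* Unlike the release_*_pools() helpers, the clean_*() helpers above only
 * free any skbs still held by the buffers; the pools and their long term
 * buffers stay allocated so that a later open can reuse them.
 */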

static void ibmvnic_disable_irqs(struct ibmvnic_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        int i;

        if (adapter->tx_scrq) {
                for (i = 0; i < adapter->req_tx_queues; i++)
                        if (adapter->tx_scrq[i]->irq) {
                                netdev_dbg(netdev,
                                           "Disabling tx_scrq[%d] irq\n", i);
                                disable_scrq_irq(adapter, adapter->tx_scrq[i]);
                                disable_irq(adapter->tx_scrq[i]->irq);
                        }
        }

        if (adapter->rx_scrq) {
                for (i = 0; i < adapter->req_rx_queues; i++) {
                        if (adapter->rx_scrq[i]->irq) {
                                netdev_dbg(netdev,
                                           "Disabling rx_scrq[%d] irq\n", i);
                                disable_scrq_irq(adapter, adapter->rx_scrq[i]);
                                disable_irq(adapter->rx_scrq[i]->irq);
                        }
                }
        }
}

static void ibmvnic_cleanup(struct net_device *netdev)
{
        struct ibmvnic_adapter *adapter = netdev_priv(netdev);

        /* ensure that transmissions are stopped if called by do_reset */

        adapter->tx_queues_active = false;

        /* Ensure complete_tx() and ibmvnic_xmit() see ->tx_queues_active
         * update so they don't restart a queue after we stop it below.
         */
        synchronize_rcu();

        if (test_bit(0, &adapter->resetting))
                netif_tx_disable(netdev);
        else
                netif_tx_stop_all_queues(netdev);

        ibmvnic_napi_disable(adapter);
        ibmvnic_disable_irqs(adapter);
}

static int __ibmvnic_close(struct net_device *netdev)
{
        struct ibmvnic_adapter *adapter = netdev_priv(netdev);
        int rc = 0;

        adapter->state = VNIC_CLOSING;
        rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN);
        adapter->state = VNIC_CLOSED;
        return rc;
}

static int ibmvnic_close(struct net_device *netdev)
{
        struct ibmvnic_adapter *adapter = netdev_priv(netdev);
        int rc;

        netdev_dbg(netdev, "[S:%s FOP:%d FRR:%d] Closing\n",
                   adapter_state_to_string(adapter->state),
                   adapter->failover_pending,
                   adapter->force_reset_recovery);

        /* If device failover is pending, just set device state and return.
         * Device operation will be handled by reset routine.
         */
        if (adapter->failover_pending) {
                adapter->state = VNIC_CLOSED;
                return 0;
        }

        rc = __ibmvnic_close(netdev);
        ibmvnic_cleanup(netdev);
        clean_rx_pools(adapter);
        clean_tx_pools(adapter);

        return rc;
}

/**
 * build_hdr_data - creates L2/L3/L4 header data buffer
 * @hdr_field: bitfield determining needed headers
 * @skb: socket buffer
 * @hdr_len: array of header lengths
 * @hdr_data: buffer to write the header to
 *
 * Reads hdr_field to determine which headers are needed by firmware.
 * Builds a buffer containing these headers. Saves individual header
 * lengths and total buffer length to be used to build descriptors.
 */
static int build_hdr_data(u8 hdr_field, struct sk_buff *skb,
                          int *hdr_len, u8 *hdr_data)
{
        int len = 0;
        u8 *hdr;

        if (skb_vlan_tagged(skb) && !skb_vlan_tag_present(skb))
                hdr_len[0] = sizeof(struct vlan_ethhdr);
        else
                hdr_len[0] = sizeof(struct ethhdr);

        if (skb->protocol == htons(ETH_P_IP)) {
                hdr_len[1] = ip_hdr(skb)->ihl * 4;
                if (ip_hdr(skb)->protocol == IPPROTO_TCP)
                        hdr_len[2] = tcp_hdrlen(skb);
                else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
                        hdr_len[2] = sizeof(struct udphdr);
        } else if (skb->protocol == htons(ETH_P_IPV6)) {
                hdr_len[1] = sizeof(struct ipv6hdr);
                if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
                        hdr_len[2] = tcp_hdrlen(skb);
                else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
                        hdr_len[2] = sizeof(struct udphdr);
        } else if (skb->protocol == htons(ETH_P_ARP)) {
                hdr_len[1] = arp_hdr_len(skb->dev);
                hdr_len[2] = 0;
        }

        memset(hdr_data, 0, 120);
        if ((hdr_field >> 6) & 1) {
                hdr = skb_mac_header(skb);
                memcpy(hdr_data, hdr, hdr_len[0]);
                len += hdr_len[0];
        }

        if ((hdr_field >> 5) & 1) {
                hdr = skb_network_header(skb);
                memcpy(hdr_data + len, hdr, hdr_len[1]);
                len += hdr_len[1];
        }

        if ((hdr_field >> 4) & 1) {
                hdr = skb_transport_header(skb);
                memcpy(hdr_data + len, hdr, hdr_len[2]);
                len += hdr_len[2];
        }
        return len;
}
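
/* For a typical untagged TCP/IPv4 skb with no IP or TCP options the
 * array above ends up as hdr_len = {14, 20, 20}; with all three header
 * bits set in hdr_field, build_hdr_data() copies 54 bytes into hdr_data
 * and returns 54.
 */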

/**
 * create_hdr_descs - create header and header extension descriptors
 * @hdr_field: bitfield determining needed headers
 * @hdr_data: buffer containing header data
 * @len: length of data buffer
 * @hdr_len: array of individual header lengths
 * @scrq_arr: descriptor array
 *
 * Creates header and, if needed, header extension descriptors and
 * places them in a descriptor array, scrq_arr
 */
static int create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len,
                            union sub_crq *scrq_arr)
{
        union sub_crq hdr_desc;
        int tmp_len = len;
        int num_descs = 0;
        u8 *data, *cur;
        int tmp;

        while (tmp_len > 0) {
                cur = hdr_data + len - tmp_len;

                memset(&hdr_desc, 0, sizeof(hdr_desc));
                if (cur != hdr_data) {
                        data = hdr_desc.hdr_ext.data;
                        tmp = tmp_len > 29 ? 29 : tmp_len;
                        hdr_desc.hdr_ext.first = IBMVNIC_CRQ_CMD;
                        hdr_desc.hdr_ext.type = IBMVNIC_HDR_EXT_DESC;
                        hdr_desc.hdr_ext.len = tmp;
                } else {
                        data = hdr_desc.hdr.data;
                        tmp = tmp_len > 24 ? 24 : tmp_len;
                        hdr_desc.hdr.first = IBMVNIC_CRQ_CMD;
                        hdr_desc.hdr.type = IBMVNIC_HDR_DESC;
                        hdr_desc.hdr.len = tmp;
                        hdr_desc.hdr.l2_len = (u8)hdr_len[0];
                        hdr_desc.hdr.l3_len = cpu_to_be16((u16)hdr_len[1]);
                        hdr_desc.hdr.l4_len = (u8)hdr_len[2];
                        hdr_desc.hdr.flag = hdr_field << 1;
                }
                memcpy(data, cur, tmp);
                tmp_len -= tmp;
                *scrq_arr = hdr_desc;
                scrq_arr++;
                num_descs++;
        }

        return num_descs;
}
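
/* Continuing the example above: 54 bytes of header data become one
 * 24-byte IBMVNIC_HDR_DESC followed by two IBMVNIC_HDR_EXT_DESC
 * descriptors carrying 29 and 1 bytes respectively, so num_descs is 3.
 */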
Device 1885 * architects recommend that no packet should be smaller 1886 * than the minimum MTU value provided to the driver, so 1887 * pad any packets to that length 1888 */ 1889 if (skb->len < netdev->min_mtu) 1890 return skb_put_padto(skb, netdev->min_mtu); 1891 1892 return 0; 1893 } 1894 1895 static void ibmvnic_tx_scrq_clean_buffer(struct ibmvnic_adapter *adapter, 1896 struct ibmvnic_sub_crq_queue *tx_scrq) 1897 { 1898 struct ibmvnic_ind_xmit_queue *ind_bufp; 1899 struct ibmvnic_tx_buff *tx_buff; 1900 struct ibmvnic_tx_pool *tx_pool; 1901 union sub_crq tx_scrq_entry; 1902 int queue_num; 1903 int entries; 1904 int index; 1905 int i; 1906 1907 ind_bufp = &tx_scrq->ind_buf; 1908 entries = (u64)ind_bufp->index; 1909 queue_num = tx_scrq->pool_index; 1910 1911 for (i = entries - 1; i >= 0; --i) { 1912 tx_scrq_entry = ind_bufp->indir_arr[i]; 1913 if (tx_scrq_entry.v1.type != IBMVNIC_TX_DESC) 1914 continue; 1915 index = be32_to_cpu(tx_scrq_entry.v1.correlator); 1916 if (index & IBMVNIC_TSO_POOL_MASK) { 1917 tx_pool = &adapter->tso_pool[queue_num]; 1918 index &= ~IBMVNIC_TSO_POOL_MASK; 1919 } else { 1920 tx_pool = &adapter->tx_pool[queue_num]; 1921 } 1922 tx_pool->free_map[tx_pool->consumer_index] = index; 1923 tx_pool->consumer_index = tx_pool->consumer_index == 0 ? 1924 tx_pool->num_buffers - 1 : 1925 tx_pool->consumer_index - 1; 1926 tx_buff = &tx_pool->tx_buff[index]; 1927 adapter->netdev->stats.tx_packets--; 1928 adapter->netdev->stats.tx_bytes -= tx_buff->skb->len; 1929 adapter->tx_stats_buffers[queue_num].packets--; 1930 adapter->tx_stats_buffers[queue_num].bytes -= 1931 tx_buff->skb->len; 1932 dev_kfree_skb_any(tx_buff->skb); 1933 tx_buff->skb = NULL; 1934 adapter->netdev->stats.tx_dropped++; 1935 } 1936 1937 ind_bufp->index = 0; 1938 1939 if (atomic_sub_return(entries, &tx_scrq->used) <= 1940 (adapter->req_tx_entries_per_subcrq / 2) && 1941 __netif_subqueue_stopped(adapter->netdev, queue_num)) { 1942 rcu_read_lock(); 1943 1944 if (adapter->tx_queues_active) { 1945 netif_wake_subqueue(adapter->netdev, queue_num); 1946 netdev_dbg(adapter->netdev, "Started queue %d\n", 1947 queue_num); 1948 } 1949 1950 rcu_read_unlock(); 1951 } 1952 } 1953 1954 static int ibmvnic_tx_scrq_flush(struct ibmvnic_adapter *adapter, 1955 struct ibmvnic_sub_crq_queue *tx_scrq) 1956 { 1957 struct ibmvnic_ind_xmit_queue *ind_bufp; 1958 u64 dma_addr; 1959 u64 entries; 1960 u64 handle; 1961 int rc; 1962 1963 ind_bufp = &tx_scrq->ind_buf; 1964 dma_addr = (u64)ind_bufp->indir_dma; 1965 entries = (u64)ind_bufp->index; 1966 handle = tx_scrq->handle; 1967 1968 if (!entries) 1969 return 0; 1970 rc = send_subcrq_indirect(adapter, handle, dma_addr, entries); 1971 if (rc) 1972 ibmvnic_tx_scrq_clean_buffer(adapter, tx_scrq); 1973 else 1974 ind_bufp->index = 0; 1975 return 0; 1976 } 1977 1978 static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) 1979 { 1980 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 1981 int queue_num = skb_get_queue_mapping(skb); 1982 u8 *hdrs = (u8 *)&adapter->tx_rx_desc_req; 1983 struct device *dev = &adapter->vdev->dev; 1984 struct ibmvnic_ind_xmit_queue *ind_bufp; 1985 struct ibmvnic_tx_buff *tx_buff = NULL; 1986 struct ibmvnic_sub_crq_queue *tx_scrq; 1987 struct ibmvnic_long_term_buff *ltb; 1988 struct ibmvnic_tx_pool *tx_pool; 1989 unsigned int tx_send_failed = 0; 1990 netdev_tx_t ret = NETDEV_TX_OK; 1991 unsigned int tx_map_failed = 0; 1992 union sub_crq indir_arr[16]; 1993 unsigned int tx_dropped = 0; 1994 unsigned int tx_packets = 0; 1995 unsigned int tx_bytes = 
0; 1996 dma_addr_t data_dma_addr; 1997 struct netdev_queue *txq; 1998 unsigned long lpar_rc; 1999 union sub_crq tx_crq; 2000 unsigned int offset; 2001 int num_entries = 1; 2002 unsigned char *dst; 2003 int bufidx = 0; 2004 u8 proto = 0; 2005 2006 /* If a reset is in progress, drop the packet since 2007 * the scrqs may get torn down. Otherwise use the 2008 * rcu to ensure reset waits for us to complete. 2009 */ 2010 rcu_read_lock(); 2011 if (!adapter->tx_queues_active) { 2012 dev_kfree_skb_any(skb); 2013 2014 tx_send_failed++; 2015 tx_dropped++; 2016 ret = NETDEV_TX_OK; 2017 goto out; 2018 } 2019 2020 tx_scrq = adapter->tx_scrq[queue_num]; 2021 txq = netdev_get_tx_queue(netdev, queue_num); 2022 ind_bufp = &tx_scrq->ind_buf; 2023 2024 if (ibmvnic_xmit_workarounds(skb, netdev)) { 2025 tx_dropped++; 2026 tx_send_failed++; 2027 ret = NETDEV_TX_OK; 2028 ibmvnic_tx_scrq_flush(adapter, tx_scrq); 2029 goto out; 2030 } 2031 2032 if (skb_is_gso(skb)) 2033 tx_pool = &adapter->tso_pool[queue_num]; 2034 else 2035 tx_pool = &adapter->tx_pool[queue_num]; 2036 2037 bufidx = tx_pool->free_map[tx_pool->consumer_index]; 2038 2039 if (bufidx == IBMVNIC_INVALID_MAP) { 2040 dev_kfree_skb_any(skb); 2041 tx_send_failed++; 2042 tx_dropped++; 2043 ibmvnic_tx_scrq_flush(adapter, tx_scrq); 2044 ret = NETDEV_TX_OK; 2045 goto out; 2046 } 2047 2048 tx_pool->free_map[tx_pool->consumer_index] = IBMVNIC_INVALID_MAP; 2049 2050 map_txpool_buf_to_ltb(tx_pool, bufidx, &ltb, &offset); 2051 2052 dst = ltb->buff + offset; 2053 memset(dst, 0, tx_pool->buf_size); 2054 data_dma_addr = ltb->addr + offset; 2055 2056 if (skb_shinfo(skb)->nr_frags) { 2057 int cur, i; 2058 2059 /* Copy the head */ 2060 skb_copy_from_linear_data(skb, dst, skb_headlen(skb)); 2061 cur = skb_headlen(skb); 2062 2063 /* Copy the frags */ 2064 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 2065 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 2066 2067 memcpy(dst + cur, skb_frag_address(frag), 2068 skb_frag_size(frag)); 2069 cur += skb_frag_size(frag); 2070 } 2071 } else { 2072 skb_copy_from_linear_data(skb, dst, skb->len); 2073 } 2074 2075 /* post changes to long_term_buff *dst before VIOS accessing it */ 2076 dma_wmb(); 2077 2078 tx_pool->consumer_index = 2079 (tx_pool->consumer_index + 1) % tx_pool->num_buffers; 2080 2081 tx_buff = &tx_pool->tx_buff[bufidx]; 2082 tx_buff->skb = skb; 2083 tx_buff->index = bufidx; 2084 tx_buff->pool_index = queue_num; 2085 2086 memset(&tx_crq, 0, sizeof(tx_crq)); 2087 tx_crq.v1.first = IBMVNIC_CRQ_CMD; 2088 tx_crq.v1.type = IBMVNIC_TX_DESC; 2089 tx_crq.v1.n_crq_elem = 1; 2090 tx_crq.v1.n_sge = 1; 2091 tx_crq.v1.flags1 = IBMVNIC_TX_COMP_NEEDED; 2092 2093 if (skb_is_gso(skb)) 2094 tx_crq.v1.correlator = 2095 cpu_to_be32(bufidx | IBMVNIC_TSO_POOL_MASK); 2096 else 2097 tx_crq.v1.correlator = cpu_to_be32(bufidx); 2098 tx_crq.v1.dma_reg = cpu_to_be16(ltb->map_id); 2099 tx_crq.v1.sge_len = cpu_to_be32(skb->len); 2100 tx_crq.v1.ioba = cpu_to_be64(data_dma_addr); 2101 2102 if (adapter->vlan_header_insertion && skb_vlan_tag_present(skb)) { 2103 tx_crq.v1.flags2 |= IBMVNIC_TX_VLAN_INSERT; 2104 tx_crq.v1.vlan_id = cpu_to_be16(skb->vlan_tci); 2105 } 2106 2107 if (skb->protocol == htons(ETH_P_IP)) { 2108 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV4; 2109 proto = ip_hdr(skb)->protocol; 2110 } else if (skb->protocol == htons(ETH_P_IPV6)) { 2111 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV6; 2112 proto = ipv6_hdr(skb)->nexthdr; 2113 } 2114 2115 if (proto == IPPROTO_TCP) 2116 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_TCP; 2117 else if (proto ==
IPPROTO_UDP) 2118 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_UDP; 2119 2120 if (skb->ip_summed == CHECKSUM_PARTIAL) { 2121 tx_crq.v1.flags1 |= IBMVNIC_TX_CHKSUM_OFFLOAD; 2122 hdrs += 2; 2123 } 2124 if (skb_is_gso(skb)) { 2125 tx_crq.v1.flags1 |= IBMVNIC_TX_LSO; 2126 tx_crq.v1.mss = cpu_to_be16(skb_shinfo(skb)->gso_size); 2127 hdrs += 2; 2128 } 2129 2130 if ((*hdrs >> 7) & 1) 2131 build_hdr_descs_arr(skb, indir_arr, &num_entries, *hdrs); 2132 2133 tx_crq.v1.n_crq_elem = num_entries; 2134 tx_buff->num_entries = num_entries; 2135 /* flush buffer if current entry can not fit */ 2136 if (num_entries + ind_bufp->index > IBMVNIC_MAX_IND_DESCS) { 2137 lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq); 2138 if (lpar_rc != H_SUCCESS) 2139 goto tx_flush_err; 2140 } 2141 2142 indir_arr[0] = tx_crq; 2143 memcpy(&ind_bufp->indir_arr[ind_bufp->index], &indir_arr[0], 2144 num_entries * sizeof(struct ibmvnic_generic_scrq)); 2145 ind_bufp->index += num_entries; 2146 if (__netdev_tx_sent_queue(txq, skb->len, 2147 netdev_xmit_more() && 2148 ind_bufp->index < IBMVNIC_MAX_IND_DESCS)) { 2149 lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq); 2150 if (lpar_rc != H_SUCCESS) 2151 goto tx_err; 2152 } 2153 2154 if (atomic_add_return(num_entries, &tx_scrq->used) 2155 >= adapter->req_tx_entries_per_subcrq) { 2156 netdev_dbg(netdev, "Stopping queue %d\n", queue_num); 2157 netif_stop_subqueue(netdev, queue_num); 2158 } 2159 2160 tx_packets++; 2161 tx_bytes += skb->len; 2162 txq_trans_cond_update(txq); 2163 ret = NETDEV_TX_OK; 2164 goto out; 2165 2166 tx_flush_err: 2167 dev_kfree_skb_any(skb); 2168 tx_buff->skb = NULL; 2169 tx_pool->consumer_index = tx_pool->consumer_index == 0 ? 2170 tx_pool->num_buffers - 1 : 2171 tx_pool->consumer_index - 1; 2172 tx_dropped++; 2173 tx_err: 2174 if (lpar_rc != H_CLOSED && lpar_rc != H_PARAMETER) 2175 dev_err_ratelimited(dev, "tx: send failed\n"); 2176 2177 if (lpar_rc == H_CLOSED || adapter->failover_pending) { 2178 /* Disable TX and report carrier off if queue is closed 2179 * or pending failover. 2180 * Firmware guarantees that a signal will be sent to the 2181 * driver, triggering a reset or some other action. 
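 * The signal arrives as a CRQ transport event, which schedules the
 * reset worker; the reset path re-enables the queues and carrier
 * once recovery completes.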
2182 */ 2183 netif_tx_stop_all_queues(netdev); 2184 netif_carrier_off(netdev); 2185 } 2186 out: 2187 rcu_read_unlock(); 2188 netdev->stats.tx_dropped += tx_dropped; 2189 netdev->stats.tx_bytes += tx_bytes; 2190 netdev->stats.tx_packets += tx_packets; 2191 adapter->tx_send_failed += tx_send_failed; 2192 adapter->tx_map_failed += tx_map_failed; 2193 adapter->tx_stats_buffers[queue_num].packets += tx_packets; 2194 adapter->tx_stats_buffers[queue_num].bytes += tx_bytes; 2195 adapter->tx_stats_buffers[queue_num].dropped_packets += tx_dropped; 2196 2197 return ret; 2198 } 2199 2200 static void ibmvnic_set_multi(struct net_device *netdev) 2201 { 2202 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 2203 struct netdev_hw_addr *ha; 2204 union ibmvnic_crq crq; 2205 2206 memset(&crq, 0, sizeof(crq)); 2207 crq.request_capability.first = IBMVNIC_CRQ_CMD; 2208 crq.request_capability.cmd = REQUEST_CAPABILITY; 2209 2210 if (netdev->flags & IFF_PROMISC) { 2211 if (!adapter->promisc_supported) 2212 return; 2213 } else { 2214 if (netdev->flags & IFF_ALLMULTI) { 2215 /* Accept all multicast */ 2216 memset(&crq, 0, sizeof(crq)); 2217 crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD; 2218 crq.multicast_ctrl.cmd = MULTICAST_CTRL; 2219 crq.multicast_ctrl.flags = IBMVNIC_ENABLE_ALL; 2220 ibmvnic_send_crq(adapter, &crq); 2221 } else if (netdev_mc_empty(netdev)) { 2222 /* Reject all multicast */ 2223 memset(&crq, 0, sizeof(crq)); 2224 crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD; 2225 crq.multicast_ctrl.cmd = MULTICAST_CTRL; 2226 crq.multicast_ctrl.flags = IBMVNIC_DISABLE_ALL; 2227 ibmvnic_send_crq(adapter, &crq); 2228 } else { 2229 /* Accept one or more multicast(s) */ 2230 netdev_for_each_mc_addr(ha, netdev) { 2231 memset(&crq, 0, sizeof(crq)); 2232 crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD; 2233 crq.multicast_ctrl.cmd = MULTICAST_CTRL; 2234 crq.multicast_ctrl.flags = IBMVNIC_ENABLE_MC; 2235 ether_addr_copy(&crq.multicast_ctrl.mac_addr[0], 2236 ha->addr); 2237 ibmvnic_send_crq(adapter, &crq); 2238 } 2239 } 2240 } 2241 } 2242 2243 static int __ibmvnic_set_mac(struct net_device *netdev, u8 *dev_addr) 2244 { 2245 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 2246 union ibmvnic_crq crq; 2247 int rc; 2248 2249 if (!is_valid_ether_addr(dev_addr)) { 2250 rc = -EADDRNOTAVAIL; 2251 goto err; 2252 } 2253 2254 memset(&crq, 0, sizeof(crq)); 2255 crq.change_mac_addr.first = IBMVNIC_CRQ_CMD; 2256 crq.change_mac_addr.cmd = CHANGE_MAC_ADDR; 2257 ether_addr_copy(&crq.change_mac_addr.mac_addr[0], dev_addr); 2258 2259 mutex_lock(&adapter->fw_lock); 2260 adapter->fw_done_rc = 0; 2261 reinit_completion(&adapter->fw_done); 2262 2263 rc = ibmvnic_send_crq(adapter, &crq); 2264 if (rc) { 2265 rc = -EIO; 2266 mutex_unlock(&adapter->fw_lock); 2267 goto err; 2268 } 2269 2270 rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000); 2271 /* netdev->dev_addr is changed in handle_change_mac_rsp function */ 2272 if (rc || adapter->fw_done_rc) { 2273 rc = -EIO; 2274 mutex_unlock(&adapter->fw_lock); 2275 goto err; 2276 } 2277 mutex_unlock(&adapter->fw_lock); 2278 return 0; 2279 err: 2280 ether_addr_copy(adapter->mac_addr, netdev->dev_addr); 2281 return rc; 2282 } 2283 2284 static int ibmvnic_set_mac(struct net_device *netdev, void *p) 2285 { 2286 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 2287 struct sockaddr *addr = p; 2288 int rc; 2289 2290 rc = 0; 2291 if (!is_valid_ether_addr(addr->sa_data)) 2292 return -EADDRNOTAVAIL; 2293 2294 ether_addr_copy(adapter->mac_addr, addr->sa_data); 2295 if (adapter->state != 
VNIC_PROBED) 2296 rc = __ibmvnic_set_mac(netdev, addr->sa_data); 2297 2298 return rc; 2299 } 2300 2301 static const char *reset_reason_to_string(enum ibmvnic_reset_reason reason) 2302 { 2303 switch (reason) { 2304 case VNIC_RESET_FAILOVER: 2305 return "FAILOVER"; 2306 case VNIC_RESET_MOBILITY: 2307 return "MOBILITY"; 2308 case VNIC_RESET_FATAL: 2309 return "FATAL"; 2310 case VNIC_RESET_NON_FATAL: 2311 return "NON_FATAL"; 2312 case VNIC_RESET_TIMEOUT: 2313 return "TIMEOUT"; 2314 case VNIC_RESET_CHANGE_PARAM: 2315 return "CHANGE_PARAM"; 2316 case VNIC_RESET_PASSIVE_INIT: 2317 return "PASSIVE_INIT"; 2318 } 2319 return "UNKNOWN"; 2320 } 2321 2322 /* 2323 * Initialize the init_done completion and return code values. We 2324 * can get a transport event just after registering the CRQ and the 2325 * tasklet will use this to communicate the transport event. To ensure 2326 * we don't miss the notification/error, initialize these _before_ 2327 * registering the CRQ. 2328 */ 2329 static inline void reinit_init_done(struct ibmvnic_adapter *adapter) 2330 { 2331 reinit_completion(&adapter->init_done); 2332 adapter->init_done_rc = 0; 2333 } 2334 2335 /* 2336 * do_reset returns zero if we are able to keep processing reset events, or 2337 * non-zero if we hit a fatal error and must halt. 2338 */ 2339 static int do_reset(struct ibmvnic_adapter *adapter, 2340 struct ibmvnic_rwi *rwi, u32 reset_state) 2341 { 2342 struct net_device *netdev = adapter->netdev; 2343 u64 old_num_rx_queues, old_num_tx_queues; 2344 u64 old_num_rx_slots, old_num_tx_slots; 2345 int rc; 2346 2347 netdev_dbg(adapter->netdev, 2348 "[S:%s FOP:%d] Reset reason: %s, reset_state: %s\n", 2349 adapter_state_to_string(adapter->state), 2350 adapter->failover_pending, 2351 reset_reason_to_string(rwi->reset_reason), 2352 adapter_state_to_string(reset_state)); 2353 2354 adapter->reset_reason = rwi->reset_reason; 2355 /* requestor of VNIC_RESET_CHANGE_PARAM already has the rtnl lock */ 2356 if (!(adapter->reset_reason == VNIC_RESET_CHANGE_PARAM)) 2357 rtnl_lock(); 2358 2359 /* Now that we have the rtnl lock, clear any pending failover. 2360 * This will ensure ibmvnic_open() has either completed or will 2361 * block until failover is complete. 2362 */ 2363 if (rwi->reset_reason == VNIC_RESET_FAILOVER) 2364 adapter->failover_pending = false; 2365 2366 /* read the state and check (again) after getting rtnl */ 2367 reset_state = adapter->state; 2368 2369 if (reset_state == VNIC_REMOVING || reset_state == VNIC_REMOVED) { 2370 rc = -EBUSY; 2371 goto out; 2372 } 2373 2374 netif_carrier_off(netdev); 2375 2376 old_num_rx_queues = adapter->req_rx_queues; 2377 old_num_tx_queues = adapter->req_tx_queues; 2378 old_num_rx_slots = adapter->req_rx_add_entries_per_subcrq; 2379 old_num_tx_slots = adapter->req_tx_entries_per_subcrq; 2380 2381 ibmvnic_cleanup(netdev); 2382 2383 if (reset_state == VNIC_OPEN && 2384 adapter->reset_reason != VNIC_RESET_MOBILITY && 2385 adapter->reset_reason != VNIC_RESET_FAILOVER) { 2386 if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) { 2387 rc = __ibmvnic_close(netdev); 2388 if (rc) 2389 goto out; 2390 } else { 2391 adapter->state = VNIC_CLOSING; 2392 2393 /* Release the RTNL lock before link state change and 2394 * re-acquire after the link state change to allow 2395 * linkwatch_event to grab the RTNL lock and run during 2396 * a reset.
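 * set_link_state() can block waiting on a response from the VNIC
 * server, and linkwatch_event() needs the RTNL lock to deliver
 * carrier updates in the meantime.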
2397 */ 2398 rtnl_unlock(); 2399 rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN); 2400 rtnl_lock(); 2401 if (rc) 2402 goto out; 2403 2404 if (adapter->state == VNIC_OPEN) { 2405 /* When we dropped rtnl, ibmvnic_open() got 2406 * it and noticed that we are resetting and 2407 * set the adapter state to OPEN. Update our 2408 * new "target" state, and resume the reset 2409 * from VNIC_CLOSING state. 2410 */ 2411 netdev_dbg(netdev, 2412 "Open changed state from %s, updating.\n", 2413 adapter_state_to_string(reset_state)); 2414 reset_state = VNIC_OPEN; 2415 adapter->state = VNIC_CLOSING; 2416 } 2417 2418 if (adapter->state != VNIC_CLOSING) { 2419 /* If someone else changed the adapter state 2420 * when we dropped the rtnl, fail the reset 2421 */ 2422 rc = -EAGAIN; 2423 goto out; 2424 } 2425 adapter->state = VNIC_CLOSED; 2426 } 2427 } 2428 2429 if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) { 2430 release_resources(adapter); 2431 release_sub_crqs(adapter, 1); 2432 release_crq_queue(adapter); 2433 } 2434 2435 if (adapter->reset_reason != VNIC_RESET_NON_FATAL) { 2436 /* remove the closed state so when we call open it appears 2437 * we are coming from the probed state. 2438 */ 2439 adapter->state = VNIC_PROBED; 2440 2441 reinit_init_done(adapter); 2442 2443 if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) { 2444 rc = init_crq_queue(adapter); 2445 } else if (adapter->reset_reason == VNIC_RESET_MOBILITY) { 2446 rc = ibmvnic_reenable_crq_queue(adapter); 2447 release_sub_crqs(adapter, 1); 2448 } else { 2449 rc = ibmvnic_reset_crq(adapter); 2450 if (rc == H_CLOSED || rc == H_SUCCESS) { 2451 rc = vio_enable_interrupts(adapter->vdev); 2452 if (rc) 2453 netdev_err(adapter->netdev, 2454 "Reset failed to enable interrupts. rc=%d\n", 2455 rc); 2456 } 2457 } 2458 2459 if (rc) { 2460 netdev_err(adapter->netdev, 2461 "Reset couldn't initialize crq. rc=%d\n", rc); 2462 goto out; 2463 } 2464 2465 rc = ibmvnic_reset_init(adapter, true); 2466 if (rc) 2467 goto out; 2468 2469 /* If the adapter was in PROBE or DOWN state prior to the reset, 2470 * exit here. 
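 * In those states the adapter had not logged in or allocated queues
 * before the reset, so there is nothing further to restore.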
2471 */ 2472 if (reset_state == VNIC_PROBED || reset_state == VNIC_DOWN) { 2473 rc = 0; 2474 goto out; 2475 } 2476 2477 rc = ibmvnic_login(netdev); 2478 if (rc) 2479 goto out; 2480 2481 if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) { 2482 rc = init_resources(adapter); 2483 if (rc) 2484 goto out; 2485 } else if (adapter->req_rx_queues != old_num_rx_queues || 2486 adapter->req_tx_queues != old_num_tx_queues || 2487 adapter->req_rx_add_entries_per_subcrq != 2488 old_num_rx_slots || 2489 adapter->req_tx_entries_per_subcrq != 2490 old_num_tx_slots || 2491 !adapter->rx_pool || 2492 !adapter->tso_pool || 2493 !adapter->tx_pool) { 2494 release_napi(adapter); 2495 release_vpd_data(adapter); 2496 2497 rc = init_resources(adapter); 2498 if (rc) 2499 goto out; 2500 2501 } else { 2502 rc = init_tx_pools(netdev); 2503 if (rc) { 2504 netdev_dbg(netdev, 2505 "init tx pools failed (%d)\n", 2506 rc); 2507 goto out; 2508 } 2509 2510 rc = init_rx_pools(netdev); 2511 if (rc) { 2512 netdev_dbg(netdev, 2513 "init rx pools failed (%d)\n", 2514 rc); 2515 goto out; 2516 } 2517 } 2518 ibmvnic_disable_irqs(adapter); 2519 } 2520 adapter->state = VNIC_CLOSED; 2521 2522 if (reset_state == VNIC_CLOSED) { 2523 rc = 0; 2524 goto out; 2525 } 2526 2527 rc = __ibmvnic_open(netdev); 2528 if (rc) { 2529 rc = IBMVNIC_OPEN_FAILED; 2530 goto out; 2531 } 2532 2533 /* refresh device's multicast list */ 2534 ibmvnic_set_multi(netdev); 2535 2536 if (adapter->reset_reason == VNIC_RESET_FAILOVER || 2537 adapter->reset_reason == VNIC_RESET_MOBILITY) 2538 __netdev_notify_peers(netdev); 2539 2540 rc = 0; 2541 2542 out: 2543 /* restore the adapter state if reset failed */ 2544 if (rc) 2545 adapter->state = reset_state; 2546 /* requestor of VNIC_RESET_CHANGE_PARAM should still hold the rtnl lock */ 2547 if (!(adapter->reset_reason == VNIC_RESET_CHANGE_PARAM)) 2548 rtnl_unlock(); 2549 2550 netdev_dbg(adapter->netdev, "[S:%s FOP:%d] Reset done, rc %d\n", 2551 adapter_state_to_string(adapter->state), 2552 adapter->failover_pending, rc); 2553 return rc; 2554 } 2555 2556 static int do_hard_reset(struct ibmvnic_adapter *adapter, 2557 struct ibmvnic_rwi *rwi, u32 reset_state) 2558 { 2559 struct net_device *netdev = adapter->netdev; 2560 int rc; 2561 2562 netdev_dbg(adapter->netdev, "Hard resetting driver (%s)\n", 2563 reset_reason_to_string(rwi->reset_reason)); 2564 2565 /* read the state and check (again) after getting rtnl */ 2566 reset_state = adapter->state; 2567 2568 if (reset_state == VNIC_REMOVING || reset_state == VNIC_REMOVED) { 2569 rc = -EBUSY; 2570 goto out; 2571 } 2572 2573 netif_carrier_off(netdev); 2574 adapter->reset_reason = rwi->reset_reason; 2575 2576 ibmvnic_cleanup(netdev); 2577 release_resources(adapter); 2578 release_sub_crqs(adapter, 0); 2579 release_crq_queue(adapter); 2580 2581 /* remove the closed state so when we call open it appears 2582 * we are coming from the probed state. 2583 */ 2584 adapter->state = VNIC_PROBED; 2585 2586 reinit_init_done(adapter); 2587 2588 rc = init_crq_queue(adapter); 2589 if (rc) { 2590 netdev_err(adapter->netdev, 2591 "Couldn't initialize crq. rc=%d\n", rc); 2592 goto out; 2593 } 2594 2595 rc = ibmvnic_reset_init(adapter, false); 2596 if (rc) 2597 goto out; 2598 2599 /* If the adapter was in PROBE or DOWN state prior to the reset, 2600 * exit here. 
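 * Unlike do_reset(), this path has already torn down and rebuilt the
 * CRQ unconditionally; exiting here simply skips the login and
 * resource setup that only a live device would need.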
2601 */ 2602 if (reset_state == VNIC_PROBED || reset_state == VNIC_DOWN) 2603 goto out; 2604 2605 rc = ibmvnic_login(netdev); 2606 if (rc) 2607 goto out; 2608 2609 rc = init_resources(adapter); 2610 if (rc) 2611 goto out; 2612 2613 ibmvnic_disable_irqs(adapter); 2614 adapter->state = VNIC_CLOSED; 2615 2616 if (reset_state == VNIC_CLOSED) 2617 goto out; 2618 2619 rc = __ibmvnic_open(netdev); 2620 if (rc) { 2621 rc = IBMVNIC_OPEN_FAILED; 2622 goto out; 2623 } 2624 2625 __netdev_notify_peers(netdev); 2626 out: 2627 /* restore adapter state if reset failed */ 2628 if (rc) 2629 adapter->state = reset_state; 2630 netdev_dbg(adapter->netdev, "[S:%s FOP:%d] Hard reset done, rc %d\n", 2631 adapter_state_to_string(adapter->state), 2632 adapter->failover_pending, rc); 2633 return rc; 2634 } 2635 2636 static struct ibmvnic_rwi *get_next_rwi(struct ibmvnic_adapter *adapter) 2637 { 2638 struct ibmvnic_rwi *rwi; 2639 unsigned long flags; 2640 2641 spin_lock_irqsave(&adapter->rwi_lock, flags); 2642 2643 if (!list_empty(&adapter->rwi_list)) { 2644 rwi = list_first_entry(&adapter->rwi_list, struct ibmvnic_rwi, 2645 list); 2646 list_del(&rwi->list); 2647 } else { 2648 rwi = NULL; 2649 } 2650 2651 spin_unlock_irqrestore(&adapter->rwi_lock, flags); 2652 return rwi; 2653 } 2654 2655 /** 2656 * do_passive_init - complete probing when partner device is detected. 2657 * @adapter: ibmvnic_adapter struct 2658 * 2659 * If the ibmvnic device does not have a partner device to communicate with at boot 2660 * and that partner device comes online at a later time, this function is called 2661 * to complete the initialization process of the ibmvnic device. 2662 * Caller is expected to hold rtnl_lock(). 2663 * 2664 * Returns non-zero if sub-CRQs are not initialized properly, leaving the device 2665 * in the down state. 2666 * Returns 0 upon success and the device is in PROBED state. 2667 */ 2668 2669 static int do_passive_init(struct ibmvnic_adapter *adapter) 2670 { 2671 unsigned long timeout = msecs_to_jiffies(30000); 2672 struct net_device *netdev = adapter->netdev; 2673 struct device *dev = &adapter->vdev->dev; 2674 int rc; 2675 2676 netdev_dbg(netdev, "Partner device found, probing.\n"); 2677 2678 adapter->state = VNIC_PROBING; 2679 reinit_completion(&adapter->init_done); 2680 adapter->init_done_rc = 0; 2681 adapter->crq.active = true; 2682 2683 rc = send_crq_init_complete(adapter); 2684 if (rc) 2685 goto out; 2686 2687 rc = send_version_xchg(adapter); 2688 if (rc) 2689 netdev_dbg(adapter->netdev, "send_version_xchg failed, rc=%d\n", rc); 2690 2691 if (!wait_for_completion_timeout(&adapter->init_done, timeout)) { 2692 dev_err(dev, "Initialization sequence timed out\n"); 2693 rc = -ETIMEDOUT; 2694 goto out; 2695 } 2696 2697 rc = init_sub_crqs(adapter); 2698 if (rc) { 2699 dev_err(dev, "Initialization of sub crqs failed, rc=%d\n", rc); 2700 goto out; 2701 } 2702 2703 rc = init_sub_crq_irqs(adapter); 2704 if (rc) { 2705 dev_err(dev, "Failed to initialize sub crq irqs, rc=%d\n", rc); 2706 goto init_failed; 2707 } 2708 2709 netdev->mtu = adapter->req_mtu - ETH_HLEN; 2710 netdev->min_mtu = adapter->min_mtu - ETH_HLEN; 2711 netdev->max_mtu = adapter->max_mtu - ETH_HLEN; 2712 2713 adapter->state = VNIC_PROBED; 2714 netdev_dbg(netdev, "Probed successfully.
Waiting for signal from partner device.\n"); 2715 2716 return 0; 2717 2718 init_failed: 2719 release_sub_crqs(adapter, 1); 2720 out: 2721 adapter->state = VNIC_DOWN; 2722 return rc; 2723 } 2724 2725 static void __ibmvnic_reset(struct work_struct *work) 2726 { 2727 struct ibmvnic_adapter *adapter; 2728 unsigned int timeout = 5000; 2729 struct ibmvnic_rwi *tmprwi; 2730 bool saved_state = false; 2731 struct ibmvnic_rwi *rwi; 2732 unsigned long flags; 2733 struct device *dev; 2734 bool need_reset; 2735 int num_fails = 0; 2736 u32 reset_state; 2737 int rc = 0; 2738 2739 adapter = container_of(work, struct ibmvnic_adapter, ibmvnic_reset); 2740 dev = &adapter->vdev->dev; 2741 2742 /* Wait for ibmvnic_probe() to complete. If probe is taking too long 2743 * or if another reset is in progress, defer work for now. If probe 2744 * eventually fails it will flush and terminate our work. 2745 * 2746 * Three possibilities here: 2747 * 1. Adapter being removed - just return 2748 * 2. Timed out on probe or another reset in progress - delay the work 2749 * 3. Completed probe - perform any resets in queue 2750 */ 2751 if (adapter->state == VNIC_PROBING && 2752 !wait_for_completion_timeout(&adapter->probe_done, timeout)) { 2753 dev_err(dev, "Reset thread timed out on probe\n"); 2754 queue_delayed_work(system_long_wq, 2755 &adapter->ibmvnic_delayed_reset, 2756 IBMVNIC_RESET_DELAY); 2757 return; 2758 } 2759 2760 /* adapter is done with probe (i.e. state is never VNIC_PROBING now) */ 2761 if (adapter->state == VNIC_REMOVING) 2762 return; 2763 2764 /* ->rwi_list is stable now (no one else is removing entries) */ 2765 2766 /* ibmvnic_probe() may have purged the reset queue after we were 2767 * scheduled to process a reset so there may be no resets to process. 2768 * Before setting the ->resetting bit though, we have to make sure 2769 * that there is in fact a reset to process. Otherwise we may race 2770 * with ibmvnic_open() and end up leaving the vnic down: 2771 * 2772 * __ibmvnic_reset() ibmvnic_open() 2773 * ----------------- -------------- 2774 * 2775 * set ->resetting bit 2776 * find ->resetting bit is set 2777 * set ->state to VNIC_OPEN (i.e. 2778 * assume reset will open device) 2779 * return 2780 * find reset queue empty 2781 * return 2782 * 2783 * Neither performed vnic login/open and vnic stays down 2784 * 2785 * If we hold the lock and conditionally set the bit, either we 2786 * or ibmvnic_open() will complete the open.
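 * The rwi_lock critical section below does exactly that: the
 * ->resetting bit is set only while the reset queue is non-empty.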
2787 */ 2788 need_reset = false; 2789 spin_lock(&adapter->rwi_lock); 2790 if (!list_empty(&adapter->rwi_list)) { 2791 if (test_and_set_bit_lock(0, &adapter->resetting)) { 2792 queue_delayed_work(system_long_wq, 2793 &adapter->ibmvnic_delayed_reset, 2794 IBMVNIC_RESET_DELAY); 2795 } else { 2796 need_reset = true; 2797 } 2798 } 2799 spin_unlock(&adapter->rwi_lock); 2800 2801 if (!need_reset) 2802 return; 2803 2804 rwi = get_next_rwi(adapter); 2805 while (rwi) { 2806 spin_lock_irqsave(&adapter->state_lock, flags); 2807 2808 if (adapter->state == VNIC_REMOVING || 2809 adapter->state == VNIC_REMOVED) { 2810 spin_unlock_irqrestore(&adapter->state_lock, flags); 2811 kfree(rwi); 2812 rc = EBUSY; 2813 break; 2814 } 2815 2816 if (!saved_state) { 2817 reset_state = adapter->state; 2818 saved_state = true; 2819 } 2820 spin_unlock_irqrestore(&adapter->state_lock, flags); 2821 2822 if (rwi->reset_reason == VNIC_RESET_PASSIVE_INIT) { 2823 rtnl_lock(); 2824 rc = do_passive_init(adapter); 2825 rtnl_unlock(); 2826 if (!rc) 2827 netif_carrier_on(adapter->netdev); 2828 } else if (adapter->force_reset_recovery) { 2829 /* Since we are doing a hard reset now, clear the 2830 * failover_pending flag so we don't ignore any 2831 * future MOBILITY or other resets. 2832 */ 2833 adapter->failover_pending = false; 2834 2835 /* Transport event occurred during previous reset */ 2836 if (adapter->wait_for_reset) { 2837 /* Previous was CHANGE_PARAM; caller locked */ 2838 adapter->force_reset_recovery = false; 2839 rc = do_hard_reset(adapter, rwi, reset_state); 2840 } else { 2841 rtnl_lock(); 2842 adapter->force_reset_recovery = false; 2843 rc = do_hard_reset(adapter, rwi, reset_state); 2844 rtnl_unlock(); 2845 } 2846 if (rc) 2847 num_fails++; 2848 else 2849 num_fails = 0; 2850 2851 /* If auto-priority-failover is enabled we can get 2852 * back to back failovers during resets, resulting 2853 * in at least two failed resets (from high-priority 2854 * backing device to low-priority one and then back) 2855 * If resets continue to fail beyond that, give the 2856 * adapter some time to settle down before retrying. 2857 */ 2858 if (num_fails >= 3) { 2859 netdev_dbg(adapter->netdev, 2860 "[S:%s] Hard reset failed %d times, waiting 60 secs\n", 2861 adapter_state_to_string(adapter->state), 2862 num_fails); 2863 set_current_state(TASK_UNINTERRUPTIBLE); 2864 schedule_timeout(60 * HZ); 2865 } 2866 } else { 2867 rc = do_reset(adapter, rwi, reset_state); 2868 } 2869 tmprwi = rwi; 2870 adapter->last_reset_time = jiffies; 2871 2872 if (rc) 2873 netdev_dbg(adapter->netdev, "Reset failed, rc=%d\n", rc); 2874 2875 rwi = get_next_rwi(adapter); 2876 2877 /* 2878 * If there is another reset queued, free the previous rwi 2879 * and process the new reset even if previous reset failed 2880 * (the previous reset could have failed because of a fail 2881 * over for instance, so process the fail over). 2882 * 2883 * If there are no resets queued and the previous reset failed, 2884 * the adapter would be in an undefined state. So retry the 2885 * previous reset as a hard reset. 
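 * (In that case force_reset_recovery is set at the bottom of this
 * loop, so the retried rwi takes the do_hard_reset() path on the
 * next iteration.)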
2886 */ 2887 if (rwi) 2888 kfree(tmprwi); 2889 else if (rc) 2890 rwi = tmprwi; 2891 2892 if (rwi && (rwi->reset_reason == VNIC_RESET_FAILOVER || 2893 rwi->reset_reason == VNIC_RESET_MOBILITY || rc)) 2894 adapter->force_reset_recovery = true; 2895 } 2896 2897 if (adapter->wait_for_reset) { 2898 adapter->reset_done_rc = rc; 2899 complete(&adapter->reset_done); 2900 } 2901 2902 clear_bit_unlock(0, &adapter->resetting); 2903 2904 netdev_dbg(adapter->netdev, 2905 "[S:%s FRR:%d WFR:%d] Done processing resets\n", 2906 adapter_state_to_string(adapter->state), 2907 adapter->force_reset_recovery, 2908 adapter->wait_for_reset); 2909 } 2910 2911 static void __ibmvnic_delayed_reset(struct work_struct *work) 2912 { 2913 struct ibmvnic_adapter *adapter; 2914 2915 adapter = container_of(work, struct ibmvnic_adapter, 2916 ibmvnic_delayed_reset.work); 2917 __ibmvnic_reset(&adapter->ibmvnic_reset); 2918 } 2919 2920 static void flush_reset_queue(struct ibmvnic_adapter *adapter) 2921 { 2922 struct list_head *entry, *tmp_entry; 2923 2924 if (!list_empty(&adapter->rwi_list)) { 2925 list_for_each_safe(entry, tmp_entry, &adapter->rwi_list) { 2926 list_del(entry); 2927 kfree(list_entry(entry, struct ibmvnic_rwi, list)); 2928 } 2929 } 2930 } 2931 2932 static int ibmvnic_reset(struct ibmvnic_adapter *adapter, 2933 enum ibmvnic_reset_reason reason) 2934 { 2935 struct net_device *netdev = adapter->netdev; 2936 struct ibmvnic_rwi *rwi, *tmp; 2937 unsigned long flags; 2938 int ret; 2939 2940 spin_lock_irqsave(&adapter->rwi_lock, flags); 2941 2942 /* If failover is pending, don't schedule any other reset. 2943 * Instead let the failover complete. If there is already 2944 * a failover reset scheduled, we will detect and drop the 2945 * duplicate reset when walking the ->rwi_list below.
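 * A duplicate here is any queued rwi carrying the same reset_reason;
 * the scan below drops such requests with EBUSY.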
2946 */ 2947 if (adapter->state == VNIC_REMOVING || 2948 adapter->state == VNIC_REMOVED || 2949 (adapter->failover_pending && reason != VNIC_RESET_FAILOVER)) { 2950 ret = EBUSY; 2951 netdev_dbg(netdev, "Adapter removing or pending failover, skipping reset\n"); 2952 goto err; 2953 } 2954 2955 list_for_each_entry(tmp, &adapter->rwi_list, list) { 2956 if (tmp->reset_reason == reason) { 2957 netdev_dbg(netdev, "Skipping matching reset, reason=%s\n", 2958 reset_reason_to_string(reason)); 2959 ret = EBUSY; 2960 goto err; 2961 } 2962 } 2963 2964 rwi = kzalloc(sizeof(*rwi), GFP_ATOMIC); 2965 if (!rwi) { 2966 ret = ENOMEM; 2967 goto err; 2968 } 2969 /* if we just received a transport event, 2970 * flush reset queue and process this reset 2971 */ 2972 if (adapter->force_reset_recovery) 2973 flush_reset_queue(adapter); 2974 2975 rwi->reset_reason = reason; 2976 list_add_tail(&rwi->list, &adapter->rwi_list); 2977 netdev_dbg(adapter->netdev, "Scheduling reset (reason %s)\n", 2978 reset_reason_to_string(reason)); 2979 queue_work(system_long_wq, &adapter->ibmvnic_reset); 2980 2981 ret = 0; 2982 err: 2983 /* ibmvnic_close() below can block, so drop the lock first */ 2984 spin_unlock_irqrestore(&adapter->rwi_lock, flags); 2985 2986 if (ret == ENOMEM) 2987 ibmvnic_close(netdev); 2988 2989 return -ret; 2990 } 2991 2992 static void ibmvnic_tx_timeout(struct net_device *dev, unsigned int txqueue) 2993 { 2994 struct ibmvnic_adapter *adapter = netdev_priv(dev); 2995 2996 if (test_bit(0, &adapter->resetting)) { 2997 netdev_err(adapter->netdev, 2998 "Adapter is resetting, skip timeout reset\n"); 2999 return; 3000 } 3001 /* No queuing up reset until at least 5 seconds (default watchdog val) 3002 * after last reset 3003 */ 3004 if (time_before(jiffies, (adapter->last_reset_time + dev->watchdog_timeo))) { 3005 netdev_dbg(dev, "Not yet time to tx timeout.\n"); 3006 return; 3007 } 3008 ibmvnic_reset(adapter, VNIC_RESET_TIMEOUT); 3009 } 3010 3011 static void remove_buff_from_pool(struct ibmvnic_adapter *adapter, 3012 struct ibmvnic_rx_buff *rx_buff) 3013 { 3014 struct ibmvnic_rx_pool *pool = &adapter->rx_pool[rx_buff->pool_index]; 3015 3016 rx_buff->skb = NULL; 3017 3018 pool->free_map[pool->next_alloc] = (int)(rx_buff - pool->rx_buff); 3019 pool->next_alloc = (pool->next_alloc + 1) % pool->size; 3020 3021 atomic_dec(&pool->available); 3022 } 3023 3024 static int ibmvnic_poll(struct napi_struct *napi, int budget) 3025 { 3026 struct ibmvnic_sub_crq_queue *rx_scrq; 3027 struct ibmvnic_adapter *adapter; 3028 struct net_device *netdev; 3029 int frames_processed; 3030 int scrq_num; 3031 3032 netdev = napi->dev; 3033 adapter = netdev_priv(netdev); 3034 scrq_num = (int)(napi - adapter->napi); 3035 frames_processed = 0; 3036 rx_scrq = adapter->rx_scrq[scrq_num]; 3037 3038 restart_poll: 3039 while (frames_processed < budget) { 3040 struct sk_buff *skb; 3041 struct ibmvnic_rx_buff *rx_buff; 3042 union sub_crq *next; 3043 u32 length; 3044 u16 offset; 3045 u8 flags = 0; 3046 3047 if (unlikely(test_bit(0, &adapter->resetting) && 3048 adapter->reset_reason != VNIC_RESET_NON_FATAL)) { 3049 enable_scrq_irq(adapter, rx_scrq); 3050 napi_complete_done(napi, frames_processed); 3051 return frames_processed; 3052 } 3053 3054 if (!pending_scrq(adapter, rx_scrq)) 3055 break; 3056 next = ibmvnic_next_scrq(adapter, rx_scrq); 3057 rx_buff = (struct ibmvnic_rx_buff *) 3058 be64_to_cpu(next->rx_comp.correlator); 3059 /* do error checking */ 3060 if (next->rx_comp.rc) { 3061 netdev_dbg(netdev, "rx buffer returned with rc %x\n", 3062 
be16_to_cpu(next->rx_comp.rc)); 3063 /* free the entry */ 3064 next->rx_comp.first = 0; 3065 dev_kfree_skb_any(rx_buff->skb); 3066 remove_buff_from_pool(adapter, rx_buff); 3067 continue; 3068 } else if (!rx_buff->skb) { 3069 /* free the entry */ 3070 next->rx_comp.first = 0; 3071 remove_buff_from_pool(adapter, rx_buff); 3072 continue; 3073 } 3074 3075 length = be32_to_cpu(next->rx_comp.len); 3076 offset = be16_to_cpu(next->rx_comp.off_frame_data); 3077 flags = next->rx_comp.flags; 3078 skb = rx_buff->skb; 3079 /* load long_term_buff before copying to skb */ 3080 dma_rmb(); 3081 skb_copy_to_linear_data(skb, rx_buff->data + offset, 3082 length); 3083 3084 /* VLAN Header has been stripped by the system firmware and 3085 * needs to be inserted by the driver 3086 */ 3087 if (adapter->rx_vlan_header_insertion && 3088 (flags & IBMVNIC_VLAN_STRIPPED)) 3089 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), 3090 ntohs(next->rx_comp.vlan_tci)); 3091 3092 /* free the entry */ 3093 next->rx_comp.first = 0; 3094 remove_buff_from_pool(adapter, rx_buff); 3095 3096 skb_put(skb, length); 3097 skb->protocol = eth_type_trans(skb, netdev); 3098 skb_record_rx_queue(skb, scrq_num); 3099 3100 if (flags & IBMVNIC_IP_CHKSUM_GOOD && 3101 flags & IBMVNIC_TCP_UDP_CHKSUM_GOOD) { 3102 skb->ip_summed = CHECKSUM_UNNECESSARY; 3103 } 3104 3105 length = skb->len; 3106 napi_gro_receive(napi, skb); /* send it up */ 3107 netdev->stats.rx_packets++; 3108 netdev->stats.rx_bytes += length; 3109 adapter->rx_stats_buffers[scrq_num].packets++; 3110 adapter->rx_stats_buffers[scrq_num].bytes += length; 3111 frames_processed++; 3112 } 3113 3114 if (adapter->state != VNIC_CLOSING && 3115 ((atomic_read(&adapter->rx_pool[scrq_num].available) < 3116 adapter->req_rx_add_entries_per_subcrq / 2) || 3117 frames_processed < budget)) 3118 replenish_rx_pool(adapter, &adapter->rx_pool[scrq_num]); 3119 if (frames_processed < budget) { 3120 if (napi_complete_done(napi, frames_processed)) { 3121 enable_scrq_irq(adapter, rx_scrq); 3122 if (pending_scrq(adapter, rx_scrq)) { 3123 if (napi_reschedule(napi)) { 3124 disable_scrq_irq(adapter, rx_scrq); 3125 goto restart_poll; 3126 } 3127 } 3128 } 3129 } 3130 return frames_processed; 3131 } 3132 3133 static int wait_for_reset(struct ibmvnic_adapter *adapter) 3134 { 3135 int rc, ret; 3136 3137 adapter->fallback.mtu = adapter->req_mtu; 3138 adapter->fallback.rx_queues = adapter->req_rx_queues; 3139 adapter->fallback.tx_queues = adapter->req_tx_queues; 3140 adapter->fallback.rx_entries = adapter->req_rx_add_entries_per_subcrq; 3141 adapter->fallback.tx_entries = adapter->req_tx_entries_per_subcrq; 3142 3143 reinit_completion(&adapter->reset_done); 3144 adapter->wait_for_reset = true; 3145 rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM); 3146 3147 if (rc) { 3148 ret = rc; 3149 goto out; 3150 } 3151 rc = ibmvnic_wait_for_completion(adapter, &adapter->reset_done, 60000); 3152 if (rc) { 3153 ret = -ENODEV; 3154 goto out; 3155 } 3156 3157 ret = 0; 3158 if (adapter->reset_done_rc) { 3159 ret = -EIO; 3160 adapter->desired.mtu = adapter->fallback.mtu; 3161 adapter->desired.rx_queues = adapter->fallback.rx_queues; 3162 adapter->desired.tx_queues = adapter->fallback.tx_queues; 3163 adapter->desired.rx_entries = adapter->fallback.rx_entries; 3164 adapter->desired.tx_entries = adapter->fallback.tx_entries; 3165 3166 reinit_completion(&adapter->reset_done); 3167 adapter->wait_for_reset = true; 3168 rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM); 3169 if (rc) { 3170 ret = rc; 3171 goto out; 3172 } 3173 rc = 
ibmvnic_wait_for_completion(adapter, &adapter->reset_done, 3174 60000); 3175 if (rc) { 3176 ret = -ENODEV; 3177 goto out; 3178 } 3179 } 3180 out: 3181 adapter->wait_for_reset = false; 3182 3183 return ret; 3184 } 3185 3186 static int ibmvnic_change_mtu(struct net_device *netdev, int new_mtu) 3187 { 3188 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 3189 3190 adapter->desired.mtu = new_mtu + ETH_HLEN; 3191 3192 return wait_for_reset(adapter); 3193 } 3194 3195 static netdev_features_t ibmvnic_features_check(struct sk_buff *skb, 3196 struct net_device *dev, 3197 netdev_features_t features) 3198 { 3199 /* Some backing hardware adapters cannot 3200 * handle packets with an MSS less than 224 3201 * or with only one segment. 3202 */ 3203 if (skb_is_gso(skb)) { 3204 if (skb_shinfo(skb)->gso_size < 224 || 3205 skb_shinfo(skb)->gso_segs == 1) 3206 features &= ~NETIF_F_GSO_MASK; 3207 } 3208 3209 return features; 3210 } 3211 3212 static const struct net_device_ops ibmvnic_netdev_ops = { 3213 .ndo_open = ibmvnic_open, 3214 .ndo_stop = ibmvnic_close, 3215 .ndo_start_xmit = ibmvnic_xmit, 3216 .ndo_set_rx_mode = ibmvnic_set_multi, 3217 .ndo_set_mac_address = ibmvnic_set_mac, 3218 .ndo_validate_addr = eth_validate_addr, 3219 .ndo_tx_timeout = ibmvnic_tx_timeout, 3220 .ndo_change_mtu = ibmvnic_change_mtu, 3221 .ndo_features_check = ibmvnic_features_check, 3222 }; 3223 3224 /* ethtool functions */ 3225 3226 static int ibmvnic_get_link_ksettings(struct net_device *netdev, 3227 struct ethtool_link_ksettings *cmd) 3228 { 3229 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 3230 int rc; 3231 3232 rc = send_query_phys_parms(adapter); 3233 if (rc) { 3234 adapter->speed = SPEED_UNKNOWN; 3235 adapter->duplex = DUPLEX_UNKNOWN; 3236 } 3237 cmd->base.speed = adapter->speed; 3238 cmd->base.duplex = adapter->duplex; 3239 cmd->base.port = PORT_FIBRE; 3240 cmd->base.phy_address = 0; 3241 cmd->base.autoneg = AUTONEG_ENABLE; 3242 3243 return 0; 3244 } 3245 3246 static void ibmvnic_get_drvinfo(struct net_device *netdev, 3247 struct ethtool_drvinfo *info) 3248 { 3249 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 3250 3251 strscpy(info->driver, ibmvnic_driver_name, sizeof(info->driver)); 3252 strscpy(info->version, IBMVNIC_DRIVER_VERSION, sizeof(info->version)); 3253 strscpy(info->fw_version, adapter->fw_version, 3254 sizeof(info->fw_version)); 3255 } 3256 3257 static u32 ibmvnic_get_msglevel(struct net_device *netdev) 3258 { 3259 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 3260 3261 return adapter->msg_enable; 3262 } 3263 3264 static void ibmvnic_set_msglevel(struct net_device *netdev, u32 data) 3265 { 3266 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 3267 3268 adapter->msg_enable = data; 3269 } 3270 3271 static u32 ibmvnic_get_link(struct net_device *netdev) 3272 { 3273 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 3274 3275 /* Don't need to send a query because we request a logical link up at 3276 * init and then we wait for link state indications 3277 */ 3278 return adapter->logical_link_state; 3279 } 3280 3281 static void ibmvnic_get_ringparam(struct net_device *netdev, 3282 struct ethtool_ringparam *ring, 3283 struct kernel_ethtool_ringparam *kernel_ring, 3284 struct netlink_ext_ack *extack) 3285 { 3286 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 3287 3288 if (adapter->priv_flags & IBMVNIC_USE_SERVER_MAXES) { 3289 ring->rx_max_pending = adapter->max_rx_add_entries_per_subcrq; 3290 ring->tx_max_pending = adapter->max_tx_entries_per_subcrq; 3291 } else { 3292
ring->rx_max_pending = IBMVNIC_MAX_QUEUE_SZ; 3293 ring->tx_max_pending = IBMVNIC_MAX_QUEUE_SZ; 3294 } 3295 ring->rx_mini_max_pending = 0; 3296 ring->rx_jumbo_max_pending = 0; 3297 ring->rx_pending = adapter->req_rx_add_entries_per_subcrq; 3298 ring->tx_pending = adapter->req_tx_entries_per_subcrq; 3299 ring->rx_mini_pending = 0; 3300 ring->rx_jumbo_pending = 0; 3301 } 3302 3303 static int ibmvnic_set_ringparam(struct net_device *netdev, 3304 struct ethtool_ringparam *ring, 3305 struct kernel_ethtool_ringparam *kernel_ring, 3306 struct netlink_ext_ack *extack) 3307 { 3308 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 3309 int ret; 3310 3311 ret = 0; 3312 adapter->desired.rx_entries = ring->rx_pending; 3313 adapter->desired.tx_entries = ring->tx_pending; 3314 3315 ret = wait_for_reset(adapter); 3316 3317 if (!ret && 3318 (adapter->req_rx_add_entries_per_subcrq != ring->rx_pending || 3319 adapter->req_tx_entries_per_subcrq != ring->tx_pending)) 3320 netdev_info(netdev, 3321 "Could not match full ringsize request. Requested: RX %d, TX %d; Allowed: RX %llu, TX %llu\n", 3322 ring->rx_pending, ring->tx_pending, 3323 adapter->req_rx_add_entries_per_subcrq, 3324 adapter->req_tx_entries_per_subcrq); 3325 return ret; 3326 } 3327 3328 static void ibmvnic_get_channels(struct net_device *netdev, 3329 struct ethtool_channels *channels) 3330 { 3331 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 3332 3333 if (adapter->priv_flags & IBMVNIC_USE_SERVER_MAXES) { 3334 channels->max_rx = adapter->max_rx_queues; 3335 channels->max_tx = adapter->max_tx_queues; 3336 } else { 3337 channels->max_rx = IBMVNIC_MAX_QUEUES; 3338 channels->max_tx = IBMVNIC_MAX_QUEUES; 3339 } 3340 3341 channels->max_other = 0; 3342 channels->max_combined = 0; 3343 channels->rx_count = adapter->req_rx_queues; 3344 channels->tx_count = adapter->req_tx_queues; 3345 channels->other_count = 0; 3346 channels->combined_count = 0; 3347 } 3348 3349 static int ibmvnic_set_channels(struct net_device *netdev, 3350 struct ethtool_channels *channels) 3351 { 3352 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 3353 int ret; 3354 3355 ret = 0; 3356 adapter->desired.rx_queues = channels->rx_count; 3357 adapter->desired.tx_queues = channels->tx_count; 3358 3359 ret = wait_for_reset(adapter); 3360 3361 if (!ret && 3362 (adapter->req_rx_queues != channels->rx_count || 3363 adapter->req_tx_queues != channels->tx_count)) 3364 netdev_info(netdev, 3365 "Could not match full channels request. 
Requested: RX %d, TX %d; Allowed: RX %llu, TX %llu\n", 3366 channels->rx_count, channels->tx_count, 3367 adapter->req_rx_queues, adapter->req_tx_queues); 3368 return ret; 3369 } 3370 3371 static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data) 3372 { 3373 struct ibmvnic_adapter *adapter = netdev_priv(dev); 3374 int i; 3375 3376 switch (stringset) { 3377 case ETH_SS_STATS: 3378 for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); 3379 i++, data += ETH_GSTRING_LEN) 3380 memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN); 3381 3382 for (i = 0; i < adapter->req_tx_queues; i++) { 3383 snprintf(data, ETH_GSTRING_LEN, "tx%d_packets", i); 3384 data += ETH_GSTRING_LEN; 3385 3386 snprintf(data, ETH_GSTRING_LEN, "tx%d_bytes", i); 3387 data += ETH_GSTRING_LEN; 3388 3389 snprintf(data, ETH_GSTRING_LEN, 3390 "tx%d_dropped_packets", i); 3391 data += ETH_GSTRING_LEN; 3392 } 3393 3394 for (i = 0; i < adapter->req_rx_queues; i++) { 3395 snprintf(data, ETH_GSTRING_LEN, "rx%d_packets", i); 3396 data += ETH_GSTRING_LEN; 3397 3398 snprintf(data, ETH_GSTRING_LEN, "rx%d_bytes", i); 3399 data += ETH_GSTRING_LEN; 3400 3401 snprintf(data, ETH_GSTRING_LEN, "rx%d_interrupts", i); 3402 data += ETH_GSTRING_LEN; 3403 } 3404 break; 3405 3406 case ETH_SS_PRIV_FLAGS: 3407 for (i = 0; i < ARRAY_SIZE(ibmvnic_priv_flags); i++) 3408 strcpy(data + i * ETH_GSTRING_LEN, 3409 ibmvnic_priv_flags[i]); 3410 break; 3411 default: 3412 return; 3413 } 3414 } 3415 3416 static int ibmvnic_get_sset_count(struct net_device *dev, int sset) 3417 { 3418 struct ibmvnic_adapter *adapter = netdev_priv(dev); 3419 3420 switch (sset) { 3421 case ETH_SS_STATS: 3422 return ARRAY_SIZE(ibmvnic_stats) + 3423 adapter->req_tx_queues * NUM_TX_STATS + 3424 adapter->req_rx_queues * NUM_RX_STATS; 3425 case ETH_SS_PRIV_FLAGS: 3426 return ARRAY_SIZE(ibmvnic_priv_flags); 3427 default: 3428 return -EOPNOTSUPP; 3429 } 3430 } 3431 3432 static void ibmvnic_get_ethtool_stats(struct net_device *dev, 3433 struct ethtool_stats *stats, u64 *data) 3434 { 3435 struct ibmvnic_adapter *adapter = netdev_priv(dev); 3436 union ibmvnic_crq crq; 3437 int i, j; 3438 int rc; 3439 3440 memset(&crq, 0, sizeof(crq)); 3441 crq.request_statistics.first = IBMVNIC_CRQ_CMD; 3442 crq.request_statistics.cmd = REQUEST_STATISTICS; 3443 crq.request_statistics.ioba = cpu_to_be32(adapter->stats_token); 3444 crq.request_statistics.len = 3445 cpu_to_be32(sizeof(struct ibmvnic_statistics)); 3446 3447 /* Wait for data to be written */ 3448 reinit_completion(&adapter->stats_done); 3449 rc = ibmvnic_send_crq(adapter, &crq); 3450 if (rc) 3451 return; 3452 rc = ibmvnic_wait_for_completion(adapter, &adapter->stats_done, 10000); 3453 if (rc) 3454 return; 3455 3456 for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++) 3457 data[i] = be64_to_cpu(IBMVNIC_GET_STAT 3458 (adapter, ibmvnic_stats[i].offset)); 3459 3460 for (j = 0; j < adapter->req_tx_queues; j++) { 3461 data[i] = adapter->tx_stats_buffers[j].packets; 3462 i++; 3463 data[i] = adapter->tx_stats_buffers[j].bytes; 3464 i++; 3465 data[i] = adapter->tx_stats_buffers[j].dropped_packets; 3466 i++; 3467 } 3468 3469 for (j = 0; j < adapter->req_rx_queues; j++) { 3470 data[i] = adapter->rx_stats_buffers[j].packets; 3471 i++; 3472 data[i] = adapter->rx_stats_buffers[j].bytes; 3473 i++; 3474 data[i] = adapter->rx_stats_buffers[j].interrupts; 3475 i++; 3476 } 3477 } 3478 3479 static u32 ibmvnic_get_priv_flags(struct net_device *netdev) 3480 { 3481 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 3482 3483 return adapter->priv_flags; 3484 } 3485 
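/* IBMVNIC_USE_SERVER_MAXES selects whether ethtool reports the
 * server-provided maxima or the driver-wide IBMVNIC_MAX_* constants in
 * ibmvnic_get_ringparam() and ibmvnic_get_channels() above.  A minimal
 * usage sketch from userspace, assuming the flag string advertised via
 * ETH_SS_PRIV_FLAGS in ibmvnic_priv_flags is "use-server-maxes" (check
 * `ethtool --show-priv-flags <dev>` for the exact name):
 *
 *   ethtool --show-priv-flags eth0
 *   ethtool --set-priv-flags eth0 use-server-maxes on
 *   ethtool -g eth0    # ring maxima now reflect the server limits
 */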
3486 static int ibmvnic_set_priv_flags(struct net_device *netdev, u32 flags) 3487 { 3488 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 3489 bool which_maxes = !!(flags & IBMVNIC_USE_SERVER_MAXES); 3490 3491 if (which_maxes) 3492 adapter->priv_flags |= IBMVNIC_USE_SERVER_MAXES; 3493 else 3494 adapter->priv_flags &= ~IBMVNIC_USE_SERVER_MAXES; 3495 3496 return 0; 3497 } 3498 3499 static const struct ethtool_ops ibmvnic_ethtool_ops = { 3500 .get_drvinfo = ibmvnic_get_drvinfo, 3501 .get_msglevel = ibmvnic_get_msglevel, 3502 .set_msglevel = ibmvnic_set_msglevel, 3503 .get_link = ibmvnic_get_link, 3504 .get_ringparam = ibmvnic_get_ringparam, 3505 .set_ringparam = ibmvnic_set_ringparam, 3506 .get_channels = ibmvnic_get_channels, 3507 .set_channels = ibmvnic_set_channels, 3508 .get_strings = ibmvnic_get_strings, 3509 .get_sset_count = ibmvnic_get_sset_count, 3510 .get_ethtool_stats = ibmvnic_get_ethtool_stats, 3511 .get_link_ksettings = ibmvnic_get_link_ksettings, 3512 .get_priv_flags = ibmvnic_get_priv_flags, 3513 .set_priv_flags = ibmvnic_set_priv_flags, 3514 }; 3515 3516 /* Routines for managing CRQs/sCRQs */ 3517 3518 static int reset_one_sub_crq_queue(struct ibmvnic_adapter *adapter, 3519 struct ibmvnic_sub_crq_queue *scrq) 3520 { 3521 int rc; 3522 3523 if (!scrq) { 3524 netdev_dbg(adapter->netdev, "Invalid scrq reset.\n"); 3525 return -EINVAL; 3526 } 3527 3528 if (scrq->irq) { 3529 free_irq(scrq->irq, scrq); 3530 irq_dispose_mapping(scrq->irq); 3531 scrq->irq = 0; 3532 } 3533 3534 if (scrq->msgs) { 3535 memset(scrq->msgs, 0, 4 * PAGE_SIZE); 3536 atomic_set(&scrq->used, 0); 3537 scrq->cur = 0; 3538 scrq->ind_buf.index = 0; 3539 } else { 3540 netdev_dbg(adapter->netdev, "Invalid scrq reset\n"); 3541 return -EINVAL; 3542 } 3543 3544 rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token, 3545 4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq); 3546 return rc; 3547 } 3548 3549 static int reset_sub_crq_queues(struct ibmvnic_adapter *adapter) 3550 { 3551 int i, rc; 3552 3553 if (!adapter->tx_scrq || !adapter->rx_scrq) 3554 return -EINVAL; 3555 3556 for (i = 0; i < adapter->req_tx_queues; i++) { 3557 netdev_dbg(adapter->netdev, "Re-setting tx_scrq[%d]\n", i); 3558 rc = reset_one_sub_crq_queue(adapter, adapter->tx_scrq[i]); 3559 if (rc) 3560 return rc; 3561 } 3562 3563 for (i = 0; i < adapter->req_rx_queues; i++) { 3564 netdev_dbg(adapter->netdev, "Re-setting rx_scrq[%d]\n", i); 3565 rc = reset_one_sub_crq_queue(adapter, adapter->rx_scrq[i]); 3566 if (rc) 3567 return rc; 3568 } 3569 3570 return rc; 3571 } 3572 3573 static void release_sub_crq_queue(struct ibmvnic_adapter *adapter, 3574 struct ibmvnic_sub_crq_queue *scrq, 3575 bool do_h_free) 3576 { 3577 struct device *dev = &adapter->vdev->dev; 3578 long rc; 3579 3580 netdev_dbg(adapter->netdev, "Releasing sub-CRQ\n"); 3581 3582 if (do_h_free) { 3583 /* Close the sub-crqs */ 3584 do { 3585 rc = plpar_hcall_norets(H_FREE_SUB_CRQ, 3586 adapter->vdev->unit_address, 3587 scrq->crq_num); 3588 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc)); 3589 3590 if (rc) { 3591 netdev_err(adapter->netdev, 3592 "Failed to release sub-CRQ %16lx, rc = %ld\n", 3593 scrq->crq_num, rc); 3594 } 3595 } 3596 3597 dma_free_coherent(dev, 3598 IBMVNIC_IND_ARR_SZ, 3599 scrq->ind_buf.indir_arr, 3600 scrq->ind_buf.indir_dma); 3601 3602 dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE, 3603 DMA_BIDIRECTIONAL); 3604 free_pages((unsigned long)scrq->msgs, 2); 3605 kfree(scrq); 3606 } 3607 3608 static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter 
*adapter) 3610 { 3611 struct device *dev = &adapter->vdev->dev; 3612 struct ibmvnic_sub_crq_queue *scrq; 3613 int rc; 3614 3615 scrq = kzalloc(sizeof(*scrq), GFP_KERNEL); 3616 if (!scrq) 3617 return NULL; 3618 3619 scrq->msgs = 3620 (union sub_crq *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 2); 3621 if (!scrq->msgs) { 3622 dev_warn(dev, "Couldn't allocate crq queue messages page\n"); 3623 goto zero_page_failed; 3624 } 3625 3626 scrq->msg_token = dma_map_single(dev, scrq->msgs, 4 * PAGE_SIZE, 3627 DMA_BIDIRECTIONAL); 3628 if (dma_mapping_error(dev, scrq->msg_token)) { 3629 dev_warn(dev, "Couldn't map crq queue messages page\n"); 3630 goto map_failed; 3631 } 3632 3633 rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token, 3634 4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq); 3635 3636 if (rc == H_RESOURCE) 3637 rc = ibmvnic_reset_crq(adapter); 3638 3639 if (rc == H_CLOSED) { 3640 dev_warn(dev, "Partner adapter not ready, waiting.\n"); 3641 } else if (rc) { 3642 dev_warn(dev, "Error %d registering sub-crq\n", rc); 3643 goto reg_failed; 3644 } 3645 3646 scrq->adapter = adapter; 3647 scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs); 3648 scrq->ind_buf.index = 0; 3649 3650 scrq->ind_buf.indir_arr = 3651 dma_alloc_coherent(dev, 3652 IBMVNIC_IND_ARR_SZ, 3653 &scrq->ind_buf.indir_dma, 3654 GFP_KERNEL); 3655 3656 if (!scrq->ind_buf.indir_arr) 3657 goto indir_failed; 3658 3659 spin_lock_init(&scrq->lock); 3660 3661 netdev_dbg(adapter->netdev, 3662 "sub-crq initialized, num %lx, hw_irq=%lx, irq=%x\n", 3663 scrq->crq_num, scrq->hw_irq, scrq->irq); 3664 3665 return scrq; 3666 3667 indir_failed: 3668 do { 3669 rc = plpar_hcall_norets(H_FREE_SUB_CRQ, 3670 adapter->vdev->unit_address, 3671 scrq->crq_num); 3672 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc)); 3673 reg_failed: 3674 dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE, 3675 DMA_BIDIRECTIONAL); 3676 map_failed: 3677 free_pages((unsigned long)scrq->msgs, 2); 3678 zero_page_failed: 3679 kfree(scrq); 3680 3681 return NULL; 3682 } 3683 3684 static void release_sub_crqs(struct ibmvnic_adapter *adapter, bool do_h_free) 3685 { 3686 int i; 3687 3688 if (adapter->tx_scrq) { 3689 for (i = 0; i < adapter->num_active_tx_scrqs; i++) { 3690 if (!adapter->tx_scrq[i]) 3691 continue; 3692 3693 netdev_dbg(adapter->netdev, "Releasing tx_scrq[%d]\n", 3694 i); 3695 ibmvnic_tx_scrq_clean_buffer(adapter, adapter->tx_scrq[i]); 3696 if (adapter->tx_scrq[i]->irq) { 3697 free_irq(adapter->tx_scrq[i]->irq, 3698 adapter->tx_scrq[i]); 3699 irq_dispose_mapping(adapter->tx_scrq[i]->irq); 3700 adapter->tx_scrq[i]->irq = 0; 3701 } 3702 3703 release_sub_crq_queue(adapter, adapter->tx_scrq[i], 3704 do_h_free); 3705 } 3706 3707 kfree(adapter->tx_scrq); 3708 adapter->tx_scrq = NULL; 3709 adapter->num_active_tx_scrqs = 0; 3710 } 3711 3712 if (adapter->rx_scrq) { 3713 for (i = 0; i < adapter->num_active_rx_scrqs; i++) { 3714 if (!adapter->rx_scrq[i]) 3715 continue; 3716 3717 netdev_dbg(adapter->netdev, "Releasing rx_scrq[%d]\n", 3718 i); 3719 if (adapter->rx_scrq[i]->irq) { 3720 free_irq(adapter->rx_scrq[i]->irq, 3721 adapter->rx_scrq[i]); 3722 irq_dispose_mapping(adapter->rx_scrq[i]->irq); 3723 adapter->rx_scrq[i]->irq = 0; 3724 } 3725 3726 release_sub_crq_queue(adapter, adapter->rx_scrq[i], 3727 do_h_free); 3728 } 3729 3730 kfree(adapter->rx_scrq); 3731 adapter->rx_scrq = NULL; 3732 adapter->num_active_rx_scrqs = 0; 3733 } 3734 } 3735 3736 static int disable_scrq_irq(struct ibmvnic_adapter *adapter, 3737 struct ibmvnic_sub_crq_queue *scrq) 3738 { 3739 struct device
*dev = &adapter->vdev->dev; 3740 unsigned long rc; 3741 3742 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address, 3743 H_DISABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0); 3744 if (rc) 3745 dev_err(dev, "Couldn't disable scrq irq 0x%lx. rc=%ld\n", 3746 scrq->hw_irq, rc); 3747 return rc; 3748 } 3749 3750 /* We can not use the IRQ chip EOI handler because that has the 3751 * unintended effect of changing the interrupt priority. 3752 */ 3753 static void ibmvnic_xics_eoi(struct device *dev, struct ibmvnic_sub_crq_queue *scrq) 3754 { 3755 u64 val = 0xff000000 | scrq->hw_irq; 3756 unsigned long rc; 3757 3758 rc = plpar_hcall_norets(H_EOI, val); 3759 if (rc) 3760 dev_err(dev, "H_EOI FAILED irq 0x%llx. rc=%ld\n", val, rc); 3761 } 3762 3763 /* Due to a firmware bug, the hypervisor can send an interrupt to a 3764 * transmit or receive queue just prior to a partition migration. 3765 * Force an EOI after migration. 3766 */ 3767 static void ibmvnic_clear_pending_interrupt(struct device *dev, 3768 struct ibmvnic_sub_crq_queue *scrq) 3769 { 3770 if (!xive_enabled()) 3771 ibmvnic_xics_eoi(dev, scrq); 3772 } 3773 3774 static int enable_scrq_irq(struct ibmvnic_adapter *adapter, 3775 struct ibmvnic_sub_crq_queue *scrq) 3776 { 3777 struct device *dev = &adapter->vdev->dev; 3778 unsigned long rc; 3779 3780 if (scrq->hw_irq > 0x100000000ULL) { 3781 dev_err(dev, "bad hw_irq = %lx\n", scrq->hw_irq); 3782 return 1; 3783 } 3784 3785 if (test_bit(0, &adapter->resetting) && 3786 adapter->reset_reason == VNIC_RESET_MOBILITY) { 3787 ibmvnic_clear_pending_interrupt(dev, scrq); 3788 } 3789 3790 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address, 3791 H_ENABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0); 3792 if (rc) 3793 dev_err(dev, "Couldn't enable scrq irq 0x%lx. rc=%ld\n", 3794 scrq->hw_irq, rc); 3795 return rc; 3796 } 3797 3798 static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter, 3799 struct ibmvnic_sub_crq_queue *scrq) 3800 { 3801 struct device *dev = &adapter->vdev->dev; 3802 struct ibmvnic_tx_pool *tx_pool; 3803 struct ibmvnic_tx_buff *txbuff; 3804 struct netdev_queue *txq; 3805 union sub_crq *next; 3806 int index; 3807 int i; 3808 3809 restart_loop: 3810 while (pending_scrq(adapter, scrq)) { 3811 unsigned int pool = scrq->pool_index; 3812 int num_entries = 0; 3813 int total_bytes = 0; 3814 int num_packets = 0; 3815 3816 next = ibmvnic_next_scrq(adapter, scrq); 3817 for (i = 0; i < next->tx_comp.num_comps; i++) { 3818 index = be32_to_cpu(next->tx_comp.correlators[i]); 3819 if (index & IBMVNIC_TSO_POOL_MASK) { 3820 tx_pool = &adapter->tso_pool[pool]; 3821 index &= ~IBMVNIC_TSO_POOL_MASK; 3822 } else { 3823 tx_pool = &adapter->tx_pool[pool]; 3824 } 3825 3826 txbuff = &tx_pool->tx_buff[index]; 3827 num_packets++; 3828 num_entries += txbuff->num_entries; 3829 if (txbuff->skb) { 3830 total_bytes += txbuff->skb->len; 3831 if (next->tx_comp.rcs[i]) { 3832 dev_err(dev, "tx error %x\n", 3833 next->tx_comp.rcs[i]); 3834 dev_kfree_skb_irq(txbuff->skb); 3835 } else { 3836 dev_consume_skb_irq(txbuff->skb); 3837 } 3838 txbuff->skb = NULL; 3839 } else { 3840 netdev_warn(adapter->netdev, 3841 "TX completion received with NULL socket buffer\n"); 3842 } 3843 tx_pool->free_map[tx_pool->producer_index] = index; 3844 tx_pool->producer_index = 3845 (tx_pool->producer_index + 1) % 3846 tx_pool->num_buffers; 3847 } 3848 /* remove tx_comp scrq*/ 3849 next->tx_comp.first = 0; 3850 3851 txq = netdev_get_tx_queue(adapter->netdev, scrq->pool_index); 3852 netdev_tx_completed_queue(txq, num_packets, total_bytes); 3853 3854 if 
(atomic_sub_return(num_entries, &scrq->used) <= 3855 (adapter->req_tx_entries_per_subcrq / 2) && 3856 __netif_subqueue_stopped(adapter->netdev, 3857 scrq->pool_index)) { 3858 rcu_read_lock(); 3859 if (adapter->tx_queues_active) { 3860 netif_wake_subqueue(adapter->netdev, 3861 scrq->pool_index); 3862 netdev_dbg(adapter->netdev, 3863 "Started queue %d\n", 3864 scrq->pool_index); 3865 } 3866 rcu_read_unlock(); 3867 } 3868 } 3869 3870 enable_scrq_irq(adapter, scrq); 3871 3872 if (pending_scrq(adapter, scrq)) { 3873 disable_scrq_irq(adapter, scrq); 3874 goto restart_loop; 3875 } 3876 3877 return 0; 3878 } 3879 3880 static irqreturn_t ibmvnic_interrupt_tx(int irq, void *instance) 3881 { 3882 struct ibmvnic_sub_crq_queue *scrq = instance; 3883 struct ibmvnic_adapter *adapter = scrq->adapter; 3884 3885 disable_scrq_irq(adapter, scrq); 3886 ibmvnic_complete_tx(adapter, scrq); 3887 3888 return IRQ_HANDLED; 3889 } 3890 3891 static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance) 3892 { 3893 struct ibmvnic_sub_crq_queue *scrq = instance; 3894 struct ibmvnic_adapter *adapter = scrq->adapter; 3895 3896 /* When booting a kdump kernel we can hit pending interrupts 3897 * prior to completing driver initialization. 3898 */ 3899 if (unlikely(adapter->state != VNIC_OPEN)) 3900 return IRQ_NONE; 3901 3902 adapter->rx_stats_buffers[scrq->scrq_num].interrupts++; 3903 3904 if (napi_schedule_prep(&adapter->napi[scrq->scrq_num])) { 3905 disable_scrq_irq(adapter, scrq); 3906 __napi_schedule(&adapter->napi[scrq->scrq_num]); 3907 } 3908 3909 return IRQ_HANDLED; 3910 } 3911 3912 static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter) 3913 { 3914 struct device *dev = &adapter->vdev->dev; 3915 struct ibmvnic_sub_crq_queue *scrq; 3916 int i = 0, j = 0; 3917 int rc = 0; 3918 3919 for (i = 0; i < adapter->req_tx_queues; i++) { 3920 netdev_dbg(adapter->netdev, "Initializing tx_scrq[%d] irq\n", 3921 i); 3922 scrq = adapter->tx_scrq[i]; 3923 scrq->irq = irq_create_mapping(NULL, scrq->hw_irq); 3924 3925 if (!scrq->irq) { 3926 rc = -EINVAL; 3927 dev_err(dev, "Error mapping irq\n"); 3928 goto req_tx_irq_failed; 3929 } 3930 3931 snprintf(scrq->name, sizeof(scrq->name), "ibmvnic-%x-tx%d", 3932 adapter->vdev->unit_address, i); 3933 rc = request_irq(scrq->irq, ibmvnic_interrupt_tx, 3934 0, scrq->name, scrq); 3935 3936 if (rc) { 3937 dev_err(dev, "Couldn't register tx irq 0x%x. rc=%d\n", 3938 scrq->irq, rc); 3939 irq_dispose_mapping(scrq->irq); 3940 goto req_tx_irq_failed; 3941 } 3942 } 3943 3944 for (i = 0; i < adapter->req_rx_queues; i++) { 3945 netdev_dbg(adapter->netdev, "Initializing rx_scrq[%d] irq\n", 3946 i); 3947 scrq = adapter->rx_scrq[i]; 3948 scrq->irq = irq_create_mapping(NULL, scrq->hw_irq); 3949 if (!scrq->irq) { 3950 rc = -EINVAL; 3951 dev_err(dev, "Error mapping irq\n"); 3952 goto req_rx_irq_failed; 3953 } 3954 snprintf(scrq->name, sizeof(scrq->name), "ibmvnic-%x-rx%d", 3955 adapter->vdev->unit_address, i); 3956 rc = request_irq(scrq->irq, ibmvnic_interrupt_rx, 3957 0, scrq->name, scrq); 3958 if (rc) { 3959 dev_err(dev, "Couldn't register rx irq 0x%x. 
rc=%d\n", 3960 scrq->irq, rc); 3961 irq_dispose_mapping(scrq->irq); 3962 goto req_rx_irq_failed; 3963 } 3964 } 3965 return rc; 3966 3967 req_rx_irq_failed: 3968 for (j = 0; j < i; j++) { 3969 free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]); 3970 irq_dispose_mapping(adapter->rx_scrq[j]->irq); 3971 } 3972 i = adapter->req_tx_queues; 3973 req_tx_irq_failed: 3974 for (j = 0; j < i; j++) { 3975 free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]); 3976 irq_dispose_mapping(adapter->tx_scrq[j]->irq); 3977 } 3978 release_sub_crqs(adapter, 1); 3979 return rc; 3980 } 3981 3982 static int init_sub_crqs(struct ibmvnic_adapter *adapter) 3983 { 3984 struct device *dev = &adapter->vdev->dev; 3985 struct ibmvnic_sub_crq_queue **allqueues; 3986 int registered_queues = 0; 3987 int total_queues; 3988 int more = 0; 3989 int i; 3990 3991 total_queues = adapter->req_tx_queues + adapter->req_rx_queues; 3992 3993 allqueues = kcalloc(total_queues, sizeof(*allqueues), GFP_KERNEL); 3994 if (!allqueues) 3995 return -ENOMEM; 3996 3997 for (i = 0; i < total_queues; i++) { 3998 allqueues[i] = init_sub_crq_queue(adapter); 3999 if (!allqueues[i]) { 4000 dev_warn(dev, "Couldn't allocate all sub-crqs\n"); 4001 break; 4002 } 4003 registered_queues++; 4004 } 4005 4006 /* Make sure we were able to register the minimum number of queues */ 4007 if (registered_queues < 4008 adapter->min_tx_queues + adapter->min_rx_queues) { 4009 dev_err(dev, "Fatal: Couldn't init min number of sub-crqs\n"); 4010 goto tx_failed; 4011 } 4012 4013 /* Distribute the shortfall of allocated queues between rx and tx */ 4014 for (i = 0; i < total_queues - registered_queues + more; i++) { 4015 netdev_dbg(adapter->netdev, "Reducing number of queues\n"); 4016 switch (i % 3) { 4017 case 0: 4018 if (adapter->req_rx_queues > adapter->min_rx_queues) 4019 adapter->req_rx_queues--; 4020 else 4021 more++; 4022 break; 4023 case 1: 4024 if (adapter->req_tx_queues > adapter->min_tx_queues) 4025 adapter->req_tx_queues--; 4026 else 4027 more++; 4028 break; 4029 } 4030 } 4031 4032 adapter->tx_scrq = kcalloc(adapter->req_tx_queues, 4033 sizeof(*adapter->tx_scrq), GFP_KERNEL); 4034 if (!adapter->tx_scrq) 4035 goto tx_failed; 4036 4037 for (i = 0; i < adapter->req_tx_queues; i++) { 4038 adapter->tx_scrq[i] = allqueues[i]; 4039 adapter->tx_scrq[i]->pool_index = i; 4040 adapter->num_active_tx_scrqs++; 4041 } 4042 4043 adapter->rx_scrq = kcalloc(adapter->req_rx_queues, 4044 sizeof(*adapter->rx_scrq), GFP_KERNEL); 4045 if (!adapter->rx_scrq) 4046 goto rx_failed; 4047 4048 for (i = 0; i < adapter->req_rx_queues; i++) { 4049 adapter->rx_scrq[i] = allqueues[i + adapter->req_tx_queues]; 4050 adapter->rx_scrq[i]->scrq_num = i; 4051 adapter->num_active_rx_scrqs++; 4052 } 4053 4054 kfree(allqueues); 4055 return 0; 4056 4057 rx_failed: 4058 kfree(adapter->tx_scrq); 4059 adapter->tx_scrq = NULL; 4060 tx_failed: 4061 for (i = 0; i < registered_queues; i++) 4062 release_sub_crq_queue(adapter, allqueues[i], 1); 4063 kfree(allqueues); 4064 return -ENOMEM; 4065 } 4066 4067 static void send_request_cap(struct ibmvnic_adapter *adapter, int retry) 4068 { 4069 struct device *dev = &adapter->vdev->dev; 4070 union ibmvnic_crq crq; 4071 int max_entries; 4072 int cap_reqs; 4073 4074 /* We send out 6 or 7 REQUEST_CAPABILITY CRQs below (depending on 4075 * the PROMISC flag). Initialize this count upfront. When the tasklet 4076 * receives a response to all of these, it will send the next protocol 4077 * message (QUERY_IP_OFFLOAD).
4078 */ 4079 if (!(adapter->netdev->flags & IFF_PROMISC) || 4080 adapter->promisc_supported) 4081 cap_reqs = 7; 4082 else 4083 cap_reqs = 6; 4084 4085 if (!retry) { 4086 /* Sub-CRQ entries are 32 bytes long */ 4087 int entries_page = 4 * PAGE_SIZE / (sizeof(u64) * 4); 4088 4089 atomic_set(&adapter->running_cap_crqs, cap_reqs); 4090 4091 if (adapter->min_tx_entries_per_subcrq > entries_page || 4092 adapter->min_rx_add_entries_per_subcrq > entries_page) { 4093 dev_err(dev, "Fatal, invalid entries per sub-crq\n"); 4094 return; 4095 } 4096 4097 if (adapter->desired.mtu) 4098 adapter->req_mtu = adapter->desired.mtu; 4099 else 4100 adapter->req_mtu = adapter->netdev->mtu + ETH_HLEN; 4101 4102 if (!adapter->desired.tx_entries) 4103 adapter->desired.tx_entries = 4104 adapter->max_tx_entries_per_subcrq; 4105 if (!adapter->desired.rx_entries) 4106 adapter->desired.rx_entries = 4107 adapter->max_rx_add_entries_per_subcrq; 4108 4109 max_entries = IBMVNIC_MAX_LTB_SIZE / 4110 (adapter->req_mtu + IBMVNIC_BUFFER_HLEN); 4111 4112 if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) * 4113 adapter->desired.tx_entries > IBMVNIC_MAX_LTB_SIZE) { 4114 adapter->desired.tx_entries = max_entries; 4115 } 4116 4117 if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) * 4118 adapter->desired.rx_entries > IBMVNIC_MAX_LTB_SIZE) { 4119 adapter->desired.rx_entries = max_entries; 4120 } 4121 4122 if (adapter->desired.tx_entries) 4123 adapter->req_tx_entries_per_subcrq = 4124 adapter->desired.tx_entries; 4125 else 4126 adapter->req_tx_entries_per_subcrq = 4127 adapter->max_tx_entries_per_subcrq; 4128 4129 if (adapter->desired.rx_entries) 4130 adapter->req_rx_add_entries_per_subcrq = 4131 adapter->desired.rx_entries; 4132 else 4133 adapter->req_rx_add_entries_per_subcrq = 4134 adapter->max_rx_add_entries_per_subcrq; 4135 4136 if (adapter->desired.tx_queues) 4137 adapter->req_tx_queues = 4138 adapter->desired.tx_queues; 4139 else 4140 adapter->req_tx_queues = 4141 adapter->opt_tx_comp_sub_queues; 4142 4143 if (adapter->desired.rx_queues) 4144 adapter->req_rx_queues = 4145 adapter->desired.rx_queues; 4146 else 4147 adapter->req_rx_queues = 4148 adapter->opt_rx_comp_queues; 4149 4150 adapter->req_rx_add_queues = adapter->max_rx_add_queues; 4151 } else { 4152 atomic_add(cap_reqs, &adapter->running_cap_crqs); 4153 } 4154 memset(&crq, 0, sizeof(crq)); 4155 crq.request_capability.first = IBMVNIC_CRQ_CMD; 4156 crq.request_capability.cmd = REQUEST_CAPABILITY; 4157 4158 crq.request_capability.capability = cpu_to_be16(REQ_TX_QUEUES); 4159 crq.request_capability.number = cpu_to_be64(adapter->req_tx_queues); 4160 cap_reqs--; 4161 ibmvnic_send_crq(adapter, &crq); 4162 4163 crq.request_capability.capability = cpu_to_be16(REQ_RX_QUEUES); 4164 crq.request_capability.number = cpu_to_be64(adapter->req_rx_queues); 4165 cap_reqs--; 4166 ibmvnic_send_crq(adapter, &crq); 4167 4168 crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_QUEUES); 4169 crq.request_capability.number = cpu_to_be64(adapter->req_rx_add_queues); 4170 cap_reqs--; 4171 ibmvnic_send_crq(adapter, &crq); 4172 4173 crq.request_capability.capability = 4174 cpu_to_be16(REQ_TX_ENTRIES_PER_SUBCRQ); 4175 crq.request_capability.number = 4176 cpu_to_be64(adapter->req_tx_entries_per_subcrq); 4177 cap_reqs--; 4178 ibmvnic_send_crq(adapter, &crq); 4179 4180 crq.request_capability.capability = 4181 cpu_to_be16(REQ_RX_ADD_ENTRIES_PER_SUBCRQ); 4182 crq.request_capability.number = 4183 cpu_to_be64(adapter->req_rx_add_entries_per_subcrq); 4184 cap_reqs--; 4185 ibmvnic_send_crq(adapter, &crq); 4186 4187
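	/* req_mtu was fixed up in the !retry branch above: either the
	 * user-requested adapter->desired.mtu, or netdev->mtu plus the
	 * Ethernet header when no override was given.
	 */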
crq.request_capability.capability = cpu_to_be16(REQ_MTU); 4188 crq.request_capability.number = cpu_to_be64(adapter->req_mtu); 4189 cap_reqs--; 4190 ibmvnic_send_crq(adapter, &crq); 4191 4192 if (adapter->netdev->flags & IFF_PROMISC) { 4193 if (adapter->promisc_supported) { 4194 crq.request_capability.capability = 4195 cpu_to_be16(PROMISC_REQUESTED); 4196 crq.request_capability.number = cpu_to_be64(1); 4197 cap_reqs--; 4198 ibmvnic_send_crq(adapter, &crq); 4199 } 4200 } else { 4201 crq.request_capability.capability = 4202 cpu_to_be16(PROMISC_REQUESTED); 4203 crq.request_capability.number = cpu_to_be64(0); 4204 cap_reqs--; 4205 ibmvnic_send_crq(adapter, &crq); 4206 } 4207 4208 /* Keep at end to catch any discrepancy between expected and actual 4209 * CRQs sent. 4210 */ 4211 WARN_ON(cap_reqs != 0); 4212 } 4213 4214 static int pending_scrq(struct ibmvnic_adapter *adapter, 4215 struct ibmvnic_sub_crq_queue *scrq) 4216 { 4217 union sub_crq *entry = &scrq->msgs[scrq->cur]; 4218 int rc; 4219 4220 rc = !!(entry->generic.first & IBMVNIC_CRQ_CMD_RSP); 4221 4222 /* Ensure that the SCRQ valid flag is loaded prior to loading the 4223 * contents of the SCRQ descriptor 4224 */ 4225 dma_rmb(); 4226 4227 return rc; 4228 } 4229 4230 static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *adapter, 4231 struct ibmvnic_sub_crq_queue *scrq) 4232 { 4233 union sub_crq *entry; 4234 unsigned long flags; 4235 4236 spin_lock_irqsave(&scrq->lock, flags); 4237 entry = &scrq->msgs[scrq->cur]; 4238 if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP) { 4239 if (++scrq->cur == scrq->size) 4240 scrq->cur = 0; 4241 } else { 4242 entry = NULL; 4243 } 4244 spin_unlock_irqrestore(&scrq->lock, flags); 4245 4246 /* Ensure that the SCRQ valid flag is loaded prior to loading the 4247 * contents of the SCRQ descriptor 4248 */ 4249 dma_rmb(); 4250 4251 return entry; 4252 } 4253 4254 static union ibmvnic_crq *ibmvnic_next_crq(struct ibmvnic_adapter *adapter) 4255 { 4256 struct ibmvnic_crq_queue *queue = &adapter->crq; 4257 union ibmvnic_crq *crq; 4258 4259 crq = &queue->msgs[queue->cur]; 4260 if (crq->generic.first & IBMVNIC_CRQ_CMD_RSP) { 4261 if (++queue->cur == queue->size) 4262 queue->cur = 0; 4263 } else { 4264 crq = NULL; 4265 } 4266 4267 return crq; 4268 } 4269 4270 static void print_subcrq_error(struct device *dev, int rc, const char *func) 4271 { 4272 switch (rc) { 4273 case H_PARAMETER: 4274 dev_warn_ratelimited(dev, 4275 "%s failed: Send request is malformed or adapter failover pending. (rc=%d)\n", 4276 func, rc); 4277 break; 4278 case H_CLOSED: 4279 dev_warn_ratelimited(dev, 4280 "%s failed: Backing queue closed. Adapter is down or failover pending. 
(rc=%d)\n", 4281 func, rc); 4282 break; 4283 default: 4284 dev_err_ratelimited(dev, "%s failed: (rc=%d)\n", func, rc); 4285 break; 4286 } 4287 } 4288 4289 static int send_subcrq_indirect(struct ibmvnic_adapter *adapter, 4290 u64 remote_handle, u64 ioba, u64 num_entries) 4291 { 4292 unsigned int ua = adapter->vdev->unit_address; 4293 struct device *dev = &adapter->vdev->dev; 4294 int rc; 4295 4296 /* Make sure the hypervisor sees the complete request */ 4297 dma_wmb(); 4298 rc = plpar_hcall_norets(H_SEND_SUB_CRQ_INDIRECT, ua, 4299 cpu_to_be64(remote_handle), 4300 ioba, num_entries); 4301 4302 if (rc) 4303 print_subcrq_error(dev, rc, __func__); 4304 4305 return rc; 4306 } 4307 4308 static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter, 4309 union ibmvnic_crq *crq) 4310 { 4311 unsigned int ua = adapter->vdev->unit_address; 4312 struct device *dev = &adapter->vdev->dev; 4313 u64 *u64_crq = (u64 *)crq; 4314 int rc; 4315 4316 netdev_dbg(adapter->netdev, "Sending CRQ: %016lx %016lx\n", 4317 (unsigned long)cpu_to_be64(u64_crq[0]), 4318 (unsigned long)cpu_to_be64(u64_crq[1])); 4319 4320 if (!adapter->crq.active && 4321 crq->generic.first != IBMVNIC_CRQ_INIT_CMD) { 4322 dev_warn(dev, "Invalid request detected while CRQ is inactive, possible device state change during reset\n"); 4323 return -EINVAL; 4324 } 4325 4326 /* Make sure the hypervisor sees the complete request */ 4327 dma_wmb(); 4328 4329 rc = plpar_hcall_norets(H_SEND_CRQ, ua, 4330 cpu_to_be64(u64_crq[0]), 4331 cpu_to_be64(u64_crq[1])); 4332 4333 if (rc) { 4334 if (rc == H_CLOSED) { 4335 dev_warn(dev, "CRQ Queue closed\n"); 4336 /* do not reset, report the fail, wait for passive init from server */ 4337 } 4338 4339 dev_warn(dev, "Send error (rc=%d)\n", rc); 4340 } 4341 4342 return rc; 4343 } 4344 4345 static int ibmvnic_send_crq_init(struct ibmvnic_adapter *adapter) 4346 { 4347 struct device *dev = &adapter->vdev->dev; 4348 union ibmvnic_crq crq; 4349 int retries = 100; 4350 int rc; 4351 4352 memset(&crq, 0, sizeof(crq)); 4353 crq.generic.first = IBMVNIC_CRQ_INIT_CMD; 4354 crq.generic.cmd = IBMVNIC_CRQ_INIT; 4355 netdev_dbg(adapter->netdev, "Sending CRQ init\n"); 4356 4357 do { 4358 rc = ibmvnic_send_crq(adapter, &crq); 4359 if (rc != H_CLOSED) 4360 break; 4361 retries--; 4362 msleep(50); 4363 4364 } while (retries > 0); 4365 4366 if (rc) { 4367 dev_err(dev, "Failed to send init request, rc = %d\n", rc); 4368 return rc; 4369 } 4370 4371 return 0; 4372 } 4373 4374 struct vnic_login_client_data { 4375 u8 type; 4376 __be16 len; 4377 char name[]; 4378 } __packed; 4379 4380 static int vnic_client_data_len(struct ibmvnic_adapter *adapter) 4381 { 4382 int len; 4383 4384 /* Calculate the amount of buffer space needed for the 4385 * vnic client data in the login buffer. There are four entries, 4386 * OS name, LPAR name, device name, and a null last entry. 
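 *
 * Packed back to back, the entries look roughly like this (the
 * lengths shown are illustrative only):
 *
 *   [type=1][len=6]["Linux\0"][type=2][len]["<nodename>\0"]
 *   [type=3][len]["<netdev name>\0"][type=0][len=0]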
4387 */ 4388 len = 4 * sizeof(struct vnic_login_client_data); 4389 len += 6; /* "Linux" plus NULL */ 4390 len += strlen(utsname()->nodename) + 1; 4391 len += strlen(adapter->netdev->name) + 1; 4392 4393 return len; 4394 } 4395 4396 static void vnic_add_client_data(struct ibmvnic_adapter *adapter, 4397 struct vnic_login_client_data *vlcd) 4398 { 4399 const char *os_name = "Linux"; 4400 int len; 4401 4402 /* Type 1 - LPAR OS */ 4403 vlcd->type = 1; 4404 len = strlen(os_name) + 1; 4405 vlcd->len = cpu_to_be16(len); 4406 strscpy(vlcd->name, os_name, len); 4407 vlcd = (struct vnic_login_client_data *)(vlcd->name + len); 4408 4409 /* Type 2 - LPAR name */ 4410 vlcd->type = 2; 4411 len = strlen(utsname()->nodename) + 1; 4412 vlcd->len = cpu_to_be16(len); 4413 strscpy(vlcd->name, utsname()->nodename, len); 4414 vlcd = (struct vnic_login_client_data *)(vlcd->name + len); 4415 4416 /* Type 3 - device name */ 4417 vlcd->type = 3; 4418 len = strlen(adapter->netdev->name) + 1; 4419 vlcd->len = cpu_to_be16(len); 4420 strscpy(vlcd->name, adapter->netdev->name, len); 4421 } 4422 4423 static int send_login(struct ibmvnic_adapter *adapter) 4424 { 4425 struct ibmvnic_login_rsp_buffer *login_rsp_buffer; 4426 struct ibmvnic_login_buffer *login_buffer; 4427 struct device *dev = &adapter->vdev->dev; 4428 struct vnic_login_client_data *vlcd; 4429 dma_addr_t rsp_buffer_token; 4430 dma_addr_t buffer_token; 4431 size_t rsp_buffer_size; 4432 union ibmvnic_crq crq; 4433 int client_data_len; 4434 size_t buffer_size; 4435 __be64 *tx_list_p; 4436 __be64 *rx_list_p; 4437 int rc; 4438 int i; 4439 4440 if (!adapter->tx_scrq || !adapter->rx_scrq) { 4441 netdev_err(adapter->netdev, 4442 "RX or TX queues are not allocated, device login failed\n"); 4443 return -ENOMEM; 4444 } 4445 4446 release_login_buffer(adapter); 4447 release_login_rsp_buffer(adapter); 4448 4449 client_data_len = vnic_client_data_len(adapter); 4450 4451 buffer_size = 4452 sizeof(struct ibmvnic_login_buffer) + 4453 sizeof(u64) * (adapter->req_tx_queues + adapter->req_rx_queues) + 4454 client_data_len; 4455 4456 login_buffer = kzalloc(buffer_size, GFP_ATOMIC); 4457 if (!login_buffer) 4458 goto buf_alloc_failed; 4459 4460 buffer_token = dma_map_single(dev, login_buffer, buffer_size, 4461 DMA_TO_DEVICE); 4462 if (dma_mapping_error(dev, buffer_token)) { 4463 dev_err(dev, "Couldn't map login buffer\n"); 4464 goto buf_map_failed; 4465 } 4466 4467 rsp_buffer_size = sizeof(struct ibmvnic_login_rsp_buffer) + 4468 sizeof(u64) * adapter->req_tx_queues + 4469 sizeof(u64) * adapter->req_rx_queues + 4470 sizeof(u64) * adapter->req_rx_queues + 4471 sizeof(u8) * IBMVNIC_TX_DESC_VERSIONS; 4472 4473 login_rsp_buffer = kmalloc(rsp_buffer_size, GFP_ATOMIC); 4474 if (!login_rsp_buffer) 4475 goto buf_rsp_alloc_failed; 4476 4477 rsp_buffer_token = dma_map_single(dev, login_rsp_buffer, 4478 rsp_buffer_size, DMA_FROM_DEVICE); 4479 if (dma_mapping_error(dev, rsp_buffer_token)) { 4480 dev_err(dev, "Couldn't map login rsp buffer\n"); 4481 goto buf_rsp_map_failed; 4482 } 4483 4484 adapter->login_buf = login_buffer; 4485 adapter->login_buf_token = buffer_token; 4486 adapter->login_buf_sz = buffer_size; 4487 adapter->login_rsp_buf = login_rsp_buffer; 4488 adapter->login_rsp_buf_token = rsp_buffer_token; 4489 adapter->login_rsp_buf_sz = rsp_buffer_size; 4490 4491 login_buffer->len = cpu_to_be32(buffer_size); 4492 login_buffer->version = cpu_to_be32(INITIAL_VERSION_LB); 4493 login_buffer->num_txcomp_subcrqs = cpu_to_be32(adapter->req_tx_queues); 4494 login_buffer->off_txcomp_subcrqs = 4495 
cpu_to_be32(sizeof(struct ibmvnic_login_buffer)); 4496 login_buffer->num_rxcomp_subcrqs = cpu_to_be32(adapter->req_rx_queues); 4497 login_buffer->off_rxcomp_subcrqs = 4498 cpu_to_be32(sizeof(struct ibmvnic_login_buffer) + 4499 sizeof(u64) * adapter->req_tx_queues); 4500 login_buffer->login_rsp_ioba = cpu_to_be32(rsp_buffer_token); 4501 login_buffer->login_rsp_len = cpu_to_be32(rsp_buffer_size); 4502 4503 tx_list_p = (__be64 *)((char *)login_buffer + 4504 sizeof(struct ibmvnic_login_buffer)); 4505 rx_list_p = (__be64 *)((char *)login_buffer + 4506 sizeof(struct ibmvnic_login_buffer) + 4507 sizeof(u64) * adapter->req_tx_queues); 4508 4509 for (i = 0; i < adapter->req_tx_queues; i++) { 4510 if (adapter->tx_scrq[i]) { 4511 tx_list_p[i] = 4512 cpu_to_be64(adapter->tx_scrq[i]->crq_num); 4513 } 4514 } 4515 4516 for (i = 0; i < adapter->req_rx_queues; i++) { 4517 if (adapter->rx_scrq[i]) { 4518 rx_list_p[i] = 4519 cpu_to_be64(adapter->rx_scrq[i]->crq_num); 4520 } 4521 } 4522 4523 /* Insert vNIC login client data */ 4524 vlcd = (struct vnic_login_client_data *) 4525 ((char *)rx_list_p + (sizeof(u64) * adapter->req_rx_queues)); 4526 login_buffer->client_data_offset = 4527 cpu_to_be32((char *)vlcd - (char *)login_buffer); 4528 login_buffer->client_data_len = cpu_to_be32(client_data_len); 4529 4530 vnic_add_client_data(adapter, vlcd); 4531 4532 netdev_dbg(adapter->netdev, "Login Buffer:\n"); 4533 for (i = 0; i < (adapter->login_buf_sz - 1) / 8 + 1; i++) { 4534 netdev_dbg(adapter->netdev, "%016lx\n", 4535 ((unsigned long *)(adapter->login_buf))[i]); 4536 } 4537 4538 memset(&crq, 0, sizeof(crq)); 4539 crq.login.first = IBMVNIC_CRQ_CMD; 4540 crq.login.cmd = LOGIN; 4541 crq.login.ioba = cpu_to_be32(buffer_token); 4542 crq.login.len = cpu_to_be32(buffer_size); 4543 4544 adapter->login_pending = true; 4545 rc = ibmvnic_send_crq(adapter, &crq); 4546 if (rc) { 4547 adapter->login_pending = false; 4548 netdev_err(adapter->netdev, "Failed to send login, rc=%d\n", rc); 4549 goto buf_rsp_map_failed; 4550 } 4551 4552 return 0; 4553 4554 buf_rsp_map_failed: 4555 kfree(login_rsp_buffer); 4556 adapter->login_rsp_buf = NULL; 4557 buf_rsp_alloc_failed: 4558 dma_unmap_single(dev, buffer_token, buffer_size, DMA_TO_DEVICE); 4559 buf_map_failed: 4560 kfree(login_buffer); 4561 adapter->login_buf = NULL; 4562 buf_alloc_failed: 4563 return -ENOMEM; 4564 } 4565 4566 static int send_request_map(struct ibmvnic_adapter *adapter, dma_addr_t addr, 4567 u32 len, u8 map_id) 4568 { 4569 union ibmvnic_crq crq; 4570 4571 memset(&crq, 0, sizeof(crq)); 4572 crq.request_map.first = IBMVNIC_CRQ_CMD; 4573 crq.request_map.cmd = REQUEST_MAP; 4574 crq.request_map.map_id = map_id; 4575 crq.request_map.ioba = cpu_to_be32(addr); 4576 crq.request_map.len = cpu_to_be32(len); 4577 return ibmvnic_send_crq(adapter, &crq); 4578 } 4579 4580 static int send_request_unmap(struct ibmvnic_adapter *adapter, u8 map_id) 4581 { 4582 union ibmvnic_crq crq; 4583 4584 memset(&crq, 0, sizeof(crq)); 4585 crq.request_unmap.first = IBMVNIC_CRQ_CMD; 4586 crq.request_unmap.cmd = REQUEST_UNMAP; 4587 crq.request_unmap.map_id = map_id; 4588 return ibmvnic_send_crq(adapter, &crq); 4589 } 4590 4591 static void send_query_map(struct ibmvnic_adapter *adapter) 4592 { 4593 union ibmvnic_crq crq; 4594 4595 memset(&crq, 0, sizeof(crq)); 4596 crq.query_map.first = IBMVNIC_CRQ_CMD; 4597 crq.query_map.cmd = QUERY_MAP; 4598 ibmvnic_send_crq(adapter, &crq); 4599 } 4600 4601 /* Send a series of CRQs requesting various capabilities of the VNIC server */ 4602 static void 
send_query_cap(struct ibmvnic_adapter *adapter) 4603 { 4604 union ibmvnic_crq crq; 4605 int cap_reqs; 4606 4607 /* We send out 25 QUERY_CAPABILITY CRQs below. Initialize this count 4608 * upfront. When the tasklet receives a response to all of these, it 4609 * can send out the next protocol message (REQUEST_CAPABILITY). 4610 */ 4611 cap_reqs = 25; 4612 4613 atomic_set(&adapter->running_cap_crqs, cap_reqs); 4614 4615 memset(&crq, 0, sizeof(crq)); 4616 crq.query_capability.first = IBMVNIC_CRQ_CMD; 4617 crq.query_capability.cmd = QUERY_CAPABILITY; 4618 4619 crq.query_capability.capability = cpu_to_be16(MIN_TX_QUEUES); 4620 ibmvnic_send_crq(adapter, &crq); 4621 cap_reqs--; 4622 4623 crq.query_capability.capability = cpu_to_be16(MIN_RX_QUEUES); 4624 ibmvnic_send_crq(adapter, &crq); 4625 cap_reqs--; 4626 4627 crq.query_capability.capability = cpu_to_be16(MIN_RX_ADD_QUEUES); 4628 ibmvnic_send_crq(adapter, &crq); 4629 cap_reqs--; 4630 4631 crq.query_capability.capability = cpu_to_be16(MAX_TX_QUEUES); 4632 ibmvnic_send_crq(adapter, &crq); 4633 cap_reqs--; 4634 4635 crq.query_capability.capability = cpu_to_be16(MAX_RX_QUEUES); 4636 ibmvnic_send_crq(adapter, &crq); 4637 cap_reqs--; 4638 4639 crq.query_capability.capability = cpu_to_be16(MAX_RX_ADD_QUEUES); 4640 ibmvnic_send_crq(adapter, &crq); 4641 cap_reqs--; 4642 4643 crq.query_capability.capability = 4644 cpu_to_be16(MIN_TX_ENTRIES_PER_SUBCRQ); 4645 ibmvnic_send_crq(adapter, &crq); 4646 cap_reqs--; 4647 4648 crq.query_capability.capability = 4649 cpu_to_be16(MIN_RX_ADD_ENTRIES_PER_SUBCRQ); 4650 ibmvnic_send_crq(adapter, &crq); 4651 cap_reqs--; 4652 4653 crq.query_capability.capability = 4654 cpu_to_be16(MAX_TX_ENTRIES_PER_SUBCRQ); 4655 ibmvnic_send_crq(adapter, &crq); 4656 cap_reqs--; 4657 4658 crq.query_capability.capability = 4659 cpu_to_be16(MAX_RX_ADD_ENTRIES_PER_SUBCRQ); 4660 ibmvnic_send_crq(adapter, &crq); 4661 cap_reqs--; 4662 4663 crq.query_capability.capability = cpu_to_be16(TCP_IP_OFFLOAD); 4664 ibmvnic_send_crq(adapter, &crq); 4665 cap_reqs--; 4666 4667 crq.query_capability.capability = cpu_to_be16(PROMISC_SUPPORTED); 4668 ibmvnic_send_crq(adapter, &crq); 4669 cap_reqs--; 4670 4671 crq.query_capability.capability = cpu_to_be16(MIN_MTU); 4672 ibmvnic_send_crq(adapter, &crq); 4673 cap_reqs--; 4674 4675 crq.query_capability.capability = cpu_to_be16(MAX_MTU); 4676 ibmvnic_send_crq(adapter, &crq); 4677 cap_reqs--; 4678 4679 crq.query_capability.capability = cpu_to_be16(MAX_MULTICAST_FILTERS); 4680 ibmvnic_send_crq(adapter, &crq); 4681 cap_reqs--; 4682 4683 crq.query_capability.capability = cpu_to_be16(VLAN_HEADER_INSERTION); 4684 ibmvnic_send_crq(adapter, &crq); 4685 cap_reqs--; 4686 4687 crq.query_capability.capability = cpu_to_be16(RX_VLAN_HEADER_INSERTION); 4688 ibmvnic_send_crq(adapter, &crq); 4689 cap_reqs--; 4690 4691 crq.query_capability.capability = cpu_to_be16(MAX_TX_SG_ENTRIES); 4692 ibmvnic_send_crq(adapter, &crq); 4693 cap_reqs--; 4694 4695 crq.query_capability.capability = cpu_to_be16(RX_SG_SUPPORTED); 4696 ibmvnic_send_crq(adapter, &crq); 4697 cap_reqs--; 4698 4699 crq.query_capability.capability = cpu_to_be16(OPT_TX_COMP_SUB_QUEUES); 4700 ibmvnic_send_crq(adapter, &crq); 4701 cap_reqs--; 4702 4703 crq.query_capability.capability = cpu_to_be16(OPT_RX_COMP_QUEUES); 4704 ibmvnic_send_crq(adapter, &crq); 4705 cap_reqs--; 4706 4707 crq.query_capability.capability = 4708 cpu_to_be16(OPT_RX_BUFADD_Q_PER_RX_COMP_Q); 4709 ibmvnic_send_crq(adapter, &crq); 4710 cap_reqs--; 4711 4712 crq.query_capability.capability = 4713
cpu_to_be16(OPT_TX_ENTRIES_PER_SUBCRQ); 4714 ibmvnic_send_crq(adapter, &crq); 4715 cap_reqs--; 4716 4717 crq.query_capability.capability = 4718 cpu_to_be16(OPT_RXBA_ENTRIES_PER_SUBCRQ); 4719 ibmvnic_send_crq(adapter, &crq); 4720 cap_reqs--; 4721 4722 crq.query_capability.capability = cpu_to_be16(TX_RX_DESC_REQ); 4723 4724 ibmvnic_send_crq(adapter, &crq); 4725 cap_reqs--; 4726 4727 /* Keep at end to catch any discrepancy between expected and actual 4728 * CRQs sent. 4729 */ 4730 WARN_ON(cap_reqs != 0); 4731 } 4732 4733 static void send_query_ip_offload(struct ibmvnic_adapter *adapter) 4734 { 4735 int buf_sz = sizeof(struct ibmvnic_query_ip_offload_buffer); 4736 struct device *dev = &adapter->vdev->dev; 4737 union ibmvnic_crq crq; 4738 4739 adapter->ip_offload_tok = 4740 dma_map_single(dev, 4741 &adapter->ip_offload_buf, 4742 buf_sz, 4743 DMA_FROM_DEVICE); 4744 4745 if (dma_mapping_error(dev, adapter->ip_offload_tok)) { 4746 if (!firmware_has_feature(FW_FEATURE_CMO)) 4747 dev_err(dev, "Couldn't map offload buffer\n"); 4748 return; 4749 } 4750 4751 memset(&crq, 0, sizeof(crq)); 4752 crq.query_ip_offload.first = IBMVNIC_CRQ_CMD; 4753 crq.query_ip_offload.cmd = QUERY_IP_OFFLOAD; 4754 crq.query_ip_offload.len = cpu_to_be32(buf_sz); 4755 crq.query_ip_offload.ioba = 4756 cpu_to_be32(adapter->ip_offload_tok); 4757 4758 ibmvnic_send_crq(adapter, &crq); 4759 } 4760 4761 static void send_control_ip_offload(struct ibmvnic_adapter *adapter) 4762 { 4763 struct ibmvnic_control_ip_offload_buffer *ctrl_buf = &adapter->ip_offload_ctrl; 4764 struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf; 4765 struct device *dev = &adapter->vdev->dev; 4766 netdev_features_t old_hw_features = 0; 4767 union ibmvnic_crq crq; 4768 4769 adapter->ip_offload_ctrl_tok = 4770 dma_map_single(dev, 4771 ctrl_buf, 4772 sizeof(adapter->ip_offload_ctrl), 4773 DMA_TO_DEVICE); 4774 4775 if (dma_mapping_error(dev, adapter->ip_offload_ctrl_tok)) { 4776 dev_err(dev, "Couldn't map ip offload control buffer\n"); 4777 return; 4778 } 4779 4780 ctrl_buf->len = cpu_to_be32(sizeof(adapter->ip_offload_ctrl)); 4781 ctrl_buf->version = cpu_to_be32(INITIAL_VERSION_IOB); 4782 ctrl_buf->ipv4_chksum = buf->ipv4_chksum; 4783 ctrl_buf->ipv6_chksum = buf->ipv6_chksum; 4784 ctrl_buf->tcp_ipv4_chksum = buf->tcp_ipv4_chksum; 4785 ctrl_buf->udp_ipv4_chksum = buf->udp_ipv4_chksum; 4786 ctrl_buf->tcp_ipv6_chksum = buf->tcp_ipv6_chksum; 4787 ctrl_buf->udp_ipv6_chksum = buf->udp_ipv6_chksum; 4788 ctrl_buf->large_tx_ipv4 = buf->large_tx_ipv4; 4789 ctrl_buf->large_tx_ipv6 = buf->large_tx_ipv6; 4790 4791 /* large_rx disabled for now, additional features needed */ 4792 ctrl_buf->large_rx_ipv4 = 0; 4793 ctrl_buf->large_rx_ipv6 = 0; 4794 4795 if (adapter->state != VNIC_PROBING) { 4796 old_hw_features = adapter->netdev->hw_features; 4797 adapter->netdev->hw_features = 0; 4798 } 4799 4800 adapter->netdev->hw_features = NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO; 4801 4802 if (buf->tcp_ipv4_chksum || buf->udp_ipv4_chksum) 4803 adapter->netdev->hw_features |= NETIF_F_IP_CSUM; 4804 4805 if (buf->tcp_ipv6_chksum || buf->udp_ipv6_chksum) 4806 adapter->netdev->hw_features |= NETIF_F_IPV6_CSUM; 4807 4808 if ((adapter->netdev->features & 4809 (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))) 4810 adapter->netdev->hw_features |= NETIF_F_RXCSUM; 4811 4812 if (buf->large_tx_ipv4) 4813 adapter->netdev->hw_features |= NETIF_F_TSO; 4814 if (buf->large_tx_ipv6) 4815 adapter->netdev->hw_features |= NETIF_F_TSO6; 4816 4817 if (adapter->state == VNIC_PROBING) { 4818 
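		/* First probe: there is no user feature policy to preserve
		 * yet, so simply expose everything the device reports.
		 */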
adapter->netdev->features |= adapter->netdev->hw_features; 4819 } else if (old_hw_features != adapter->netdev->hw_features) { 4820 netdev_features_t tmp = 0; 4821 4822 /* disable features no longer supported */ 4823 adapter->netdev->features &= adapter->netdev->hw_features; 4824 /* turn on features now supported if previously enabled */ 4825 tmp = (old_hw_features ^ adapter->netdev->hw_features) & 4826 adapter->netdev->hw_features; 4827 adapter->netdev->features |= 4828 tmp & adapter->netdev->wanted_features; 4829 } 4830 4831 memset(&crq, 0, sizeof(crq)); 4832 crq.control_ip_offload.first = IBMVNIC_CRQ_CMD; 4833 crq.control_ip_offload.cmd = CONTROL_IP_OFFLOAD; 4834 crq.control_ip_offload.len = 4835 cpu_to_be32(sizeof(adapter->ip_offload_ctrl)); 4836 crq.control_ip_offload.ioba = cpu_to_be32(adapter->ip_offload_ctrl_tok); 4837 ibmvnic_send_crq(adapter, &crq); 4838 } 4839 4840 static void handle_vpd_size_rsp(union ibmvnic_crq *crq, 4841 struct ibmvnic_adapter *adapter) 4842 { 4843 struct device *dev = &adapter->vdev->dev; 4844 4845 if (crq->get_vpd_size_rsp.rc.code) { 4846 dev_err(dev, "Error retrieving VPD size, rc=%x\n", 4847 crq->get_vpd_size_rsp.rc.code); 4848 complete(&adapter->fw_done); 4849 return; 4850 } 4851 4852 adapter->vpd->len = be64_to_cpu(crq->get_vpd_size_rsp.len); 4853 complete(&adapter->fw_done); 4854 } 4855 4856 static void handle_vpd_rsp(union ibmvnic_crq *crq, 4857 struct ibmvnic_adapter *adapter) 4858 { 4859 struct device *dev = &adapter->vdev->dev; 4860 unsigned char *substr = NULL; 4861 u8 fw_level_len = 0; 4862 4863 memset(adapter->fw_version, 0, 32); 4864 4865 dma_unmap_single(dev, adapter->vpd->dma_addr, adapter->vpd->len, 4866 DMA_FROM_DEVICE); 4867 4868 if (crq->get_vpd_rsp.rc.code) { 4869 dev_err(dev, "Error retrieving VPD from device, rc=%x\n", 4870 crq->get_vpd_rsp.rc.code); 4871 goto complete; 4872 } 4873 4874 /* get the position of the firmware version info 4875 * located after the ASCII 'RM' substring in the buffer 4876 */ 4877 substr = strnstr(adapter->vpd->buff, "RM", adapter->vpd->len); 4878 if (!substr) { 4879 dev_info(dev, "Warning - No FW level has been provided in the VPD buffer by the VIOS Server\n"); 4880 goto complete; 4881 } 4882 4883 /* get length of firmware level ASCII substring */ 4884 if ((substr + 2) < (adapter->vpd->buff + adapter->vpd->len)) { 4885 fw_level_len = *(substr + 2); 4886 } else { 4887 dev_info(dev, "FW level length field is beyond the end of the VPD buffer\n"); 4888 goto complete; 4889 } 4890 4891 /* copy firmware version string from vpd into adapter */ 4892 if ((substr + 3 + fw_level_len) < 4893 (adapter->vpd->buff + adapter->vpd->len)) { 4894 strncpy((char *)adapter->fw_version, substr + 3, fw_level_len); 4895 } else { 4896 dev_info(dev, "FW version string extends beyond the end of the VPD buffer\n"); 4897 } 4898 4899 complete: 4900 if (adapter->fw_version[0] == '\0') 4901 strscpy((char *)adapter->fw_version, "N/A", sizeof(adapter->fw_version)); 4902 complete(&adapter->fw_done); 4903 } 4904 4905 static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter) 4906 { 4907 struct device *dev = &adapter->vdev->dev; 4908 struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf; 4909 int i; 4910 4911 dma_unmap_single(dev, adapter->ip_offload_tok, 4912 sizeof(adapter->ip_offload_buf), DMA_FROM_DEVICE); 4913 4914 netdev_dbg(adapter->netdev, "Query IP Offload Buffer:\n"); 4915 for (i = 0; i < (sizeof(adapter->ip_offload_buf) - 1) / 8 + 1; i++) 4916 netdev_dbg(adapter->netdev, "%016lx\n", 4917 ((unsigned long *)(buf))[i]); 4918 4919
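	/* Decode the offload buffer field by field; the checksum and
	 * large-send bits below drive the hw_features setup in
	 * send_control_ip_offload().
	 */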
netdev_dbg(adapter->netdev, "ipv4_chksum = %d\n", buf->ipv4_chksum); 4920 netdev_dbg(adapter->netdev, "ipv6_chksum = %d\n", buf->ipv6_chksum); 4921 netdev_dbg(adapter->netdev, "tcp_ipv4_chksum = %d\n", 4922 buf->tcp_ipv4_chksum); 4923 netdev_dbg(adapter->netdev, "tcp_ipv6_chksum = %d\n", 4924 buf->tcp_ipv6_chksum); 4925 netdev_dbg(adapter->netdev, "udp_ipv4_chksum = %d\n", 4926 buf->udp_ipv4_chksum); 4927 netdev_dbg(adapter->netdev, "udp_ipv6_chksum = %d\n", 4928 buf->udp_ipv6_chksum); 4929 netdev_dbg(adapter->netdev, "large_tx_ipv4 = %d\n", 4930 buf->large_tx_ipv4); 4931 netdev_dbg(adapter->netdev, "large_tx_ipv6 = %d\n", 4932 buf->large_tx_ipv6); 4933 netdev_dbg(adapter->netdev, "large_rx_ipv4 = %d\n", 4934 buf->large_rx_ipv4); 4935 netdev_dbg(adapter->netdev, "large_rx_ipv6 = %d\n", 4936 buf->large_rx_ipv6); 4937 netdev_dbg(adapter->netdev, "max_ipv4_hdr_sz = %d\n", 4938 buf->max_ipv4_header_size); 4939 netdev_dbg(adapter->netdev, "max_ipv6_hdr_sz = %d\n", 4940 buf->max_ipv6_header_size); 4941 netdev_dbg(adapter->netdev, "max_tcp_hdr_size = %d\n", 4942 buf->max_tcp_header_size); 4943 netdev_dbg(adapter->netdev, "max_udp_hdr_size = %d\n", 4944 buf->max_udp_header_size); 4945 netdev_dbg(adapter->netdev, "max_large_tx_size = %d\n", 4946 buf->max_large_tx_size); 4947 netdev_dbg(adapter->netdev, "max_large_rx_size = %d\n", 4948 buf->max_large_rx_size); 4949 netdev_dbg(adapter->netdev, "ipv6_ext_hdr = %d\n", 4950 buf->ipv6_extension_header); 4951 netdev_dbg(adapter->netdev, "tcp_pseudosum_req = %d\n", 4952 buf->tcp_pseudosum_req); 4953 netdev_dbg(adapter->netdev, "num_ipv6_ext_hd = %d\n", 4954 buf->num_ipv6_ext_headers); 4955 netdev_dbg(adapter->netdev, "off_ipv6_ext_hd = %d\n", 4956 buf->off_ipv6_ext_headers); 4957 4958 send_control_ip_offload(adapter); 4959 } 4960 4961 static const char *ibmvnic_fw_err_cause(u16 cause) 4962 { 4963 switch (cause) { 4964 case ADAPTER_PROBLEM: 4965 return "adapter problem"; 4966 case BUS_PROBLEM: 4967 return "bus problem"; 4968 case FW_PROBLEM: 4969 return "firmware problem"; 4970 case DD_PROBLEM: 4971 return "device driver problem"; 4972 case EEH_RECOVERY: 4973 return "EEH recovery"; 4974 case FW_UPDATED: 4975 return "firmware updated"; 4976 case LOW_MEMORY: 4977 return "low memory"; 4978 default: 4979 return "unknown"; 4980 } 4981 } 4982 4983 static void handle_error_indication(union ibmvnic_crq *crq, 4984 struct ibmvnic_adapter *adapter) 4985 { 4986 struct device *dev = &adapter->vdev->dev; 4987 u16 cause; 4988 4989 cause = be16_to_cpu(crq->error_indication.error_cause); 4990 4991 dev_warn_ratelimited(dev, 4992 "Firmware reports %serror, cause: %s. Starting recovery...\n", 4993 crq->error_indication.flags 4994 & IBMVNIC_FATAL_ERROR ? "FATAL " : "", 4995 ibmvnic_fw_err_cause(cause)); 4996 4997 if (crq->error_indication.flags & IBMVNIC_FATAL_ERROR) 4998 ibmvnic_reset(adapter, VNIC_RESET_FATAL); 4999 else 5000 ibmvnic_reset(adapter, VNIC_RESET_NON_FATAL); 5001 } 5002 5003 static int handle_change_mac_rsp(union ibmvnic_crq *crq, 5004 struct ibmvnic_adapter *adapter) 5005 { 5006 struct net_device *netdev = adapter->netdev; 5007 struct device *dev = &adapter->vdev->dev; 5008 long rc; 5009 5010 rc = crq->change_mac_addr_rsp.rc.code; 5011 if (rc) { 5012 dev_err(dev, "Error %ld in CHANGE_MAC_ADDR_RSP\n", rc); 5013 goto out; 5014 } 5015 /* crq->change_mac_addr.mac_addr is the requested one 5016 * crq->change_mac_addr_rsp.mac_addr is the returned valid one.
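 * The server may hand back a different address than the one we asked
 * for, so always commit the response value.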
5017 */ 5018 eth_hw_addr_set(netdev, &crq->change_mac_addr_rsp.mac_addr[0]); 5019 ether_addr_copy(adapter->mac_addr, 5020 &crq->change_mac_addr_rsp.mac_addr[0]); 5021 out: 5022 complete(&adapter->fw_done); 5023 return rc; 5024 } 5025 5026 static void handle_request_cap_rsp(union ibmvnic_crq *crq, 5027 struct ibmvnic_adapter *adapter) 5028 { 5029 struct device *dev = &adapter->vdev->dev; 5030 u64 *req_value; 5031 char *name; 5032 5033 atomic_dec(&adapter->running_cap_crqs); 5034 netdev_dbg(adapter->netdev, "Outstanding request-caps: %d\n", 5035 atomic_read(&adapter->running_cap_crqs)); 5036 switch (be16_to_cpu(crq->request_capability_rsp.capability)) { 5037 case REQ_TX_QUEUES: 5038 req_value = &adapter->req_tx_queues; 5039 name = "tx"; 5040 break; 5041 case REQ_RX_QUEUES: 5042 req_value = &adapter->req_rx_queues; 5043 name = "rx"; 5044 break; 5045 case REQ_RX_ADD_QUEUES: 5046 req_value = &adapter->req_rx_add_queues; 5047 name = "rx_add"; 5048 break; 5049 case REQ_TX_ENTRIES_PER_SUBCRQ: 5050 req_value = &adapter->req_tx_entries_per_subcrq; 5051 name = "tx_entries_per_subcrq"; 5052 break; 5053 case REQ_RX_ADD_ENTRIES_PER_SUBCRQ: 5054 req_value = &adapter->req_rx_add_entries_per_subcrq; 5055 name = "rx_add_entries_per_subcrq"; 5056 break; 5057 case REQ_MTU: 5058 req_value = &adapter->req_mtu; 5059 name = "mtu"; 5060 break; 5061 case PROMISC_REQUESTED: 5062 req_value = &adapter->promisc; 5063 name = "promisc"; 5064 break; 5065 default: 5066 dev_err(dev, "Got invalid cap request rsp %d\n", 5067 crq->request_capability.capability); 5068 return; 5069 } 5070 5071 switch (crq->request_capability_rsp.rc.code) { 5072 case SUCCESS: 5073 break; 5074 case PARTIALSUCCESS: 5075 dev_info(dev, "req=%lld, rsp=%ld in %s queue, retrying.\n", 5076 *req_value, 5077 (long)be64_to_cpu(crq->request_capability_rsp.number), 5078 name); 5079 5080 if (be16_to_cpu(crq->request_capability_rsp.capability) == 5081 REQ_MTU) { 5082 pr_err("mtu of %llu is not supported. Reverting.\n", 5083 *req_value); 5084 *req_value = adapter->fallback.mtu; 5085 } else { 5086 *req_value = 5087 be64_to_cpu(crq->request_capability_rsp.number); 5088 } 5089 5090 send_request_cap(adapter, 1); 5091 return; 5092 default: 5093 dev_err(dev, "Error %d in request cap rsp\n", 5094 crq->request_capability_rsp.rc.code); 5095 return; 5096 } 5097 5098 /* Done receiving requested capabilities, query IP offload support */ 5099 if (atomic_read(&adapter->running_cap_crqs) == 0) 5100 send_query_ip_offload(adapter); 5101 } 5102 5103 static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq, 5104 struct ibmvnic_adapter *adapter) 5105 { 5106 struct device *dev = &adapter->vdev->dev; 5107 struct net_device *netdev = adapter->netdev; 5108 struct ibmvnic_login_rsp_buffer *login_rsp = adapter->login_rsp_buf; 5109 struct ibmvnic_login_buffer *login = adapter->login_buf; 5110 u64 *tx_handle_array; 5111 u64 *rx_handle_array; 5112 int num_tx_pools; 5113 int num_rx_pools; 5114 u64 *size_array; 5115 int i; 5116 5117 /* CHECK: Test/set of login_pending does not need to be atomic 5118 * because only ibmvnic_tasklet tests/clears this. 
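 * send_login() is the only setter, and the LOGIN response that
 * clears it below is delivered through this same tasklet.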
5119 */ 5120 if (!adapter->login_pending) { 5121 netdev_warn(netdev, "Ignoring unexpected login response\n"); 5122 return 0; 5123 } 5124 adapter->login_pending = false; 5125 5126 dma_unmap_single(dev, adapter->login_buf_token, adapter->login_buf_sz, 5127 DMA_TO_DEVICE); 5128 dma_unmap_single(dev, adapter->login_rsp_buf_token, 5129 adapter->login_rsp_buf_sz, DMA_FROM_DEVICE); 5130 5131 /* If the number of queues requested can't be allocated by the 5132 * server, the login response will return with code 1. We will need 5133 * to resend the login buffer with fewer queues requested. 5134 */ 5135 if (login_rsp_crq->generic.rc.code) { 5136 adapter->init_done_rc = login_rsp_crq->generic.rc.code; 5137 complete(&adapter->init_done); 5138 return 0; 5139 } 5140 5141 if (adapter->failover_pending) { 5142 adapter->init_done_rc = -EAGAIN; 5143 netdev_dbg(netdev, "Failover pending, ignoring login response\n"); 5144 complete(&adapter->init_done); 5145 /* login response buffer will be released on reset */ 5146 return 0; 5147 } 5148 5149 netdev->mtu = adapter->req_mtu - ETH_HLEN; 5150 5151 netdev_dbg(adapter->netdev, "Login Response Buffer:\n"); 5152 for (i = 0; i < (adapter->login_rsp_buf_sz - 1) / 8 + 1; i++) { 5153 netdev_dbg(adapter->netdev, "%016lx\n", 5154 ((unsigned long *)(adapter->login_rsp_buf))[i]); 5155 } 5156 5157 /* Sanity checks */ 5158 if (login->num_txcomp_subcrqs != login_rsp->num_txsubm_subcrqs || 5159 (be32_to_cpu(login->num_rxcomp_subcrqs) * 5160 adapter->req_rx_add_queues != 5161 be32_to_cpu(login_rsp->num_rxadd_subcrqs))) { 5162 dev_err(dev, "FATAL: Inconsistent login and login rsp\n"); 5163 ibmvnic_reset(adapter, VNIC_RESET_FATAL); 5164 return -EIO; 5165 } 5166 size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) + 5167 be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size)); 5168 /* variable buffer sizes are not supported, so just read the 5169 * first entry. 
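 * (The response carries an array of sizes, presumably one per rx
 * buffer-add queue; only entry 0 is used here.)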
5170 */ 5171 adapter->cur_rx_buf_sz = be64_to_cpu(size_array[0]); 5172 5173 num_tx_pools = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs); 5174 num_rx_pools = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs); 5175 5176 tx_handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) + 5177 be32_to_cpu(adapter->login_rsp_buf->off_txsubm_subcrqs)); 5178 rx_handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) + 5179 be32_to_cpu(adapter->login_rsp_buf->off_rxadd_subcrqs)); 5180 5181 for (i = 0; i < num_tx_pools; i++) 5182 adapter->tx_scrq[i]->handle = tx_handle_array[i]; 5183 5184 for (i = 0; i < num_rx_pools; i++) 5185 adapter->rx_scrq[i]->handle = rx_handle_array[i]; 5186 5187 adapter->num_active_tx_scrqs = num_tx_pools; 5188 adapter->num_active_rx_scrqs = num_rx_pools; 5189 release_login_rsp_buffer(adapter); 5190 release_login_buffer(adapter); 5191 complete(&adapter->init_done); 5192 5193 return 0; 5194 } 5195 5196 static void handle_request_unmap_rsp(union ibmvnic_crq *crq, 5197 struct ibmvnic_adapter *adapter) 5198 { 5199 struct device *dev = &adapter->vdev->dev; 5200 long rc; 5201 5202 rc = crq->request_unmap_rsp.rc.code; 5203 if (rc) 5204 dev_err(dev, "Error %ld in REQUEST_UNMAP_RSP\n", rc); 5205 } 5206 5207 static void handle_query_map_rsp(union ibmvnic_crq *crq, 5208 struct ibmvnic_adapter *adapter) 5209 { 5210 struct net_device *netdev = adapter->netdev; 5211 struct device *dev = &adapter->vdev->dev; 5212 long rc; 5213 5214 rc = crq->query_map_rsp.rc.code; 5215 if (rc) { 5216 dev_err(dev, "Error %ld in QUERY_MAP_RSP\n", rc); 5217 return; 5218 } 5219 netdev_dbg(netdev, "page_size = %d\ntot_pages = %u\nfree_pages = %u\n", 5220 crq->query_map_rsp.page_size, 5221 __be32_to_cpu(crq->query_map_rsp.tot_pages), 5222 __be32_to_cpu(crq->query_map_rsp.free_pages)); 5223 } 5224 5225 static void handle_query_cap_rsp(union ibmvnic_crq *crq, 5226 struct ibmvnic_adapter *adapter) 5227 { 5228 struct net_device *netdev = adapter->netdev; 5229 struct device *dev = &adapter->vdev->dev; 5230 long rc; 5231 5232 atomic_dec(&adapter->running_cap_crqs); 5233 netdev_dbg(netdev, "Outstanding queries: %d\n", 5234 atomic_read(&adapter->running_cap_crqs)); 5235 rc = crq->query_capability.rc.code; 5236 if (rc) { 5237 dev_err(dev, "Error %ld in QUERY_CAP_RSP\n", rc); 5238 goto out; 5239 } 5240 5241 switch (be16_to_cpu(crq->query_capability.capability)) { 5242 case MIN_TX_QUEUES: 5243 adapter->min_tx_queues = 5244 be64_to_cpu(crq->query_capability.number); 5245 netdev_dbg(netdev, "min_tx_queues = %lld\n", 5246 adapter->min_tx_queues); 5247 break; 5248 case MIN_RX_QUEUES: 5249 adapter->min_rx_queues = 5250 be64_to_cpu(crq->query_capability.number); 5251 netdev_dbg(netdev, "min_rx_queues = %lld\n", 5252 adapter->min_rx_queues); 5253 break; 5254 case MIN_RX_ADD_QUEUES: 5255 adapter->min_rx_add_queues = 5256 be64_to_cpu(crq->query_capability.number); 5257 netdev_dbg(netdev, "min_rx_add_queues = %lld\n", 5258 adapter->min_rx_add_queues); 5259 break; 5260 case MAX_TX_QUEUES: 5261 adapter->max_tx_queues = 5262 be64_to_cpu(crq->query_capability.number); 5263 netdev_dbg(netdev, "max_tx_queues = %lld\n", 5264 adapter->max_tx_queues); 5265 break; 5266 case MAX_RX_QUEUES: 5267 adapter->max_rx_queues = 5268 be64_to_cpu(crq->query_capability.number); 5269 netdev_dbg(netdev, "max_rx_queues = %lld\n", 5270 adapter->max_rx_queues); 5271 break; 5272 case MAX_RX_ADD_QUEUES: 5273 adapter->max_rx_add_queues = 5274 be64_to_cpu(crq->query_capability.number); 5275 netdev_dbg(netdev, "max_rx_add_queues = %lld\n", 5276 
adapter->max_rx_add_queues); 5277 break; 5278 case MIN_TX_ENTRIES_PER_SUBCRQ: 5279 adapter->min_tx_entries_per_subcrq = 5280 be64_to_cpu(crq->query_capability.number); 5281 netdev_dbg(netdev, "min_tx_entries_per_subcrq = %lld\n", 5282 adapter->min_tx_entries_per_subcrq); 5283 break; 5284 case MIN_RX_ADD_ENTRIES_PER_SUBCRQ: 5285 adapter->min_rx_add_entries_per_subcrq = 5286 be64_to_cpu(crq->query_capability.number); 5287 netdev_dbg(netdev, "min_rx_add_entrs_per_subcrq = %lld\n", 5288 adapter->min_rx_add_entries_per_subcrq); 5289 break; 5290 case MAX_TX_ENTRIES_PER_SUBCRQ: 5291 adapter->max_tx_entries_per_subcrq = 5292 be64_to_cpu(crq->query_capability.number); 5293 netdev_dbg(netdev, "max_tx_entries_per_subcrq = %lld\n", 5294 adapter->max_tx_entries_per_subcrq); 5295 break; 5296 case MAX_RX_ADD_ENTRIES_PER_SUBCRQ: 5297 adapter->max_rx_add_entries_per_subcrq = 5298 be64_to_cpu(crq->query_capability.number); 5299 netdev_dbg(netdev, "max_rx_add_entrs_per_subcrq = %lld\n", 5300 adapter->max_rx_add_entries_per_subcrq); 5301 break; 5302 case TCP_IP_OFFLOAD: 5303 adapter->tcp_ip_offload = 5304 be64_to_cpu(crq->query_capability.number); 5305 netdev_dbg(netdev, "tcp_ip_offload = %lld\n", 5306 adapter->tcp_ip_offload); 5307 break; 5308 case PROMISC_SUPPORTED: 5309 adapter->promisc_supported = 5310 be64_to_cpu(crq->query_capability.number); 5311 netdev_dbg(netdev, "promisc_supported = %lld\n", 5312 adapter->promisc_supported); 5313 break; 5314 case MIN_MTU: 5315 adapter->min_mtu = be64_to_cpu(crq->query_capability.number); 5316 netdev->min_mtu = adapter->min_mtu - ETH_HLEN; 5317 netdev_dbg(netdev, "min_mtu = %lld\n", adapter->min_mtu); 5318 break; 5319 case MAX_MTU: 5320 adapter->max_mtu = be64_to_cpu(crq->query_capability.number); 5321 netdev->max_mtu = adapter->max_mtu - ETH_HLEN; 5322 netdev_dbg(netdev, "max_mtu = %lld\n", adapter->max_mtu); 5323 break; 5324 case MAX_MULTICAST_FILTERS: 5325 adapter->max_multicast_filters = 5326 be64_to_cpu(crq->query_capability.number); 5327 netdev_dbg(netdev, "max_multicast_filters = %lld\n", 5328 adapter->max_multicast_filters); 5329 break; 5330 case VLAN_HEADER_INSERTION: 5331 adapter->vlan_header_insertion = 5332 be64_to_cpu(crq->query_capability.number); 5333 if (adapter->vlan_header_insertion) 5334 netdev->features |= NETIF_F_HW_VLAN_STAG_TX; 5335 netdev_dbg(netdev, "vlan_header_insertion = %lld\n", 5336 adapter->vlan_header_insertion); 5337 break; 5338 case RX_VLAN_HEADER_INSERTION: 5339 adapter->rx_vlan_header_insertion = 5340 be64_to_cpu(crq->query_capability.number); 5341 netdev_dbg(netdev, "rx_vlan_header_insertion = %lld\n", 5342 adapter->rx_vlan_header_insertion); 5343 break; 5344 case MAX_TX_SG_ENTRIES: 5345 adapter->max_tx_sg_entries = 5346 be64_to_cpu(crq->query_capability.number); 5347 netdev_dbg(netdev, "max_tx_sg_entries = %lld\n", 5348 adapter->max_tx_sg_entries); 5349 break; 5350 case RX_SG_SUPPORTED: 5351 adapter->rx_sg_supported = 5352 be64_to_cpu(crq->query_capability.number); 5353 netdev_dbg(netdev, "rx_sg_supported = %lld\n", 5354 adapter->rx_sg_supported); 5355 break; 5356 case OPT_TX_COMP_SUB_QUEUES: 5357 adapter->opt_tx_comp_sub_queues = 5358 be64_to_cpu(crq->query_capability.number); 5359 netdev_dbg(netdev, "opt_tx_comp_sub_queues = %lld\n", 5360 adapter->opt_tx_comp_sub_queues); 5361 break; 5362 case OPT_RX_COMP_QUEUES: 5363 adapter->opt_rx_comp_queues = 5364 be64_to_cpu(crq->query_capability.number); 5365 netdev_dbg(netdev, "opt_rx_comp_queues = %lld\n", 5366 adapter->opt_rx_comp_queues); 5367 break; 5368 case 
OPT_RX_BUFADD_Q_PER_RX_COMP_Q: 5369 adapter->opt_rx_bufadd_q_per_rx_comp_q = 5370 be64_to_cpu(crq->query_capability.number); 5371 netdev_dbg(netdev, "opt_rx_bufadd_q_per_rx_comp_q = %lld\n", 5372 adapter->opt_rx_bufadd_q_per_rx_comp_q); 5373 break; 5374 case OPT_TX_ENTRIES_PER_SUBCRQ: 5375 adapter->opt_tx_entries_per_subcrq = 5376 be64_to_cpu(crq->query_capability.number); 5377 netdev_dbg(netdev, "opt_tx_entries_per_subcrq = %lld\n", 5378 adapter->opt_tx_entries_per_subcrq); 5379 break; 5380 case OPT_RXBA_ENTRIES_PER_SUBCRQ: 5381 adapter->opt_rxba_entries_per_subcrq = 5382 be64_to_cpu(crq->query_capability.number); 5383 netdev_dbg(netdev, "opt_rxba_entries_per_subcrq = %lld\n", 5384 adapter->opt_rxba_entries_per_subcrq); 5385 break; 5386 case TX_RX_DESC_REQ: 5387 adapter->tx_rx_desc_req = crq->query_capability.number; 5388 netdev_dbg(netdev, "tx_rx_desc_req = %llx\n", 5389 adapter->tx_rx_desc_req); 5390 break; 5391 5392 default: 5393 netdev_err(netdev, "Got invalid cap rsp %d\n", 5394 crq->query_capability.capability); 5395 } 5396 5397 out: 5398 if (atomic_read(&adapter->running_cap_crqs) == 0) 5399 send_request_cap(adapter, 0); 5400 } 5401 5402 static int send_query_phys_parms(struct ibmvnic_adapter *adapter) 5403 { 5404 union ibmvnic_crq crq; 5405 int rc; 5406 5407 memset(&crq, 0, sizeof(crq)); 5408 crq.query_phys_parms.first = IBMVNIC_CRQ_CMD; 5409 crq.query_phys_parms.cmd = QUERY_PHYS_PARMS; 5410 5411 mutex_lock(&adapter->fw_lock); 5412 adapter->fw_done_rc = 0; 5413 reinit_completion(&adapter->fw_done); 5414 5415 rc = ibmvnic_send_crq(adapter, &crq); 5416 if (rc) { 5417 mutex_unlock(&adapter->fw_lock); 5418 return rc; 5419 } 5420 5421 rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000); 5422 if (rc) { 5423 mutex_unlock(&adapter->fw_lock); 5424 return rc; 5425 } 5426 5427 mutex_unlock(&adapter->fw_lock); 5428 return adapter->fw_done_rc ? 
-EIO : 0; 5429 } 5430 5431 static int handle_query_phys_parms_rsp(union ibmvnic_crq *crq, 5432 struct ibmvnic_adapter *adapter) 5433 { 5434 struct net_device *netdev = adapter->netdev; 5435 int rc; 5436 __be32 rspeed = cpu_to_be32(crq->query_phys_parms_rsp.speed); 5437 5438 rc = crq->query_phys_parms_rsp.rc.code; 5439 if (rc) { 5440 netdev_err(netdev, "Error %d in QUERY_PHYS_PARMS\n", rc); 5441 return rc; 5442 } 5443 switch (rspeed) { 5444 case IBMVNIC_10MBPS: 5445 adapter->speed = SPEED_10; 5446 break; 5447 case IBMVNIC_100MBPS: 5448 adapter->speed = SPEED_100; 5449 break; 5450 case IBMVNIC_1GBPS: 5451 adapter->speed = SPEED_1000; 5452 break; 5453 case IBMVNIC_10GBPS: 5454 adapter->speed = SPEED_10000; 5455 break; 5456 case IBMVNIC_25GBPS: 5457 adapter->speed = SPEED_25000; 5458 break; 5459 case IBMVNIC_40GBPS: 5460 adapter->speed = SPEED_40000; 5461 break; 5462 case IBMVNIC_50GBPS: 5463 adapter->speed = SPEED_50000; 5464 break; 5465 case IBMVNIC_100GBPS: 5466 adapter->speed = SPEED_100000; 5467 break; 5468 case IBMVNIC_200GBPS: 5469 adapter->speed = SPEED_200000; 5470 break; 5471 default: 5472 if (netif_carrier_ok(netdev)) 5473 netdev_warn(netdev, "Unknown speed 0x%08x\n", rspeed); 5474 adapter->speed = SPEED_UNKNOWN; 5475 } 5476 if (crq->query_phys_parms_rsp.flags1 & IBMVNIC_FULL_DUPLEX) 5477 adapter->duplex = DUPLEX_FULL; 5478 else if (crq->query_phys_parms_rsp.flags1 & IBMVNIC_HALF_DUPLEX) 5479 adapter->duplex = DUPLEX_HALF; 5480 else 5481 adapter->duplex = DUPLEX_UNKNOWN; 5482 5483 return rc; 5484 } 5485 5486 static void ibmvnic_handle_crq(union ibmvnic_crq *crq, 5487 struct ibmvnic_adapter *adapter) 5488 { 5489 struct ibmvnic_generic_crq *gen_crq = &crq->generic; 5490 struct net_device *netdev = adapter->netdev; 5491 struct device *dev = &adapter->vdev->dev; 5492 u64 *u64_crq = (u64 *)crq; 5493 long rc; 5494 5495 netdev_dbg(netdev, "Handling CRQ: %016lx %016lx\n", 5496 (unsigned long)cpu_to_be64(u64_crq[0]), 5497 (unsigned long)cpu_to_be64(u64_crq[1])); 5498 switch (gen_crq->first) { 5499 case IBMVNIC_CRQ_INIT_RSP: 5500 switch (gen_crq->cmd) { 5501 case IBMVNIC_CRQ_INIT: 5502 dev_info(dev, "Partner initialized\n"); 5503 adapter->from_passive_init = true; 5504 /* Discard any stale login responses from prev reset. 5505 * CHECK: should we clear even on INIT_COMPLETE? 5506 */ 5507 adapter->login_pending = false; 5508 5509 if (adapter->state == VNIC_DOWN) 5510 rc = ibmvnic_reset(adapter, VNIC_RESET_PASSIVE_INIT); 5511 else 5512 rc = ibmvnic_reset(adapter, VNIC_RESET_FAILOVER); 5513 5514 if (rc && rc != -EBUSY) { 5515 /* We were unable to schedule the failover 5516 * reset either because the adapter was still 5517 * probing (eg: during kexec) or we could not 5518 * allocate memory. Clear the failover_pending 5519 * flag since no one else will. We ignore 5520 * EBUSY because it means either FAILOVER reset 5521 * is already scheduled or the adapter is 5522 * being removed. 
5523 */ 5524 netdev_err(netdev, 5525 "Error %ld scheduling failover reset\n", 5526 rc); 5527 adapter->failover_pending = false; 5528 } 5529 5530 if (!completion_done(&adapter->init_done)) { 5531 if (!adapter->init_done_rc) 5532 adapter->init_done_rc = -EAGAIN; 5533 complete(&adapter->init_done); 5534 } 5535 5536 break; 5537 case IBMVNIC_CRQ_INIT_COMPLETE: 5538 dev_info(dev, "Partner initialization complete\n"); 5539 adapter->crq.active = true; 5540 send_version_xchg(adapter); 5541 break; 5542 default: 5543 dev_err(dev, "Unknown crq cmd: %d\n", gen_crq->cmd); 5544 } 5545 return; 5546 case IBMVNIC_CRQ_XPORT_EVENT: 5547 netif_carrier_off(netdev); 5548 adapter->crq.active = false; 5549 /* terminate any thread waiting for a response 5550 * from the device 5551 */ 5552 if (!completion_done(&adapter->fw_done)) { 5553 adapter->fw_done_rc = -EIO; 5554 complete(&adapter->fw_done); 5555 } 5556 5557 /* if we got here during crq-init, retry crq-init */ 5558 if (!completion_done(&adapter->init_done)) { 5559 adapter->init_done_rc = -EAGAIN; 5560 complete(&adapter->init_done); 5561 } 5562 5563 if (!completion_done(&adapter->stats_done)) 5564 complete(&adapter->stats_done); 5565 if (test_bit(0, &adapter->resetting)) 5566 adapter->force_reset_recovery = true; 5567 if (gen_crq->cmd == IBMVNIC_PARTITION_MIGRATED) { 5568 dev_info(dev, "Migrated, re-enabling adapter\n"); 5569 ibmvnic_reset(adapter, VNIC_RESET_MOBILITY); 5570 } else if (gen_crq->cmd == IBMVNIC_DEVICE_FAILOVER) { 5571 dev_info(dev, "Backing device failover detected\n"); 5572 adapter->failover_pending = true; 5573 } else { 5574 /* The adapter lost the connection */ 5575 dev_err(dev, "Virtual Adapter failed (rc=%d)\n", 5576 gen_crq->cmd); 5577 ibmvnic_reset(adapter, VNIC_RESET_FATAL); 5578 } 5579 return; 5580 case IBMVNIC_CRQ_CMD_RSP: 5581 break; 5582 default: 5583 dev_err(dev, "Got an invalid msg type 0x%02x\n", 5584 gen_crq->first); 5585 return; 5586 } 5587 5588 switch (gen_crq->cmd) { 5589 case VERSION_EXCHANGE_RSP: 5590 rc = crq->version_exchange_rsp.rc.code; 5591 if (rc) { 5592 dev_err(dev, "Error %ld in VERSION_EXCHG_RSP\n", rc); 5593 break; 5594 } 5595 ibmvnic_version = 5596 be16_to_cpu(crq->version_exchange_rsp.version); 5597 dev_info(dev, "Partner protocol version is %d\n", 5598 ibmvnic_version); 5599 send_query_cap(adapter); 5600 break; 5601 case QUERY_CAPABILITY_RSP: 5602 handle_query_cap_rsp(crq, adapter); 5603 break; 5604 case QUERY_MAP_RSP: 5605 handle_query_map_rsp(crq, adapter); 5606 break; 5607 case REQUEST_MAP_RSP: 5608 adapter->fw_done_rc = crq->request_map_rsp.rc.code; 5609 complete(&adapter->fw_done); 5610 break; 5611 case REQUEST_UNMAP_RSP: 5612 handle_request_unmap_rsp(crq, adapter); 5613 break; 5614 case REQUEST_CAPABILITY_RSP: 5615 handle_request_cap_rsp(crq, adapter); 5616 break; 5617 case LOGIN_RSP: 5618 netdev_dbg(netdev, "Got Login Response\n"); 5619 handle_login_rsp(crq, adapter); 5620 break; 5621 case LOGICAL_LINK_STATE_RSP: 5622 netdev_dbg(netdev, 5623 "Got Logical Link State Response, state: %d rc: %d\n", 5624 crq->logical_link_state_rsp.link_state, 5625 crq->logical_link_state_rsp.rc.code); 5626 adapter->logical_link_state = 5627 crq->logical_link_state_rsp.link_state; 5628 adapter->init_done_rc = crq->logical_link_state_rsp.rc.code; 5629 complete(&adapter->init_done); 5630 break; 5631 case LINK_STATE_INDICATION: 5632 netdev_dbg(netdev, "Got Logical Link State Indication\n"); 5633 adapter->phys_link_state = 5634 crq->link_state_indication.phys_link_state; 5635 adapter->logical_link_state = 5636 
static irqreturn_t ibmvnic_interrupt(int irq, void *instance)
{
        struct ibmvnic_adapter *adapter = instance;

        tasklet_schedule(&adapter->tasklet);
        return IRQ_HANDLED;
}

static void ibmvnic_tasklet(struct tasklet_struct *t)
{
        struct ibmvnic_adapter *adapter = from_tasklet(adapter, t, tasklet);
        struct ibmvnic_crq_queue *queue = &adapter->crq;
        union ibmvnic_crq *crq;
        unsigned long flags;

        spin_lock_irqsave(&queue->lock, flags);

        /* Pull all the valid messages off the CRQ */
        while ((crq = ibmvnic_next_crq(adapter)) != NULL) {
                /* This barrier makes sure ibmvnic_next_crq()'s
                 * crq->generic.first & IBMVNIC_CRQ_CMD_RSP is loaded
                 * before ibmvnic_handle_crq()'s
                 * switch(gen_crq->first) and switch(gen_crq->cmd).
                 */
                dma_rmb();
                ibmvnic_handle_crq(crq, adapter);
                crq->generic.first = 0;
        }

        spin_unlock_irqrestore(&queue->lock, flags);
}
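/* Note on the split above: the hard IRQ handler only schedules the
 * tasklet, so all CRQ parsing runs in softirq context under
 * queue->lock. The dma_rmb() matters because each descriptor (two
 * u64s, per the debug print in ibmvnic_handle_crq()) is written by the
 * server/hypervisor: ibmvnic_next_crq() checks the "first" byte for
 * validity, and without the barrier the subsequent reads in
 * ibmvnic_handle_crq() could observe stale payload bytes for an entry
 * whose valid bit was only just set.
 */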
static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *adapter)
{
        struct vio_dev *vdev = adapter->vdev;
        int rc;

        do {
                rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
        } while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));

        if (rc)
                dev_err(&vdev->dev, "Error enabling adapter (rc=%d)\n", rc);

        return rc;
}

static int ibmvnic_reset_crq(struct ibmvnic_adapter *adapter)
{
        struct ibmvnic_crq_queue *crq = &adapter->crq;
        struct device *dev = &adapter->vdev->dev;
        struct vio_dev *vdev = adapter->vdev;
        int rc;

        /* Close the CRQ */
        do {
                rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
        } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

        /* Clean out the queue */
        if (!crq->msgs)
                return -EINVAL;

        memset(crq->msgs, 0, PAGE_SIZE);
        crq->cur = 0;
        crq->active = false;

        /* And re-open it again */
        rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
                                crq->msg_token, PAGE_SIZE);

        if (rc == H_CLOSED)
                /* Adapter is good, but other end is not ready */
                dev_warn(dev, "Partner adapter not ready\n");
        else if (rc != 0)
                dev_warn(dev, "Couldn't register crq (rc=%d)\n", rc);

        return rc;
}
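/* Both helpers above use the usual PAPR busy-retry idiom for CRQ
 * hcalls: spin while the hypervisor reports a transient state. The
 * generic shape, for reference:
 *
 *      do {
 *              rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
 *      } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
 *
 * H_IS_LONG_BUSY() (asm/hvcall.h) matches the H_LONG_BUSY_* return
 * codes, which encode a hint of how long to wait before retrying;
 * this driver retries immediately rather than sleeping.
 */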
static void release_crq_queue(struct ibmvnic_adapter *adapter)
{
        struct ibmvnic_crq_queue *crq = &adapter->crq;
        struct vio_dev *vdev = adapter->vdev;
        long rc;

        if (!crq->msgs)
                return;

        netdev_dbg(adapter->netdev, "Releasing CRQ\n");
        free_irq(vdev->irq, adapter);
        tasklet_kill(&adapter->tasklet);
        do {
                rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
        } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

        dma_unmap_single(&vdev->dev, crq->msg_token, PAGE_SIZE,
                         DMA_BIDIRECTIONAL);
        free_page((unsigned long)crq->msgs);
        crq->msgs = NULL;
        crq->active = false;
}

static int init_crq_queue(struct ibmvnic_adapter *adapter)
{
        struct ibmvnic_crq_queue *crq = &adapter->crq;
        struct device *dev = &adapter->vdev->dev;
        struct vio_dev *vdev = adapter->vdev;
        int rc, retrc = -ENOMEM;

        if (crq->msgs)
                return 0;

        crq->msgs = (union ibmvnic_crq *)get_zeroed_page(GFP_KERNEL);
        /* Should we allocate more than one page? */

        if (!crq->msgs)
                return -ENOMEM;

        crq->size = PAGE_SIZE / sizeof(*crq->msgs);
        crq->msg_token = dma_map_single(dev, crq->msgs, PAGE_SIZE,
                                        DMA_BIDIRECTIONAL);
        if (dma_mapping_error(dev, crq->msg_token))
                goto map_failed;

        rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
                                crq->msg_token, PAGE_SIZE);

        if (rc == H_RESOURCE)
                /* maybe kexecing and resource is busy. try a reset */
                rc = ibmvnic_reset_crq(adapter);
        retrc = rc;

        if (rc == H_CLOSED) {
                dev_warn(dev, "Partner adapter not ready\n");
        } else if (rc) {
                dev_warn(dev, "Error %d opening adapter\n", rc);
                goto reg_crq_failed;
        }

        retrc = 0;

        tasklet_setup(&adapter->tasklet, (void *)ibmvnic_tasklet);

        netdev_dbg(adapter->netdev, "registering irq 0x%x\n", vdev->irq);
        snprintf(crq->name, sizeof(crq->name), "ibmvnic-%x",
                 adapter->vdev->unit_address);
        rc = request_irq(vdev->irq, ibmvnic_interrupt, 0, crq->name, adapter);
        if (rc) {
                dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n",
                        vdev->irq, rc);
                goto req_irq_failed;
        }

        rc = vio_enable_interrupts(vdev);
        if (rc) {
                dev_err(dev, "Error %d enabling interrupts\n", rc);
                goto req_irq_failed;
        }

        crq->cur = 0;
        spin_lock_init(&crq->lock);

        /* process any CRQs that were queued before we enabled interrupts */
        tasklet_schedule(&adapter->tasklet);

        return retrc;

req_irq_failed:
        tasklet_kill(&adapter->tasklet);
        do {
                rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
        } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
reg_crq_failed:
        dma_unmap_single(dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
map_failed:
        free_page((unsigned long)crq->msgs);
        crq->msgs = NULL;
        return retrc;
}
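/* Worked example of the CRQ sizing above, assuming 4K pages and a
 * 16-byte union ibmvnic_crq descriptor (two u64s, as the debug print
 * in ibmvnic_handle_crq() suggests):
 *
 *      crq->size = PAGE_SIZE / sizeof(*crq->msgs)
 *                = 4096 / 16 = 256 descriptors
 *
 * On the 64K-page configs common on ppc64 the same math gives 4096
 * descriptors; either way the whole ring lives in the single page that
 * is DMA-mapped and registered with H_REG_CRQ.
 */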
static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter, bool reset)
{
        struct device *dev = &adapter->vdev->dev;
        unsigned long timeout = msecs_to_jiffies(20000);
        u64 old_num_rx_queues = adapter->req_rx_queues;
        u64 old_num_tx_queues = adapter->req_tx_queues;
        int rc;

        adapter->from_passive_init = false;

        rc = ibmvnic_send_crq_init(adapter);
        if (rc) {
                dev_err(dev, "Send crq init failed with error %d\n", rc);
                return rc;
        }

        if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
                dev_err(dev, "Initialization sequence timed out\n");
                return -ETIMEDOUT;
        }

        if (adapter->init_done_rc) {
                release_crq_queue(adapter);
                dev_err(dev, "CRQ-init failed, %d\n", adapter->init_done_rc);
                return adapter->init_done_rc;
        }

        if (adapter->from_passive_init) {
                adapter->state = VNIC_OPEN;
                adapter->from_passive_init = false;
                dev_err(dev, "CRQ-init failed, passive-init\n");
                return -EINVAL;
        }

        if (reset &&
            test_bit(0, &adapter->resetting) && !adapter->wait_for_reset &&
            adapter->reset_reason != VNIC_RESET_MOBILITY) {
                if (adapter->req_rx_queues != old_num_rx_queues ||
                    adapter->req_tx_queues != old_num_tx_queues) {
                        release_sub_crqs(adapter, 0);
                        rc = init_sub_crqs(adapter);
                } else {
                        rc = reset_sub_crq_queues(adapter);
                }
        } else {
                rc = init_sub_crqs(adapter);
        }

        if (rc) {
                dev_err(dev, "Initialization of sub crqs failed\n");
                release_crq_queue(adapter);
                return rc;
        }

        rc = init_sub_crq_irqs(adapter);
        if (rc) {
                dev_err(dev, "Failed to initialize sub crq irqs\n");
                release_crq_queue(adapter);
        }

        return rc;
}
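/* The -EAGAIN value surfaced through init_done_rc above is a contract
 * with ibmvnic_probe() below: ibmvnic_handle_crq() completes init_done
 * with -EAGAIN when a transport event or an unexpected partner INIT
 * arrives mid-initialization, and probe loops on exactly that value,
 * releasing and re-registering the CRQ before trying again. Any other
 * nonzero rc ends the attempt and releases the CRQ.
 */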
static struct device_attribute dev_attr_failover;

static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
{
        struct ibmvnic_adapter *adapter;
        struct net_device *netdev;
        unsigned char *mac_addr_p;
        unsigned long flags;
        bool init_success;
        int rc;

        dev_dbg(&dev->dev, "entering ibmvnic_probe for UA 0x%x\n",
                dev->unit_address);

        mac_addr_p = (unsigned char *)vio_get_attribute(dev,
                                                        VETH_MAC_ADDR, NULL);
        if (!mac_addr_p) {
                dev_err(&dev->dev,
                        "(%s:%3.3d) ERROR: Can't find MAC_ADDR attribute\n",
                        __FILE__, __LINE__);
                return 0;
        }

        netdev = alloc_etherdev_mq(sizeof(struct ibmvnic_adapter),
                                   IBMVNIC_MAX_QUEUES);
        if (!netdev)
                return -ENOMEM;

        adapter = netdev_priv(netdev);
        adapter->state = VNIC_PROBING;
        dev_set_drvdata(&dev->dev, netdev);
        adapter->vdev = dev;
        adapter->netdev = netdev;
        adapter->login_pending = false;
        memset(&adapter->map_ids, 0, sizeof(adapter->map_ids));
        /* map_ids start at 1, so ensure map_id 0 is always "in-use" */
        bitmap_set(adapter->map_ids, 0, 1);

        ether_addr_copy(adapter->mac_addr, mac_addr_p);
        eth_hw_addr_set(netdev, adapter->mac_addr);
        netdev->irq = dev->irq;
        netdev->netdev_ops = &ibmvnic_netdev_ops;
        netdev->ethtool_ops = &ibmvnic_ethtool_ops;
        SET_NETDEV_DEV(netdev, &dev->dev);

        INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset);
        INIT_DELAYED_WORK(&adapter->ibmvnic_delayed_reset,
                          __ibmvnic_delayed_reset);
        INIT_LIST_HEAD(&adapter->rwi_list);
        spin_lock_init(&adapter->rwi_lock);
        spin_lock_init(&adapter->state_lock);
        mutex_init(&adapter->fw_lock);
        init_completion(&adapter->probe_done);
        init_completion(&adapter->init_done);
        init_completion(&adapter->fw_done);
        init_completion(&adapter->reset_done);
        init_completion(&adapter->stats_done);
        clear_bit(0, &adapter->resetting);
        adapter->prev_rx_buf_sz = 0;
        adapter->prev_mtu = 0;

        init_success = false;
        do {
                reinit_init_done(adapter);

                /* clear any failovers we got in the previous pass
                 * since we are reinitializing the CRQ
                 */
                adapter->failover_pending = false;

                /* If we had already initialized CRQ, we may have one or
                 * more resets queued already. Discard those and release
                 * the CRQ before initializing the CRQ again.
                 */
                release_crq_queue(adapter);

                /* Since we are still in PROBING state, __ibmvnic_reset()
                 * will not access the ->rwi_list and since we released CRQ,
                 * we won't get _new_ transport events. But there may be an
                 * ongoing ibmvnic_reset() call. So serialize access to
                 * rwi_list. If we win the race, ibmvnic_reset() could add
                 * a reset after we purged but that's ok - we just may end
                 * up with an extra reset (i.e. similar to having two or more
                 * resets in the queue at once).
                 * CHECK.
                 */
                spin_lock_irqsave(&adapter->rwi_lock, flags);
                flush_reset_queue(adapter);
                spin_unlock_irqrestore(&adapter->rwi_lock, flags);

                rc = init_crq_queue(adapter);
                if (rc) {
                        dev_err(&dev->dev, "Couldn't initialize crq. rc=%d\n",
                                rc);
                        goto ibmvnic_init_fail;
                }

                rc = ibmvnic_reset_init(adapter, false);
        } while (rc == -EAGAIN);

        /* We are ignoring the error from ibmvnic_reset_init() assuming that the
         * partner is not ready. CRQ is not active. When the partner becomes
         * ready, we will do the passive init reset.
         */

        if (!rc)
                init_success = true;

        rc = init_stats_buffers(adapter);
        if (rc)
                goto ibmvnic_init_fail;

        rc = init_stats_token(adapter);
        if (rc)
                goto ibmvnic_stats_fail;

        rc = device_create_file(&dev->dev, &dev_attr_failover);
        if (rc)
                goto ibmvnic_dev_file_err;

        netif_carrier_off(netdev);

        if (init_success) {
                adapter->state = VNIC_PROBED;
                netdev->mtu = adapter->req_mtu - ETH_HLEN;
                netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
                netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
        } else {
                adapter->state = VNIC_DOWN;
        }

        adapter->wait_for_reset = false;
        adapter->last_reset_time = jiffies;

        rc = register_netdev(netdev);
        if (rc) {
                dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc);
                goto ibmvnic_register_fail;
        }
        dev_info(&dev->dev, "ibmvnic registered\n");

        complete(&adapter->probe_done);

        return 0;

ibmvnic_register_fail:
        device_remove_file(&dev->dev, &dev_attr_failover);

ibmvnic_dev_file_err:
        release_stats_token(adapter);

ibmvnic_stats_fail:
        release_stats_buffers(adapter);

ibmvnic_init_fail:
        release_sub_crqs(adapter, 1);
        release_crq_queue(adapter);

        /* cleanup worker thread after releasing CRQ so we don't get
         * transport events (i.e. new work items for the worker thread).
         */
        adapter->state = VNIC_REMOVING;
        complete(&adapter->probe_done);
        flush_work(&adapter->ibmvnic_reset);
        flush_delayed_work(&adapter->ibmvnic_delayed_reset);

        flush_reset_queue(adapter);

        mutex_destroy(&adapter->fw_lock);
        free_netdev(netdev);

        return rc;
}
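/* If probe finishes with init_success == false, the netdev is
 * registered but the adapter is left in VNIC_DOWN with no negotiated
 * parameters (note the MTU fields are only set on success). Recovery
 * relies on the "Partner initialized" CRQ in ibmvnic_handle_crq():
 * receiving IBMVNIC_CRQ_INIT while in VNIC_DOWN schedules
 * VNIC_RESET_PASSIVE_INIT, which re-runs initialization once the
 * server side is ready.
 */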
static void ibmvnic_remove(struct vio_dev *dev)
{
        struct net_device *netdev = dev_get_drvdata(&dev->dev);
        struct ibmvnic_adapter *adapter = netdev_priv(netdev);
        unsigned long flags;

        spin_lock_irqsave(&adapter->state_lock, flags);

        /* If ibmvnic_reset() is scheduling a reset, wait for it to
         * finish. Then, set the state to REMOVING to prevent it from
         * scheduling any more work and to have reset functions ignore
         * any resets that have already been scheduled. Drop the lock
         * after setting state, so __ibmvnic_reset() which is called
         * from the flush_work() below, can make progress.
         */
        spin_lock(&adapter->rwi_lock);
        adapter->state = VNIC_REMOVING;
        spin_unlock(&adapter->rwi_lock);

        spin_unlock_irqrestore(&adapter->state_lock, flags);

        flush_work(&adapter->ibmvnic_reset);
        flush_delayed_work(&adapter->ibmvnic_delayed_reset);

        rtnl_lock();
        unregister_netdevice(netdev);

        release_resources(adapter);
        release_rx_pools(adapter);
        release_tx_pools(adapter);
        release_sub_crqs(adapter, 1);
        release_crq_queue(adapter);

        release_stats_token(adapter);
        release_stats_buffers(adapter);

        adapter->state = VNIC_REMOVED;

        rtnl_unlock();
        mutex_destroy(&adapter->fw_lock);
        device_remove_file(&dev->dev, &dev_attr_failover);
        free_netdev(netdev);
        dev_set_drvdata(&dev->dev, NULL);
}

static ssize_t failover_store(struct device *dev, struct device_attribute *attr,
                              const char *buf, size_t count)
{
        struct net_device *netdev = dev_get_drvdata(dev);
        struct ibmvnic_adapter *adapter = netdev_priv(netdev);
        unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
        __be64 session_token;
        long rc;

        if (!sysfs_streq(buf, "1"))
                return -EINVAL;

        rc = plpar_hcall(H_VIOCTL, retbuf, adapter->vdev->unit_address,
                         H_GET_SESSION_TOKEN, 0, 0, 0);
        if (rc) {
                netdev_err(netdev, "Couldn't retrieve session token, rc %ld\n",
                           rc);
                goto last_resort;
        }

        session_token = (__be64)retbuf[0];
        netdev_dbg(netdev, "Initiating client failover, session id %llx\n",
                   be64_to_cpu(session_token));
        rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
                                H_SESSION_ERR_DETECTED, session_token, 0, 0);
        if (rc) {
                netdev_err(netdev,
                           "H_VIOCTL initiated failover failed, rc %ld\n",
                           rc);
                goto last_resort;
        }

        return count;

last_resort:
        netdev_dbg(netdev, "Trying to send CRQ_CMD, the last resort\n");
        ibmvnic_reset(adapter, VNIC_RESET_FAILOVER);

        return count;
}
static DEVICE_ATTR_WO(failover);
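/* The failover attribute registered in ibmvnic_probe() appears under
 * the vio device in sysfs; with a hypothetical unit address 30000002
 * a manually triggered failover would look roughly like:
 *
 *      # echo 1 > /sys/devices/vio/30000002/failover
 *
 * which runs failover_store() above: fetch the session token via
 * H_VIOCTL/H_GET_SESSION_TOKEN, then signal H_SESSION_ERR_DETECTED so
 * the backing device fails over, falling back to a plain
 * VNIC_RESET_FAILOVER reset if either hcall fails.
 */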
static unsigned long ibmvnic_get_desired_dma(struct vio_dev *vdev)
{
        struct net_device *netdev = dev_get_drvdata(&vdev->dev);
        struct ibmvnic_adapter *adapter;
        struct iommu_table *tbl;
        unsigned long ret = 0;
        int i;

        tbl = get_iommu_table_base(&vdev->dev);

        /* netdev inits at probe time along with the structures we need below */
        if (!netdev)
                return IOMMU_PAGE_ALIGN(IBMVNIC_IO_ENTITLEMENT_DEFAULT, tbl);

        adapter = netdev_priv(netdev);

        ret += PAGE_SIZE; /* the crq message queue */
        ret += IOMMU_PAGE_ALIGN(sizeof(struct ibmvnic_statistics), tbl);

        for (i = 0; i < adapter->req_tx_queues + adapter->req_rx_queues; i++)
                ret += 4 * PAGE_SIZE; /* the scrq message queue */

        for (i = 0; i < adapter->num_active_rx_pools; i++)
                ret += adapter->rx_pool[i].size *
                       IOMMU_PAGE_ALIGN(adapter->rx_pool[i].buff_size, tbl);

        return ret;
}

static int ibmvnic_resume(struct device *dev)
{
        struct net_device *netdev = dev_get_drvdata(dev);
        struct ibmvnic_adapter *adapter = netdev_priv(netdev);

        if (adapter->state != VNIC_OPEN)
                return 0;

        tasklet_schedule(&adapter->tasklet);

        return 0;
}

static const struct vio_device_id ibmvnic_device_table[] = {
        {"network", "IBM,vnic"},
        {"", "" }
};
MODULE_DEVICE_TABLE(vio, ibmvnic_device_table);

static const struct dev_pm_ops ibmvnic_pm_ops = {
        .resume = ibmvnic_resume
};

static struct vio_driver ibmvnic_driver = {
        .id_table       = ibmvnic_device_table,
        .probe          = ibmvnic_probe,
        .remove         = ibmvnic_remove,
        .get_desired_dma = ibmvnic_get_desired_dma,
        .name           = ibmvnic_driver_name,
        .pm             = &ibmvnic_pm_ops,
};

/* module functions */
static int __init ibmvnic_module_init(void)
{
        pr_info("%s: %s %s\n", ibmvnic_driver_name, ibmvnic_driver_string,
                IBMVNIC_DRIVER_VERSION);

        return vio_register_driver(&ibmvnic_driver);
}

static void __exit ibmvnic_module_exit(void)
{
        vio_unregister_driver(&ibmvnic_driver);
}

module_init(ibmvnic_module_init);
module_exit(ibmvnic_module_exit);
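/* Usage sketch: the driver binds to vio devices matching the
 * {"network", "IBM,vnic"} table above. Loading it by hand on an LPAR
 * would look roughly like:
 *
 *      # modprobe ibmvnic
 *      # dmesg | tail -1
 *      ibmvnic: <driver string> <version>
 *
 * where the line comes from the pr_info() in ibmvnic_module_init()
 * and <version> is IBMVNIC_DRIVER_VERSION from ibmvnic.h.
 */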