// SPDX-License-Identifier: GPL-2.0-or-later
/**************************************************************************/
/*                                                                        */
/*  IBM System i and System p Virtual NIC Device Driver                   */
/*  Copyright (C) 2014 IBM Corp.                                          */
/*  Santiago Leon (santi_leon@yahoo.com)                                  */
/*  Thomas Falcon (tlfalcon@linux.vnet.ibm.com)                           */
/*  John Allen (jallen@linux.vnet.ibm.com)                                */
/*                                                                        */
/*                                                                        */
/* This module contains the implementation of a virtual ethernet device  */
/* for use with IBM i/p Series LPAR Linux. It utilizes the logical LAN   */
/* option of the RS/6000 Platform Architecture to interface with virtual */
/* ethernet NICs that are presented to the partition by the hypervisor.  */
/*                                                                        */
/* Messages are passed between the VNIC driver and the VNIC server using */
/* Command/Response Queues (CRQs) and sub CRQs (sCRQs). CRQs are used to */
/* issue and receive commands that initiate communication with the       */
/* server on driver initialization. Sub CRQs (sCRQs) are similar to CRQs,*/
/* but are used by the driver to notify the server that a packet is      */
/* ready for transmission or that a buffer has been added to receive a   */
/* packet. Subsequently, sCRQs are used by the server to notify the      */
/* driver that a packet transmission has been completed or that a packet */
/* has been received and placed in a waiting buffer.                     */
/*                                                                        */
/* In lieu of a more conventional "on-the-fly" DMA mapping strategy in   */
/* which skbs are DMA mapped and immediately unmapped when the transmit  */
/* or receive has been completed, the VNIC driver is required to use     */
/* "long term mapping". This entails that large, contiguous DMA mapped   */
/* buffers are allocated on driver initialization and these buffers are  */
/* then continuously reused to pass skbs to and from the VNIC server.    */
/*                                                                        */
/**************************************************************************/

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/completion.h>
#include <linux/ioport.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/ethtool.h>
#include <linux/proc_fs.h>
#include <linux/if_arp.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/irq.h>
#include <linux/kthread.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <net/net_namespace.h>
#include <asm/hvcall.h>
#include <linux/atomic.h>
#include <asm/vio.h>
#include <asm/iommu.h>
#include <linux/uaccess.h>
#include <asm/firmware.h>
#include <linux/workqueue.h>
#include <linux/if_vlan.h>
#include <linux/utsname.h>

#include "ibmvnic.h"

static const char ibmvnic_driver_name[] = "ibmvnic";
static const char ibmvnic_driver_string[] = "IBM System i/p Virtual NIC Driver";

MODULE_AUTHOR("Santiago Leon");
MODULE_DESCRIPTION("IBM System i/p Virtual NIC Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(IBMVNIC_DRIVER_VERSION);

static int ibmvnic_version = IBMVNIC_INITIAL_VERSION;
static int ibmvnic_remove(struct vio_dev *);
static void release_sub_crqs(struct ibmvnic_adapter *, bool);
static int ibmvnic_reset_crq(struct ibmvnic_adapter *);
static int ibmvnic_send_crq_init(struct ibmvnic_adapter *);
static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *);
static int ibmvnic_send_crq(struct ibmvnic_adapter *, union ibmvnic_crq *);
static int send_subcrq_indirect(struct ibmvnic_adapter *, u64, u64, u64);
static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance);
static int enable_scrq_irq(struct ibmvnic_adapter *,
			   struct ibmvnic_sub_crq_queue *);
static int disable_scrq_irq(struct ibmvnic_adapter *,
			    struct ibmvnic_sub_crq_queue *);
static int pending_scrq(struct ibmvnic_adapter *,
			struct ibmvnic_sub_crq_queue *);
static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *,
					struct ibmvnic_sub_crq_queue *);
static int ibmvnic_poll(struct napi_struct *napi, int data);
static void send_query_map(struct ibmvnic_adapter *adapter);
static int send_request_map(struct ibmvnic_adapter *, dma_addr_t, __be32, u8);
static int send_request_unmap(struct ibmvnic_adapter *, u8);
static int send_login(struct ibmvnic_adapter *adapter);
static void send_query_cap(struct ibmvnic_adapter *adapter);
static int init_sub_crqs(struct ibmvnic_adapter *);
static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter);
static int ibmvnic_reset_init(struct ibmvnic_adapter *, bool reset);
static void release_crq_queue(struct ibmvnic_adapter *);
static int __ibmvnic_set_mac(struct net_device *, u8 *);
static int init_crq_queue(struct ibmvnic_adapter *adapter);
static int send_query_phys_parms(struct ibmvnic_adapter *adapter);

struct ibmvnic_stat {
	char name[ETH_GSTRING_LEN];
	int offset;
};

#define IBMVNIC_STAT_OFF(stat) (offsetof(struct ibmvnic_adapter, stats) + \
			     offsetof(struct ibmvnic_statistics, stat))
#define IBMVNIC_GET_STAT(a, off) (*((u64 *)(((unsigned long)(a)) + (off))))

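/* A sketch of how the table below is consumed (the variables here are
 * hypothetical; the driver's ethtool stats callback does essentially this,
 * plus an endianness conversion since the counters are filled in big
 * endian by the VNIC server): each entry records a byte offset into
 * struct ibmvnic_adapter, and IBMVNIC_GET_STAT() reads a u64 there, e.g.
 *
 *	for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++)
 *		data[i] = IBMVNIC_GET_STAT(adapter, ibmvnic_stats[i].offset);
 */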
static const struct ibmvnic_stat ibmvnic_stats[] = {
	{"rx_packets", IBMVNIC_STAT_OFF(rx_packets)},
	{"rx_bytes", IBMVNIC_STAT_OFF(rx_bytes)},
	{"tx_packets", IBMVNIC_STAT_OFF(tx_packets)},
	{"tx_bytes", IBMVNIC_STAT_OFF(tx_bytes)},
	{"ucast_tx_packets", IBMVNIC_STAT_OFF(ucast_tx_packets)},
	{"ucast_rx_packets", IBMVNIC_STAT_OFF(ucast_rx_packets)},
	{"mcast_tx_packets", IBMVNIC_STAT_OFF(mcast_tx_packets)},
	{"mcast_rx_packets", IBMVNIC_STAT_OFF(mcast_rx_packets)},
	{"bcast_tx_packets", IBMVNIC_STAT_OFF(bcast_tx_packets)},
	{"bcast_rx_packets", IBMVNIC_STAT_OFF(bcast_rx_packets)},
	{"align_errors", IBMVNIC_STAT_OFF(align_errors)},
	{"fcs_errors", IBMVNIC_STAT_OFF(fcs_errors)},
	{"single_collision_frames", IBMVNIC_STAT_OFF(single_collision_frames)},
	{"multi_collision_frames", IBMVNIC_STAT_OFF(multi_collision_frames)},
	{"sqe_test_errors", IBMVNIC_STAT_OFF(sqe_test_errors)},
	{"deferred_tx", IBMVNIC_STAT_OFF(deferred_tx)},
	{"late_collisions", IBMVNIC_STAT_OFF(late_collisions)},
	{"excess_collisions", IBMVNIC_STAT_OFF(excess_collisions)},
	{"internal_mac_tx_errors", IBMVNIC_STAT_OFF(internal_mac_tx_errors)},
	{"carrier_sense", IBMVNIC_STAT_OFF(carrier_sense)},
	{"too_long_frames", IBMVNIC_STAT_OFF(too_long_frames)},
	{"internal_mac_rx_errors", IBMVNIC_STAT_OFF(internal_mac_rx_errors)},
};

static long h_reg_sub_crq(unsigned long unit_address, unsigned long token,
			  unsigned long length, unsigned long *number,
			  unsigned long *irq)
{
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	long rc;

	rc = plpar_hcall(H_REG_SUB_CRQ, retbuf, unit_address, token, length);
	*number = retbuf[0];
	*irq = retbuf[1];

	return rc;
}
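
/* plpar_hcall() returns up to PLPAR_HCALL_BUFSIZE words of hypervisor
 * output in retbuf; for H_REG_SUB_CRQ the first two words are the new
 * sub-CRQ's queue number and its interrupt source, which h_reg_sub_crq()
 * above hands back to the sub-CRQ setup code through *number and *irq.
 */
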
/**
 * ibmvnic_wait_for_completion - Check device state and wait for completion
 * @adapter: private device data
 * @comp_done: completion structure to wait for
 * @timeout: time to wait in milliseconds
 *
 * Wait for a completion signal or until the timeout limit is reached
 * while checking that the device is still active.
 */
static int ibmvnic_wait_for_completion(struct ibmvnic_adapter *adapter,
				       struct completion *comp_done,
				       unsigned long timeout)
{
	struct net_device *netdev;
	unsigned long div_timeout;
	u8 retry;

	netdev = adapter->netdev;
	retry = 5;
	div_timeout = msecs_to_jiffies(timeout / retry);
	while (true) {
		if (!adapter->crq.active) {
			netdev_err(netdev, "Device down!\n");
			return -ENODEV;
		}
		if (!retry--)
			break;
		if (wait_for_completion_timeout(comp_done, div_timeout))
			return 0;
	}
	netdev_err(netdev, "Operation timed out.\n");
	return -ETIMEDOUT;
}

static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb, int size)
{
	struct device *dev = &adapter->vdev->dev;
	int rc;

	ltb->size = size;
	ltb->buff = dma_alloc_coherent(dev, ltb->size, &ltb->addr,
				       GFP_KERNEL);

	if (!ltb->buff) {
		dev_err(dev, "Couldn't alloc long term buffer\n");
		return -ENOMEM;
	}
	ltb->map_id = adapter->map_id;
	adapter->map_id++;

	mutex_lock(&adapter->fw_lock);
	adapter->fw_done_rc = 0;
	reinit_completion(&adapter->fw_done);
	rc = send_request_map(adapter, ltb->addr,
			      ltb->size, ltb->map_id);
	if (rc) {
		dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}

	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
	if (rc) {
		dev_err(dev,
			"Long term map request aborted or timed out, rc = %d\n",
			rc);
		dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}

	if (adapter->fw_done_rc) {
		dev_err(dev, "Couldn't map long term buffer, rc = %d\n",
			adapter->fw_done_rc);
		dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
		mutex_unlock(&adapter->fw_lock);
		return -1;
	}
	mutex_unlock(&adapter->fw_lock);
	return 0;
}
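
/* A sketch of the long-term-buffer handshake implemented above (all names
 * are from this driver): the buffer is allocated coherently, tagged with a
 * driver-chosen map_id, and advertised to the VNIC server with a
 * REQUEST_MAP CRQ via send_request_map(). The server's response completes
 * adapter->fw_done and leaves its return code in adapter->fw_done_rc;
 * adapter->fw_lock serializes the callers that share that completion.
 */
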
static void free_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb)
{
	struct device *dev = &adapter->vdev->dev;

	if (!ltb->buff)
		return;

	/* VIOS automatically unmaps the long term buffer at remote
	 * end for the following resets:
	 * FAILOVER, MOBILITY, TIMEOUT.
	 */
	if (adapter->reset_reason != VNIC_RESET_FAILOVER &&
	    adapter->reset_reason != VNIC_RESET_MOBILITY &&
	    adapter->reset_reason != VNIC_RESET_TIMEOUT)
		send_request_unmap(adapter, ltb->map_id);
	dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
}

static int reset_long_term_buff(struct ibmvnic_long_term_buff *ltb)
{
	if (!ltb->buff)
		return -EINVAL;

	memset(ltb->buff, 0, ltb->size);
	return 0;
}

static void deactivate_rx_pools(struct ibmvnic_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_active_rx_pools; i++)
		adapter->rx_pool[i].active = 0;
}

static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
			      struct ibmvnic_rx_pool *pool)
{
	int count = pool->size - atomic_read(&pool->available);
	u64 handle = adapter->rx_scrq[pool->index]->handle;
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_ind_xmit_queue *ind_bufp;
	struct ibmvnic_sub_crq_queue *rx_scrq;
	union sub_crq *sub_crq;
	int buffers_added = 0;
	unsigned long lpar_rc;
	struct sk_buff *skb;
	unsigned int offset;
	dma_addr_t dma_addr;
	unsigned char *dst;
	int shift = 0;
	int index;
	int i;

	if (!pool->active)
		return;

	rx_scrq = adapter->rx_scrq[pool->index];
	ind_bufp = &rx_scrq->ind_buf;
	for (i = 0; i < count; ++i) {
		skb = netdev_alloc_skb(adapter->netdev, pool->buff_size);
		if (!skb) {
			dev_err(dev, "Couldn't replenish rx buff\n");
			adapter->replenish_no_mem++;
			break;
		}

		index = pool->free_map[pool->next_free];

		if (pool->rx_buff[index].skb)
			dev_err(dev, "Inconsistent free_map!\n");

		/* Copy the skb to the long term mapped DMA buffer */
		offset = index * pool->buff_size;
		dst = pool->long_term_buff.buff + offset;
		memset(dst, 0, pool->buff_size);
		dma_addr = pool->long_term_buff.addr + offset;
		pool->rx_buff[index].data = dst;

		pool->free_map[pool->next_free] = IBMVNIC_INVALID_MAP;
		pool->rx_buff[index].dma = dma_addr;
		pool->rx_buff[index].skb = skb;
		pool->rx_buff[index].pool_index = pool->index;
		pool->rx_buff[index].size = pool->buff_size;

		sub_crq = &ind_bufp->indir_arr[ind_bufp->index++];
		memset(sub_crq, 0, sizeof(*sub_crq));
		sub_crq->rx_add.first = IBMVNIC_CRQ_CMD;
		sub_crq->rx_add.correlator =
		    cpu_to_be64((u64)&pool->rx_buff[index]);
		sub_crq->rx_add.ioba = cpu_to_be32(dma_addr);
		sub_crq->rx_add.map_id = pool->long_term_buff.map_id;

		/* The length field of the sCRQ is defined to be 24 bits so the
		 * buffer size needs to be left shifted by a byte before it is
		 * converted to big endian to prevent the last byte from being
		 * truncated.
		 */
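		/* Worked example (illustrative values): for a 0x2000-byte
		 * buffer, 0x2000 << 8 = 0x200000, and cpu_to_be32(0x200000)
		 * stores the bytes 00 20 00 00. The first three bytes carry
		 * the size (0x002000), so dropping the fourth byte for the
		 * 24-bit field loses nothing.
		 */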
#ifdef __LITTLE_ENDIAN__
		shift = 8;
#endif
		sub_crq->rx_add.len = cpu_to_be32(pool->buff_size << shift);
		pool->next_free = (pool->next_free + 1) % pool->size;
		if (ind_bufp->index == IBMVNIC_MAX_IND_DESCS ||
		    i == count - 1) {
			lpar_rc =
				send_subcrq_indirect(adapter, handle,
						     (u64)ind_bufp->indir_dma,
						     (u64)ind_bufp->index);
			if (lpar_rc != H_SUCCESS)
				goto failure;
			buffers_added += ind_bufp->index;
			adapter->replenish_add_buff_success += ind_bufp->index;
			ind_bufp->index = 0;
		}
	}
	atomic_add(buffers_added, &pool->available);
	return;

failure:
	if (lpar_rc != H_PARAMETER && lpar_rc != H_CLOSED)
		dev_err_ratelimited(dev, "rx: replenish packet buffer failed\n");
	for (i = ind_bufp->index - 1; i >= 0; --i) {
		struct ibmvnic_rx_buff *rx_buff;

		pool->next_free = pool->next_free == 0 ?
				  pool->size - 1 : pool->next_free - 1;
		sub_crq = &ind_bufp->indir_arr[i];
		rx_buff = (struct ibmvnic_rx_buff *)
			  be64_to_cpu(sub_crq->rx_add.correlator);
		index = (int)(rx_buff - pool->rx_buff);
		pool->free_map[pool->next_free] = index;
		dev_kfree_skb_any(pool->rx_buff[index].skb);
		pool->rx_buff[index].skb = NULL;
	}
	adapter->replenish_add_buff_failure += ind_bufp->index;
	atomic_add(buffers_added, &pool->available);
	ind_bufp->index = 0;
	if (lpar_rc == H_CLOSED || adapter->failover_pending) {
		/* Disable buffer pool replenishment and report carrier off if
		 * queue is closed or pending failover.
		 * Firmware guarantees that a signal will be sent to the
		 * driver, triggering a reset.
		 */
		deactivate_rx_pools(adapter);
		netif_carrier_off(adapter->netdev);
	}
}

static void replenish_pools(struct ibmvnic_adapter *adapter)
{
	int i;

	adapter->replenish_task_cycles++;
	for (i = 0; i < adapter->num_active_rx_pools; i++) {
		if (adapter->rx_pool[i].active)
			replenish_rx_pool(adapter, &adapter->rx_pool[i]);
	}

	netdev_dbg(adapter->netdev, "Replenished %d pools\n", i);
}

static void release_stats_buffers(struct ibmvnic_adapter *adapter)
{
	kfree(adapter->tx_stats_buffers);
	kfree(adapter->rx_stats_buffers);
	adapter->tx_stats_buffers = NULL;
	adapter->rx_stats_buffers = NULL;
}

static int init_stats_buffers(struct ibmvnic_adapter *adapter)
{
	adapter->tx_stats_buffers =
				kcalloc(IBMVNIC_MAX_QUEUES,
					sizeof(struct ibmvnic_tx_queue_stats),
					GFP_KERNEL);
	if (!adapter->tx_stats_buffers)
		return -ENOMEM;

	adapter->rx_stats_buffers =
				kcalloc(IBMVNIC_MAX_QUEUES,
					sizeof(struct ibmvnic_rx_queue_stats),
					GFP_KERNEL);
	if (!adapter->rx_stats_buffers)
		return -ENOMEM;

	return 0;
}

static void release_stats_token(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;

	if (!adapter->stats_token)
		return;

	dma_unmap_single(dev, adapter->stats_token,
			 sizeof(struct ibmvnic_statistics),
			 DMA_FROM_DEVICE);
	adapter->stats_token = 0;
}

static int init_stats_token(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	dma_addr_t stok;

	stok = dma_map_single(dev, &adapter->stats,
			      sizeof(struct ibmvnic_statistics),
			      DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, stok)) {
		dev_err(dev, "Couldn't map stats buffer\n");
		return -1;
	}

	adapter->stats_token = stok;
	netdev_dbg(adapter->netdev, "Stats token initialized (%llx)\n", stok);
	return 0;
}

static int reset_rx_pools(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_rx_pool *rx_pool;
	u64 buff_size;
	int rx_scrqs;
	int i, j, rc;

	if (!adapter->rx_pool)
		return -1;

	buff_size = adapter->cur_rx_buf_sz;
	rx_scrqs = adapter->num_active_rx_pools;
	for (i = 0; i < rx_scrqs; i++) {
		rx_pool = &adapter->rx_pool[i];

		netdev_dbg(adapter->netdev, "Re-setting rx_pool[%d]\n", i);

		if (rx_pool->buff_size != buff_size) {
			free_long_term_buff(adapter, &rx_pool->long_term_buff);
			rx_pool->buff_size = ALIGN(buff_size, L1_CACHE_BYTES);
			rc = alloc_long_term_buff(adapter,
						  &rx_pool->long_term_buff,
						  rx_pool->size *
						  rx_pool->buff_size);
		} else {
			rc = reset_long_term_buff(&rx_pool->long_term_buff);
		}

		if (rc)
			return rc;

		for (j = 0; j < rx_pool->size; j++)
			rx_pool->free_map[j] = j;

		memset(rx_pool->rx_buff, 0,
		       rx_pool->size * sizeof(struct ibmvnic_rx_buff));

		atomic_set(&rx_pool->available, 0);
		rx_pool->next_alloc = 0;
		rx_pool->next_free = 0;
		rx_pool->active = 1;
	}

	return 0;
}

static void release_rx_pools(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_rx_pool *rx_pool;
	int i, j;

	if (!adapter->rx_pool)
		return;

	for (i = 0; i < adapter->num_active_rx_pools; i++) {
		rx_pool = &adapter->rx_pool[i];

		netdev_dbg(adapter->netdev, "Releasing rx_pool[%d]\n", i);

		kfree(rx_pool->free_map);
		free_long_term_buff(adapter, &rx_pool->long_term_buff);

		if (!rx_pool->rx_buff)
			continue;

		for (j = 0; j < rx_pool->size; j++) {
			if (rx_pool->rx_buff[j].skb) {
				dev_kfree_skb_any(rx_pool->rx_buff[j].skb);
				rx_pool->rx_buff[j].skb = NULL;
			}
		}

		kfree(rx_pool->rx_buff);
	}

	kfree(adapter->rx_pool);
	adapter->rx_pool = NULL;
	adapter->num_active_rx_pools = 0;
}

static int init_rx_pools(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_rx_pool *rx_pool;
	int rxadd_subcrqs;
	u64 buff_size;
	int i, j;

	rxadd_subcrqs = adapter->num_active_rx_scrqs;
	buff_size = adapter->cur_rx_buf_sz;

	adapter->rx_pool = kcalloc(rxadd_subcrqs,
				   sizeof(struct ibmvnic_rx_pool),
				   GFP_KERNEL);
	if (!adapter->rx_pool) {
		dev_err(dev, "Failed to allocate rx pools\n");
		return -1;
	}

	adapter->num_active_rx_pools = rxadd_subcrqs;

	for (i = 0; i < rxadd_subcrqs; i++) {
		rx_pool = &adapter->rx_pool[i];

		netdev_dbg(adapter->netdev,
			   "Initializing rx_pool[%d], %lld buffs, %lld bytes each\n",
			   i, adapter->req_rx_add_entries_per_subcrq,
			   buff_size);

		rx_pool->size = adapter->req_rx_add_entries_per_subcrq;
		rx_pool->index = i;
		rx_pool->buff_size = ALIGN(buff_size, L1_CACHE_BYTES);
		rx_pool->active = 1;

		rx_pool->free_map = kcalloc(rx_pool->size, sizeof(int),
					    GFP_KERNEL);
		if (!rx_pool->free_map) {
			release_rx_pools(adapter);
			return -1;
		}

		rx_pool->rx_buff = kcalloc(rx_pool->size,
					   sizeof(struct ibmvnic_rx_buff),
					   GFP_KERNEL);
		if (!rx_pool->rx_buff) {
			dev_err(dev, "Couldn't alloc rx buffers\n");
			release_rx_pools(adapter);
			return -1;
		}

		if (alloc_long_term_buff(adapter, &rx_pool->long_term_buff,
					 rx_pool->size * rx_pool->buff_size)) {
			release_rx_pools(adapter);
			return -1;
		}

		for (j = 0; j < rx_pool->size; ++j)
			rx_pool->free_map[j] = j;

		atomic_set(&rx_pool->available, 0);
		rx_pool->next_alloc = 0;
		rx_pool->next_free = 0;
	}

	return 0;
}
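
/* Illustrative sizing for the pools built above, assuming the server
 * granted req_rx_add_entries_per_subcrq = 512 and cur_rx_buf_sz = 9000
 * (rounded up to 9088 where L1_CACHE_BYTES is 128): each rx pool backs
 * its 512 buffers with one 512 * 9088 byte long-term buffer, and free_map
 * tracks which of those fixed-size slots are currently unused.
 */
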
static int reset_one_tx_pool(struct ibmvnic_tx_pool *tx_pool)
{
	int rc, i;

	rc = reset_long_term_buff(&tx_pool->long_term_buff);
	if (rc)
		return rc;

	memset(tx_pool->tx_buff, 0,
	       tx_pool->num_buffers *
	       sizeof(struct ibmvnic_tx_buff));

	for (i = 0; i < tx_pool->num_buffers; i++)
		tx_pool->free_map[i] = i;

	tx_pool->consumer_index = 0;
	tx_pool->producer_index = 0;

	return 0;
}

static int reset_tx_pools(struct ibmvnic_adapter *adapter)
{
	int tx_scrqs;
	int i, rc;

	if (!adapter->tx_pool)
		return -1;

	tx_scrqs = adapter->num_active_tx_pools;
	for (i = 0; i < tx_scrqs; i++) {
		rc = reset_one_tx_pool(&adapter->tso_pool[i]);
		if (rc)
			return rc;
		rc = reset_one_tx_pool(&adapter->tx_pool[i]);
		if (rc)
			return rc;
	}

	return 0;
}

static void release_vpd_data(struct ibmvnic_adapter *adapter)
{
	if (!adapter->vpd)
		return;

	kfree(adapter->vpd->buff);
	kfree(adapter->vpd);

	adapter->vpd = NULL;
}

static void release_one_tx_pool(struct ibmvnic_adapter *adapter,
				struct ibmvnic_tx_pool *tx_pool)
{
	kfree(tx_pool->tx_buff);
	kfree(tx_pool->free_map);
	free_long_term_buff(adapter, &tx_pool->long_term_buff);
}

static void release_tx_pools(struct ibmvnic_adapter *adapter)
{
	int i;

	if (!adapter->tx_pool)
		return;

	for (i = 0; i < adapter->num_active_tx_pools; i++) {
		release_one_tx_pool(adapter, &adapter->tx_pool[i]);
		release_one_tx_pool(adapter, &adapter->tso_pool[i]);
	}

	kfree(adapter->tx_pool);
	adapter->tx_pool = NULL;
	kfree(adapter->tso_pool);
	adapter->tso_pool = NULL;
	adapter->num_active_tx_pools = 0;
}

static int init_one_tx_pool(struct net_device *netdev,
			    struct ibmvnic_tx_pool *tx_pool,
			    int num_entries, int buf_size)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int i;

	tx_pool->tx_buff = kcalloc(num_entries,
				   sizeof(struct ibmvnic_tx_buff),
				   GFP_KERNEL);
	if (!tx_pool->tx_buff)
		return -1;

	if (alloc_long_term_buff(adapter, &tx_pool->long_term_buff,
				 num_entries * buf_size))
		return -1;

	tx_pool->free_map = kcalloc(num_entries, sizeof(int), GFP_KERNEL);
	if (!tx_pool->free_map)
		return -1;

	for (i = 0; i < num_entries; i++)
		tx_pool->free_map[i] = i;

	tx_pool->consumer_index = 0;
	tx_pool->producer_index = 0;
	tx_pool->num_buffers = num_entries;
	tx_pool->buf_size = buf_size;

	return 0;
}
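
/* init_tx_pools() below creates two pools per TX queue: a general pool
 * with req_tx_entries_per_subcrq buffers sized for an MTU-plus-VLAN-header
 * frame, and a dedicated pool of IBMVNIC_TSO_BUFS buffers of
 * IBMVNIC_TSO_BUF_SZ bytes for GSO skbs. ibmvnic_xmit() selects between
 * them with skb_is_gso().
 */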
static int init_tx_pools(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int tx_subcrqs;
	u64 buff_size;
	int i, rc;

	tx_subcrqs = adapter->num_active_tx_scrqs;
	adapter->tx_pool = kcalloc(tx_subcrqs,
				   sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
	if (!adapter->tx_pool)
		return -1;

	adapter->tso_pool = kcalloc(tx_subcrqs,
				    sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
	if (!adapter->tso_pool)
		return -1;

	adapter->num_active_tx_pools = tx_subcrqs;

	for (i = 0; i < tx_subcrqs; i++) {
		buff_size = adapter->req_mtu + VLAN_HLEN;
		buff_size = ALIGN(buff_size, L1_CACHE_BYTES);
		rc = init_one_tx_pool(netdev, &adapter->tx_pool[i],
				      adapter->req_tx_entries_per_subcrq,
				      buff_size);
		if (rc) {
			release_tx_pools(adapter);
			return rc;
		}

		rc = init_one_tx_pool(netdev, &adapter->tso_pool[i],
				      IBMVNIC_TSO_BUFS,
				      IBMVNIC_TSO_BUF_SZ);
		if (rc) {
			release_tx_pools(adapter);
			return rc;
		}
	}

	return 0;
}

static void ibmvnic_napi_enable(struct ibmvnic_adapter *adapter)
{
	int i;

	if (adapter->napi_enabled)
		return;

	for (i = 0; i < adapter->req_rx_queues; i++)
		napi_enable(&adapter->napi[i]);

	adapter->napi_enabled = true;
}

static void ibmvnic_napi_disable(struct ibmvnic_adapter *adapter)
{
	int i;

	if (!adapter->napi_enabled)
		return;

	for (i = 0; i < adapter->req_rx_queues; i++) {
		netdev_dbg(adapter->netdev, "Disabling napi[%d]\n", i);
		napi_disable(&adapter->napi[i]);
	}

	adapter->napi_enabled = false;
}

static int init_napi(struct ibmvnic_adapter *adapter)
{
	int i;

	adapter->napi = kcalloc(adapter->req_rx_queues,
				sizeof(struct napi_struct), GFP_KERNEL);
	if (!adapter->napi)
		return -ENOMEM;

	for (i = 0; i < adapter->req_rx_queues; i++) {
		netdev_dbg(adapter->netdev, "Adding napi[%d]\n", i);
		netif_napi_add(adapter->netdev, &adapter->napi[i],
			       ibmvnic_poll, NAPI_POLL_WEIGHT);
	}

	adapter->num_active_rx_napi = adapter->req_rx_queues;
	return 0;
}

static void release_napi(struct ibmvnic_adapter *adapter)
{
	int i;

	if (!adapter->napi)
		return;

	for (i = 0; i < adapter->num_active_rx_napi; i++) {
		netdev_dbg(adapter->netdev, "Releasing napi[%d]\n", i);
		netif_napi_del(&adapter->napi[i]);
	}

	kfree(adapter->napi);
	adapter->napi = NULL;
	adapter->num_active_rx_napi = 0;
	adapter->napi_enabled = false;
}

static int ibmvnic_login(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	unsigned long timeout = msecs_to_jiffies(20000);
	int retry_count = 0;
	int retries = 10;
	bool retry;
	int rc;

	do {
		retry = false;
		if (retry_count > retries) {
			netdev_warn(netdev, "Login attempts exceeded\n");
			return -1;
		}

		adapter->init_done_rc = 0;
		reinit_completion(&adapter->init_done);
		rc = send_login(adapter);
		if (rc)
			return rc;

		if (!wait_for_completion_timeout(&adapter->init_done,
						 timeout)) {
			netdev_warn(netdev, "Login timed out, retrying...\n");
			retry = true;
			adapter->init_done_rc = 0;
			retry_count++;
			continue;
		}

		if (adapter->init_done_rc == ABORTED) {
			netdev_warn(netdev, "Login aborted, retrying...\n");
			retry = true;
			adapter->init_done_rc = 0;
			retry_count++;
			/* FW or device may be busy, so
			 * wait a bit before retrying login
			 */
			msleep(500);
		} else if (adapter->init_done_rc == PARTIALSUCCESS) {
			retry_count++;
			release_sub_crqs(adapter, 1);

			retry = true;
			netdev_dbg(netdev,
				   "Received partial success, retrying...\n");
			adapter->init_done_rc = 0;
			reinit_completion(&adapter->init_done);
			send_query_cap(adapter);
			if (!wait_for_completion_timeout(&adapter->init_done,
							 timeout)) {
				netdev_warn(netdev,
					    "Capabilities query timed out\n");
				return -1;
			}

			rc = init_sub_crqs(adapter);
			if (rc) {
				netdev_warn(netdev,
					    "SCRQ initialization failed\n");
				return -1;
			}

			rc = init_sub_crq_irqs(adapter);
			if (rc) {
				netdev_warn(netdev,
					    "SCRQ irq initialization failed\n");
				return -1;
			}
		} else if (adapter->init_done_rc) {
			netdev_warn(netdev, "Adapter login failed\n");
			return -1;
		}
	} while (retry);

	__ibmvnic_set_mac(netdev, adapter->mac_addr);

	netdev_dbg(netdev, "[S:%d] Login succeeded\n", adapter->state);
	return 0;
}

static void release_login_buffer(struct ibmvnic_adapter *adapter)
{
	kfree(adapter->login_buf);
	adapter->login_buf = NULL;
}

static void release_login_rsp_buffer(struct ibmvnic_adapter *adapter)
{
	kfree(adapter->login_rsp_buf);
	adapter->login_rsp_buf = NULL;
}

static void release_resources(struct ibmvnic_adapter *adapter)
{
	release_vpd_data(adapter);

	release_tx_pools(adapter);
	release_rx_pools(adapter);

	release_napi(adapter);
	release_login_buffer(adapter);
	release_login_rsp_buffer(adapter);
}

static int set_link_state(struct ibmvnic_adapter *adapter, u8 link_state)
{
	struct net_device *netdev = adapter->netdev;
	unsigned long timeout = msecs_to_jiffies(20000);
	union ibmvnic_crq crq;
	bool resend;
	int rc;

	netdev_dbg(netdev, "setting link state %d\n", link_state);

	memset(&crq, 0, sizeof(crq));
	crq.logical_link_state.first = IBMVNIC_CRQ_CMD;
	crq.logical_link_state.cmd = LOGICAL_LINK_STATE;
	crq.logical_link_state.link_state = link_state;

	do {
		resend = false;

		reinit_completion(&adapter->init_done);
		rc = ibmvnic_send_crq(adapter, &crq);
		if (rc) {
			netdev_err(netdev, "Failed to set link state\n");
			return rc;
		}

		if (!wait_for_completion_timeout(&adapter->init_done,
						 timeout)) {
			netdev_err(netdev, "timeout setting link state\n");
			return -1;
		}

		if (adapter->init_done_rc == PARTIALSUCCESS) {
			/* Partial success, delay and re-send */
			mdelay(1000);
			resend = true;
		} else if (adapter->init_done_rc) {
			netdev_warn(netdev, "Unable to set link state, rc=%d\n",
				    adapter->init_done_rc);
			return adapter->init_done_rc;
		}
	} while (resend);

	return 0;
}

static int set_real_num_queues(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int rc;

	netdev_dbg(netdev, "Setting real tx/rx queues (%llx/%llx)\n",
		   adapter->req_tx_queues, adapter->req_rx_queues);

	rc = netif_set_real_num_tx_queues(netdev, adapter->req_tx_queues);
	if (rc) {
		netdev_err(netdev, "failed to set the number of tx queues\n");
		return rc;
	}

	rc = netif_set_real_num_rx_queues(netdev, adapter->req_rx_queues);
	if (rc)
		netdev_err(netdev, "failed to set the number of rx queues\n");

	return rc;
}
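
/* ibmvnic_get_vpd() below retrieves Vital Product Data in two steps: a
 * GET_VPD_SIZE CRQ first reports the required length (the response handler
 * stores it in adapter->vpd->len), then the buffer is allocated, DMA
 * mapped, and filled by a GET_VPD CRQ. Both steps reuse the fw_done
 * completion under fw_lock, like the long-term-map requests above.
 */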
static int ibmvnic_get_vpd(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	union ibmvnic_crq crq;
	int len = 0;
	int rc;

	if (adapter->vpd->buff)
		len = adapter->vpd->len;

	mutex_lock(&adapter->fw_lock);
	adapter->fw_done_rc = 0;
	reinit_completion(&adapter->fw_done);

	crq.get_vpd_size.first = IBMVNIC_CRQ_CMD;
	crq.get_vpd_size.cmd = GET_VPD_SIZE;
	rc = ibmvnic_send_crq(adapter, &crq);
	if (rc) {
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}

	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
	if (rc) {
		dev_err(dev, "Could not retrieve VPD size, rc = %d\n", rc);
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}
	mutex_unlock(&adapter->fw_lock);

	if (!adapter->vpd->len)
		return -ENODATA;

	if (!adapter->vpd->buff)
		adapter->vpd->buff = kzalloc(adapter->vpd->len, GFP_KERNEL);
	else if (adapter->vpd->len != len)
		adapter->vpd->buff =
			krealloc(adapter->vpd->buff,
				 adapter->vpd->len, GFP_KERNEL);

	if (!adapter->vpd->buff) {
		dev_err(dev, "Could not allocate VPD buffer\n");
		return -ENOMEM;
	}

	adapter->vpd->dma_addr =
		dma_map_single(dev, adapter->vpd->buff, adapter->vpd->len,
			       DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, adapter->vpd->dma_addr)) {
		dev_err(dev, "Could not map VPD buffer\n");
		kfree(adapter->vpd->buff);
		adapter->vpd->buff = NULL;
		return -ENOMEM;
	}

	mutex_lock(&adapter->fw_lock);
	adapter->fw_done_rc = 0;
	reinit_completion(&adapter->fw_done);

	crq.get_vpd.first = IBMVNIC_CRQ_CMD;
	crq.get_vpd.cmd = GET_VPD;
	crq.get_vpd.ioba = cpu_to_be32(adapter->vpd->dma_addr);
	crq.get_vpd.len = cpu_to_be32((u32)adapter->vpd->len);
	rc = ibmvnic_send_crq(adapter, &crq);
	if (rc) {
		kfree(adapter->vpd->buff);
		adapter->vpd->buff = NULL;
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}

	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
	if (rc) {
		dev_err(dev, "Unable to retrieve VPD, rc = %d\n", rc);
		kfree(adapter->vpd->buff);
		adapter->vpd->buff = NULL;
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}

	mutex_unlock(&adapter->fw_lock);
	return 0;
}

static int init_resources(struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int rc;

	rc = set_real_num_queues(netdev);
	if (rc)
		return rc;

	adapter->vpd = kzalloc(sizeof(*adapter->vpd), GFP_KERNEL);
	if (!adapter->vpd)
		return -ENOMEM;

	/* Vital Product Data (VPD) */
	rc = ibmvnic_get_vpd(adapter);
	if (rc) {
		netdev_err(netdev, "failed to initialize Vital Product Data (VPD)\n");
		return rc;
	}

	adapter->map_id = 1;

	rc = init_napi(adapter);
	if (rc)
		return rc;

	send_query_map(adapter);

	rc = init_rx_pools(netdev);
	if (rc)
		return rc;

	rc = init_tx_pools(netdev);
	return rc;
}

static int __ibmvnic_open(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	enum vnic_state prev_state = adapter->state;
	int i, rc;

	adapter->state = VNIC_OPENING;
	replenish_pools(adapter);
	ibmvnic_napi_enable(adapter);

	/* We're ready to receive frames, enable the sub-crq interrupts and
	 * set the logical link state to up
	 */
	for (i = 0; i < adapter->req_rx_queues; i++) {
		netdev_dbg(netdev, "Enabling rx_scrq[%d] irq\n", i);
		if (prev_state == VNIC_CLOSED)
			enable_irq(adapter->rx_scrq[i]->irq);
		enable_scrq_irq(adapter, adapter->rx_scrq[i]);
	}

	for (i = 0; i < adapter->req_tx_queues; i++) {
		netdev_dbg(netdev, "Enabling tx_scrq[%d] irq\n", i);
		if (prev_state == VNIC_CLOSED)
			enable_irq(adapter->tx_scrq[i]->irq);
		enable_scrq_irq(adapter, adapter->tx_scrq[i]);
		netdev_tx_reset_queue(netdev_get_tx_queue(netdev, i));
	}

	rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_UP);
	if (rc) {
		for (i = 0; i < adapter->req_rx_queues; i++)
			napi_disable(&adapter->napi[i]);
		release_resources(adapter);
		return rc;
	}

	netif_tx_start_all_queues(netdev);

	if (prev_state == VNIC_CLOSED) {
		for (i = 0; i < adapter->req_rx_queues; i++)
			napi_schedule(&adapter->napi[i]);
	}

	adapter->state = VNIC_OPEN;
	return rc;
}

static int ibmvnic_open(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int rc;

	ASSERT_RTNL();

	/* If device failover is pending or we are about to reset, just set
	 * device state and return. Device operation will be handled by reset
	 * routine.
	 *
	 * It should be safe to overwrite the adapter->state here. Since
	 * we hold the rtnl, either the reset has not actually started or
	 * the rtnl got dropped during the set_link_state() in do_reset().
	 * In the former case, no one else is changing the state (again we
	 * have the rtnl) and in the latter case, do_reset() will detect and
	 * honor our setting below.
	 */
	if (adapter->failover_pending || (test_bit(0, &adapter->resetting))) {
		netdev_dbg(netdev, "[S:%d FOP:%d] Resetting, deferring open\n",
			   adapter->state, adapter->failover_pending);
		adapter->state = VNIC_OPEN;
		rc = 0;
		goto out;
	}

	if (adapter->state != VNIC_CLOSED) {
		rc = ibmvnic_login(netdev);
		if (rc)
			goto out;

		rc = init_resources(adapter);
		if (rc) {
			netdev_err(netdev, "failed to initialize resources\n");
			release_resources(adapter);
			goto out;
		}
	}

	rc = __ibmvnic_open(netdev);

out:
	/* If open failed and there is a pending failover or in-progress reset,
	 * set device state and return. Device operation will be handled by
	 * reset routine. See also comments above regarding rtnl.
	 */
	if (rc &&
	    (adapter->failover_pending || (test_bit(0, &adapter->resetting)))) {
		adapter->state = VNIC_OPEN;
		rc = 0;
	}
	return rc;
}

static void clean_rx_pools(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_rx_pool *rx_pool;
	struct ibmvnic_rx_buff *rx_buff;
	u64 rx_entries;
	int rx_scrqs;
	int i, j;

	if (!adapter->rx_pool)
		return;

	rx_scrqs = adapter->num_active_rx_pools;
	rx_entries = adapter->req_rx_add_entries_per_subcrq;

	/* Free any remaining skbs in the rx buffer pools */
	for (i = 0; i < rx_scrqs; i++) {
		rx_pool = &adapter->rx_pool[i];
		if (!rx_pool || !rx_pool->rx_buff)
			continue;

		netdev_dbg(adapter->netdev, "Cleaning rx_pool[%d]\n", i);
		for (j = 0; j < rx_entries; j++) {
			rx_buff = &rx_pool->rx_buff[j];
			if (rx_buff && rx_buff->skb) {
				dev_kfree_skb_any(rx_buff->skb);
				rx_buff->skb = NULL;
			}
		}
	}
}

static void clean_one_tx_pool(struct ibmvnic_adapter *adapter,
			      struct ibmvnic_tx_pool *tx_pool)
{
	struct ibmvnic_tx_buff *tx_buff;
	u64 tx_entries;
	int i;

	if (!tx_pool || !tx_pool->tx_buff)
		return;

	tx_entries = tx_pool->num_buffers;

	for (i = 0; i < tx_entries; i++) {
		tx_buff = &tx_pool->tx_buff[i];
		if (tx_buff && tx_buff->skb) {
			dev_kfree_skb_any(tx_buff->skb);
			tx_buff->skb = NULL;
		}
	}
}

static void clean_tx_pools(struct ibmvnic_adapter *adapter)
{
	int tx_scrqs;
	int i;

	if (!adapter->tx_pool || !adapter->tso_pool)
		return;

	tx_scrqs = adapter->num_active_tx_pools;

	/* Free any remaining skbs in the tx buffer pools */
	for (i = 0; i < tx_scrqs; i++) {
		netdev_dbg(adapter->netdev, "Cleaning tx_pool[%d]\n", i);
		clean_one_tx_pool(adapter, &adapter->tx_pool[i]);
		clean_one_tx_pool(adapter, &adapter->tso_pool[i]);
	}
}

static void ibmvnic_disable_irqs(struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	if (adapter->tx_scrq) {
		for (i = 0; i < adapter->req_tx_queues; i++)
			if (adapter->tx_scrq[i]->irq) {
				netdev_dbg(netdev,
					   "Disabling tx_scrq[%d] irq\n", i);
				disable_scrq_irq(adapter, adapter->tx_scrq[i]);
				disable_irq(adapter->tx_scrq[i]->irq);
			}
	}

	if (adapter->rx_scrq) {
		for (i = 0; i < adapter->req_rx_queues; i++) {
			if (adapter->rx_scrq[i]->irq) {
				netdev_dbg(netdev,
					   "Disabling rx_scrq[%d] irq\n", i);
				disable_scrq_irq(adapter, adapter->rx_scrq[i]);
				disable_irq(adapter->rx_scrq[i]->irq);
			}
		}
	}
}

static void ibmvnic_cleanup(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	/* ensure that transmissions are stopped if called by do_reset */
	if (test_bit(0, &adapter->resetting))
		netif_tx_disable(netdev);
	else
		netif_tx_stop_all_queues(netdev);

	ibmvnic_napi_disable(adapter);
	ibmvnic_disable_irqs(adapter);

	clean_rx_pools(adapter);
	clean_tx_pools(adapter);
}

static int __ibmvnic_close(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int rc = 0;

	adapter->state = VNIC_CLOSING;
	rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN);
	adapter->state = VNIC_CLOSED;
	return rc;
}

static int ibmvnic_close(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int rc;

	netdev_dbg(netdev, "[S:%d FOP:%d FRR:%d] Closing\n",
		   adapter->state, adapter->failover_pending,
		   adapter->force_reset_recovery);

	/* If device failover is pending, just set device state and return.
	 * Device operation will be handled by reset routine.
	 */
	if (adapter->failover_pending) {
		adapter->state = VNIC_CLOSED;
		return 0;
	}

	rc = __ibmvnic_close(netdev);
	ibmvnic_cleanup(netdev);

	return rc;
}

/**
 * build_hdr_data - creates L2/L3/L4 header data buffer
 * @hdr_field: bitfield determining needed headers
 * @skb: socket buffer
 * @hdr_len: array of header lengths
 * @hdr_data: buffer to write the header to
 *
 * Reads hdr_field to determine which headers are needed by firmware.
 * Builds a buffer containing these headers. Saves individual header
 * lengths and total buffer length to be used to build descriptors.
 */
static int build_hdr_data(u8 hdr_field, struct sk_buff *skb,
			  int *hdr_len, u8 *hdr_data)
{
	int len = 0;
	u8 *hdr;

	if (skb_vlan_tagged(skb) && !skb_vlan_tag_present(skb))
		hdr_len[0] = sizeof(struct vlan_ethhdr);
	else
		hdr_len[0] = sizeof(struct ethhdr);

	if (skb->protocol == htons(ETH_P_IP)) {
		hdr_len[1] = ip_hdr(skb)->ihl * 4;
		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
			hdr_len[2] = tcp_hdrlen(skb);
		else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
			hdr_len[2] = sizeof(struct udphdr);
	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		hdr_len[1] = sizeof(struct ipv6hdr);
		if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
			hdr_len[2] = tcp_hdrlen(skb);
		else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
			hdr_len[2] = sizeof(struct udphdr);
	} else if (skb->protocol == htons(ETH_P_ARP)) {
		hdr_len[1] = arp_hdr_len(skb->dev);
		hdr_len[2] = 0;
	}

	memset(hdr_data, 0, 120);
	if ((hdr_field >> 6) & 1) {
		hdr = skb_mac_header(skb);
		memcpy(hdr_data, hdr, hdr_len[0]);
		len += hdr_len[0];
	}

	if ((hdr_field >> 5) & 1) {
		hdr = skb_network_header(skb);
		memcpy(hdr_data + len, hdr, hdr_len[1]);
		len += hdr_len[1];
	}

	if ((hdr_field >> 4) & 1) {
		hdr = skb_transport_header(skb);
		memcpy(hdr_data + len, hdr, hdr_len[2]);
		len += hdr_len[2];
	}
	return len;
}
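
/* Illustrative reading of hdr_field as tested above: bit 6 requests the
 * L2 header, bit 5 the L3 header, and bit 4 the L4 header. For a TCP/IPv4
 * skb (no IP options) needing all three, build_hdr_data() returns
 * 14 + 20 + tcp_hdrlen(skb) bytes of concatenated headers with
 * hdr_len = {14, 20, tcp_hdrlen(skb)}; the L2 length is 18 rather than 14
 * for a VLAN-tagged frame.
 */
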
/**
 * create_hdr_descs - create header and header extension descriptors
 * @hdr_field: bitfield determining needed headers
 * @hdr_data: buffer containing header data
 * @len: length of data buffer
 * @hdr_len: array of individual header lengths
 * @scrq_arr: descriptor array
 *
 * Creates header and, if needed, header extension descriptors and
 * places them in a descriptor array, scrq_arr
 */
static int create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len,
			    union sub_crq *scrq_arr)
{
	union sub_crq hdr_desc;
	int tmp_len = len;
	int num_descs = 0;
	u8 *data, *cur;
	int tmp;

	while (tmp_len > 0) {
		cur = hdr_data + len - tmp_len;

		memset(&hdr_desc, 0, sizeof(hdr_desc));
		if (cur != hdr_data) {
			data = hdr_desc.hdr_ext.data;
			tmp = tmp_len > 29 ? 29 : tmp_len;
			hdr_desc.hdr_ext.first = IBMVNIC_CRQ_CMD;
			hdr_desc.hdr_ext.type = IBMVNIC_HDR_EXT_DESC;
			hdr_desc.hdr_ext.len = tmp;
		} else {
			data = hdr_desc.hdr.data;
			tmp = tmp_len > 24 ? 24 : tmp_len;
			hdr_desc.hdr.first = IBMVNIC_CRQ_CMD;
			hdr_desc.hdr.type = IBMVNIC_HDR_DESC;
			hdr_desc.hdr.len = tmp;
			hdr_desc.hdr.l2_len = (u8)hdr_len[0];
			hdr_desc.hdr.l3_len = cpu_to_be16((u16)hdr_len[1]);
			hdr_desc.hdr.l4_len = (u8)hdr_len[2];
			hdr_desc.hdr.flag = hdr_field << 1;
		}
		memcpy(data, cur, tmp);
		tmp_len -= tmp;
		*scrq_arr = hdr_desc;
		scrq_arr++;
		num_descs++;
	}

	return num_descs;
}

/**
 * build_hdr_descs_arr - build a header descriptor array
 * @skb: tx socket buffer
 * @indir_arr: indirect array to fill with header descriptors
 * @num_entries: number of descriptors to be sent
 * @hdr_field: bit field determining which headers will be sent
 *
 * This function will build a TX descriptor array with applicable
 * L2/L3/L4 packet header descriptors to be sent by send_subcrq_indirect.
 */
static void build_hdr_descs_arr(struct sk_buff *skb,
				union sub_crq *indir_arr,
				int *num_entries, u8 hdr_field)
{
	int hdr_len[3] = {0, 0, 0};
	u8 hdr_data[140] = {0};
	int tot_len;

	tot_len = build_hdr_data(hdr_field, skb, hdr_len,
				 hdr_data);
	*num_entries += create_hdr_descs(hdr_field, hdr_data, tot_len, hdr_len,
					 indir_arr + 1);
}

static int ibmvnic_xmit_workarounds(struct sk_buff *skb,
				    struct net_device *netdev)
{
	/* For some backing devices, mishandling of small packets
	 * can result in a loss of connection or TX stall. Device
	 * architects recommend that no packet should be smaller
	 * than the minimum MTU value provided to the driver, so
	 * pad any packets to that length
	 */
	if (skb->len < netdev->min_mtu)
		return skb_put_padto(skb, netdev->min_mtu);

	return 0;
}

static void ibmvnic_tx_scrq_clean_buffer(struct ibmvnic_adapter *adapter,
					 struct ibmvnic_sub_crq_queue *tx_scrq)
{
	struct ibmvnic_ind_xmit_queue *ind_bufp;
	struct ibmvnic_tx_buff *tx_buff;
	struct ibmvnic_tx_pool *tx_pool;
	union sub_crq tx_scrq_entry;
	int queue_num;
	int entries;
	int index;
	int i;

	ind_bufp = &tx_scrq->ind_buf;
	entries = (u64)ind_bufp->index;
	queue_num = tx_scrq->pool_index;

	for (i = entries - 1; i >= 0; --i) {
		tx_scrq_entry = ind_bufp->indir_arr[i];
		if (tx_scrq_entry.v1.type != IBMVNIC_TX_DESC)
			continue;
		index = be32_to_cpu(tx_scrq_entry.v1.correlator);
		if (index & IBMVNIC_TSO_POOL_MASK) {
			tx_pool = &adapter->tso_pool[queue_num];
			index &= ~IBMVNIC_TSO_POOL_MASK;
		} else {
			tx_pool = &adapter->tx_pool[queue_num];
		}
		tx_pool->free_map[tx_pool->consumer_index] = index;
		tx_pool->consumer_index = tx_pool->consumer_index == 0 ?
					  tx_pool->num_buffers - 1 :
					  tx_pool->consumer_index - 1;
		tx_buff = &tx_pool->tx_buff[index];
		adapter->netdev->stats.tx_packets--;
		adapter->netdev->stats.tx_bytes -= tx_buff->skb->len;
		adapter->tx_stats_buffers[queue_num].packets--;
		adapter->tx_stats_buffers[queue_num].bytes -=
			tx_buff->skb->len;
		dev_kfree_skb_any(tx_buff->skb);
		tx_buff->skb = NULL;
		adapter->netdev->stats.tx_dropped++;
	}
	ind_bufp->index = 0;
	if (atomic_sub_return(entries, &tx_scrq->used) <=
	    (adapter->req_tx_entries_per_subcrq / 2) &&
	    __netif_subqueue_stopped(adapter->netdev, queue_num)) {
		netif_wake_subqueue(adapter->netdev, queue_num);
		netdev_dbg(adapter->netdev, "Started queue %d\n",
			   queue_num);
	}
}
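
/* ibmvnic_tx_scrq_flush() below posts everything accumulated in the
 * queue's indirect buffer with a single H_SEND_SUB_CRQ_INDIRECT hcall
 * (via send_subcrq_indirect()). If the hcall fails,
 * ibmvnic_tx_scrq_clean_buffer() above unwinds the pending descriptors,
 * returning their slots to the free map and dropping the skbs they
 * referenced.
 */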
static int ibmvnic_tx_scrq_flush(struct ibmvnic_adapter *adapter,
				 struct ibmvnic_sub_crq_queue *tx_scrq)
{
	struct ibmvnic_ind_xmit_queue *ind_bufp;
	u64 dma_addr;
	u64 entries;
	u64 handle;
	int rc;

	ind_bufp = &tx_scrq->ind_buf;
	dma_addr = (u64)ind_bufp->indir_dma;
	entries = (u64)ind_bufp->index;
	handle = tx_scrq->handle;

	if (!entries)
		return 0;
	rc = send_subcrq_indirect(adapter, handle, dma_addr, entries);
	if (rc)
		ibmvnic_tx_scrq_clean_buffer(adapter, tx_scrq);
	else
		ind_bufp->index = 0;
	return 0;
}

static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int queue_num = skb_get_queue_mapping(skb);
	u8 *hdrs = (u8 *)&adapter->tx_rx_desc_req;
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_ind_xmit_queue *ind_bufp;
	struct ibmvnic_tx_buff *tx_buff = NULL;
	struct ibmvnic_sub_crq_queue *tx_scrq;
	struct ibmvnic_tx_pool *tx_pool;
	unsigned int tx_send_failed = 0;
	netdev_tx_t ret = NETDEV_TX_OK;
	unsigned int tx_map_failed = 0;
	union sub_crq indir_arr[16];
	unsigned int tx_dropped = 0;
	unsigned int tx_packets = 0;
	unsigned int tx_bytes = 0;
	dma_addr_t data_dma_addr;
	struct netdev_queue *txq;
	unsigned long lpar_rc;
	union sub_crq tx_crq;
	unsigned int offset;
	int num_entries = 1;
	unsigned char *dst;
	int index = 0;
	u8 proto = 0;

	tx_scrq = adapter->tx_scrq[queue_num];
	txq = netdev_get_tx_queue(netdev, queue_num);
	ind_bufp = &tx_scrq->ind_buf;

	if (test_bit(0, &adapter->resetting)) {
		if (!netif_subqueue_stopped(netdev, skb))
			netif_stop_subqueue(netdev, queue_num);
		dev_kfree_skb_any(skb);

		tx_send_failed++;
		tx_dropped++;
		ret = NETDEV_TX_OK;
		ibmvnic_tx_scrq_flush(adapter, tx_scrq);
		goto out;
	}

	if (ibmvnic_xmit_workarounds(skb, netdev)) {
		tx_dropped++;
		tx_send_failed++;
		ret = NETDEV_TX_OK;
		ibmvnic_tx_scrq_flush(adapter, tx_scrq);
		goto out;
	}
	if (skb_is_gso(skb))
		tx_pool = &adapter->tso_pool[queue_num];
	else
		tx_pool = &adapter->tx_pool[queue_num];

	index = tx_pool->free_map[tx_pool->consumer_index];

	if (index == IBMVNIC_INVALID_MAP) {
		dev_kfree_skb_any(skb);
		tx_send_failed++;
		tx_dropped++;
		ret = NETDEV_TX_OK;
		ibmvnic_tx_scrq_flush(adapter, tx_scrq);
		goto out;
	}

	tx_pool->free_map[tx_pool->consumer_index] = IBMVNIC_INVALID_MAP;

	offset = index * tx_pool->buf_size;
	dst = tx_pool->long_term_buff.buff + offset;
	memset(dst, 0, tx_pool->buf_size);
	data_dma_addr = tx_pool->long_term_buff.addr + offset;
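	/* As described in the header comment, the skb is not DMA mapped
	 * directly; it is copied into slot "index" of the pool's long-term
	 * mapped buffer. dst and data_dma_addr above are the CPU and DMA
	 * views of that same slot.
	 */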

	if (skb_shinfo(skb)->nr_frags) {
		int cur, i;

		/* Copy the head */
		skb_copy_from_linear_data(skb, dst, skb_headlen(skb));
		cur = skb_headlen(skb);

		/* Copy the frags */
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			memcpy(dst + cur,
			       page_address(skb_frag_page(frag)) +
			       skb_frag_off(frag), skb_frag_size(frag));
			cur += skb_frag_size(frag);
		}
	} else {
		skb_copy_from_linear_data(skb, dst, skb->len);
	}

	/* post changes to long_term_buff *dst before VIOS accessing it */
	dma_wmb();

	tx_pool->consumer_index =
	    (tx_pool->consumer_index + 1) % tx_pool->num_buffers;

	tx_buff = &tx_pool->tx_buff[index];
	tx_buff->skb = skb;
	tx_buff->index = index;
	tx_buff->pool_index = queue_num;

	memset(&tx_crq, 0, sizeof(tx_crq));
	tx_crq.v1.first = IBMVNIC_CRQ_CMD;
	tx_crq.v1.type = IBMVNIC_TX_DESC;
	tx_crq.v1.n_crq_elem = 1;
	tx_crq.v1.n_sge = 1;
	tx_crq.v1.flags1 = IBMVNIC_TX_COMP_NEEDED;

	if (skb_is_gso(skb))
		tx_crq.v1.correlator =
			cpu_to_be32(index | IBMVNIC_TSO_POOL_MASK);
	else
		tx_crq.v1.correlator = cpu_to_be32(index);
	tx_crq.v1.dma_reg = cpu_to_be16(tx_pool->long_term_buff.map_id);
	tx_crq.v1.sge_len = cpu_to_be32(skb->len);
	tx_crq.v1.ioba = cpu_to_be64(data_dma_addr);

	if (adapter->vlan_header_insertion && skb_vlan_tag_present(skb)) {
		tx_crq.v1.flags2 |= IBMVNIC_TX_VLAN_INSERT;
		tx_crq.v1.vlan_id = cpu_to_be16(skb->vlan_tci);
	}

	if (skb->protocol == htons(ETH_P_IP)) {
		tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV4;
		proto = ip_hdr(skb)->protocol;
	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV6;
		proto = ipv6_hdr(skb)->nexthdr;
	}

	if (proto == IPPROTO_TCP)
		tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_TCP;
	else if (proto == IPPROTO_UDP)
		tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_UDP;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		tx_crq.v1.flags1 |= IBMVNIC_TX_CHKSUM_OFFLOAD;
		hdrs += 2;
	}
	if (skb_is_gso(skb)) {
		tx_crq.v1.flags1 |= IBMVNIC_TX_LSO;
		tx_crq.v1.mss = cpu_to_be16(skb_shinfo(skb)->gso_size);
		hdrs += 2;
	}

	if ((*hdrs >> 7) & 1)
		build_hdr_descs_arr(skb, indir_arr, &num_entries, *hdrs);

	tx_crq.v1.n_crq_elem = num_entries;
	tx_buff->num_entries = num_entries;
	/* flush buffer if current entry can not fit */
	if (num_entries + ind_bufp->index > IBMVNIC_MAX_IND_DESCS) {
		lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq);
		if (lpar_rc != H_SUCCESS)
			goto tx_flush_err;
	}

	indir_arr[0] = tx_crq;
	memcpy(&ind_bufp->indir_arr[ind_bufp->index], &indir_arr[0],
	       num_entries * sizeof(struct ibmvnic_generic_scrq));
	ind_bufp->index += num_entries;
	if (__netdev_tx_sent_queue(txq, skb->len,
				   netdev_xmit_more() &&
				   ind_bufp->index < IBMVNIC_MAX_IND_DESCS)) {
		lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq);
		if (lpar_rc != H_SUCCESS)
			goto tx_err;
	}

	if (atomic_add_return(num_entries, &tx_scrq->used)
	    >= adapter->req_tx_entries_per_subcrq) {
		netdev_dbg(netdev, "Stopping queue %d\n", queue_num);
		netif_stop_subqueue(netdev, queue_num);
	}

	tx_packets++;
	tx_bytes += skb->len;
	txq->trans_start = jiffies;
	ret = NETDEV_TX_OK;
	goto out;

tx_flush_err:
	dev_kfree_skb_any(skb);
	tx_buff->skb = NULL;
	tx_pool->consumer_index = tx_pool->consumer_index == 0 ?
				  tx_pool->num_buffers - 1 :
				  tx_pool->consumer_index - 1;
	tx_dropped++;
tx_err:
	if (lpar_rc != H_CLOSED && lpar_rc != H_PARAMETER)
		dev_err_ratelimited(dev, "tx: send failed\n");

	if (lpar_rc == H_CLOSED || adapter->failover_pending) {
		/* Disable TX and report carrier off if queue is closed
		 * or pending failover.
		 * Firmware guarantees that a signal will be sent to the
		 * driver, triggering a reset or some other action.
		 */
		netif_tx_stop_all_queues(netdev);
		netif_carrier_off(netdev);
	}
out:
	netdev->stats.tx_dropped += tx_dropped;
	netdev->stats.tx_bytes += tx_bytes;
	netdev->stats.tx_packets += tx_packets;
	adapter->tx_send_failed += tx_send_failed;
	adapter->tx_map_failed += tx_map_failed;
	adapter->tx_stats_buffers[queue_num].packets += tx_packets;
	adapter->tx_stats_buffers[queue_num].bytes += tx_bytes;
	adapter->tx_stats_buffers[queue_num].dropped_packets += tx_dropped;

	return ret;
}

static void ibmvnic_set_multi(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct netdev_hw_addr *ha;
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.request_capability.first = IBMVNIC_CRQ_CMD;
	crq.request_capability.cmd = REQUEST_CAPABILITY;

	if (netdev->flags & IFF_PROMISC) {
		if (!adapter->promisc_supported)
			return;
	} else {
		if (netdev->flags & IFF_ALLMULTI) {
			/* Accept all multicast */
			memset(&crq, 0, sizeof(crq));
			crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
			crq.multicast_ctrl.cmd = MULTICAST_CTRL;
			crq.multicast_ctrl.flags = IBMVNIC_ENABLE_ALL;
			ibmvnic_send_crq(adapter, &crq);
		} else if (netdev_mc_empty(netdev)) {
			/* Reject all multicast */
			memset(&crq, 0, sizeof(crq));
			crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
			crq.multicast_ctrl.cmd = MULTICAST_CTRL;
			crq.multicast_ctrl.flags = IBMVNIC_DISABLE_ALL;
			ibmvnic_send_crq(adapter, &crq);
		} else {
			/* Accept one or more multicast(s) */
			netdev_for_each_mc_addr(ha, netdev) {
				memset(&crq, 0, sizeof(crq));
				crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
				crq.multicast_ctrl.cmd = MULTICAST_CTRL;
				crq.multicast_ctrl.flags = IBMVNIC_ENABLE_MC;
				ether_addr_copy(&crq.multicast_ctrl.mac_addr[0],
						ha->addr);
				ibmvnic_send_crq(adapter, &crq);
			}
		}
	}
}

static int __ibmvnic_set_mac(struct net_device *netdev, u8 *dev_addr)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	union ibmvnic_crq crq;
	int rc;

	if (!is_valid_ether_addr(dev_addr)) {
		rc = -EADDRNOTAVAIL;
		goto err;
	}

	memset(&crq, 0, sizeof(crq));
	crq.change_mac_addr.first = IBMVNIC_CRQ_CMD;
	crq.change_mac_addr.cmd = CHANGE_MAC_ADDR;
	ether_addr_copy(&crq.change_mac_addr.mac_addr[0], dev_addr);

	mutex_lock(&adapter->fw_lock);
	adapter->fw_done_rc = 0;
	reinit_completion(&adapter->fw_done);

	rc = ibmvnic_send_crq(adapter, &crq);
	if (rc) {
		rc = -EIO;
		mutex_unlock(&adapter->fw_lock);
		goto err;
	}

	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
	/* netdev->dev_addr is changed in handle_change_mac_rsp function */
	if (rc || adapter->fw_done_rc) {
		rc = -EIO;
		mutex_unlock(&adapter->fw_lock);
		goto err;
	}
	mutex_unlock(&adapter->fw_lock);
	return 0;
err:
	ether_addr_copy(adapter->mac_addr, netdev->dev_addr);
	return rc;
}

static int ibmvnic_set_mac(struct net_device *netdev, void *p)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int rc;

	rc = 0;
	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	if (adapter->state != VNIC_PROBED) {
		ether_addr_copy(adapter->mac_addr, addr->sa_data);
		rc = __ibmvnic_set_mac(netdev, addr->sa_data);
	}

	return rc;
}

/*
 * do_reset returns zero if we are able to keep processing reset events, or
 * non-zero if we hit a fatal error and must halt.
 */
static int do_reset(struct ibmvnic_adapter *adapter,
		    struct ibmvnic_rwi *rwi, u32 reset_state)
{
	u64 old_num_rx_queues, old_num_tx_queues;
	u64 old_num_rx_slots, old_num_tx_slots;
	struct net_device *netdev = adapter->netdev;
	int i, rc;

	netdev_dbg(adapter->netdev,
		   "[S:%d FOP:%d] Reset reason %d, reset_state %d\n",
		   adapter->state, adapter->failover_pending,
		   rwi->reset_reason, reset_state);

	adapter->reset_reason = rwi->reset_reason;
	/* requestor of VNIC_RESET_CHANGE_PARAM already has the rtnl lock */
	if (!(adapter->reset_reason == VNIC_RESET_CHANGE_PARAM))
		rtnl_lock();

	/* Now that we have the rtnl lock, clear any pending failover.
	 * This will ensure ibmvnic_open() has either completed or will
	 * block until failover is complete.
	 */
	if (rwi->reset_reason == VNIC_RESET_FAILOVER)
		adapter->failover_pending = false;

	/* read the state and check (again) after getting rtnl */
	reset_state = adapter->state;

	if (reset_state == VNIC_REMOVING || reset_state == VNIC_REMOVED) {
		rc = -EBUSY;
		goto out;
	}

	netif_carrier_off(netdev);

	old_num_rx_queues = adapter->req_rx_queues;
	old_num_tx_queues = adapter->req_tx_queues;
	old_num_rx_slots = adapter->req_rx_add_entries_per_subcrq;
	old_num_tx_slots = adapter->req_tx_entries_per_subcrq;

	ibmvnic_cleanup(netdev);

	if (reset_state == VNIC_OPEN &&
	    adapter->reset_reason != VNIC_RESET_MOBILITY &&
	    adapter->reset_reason != VNIC_RESET_FAILOVER) {
		if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) {
			rc = __ibmvnic_close(netdev);
			if (rc)
				goto out;
		} else {
			adapter->state = VNIC_CLOSING;

			/* Release the RTNL lock before link state change and
			 * re-acquire after the link state change to allow
			 * linkwatch_event to grab the RTNL lock and run during
			 * a reset.
			 */
			rtnl_unlock();
			rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN);
			rtnl_lock();
			if (rc)
				goto out;

			if (adapter->state == VNIC_OPEN) {
				/* When we dropped rtnl, ibmvnic_open() got
				 * it and noticed that we are resetting and
				 * set the adapter state to OPEN. Update our
				 * new "target" state, and resume the reset
				 * from VNIC_CLOSING state.
				 */
1990 */ 1991 netdev_dbg(netdev, 1992 "Open changed state from %d, updating.\n", 1993 reset_state); 1994 reset_state = VNIC_OPEN; 1995 adapter->state = VNIC_CLOSING; 1996 } 1997 1998 if (adapter->state != VNIC_CLOSING) { 1999 /* If someone else changed the adapter state 2000 * when we dropped the rtnl, fail the reset 2001 */ 2002 rc = -1; 2003 goto out; 2004 } 2005 adapter->state = VNIC_CLOSED; 2006 } 2007 } 2008 2009 if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) { 2010 release_resources(adapter); 2011 release_sub_crqs(adapter, 1); 2012 release_crq_queue(adapter); 2013 } 2014 2015 if (adapter->reset_reason != VNIC_RESET_NON_FATAL) { 2016 /* remove the closed state so when we call open it appears 2017 * we are coming from the probed state. 2018 */ 2019 adapter->state = VNIC_PROBED; 2020 2021 if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) { 2022 rc = init_crq_queue(adapter); 2023 } else if (adapter->reset_reason == VNIC_RESET_MOBILITY) { 2024 rc = ibmvnic_reenable_crq_queue(adapter); 2025 release_sub_crqs(adapter, 1); 2026 } else { 2027 rc = ibmvnic_reset_crq(adapter); 2028 if (rc == H_CLOSED || rc == H_SUCCESS) { 2029 rc = vio_enable_interrupts(adapter->vdev); 2030 if (rc) 2031 netdev_err(adapter->netdev, 2032 "Reset failed to enable interrupts. rc=%d\n", 2033 rc); 2034 } 2035 } 2036 2037 if (rc) { 2038 netdev_err(adapter->netdev, 2039 "Reset couldn't initialize crq. rc=%d\n", rc); 2040 goto out; 2041 } 2042 2043 rc = ibmvnic_reset_init(adapter, true); 2044 if (rc) { 2045 rc = IBMVNIC_INIT_FAILED; 2046 goto out; 2047 } 2048 2049 /* If the adapter was in PROBE state prior to the reset, 2050 * exit here. 2051 */ 2052 if (reset_state == VNIC_PROBED) { 2053 rc = 0; 2054 goto out; 2055 } 2056 2057 rc = ibmvnic_login(netdev); 2058 if (rc) 2059 goto out; 2060 2061 if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) { 2062 rc = init_resources(adapter); 2063 if (rc) 2064 goto out; 2065 } else if (adapter->req_rx_queues != old_num_rx_queues || 2066 adapter->req_tx_queues != old_num_tx_queues || 2067 adapter->req_rx_add_entries_per_subcrq != 2068 old_num_rx_slots || 2069 adapter->req_tx_entries_per_subcrq != 2070 old_num_tx_slots || 2071 !adapter->rx_pool || 2072 !adapter->tso_pool || 2073 !adapter->tx_pool) { 2074 release_rx_pools(adapter); 2075 release_tx_pools(adapter); 2076 release_napi(adapter); 2077 release_vpd_data(adapter); 2078 2079 rc = init_resources(adapter); 2080 if (rc) 2081 goto out; 2082 2083 } else { 2084 rc = reset_tx_pools(adapter); 2085 if (rc) { 2086 netdev_dbg(adapter->netdev, "reset tx pools failed (%d)\n", 2087 rc); 2088 goto out; 2089 } 2090 2091 rc = reset_rx_pools(adapter); 2092 if (rc) { 2093 netdev_dbg(adapter->netdev, "reset rx pools failed (%d)\n", 2094 rc); 2095 goto out; 2096 } 2097 } 2098 ibmvnic_disable_irqs(adapter); 2099 } 2100 adapter->state = VNIC_CLOSED; 2101 2102 if (reset_state == VNIC_CLOSED) { 2103 rc = 0; 2104 goto out; 2105 } 2106 2107 rc = __ibmvnic_open(netdev); 2108 if (rc) { 2109 rc = IBMVNIC_OPEN_FAILED; 2110 goto out; 2111 } 2112 2113 /* refresh device's multicast list */ 2114 ibmvnic_set_multi(netdev); 2115 2116 /* kick napi */ 2117 for (i = 0; i < adapter->req_rx_queues; i++) 2118 napi_schedule(&adapter->napi[i]); 2119 2120 if (adapter->reset_reason == VNIC_RESET_FAILOVER || 2121 adapter->reset_reason == VNIC_RESET_MOBILITY) 2122 __netdev_notify_peers(netdev); 2123 2124 rc = 0; 2125 2126 out: 2127 /* restore the adapter state if reset failed */ 2128 if (rc) 2129 adapter->state = reset_state; 2130 /* requestor of 
VNIC_RESET_CHANGE_PARAM should still hold the rtnl lock */ 2131 if (!(adapter->reset_reason == VNIC_RESET_CHANGE_PARAM)) 2132 rtnl_unlock(); 2133 2134 netdev_dbg(adapter->netdev, "[S:%d FOP:%d] Reset done, rc %d\n", 2135 adapter->state, adapter->failover_pending, rc); 2136 return rc; 2137 } 2138 2139 static int do_hard_reset(struct ibmvnic_adapter *adapter, 2140 struct ibmvnic_rwi *rwi, u32 reset_state) 2141 { 2142 struct net_device *netdev = adapter->netdev; 2143 int rc; 2144 2145 netdev_dbg(adapter->netdev, "Hard resetting driver (%d)\n", 2146 rwi->reset_reason); 2147 2148 /* read the state and check (again) after getting rtnl */ 2149 reset_state = adapter->state; 2150 2151 if (reset_state == VNIC_REMOVING || reset_state == VNIC_REMOVED) { 2152 rc = -EBUSY; 2153 goto out; 2154 } 2155 2156 netif_carrier_off(netdev); 2157 adapter->reset_reason = rwi->reset_reason; 2158 2159 ibmvnic_cleanup(netdev); 2160 release_resources(adapter); 2161 release_sub_crqs(adapter, 0); 2162 release_crq_queue(adapter); 2163 2164 /* remove the closed state so when we call open it appears 2165 * we are coming from the probed state. 2166 */ 2167 adapter->state = VNIC_PROBED; 2168 2169 reinit_completion(&adapter->init_done); 2170 rc = init_crq_queue(adapter); 2171 if (rc) { 2172 netdev_err(adapter->netdev, 2173 "Couldn't initialize crq. rc=%d\n", rc); 2174 goto out; 2175 } 2176 2177 rc = ibmvnic_reset_init(adapter, false); 2178 if (rc) 2179 goto out; 2180 2181 /* If the adapter was in PROBE state prior to the reset, 2182 * exit here. 2183 */ 2184 if (reset_state == VNIC_PROBED) 2185 goto out; 2186 2187 rc = ibmvnic_login(netdev); 2188 if (rc) 2189 goto out; 2190 2191 rc = init_resources(adapter); 2192 if (rc) 2193 goto out; 2194 2195 ibmvnic_disable_irqs(adapter); 2196 adapter->state = VNIC_CLOSED; 2197 2198 if (reset_state == VNIC_CLOSED) 2199 goto out; 2200 2201 rc = __ibmvnic_open(netdev); 2202 if (rc) { 2203 rc = IBMVNIC_OPEN_FAILED; 2204 goto out; 2205 } 2206 2207 __netdev_notify_peers(netdev); 2208 out: 2209 /* restore adapter state if reset failed */ 2210 if (rc) 2211 adapter->state = reset_state; 2212 netdev_dbg(adapter->netdev, "[S:%d FOP:%d] Hard reset done, rc %d\n", 2213 adapter->state, adapter->failover_pending, rc); 2214 return rc; 2215 } 2216 2217 static struct ibmvnic_rwi *get_next_rwi(struct ibmvnic_adapter *adapter) 2218 { 2219 struct ibmvnic_rwi *rwi; 2220 unsigned long flags; 2221 2222 spin_lock_irqsave(&adapter->rwi_lock, flags); 2223 2224 if (!list_empty(&adapter->rwi_list)) { 2225 rwi = list_first_entry(&adapter->rwi_list, struct ibmvnic_rwi, 2226 list); 2227 list_del(&rwi->list); 2228 } else { 2229 rwi = NULL; 2230 } 2231 2232 spin_unlock_irqrestore(&adapter->rwi_lock, flags); 2233 return rwi; 2234 } 2235 2236 static void __ibmvnic_reset(struct work_struct *work) 2237 { 2238 struct ibmvnic_rwi *rwi; 2239 struct ibmvnic_adapter *adapter; 2240 bool saved_state = false; 2241 unsigned long flags; 2242 u32 reset_state; 2243 int rc = 0; 2244 2245 adapter = container_of(work, struct ibmvnic_adapter, ibmvnic_reset); 2246 2247 if (test_and_set_bit_lock(0, &adapter->resetting)) { 2248 schedule_delayed_work(&adapter->ibmvnic_delayed_reset, 2249 IBMVNIC_RESET_DELAY); 2250 return; 2251 } 2252 2253 rwi = get_next_rwi(adapter); 2254 while (rwi) { 2255 spin_lock_irqsave(&adapter->state_lock, flags); 2256 2257 if (adapter->state == VNIC_REMOVING || 2258 adapter->state == VNIC_REMOVED) { 2259 spin_unlock_irqrestore(&adapter->state_lock, flags); 2260 kfree(rwi); 2261 rc = EBUSY; 2262 break; 2263 } 2264 2265 if 
(!saved_state) { 2266 reset_state = adapter->state; 2267 saved_state = true; 2268 } 2269 spin_unlock_irqrestore(&adapter->state_lock, flags); 2270 2271 if (adapter->force_reset_recovery) { 2272 /* Since we are doing a hard reset now, clear the 2273 * failover_pending flag so we don't ignore any 2274 * future MOBILITY or other resets. 2275 */ 2276 adapter->failover_pending = false; 2277 2278 /* Transport event occurred during previous reset */ 2279 if (adapter->wait_for_reset) { 2280 /* Previous was CHANGE_PARAM; caller locked */ 2281 adapter->force_reset_recovery = false; 2282 rc = do_hard_reset(adapter, rwi, reset_state); 2283 } else { 2284 rtnl_lock(); 2285 adapter->force_reset_recovery = false; 2286 rc = do_hard_reset(adapter, rwi, reset_state); 2287 rtnl_unlock(); 2288 } 2289 if (rc) { 2290 /* give backing device time to settle down */ 2291 netdev_dbg(adapter->netdev, 2292 "[S:%d] Hard reset failed, waiting 60 secs\n", 2293 adapter->state); 2294 set_current_state(TASK_UNINTERRUPTIBLE); 2295 schedule_timeout(60 * HZ); 2296 } 2297 } else { 2298 rc = do_reset(adapter, rwi, reset_state); 2299 } 2300 kfree(rwi); 2301 adapter->last_reset_time = jiffies; 2302 2303 if (rc) 2304 netdev_dbg(adapter->netdev, "Reset failed, rc=%d\n", rc); 2305 2306 rwi = get_next_rwi(adapter); 2307 2308 if (rwi && (rwi->reset_reason == VNIC_RESET_FAILOVER || 2309 rwi->reset_reason == VNIC_RESET_MOBILITY)) 2310 adapter->force_reset_recovery = true; 2311 } 2312 2313 if (adapter->wait_for_reset) { 2314 adapter->reset_done_rc = rc; 2315 complete(&adapter->reset_done); 2316 } 2317 2318 clear_bit_unlock(0, &adapter->resetting); 2319 2320 netdev_dbg(adapter->netdev, 2321 "[S:%d FRR:%d WFR:%d] Done processing resets\n", 2322 adapter->state, adapter->force_reset_recovery, 2323 adapter->wait_for_reset); 2324 } 2325 2326 static void __ibmvnic_delayed_reset(struct work_struct *work) 2327 { 2328 struct ibmvnic_adapter *adapter; 2329 2330 adapter = container_of(work, struct ibmvnic_adapter, 2331 ibmvnic_delayed_reset.work); 2332 __ibmvnic_reset(&adapter->ibmvnic_reset); 2333 } 2334 2335 static int ibmvnic_reset(struct ibmvnic_adapter *adapter, 2336 enum ibmvnic_reset_reason reason) 2337 { 2338 struct list_head *entry, *tmp_entry; 2339 struct ibmvnic_rwi *rwi, *tmp; 2340 struct net_device *netdev = adapter->netdev; 2341 unsigned long flags; 2342 int ret; 2343 2344 spin_lock_irqsave(&adapter->rwi_lock, flags); 2345 2346 /* If failover is pending don't schedule any other reset. 2347 * Instead let the failover complete. If there is already 2348 * a failover reset scheduled, we will detect and drop the 2349 * duplicate reset when walking the ->rwi_list below.
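 * (The duplicate check below matches on reset_reason alone, so at most
 * one reset of a given reason can sit in the queue at a time.)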
2350 */ 2351 if (adapter->state == VNIC_REMOVING || 2352 adapter->state == VNIC_REMOVED || 2353 (adapter->failover_pending && reason != VNIC_RESET_FAILOVER)) { 2354 ret = EBUSY; 2355 netdev_dbg(netdev, "Adapter removing or pending failover, skipping reset\n"); 2356 goto err; 2357 } 2358 2359 if (adapter->state == VNIC_PROBING) { 2360 netdev_warn(netdev, "Adapter reset during probe\n"); 2361 adapter->init_done_rc = EAGAIN; 2362 ret = EAGAIN; 2363 goto err; 2364 } 2365 2366 list_for_each(entry, &adapter->rwi_list) { 2367 tmp = list_entry(entry, struct ibmvnic_rwi, list); 2368 if (tmp->reset_reason == reason) { 2369 netdev_dbg(netdev, "Skipping matching reset, reason=%d\n", 2370 reason); 2371 ret = EBUSY; 2372 goto err; 2373 } 2374 } 2375 2376 rwi = kzalloc(sizeof(*rwi), GFP_ATOMIC); 2377 if (!rwi) { 2378 ret = ENOMEM; 2379 goto err; 2380 } 2381 /* if we just received a transport event, 2382 * flush reset queue and process this reset 2383 */ 2384 if (adapter->force_reset_recovery && !list_empty(&adapter->rwi_list)) { 2385 list_for_each_safe(entry, tmp_entry, &adapter->rwi_list) 2386 list_del(entry); 2387 } 2388 rwi->reset_reason = reason; 2389 list_add_tail(&rwi->list, &adapter->rwi_list); 2390 netdev_dbg(adapter->netdev, "Scheduling reset (reason %d)\n", reason); 2391 schedule_work(&adapter->ibmvnic_reset); 2392 2393 ret = 0; 2394 err: 2395 /* ibmvnic_close() below can block, so drop the lock first */ 2396 spin_unlock_irqrestore(&adapter->rwi_lock, flags); 2397 2398 if (ret == ENOMEM) 2399 ibmvnic_close(netdev); 2400 2401 return -ret; 2402 } 2403 2404 static void ibmvnic_tx_timeout(struct net_device *dev, unsigned int txqueue) 2405 { 2406 struct ibmvnic_adapter *adapter = netdev_priv(dev); 2407 2408 if (test_bit(0, &adapter->resetting)) { 2409 netdev_err(adapter->netdev, 2410 "Adapter is resetting, skip timeout reset\n"); 2411 return; 2412 } 2413 /* Don't queue a new reset until at least 5 seconds (the default 2414 * watchdog value) have passed since the last reset 2415 */ 2416 if (time_before(jiffies, (adapter->last_reset_time + dev->watchdog_timeo))) { 2417 netdev_dbg(dev, "Not yet time to tx timeout.\n"); 2418 return; 2419 } 2420 ibmvnic_reset(adapter, VNIC_RESET_TIMEOUT); 2421 } 2422 2423 static void remove_buff_from_pool(struct ibmvnic_adapter *adapter, 2424 struct ibmvnic_rx_buff *rx_buff) 2425 { 2426 struct ibmvnic_rx_pool *pool = &adapter->rx_pool[rx_buff->pool_index]; 2427 2428 rx_buff->skb = NULL; 2429 2430 pool->free_map[pool->next_alloc] = (int)(rx_buff - pool->rx_buff); 2431 pool->next_alloc = (pool->next_alloc + 1) % pool->size; 2432 2433 atomic_dec(&pool->available); 2434 } 2435 2436 static int ibmvnic_poll(struct napi_struct *napi, int budget) 2437 { 2438 struct ibmvnic_sub_crq_queue *rx_scrq; 2439 struct ibmvnic_adapter *adapter; 2440 struct net_device *netdev; 2441 int frames_processed; 2442 int scrq_num; 2443 2444 netdev = napi->dev; 2445 adapter = netdev_priv(netdev); 2446 scrq_num = (int)(napi - adapter->napi); 2447 frames_processed = 0; 2448 rx_scrq = adapter->rx_scrq[scrq_num]; 2449 2450 restart_poll: 2451 while (frames_processed < budget) { 2452 struct sk_buff *skb; 2453 struct ibmvnic_rx_buff *rx_buff; 2454 union sub_crq *next; 2455 u32 length; 2456 u16 offset; 2457 u8 flags = 0; 2458 2459 if (unlikely(test_bit(0, &adapter->resetting) && 2460 adapter->reset_reason != VNIC_RESET_NON_FATAL)) { 2461 enable_scrq_irq(adapter, rx_scrq); 2462 napi_complete_done(napi, frames_processed); 2463 return frames_processed; 2464 } 2465 2466 if (!pending_scrq(adapter, rx_scrq)) 2467 break; 2468 next =
ibmvnic_next_scrq(adapter, rx_scrq); 2469 rx_buff = (struct ibmvnic_rx_buff *) 2470 be64_to_cpu(next->rx_comp.correlator); 2471 /* do error checking */ 2472 if (next->rx_comp.rc) { 2473 netdev_dbg(netdev, "rx buffer returned with rc %x\n", 2474 be16_to_cpu(next->rx_comp.rc)); 2475 /* free the entry */ 2476 next->rx_comp.first = 0; 2477 dev_kfree_skb_any(rx_buff->skb); 2478 remove_buff_from_pool(adapter, rx_buff); 2479 continue; 2480 } else if (!rx_buff->skb) { 2481 /* free the entry */ 2482 next->rx_comp.first = 0; 2483 remove_buff_from_pool(adapter, rx_buff); 2484 continue; 2485 } 2486 2487 length = be32_to_cpu(next->rx_comp.len); 2488 offset = be16_to_cpu(next->rx_comp.off_frame_data); 2489 flags = next->rx_comp.flags; 2490 skb = rx_buff->skb; 2491 /* load long_term_buff before copying to skb */ 2492 dma_rmb(); 2493 skb_copy_to_linear_data(skb, rx_buff->data + offset, 2494 length); 2495 2496 /* VLAN Header has been stripped by the system firmware and 2497 * needs to be inserted by the driver 2498 */ 2499 if (adapter->rx_vlan_header_insertion && 2500 (flags & IBMVNIC_VLAN_STRIPPED)) 2501 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), 2502 ntohs(next->rx_comp.vlan_tci)); 2503 2504 /* free the entry */ 2505 next->rx_comp.first = 0; 2506 remove_buff_from_pool(adapter, rx_buff); 2507 2508 skb_put(skb, length); 2509 skb->protocol = eth_type_trans(skb, netdev); 2510 skb_record_rx_queue(skb, scrq_num); 2511 2512 if (flags & IBMVNIC_IP_CHKSUM_GOOD && 2513 flags & IBMVNIC_TCP_UDP_CHKSUM_GOOD) { 2514 skb->ip_summed = CHECKSUM_UNNECESSARY; 2515 } 2516 2517 length = skb->len; 2518 napi_gro_receive(napi, skb); /* send it up */ 2519 netdev->stats.rx_packets++; 2520 netdev->stats.rx_bytes += length; 2521 adapter->rx_stats_buffers[scrq_num].packets++; 2522 adapter->rx_stats_buffers[scrq_num].bytes += length; 2523 frames_processed++; 2524 } 2525 2526 if (adapter->state != VNIC_CLOSING && 2527 ((atomic_read(&adapter->rx_pool[scrq_num].available) < 2528 adapter->req_rx_add_entries_per_subcrq / 2) || 2529 frames_processed < budget)) 2530 replenish_rx_pool(adapter, &adapter->rx_pool[scrq_num]); 2531 if (frames_processed < budget) { 2532 if (napi_complete_done(napi, frames_processed)) { 2533 enable_scrq_irq(adapter, rx_scrq); 2534 if (pending_scrq(adapter, rx_scrq)) { 2535 if (napi_reschedule(napi)) { 2536 disable_scrq_irq(adapter, rx_scrq); 2537 goto restart_poll; 2538 } 2539 } 2540 } 2541 } 2542 return frames_processed; 2543 } 2544 2545 static int wait_for_reset(struct ibmvnic_adapter *adapter) 2546 { 2547 int rc, ret; 2548 2549 adapter->fallback.mtu = adapter->req_mtu; 2550 adapter->fallback.rx_queues = adapter->req_rx_queues; 2551 adapter->fallback.tx_queues = adapter->req_tx_queues; 2552 adapter->fallback.rx_entries = adapter->req_rx_add_entries_per_subcrq; 2553 adapter->fallback.tx_entries = adapter->req_tx_entries_per_subcrq; 2554 2555 reinit_completion(&adapter->reset_done); 2556 adapter->wait_for_reset = true; 2557 rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM); 2558 2559 if (rc) { 2560 ret = rc; 2561 goto out; 2562 } 2563 rc = ibmvnic_wait_for_completion(adapter, &adapter->reset_done, 60000); 2564 if (rc) { 2565 ret = -ENODEV; 2566 goto out; 2567 } 2568 2569 ret = 0; 2570 if (adapter->reset_done_rc) { 2571 ret = -EIO; 2572 adapter->desired.mtu = adapter->fallback.mtu; 2573 adapter->desired.rx_queues = adapter->fallback.rx_queues; 2574 adapter->desired.tx_queues = adapter->fallback.tx_queues; 2575 adapter->desired.rx_entries = adapter->fallback.rx_entries; 2576 adapter->desired.tx_entries = 
adapter->fallback.tx_entries; 2577 2578 reinit_completion(&adapter->reset_done); 2579 adapter->wait_for_reset = true; 2580 rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM); 2581 if (rc) { 2582 ret = rc; 2583 goto out; 2584 } 2585 rc = ibmvnic_wait_for_completion(adapter, &adapter->reset_done, 2586 60000); 2587 if (rc) { 2588 ret = -ENODEV; 2589 goto out; 2590 } 2591 } 2592 out: 2593 adapter->wait_for_reset = false; 2594 2595 return ret; 2596 } 2597 2598 static int ibmvnic_change_mtu(struct net_device *netdev, int new_mtu) 2599 { 2600 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 2601 2602 adapter->desired.mtu = new_mtu + ETH_HLEN; 2603 2604 return wait_for_reset(adapter); 2605 } 2606 2607 static netdev_features_t ibmvnic_features_check(struct sk_buff *skb, 2608 struct net_device *dev, 2609 netdev_features_t features) 2610 { 2611 /* Some backing hardware adapters cannot 2612 * handle packets with an MSS less than 224 2613 * or with only one segment. 2614 */ 2615 if (skb_is_gso(skb)) { 2616 if (skb_shinfo(skb)->gso_size < 224 || 2617 skb_shinfo(skb)->gso_segs == 1) 2618 features &= ~NETIF_F_GSO_MASK; 2619 } 2620 2621 return features; 2622 } 2623 2624 static const struct net_device_ops ibmvnic_netdev_ops = { 2625 .ndo_open = ibmvnic_open, 2626 .ndo_stop = ibmvnic_close, 2627 .ndo_start_xmit = ibmvnic_xmit, 2628 .ndo_set_rx_mode = ibmvnic_set_multi, 2629 .ndo_set_mac_address = ibmvnic_set_mac, 2630 .ndo_validate_addr = eth_validate_addr, 2631 .ndo_tx_timeout = ibmvnic_tx_timeout, 2632 .ndo_change_mtu = ibmvnic_change_mtu, 2633 .ndo_features_check = ibmvnic_features_check, 2634 }; 2635 2636 /* ethtool functions */ 2637 2638 static int ibmvnic_get_link_ksettings(struct net_device *netdev, 2639 struct ethtool_link_ksettings *cmd) 2640 { 2641 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 2642 int rc; 2643 2644 rc = send_query_phys_parms(adapter); 2645 if (rc) { 2646 adapter->speed = SPEED_UNKNOWN; 2647 adapter->duplex = DUPLEX_UNKNOWN; 2648 } 2649 cmd->base.speed = adapter->speed; 2650 cmd->base.duplex = adapter->duplex; 2651 cmd->base.port = PORT_FIBRE; 2652 cmd->base.phy_address = 0; 2653 cmd->base.autoneg = AUTONEG_ENABLE; 2654 2655 return 0; 2656 } 2657 2658 static void ibmvnic_get_drvinfo(struct net_device *netdev, 2659 struct ethtool_drvinfo *info) 2660 { 2661 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 2662 2663 strscpy(info->driver, ibmvnic_driver_name, sizeof(info->driver)); 2664 strscpy(info->version, IBMVNIC_DRIVER_VERSION, sizeof(info->version)); 2665 strscpy(info->fw_version, adapter->fw_version, 2666 sizeof(info->fw_version)); 2667 } 2668 2669 static u32 ibmvnic_get_msglevel(struct net_device *netdev) 2670 { 2671 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 2672 2673 return adapter->msg_enable; 2674 } 2675 2676 static void ibmvnic_set_msglevel(struct net_device *netdev, u32 data) 2677 { 2678 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 2679 2680 adapter->msg_enable = data; 2681 } 2682 2683 static u32 ibmvnic_get_link(struct net_device *netdev) 2684 { 2685 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 2686 2687 /* Don't need to send a query because we request a logical link up at 2688 * init and then we wait for link state indications 2689 */ 2690 return adapter->logical_link_state; 2691 } 2692 2693 static void ibmvnic_get_ringparam(struct net_device *netdev, 2694 struct ethtool_ringparam *ring) 2695 { 2696 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 2697 2698 if (adapter->priv_flags & IBMVNIC_USE_SERVER_MAXES) {
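/* Advertise the maxima reported by the VNIC server during capability
 * exchange rather than the driver-wide IBMVNIC_MAX_QUEUE_SZ limit.
 */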
2699 ring->rx_max_pending = adapter->max_rx_add_entries_per_subcrq; 2700 ring->tx_max_pending = adapter->max_tx_entries_per_subcrq; 2701 } else { 2702 ring->rx_max_pending = IBMVNIC_MAX_QUEUE_SZ; 2703 ring->tx_max_pending = IBMVNIC_MAX_QUEUE_SZ; 2704 } 2705 ring->rx_mini_max_pending = 0; 2706 ring->rx_jumbo_max_pending = 0; 2707 ring->rx_pending = adapter->req_rx_add_entries_per_subcrq; 2708 ring->tx_pending = adapter->req_tx_entries_per_subcrq; 2709 ring->rx_mini_pending = 0; 2710 ring->rx_jumbo_pending = 0; 2711 } 2712 2713 static int ibmvnic_set_ringparam(struct net_device *netdev, 2714 struct ethtool_ringparam *ring) 2715 { 2716 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 2717 int ret; 2718 2719 ret = 0; 2720 adapter->desired.rx_entries = ring->rx_pending; 2721 adapter->desired.tx_entries = ring->tx_pending; 2722 2723 ret = wait_for_reset(adapter); 2724 2725 if (!ret && 2726 (adapter->req_rx_add_entries_per_subcrq != ring->rx_pending || 2727 adapter->req_tx_entries_per_subcrq != ring->tx_pending)) 2728 netdev_info(netdev, 2729 "Could not match full ringsize request. Requested: RX %d, TX %d; Allowed: RX %llu, TX %llu\n", 2730 ring->rx_pending, ring->tx_pending, 2731 adapter->req_rx_add_entries_per_subcrq, 2732 adapter->req_tx_entries_per_subcrq); 2733 return ret; 2734 } 2735 2736 static void ibmvnic_get_channels(struct net_device *netdev, 2737 struct ethtool_channels *channels) 2738 { 2739 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 2740 2741 if (adapter->priv_flags & IBMVNIC_USE_SERVER_MAXES) { 2742 channels->max_rx = adapter->max_rx_queues; 2743 channels->max_tx = adapter->max_tx_queues; 2744 } else { 2745 channels->max_rx = IBMVNIC_MAX_QUEUES; 2746 channels->max_tx = IBMVNIC_MAX_QUEUES; 2747 } 2748 2749 channels->max_other = 0; 2750 channels->max_combined = 0; 2751 channels->rx_count = adapter->req_rx_queues; 2752 channels->tx_count = adapter->req_tx_queues; 2753 channels->other_count = 0; 2754 channels->combined_count = 0; 2755 } 2756 2757 static int ibmvnic_set_channels(struct net_device *netdev, 2758 struct ethtool_channels *channels) 2759 { 2760 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 2761 int ret; 2762 2763 ret = 0; 2764 adapter->desired.rx_queues = channels->rx_count; 2765 adapter->desired.tx_queues = channels->tx_count; 2766 2767 ret = wait_for_reset(adapter); 2768 2769 if (!ret && 2770 (adapter->req_rx_queues != channels->rx_count || 2771 adapter->req_tx_queues != channels->tx_count)) 2772 netdev_info(netdev, 2773 "Could not match full channels request. 
Requested: RX %d, TX %d; Allowed: RX %llu, TX %llu\n", 2774 channels->rx_count, channels->tx_count, 2775 adapter->req_rx_queues, adapter->req_tx_queues); 2776 return ret; 2777 } 2778 2779 static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data) 2780 { 2781 struct ibmvnic_adapter *adapter = netdev_priv(dev); 2782 int i; 2783 2784 switch (stringset) { 2785 case ETH_SS_STATS: 2786 for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); 2787 i++, data += ETH_GSTRING_LEN) 2788 memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN); 2789 2790 for (i = 0; i < adapter->req_tx_queues; i++) { 2791 snprintf(data, ETH_GSTRING_LEN, "tx%d_packets", i); 2792 data += ETH_GSTRING_LEN; 2793 2794 snprintf(data, ETH_GSTRING_LEN, "tx%d_bytes", i); 2795 data += ETH_GSTRING_LEN; 2796 2797 snprintf(data, ETH_GSTRING_LEN, 2798 "tx%d_dropped_packets", i); 2799 data += ETH_GSTRING_LEN; 2800 } 2801 2802 for (i = 0; i < adapter->req_rx_queues; i++) { 2803 snprintf(data, ETH_GSTRING_LEN, "rx%d_packets", i); 2804 data += ETH_GSTRING_LEN; 2805 2806 snprintf(data, ETH_GSTRING_LEN, "rx%d_bytes", i); 2807 data += ETH_GSTRING_LEN; 2808 2809 snprintf(data, ETH_GSTRING_LEN, "rx%d_interrupts", i); 2810 data += ETH_GSTRING_LEN; 2811 } 2812 break; 2813 2814 case ETH_SS_PRIV_FLAGS: 2815 for (i = 0; i < ARRAY_SIZE(ibmvnic_priv_flags); i++) 2816 strcpy(data + i * ETH_GSTRING_LEN, 2817 ibmvnic_priv_flags[i]); 2818 break; 2819 default: 2820 return; 2821 } 2822 } 2823 2824 static int ibmvnic_get_sset_count(struct net_device *dev, int sset) 2825 { 2826 struct ibmvnic_adapter *adapter = netdev_priv(dev); 2827 2828 switch (sset) { 2829 case ETH_SS_STATS: 2830 return ARRAY_SIZE(ibmvnic_stats) + 2831 adapter->req_tx_queues * NUM_TX_STATS + 2832 adapter->req_rx_queues * NUM_RX_STATS; 2833 case ETH_SS_PRIV_FLAGS: 2834 return ARRAY_SIZE(ibmvnic_priv_flags); 2835 default: 2836 return -EOPNOTSUPP; 2837 } 2838 } 2839 2840 static void ibmvnic_get_ethtool_stats(struct net_device *dev, 2841 struct ethtool_stats *stats, u64 *data) 2842 { 2843 struct ibmvnic_adapter *adapter = netdev_priv(dev); 2844 union ibmvnic_crq crq; 2845 int i, j; 2846 int rc; 2847 2848 memset(&crq, 0, sizeof(crq)); 2849 crq.request_statistics.first = IBMVNIC_CRQ_CMD; 2850 crq.request_statistics.cmd = REQUEST_STATISTICS; 2851 crq.request_statistics.ioba = cpu_to_be32(adapter->stats_token); 2852 crq.request_statistics.len = 2853 cpu_to_be32(sizeof(struct ibmvnic_statistics)); 2854 2855 /* Wait for data to be written */ 2856 reinit_completion(&adapter->stats_done); 2857 rc = ibmvnic_send_crq(adapter, &crq); 2858 if (rc) 2859 return; 2860 rc = ibmvnic_wait_for_completion(adapter, &adapter->stats_done, 10000); 2861 if (rc) 2862 return; 2863 2864 for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++) 2865 data[i] = be64_to_cpu(IBMVNIC_GET_STAT 2866 (adapter, ibmvnic_stats[i].offset)); 2867 2868 for (j = 0; j < adapter->req_tx_queues; j++) { 2869 data[i] = adapter->tx_stats_buffers[j].packets; 2870 i++; 2871 data[i] = adapter->tx_stats_buffers[j].bytes; 2872 i++; 2873 data[i] = adapter->tx_stats_buffers[j].dropped_packets; 2874 i++; 2875 } 2876 2877 for (j = 0; j < adapter->req_rx_queues; j++) { 2878 data[i] = adapter->rx_stats_buffers[j].packets; 2879 i++; 2880 data[i] = adapter->rx_stats_buffers[j].bytes; 2881 i++; 2882 data[i] = adapter->rx_stats_buffers[j].interrupts; 2883 i++; 2884 } 2885 } 2886 2887 static u32 ibmvnic_get_priv_flags(struct net_device *netdev) 2888 { 2889 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 2890 2891 return adapter->priv_flags; 2892 } 2893 
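/* Userspace toggles IBMVNIC_USE_SERVER_MAXES through the generic ethtool
 * private-flags interface; the flag name shown to the user comes from
 * ibmvnic_priv_flags[] in ibmvnic.h. A minimal userspace sketch
 * (illustrative only; the socket setup, interface name, and lack of
 * error handling are assumptions, not part of this driver):
 *
 *	struct ethtool_value ev = {
 *		.cmd  = ETHTOOL_SPFLAGS,
 *		.data = IBMVNIC_USE_SERVER_MAXES,
 *	};
 *	struct ifreq ifr = { .ifr_data = (char *)&ev };
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
 *	ioctl(sock, SIOCETHTOOL, &ifr);
 *
 * or equivalently: ethtool --set-priv-flags <dev> <flag> on
 */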
2894 static int ibmvnic_set_priv_flags(struct net_device *netdev, u32 flags) 2895 { 2896 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 2897 bool which_maxes = !!(flags & IBMVNIC_USE_SERVER_MAXES); 2898 2899 if (which_maxes) 2900 adapter->priv_flags |= IBMVNIC_USE_SERVER_MAXES; 2901 else 2902 adapter->priv_flags &= ~IBMVNIC_USE_SERVER_MAXES; 2903 2904 return 0; 2905 } 2906 2907 static const struct ethtool_ops ibmvnic_ethtool_ops = { 2908 .get_drvinfo = ibmvnic_get_drvinfo, 2909 .get_msglevel = ibmvnic_get_msglevel, 2910 .set_msglevel = ibmvnic_set_msglevel, 2911 .get_link = ibmvnic_get_link, 2912 .get_ringparam = ibmvnic_get_ringparam, 2913 .set_ringparam = ibmvnic_set_ringparam, 2914 .get_channels = ibmvnic_get_channels, 2915 .set_channels = ibmvnic_set_channels, 2916 .get_strings = ibmvnic_get_strings, 2917 .get_sset_count = ibmvnic_get_sset_count, 2918 .get_ethtool_stats = ibmvnic_get_ethtool_stats, 2919 .get_link_ksettings = ibmvnic_get_link_ksettings, 2920 .get_priv_flags = ibmvnic_get_priv_flags, 2921 .set_priv_flags = ibmvnic_set_priv_flags, 2922 }; 2923 2924 /* Routines for managing CRQs/sCRQs */ 2925 2926 static int reset_one_sub_crq_queue(struct ibmvnic_adapter *adapter, 2927 struct ibmvnic_sub_crq_queue *scrq) 2928 { 2929 int rc; 2930 2931 if (!scrq) { 2932 netdev_dbg(adapter->netdev, "Invalid scrq reset.\n"); 2933 return -EINVAL; 2934 } 2935 2936 if (scrq->irq) { 2937 free_irq(scrq->irq, scrq); 2938 irq_dispose_mapping(scrq->irq); 2939 scrq->irq = 0; 2940 } 2941 2942 if (scrq->msgs) { 2943 memset(scrq->msgs, 0, 4 * PAGE_SIZE); 2944 atomic_set(&scrq->used, 0); 2945 scrq->cur = 0; 2946 scrq->ind_buf.index = 0; 2947 } else { 2948 netdev_dbg(adapter->netdev, "Invalid scrq reset\n"); 2949 return -EINVAL; 2950 } 2951 2952 rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token, 2953 4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq); 2954 return rc; 2955 } 2956 2957 static int reset_sub_crq_queues(struct ibmvnic_adapter *adapter) 2958 { 2959 int i, rc; 2960 2961 if (!adapter->tx_scrq || !adapter->rx_scrq) 2962 return -EINVAL; 2963 2964 for (i = 0; i < adapter->req_tx_queues; i++) { 2965 netdev_dbg(adapter->netdev, "Re-setting tx_scrq[%d]\n", i); 2966 rc = reset_one_sub_crq_queue(adapter, adapter->tx_scrq[i]); 2967 if (rc) 2968 return rc; 2969 } 2970 2971 for (i = 0; i < adapter->req_rx_queues; i++) { 2972 netdev_dbg(adapter->netdev, "Re-setting rx_scrq[%d]\n", i); 2973 rc = reset_one_sub_crq_queue(adapter, adapter->rx_scrq[i]); 2974 if (rc) 2975 return rc; 2976 } 2977 2978 return rc; 2979 } 2980 2981 static void release_sub_crq_queue(struct ibmvnic_adapter *adapter, 2982 struct ibmvnic_sub_crq_queue *scrq, 2983 bool do_h_free) 2984 { 2985 struct device *dev = &adapter->vdev->dev; 2986 long rc; 2987 2988 netdev_dbg(adapter->netdev, "Releasing sub-CRQ\n"); 2989 2990 if (do_h_free) { 2991 /* Close the sub-crqs */ 2992 do { 2993 rc = plpar_hcall_norets(H_FREE_SUB_CRQ, 2994 adapter->vdev->unit_address, 2995 scrq->crq_num); 2996 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc)); 2997 2998 if (rc) { 2999 netdev_err(adapter->netdev, 3000 "Failed to release sub-CRQ %16lx, rc = %ld\n", 3001 scrq->crq_num, rc); 3002 } 3003 } 3004 3005 dma_free_coherent(dev, 3006 IBMVNIC_IND_ARR_SZ, 3007 scrq->ind_buf.indir_arr, 3008 scrq->ind_buf.indir_dma); 3009 3010 dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE, 3011 DMA_BIDIRECTIONAL); 3012 free_pages((unsigned long)scrq->msgs, 2); 3013 kfree(scrq); 3014 } 3015 3016 static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter 
3017 *adapter) 3018 { 3019 struct device *dev = &adapter->vdev->dev; 3020 struct ibmvnic_sub_crq_queue *scrq; 3021 int rc; 3022 3023 scrq = kzalloc(sizeof(*scrq), GFP_KERNEL); 3024 if (!scrq) 3025 return NULL; 3026 3027 scrq->msgs = 3028 (union sub_crq *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 2); 3029 if (!scrq->msgs) { 3030 dev_warn(dev, "Couldn't allocate crq queue messages page\n"); 3031 goto zero_page_failed; 3032 } 3033 3034 scrq->msg_token = dma_map_single(dev, scrq->msgs, 4 * PAGE_SIZE, 3035 DMA_BIDIRECTIONAL); 3036 if (dma_mapping_error(dev, scrq->msg_token)) { 3037 dev_warn(dev, "Couldn't map crq queue messages page\n"); 3038 goto map_failed; 3039 } 3040 3041 rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token, 3042 4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq); 3043 3044 if (rc == H_RESOURCE) 3045 rc = ibmvnic_reset_crq(adapter); 3046 3047 if (rc == H_CLOSED) { 3048 dev_warn(dev, "Partner adapter not ready, waiting.\n"); 3049 } else if (rc) { 3050 dev_warn(dev, "Error %d registering sub-crq\n", rc); 3051 goto reg_failed; 3052 } 3053 3054 scrq->adapter = adapter; 3055 scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs); 3056 scrq->ind_buf.index = 0; 3057 3058 scrq->ind_buf.indir_arr = 3059 dma_alloc_coherent(dev, 3060 IBMVNIC_IND_ARR_SZ, 3061 &scrq->ind_buf.indir_dma, 3062 GFP_KERNEL); 3063 3064 if (!scrq->ind_buf.indir_arr) 3065 goto indir_failed; 3066 3067 spin_lock_init(&scrq->lock); 3068 3069 netdev_dbg(adapter->netdev, 3070 "sub-crq initialized, num %lx, hw_irq=%lx, irq=%x\n", 3071 scrq->crq_num, scrq->hw_irq, scrq->irq); 3072 3073 return scrq; 3074 3075 indir_failed: 3076 do { 3077 rc = plpar_hcall_norets(H_FREE_SUB_CRQ, 3078 adapter->vdev->unit_address, 3079 scrq->crq_num); 3080 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc)); 3081 reg_failed: 3082 dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE, 3083 DMA_BIDIRECTIONAL); 3084 map_failed: 3085 free_pages((unsigned long)scrq->msgs, 2); 3086 zero_page_failed: 3087 kfree(scrq); 3088 3089 return NULL; 3090 } 3091 3092 static void release_sub_crqs(struct ibmvnic_adapter *adapter, bool do_h_free) 3093 { 3094 int i; 3095 3096 if (adapter->tx_scrq) { 3097 for (i = 0; i < adapter->num_active_tx_scrqs; i++) { 3098 if (!adapter->tx_scrq[i]) 3099 continue; 3100 3101 netdev_dbg(adapter->netdev, "Releasing tx_scrq[%d]\n", 3102 i); 3103 if (adapter->tx_scrq[i]->irq) { 3104 free_irq(adapter->tx_scrq[i]->irq, 3105 adapter->tx_scrq[i]); 3106 irq_dispose_mapping(adapter->tx_scrq[i]->irq); 3107 adapter->tx_scrq[i]->irq = 0; 3108 } 3109 3110 release_sub_crq_queue(adapter, adapter->tx_scrq[i], 3111 do_h_free); 3112 } 3113 3114 kfree(adapter->tx_scrq); 3115 adapter->tx_scrq = NULL; 3116 adapter->num_active_tx_scrqs = 0; 3117 } 3118 3119 if (adapter->rx_scrq) { 3120 for (i = 0; i < adapter->num_active_rx_scrqs; i++) { 3121 if (!adapter->rx_scrq[i]) 3122 continue; 3123 3124 netdev_dbg(adapter->netdev, "Releasing rx_scrq[%d]\n", 3125 i); 3126 if (adapter->rx_scrq[i]->irq) { 3127 free_irq(adapter->rx_scrq[i]->irq, 3128 adapter->rx_scrq[i]); 3129 irq_dispose_mapping(adapter->rx_scrq[i]->irq); 3130 adapter->rx_scrq[i]->irq = 0; 3131 } 3132 3133 release_sub_crq_queue(adapter, adapter->rx_scrq[i], 3134 do_h_free); 3135 } 3136 3137 kfree(adapter->rx_scrq); 3138 adapter->rx_scrq = NULL; 3139 adapter->num_active_rx_scrqs = 0; 3140 } 3141 } 3142 3143 static int disable_scrq_irq(struct ibmvnic_adapter *adapter, 3144 struct ibmvnic_sub_crq_queue *scrq) 3145 { 3146 struct device *dev = &adapter->vdev->dev; 3147 unsigned long rc; 3148 3149 rc
= plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address, 3150 H_DISABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0); 3151 if (rc) 3152 dev_err(dev, "Couldn't disable scrq irq 0x%lx. rc=%ld\n", 3153 scrq->hw_irq, rc); 3154 return rc; 3155 } 3156 3157 static int enable_scrq_irq(struct ibmvnic_adapter *adapter, 3158 struct ibmvnic_sub_crq_queue *scrq) 3159 { 3160 struct device *dev = &adapter->vdev->dev; 3161 unsigned long rc; 3162 3163 if (scrq->hw_irq > 0x100000000ULL) { 3164 dev_err(dev, "bad hw_irq = %lx\n", scrq->hw_irq); 3165 return 1; 3166 } 3167 3168 if (test_bit(0, &adapter->resetting) && 3169 adapter->reset_reason == VNIC_RESET_MOBILITY) { 3170 u64 val = (0xff000000) | scrq->hw_irq; 3171 3172 rc = plpar_hcall_norets(H_EOI, val); 3173 /* H_EOI would fail with rc = H_FUNCTION when running 3174 * in XIVE mode which is expected, but not an error. 3175 */ 3176 if (rc && rc != H_FUNCTION) 3177 dev_err(dev, "H_EOI FAILED irq 0x%llx. rc=%ld\n", 3178 val, rc); 3179 } 3180 3181 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address, 3182 H_ENABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0); 3183 if (rc) 3184 dev_err(dev, "Couldn't enable scrq irq 0x%lx. rc=%ld\n", 3185 scrq->hw_irq, rc); 3186 return rc; 3187 } 3188 3189 static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter, 3190 struct ibmvnic_sub_crq_queue *scrq) 3191 { 3192 struct device *dev = &adapter->vdev->dev; 3193 struct ibmvnic_tx_pool *tx_pool; 3194 struct ibmvnic_tx_buff *txbuff; 3195 struct netdev_queue *txq; 3196 union sub_crq *next; 3197 int index; 3198 int i; 3199 3200 restart_loop: 3201 while (pending_scrq(adapter, scrq)) { 3202 unsigned int pool = scrq->pool_index; 3203 int num_entries = 0; 3204 int total_bytes = 0; 3205 int num_packets = 0; 3206 3207 next = ibmvnic_next_scrq(adapter, scrq); 3208 for (i = 0; i < next->tx_comp.num_comps; i++) { 3209 if (next->tx_comp.rcs[i]) 3210 dev_err(dev, "tx error %x\n", 3211 next->tx_comp.rcs[i]); 3212 index = be32_to_cpu(next->tx_comp.correlators[i]); 3213 if (index & IBMVNIC_TSO_POOL_MASK) { 3214 tx_pool = &adapter->tso_pool[pool]; 3215 index &= ~IBMVNIC_TSO_POOL_MASK; 3216 } else { 3217 tx_pool = &adapter->tx_pool[pool]; 3218 } 3219 3220 txbuff = &tx_pool->tx_buff[index]; 3221 num_packets++; 3222 num_entries += txbuff->num_entries; 3223 if (txbuff->skb) { 3224 total_bytes += txbuff->skb->len; 3225 dev_consume_skb_irq(txbuff->skb); 3226 txbuff->skb = NULL; 3227 } else { 3228 netdev_warn(adapter->netdev, 3229 "TX completion received with NULL socket buffer\n"); 3230 } 3231 tx_pool->free_map[tx_pool->producer_index] = index; 3232 tx_pool->producer_index = 3233 (tx_pool->producer_index + 1) % 3234 tx_pool->num_buffers; 3235 } 3236 /* remove tx_comp scrq*/ 3237 next->tx_comp.first = 0; 3238 3239 txq = netdev_get_tx_queue(adapter->netdev, scrq->pool_index); 3240 netdev_tx_completed_queue(txq, num_packets, total_bytes); 3241 3242 if (atomic_sub_return(num_entries, &scrq->used) <= 3243 (adapter->req_tx_entries_per_subcrq / 2) && 3244 __netif_subqueue_stopped(adapter->netdev, 3245 scrq->pool_index)) { 3246 netif_wake_subqueue(adapter->netdev, scrq->pool_index); 3247 netdev_dbg(adapter->netdev, "Started queue %d\n", 3248 scrq->pool_index); 3249 } 3250 } 3251 3252 enable_scrq_irq(adapter, scrq); 3253 3254 if (pending_scrq(adapter, scrq)) { 3255 disable_scrq_irq(adapter, scrq); 3256 goto restart_loop; 3257 } 3258 3259 return 0; 3260 } 3261 3262 static irqreturn_t ibmvnic_interrupt_tx(int irq, void *instance) 3263 { 3264 struct ibmvnic_sub_crq_queue *scrq = instance; 3265 struct ibmvnic_adapter 
*adapter = scrq->adapter; 3266 3267 disable_scrq_irq(adapter, scrq); 3268 ibmvnic_complete_tx(adapter, scrq); 3269 3270 return IRQ_HANDLED; 3271 } 3272 3273 static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance) 3274 { 3275 struct ibmvnic_sub_crq_queue *scrq = instance; 3276 struct ibmvnic_adapter *adapter = scrq->adapter; 3277 3278 /* When booting a kdump kernel we can hit pending interrupts 3279 * prior to completing driver initialization. 3280 */ 3281 if (unlikely(adapter->state != VNIC_OPEN)) 3282 return IRQ_NONE; 3283 3284 adapter->rx_stats_buffers[scrq->scrq_num].interrupts++; 3285 3286 if (napi_schedule_prep(&adapter->napi[scrq->scrq_num])) { 3287 disable_scrq_irq(adapter, scrq); 3288 __napi_schedule(&adapter->napi[scrq->scrq_num]); 3289 } 3290 3291 return IRQ_HANDLED; 3292 } 3293 3294 static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter) 3295 { 3296 struct device *dev = &adapter->vdev->dev; 3297 struct ibmvnic_sub_crq_queue *scrq; 3298 int i = 0, j = 0; 3299 int rc = 0; 3300 3301 for (i = 0; i < adapter->req_tx_queues; i++) { 3302 netdev_dbg(adapter->netdev, "Initializing tx_scrq[%d] irq\n", 3303 i); 3304 scrq = adapter->tx_scrq[i]; 3305 scrq->irq = irq_create_mapping(NULL, scrq->hw_irq); 3306 3307 if (!scrq->irq) { 3308 rc = -EINVAL; 3309 dev_err(dev, "Error mapping irq\n"); 3310 goto req_tx_irq_failed; 3311 } 3312 3313 snprintf(scrq->name, sizeof(scrq->name), "ibmvnic-%x-tx%d", 3314 adapter->vdev->unit_address, i); 3315 rc = request_irq(scrq->irq, ibmvnic_interrupt_tx, 3316 0, scrq->name, scrq); 3317 3318 if (rc) { 3319 dev_err(dev, "Couldn't register tx irq 0x%x. rc=%d\n", 3320 scrq->irq, rc); 3321 irq_dispose_mapping(scrq->irq); 3322 goto req_tx_irq_failed; 3323 } 3324 } 3325 3326 for (i = 0; i < adapter->req_rx_queues; i++) { 3327 netdev_dbg(adapter->netdev, "Initializing rx_scrq[%d] irq\n", 3328 i); 3329 scrq = adapter->rx_scrq[i]; 3330 scrq->irq = irq_create_mapping(NULL, scrq->hw_irq); 3331 if (!scrq->irq) { 3332 rc = -EINVAL; 3333 dev_err(dev, "Error mapping irq\n"); 3334 goto req_rx_irq_failed; 3335 } 3336 snprintf(scrq->name, sizeof(scrq->name), "ibmvnic-%x-rx%d", 3337 adapter->vdev->unit_address, i); 3338 rc = request_irq(scrq->irq, ibmvnic_interrupt_rx, 3339 0, scrq->name, scrq); 3340 if (rc) { 3341 dev_err(dev, "Couldn't register rx irq 0x%x. 
rc=%d\n", 3342 scrq->irq, rc); 3343 irq_dispose_mapping(scrq->irq); 3344 goto req_rx_irq_failed; 3345 } 3346 } 3347 return rc; 3348 3349 req_rx_irq_failed: 3350 for (j = 0; j < i; j++) { 3351 free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]); 3352 irq_dispose_mapping(adapter->rx_scrq[j]->irq); 3353 } 3354 i = adapter->req_tx_queues; 3355 req_tx_irq_failed: 3356 for (j = 0; j < i; j++) { 3357 free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]); 3358 irq_dispose_mapping(adapter->tx_scrq[j]->irq); 3359 } 3360 release_sub_crqs(adapter, 1); 3361 return rc; 3362 } 3363 3364 static int init_sub_crqs(struct ibmvnic_adapter *adapter) 3365 { 3366 struct device *dev = &adapter->vdev->dev; 3367 struct ibmvnic_sub_crq_queue **allqueues; 3368 int registered_queues = 0; 3369 int total_queues; 3370 int more = 0; 3371 int i; 3372 3373 total_queues = adapter->req_tx_queues + adapter->req_rx_queues; 3374 3375 allqueues = kcalloc(total_queues, sizeof(*allqueues), GFP_KERNEL); 3376 if (!allqueues) 3377 return -1; 3378 3379 for (i = 0; i < total_queues; i++) { 3380 allqueues[i] = init_sub_crq_queue(adapter); 3381 if (!allqueues[i]) { 3382 dev_warn(dev, "Couldn't allocate all sub-crqs\n"); 3383 break; 3384 } 3385 registered_queues++; 3386 } 3387 3388 /* Make sure we were able to register the minimum number of queues */ 3389 if (registered_queues < 3390 adapter->min_tx_queues + adapter->min_rx_queues) { 3391 dev_err(dev, "Fatal: Couldn't init min number of sub-crqs\n"); 3392 goto tx_failed; 3393 } 3394 3395 /* Distribute the failed allocated queues*/ 3396 for (i = 0; i < total_queues - registered_queues + more ; i++) { 3397 netdev_dbg(adapter->netdev, "Reducing number of queues\n"); 3398 switch (i % 3) { 3399 case 0: 3400 if (adapter->req_rx_queues > adapter->min_rx_queues) 3401 adapter->req_rx_queues--; 3402 else 3403 more++; 3404 break; 3405 case 1: 3406 if (adapter->req_tx_queues > adapter->min_tx_queues) 3407 adapter->req_tx_queues--; 3408 else 3409 more++; 3410 break; 3411 } 3412 } 3413 3414 adapter->tx_scrq = kcalloc(adapter->req_tx_queues, 3415 sizeof(*adapter->tx_scrq), GFP_KERNEL); 3416 if (!adapter->tx_scrq) 3417 goto tx_failed; 3418 3419 for (i = 0; i < adapter->req_tx_queues; i++) { 3420 adapter->tx_scrq[i] = allqueues[i]; 3421 adapter->tx_scrq[i]->pool_index = i; 3422 adapter->num_active_tx_scrqs++; 3423 } 3424 3425 adapter->rx_scrq = kcalloc(adapter->req_rx_queues, 3426 sizeof(*adapter->rx_scrq), GFP_KERNEL); 3427 if (!adapter->rx_scrq) 3428 goto rx_failed; 3429 3430 for (i = 0; i < adapter->req_rx_queues; i++) { 3431 adapter->rx_scrq[i] = allqueues[i + adapter->req_tx_queues]; 3432 adapter->rx_scrq[i]->scrq_num = i; 3433 adapter->num_active_rx_scrqs++; 3434 } 3435 3436 kfree(allqueues); 3437 return 0; 3438 3439 rx_failed: 3440 kfree(adapter->tx_scrq); 3441 adapter->tx_scrq = NULL; 3442 tx_failed: 3443 for (i = 0; i < registered_queues; i++) 3444 release_sub_crq_queue(adapter, allqueues[i], 1); 3445 kfree(allqueues); 3446 return -1; 3447 } 3448 3449 static void send_request_cap(struct ibmvnic_adapter *adapter, int retry) 3450 { 3451 struct device *dev = &adapter->vdev->dev; 3452 union ibmvnic_crq crq; 3453 int max_entries; 3454 3455 if (!retry) { 3456 /* Sub-CRQ entries are 32 byte long */ 3457 int entries_page = 4 * PAGE_SIZE / (sizeof(u64) * 4); 3458 3459 if (adapter->min_tx_entries_per_subcrq > entries_page || 3460 adapter->min_rx_add_entries_per_subcrq > entries_page) { 3461 dev_err(dev, "Fatal, invalid entries per sub-crq\n"); 3462 return; 3463 } 3464 3465 if (adapter->desired.mtu) 
3466 adapter->req_mtu = adapter->desired.mtu; 3467 else 3468 adapter->req_mtu = adapter->netdev->mtu + ETH_HLEN; 3469 3470 if (!adapter->desired.tx_entries) 3471 adapter->desired.tx_entries = 3472 adapter->max_tx_entries_per_subcrq; 3473 if (!adapter->desired.rx_entries) 3474 adapter->desired.rx_entries = 3475 adapter->max_rx_add_entries_per_subcrq; 3476 3477 max_entries = IBMVNIC_MAX_LTB_SIZE / 3478 (adapter->req_mtu + IBMVNIC_BUFFER_HLEN); 3479 3480 if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) * 3481 adapter->desired.tx_entries > IBMVNIC_MAX_LTB_SIZE) { 3482 adapter->desired.tx_entries = max_entries; 3483 } 3484 3485 if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) * 3486 adapter->desired.rx_entries > IBMVNIC_MAX_LTB_SIZE) { 3487 adapter->desired.rx_entries = max_entries; 3488 } 3489 3490 if (adapter->desired.tx_entries) 3491 adapter->req_tx_entries_per_subcrq = 3492 adapter->desired.tx_entries; 3493 else 3494 adapter->req_tx_entries_per_subcrq = 3495 adapter->max_tx_entries_per_subcrq; 3496 3497 if (adapter->desired.rx_entries) 3498 adapter->req_rx_add_entries_per_subcrq = 3499 adapter->desired.rx_entries; 3500 else 3501 adapter->req_rx_add_entries_per_subcrq = 3502 adapter->max_rx_add_entries_per_subcrq; 3503 3504 if (adapter->desired.tx_queues) 3505 adapter->req_tx_queues = 3506 adapter->desired.tx_queues; 3507 else 3508 adapter->req_tx_queues = 3509 adapter->opt_tx_comp_sub_queues; 3510 3511 if (adapter->desired.rx_queues) 3512 adapter->req_rx_queues = 3513 adapter->desired.rx_queues; 3514 else 3515 adapter->req_rx_queues = 3516 adapter->opt_rx_comp_queues; 3517 3518 adapter->req_rx_add_queues = adapter->max_rx_add_queues; 3519 } 3520 3521 memset(&crq, 0, sizeof(crq)); 3522 crq.request_capability.first = IBMVNIC_CRQ_CMD; 3523 crq.request_capability.cmd = REQUEST_CAPABILITY; 3524 3525 crq.request_capability.capability = cpu_to_be16(REQ_TX_QUEUES); 3526 crq.request_capability.number = cpu_to_be64(adapter->req_tx_queues); 3527 atomic_inc(&adapter->running_cap_crqs); 3528 ibmvnic_send_crq(adapter, &crq); 3529 3530 crq.request_capability.capability = cpu_to_be16(REQ_RX_QUEUES); 3531 crq.request_capability.number = cpu_to_be64(adapter->req_rx_queues); 3532 atomic_inc(&adapter->running_cap_crqs); 3533 ibmvnic_send_crq(adapter, &crq); 3534 3535 crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_QUEUES); 3536 crq.request_capability.number = cpu_to_be64(adapter->req_rx_add_queues); 3537 atomic_inc(&adapter->running_cap_crqs); 3538 ibmvnic_send_crq(adapter, &crq); 3539 3540 crq.request_capability.capability = 3541 cpu_to_be16(REQ_TX_ENTRIES_PER_SUBCRQ); 3542 crq.request_capability.number = 3543 cpu_to_be64(adapter->req_tx_entries_per_subcrq); 3544 atomic_inc(&adapter->running_cap_crqs); 3545 ibmvnic_send_crq(adapter, &crq); 3546 3547 crq.request_capability.capability = 3548 cpu_to_be16(REQ_RX_ADD_ENTRIES_PER_SUBCRQ); 3549 crq.request_capability.number = 3550 cpu_to_be64(adapter->req_rx_add_entries_per_subcrq); 3551 atomic_inc(&adapter->running_cap_crqs); 3552 ibmvnic_send_crq(adapter, &crq); 3553 3554 crq.request_capability.capability = cpu_to_be16(REQ_MTU); 3555 crq.request_capability.number = cpu_to_be64(adapter->req_mtu); 3556 atomic_inc(&adapter->running_cap_crqs); 3557 ibmvnic_send_crq(adapter, &crq); 3558 3559 if (adapter->netdev->flags & IFF_PROMISC) { 3560 if (adapter->promisc_supported) { 3561 crq.request_capability.capability = 3562 cpu_to_be16(PROMISC_REQUESTED); 3563 crq.request_capability.number = cpu_to_be64(1); 3564 atomic_inc(&adapter->running_cap_crqs); 3565 
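/* Every REQUEST_CAPABILITY CRQ posted here is matched by a response
 * that decrements running_cap_crqs; the response handler advances
 * initialization once the counter drains back to zero.
 */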
ibmvnic_send_crq(adapter, &crq); 3566 } 3567 } else { 3568 crq.request_capability.capability = 3569 cpu_to_be16(PROMISC_REQUESTED); 3570 crq.request_capability.number = cpu_to_be64(0); 3571 atomic_inc(&adapter->running_cap_crqs); 3572 ibmvnic_send_crq(adapter, &crq); 3573 } 3574 } 3575 3576 static int pending_scrq(struct ibmvnic_adapter *adapter, 3577 struct ibmvnic_sub_crq_queue *scrq) 3578 { 3579 union sub_crq *entry = &scrq->msgs[scrq->cur]; 3580 int rc; 3581 3582 rc = !!(entry->generic.first & IBMVNIC_CRQ_CMD_RSP); 3583 3584 /* Ensure that the SCRQ valid flag is loaded prior to loading the 3585 * contents of the SCRQ descriptor 3586 */ 3587 dma_rmb(); 3588 3589 return rc; 3590 } 3591 3592 static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *adapter, 3593 struct ibmvnic_sub_crq_queue *scrq) 3594 { 3595 union sub_crq *entry; 3596 unsigned long flags; 3597 3598 spin_lock_irqsave(&scrq->lock, flags); 3599 entry = &scrq->msgs[scrq->cur]; 3600 if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP) { 3601 if (++scrq->cur == scrq->size) 3602 scrq->cur = 0; 3603 } else { 3604 entry = NULL; 3605 } 3606 spin_unlock_irqrestore(&scrq->lock, flags); 3607 3608 /* Ensure that the SCRQ valid flag is loaded prior to loading the 3609 * contents of the SCRQ descriptor 3610 */ 3611 dma_rmb(); 3612 3613 return entry; 3614 } 3615 3616 static union ibmvnic_crq *ibmvnic_next_crq(struct ibmvnic_adapter *adapter) 3617 { 3618 struct ibmvnic_crq_queue *queue = &adapter->crq; 3619 union ibmvnic_crq *crq; 3620 3621 crq = &queue->msgs[queue->cur]; 3622 if (crq->generic.first & IBMVNIC_CRQ_CMD_RSP) { 3623 if (++queue->cur == queue->size) 3624 queue->cur = 0; 3625 } else { 3626 crq = NULL; 3627 } 3628 3629 return crq; 3630 } 3631 3632 static void print_subcrq_error(struct device *dev, int rc, const char *func) 3633 { 3634 switch (rc) { 3635 case H_PARAMETER: 3636 dev_warn_ratelimited(dev, 3637 "%s failed: Send request is malformed or adapter failover pending. (rc=%d)\n", 3638 func, rc); 3639 break; 3640 case H_CLOSED: 3641 dev_warn_ratelimited(dev, 3642 "%s failed: Backing queue closed. Adapter is down or failover pending. 
(rc=%d)\n", 3643 func, rc); 3644 break; 3645 default: 3646 dev_err_ratelimited(dev, "%s failed: (rc=%d)\n", func, rc); 3647 break; 3648 } 3649 } 3650 3651 static int send_subcrq_indirect(struct ibmvnic_adapter *adapter, 3652 u64 remote_handle, u64 ioba, u64 num_entries) 3653 { 3654 unsigned int ua = adapter->vdev->unit_address; 3655 struct device *dev = &adapter->vdev->dev; 3656 int rc; 3657 3658 /* Make sure the hypervisor sees the complete request */ 3659 dma_wmb(); 3660 rc = plpar_hcall_norets(H_SEND_SUB_CRQ_INDIRECT, ua, 3661 cpu_to_be64(remote_handle), 3662 ioba, num_entries); 3663 3664 if (rc) 3665 print_subcrq_error(dev, rc, __func__); 3666 3667 return rc; 3668 } 3669 3670 static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter, 3671 union ibmvnic_crq *crq) 3672 { 3673 unsigned int ua = adapter->vdev->unit_address; 3674 struct device *dev = &adapter->vdev->dev; 3675 u64 *u64_crq = (u64 *)crq; 3676 int rc; 3677 3678 netdev_dbg(adapter->netdev, "Sending CRQ: %016lx %016lx\n", 3679 (unsigned long)cpu_to_be64(u64_crq[0]), 3680 (unsigned long)cpu_to_be64(u64_crq[1])); 3681 3682 if (!adapter->crq.active && 3683 crq->generic.first != IBMVNIC_CRQ_INIT_CMD) { 3684 dev_warn(dev, "Invalid request detected while CRQ is inactive, possible device state change during reset\n"); 3685 return -EINVAL; 3686 } 3687 3688 /* Make sure the hypervisor sees the complete request */ 3689 dma_wmb(); 3690 3691 rc = plpar_hcall_norets(H_SEND_CRQ, ua, 3692 cpu_to_be64(u64_crq[0]), 3693 cpu_to_be64(u64_crq[1])); 3694 3695 if (rc) { 3696 if (rc == H_CLOSED) { 3697 dev_warn(dev, "CRQ Queue closed\n"); 3698 /* do not reset, report the fail, wait for passive init from server */ 3699 } 3700 3701 dev_warn(dev, "Send error (rc=%d)\n", rc); 3702 } 3703 3704 return rc; 3705 } 3706 3707 static int ibmvnic_send_crq_init(struct ibmvnic_adapter *adapter) 3708 { 3709 struct device *dev = &adapter->vdev->dev; 3710 union ibmvnic_crq crq; 3711 int retries = 100; 3712 int rc; 3713 3714 memset(&crq, 0, sizeof(crq)); 3715 crq.generic.first = IBMVNIC_CRQ_INIT_CMD; 3716 crq.generic.cmd = IBMVNIC_CRQ_INIT; 3717 netdev_dbg(adapter->netdev, "Sending CRQ init\n"); 3718 3719 do { 3720 rc = ibmvnic_send_crq(adapter, &crq); 3721 if (rc != H_CLOSED) 3722 break; 3723 retries--; 3724 msleep(50); 3725 3726 } while (retries > 0); 3727 3728 if (rc) { 3729 dev_err(dev, "Failed to send init request, rc = %d\n", rc); 3730 return rc; 3731 } 3732 3733 return 0; 3734 } 3735 3736 static int send_version_xchg(struct ibmvnic_adapter *adapter) 3737 { 3738 union ibmvnic_crq crq; 3739 3740 memset(&crq, 0, sizeof(crq)); 3741 crq.version_exchange.first = IBMVNIC_CRQ_CMD; 3742 crq.version_exchange.cmd = VERSION_EXCHANGE; 3743 crq.version_exchange.version = cpu_to_be16(ibmvnic_version); 3744 3745 return ibmvnic_send_crq(adapter, &crq); 3746 } 3747 3748 struct vnic_login_client_data { 3749 u8 type; 3750 __be16 len; 3751 char name[]; 3752 } __packed; 3753 3754 static int vnic_client_data_len(struct ibmvnic_adapter *adapter) 3755 { 3756 int len; 3757 3758 /* Calculate the amount of buffer space needed for the 3759 * vnic client data in the login buffer. There are four entries, 3760 * OS name, LPAR name, device name, and a null last entry. 
3761 */ 3762 len = 4 * sizeof(struct vnic_login_client_data); 3763 len += 6; /* "Linux" plus NULL */ 3764 len += strlen(utsname()->nodename) + 1; 3765 len += strlen(adapter->netdev->name) + 1; 3766 3767 return len; 3768 } 3769 3770 static void vnic_add_client_data(struct ibmvnic_adapter *adapter, 3771 struct vnic_login_client_data *vlcd) 3772 { 3773 const char *os_name = "Linux"; 3774 int len; 3775 3776 /* Type 1 - LPAR OS */ 3777 vlcd->type = 1; 3778 len = strlen(os_name) + 1; 3779 vlcd->len = cpu_to_be16(len); 3780 strncpy(vlcd->name, os_name, len); 3781 vlcd = (struct vnic_login_client_data *)(vlcd->name + len); 3782 3783 /* Type 2 - LPAR name */ 3784 vlcd->type = 2; 3785 len = strlen(utsname()->nodename) + 1; 3786 vlcd->len = cpu_to_be16(len); 3787 strncpy(vlcd->name, utsname()->nodename, len); 3788 vlcd = (struct vnic_login_client_data *)(vlcd->name + len); 3789 3790 /* Type 3 - device name */ 3791 vlcd->type = 3; 3792 len = strlen(adapter->netdev->name) + 1; 3793 vlcd->len = cpu_to_be16(len); 3794 strncpy(vlcd->name, adapter->netdev->name, len); 3795 } 3796 3797 static int send_login(struct ibmvnic_adapter *adapter) 3798 { 3799 struct ibmvnic_login_rsp_buffer *login_rsp_buffer; 3800 struct ibmvnic_login_buffer *login_buffer; 3801 struct device *dev = &adapter->vdev->dev; 3802 struct vnic_login_client_data *vlcd; 3803 dma_addr_t rsp_buffer_token; 3804 dma_addr_t buffer_token; 3805 size_t rsp_buffer_size; 3806 union ibmvnic_crq crq; 3807 int client_data_len; 3808 size_t buffer_size; 3809 __be64 *tx_list_p; 3810 __be64 *rx_list_p; 3811 int rc; 3812 int i; 3813 3814 if (!adapter->tx_scrq || !adapter->rx_scrq) { 3815 netdev_err(adapter->netdev, 3816 "RX or TX queues are not allocated, device login failed\n"); 3817 return -1; 3818 } 3819 3820 release_login_buffer(adapter); 3821 release_login_rsp_buffer(adapter); 3822 3823 client_data_len = vnic_client_data_len(adapter); 3824 3825 buffer_size = 3826 sizeof(struct ibmvnic_login_buffer) + 3827 sizeof(u64) * (adapter->req_tx_queues + adapter->req_rx_queues) + 3828 client_data_len; 3829 3830 login_buffer = kzalloc(buffer_size, GFP_ATOMIC); 3831 if (!login_buffer) 3832 goto buf_alloc_failed; 3833 3834 buffer_token = dma_map_single(dev, login_buffer, buffer_size, 3835 DMA_TO_DEVICE); 3836 if (dma_mapping_error(dev, buffer_token)) { 3837 dev_err(dev, "Couldn't map login buffer\n"); 3838 goto buf_map_failed; 3839 } 3840 3841 rsp_buffer_size = sizeof(struct ibmvnic_login_rsp_buffer) + 3842 sizeof(u64) * adapter->req_tx_queues + 3843 sizeof(u64) * adapter->req_rx_queues + 3844 sizeof(u64) * adapter->req_rx_queues + 3845 sizeof(u8) * IBMVNIC_TX_DESC_VERSIONS; 3846 3847 login_rsp_buffer = kmalloc(rsp_buffer_size, GFP_ATOMIC); 3848 if (!login_rsp_buffer) 3849 goto buf_rsp_alloc_failed; 3850 3851 rsp_buffer_token = dma_map_single(dev, login_rsp_buffer, 3852 rsp_buffer_size, DMA_FROM_DEVICE); 3853 if (dma_mapping_error(dev, rsp_buffer_token)) { 3854 dev_err(dev, "Couldn't map login rsp buffer\n"); 3855 goto buf_rsp_map_failed; 3856 } 3857 3858 adapter->login_buf = login_buffer; 3859 adapter->login_buf_token = buffer_token; 3860 adapter->login_buf_sz = buffer_size; 3861 adapter->login_rsp_buf = login_rsp_buffer; 3862 adapter->login_rsp_buf_token = rsp_buffer_token; 3863 adapter->login_rsp_buf_sz = rsp_buffer_size; 3864 3865 login_buffer->len = cpu_to_be32(buffer_size); 3866 login_buffer->version = cpu_to_be32(INITIAL_VERSION_LB); 3867 login_buffer->num_txcomp_subcrqs = cpu_to_be32(adapter->req_tx_queues); 3868 login_buffer->off_txcomp_subcrqs = 3869 
cpu_to_be32(sizeof(struct ibmvnic_login_buffer)); 3870 login_buffer->num_rxcomp_subcrqs = cpu_to_be32(adapter->req_rx_queues); 3871 login_buffer->off_rxcomp_subcrqs = 3872 cpu_to_be32(sizeof(struct ibmvnic_login_buffer) + 3873 sizeof(u64) * adapter->req_tx_queues); 3874 login_buffer->login_rsp_ioba = cpu_to_be32(rsp_buffer_token); 3875 login_buffer->login_rsp_len = cpu_to_be32(rsp_buffer_size); 3876 3877 tx_list_p = (__be64 *)((char *)login_buffer + 3878 sizeof(struct ibmvnic_login_buffer)); 3879 rx_list_p = (__be64 *)((char *)login_buffer + 3880 sizeof(struct ibmvnic_login_buffer) + 3881 sizeof(u64) * adapter->req_tx_queues); 3882 3883 for (i = 0; i < adapter->req_tx_queues; i++) { 3884 if (adapter->tx_scrq[i]) { 3885 tx_list_p[i] = 3886 cpu_to_be64(adapter->tx_scrq[i]->crq_num); 3887 } 3888 } 3889 3890 for (i = 0; i < adapter->req_rx_queues; i++) { 3891 if (adapter->rx_scrq[i]) { 3892 rx_list_p[i] = 3893 cpu_to_be64(adapter->rx_scrq[i]->crq_num); 3894 } 3895 } 3896 3897 /* Insert vNIC login client data */ 3898 vlcd = (struct vnic_login_client_data *) 3899 ((char *)rx_list_p + (sizeof(u64) * adapter->req_rx_queues)); 3900 login_buffer->client_data_offset = 3901 cpu_to_be32((char *)vlcd - (char *)login_buffer); 3902 login_buffer->client_data_len = cpu_to_be32(client_data_len); 3903 3904 vnic_add_client_data(adapter, vlcd); 3905 3906 netdev_dbg(adapter->netdev, "Login Buffer:\n"); 3907 for (i = 0; i < (adapter->login_buf_sz - 1) / 8 + 1; i++) { 3908 netdev_dbg(adapter->netdev, "%016lx\n", 3909 ((unsigned long *)(adapter->login_buf))[i]); 3910 } 3911 3912 memset(&crq, 0, sizeof(crq)); 3913 crq.login.first = IBMVNIC_CRQ_CMD; 3914 crq.login.cmd = LOGIN; 3915 crq.login.ioba = cpu_to_be32(buffer_token); 3916 crq.login.len = cpu_to_be32(buffer_size); 3917 3918 adapter->login_pending = true; 3919 rc = ibmvnic_send_crq(adapter, &crq); 3920 if (rc) { 3921 adapter->login_pending = false; 3922 netdev_err(adapter->netdev, "Failed to send login, rc=%d\n", rc); 3923 goto buf_rsp_map_failed; 3924 } 3925 3926 return 0; 3927 3928 buf_rsp_map_failed: 3929 kfree(login_rsp_buffer); 3930 adapter->login_rsp_buf = NULL; 3931 buf_rsp_alloc_failed: 3932 dma_unmap_single(dev, buffer_token, buffer_size, DMA_TO_DEVICE); 3933 buf_map_failed: 3934 kfree(login_buffer); 3935 adapter->login_buf = NULL; 3936 buf_alloc_failed: 3937 return -1; 3938 } 3939 3940 static int send_request_map(struct ibmvnic_adapter *adapter, dma_addr_t addr, 3941 u32 len, u8 map_id) 3942 { 3943 union ibmvnic_crq crq; 3944 3945 memset(&crq, 0, sizeof(crq)); 3946 crq.request_map.first = IBMVNIC_CRQ_CMD; 3947 crq.request_map.cmd = REQUEST_MAP; 3948 crq.request_map.map_id = map_id; 3949 crq.request_map.ioba = cpu_to_be32(addr); 3950 crq.request_map.len = cpu_to_be32(len); 3951 return ibmvnic_send_crq(adapter, &crq); 3952 } 3953 3954 static int send_request_unmap(struct ibmvnic_adapter *adapter, u8 map_id) 3955 { 3956 union ibmvnic_crq crq; 3957 3958 memset(&crq, 0, sizeof(crq)); 3959 crq.request_unmap.first = IBMVNIC_CRQ_CMD; 3960 crq.request_unmap.cmd = REQUEST_UNMAP; 3961 crq.request_unmap.map_id = map_id; 3962 return ibmvnic_send_crq(adapter, &crq); 3963 } 3964 3965 static void send_query_map(struct ibmvnic_adapter *adapter) 3966 { 3967 union ibmvnic_crq crq; 3968 3969 memset(&crq, 0, sizeof(crq)); 3970 crq.query_map.first = IBMVNIC_CRQ_CMD; 3971 crq.query_map.cmd = QUERY_MAP; 3972 ibmvnic_send_crq(adapter, &crq); 3973 } 3974 3975 /* Send a series of CRQs requesting various capabilities of the VNIC server */ 3976 static void send_query_cap(struct 

/* Send a series of CRQs requesting various capabilities of the VNIC server */
static void send_query_cap(struct ibmvnic_adapter *adapter)
{
	/* The server is queried for each capability in the order listed
	 * here; every query bumps running_cap_crqs, and the response
	 * handler decrements it to detect when the exchange is done.
	 */
	static const int caps[] = {
		MIN_TX_QUEUES,
		MIN_RX_QUEUES,
		MIN_RX_ADD_QUEUES,
		MAX_TX_QUEUES,
		MAX_RX_QUEUES,
		MAX_RX_ADD_QUEUES,
		MIN_TX_ENTRIES_PER_SUBCRQ,
		MIN_RX_ADD_ENTRIES_PER_SUBCRQ,
		MAX_TX_ENTRIES_PER_SUBCRQ,
		MAX_RX_ADD_ENTRIES_PER_SUBCRQ,
		TCP_IP_OFFLOAD,
		PROMISC_SUPPORTED,
		MIN_MTU,
		MAX_MTU,
		MAX_MULTICAST_FILTERS,
		VLAN_HEADER_INSERTION,
		RX_VLAN_HEADER_INSERTION,
		MAX_TX_SG_ENTRIES,
		RX_SG_SUPPORTED,
		OPT_TX_COMP_SUB_QUEUES,
		OPT_RX_COMP_QUEUES,
		OPT_RX_BUFADD_Q_PER_RX_COMP_Q,
		OPT_TX_ENTRIES_PER_SUBCRQ,
		OPT_RXBA_ENTRIES_PER_SUBCRQ,
		TX_RX_DESC_REQ,
	};
	union ibmvnic_crq crq;
	int i;

	atomic_set(&adapter->running_cap_crqs, 0);
	memset(&crq, 0, sizeof(crq));
	crq.query_capability.first = IBMVNIC_CRQ_CMD;
	crq.query_capability.cmd = QUERY_CAPABILITY;

	for (i = 0; i < ARRAY_SIZE(caps); i++) {
		crq.query_capability.capability = cpu_to_be16(caps[i]);
		atomic_inc(&adapter->running_cap_crqs);
		ibmvnic_send_crq(adapter, &crq);
	}
}

static void send_query_ip_offload(struct ibmvnic_adapter *adapter)
{
	int buf_sz = sizeof(struct ibmvnic_query_ip_offload_buffer);
	struct device *dev = &adapter->vdev->dev;
	union ibmvnic_crq crq;

	adapter->ip_offload_tok =
		dma_map_single(dev, &adapter->ip_offload_buf, buf_sz,
			       DMA_FROM_DEVICE);

	if (dma_mapping_error(dev, adapter->ip_offload_tok)) {
		if (!firmware_has_feature(FW_FEATURE_CMO))
			dev_err(dev, "Couldn't map offload buffer\n");
		return;
	}

	memset(&crq, 0, sizeof(crq));
	crq.query_ip_offload.first = IBMVNIC_CRQ_CMD;
	crq.query_ip_offload.cmd = QUERY_IP_OFFLOAD;
	crq.query_ip_offload.len = cpu_to_be32(buf_sz);
	crq.query_ip_offload.ioba = cpu_to_be32(adapter->ip_offload_tok);

	ibmvnic_send_crq(adapter, &crq);
}

static void send_control_ip_offload(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_control_ip_offload_buffer *ctrl_buf = &adapter->ip_offload_ctrl;
	struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
	struct device *dev = &adapter->vdev->dev;
	netdev_features_t old_hw_features = 0;
	union ibmvnic_crq crq;

	adapter->ip_offload_ctrl_tok =
		dma_map_single(dev, ctrl_buf, sizeof(adapter->ip_offload_ctrl),
			       DMA_TO_DEVICE);

	if (dma_mapping_error(dev, adapter->ip_offload_ctrl_tok)) {
		dev_err(dev, "Couldn't map ip offload control buffer\n");
		return;
	}

	ctrl_buf->len = cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
	ctrl_buf->version = cpu_to_be32(INITIAL_VERSION_IOB);
	ctrl_buf->ipv4_chksum = buf->ipv4_chksum;
	ctrl_buf->ipv6_chksum = buf->ipv6_chksum;
	ctrl_buf->tcp_ipv4_chksum = buf->tcp_ipv4_chksum;
	ctrl_buf->udp_ipv4_chksum = buf->udp_ipv4_chksum;
	ctrl_buf->tcp_ipv6_chksum = buf->tcp_ipv6_chksum;
	ctrl_buf->udp_ipv6_chksum = buf->udp_ipv6_chksum;
	ctrl_buf->large_tx_ipv4 = buf->large_tx_ipv4;
	ctrl_buf->large_tx_ipv6 = buf->large_tx_ipv6;

	/* large_rx disabled for now, additional features needed */
	ctrl_buf->large_rx_ipv4 = 0;
	ctrl_buf->large_rx_ipv6 = 0;

	if (adapter->state != VNIC_PROBING) {
		old_hw_features = adapter->netdev->hw_features;
		adapter->netdev->hw_features = 0;
	}

	adapter->netdev->hw_features = NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO;

	if (buf->tcp_ipv4_chksum || buf->udp_ipv4_chksum)
		adapter->netdev->hw_features |= NETIF_F_IP_CSUM;

	if (buf->tcp_ipv6_chksum || buf->udp_ipv6_chksum)
		adapter->netdev->hw_features |= NETIF_F_IPV6_CSUM;

	if ((adapter->netdev->features &
	    (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)))
		adapter->netdev->hw_features |= NETIF_F_RXCSUM;

	if (buf->large_tx_ipv4)
		adapter->netdev->hw_features |= NETIF_F_TSO;
	if (buf->large_tx_ipv6)
		adapter->netdev->hw_features |= NETIF_F_TSO6;

	if (adapter->state == VNIC_PROBING) {
		adapter->netdev->features |= adapter->netdev->hw_features;
	} else if (old_hw_features != adapter->netdev->hw_features) {
		netdev_features_t tmp = 0;

		/* disable features no longer supported */
		adapter->netdev->features &= adapter->netdev->hw_features;
		/* turn on features now supported if previously enabled */
		tmp = (old_hw_features ^ adapter->netdev->hw_features) &
		      adapter->netdev->hw_features;
		adapter->netdev->features |=
				tmp & adapter->netdev->wanted_features;
	}

	memset(&crq, 0, sizeof(crq));
	crq.control_ip_offload.first = IBMVNIC_CRQ_CMD;
	crq.control_ip_offload.cmd = CONTROL_IP_OFFLOAD;
	crq.control_ip_offload.len =
		cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
	crq.control_ip_offload.ioba = cpu_to_be32(adapter->ip_offload_ctrl_tok);
	ibmvnic_send_crq(adapter, &crq);
}
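
/* Worked example of the feature reconciliation above: suppose the old
 * hw_features had TSO but not RXCSUM, and the refreshed hw_features has
 * RXCSUM but not TSO.  Then
 *
 *	(old_hw_features ^ hw_features) & hw_features
 *
 * selects exactly the newly supported bits (RXCSUM), and masking with
 * wanted_features re-enables them only if user space had asked for
 * them; TSO is cleared from features by the "&= hw_features" step.
 */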

static void handle_vpd_size_rsp(union ibmvnic_crq *crq,
				struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;

	if (crq->get_vpd_size_rsp.rc.code) {
		dev_err(dev, "Error retrieving VPD size, rc=%x\n",
			crq->get_vpd_size_rsp.rc.code);
		complete(&adapter->fw_done);
		return;
	}

	adapter->vpd->len = be64_to_cpu(crq->get_vpd_size_rsp.len);
	complete(&adapter->fw_done);
}

static void handle_vpd_rsp(union ibmvnic_crq *crq,
			   struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	unsigned char *substr = NULL;
	u8 fw_level_len = 0;

	memset(adapter->fw_version, 0, sizeof(adapter->fw_version));

	dma_unmap_single(dev, adapter->vpd->dma_addr, adapter->vpd->len,
			 DMA_FROM_DEVICE);

	if (crq->get_vpd_rsp.rc.code) {
		dev_err(dev, "Error retrieving VPD from device, rc=%x\n",
			crq->get_vpd_rsp.rc.code);
		goto complete;
	}

	/* get the position of the firmware version info
	 * located after the ASCII 'RM' substring in the buffer
	 */
	substr = strnstr(adapter->vpd->buff, "RM", adapter->vpd->len);
	if (!substr) {
		dev_info(dev, "Warning - No FW level has been provided in the VPD buffer by the VIOS Server\n");
		goto complete;
	}

	/* get length of firmware level ASCII substring */
	if ((substr + 2) < (adapter->vpd->buff + adapter->vpd->len)) {
		fw_level_len = *(substr + 2);
	} else {
		dev_info(dev, "Length of FW substr extends past VPD buff\n");
		goto complete;
	}

	/* clamp to the destination buffer; fw_version was zeroed above,
	 * so the copy below stays NUL terminated
	 */
	fw_level_len = min_t(u8, fw_level_len,
			     sizeof(adapter->fw_version) - 1);

	/* copy firmware version string from vpd into adapter */
	if ((substr + 3 + fw_level_len) <
	    (adapter->vpd->buff + adapter->vpd->len)) {
		strncpy((char *)adapter->fw_version, substr + 3, fw_level_len);
	} else {
		dev_info(dev, "FW substr extends past VPD buff\n");
	}

complete:
	if (adapter->fw_version[0] == '\0')
		strncpy((char *)adapter->fw_version, "N/A", 3);
	complete(&adapter->fw_done);
}
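
/* The VPD fragment parsed above is expected to look like (assumed
 * layout, inferred from the parsing code):
 *
 *	... 'R' 'M' <len> <firmware level string of <len> bytes> ...
 *
 * i.e. substr[2] holds the length byte and substr + 3 is the first
 * byte of the firmware level string.
 */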

static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
	int i;

	dma_unmap_single(dev, adapter->ip_offload_tok,
			 sizeof(adapter->ip_offload_buf), DMA_FROM_DEVICE);

	netdev_dbg(adapter->netdev, "Query IP Offload Buffer:\n");
	for (i = 0; i < (sizeof(adapter->ip_offload_buf) - 1) / 8 + 1; i++)
		netdev_dbg(adapter->netdev, "%016lx\n",
			   ((unsigned long *)(buf))[i]);

	netdev_dbg(adapter->netdev, "ipv4_chksum = %d\n", buf->ipv4_chksum);
	netdev_dbg(adapter->netdev, "ipv6_chksum = %d\n", buf->ipv6_chksum);
	netdev_dbg(adapter->netdev, "tcp_ipv4_chksum = %d\n",
		   buf->tcp_ipv4_chksum);
	netdev_dbg(adapter->netdev, "tcp_ipv6_chksum = %d\n",
		   buf->tcp_ipv6_chksum);
	netdev_dbg(adapter->netdev, "udp_ipv4_chksum = %d\n",
		   buf->udp_ipv4_chksum);
	netdev_dbg(adapter->netdev, "udp_ipv6_chksum = %d\n",
		   buf->udp_ipv6_chksum);
	netdev_dbg(adapter->netdev, "large_tx_ipv4 = %d\n",
		   buf->large_tx_ipv4);
	netdev_dbg(adapter->netdev, "large_tx_ipv6 = %d\n",
		   buf->large_tx_ipv6);
	netdev_dbg(adapter->netdev, "large_rx_ipv4 = %d\n",
		   buf->large_rx_ipv4);
	netdev_dbg(adapter->netdev, "large_rx_ipv6 = %d\n",
		   buf->large_rx_ipv6);
	netdev_dbg(adapter->netdev, "max_ipv4_hdr_sz = %d\n",
		   buf->max_ipv4_header_size);
	netdev_dbg(adapter->netdev, "max_ipv6_hdr_sz = %d\n",
		   buf->max_ipv6_header_size);
	netdev_dbg(adapter->netdev, "max_tcp_hdr_size = %d\n",
		   buf->max_tcp_header_size);
	netdev_dbg(adapter->netdev, "max_udp_hdr_size = %d\n",
		   buf->max_udp_header_size);
	netdev_dbg(adapter->netdev, "max_large_tx_size = %d\n",
		   buf->max_large_tx_size);
	netdev_dbg(adapter->netdev, "max_large_rx_size = %d\n",
		   buf->max_large_rx_size);
	netdev_dbg(adapter->netdev, "ipv6_ext_hdr = %d\n",
		   buf->ipv6_extension_header);
	netdev_dbg(adapter->netdev, "tcp_pseudosum_req = %d\n",
		   buf->tcp_pseudosum_req);
	netdev_dbg(adapter->netdev, "num_ipv6_ext_hd = %d\n",
		   buf->num_ipv6_ext_headers);
	netdev_dbg(adapter->netdev, "off_ipv6_ext_hd = %d\n",
		   buf->off_ipv6_ext_headers);

	send_control_ip_offload(adapter);
}

static const char *ibmvnic_fw_err_cause(u16 cause)
{
	switch (cause) {
	case ADAPTER_PROBLEM:
		return "adapter problem";
	case BUS_PROBLEM:
		return "bus problem";
	case FW_PROBLEM:
		return "firmware problem";
	case DD_PROBLEM:
		return "device driver problem";
	case EEH_RECOVERY:
		return "EEH recovery";
	case FW_UPDATED:
		return "firmware updated";
	case LOW_MEMORY:
		return "low memory";
	default:
		return "unknown";
	}
}

static void handle_error_indication(union ibmvnic_crq *crq,
				    struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	u16 cause;

	cause = be16_to_cpu(crq->error_indication.error_cause);

	dev_warn_ratelimited(dev,
			     "Firmware reports %serror, cause: %s. Starting recovery...\n",
			     crq->error_indication.flags
				& IBMVNIC_FATAL_ERROR ? "FATAL " : "",
			     ibmvnic_fw_err_cause(cause));

	if (crq->error_indication.flags & IBMVNIC_FATAL_ERROR)
		ibmvnic_reset(adapter, VNIC_RESET_FATAL);
	else
		ibmvnic_reset(adapter, VNIC_RESET_NON_FATAL);
}

static int handle_change_mac_rsp(union ibmvnic_crq *crq,
				 struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	long rc;

	rc = crq->change_mac_addr_rsp.rc.code;
	if (rc) {
		dev_err(dev, "Error %ld in CHANGE_MAC_ADDR_RSP\n", rc);
		goto out;
	}
	/* crq->change_mac_addr.mac_addr is the requested one
	 * crq->change_mac_addr_rsp.mac_addr is the returned valid one.
	 */
	ether_addr_copy(netdev->dev_addr,
			&crq->change_mac_addr_rsp.mac_addr[0]);
	ether_addr_copy(adapter->mac_addr,
			&crq->change_mac_addr_rsp.mac_addr[0]);
out:
	complete(&adapter->fw_done);
	return rc;
}

static void handle_request_cap_rsp(union ibmvnic_crq *crq,
				   struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	u64 *req_value;
	char *name;

	atomic_dec(&adapter->running_cap_crqs);
	switch (be16_to_cpu(crq->request_capability_rsp.capability)) {
	case REQ_TX_QUEUES:
		req_value = &adapter->req_tx_queues;
		name = "tx";
		break;
	case REQ_RX_QUEUES:
		req_value = &adapter->req_rx_queues;
		name = "rx";
		break;
	case REQ_RX_ADD_QUEUES:
		req_value = &adapter->req_rx_add_queues;
		name = "rx_add";
		break;
	case REQ_TX_ENTRIES_PER_SUBCRQ:
		req_value = &adapter->req_tx_entries_per_subcrq;
		name = "tx_entries_per_subcrq";
		break;
	case REQ_RX_ADD_ENTRIES_PER_SUBCRQ:
		req_value = &adapter->req_rx_add_entries_per_subcrq;
		name = "rx_add_entries_per_subcrq";
		break;
	case REQ_MTU:
		req_value = &adapter->req_mtu;
		name = "mtu";
		break;
	case PROMISC_REQUESTED:
		req_value = &adapter->promisc;
		name = "promisc";
		break;
	default:
		dev_err(dev, "Got invalid cap request rsp %d\n",
			be16_to_cpu(crq->request_capability.capability));
		return;
	}

	switch (crq->request_capability_rsp.rc.code) {
	case SUCCESS:
		break;
	case PARTIALSUCCESS:
		dev_info(dev, "req=%lld, rsp=%ld in %s queue, retrying.\n",
			 *req_value,
			 (long)be64_to_cpu(crq->request_capability_rsp.number),
			 name);

		if (be16_to_cpu(crq->request_capability_rsp.capability) ==
		    REQ_MTU) {
			pr_err("mtu of %llu is not supported. Reverting.\n",
			       *req_value);
			*req_value = adapter->fallback.mtu;
		} else {
			*req_value =
				be64_to_cpu(crq->request_capability_rsp.number);
		}

		send_request_cap(adapter, 1);
		return;
	default:
		dev_err(dev, "Error %d in request cap rsp\n",
			crq->request_capability_rsp.rc.code);
		return;
	}

	/* Done receiving requested capabilities, query IP offload support */
	if (atomic_read(&adapter->running_cap_crqs) == 0) {
		adapter->wait_capability = false;
		send_query_ip_offload(adapter);
	}
}
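
/* Example of the PARTIALSUCCESS path above: if the driver requested
 * req_tx_queues = 8 and the server can only grant 4, the response
 * carries rc = PARTIALSUCCESS and number = 4; the driver adopts 4 and
 * re-sends the request via send_request_cap().  MTU is the exception:
 * a partially granted MTU is reverted to the remembered fallback value
 * instead.
 */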

static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
			    struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct net_device *netdev = adapter->netdev;
	struct ibmvnic_login_rsp_buffer *login_rsp = adapter->login_rsp_buf;
	struct ibmvnic_login_buffer *login = adapter->login_buf;
	u64 *tx_handle_array;
	u64 *rx_handle_array;
	int num_tx_pools;
	int num_rx_pools;
	u64 *size_array;
	int i;

	/* CHECK: Test/set of login_pending does not need to be atomic
	 * because only ibmvnic_tasklet tests/clears this.
	 */
	if (!adapter->login_pending) {
		netdev_warn(netdev, "Ignoring unexpected login response\n");
		return 0;
	}
	adapter->login_pending = false;

	dma_unmap_single(dev, adapter->login_buf_token, adapter->login_buf_sz,
			 DMA_TO_DEVICE);
	dma_unmap_single(dev, adapter->login_rsp_buf_token,
			 adapter->login_rsp_buf_sz, DMA_FROM_DEVICE);

	/* If the number of queues requested can't be allocated by the
	 * server, the login response will return with code 1. We will need
	 * to resend the login buffer with fewer queues requested.
	 */
	if (login_rsp_crq->generic.rc.code) {
		adapter->init_done_rc = login_rsp_crq->generic.rc.code;
		complete(&adapter->init_done);
		return 0;
	}

	netdev->mtu = adapter->req_mtu - ETH_HLEN;

	netdev_dbg(adapter->netdev, "Login Response Buffer:\n");
	for (i = 0; i < (adapter->login_rsp_buf_sz - 1) / 8 + 1; i++) {
		netdev_dbg(adapter->netdev, "%016lx\n",
			   ((unsigned long *)(adapter->login_rsp_buf))[i]);
	}

	/* Sanity checks */
	if (login->num_txcomp_subcrqs != login_rsp->num_txsubm_subcrqs ||
	    (be32_to_cpu(login->num_rxcomp_subcrqs) *
	     adapter->req_rx_add_queues !=
	     be32_to_cpu(login_rsp->num_rxadd_subcrqs))) {
		dev_err(dev, "FATAL: Inconsistent login and login rsp\n");
		ibmvnic_reset(adapter, VNIC_RESET_FATAL);
		return -EIO;
	}
	size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
		be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));
	/* variable buffer sizes are not supported, so just read the
	 * first entry.
	 */
	adapter->cur_rx_buf_sz = be64_to_cpu(size_array[0]);

	num_tx_pools = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
	num_rx_pools = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);

	tx_handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
		be32_to_cpu(adapter->login_rsp_buf->off_txsubm_subcrqs));
	rx_handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
		be32_to_cpu(adapter->login_rsp_buf->off_rxadd_subcrqs));

	for (i = 0; i < num_tx_pools; i++)
		adapter->tx_scrq[i]->handle = tx_handle_array[i];

	for (i = 0; i < num_rx_pools; i++)
		adapter->rx_scrq[i]->handle = rx_handle_array[i];

	adapter->num_active_tx_scrqs = num_tx_pools;
	adapter->num_active_rx_scrqs = num_rx_pools;
	release_login_rsp_buffer(adapter);
	release_login_buffer(adapter);
	complete(&adapter->init_done);

	return 0;
}
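
/* The offsets in the login response (off_txsubm_subcrqs,
 * off_rxadd_subcrqs, off_rxadd_buff_size) are byte offsets from the
 * start of that buffer, mirroring the layout of the login request.
 * Note that the sub-CRQ handles are copied without byte swapping; the
 * assumption, based on the absence of be64_to_cpu() here, is that they
 * are opaque tokens handed back to the hypervisor as-is.
 */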

static void handle_request_unmap_rsp(union ibmvnic_crq *crq,
				     struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	long rc;

	rc = crq->request_unmap_rsp.rc.code;
	if (rc)
		dev_err(dev, "Error %ld in REQUEST_UNMAP_RSP\n", rc);
}

static void handle_query_map_rsp(union ibmvnic_crq *crq,
				 struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	long rc;

	rc = crq->query_map_rsp.rc.code;
	if (rc) {
		dev_err(dev, "Error %ld in QUERY_MAP_RSP\n", rc);
		return;
	}
	netdev_dbg(netdev, "page_size = %d\ntot_pages = %d\nfree_pages = %d\n",
		   crq->query_map_rsp.page_size, crq->query_map_rsp.tot_pages,
		   crq->query_map_rsp.free_pages);
}

static void handle_query_cap_rsp(union ibmvnic_crq *crq,
				 struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	long rc;

	atomic_dec(&adapter->running_cap_crqs);
	netdev_dbg(netdev, "Outstanding queries: %d\n",
		   atomic_read(&adapter->running_cap_crqs));
	rc = crq->query_capability.rc.code;
	if (rc) {
		dev_err(dev, "Error %ld in QUERY_CAP_RSP\n", rc);
		goto out;
	}

	switch (be16_to_cpu(crq->query_capability.capability)) {
	case MIN_TX_QUEUES:
		adapter->min_tx_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_tx_queues = %lld\n",
			   adapter->min_tx_queues);
		break;
	case MIN_RX_QUEUES:
		adapter->min_rx_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_rx_queues = %lld\n",
			   adapter->min_rx_queues);
		break;
	case MIN_RX_ADD_QUEUES:
		adapter->min_rx_add_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_rx_add_queues = %lld\n",
			   adapter->min_rx_add_queues);
		break;
	case MAX_TX_QUEUES:
		adapter->max_tx_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_tx_queues = %lld\n",
			   adapter->max_tx_queues);
		break;
	case MAX_RX_QUEUES:
		adapter->max_rx_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_rx_queues = %lld\n",
			   adapter->max_rx_queues);
		break;
	case MAX_RX_ADD_QUEUES:
		adapter->max_rx_add_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_rx_add_queues = %lld\n",
			   adapter->max_rx_add_queues);
		break;
	case MIN_TX_ENTRIES_PER_SUBCRQ:
		adapter->min_tx_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_tx_entries_per_subcrq = %lld\n",
			   adapter->min_tx_entries_per_subcrq);
		break;
	case MIN_RX_ADD_ENTRIES_PER_SUBCRQ:
		adapter->min_rx_add_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_rx_add_entries_per_subcrq = %lld\n",
			   adapter->min_rx_add_entries_per_subcrq);
		break;
	case MAX_TX_ENTRIES_PER_SUBCRQ:
		adapter->max_tx_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_tx_entries_per_subcrq = %lld\n",
			   adapter->max_tx_entries_per_subcrq);
		break;
	case MAX_RX_ADD_ENTRIES_PER_SUBCRQ:
		adapter->max_rx_add_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_rx_add_entries_per_subcrq = %lld\n",
			   adapter->max_rx_add_entries_per_subcrq);
		break;
	case TCP_IP_OFFLOAD:
		adapter->tcp_ip_offload =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "tcp_ip_offload = %lld\n",
			   adapter->tcp_ip_offload);
		break;
	case PROMISC_SUPPORTED:
		adapter->promisc_supported =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "promisc_supported = %lld\n",
			   adapter->promisc_supported);
		break;
	case MIN_MTU:
		adapter->min_mtu = be64_to_cpu(crq->query_capability.number);
		netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
		netdev_dbg(netdev, "min_mtu = %lld\n", adapter->min_mtu);
		break;
	case MAX_MTU:
		adapter->max_mtu = be64_to_cpu(crq->query_capability.number);
		netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
		netdev_dbg(netdev, "max_mtu = %lld\n", adapter->max_mtu);
		break;
	case MAX_MULTICAST_FILTERS:
		adapter->max_multicast_filters =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_multicast_filters = %lld\n",
			   adapter->max_multicast_filters);
		break;
	case VLAN_HEADER_INSERTION:
		adapter->vlan_header_insertion =
		    be64_to_cpu(crq->query_capability.number);
		if (adapter->vlan_header_insertion)
			netdev->features |= NETIF_F_HW_VLAN_STAG_TX;
		netdev_dbg(netdev, "vlan_header_insertion = %lld\n",
			   adapter->vlan_header_insertion);
		break;
	case RX_VLAN_HEADER_INSERTION:
		adapter->rx_vlan_header_insertion =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "rx_vlan_header_insertion = %lld\n",
			   adapter->rx_vlan_header_insertion);
		break;
	case MAX_TX_SG_ENTRIES:
		adapter->max_tx_sg_entries =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_tx_sg_entries = %lld\n",
			   adapter->max_tx_sg_entries);
		break;
	case RX_SG_SUPPORTED:
		adapter->rx_sg_supported =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "rx_sg_supported = %lld\n",
			   adapter->rx_sg_supported);
		break;
	case OPT_TX_COMP_SUB_QUEUES:
		adapter->opt_tx_comp_sub_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_tx_comp_sub_queues = %lld\n",
			   adapter->opt_tx_comp_sub_queues);
		break;
	case OPT_RX_COMP_QUEUES:
		adapter->opt_rx_comp_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_rx_comp_queues = %lld\n",
			   adapter->opt_rx_comp_queues);
		break;
	case OPT_RX_BUFADD_Q_PER_RX_COMP_Q:
		adapter->opt_rx_bufadd_q_per_rx_comp_q =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_rx_bufadd_q_per_rx_comp_q = %lld\n",
			   adapter->opt_rx_bufadd_q_per_rx_comp_q);
		break;
	case OPT_TX_ENTRIES_PER_SUBCRQ:
		adapter->opt_tx_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_tx_entries_per_subcrq = %lld\n",
			   adapter->opt_tx_entries_per_subcrq);
		break;
	case OPT_RXBA_ENTRIES_PER_SUBCRQ:
		adapter->opt_rxba_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_rxba_entries_per_subcrq = %lld\n",
			   adapter->opt_rxba_entries_per_subcrq);
		break;
	case TX_RX_DESC_REQ:
		adapter->tx_rx_desc_req = crq->query_capability.number;
		netdev_dbg(netdev, "tx_rx_desc_req = %llx\n",
			   adapter->tx_rx_desc_req);
		break;

	default:
		netdev_err(netdev, "Got invalid cap rsp %d\n",
			   be16_to_cpu(crq->query_capability.capability));
	}

out:
	if (atomic_read(&adapter->running_cap_crqs) == 0) {
		adapter->wait_capability = false;
		send_request_cap(adapter, 0);
	}
}

static int send_query_phys_parms(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;
	int rc;

	memset(&crq, 0, sizeof(crq));
	crq.query_phys_parms.first = IBMVNIC_CRQ_CMD;
	crq.query_phys_parms.cmd = QUERY_PHYS_PARMS;

	mutex_lock(&adapter->fw_lock);
	adapter->fw_done_rc = 0;
	reinit_completion(&adapter->fw_done);

	rc = ibmvnic_send_crq(adapter, &crq);
	if (rc) {
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}

	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
	if (rc) {
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}

	mutex_unlock(&adapter->fw_lock);
	return adapter->fw_done_rc ? -EIO : 0;
}
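
/* send_query_phys_parms() above shows the synchronous CRQ pattern used
 * here: hold fw_lock so only one command/response exchange is
 * outstanding, zero fw_done_rc, reinit and wait on the fw_done
 * completion (with a 10000 ms timeout, assuming the wait helper takes
 * milliseconds), and let the CRQ tasklet fill in fw_done_rc and call
 * complete() when the response arrives.
 */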

static int handle_query_phys_parms_rsp(union ibmvnic_crq *crq,
				       struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int rc;
	/* The speed field arrives big endian; cpu_to_be32() on an already
	 * big-endian value swaps it back to host order on little-endian
	 * hosts and is a no-op on big-endian ones, so rspeed can be
	 * compared against the IBMVNIC_*BPS constants directly.
	 */
	__be32 rspeed = cpu_to_be32(crq->query_phys_parms_rsp.speed);

	rc = crq->query_phys_parms_rsp.rc.code;
	if (rc) {
		netdev_err(netdev, "Error %d in QUERY_PHYS_PARMS\n", rc);
		return rc;
	}
	switch (rspeed) {
	case IBMVNIC_10MBPS:
		adapter->speed = SPEED_10;
		break;
	case IBMVNIC_100MBPS:
		adapter->speed = SPEED_100;
		break;
	case IBMVNIC_1GBPS:
		adapter->speed = SPEED_1000;
		break;
	case IBMVNIC_10GBPS:
		adapter->speed = SPEED_10000;
		break;
	case IBMVNIC_25GBPS:
		adapter->speed = SPEED_25000;
		break;
	case IBMVNIC_40GBPS:
		adapter->speed = SPEED_40000;
		break;
	case IBMVNIC_50GBPS:
		adapter->speed = SPEED_50000;
		break;
	case IBMVNIC_100GBPS:
		adapter->speed = SPEED_100000;
		break;
	case IBMVNIC_200GBPS:
		adapter->speed = SPEED_200000;
		break;
	default:
		if (netif_carrier_ok(netdev))
			netdev_warn(netdev, "Unknown speed 0x%08x\n", rspeed);
		adapter->speed = SPEED_UNKNOWN;
	}
	if (crq->query_phys_parms_rsp.flags1 & IBMVNIC_FULL_DUPLEX)
		adapter->duplex = DUPLEX_FULL;
	else if (crq->query_phys_parms_rsp.flags1 & IBMVNIC_HALF_DUPLEX)
		adapter->duplex = DUPLEX_HALF;
	else
		adapter->duplex = DUPLEX_UNKNOWN;

	return rc;
}

static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
			       struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_generic_crq *gen_crq = &crq->generic;
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	u64 *u64_crq = (u64 *)crq;
	long rc;

	netdev_dbg(netdev, "Handling CRQ: %016lx %016lx\n",
		   (unsigned long)cpu_to_be64(u64_crq[0]),
		   (unsigned long)cpu_to_be64(u64_crq[1]));
	switch (gen_crq->first) {
	case IBMVNIC_CRQ_INIT_RSP:
		switch (gen_crq->cmd) {
		case IBMVNIC_CRQ_INIT:
			dev_info(dev, "Partner initialized\n");
			adapter->from_passive_init = true;
			/* Discard any stale login responses from prev reset.
			 * CHECK: should we clear even on INIT_COMPLETE?
			 */
			adapter->login_pending = false;

			if (!completion_done(&adapter->init_done)) {
				complete(&adapter->init_done);
				adapter->init_done_rc = -EIO;
			}
			rc = ibmvnic_reset(adapter, VNIC_RESET_FAILOVER);
			if (rc && rc != -EBUSY) {
				/* We were unable to schedule the failover
				 * reset either because the adapter was still
				 * probing (e.g. during kexec) or we could not
				 * allocate memory. Clear the failover_pending
				 * flag since no one else will. We ignore
				 * EBUSY because it means either FAILOVER reset
				 * is already scheduled or the adapter is
				 * being removed.
				 */
				netdev_err(netdev,
					   "Error %ld scheduling failover reset\n",
					   rc);
				adapter->failover_pending = false;
			}
			break;
		case IBMVNIC_CRQ_INIT_COMPLETE:
			dev_info(dev, "Partner initialization complete\n");
			adapter->crq.active = true;
			send_version_xchg(adapter);
			break;
		default:
			dev_err(dev, "Unknown crq cmd: %d\n", gen_crq->cmd);
		}
		return;
	case IBMVNIC_CRQ_XPORT_EVENT:
		netif_carrier_off(netdev);
		adapter->crq.active = false;
		/* terminate any thread waiting for a response
		 * from the device
		 */
		if (!completion_done(&adapter->fw_done)) {
			adapter->fw_done_rc = -EIO;
			complete(&adapter->fw_done);
		}
		if (!completion_done(&adapter->stats_done))
			complete(&adapter->stats_done);
		if (test_bit(0, &adapter->resetting))
			adapter->force_reset_recovery = true;
		if (gen_crq->cmd == IBMVNIC_PARTITION_MIGRATED) {
			dev_info(dev, "Migrated, re-enabling adapter\n");
			ibmvnic_reset(adapter, VNIC_RESET_MOBILITY);
		} else if (gen_crq->cmd == IBMVNIC_DEVICE_FAILOVER) {
			dev_info(dev, "Backing device failover detected\n");
			adapter->failover_pending = true;
		} else {
			/* The adapter lost the connection */
			dev_err(dev, "Virtual Adapter failed (rc=%d)\n",
				gen_crq->cmd);
			ibmvnic_reset(adapter, VNIC_RESET_FATAL);
		}
		return;
	case IBMVNIC_CRQ_CMD_RSP:
		break;
	default:
		dev_err(dev, "Got an invalid msg type 0x%02x\n",
			gen_crq->first);
		return;
	}

	switch (gen_crq->cmd) {
	case VERSION_EXCHANGE_RSP:
		rc = crq->version_exchange_rsp.rc.code;
		if (rc) {
			dev_err(dev, "Error %ld in VERSION_EXCHG_RSP\n", rc);
			break;
		}
		ibmvnic_version =
			be16_to_cpu(crq->version_exchange_rsp.version);
		dev_info(dev, "Partner protocol version is %d\n",
			 ibmvnic_version);
		send_query_cap(adapter);
		break;
	case QUERY_CAPABILITY_RSP:
		handle_query_cap_rsp(crq, adapter);
		break;
	case QUERY_MAP_RSP:
		handle_query_map_rsp(crq, adapter);
		break;
	case REQUEST_MAP_RSP:
		adapter->fw_done_rc = crq->request_map_rsp.rc.code;
		complete(&adapter->fw_done);
		break;
	case REQUEST_UNMAP_RSP:
		handle_request_unmap_rsp(crq, adapter);
		break;
	case REQUEST_CAPABILITY_RSP:
		handle_request_cap_rsp(crq, adapter);
		break;
	case LOGIN_RSP:
		netdev_dbg(netdev, "Got Login Response\n");
		handle_login_rsp(crq, adapter);
		break;
	case LOGICAL_LINK_STATE_RSP:
		netdev_dbg(netdev,
			   "Got Logical Link State Response, state: %d rc: %d\n",
			   crq->logical_link_state_rsp.link_state,
			   crq->logical_link_state_rsp.rc.code);
		adapter->logical_link_state =
			crq->logical_link_state_rsp.link_state;
		adapter->init_done_rc = crq->logical_link_state_rsp.rc.code;
		complete(&adapter->init_done);
		break;
	case LINK_STATE_INDICATION:
		netdev_dbg(netdev, "Got Logical Link State Indication\n");
		adapter->phys_link_state =
			crq->link_state_indication.phys_link_state;
		adapter->logical_link_state =
			crq->link_state_indication.logical_link_state;
		if (adapter->phys_link_state && adapter->logical_link_state)
			netif_carrier_on(netdev);
		else
			netif_carrier_off(netdev);
		break;
	case CHANGE_MAC_ADDR_RSP:
		netdev_dbg(netdev, "Got MAC address change Response\n");
		adapter->fw_done_rc = handle_change_mac_rsp(crq, adapter);
		break;
	case ERROR_INDICATION:
		netdev_dbg(netdev, "Got Error Indication\n");
		handle_error_indication(crq, adapter);
		break;
	case REQUEST_STATISTICS_RSP:
		netdev_dbg(netdev, "Got Statistics Response\n");
		complete(&adapter->stats_done);
		break;
	case QUERY_IP_OFFLOAD_RSP:
		netdev_dbg(netdev, "Got Query IP offload Response\n");
		handle_query_ip_offload_rsp(adapter);
		break;
	case MULTICAST_CTRL_RSP:
		netdev_dbg(netdev, "Got multicast control Response\n");
		break;
	case CONTROL_IP_OFFLOAD_RSP:
		netdev_dbg(netdev, "Got Control IP offload Response\n");
		dma_unmap_single(dev, adapter->ip_offload_ctrl_tok,
				 sizeof(adapter->ip_offload_ctrl),
				 DMA_TO_DEVICE);
		complete(&adapter->init_done);
		break;
	case COLLECT_FW_TRACE_RSP:
		netdev_dbg(netdev, "Got Collect firmware trace Response\n");
		complete(&adapter->fw_done);
		break;
	case GET_VPD_SIZE_RSP:
		handle_vpd_size_rsp(crq, adapter);
		break;
	case GET_VPD_RSP:
		handle_vpd_rsp(crq, adapter);
		break;
	case QUERY_PHYS_PARMS_RSP:
		adapter->fw_done_rc = handle_query_phys_parms_rsp(crq, adapter);
		complete(&adapter->fw_done);
		break;
	default:
		netdev_err(netdev, "Got an invalid cmd type 0x%02x\n",
			   gen_crq->cmd);
	}
}

static irqreturn_t ibmvnic_interrupt(int irq, void *instance)
{
	struct ibmvnic_adapter *adapter = instance;

	tasklet_schedule(&adapter->tasklet);
	return IRQ_HANDLED;
}

static void ibmvnic_tasklet(struct tasklet_struct *t)
{
	struct ibmvnic_adapter *adapter = from_tasklet(adapter, t, tasklet);
	struct ibmvnic_crq_queue *queue = &adapter->crq;
	union ibmvnic_crq *crq;
	unsigned long flags;
	bool done = false;

	spin_lock_irqsave(&queue->lock, flags);
	while (!done) {
		/* Pull all the valid messages off the CRQ */
		while ((crq = ibmvnic_next_crq(adapter)) != NULL) {
			/* This barrier makes sure ibmvnic_next_crq()'s
			 * crq->generic.first & IBMVNIC_CRQ_CMD_RSP is loaded
			 * before ibmvnic_handle_crq()'s
			 * switch(gen_crq->first) and switch(gen_crq->cmd).
			 */
			dma_rmb();
			ibmvnic_handle_crq(crq, adapter);
			crq->generic.first = 0;
		}

		/* remain in tasklet until all
		 * capabilities responses are received
		 */
		if (!adapter->wait_capability)
			done = true;
	}
	/* if capabilities CRQs were sent in this tasklet, the following
	 * tasklet must wait until all responses are received
	 */
	if (atomic_read(&adapter->running_cap_crqs) != 0)
		adapter->wait_capability = true;
	spin_unlock_irqrestore(&queue->lock, flags);
}
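
/* The hard interrupt handler above only schedules the tasklet; all CRQ
 * processing, including any further CRQs the response handlers send,
 * happens in ibmvnic_tasklet() under crq->lock, so the handlers never
 * run concurrently with one another.
 */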

static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct vio_dev *vdev = adapter->vdev;
	int rc;

	do {
		rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
	} while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));

	if (rc)
		dev_err(&vdev->dev, "Error enabling adapter (rc=%d)\n", rc);

	return rc;
}

static int ibmvnic_reset_crq(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct device *dev = &adapter->vdev->dev;
	struct vio_dev *vdev = adapter->vdev;
	int rc;

	/* Close the CRQ */
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	/* Clean out the queue */
	if (!crq->msgs)
		return -EINVAL;

	memset(crq->msgs, 0, PAGE_SIZE);
	crq->cur = 0;
	crq->active = false;

	/* And re-open it again */
	rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
				crq->msg_token, PAGE_SIZE);

	if (rc == H_CLOSED)
		/* Adapter is good, but other end is not ready */
		dev_warn(dev, "Partner adapter not ready\n");
	else if (rc != 0)
		dev_warn(dev, "Couldn't register crq (rc=%d)\n", rc);

	return rc;
}

static void release_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct vio_dev *vdev = adapter->vdev;
	long rc;

	if (!crq->msgs)
		return;

	netdev_dbg(adapter->netdev, "Releasing CRQ\n");
	free_irq(vdev->irq, adapter);
	tasklet_kill(&adapter->tasklet);
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	dma_unmap_single(&vdev->dev, crq->msg_token, PAGE_SIZE,
			 DMA_BIDIRECTIONAL);
	free_page((unsigned long)crq->msgs);
	crq->msgs = NULL;
	crq->active = false;
}

static int init_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct device *dev = &adapter->vdev->dev;
	struct vio_dev *vdev = adapter->vdev;
	int rc, retrc = -ENOMEM;

	if (crq->msgs)
		return 0;

	crq->msgs = (union ibmvnic_crq *)get_zeroed_page(GFP_KERNEL);
	/* Should we allocate more than one page? */

	if (!crq->msgs)
		return -ENOMEM;

	crq->size = PAGE_SIZE / sizeof(*crq->msgs);
	crq->msg_token = dma_map_single(dev, crq->msgs, PAGE_SIZE,
					DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, crq->msg_token))
		goto map_failed;

	rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
				crq->msg_token, PAGE_SIZE);

	if (rc == H_RESOURCE)
		/* maybe kexecing and resource is busy. try a reset */
		rc = ibmvnic_reset_crq(adapter);
	retrc = rc;

	if (rc == H_CLOSED) {
		dev_warn(dev, "Partner adapter not ready\n");
	} else if (rc) {
		dev_warn(dev, "Error %d opening adapter\n", rc);
		goto reg_crq_failed;
	}

	retrc = 0;

	tasklet_setup(&adapter->tasklet, (void *)ibmvnic_tasklet);

	netdev_dbg(adapter->netdev, "registering irq 0x%x\n", vdev->irq);
	snprintf(crq->name, sizeof(crq->name), "ibmvnic-%x",
		 adapter->vdev->unit_address);
	rc = request_irq(vdev->irq, ibmvnic_interrupt, 0, crq->name, adapter);
	if (rc) {
		dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n",
			vdev->irq, rc);
		goto req_irq_failed;
	}

	rc = vio_enable_interrupts(vdev);
	if (rc) {
		dev_err(dev, "Error %d enabling interrupts\n", rc);
		goto req_irq_failed;
	}

	crq->cur = 0;
	spin_lock_init(&crq->lock);

	return retrc;

req_irq_failed:
	tasklet_kill(&adapter->tasklet);
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
reg_crq_failed:
	dma_unmap_single(dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
map_failed:
	free_page((unsigned long)crq->msgs);
	crq->msgs = NULL;
	return retrc;
}
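
/* CRQ lifecycle: init_crq_queue() registers one DMA-mapped page with
 * the hypervisor via H_REG_CRQ and wires up the interrupt and tasklet;
 * ibmvnic_reset_crq() frees and re-registers the same page across
 * resets; release_crq_queue() tears everything down with H_FREE_CRQ.
 * H_BUSY and long-busy returns from H_FREE_CRQ are retried until the
 * hypervisor completes the free.
 */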

static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter, bool reset)
{
	struct device *dev = &adapter->vdev->dev;
	unsigned long timeout = msecs_to_jiffies(20000);
	u64 old_num_rx_queues, old_num_tx_queues;
	int rc;

	adapter->from_passive_init = false;

	if (reset) {
		old_num_rx_queues = adapter->req_rx_queues;
		old_num_tx_queues = adapter->req_tx_queues;
		reinit_completion(&adapter->init_done);
	}

	adapter->init_done_rc = 0;
	rc = ibmvnic_send_crq_init(adapter);
	if (rc) {
		dev_err(dev, "Send crq init failed with error %d\n", rc);
		return rc;
	}

	if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
		dev_err(dev, "Initialization sequence timed out\n");
		return -1;
	}

	if (adapter->init_done_rc) {
		release_crq_queue(adapter);
		return adapter->init_done_rc;
	}

	if (adapter->from_passive_init) {
		adapter->state = VNIC_OPEN;
		adapter->from_passive_init = false;
		return -1;
	}

	if (reset &&
	    test_bit(0, &adapter->resetting) && !adapter->wait_for_reset &&
	    adapter->reset_reason != VNIC_RESET_MOBILITY) {
		if (adapter->req_rx_queues != old_num_rx_queues ||
		    adapter->req_tx_queues != old_num_tx_queues) {
			release_sub_crqs(adapter, 0);
			rc = init_sub_crqs(adapter);
		} else {
			rc = reset_sub_crq_queues(adapter);
		}
	} else {
		rc = init_sub_crqs(adapter);
	}

	if (rc) {
		dev_err(dev, "Initialization of sub crqs failed\n");
		release_crq_queue(adapter);
		return rc;
	}

	rc = init_sub_crq_irqs(adapter);
	if (rc) {
		dev_err(dev, "Failed to initialize sub crq irqs\n");
		release_crq_queue(adapter);
	}

	return rc;
}

static struct device_attribute dev_attr_failover;

static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
{
	struct ibmvnic_adapter *adapter;
	struct net_device *netdev;
	unsigned char *mac_addr_p;
	int rc;

	dev_dbg(&dev->dev, "entering ibmvnic_probe for UA 0x%x\n",
		dev->unit_address);

	mac_addr_p = (unsigned char *)vio_get_attribute(dev,
							VETH_MAC_ADDR, NULL);
	if (!mac_addr_p) {
		dev_err(&dev->dev,
			"(%s:%3.3d) ERROR: Can't find MAC_ADDR attribute\n",
			__FILE__, __LINE__);
		return 0;
	}

	netdev = alloc_etherdev_mq(sizeof(struct ibmvnic_adapter),
				   IBMVNIC_MAX_QUEUES);
	if (!netdev)
		return -ENOMEM;

	adapter = netdev_priv(netdev);
	adapter->state = VNIC_PROBING;
	dev_set_drvdata(&dev->dev, netdev);
	adapter->vdev = dev;
	adapter->netdev = netdev;
	adapter->login_pending = false;

	ether_addr_copy(adapter->mac_addr, mac_addr_p);
	ether_addr_copy(netdev->dev_addr, adapter->mac_addr);
	netdev->irq = dev->irq;
	netdev->netdev_ops = &ibmvnic_netdev_ops;
	netdev->ethtool_ops = &ibmvnic_ethtool_ops;
	SET_NETDEV_DEV(netdev, &dev->dev);

	INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset);
	INIT_DELAYED_WORK(&adapter->ibmvnic_delayed_reset,
			  __ibmvnic_delayed_reset);
	INIT_LIST_HEAD(&adapter->rwi_list);
	spin_lock_init(&adapter->rwi_lock);
	spin_lock_init(&adapter->state_lock);
	mutex_init(&adapter->fw_lock);
	init_completion(&adapter->init_done);
	init_completion(&adapter->fw_done);
	init_completion(&adapter->reset_done);
	init_completion(&adapter->stats_done);
	clear_bit(0, &adapter->resetting);

	do {
		rc = init_crq_queue(adapter);
		if (rc) {
			dev_err(&dev->dev, "Couldn't initialize crq. rc=%d\n",
				rc);
			goto ibmvnic_init_fail;
		}

		rc = ibmvnic_reset_init(adapter, false);
		if (rc && rc != EAGAIN)
			goto ibmvnic_init_fail;
	} while (rc == EAGAIN);

	rc = init_stats_buffers(adapter);
	if (rc)
		goto ibmvnic_init_fail;

	rc = init_stats_token(adapter);
	if (rc)
		goto ibmvnic_stats_fail;

	netdev->mtu = adapter->req_mtu - ETH_HLEN;
	netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
	netdev->max_mtu = adapter->max_mtu - ETH_HLEN;

	rc = device_create_file(&dev->dev, &dev_attr_failover);
	if (rc)
		goto ibmvnic_dev_file_err;

	netif_carrier_off(netdev);
	rc = register_netdev(netdev);
	if (rc) {
		dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc);
		goto ibmvnic_register_fail;
	}
	dev_info(&dev->dev, "ibmvnic registered\n");

	adapter->state = VNIC_PROBED;

	adapter->wait_for_reset = false;
	adapter->last_reset_time = jiffies;
	return 0;

ibmvnic_register_fail:
	device_remove_file(&dev->dev, &dev_attr_failover);

ibmvnic_dev_file_err:
	release_stats_token(adapter);

ibmvnic_stats_fail:
	release_stats_buffers(adapter);

ibmvnic_init_fail:
	release_sub_crqs(adapter, 1);
	release_crq_queue(adapter);
	mutex_destroy(&adapter->fw_lock);
	free_netdev(netdev);

	return rc;
}

static int ibmvnic_remove(struct vio_dev *dev)
{
	struct net_device *netdev = dev_get_drvdata(&dev->dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	unsigned long flags;

	spin_lock_irqsave(&adapter->state_lock, flags);

	/* If ibmvnic_reset() is scheduling a reset, wait for it to
	 * finish. Then, set the state to REMOVING to prevent it from
	 * scheduling any more work and to have reset functions ignore
	 * any resets that have already been scheduled. Drop the lock
	 * after setting state, so __ibmvnic_reset() which is called
	 * from the flush_work() below, can make progress.
	 */
	/* Interrupts are already disabled by the outer irqsave, and
	 * taking the inner lock with spin_lock_irqsave() would clobber
	 * the flags saved above, so use a plain spin_lock() here.
	 */
	spin_lock(&adapter->rwi_lock);
	adapter->state = VNIC_REMOVING;
	spin_unlock(&adapter->rwi_lock);

	spin_unlock_irqrestore(&adapter->state_lock, flags);

	flush_work(&adapter->ibmvnic_reset);
	flush_delayed_work(&adapter->ibmvnic_delayed_reset);

	rtnl_lock();
	unregister_netdevice(netdev);

	release_resources(adapter);
	release_sub_crqs(adapter, 1);
	release_crq_queue(adapter);

	release_stats_token(adapter);
	release_stats_buffers(adapter);

	adapter->state = VNIC_REMOVED;

	rtnl_unlock();
	mutex_destroy(&adapter->fw_lock);
	device_remove_file(&dev->dev, &dev_attr_failover);
	free_netdev(netdev);
	dev_set_drvdata(&dev->dev, NULL);

	return 0;
}

static ssize_t failover_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	__be64 session_token;
	long rc;

	if (!sysfs_streq(buf, "1"))
		return -EINVAL;

	rc = plpar_hcall(H_VIOCTL, retbuf, adapter->vdev->unit_address,
			 H_GET_SESSION_TOKEN, 0, 0, 0);
	if (rc) {
		netdev_err(netdev, "Couldn't retrieve session token, rc %ld\n",
			   rc);
		return -EINVAL;
	}

	session_token = (__be64)retbuf[0];
	netdev_dbg(netdev, "Initiating client failover, session id %llx\n",
		   be64_to_cpu(session_token));
	rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
				H_SESSION_ERR_DETECTED, session_token, 0, 0);
	if (rc) {
		netdev_err(netdev, "Client initiated failover failed, rc %ld\n",
			   rc);
		return -EINVAL;
	}

	return count;
}

static DEVICE_ATTR_WO(failover);
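
/* Usage sketch (the sysfs path is an assumption): writing "1" to the
 * adapter's failover attribute triggers a client-initiated failover,
 * e.g.
 *
 *	echo 1 > /sys/bus/vio/devices/<unit-address>/failover
 *
 * which fetches a session token with H_GET_SESSION_TOKEN and reports
 * it back through H_SESSION_ERR_DETECTED, as implemented above.
 */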

static unsigned long ibmvnic_get_desired_dma(struct vio_dev *vdev)
{
	struct net_device *netdev = dev_get_drvdata(&vdev->dev);
	struct ibmvnic_adapter *adapter;
	struct iommu_table *tbl;
	unsigned long ret = 0;
	int i;

	tbl = get_iommu_table_base(&vdev->dev);

	/* netdev inits at probe time along with the structures we need below */
	if (!netdev)
		return IOMMU_PAGE_ALIGN(IBMVNIC_IO_ENTITLEMENT_DEFAULT, tbl);

	adapter = netdev_priv(netdev);

	ret += PAGE_SIZE; /* the crq message queue */
	ret += IOMMU_PAGE_ALIGN(sizeof(struct ibmvnic_statistics), tbl);

	for (i = 0; i < adapter->req_tx_queues + adapter->req_rx_queues; i++)
		ret += 4 * PAGE_SIZE; /* the scrq message queue */

	for (i = 0; i < adapter->num_active_rx_pools; i++)
		ret += adapter->rx_pool[i].size *
		    IOMMU_PAGE_ALIGN(adapter->rx_pool[i].buff_size, tbl);

	return ret;
}

static int ibmvnic_resume(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	if (adapter->state != VNIC_OPEN)
		return 0;

	tasklet_schedule(&adapter->tasklet);

	return 0;
}

static const struct vio_device_id ibmvnic_device_table[] = {
	{"network", "IBM,vnic"},
	{"", "" }
};
MODULE_DEVICE_TABLE(vio, ibmvnic_device_table);

static const struct dev_pm_ops ibmvnic_pm_ops = {
	.resume = ibmvnic_resume
};

static struct vio_driver ibmvnic_driver = {
	.id_table       = ibmvnic_device_table,
	.probe          = ibmvnic_probe,
	.remove         = ibmvnic_remove,
	.get_desired_dma = ibmvnic_get_desired_dma,
	.name           = ibmvnic_driver_name,
	.pm             = &ibmvnic_pm_ops,
};

/* module functions */
static int __init ibmvnic_module_init(void)
{
	pr_info("%s: %s %s\n", ibmvnic_driver_name, ibmvnic_driver_string,
		IBMVNIC_DRIVER_VERSION);

	return vio_register_driver(&ibmvnic_driver);
}

static void __exit ibmvnic_module_exit(void)
{
	vio_unregister_driver(&ibmvnic_driver);
}

module_init(ibmvnic_module_init);
module_exit(ibmvnic_module_exit);