/*
 * IBM System i and System p Virtual NIC Device Driver
 * Copyright (C) 2014 IBM Corp.
 * Santiago Leon (santi_leon@yahoo.com)
 * Thomas Falcon (tlfalcon@linux.vnet.ibm.com)
 * John Allen (jallen@linux.vnet.ibm.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.
 *
 * This module contains the implementation of a virtual ethernet device
 * for use with IBM i/p Series LPAR Linux. It utilizes the logical LAN
 * option of the RS/6000 Platform Architecture to interface with virtual
 * ethernet NICs that are presented to the partition by the hypervisor.
 *
 * Messages are passed between the VNIC driver and the VNIC server using
 * Command/Response Queues (CRQs) and sub CRQs (sCRQs). CRQs are used to
 * issue and receive commands that initiate communication with the server
 * on driver initialization. Sub CRQs (sCRQs) are similar to CRQs, but
 * are used by the driver to notify the server that a packet is
 * ready for transmission or that a buffer has been added to receive a
 * packet. Subsequently, sCRQs are used by the server to notify the
 * driver that a packet transmission has been completed or that a packet
 * has been received and placed in a waiting buffer.
 *
 * In lieu of a more conventional "on-the-fly" DMA mapping strategy in
 * which skbs are DMA mapped and immediately unmapped when the transmit
 * or receive has been completed, the VNIC driver is required to use
 * "long term mapping". This means that large, contiguous, DMA-mapped
 * buffers are allocated on driver initialization and these buffers are
 * then continuously reused to pass skbs to and from the VNIC server.
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/completion.h>
#include <linux/ioport.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/ethtool.h>
#include <linux/proc_fs.h>
#include <linux/if_arp.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/irq.h>
#include <linux/kthread.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <net/net_namespace.h>
#include <asm/hvcall.h>
#include <linux/atomic.h>
#include <asm/vio.h>
#include <asm/iommu.h>
#include <linux/uaccess.h>
#include <asm/firmware.h>
#include <linux/workqueue.h>
#include <linux/if_vlan.h>
#include <linux/utsname.h>

#include "ibmvnic.h"

static const char ibmvnic_driver_name[] = "ibmvnic";
static const char ibmvnic_driver_string[] = "IBM System i/p Virtual NIC Driver";

MODULE_AUTHOR("Santiago Leon");
MODULE_DESCRIPTION("IBM System i/p Virtual NIC Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(IBMVNIC_DRIVER_VERSION);

static int ibmvnic_version = IBMVNIC_INITIAL_VERSION;
static int ibmvnic_remove(struct vio_dev *);
static void release_sub_crqs(struct ibmvnic_adapter *, bool);
static int ibmvnic_reset_crq(struct ibmvnic_adapter *);
static int ibmvnic_send_crq_init(struct ibmvnic_adapter *);
static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *);
static int ibmvnic_send_crq(struct ibmvnic_adapter *, union ibmvnic_crq *);
static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
		       union sub_crq *sub_crq);
static int send_subcrq_indirect(struct ibmvnic_adapter *, u64, u64, u64);
static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance);
static int enable_scrq_irq(struct ibmvnic_adapter *,
			   struct ibmvnic_sub_crq_queue *);
static int disable_scrq_irq(struct ibmvnic_adapter *,
			    struct ibmvnic_sub_crq_queue *);
static int pending_scrq(struct ibmvnic_adapter *,
			struct ibmvnic_sub_crq_queue *);
static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *,
					struct ibmvnic_sub_crq_queue *);
static int ibmvnic_poll(struct napi_struct *napi, int data);
static void send_map_query(struct ibmvnic_adapter *adapter);
static int send_request_map(struct ibmvnic_adapter *, dma_addr_t, __be32, u8);
static int send_request_unmap(struct ibmvnic_adapter *, u8);
static int send_login(struct ibmvnic_adapter *adapter);
static void send_cap_queries(struct ibmvnic_adapter *adapter);
static int init_sub_crqs(struct ibmvnic_adapter *);
static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter);
static int ibmvnic_init(struct ibmvnic_adapter *);
static int ibmvnic_reset_init(struct ibmvnic_adapter *);
static void release_crq_queue(struct ibmvnic_adapter *);
static int __ibmvnic_set_mac(struct net_device *netdev, struct sockaddr *p);
static int init_crq_queue(struct ibmvnic_adapter *adapter);

struct ibmvnic_stat {
	char name[ETH_GSTRING_LEN];
	int offset;
};

#define IBMVNIC_STAT_OFF(stat) (offsetof(struct ibmvnic_adapter, stats) + \
			     offsetof(struct ibmvnic_statistics, stat))
#define IBMVNIC_GET_STAT(a, off) (*((u64 *)(((unsigned long)(a)) + off)))

static const struct ibmvnic_stat ibmvnic_stats[] = {
	{"rx_packets", IBMVNIC_STAT_OFF(rx_packets)},
	{"rx_bytes", IBMVNIC_STAT_OFF(rx_bytes)},
	{"tx_packets", IBMVNIC_STAT_OFF(tx_packets)},
	{"tx_bytes", IBMVNIC_STAT_OFF(tx_bytes)},
	{"ucast_tx_packets", IBMVNIC_STAT_OFF(ucast_tx_packets)},
	{"ucast_rx_packets", IBMVNIC_STAT_OFF(ucast_rx_packets)},
	{"mcast_tx_packets", IBMVNIC_STAT_OFF(mcast_tx_packets)},
	{"mcast_rx_packets", IBMVNIC_STAT_OFF(mcast_rx_packets)},
	{"bcast_tx_packets", IBMVNIC_STAT_OFF(bcast_tx_packets)},
	{"bcast_rx_packets", IBMVNIC_STAT_OFF(bcast_rx_packets)},
	{"align_errors", IBMVNIC_STAT_OFF(align_errors)},
	{"fcs_errors", IBMVNIC_STAT_OFF(fcs_errors)},
	{"single_collision_frames", IBMVNIC_STAT_OFF(single_collision_frames)},
	{"multi_collision_frames", IBMVNIC_STAT_OFF(multi_collision_frames)},
	{"sqe_test_errors", IBMVNIC_STAT_OFF(sqe_test_errors)},
	{"deferred_tx", IBMVNIC_STAT_OFF(deferred_tx)},
	{"late_collisions", IBMVNIC_STAT_OFF(late_collisions)},
	{"excess_collisions", IBMVNIC_STAT_OFF(excess_collisions)},
	{"internal_mac_tx_errors", IBMVNIC_STAT_OFF(internal_mac_tx_errors)},
	{"carrier_sense", IBMVNIC_STAT_OFF(carrier_sense)},
	{"too_long_frames", IBMVNIC_STAT_OFF(too_long_frames)},
	{"internal_mac_rx_errors", IBMVNIC_STAT_OFF(internal_mac_rx_errors)},
};
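
/*
 * Illustrative sketch, not part of the driver: IBMVNIC_STAT_OFF records
 * where each firmware statistic lives inside struct ibmvnic_adapter, and
 * IBMVNIC_GET_STAT reads a u64 back through that offset. An ethtool-style
 * dump loop would look roughly like this; "example_dump_fw_stats" is a
 * hypothetical name and is never called.
 */
static inline void example_dump_fw_stats(struct ibmvnic_adapter *adapter,
					 u64 *data)
{
	int i;

	/* one u64 per entry in the ibmvnic_stats table above */
	for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++)
		data[i] = IBMVNIC_GET_STAT(adapter, ibmvnic_stats[i].offset);
}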

static long h_reg_sub_crq(unsigned long unit_address, unsigned long token,
			  unsigned long length, unsigned long *number,
			  unsigned long *irq)
{
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	long rc;

	rc = plpar_hcall(H_REG_SUB_CRQ, retbuf, unit_address, token, length);
	*number = retbuf[0];
	*irq = retbuf[1];

	return rc;
}

static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb, int size)
{
	struct device *dev = &adapter->vdev->dev;
	int rc;

	ltb->size = size;
	ltb->buff = dma_alloc_coherent(dev, ltb->size, &ltb->addr,
				       GFP_KERNEL);

	if (!ltb->buff) {
		dev_err(dev, "Couldn't alloc long term buffer\n");
		return -ENOMEM;
	}
	ltb->map_id = adapter->map_id;
	adapter->map_id++;

	init_completion(&adapter->fw_done);
	rc = send_request_map(adapter, ltb->addr,
			      ltb->size, ltb->map_id);
	if (rc) {
		dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
		return rc;
	}
	wait_for_completion(&adapter->fw_done);

	if (adapter->fw_done_rc) {
		dev_err(dev, "Couldn't map long term buffer, rc = %d\n",
			adapter->fw_done_rc);
		dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
		return -1;
	}
	return 0;
}

static void free_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb)
{
	struct device *dev = &adapter->vdev->dev;

	if (!ltb->buff)
		return;

	if (adapter->reset_reason != VNIC_RESET_FAILOVER &&
	    adapter->reset_reason != VNIC_RESET_MOBILITY)
		send_request_unmap(adapter, ltb->map_id);
	dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
}
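
/*
 * Illustrative sketch, not part of the driver: the expected life cycle of
 * a long term buffer. A pool allocates one large mapping up front, carves
 * it into fixed-size slots, and only unmaps it when the pool is torn down.
 * "example_ltb_cycle" is a hypothetical name and the 64 KiB size is an
 * arbitrary example.
 */
static inline int example_ltb_cycle(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_long_term_buff ltb = { 0 };
	int rc;

	/* map once; the VNIC server learns about the region through
	 * send_request_map() inside alloc_long_term_buff()
	 */
	rc = alloc_long_term_buff(adapter, &ltb, 64 * 1024);
	if (rc)
		return rc;

	/* slots are then addressed as ltb.buff + index * buff_size,
	 * with DMA address ltb.addr + index * buff_size
	 */

	/* unmap once, at teardown */
	free_long_term_buff(adapter, &ltb);
	return 0;
}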

static int reset_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb)
{
	int rc;

	memset(ltb->buff, 0, ltb->size);

	init_completion(&adapter->fw_done);
	rc = send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id);
	if (rc)
		return rc;
	wait_for_completion(&adapter->fw_done);

	if (adapter->fw_done_rc) {
		dev_info(&adapter->vdev->dev,
			 "Reset failed, attempting to free and reallocate buffer\n");
		free_long_term_buff(adapter, ltb);
		return alloc_long_term_buff(adapter, ltb, ltb->size);
	}
	return 0;
}

static void deactivate_rx_pools(struct ibmvnic_adapter *adapter)
{
	int i;

	for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	     i++)
		adapter->rx_pool[i].active = 0;
}

static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
			      struct ibmvnic_rx_pool *pool)
{
	int count = pool->size - atomic_read(&pool->available);
	struct device *dev = &adapter->vdev->dev;
	int buffers_added = 0;
	unsigned long lpar_rc;
	union sub_crq sub_crq;
	struct sk_buff *skb;
	unsigned int offset;
	dma_addr_t dma_addr;
	unsigned char *dst;
	u64 *handle_array;
	int shift = 0;
	int index;
	int i;

	if (!pool->active)
		return;

	handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
			       be32_to_cpu(adapter->login_rsp_buf->
					   off_rxadd_subcrqs));

	for (i = 0; i < count; ++i) {
		skb = alloc_skb(pool->buff_size, GFP_ATOMIC);
		if (!skb) {
			dev_err(dev, "Couldn't replenish rx buff\n");
			adapter->replenish_no_mem++;
			break;
		}

		index = pool->free_map[pool->next_free];

		if (pool->rx_buff[index].skb)
			dev_err(dev, "Inconsistent free_map!\n");

		/* Copy the skb to the long term mapped DMA buffer */
		offset = index * pool->buff_size;
		dst = pool->long_term_buff.buff + offset;
		memset(dst, 0, pool->buff_size);
		dma_addr = pool->long_term_buff.addr + offset;
		pool->rx_buff[index].data = dst;

		pool->free_map[pool->next_free] = IBMVNIC_INVALID_MAP;
		pool->rx_buff[index].dma = dma_addr;
		pool->rx_buff[index].skb = skb;
		pool->rx_buff[index].pool_index = pool->index;
		pool->rx_buff[index].size = pool->buff_size;

		memset(&sub_crq, 0, sizeof(sub_crq));
		sub_crq.rx_add.first = IBMVNIC_CRQ_CMD;
		sub_crq.rx_add.correlator =
		    cpu_to_be64((u64)&pool->rx_buff[index]);
		sub_crq.rx_add.ioba = cpu_to_be32(dma_addr);
		sub_crq.rx_add.map_id = pool->long_term_buff.map_id;

		/* The length field of the sCRQ is defined to be 24 bits so the
		 * buffer size needs to be left shifted by a byte before it is
		 * converted to big endian to prevent the last byte from being
		 * truncated.
		 */
#ifdef __LITTLE_ENDIAN__
		shift = 8;
#endif
		sub_crq.rx_add.len = cpu_to_be32(pool->buff_size << shift);

		lpar_rc = send_subcrq(adapter, handle_array[pool->index],
				      &sub_crq);
		if (lpar_rc != H_SUCCESS)
			goto failure;

		buffers_added++;
		adapter->replenish_add_buff_success++;
		pool->next_free = (pool->next_free + 1) % pool->size;
	}
	atomic_add(buffers_added, &pool->available);
	return;

failure:
	if (lpar_rc != H_PARAMETER && lpar_rc != H_CLOSED)
		dev_err_ratelimited(dev, "rx: replenish packet buffer failed\n");
	pool->free_map[pool->next_free] = index;
	pool->rx_buff[index].skb = NULL;

	dev_kfree_skb_any(skb);
	adapter->replenish_add_buff_failure++;
	atomic_add(buffers_added, &pool->available);

	if (lpar_rc == H_CLOSED || adapter->failover_pending) {
		/* Disable buffer pool replenishment and report carrier off if
		 * queue is closed or pending failover.
		 * Firmware guarantees that a signal will be sent to the
		 * driver, triggering a reset.
		 */
		deactivate_rx_pools(adapter);
		netif_carrier_off(adapter->netdev);
	}
}
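
/*
 * Illustrative restatement of the length encoding used above (hypothetical
 * helper, never called): the sCRQ length field is 24 bits wide, so on
 * little endian the value is shifted up a byte before the 32-bit byte swap
 * so that its significant bytes land in the three bytes the field actually
 * occupies.
 */
static inline __be32 example_len24(u32 len)
{
	int shift = 0;

#ifdef __LITTLE_ENDIAN__
	shift = 8;
#endif
	/* e.g. len = 0x2000: cpu_to_be32(0x200000) stores 0x20 0x00 0x00
	 * in the first three bytes, so truncation to 24 bits keeps 0x2000
	 */
	return cpu_to_be32(len << shift);
}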

static void replenish_pools(struct ibmvnic_adapter *adapter)
{
	int i;

	adapter->replenish_task_cycles++;
	for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	     i++) {
		if (adapter->rx_pool[i].active)
			replenish_rx_pool(adapter, &adapter->rx_pool[i]);
	}
}

static void release_stats_buffers(struct ibmvnic_adapter *adapter)
{
	kfree(adapter->tx_stats_buffers);
	kfree(adapter->rx_stats_buffers);
	adapter->tx_stats_buffers = NULL;
	adapter->rx_stats_buffers = NULL;
}

static int init_stats_buffers(struct ibmvnic_adapter *adapter)
{
	adapter->tx_stats_buffers =
				kcalloc(IBMVNIC_MAX_QUEUES,
					sizeof(struct ibmvnic_tx_queue_stats),
					GFP_KERNEL);
	if (!adapter->tx_stats_buffers)
		return -ENOMEM;

	adapter->rx_stats_buffers =
				kcalloc(IBMVNIC_MAX_QUEUES,
					sizeof(struct ibmvnic_rx_queue_stats),
					GFP_KERNEL);
	if (!adapter->rx_stats_buffers)
		return -ENOMEM;

	return 0;
}

static void release_stats_token(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;

	if (!adapter->stats_token)
		return;

	dma_unmap_single(dev, adapter->stats_token,
			 sizeof(struct ibmvnic_statistics),
			 DMA_FROM_DEVICE);
	adapter->stats_token = 0;
}

static int init_stats_token(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	dma_addr_t stok;

	stok = dma_map_single(dev, &adapter->stats,
			      sizeof(struct ibmvnic_statistics),
			      DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, stok)) {
		dev_err(dev, "Couldn't map stats buffer\n");
		return -1;
	}

	adapter->stats_token = stok;
	netdev_dbg(adapter->netdev, "Stats token initialized (%llx)\n", stok);
	return 0;
}

static int reset_rx_pools(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_rx_pool *rx_pool;
	int rx_scrqs;
	int i, j, rc;
	u64 *size_array;

	size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
		be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));

	rx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	for (i = 0; i < rx_scrqs; i++) {
		rx_pool = &adapter->rx_pool[i];

		netdev_dbg(adapter->netdev, "Re-setting rx_pool[%d]\n", i);

		if (rx_pool->buff_size != be64_to_cpu(size_array[i])) {
			free_long_term_buff(adapter, &rx_pool->long_term_buff);
			rx_pool->buff_size = be64_to_cpu(size_array[i]);
			rc = alloc_long_term_buff(adapter,
						  &rx_pool->long_term_buff,
						  rx_pool->size *
						  rx_pool->buff_size);
		} else {
			rc = reset_long_term_buff(adapter,
						  &rx_pool->long_term_buff);
		}

		if (rc)
			return rc;

		for (j = 0; j < rx_pool->size; j++)
			rx_pool->free_map[j] = j;

		memset(rx_pool->rx_buff, 0,
		       rx_pool->size * sizeof(struct ibmvnic_rx_buff));

		atomic_set(&rx_pool->available, 0);
		rx_pool->next_alloc = 0;
		rx_pool->next_free = 0;
		rx_pool->active = 1;
	}

	return 0;
}

static void release_rx_pools(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_rx_pool *rx_pool;
	int i, j;

	if (!adapter->rx_pool)
		return;

	for (i = 0; i < adapter->num_active_rx_pools; i++) {
		rx_pool = &adapter->rx_pool[i];

		netdev_dbg(adapter->netdev, "Releasing rx_pool[%d]\n", i);

		kfree(rx_pool->free_map);
		free_long_term_buff(adapter, &rx_pool->long_term_buff);

		if (!rx_pool->rx_buff)
			continue;

		for (j = 0; j < rx_pool->size; j++) {
			if (rx_pool->rx_buff[j].skb) {
				dev_kfree_skb_any(rx_pool->rx_buff[j].skb);
				rx_pool->rx_buff[j].skb = NULL;
			}
		}

		kfree(rx_pool->rx_buff);
	}

	kfree(adapter->rx_pool);
	adapter->rx_pool = NULL;
	adapter->num_active_rx_pools = 0;
}

static int init_rx_pools(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_rx_pool *rx_pool;
	int rxadd_subcrqs;
	u64 *size_array;
	int i, j;

	rxadd_subcrqs =
		be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
		be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));

	adapter->rx_pool = kcalloc(rxadd_subcrqs,
				   sizeof(struct ibmvnic_rx_pool),
				   GFP_KERNEL);
	if (!adapter->rx_pool) {
		dev_err(dev, "Failed to allocate rx pools\n");
		return -1;
	}

	adapter->num_active_rx_pools = rxadd_subcrqs;

	for (i = 0; i < rxadd_subcrqs; i++) {
		rx_pool = &adapter->rx_pool[i];

		netdev_dbg(adapter->netdev,
			   "Initializing rx_pool[%d], %lld buffs, %lld bytes each\n",
			   i, adapter->req_rx_add_entries_per_subcrq,
			   be64_to_cpu(size_array[i]));

		rx_pool->size = adapter->req_rx_add_entries_per_subcrq;
		rx_pool->index = i;
		rx_pool->buff_size = be64_to_cpu(size_array[i]);
		rx_pool->active = 1;

		rx_pool->free_map = kcalloc(rx_pool->size, sizeof(int),
					    GFP_KERNEL);
		if (!rx_pool->free_map) {
			release_rx_pools(adapter);
			return -1;
		}

		rx_pool->rx_buff = kcalloc(rx_pool->size,
					   sizeof(struct ibmvnic_rx_buff),
					   GFP_KERNEL);
		if (!rx_pool->rx_buff) {
			dev_err(dev, "Couldn't alloc rx buffers\n");
			release_rx_pools(adapter);
			return -1;
		}

		if (alloc_long_term_buff(adapter, &rx_pool->long_term_buff,
					 rx_pool->size * rx_pool->buff_size)) {
			release_rx_pools(adapter);
			return -1;
		}

		for (j = 0; j < rx_pool->size; ++j)
			rx_pool->free_map[j] = j;

		atomic_set(&rx_pool->available, 0);
		rx_pool->next_alloc = 0;
		rx_pool->next_free = 0;
	}

	return 0;
}
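
/*
 * Illustrative sketch, not part of the driver: free_map acts as a ring of
 * free slot indices. replenish_rx_pool() pops an index at next_free and
 * marks the entry invalid; remove_buff_from_pool() later pushes returned
 * slots back at next_alloc. A pop looks roughly like this;
 * "example_pop_free_slot" is a hypothetical name and is never called.
 */
static inline int example_pop_free_slot(struct ibmvnic_rx_pool *pool)
{
	int index = pool->free_map[pool->next_free];

	if (index == IBMVNIC_INVALID_MAP)
		return -1;	/* no free slot at the head of the ring */

	pool->free_map[pool->next_free] = IBMVNIC_INVALID_MAP;
	pool->next_free = (pool->next_free + 1) % pool->size;
	return index;
}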

static int reset_one_tx_pool(struct ibmvnic_adapter *adapter,
			     struct ibmvnic_tx_pool *tx_pool)
{
	int rc, i;

	rc = reset_long_term_buff(adapter, &tx_pool->long_term_buff);
	if (rc)
		return rc;

	memset(tx_pool->tx_buff, 0,
	       tx_pool->num_buffers *
	       sizeof(struct ibmvnic_tx_buff));

	for (i = 0; i < tx_pool->num_buffers; i++)
		tx_pool->free_map[i] = i;

	tx_pool->consumer_index = 0;
	tx_pool->producer_index = 0;

	return 0;
}

static int reset_tx_pools(struct ibmvnic_adapter *adapter)
{
	int tx_scrqs;
	int i, rc;

	tx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
	for (i = 0; i < tx_scrqs; i++) {
		rc = reset_one_tx_pool(adapter, &adapter->tso_pool[i]);
		if (rc)
			return rc;
		rc = reset_one_tx_pool(adapter, &adapter->tx_pool[i]);
		if (rc)
			return rc;
	}

	return 0;
}

static void release_vpd_data(struct ibmvnic_adapter *adapter)
{
	if (!adapter->vpd)
		return;

	kfree(adapter->vpd->buff);
	kfree(adapter->vpd);

	adapter->vpd = NULL;
}

static void release_one_tx_pool(struct ibmvnic_adapter *adapter,
				struct ibmvnic_tx_pool *tx_pool)
{
	kfree(tx_pool->tx_buff);
	kfree(tx_pool->free_map);
	free_long_term_buff(adapter, &tx_pool->long_term_buff);
}

static void release_tx_pools(struct ibmvnic_adapter *adapter)
{
	int i;

	if (!adapter->tx_pool)
		return;

	for (i = 0; i < adapter->num_active_tx_pools; i++) {
		release_one_tx_pool(adapter, &adapter->tx_pool[i]);
		release_one_tx_pool(adapter, &adapter->tso_pool[i]);
	}

	kfree(adapter->tx_pool);
	adapter->tx_pool = NULL;
	kfree(adapter->tso_pool);
	adapter->tso_pool = NULL;
	adapter->num_active_tx_pools = 0;
}

static int init_one_tx_pool(struct net_device *netdev,
			    struct ibmvnic_tx_pool *tx_pool,
			    int num_entries, int buf_size)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int i;

	tx_pool->tx_buff = kcalloc(num_entries,
				   sizeof(struct ibmvnic_tx_buff),
				   GFP_KERNEL);
	if (!tx_pool->tx_buff)
		return -1;

	if (alloc_long_term_buff(adapter, &tx_pool->long_term_buff,
				 num_entries * buf_size))
		return -1;

	tx_pool->free_map = kcalloc(num_entries, sizeof(int), GFP_KERNEL);
	if (!tx_pool->free_map)
		return -1;

	for (i = 0; i < num_entries; i++)
		tx_pool->free_map[i] = i;

	tx_pool->consumer_index = 0;
	tx_pool->producer_index = 0;
	tx_pool->num_buffers = num_entries;
	tx_pool->buf_size = buf_size;

	return 0;
}

static int init_tx_pools(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int tx_subcrqs;
	int i, rc;

	tx_subcrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
	adapter->tx_pool = kcalloc(tx_subcrqs,
				   sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
	if (!adapter->tx_pool)
		return -1;

	adapter->tso_pool = kcalloc(tx_subcrqs,
				    sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
	if (!adapter->tso_pool)
		return -1;

	adapter->num_active_tx_pools = tx_subcrqs;

	for (i = 0; i < tx_subcrqs; i++) {
		rc = init_one_tx_pool(netdev, &adapter->tx_pool[i],
				      adapter->req_tx_entries_per_subcrq,
				      adapter->req_mtu + VLAN_HLEN);
		if (rc) {
			release_tx_pools(adapter);
			return rc;
		}

		rc = init_one_tx_pool(netdev, &adapter->tso_pool[i],
				      IBMVNIC_TSO_BUFS,
				      IBMVNIC_TSO_BUF_SZ);
		if (rc) {
			release_tx_pools(adapter);
			return rc;
		}
	}

	return 0;
}

static void ibmvnic_napi_enable(struct ibmvnic_adapter *adapter)
{
	int i;

	if (adapter->napi_enabled)
		return;

	for (i = 0; i < adapter->req_rx_queues; i++)
		napi_enable(&adapter->napi[i]);

	adapter->napi_enabled = true;
}

static void ibmvnic_napi_disable(struct ibmvnic_adapter *adapter)
{
	int i;

	if (!adapter->napi_enabled)
		return;

	for (i = 0; i < adapter->req_rx_queues; i++) {
		netdev_dbg(adapter->netdev, "Disabling napi[%d]\n", i);
		napi_disable(&adapter->napi[i]);
	}

	adapter->napi_enabled = false;
}

static int init_napi(struct ibmvnic_adapter *adapter)
{
	int i;

	adapter->napi = kcalloc(adapter->req_rx_queues,
				sizeof(struct napi_struct), GFP_KERNEL);
	if (!adapter->napi)
		return -ENOMEM;

	for (i = 0; i < adapter->req_rx_queues; i++) {
		netdev_dbg(adapter->netdev, "Adding napi[%d]\n", i);
		netif_napi_add(adapter->netdev, &adapter->napi[i],
			       ibmvnic_poll, NAPI_POLL_WEIGHT);
	}

	adapter->num_active_rx_napi = adapter->req_rx_queues;
	return 0;
}

static void release_napi(struct ibmvnic_adapter *adapter)
{
	int i;

	if (!adapter->napi)
		return;

	for (i = 0; i < adapter->num_active_rx_napi; i++) {
		netdev_dbg(adapter->netdev, "Releasing napi[%d]\n", i);
		netif_napi_del(&adapter->napi[i]);
	}

	kfree(adapter->napi);
	adapter->napi = NULL;
	adapter->num_active_rx_napi = 0;
	adapter->napi_enabled = false;
}

static int ibmvnic_login(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	unsigned long timeout = msecs_to_jiffies(30000);
	int retry_count = 0;
	bool retry;
	int rc;

	do {
		retry = false;
		if (retry_count > IBMVNIC_MAX_QUEUES) {
			netdev_warn(netdev, "Login attempts exceeded\n");
			return -1;
		}

		adapter->init_done_rc = 0;
		reinit_completion(&adapter->init_done);
		rc = send_login(adapter);
		if (rc) {
			netdev_warn(netdev, "Unable to login\n");
			return rc;
		}

		if (!wait_for_completion_timeout(&adapter->init_done,
						 timeout)) {
			netdev_warn(netdev, "Login timed out\n");
			return -1;
		}

		if (adapter->init_done_rc == PARTIALSUCCESS) {
			retry_count++;
			release_sub_crqs(adapter, 1);

			retry = true;
			netdev_dbg(netdev,
				   "Received partial success, retrying...\n");
			adapter->init_done_rc = 0;
			reinit_completion(&adapter->init_done);
			send_cap_queries(adapter);
			if (!wait_for_completion_timeout(&adapter->init_done,
							 timeout)) {
				netdev_warn(netdev,
					    "Capabilities query timed out\n");
				return -1;
			}

			rc = init_sub_crqs(adapter);
			if (rc) {
				netdev_warn(netdev,
					    "SCRQ initialization failed\n");
				return -1;
			}

			rc = init_sub_crq_irqs(adapter);
			if (rc) {
				netdev_warn(netdev,
					    "SCRQ irq initialization failed\n");
				return -1;
			}
		} else if (adapter->init_done_rc) {
			netdev_warn(netdev, "Adapter login failed\n");
			return -1;
		}
	} while (retry);

	/* handle pending MAC address changes after successful login */
	if (adapter->mac_change_pending) {
		__ibmvnic_set_mac(netdev, &adapter->desired.mac);
		adapter->mac_change_pending = false;
	}

	return 0;
}
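
/*
 * Illustrative sketch, not part of the driver: the CRQ request/response
 * pattern shared by login, capability queries, link-state changes and VPD
 * fetches. The caller re-arms a completion, fires the CRQ, and sleeps
 * until the CRQ handler completes it or a timeout expires.
 * "example_crq_handshake" is a hypothetical name and is never called.
 */
static inline int example_crq_handshake(struct ibmvnic_adapter *adapter,
					union ibmvnic_crq *crq)
{
	unsigned long timeout = msecs_to_jiffies(30000);
	int rc;

	reinit_completion(&adapter->init_done);
	rc = ibmvnic_send_crq(adapter, crq);
	if (rc)
		return rc;
	if (!wait_for_completion_timeout(&adapter->init_done, timeout))
		return -1;	/* response never arrived */
	return adapter->init_done_rc;	/* filled in by the CRQ handler */
}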

static void release_login_buffer(struct ibmvnic_adapter *adapter)
{
	kfree(adapter->login_buf);
	adapter->login_buf = NULL;
}

static void release_login_rsp_buffer(struct ibmvnic_adapter *adapter)
{
	kfree(adapter->login_rsp_buf);
	adapter->login_rsp_buf = NULL;
}

static void release_resources(struct ibmvnic_adapter *adapter)
{
	release_vpd_data(adapter);

	release_tx_pools(adapter);
	release_rx_pools(adapter);

	release_napi(adapter);
	release_login_rsp_buffer(adapter);
}

static int set_link_state(struct ibmvnic_adapter *adapter, u8 link_state)
{
	struct net_device *netdev = adapter->netdev;
	unsigned long timeout = msecs_to_jiffies(30000);
	union ibmvnic_crq crq;
	bool resend;
	int rc;

	netdev_dbg(netdev, "setting link state %d\n", link_state);

	memset(&crq, 0, sizeof(crq));
	crq.logical_link_state.first = IBMVNIC_CRQ_CMD;
	crq.logical_link_state.cmd = LOGICAL_LINK_STATE;
	crq.logical_link_state.link_state = link_state;

	do {
		resend = false;

		reinit_completion(&adapter->init_done);
		rc = ibmvnic_send_crq(adapter, &crq);
		if (rc) {
			netdev_err(netdev, "Failed to set link state\n");
			return rc;
		}

		if (!wait_for_completion_timeout(&adapter->init_done,
						 timeout)) {
			netdev_err(netdev, "timeout setting link state\n");
			return -1;
		}

		if (adapter->init_done_rc == 1) {
			/* Partial success, delay and re-send */
			mdelay(1000);
			resend = true;
		} else if (adapter->init_done_rc) {
			netdev_warn(netdev, "Unable to set link state, rc=%d\n",
				    adapter->init_done_rc);
			return adapter->init_done_rc;
		}
	} while (resend);

	return 0;
}

static int set_real_num_queues(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int rc;

	netdev_dbg(netdev, "Setting real tx/rx queues (%llx/%llx)\n",
		   adapter->req_tx_queues, adapter->req_rx_queues);

	rc = netif_set_real_num_tx_queues(netdev, adapter->req_tx_queues);
	if (rc) {
		netdev_err(netdev, "failed to set the number of tx queues\n");
		return rc;
	}

	rc = netif_set_real_num_rx_queues(netdev, adapter->req_rx_queues);
	if (rc)
		netdev_err(netdev, "failed to set the number of rx queues\n");

	return rc;
}

static int ibmvnic_get_vpd(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	union ibmvnic_crq crq;
	int len = 0;
	int rc;

	if (adapter->vpd->buff)
		len = adapter->vpd->len;

	init_completion(&adapter->fw_done);
	crq.get_vpd_size.first = IBMVNIC_CRQ_CMD;
	crq.get_vpd_size.cmd = GET_VPD_SIZE;
	rc = ibmvnic_send_crq(adapter, &crq);
	if (rc)
		return rc;
	wait_for_completion(&adapter->fw_done);

	if (!adapter->vpd->len)
		return -ENODATA;

	if (!adapter->vpd->buff)
		adapter->vpd->buff = kzalloc(adapter->vpd->len, GFP_KERNEL);
	else if (adapter->vpd->len != len)
		adapter->vpd->buff =
			krealloc(adapter->vpd->buff,
				 adapter->vpd->len, GFP_KERNEL);

	if (!adapter->vpd->buff) {
		dev_err(dev, "Could not allocate VPD buffer\n");
		return -ENOMEM;
	}

	adapter->vpd->dma_addr =
		dma_map_single(dev, adapter->vpd->buff, adapter->vpd->len,
			       DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, adapter->vpd->dma_addr)) {
		dev_err(dev, "Could not map VPD buffer\n");
		kfree(adapter->vpd->buff);
		adapter->vpd->buff = NULL;
		return -ENOMEM;
	}

	reinit_completion(&adapter->fw_done);
	crq.get_vpd.first = IBMVNIC_CRQ_CMD;
	crq.get_vpd.cmd = GET_VPD;
	crq.get_vpd.ioba = cpu_to_be32(adapter->vpd->dma_addr);
	crq.get_vpd.len = cpu_to_be32((u32)adapter->vpd->len);
	rc = ibmvnic_send_crq(adapter, &crq);
	if (rc) {
		kfree(adapter->vpd->buff);
		adapter->vpd->buff = NULL;
		return rc;
	}
	wait_for_completion(&adapter->fw_done);

	return 0;
}

static int init_resources(struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int rc;

	rc = set_real_num_queues(netdev);
	if (rc)
		return rc;

	adapter->vpd = kzalloc(sizeof(*adapter->vpd), GFP_KERNEL);
	if (!adapter->vpd)
		return -ENOMEM;

	/* Vital Product Data (VPD) */
	rc = ibmvnic_get_vpd(adapter);
	if (rc) {
		netdev_err(netdev, "failed to initialize Vital Product Data (VPD)\n");
		return rc;
	}

	adapter->map_id = 1;

	rc = init_napi(adapter);
	if (rc)
		return rc;

	send_map_query(adapter);

	rc = init_rx_pools(netdev);
	if (rc)
		return rc;

	rc = init_tx_pools(netdev);
	return rc;
}

static int __ibmvnic_open(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	enum vnic_state prev_state = adapter->state;
	int i, rc;

	adapter->state = VNIC_OPENING;
	replenish_pools(adapter);
	ibmvnic_napi_enable(adapter);

	/* We're ready to receive frames, enable the sub-crq interrupts and
	 * set the logical link state to up
	 */
	for (i = 0; i < adapter->req_rx_queues; i++) {
		netdev_dbg(netdev, "Enabling rx_scrq[%d] irq\n", i);
		if (prev_state == VNIC_CLOSED)
			enable_irq(adapter->rx_scrq[i]->irq);
		enable_scrq_irq(adapter, adapter->rx_scrq[i]);
	}

	for (i = 0; i < adapter->req_tx_queues; i++) {
		netdev_dbg(netdev, "Enabling tx_scrq[%d] irq\n", i);
		if (prev_state == VNIC_CLOSED)
			enable_irq(adapter->tx_scrq[i]->irq);
		enable_scrq_irq(adapter, adapter->tx_scrq[i]);
	}

	rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_UP);
	if (rc) {
		for (i = 0; i < adapter->req_rx_queues; i++)
			napi_disable(&adapter->napi[i]);
		release_resources(adapter);
		return rc;
	}

	netif_tx_start_all_queues(netdev);

	if (prev_state == VNIC_CLOSED) {
		for (i = 0; i < adapter->req_rx_queues; i++)
			napi_schedule(&adapter->napi[i]);
	}

	adapter->state = VNIC_OPEN;
	return rc;
}

static int ibmvnic_open(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int rc;

	/* If device failover is pending, just set device state and return.
	 * Device operation will be handled by reset routine.
	 */
	if (adapter->failover_pending) {
		adapter->state = VNIC_OPEN;
		return 0;
	}

	mutex_lock(&adapter->reset_lock);

	if (adapter->state != VNIC_CLOSED) {
		rc = ibmvnic_login(netdev);
		if (rc) {
			mutex_unlock(&adapter->reset_lock);
			return rc;
		}

		rc = init_resources(adapter);
		if (rc) {
			netdev_err(netdev, "failed to initialize resources\n");
			release_resources(adapter);
			mutex_unlock(&adapter->reset_lock);
			return rc;
		}
	}

	rc = __ibmvnic_open(netdev);
	netif_carrier_on(netdev);

	mutex_unlock(&adapter->reset_lock);

	return rc;
}

static void clean_rx_pools(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_rx_pool *rx_pool;
	struct ibmvnic_rx_buff *rx_buff;
	u64 rx_entries;
	int rx_scrqs;
	int i, j;

	if (!adapter->rx_pool)
		return;

	rx_scrqs = adapter->num_active_rx_pools;
	rx_entries = adapter->req_rx_add_entries_per_subcrq;

	/* Free any remaining skbs in the rx buffer pools */
	for (i = 0; i < rx_scrqs; i++) {
		rx_pool = &adapter->rx_pool[i];
		if (!rx_pool || !rx_pool->rx_buff)
			continue;

		netdev_dbg(adapter->netdev, "Cleaning rx_pool[%d]\n", i);
		for (j = 0; j < rx_entries; j++) {
			rx_buff = &rx_pool->rx_buff[j];
			if (rx_buff && rx_buff->skb) {
				dev_kfree_skb_any(rx_buff->skb);
				rx_buff->skb = NULL;
			}
		}
	}
}

static void clean_one_tx_pool(struct ibmvnic_adapter *adapter,
			      struct ibmvnic_tx_pool *tx_pool)
{
	struct ibmvnic_tx_buff *tx_buff;
	u64 tx_entries;
	int i;

	if (!tx_pool || !tx_pool->tx_buff)
		return;

	tx_entries = tx_pool->num_buffers;

	for (i = 0; i < tx_entries; i++) {
		tx_buff = &tx_pool->tx_buff[i];
		if (tx_buff && tx_buff->skb) {
			dev_kfree_skb_any(tx_buff->skb);
			tx_buff->skb = NULL;
		}
	}
}

static void clean_tx_pools(struct ibmvnic_adapter *adapter)
{
	int tx_scrqs;
	int i;

	if (!adapter->tx_pool || !adapter->tso_pool)
		return;

	tx_scrqs = adapter->num_active_tx_pools;

	/* Free any remaining skbs in the tx buffer pools */
	for (i = 0; i < tx_scrqs; i++) {
		netdev_dbg(adapter->netdev, "Cleaning tx_pool[%d]\n", i);
		clean_one_tx_pool(adapter, &adapter->tx_pool[i]);
		clean_one_tx_pool(adapter, &adapter->tso_pool[i]);
	}
}

static void ibmvnic_disable_irqs(struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	if (adapter->tx_scrq) {
		for (i = 0; i < adapter->req_tx_queues; i++)
			if (adapter->tx_scrq[i]->irq) {
				netdev_dbg(netdev,
					   "Disabling tx_scrq[%d] irq\n", i);
				disable_scrq_irq(adapter, adapter->tx_scrq[i]);
				disable_irq(adapter->tx_scrq[i]->irq);
			}
	}

	if (adapter->rx_scrq) {
		for (i = 0; i < adapter->req_rx_queues; i++) {
			if (adapter->rx_scrq[i]->irq) {
				netdev_dbg(netdev,
					   "Disabling rx_scrq[%d] irq\n", i);
				disable_scrq_irq(adapter, adapter->rx_scrq[i]);
				disable_irq(adapter->rx_scrq[i]->irq);
			}
		}
	}
}

static void ibmvnic_cleanup(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	/* ensure that transmissions are stopped if called by do_reset */
	if (adapter->resetting)
		netif_tx_disable(netdev);
	else
		netif_tx_stop_all_queues(netdev);

	ibmvnic_napi_disable(adapter);
	ibmvnic_disable_irqs(adapter);

	clean_rx_pools(adapter);
	clean_tx_pools(adapter);
}

static int __ibmvnic_close(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int rc = 0;

	adapter->state = VNIC_CLOSING;
	rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN);
	if (rc)
		return rc;
	adapter->state = VNIC_CLOSED;
	return 0;
}

static int ibmvnic_close(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int rc;

	/* If device failover is pending, just set device state and return.
	 * Device operation will be handled by reset routine.
	 */
	if (adapter->failover_pending) {
		adapter->state = VNIC_CLOSED;
		return 0;
	}

	mutex_lock(&adapter->reset_lock);
	rc = __ibmvnic_close(netdev);
	ibmvnic_cleanup(netdev);
	mutex_unlock(&adapter->reset_lock);

	return rc;
}

/**
 * build_hdr_data - creates L2/L3/L4 header data buffer
 * @hdr_field - bitfield determining needed headers
 * @skb - socket buffer
 * @hdr_len - array of header lengths to be filled in
 * @hdr_data - buffer to write the headers into
 *
 * Reads hdr_field to determine which headers are needed by firmware.
 * Builds a buffer containing these headers. Saves individual header
 * lengths and returns the total length of the buffer, both of which
 * are later used to build descriptors.
 */
static int build_hdr_data(u8 hdr_field, struct sk_buff *skb,
			  int *hdr_len, u8 *hdr_data)
{
	int len = 0;
	u8 *hdr;

	if (skb_vlan_tagged(skb) && !skb_vlan_tag_present(skb))
		hdr_len[0] = sizeof(struct vlan_ethhdr);
	else
		hdr_len[0] = sizeof(struct ethhdr);

	if (skb->protocol == htons(ETH_P_IP)) {
		hdr_len[1] = ip_hdr(skb)->ihl * 4;
		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
			hdr_len[2] = tcp_hdrlen(skb);
		else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
			hdr_len[2] = sizeof(struct udphdr);
	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		hdr_len[1] = sizeof(struct ipv6hdr);
		if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
			hdr_len[2] = tcp_hdrlen(skb);
		else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
			hdr_len[2] = sizeof(struct udphdr);
	} else if (skb->protocol == htons(ETH_P_ARP)) {
		hdr_len[1] = arp_hdr_len(skb->dev);
		hdr_len[2] = 0;
	}

	memset(hdr_data, 0, 120);
	if ((hdr_field >> 6) & 1) {
		hdr = skb_mac_header(skb);
		memcpy(hdr_data, hdr, hdr_len[0]);
		len += hdr_len[0];
	}

	if ((hdr_field >> 5) & 1) {
		hdr = skb_network_header(skb);
		memcpy(hdr_data + len, hdr, hdr_len[1]);
		len += hdr_len[1];
	}

	if ((hdr_field >> 4) & 1) {
		hdr = skb_transport_header(skb);
		memcpy(hdr_data + len, hdr, hdr_len[2]);
		len += hdr_len[2];
	}
	return len;
}
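
/*
 * Illustrative note on the hdr_field layout consumed above (hypothetical
 * helper, never called): bit 6 selects the L2 header, bit 5 the L3 header
 * and bit 4 the L4 header; ibmvnic_xmit() appears to test bit 7 before
 * building any header descriptors at all.
 */
static inline bool example_hdr_wanted(u8 hdr_field, int layer)
{
	/* layer is 2, 3 or 4; L2 maps to bit 6, L3 to bit 5, L4 to bit 4 */
	return (hdr_field >> (8 - layer)) & 1;
}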

/**
 * create_hdr_descs - create header and header extension descriptors
 * @hdr_field - bitfield determining needed headers
 * @hdr_data - buffer containing header data
 * @len - length of data buffer
 * @hdr_len - array of individual header lengths
 * @scrq_arr - descriptor array
 *
 * Creates header and, if needed, header extension descriptors and
 * places them in a descriptor array, scrq_arr
 */

static int create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len,
			    union sub_crq *scrq_arr)
{
	union sub_crq hdr_desc;
	int tmp_len = len;
	int num_descs = 0;
	u8 *data, *cur;
	int tmp;

	while (tmp_len > 0) {
		cur = hdr_data + len - tmp_len;

		memset(&hdr_desc, 0, sizeof(hdr_desc));
		if (cur != hdr_data) {
			data = hdr_desc.hdr_ext.data;
			tmp = tmp_len > 29 ? 29 : tmp_len;
			hdr_desc.hdr_ext.first = IBMVNIC_CRQ_CMD;
			hdr_desc.hdr_ext.type = IBMVNIC_HDR_EXT_DESC;
			hdr_desc.hdr_ext.len = tmp;
		} else {
			data = hdr_desc.hdr.data;
			tmp = tmp_len > 24 ? 24 : tmp_len;
			hdr_desc.hdr.first = IBMVNIC_CRQ_CMD;
			hdr_desc.hdr.type = IBMVNIC_HDR_DESC;
			hdr_desc.hdr.len = tmp;
			hdr_desc.hdr.l2_len = (u8)hdr_len[0];
			hdr_desc.hdr.l3_len = cpu_to_be16((u16)hdr_len[1]);
			hdr_desc.hdr.l4_len = (u8)hdr_len[2];
			hdr_desc.hdr.flag = hdr_field << 1;
		}
		memcpy(data, cur, tmp);
		tmp_len -= tmp;
		*scrq_arr = hdr_desc;
		scrq_arr++;
		num_descs++;
	}

	return num_descs;
}
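
/*
 * Illustrative arithmetic for the loop above (hypothetical helper, never
 * called): the first descriptor carries up to 24 bytes of header data and
 * each extension descriptor up to 29 bytes, so a full 120-byte header
 * buffer needs 1 + DIV_ROUND_UP(120 - 24, 29) = 5 descriptors.
 */
static inline int example_hdr_desc_count(int len)
{
	if (len <= 0)
		return 0;
	if (len <= 24)
		return 1;
	return 1 + DIV_ROUND_UP(len - 24, 29);
}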

/**
 * build_hdr_descs_arr - build a header descriptor array
 * @txbuff - tx buffer containing the socket buffer and header data
 * @num_entries - number of descriptors to be sent
 * @hdr_field - bit field determining which headers will be sent
 *
 * This function will build a TX descriptor array with applicable
 * L2/L3/L4 packet header descriptors to be sent by send_subcrq_indirect.
 */

static void build_hdr_descs_arr(struct ibmvnic_tx_buff *txbuff,
				int *num_entries, u8 hdr_field)
{
	int hdr_len[3] = {0, 0, 0};
	int tot_len;
	u8 *hdr_data = txbuff->hdr_data;

	tot_len = build_hdr_data(hdr_field, txbuff->skb, hdr_len,
				 txbuff->hdr_data);
	*num_entries += create_hdr_descs(hdr_field, hdr_data, tot_len, hdr_len,
					 txbuff->indir_arr + 1);
}

static int ibmvnic_xmit_workarounds(struct sk_buff *skb,
				    struct net_device *netdev)
{
	/* For some backing devices, mishandling of small packets
	 * can result in a loss of connection or TX stall. Device
	 * architects recommend that no packet should be smaller
	 * than the minimum MTU value provided to the driver, so
	 * pad any packets to that length
	 */
	if (skb->len < netdev->min_mtu)
		return skb_put_padto(skb, netdev->min_mtu);

	return 0;
}

static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int queue_num = skb_get_queue_mapping(skb);
	u8 *hdrs = (u8 *)&adapter->tx_rx_desc_req;
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_tx_buff *tx_buff = NULL;
	struct ibmvnic_sub_crq_queue *tx_scrq;
	struct ibmvnic_tx_pool *tx_pool;
	unsigned int tx_send_failed = 0;
	unsigned int tx_map_failed = 0;
	unsigned int tx_dropped = 0;
	unsigned int tx_packets = 0;
	unsigned int tx_bytes = 0;
	dma_addr_t data_dma_addr;
	struct netdev_queue *txq;
	unsigned long lpar_rc;
	union sub_crq tx_crq;
	unsigned int offset;
	int num_entries = 1;
	unsigned char *dst;
	u64 *handle_array;
	int index = 0;
	u8 proto = 0;
	int ret = 0;

	if (adapter->resetting) {
		if (!netif_subqueue_stopped(netdev, skb))
			netif_stop_subqueue(netdev, queue_num);
		dev_kfree_skb_any(skb);

		tx_send_failed++;
		tx_dropped++;
		ret = NETDEV_TX_OK;
		goto out;
	}

	if (ibmvnic_xmit_workarounds(skb, netdev)) {
		tx_dropped++;
		tx_send_failed++;
		ret = NETDEV_TX_OK;
		goto out;
	}
	if (skb_is_gso(skb))
		tx_pool = &adapter->tso_pool[queue_num];
	else
		tx_pool = &adapter->tx_pool[queue_num];

	tx_scrq = adapter->tx_scrq[queue_num];
	txq = netdev_get_tx_queue(netdev, skb_get_queue_mapping(skb));
	handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
		be32_to_cpu(adapter->login_rsp_buf->off_txsubm_subcrqs));

	index = tx_pool->free_map[tx_pool->consumer_index];

	if (index == IBMVNIC_INVALID_MAP) {
		dev_kfree_skb_any(skb);
		tx_send_failed++;
		tx_dropped++;
		ret = NETDEV_TX_OK;
		goto out;
	}

	tx_pool->free_map[tx_pool->consumer_index] = IBMVNIC_INVALID_MAP;

	offset = index * tx_pool->buf_size;
	dst = tx_pool->long_term_buff.buff + offset;
	memset(dst, 0, tx_pool->buf_size);
	data_dma_addr = tx_pool->long_term_buff.addr + offset;

	if (skb_shinfo(skb)->nr_frags) {
		int cur, i;

		/* Copy the head */
		skb_copy_from_linear_data(skb, dst, skb_headlen(skb));
		cur = skb_headlen(skb);

		/* Copy the frags */
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			memcpy(dst + cur,
			       page_address(skb_frag_page(frag)) +
			       frag->page_offset, skb_frag_size(frag));
			cur += skb_frag_size(frag);
		}
	} else {
		skb_copy_from_linear_data(skb, dst, skb->len);
	}

	tx_pool->consumer_index =
	    (tx_pool->consumer_index + 1) % tx_pool->num_buffers;

	tx_buff = &tx_pool->tx_buff[index];
	tx_buff->skb = skb;
	tx_buff->data_dma[0] = data_dma_addr;
	tx_buff->data_len[0] = skb->len;
	tx_buff->index = index;
	tx_buff->pool_index = queue_num;
	tx_buff->last_frag = true;

	memset(&tx_crq, 0, sizeof(tx_crq));
	tx_crq.v1.first = IBMVNIC_CRQ_CMD;
	tx_crq.v1.type = IBMVNIC_TX_DESC;
	tx_crq.v1.n_crq_elem = 1;
	tx_crq.v1.n_sge = 1;
	tx_crq.v1.flags1 = IBMVNIC_TX_COMP_NEEDED;

	if (skb_is_gso(skb))
		tx_crq.v1.correlator =
			cpu_to_be32(index | IBMVNIC_TSO_POOL_MASK);
	else
		tx_crq.v1.correlator = cpu_to_be32(index);
	tx_crq.v1.dma_reg = cpu_to_be16(tx_pool->long_term_buff.map_id);
	tx_crq.v1.sge_len = cpu_to_be32(skb->len);
	tx_crq.v1.ioba = cpu_to_be64(data_dma_addr);

	if (adapter->vlan_header_insertion) {
		tx_crq.v1.flags2 |= IBMVNIC_TX_VLAN_INSERT;
		tx_crq.v1.vlan_id = cpu_to_be16(skb->vlan_tci);
	}

	if (skb->protocol == htons(ETH_P_IP)) {
		tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV4;
		proto = ip_hdr(skb)->protocol;
	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV6;
		proto = ipv6_hdr(skb)->nexthdr;
	}

	if (proto == IPPROTO_TCP)
		tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_TCP;
	else if (proto == IPPROTO_UDP)
		tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_UDP;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		tx_crq.v1.flags1 |= IBMVNIC_TX_CHKSUM_OFFLOAD;
		hdrs += 2;
	}
	if (skb_is_gso(skb)) {
		tx_crq.v1.flags1 |= IBMVNIC_TX_LSO;
		tx_crq.v1.mss = cpu_to_be16(skb_shinfo(skb)->gso_size);
		hdrs += 2;
	}
	/* determine if l2/3/4 headers are sent to firmware */
	if ((*hdrs >> 7) & 1) {
		build_hdr_descs_arr(tx_buff, &num_entries, *hdrs);
		tx_crq.v1.n_crq_elem = num_entries;
		tx_buff->num_entries = num_entries;
		tx_buff->indir_arr[0] = tx_crq;
		tx_buff->indir_dma = dma_map_single(dev, tx_buff->indir_arr,
						    sizeof(tx_buff->indir_arr),
						    DMA_TO_DEVICE);
		if (dma_mapping_error(dev, tx_buff->indir_dma)) {
			dev_kfree_skb_any(skb);
			tx_buff->skb = NULL;
			if (!firmware_has_feature(FW_FEATURE_CMO))
				dev_err(dev, "tx: unable to map descriptor array\n");
			tx_map_failed++;
			tx_dropped++;
			ret = NETDEV_TX_OK;
			goto tx_err_out;
		}
		lpar_rc = send_subcrq_indirect(adapter, handle_array[queue_num],
					       (u64)tx_buff->indir_dma,
					       (u64)num_entries);
	} else {
		tx_buff->num_entries = num_entries;
		lpar_rc = send_subcrq(adapter, handle_array[queue_num],
				      &tx_crq);
	}
	if (lpar_rc != H_SUCCESS) {
		if (lpar_rc != H_CLOSED && lpar_rc != H_PARAMETER)
			dev_err_ratelimited(dev, "tx: send failed\n");
		dev_kfree_skb_any(skb);
		tx_buff->skb = NULL;

		if (lpar_rc == H_CLOSED || adapter->failover_pending) {
			/* Disable TX and report carrier off if queue is closed
			 * or pending failover.
			 * Firmware guarantees that a signal will be sent to the
			 * driver, triggering a reset or some other action.
			 */
1613 */ 1614 netif_tx_stop_all_queues(netdev); 1615 netif_carrier_off(netdev); 1616 } 1617 1618 tx_send_failed++; 1619 tx_dropped++; 1620 ret = NETDEV_TX_OK; 1621 goto tx_err_out; 1622 } 1623 1624 if (atomic_add_return(num_entries, &tx_scrq->used) 1625 >= adapter->req_tx_entries_per_subcrq) { 1626 netdev_dbg(netdev, "Stopping queue %d\n", queue_num); 1627 netif_stop_subqueue(netdev, queue_num); 1628 } 1629 1630 tx_packets++; 1631 tx_bytes += skb->len; 1632 txq->trans_start = jiffies; 1633 ret = NETDEV_TX_OK; 1634 goto out; 1635 1636 tx_err_out: 1637 /* roll back consumer index and map array*/ 1638 if (tx_pool->consumer_index == 0) 1639 tx_pool->consumer_index = 1640 tx_pool->num_buffers - 1; 1641 else 1642 tx_pool->consumer_index--; 1643 tx_pool->free_map[tx_pool->consumer_index] = index; 1644 out: 1645 netdev->stats.tx_dropped += tx_dropped; 1646 netdev->stats.tx_bytes += tx_bytes; 1647 netdev->stats.tx_packets += tx_packets; 1648 adapter->tx_send_failed += tx_send_failed; 1649 adapter->tx_map_failed += tx_map_failed; 1650 adapter->tx_stats_buffers[queue_num].packets += tx_packets; 1651 adapter->tx_stats_buffers[queue_num].bytes += tx_bytes; 1652 adapter->tx_stats_buffers[queue_num].dropped_packets += tx_dropped; 1653 1654 return ret; 1655 } 1656 1657 static void ibmvnic_set_multi(struct net_device *netdev) 1658 { 1659 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 1660 struct netdev_hw_addr *ha; 1661 union ibmvnic_crq crq; 1662 1663 memset(&crq, 0, sizeof(crq)); 1664 crq.request_capability.first = IBMVNIC_CRQ_CMD; 1665 crq.request_capability.cmd = REQUEST_CAPABILITY; 1666 1667 if (netdev->flags & IFF_PROMISC) { 1668 if (!adapter->promisc_supported) 1669 return; 1670 } else { 1671 if (netdev->flags & IFF_ALLMULTI) { 1672 /* Accept all multicast */ 1673 memset(&crq, 0, sizeof(crq)); 1674 crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD; 1675 crq.multicast_ctrl.cmd = MULTICAST_CTRL; 1676 crq.multicast_ctrl.flags = IBMVNIC_ENABLE_ALL; 1677 ibmvnic_send_crq(adapter, &crq); 1678 } else if (netdev_mc_empty(netdev)) { 1679 /* Reject all multicast */ 1680 memset(&crq, 0, sizeof(crq)); 1681 crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD; 1682 crq.multicast_ctrl.cmd = MULTICAST_CTRL; 1683 crq.multicast_ctrl.flags = IBMVNIC_DISABLE_ALL; 1684 ibmvnic_send_crq(adapter, &crq); 1685 } else { 1686 /* Accept one or more multicast(s) */ 1687 netdev_for_each_mc_addr(ha, netdev) { 1688 memset(&crq, 0, sizeof(crq)); 1689 crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD; 1690 crq.multicast_ctrl.cmd = MULTICAST_CTRL; 1691 crq.multicast_ctrl.flags = IBMVNIC_ENABLE_MC; 1692 ether_addr_copy(&crq.multicast_ctrl.mac_addr[0], 1693 ha->addr); 1694 ibmvnic_send_crq(adapter, &crq); 1695 } 1696 } 1697 } 1698 } 1699 1700 static int __ibmvnic_set_mac(struct net_device *netdev, struct sockaddr *p) 1701 { 1702 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 1703 struct sockaddr *addr = p; 1704 union ibmvnic_crq crq; 1705 int rc; 1706 1707 if (!is_valid_ether_addr(addr->sa_data)) 1708 return -EADDRNOTAVAIL; 1709 1710 memset(&crq, 0, sizeof(crq)); 1711 crq.change_mac_addr.first = IBMVNIC_CRQ_CMD; 1712 crq.change_mac_addr.cmd = CHANGE_MAC_ADDR; 1713 ether_addr_copy(&crq.change_mac_addr.mac_addr[0], addr->sa_data); 1714 1715 init_completion(&adapter->fw_done); 1716 rc = ibmvnic_send_crq(adapter, &crq); 1717 if (rc) 1718 return rc; 1719 wait_for_completion(&adapter->fw_done); 1720 /* netdev->dev_addr is changed in handle_change_mac_rsp function */ 1721 return adapter->fw_done_rc ? 

static int ibmvnic_set_mac(struct net_device *netdev, void *p)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int rc;

	if (adapter->state == VNIC_PROBED) {
		memcpy(&adapter->desired.mac, addr, sizeof(struct sockaddr));
		adapter->mac_change_pending = true;
		return 0;
	}

	rc = __ibmvnic_set_mac(netdev, addr);

	return rc;
}
1806 */ 1807 if (reset_state == VNIC_PROBED) 1808 return 0; 1809 1810 rc = ibmvnic_login(netdev); 1811 if (rc) { 1812 adapter->state = reset_state; 1813 return rc; 1814 } 1815 1816 if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM || 1817 adapter->wait_for_reset) { 1818 rc = init_resources(adapter); 1819 if (rc) 1820 return rc; 1821 } else if (adapter->req_rx_queues != old_num_rx_queues || 1822 adapter->req_tx_queues != old_num_tx_queues) { 1823 adapter->map_id = 1; 1824 release_rx_pools(adapter); 1825 release_tx_pools(adapter); 1826 rc = init_rx_pools(netdev); 1827 if (rc) 1828 return rc; 1829 rc = init_tx_pools(netdev); 1830 if (rc) 1831 return rc; 1832 1833 release_napi(adapter); 1834 rc = init_napi(adapter); 1835 if (rc) 1836 return rc; 1837 } else { 1838 rc = reset_tx_pools(adapter); 1839 if (rc) 1840 return rc; 1841 1842 rc = reset_rx_pools(adapter); 1843 if (rc) 1844 return rc; 1845 } 1846 ibmvnic_disable_irqs(adapter); 1847 } 1848 adapter->state = VNIC_CLOSED; 1849 1850 if (reset_state == VNIC_CLOSED) 1851 return 0; 1852 1853 rc = __ibmvnic_open(netdev); 1854 if (rc) { 1855 if (list_empty(&adapter->rwi_list)) 1856 adapter->state = VNIC_CLOSED; 1857 else 1858 adapter->state = reset_state; 1859 1860 return 0; 1861 } 1862 1863 /* kick napi */ 1864 for (i = 0; i < adapter->req_rx_queues; i++) 1865 napi_schedule(&adapter->napi[i]); 1866 1867 if (adapter->reset_reason != VNIC_RESET_FAILOVER && 1868 adapter->reset_reason != VNIC_RESET_CHANGE_PARAM) 1869 netdev_notify_peers(netdev); 1870 1871 netif_carrier_on(netdev); 1872 1873 return 0; 1874 } 1875 1876 static int do_hard_reset(struct ibmvnic_adapter *adapter, 1877 struct ibmvnic_rwi *rwi, u32 reset_state) 1878 { 1879 struct net_device *netdev = adapter->netdev; 1880 int rc; 1881 1882 netdev_dbg(adapter->netdev, "Hard resetting driver (%d)\n", 1883 rwi->reset_reason); 1884 1885 netif_carrier_off(netdev); 1886 adapter->reset_reason = rwi->reset_reason; 1887 1888 ibmvnic_cleanup(netdev); 1889 release_resources(adapter); 1890 release_sub_crqs(adapter, 0); 1891 release_crq_queue(adapter); 1892 1893 /* remove the closed state so when we call open it appears 1894 * we are coming from the probed state. 1895 */ 1896 adapter->state = VNIC_PROBED; 1897 1898 rc = init_crq_queue(adapter); 1899 if (rc) { 1900 netdev_err(adapter->netdev, 1901 "Couldn't initialize crq. rc=%d\n", rc); 1902 return rc; 1903 } 1904 1905 rc = ibmvnic_init(adapter); 1906 if (rc) 1907 return rc; 1908 1909 /* If the adapter was in PROBE state prior to the reset, 1910 * exit here. 
1911 */ 1912 if (reset_state == VNIC_PROBED) 1913 return 0; 1914 1915 rc = ibmvnic_login(netdev); 1916 if (rc) { 1917 adapter->state = VNIC_PROBED; 1918 return 0; 1919 } 1920 /* netif_set_real_num_xx_queues needs to take rtnl lock here 1921 * unless wait_for_reset is set, in which case the rtnl lock 1922 * has already been taken before initializing the reset 1923 */ 1924 if (!adapter->wait_for_reset) { 1925 rtnl_lock(); 1926 rc = init_resources(adapter); 1927 rtnl_unlock(); 1928 } else { 1929 rc = init_resources(adapter); 1930 } 1931 if (rc) 1932 return rc; 1933 1934 ibmvnic_disable_irqs(adapter); 1935 adapter->state = VNIC_CLOSED; 1936 1937 if (reset_state == VNIC_CLOSED) 1938 return 0; 1939 1940 rc = __ibmvnic_open(netdev); 1941 if (rc) { 1942 if (list_empty(&adapter->rwi_list)) 1943 adapter->state = VNIC_CLOSED; 1944 else 1945 adapter->state = reset_state; 1946 1947 return 0; 1948 } 1949 1950 netif_carrier_on(netdev); 1951 1952 return 0; 1953 } 1954 1955 static struct ibmvnic_rwi *get_next_rwi(struct ibmvnic_adapter *adapter) 1956 { 1957 struct ibmvnic_rwi *rwi; 1958 1959 mutex_lock(&adapter->rwi_lock); 1960 1961 if (!list_empty(&adapter->rwi_list)) { 1962 rwi = list_first_entry(&adapter->rwi_list, struct ibmvnic_rwi, 1963 list); 1964 list_del(&rwi->list); 1965 } else { 1966 rwi = NULL; 1967 } 1968 1969 mutex_unlock(&adapter->rwi_lock); 1970 return rwi; 1971 } 1972 1973 static void free_all_rwi(struct ibmvnic_adapter *adapter) 1974 { 1975 struct ibmvnic_rwi *rwi; 1976 1977 rwi = get_next_rwi(adapter); 1978 while (rwi) { 1979 kfree(rwi); 1980 rwi = get_next_rwi(adapter); 1981 } 1982 } 1983 1984 static void __ibmvnic_reset(struct work_struct *work) 1985 { 1986 struct ibmvnic_rwi *rwi; 1987 struct ibmvnic_adapter *adapter; 1988 struct net_device *netdev; 1989 u32 reset_state; 1990 int rc = 0; 1991 1992 adapter = container_of(work, struct ibmvnic_adapter, ibmvnic_reset); 1993 netdev = adapter->netdev; 1994 1995 mutex_lock(&adapter->reset_lock); 1996 reset_state = adapter->state; 1997 1998 rwi = get_next_rwi(adapter); 1999 while (rwi) { 2000 if (adapter->force_reset_recovery) { 2001 adapter->force_reset_recovery = false; 2002 rc = do_hard_reset(adapter, rwi, reset_state); 2003 } else { 2004 rc = do_reset(adapter, rwi, reset_state); 2005 } 2006 kfree(rwi); 2007 if (rc && rc != IBMVNIC_INIT_FAILED && 2008 !adapter->force_reset_recovery) 2009 break; 2010 2011 rwi = get_next_rwi(adapter); 2012 } 2013 2014 if (adapter->wait_for_reset) { 2015 adapter->wait_for_reset = false; 2016 adapter->reset_done_rc = rc; 2017 complete(&adapter->reset_done); 2018 } 2019 2020 if (rc) { 2021 netdev_dbg(adapter->netdev, "Reset failed\n"); 2022 free_all_rwi(adapter); 2023 mutex_unlock(&adapter->reset_lock); 2024 return; 2025 } 2026 2027 adapter->resetting = false; 2028 mutex_unlock(&adapter->reset_lock); 2029 } 2030 2031 static int ibmvnic_reset(struct ibmvnic_adapter *adapter, 2032 enum ibmvnic_reset_reason reason) 2033 { 2034 struct list_head *entry, *tmp_entry; 2035 struct ibmvnic_rwi *rwi, *tmp; 2036 struct net_device *netdev = adapter->netdev; 2037 int ret; 2038 2039 if (adapter->state == VNIC_REMOVING || 2040 adapter->state == VNIC_REMOVED || 2041 adapter->failover_pending) { 2042 ret = EBUSY; 2043 netdev_dbg(netdev, "Adapter removing or pending failover, skipping reset\n"); 2044 goto err; 2045 } 2046 2047 if (adapter->state == VNIC_PROBING) { 2048 netdev_warn(netdev, "Adapter reset during probe\n"); 2049 ret = adapter->init_done_rc = EAGAIN; 2050 goto err; 2051 } 2052 2053 mutex_lock(&adapter->rwi_lock); 
2054 2055 list_for_each(entry, &adapter->rwi_list) { 2056 tmp = list_entry(entry, struct ibmvnic_rwi, list); 2057 if (tmp->reset_reason == reason) { 2058 netdev_dbg(netdev, "Skipping matching reset\n"); 2059 mutex_unlock(&adapter->rwi_lock); 2060 ret = EBUSY; 2061 goto err; 2062 } 2063 } 2064 2065 rwi = kzalloc(sizeof(*rwi), GFP_KERNEL); 2066 if (!rwi) { 2067 mutex_unlock(&adapter->rwi_lock); 2068 ibmvnic_close(netdev); 2069 ret = ENOMEM; 2070 goto err; 2071 } 2072 /* if we just received a transport event, 2073 * flush the reset queue, freeing its stale entries, and process this reset 2074 */ 2075 if (adapter->force_reset_recovery && !list_empty(&adapter->rwi_list)) { 2076 list_for_each_safe(entry, tmp_entry, &adapter->rwi_list) { 2077 list_del(entry); kfree(list_entry(entry, struct ibmvnic_rwi, list)); 2078 } } 2079 rwi->reset_reason = reason; 2080 list_add_tail(&rwi->list, &adapter->rwi_list); 2081 mutex_unlock(&adapter->rwi_lock); 2082 adapter->resetting = true; 2083 netdev_dbg(adapter->netdev, "Scheduling reset (reason %d)\n", reason); 2084 schedule_work(&adapter->ibmvnic_reset); 2085 2086 return 0; 2087 err: 2088 if (adapter->wait_for_reset) 2089 adapter->wait_for_reset = false; 2090 return -ret; 2091 } 2092 2093 static void ibmvnic_tx_timeout(struct net_device *dev) 2094 { 2095 struct ibmvnic_adapter *adapter = netdev_priv(dev); 2096 2097 ibmvnic_reset(adapter, VNIC_RESET_TIMEOUT); 2098 } 2099 2100 static void remove_buff_from_pool(struct ibmvnic_adapter *adapter, 2101 struct ibmvnic_rx_buff *rx_buff) 2102 { 2103 struct ibmvnic_rx_pool *pool = &adapter->rx_pool[rx_buff->pool_index]; 2104 2105 rx_buff->skb = NULL; 2106 2107 pool->free_map[pool->next_alloc] = (int)(rx_buff - pool->rx_buff); 2108 pool->next_alloc = (pool->next_alloc + 1) % pool->size; 2109 2110 atomic_dec(&pool->available); 2111 } 2112 2113 static int ibmvnic_poll(struct napi_struct *napi, int budget) 2114 { 2115 struct net_device *netdev = napi->dev; 2116 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 2117 int scrq_num = (int)(napi - adapter->napi); 2118 int frames_processed = 0; 2119 2120 restart_poll: 2121 while (frames_processed < budget) { 2122 struct sk_buff *skb; 2123 struct ibmvnic_rx_buff *rx_buff; 2124 union sub_crq *next; 2125 u32 length; 2126 u16 offset; 2127 u8 flags = 0; 2128 2129 if (unlikely(adapter->resetting && 2130 adapter->reset_reason != VNIC_RESET_NON_FATAL)) { 2131 enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]); 2132 napi_complete_done(napi, frames_processed); 2133 return frames_processed; 2134 } 2135 2136 if (!pending_scrq(adapter, adapter->rx_scrq[scrq_num])) 2137 break; 2138 next = ibmvnic_next_scrq(adapter, adapter->rx_scrq[scrq_num]); 2139 rx_buff = 2140 (struct ibmvnic_rx_buff *)be64_to_cpu(next-> 2141 rx_comp.correlator); 2142 /* do error checking */ 2143 if (next->rx_comp.rc) { 2144 netdev_dbg(netdev, "rx buffer returned with rc %x\n", 2145 be16_to_cpu(next->rx_comp.rc)); 2146 /* free the entry */ 2147 next->rx_comp.first = 0; 2148 dev_kfree_skb_any(rx_buff->skb); 2149 remove_buff_from_pool(adapter, rx_buff); 2150 continue; 2151 } else if (!rx_buff->skb) { 2152 /* free the entry */ 2153 next->rx_comp.first = 0; 2154 remove_buff_from_pool(adapter, rx_buff); 2155 continue; 2156 } 2157 2158 length = be32_to_cpu(next->rx_comp.len); 2159 offset = be16_to_cpu(next->rx_comp.off_frame_data); 2160 flags = next->rx_comp.flags; 2161 skb = rx_buff->skb; 2162 skb_copy_to_linear_data(skb, rx_buff->data + offset, 2163 length); 2164 2165 /* VLAN Header has been stripped by the system firmware and 2166 * needs to be inserted by the driver 2167 */ 2168 if
(adapter->rx_vlan_header_insertion && 2169 (flags & IBMVNIC_VLAN_STRIPPED)) 2170 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), 2171 ntohs(next->rx_comp.vlan_tci)); 2172 2173 /* free the entry */ 2174 next->rx_comp.first = 0; 2175 remove_buff_from_pool(adapter, rx_buff); 2176 2177 skb_put(skb, length); 2178 skb->protocol = eth_type_trans(skb, netdev); 2179 skb_record_rx_queue(skb, scrq_num); 2180 2181 if (flags & IBMVNIC_IP_CHKSUM_GOOD && 2182 flags & IBMVNIC_TCP_UDP_CHKSUM_GOOD) { 2183 skb->ip_summed = CHECKSUM_UNNECESSARY; 2184 } 2185 2186 length = skb->len; 2187 napi_gro_receive(napi, skb); /* send it up */ 2188 netdev->stats.rx_packets++; 2189 netdev->stats.rx_bytes += length; 2190 adapter->rx_stats_buffers[scrq_num].packets++; 2191 adapter->rx_stats_buffers[scrq_num].bytes += length; 2192 frames_processed++; 2193 } 2194 2195 if (adapter->state != VNIC_CLOSING) 2196 replenish_rx_pool(adapter, &adapter->rx_pool[scrq_num]); 2197 2198 if (frames_processed < budget) { 2199 enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]); 2200 napi_complete_done(napi, frames_processed); 2201 if (pending_scrq(adapter, adapter->rx_scrq[scrq_num]) && 2202 napi_reschedule(napi)) { 2203 disable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]); 2204 goto restart_poll; 2205 } 2206 } 2207 return frames_processed; 2208 } 2209 2210 #ifdef CONFIG_NET_POLL_CONTROLLER 2211 static void ibmvnic_netpoll_controller(struct net_device *dev) 2212 { 2213 struct ibmvnic_adapter *adapter = netdev_priv(dev); 2214 int i; 2215 2216 replenish_pools(netdev_priv(dev)); 2217 for (i = 0; i < adapter->req_rx_queues; i++) 2218 ibmvnic_interrupt_rx(adapter->rx_scrq[i]->irq, 2219 adapter->rx_scrq[i]); 2220 } 2221 #endif 2222 2223 static int wait_for_reset(struct ibmvnic_adapter *adapter) 2224 { 2225 int rc, ret; 2226 2227 adapter->fallback.mtu = adapter->req_mtu; 2228 adapter->fallback.rx_queues = adapter->req_rx_queues; 2229 adapter->fallback.tx_queues = adapter->req_tx_queues; 2230 adapter->fallback.rx_entries = adapter->req_rx_add_entries_per_subcrq; 2231 adapter->fallback.tx_entries = adapter->req_tx_entries_per_subcrq; 2232 2233 init_completion(&adapter->reset_done); 2234 adapter->wait_for_reset = true; 2235 rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM); 2236 if (rc) 2237 return rc; 2238 wait_for_completion(&adapter->reset_done); 2239 2240 ret = 0; 2241 if (adapter->reset_done_rc) { 2242 ret = -EIO; 2243 adapter->desired.mtu = adapter->fallback.mtu; 2244 adapter->desired.rx_queues = adapter->fallback.rx_queues; 2245 adapter->desired.tx_queues = adapter->fallback.tx_queues; 2246 adapter->desired.rx_entries = adapter->fallback.rx_entries; 2247 adapter->desired.tx_entries = adapter->fallback.tx_entries; 2248 2249 init_completion(&adapter->reset_done); 2250 adapter->wait_for_reset = true; 2251 rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM); 2252 if (rc) 2253 return ret; 2254 wait_for_completion(&adapter->reset_done); 2255 } 2256 adapter->wait_for_reset = false; 2257 2258 return ret; 2259 } 2260 2261 static int ibmvnic_change_mtu(struct net_device *netdev, int new_mtu) 2262 { 2263 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 2264 2265 adapter->desired.mtu = new_mtu + ETH_HLEN; 2266 2267 return wait_for_reset(adapter); 2268 } 2269 2270 static netdev_features_t ibmvnic_features_check(struct sk_buff *skb, 2271 struct net_device *dev, 2272 netdev_features_t features) 2273 { 2274 /* Some backing hardware adapters can not 2275 * handle packets with a MSS less than 224 2276 * or with only one segment. 
2277 */ 2278 if (skb_is_gso(skb)) { 2279 if (skb_shinfo(skb)->gso_size < 224 || 2280 skb_shinfo(skb)->gso_segs == 1) 2281 features &= ~NETIF_F_GSO_MASK; 2282 } 2283 2284 return features; 2285 } 2286 2287 static const struct net_device_ops ibmvnic_netdev_ops = { 2288 .ndo_open = ibmvnic_open, 2289 .ndo_stop = ibmvnic_close, 2290 .ndo_start_xmit = ibmvnic_xmit, 2291 .ndo_set_rx_mode = ibmvnic_set_multi, 2292 .ndo_set_mac_address = ibmvnic_set_mac, 2293 .ndo_validate_addr = eth_validate_addr, 2294 .ndo_tx_timeout = ibmvnic_tx_timeout, 2295 #ifdef CONFIG_NET_POLL_CONTROLLER 2296 .ndo_poll_controller = ibmvnic_netpoll_controller, 2297 #endif 2298 .ndo_change_mtu = ibmvnic_change_mtu, 2299 .ndo_features_check = ibmvnic_features_check, 2300 }; 2301 2302 /* ethtool functions */ 2303 2304 static int ibmvnic_get_link_ksettings(struct net_device *netdev, 2305 struct ethtool_link_ksettings *cmd) 2306 { 2307 u32 supported, advertising; 2308 2309 supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | 2310 SUPPORTED_FIBRE); 2311 advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg | 2312 ADVERTISED_FIBRE); 2313 cmd->base.speed = SPEED_1000; 2314 cmd->base.duplex = DUPLEX_FULL; 2315 cmd->base.port = PORT_FIBRE; 2316 cmd->base.phy_address = 0; 2317 cmd->base.autoneg = AUTONEG_ENABLE; 2318 2319 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, 2320 supported); 2321 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, 2322 advertising); 2323 2324 return 0; 2325 } 2326 2327 static void ibmvnic_get_drvinfo(struct net_device *netdev, 2328 struct ethtool_drvinfo *info) 2329 { 2330 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 2331 2332 strlcpy(info->driver, ibmvnic_driver_name, sizeof(info->driver)); 2333 strlcpy(info->version, IBMVNIC_DRIVER_VERSION, sizeof(info->version)); 2334 strlcpy(info->fw_version, adapter->fw_version, 2335 sizeof(info->fw_version)); 2336 } 2337 2338 static u32 ibmvnic_get_msglevel(struct net_device *netdev) 2339 { 2340 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 2341 2342 return adapter->msg_enable; 2343 } 2344 2345 static void ibmvnic_set_msglevel(struct net_device *netdev, u32 data) 2346 { 2347 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 2348 2349 adapter->msg_enable = data; 2350 } 2351 2352 static u32 ibmvnic_get_link(struct net_device *netdev) 2353 { 2354 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 2355 2356 /* Don't need to send a query because we request a logical link up at 2357 * init and then we wait for link state indications 2358 */ 2359 return adapter->logical_link_state; 2360 } 2361 2362 static void ibmvnic_get_ringparam(struct net_device *netdev, 2363 struct ethtool_ringparam *ring) 2364 { 2365 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 2366 2367 ring->rx_max_pending = adapter->max_rx_add_entries_per_subcrq; 2368 ring->tx_max_pending = adapter->max_tx_entries_per_subcrq; 2369 ring->rx_mini_max_pending = 0; 2370 ring->rx_jumbo_max_pending = 0; 2371 ring->rx_pending = adapter->req_rx_add_entries_per_subcrq; 2372 ring->tx_pending = adapter->req_tx_entries_per_subcrq; 2373 ring->rx_mini_pending = 0; 2374 ring->rx_jumbo_pending = 0; 2375 } 2376 2377 static int ibmvnic_set_ringparam(struct net_device *netdev, 2378 struct ethtool_ringparam *ring) 2379 { 2380 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 2381 2382 if (ring->rx_pending > adapter->max_rx_add_entries_per_subcrq || 2383 ring->tx_pending > adapter->max_tx_entries_per_subcrq) { 2384 netdev_err(netdev, "Invalid 
request.\n"); 2385 netdev_err(netdev, "Max tx buffers = %llu\n", 2386 adapter->max_rx_add_entries_per_subcrq); 2387 netdev_err(netdev, "Max rx buffers = %llu\n", 2388 adapter->max_tx_entries_per_subcrq); 2389 return -EINVAL; 2390 } 2391 2392 adapter->desired.rx_entries = ring->rx_pending; 2393 adapter->desired.tx_entries = ring->tx_pending; 2394 2395 return wait_for_reset(adapter); 2396 } 2397 2398 static void ibmvnic_get_channels(struct net_device *netdev, 2399 struct ethtool_channels *channels) 2400 { 2401 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 2402 2403 channels->max_rx = adapter->max_rx_queues; 2404 channels->max_tx = adapter->max_tx_queues; 2405 channels->max_other = 0; 2406 channels->max_combined = 0; 2407 channels->rx_count = adapter->req_rx_queues; 2408 channels->tx_count = adapter->req_tx_queues; 2409 channels->other_count = 0; 2410 channels->combined_count = 0; 2411 } 2412 2413 static int ibmvnic_set_channels(struct net_device *netdev, 2414 struct ethtool_channels *channels) 2415 { 2416 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 2417 2418 adapter->desired.rx_queues = channels->rx_count; 2419 adapter->desired.tx_queues = channels->tx_count; 2420 2421 return wait_for_reset(adapter); 2422 } 2423 2424 static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data) 2425 { 2426 struct ibmvnic_adapter *adapter = netdev_priv(dev); 2427 int i; 2428 2429 if (stringset != ETH_SS_STATS) 2430 return; 2431 2432 for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++, data += ETH_GSTRING_LEN) 2433 memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN); 2434 2435 for (i = 0; i < adapter->req_tx_queues; i++) { 2436 snprintf(data, ETH_GSTRING_LEN, "tx%d_packets", i); 2437 data += ETH_GSTRING_LEN; 2438 2439 snprintf(data, ETH_GSTRING_LEN, "tx%d_bytes", i); 2440 data += ETH_GSTRING_LEN; 2441 2442 snprintf(data, ETH_GSTRING_LEN, "tx%d_dropped_packets", i); 2443 data += ETH_GSTRING_LEN; 2444 } 2445 2446 for (i = 0; i < adapter->req_rx_queues; i++) { 2447 snprintf(data, ETH_GSTRING_LEN, "rx%d_packets", i); 2448 data += ETH_GSTRING_LEN; 2449 2450 snprintf(data, ETH_GSTRING_LEN, "rx%d_bytes", i); 2451 data += ETH_GSTRING_LEN; 2452 2453 snprintf(data, ETH_GSTRING_LEN, "rx%d_interrupts", i); 2454 data += ETH_GSTRING_LEN; 2455 } 2456 } 2457 2458 static int ibmvnic_get_sset_count(struct net_device *dev, int sset) 2459 { 2460 struct ibmvnic_adapter *adapter = netdev_priv(dev); 2461 2462 switch (sset) { 2463 case ETH_SS_STATS: 2464 return ARRAY_SIZE(ibmvnic_stats) + 2465 adapter->req_tx_queues * NUM_TX_STATS + 2466 adapter->req_rx_queues * NUM_RX_STATS; 2467 default: 2468 return -EOPNOTSUPP; 2469 } 2470 } 2471 2472 static void ibmvnic_get_ethtool_stats(struct net_device *dev, 2473 struct ethtool_stats *stats, u64 *data) 2474 { 2475 struct ibmvnic_adapter *adapter = netdev_priv(dev); 2476 union ibmvnic_crq crq; 2477 int i, j; 2478 int rc; 2479 2480 memset(&crq, 0, sizeof(crq)); 2481 crq.request_statistics.first = IBMVNIC_CRQ_CMD; 2482 crq.request_statistics.cmd = REQUEST_STATISTICS; 2483 crq.request_statistics.ioba = cpu_to_be32(adapter->stats_token); 2484 crq.request_statistics.len = 2485 cpu_to_be32(sizeof(struct ibmvnic_statistics)); 2486 2487 /* Wait for data to be written */ 2488 init_completion(&adapter->stats_done); 2489 rc = ibmvnic_send_crq(adapter, &crq); 2490 if (rc) 2491 return; 2492 wait_for_completion(&adapter->stats_done); 2493 2494 for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++) 2495 data[i] = be64_to_cpu(IBMVNIC_GET_STAT(adapter, 2496 ibmvnic_stats[i].offset)); 
2497 2498 for (j = 0; j < adapter->req_tx_queues; j++) { 2499 data[i] = adapter->tx_stats_buffers[j].packets; 2500 i++; 2501 data[i] = adapter->tx_stats_buffers[j].bytes; 2502 i++; 2503 data[i] = adapter->tx_stats_buffers[j].dropped_packets; 2504 i++; 2505 } 2506 2507 for (j = 0; j < adapter->req_rx_queues; j++) { 2508 data[i] = adapter->rx_stats_buffers[j].packets; 2509 i++; 2510 data[i] = adapter->rx_stats_buffers[j].bytes; 2511 i++; 2512 data[i] = adapter->rx_stats_buffers[j].interrupts; 2513 i++; 2514 } 2515 } 2516 2517 static const struct ethtool_ops ibmvnic_ethtool_ops = { 2518 .get_drvinfo = ibmvnic_get_drvinfo, 2519 .get_msglevel = ibmvnic_get_msglevel, 2520 .set_msglevel = ibmvnic_set_msglevel, 2521 .get_link = ibmvnic_get_link, 2522 .get_ringparam = ibmvnic_get_ringparam, 2523 .set_ringparam = ibmvnic_set_ringparam, 2524 .get_channels = ibmvnic_get_channels, 2525 .set_channels = ibmvnic_set_channels, 2526 .get_strings = ibmvnic_get_strings, 2527 .get_sset_count = ibmvnic_get_sset_count, 2528 .get_ethtool_stats = ibmvnic_get_ethtool_stats, 2529 .get_link_ksettings = ibmvnic_get_link_ksettings, 2530 }; 2531 2532 /* Routines for managing CRQs/sCRQs */ 2533 2534 static int reset_one_sub_crq_queue(struct ibmvnic_adapter *adapter, 2535 struct ibmvnic_sub_crq_queue *scrq) 2536 { 2537 int rc; 2538 2539 if (scrq->irq) { 2540 free_irq(scrq->irq, scrq); 2541 irq_dispose_mapping(scrq->irq); 2542 scrq->irq = 0; 2543 } 2544 2545 memset(scrq->msgs, 0, 4 * PAGE_SIZE); 2546 atomic_set(&scrq->used, 0); 2547 scrq->cur = 0; 2548 2549 rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token, 2550 4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq); 2551 return rc; 2552 } 2553 2554 static int reset_sub_crq_queues(struct ibmvnic_adapter *adapter) 2555 { 2556 int i, rc; 2557 2558 for (i = 0; i < adapter->req_tx_queues; i++) { 2559 netdev_dbg(adapter->netdev, "Re-setting tx_scrq[%d]\n", i); 2560 rc = reset_one_sub_crq_queue(adapter, adapter->tx_scrq[i]); 2561 if (rc) 2562 return rc; 2563 } 2564 2565 for (i = 0; i < adapter->req_rx_queues; i++) { 2566 netdev_dbg(adapter->netdev, "Re-setting rx_scrq[%d]\n", i); 2567 rc = reset_one_sub_crq_queue(adapter, adapter->rx_scrq[i]); 2568 if (rc) 2569 return rc; 2570 } 2571 2572 return rc; 2573 } 2574 2575 static void release_sub_crq_queue(struct ibmvnic_adapter *adapter, 2576 struct ibmvnic_sub_crq_queue *scrq, 2577 bool do_h_free) 2578 { 2579 struct device *dev = &adapter->vdev->dev; 2580 long rc; 2581 2582 netdev_dbg(adapter->netdev, "Releasing sub-CRQ\n"); 2583 2584 if (do_h_free) { 2585 /* Close the sub-crqs */ 2586 do { 2587 rc = plpar_hcall_norets(H_FREE_SUB_CRQ, 2588 adapter->vdev->unit_address, 2589 scrq->crq_num); 2590 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc)); 2591 2592 if (rc) { 2593 netdev_err(adapter->netdev, 2594 "Failed to release sub-CRQ %16lx, rc = %ld\n", 2595 scrq->crq_num, rc); 2596 } 2597 } 2598 2599 dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE, 2600 DMA_BIDIRECTIONAL); 2601 free_pages((unsigned long)scrq->msgs, 2); 2602 kfree(scrq); 2603 } 2604 2605 static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter 2606 *adapter) 2607 { 2608 struct device *dev = &adapter->vdev->dev; 2609 struct ibmvnic_sub_crq_queue *scrq; 2610 int rc; 2611 2612 scrq = kzalloc(sizeof(*scrq), GFP_KERNEL); 2613 if (!scrq) 2614 return NULL; 2615 2616 scrq->msgs = 2617 (union sub_crq *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 2); 2618 if (!scrq->msgs) { 2619 dev_warn(dev, "Couldn't allocate crq queue messages page\n"); 2620 goto 
zero_page_failed; 2621 } 2622 2623 scrq->msg_token = dma_map_single(dev, scrq->msgs, 4 * PAGE_SIZE, 2624 DMA_BIDIRECTIONAL); 2625 if (dma_mapping_error(dev, scrq->msg_token)) { 2626 dev_warn(dev, "Couldn't map crq queue messages page\n"); 2627 goto map_failed; 2628 } 2629 2630 rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token, 2631 4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq); 2632 2633 if (rc == H_RESOURCE) 2634 rc = ibmvnic_reset_crq(adapter); 2635 2636 if (rc == H_CLOSED) { 2637 dev_warn(dev, "Partner adapter not ready, waiting.\n"); 2638 } else if (rc) { 2639 dev_warn(dev, "Error %d registering sub-crq\n", rc); 2640 goto reg_failed; 2641 } 2642 2643 scrq->adapter = adapter; 2644 scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs); 2645 spin_lock_init(&scrq->lock); 2646 2647 netdev_dbg(adapter->netdev, 2648 "sub-crq initialized, num %lx, hw_irq=%lx, irq=%x\n", 2649 scrq->crq_num, scrq->hw_irq, scrq->irq); 2650 2651 return scrq; 2652 2653 reg_failed: 2654 dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE, 2655 DMA_BIDIRECTIONAL); 2656 map_failed: 2657 free_pages((unsigned long)scrq->msgs, 2); 2658 zero_page_failed: 2659 kfree(scrq); 2660 2661 return NULL; 2662 } 2663 2664 static void release_sub_crqs(struct ibmvnic_adapter *adapter, bool do_h_free) 2665 { 2666 int i; 2667 2668 if (adapter->tx_scrq) { 2669 for (i = 0; i < adapter->num_active_tx_scrqs; i++) { 2670 if (!adapter->tx_scrq[i]) 2671 continue; 2672 2673 netdev_dbg(adapter->netdev, "Releasing tx_scrq[%d]\n", 2674 i); 2675 if (adapter->tx_scrq[i]->irq) { 2676 free_irq(adapter->tx_scrq[i]->irq, 2677 adapter->tx_scrq[i]); 2678 irq_dispose_mapping(adapter->tx_scrq[i]->irq); 2679 adapter->tx_scrq[i]->irq = 0; 2680 } 2681 2682 release_sub_crq_queue(adapter, adapter->tx_scrq[i], 2683 do_h_free); 2684 } 2685 2686 kfree(adapter->tx_scrq); 2687 adapter->tx_scrq = NULL; 2688 adapter->num_active_tx_scrqs = 0; 2689 } 2690 2691 if (adapter->rx_scrq) { 2692 for (i = 0; i < adapter->num_active_rx_scrqs; i++) { 2693 if (!adapter->rx_scrq[i]) 2694 continue; 2695 2696 netdev_dbg(adapter->netdev, "Releasing rx_scrq[%d]\n", 2697 i); 2698 if (adapter->rx_scrq[i]->irq) { 2699 free_irq(adapter->rx_scrq[i]->irq, 2700 adapter->rx_scrq[i]); 2701 irq_dispose_mapping(adapter->rx_scrq[i]->irq); 2702 adapter->rx_scrq[i]->irq = 0; 2703 } 2704 2705 release_sub_crq_queue(adapter, adapter->rx_scrq[i], 2706 do_h_free); 2707 } 2708 2709 kfree(adapter->rx_scrq); 2710 adapter->rx_scrq = NULL; 2711 adapter->num_active_rx_scrqs = 0; 2712 } 2713 } 2714 2715 static int disable_scrq_irq(struct ibmvnic_adapter *adapter, 2716 struct ibmvnic_sub_crq_queue *scrq) 2717 { 2718 struct device *dev = &adapter->vdev->dev; 2719 unsigned long rc; 2720 2721 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address, 2722 H_DISABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0); 2723 if (rc) 2724 dev_err(dev, "Couldn't disable scrq irq 0x%lx. rc=%ld\n", 2725 scrq->hw_irq, rc); 2726 return rc; 2727 } 2728 2729 static int enable_scrq_irq(struct ibmvnic_adapter *adapter, 2730 struct ibmvnic_sub_crq_queue *scrq) 2731 { 2732 struct device *dev = &adapter->vdev->dev; 2733 unsigned long rc; 2734 2735 if (scrq->hw_irq > 0x100000000ULL) { 2736 dev_err(dev, "bad hw_irq = %lx\n", scrq->hw_irq); 2737 return 1; 2738 } 2739 2740 if (adapter->resetting && 2741 adapter->reset_reason == VNIC_RESET_MOBILITY) { 2742 u64 val = (0xff000000) | scrq->hw_irq; 2743 2744 rc = plpar_hcall_norets(H_EOI, val); 2745 if (rc) 2746 dev_err(dev, "H_EOI FAILED irq 0x%llx. 
rc=%ld\n", 2747 val, rc); 2748 } 2749 2750 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address, 2751 H_ENABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0); 2752 if (rc) 2753 dev_err(dev, "Couldn't enable scrq irq 0x%lx. rc=%ld\n", 2754 scrq->hw_irq, rc); 2755 return rc; 2756 } 2757 2758 static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter, 2759 struct ibmvnic_sub_crq_queue *scrq) 2760 { 2761 struct device *dev = &adapter->vdev->dev; 2762 struct ibmvnic_tx_pool *tx_pool; 2763 struct ibmvnic_tx_buff *txbuff; 2764 union sub_crq *next; 2765 int index; 2766 int i, j; 2767 u8 *first; 2768 2769 restart_loop: 2770 while (pending_scrq(adapter, scrq)) { 2771 unsigned int pool = scrq->pool_index; 2772 int num_entries = 0; 2773 2774 next = ibmvnic_next_scrq(adapter, scrq); 2775 for (i = 0; i < next->tx_comp.num_comps; i++) { 2776 if (next->tx_comp.rcs[i]) { 2777 dev_err(dev, "tx error %x\n", 2778 next->tx_comp.rcs[i]); 2779 continue; 2780 } 2781 index = be32_to_cpu(next->tx_comp.correlators[i]); 2782 if (index & IBMVNIC_TSO_POOL_MASK) { 2783 tx_pool = &adapter->tso_pool[pool]; 2784 index &= ~IBMVNIC_TSO_POOL_MASK; 2785 } else { 2786 tx_pool = &adapter->tx_pool[pool]; 2787 } 2788 2789 txbuff = &tx_pool->tx_buff[index]; 2790 2791 for (j = 0; j < IBMVNIC_MAX_FRAGS_PER_CRQ; j++) { 2792 if (!txbuff->data_dma[j]) 2793 continue; 2794 2795 txbuff->data_dma[j] = 0; 2796 } 2797 /* if sub_crq was sent indirectly */ 2798 first = &txbuff->indir_arr[0].generic.first; 2799 if (*first == IBMVNIC_CRQ_CMD) { 2800 dma_unmap_single(dev, txbuff->indir_dma, 2801 sizeof(txbuff->indir_arr), 2802 DMA_TO_DEVICE); 2803 *first = 0; 2804 } 2805 2806 if (txbuff->last_frag) { 2807 dev_kfree_skb_any(txbuff->skb); 2808 txbuff->skb = NULL; 2809 } 2810 2811 num_entries += txbuff->num_entries; 2812 2813 tx_pool->free_map[tx_pool->producer_index] = index; 2814 tx_pool->producer_index = 2815 (tx_pool->producer_index + 1) % 2816 tx_pool->num_buffers; 2817 } 2818 /* remove tx_comp scrq*/ 2819 next->tx_comp.first = 0; 2820 2821 if (atomic_sub_return(num_entries, &scrq->used) <= 2822 (adapter->req_tx_entries_per_subcrq / 2) && 2823 __netif_subqueue_stopped(adapter->netdev, 2824 scrq->pool_index)) { 2825 netif_wake_subqueue(adapter->netdev, scrq->pool_index); 2826 netdev_dbg(adapter->netdev, "Started queue %d\n", 2827 scrq->pool_index); 2828 } 2829 } 2830 2831 enable_scrq_irq(adapter, scrq); 2832 2833 if (pending_scrq(adapter, scrq)) { 2834 disable_scrq_irq(adapter, scrq); 2835 goto restart_loop; 2836 } 2837 2838 return 0; 2839 } 2840 2841 static irqreturn_t ibmvnic_interrupt_tx(int irq, void *instance) 2842 { 2843 struct ibmvnic_sub_crq_queue *scrq = instance; 2844 struct ibmvnic_adapter *adapter = scrq->adapter; 2845 2846 disable_scrq_irq(adapter, scrq); 2847 ibmvnic_complete_tx(adapter, scrq); 2848 2849 return IRQ_HANDLED; 2850 } 2851 2852 static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance) 2853 { 2854 struct ibmvnic_sub_crq_queue *scrq = instance; 2855 struct ibmvnic_adapter *adapter = scrq->adapter; 2856 2857 /* When booting a kdump kernel we can hit pending interrupts 2858 * prior to completing driver initialization. 
2859 */ 2860 if (unlikely(adapter->state != VNIC_OPEN)) 2861 return IRQ_NONE; 2862 2863 adapter->rx_stats_buffers[scrq->scrq_num].interrupts++; 2864 2865 if (napi_schedule_prep(&adapter->napi[scrq->scrq_num])) { 2866 disable_scrq_irq(adapter, scrq); 2867 __napi_schedule(&adapter->napi[scrq->scrq_num]); 2868 } 2869 2870 return IRQ_HANDLED; 2871 } 2872 2873 static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter) 2874 { 2875 struct device *dev = &adapter->vdev->dev; 2876 struct ibmvnic_sub_crq_queue *scrq; 2877 int i = 0, j = 0; 2878 int rc = 0; 2879 2880 for (i = 0; i < adapter->req_tx_queues; i++) { 2881 netdev_dbg(adapter->netdev, "Initializing tx_scrq[%d] irq\n", 2882 i); 2883 scrq = adapter->tx_scrq[i]; 2884 scrq->irq = irq_create_mapping(NULL, scrq->hw_irq); 2885 2886 if (!scrq->irq) { 2887 rc = -EINVAL; 2888 dev_err(dev, "Error mapping irq\n"); 2889 goto req_tx_irq_failed; 2890 } 2891 2892 rc = request_irq(scrq->irq, ibmvnic_interrupt_tx, 2893 0, "ibmvnic_tx", scrq); 2894 2895 if (rc) { 2896 dev_err(dev, "Couldn't register tx irq 0x%x. rc=%d\n", 2897 scrq->irq, rc); 2898 irq_dispose_mapping(scrq->irq); 2899 goto req_tx_irq_failed; 2900 } 2901 } 2902 2903 for (i = 0; i < adapter->req_rx_queues; i++) { 2904 netdev_dbg(adapter->netdev, "Initializing rx_scrq[%d] irq\n", 2905 i); 2906 scrq = adapter->rx_scrq[i]; 2907 scrq->irq = irq_create_mapping(NULL, scrq->hw_irq); 2908 if (!scrq->irq) { 2909 rc = -EINVAL; 2910 dev_err(dev, "Error mapping irq\n"); 2911 goto req_rx_irq_failed; 2912 } 2913 rc = request_irq(scrq->irq, ibmvnic_interrupt_rx, 2914 0, "ibmvnic_rx", scrq); 2915 if (rc) { 2916 dev_err(dev, "Couldn't register rx irq 0x%x. rc=%d\n", 2917 scrq->irq, rc); 2918 irq_dispose_mapping(scrq->irq); 2919 goto req_rx_irq_failed; 2920 } 2921 } 2922 return rc; 2923 2924 req_rx_irq_failed: 2925 for (j = 0; j < i; j++) { 2926 free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]); 2927 irq_dispose_mapping(adapter->rx_scrq[j]->irq); 2928 } 2929 i = adapter->req_tx_queues; 2930 req_tx_irq_failed: 2931 for (j = 0; j < i; j++) { 2932 free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]); 2933 irq_dispose_mapping(adapter->tx_scrq[j]->irq); 2934 } 2935 release_sub_crqs(adapter, 1); 2936 return rc; 2937 } 2938 2939 static int init_sub_crqs(struct ibmvnic_adapter *adapter) 2940 { 2941 struct device *dev = &adapter->vdev->dev; 2942 struct ibmvnic_sub_crq_queue **allqueues; 2943 int registered_queues = 0; 2944 int total_queues; 2945 int more = 0; 2946 int i; 2947 2948 total_queues = adapter->req_tx_queues + adapter->req_rx_queues; 2949 2950 allqueues = kcalloc(total_queues, sizeof(*allqueues), GFP_KERNEL); 2951 if (!allqueues) 2952 return -1; 2953 2954 for (i = 0; i < total_queues; i++) { 2955 allqueues[i] = init_sub_crq_queue(adapter); 2956 if (!allqueues[i]) { 2957 dev_warn(dev, "Couldn't allocate all sub-crqs\n"); 2958 break; 2959 } 2960 registered_queues++; 2961 } 2962 2963 /* Make sure we were able to register the minimum number of queues */ 2964 if (registered_queues < 2965 adapter->min_tx_queues + adapter->min_rx_queues) { 2966 dev_err(dev, "Fatal: Couldn't init min number of sub-crqs\n"); 2967 goto tx_failed; 2968 } 2969 2970 /* Distribute the shortfall from queues that failed to allocate */ 2971 for (i = 0; i < total_queues - registered_queues + more; i++) { 2972 netdev_dbg(adapter->netdev, "Reducing number of queues\n"); 2973 switch (i % 3) { 2974 case 0: 2975 if (adapter->req_rx_queues > adapter->min_rx_queues) 2976 adapter->req_rx_queues--; 2977 else 2978 more++; 2979 break; 2980 case 1: 2981 if
(adapter->req_tx_queues > adapter->min_tx_queues) 2982 adapter->req_tx_queues--; 2983 else 2984 more++; 2985 break; 2986 } 2987 } 2988 2989 adapter->tx_scrq = kcalloc(adapter->req_tx_queues, 2990 sizeof(*adapter->tx_scrq), GFP_KERNEL); 2991 if (!adapter->tx_scrq) 2992 goto tx_failed; 2993 2994 for (i = 0; i < adapter->req_tx_queues; i++) { 2995 adapter->tx_scrq[i] = allqueues[i]; 2996 adapter->tx_scrq[i]->pool_index = i; 2997 adapter->num_active_tx_scrqs++; 2998 } 2999 3000 adapter->rx_scrq = kcalloc(adapter->req_rx_queues, 3001 sizeof(*adapter->rx_scrq), GFP_KERNEL); 3002 if (!adapter->rx_scrq) 3003 goto rx_failed; 3004 3005 for (i = 0; i < adapter->req_rx_queues; i++) { 3006 adapter->rx_scrq[i] = allqueues[i + adapter->req_tx_queues]; 3007 adapter->rx_scrq[i]->scrq_num = i; 3008 adapter->num_active_rx_scrqs++; 3009 } 3010 3011 kfree(allqueues); 3012 return 0; 3013 3014 rx_failed: 3015 kfree(adapter->tx_scrq); 3016 adapter->tx_scrq = NULL; 3017 tx_failed: 3018 for (i = 0; i < registered_queues; i++) 3019 release_sub_crq_queue(adapter, allqueues[i], 1); 3020 kfree(allqueues); 3021 return -1; 3022 } 3023 3024 static void ibmvnic_send_req_caps(struct ibmvnic_adapter *adapter, int retry) 3025 { 3026 struct device *dev = &adapter->vdev->dev; 3027 union ibmvnic_crq crq; 3028 int max_entries; 3029 3030 if (!retry) { 3031 /* Sub-CRQ entries are 32 byte long */ 3032 int entries_page = 4 * PAGE_SIZE / (sizeof(u64) * 4); 3033 3034 if (adapter->min_tx_entries_per_subcrq > entries_page || 3035 adapter->min_rx_add_entries_per_subcrq > entries_page) { 3036 dev_err(dev, "Fatal, invalid entries per sub-crq\n"); 3037 return; 3038 } 3039 3040 if (adapter->desired.mtu) 3041 adapter->req_mtu = adapter->desired.mtu; 3042 else 3043 adapter->req_mtu = adapter->netdev->mtu + ETH_HLEN; 3044 3045 if (!adapter->desired.tx_entries) 3046 adapter->desired.tx_entries = 3047 adapter->max_tx_entries_per_subcrq; 3048 if (!adapter->desired.rx_entries) 3049 adapter->desired.rx_entries = 3050 adapter->max_rx_add_entries_per_subcrq; 3051 3052 max_entries = IBMVNIC_MAX_LTB_SIZE / 3053 (adapter->req_mtu + IBMVNIC_BUFFER_HLEN); 3054 3055 if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) * 3056 adapter->desired.tx_entries > IBMVNIC_MAX_LTB_SIZE) { 3057 adapter->desired.tx_entries = max_entries; 3058 } 3059 3060 if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) * 3061 adapter->desired.rx_entries > IBMVNIC_MAX_LTB_SIZE) { 3062 adapter->desired.rx_entries = max_entries; 3063 } 3064 3065 if (adapter->desired.tx_entries) 3066 adapter->req_tx_entries_per_subcrq = 3067 adapter->desired.tx_entries; 3068 else 3069 adapter->req_tx_entries_per_subcrq = 3070 adapter->max_tx_entries_per_subcrq; 3071 3072 if (adapter->desired.rx_entries) 3073 adapter->req_rx_add_entries_per_subcrq = 3074 adapter->desired.rx_entries; 3075 else 3076 adapter->req_rx_add_entries_per_subcrq = 3077 adapter->max_rx_add_entries_per_subcrq; 3078 3079 if (adapter->desired.tx_queues) 3080 adapter->req_tx_queues = 3081 adapter->desired.tx_queues; 3082 else 3083 adapter->req_tx_queues = 3084 adapter->opt_tx_comp_sub_queues; 3085 3086 if (adapter->desired.rx_queues) 3087 adapter->req_rx_queues = 3088 adapter->desired.rx_queues; 3089 else 3090 adapter->req_rx_queues = 3091 adapter->opt_rx_comp_queues; 3092 3093 adapter->req_rx_add_queues = adapter->max_rx_add_queues; 3094 } 3095 3096 memset(&crq, 0, sizeof(crq)); 3097 crq.request_capability.first = IBMVNIC_CRQ_CMD; 3098 crq.request_capability.cmd = REQUEST_CAPABILITY; 3099 3100 crq.request_capability.capability = 
cpu_to_be16(REQ_TX_QUEUES); 3101 crq.request_capability.number = cpu_to_be64(adapter->req_tx_queues); 3102 atomic_inc(&adapter->running_cap_crqs); 3103 ibmvnic_send_crq(adapter, &crq); 3104 3105 crq.request_capability.capability = cpu_to_be16(REQ_RX_QUEUES); 3106 crq.request_capability.number = cpu_to_be64(adapter->req_rx_queues); 3107 atomic_inc(&adapter->running_cap_crqs); 3108 ibmvnic_send_crq(adapter, &crq); 3109 3110 crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_QUEUES); 3111 crq.request_capability.number = cpu_to_be64(adapter->req_rx_add_queues); 3112 atomic_inc(&adapter->running_cap_crqs); 3113 ibmvnic_send_crq(adapter, &crq); 3114 3115 crq.request_capability.capability = 3116 cpu_to_be16(REQ_TX_ENTRIES_PER_SUBCRQ); 3117 crq.request_capability.number = 3118 cpu_to_be64(adapter->req_tx_entries_per_subcrq); 3119 atomic_inc(&adapter->running_cap_crqs); 3120 ibmvnic_send_crq(adapter, &crq); 3121 3122 crq.request_capability.capability = 3123 cpu_to_be16(REQ_RX_ADD_ENTRIES_PER_SUBCRQ); 3124 crq.request_capability.number = 3125 cpu_to_be64(adapter->req_rx_add_entries_per_subcrq); 3126 atomic_inc(&adapter->running_cap_crqs); 3127 ibmvnic_send_crq(adapter, &crq); 3128 3129 crq.request_capability.capability = cpu_to_be16(REQ_MTU); 3130 crq.request_capability.number = cpu_to_be64(adapter->req_mtu); 3131 atomic_inc(&adapter->running_cap_crqs); 3132 ibmvnic_send_crq(adapter, &crq); 3133 3134 if (adapter->netdev->flags & IFF_PROMISC) { 3135 if (adapter->promisc_supported) { 3136 crq.request_capability.capability = 3137 cpu_to_be16(PROMISC_REQUESTED); 3138 crq.request_capability.number = cpu_to_be64(1); 3139 atomic_inc(&adapter->running_cap_crqs); 3140 ibmvnic_send_crq(adapter, &crq); 3141 } 3142 } else { 3143 crq.request_capability.capability = 3144 cpu_to_be16(PROMISC_REQUESTED); 3145 crq.request_capability.number = cpu_to_be64(0); 3146 atomic_inc(&adapter->running_cap_crqs); 3147 ibmvnic_send_crq(adapter, &crq); 3148 } 3149 } 3150 3151 static int pending_scrq(struct ibmvnic_adapter *adapter, 3152 struct ibmvnic_sub_crq_queue *scrq) 3153 { 3154 union sub_crq *entry = &scrq->msgs[scrq->cur]; 3155 3156 if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP) 3157 return 1; 3158 else 3159 return 0; 3160 } 3161 3162 static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *adapter, 3163 struct ibmvnic_sub_crq_queue *scrq) 3164 { 3165 union sub_crq *entry; 3166 unsigned long flags; 3167 3168 spin_lock_irqsave(&scrq->lock, flags); 3169 entry = &scrq->msgs[scrq->cur]; 3170 if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP) { 3171 if (++scrq->cur == scrq->size) 3172 scrq->cur = 0; 3173 } else { 3174 entry = NULL; 3175 } 3176 spin_unlock_irqrestore(&scrq->lock, flags); 3177 3178 return entry; 3179 } 3180 3181 static union ibmvnic_crq *ibmvnic_next_crq(struct ibmvnic_adapter *adapter) 3182 { 3183 struct ibmvnic_crq_queue *queue = &adapter->crq; 3184 union ibmvnic_crq *crq; 3185 3186 crq = &queue->msgs[queue->cur]; 3187 if (crq->generic.first & IBMVNIC_CRQ_CMD_RSP) { 3188 if (++queue->cur == queue->size) 3189 queue->cur = 0; 3190 } else { 3191 crq = NULL; 3192 } 3193 3194 return crq; 3195 } 3196 3197 static void print_subcrq_error(struct device *dev, int rc, const char *func) 3198 { 3199 switch (rc) { 3200 case H_PARAMETER: 3201 dev_warn_ratelimited(dev, 3202 "%s failed: Send request is malformed or adapter failover pending. (rc=%d)\n", 3203 func, rc); 3204 break; 3205 case H_CLOSED: 3206 dev_warn_ratelimited(dev, 3207 "%s failed: Backing queue closed. Adapter is down or failover pending. 
(rc=%d)\n", 3208 func, rc); 3209 break; 3210 default: 3211 dev_err_ratelimited(dev, "%s failed: (rc=%d)\n", func, rc); 3212 break; 3213 } 3214 } 3215 3216 static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle, 3217 union sub_crq *sub_crq) 3218 { 3219 unsigned int ua = adapter->vdev->unit_address; 3220 struct device *dev = &adapter->vdev->dev; 3221 u64 *u64_crq = (u64 *)sub_crq; 3222 int rc; 3223 3224 netdev_dbg(adapter->netdev, 3225 "Sending sCRQ %016lx: %016lx %016lx %016lx %016lx\n", 3226 (unsigned long int)cpu_to_be64(remote_handle), 3227 (unsigned long int)cpu_to_be64(u64_crq[0]), 3228 (unsigned long int)cpu_to_be64(u64_crq[1]), 3229 (unsigned long int)cpu_to_be64(u64_crq[2]), 3230 (unsigned long int)cpu_to_be64(u64_crq[3])); 3231 3232 /* Make sure the hypervisor sees the complete request */ 3233 mb(); 3234 3235 rc = plpar_hcall_norets(H_SEND_SUB_CRQ, ua, 3236 cpu_to_be64(remote_handle), 3237 cpu_to_be64(u64_crq[0]), 3238 cpu_to_be64(u64_crq[1]), 3239 cpu_to_be64(u64_crq[2]), 3240 cpu_to_be64(u64_crq[3])); 3241 3242 if (rc) 3243 print_subcrq_error(dev, rc, __func__); 3244 3245 return rc; 3246 } 3247 3248 static int send_subcrq_indirect(struct ibmvnic_adapter *adapter, 3249 u64 remote_handle, u64 ioba, u64 num_entries) 3250 { 3251 unsigned int ua = adapter->vdev->unit_address; 3252 struct device *dev = &adapter->vdev->dev; 3253 int rc; 3254 3255 /* Make sure the hypervisor sees the complete request */ 3256 mb(); 3257 rc = plpar_hcall_norets(H_SEND_SUB_CRQ_INDIRECT, ua, 3258 cpu_to_be64(remote_handle), 3259 ioba, num_entries); 3260 3261 if (rc) 3262 print_subcrq_error(dev, rc, __func__); 3263 3264 return rc; 3265 } 3266 3267 static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter, 3268 union ibmvnic_crq *crq) 3269 { 3270 unsigned int ua = adapter->vdev->unit_address; 3271 struct device *dev = &adapter->vdev->dev; 3272 u64 *u64_crq = (u64 *)crq; 3273 int rc; 3274 3275 netdev_dbg(adapter->netdev, "Sending CRQ: %016lx %016lx\n", 3276 (unsigned long int)cpu_to_be64(u64_crq[0]), 3277 (unsigned long int)cpu_to_be64(u64_crq[1])); 3278 3279 if (!adapter->crq.active && 3280 crq->generic.first != IBMVNIC_CRQ_INIT_CMD) { 3281 dev_warn(dev, "Invalid request detected while CRQ is inactive, possible device state change during reset\n"); 3282 return -EINVAL; 3283 } 3284 3285 /* Make sure the hypervisor sees the complete request */ 3286 mb(); 3287 3288 rc = plpar_hcall_norets(H_SEND_CRQ, ua, 3289 cpu_to_be64(u64_crq[0]), 3290 cpu_to_be64(u64_crq[1])); 3291 3292 if (rc) { 3293 if (rc == H_CLOSED) { 3294 dev_warn(dev, "CRQ Queue closed\n"); 3295 if (adapter->resetting) 3296 ibmvnic_reset(adapter, VNIC_RESET_FATAL); 3297 } 3298 3299 dev_warn(dev, "Send error (rc=%d)\n", rc); 3300 } 3301 3302 return rc; 3303 } 3304 3305 static int ibmvnic_send_crq_init(struct ibmvnic_adapter *adapter) 3306 { 3307 union ibmvnic_crq crq; 3308 3309 memset(&crq, 0, sizeof(crq)); 3310 crq.generic.first = IBMVNIC_CRQ_INIT_CMD; 3311 crq.generic.cmd = IBMVNIC_CRQ_INIT; 3312 netdev_dbg(adapter->netdev, "Sending CRQ init\n"); 3313 3314 return ibmvnic_send_crq(adapter, &crq); 3315 } 3316 3317 static int send_version_xchg(struct ibmvnic_adapter *adapter) 3318 { 3319 union ibmvnic_crq crq; 3320 3321 memset(&crq, 0, sizeof(crq)); 3322 crq.version_exchange.first = IBMVNIC_CRQ_CMD; 3323 crq.version_exchange.cmd = VERSION_EXCHANGE; 3324 crq.version_exchange.version = cpu_to_be16(ibmvnic_version); 3325 3326 return ibmvnic_send_crq(adapter, &crq); 3327 } 3328 3329 struct vnic_login_client_data { 3330 u8 type; 3331 
__be16 len; 3332 char name[]; 3333 } __packed; 3334 3335 static int vnic_client_data_len(struct ibmvnic_adapter *adapter) 3336 { 3337 int len; 3338 3339 /* Calculate the amount of buffer space needed for the 3340 * vnic client data in the login buffer. There are four entries, 3341 * OS name, LPAR name, device name, and a null last entry. 3342 */ 3343 len = 4 * sizeof(struct vnic_login_client_data); 3344 len += 6; /* "Linux" plus NULL */ 3345 len += strlen(utsname()->nodename) + 1; 3346 len += strlen(adapter->netdev->name) + 1; 3347 3348 return len; 3349 } 3350 3351 static void vnic_add_client_data(struct ibmvnic_adapter *adapter, 3352 struct vnic_login_client_data *vlcd) 3353 { 3354 const char *os_name = "Linux"; 3355 int len; 3356 3357 /* Type 1 - LPAR OS */ 3358 vlcd->type = 1; 3359 len = strlen(os_name) + 1; 3360 vlcd->len = cpu_to_be16(len); 3361 strncpy(vlcd->name, os_name, len); 3362 vlcd = (struct vnic_login_client_data *)(vlcd->name + len); 3363 3364 /* Type 2 - LPAR name */ 3365 vlcd->type = 2; 3366 len = strlen(utsname()->nodename) + 1; 3367 vlcd->len = cpu_to_be16(len); 3368 strncpy(vlcd->name, utsname()->nodename, len); 3369 vlcd = (struct vnic_login_client_data *)(vlcd->name + len); 3370 3371 /* Type 3 - device name */ 3372 vlcd->type = 3; 3373 len = strlen(adapter->netdev->name) + 1; 3374 vlcd->len = cpu_to_be16(len); 3375 strncpy(vlcd->name, adapter->netdev->name, len); 3376 } 3377 3378 static int send_login(struct ibmvnic_adapter *adapter) 3379 { 3380 struct ibmvnic_login_rsp_buffer *login_rsp_buffer; 3381 struct ibmvnic_login_buffer *login_buffer; 3382 struct device *dev = &adapter->vdev->dev; 3383 dma_addr_t rsp_buffer_token; 3384 dma_addr_t buffer_token; 3385 size_t rsp_buffer_size; 3386 union ibmvnic_crq crq; 3387 size_t buffer_size; 3388 __be64 *tx_list_p; 3389 __be64 *rx_list_p; 3390 int client_data_len; 3391 struct vnic_login_client_data *vlcd; 3392 int i; 3393 3394 if (!adapter->tx_scrq || !adapter->rx_scrq) { 3395 netdev_err(adapter->netdev, 3396 "RX or TX queues are not allocated, device login failed\n"); 3397 return -1; 3398 } 3399 3400 release_login_rsp_buffer(adapter); 3401 client_data_len = vnic_client_data_len(adapter); 3402 3403 buffer_size = 3404 sizeof(struct ibmvnic_login_buffer) + 3405 sizeof(u64) * (adapter->req_tx_queues + adapter->req_rx_queues) + 3406 client_data_len; 3407 3408 login_buffer = kzalloc(buffer_size, GFP_ATOMIC); 3409 if (!login_buffer) 3410 goto buf_alloc_failed; 3411 3412 buffer_token = dma_map_single(dev, login_buffer, buffer_size, 3413 DMA_TO_DEVICE); 3414 if (dma_mapping_error(dev, buffer_token)) { 3415 dev_err(dev, "Couldn't map login buffer\n"); 3416 goto buf_map_failed; 3417 } 3418 3419 rsp_buffer_size = sizeof(struct ibmvnic_login_rsp_buffer) + 3420 sizeof(u64) * adapter->req_tx_queues + 3421 sizeof(u64) * adapter->req_rx_queues + 3422 sizeof(u64) * adapter->req_rx_queues + 3423 sizeof(u8) * IBMVNIC_TX_DESC_VERSIONS; 3424 3425 login_rsp_buffer = kmalloc(rsp_buffer_size, GFP_ATOMIC); 3426 if (!login_rsp_buffer) 3427 goto buf_rsp_alloc_failed; 3428 3429 rsp_buffer_token = dma_map_single(dev, login_rsp_buffer, 3430 rsp_buffer_size, DMA_FROM_DEVICE); 3431 if (dma_mapping_error(dev, rsp_buffer_token)) { 3432 dev_err(dev, "Couldn't map login rsp buffer\n"); 3433 goto buf_rsp_map_failed; 3434 } 3435 3436 adapter->login_buf = login_buffer; 3437 adapter->login_buf_token = buffer_token; 3438 adapter->login_buf_sz = buffer_size; 3439 adapter->login_rsp_buf = login_rsp_buffer; 3440 adapter->login_rsp_buf_token = rsp_buffer_token; 3441 
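/* A sketch of the login buffer layout assembled below; the offsets
 * written into the header must match this arrangement:
 *
 *   struct ibmvnic_login_buffer           fixed header
 *   __be64 tx_subcrq_handles[req_tx_queues]
 *   __be64 rx_subcrq_handles[req_rx_queues]
 *   struct vnic_login_client_data         OS/LPAR/device name entries
 */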
adapter->login_rsp_buf_sz = rsp_buffer_size; 3442 3443 login_buffer->len = cpu_to_be32(buffer_size); 3444 login_buffer->version = cpu_to_be32(INITIAL_VERSION_LB); 3445 login_buffer->num_txcomp_subcrqs = cpu_to_be32(adapter->req_tx_queues); 3446 login_buffer->off_txcomp_subcrqs = 3447 cpu_to_be32(sizeof(struct ibmvnic_login_buffer)); 3448 login_buffer->num_rxcomp_subcrqs = cpu_to_be32(adapter->req_rx_queues); 3449 login_buffer->off_rxcomp_subcrqs = 3450 cpu_to_be32(sizeof(struct ibmvnic_login_buffer) + 3451 sizeof(u64) * adapter->req_tx_queues); 3452 login_buffer->login_rsp_ioba = cpu_to_be32(rsp_buffer_token); 3453 login_buffer->login_rsp_len = cpu_to_be32(rsp_buffer_size); 3454 3455 tx_list_p = (__be64 *)((char *)login_buffer + 3456 sizeof(struct ibmvnic_login_buffer)); 3457 rx_list_p = (__be64 *)((char *)login_buffer + 3458 sizeof(struct ibmvnic_login_buffer) + 3459 sizeof(u64) * adapter->req_tx_queues); 3460 3461 for (i = 0; i < adapter->req_tx_queues; i++) { 3462 if (adapter->tx_scrq[i]) { 3463 tx_list_p[i] = cpu_to_be64(adapter->tx_scrq[i]-> 3464 crq_num); 3465 } 3466 } 3467 3468 for (i = 0; i < adapter->req_rx_queues; i++) { 3469 if (adapter->rx_scrq[i]) { 3470 rx_list_p[i] = cpu_to_be64(adapter->rx_scrq[i]-> 3471 crq_num); 3472 } 3473 } 3474 3475 /* Insert vNIC login client data */ 3476 vlcd = (struct vnic_login_client_data *) 3477 ((char *)rx_list_p + (sizeof(u64) * adapter->req_rx_queues)); 3478 login_buffer->client_data_offset = 3479 cpu_to_be32((char *)vlcd - (char *)login_buffer); 3480 login_buffer->client_data_len = cpu_to_be32(client_data_len); 3481 3482 vnic_add_client_data(adapter, vlcd); 3483 3484 netdev_dbg(adapter->netdev, "Login Buffer:\n"); 3485 for (i = 0; i < (adapter->login_buf_sz - 1) / 8 + 1; i++) { 3486 netdev_dbg(adapter->netdev, "%016lx\n", 3487 ((unsigned long int *)(adapter->login_buf))[i]); 3488 } 3489 3490 memset(&crq, 0, sizeof(crq)); 3491 crq.login.first = IBMVNIC_CRQ_CMD; 3492 crq.login.cmd = LOGIN; 3493 crq.login.ioba = cpu_to_be32(buffer_token); 3494 crq.login.len = cpu_to_be32(buffer_size); 3495 ibmvnic_send_crq(adapter, &crq); 3496 3497 return 0; 3498 3499 buf_rsp_map_failed: 3500 kfree(login_rsp_buffer); 3501 buf_rsp_alloc_failed: 3502 dma_unmap_single(dev, buffer_token, buffer_size, DMA_TO_DEVICE); 3503 buf_map_failed: 3504 kfree(login_buffer); 3505 buf_alloc_failed: 3506 return -1; 3507 } 3508 3509 static int send_request_map(struct ibmvnic_adapter *adapter, dma_addr_t addr, 3510 u32 len, u8 map_id) 3511 { 3512 union ibmvnic_crq crq; 3513 3514 memset(&crq, 0, sizeof(crq)); 3515 crq.request_map.first = IBMVNIC_CRQ_CMD; 3516 crq.request_map.cmd = REQUEST_MAP; 3517 crq.request_map.map_id = map_id; 3518 crq.request_map.ioba = cpu_to_be32(addr); 3519 crq.request_map.len = cpu_to_be32(len); 3520 return ibmvnic_send_crq(adapter, &crq); 3521 } 3522 3523 static int send_request_unmap(struct ibmvnic_adapter *adapter, u8 map_id) 3524 { 3525 union ibmvnic_crq crq; 3526 3527 memset(&crq, 0, sizeof(crq)); 3528 crq.request_unmap.first = IBMVNIC_CRQ_CMD; 3529 crq.request_unmap.cmd = REQUEST_UNMAP; 3530 crq.request_unmap.map_id = map_id; 3531 return ibmvnic_send_crq(adapter, &crq); 3532 } 3533 3534 static void send_map_query(struct ibmvnic_adapter *adapter) 3535 { 3536 union ibmvnic_crq crq; 3537 3538 memset(&crq, 0, sizeof(crq)); 3539 crq.query_map.first = IBMVNIC_CRQ_CMD; 3540 crq.query_map.cmd = QUERY_MAP; 3541 ibmvnic_send_crq(adapter, &crq); 3542 } 3543 3544 /* Send a series of CRQs requesting various capabilities of the VNIC server */ 3545 static void 
send_cap_queries(struct ibmvnic_adapter *adapter) 3546 { 3547 union ibmvnic_crq crq; 3548 3549 atomic_set(&adapter->running_cap_crqs, 0); 3550 memset(&crq, 0, sizeof(crq)); 3551 crq.query_capability.first = IBMVNIC_CRQ_CMD; 3552 crq.query_capability.cmd = QUERY_CAPABILITY; 3553 3554 crq.query_capability.capability = cpu_to_be16(MIN_TX_QUEUES); 3555 atomic_inc(&adapter->running_cap_crqs); 3556 ibmvnic_send_crq(adapter, &crq); 3557 3558 crq.query_capability.capability = cpu_to_be16(MIN_RX_QUEUES); 3559 atomic_inc(&adapter->running_cap_crqs); 3560 ibmvnic_send_crq(adapter, &crq); 3561 3562 crq.query_capability.capability = cpu_to_be16(MIN_RX_ADD_QUEUES); 3563 atomic_inc(&adapter->running_cap_crqs); 3564 ibmvnic_send_crq(adapter, &crq); 3565 3566 crq.query_capability.capability = cpu_to_be16(MAX_TX_QUEUES); 3567 atomic_inc(&adapter->running_cap_crqs); 3568 ibmvnic_send_crq(adapter, &crq); 3569 3570 crq.query_capability.capability = cpu_to_be16(MAX_RX_QUEUES); 3571 atomic_inc(&adapter->running_cap_crqs); 3572 ibmvnic_send_crq(adapter, &crq); 3573 3574 crq.query_capability.capability = cpu_to_be16(MAX_RX_ADD_QUEUES); 3575 atomic_inc(&adapter->running_cap_crqs); 3576 ibmvnic_send_crq(adapter, &crq); 3577 3578 crq.query_capability.capability = 3579 cpu_to_be16(MIN_TX_ENTRIES_PER_SUBCRQ); 3580 atomic_inc(&adapter->running_cap_crqs); 3581 ibmvnic_send_crq(adapter, &crq); 3582 3583 crq.query_capability.capability = 3584 cpu_to_be16(MIN_RX_ADD_ENTRIES_PER_SUBCRQ); 3585 atomic_inc(&adapter->running_cap_crqs); 3586 ibmvnic_send_crq(adapter, &crq); 3587 3588 crq.query_capability.capability = 3589 cpu_to_be16(MAX_TX_ENTRIES_PER_SUBCRQ); 3590 atomic_inc(&adapter->running_cap_crqs); 3591 ibmvnic_send_crq(adapter, &crq); 3592 3593 crq.query_capability.capability = 3594 cpu_to_be16(MAX_RX_ADD_ENTRIES_PER_SUBCRQ); 3595 atomic_inc(&adapter->running_cap_crqs); 3596 ibmvnic_send_crq(adapter, &crq); 3597 3598 crq.query_capability.capability = cpu_to_be16(TCP_IP_OFFLOAD); 3599 atomic_inc(&adapter->running_cap_crqs); 3600 ibmvnic_send_crq(adapter, &crq); 3601 3602 crq.query_capability.capability = cpu_to_be16(PROMISC_SUPPORTED); 3603 atomic_inc(&adapter->running_cap_crqs); 3604 ibmvnic_send_crq(adapter, &crq); 3605 3606 crq.query_capability.capability = cpu_to_be16(MIN_MTU); 3607 atomic_inc(&adapter->running_cap_crqs); 3608 ibmvnic_send_crq(adapter, &crq); 3609 3610 crq.query_capability.capability = cpu_to_be16(MAX_MTU); 3611 atomic_inc(&adapter->running_cap_crqs); 3612 ibmvnic_send_crq(adapter, &crq); 3613 3614 crq.query_capability.capability = cpu_to_be16(MAX_MULTICAST_FILTERS); 3615 atomic_inc(&adapter->running_cap_crqs); 3616 ibmvnic_send_crq(adapter, &crq); 3617 3618 crq.query_capability.capability = cpu_to_be16(VLAN_HEADER_INSERTION); 3619 atomic_inc(&adapter->running_cap_crqs); 3620 ibmvnic_send_crq(adapter, &crq); 3621 3622 crq.query_capability.capability = cpu_to_be16(RX_VLAN_HEADER_INSERTION); 3623 atomic_inc(&adapter->running_cap_crqs); 3624 ibmvnic_send_crq(adapter, &crq); 3625 3626 crq.query_capability.capability = cpu_to_be16(MAX_TX_SG_ENTRIES); 3627 atomic_inc(&adapter->running_cap_crqs); 3628 ibmvnic_send_crq(adapter, &crq); 3629 3630 crq.query_capability.capability = cpu_to_be16(RX_SG_SUPPORTED); 3631 atomic_inc(&adapter->running_cap_crqs); 3632 ibmvnic_send_crq(adapter, &crq); 3633 3634 crq.query_capability.capability = cpu_to_be16(OPT_TX_COMP_SUB_QUEUES); 3635 atomic_inc(&adapter->running_cap_crqs); 3636 ibmvnic_send_crq(adapter, &crq); 3637 3638 crq.query_capability.capability = 
cpu_to_be16(OPT_RX_COMP_QUEUES); 3639 atomic_inc(&adapter->running_cap_crqs); 3640 ibmvnic_send_crq(adapter, &crq); 3641 3642 crq.query_capability.capability = 3643 cpu_to_be16(OPT_RX_BUFADD_Q_PER_RX_COMP_Q); 3644 atomic_inc(&adapter->running_cap_crqs); 3645 ibmvnic_send_crq(adapter, &crq); 3646 3647 crq.query_capability.capability = 3648 cpu_to_be16(OPT_TX_ENTRIES_PER_SUBCRQ); 3649 atomic_inc(&adapter->running_cap_crqs); 3650 ibmvnic_send_crq(adapter, &crq); 3651 3652 crq.query_capability.capability = 3653 cpu_to_be16(OPT_RXBA_ENTRIES_PER_SUBCRQ); 3654 atomic_inc(&adapter->running_cap_crqs); 3655 ibmvnic_send_crq(adapter, &crq); 3656 3657 crq.query_capability.capability = cpu_to_be16(TX_RX_DESC_REQ); 3658 atomic_inc(&adapter->running_cap_crqs); 3659 ibmvnic_send_crq(adapter, &crq); 3660 } 3661 3662 static void handle_vpd_size_rsp(union ibmvnic_crq *crq, 3663 struct ibmvnic_adapter *adapter) 3664 { 3665 struct device *dev = &adapter->vdev->dev; 3666 3667 if (crq->get_vpd_size_rsp.rc.code) { 3668 dev_err(dev, "Error retrieving VPD size, rc=%x\n", 3669 crq->get_vpd_size_rsp.rc.code); 3670 complete(&adapter->fw_done); 3671 return; 3672 } 3673 3674 adapter->vpd->len = be64_to_cpu(crq->get_vpd_size_rsp.len); 3675 complete(&adapter->fw_done); 3676 } 3677 3678 static void handle_vpd_rsp(union ibmvnic_crq *crq, 3679 struct ibmvnic_adapter *adapter) 3680 { 3681 struct device *dev = &adapter->vdev->dev; 3682 unsigned char *substr = NULL; 3683 u8 fw_level_len = 0; 3684 3685 memset(adapter->fw_version, 0, 32); 3686 3687 dma_unmap_single(dev, adapter->vpd->dma_addr, adapter->vpd->len, 3688 DMA_FROM_DEVICE); 3689 3690 if (crq->get_vpd_rsp.rc.code) { 3691 dev_err(dev, "Error retrieving VPD from device, rc=%x\n", 3692 crq->get_vpd_rsp.rc.code); 3693 goto complete; 3694 } 3695 3696 /* get the position of the firmware version info 3697 * located after the ASCII 'RM' substring in the buffer 3698 */ 3699 substr = strnstr(adapter->vpd->buff, "RM", adapter->vpd->len); 3700 if (!substr) { 3701 dev_info(dev, "Warning - No FW level has been provided in the VPD buffer by the VIOS Server\n"); 3702 goto complete; 3703 } 3704 3705 /* get length of firmware level ASCII substring */ 3706 if ((substr + 2) < (adapter->vpd->buff + adapter->vpd->len)) { 3707 fw_level_len = *(substr + 2); 3708 } else { 3709 dev_info(dev, "FW level length field extends past end of VPD buffer\n"); 3710 goto complete; 3711 } 3712 3713 /* copy firmware version string from vpd into adapter */ 3714 if ((substr + 3 + fw_level_len) < 3715 (adapter->vpd->buff + adapter->vpd->len)) { 3716 strncpy((char *)adapter->fw_version, substr + 3, fw_level_len); 3717 } else { 3718 dev_info(dev, "FW version string extends past end of VPD buffer\n"); 3719 } 3720 3721 complete: 3722 if (adapter->fw_version[0] == '\0') 3723 strncpy((char *)adapter->fw_version, "N/A", 3 * sizeof(char)); 3724 complete(&adapter->fw_done); 3725 } 3726 3727 static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter) 3728 { 3729 struct device *dev = &adapter->vdev->dev; 3730 struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf; 3731 union ibmvnic_crq crq; 3732 int i; 3733 3734 dma_unmap_single(dev, adapter->ip_offload_tok, 3735 sizeof(adapter->ip_offload_buf), DMA_FROM_DEVICE); 3736 3737 netdev_dbg(adapter->netdev, "Query IP Offload Buffer:\n"); 3738 for (i = 0; i < (sizeof(adapter->ip_offload_buf) - 1) / 8 + 1; i++) 3739 netdev_dbg(adapter->netdev, "%016lx\n", 3740 ((unsigned long int *)(buf))[i]); 3741 3742 netdev_dbg(adapter->netdev, "ipv4_chksum = %d\n", buf->ipv4_chksum); 3743
netdev_dbg(adapter->netdev, "ipv6_chksum = %d\n", buf->ipv6_chksum); 3744 netdev_dbg(adapter->netdev, "tcp_ipv4_chksum = %d\n", 3745 buf->tcp_ipv4_chksum); 3746 netdev_dbg(adapter->netdev, "tcp_ipv6_chksum = %d\n", 3747 buf->tcp_ipv6_chksum); 3748 netdev_dbg(adapter->netdev, "udp_ipv4_chksum = %d\n", 3749 buf->udp_ipv4_chksum); 3750 netdev_dbg(adapter->netdev, "udp_ipv6_chksum = %d\n", 3751 buf->udp_ipv6_chksum); 3752 netdev_dbg(adapter->netdev, "large_tx_ipv4 = %d\n", 3753 buf->large_tx_ipv4); 3754 netdev_dbg(adapter->netdev, "large_tx_ipv6 = %d\n", 3755 buf->large_tx_ipv6); 3756 netdev_dbg(adapter->netdev, "large_rx_ipv4 = %d\n", 3757 buf->large_rx_ipv4); 3758 netdev_dbg(adapter->netdev, "large_rx_ipv6 = %d\n", 3759 buf->large_rx_ipv6); 3760 netdev_dbg(adapter->netdev, "max_ipv4_hdr_sz = %d\n", 3761 buf->max_ipv4_header_size); 3762 netdev_dbg(adapter->netdev, "max_ipv6_hdr_sz = %d\n", 3763 buf->max_ipv6_header_size); 3764 netdev_dbg(adapter->netdev, "max_tcp_hdr_size = %d\n", 3765 buf->max_tcp_header_size); 3766 netdev_dbg(adapter->netdev, "max_udp_hdr_size = %d\n", 3767 buf->max_udp_header_size); 3768 netdev_dbg(adapter->netdev, "max_large_tx_size = %d\n", 3769 buf->max_large_tx_size); 3770 netdev_dbg(adapter->netdev, "max_large_rx_size = %d\n", 3771 buf->max_large_rx_size); 3772 netdev_dbg(adapter->netdev, "ipv6_ext_hdr = %d\n", 3773 buf->ipv6_extension_header); 3774 netdev_dbg(adapter->netdev, "tcp_pseudosum_req = %d\n", 3775 buf->tcp_pseudosum_req); 3776 netdev_dbg(adapter->netdev, "num_ipv6_ext_hd = %d\n", 3777 buf->num_ipv6_ext_headers); 3778 netdev_dbg(adapter->netdev, "off_ipv6_ext_hd = %d\n", 3779 buf->off_ipv6_ext_headers); 3780 3781 adapter->ip_offload_ctrl_tok = 3782 dma_map_single(dev, &adapter->ip_offload_ctrl, 3783 sizeof(adapter->ip_offload_ctrl), DMA_TO_DEVICE); 3784 3785 if (dma_mapping_error(dev, adapter->ip_offload_ctrl_tok)) { 3786 dev_err(dev, "Couldn't map ip offload control buffer\n"); 3787 return; 3788 } 3789 3790 adapter->ip_offload_ctrl.len = 3791 cpu_to_be32(sizeof(adapter->ip_offload_ctrl)); 3792 adapter->ip_offload_ctrl.version = cpu_to_be32(INITIAL_VERSION_IOB); 3793 adapter->ip_offload_ctrl.ipv4_chksum = buf->ipv4_chksum; 3794 adapter->ip_offload_ctrl.ipv6_chksum = buf->ipv6_chksum; 3795 adapter->ip_offload_ctrl.tcp_ipv4_chksum = buf->tcp_ipv4_chksum; 3796 adapter->ip_offload_ctrl.udp_ipv4_chksum = buf->udp_ipv4_chksum; 3797 adapter->ip_offload_ctrl.tcp_ipv6_chksum = buf->tcp_ipv6_chksum; 3798 adapter->ip_offload_ctrl.udp_ipv6_chksum = buf->udp_ipv6_chksum; 3799 adapter->ip_offload_ctrl.large_tx_ipv4 = buf->large_tx_ipv4; 3800 adapter->ip_offload_ctrl.large_tx_ipv6 = buf->large_tx_ipv6; 3801 3802 /* large_rx disabled for now, additional features needed */ 3803 adapter->ip_offload_ctrl.large_rx_ipv4 = 0; 3804 adapter->ip_offload_ctrl.large_rx_ipv6 = 0; 3805 3806 adapter->netdev->features = NETIF_F_SG | NETIF_F_GSO; 3807 3808 if (buf->tcp_ipv4_chksum || buf->udp_ipv4_chksum) 3809 adapter->netdev->features |= NETIF_F_IP_CSUM; 3810 3811 if (buf->tcp_ipv6_chksum || buf->udp_ipv6_chksum) 3812 adapter->netdev->features |= NETIF_F_IPV6_CSUM; 3813 3814 if ((adapter->netdev->features & 3815 (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))) 3816 adapter->netdev->features |= NETIF_F_RXCSUM; 3817 3818 if (buf->large_tx_ipv4) 3819 adapter->netdev->features |= NETIF_F_TSO; 3820 if (buf->large_tx_ipv6) 3821 adapter->netdev->features |= NETIF_F_TSO6; 3822 3823 adapter->netdev->hw_features |= adapter->netdev->features; 3824 3825 memset(&crq, 0, sizeof(crq)); 3826 
crq.control_ip_offload.first = IBMVNIC_CRQ_CMD; 3827 crq.control_ip_offload.cmd = CONTROL_IP_OFFLOAD; 3828 crq.control_ip_offload.len = 3829 cpu_to_be32(sizeof(adapter->ip_offload_ctrl)); 3830 crq.control_ip_offload.ioba = cpu_to_be32(adapter->ip_offload_ctrl_tok); 3831 ibmvnic_send_crq(adapter, &crq); 3832 } 3833 3834 static const char *ibmvnic_fw_err_cause(u16 cause) 3835 { 3836 switch (cause) { 3837 case ADAPTER_PROBLEM: 3838 return "adapter problem"; 3839 case BUS_PROBLEM: 3840 return "bus problem"; 3841 case FW_PROBLEM: 3842 return "firmware problem"; 3843 case DD_PROBLEM: 3844 return "device driver problem"; 3845 case EEH_RECOVERY: 3846 return "EEH recovery"; 3847 case FW_UPDATED: 3848 return "firmware updated"; 3849 case LOW_MEMORY: 3850 return "low Memory"; 3851 default: 3852 return "unknown"; 3853 } 3854 } 3855 3856 static void handle_error_indication(union ibmvnic_crq *crq, 3857 struct ibmvnic_adapter *adapter) 3858 { 3859 struct device *dev = &adapter->vdev->dev; 3860 u16 cause; 3861 3862 cause = be16_to_cpu(crq->error_indication.error_cause); 3863 3864 dev_warn_ratelimited(dev, 3865 "Firmware reports %serror, cause: %s. Starting recovery...\n", 3866 crq->error_indication.flags 3867 & IBMVNIC_FATAL_ERROR ? "FATAL " : "", 3868 ibmvnic_fw_err_cause(cause)); 3869 3870 if (crq->error_indication.flags & IBMVNIC_FATAL_ERROR) 3871 ibmvnic_reset(adapter, VNIC_RESET_FATAL); 3872 else 3873 ibmvnic_reset(adapter, VNIC_RESET_NON_FATAL); 3874 } 3875 3876 static int handle_change_mac_rsp(union ibmvnic_crq *crq, 3877 struct ibmvnic_adapter *adapter) 3878 { 3879 struct net_device *netdev = adapter->netdev; 3880 struct device *dev = &adapter->vdev->dev; 3881 long rc; 3882 3883 rc = crq->change_mac_addr_rsp.rc.code; 3884 if (rc) { 3885 dev_err(dev, "Error %ld in CHANGE_MAC_ADDR_RSP\n", rc); 3886 goto out; 3887 } 3888 memcpy(netdev->dev_addr, &crq->change_mac_addr_rsp.mac_addr[0], 3889 ETH_ALEN); 3890 out: 3891 complete(&adapter->fw_done); 3892 return rc; 3893 } 3894 3895 static void handle_request_cap_rsp(union ibmvnic_crq *crq, 3896 struct ibmvnic_adapter *adapter) 3897 { 3898 struct device *dev = &adapter->vdev->dev; 3899 u64 *req_value; 3900 char *name; 3901 3902 atomic_dec(&adapter->running_cap_crqs); 3903 switch (be16_to_cpu(crq->request_capability_rsp.capability)) { 3904 case REQ_TX_QUEUES: 3905 req_value = &adapter->req_tx_queues; 3906 name = "tx"; 3907 break; 3908 case REQ_RX_QUEUES: 3909 req_value = &adapter->req_rx_queues; 3910 name = "rx"; 3911 break; 3912 case REQ_RX_ADD_QUEUES: 3913 req_value = &adapter->req_rx_add_queues; 3914 name = "rx_add"; 3915 break; 3916 case REQ_TX_ENTRIES_PER_SUBCRQ: 3917 req_value = &adapter->req_tx_entries_per_subcrq; 3918 name = "tx_entries_per_subcrq"; 3919 break; 3920 case REQ_RX_ADD_ENTRIES_PER_SUBCRQ: 3921 req_value = &adapter->req_rx_add_entries_per_subcrq; 3922 name = "rx_add_entries_per_subcrq"; 3923 break; 3924 case REQ_MTU: 3925 req_value = &adapter->req_mtu; 3926 name = "mtu"; 3927 break; 3928 case PROMISC_REQUESTED: 3929 req_value = &adapter->promisc; 3930 name = "promisc"; 3931 break; 3932 default: 3933 dev_err(dev, "Got invalid cap request rsp %d\n", 3934 crq->request_capability.capability); 3935 return; 3936 } 3937 3938 switch (crq->request_capability_rsp.rc.code) { 3939 case SUCCESS: 3940 break; 3941 case PARTIALSUCCESS: 3942 dev_info(dev, "req=%lld, rsp=%ld in %s queue, retrying.\n", 3943 *req_value, 3944 (long int)be64_to_cpu(crq->request_capability_rsp. 
static void handle_request_cap_rsp(union ibmvnic_crq *crq,
				   struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	u64 *req_value;
	char *name;

	atomic_dec(&adapter->running_cap_crqs);
	switch (be16_to_cpu(crq->request_capability_rsp.capability)) {
	case REQ_TX_QUEUES:
		req_value = &adapter->req_tx_queues;
		name = "tx";
		break;
	case REQ_RX_QUEUES:
		req_value = &adapter->req_rx_queues;
		name = "rx";
		break;
	case REQ_RX_ADD_QUEUES:
		req_value = &adapter->req_rx_add_queues;
		name = "rx_add";
		break;
	case REQ_TX_ENTRIES_PER_SUBCRQ:
		req_value = &adapter->req_tx_entries_per_subcrq;
		name = "tx_entries_per_subcrq";
		break;
	case REQ_RX_ADD_ENTRIES_PER_SUBCRQ:
		req_value = &adapter->req_rx_add_entries_per_subcrq;
		name = "rx_add_entries_per_subcrq";
		break;
	case REQ_MTU:
		req_value = &adapter->req_mtu;
		name = "mtu";
		break;
	case PROMISC_REQUESTED:
		req_value = &adapter->promisc;
		name = "promisc";
		break;
	default:
		dev_err(dev, "Got invalid cap request rsp %d\n",
			be16_to_cpu(crq->request_capability_rsp.capability));
		return;
	}

	switch (crq->request_capability_rsp.rc.code) {
	case SUCCESS:
		break;
	case PARTIALSUCCESS:
		dev_info(dev, "req=%lld, rsp=%ld in %s queue, retrying.\n",
			 *req_value,
			 (long int)be64_to_cpu(crq->request_capability_rsp.
					       number), name);

		if (be16_to_cpu(crq->request_capability_rsp.capability) ==
		    REQ_MTU) {
			pr_err("mtu of %llu is not supported. Reverting.\n",
			       *req_value);
			*req_value = adapter->fallback.mtu;
		} else {
			*req_value =
				be64_to_cpu(crq->request_capability_rsp.number);
		}

		ibmvnic_send_req_caps(adapter, 1);
		return;
	default:
		dev_err(dev, "Error %d in request cap rsp\n",
			crq->request_capability_rsp.rc.code);
		return;
	}

	/* Done receiving requested capabilities, query IP offload support */
	if (atomic_read(&adapter->running_cap_crqs) == 0) {
		union ibmvnic_crq newcrq;
		int buf_sz = sizeof(struct ibmvnic_query_ip_offload_buffer);
		struct ibmvnic_query_ip_offload_buffer *ip_offload_buf =
			&adapter->ip_offload_buf;

		adapter->wait_capability = false;
		adapter->ip_offload_tok = dma_map_single(dev, ip_offload_buf,
							 buf_sz,
							 DMA_FROM_DEVICE);

		if (dma_mapping_error(dev, adapter->ip_offload_tok)) {
			if (!firmware_has_feature(FW_FEATURE_CMO))
				dev_err(dev, "Couldn't map offload buffer\n");
			return;
		}

		memset(&newcrq, 0, sizeof(newcrq));
		newcrq.query_ip_offload.first = IBMVNIC_CRQ_CMD;
		newcrq.query_ip_offload.cmd = QUERY_IP_OFFLOAD;
		newcrq.query_ip_offload.len = cpu_to_be32(buf_sz);
		newcrq.query_ip_offload.ioba =
			cpu_to_be32(adapter->ip_offload_tok);

		ibmvnic_send_crq(adapter, &newcrq);
	}
}
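
/* Handle the LOGIN response. The login and login response buffers were
 * DMA mapped when the login was sent, so they are unmapped here first.
 * A non-zero return code is propagated through init_done_rc; typically
 * it means the server could not provide the requested number of queues
 * and the login must be resent asking for fewer.
 */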
static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
			    struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct net_device *netdev = adapter->netdev;
	struct ibmvnic_login_rsp_buffer *login_rsp = adapter->login_rsp_buf;
	struct ibmvnic_login_buffer *login = adapter->login_buf;
	int i;

	dma_unmap_single(dev, adapter->login_buf_token, adapter->login_buf_sz,
			 DMA_TO_DEVICE);
	dma_unmap_single(dev, adapter->login_rsp_buf_token,
			 adapter->login_rsp_buf_sz, DMA_FROM_DEVICE);

	/* If the number of queues requested can't be allocated by the
	 * server, the login response will return with code 1. We will need
	 * to resend the login buffer with fewer queues requested.
	 */
	if (login_rsp_crq->generic.rc.code) {
		adapter->init_done_rc = login_rsp_crq->generic.rc.code;
		complete(&adapter->init_done);
		return 0;
	}

	netdev->mtu = adapter->req_mtu - ETH_HLEN;

	netdev_dbg(adapter->netdev, "Login Response Buffer:\n");
	for (i = 0; i < (adapter->login_rsp_buf_sz - 1) / 8 + 1; i++) {
		netdev_dbg(adapter->netdev, "%016lx\n",
			   ((unsigned long int *)(adapter->login_rsp_buf))[i]);
	}

	/* Sanity checks */
	if (login->num_txcomp_subcrqs != login_rsp->num_txsubm_subcrqs ||
	    (be32_to_cpu(login->num_rxcomp_subcrqs) *
	     adapter->req_rx_add_queues !=
	     be32_to_cpu(login_rsp->num_rxadd_subcrqs))) {
		dev_err(dev, "FATAL: Inconsistent login and login rsp\n");
		ibmvnic_remove(adapter->vdev);
		return -EIO;
	}
	release_login_buffer(adapter);
	complete(&adapter->init_done);

	return 0;
}

static void handle_request_unmap_rsp(union ibmvnic_crq *crq,
				     struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	long rc;

	rc = crq->request_unmap_rsp.rc.code;
	if (rc)
		dev_err(dev, "Error %ld in REQUEST_UNMAP_RSP\n", rc);
}

static void handle_query_map_rsp(union ibmvnic_crq *crq,
				 struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	long rc;

	rc = crq->query_map_rsp.rc.code;
	if (rc) {
		dev_err(dev, "Error %ld in QUERY_MAP_RSP\n", rc);
		return;
	}
	netdev_dbg(netdev, "page_size = %d\ntot_pages = %d\nfree_pages = %d\n",
		   crq->query_map_rsp.page_size, crq->query_map_rsp.tot_pages,
		   crq->query_map_rsp.free_pages);
}
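
/* Record a single QUERY_CAPABILITY response in the adapter structure.
 * Once the last outstanding query has been answered (running_cap_crqs
 * drops to zero), the driver computes and sends its own capability
 * requests via ibmvnic_send_req_caps().
 */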
static void handle_query_cap_rsp(union ibmvnic_crq *crq,
				 struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	long rc;

	atomic_dec(&adapter->running_cap_crqs);
	netdev_dbg(netdev, "Outstanding queries: %d\n",
		   atomic_read(&adapter->running_cap_crqs));
	rc = crq->query_capability.rc.code;
	if (rc) {
		dev_err(dev, "Error %ld in QUERY_CAP_RSP\n", rc);
		goto out;
	}

	switch (be16_to_cpu(crq->query_capability.capability)) {
	case MIN_TX_QUEUES:
		adapter->min_tx_queues =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_tx_queues = %lld\n",
			   adapter->min_tx_queues);
		break;
	case MIN_RX_QUEUES:
		adapter->min_rx_queues =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_rx_queues = %lld\n",
			   adapter->min_rx_queues);
		break;
	case MIN_RX_ADD_QUEUES:
		adapter->min_rx_add_queues =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_rx_add_queues = %lld\n",
			   adapter->min_rx_add_queues);
		break;
	case MAX_TX_QUEUES:
		adapter->max_tx_queues =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_tx_queues = %lld\n",
			   adapter->max_tx_queues);
		break;
	case MAX_RX_QUEUES:
		adapter->max_rx_queues =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_rx_queues = %lld\n",
			   adapter->max_rx_queues);
		break;
	case MAX_RX_ADD_QUEUES:
		adapter->max_rx_add_queues =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_rx_add_queues = %lld\n",
			   adapter->max_rx_add_queues);
		break;
	case MIN_TX_ENTRIES_PER_SUBCRQ:
		adapter->min_tx_entries_per_subcrq =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_tx_entries_per_subcrq = %lld\n",
			   adapter->min_tx_entries_per_subcrq);
		break;
	case MIN_RX_ADD_ENTRIES_PER_SUBCRQ:
		adapter->min_rx_add_entries_per_subcrq =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_rx_add_entrs_per_subcrq = %lld\n",
			   adapter->min_rx_add_entries_per_subcrq);
		break;
	case MAX_TX_ENTRIES_PER_SUBCRQ:
		adapter->max_tx_entries_per_subcrq =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_tx_entries_per_subcrq = %lld\n",
			   adapter->max_tx_entries_per_subcrq);
		break;
	case MAX_RX_ADD_ENTRIES_PER_SUBCRQ:
		adapter->max_rx_add_entries_per_subcrq =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_rx_add_entrs_per_subcrq = %lld\n",
			   adapter->max_rx_add_entries_per_subcrq);
		break;
	case TCP_IP_OFFLOAD:
		adapter->tcp_ip_offload =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "tcp_ip_offload = %lld\n",
			   adapter->tcp_ip_offload);
		break;
	case PROMISC_SUPPORTED:
		adapter->promisc_supported =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "promisc_supported = %lld\n",
			   adapter->promisc_supported);
		break;
	case MIN_MTU:
		adapter->min_mtu = be64_to_cpu(crq->query_capability.number);
		netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
		netdev_dbg(netdev, "min_mtu = %lld\n", adapter->min_mtu);
		break;
	case MAX_MTU:
		adapter->max_mtu = be64_to_cpu(crq->query_capability.number);
		netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
		netdev_dbg(netdev, "max_mtu = %lld\n", adapter->max_mtu);
		break;
	case MAX_MULTICAST_FILTERS:
		adapter->max_multicast_filters =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_multicast_filters = %lld\n",
			   adapter->max_multicast_filters);
		break;
	case VLAN_HEADER_INSERTION:
		adapter->vlan_header_insertion =
			be64_to_cpu(crq->query_capability.number);
		if (adapter->vlan_header_insertion)
			netdev->features |= NETIF_F_HW_VLAN_STAG_TX;
		netdev_dbg(netdev, "vlan_header_insertion = %lld\n",
			   adapter->vlan_header_insertion);
		break;
	case RX_VLAN_HEADER_INSERTION:
		adapter->rx_vlan_header_insertion =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "rx_vlan_header_insertion = %lld\n",
			   adapter->rx_vlan_header_insertion);
		break;
	case MAX_TX_SG_ENTRIES:
		adapter->max_tx_sg_entries =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_tx_sg_entries = %lld\n",
			   adapter->max_tx_sg_entries);
		break;
	case RX_SG_SUPPORTED:
		adapter->rx_sg_supported =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "rx_sg_supported = %lld\n",
			   adapter->rx_sg_supported);
		break;
	case OPT_TX_COMP_SUB_QUEUES:
		adapter->opt_tx_comp_sub_queues =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_tx_comp_sub_queues = %lld\n",
			   adapter->opt_tx_comp_sub_queues);
		break;
	case OPT_RX_COMP_QUEUES:
		adapter->opt_rx_comp_queues =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_rx_comp_queues = %lld\n",
			   adapter->opt_rx_comp_queues);
		break;
	case OPT_RX_BUFADD_Q_PER_RX_COMP_Q:
		adapter->opt_rx_bufadd_q_per_rx_comp_q =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_rx_bufadd_q_per_rx_comp_q = %lld\n",
			   adapter->opt_rx_bufadd_q_per_rx_comp_q);
		break;
	case OPT_TX_ENTRIES_PER_SUBCRQ:
		adapter->opt_tx_entries_per_subcrq =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_tx_entries_per_subcrq = %lld\n",
			   adapter->opt_tx_entries_per_subcrq);
		break;
	case OPT_RXBA_ENTRIES_PER_SUBCRQ:
		adapter->opt_rxba_entries_per_subcrq =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_rxba_entries_per_subcrq = %lld\n",
			   adapter->opt_rxba_entries_per_subcrq);
		break;
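	/* Unlike the other capabilities, tx_rx_desc_req is stored in the
	 * big-endian form in which it arrives (note the missing
	 * be64_to_cpu() and the %llx format below), presumably because it
	 * is handed back to the device as-is.
	 */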
	case TX_RX_DESC_REQ:
		adapter->tx_rx_desc_req = crq->query_capability.number;
		netdev_dbg(netdev, "tx_rx_desc_req = %llx\n",
			   adapter->tx_rx_desc_req);
		break;

	default:
		netdev_err(netdev, "Got invalid cap rsp %d\n",
			   be16_to_cpu(crq->query_capability.capability));
	}

out:
	if (atomic_read(&adapter->running_cap_crqs) == 0) {
		adapter->wait_capability = false;
		ibmvnic_send_req_caps(adapter, 0);
	}
}
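
/* Top-level CRQ dispatcher, invoked from the CRQ tasklet for every valid
 * message. Initialization and transport events (partner init, partition
 * migration, device failover) are handled inline; command responses are
 * routed to the handle_*_rsp() helpers above.
 */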
static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
			       struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_generic_crq *gen_crq = &crq->generic;
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	u64 *u64_crq = (u64 *)crq;
	long rc;

	netdev_dbg(netdev, "Handling CRQ: %016lx %016lx\n",
		   (unsigned long int)cpu_to_be64(u64_crq[0]),
		   (unsigned long int)cpu_to_be64(u64_crq[1]));
	switch (gen_crq->first) {
	case IBMVNIC_CRQ_INIT_RSP:
		switch (gen_crq->cmd) {
		case IBMVNIC_CRQ_INIT:
			dev_info(dev, "Partner initialized\n");
			adapter->from_passive_init = true;
			adapter->failover_pending = false;
			if (!completion_done(&adapter->init_done)) {
				complete(&adapter->init_done);
				adapter->init_done_rc = -EIO;
			}
			ibmvnic_reset(adapter, VNIC_RESET_FAILOVER);
			break;
		case IBMVNIC_CRQ_INIT_COMPLETE:
			dev_info(dev, "Partner initialization complete\n");
			adapter->crq.active = true;
			send_version_xchg(adapter);
			break;
		default:
			dev_err(dev, "Unknown crq cmd: %d\n", gen_crq->cmd);
		}
		return;
	case IBMVNIC_CRQ_XPORT_EVENT:
		netif_carrier_off(netdev);
		adapter->crq.active = false;
		if (adapter->resetting)
			adapter->force_reset_recovery = true;
		if (gen_crq->cmd == IBMVNIC_PARTITION_MIGRATED) {
			dev_info(dev, "Migrated, re-enabling adapter\n");
			ibmvnic_reset(adapter, VNIC_RESET_MOBILITY);
		} else if (gen_crq->cmd == IBMVNIC_DEVICE_FAILOVER) {
			dev_info(dev, "Backing device failover detected\n");
			adapter->failover_pending = true;
		} else {
			/* The adapter lost the connection */
			dev_err(dev, "Virtual Adapter failed (rc=%d)\n",
				gen_crq->cmd);
			ibmvnic_reset(adapter, VNIC_RESET_FATAL);
		}
		return;
	case IBMVNIC_CRQ_CMD_RSP:
		break;
	default:
		dev_err(dev, "Got an invalid msg type 0x%02x\n",
			gen_crq->first);
		return;
	}

	switch (gen_crq->cmd) {
	case VERSION_EXCHANGE_RSP:
		rc = crq->version_exchange_rsp.rc.code;
		if (rc) {
			dev_err(dev, "Error %ld in VERSION_EXCHG_RSP\n", rc);
			break;
		}
		dev_info(dev, "Partner protocol version is %d\n",
			 be16_to_cpu(crq->version_exchange_rsp.version));
		if (be16_to_cpu(crq->version_exchange_rsp.version) <
		    ibmvnic_version)
			ibmvnic_version =
				be16_to_cpu(crq->version_exchange_rsp.version);
		send_cap_queries(adapter);
		break;
	case QUERY_CAPABILITY_RSP:
		handle_query_cap_rsp(crq, adapter);
		break;
	case QUERY_MAP_RSP:
		handle_query_map_rsp(crq, adapter);
		break;
	case REQUEST_MAP_RSP:
		adapter->fw_done_rc = crq->request_map_rsp.rc.code;
		complete(&adapter->fw_done);
		break;
	case REQUEST_UNMAP_RSP:
		handle_request_unmap_rsp(crq, adapter);
		break;
	case REQUEST_CAPABILITY_RSP:
		handle_request_cap_rsp(crq, adapter);
		break;
	case LOGIN_RSP:
		netdev_dbg(netdev, "Got Login Response\n");
		handle_login_rsp(crq, adapter);
		break;
	case LOGICAL_LINK_STATE_RSP:
		netdev_dbg(netdev,
			   "Got Logical Link State Response, state: %d rc: %d\n",
			   crq->logical_link_state_rsp.link_state,
			   crq->logical_link_state_rsp.rc.code);
		adapter->logical_link_state =
			crq->logical_link_state_rsp.link_state;
		adapter->init_done_rc = crq->logical_link_state_rsp.rc.code;
		complete(&adapter->init_done);
		break;
	case LINK_STATE_INDICATION:
		netdev_dbg(netdev, "Got Logical Link State Indication\n");
		adapter->phys_link_state =
			crq->link_state_indication.phys_link_state;
		adapter->logical_link_state =
			crq->link_state_indication.logical_link_state;
		break;
	case CHANGE_MAC_ADDR_RSP:
		netdev_dbg(netdev, "Got MAC address change Response\n");
		adapter->fw_done_rc = handle_change_mac_rsp(crq, adapter);
		break;
	case ERROR_INDICATION:
		netdev_dbg(netdev, "Got Error Indication\n");
		handle_error_indication(crq, adapter);
		break;
	case REQUEST_STATISTICS_RSP:
		netdev_dbg(netdev, "Got Statistics Response\n");
		complete(&adapter->stats_done);
		break;
	case QUERY_IP_OFFLOAD_RSP:
		netdev_dbg(netdev, "Got Query IP offload Response\n");
		handle_query_ip_offload_rsp(adapter);
		break;
	case MULTICAST_CTRL_RSP:
		netdev_dbg(netdev, "Got multicast control Response\n");
		break;
	case CONTROL_IP_OFFLOAD_RSP:
		netdev_dbg(netdev, "Got Control IP offload Response\n");
		dma_unmap_single(dev, adapter->ip_offload_ctrl_tok,
				 sizeof(adapter->ip_offload_ctrl),
				 DMA_TO_DEVICE);
		complete(&adapter->init_done);
		break;
	case COLLECT_FW_TRACE_RSP:
		netdev_dbg(netdev, "Got Collect firmware trace Response\n");
		complete(&adapter->fw_done);
		break;
	case GET_VPD_SIZE_RSP:
		handle_vpd_size_rsp(crq, adapter);
		break;
	case GET_VPD_RSP:
		handle_vpd_rsp(crq, adapter);
		break;
	default:
		netdev_err(netdev, "Got an invalid cmd type 0x%02x\n",
			   gen_crq->cmd);
	}
}
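
/* CRQ interrupt handling is split in two: the hard IRQ handler merely
 * schedules the tasklet below, which drains the queue under crq->lock and
 * dispatches each message to ibmvnic_handle_crq().
 */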
static irqreturn_t ibmvnic_interrupt(int irq, void *instance)
{
	struct ibmvnic_adapter *adapter = instance;

	tasklet_schedule(&adapter->tasklet);
	return IRQ_HANDLED;
}

static void ibmvnic_tasklet(void *data)
{
	struct ibmvnic_adapter *adapter = data;
	struct ibmvnic_crq_queue *queue = &adapter->crq;
	union ibmvnic_crq *crq;
	unsigned long flags;
	bool done = false;

	spin_lock_irqsave(&queue->lock, flags);
	while (!done) {
		/* Pull all the valid messages off the CRQ */
		while ((crq = ibmvnic_next_crq(adapter)) != NULL) {
			ibmvnic_handle_crq(crq, adapter);
			crq->generic.first = 0;
		}

		/* remain in tasklet until all
		 * capabilities responses are received
		 */
		if (!adapter->wait_capability)
			done = true;
	}
	/* if capabilities CRQs were sent in this tasklet, the following
	 * tasklet must wait until all responses are received
	 */
	if (atomic_read(&adapter->running_cap_crqs) != 0)
		adapter->wait_capability = true;
	spin_unlock_irqrestore(&queue->lock, flags);
}

static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct vio_dev *vdev = adapter->vdev;
	int rc;

	do {
		rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
	} while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));

	if (rc)
		dev_err(&vdev->dev, "Error enabling adapter (rc=%d)\n", rc);

	return rc;
}

static int ibmvnic_reset_crq(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct device *dev = &adapter->vdev->dev;
	struct vio_dev *vdev = adapter->vdev;
	int rc;

	/* Close the CRQ */
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	/* Clean out the queue */
	memset(crq->msgs, 0, PAGE_SIZE);
	crq->cur = 0;
	crq->active = false;

	/* And re-open it again */
	rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
				crq->msg_token, PAGE_SIZE);

	if (rc == H_CLOSED)
		/* Adapter is good, but other end is not ready */
		dev_warn(dev, "Partner adapter not ready\n");
	else if (rc != 0)
		dev_warn(dev, "Couldn't register crq (rc=%d)\n", rc);

	return rc;
}

static void release_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct vio_dev *vdev = adapter->vdev;
	long rc;

	if (!crq->msgs)
		return;

	netdev_dbg(adapter->netdev, "Releasing CRQ\n");
	free_irq(vdev->irq, adapter);
	tasklet_kill(&adapter->tasklet);
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	dma_unmap_single(&vdev->dev, crq->msg_token, PAGE_SIZE,
			 DMA_BIDIRECTIONAL);
	free_page((unsigned long)crq->msgs);
	crq->msgs = NULL;
	crq->active = false;
}
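
/* Allocate and register the main CRQ: a single zeroed page, long-term
 * mapped DMA_BIDIRECTIONAL and registered with the hypervisor via
 * H_REG_CRQ. H_RESOURCE usually means a stale registration is still
 * active (e.g. after a kexec), so the queue is reset and re-registered.
 */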
static int init_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct device *dev = &adapter->vdev->dev;
	struct vio_dev *vdev = adapter->vdev;
	int rc, retrc = -ENOMEM;

	if (crq->msgs)
		return 0;

	crq->msgs = (union ibmvnic_crq *)get_zeroed_page(GFP_KERNEL);
	/* Should we allocate more than one page? */

	if (!crq->msgs)
		return -ENOMEM;

	crq->size = PAGE_SIZE / sizeof(*crq->msgs);
	crq->msg_token = dma_map_single(dev, crq->msgs, PAGE_SIZE,
					DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, crq->msg_token))
		goto map_failed;

	rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
				crq->msg_token, PAGE_SIZE);

	if (rc == H_RESOURCE)
		/* maybe kexecing and resource is busy. try a reset */
		rc = ibmvnic_reset_crq(adapter);
	retrc = rc;

	if (rc == H_CLOSED) {
		dev_warn(dev, "Partner adapter not ready\n");
	} else if (rc) {
		dev_warn(dev, "Error %d opening adapter\n", rc);
		goto reg_crq_failed;
	}

	retrc = 0;

	tasklet_init(&adapter->tasklet, (void *)ibmvnic_tasklet,
		     (unsigned long)adapter);

	netdev_dbg(adapter->netdev, "registering irq 0x%x\n", vdev->irq);
	rc = request_irq(vdev->irq, ibmvnic_interrupt, 0, IBMVNIC_NAME,
			 adapter);
	if (rc) {
		dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n",
			vdev->irq, rc);
		goto req_irq_failed;
	}

	rc = vio_enable_interrupts(vdev);
	if (rc) {
		dev_err(dev, "Error %d enabling interrupts\n", rc);
		goto req_irq_failed;
	}

	crq->cur = 0;
	spin_lock_init(&crq->lock);

	return retrc;

req_irq_failed:
	tasklet_kill(&adapter->tasklet);
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
reg_crq_failed:
	dma_unmap_single(dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
map_failed:
	free_page((unsigned long)crq->msgs);
	crq->msgs = NULL;
	return retrc;
}
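
/* Re-initialization used during adapter resets. Unlike ibmvnic_init(),
 * the existing sub-CRQs are kept and merely reset in place when the
 * renegotiated queue counts are unchanged; if they changed, the sub-CRQs
 * are released and reallocated. Mobility resets rebuild them from
 * scratch.
 */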
static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	unsigned long timeout = msecs_to_jiffies(30000);
	u64 old_num_rx_queues, old_num_tx_queues;
	int rc;

	adapter->from_passive_init = false;

	old_num_rx_queues = adapter->req_rx_queues;
	old_num_tx_queues = adapter->req_tx_queues;

	init_completion(&adapter->init_done);
	adapter->init_done_rc = 0;
	ibmvnic_send_crq_init(adapter);
	if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
		dev_err(dev, "Initialization sequence timed out\n");
		return -1;
	}

	if (adapter->init_done_rc) {
		release_crq_queue(adapter);
		return adapter->init_done_rc;
	}

	if (adapter->from_passive_init) {
		adapter->state = VNIC_OPEN;
		adapter->from_passive_init = false;
		return -1;
	}

	if (adapter->resetting && !adapter->wait_for_reset &&
	    adapter->reset_reason != VNIC_RESET_MOBILITY) {
		if (adapter->req_rx_queues != old_num_rx_queues ||
		    adapter->req_tx_queues != old_num_tx_queues) {
			release_sub_crqs(adapter, 0);
			rc = init_sub_crqs(adapter);
		} else {
			rc = reset_sub_crq_queues(adapter);
		}
	} else {
		rc = init_sub_crqs(adapter);
	}

	if (rc) {
		dev_err(dev, "Initialization of sub crqs failed\n");
		release_crq_queue(adapter);
		return rc;
	}

	rc = init_sub_crq_irqs(adapter);
	if (rc) {
		dev_err(dev, "Failed to initialize sub crq irqs\n");
		release_crq_queue(adapter);
	}

	return rc;
}

static int ibmvnic_init(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	unsigned long timeout = msecs_to_jiffies(30000);
	int rc;

	adapter->from_passive_init = false;

	init_completion(&adapter->init_done);
	adapter->init_done_rc = 0;
	ibmvnic_send_crq_init(adapter);
	if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
		dev_err(dev, "Initialization sequence timed out\n");
		return -1;
	}

	if (adapter->init_done_rc) {
		release_crq_queue(adapter);
		return adapter->init_done_rc;
	}

	if (adapter->from_passive_init) {
		adapter->state = VNIC_OPEN;
		adapter->from_passive_init = false;
		return -1;
	}

	rc = init_sub_crqs(adapter);
	if (rc) {
		dev_err(dev, "Initialization of sub crqs failed\n");
		release_crq_queue(adapter);
		return rc;
	}

	rc = init_sub_crq_irqs(adapter);
	if (rc) {
		dev_err(dev, "Failed to initialize sub crq irqs\n");
		release_crq_queue(adapter);
	}

	return rc;
}

static struct device_attribute dev_attr_failover;
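
/* Adapter probe: read the MAC address attribute from the VIO device tree
 * node, allocate the net_device, then bring up the CRQ and drive the
 * initialization handshake through ibmvnic_init(), retrying while the
 * server requests renegotiation (EAGAIN), before registering the netdev.
 */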
rc=%d\n", 4738 rc); 4739 goto ibmvnic_init_fail; 4740 } 4741 4742 rc = ibmvnic_init(adapter); 4743 if (rc && rc != EAGAIN) 4744 goto ibmvnic_init_fail; 4745 } while (rc == EAGAIN); 4746 4747 rc = init_stats_buffers(adapter); 4748 if (rc) 4749 goto ibmvnic_init_fail; 4750 4751 rc = init_stats_token(adapter); 4752 if (rc) 4753 goto ibmvnic_stats_fail; 4754 4755 netdev->mtu = adapter->req_mtu - ETH_HLEN; 4756 netdev->min_mtu = adapter->min_mtu - ETH_HLEN; 4757 netdev->max_mtu = adapter->max_mtu - ETH_HLEN; 4758 4759 rc = device_create_file(&dev->dev, &dev_attr_failover); 4760 if (rc) 4761 goto ibmvnic_dev_file_err; 4762 4763 netif_carrier_off(netdev); 4764 rc = register_netdev(netdev); 4765 if (rc) { 4766 dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc); 4767 goto ibmvnic_register_fail; 4768 } 4769 dev_info(&dev->dev, "ibmvnic registered\n"); 4770 4771 adapter->state = VNIC_PROBED; 4772 4773 adapter->wait_for_reset = false; 4774 4775 return 0; 4776 4777 ibmvnic_register_fail: 4778 device_remove_file(&dev->dev, &dev_attr_failover); 4779 4780 ibmvnic_dev_file_err: 4781 release_stats_token(adapter); 4782 4783 ibmvnic_stats_fail: 4784 release_stats_buffers(adapter); 4785 4786 ibmvnic_init_fail: 4787 release_sub_crqs(adapter, 1); 4788 release_crq_queue(adapter); 4789 free_netdev(netdev); 4790 4791 return rc; 4792 } 4793 4794 static int ibmvnic_remove(struct vio_dev *dev) 4795 { 4796 struct net_device *netdev = dev_get_drvdata(&dev->dev); 4797 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 4798 4799 adapter->state = VNIC_REMOVING; 4800 unregister_netdev(netdev); 4801 mutex_lock(&adapter->reset_lock); 4802 4803 release_resources(adapter); 4804 release_sub_crqs(adapter, 1); 4805 release_crq_queue(adapter); 4806 4807 release_stats_token(adapter); 4808 release_stats_buffers(adapter); 4809 4810 adapter->state = VNIC_REMOVED; 4811 4812 mutex_unlock(&adapter->reset_lock); 4813 device_remove_file(&dev->dev, &dev_attr_failover); 4814 free_netdev(netdev); 4815 dev_set_drvdata(&dev->dev, NULL); 4816 4817 return 0; 4818 } 4819 4820 static ssize_t failover_store(struct device *dev, struct device_attribute *attr, 4821 const char *buf, size_t count) 4822 { 4823 struct net_device *netdev = dev_get_drvdata(dev); 4824 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 4825 unsigned long retbuf[PLPAR_HCALL_BUFSIZE]; 4826 __be64 session_token; 4827 long rc; 4828 4829 if (!sysfs_streq(buf, "1")) 4830 return -EINVAL; 4831 4832 rc = plpar_hcall(H_VIOCTL, retbuf, adapter->vdev->unit_address, 4833 H_GET_SESSION_TOKEN, 0, 0, 0); 4834 if (rc) { 4835 netdev_err(netdev, "Couldn't retrieve session token, rc %ld\n", 4836 rc); 4837 return -EINVAL; 4838 } 4839 4840 session_token = (__be64)retbuf[0]; 4841 netdev_dbg(netdev, "Initiating client failover, session id %llx\n", 4842 be64_to_cpu(session_token)); 4843 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address, 4844 H_SESSION_ERR_DETECTED, session_token, 0, 0); 4845 if (rc) { 4846 netdev_err(netdev, "Client initiated failover failed, rc %ld\n", 4847 rc); 4848 return -EINVAL; 4849 } 4850 4851 return count; 4852 } 4853 4854 static DEVICE_ATTR_WO(failover); 4855 4856 static unsigned long ibmvnic_get_desired_dma(struct vio_dev *vdev) 4857 { 4858 struct net_device *netdev = dev_get_drvdata(&vdev->dev); 4859 struct ibmvnic_adapter *adapter; 4860 struct iommu_table *tbl; 4861 unsigned long ret = 0; 4862 int i; 4863 4864 tbl = get_iommu_table_base(&vdev->dev); 4865 4866 /* netdev inits at probe time along with the structures we need below*/ 4867 if 
static unsigned long ibmvnic_get_desired_dma(struct vio_dev *vdev)
{
	struct net_device *netdev = dev_get_drvdata(&vdev->dev);
	struct ibmvnic_adapter *adapter;
	struct iommu_table *tbl;
	unsigned long ret = 0;
	int i;

	tbl = get_iommu_table_base(&vdev->dev);

	/* netdev inits at probe time along with the structures we need below */
	if (!netdev)
		return IOMMU_PAGE_ALIGN(IBMVNIC_IO_ENTITLEMENT_DEFAULT, tbl);

	adapter = netdev_priv(netdev);

	ret += PAGE_SIZE; /* the crq message queue */
	ret += IOMMU_PAGE_ALIGN(sizeof(struct ibmvnic_statistics), tbl);

	for (i = 0; i < adapter->req_tx_queues + adapter->req_rx_queues; i++)
		ret += 4 * PAGE_SIZE; /* the scrq message queue */

	for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	     i++)
		ret += adapter->rx_pool[i].size *
			IOMMU_PAGE_ALIGN(adapter->rx_pool[i].buff_size, tbl);

	return ret;
}

static int ibmvnic_resume(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	if (adapter->state != VNIC_OPEN)
		return 0;

	tasklet_schedule(&adapter->tasklet);

	return 0;
}

static const struct vio_device_id ibmvnic_device_table[] = {
	{"network", "IBM,vnic"},
	{"", "" }
};
MODULE_DEVICE_TABLE(vio, ibmvnic_device_table);

static const struct dev_pm_ops ibmvnic_pm_ops = {
	.resume = ibmvnic_resume
};

static struct vio_driver ibmvnic_driver = {
	.id_table	= ibmvnic_device_table,
	.probe		= ibmvnic_probe,
	.remove		= ibmvnic_remove,
	.get_desired_dma = ibmvnic_get_desired_dma,
	.name		= ibmvnic_driver_name,
	.pm		= &ibmvnic_pm_ops,
};

/* module functions */
static int __init ibmvnic_module_init(void)
{
	pr_info("%s: %s %s\n", ibmvnic_driver_name, ibmvnic_driver_string,
		IBMVNIC_DRIVER_VERSION);

	return vio_register_driver(&ibmvnic_driver);
}

static void __exit ibmvnic_module_exit(void)
{
	vio_unregister_driver(&ibmvnic_driver);
}

module_init(ibmvnic_module_init);
module_exit(ibmvnic_module_exit);