/**************************************************************************
 * IBM System i and System p Virtual NIC Device Driver
 * Copyright (C) 2014 IBM Corp.
 * Santiago Leon (santi_leon@yahoo.com)
 * Thomas Falcon (tlfalcon@linux.vnet.ibm.com)
 * John Allen (jallen@linux.vnet.ibm.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.
 *
 * This module contains the implementation of a virtual ethernet device
 * for use with IBM i/p Series LPAR Linux. It utilizes the logical LAN
 * option of the RS/6000 Platform Architecture to interface with virtual
 * ethernet NICs that are presented to the partition by the hypervisor.
 *
 * Messages are passed between the VNIC driver and the VNIC server using
 * Command/Response Queues (CRQs) and sub CRQs (sCRQs). CRQs are used to
 * issue and receive commands that initiate communication with the server
 * on driver initialization. Sub CRQs (sCRQs) are similar to CRQs, but are
 * used by the driver to notify the server that a packet is ready for
 * transmission or that a buffer has been added to receive a packet.
 * Subsequently, sCRQs are used by the server to notify the driver that a
 * packet transmission has been completed or that a packet has been
 * received and placed in a waiting buffer.
 *
 * In lieu of a more conventional "on-the-fly" DMA mapping strategy in
 * which skbs are DMA mapped and immediately unmapped when the transmit
 * or receive has been completed, the VNIC driver is required to use
 * "long term mapping". This entails that large, contiguous DMA mapped
 * buffers are allocated on driver initialization and these buffers are
 * then continuously reused to pass skbs to and from the VNIC server.
 **************************************************************************/

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/completion.h>
#include <linux/ioport.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/ethtool.h>
#include <linux/proc_fs.h>
#include <linux/if_arp.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/irq.h>
#include <linux/kthread.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <net/net_namespace.h>
#include <asm/hvcall.h>
#include <linux/atomic.h>
#include <asm/vio.h>
#include <asm/iommu.h>
#include <linux/uaccess.h>
#include <asm/firmware.h>
#include <linux/workqueue.h>
#include <linux/if_vlan.h>
#include <linux/utsname.h>

#include "ibmvnic.h"

static const char ibmvnic_driver_name[] = "ibmvnic";
static const char ibmvnic_driver_string[] = "IBM System i/p Virtual NIC Driver";

MODULE_AUTHOR("Santiago Leon");
MODULE_DESCRIPTION("IBM System i/p Virtual NIC Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(IBMVNIC_DRIVER_VERSION);

static int ibmvnic_version = IBMVNIC_INITIAL_VERSION;
static int ibmvnic_remove(struct vio_dev *);
static void release_sub_crqs(struct ibmvnic_adapter *, bool);
static int ibmvnic_reset_crq(struct ibmvnic_adapter *);
static int ibmvnic_send_crq_init(struct ibmvnic_adapter *);
static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *);
static int ibmvnic_send_crq(struct ibmvnic_adapter *, union ibmvnic_crq *);
static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
		       union sub_crq *sub_crq);
static int send_subcrq_indirect(struct ibmvnic_adapter *, u64, u64, u64);
static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance);
static int enable_scrq_irq(struct ibmvnic_adapter *,
			   struct ibmvnic_sub_crq_queue *);
static int disable_scrq_irq(struct ibmvnic_adapter *,
			    struct ibmvnic_sub_crq_queue *);
static int pending_scrq(struct ibmvnic_adapter *,
			struct ibmvnic_sub_crq_queue *);
static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *,
					struct ibmvnic_sub_crq_queue *);
static int ibmvnic_poll(struct napi_struct *napi, int data);
static void send_map_query(struct ibmvnic_adapter *adapter);
static int send_request_map(struct ibmvnic_adapter *, dma_addr_t, __be32, u8);
static int send_request_unmap(struct ibmvnic_adapter *, u8);
static int send_login(struct ibmvnic_adapter *adapter);
static void send_cap_queries(struct ibmvnic_adapter *adapter);
static int init_sub_crqs(struct ibmvnic_adapter *);
static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter);
static int ibmvnic_init(struct ibmvnic_adapter *);
static int ibmvnic_reset_init(struct ibmvnic_adapter *);
static void release_crq_queue(struct ibmvnic_adapter *);
static int __ibmvnic_set_mac(struct net_device *netdev, struct sockaddr *p);
static int init_crq_queue(struct ibmvnic_adapter *adapter);

struct ibmvnic_stat {
	char name[ETH_GSTRING_LEN];
	int offset;
};

#define IBMVNIC_STAT_OFF(stat) (offsetof(struct ibmvnic_adapter, stats) + \
			     offsetof(struct ibmvnic_statistics, stat))
#define IBMVNIC_GET_STAT(a, off) (*((u64 *)(((unsigned long)(a)) + off)))

static const struct ibmvnic_stat ibmvnic_stats[] = {
	{"rx_packets", IBMVNIC_STAT_OFF(rx_packets)},
	{"rx_bytes", IBMVNIC_STAT_OFF(rx_bytes)},
	{"tx_packets", IBMVNIC_STAT_OFF(tx_packets)},
	{"tx_bytes", IBMVNIC_STAT_OFF(tx_bytes)},
	{"ucast_tx_packets", IBMVNIC_STAT_OFF(ucast_tx_packets)},
	{"ucast_rx_packets", IBMVNIC_STAT_OFF(ucast_rx_packets)},
	{"mcast_tx_packets", IBMVNIC_STAT_OFF(mcast_tx_packets)},
	{"mcast_rx_packets", IBMVNIC_STAT_OFF(mcast_rx_packets)},
	{"bcast_tx_packets", IBMVNIC_STAT_OFF(bcast_tx_packets)},
	{"bcast_rx_packets", IBMVNIC_STAT_OFF(bcast_rx_packets)},
	{"align_errors", IBMVNIC_STAT_OFF(align_errors)},
	{"fcs_errors", IBMVNIC_STAT_OFF(fcs_errors)},
	{"single_collision_frames", IBMVNIC_STAT_OFF(single_collision_frames)},
	{"multi_collision_frames", IBMVNIC_STAT_OFF(multi_collision_frames)},
	{"sqe_test_errors", IBMVNIC_STAT_OFF(sqe_test_errors)},
	{"deferred_tx", IBMVNIC_STAT_OFF(deferred_tx)},
	{"late_collisions", IBMVNIC_STAT_OFF(late_collisions)},
	{"excess_collisions", IBMVNIC_STAT_OFF(excess_collisions)},
	{"internal_mac_tx_errors", IBMVNIC_STAT_OFF(internal_mac_tx_errors)},
	{"carrier_sense", IBMVNIC_STAT_OFF(carrier_sense)},
	{"too_long_frames", IBMVNIC_STAT_OFF(too_long_frames)},
	{"internal_mac_rx_errors", IBMVNIC_STAT_OFF(internal_mac_rx_errors)},
};

static long h_reg_sub_crq(unsigned long unit_address, unsigned long token,
			  unsigned long length, unsigned long *number,
			  unsigned long *irq)
{
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	long rc;

	rc = plpar_hcall(H_REG_SUB_CRQ, retbuf, unit_address, token, length);
	*number = retbuf[0];
	*irq = retbuf[1];

	return rc;
}

static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb, int size)
{
	struct device *dev = &adapter->vdev->dev;
	int rc;

	ltb->size = size;
	ltb->buff = dma_alloc_coherent(dev, ltb->size, &ltb->addr,
				       GFP_KERNEL);

	if (!ltb->buff) {
		dev_err(dev, "Couldn't alloc long term buffer\n");
		return -ENOMEM;
	}
	ltb->map_id = adapter->map_id;
	adapter->map_id++;

	init_completion(&adapter->fw_done);
	rc = send_request_map(adapter, ltb->addr,
			      ltb->size, ltb->map_id);
	if (rc) {
		dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
		return rc;
	}
	wait_for_completion(&adapter->fw_done);

	if (adapter->fw_done_rc) {
		dev_err(dev, "Couldn't map long term buffer, rc = %d\n",
			adapter->fw_done_rc);
		dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
		return -1;
	}
	return 0;
}

static void free_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb)
{
	struct device *dev = &adapter->vdev->dev;

	if (!ltb->buff)
		return;

	if (adapter->reset_reason != VNIC_RESET_FAILOVER &&
	    adapter->reset_reason != VNIC_RESET_MOBILITY)
		send_request_unmap(adapter, ltb->map_id);
	dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
}

static int reset_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb)
{
	int rc;

	memset(ltb->buff, 0, ltb->size);

	init_completion(&adapter->fw_done);
	rc = send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id);
	if (rc)
		return rc;
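	/* The map request is answered over the CRQ; the completion below
	 * is signalled when that response arrives, and the firmware return
	 * code is recorded in adapter->fw_done_rc.
	 */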
	wait_for_completion(&adapter->fw_done);

	if (adapter->fw_done_rc) {
		dev_info(&adapter->vdev->dev,
			 "Reset failed, attempting to free and reallocate buffer\n");
		free_long_term_buff(adapter, ltb);
		return alloc_long_term_buff(adapter, ltb, ltb->size);
	}
	return 0;
}

static void deactivate_rx_pools(struct ibmvnic_adapter *adapter)
{
	int i;

	for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	     i++)
		adapter->rx_pool[i].active = 0;
}

static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
			      struct ibmvnic_rx_pool *pool)
{
	int count = pool->size - atomic_read(&pool->available);
	struct device *dev = &adapter->vdev->dev;
	int buffers_added = 0;
	unsigned long lpar_rc;
	union sub_crq sub_crq;
	struct sk_buff *skb;
	unsigned int offset;
	dma_addr_t dma_addr;
	unsigned char *dst;
	u64 *handle_array;
	int shift = 0;
	int index;
	int i;

	if (!pool->active)
		return;

	handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
			       be32_to_cpu(adapter->login_rsp_buf->
					   off_rxadd_subcrqs));

	for (i = 0; i < count; ++i) {
		skb = alloc_skb(pool->buff_size, GFP_ATOMIC);
		if (!skb) {
			dev_err(dev, "Couldn't replenish rx buff\n");
			adapter->replenish_no_mem++;
			break;
		}

		index = pool->free_map[pool->next_free];

		if (pool->rx_buff[index].skb)
			dev_err(dev, "Inconsistent free_map!\n");

		/* Copy the skb to the long term mapped DMA buffer */
		offset = index * pool->buff_size;
		dst = pool->long_term_buff.buff + offset;
		memset(dst, 0, pool->buff_size);
		dma_addr = pool->long_term_buff.addr + offset;
		pool->rx_buff[index].data = dst;

		pool->free_map[pool->next_free] = IBMVNIC_INVALID_MAP;
		pool->rx_buff[index].dma = dma_addr;
		pool->rx_buff[index].skb = skb;
		pool->rx_buff[index].pool_index = pool->index;
		pool->rx_buff[index].size = pool->buff_size;

		memset(&sub_crq, 0, sizeof(sub_crq));
		sub_crq.rx_add.first = IBMVNIC_CRQ_CMD;
		sub_crq.rx_add.correlator =
		    cpu_to_be64((u64)&pool->rx_buff[index]);
		sub_crq.rx_add.ioba = cpu_to_be32(dma_addr);
		sub_crq.rx_add.map_id = pool->long_term_buff.map_id;

		/* The length field of the sCRQ is defined to be 24 bits so the
		 * buffer size needs to be left shifted by a byte before it is
		 * converted to big endian to prevent the last byte from being
		 * truncated.
		 */
#ifdef __LITTLE_ENDIAN__
		shift = 8;
#endif
		sub_crq.rx_add.len = cpu_to_be32(pool->buff_size << shift);

		lpar_rc = send_subcrq(adapter, handle_array[pool->index],
				      &sub_crq);
		if (lpar_rc != H_SUCCESS)
			goto failure;

		buffers_added++;
		adapter->replenish_add_buff_success++;
		pool->next_free = (pool->next_free + 1) % pool->size;
	}
	atomic_add(buffers_added, &pool->available);
	return;

failure:
	dev_info(dev, "replenish pools failure\n");
	pool->free_map[pool->next_free] = index;
	pool->rx_buff[index].skb = NULL;

	dev_kfree_skb_any(skb);
	adapter->replenish_add_buff_failure++;
	atomic_add(buffers_added, &pool->available);

	if (lpar_rc == H_CLOSED || adapter->failover_pending) {
		/* Disable buffer pool replenishment and report carrier off if
		 * queue is closed or pending failover.
		 * Firmware guarantees that a signal will be sent to the
		 * driver, triggering a reset.
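		 * The reset itself is then carried out asynchronously by the
		 * __ibmvnic_reset() worker.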
		 */
		deactivate_rx_pools(adapter);
		netif_carrier_off(adapter->netdev);
	}
}

static void replenish_pools(struct ibmvnic_adapter *adapter)
{
	int i;

	adapter->replenish_task_cycles++;
	for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	     i++) {
		if (adapter->rx_pool[i].active)
			replenish_rx_pool(adapter, &adapter->rx_pool[i]);
	}
}

static void release_stats_buffers(struct ibmvnic_adapter *adapter)
{
	kfree(adapter->tx_stats_buffers);
	kfree(adapter->rx_stats_buffers);
	adapter->tx_stats_buffers = NULL;
	adapter->rx_stats_buffers = NULL;
}

static int init_stats_buffers(struct ibmvnic_adapter *adapter)
{
	adapter->tx_stats_buffers =
				kcalloc(IBMVNIC_MAX_QUEUES,
					sizeof(struct ibmvnic_tx_queue_stats),
					GFP_KERNEL);
	if (!adapter->tx_stats_buffers)
		return -ENOMEM;

	adapter->rx_stats_buffers =
				kcalloc(IBMVNIC_MAX_QUEUES,
					sizeof(struct ibmvnic_rx_queue_stats),
					GFP_KERNEL);
	if (!adapter->rx_stats_buffers)
		return -ENOMEM;

	return 0;
}

static void release_stats_token(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;

	if (!adapter->stats_token)
		return;

	dma_unmap_single(dev, adapter->stats_token,
			 sizeof(struct ibmvnic_statistics),
			 DMA_FROM_DEVICE);
	adapter->stats_token = 0;
}

static int init_stats_token(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	dma_addr_t stok;

	stok = dma_map_single(dev, &adapter->stats,
			      sizeof(struct ibmvnic_statistics),
			      DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, stok)) {
		dev_err(dev, "Couldn't map stats buffer\n");
		return -1;
	}

	adapter->stats_token = stok;
	netdev_dbg(adapter->netdev, "Stats token initialized (%llx)\n", stok);
	return 0;
}

static int reset_rx_pools(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_rx_pool *rx_pool;
	int rx_scrqs;
	int i, j, rc;
	u64 *size_array;

	size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
		be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));

	rx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	for (i = 0; i < rx_scrqs; i++) {
		rx_pool = &adapter->rx_pool[i];

		netdev_dbg(adapter->netdev, "Re-setting rx_pool[%d]\n", i);

		if (rx_pool->buff_size != be64_to_cpu(size_array[i])) {
			free_long_term_buff(adapter, &rx_pool->long_term_buff);
			rx_pool->buff_size = be64_to_cpu(size_array[i]);
			rc = alloc_long_term_buff(adapter,
						  &rx_pool->long_term_buff,
						  rx_pool->size *
						  rx_pool->buff_size);
		} else {
			rc = reset_long_term_buff(adapter,
						  &rx_pool->long_term_buff);
		}

		if (rc)
			return rc;

		for (j = 0; j < rx_pool->size; j++)
			rx_pool->free_map[j] = j;

		memset(rx_pool->rx_buff, 0,
		       rx_pool->size * sizeof(struct ibmvnic_rx_buff));

		atomic_set(&rx_pool->available, 0);
		rx_pool->next_alloc = 0;
		rx_pool->next_free = 0;
		rx_pool->active = 1;
	}

	return 0;
}

static void release_rx_pools(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_rx_pool *rx_pool;
	int i, j;

	if (!adapter->rx_pool)
		return;

	for (i = 0; i < adapter->num_active_rx_pools; i++) {
		rx_pool = &adapter->rx_pool[i];

		netdev_dbg(adapter->netdev, "Releasing rx_pool[%d]\n", i);

		kfree(rx_pool->free_map);
		free_long_term_buff(adapter, &rx_pool->long_term_buff);

		if (!rx_pool->rx_buff)
			continue;

		for (j = 0; j < rx_pool->size; j++) {
			if (rx_pool->rx_buff[j].skb) {
				dev_kfree_skb_any(rx_pool->rx_buff[j].skb);
				rx_pool->rx_buff[j].skb = NULL;
			}
		}

		kfree(rx_pool->rx_buff);
	}

	kfree(adapter->rx_pool);
	adapter->rx_pool = NULL;
	adapter->num_active_rx_pools = 0;
}

static int init_rx_pools(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_rx_pool *rx_pool;
	int rxadd_subcrqs;
	u64 *size_array;
	int i, j;

	rxadd_subcrqs =
		be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
		be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));

	adapter->rx_pool = kcalloc(rxadd_subcrqs,
				   sizeof(struct ibmvnic_rx_pool),
				   GFP_KERNEL);
	if (!adapter->rx_pool) {
		dev_err(dev, "Failed to allocate rx pools\n");
		return -1;
	}

	adapter->num_active_rx_pools = rxadd_subcrqs;

	for (i = 0; i < rxadd_subcrqs; i++) {
		rx_pool = &adapter->rx_pool[i];

		netdev_dbg(adapter->netdev,
			   "Initializing rx_pool[%d], %lld buffs, %lld bytes each\n",
			   i, adapter->req_rx_add_entries_per_subcrq,
			   be64_to_cpu(size_array[i]));

		rx_pool->size = adapter->req_rx_add_entries_per_subcrq;
		rx_pool->index = i;
		rx_pool->buff_size = be64_to_cpu(size_array[i]);
		rx_pool->active = 1;

		rx_pool->free_map = kcalloc(rx_pool->size, sizeof(int),
					    GFP_KERNEL);
		if (!rx_pool->free_map) {
			release_rx_pools(adapter);
			return -1;
		}

		rx_pool->rx_buff = kcalloc(rx_pool->size,
					   sizeof(struct ibmvnic_rx_buff),
					   GFP_KERNEL);
		if (!rx_pool->rx_buff) {
			dev_err(dev, "Couldn't alloc rx buffers\n");
			release_rx_pools(adapter);
			return -1;
		}

		if (alloc_long_term_buff(adapter, &rx_pool->long_term_buff,
					 rx_pool->size * rx_pool->buff_size)) {
			release_rx_pools(adapter);
			return -1;
		}

		for (j = 0; j < rx_pool->size; ++j)
			rx_pool->free_map[j] = j;

		atomic_set(&rx_pool->available, 0);
		rx_pool->next_alloc = 0;
		rx_pool->next_free = 0;
	}

	return 0;
}

static int reset_one_tx_pool(struct ibmvnic_adapter *adapter,
			     struct ibmvnic_tx_pool *tx_pool)
{
	int rc, i;

	rc = reset_long_term_buff(adapter, &tx_pool->long_term_buff);
	if (rc)
		return rc;

	memset(tx_pool->tx_buff, 0,
	       tx_pool->num_buffers *
	       sizeof(struct ibmvnic_tx_buff));

	for (i = 0; i < tx_pool->num_buffers; i++)
		tx_pool->free_map[i] = i;

	tx_pool->consumer_index = 0;
	tx_pool->producer_index = 0;

	return 0;
}

static int reset_tx_pools(struct ibmvnic_adapter *adapter)
{
	int tx_scrqs;
	int i, rc;

	tx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
	for (i = 0; i < tx_scrqs; i++) {
		rc = reset_one_tx_pool(adapter, &adapter->tso_pool[i]);
		if (rc)
			return rc;
		rc = reset_one_tx_pool(adapter, &adapter->tx_pool[i]);
		if (rc)
			return rc;
	}

	return 0;
}

static void release_vpd_data(struct ibmvnic_adapter *adapter)
{
	if (!adapter->vpd)
		return;

	kfree(adapter->vpd->buff);
	kfree(adapter->vpd);

	adapter->vpd = NULL;
}

static void release_one_tx_pool(struct ibmvnic_adapter *adapter,
				struct ibmvnic_tx_pool *tx_pool)
{
	kfree(tx_pool->tx_buff);
	kfree(tx_pool->free_map);
	free_long_term_buff(adapter, &tx_pool->long_term_buff);
}

static void release_tx_pools(struct ibmvnic_adapter *adapter)
{
	int i;

	if (!adapter->tx_pool)
		return;

	for (i = 0; i < adapter->num_active_tx_pools; i++) {
		release_one_tx_pool(adapter, &adapter->tx_pool[i]);
		release_one_tx_pool(adapter, &adapter->tso_pool[i]);
	}

	kfree(adapter->tx_pool);
	adapter->tx_pool = NULL;
	kfree(adapter->tso_pool);
	adapter->tso_pool = NULL;
	adapter->num_active_tx_pools = 0;
}

static int init_one_tx_pool(struct net_device *netdev,
			    struct ibmvnic_tx_pool *tx_pool,
			    int num_entries, int buf_size)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int i;

	tx_pool->tx_buff = kcalloc(num_entries,
				   sizeof(struct ibmvnic_tx_buff),
				   GFP_KERNEL);
	if (!tx_pool->tx_buff)
		return -1;

	if (alloc_long_term_buff(adapter, &tx_pool->long_term_buff,
				 num_entries * buf_size))
		return -1;

	tx_pool->free_map = kcalloc(num_entries, sizeof(int), GFP_KERNEL);
	if (!tx_pool->free_map)
		return -1;

	for (i = 0; i < num_entries; i++)
		tx_pool->free_map[i] = i;

	tx_pool->consumer_index = 0;
	tx_pool->producer_index = 0;
	tx_pool->num_buffers = num_entries;
	tx_pool->buf_size = buf_size;

	return 0;
}

static int init_tx_pools(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int tx_subcrqs;
	int i, rc;

	tx_subcrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
	adapter->tx_pool = kcalloc(tx_subcrqs,
				   sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
	if (!adapter->tx_pool)
		return -1;

	adapter->tso_pool = kcalloc(tx_subcrqs,
				    sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
	if (!adapter->tso_pool)
		return -1;

	adapter->num_active_tx_pools = tx_subcrqs;

	for (i = 0; i < tx_subcrqs; i++) {
		rc = init_one_tx_pool(netdev, &adapter->tx_pool[i],
				      adapter->req_tx_entries_per_subcrq,
				      adapter->req_mtu + VLAN_HLEN);
		if (rc) {
			release_tx_pools(adapter);
			return rc;
		}

		rc = init_one_tx_pool(netdev, &adapter->tso_pool[i],
				      IBMVNIC_TSO_BUFS,
				      IBMVNIC_TSO_BUF_SZ);
		if (rc) {
			release_tx_pools(adapter);
			return rc;
		}
	}

	return 0;
}

static void release_error_buffers(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_error_buff *error_buff, *tmp;
	unsigned long flags;

	spin_lock_irqsave(&adapter->error_list_lock, flags);
	list_for_each_entry_safe(error_buff, tmp, &adapter->errors, list) {
		list_del(&error_buff->list);
		dma_unmap_single(dev, error_buff->dma, error_buff->len,
				 DMA_FROM_DEVICE);
		kfree(error_buff->buff);
		kfree(error_buff);
	}
	spin_unlock_irqrestore(&adapter->error_list_lock, flags);
}

static void ibmvnic_napi_enable(struct ibmvnic_adapter *adapter)
{
	int i;

	if (adapter->napi_enabled)
		return;

	for (i = 0; i < adapter->req_rx_queues; i++)
		napi_enable(&adapter->napi[i]);

	adapter->napi_enabled = true;
}

static void ibmvnic_napi_disable(struct ibmvnic_adapter *adapter)
{
	int i;

	if (!adapter->napi_enabled)
		return;

	for (i = 0; i < adapter->req_rx_queues; i++) {
		netdev_dbg(adapter->netdev, "Disabling napi[%d]\n",
i); 759 napi_disable(&adapter->napi[i]); 760 } 761 762 adapter->napi_enabled = false; 763 } 764 765 static int init_napi(struct ibmvnic_adapter *adapter) 766 { 767 int i; 768 769 adapter->napi = kcalloc(adapter->req_rx_queues, 770 sizeof(struct napi_struct), GFP_KERNEL); 771 if (!adapter->napi) 772 return -ENOMEM; 773 774 for (i = 0; i < adapter->req_rx_queues; i++) { 775 netdev_dbg(adapter->netdev, "Adding napi[%d]\n", i); 776 netif_napi_add(adapter->netdev, &adapter->napi[i], 777 ibmvnic_poll, NAPI_POLL_WEIGHT); 778 } 779 780 adapter->num_active_rx_napi = adapter->req_rx_queues; 781 return 0; 782 } 783 784 static void release_napi(struct ibmvnic_adapter *adapter) 785 { 786 int i; 787 788 if (!adapter->napi) 789 return; 790 791 for (i = 0; i < adapter->num_active_rx_napi; i++) { 792 if (&adapter->napi[i]) { 793 netdev_dbg(adapter->netdev, 794 "Releasing napi[%d]\n", i); 795 netif_napi_del(&adapter->napi[i]); 796 } 797 } 798 799 kfree(adapter->napi); 800 adapter->napi = NULL; 801 adapter->num_active_rx_napi = 0; 802 adapter->napi_enabled = false; 803 } 804 805 static int ibmvnic_login(struct net_device *netdev) 806 { 807 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 808 unsigned long timeout = msecs_to_jiffies(30000); 809 int retry_count = 0; 810 bool retry; 811 int rc; 812 813 do { 814 retry = false; 815 if (retry_count > IBMVNIC_MAX_QUEUES) { 816 netdev_warn(netdev, "Login attempts exceeded\n"); 817 return -1; 818 } 819 820 adapter->init_done_rc = 0; 821 reinit_completion(&adapter->init_done); 822 rc = send_login(adapter); 823 if (rc) { 824 netdev_warn(netdev, "Unable to login\n"); 825 return rc; 826 } 827 828 if (!wait_for_completion_timeout(&adapter->init_done, 829 timeout)) { 830 netdev_warn(netdev, "Login timed out\n"); 831 return -1; 832 } 833 834 if (adapter->init_done_rc == PARTIALSUCCESS) { 835 retry_count++; 836 release_sub_crqs(adapter, 1); 837 838 retry = true; 839 netdev_dbg(netdev, 840 "Received partial success, retrying...\n"); 841 adapter->init_done_rc = 0; 842 reinit_completion(&adapter->init_done); 843 send_cap_queries(adapter); 844 if (!wait_for_completion_timeout(&adapter->init_done, 845 timeout)) { 846 netdev_warn(netdev, 847 "Capabilities query timed out\n"); 848 return -1; 849 } 850 851 rc = init_sub_crqs(adapter); 852 if (rc) { 853 netdev_warn(netdev, 854 "SCRQ initialization failed\n"); 855 return -1; 856 } 857 858 rc = init_sub_crq_irqs(adapter); 859 if (rc) { 860 netdev_warn(netdev, 861 "SCRQ irq initialization failed\n"); 862 return -1; 863 } 864 } else if (adapter->init_done_rc) { 865 netdev_warn(netdev, "Adapter login failed\n"); 866 return -1; 867 } 868 } while (retry); 869 870 /* handle pending MAC address changes after successful login */ 871 if (adapter->mac_change_pending) { 872 __ibmvnic_set_mac(netdev, &adapter->desired.mac); 873 adapter->mac_change_pending = false; 874 } 875 876 return 0; 877 } 878 879 static void release_login_buffer(struct ibmvnic_adapter *adapter) 880 { 881 kfree(adapter->login_buf); 882 adapter->login_buf = NULL; 883 } 884 885 static void release_login_rsp_buffer(struct ibmvnic_adapter *adapter) 886 { 887 kfree(adapter->login_rsp_buf); 888 adapter->login_rsp_buf = NULL; 889 } 890 891 static void release_resources(struct ibmvnic_adapter *adapter) 892 { 893 release_vpd_data(adapter); 894 895 release_tx_pools(adapter); 896 release_rx_pools(adapter); 897 898 release_error_buffers(adapter); 899 release_napi(adapter); 900 release_login_rsp_buffer(adapter); 901 } 902 903 static int set_link_state(struct ibmvnic_adapter *adapter, 
			  u8 link_state)
{
	struct net_device *netdev = adapter->netdev;
	unsigned long timeout = msecs_to_jiffies(30000);
	union ibmvnic_crq crq;
	bool resend;
	int rc;

	netdev_dbg(netdev, "setting link state %d\n", link_state);

	memset(&crq, 0, sizeof(crq));
	crq.logical_link_state.first = IBMVNIC_CRQ_CMD;
	crq.logical_link_state.cmd = LOGICAL_LINK_STATE;
	crq.logical_link_state.link_state = link_state;

	do {
		resend = false;

		reinit_completion(&adapter->init_done);
		rc = ibmvnic_send_crq(adapter, &crq);
		if (rc) {
			netdev_err(netdev, "Failed to set link state\n");
			return rc;
		}

		if (!wait_for_completion_timeout(&adapter->init_done,
						 timeout)) {
			netdev_err(netdev, "timeout setting link state\n");
			return -1;
		}

		if (adapter->init_done_rc == 1) {
			/* Partial success, delay and re-send */
			mdelay(1000);
			resend = true;
		} else if (adapter->init_done_rc) {
			netdev_warn(netdev, "Unable to set link state, rc=%d\n",
				    adapter->init_done_rc);
			return adapter->init_done_rc;
		}
	} while (resend);

	return 0;
}

static int set_real_num_queues(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int rc;

	netdev_dbg(netdev, "Setting real tx/rx queues (%llx/%llx)\n",
		   adapter->req_tx_queues, adapter->req_rx_queues);

	rc = netif_set_real_num_tx_queues(netdev, adapter->req_tx_queues);
	if (rc) {
		netdev_err(netdev, "failed to set the number of tx queues\n");
		return rc;
	}

	rc = netif_set_real_num_rx_queues(netdev, adapter->req_rx_queues);
	if (rc)
		netdev_err(netdev, "failed to set the number of rx queues\n");

	return rc;
}

static int ibmvnic_get_vpd(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	union ibmvnic_crq crq;
	int len = 0;
	int rc;

	if (adapter->vpd->buff)
		len = adapter->vpd->len;

	init_completion(&adapter->fw_done);
	crq.get_vpd_size.first = IBMVNIC_CRQ_CMD;
	crq.get_vpd_size.cmd = GET_VPD_SIZE;
	rc = ibmvnic_send_crq(adapter, &crq);
	if (rc)
		return rc;
	wait_for_completion(&adapter->fw_done);

	if (!adapter->vpd->len)
		return -ENODATA;

	if (!adapter->vpd->buff)
		adapter->vpd->buff = kzalloc(adapter->vpd->len, GFP_KERNEL);
	else if (adapter->vpd->len != len)
		adapter->vpd->buff =
			krealloc(adapter->vpd->buff,
				 adapter->vpd->len, GFP_KERNEL);

	if (!adapter->vpd->buff) {
		dev_err(dev, "Could not allocate VPD buffer\n");
		return -ENOMEM;
	}

	adapter->vpd->dma_addr =
		dma_map_single(dev, adapter->vpd->buff, adapter->vpd->len,
			       DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, adapter->vpd->dma_addr)) {
		dev_err(dev, "Could not map VPD buffer\n");
		kfree(adapter->vpd->buff);
		adapter->vpd->buff = NULL;
		return -ENOMEM;
	}

	reinit_completion(&adapter->fw_done);
	crq.get_vpd.first = IBMVNIC_CRQ_CMD;
	crq.get_vpd.cmd = GET_VPD;
	crq.get_vpd.ioba = cpu_to_be32(adapter->vpd->dma_addr);
	crq.get_vpd.len = cpu_to_be32((u32)adapter->vpd->len);
	rc = ibmvnic_send_crq(adapter, &crq);
	if (rc) {
		kfree(adapter->vpd->buff);
		adapter->vpd->buff = NULL;
		return rc;
	}
	wait_for_completion(&adapter->fw_done);

	return 0;
}

static int init_resources(struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
1031 int rc; 1032 1033 rc = set_real_num_queues(netdev); 1034 if (rc) 1035 return rc; 1036 1037 adapter->vpd = kzalloc(sizeof(*adapter->vpd), GFP_KERNEL); 1038 if (!adapter->vpd) 1039 return -ENOMEM; 1040 1041 /* Vital Product Data (VPD) */ 1042 rc = ibmvnic_get_vpd(adapter); 1043 if (rc) { 1044 netdev_err(netdev, "failed to initialize Vital Product Data (VPD)\n"); 1045 return rc; 1046 } 1047 1048 adapter->map_id = 1; 1049 1050 rc = init_napi(adapter); 1051 if (rc) 1052 return rc; 1053 1054 send_map_query(adapter); 1055 1056 rc = init_rx_pools(netdev); 1057 if (rc) 1058 return rc; 1059 1060 rc = init_tx_pools(netdev); 1061 return rc; 1062 } 1063 1064 static int __ibmvnic_open(struct net_device *netdev) 1065 { 1066 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 1067 enum vnic_state prev_state = adapter->state; 1068 int i, rc; 1069 1070 adapter->state = VNIC_OPENING; 1071 replenish_pools(adapter); 1072 ibmvnic_napi_enable(adapter); 1073 1074 /* We're ready to receive frames, enable the sub-crq interrupts and 1075 * set the logical link state to up 1076 */ 1077 for (i = 0; i < adapter->req_rx_queues; i++) { 1078 netdev_dbg(netdev, "Enabling rx_scrq[%d] irq\n", i); 1079 if (prev_state == VNIC_CLOSED) 1080 enable_irq(adapter->rx_scrq[i]->irq); 1081 enable_scrq_irq(adapter, adapter->rx_scrq[i]); 1082 } 1083 1084 for (i = 0; i < adapter->req_tx_queues; i++) { 1085 netdev_dbg(netdev, "Enabling tx_scrq[%d] irq\n", i); 1086 if (prev_state == VNIC_CLOSED) 1087 enable_irq(adapter->tx_scrq[i]->irq); 1088 enable_scrq_irq(adapter, adapter->tx_scrq[i]); 1089 } 1090 1091 rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_UP); 1092 if (rc) { 1093 for (i = 0; i < adapter->req_rx_queues; i++) 1094 napi_disable(&adapter->napi[i]); 1095 release_resources(adapter); 1096 return rc; 1097 } 1098 1099 netif_tx_start_all_queues(netdev); 1100 1101 if (prev_state == VNIC_CLOSED) { 1102 for (i = 0; i < adapter->req_rx_queues; i++) 1103 napi_schedule(&adapter->napi[i]); 1104 } 1105 1106 adapter->state = VNIC_OPEN; 1107 return rc; 1108 } 1109 1110 static int ibmvnic_open(struct net_device *netdev) 1111 { 1112 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 1113 int rc; 1114 1115 /* If device failover is pending, just set device state and return. 1116 * Device operation will be handled by reset routine. 
1117 */ 1118 if (adapter->failover_pending) { 1119 adapter->state = VNIC_OPEN; 1120 return 0; 1121 } 1122 1123 mutex_lock(&adapter->reset_lock); 1124 1125 if (adapter->state != VNIC_CLOSED) { 1126 rc = ibmvnic_login(netdev); 1127 if (rc) { 1128 mutex_unlock(&adapter->reset_lock); 1129 return rc; 1130 } 1131 1132 rc = init_resources(adapter); 1133 if (rc) { 1134 netdev_err(netdev, "failed to initialize resources\n"); 1135 release_resources(adapter); 1136 mutex_unlock(&adapter->reset_lock); 1137 return rc; 1138 } 1139 } 1140 1141 rc = __ibmvnic_open(netdev); 1142 netif_carrier_on(netdev); 1143 1144 mutex_unlock(&adapter->reset_lock); 1145 1146 return rc; 1147 } 1148 1149 static void clean_rx_pools(struct ibmvnic_adapter *adapter) 1150 { 1151 struct ibmvnic_rx_pool *rx_pool; 1152 struct ibmvnic_rx_buff *rx_buff; 1153 u64 rx_entries; 1154 int rx_scrqs; 1155 int i, j; 1156 1157 if (!adapter->rx_pool) 1158 return; 1159 1160 rx_scrqs = adapter->num_active_rx_pools; 1161 rx_entries = adapter->req_rx_add_entries_per_subcrq; 1162 1163 /* Free any remaining skbs in the rx buffer pools */ 1164 for (i = 0; i < rx_scrqs; i++) { 1165 rx_pool = &adapter->rx_pool[i]; 1166 if (!rx_pool || !rx_pool->rx_buff) 1167 continue; 1168 1169 netdev_dbg(adapter->netdev, "Cleaning rx_pool[%d]\n", i); 1170 for (j = 0; j < rx_entries; j++) { 1171 rx_buff = &rx_pool->rx_buff[j]; 1172 if (rx_buff && rx_buff->skb) { 1173 dev_kfree_skb_any(rx_buff->skb); 1174 rx_buff->skb = NULL; 1175 } 1176 } 1177 } 1178 } 1179 1180 static void clean_one_tx_pool(struct ibmvnic_adapter *adapter, 1181 struct ibmvnic_tx_pool *tx_pool) 1182 { 1183 struct ibmvnic_tx_buff *tx_buff; 1184 u64 tx_entries; 1185 int i; 1186 1187 if (!tx_pool || !tx_pool->tx_buff) 1188 return; 1189 1190 tx_entries = tx_pool->num_buffers; 1191 1192 for (i = 0; i < tx_entries; i++) { 1193 tx_buff = &tx_pool->tx_buff[i]; 1194 if (tx_buff && tx_buff->skb) { 1195 dev_kfree_skb_any(tx_buff->skb); 1196 tx_buff->skb = NULL; 1197 } 1198 } 1199 } 1200 1201 static void clean_tx_pools(struct ibmvnic_adapter *adapter) 1202 { 1203 int tx_scrqs; 1204 int i; 1205 1206 if (!adapter->tx_pool || !adapter->tso_pool) 1207 return; 1208 1209 tx_scrqs = adapter->num_active_tx_pools; 1210 1211 /* Free any remaining skbs in the tx buffer pools */ 1212 for (i = 0; i < tx_scrqs; i++) { 1213 netdev_dbg(adapter->netdev, "Cleaning tx_pool[%d]\n", i); 1214 clean_one_tx_pool(adapter, &adapter->tx_pool[i]); 1215 clean_one_tx_pool(adapter, &adapter->tso_pool[i]); 1216 } 1217 } 1218 1219 static void ibmvnic_disable_irqs(struct ibmvnic_adapter *adapter) 1220 { 1221 struct net_device *netdev = adapter->netdev; 1222 int i; 1223 1224 if (adapter->tx_scrq) { 1225 for (i = 0; i < adapter->req_tx_queues; i++) 1226 if (adapter->tx_scrq[i]->irq) { 1227 netdev_dbg(netdev, 1228 "Disabling tx_scrq[%d] irq\n", i); 1229 disable_scrq_irq(adapter, adapter->tx_scrq[i]); 1230 disable_irq(adapter->tx_scrq[i]->irq); 1231 } 1232 } 1233 1234 if (adapter->rx_scrq) { 1235 for (i = 0; i < adapter->req_rx_queues; i++) { 1236 if (adapter->rx_scrq[i]->irq) { 1237 netdev_dbg(netdev, 1238 "Disabling rx_scrq[%d] irq\n", i); 1239 disable_scrq_irq(adapter, adapter->rx_scrq[i]); 1240 disable_irq(adapter->rx_scrq[i]->irq); 1241 } 1242 } 1243 } 1244 } 1245 1246 static void ibmvnic_cleanup(struct net_device *netdev) 1247 { 1248 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 1249 1250 /* ensure that transmissions are stopped if called by do_reset */ 1251 if (adapter->resetting) 1252 netif_tx_disable(netdev); 1253 else 1254 
netif_tx_stop_all_queues(netdev); 1255 1256 ibmvnic_napi_disable(adapter); 1257 ibmvnic_disable_irqs(adapter); 1258 1259 clean_rx_pools(adapter); 1260 clean_tx_pools(adapter); 1261 } 1262 1263 static int __ibmvnic_close(struct net_device *netdev) 1264 { 1265 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 1266 int rc = 0; 1267 1268 adapter->state = VNIC_CLOSING; 1269 rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN); 1270 if (rc) 1271 return rc; 1272 adapter->state = VNIC_CLOSED; 1273 return 0; 1274 } 1275 1276 static int ibmvnic_close(struct net_device *netdev) 1277 { 1278 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 1279 int rc; 1280 1281 /* If device failover is pending, just set device state and return. 1282 * Device operation will be handled by reset routine. 1283 */ 1284 if (adapter->failover_pending) { 1285 adapter->state = VNIC_CLOSED; 1286 return 0; 1287 } 1288 1289 mutex_lock(&adapter->reset_lock); 1290 rc = __ibmvnic_close(netdev); 1291 ibmvnic_cleanup(netdev); 1292 mutex_unlock(&adapter->reset_lock); 1293 1294 return rc; 1295 } 1296 1297 /** 1298 * build_hdr_data - creates L2/L3/L4 header data buffer 1299 * @hdr_field - bitfield determining needed headers 1300 * @skb - socket buffer 1301 * @hdr_len - array of header lengths 1302 * @tot_len - total length of data 1303 * 1304 * Reads hdr_field to determine which headers are needed by firmware. 1305 * Builds a buffer containing these headers. Saves individual header 1306 * lengths and total buffer length to be used to build descriptors. 1307 */ 1308 static int build_hdr_data(u8 hdr_field, struct sk_buff *skb, 1309 int *hdr_len, u8 *hdr_data) 1310 { 1311 int len = 0; 1312 u8 *hdr; 1313 1314 if (skb_vlan_tagged(skb) && !skb_vlan_tag_present(skb)) 1315 hdr_len[0] = sizeof(struct vlan_ethhdr); 1316 else 1317 hdr_len[0] = sizeof(struct ethhdr); 1318 1319 if (skb->protocol == htons(ETH_P_IP)) { 1320 hdr_len[1] = ip_hdr(skb)->ihl * 4; 1321 if (ip_hdr(skb)->protocol == IPPROTO_TCP) 1322 hdr_len[2] = tcp_hdrlen(skb); 1323 else if (ip_hdr(skb)->protocol == IPPROTO_UDP) 1324 hdr_len[2] = sizeof(struct udphdr); 1325 } else if (skb->protocol == htons(ETH_P_IPV6)) { 1326 hdr_len[1] = sizeof(struct ipv6hdr); 1327 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) 1328 hdr_len[2] = tcp_hdrlen(skb); 1329 else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP) 1330 hdr_len[2] = sizeof(struct udphdr); 1331 } else if (skb->protocol == htons(ETH_P_ARP)) { 1332 hdr_len[1] = arp_hdr_len(skb->dev); 1333 hdr_len[2] = 0; 1334 } 1335 1336 memset(hdr_data, 0, 120); 1337 if ((hdr_field >> 6) & 1) { 1338 hdr = skb_mac_header(skb); 1339 memcpy(hdr_data, hdr, hdr_len[0]); 1340 len += hdr_len[0]; 1341 } 1342 1343 if ((hdr_field >> 5) & 1) { 1344 hdr = skb_network_header(skb); 1345 memcpy(hdr_data + len, hdr, hdr_len[1]); 1346 len += hdr_len[1]; 1347 } 1348 1349 if ((hdr_field >> 4) & 1) { 1350 hdr = skb_transport_header(skb); 1351 memcpy(hdr_data + len, hdr, hdr_len[2]); 1352 len += hdr_len[2]; 1353 } 1354 return len; 1355 } 1356 1357 /** 1358 * create_hdr_descs - create header and header extension descriptors 1359 * @hdr_field - bitfield determining needed headers 1360 * @data - buffer containing header data 1361 * @len - length of data buffer 1362 * @hdr_len - array of individual header lengths 1363 * @scrq_arr - descriptor array 1364 * 1365 * Creates header and, if needed, header extension descriptors and 1366 * places them in a descriptor array, scrq_arr 1367 */ 1368 1369 static int create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len, 1370 
union sub_crq *scrq_arr) 1371 { 1372 union sub_crq hdr_desc; 1373 int tmp_len = len; 1374 int num_descs = 0; 1375 u8 *data, *cur; 1376 int tmp; 1377 1378 while (tmp_len > 0) { 1379 cur = hdr_data + len - tmp_len; 1380 1381 memset(&hdr_desc, 0, sizeof(hdr_desc)); 1382 if (cur != hdr_data) { 1383 data = hdr_desc.hdr_ext.data; 1384 tmp = tmp_len > 29 ? 29 : tmp_len; 1385 hdr_desc.hdr_ext.first = IBMVNIC_CRQ_CMD; 1386 hdr_desc.hdr_ext.type = IBMVNIC_HDR_EXT_DESC; 1387 hdr_desc.hdr_ext.len = tmp; 1388 } else { 1389 data = hdr_desc.hdr.data; 1390 tmp = tmp_len > 24 ? 24 : tmp_len; 1391 hdr_desc.hdr.first = IBMVNIC_CRQ_CMD; 1392 hdr_desc.hdr.type = IBMVNIC_HDR_DESC; 1393 hdr_desc.hdr.len = tmp; 1394 hdr_desc.hdr.l2_len = (u8)hdr_len[0]; 1395 hdr_desc.hdr.l3_len = cpu_to_be16((u16)hdr_len[1]); 1396 hdr_desc.hdr.l4_len = (u8)hdr_len[2]; 1397 hdr_desc.hdr.flag = hdr_field << 1; 1398 } 1399 memcpy(data, cur, tmp); 1400 tmp_len -= tmp; 1401 *scrq_arr = hdr_desc; 1402 scrq_arr++; 1403 num_descs++; 1404 } 1405 1406 return num_descs; 1407 } 1408 1409 /** 1410 * build_hdr_descs_arr - build a header descriptor array 1411 * @skb - socket buffer 1412 * @num_entries - number of descriptors to be sent 1413 * @subcrq - first TX descriptor 1414 * @hdr_field - bit field determining which headers will be sent 1415 * 1416 * This function will build a TX descriptor array with applicable 1417 * L2/L3/L4 packet header descriptors to be sent by send_subcrq_indirect. 1418 */ 1419 1420 static void build_hdr_descs_arr(struct ibmvnic_tx_buff *txbuff, 1421 int *num_entries, u8 hdr_field) 1422 { 1423 int hdr_len[3] = {0, 0, 0}; 1424 int tot_len; 1425 u8 *hdr_data = txbuff->hdr_data; 1426 1427 tot_len = build_hdr_data(hdr_field, txbuff->skb, hdr_len, 1428 txbuff->hdr_data); 1429 *num_entries += create_hdr_descs(hdr_field, hdr_data, tot_len, hdr_len, 1430 txbuff->indir_arr + 1); 1431 } 1432 1433 static int ibmvnic_xmit_workarounds(struct sk_buff *skb, 1434 struct net_device *netdev) 1435 { 1436 /* For some backing devices, mishandling of small packets 1437 * can result in a loss of connection or TX stall. 
Device 1438 * architects recommend that no packet should be smaller 1439 * than the minimum MTU value provided to the driver, so 1440 * pad any packets to that length 1441 */ 1442 if (skb->len < netdev->min_mtu) 1443 return skb_put_padto(skb, netdev->min_mtu); 1444 1445 return 0; 1446 } 1447 1448 static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) 1449 { 1450 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 1451 int queue_num = skb_get_queue_mapping(skb); 1452 u8 *hdrs = (u8 *)&adapter->tx_rx_desc_req; 1453 struct device *dev = &adapter->vdev->dev; 1454 struct ibmvnic_tx_buff *tx_buff = NULL; 1455 struct ibmvnic_sub_crq_queue *tx_scrq; 1456 struct ibmvnic_tx_pool *tx_pool; 1457 unsigned int tx_send_failed = 0; 1458 unsigned int tx_map_failed = 0; 1459 unsigned int tx_dropped = 0; 1460 unsigned int tx_packets = 0; 1461 unsigned int tx_bytes = 0; 1462 dma_addr_t data_dma_addr; 1463 struct netdev_queue *txq; 1464 unsigned long lpar_rc; 1465 union sub_crq tx_crq; 1466 unsigned int offset; 1467 int num_entries = 1; 1468 unsigned char *dst; 1469 u64 *handle_array; 1470 int index = 0; 1471 u8 proto = 0; 1472 int ret = 0; 1473 1474 if (adapter->resetting) { 1475 if (!netif_subqueue_stopped(netdev, skb)) 1476 netif_stop_subqueue(netdev, queue_num); 1477 dev_kfree_skb_any(skb); 1478 1479 tx_send_failed++; 1480 tx_dropped++; 1481 ret = NETDEV_TX_OK; 1482 goto out; 1483 } 1484 1485 if (ibmvnic_xmit_workarounds(skb, netdev)) { 1486 tx_dropped++; 1487 tx_send_failed++; 1488 ret = NETDEV_TX_OK; 1489 goto out; 1490 } 1491 if (skb_is_gso(skb)) 1492 tx_pool = &adapter->tso_pool[queue_num]; 1493 else 1494 tx_pool = &adapter->tx_pool[queue_num]; 1495 1496 tx_scrq = adapter->tx_scrq[queue_num]; 1497 txq = netdev_get_tx_queue(netdev, skb_get_queue_mapping(skb)); 1498 handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) + 1499 be32_to_cpu(adapter->login_rsp_buf->off_txsubm_subcrqs)); 1500 1501 index = tx_pool->free_map[tx_pool->consumer_index]; 1502 1503 if (index == IBMVNIC_INVALID_MAP) { 1504 dev_kfree_skb_any(skb); 1505 tx_send_failed++; 1506 tx_dropped++; 1507 ret = NETDEV_TX_OK; 1508 goto out; 1509 } 1510 1511 tx_pool->free_map[tx_pool->consumer_index] = IBMVNIC_INVALID_MAP; 1512 1513 offset = index * tx_pool->buf_size; 1514 dst = tx_pool->long_term_buff.buff + offset; 1515 memset(dst, 0, tx_pool->buf_size); 1516 data_dma_addr = tx_pool->long_term_buff.addr + offset; 1517 1518 if (skb_shinfo(skb)->nr_frags) { 1519 int cur, i; 1520 1521 /* Copy the head */ 1522 skb_copy_from_linear_data(skb, dst, skb_headlen(skb)); 1523 cur = skb_headlen(skb); 1524 1525 /* Copy the frags */ 1526 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 1527 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 1528 1529 memcpy(dst + cur, 1530 page_address(skb_frag_page(frag)) + 1531 frag->page_offset, skb_frag_size(frag)); 1532 cur += skb_frag_size(frag); 1533 } 1534 } else { 1535 skb_copy_from_linear_data(skb, dst, skb->len); 1536 } 1537 1538 tx_pool->consumer_index = 1539 (tx_pool->consumer_index + 1) % tx_pool->num_buffers; 1540 1541 tx_buff = &tx_pool->tx_buff[index]; 1542 tx_buff->skb = skb; 1543 tx_buff->data_dma[0] = data_dma_addr; 1544 tx_buff->data_len[0] = skb->len; 1545 tx_buff->index = index; 1546 tx_buff->pool_index = queue_num; 1547 tx_buff->last_frag = true; 1548 1549 memset(&tx_crq, 0, sizeof(tx_crq)); 1550 tx_crq.v1.first = IBMVNIC_CRQ_CMD; 1551 tx_crq.v1.type = IBMVNIC_TX_DESC; 1552 tx_crq.v1.n_crq_elem = 1; 1553 tx_crq.v1.n_sge = 1; 1554 tx_crq.v1.flags1 = IBMVNIC_TX_COMP_NEEDED; 1555 
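	/* The correlator identifies this buffer when its completion is
	 * returned; IBMVNIC_TSO_POOL_MASK flags entries allocated from the
	 * TSO pool rather than the regular tx_pool.
	 */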
1556 if (skb_is_gso(skb)) 1557 tx_crq.v1.correlator = 1558 cpu_to_be32(index | IBMVNIC_TSO_POOL_MASK); 1559 else 1560 tx_crq.v1.correlator = cpu_to_be32(index); 1561 tx_crq.v1.dma_reg = cpu_to_be16(tx_pool->long_term_buff.map_id); 1562 tx_crq.v1.sge_len = cpu_to_be32(skb->len); 1563 tx_crq.v1.ioba = cpu_to_be64(data_dma_addr); 1564 1565 if (adapter->vlan_header_insertion) { 1566 tx_crq.v1.flags2 |= IBMVNIC_TX_VLAN_INSERT; 1567 tx_crq.v1.vlan_id = cpu_to_be16(skb->vlan_tci); 1568 } 1569 1570 if (skb->protocol == htons(ETH_P_IP)) { 1571 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV4; 1572 proto = ip_hdr(skb)->protocol; 1573 } else if (skb->protocol == htons(ETH_P_IPV6)) { 1574 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV6; 1575 proto = ipv6_hdr(skb)->nexthdr; 1576 } 1577 1578 if (proto == IPPROTO_TCP) 1579 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_TCP; 1580 else if (proto == IPPROTO_UDP) 1581 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_UDP; 1582 1583 if (skb->ip_summed == CHECKSUM_PARTIAL) { 1584 tx_crq.v1.flags1 |= IBMVNIC_TX_CHKSUM_OFFLOAD; 1585 hdrs += 2; 1586 } 1587 if (skb_is_gso(skb)) { 1588 tx_crq.v1.flags1 |= IBMVNIC_TX_LSO; 1589 tx_crq.v1.mss = cpu_to_be16(skb_shinfo(skb)->gso_size); 1590 hdrs += 2; 1591 } 1592 /* determine if l2/3/4 headers are sent to firmware */ 1593 if ((*hdrs >> 7) & 1) { 1594 build_hdr_descs_arr(tx_buff, &num_entries, *hdrs); 1595 tx_crq.v1.n_crq_elem = num_entries; 1596 tx_buff->num_entries = num_entries; 1597 tx_buff->indir_arr[0] = tx_crq; 1598 tx_buff->indir_dma = dma_map_single(dev, tx_buff->indir_arr, 1599 sizeof(tx_buff->indir_arr), 1600 DMA_TO_DEVICE); 1601 if (dma_mapping_error(dev, tx_buff->indir_dma)) { 1602 dev_kfree_skb_any(skb); 1603 tx_buff->skb = NULL; 1604 if (!firmware_has_feature(FW_FEATURE_CMO)) 1605 dev_err(dev, "tx: unable to map descriptor array\n"); 1606 tx_map_failed++; 1607 tx_dropped++; 1608 ret = NETDEV_TX_OK; 1609 goto tx_err_out; 1610 } 1611 lpar_rc = send_subcrq_indirect(adapter, handle_array[queue_num], 1612 (u64)tx_buff->indir_dma, 1613 (u64)num_entries); 1614 } else { 1615 tx_buff->num_entries = num_entries; 1616 lpar_rc = send_subcrq(adapter, handle_array[queue_num], 1617 &tx_crq); 1618 } 1619 if (lpar_rc != H_SUCCESS) { 1620 dev_err(dev, "tx failed with code %ld\n", lpar_rc); 1621 dev_kfree_skb_any(skb); 1622 tx_buff->skb = NULL; 1623 1624 if (lpar_rc == H_CLOSED || adapter->failover_pending) { 1625 /* Disable TX and report carrier off if queue is closed 1626 * or pending failover. 1627 * Firmware guarantees that a signal will be sent to the 1628 * driver, triggering a reset or some other action. 
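			 * The stopped TX queues are restarted by
			 * __ibmvnic_open() once the adapter has been reset
			 * and reopened.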
1629 */ 1630 netif_tx_stop_all_queues(netdev); 1631 netif_carrier_off(netdev); 1632 } 1633 1634 tx_send_failed++; 1635 tx_dropped++; 1636 ret = NETDEV_TX_OK; 1637 goto tx_err_out; 1638 } 1639 1640 if (atomic_add_return(num_entries, &tx_scrq->used) 1641 >= adapter->req_tx_entries_per_subcrq) { 1642 netdev_dbg(netdev, "Stopping queue %d\n", queue_num); 1643 netif_stop_subqueue(netdev, queue_num); 1644 } 1645 1646 tx_packets++; 1647 tx_bytes += skb->len; 1648 txq->trans_start = jiffies; 1649 ret = NETDEV_TX_OK; 1650 goto out; 1651 1652 tx_err_out: 1653 /* roll back consumer index and map array*/ 1654 if (tx_pool->consumer_index == 0) 1655 tx_pool->consumer_index = 1656 tx_pool->num_buffers - 1; 1657 else 1658 tx_pool->consumer_index--; 1659 tx_pool->free_map[tx_pool->consumer_index] = index; 1660 out: 1661 netdev->stats.tx_dropped += tx_dropped; 1662 netdev->stats.tx_bytes += tx_bytes; 1663 netdev->stats.tx_packets += tx_packets; 1664 adapter->tx_send_failed += tx_send_failed; 1665 adapter->tx_map_failed += tx_map_failed; 1666 adapter->tx_stats_buffers[queue_num].packets += tx_packets; 1667 adapter->tx_stats_buffers[queue_num].bytes += tx_bytes; 1668 adapter->tx_stats_buffers[queue_num].dropped_packets += tx_dropped; 1669 1670 return ret; 1671 } 1672 1673 static void ibmvnic_set_multi(struct net_device *netdev) 1674 { 1675 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 1676 struct netdev_hw_addr *ha; 1677 union ibmvnic_crq crq; 1678 1679 memset(&crq, 0, sizeof(crq)); 1680 crq.request_capability.first = IBMVNIC_CRQ_CMD; 1681 crq.request_capability.cmd = REQUEST_CAPABILITY; 1682 1683 if (netdev->flags & IFF_PROMISC) { 1684 if (!adapter->promisc_supported) 1685 return; 1686 } else { 1687 if (netdev->flags & IFF_ALLMULTI) { 1688 /* Accept all multicast */ 1689 memset(&crq, 0, sizeof(crq)); 1690 crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD; 1691 crq.multicast_ctrl.cmd = MULTICAST_CTRL; 1692 crq.multicast_ctrl.flags = IBMVNIC_ENABLE_ALL; 1693 ibmvnic_send_crq(adapter, &crq); 1694 } else if (netdev_mc_empty(netdev)) { 1695 /* Reject all multicast */ 1696 memset(&crq, 0, sizeof(crq)); 1697 crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD; 1698 crq.multicast_ctrl.cmd = MULTICAST_CTRL; 1699 crq.multicast_ctrl.flags = IBMVNIC_DISABLE_ALL; 1700 ibmvnic_send_crq(adapter, &crq); 1701 } else { 1702 /* Accept one or more multicast(s) */ 1703 netdev_for_each_mc_addr(ha, netdev) { 1704 memset(&crq, 0, sizeof(crq)); 1705 crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD; 1706 crq.multicast_ctrl.cmd = MULTICAST_CTRL; 1707 crq.multicast_ctrl.flags = IBMVNIC_ENABLE_MC; 1708 ether_addr_copy(&crq.multicast_ctrl.mac_addr[0], 1709 ha->addr); 1710 ibmvnic_send_crq(adapter, &crq); 1711 } 1712 } 1713 } 1714 } 1715 1716 static int __ibmvnic_set_mac(struct net_device *netdev, struct sockaddr *p) 1717 { 1718 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 1719 struct sockaddr *addr = p; 1720 union ibmvnic_crq crq; 1721 int rc; 1722 1723 if (!is_valid_ether_addr(addr->sa_data)) 1724 return -EADDRNOTAVAIL; 1725 1726 memset(&crq, 0, sizeof(crq)); 1727 crq.change_mac_addr.first = IBMVNIC_CRQ_CMD; 1728 crq.change_mac_addr.cmd = CHANGE_MAC_ADDR; 1729 ether_addr_copy(&crq.change_mac_addr.mac_addr[0], addr->sa_data); 1730 1731 init_completion(&adapter->fw_done); 1732 rc = ibmvnic_send_crq(adapter, &crq); 1733 if (rc) 1734 return rc; 1735 wait_for_completion(&adapter->fw_done); 1736 /* netdev->dev_addr is changed in handle_change_mac_rsp function */ 1737 return adapter->fw_done_rc ? 
-EIO : 0; 1738 } 1739 1740 static int ibmvnic_set_mac(struct net_device *netdev, void *p) 1741 { 1742 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 1743 struct sockaddr *addr = p; 1744 int rc; 1745 1746 if (adapter->state == VNIC_PROBED) { 1747 memcpy(&adapter->desired.mac, addr, sizeof(struct sockaddr)); 1748 adapter->mac_change_pending = true; 1749 return 0; 1750 } 1751 1752 rc = __ibmvnic_set_mac(netdev, addr); 1753 1754 return rc; 1755 } 1756 1757 /** 1758 * do_reset returns zero if we are able to keep processing reset events, or 1759 * non-zero if we hit a fatal error and must halt. 1760 */ 1761 static int do_reset(struct ibmvnic_adapter *adapter, 1762 struct ibmvnic_rwi *rwi, u32 reset_state) 1763 { 1764 u64 old_num_rx_queues, old_num_tx_queues; 1765 struct net_device *netdev = adapter->netdev; 1766 int i, rc; 1767 1768 netdev_dbg(adapter->netdev, "Re-setting driver (%d)\n", 1769 rwi->reset_reason); 1770 1771 netif_carrier_off(netdev); 1772 adapter->reset_reason = rwi->reset_reason; 1773 1774 old_num_rx_queues = adapter->req_rx_queues; 1775 old_num_tx_queues = adapter->req_tx_queues; 1776 1777 ibmvnic_cleanup(netdev); 1778 1779 if (adapter->reset_reason != VNIC_RESET_MOBILITY && 1780 adapter->reset_reason != VNIC_RESET_FAILOVER) { 1781 rc = __ibmvnic_close(netdev); 1782 if (rc) 1783 return rc; 1784 } 1785 1786 if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM || 1787 adapter->wait_for_reset) { 1788 release_resources(adapter); 1789 release_sub_crqs(adapter, 1); 1790 release_crq_queue(adapter); 1791 } 1792 1793 if (adapter->reset_reason != VNIC_RESET_NON_FATAL) { 1794 /* remove the closed state so when we call open it appears 1795 * we are coming from the probed state. 1796 */ 1797 adapter->state = VNIC_PROBED; 1798 1799 if (adapter->wait_for_reset) { 1800 rc = init_crq_queue(adapter); 1801 } else if (adapter->reset_reason == VNIC_RESET_MOBILITY) { 1802 rc = ibmvnic_reenable_crq_queue(adapter); 1803 release_sub_crqs(adapter, 1); 1804 } else { 1805 rc = ibmvnic_reset_crq(adapter); 1806 if (!rc) 1807 rc = vio_enable_interrupts(adapter->vdev); 1808 } 1809 1810 if (rc) { 1811 netdev_err(adapter->netdev, 1812 "Couldn't initialize crq. rc=%d\n", rc); 1813 return rc; 1814 } 1815 1816 rc = ibmvnic_reset_init(adapter); 1817 if (rc) 1818 return IBMVNIC_INIT_FAILED; 1819 1820 /* If the adapter was in PROBE state prior to the reset, 1821 * exit here. 
1822 */ 1823 if (reset_state == VNIC_PROBED) 1824 return 0; 1825 1826 rc = ibmvnic_login(netdev); 1827 if (rc) { 1828 adapter->state = VNIC_PROBED; 1829 return 0; 1830 } 1831 1832 if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM || 1833 adapter->wait_for_reset) { 1834 rc = init_resources(adapter); 1835 if (rc) 1836 return rc; 1837 } else if (adapter->req_rx_queues != old_num_rx_queues || 1838 adapter->req_tx_queues != old_num_tx_queues) { 1839 adapter->map_id = 1; 1840 release_rx_pools(adapter); 1841 release_tx_pools(adapter); 1842 init_rx_pools(netdev); 1843 init_tx_pools(netdev); 1844 1845 release_napi(adapter); 1846 init_napi(adapter); 1847 } else { 1848 rc = reset_tx_pools(adapter); 1849 if (rc) 1850 return rc; 1851 1852 rc = reset_rx_pools(adapter); 1853 if (rc) 1854 return rc; 1855 } 1856 ibmvnic_disable_irqs(adapter); 1857 } 1858 adapter->state = VNIC_CLOSED; 1859 1860 if (reset_state == VNIC_CLOSED) 1861 return 0; 1862 1863 rc = __ibmvnic_open(netdev); 1864 if (rc) { 1865 if (list_empty(&adapter->rwi_list)) 1866 adapter->state = VNIC_CLOSED; 1867 else 1868 adapter->state = reset_state; 1869 1870 return 0; 1871 } 1872 1873 /* kick napi */ 1874 for (i = 0; i < adapter->req_rx_queues; i++) 1875 napi_schedule(&adapter->napi[i]); 1876 1877 if (adapter->reset_reason != VNIC_RESET_FAILOVER && 1878 adapter->reset_reason != VNIC_RESET_CHANGE_PARAM) 1879 netdev_notify_peers(netdev); 1880 1881 netif_carrier_on(netdev); 1882 1883 return 0; 1884 } 1885 1886 static int do_hard_reset(struct ibmvnic_adapter *adapter, 1887 struct ibmvnic_rwi *rwi, u32 reset_state) 1888 { 1889 struct net_device *netdev = adapter->netdev; 1890 int rc; 1891 1892 netdev_dbg(adapter->netdev, "Hard resetting driver (%d)\n", 1893 rwi->reset_reason); 1894 1895 netif_carrier_off(netdev); 1896 adapter->reset_reason = rwi->reset_reason; 1897 1898 ibmvnic_cleanup(netdev); 1899 release_resources(adapter); 1900 release_sub_crqs(adapter, 0); 1901 release_crq_queue(adapter); 1902 1903 /* remove the closed state so when we call open it appears 1904 * we are coming from the probed state. 1905 */ 1906 adapter->state = VNIC_PROBED; 1907 1908 rc = init_crq_queue(adapter); 1909 if (rc) { 1910 netdev_err(adapter->netdev, 1911 "Couldn't initialize crq. rc=%d\n", rc); 1912 return rc; 1913 } 1914 1915 rc = ibmvnic_init(adapter); 1916 if (rc) 1917 return rc; 1918 1919 /* If the adapter was in PROBE state prior to the reset, 1920 * exit here. 
1921 */ 1922 if (reset_state == VNIC_PROBED) 1923 return 0; 1924 1925 rc = ibmvnic_login(netdev); 1926 if (rc) { 1927 adapter->state = VNIC_PROBED; 1928 return 0; 1929 } 1930 /* netif_set_real_num_xx_queues needs to take rtnl lock here 1931 * unless wait_for_reset is set, in which case the rtnl lock 1932 * has already been taken before initializing the reset 1933 */ 1934 if (!adapter->wait_for_reset) { 1935 rtnl_lock(); 1936 rc = init_resources(adapter); 1937 rtnl_unlock(); 1938 } else { 1939 rc = init_resources(adapter); 1940 } 1941 if (rc) 1942 return rc; 1943 1944 ibmvnic_disable_irqs(adapter); 1945 adapter->state = VNIC_CLOSED; 1946 1947 if (reset_state == VNIC_CLOSED) 1948 return 0; 1949 1950 rc = __ibmvnic_open(netdev); 1951 if (rc) { 1952 if (list_empty(&adapter->rwi_list)) 1953 adapter->state = VNIC_CLOSED; 1954 else 1955 adapter->state = reset_state; 1956 1957 return 0; 1958 } 1959 1960 netif_carrier_on(netdev); 1961 1962 return 0; 1963 } 1964 1965 static struct ibmvnic_rwi *get_next_rwi(struct ibmvnic_adapter *adapter) 1966 { 1967 struct ibmvnic_rwi *rwi; 1968 1969 mutex_lock(&adapter->rwi_lock); 1970 1971 if (!list_empty(&adapter->rwi_list)) { 1972 rwi = list_first_entry(&adapter->rwi_list, struct ibmvnic_rwi, 1973 list); 1974 list_del(&rwi->list); 1975 } else { 1976 rwi = NULL; 1977 } 1978 1979 mutex_unlock(&adapter->rwi_lock); 1980 return rwi; 1981 } 1982 1983 static void free_all_rwi(struct ibmvnic_adapter *adapter) 1984 { 1985 struct ibmvnic_rwi *rwi; 1986 1987 rwi = get_next_rwi(adapter); 1988 while (rwi) { 1989 kfree(rwi); 1990 rwi = get_next_rwi(adapter); 1991 } 1992 } 1993 1994 static void __ibmvnic_reset(struct work_struct *work) 1995 { 1996 struct ibmvnic_rwi *rwi; 1997 struct ibmvnic_adapter *adapter; 1998 struct net_device *netdev; 1999 u32 reset_state; 2000 int rc = 0; 2001 2002 adapter = container_of(work, struct ibmvnic_adapter, ibmvnic_reset); 2003 netdev = adapter->netdev; 2004 2005 mutex_lock(&adapter->reset_lock); 2006 reset_state = adapter->state; 2007 2008 rwi = get_next_rwi(adapter); 2009 while (rwi) { 2010 if (adapter->force_reset_recovery) { 2011 adapter->force_reset_recovery = false; 2012 rc = do_hard_reset(adapter, rwi, reset_state); 2013 } else { 2014 rc = do_reset(adapter, rwi, reset_state); 2015 } 2016 kfree(rwi); 2017 if (rc && rc != IBMVNIC_INIT_FAILED && 2018 !adapter->force_reset_recovery) 2019 break; 2020 2021 rwi = get_next_rwi(adapter); 2022 } 2023 2024 if (adapter->wait_for_reset) { 2025 adapter->wait_for_reset = false; 2026 adapter->reset_done_rc = rc; 2027 complete(&adapter->reset_done); 2028 } 2029 2030 if (rc) { 2031 netdev_dbg(adapter->netdev, "Reset failed\n"); 2032 free_all_rwi(adapter); 2033 mutex_unlock(&adapter->reset_lock); 2034 return; 2035 } 2036 2037 adapter->resetting = false; 2038 mutex_unlock(&adapter->reset_lock); 2039 } 2040 2041 static int ibmvnic_reset(struct ibmvnic_adapter *adapter, 2042 enum ibmvnic_reset_reason reason) 2043 { 2044 struct list_head *entry, *tmp_entry; 2045 struct ibmvnic_rwi *rwi, *tmp; 2046 struct net_device *netdev = adapter->netdev; 2047 int ret; 2048 2049 if (adapter->state == VNIC_REMOVING || 2050 adapter->state == VNIC_REMOVED || 2051 adapter->failover_pending) { 2052 ret = EBUSY; 2053 netdev_dbg(netdev, "Adapter removing or pending failover, skipping reset\n"); 2054 goto err; 2055 } 2056 2057 if (adapter->state == VNIC_PROBING) { 2058 netdev_warn(netdev, "Adapter reset during probe\n"); 2059 ret = adapter->init_done_rc = EAGAIN; 2060 goto err; 2061 } 2062 2063 mutex_lock(&adapter->rwi_lock); 
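	/* Do not queue a second reset if one with the same reason is
	 * already waiting on the rwi list.
	 */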
2064 2065 list_for_each(entry, &adapter->rwi_list) { 2066 tmp = list_entry(entry, struct ibmvnic_rwi, list); 2067 if (tmp->reset_reason == reason) { 2068 netdev_dbg(netdev, "Skipping matching reset\n"); 2069 mutex_unlock(&adapter->rwi_lock); 2070 ret = EBUSY; 2071 goto err; 2072 } 2073 } 2074 2075 rwi = kzalloc(sizeof(*rwi), GFP_KERNEL); 2076 if (!rwi) { 2077 mutex_unlock(&adapter->rwi_lock); 2078 ibmvnic_close(netdev); 2079 ret = ENOMEM; 2080 goto err; 2081 } 2082 /* if we just received a transport event, 2083 * flush reset queue and process this reset 2084 */ 2085 if (adapter->force_reset_recovery && !list_empty(&adapter->rwi_list)) { 2086 list_for_each_safe(entry, tmp_entry, &adapter->rwi_list) 2087 list_del(entry); 2088 } 2089 rwi->reset_reason = reason; 2090 list_add_tail(&rwi->list, &adapter->rwi_list); 2091 mutex_unlock(&adapter->rwi_lock); 2092 adapter->resetting = true; 2093 netdev_dbg(adapter->netdev, "Scheduling reset (reason %d)\n", reason); 2094 schedule_work(&adapter->ibmvnic_reset); 2095 2096 return 0; 2097 err: 2098 if (adapter->wait_for_reset) 2099 adapter->wait_for_reset = false; 2100 return -ret; 2101 } 2102 2103 static void ibmvnic_tx_timeout(struct net_device *dev) 2104 { 2105 struct ibmvnic_adapter *adapter = netdev_priv(dev); 2106 2107 ibmvnic_reset(adapter, VNIC_RESET_TIMEOUT); 2108 } 2109 2110 static void remove_buff_from_pool(struct ibmvnic_adapter *adapter, 2111 struct ibmvnic_rx_buff *rx_buff) 2112 { 2113 struct ibmvnic_rx_pool *pool = &adapter->rx_pool[rx_buff->pool_index]; 2114 2115 rx_buff->skb = NULL; 2116 2117 pool->free_map[pool->next_alloc] = (int)(rx_buff - pool->rx_buff); 2118 pool->next_alloc = (pool->next_alloc + 1) % pool->size; 2119 2120 atomic_dec(&pool->available); 2121 } 2122 2123 static int ibmvnic_poll(struct napi_struct *napi, int budget) 2124 { 2125 struct net_device *netdev = napi->dev; 2126 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 2127 int scrq_num = (int)(napi - adapter->napi); 2128 int frames_processed = 0; 2129 2130 restart_poll: 2131 while (frames_processed < budget) { 2132 struct sk_buff *skb; 2133 struct ibmvnic_rx_buff *rx_buff; 2134 union sub_crq *next; 2135 u32 length; 2136 u16 offset; 2137 u8 flags = 0; 2138 2139 if (unlikely(adapter->resetting && 2140 adapter->reset_reason != VNIC_RESET_NON_FATAL)) { 2141 enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]); 2142 napi_complete_done(napi, frames_processed); 2143 return frames_processed; 2144 } 2145 2146 if (!pending_scrq(adapter, adapter->rx_scrq[scrq_num])) 2147 break; 2148 next = ibmvnic_next_scrq(adapter, adapter->rx_scrq[scrq_num]); 2149 rx_buff = 2150 (struct ibmvnic_rx_buff *)be64_to_cpu(next-> 2151 rx_comp.correlator); 2152 /* do error checking */ 2153 if (next->rx_comp.rc) { 2154 netdev_dbg(netdev, "rx buffer returned with rc %x\n", 2155 be16_to_cpu(next->rx_comp.rc)); 2156 /* free the entry */ 2157 next->rx_comp.first = 0; 2158 dev_kfree_skb_any(rx_buff->skb); 2159 remove_buff_from_pool(adapter, rx_buff); 2160 continue; 2161 } else if (!rx_buff->skb) { 2162 /* free the entry */ 2163 next->rx_comp.first = 0; 2164 remove_buff_from_pool(adapter, rx_buff); 2165 continue; 2166 } 2167 2168 length = be32_to_cpu(next->rx_comp.len); 2169 offset = be16_to_cpu(next->rx_comp.off_frame_data); 2170 flags = next->rx_comp.flags; 2171 skb = rx_buff->skb; 2172 skb_copy_to_linear_data(skb, rx_buff->data + offset, 2173 length); 2174 2175 /* VLAN Header has been stripped by the system firmware and 2176 * needs to be inserted by the driver 2177 */ 2178 if 
(adapter->rx_vlan_header_insertion && 2179 (flags & IBMVNIC_VLAN_STRIPPED)) 2180 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), 2181 ntohs(next->rx_comp.vlan_tci)); 2182 2183 /* free the entry */ 2184 next->rx_comp.first = 0; 2185 remove_buff_from_pool(adapter, rx_buff); 2186 2187 skb_put(skb, length); 2188 skb->protocol = eth_type_trans(skb, netdev); 2189 skb_record_rx_queue(skb, scrq_num); 2190 2191 if (flags & IBMVNIC_IP_CHKSUM_GOOD && 2192 flags & IBMVNIC_TCP_UDP_CHKSUM_GOOD) { 2193 skb->ip_summed = CHECKSUM_UNNECESSARY; 2194 } 2195 2196 length = skb->len; 2197 napi_gro_receive(napi, skb); /* send it up */ 2198 netdev->stats.rx_packets++; 2199 netdev->stats.rx_bytes += length; 2200 adapter->rx_stats_buffers[scrq_num].packets++; 2201 adapter->rx_stats_buffers[scrq_num].bytes += length; 2202 frames_processed++; 2203 } 2204 2205 if (adapter->state != VNIC_CLOSING) 2206 replenish_rx_pool(adapter, &adapter->rx_pool[scrq_num]); 2207 2208 if (frames_processed < budget) { 2209 enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]); 2210 napi_complete_done(napi, frames_processed); 2211 if (pending_scrq(adapter, adapter->rx_scrq[scrq_num]) && 2212 napi_reschedule(napi)) { 2213 disable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]); 2214 goto restart_poll; 2215 } 2216 } 2217 return frames_processed; 2218 } 2219 2220 #ifdef CONFIG_NET_POLL_CONTROLLER 2221 static void ibmvnic_netpoll_controller(struct net_device *dev) 2222 { 2223 struct ibmvnic_adapter *adapter = netdev_priv(dev); 2224 int i; 2225 2226 replenish_pools(netdev_priv(dev)); 2227 for (i = 0; i < adapter->req_rx_queues; i++) 2228 ibmvnic_interrupt_rx(adapter->rx_scrq[i]->irq, 2229 adapter->rx_scrq[i]); 2230 } 2231 #endif 2232 2233 static int wait_for_reset(struct ibmvnic_adapter *adapter) 2234 { 2235 int rc, ret; 2236 2237 adapter->fallback.mtu = adapter->req_mtu; 2238 adapter->fallback.rx_queues = adapter->req_rx_queues; 2239 adapter->fallback.tx_queues = adapter->req_tx_queues; 2240 adapter->fallback.rx_entries = adapter->req_rx_add_entries_per_subcrq; 2241 adapter->fallback.tx_entries = adapter->req_tx_entries_per_subcrq; 2242 2243 init_completion(&adapter->reset_done); 2244 adapter->wait_for_reset = true; 2245 rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM); 2246 if (rc) 2247 return rc; 2248 wait_for_completion(&adapter->reset_done); 2249 2250 ret = 0; 2251 if (adapter->reset_done_rc) { 2252 ret = -EIO; 2253 adapter->desired.mtu = adapter->fallback.mtu; 2254 adapter->desired.rx_queues = adapter->fallback.rx_queues; 2255 adapter->desired.tx_queues = adapter->fallback.tx_queues; 2256 adapter->desired.rx_entries = adapter->fallback.rx_entries; 2257 adapter->desired.tx_entries = adapter->fallback.tx_entries; 2258 2259 init_completion(&adapter->reset_done); 2260 adapter->wait_for_reset = true; 2261 rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM); 2262 if (rc) 2263 return ret; 2264 wait_for_completion(&adapter->reset_done); 2265 } 2266 adapter->wait_for_reset = false; 2267 2268 return ret; 2269 } 2270 2271 static int ibmvnic_change_mtu(struct net_device *netdev, int new_mtu) 2272 { 2273 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 2274 2275 adapter->desired.mtu = new_mtu + ETH_HLEN; 2276 2277 return wait_for_reset(adapter); 2278 } 2279 2280 static netdev_features_t ibmvnic_features_check(struct sk_buff *skb, 2281 struct net_device *dev, 2282 netdev_features_t features) 2283 { 2284 /* Some backing hardware adapters can not 2285 * handle packets with a MSS less than 224 2286 * or with only one segment. 
2287 */ 2288 if (skb_is_gso(skb)) { 2289 if (skb_shinfo(skb)->gso_size < 224 || 2290 skb_shinfo(skb)->gso_segs == 1) 2291 features &= ~NETIF_F_GSO_MASK; 2292 } 2293 2294 return features; 2295 } 2296 2297 static const struct net_device_ops ibmvnic_netdev_ops = { 2298 .ndo_open = ibmvnic_open, 2299 .ndo_stop = ibmvnic_close, 2300 .ndo_start_xmit = ibmvnic_xmit, 2301 .ndo_set_rx_mode = ibmvnic_set_multi, 2302 .ndo_set_mac_address = ibmvnic_set_mac, 2303 .ndo_validate_addr = eth_validate_addr, 2304 .ndo_tx_timeout = ibmvnic_tx_timeout, 2305 #ifdef CONFIG_NET_POLL_CONTROLLER 2306 .ndo_poll_controller = ibmvnic_netpoll_controller, 2307 #endif 2308 .ndo_change_mtu = ibmvnic_change_mtu, 2309 .ndo_features_check = ibmvnic_features_check, 2310 }; 2311 2312 /* ethtool functions */ 2313 2314 static int ibmvnic_get_link_ksettings(struct net_device *netdev, 2315 struct ethtool_link_ksettings *cmd) 2316 { 2317 u32 supported, advertising; 2318 2319 supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | 2320 SUPPORTED_FIBRE); 2321 advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg | 2322 ADVERTISED_FIBRE); 2323 cmd->base.speed = SPEED_1000; 2324 cmd->base.duplex = DUPLEX_FULL; 2325 cmd->base.port = PORT_FIBRE; 2326 cmd->base.phy_address = 0; 2327 cmd->base.autoneg = AUTONEG_ENABLE; 2328 2329 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, 2330 supported); 2331 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, 2332 advertising); 2333 2334 return 0; 2335 } 2336 2337 static void ibmvnic_get_drvinfo(struct net_device *netdev, 2338 struct ethtool_drvinfo *info) 2339 { 2340 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 2341 2342 strlcpy(info->driver, ibmvnic_driver_name, sizeof(info->driver)); 2343 strlcpy(info->version, IBMVNIC_DRIVER_VERSION, sizeof(info->version)); 2344 strlcpy(info->fw_version, adapter->fw_version, 2345 sizeof(info->fw_version)); 2346 } 2347 2348 static u32 ibmvnic_get_msglevel(struct net_device *netdev) 2349 { 2350 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 2351 2352 return adapter->msg_enable; 2353 } 2354 2355 static void ibmvnic_set_msglevel(struct net_device *netdev, u32 data) 2356 { 2357 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 2358 2359 adapter->msg_enable = data; 2360 } 2361 2362 static u32 ibmvnic_get_link(struct net_device *netdev) 2363 { 2364 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 2365 2366 /* Don't need to send a query because we request a logical link up at 2367 * init and then we wait for link state indications 2368 */ 2369 return adapter->logical_link_state; 2370 } 2371 2372 static void ibmvnic_get_ringparam(struct net_device *netdev, 2373 struct ethtool_ringparam *ring) 2374 { 2375 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 2376 2377 ring->rx_max_pending = adapter->max_rx_add_entries_per_subcrq; 2378 ring->tx_max_pending = adapter->max_tx_entries_per_subcrq; 2379 ring->rx_mini_max_pending = 0; 2380 ring->rx_jumbo_max_pending = 0; 2381 ring->rx_pending = adapter->req_rx_add_entries_per_subcrq; 2382 ring->tx_pending = adapter->req_tx_entries_per_subcrq; 2383 ring->rx_mini_pending = 0; 2384 ring->rx_jumbo_pending = 0; 2385 } 2386 2387 static int ibmvnic_set_ringparam(struct net_device *netdev, 2388 struct ethtool_ringparam *ring) 2389 { 2390 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 2391 2392 if (ring->rx_pending > adapter->max_rx_add_entries_per_subcrq || 2393 ring->tx_pending > adapter->max_tx_entries_per_subcrq) { 2394 netdev_err(netdev, "Invalid 
request.\n"); 2395 netdev_err(netdev, "Max tx buffers = %llu\n", 2396 adapter->max_rx_add_entries_per_subcrq); 2397 netdev_err(netdev, "Max rx buffers = %llu\n", 2398 adapter->max_tx_entries_per_subcrq); 2399 return -EINVAL; 2400 } 2401 2402 adapter->desired.rx_entries = ring->rx_pending; 2403 adapter->desired.tx_entries = ring->tx_pending; 2404 2405 return wait_for_reset(adapter); 2406 } 2407 2408 static void ibmvnic_get_channels(struct net_device *netdev, 2409 struct ethtool_channels *channels) 2410 { 2411 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 2412 2413 channels->max_rx = adapter->max_rx_queues; 2414 channels->max_tx = adapter->max_tx_queues; 2415 channels->max_other = 0; 2416 channels->max_combined = 0; 2417 channels->rx_count = adapter->req_rx_queues; 2418 channels->tx_count = adapter->req_tx_queues; 2419 channels->other_count = 0; 2420 channels->combined_count = 0; 2421 } 2422 2423 static int ibmvnic_set_channels(struct net_device *netdev, 2424 struct ethtool_channels *channels) 2425 { 2426 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 2427 2428 adapter->desired.rx_queues = channels->rx_count; 2429 adapter->desired.tx_queues = channels->tx_count; 2430 2431 return wait_for_reset(adapter); 2432 } 2433 2434 static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data) 2435 { 2436 struct ibmvnic_adapter *adapter = netdev_priv(dev); 2437 int i; 2438 2439 if (stringset != ETH_SS_STATS) 2440 return; 2441 2442 for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++, data += ETH_GSTRING_LEN) 2443 memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN); 2444 2445 for (i = 0; i < adapter->req_tx_queues; i++) { 2446 snprintf(data, ETH_GSTRING_LEN, "tx%d_packets", i); 2447 data += ETH_GSTRING_LEN; 2448 2449 snprintf(data, ETH_GSTRING_LEN, "tx%d_bytes", i); 2450 data += ETH_GSTRING_LEN; 2451 2452 snprintf(data, ETH_GSTRING_LEN, "tx%d_dropped_packets", i); 2453 data += ETH_GSTRING_LEN; 2454 } 2455 2456 for (i = 0; i < adapter->req_rx_queues; i++) { 2457 snprintf(data, ETH_GSTRING_LEN, "rx%d_packets", i); 2458 data += ETH_GSTRING_LEN; 2459 2460 snprintf(data, ETH_GSTRING_LEN, "rx%d_bytes", i); 2461 data += ETH_GSTRING_LEN; 2462 2463 snprintf(data, ETH_GSTRING_LEN, "rx%d_interrupts", i); 2464 data += ETH_GSTRING_LEN; 2465 } 2466 } 2467 2468 static int ibmvnic_get_sset_count(struct net_device *dev, int sset) 2469 { 2470 struct ibmvnic_adapter *adapter = netdev_priv(dev); 2471 2472 switch (sset) { 2473 case ETH_SS_STATS: 2474 return ARRAY_SIZE(ibmvnic_stats) + 2475 adapter->req_tx_queues * NUM_TX_STATS + 2476 adapter->req_rx_queues * NUM_RX_STATS; 2477 default: 2478 return -EOPNOTSUPP; 2479 } 2480 } 2481 2482 static void ibmvnic_get_ethtool_stats(struct net_device *dev, 2483 struct ethtool_stats *stats, u64 *data) 2484 { 2485 struct ibmvnic_adapter *adapter = netdev_priv(dev); 2486 union ibmvnic_crq crq; 2487 int i, j; 2488 int rc; 2489 2490 memset(&crq, 0, sizeof(crq)); 2491 crq.request_statistics.first = IBMVNIC_CRQ_CMD; 2492 crq.request_statistics.cmd = REQUEST_STATISTICS; 2493 crq.request_statistics.ioba = cpu_to_be32(adapter->stats_token); 2494 crq.request_statistics.len = 2495 cpu_to_be32(sizeof(struct ibmvnic_statistics)); 2496 2497 /* Wait for data to be written */ 2498 init_completion(&adapter->stats_done); 2499 rc = ibmvnic_send_crq(adapter, &crq); 2500 if (rc) 2501 return; 2502 wait_for_completion(&adapter->stats_done); 2503 2504 for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++) 2505 data[i] = be64_to_cpu(IBMVNIC_GET_STAT(adapter, 2506 ibmvnic_stats[i].offset)); 
2507 2508 for (j = 0; j < adapter->req_tx_queues; j++) { 2509 data[i] = adapter->tx_stats_buffers[j].packets; 2510 i++; 2511 data[i] = adapter->tx_stats_buffers[j].bytes; 2512 i++; 2513 data[i] = adapter->tx_stats_buffers[j].dropped_packets; 2514 i++; 2515 } 2516 2517 for (j = 0; j < adapter->req_rx_queues; j++) { 2518 data[i] = adapter->rx_stats_buffers[j].packets; 2519 i++; 2520 data[i] = adapter->rx_stats_buffers[j].bytes; 2521 i++; 2522 data[i] = adapter->rx_stats_buffers[j].interrupts; 2523 i++; 2524 } 2525 } 2526 2527 static const struct ethtool_ops ibmvnic_ethtool_ops = { 2528 .get_drvinfo = ibmvnic_get_drvinfo, 2529 .get_msglevel = ibmvnic_get_msglevel, 2530 .set_msglevel = ibmvnic_set_msglevel, 2531 .get_link = ibmvnic_get_link, 2532 .get_ringparam = ibmvnic_get_ringparam, 2533 .set_ringparam = ibmvnic_set_ringparam, 2534 .get_channels = ibmvnic_get_channels, 2535 .set_channels = ibmvnic_set_channels, 2536 .get_strings = ibmvnic_get_strings, 2537 .get_sset_count = ibmvnic_get_sset_count, 2538 .get_ethtool_stats = ibmvnic_get_ethtool_stats, 2539 .get_link_ksettings = ibmvnic_get_link_ksettings, 2540 }; 2541 2542 /* Routines for managing CRQs/sCRQs */ 2543 2544 static int reset_one_sub_crq_queue(struct ibmvnic_adapter *adapter, 2545 struct ibmvnic_sub_crq_queue *scrq) 2546 { 2547 int rc; 2548 2549 if (scrq->irq) { 2550 free_irq(scrq->irq, scrq); 2551 irq_dispose_mapping(scrq->irq); 2552 scrq->irq = 0; 2553 } 2554 2555 memset(scrq->msgs, 0, 4 * PAGE_SIZE); 2556 atomic_set(&scrq->used, 0); 2557 scrq->cur = 0; 2558 2559 rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token, 2560 4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq); 2561 return rc; 2562 } 2563 2564 static int reset_sub_crq_queues(struct ibmvnic_adapter *adapter) 2565 { 2566 int i, rc; 2567 2568 for (i = 0; i < adapter->req_tx_queues; i++) { 2569 netdev_dbg(adapter->netdev, "Re-setting tx_scrq[%d]\n", i); 2570 rc = reset_one_sub_crq_queue(adapter, adapter->tx_scrq[i]); 2571 if (rc) 2572 return rc; 2573 } 2574 2575 for (i = 0; i < adapter->req_rx_queues; i++) { 2576 netdev_dbg(adapter->netdev, "Re-setting rx_scrq[%d]\n", i); 2577 rc = reset_one_sub_crq_queue(adapter, adapter->rx_scrq[i]); 2578 if (rc) 2579 return rc; 2580 } 2581 2582 return rc; 2583 } 2584 2585 static void release_sub_crq_queue(struct ibmvnic_adapter *adapter, 2586 struct ibmvnic_sub_crq_queue *scrq, 2587 bool do_h_free) 2588 { 2589 struct device *dev = &adapter->vdev->dev; 2590 long rc; 2591 2592 netdev_dbg(adapter->netdev, "Releasing sub-CRQ\n"); 2593 2594 if (do_h_free) { 2595 /* Close the sub-crqs */ 2596 do { 2597 rc = plpar_hcall_norets(H_FREE_SUB_CRQ, 2598 adapter->vdev->unit_address, 2599 scrq->crq_num); 2600 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc)); 2601 2602 if (rc) { 2603 netdev_err(adapter->netdev, 2604 "Failed to release sub-CRQ %16lx, rc = %ld\n", 2605 scrq->crq_num, rc); 2606 } 2607 } 2608 2609 dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE, 2610 DMA_BIDIRECTIONAL); 2611 free_pages((unsigned long)scrq->msgs, 2); 2612 kfree(scrq); 2613 } 2614 2615 static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter 2616 *adapter) 2617 { 2618 struct device *dev = &adapter->vdev->dev; 2619 struct ibmvnic_sub_crq_queue *scrq; 2620 int rc; 2621 2622 scrq = kzalloc(sizeof(*scrq), GFP_KERNEL); 2623 if (!scrq) 2624 return NULL; 2625 2626 scrq->msgs = 2627 (union sub_crq *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 2); 2628 if (!scrq->msgs) { 2629 dev_warn(dev, "Couldn't allocate crq queue messages page\n"); 2630 goto 
zero_page_failed; 2631 } 2632 2633 scrq->msg_token = dma_map_single(dev, scrq->msgs, 4 * PAGE_SIZE, 2634 DMA_BIDIRECTIONAL); 2635 if (dma_mapping_error(dev, scrq->msg_token)) { 2636 dev_warn(dev, "Couldn't map crq queue messages page\n"); 2637 goto map_failed; 2638 } 2639 2640 rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token, 2641 4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq); 2642 2643 if (rc == H_RESOURCE) 2644 rc = ibmvnic_reset_crq(adapter); 2645 2646 if (rc == H_CLOSED) { 2647 dev_warn(dev, "Partner adapter not ready, waiting.\n"); 2648 } else if (rc) { 2649 dev_warn(dev, "Error %d registering sub-crq\n", rc); 2650 goto reg_failed; 2651 } 2652 2653 scrq->adapter = adapter; 2654 scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs); 2655 spin_lock_init(&scrq->lock); 2656 2657 netdev_dbg(adapter->netdev, 2658 "sub-crq initialized, num %lx, hw_irq=%lx, irq=%x\n", 2659 scrq->crq_num, scrq->hw_irq, scrq->irq); 2660 2661 return scrq; 2662 2663 reg_failed: 2664 dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE, 2665 DMA_BIDIRECTIONAL); 2666 map_failed: 2667 free_pages((unsigned long)scrq->msgs, 2); 2668 zero_page_failed: 2669 kfree(scrq); 2670 2671 return NULL; 2672 } 2673 2674 static void release_sub_crqs(struct ibmvnic_adapter *adapter, bool do_h_free) 2675 { 2676 int i; 2677 2678 if (adapter->tx_scrq) { 2679 for (i = 0; i < adapter->num_active_tx_scrqs; i++) { 2680 if (!adapter->tx_scrq[i]) 2681 continue; 2682 2683 netdev_dbg(adapter->netdev, "Releasing tx_scrq[%d]\n", 2684 i); 2685 if (adapter->tx_scrq[i]->irq) { 2686 free_irq(adapter->tx_scrq[i]->irq, 2687 adapter->tx_scrq[i]); 2688 irq_dispose_mapping(adapter->tx_scrq[i]->irq); 2689 adapter->tx_scrq[i]->irq = 0; 2690 } 2691 2692 release_sub_crq_queue(adapter, adapter->tx_scrq[i], 2693 do_h_free); 2694 } 2695 2696 kfree(adapter->tx_scrq); 2697 adapter->tx_scrq = NULL; 2698 adapter->num_active_tx_scrqs = 0; 2699 } 2700 2701 if (adapter->rx_scrq) { 2702 for (i = 0; i < adapter->num_active_rx_scrqs; i++) { 2703 if (!adapter->rx_scrq[i]) 2704 continue; 2705 2706 netdev_dbg(adapter->netdev, "Releasing rx_scrq[%d]\n", 2707 i); 2708 if (adapter->rx_scrq[i]->irq) { 2709 free_irq(adapter->rx_scrq[i]->irq, 2710 adapter->rx_scrq[i]); 2711 irq_dispose_mapping(adapter->rx_scrq[i]->irq); 2712 adapter->rx_scrq[i]->irq = 0; 2713 } 2714 2715 release_sub_crq_queue(adapter, adapter->rx_scrq[i], 2716 do_h_free); 2717 } 2718 2719 kfree(adapter->rx_scrq); 2720 adapter->rx_scrq = NULL; 2721 adapter->num_active_rx_scrqs = 0; 2722 } 2723 } 2724 2725 static int disable_scrq_irq(struct ibmvnic_adapter *adapter, 2726 struct ibmvnic_sub_crq_queue *scrq) 2727 { 2728 struct device *dev = &adapter->vdev->dev; 2729 unsigned long rc; 2730 2731 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address, 2732 H_DISABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0); 2733 if (rc) 2734 dev_err(dev, "Couldn't disable scrq irq 0x%lx. rc=%ld\n", 2735 scrq->hw_irq, rc); 2736 return rc; 2737 } 2738 2739 static int enable_scrq_irq(struct ibmvnic_adapter *adapter, 2740 struct ibmvnic_sub_crq_queue *scrq) 2741 { 2742 struct device *dev = &adapter->vdev->dev; 2743 unsigned long rc; 2744 2745 if (scrq->hw_irq > 0x100000000ULL) { 2746 dev_err(dev, "bad hw_irq = %lx\n", scrq->hw_irq); 2747 return 1; 2748 } 2749 2750 if (adapter->resetting && 2751 adapter->reset_reason == VNIC_RESET_MOBILITY) { 2752 u64 val = (0xff000000) | scrq->hw_irq; 2753 2754 rc = plpar_hcall_norets(H_EOI, val); 2755 if (rc) 2756 dev_err(dev, "H_EOI FAILED irq 0x%llx. 
rc=%ld\n", 2757 val, rc); 2758 } 2759 2760 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address, 2761 H_ENABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0); 2762 if (rc) 2763 dev_err(dev, "Couldn't enable scrq irq 0x%lx. rc=%ld\n", 2764 scrq->hw_irq, rc); 2765 return rc; 2766 } 2767 2768 static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter, 2769 struct ibmvnic_sub_crq_queue *scrq) 2770 { 2771 struct device *dev = &adapter->vdev->dev; 2772 struct ibmvnic_tx_pool *tx_pool; 2773 struct ibmvnic_tx_buff *txbuff; 2774 union sub_crq *next; 2775 int index; 2776 int i, j; 2777 u8 *first; 2778 2779 restart_loop: 2780 while (pending_scrq(adapter, scrq)) { 2781 unsigned int pool = scrq->pool_index; 2782 int num_entries = 0; 2783 2784 next = ibmvnic_next_scrq(adapter, scrq); 2785 for (i = 0; i < next->tx_comp.num_comps; i++) { 2786 if (next->tx_comp.rcs[i]) { 2787 dev_err(dev, "tx error %x\n", 2788 next->tx_comp.rcs[i]); 2789 continue; 2790 } 2791 index = be32_to_cpu(next->tx_comp.correlators[i]); 2792 if (index & IBMVNIC_TSO_POOL_MASK) { 2793 tx_pool = &adapter->tso_pool[pool]; 2794 index &= ~IBMVNIC_TSO_POOL_MASK; 2795 } else { 2796 tx_pool = &adapter->tx_pool[pool]; 2797 } 2798 2799 txbuff = &tx_pool->tx_buff[index]; 2800 2801 for (j = 0; j < IBMVNIC_MAX_FRAGS_PER_CRQ; j++) { 2802 if (!txbuff->data_dma[j]) 2803 continue; 2804 2805 txbuff->data_dma[j] = 0; 2806 } 2807 /* if sub_crq was sent indirectly */ 2808 first = &txbuff->indir_arr[0].generic.first; 2809 if (*first == IBMVNIC_CRQ_CMD) { 2810 dma_unmap_single(dev, txbuff->indir_dma, 2811 sizeof(txbuff->indir_arr), 2812 DMA_TO_DEVICE); 2813 *first = 0; 2814 } 2815 2816 if (txbuff->last_frag) { 2817 dev_kfree_skb_any(txbuff->skb); 2818 txbuff->skb = NULL; 2819 } 2820 2821 num_entries += txbuff->num_entries; 2822 2823 tx_pool->free_map[tx_pool->producer_index] = index; 2824 tx_pool->producer_index = 2825 (tx_pool->producer_index + 1) % 2826 tx_pool->num_buffers; 2827 } 2828 /* remove tx_comp scrq*/ 2829 next->tx_comp.first = 0; 2830 2831 if (atomic_sub_return(num_entries, &scrq->used) <= 2832 (adapter->req_tx_entries_per_subcrq / 2) && 2833 __netif_subqueue_stopped(adapter->netdev, 2834 scrq->pool_index)) { 2835 netif_wake_subqueue(adapter->netdev, scrq->pool_index); 2836 netdev_dbg(adapter->netdev, "Started queue %d\n", 2837 scrq->pool_index); 2838 } 2839 } 2840 2841 enable_scrq_irq(adapter, scrq); 2842 2843 if (pending_scrq(adapter, scrq)) { 2844 disable_scrq_irq(adapter, scrq); 2845 goto restart_loop; 2846 } 2847 2848 return 0; 2849 } 2850 2851 static irqreturn_t ibmvnic_interrupt_tx(int irq, void *instance) 2852 { 2853 struct ibmvnic_sub_crq_queue *scrq = instance; 2854 struct ibmvnic_adapter *adapter = scrq->adapter; 2855 2856 disable_scrq_irq(adapter, scrq); 2857 ibmvnic_complete_tx(adapter, scrq); 2858 2859 return IRQ_HANDLED; 2860 } 2861 2862 static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance) 2863 { 2864 struct ibmvnic_sub_crq_queue *scrq = instance; 2865 struct ibmvnic_adapter *adapter = scrq->adapter; 2866 2867 /* When booting a kdump kernel we can hit pending interrupts 2868 * prior to completing driver initialization. 
2869 */ 2870 if (unlikely(adapter->state != VNIC_OPEN)) 2871 return IRQ_NONE; 2872 2873 adapter->rx_stats_buffers[scrq->scrq_num].interrupts++; 2874 2875 if (napi_schedule_prep(&adapter->napi[scrq->scrq_num])) { 2876 disable_scrq_irq(adapter, scrq); 2877 __napi_schedule(&adapter->napi[scrq->scrq_num]); 2878 } 2879 2880 return IRQ_HANDLED; 2881 } 2882 2883 static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter) 2884 { 2885 struct device *dev = &adapter->vdev->dev; 2886 struct ibmvnic_sub_crq_queue *scrq; 2887 int i = 0, j = 0; 2888 int rc = 0; 2889 2890 for (i = 0; i < adapter->req_tx_queues; i++) { 2891 netdev_dbg(adapter->netdev, "Initializing tx_scrq[%d] irq\n", 2892 i); 2893 scrq = adapter->tx_scrq[i]; 2894 scrq->irq = irq_create_mapping(NULL, scrq->hw_irq); 2895 2896 if (!scrq->irq) { 2897 rc = -EINVAL; 2898 dev_err(dev, "Error mapping irq\n"); 2899 goto req_tx_irq_failed; 2900 } 2901 2902 rc = request_irq(scrq->irq, ibmvnic_interrupt_tx, 2903 0, "ibmvnic_tx", scrq); 2904 2905 if (rc) { 2906 dev_err(dev, "Couldn't register tx irq 0x%x. rc=%d\n", 2907 scrq->irq, rc); 2908 irq_dispose_mapping(scrq->irq); 2909 goto req_tx_irq_failed; 2910 } 2911 } 2912 2913 for (i = 0; i < adapter->req_rx_queues; i++) { 2914 netdev_dbg(adapter->netdev, "Initializing rx_scrq[%d] irq\n", 2915 i); 2916 scrq = adapter->rx_scrq[i]; 2917 scrq->irq = irq_create_mapping(NULL, scrq->hw_irq); 2918 if (!scrq->irq) { 2919 rc = -EINVAL; 2920 dev_err(dev, "Error mapping irq\n"); 2921 goto req_rx_irq_failed; 2922 } 2923 rc = request_irq(scrq->irq, ibmvnic_interrupt_rx, 2924 0, "ibmvnic_rx", scrq); 2925 if (rc) { 2926 dev_err(dev, "Couldn't register rx irq 0x%x. rc=%d\n", 2927 scrq->irq, rc); 2928 irq_dispose_mapping(scrq->irq); 2929 goto req_rx_irq_failed; 2930 } 2931 } 2932 return rc; 2933 2934 req_rx_irq_failed: 2935 for (j = 0; j < i; j++) { 2936 free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]); 2937 irq_dispose_mapping(adapter->rx_scrq[j]->irq); 2938 } 2939 i = adapter->req_tx_queues; 2940 req_tx_irq_failed: 2941 for (j = 0; j < i; j++) { 2942 free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]); 2943 irq_dispose_mapping(adapter->rx_scrq[j]->irq); 2944 } 2945 release_sub_crqs(adapter, 1); 2946 return rc; 2947 } 2948 2949 static int init_sub_crqs(struct ibmvnic_adapter *adapter) 2950 { 2951 struct device *dev = &adapter->vdev->dev; 2952 struct ibmvnic_sub_crq_queue **allqueues; 2953 int registered_queues = 0; 2954 int total_queues; 2955 int more = 0; 2956 int i; 2957 2958 total_queues = adapter->req_tx_queues + adapter->req_rx_queues; 2959 2960 allqueues = kcalloc(total_queues, sizeof(*allqueues), GFP_KERNEL); 2961 if (!allqueues) 2962 return -1; 2963 2964 for (i = 0; i < total_queues; i++) { 2965 allqueues[i] = init_sub_crq_queue(adapter); 2966 if (!allqueues[i]) { 2967 dev_warn(dev, "Couldn't allocate all sub-crqs\n"); 2968 break; 2969 } 2970 registered_queues++; 2971 } 2972 2973 /* Make sure we were able to register the minimum number of queues */ 2974 if (registered_queues < 2975 adapter->min_tx_queues + adapter->min_rx_queues) { 2976 dev_err(dev, "Fatal: Couldn't init min number of sub-crqs\n"); 2977 goto tx_failed; 2978 } 2979 2980 /* Distribute the failed allocated queues*/ 2981 for (i = 0; i < total_queues - registered_queues + more ; i++) { 2982 netdev_dbg(adapter->netdev, "Reducing number of queues\n"); 2983 switch (i % 3) { 2984 case 0: 2985 if (adapter->req_rx_queues > adapter->min_rx_queues) 2986 adapter->req_rx_queues--; 2987 else 2988 more++; 2989 break; 2990 case 1: 2991 if 
(adapter->req_tx_queues > adapter->min_tx_queues) 2992 adapter->req_tx_queues--; 2993 else 2994 more++; 2995 break; 2996 } 2997 } 2998 2999 adapter->tx_scrq = kcalloc(adapter->req_tx_queues, 3000 sizeof(*adapter->tx_scrq), GFP_KERNEL); 3001 if (!adapter->tx_scrq) 3002 goto tx_failed; 3003 3004 for (i = 0; i < adapter->req_tx_queues; i++) { 3005 adapter->tx_scrq[i] = allqueues[i]; 3006 adapter->tx_scrq[i]->pool_index = i; 3007 adapter->num_active_tx_scrqs++; 3008 } 3009 3010 adapter->rx_scrq = kcalloc(adapter->req_rx_queues, 3011 sizeof(*adapter->rx_scrq), GFP_KERNEL); 3012 if (!adapter->rx_scrq) 3013 goto rx_failed; 3014 3015 for (i = 0; i < adapter->req_rx_queues; i++) { 3016 adapter->rx_scrq[i] = allqueues[i + adapter->req_tx_queues]; 3017 adapter->rx_scrq[i]->scrq_num = i; 3018 adapter->num_active_rx_scrqs++; 3019 } 3020 3021 kfree(allqueues); 3022 return 0; 3023 3024 rx_failed: 3025 kfree(adapter->tx_scrq); 3026 adapter->tx_scrq = NULL; 3027 tx_failed: 3028 for (i = 0; i < registered_queues; i++) 3029 release_sub_crq_queue(adapter, allqueues[i], 1); 3030 kfree(allqueues); 3031 return -1; 3032 } 3033 3034 static void ibmvnic_send_req_caps(struct ibmvnic_adapter *adapter, int retry) 3035 { 3036 struct device *dev = &adapter->vdev->dev; 3037 union ibmvnic_crq crq; 3038 int max_entries; 3039 3040 if (!retry) { 3041 /* Sub-CRQ entries are 32 byte long */ 3042 int entries_page = 4 * PAGE_SIZE / (sizeof(u64) * 4); 3043 3044 if (adapter->min_tx_entries_per_subcrq > entries_page || 3045 adapter->min_rx_add_entries_per_subcrq > entries_page) { 3046 dev_err(dev, "Fatal, invalid entries per sub-crq\n"); 3047 return; 3048 } 3049 3050 if (adapter->desired.mtu) 3051 adapter->req_mtu = adapter->desired.mtu; 3052 else 3053 adapter->req_mtu = adapter->netdev->mtu + ETH_HLEN; 3054 3055 if (!adapter->desired.tx_entries) 3056 adapter->desired.tx_entries = 3057 adapter->max_tx_entries_per_subcrq; 3058 if (!adapter->desired.rx_entries) 3059 adapter->desired.rx_entries = 3060 adapter->max_rx_add_entries_per_subcrq; 3061 3062 max_entries = IBMVNIC_MAX_LTB_SIZE / 3063 (adapter->req_mtu + IBMVNIC_BUFFER_HLEN); 3064 3065 if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) * 3066 adapter->desired.tx_entries > IBMVNIC_MAX_LTB_SIZE) { 3067 adapter->desired.tx_entries = max_entries; 3068 } 3069 3070 if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) * 3071 adapter->desired.rx_entries > IBMVNIC_MAX_LTB_SIZE) { 3072 adapter->desired.rx_entries = max_entries; 3073 } 3074 3075 if (adapter->desired.tx_entries) 3076 adapter->req_tx_entries_per_subcrq = 3077 adapter->desired.tx_entries; 3078 else 3079 adapter->req_tx_entries_per_subcrq = 3080 adapter->max_tx_entries_per_subcrq; 3081 3082 if (adapter->desired.rx_entries) 3083 adapter->req_rx_add_entries_per_subcrq = 3084 adapter->desired.rx_entries; 3085 else 3086 adapter->req_rx_add_entries_per_subcrq = 3087 adapter->max_rx_add_entries_per_subcrq; 3088 3089 if (adapter->desired.tx_queues) 3090 adapter->req_tx_queues = 3091 adapter->desired.tx_queues; 3092 else 3093 adapter->req_tx_queues = 3094 adapter->opt_tx_comp_sub_queues; 3095 3096 if (adapter->desired.rx_queues) 3097 adapter->req_rx_queues = 3098 adapter->desired.rx_queues; 3099 else 3100 adapter->req_rx_queues = 3101 adapter->opt_rx_comp_queues; 3102 3103 adapter->req_rx_add_queues = adapter->max_rx_add_queues; 3104 } 3105 3106 memset(&crq, 0, sizeof(crq)); 3107 crq.request_capability.first = IBMVNIC_CRQ_CMD; 3108 crq.request_capability.cmd = REQUEST_CAPABILITY; 3109 3110 crq.request_capability.capability = 
cpu_to_be16(REQ_TX_QUEUES); 3111 crq.request_capability.number = cpu_to_be64(adapter->req_tx_queues); 3112 atomic_inc(&adapter->running_cap_crqs); 3113 ibmvnic_send_crq(adapter, &crq); 3114 3115 crq.request_capability.capability = cpu_to_be16(REQ_RX_QUEUES); 3116 crq.request_capability.number = cpu_to_be64(adapter->req_rx_queues); 3117 atomic_inc(&adapter->running_cap_crqs); 3118 ibmvnic_send_crq(adapter, &crq); 3119 3120 crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_QUEUES); 3121 crq.request_capability.number = cpu_to_be64(adapter->req_rx_add_queues); 3122 atomic_inc(&adapter->running_cap_crqs); 3123 ibmvnic_send_crq(adapter, &crq); 3124 3125 crq.request_capability.capability = 3126 cpu_to_be16(REQ_TX_ENTRIES_PER_SUBCRQ); 3127 crq.request_capability.number = 3128 cpu_to_be64(adapter->req_tx_entries_per_subcrq); 3129 atomic_inc(&adapter->running_cap_crqs); 3130 ibmvnic_send_crq(adapter, &crq); 3131 3132 crq.request_capability.capability = 3133 cpu_to_be16(REQ_RX_ADD_ENTRIES_PER_SUBCRQ); 3134 crq.request_capability.number = 3135 cpu_to_be64(adapter->req_rx_add_entries_per_subcrq); 3136 atomic_inc(&adapter->running_cap_crqs); 3137 ibmvnic_send_crq(adapter, &crq); 3138 3139 crq.request_capability.capability = cpu_to_be16(REQ_MTU); 3140 crq.request_capability.number = cpu_to_be64(adapter->req_mtu); 3141 atomic_inc(&adapter->running_cap_crqs); 3142 ibmvnic_send_crq(adapter, &crq); 3143 3144 if (adapter->netdev->flags & IFF_PROMISC) { 3145 if (adapter->promisc_supported) { 3146 crq.request_capability.capability = 3147 cpu_to_be16(PROMISC_REQUESTED); 3148 crq.request_capability.number = cpu_to_be64(1); 3149 atomic_inc(&adapter->running_cap_crqs); 3150 ibmvnic_send_crq(adapter, &crq); 3151 } 3152 } else { 3153 crq.request_capability.capability = 3154 cpu_to_be16(PROMISC_REQUESTED); 3155 crq.request_capability.number = cpu_to_be64(0); 3156 atomic_inc(&adapter->running_cap_crqs); 3157 ibmvnic_send_crq(adapter, &crq); 3158 } 3159 } 3160 3161 static int pending_scrq(struct ibmvnic_adapter *adapter, 3162 struct ibmvnic_sub_crq_queue *scrq) 3163 { 3164 union sub_crq *entry = &scrq->msgs[scrq->cur]; 3165 3166 if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP) 3167 return 1; 3168 else 3169 return 0; 3170 } 3171 3172 static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *adapter, 3173 struct ibmvnic_sub_crq_queue *scrq) 3174 { 3175 union sub_crq *entry; 3176 unsigned long flags; 3177 3178 spin_lock_irqsave(&scrq->lock, flags); 3179 entry = &scrq->msgs[scrq->cur]; 3180 if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP) { 3181 if (++scrq->cur == scrq->size) 3182 scrq->cur = 0; 3183 } else { 3184 entry = NULL; 3185 } 3186 spin_unlock_irqrestore(&scrq->lock, flags); 3187 3188 return entry; 3189 } 3190 3191 static union ibmvnic_crq *ibmvnic_next_crq(struct ibmvnic_adapter *adapter) 3192 { 3193 struct ibmvnic_crq_queue *queue = &adapter->crq; 3194 union ibmvnic_crq *crq; 3195 3196 crq = &queue->msgs[queue->cur]; 3197 if (crq->generic.first & IBMVNIC_CRQ_CMD_RSP) { 3198 if (++queue->cur == queue->size) 3199 queue->cur = 0; 3200 } else { 3201 crq = NULL; 3202 } 3203 3204 return crq; 3205 } 3206 3207 static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle, 3208 union sub_crq *sub_crq) 3209 { 3210 unsigned int ua = adapter->vdev->unit_address; 3211 struct device *dev = &adapter->vdev->dev; 3212 u64 *u64_crq = (u64 *)sub_crq; 3213 int rc; 3214 3215 netdev_dbg(adapter->netdev, 3216 "Sending sCRQ %016lx: %016lx %016lx %016lx %016lx\n", 3217 (unsigned long 
int)cpu_to_be64(remote_handle), 3218 (unsigned long int)cpu_to_be64(u64_crq[0]), 3219 (unsigned long int)cpu_to_be64(u64_crq[1]), 3220 (unsigned long int)cpu_to_be64(u64_crq[2]), 3221 (unsigned long int)cpu_to_be64(u64_crq[3])); 3222 3223 /* Make sure the hypervisor sees the complete request */ 3224 mb(); 3225 3226 rc = plpar_hcall_norets(H_SEND_SUB_CRQ, ua, 3227 cpu_to_be64(remote_handle), 3228 cpu_to_be64(u64_crq[0]), 3229 cpu_to_be64(u64_crq[1]), 3230 cpu_to_be64(u64_crq[2]), 3231 cpu_to_be64(u64_crq[3])); 3232 3233 if (rc) { 3234 if (rc == H_CLOSED) 3235 dev_warn(dev, "CRQ Queue closed\n"); 3236 dev_err(dev, "Send error (rc=%d)\n", rc); 3237 } 3238 3239 return rc; 3240 } 3241 3242 static int send_subcrq_indirect(struct ibmvnic_adapter *adapter, 3243 u64 remote_handle, u64 ioba, u64 num_entries) 3244 { 3245 unsigned int ua = adapter->vdev->unit_address; 3246 struct device *dev = &adapter->vdev->dev; 3247 int rc; 3248 3249 /* Make sure the hypervisor sees the complete request */ 3250 mb(); 3251 rc = plpar_hcall_norets(H_SEND_SUB_CRQ_INDIRECT, ua, 3252 cpu_to_be64(remote_handle), 3253 ioba, num_entries); 3254 3255 if (rc) { 3256 if (rc == H_CLOSED) 3257 dev_warn(dev, "CRQ Queue closed\n"); 3258 dev_err(dev, "Send (indirect) error (rc=%d)\n", rc); 3259 } 3260 3261 return rc; 3262 } 3263 3264 static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter, 3265 union ibmvnic_crq *crq) 3266 { 3267 unsigned int ua = adapter->vdev->unit_address; 3268 struct device *dev = &adapter->vdev->dev; 3269 u64 *u64_crq = (u64 *)crq; 3270 int rc; 3271 3272 netdev_dbg(adapter->netdev, "Sending CRQ: %016lx %016lx\n", 3273 (unsigned long int)cpu_to_be64(u64_crq[0]), 3274 (unsigned long int)cpu_to_be64(u64_crq[1])); 3275 3276 if (!adapter->crq.active && 3277 crq->generic.first != IBMVNIC_CRQ_INIT_CMD) { 3278 dev_warn(dev, "Invalid request detected while CRQ is inactive, possible device state change during reset\n"); 3279 return -EINVAL; 3280 } 3281 3282 /* Make sure the hypervisor sees the complete request */ 3283 mb(); 3284 3285 rc = plpar_hcall_norets(H_SEND_CRQ, ua, 3286 cpu_to_be64(u64_crq[0]), 3287 cpu_to_be64(u64_crq[1])); 3288 3289 if (rc) { 3290 if (rc == H_CLOSED) { 3291 dev_warn(dev, "CRQ Queue closed\n"); 3292 if (adapter->resetting) 3293 ibmvnic_reset(adapter, VNIC_RESET_FATAL); 3294 } 3295 3296 dev_warn(dev, "Send error (rc=%d)\n", rc); 3297 } 3298 3299 return rc; 3300 } 3301 3302 static int ibmvnic_send_crq_init(struct ibmvnic_adapter *adapter) 3303 { 3304 union ibmvnic_crq crq; 3305 3306 memset(&crq, 0, sizeof(crq)); 3307 crq.generic.first = IBMVNIC_CRQ_INIT_CMD; 3308 crq.generic.cmd = IBMVNIC_CRQ_INIT; 3309 netdev_dbg(adapter->netdev, "Sending CRQ init\n"); 3310 3311 return ibmvnic_send_crq(adapter, &crq); 3312 } 3313 3314 static int send_version_xchg(struct ibmvnic_adapter *adapter) 3315 { 3316 union ibmvnic_crq crq; 3317 3318 memset(&crq, 0, sizeof(crq)); 3319 crq.version_exchange.first = IBMVNIC_CRQ_CMD; 3320 crq.version_exchange.cmd = VERSION_EXCHANGE; 3321 crq.version_exchange.version = cpu_to_be16(ibmvnic_version); 3322 3323 return ibmvnic_send_crq(adapter, &crq); 3324 } 3325 3326 struct vnic_login_client_data { 3327 u8 type; 3328 __be16 len; 3329 char name[]; 3330 } __packed; 3331 3332 static int vnic_client_data_len(struct ibmvnic_adapter *adapter) 3333 { 3334 int len; 3335 3336 /* Calculate the amount of buffer space needed for the 3337 * vnic client data in the login buffer. There are four entries, 3338 * OS name, LPAR name, device name, and a null last entry. 
3339 */ 3340 len = 4 * sizeof(struct vnic_login_client_data); 3341 len += 6; /* "Linux" plus NULL */ 3342 len += strlen(utsname()->nodename) + 1; 3343 len += strlen(adapter->netdev->name) + 1; 3344 3345 return len; 3346 } 3347 3348 static void vnic_add_client_data(struct ibmvnic_adapter *adapter, 3349 struct vnic_login_client_data *vlcd) 3350 { 3351 const char *os_name = "Linux"; 3352 int len; 3353 3354 /* Type 1 - LPAR OS */ 3355 vlcd->type = 1; 3356 len = strlen(os_name) + 1; 3357 vlcd->len = cpu_to_be16(len); 3358 strncpy(vlcd->name, os_name, len); 3359 vlcd = (struct vnic_login_client_data *)(vlcd->name + len); 3360 3361 /* Type 2 - LPAR name */ 3362 vlcd->type = 2; 3363 len = strlen(utsname()->nodename) + 1; 3364 vlcd->len = cpu_to_be16(len); 3365 strncpy(vlcd->name, utsname()->nodename, len); 3366 vlcd = (struct vnic_login_client_data *)(vlcd->name + len); 3367 3368 /* Type 3 - device name */ 3369 vlcd->type = 3; 3370 len = strlen(adapter->netdev->name) + 1; 3371 vlcd->len = cpu_to_be16(len); 3372 strncpy(vlcd->name, adapter->netdev->name, len); 3373 } 3374 3375 static int send_login(struct ibmvnic_adapter *adapter) 3376 { 3377 struct ibmvnic_login_rsp_buffer *login_rsp_buffer; 3378 struct ibmvnic_login_buffer *login_buffer; 3379 struct device *dev = &adapter->vdev->dev; 3380 dma_addr_t rsp_buffer_token; 3381 dma_addr_t buffer_token; 3382 size_t rsp_buffer_size; 3383 union ibmvnic_crq crq; 3384 size_t buffer_size; 3385 __be64 *tx_list_p; 3386 __be64 *rx_list_p; 3387 int client_data_len; 3388 struct vnic_login_client_data *vlcd; 3389 int i; 3390 3391 if (!adapter->tx_scrq || !adapter->rx_scrq) { 3392 netdev_err(adapter->netdev, 3393 "RX or TX queues are not allocated, device login failed\n"); 3394 return -1; 3395 } 3396 3397 release_login_rsp_buffer(adapter); 3398 client_data_len = vnic_client_data_len(adapter); 3399 3400 buffer_size = 3401 sizeof(struct ibmvnic_login_buffer) + 3402 sizeof(u64) * (adapter->req_tx_queues + adapter->req_rx_queues) + 3403 client_data_len; 3404 3405 login_buffer = kzalloc(buffer_size, GFP_ATOMIC); 3406 if (!login_buffer) 3407 goto buf_alloc_failed; 3408 3409 buffer_token = dma_map_single(dev, login_buffer, buffer_size, 3410 DMA_TO_DEVICE); 3411 if (dma_mapping_error(dev, buffer_token)) { 3412 dev_err(dev, "Couldn't map login buffer\n"); 3413 goto buf_map_failed; 3414 } 3415 3416 rsp_buffer_size = sizeof(struct ibmvnic_login_rsp_buffer) + 3417 sizeof(u64) * adapter->req_tx_queues + 3418 sizeof(u64) * adapter->req_rx_queues + 3419 sizeof(u64) * adapter->req_rx_queues + 3420 sizeof(u8) * IBMVNIC_TX_DESC_VERSIONS; 3421 3422 login_rsp_buffer = kmalloc(rsp_buffer_size, GFP_ATOMIC); 3423 if (!login_rsp_buffer) 3424 goto buf_rsp_alloc_failed; 3425 3426 rsp_buffer_token = dma_map_single(dev, login_rsp_buffer, 3427 rsp_buffer_size, DMA_FROM_DEVICE); 3428 if (dma_mapping_error(dev, rsp_buffer_token)) { 3429 dev_err(dev, "Couldn't map login rsp buffer\n"); 3430 goto buf_rsp_map_failed; 3431 } 3432 3433 adapter->login_buf = login_buffer; 3434 adapter->login_buf_token = buffer_token; 3435 adapter->login_buf_sz = buffer_size; 3436 adapter->login_rsp_buf = login_rsp_buffer; 3437 adapter->login_rsp_buf_token = rsp_buffer_token; 3438 adapter->login_rsp_buf_sz = rsp_buffer_size; 3439 3440 login_buffer->len = cpu_to_be32(buffer_size); 3441 login_buffer->version = cpu_to_be32(INITIAL_VERSION_LB); 3442 login_buffer->num_txcomp_subcrqs = cpu_to_be32(adapter->req_tx_queues); 3443 login_buffer->off_txcomp_subcrqs = 3444 cpu_to_be32(sizeof(struct ibmvnic_login_buffer)); 3445 
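	/* The rx completion queue list is laid out immediately after the
	 * tx list in the login buffer.
	 */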
login_buffer->num_rxcomp_subcrqs = cpu_to_be32(adapter->req_rx_queues); 3446 login_buffer->off_rxcomp_subcrqs = 3447 cpu_to_be32(sizeof(struct ibmvnic_login_buffer) + 3448 sizeof(u64) * adapter->req_tx_queues); 3449 login_buffer->login_rsp_ioba = cpu_to_be32(rsp_buffer_token); 3450 login_buffer->login_rsp_len = cpu_to_be32(rsp_buffer_size); 3451 3452 tx_list_p = (__be64 *)((char *)login_buffer + 3453 sizeof(struct ibmvnic_login_buffer)); 3454 rx_list_p = (__be64 *)((char *)login_buffer + 3455 sizeof(struct ibmvnic_login_buffer) + 3456 sizeof(u64) * adapter->req_tx_queues); 3457 3458 for (i = 0; i < adapter->req_tx_queues; i++) { 3459 if (adapter->tx_scrq[i]) { 3460 tx_list_p[i] = cpu_to_be64(adapter->tx_scrq[i]-> 3461 crq_num); 3462 } 3463 } 3464 3465 for (i = 0; i < adapter->req_rx_queues; i++) { 3466 if (adapter->rx_scrq[i]) { 3467 rx_list_p[i] = cpu_to_be64(adapter->rx_scrq[i]-> 3468 crq_num); 3469 } 3470 } 3471 3472 /* Insert vNIC login client data */ 3473 vlcd = (struct vnic_login_client_data *) 3474 ((char *)rx_list_p + (sizeof(u64) * adapter->req_rx_queues)); 3475 login_buffer->client_data_offset = 3476 cpu_to_be32((char *)vlcd - (char *)login_buffer); 3477 login_buffer->client_data_len = cpu_to_be32(client_data_len); 3478 3479 vnic_add_client_data(adapter, vlcd); 3480 3481 netdev_dbg(adapter->netdev, "Login Buffer:\n"); 3482 for (i = 0; i < (adapter->login_buf_sz - 1) / 8 + 1; i++) { 3483 netdev_dbg(adapter->netdev, "%016lx\n", 3484 ((unsigned long int *)(adapter->login_buf))[i]); 3485 } 3486 3487 memset(&crq, 0, sizeof(crq)); 3488 crq.login.first = IBMVNIC_CRQ_CMD; 3489 crq.login.cmd = LOGIN; 3490 crq.login.ioba = cpu_to_be32(buffer_token); 3491 crq.login.len = cpu_to_be32(buffer_size); 3492 ibmvnic_send_crq(adapter, &crq); 3493 3494 return 0; 3495 3496 buf_rsp_map_failed: 3497 kfree(login_rsp_buffer); 3498 buf_rsp_alloc_failed: 3499 dma_unmap_single(dev, buffer_token, buffer_size, DMA_TO_DEVICE); 3500 buf_map_failed: 3501 kfree(login_buffer); 3502 buf_alloc_failed: 3503 return -1; 3504 } 3505 3506 static int send_request_map(struct ibmvnic_adapter *adapter, dma_addr_t addr, 3507 u32 len, u8 map_id) 3508 { 3509 union ibmvnic_crq crq; 3510 3511 memset(&crq, 0, sizeof(crq)); 3512 crq.request_map.first = IBMVNIC_CRQ_CMD; 3513 crq.request_map.cmd = REQUEST_MAP; 3514 crq.request_map.map_id = map_id; 3515 crq.request_map.ioba = cpu_to_be32(addr); 3516 crq.request_map.len = cpu_to_be32(len); 3517 return ibmvnic_send_crq(adapter, &crq); 3518 } 3519 3520 static int send_request_unmap(struct ibmvnic_adapter *adapter, u8 map_id) 3521 { 3522 union ibmvnic_crq crq; 3523 3524 memset(&crq, 0, sizeof(crq)); 3525 crq.request_unmap.first = IBMVNIC_CRQ_CMD; 3526 crq.request_unmap.cmd = REQUEST_UNMAP; 3527 crq.request_unmap.map_id = map_id; 3528 return ibmvnic_send_crq(adapter, &crq); 3529 } 3530 3531 static void send_map_query(struct ibmvnic_adapter *adapter) 3532 { 3533 union ibmvnic_crq crq; 3534 3535 memset(&crq, 0, sizeof(crq)); 3536 crq.query_map.first = IBMVNIC_CRQ_CMD; 3537 crq.query_map.cmd = QUERY_MAP; 3538 ibmvnic_send_crq(adapter, &crq); 3539 } 3540 3541 /* Send a series of CRQs requesting various capabilities of the VNIC server */ 3542 static void send_cap_queries(struct ibmvnic_adapter *adapter) 3543 { 3544 union ibmvnic_crq crq; 3545 3546 atomic_set(&adapter->running_cap_crqs, 0); 3547 memset(&crq, 0, sizeof(crq)); 3548 crq.query_capability.first = IBMVNIC_CRQ_CMD; 3549 crq.query_capability.cmd = QUERY_CAPABILITY; 3550 3551 crq.query_capability.capability = cpu_to_be16(MIN_TX_QUEUES); 
3552 atomic_inc(&adapter->running_cap_crqs); 3553 ibmvnic_send_crq(adapter, &crq); 3554 3555 crq.query_capability.capability = cpu_to_be16(MIN_RX_QUEUES); 3556 atomic_inc(&adapter->running_cap_crqs); 3557 ibmvnic_send_crq(adapter, &crq); 3558 3559 crq.query_capability.capability = cpu_to_be16(MIN_RX_ADD_QUEUES); 3560 atomic_inc(&adapter->running_cap_crqs); 3561 ibmvnic_send_crq(adapter, &crq); 3562 3563 crq.query_capability.capability = cpu_to_be16(MAX_TX_QUEUES); 3564 atomic_inc(&adapter->running_cap_crqs); 3565 ibmvnic_send_crq(adapter, &crq); 3566 3567 crq.query_capability.capability = cpu_to_be16(MAX_RX_QUEUES); 3568 atomic_inc(&adapter->running_cap_crqs); 3569 ibmvnic_send_crq(adapter, &crq); 3570 3571 crq.query_capability.capability = cpu_to_be16(MAX_RX_ADD_QUEUES); 3572 atomic_inc(&adapter->running_cap_crqs); 3573 ibmvnic_send_crq(adapter, &crq); 3574 3575 crq.query_capability.capability = 3576 cpu_to_be16(MIN_TX_ENTRIES_PER_SUBCRQ); 3577 atomic_inc(&adapter->running_cap_crqs); 3578 ibmvnic_send_crq(adapter, &crq); 3579 3580 crq.query_capability.capability = 3581 cpu_to_be16(MIN_RX_ADD_ENTRIES_PER_SUBCRQ); 3582 atomic_inc(&adapter->running_cap_crqs); 3583 ibmvnic_send_crq(adapter, &crq); 3584 3585 crq.query_capability.capability = 3586 cpu_to_be16(MAX_TX_ENTRIES_PER_SUBCRQ); 3587 atomic_inc(&adapter->running_cap_crqs); 3588 ibmvnic_send_crq(adapter, &crq); 3589 3590 crq.query_capability.capability = 3591 cpu_to_be16(MAX_RX_ADD_ENTRIES_PER_SUBCRQ); 3592 atomic_inc(&adapter->running_cap_crqs); 3593 ibmvnic_send_crq(adapter, &crq); 3594 3595 crq.query_capability.capability = cpu_to_be16(TCP_IP_OFFLOAD); 3596 atomic_inc(&adapter->running_cap_crqs); 3597 ibmvnic_send_crq(adapter, &crq); 3598 3599 crq.query_capability.capability = cpu_to_be16(PROMISC_SUPPORTED); 3600 atomic_inc(&adapter->running_cap_crqs); 3601 ibmvnic_send_crq(adapter, &crq); 3602 3603 crq.query_capability.capability = cpu_to_be16(MIN_MTU); 3604 atomic_inc(&adapter->running_cap_crqs); 3605 ibmvnic_send_crq(adapter, &crq); 3606 3607 crq.query_capability.capability = cpu_to_be16(MAX_MTU); 3608 atomic_inc(&adapter->running_cap_crqs); 3609 ibmvnic_send_crq(adapter, &crq); 3610 3611 crq.query_capability.capability = cpu_to_be16(MAX_MULTICAST_FILTERS); 3612 atomic_inc(&adapter->running_cap_crqs); 3613 ibmvnic_send_crq(adapter, &crq); 3614 3615 crq.query_capability.capability = cpu_to_be16(VLAN_HEADER_INSERTION); 3616 atomic_inc(&adapter->running_cap_crqs); 3617 ibmvnic_send_crq(adapter, &crq); 3618 3619 crq.query_capability.capability = cpu_to_be16(RX_VLAN_HEADER_INSERTION); 3620 atomic_inc(&adapter->running_cap_crqs); 3621 ibmvnic_send_crq(adapter, &crq); 3622 3623 crq.query_capability.capability = cpu_to_be16(MAX_TX_SG_ENTRIES); 3624 atomic_inc(&adapter->running_cap_crqs); 3625 ibmvnic_send_crq(adapter, &crq); 3626 3627 crq.query_capability.capability = cpu_to_be16(RX_SG_SUPPORTED); 3628 atomic_inc(&adapter->running_cap_crqs); 3629 ibmvnic_send_crq(adapter, &crq); 3630 3631 crq.query_capability.capability = cpu_to_be16(OPT_TX_COMP_SUB_QUEUES); 3632 atomic_inc(&adapter->running_cap_crqs); 3633 ibmvnic_send_crq(adapter, &crq); 3634 3635 crq.query_capability.capability = cpu_to_be16(OPT_RX_COMP_QUEUES); 3636 atomic_inc(&adapter->running_cap_crqs); 3637 ibmvnic_send_crq(adapter, &crq); 3638 3639 crq.query_capability.capability = 3640 cpu_to_be16(OPT_RX_BUFADD_Q_PER_RX_COMP_Q); 3641 atomic_inc(&adapter->running_cap_crqs); 3642 ibmvnic_send_crq(adapter, &crq); 3643 3644 crq.query_capability.capability = 3645 
cpu_to_be16(OPT_TX_ENTRIES_PER_SUBCRQ); 3646 atomic_inc(&adapter->running_cap_crqs); 3647 ibmvnic_send_crq(adapter, &crq); 3648 3649 crq.query_capability.capability = 3650 cpu_to_be16(OPT_RXBA_ENTRIES_PER_SUBCRQ); 3651 atomic_inc(&adapter->running_cap_crqs); 3652 ibmvnic_send_crq(adapter, &crq); 3653 3654 crq.query_capability.capability = cpu_to_be16(TX_RX_DESC_REQ); 3655 atomic_inc(&adapter->running_cap_crqs); 3656 ibmvnic_send_crq(adapter, &crq); 3657 } 3658 3659 static void handle_vpd_size_rsp(union ibmvnic_crq *crq, 3660 struct ibmvnic_adapter *adapter) 3661 { 3662 struct device *dev = &adapter->vdev->dev; 3663 3664 if (crq->get_vpd_size_rsp.rc.code) { 3665 dev_err(dev, "Error retrieving VPD size, rc=%x\n", 3666 crq->get_vpd_size_rsp.rc.code); 3667 complete(&adapter->fw_done); 3668 return; 3669 } 3670 3671 adapter->vpd->len = be64_to_cpu(crq->get_vpd_size_rsp.len); 3672 complete(&adapter->fw_done); 3673 } 3674 3675 static void handle_vpd_rsp(union ibmvnic_crq *crq, 3676 struct ibmvnic_adapter *adapter) 3677 { 3678 struct device *dev = &adapter->vdev->dev; 3679 unsigned char *substr = NULL; 3680 u8 fw_level_len = 0; 3681 3682 memset(adapter->fw_version, 0, 32); 3683 3684 dma_unmap_single(dev, adapter->vpd->dma_addr, adapter->vpd->len, 3685 DMA_FROM_DEVICE); 3686 3687 if (crq->get_vpd_rsp.rc.code) { 3688 dev_err(dev, "Error retrieving VPD from device, rc=%x\n", 3689 crq->get_vpd_rsp.rc.code); 3690 goto complete; 3691 } 3692 3693 /* get the position of the firmware version info 3694 * located after the ASCII 'RM' substring in the buffer 3695 */ 3696 substr = strnstr(adapter->vpd->buff, "RM", adapter->vpd->len); 3697 if (!substr) { 3698 dev_info(dev, "Warning - No FW level has been provided in the VPD buffer by the VIOS Server\n"); 3699 goto complete; 3700 } 3701 3702 /* get length of firmware level ASCII substring */ 3703 if ((substr + 2) < (adapter->vpd->buff + adapter->vpd->len)) { 3704 fw_level_len = *(substr + 2); 3705 } else { 3706 dev_info(dev, "Length of FW substr extrapolated VDP buff\n"); 3707 goto complete; 3708 } 3709 3710 /* copy firmware version string from vpd into adapter */ 3711 if ((substr + 3 + fw_level_len) < 3712 (adapter->vpd->buff + adapter->vpd->len)) { 3713 strncpy((char *)adapter->fw_version, substr + 3, fw_level_len); 3714 } else { 3715 dev_info(dev, "FW substr extrapolated VPD buff\n"); 3716 } 3717 3718 complete: 3719 if (adapter->fw_version[0] == '\0') 3720 strncpy((char *)adapter->fw_version, "N/A", 3 * sizeof(char)); 3721 complete(&adapter->fw_done); 3722 } 3723 3724 static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter) 3725 { 3726 struct device *dev = &adapter->vdev->dev; 3727 struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf; 3728 union ibmvnic_crq crq; 3729 int i; 3730 3731 dma_unmap_single(dev, adapter->ip_offload_tok, 3732 sizeof(adapter->ip_offload_buf), DMA_FROM_DEVICE); 3733 3734 netdev_dbg(adapter->netdev, "Query IP Offload Buffer:\n"); 3735 for (i = 0; i < (sizeof(adapter->ip_offload_buf) - 1) / 8 + 1; i++) 3736 netdev_dbg(adapter->netdev, "%016lx\n", 3737 ((unsigned long int *)(buf))[i]); 3738 3739 netdev_dbg(adapter->netdev, "ipv4_chksum = %d\n", buf->ipv4_chksum); 3740 netdev_dbg(adapter->netdev, "ipv6_chksum = %d\n", buf->ipv6_chksum); 3741 netdev_dbg(adapter->netdev, "tcp_ipv4_chksum = %d\n", 3742 buf->tcp_ipv4_chksum); 3743 netdev_dbg(adapter->netdev, "tcp_ipv6_chksum = %d\n", 3744 buf->tcp_ipv6_chksum); 3745 netdev_dbg(adapter->netdev, "udp_ipv4_chksum = %d\n", 3746 buf->udp_ipv4_chksum); 3747 
netdev_dbg(adapter->netdev, "udp_ipv6_chksum = %d\n", 3748 buf->udp_ipv6_chksum); 3749 netdev_dbg(adapter->netdev, "large_tx_ipv4 = %d\n", 3750 buf->large_tx_ipv4); 3751 netdev_dbg(adapter->netdev, "large_tx_ipv6 = %d\n", 3752 buf->large_tx_ipv6); 3753 netdev_dbg(adapter->netdev, "large_rx_ipv4 = %d\n", 3754 buf->large_rx_ipv4); 3755 netdev_dbg(adapter->netdev, "large_rx_ipv6 = %d\n", 3756 buf->large_rx_ipv6); 3757 netdev_dbg(adapter->netdev, "max_ipv4_hdr_sz = %d\n", 3758 buf->max_ipv4_header_size); 3759 netdev_dbg(adapter->netdev, "max_ipv6_hdr_sz = %d\n", 3760 buf->max_ipv6_header_size); 3761 netdev_dbg(adapter->netdev, "max_tcp_hdr_size = %d\n", 3762 buf->max_tcp_header_size); 3763 netdev_dbg(adapter->netdev, "max_udp_hdr_size = %d\n", 3764 buf->max_udp_header_size); 3765 netdev_dbg(adapter->netdev, "max_large_tx_size = %d\n", 3766 buf->max_large_tx_size); 3767 netdev_dbg(adapter->netdev, "max_large_rx_size = %d\n", 3768 buf->max_large_rx_size); 3769 netdev_dbg(adapter->netdev, "ipv6_ext_hdr = %d\n", 3770 buf->ipv6_extension_header); 3771 netdev_dbg(adapter->netdev, "tcp_pseudosum_req = %d\n", 3772 buf->tcp_pseudosum_req); 3773 netdev_dbg(adapter->netdev, "num_ipv6_ext_hd = %d\n", 3774 buf->num_ipv6_ext_headers); 3775 netdev_dbg(adapter->netdev, "off_ipv6_ext_hd = %d\n", 3776 buf->off_ipv6_ext_headers); 3777 3778 adapter->ip_offload_ctrl_tok = 3779 dma_map_single(dev, &adapter->ip_offload_ctrl, 3780 sizeof(adapter->ip_offload_ctrl), DMA_TO_DEVICE); 3781 3782 if (dma_mapping_error(dev, adapter->ip_offload_ctrl_tok)) { 3783 dev_err(dev, "Couldn't map ip offload control buffer\n"); 3784 return; 3785 } 3786 3787 adapter->ip_offload_ctrl.len = 3788 cpu_to_be32(sizeof(adapter->ip_offload_ctrl)); 3789 adapter->ip_offload_ctrl.version = cpu_to_be32(INITIAL_VERSION_IOB); 3790 adapter->ip_offload_ctrl.ipv4_chksum = buf->ipv4_chksum; 3791 adapter->ip_offload_ctrl.ipv6_chksum = buf->ipv6_chksum; 3792 adapter->ip_offload_ctrl.tcp_ipv4_chksum = buf->tcp_ipv4_chksum; 3793 adapter->ip_offload_ctrl.udp_ipv4_chksum = buf->udp_ipv4_chksum; 3794 adapter->ip_offload_ctrl.tcp_ipv6_chksum = buf->tcp_ipv6_chksum; 3795 adapter->ip_offload_ctrl.udp_ipv6_chksum = buf->udp_ipv6_chksum; 3796 adapter->ip_offload_ctrl.large_tx_ipv4 = buf->large_tx_ipv4; 3797 adapter->ip_offload_ctrl.large_tx_ipv6 = buf->large_tx_ipv6; 3798 3799 /* large_rx disabled for now, additional features needed */ 3800 adapter->ip_offload_ctrl.large_rx_ipv4 = 0; 3801 adapter->ip_offload_ctrl.large_rx_ipv6 = 0; 3802 3803 adapter->netdev->features = NETIF_F_SG | NETIF_F_GSO; 3804 3805 if (buf->tcp_ipv4_chksum || buf->udp_ipv4_chksum) 3806 adapter->netdev->features |= NETIF_F_IP_CSUM; 3807 3808 if (buf->tcp_ipv6_chksum || buf->udp_ipv6_chksum) 3809 adapter->netdev->features |= NETIF_F_IPV6_CSUM; 3810 3811 if ((adapter->netdev->features & 3812 (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))) 3813 adapter->netdev->features |= NETIF_F_RXCSUM; 3814 3815 if (buf->large_tx_ipv4) 3816 adapter->netdev->features |= NETIF_F_TSO; 3817 if (buf->large_tx_ipv6) 3818 adapter->netdev->features |= NETIF_F_TSO6; 3819 3820 adapter->netdev->hw_features |= adapter->netdev->features; 3821 3822 memset(&crq, 0, sizeof(crq)); 3823 crq.control_ip_offload.first = IBMVNIC_CRQ_CMD; 3824 crq.control_ip_offload.cmd = CONTROL_IP_OFFLOAD; 3825 crq.control_ip_offload.len = 3826 cpu_to_be32(sizeof(adapter->ip_offload_ctrl)); 3827 crq.control_ip_offload.ioba = cpu_to_be32(adapter->ip_offload_ctrl_tok); 3828 ibmvnic_send_crq(adapter, &crq); 3829 } 3830 3831 static void 
handle_error_info_rsp(union ibmvnic_crq *crq, 3832 struct ibmvnic_adapter *adapter) 3833 { 3834 struct device *dev = &adapter->vdev->dev; 3835 struct ibmvnic_error_buff *error_buff, *tmp; 3836 unsigned long flags; 3837 bool found = false; 3838 int i; 3839 3840 if (!crq->request_error_rsp.rc.code) { 3841 dev_info(dev, "Request Error Rsp returned with rc=%x\n", 3842 crq->request_error_rsp.rc.code); 3843 return; 3844 } 3845 3846 spin_lock_irqsave(&adapter->error_list_lock, flags); 3847 list_for_each_entry_safe(error_buff, tmp, &adapter->errors, list) 3848 if (error_buff->error_id == crq->request_error_rsp.error_id) { 3849 found = true; 3850 list_del(&error_buff->list); 3851 break; 3852 } 3853 spin_unlock_irqrestore(&adapter->error_list_lock, flags); 3854 3855 if (!found) { 3856 dev_err(dev, "Couldn't find error id %x\n", 3857 be32_to_cpu(crq->request_error_rsp.error_id)); 3858 return; 3859 } 3860 3861 dev_err(dev, "Detailed info for error id %x:", 3862 be32_to_cpu(crq->request_error_rsp.error_id)); 3863 3864 for (i = 0; i < error_buff->len; i++) { 3865 pr_cont("%02x", (int)error_buff->buff[i]); 3866 if (i % 8 == 7) 3867 pr_cont(" "); 3868 } 3869 pr_cont("\n"); 3870 3871 dma_unmap_single(dev, error_buff->dma, error_buff->len, 3872 DMA_FROM_DEVICE); 3873 kfree(error_buff->buff); 3874 kfree(error_buff); 3875 } 3876 3877 static void request_error_information(struct ibmvnic_adapter *adapter, 3878 union ibmvnic_crq *err_crq) 3879 { 3880 struct device *dev = &adapter->vdev->dev; 3881 struct net_device *netdev = adapter->netdev; 3882 struct ibmvnic_error_buff *error_buff; 3883 unsigned long timeout = msecs_to_jiffies(30000); 3884 union ibmvnic_crq crq; 3885 unsigned long flags; 3886 int rc, detail_len; 3887 3888 error_buff = kmalloc(sizeof(*error_buff), GFP_ATOMIC); 3889 if (!error_buff) 3890 return; 3891 3892 detail_len = be32_to_cpu(err_crq->error_indication.detail_error_sz); 3893 error_buff->buff = kmalloc(detail_len, GFP_ATOMIC); 3894 if (!error_buff->buff) { 3895 kfree(error_buff); 3896 return; 3897 } 3898 3899 error_buff->dma = dma_map_single(dev, error_buff->buff, detail_len, 3900 DMA_FROM_DEVICE); 3901 if (dma_mapping_error(dev, error_buff->dma)) { 3902 netdev_err(netdev, "Couldn't map error buffer\n"); 3903 kfree(error_buff->buff); 3904 kfree(error_buff); 3905 return; 3906 } 3907 3908 error_buff->len = detail_len; 3909 error_buff->error_id = err_crq->error_indication.error_id; 3910 3911 spin_lock_irqsave(&adapter->error_list_lock, flags); 3912 list_add_tail(&error_buff->list, &adapter->errors); 3913 spin_unlock_irqrestore(&adapter->error_list_lock, flags); 3914 3915 memset(&crq, 0, sizeof(crq)); 3916 crq.request_error_info.first = IBMVNIC_CRQ_CMD; 3917 crq.request_error_info.cmd = REQUEST_ERROR_INFO; 3918 crq.request_error_info.ioba = cpu_to_be32(error_buff->dma); 3919 crq.request_error_info.len = cpu_to_be32(detail_len); 3920 crq.request_error_info.error_id = err_crq->error_indication.error_id; 3921 3922 rc = ibmvnic_send_crq(adapter, &crq); 3923 if (rc) { 3924 netdev_err(netdev, "failed to request error information\n"); 3925 goto err_info_fail; 3926 } 3927 3928 if (!wait_for_completion_timeout(&adapter->init_done, timeout)) { 3929 netdev_err(netdev, "timeout waiting for error information\n"); 3930 goto err_info_fail; 3931 } 3932 3933 return; 3934 3935 err_info_fail: 3936 spin_lock_irqsave(&adapter->error_list_lock, flags); 3937 list_del(&error_buff->list); 3938 spin_unlock_irqrestore(&adapter->error_list_lock, flags); 3939 3940 kfree(error_buff->buff); 3941 kfree(error_buff); 3942 } 3943 
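/* Log a firmware error indication, request the extended error data when a
 * non-zero error id is supplied, and schedule a fatal or non-fatal reset
 * based on the IBMVNIC_FATAL_ERROR flag.
 */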
3944 static void handle_error_indication(union ibmvnic_crq *crq, 3945 struct ibmvnic_adapter *adapter) 3946 { 3947 struct device *dev = &adapter->vdev->dev; 3948 3949 dev_err(dev, "Firmware reports %serror id %x, cause %d\n", 3950 crq->error_indication.flags 3951 & IBMVNIC_FATAL_ERROR ? "FATAL " : "", 3952 be32_to_cpu(crq->error_indication.error_id), 3953 be16_to_cpu(crq->error_indication.error_cause)); 3954 3955 if (be32_to_cpu(crq->error_indication.error_id)) 3956 request_error_information(adapter, crq); 3957 3958 if (crq->error_indication.flags & IBMVNIC_FATAL_ERROR) 3959 ibmvnic_reset(adapter, VNIC_RESET_FATAL); 3960 else 3961 ibmvnic_reset(adapter, VNIC_RESET_NON_FATAL); 3962 } 3963 3964 static int handle_change_mac_rsp(union ibmvnic_crq *crq, 3965 struct ibmvnic_adapter *adapter) 3966 { 3967 struct net_device *netdev = adapter->netdev; 3968 struct device *dev = &adapter->vdev->dev; 3969 long rc; 3970 3971 rc = crq->change_mac_addr_rsp.rc.code; 3972 if (rc) { 3973 dev_err(dev, "Error %ld in CHANGE_MAC_ADDR_RSP\n", rc); 3974 goto out; 3975 } 3976 memcpy(netdev->dev_addr, &crq->change_mac_addr_rsp.mac_addr[0], 3977 ETH_ALEN); 3978 out: 3979 complete(&adapter->fw_done); 3980 return rc; 3981 } 3982 3983 static void handle_request_cap_rsp(union ibmvnic_crq *crq, 3984 struct ibmvnic_adapter *adapter) 3985 { 3986 struct device *dev = &adapter->vdev->dev; 3987 u64 *req_value; 3988 char *name; 3989 3990 atomic_dec(&adapter->running_cap_crqs); 3991 switch (be16_to_cpu(crq->request_capability_rsp.capability)) { 3992 case REQ_TX_QUEUES: 3993 req_value = &adapter->req_tx_queues; 3994 name = "tx"; 3995 break; 3996 case REQ_RX_QUEUES: 3997 req_value = &adapter->req_rx_queues; 3998 name = "rx"; 3999 break; 4000 case REQ_RX_ADD_QUEUES: 4001 req_value = &adapter->req_rx_add_queues; 4002 name = "rx_add"; 4003 break; 4004 case REQ_TX_ENTRIES_PER_SUBCRQ: 4005 req_value = &adapter->req_tx_entries_per_subcrq; 4006 name = "tx_entries_per_subcrq"; 4007 break; 4008 case REQ_RX_ADD_ENTRIES_PER_SUBCRQ: 4009 req_value = &adapter->req_rx_add_entries_per_subcrq; 4010 name = "rx_add_entries_per_subcrq"; 4011 break; 4012 case REQ_MTU: 4013 req_value = &adapter->req_mtu; 4014 name = "mtu"; 4015 break; 4016 case PROMISC_REQUESTED: 4017 req_value = &adapter->promisc; 4018 name = "promisc"; 4019 break; 4020 default: 4021 dev_err(dev, "Got invalid cap request rsp %d\n", 4022 crq->request_capability.capability); 4023 return; 4024 } 4025 4026 switch (crq->request_capability_rsp.rc.code) { 4027 case SUCCESS: 4028 break; 4029 case PARTIALSUCCESS: 4030 dev_info(dev, "req=%lld, rsp=%ld in %s queue, retrying.\n", 4031 *req_value, 4032 (long int)be64_to_cpu(crq->request_capability_rsp. 4033 number), name); 4034 4035 if (be16_to_cpu(crq->request_capability_rsp.capability) == 4036 REQ_MTU) { 4037 pr_err("mtu of %llu is not supported. 
Reverting.\n", 4038 *req_value); 4039 *req_value = adapter->fallback.mtu; 4040 } else { 4041 *req_value = 4042 be64_to_cpu(crq->request_capability_rsp.number); 4043 } 4044 4045 ibmvnic_send_req_caps(adapter, 1); 4046 return; 4047 default: 4048 dev_err(dev, "Error %d in request cap rsp\n", 4049 crq->request_capability_rsp.rc.code); 4050 return; 4051 } 4052 4053 /* Done receiving requested capabilities, query IP offload support */ 4054 if (atomic_read(&adapter->running_cap_crqs) == 0) { 4055 union ibmvnic_crq newcrq; 4056 int buf_sz = sizeof(struct ibmvnic_query_ip_offload_buffer); 4057 struct ibmvnic_query_ip_offload_buffer *ip_offload_buf = 4058 &adapter->ip_offload_buf; 4059 4060 adapter->wait_capability = false; 4061 adapter->ip_offload_tok = dma_map_single(dev, ip_offload_buf, 4062 buf_sz, 4063 DMA_FROM_DEVICE); 4064 4065 if (dma_mapping_error(dev, adapter->ip_offload_tok)) { 4066 if (!firmware_has_feature(FW_FEATURE_CMO)) 4067 dev_err(dev, "Couldn't map offload buffer\n"); 4068 return; 4069 } 4070 4071 memset(&newcrq, 0, sizeof(newcrq)); 4072 newcrq.query_ip_offload.first = IBMVNIC_CRQ_CMD; 4073 newcrq.query_ip_offload.cmd = QUERY_IP_OFFLOAD; 4074 newcrq.query_ip_offload.len = cpu_to_be32(buf_sz); 4075 newcrq.query_ip_offload.ioba = 4076 cpu_to_be32(adapter->ip_offload_tok); 4077 4078 ibmvnic_send_crq(adapter, &newcrq); 4079 } 4080 } 4081 4082 static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq, 4083 struct ibmvnic_adapter *adapter) 4084 { 4085 struct device *dev = &adapter->vdev->dev; 4086 struct net_device *netdev = adapter->netdev; 4087 struct ibmvnic_login_rsp_buffer *login_rsp = adapter->login_rsp_buf; 4088 struct ibmvnic_login_buffer *login = adapter->login_buf; 4089 int i; 4090 4091 dma_unmap_single(dev, adapter->login_buf_token, adapter->login_buf_sz, 4092 DMA_TO_DEVICE); 4093 dma_unmap_single(dev, adapter->login_rsp_buf_token, 4094 adapter->login_rsp_buf_sz, DMA_FROM_DEVICE); 4095 4096 /* If the number of queues requested can't be allocated by the 4097 * server, the login response will return with code 1. We will need 4098 * to resend the login buffer with fewer queues requested. 
4099 */ 4100 if (login_rsp_crq->generic.rc.code) { 4101 adapter->init_done_rc = login_rsp_crq->generic.rc.code; 4102 complete(&adapter->init_done); 4103 return 0; 4104 } 4105 4106 netdev->mtu = adapter->req_mtu - ETH_HLEN; 4107 4108 netdev_dbg(adapter->netdev, "Login Response Buffer:\n"); 4109 for (i = 0; i < (adapter->login_rsp_buf_sz - 1) / 8 + 1; i++) { 4110 netdev_dbg(adapter->netdev, "%016lx\n", 4111 ((unsigned long int *)(adapter->login_rsp_buf))[i]); 4112 } 4113 4114 /* Sanity checks */ 4115 if (login->num_txcomp_subcrqs != login_rsp->num_txsubm_subcrqs || 4116 (be32_to_cpu(login->num_rxcomp_subcrqs) * 4117 adapter->req_rx_add_queues != 4118 be32_to_cpu(login_rsp->num_rxadd_subcrqs))) { 4119 dev_err(dev, "FATAL: Inconsistent login and login rsp\n"); 4120 ibmvnic_remove(adapter->vdev); 4121 return -EIO; 4122 } 4123 release_login_buffer(adapter); 4124 complete(&adapter->init_done); 4125 4126 return 0; 4127 } 4128 4129 static void handle_request_unmap_rsp(union ibmvnic_crq *crq, 4130 struct ibmvnic_adapter *adapter) 4131 { 4132 struct device *dev = &adapter->vdev->dev; 4133 long rc; 4134 4135 rc = crq->request_unmap_rsp.rc.code; 4136 if (rc) 4137 dev_err(dev, "Error %ld in REQUEST_UNMAP_RSP\n", rc); 4138 } 4139 4140 static void handle_query_map_rsp(union ibmvnic_crq *crq, 4141 struct ibmvnic_adapter *adapter) 4142 { 4143 struct net_device *netdev = adapter->netdev; 4144 struct device *dev = &adapter->vdev->dev; 4145 long rc; 4146 4147 rc = crq->query_map_rsp.rc.code; 4148 if (rc) { 4149 dev_err(dev, "Error %ld in QUERY_MAP_RSP\n", rc); 4150 return; 4151 } 4152 netdev_dbg(netdev, "page_size = %d\ntot_pages = %d\nfree_pages = %d\n", 4153 crq->query_map_rsp.page_size, crq->query_map_rsp.tot_pages, 4154 crq->query_map_rsp.free_pages); 4155 } 4156 4157 static void handle_query_cap_rsp(union ibmvnic_crq *crq, 4158 struct ibmvnic_adapter *adapter) 4159 { 4160 struct net_device *netdev = adapter->netdev; 4161 struct device *dev = &adapter->vdev->dev; 4162 long rc; 4163 4164 atomic_dec(&adapter->running_cap_crqs); 4165 netdev_dbg(netdev, "Outstanding queries: %d\n", 4166 atomic_read(&adapter->running_cap_crqs)); 4167 rc = crq->query_capability.rc.code; 4168 if (rc) { 4169 dev_err(dev, "Error %ld in QUERY_CAP_RSP\n", rc); 4170 goto out; 4171 } 4172 4173 switch (be16_to_cpu(crq->query_capability.capability)) { 4174 case MIN_TX_QUEUES: 4175 adapter->min_tx_queues = 4176 be64_to_cpu(crq->query_capability.number); 4177 netdev_dbg(netdev, "min_tx_queues = %lld\n", 4178 adapter->min_tx_queues); 4179 break; 4180 case MIN_RX_QUEUES: 4181 adapter->min_rx_queues = 4182 be64_to_cpu(crq->query_capability.number); 4183 netdev_dbg(netdev, "min_rx_queues = %lld\n", 4184 adapter->min_rx_queues); 4185 break; 4186 case MIN_RX_ADD_QUEUES: 4187 adapter->min_rx_add_queues = 4188 be64_to_cpu(crq->query_capability.number); 4189 netdev_dbg(netdev, "min_rx_add_queues = %lld\n", 4190 adapter->min_rx_add_queues); 4191 break; 4192 case MAX_TX_QUEUES: 4193 adapter->max_tx_queues = 4194 be64_to_cpu(crq->query_capability.number); 4195 netdev_dbg(netdev, "max_tx_queues = %lld\n", 4196 adapter->max_tx_queues); 4197 break; 4198 case MAX_RX_QUEUES: 4199 adapter->max_rx_queues = 4200 be64_to_cpu(crq->query_capability.number); 4201 netdev_dbg(netdev, "max_rx_queues = %lld\n", 4202 adapter->max_rx_queues); 4203 break; 4204 case MAX_RX_ADD_QUEUES: 4205 adapter->max_rx_add_queues = 4206 be64_to_cpu(crq->query_capability.number); 4207 netdev_dbg(netdev, "max_rx_add_queues = %lld\n", 4208 adapter->max_rx_add_queues); 4209 break; 4210 
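	/* The limits cached in this switch feed the capability request that
	 * ibmvnic_send_req_caps() issues at the bottom of this handler once
	 * the last QUERY_CAPABILITY response has arrived.
	 */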
case MIN_TX_ENTRIES_PER_SUBCRQ: 4211 adapter->min_tx_entries_per_subcrq = 4212 be64_to_cpu(crq->query_capability.number); 4213 netdev_dbg(netdev, "min_tx_entries_per_subcrq = %lld\n", 4214 adapter->min_tx_entries_per_subcrq); 4215 break; 4216 case MIN_RX_ADD_ENTRIES_PER_SUBCRQ: 4217 adapter->min_rx_add_entries_per_subcrq = 4218 be64_to_cpu(crq->query_capability.number); 4219 netdev_dbg(netdev, "min_rx_add_entrs_per_subcrq = %lld\n", 4220 adapter->min_rx_add_entries_per_subcrq); 4221 break; 4222 case MAX_TX_ENTRIES_PER_SUBCRQ: 4223 adapter->max_tx_entries_per_subcrq = 4224 be64_to_cpu(crq->query_capability.number); 4225 netdev_dbg(netdev, "max_tx_entries_per_subcrq = %lld\n", 4226 adapter->max_tx_entries_per_subcrq); 4227 break; 4228 case MAX_RX_ADD_ENTRIES_PER_SUBCRQ: 4229 adapter->max_rx_add_entries_per_subcrq = 4230 be64_to_cpu(crq->query_capability.number); 4231 netdev_dbg(netdev, "max_rx_add_entrs_per_subcrq = %lld\n", 4232 adapter->max_rx_add_entries_per_subcrq); 4233 break; 4234 case TCP_IP_OFFLOAD: 4235 adapter->tcp_ip_offload = 4236 be64_to_cpu(crq->query_capability.number); 4237 netdev_dbg(netdev, "tcp_ip_offload = %lld\n", 4238 adapter->tcp_ip_offload); 4239 break; 4240 case PROMISC_SUPPORTED: 4241 adapter->promisc_supported = 4242 be64_to_cpu(crq->query_capability.number); 4243 netdev_dbg(netdev, "promisc_supported = %lld\n", 4244 adapter->promisc_supported); 4245 break; 4246 case MIN_MTU: 4247 adapter->min_mtu = be64_to_cpu(crq->query_capability.number); 4248 netdev->min_mtu = adapter->min_mtu - ETH_HLEN; 4249 netdev_dbg(netdev, "min_mtu = %lld\n", adapter->min_mtu); 4250 break; 4251 case MAX_MTU: 4252 adapter->max_mtu = be64_to_cpu(crq->query_capability.number); 4253 netdev->max_mtu = adapter->max_mtu - ETH_HLEN; 4254 netdev_dbg(netdev, "max_mtu = %lld\n", adapter->max_mtu); 4255 break; 4256 case MAX_MULTICAST_FILTERS: 4257 adapter->max_multicast_filters = 4258 be64_to_cpu(crq->query_capability.number); 4259 netdev_dbg(netdev, "max_multicast_filters = %lld\n", 4260 adapter->max_multicast_filters); 4261 break; 4262 case VLAN_HEADER_INSERTION: 4263 adapter->vlan_header_insertion = 4264 be64_to_cpu(crq->query_capability.number); 4265 if (adapter->vlan_header_insertion) 4266 netdev->features |= NETIF_F_HW_VLAN_STAG_TX; 4267 netdev_dbg(netdev, "vlan_header_insertion = %lld\n", 4268 adapter->vlan_header_insertion); 4269 break; 4270 case RX_VLAN_HEADER_INSERTION: 4271 adapter->rx_vlan_header_insertion = 4272 be64_to_cpu(crq->query_capability.number); 4273 netdev_dbg(netdev, "rx_vlan_header_insertion = %lld\n", 4274 adapter->rx_vlan_header_insertion); 4275 break; 4276 case MAX_TX_SG_ENTRIES: 4277 adapter->max_tx_sg_entries = 4278 be64_to_cpu(crq->query_capability.number); 4279 netdev_dbg(netdev, "max_tx_sg_entries = %lld\n", 4280 adapter->max_tx_sg_entries); 4281 break; 4282 case RX_SG_SUPPORTED: 4283 adapter->rx_sg_supported = 4284 be64_to_cpu(crq->query_capability.number); 4285 netdev_dbg(netdev, "rx_sg_supported = %lld\n", 4286 adapter->rx_sg_supported); 4287 break; 4288 case OPT_TX_COMP_SUB_QUEUES: 4289 adapter->opt_tx_comp_sub_queues = 4290 be64_to_cpu(crq->query_capability.number); 4291 netdev_dbg(netdev, "opt_tx_comp_sub_queues = %lld\n", 4292 adapter->opt_tx_comp_sub_queues); 4293 break; 4294 case OPT_RX_COMP_QUEUES: 4295 adapter->opt_rx_comp_queues = 4296 be64_to_cpu(crq->query_capability.number); 4297 netdev_dbg(netdev, "opt_rx_comp_queues = %lld\n", 4298 adapter->opt_rx_comp_queues); 4299 break; 4300 case OPT_RX_BUFADD_Q_PER_RX_COMP_Q: 4301 
adapter->opt_rx_bufadd_q_per_rx_comp_q = 4302 be64_to_cpu(crq->query_capability.number); 4303 netdev_dbg(netdev, "opt_rx_bufadd_q_per_rx_comp_q = %lld\n", 4304 adapter->opt_rx_bufadd_q_per_rx_comp_q); 4305 break; 4306 case OPT_TX_ENTRIES_PER_SUBCRQ: 4307 adapter->opt_tx_entries_per_subcrq = 4308 be64_to_cpu(crq->query_capability.number); 4309 netdev_dbg(netdev, "opt_tx_entries_per_subcrq = %lld\n", 4310 adapter->opt_tx_entries_per_subcrq); 4311 break; 4312 case OPT_RXBA_ENTRIES_PER_SUBCRQ: 4313 adapter->opt_rxba_entries_per_subcrq = 4314 be64_to_cpu(crq->query_capability.number); 4315 netdev_dbg(netdev, "opt_rxba_entries_per_subcrq = %lld\n", 4316 adapter->opt_rxba_entries_per_subcrq); 4317 break; 4318 case TX_RX_DESC_REQ: 4319 adapter->tx_rx_desc_req = crq->query_capability.number; 4320 netdev_dbg(netdev, "tx_rx_desc_req = %llx\n", 4321 adapter->tx_rx_desc_req); 4322 break; 4323 4324 default: 4325 netdev_err(netdev, "Got invalid cap rsp %d\n", 4326 crq->query_capability.capability); 4327 } 4328 4329 out: 4330 if (atomic_read(&adapter->running_cap_crqs) == 0) { 4331 adapter->wait_capability = false; 4332 ibmvnic_send_req_caps(adapter, 0); 4333 } 4334 } 4335 4336 static void ibmvnic_handle_crq(union ibmvnic_crq *crq, 4337 struct ibmvnic_adapter *adapter) 4338 { 4339 struct ibmvnic_generic_crq *gen_crq = &crq->generic; 4340 struct net_device *netdev = adapter->netdev; 4341 struct device *dev = &adapter->vdev->dev; 4342 u64 *u64_crq = (u64 *)crq; 4343 long rc; 4344 4345 netdev_dbg(netdev, "Handling CRQ: %016lx %016lx\n", 4346 (unsigned long int)cpu_to_be64(u64_crq[0]), 4347 (unsigned long int)cpu_to_be64(u64_crq[1])); 4348 switch (gen_crq->first) { 4349 case IBMVNIC_CRQ_INIT_RSP: 4350 switch (gen_crq->cmd) { 4351 case IBMVNIC_CRQ_INIT: 4352 dev_info(dev, "Partner initialized\n"); 4353 adapter->from_passive_init = true; 4354 adapter->failover_pending = false; 4355 if (!completion_done(&adapter->init_done)) { 4356 complete(&adapter->init_done); 4357 adapter->init_done_rc = -EIO; 4358 } 4359 ibmvnic_reset(adapter, VNIC_RESET_FAILOVER); 4360 break; 4361 case IBMVNIC_CRQ_INIT_COMPLETE: 4362 dev_info(dev, "Partner initialization complete\n"); 4363 adapter->crq.active = true; 4364 send_version_xchg(adapter); 4365 break; 4366 default: 4367 dev_err(dev, "Unknown crq cmd: %d\n", gen_crq->cmd); 4368 } 4369 return; 4370 case IBMVNIC_CRQ_XPORT_EVENT: 4371 netif_carrier_off(netdev); 4372 adapter->crq.active = false; 4373 if (adapter->resetting) 4374 adapter->force_reset_recovery = true; 4375 if (gen_crq->cmd == IBMVNIC_PARTITION_MIGRATED) { 4376 dev_info(dev, "Migrated, re-enabling adapter\n"); 4377 ibmvnic_reset(adapter, VNIC_RESET_MOBILITY); 4378 } else if (gen_crq->cmd == IBMVNIC_DEVICE_FAILOVER) { 4379 dev_info(dev, "Backing device failover detected\n"); 4380 adapter->failover_pending = true; 4381 } else { 4382 /* The adapter lost the connection */ 4383 dev_err(dev, "Virtual Adapter failed (rc=%d)\n", 4384 gen_crq->cmd); 4385 ibmvnic_reset(adapter, VNIC_RESET_FATAL); 4386 } 4387 return; 4388 case IBMVNIC_CRQ_CMD_RSP: 4389 break; 4390 default: 4391 dev_err(dev, "Got an invalid msg type 0x%02x\n", 4392 gen_crq->first); 4393 return; 4394 } 4395 4396 switch (gen_crq->cmd) { 4397 case VERSION_EXCHANGE_RSP: 4398 rc = crq->version_exchange_rsp.rc.code; 4399 if (rc) { 4400 dev_err(dev, "Error %ld in VERSION_EXCHG_RSP\n", rc); 4401 break; 4402 } 4403 dev_info(dev, "Partner protocol version is %d\n", 4404 crq->version_exchange_rsp.version); 4405 if (be16_to_cpu(crq->version_exchange_rsp.version) < 4406 
ibmvnic_version) 4407 ibmvnic_version = 4408 be16_to_cpu(crq->version_exchange_rsp.version); 4409 send_cap_queries(adapter); 4410 break; 4411 case QUERY_CAPABILITY_RSP: 4412 handle_query_cap_rsp(crq, adapter); 4413 break; 4414 case QUERY_MAP_RSP: 4415 handle_query_map_rsp(crq, adapter); 4416 break; 4417 case REQUEST_MAP_RSP: 4418 adapter->fw_done_rc = crq->request_map_rsp.rc.code; 4419 complete(&adapter->fw_done); 4420 break; 4421 case REQUEST_UNMAP_RSP: 4422 handle_request_unmap_rsp(crq, adapter); 4423 break; 4424 case REQUEST_CAPABILITY_RSP: 4425 handle_request_cap_rsp(crq, adapter); 4426 break; 4427 case LOGIN_RSP: 4428 netdev_dbg(netdev, "Got Login Response\n"); 4429 handle_login_rsp(crq, adapter); 4430 break; 4431 case LOGICAL_LINK_STATE_RSP: 4432 netdev_dbg(netdev, 4433 "Got Logical Link State Response, state: %d rc: %d\n", 4434 crq->logical_link_state_rsp.link_state, 4435 crq->logical_link_state_rsp.rc.code); 4436 adapter->logical_link_state = 4437 crq->logical_link_state_rsp.link_state; 4438 adapter->init_done_rc = crq->logical_link_state_rsp.rc.code; 4439 complete(&adapter->init_done); 4440 break; 4441 case LINK_STATE_INDICATION: 4442 netdev_dbg(netdev, "Got Logical Link State Indication\n"); 4443 adapter->phys_link_state = 4444 crq->link_state_indication.phys_link_state; 4445 adapter->logical_link_state = 4446 crq->link_state_indication.logical_link_state; 4447 break; 4448 case CHANGE_MAC_ADDR_RSP: 4449 netdev_dbg(netdev, "Got MAC address change Response\n"); 4450 adapter->fw_done_rc = handle_change_mac_rsp(crq, adapter); 4451 break; 4452 case ERROR_INDICATION: 4453 netdev_dbg(netdev, "Got Error Indication\n"); 4454 handle_error_indication(crq, adapter); 4455 break; 4456 case REQUEST_ERROR_RSP: 4457 netdev_dbg(netdev, "Got Error Detail Response\n"); 4458 handle_error_info_rsp(crq, adapter); 4459 break; 4460 case REQUEST_STATISTICS_RSP: 4461 netdev_dbg(netdev, "Got Statistics Response\n"); 4462 complete(&adapter->stats_done); 4463 break; 4464 case QUERY_IP_OFFLOAD_RSP: 4465 netdev_dbg(netdev, "Got Query IP offload Response\n"); 4466 handle_query_ip_offload_rsp(adapter); 4467 break; 4468 case MULTICAST_CTRL_RSP: 4469 netdev_dbg(netdev, "Got multicast control Response\n"); 4470 break; 4471 case CONTROL_IP_OFFLOAD_RSP: 4472 netdev_dbg(netdev, "Got Control IP offload Response\n"); 4473 dma_unmap_single(dev, adapter->ip_offload_ctrl_tok, 4474 sizeof(adapter->ip_offload_ctrl), 4475 DMA_TO_DEVICE); 4476 complete(&adapter->init_done); 4477 break; 4478 case COLLECT_FW_TRACE_RSP: 4479 netdev_dbg(netdev, "Got Collect firmware trace Response\n"); 4480 complete(&adapter->fw_done); 4481 break; 4482 case GET_VPD_SIZE_RSP: 4483 handle_vpd_size_rsp(crq, adapter); 4484 break; 4485 case GET_VPD_RSP: 4486 handle_vpd_rsp(crq, adapter); 4487 break; 4488 default: 4489 netdev_err(netdev, "Got an invalid cmd type 0x%02x\n", 4490 gen_crq->cmd); 4491 } 4492 } 4493 4494 static irqreturn_t ibmvnic_interrupt(int irq, void *instance) 4495 { 4496 struct ibmvnic_adapter *adapter = instance; 4497 4498 tasklet_schedule(&adapter->tasklet); 4499 return IRQ_HANDLED; 4500 } 4501 4502 static void ibmvnic_tasklet(void *data) 4503 { 4504 struct ibmvnic_adapter *adapter = data; 4505 struct ibmvnic_crq_queue *queue = &adapter->crq; 4506 union ibmvnic_crq *crq; 4507 unsigned long flags; 4508 bool done = false; 4509 4510 spin_lock_irqsave(&queue->lock, flags); 4511 while (!done) { 4512 /* Pull all the valid messages off the CRQ */ 4513 while ((crq = ibmvnic_next_crq(adapter)) != NULL) { 4514 ibmvnic_handle_crq(crq, adapter); 
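			/* Clearing the header marks this slot as consumed so
			 * ibmvnic_next_crq() will not hand it out again.
			 */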
4515 crq->generic.first = 0; 4516 } 4517 4518 /* remain in tasklet until all 4519 * capabilities responses are received 4520 */ 4521 if (!adapter->wait_capability) 4522 done = true; 4523 } 4524 /* if capabilities CRQ's were sent in this tasklet, the following 4525 * tasklet must wait until all responses are received 4526 */ 4527 if (atomic_read(&adapter->running_cap_crqs) != 0) 4528 adapter->wait_capability = true; 4529 spin_unlock_irqrestore(&queue->lock, flags); 4530 } 4531 4532 static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *adapter) 4533 { 4534 struct vio_dev *vdev = adapter->vdev; 4535 int rc; 4536 4537 do { 4538 rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address); 4539 } while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc)); 4540 4541 if (rc) 4542 dev_err(&vdev->dev, "Error enabling adapter (rc=%d)\n", rc); 4543 4544 return rc; 4545 } 4546 4547 static int ibmvnic_reset_crq(struct ibmvnic_adapter *adapter) 4548 { 4549 struct ibmvnic_crq_queue *crq = &adapter->crq; 4550 struct device *dev = &adapter->vdev->dev; 4551 struct vio_dev *vdev = adapter->vdev; 4552 int rc; 4553 4554 /* Close the CRQ */ 4555 do { 4556 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address); 4557 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc)); 4558 4559 /* Clean out the queue */ 4560 memset(crq->msgs, 0, PAGE_SIZE); 4561 crq->cur = 0; 4562 crq->active = false; 4563 4564 /* And re-open it again */ 4565 rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address, 4566 crq->msg_token, PAGE_SIZE); 4567 4568 if (rc == H_CLOSED) 4569 /* Adapter is good, but other end is not ready */ 4570 dev_warn(dev, "Partner adapter not ready\n"); 4571 else if (rc != 0) 4572 dev_warn(dev, "Couldn't register crq (rc=%d)\n", rc); 4573 4574 return rc; 4575 } 4576 4577 static void release_crq_queue(struct ibmvnic_adapter *adapter) 4578 { 4579 struct ibmvnic_crq_queue *crq = &adapter->crq; 4580 struct vio_dev *vdev = adapter->vdev; 4581 long rc; 4582 4583 if (!crq->msgs) 4584 return; 4585 4586 netdev_dbg(adapter->netdev, "Releasing CRQ\n"); 4587 free_irq(vdev->irq, adapter); 4588 tasklet_kill(&adapter->tasklet); 4589 do { 4590 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address); 4591 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc)); 4592 4593 dma_unmap_single(&vdev->dev, crq->msg_token, PAGE_SIZE, 4594 DMA_BIDIRECTIONAL); 4595 free_page((unsigned long)crq->msgs); 4596 crq->msgs = NULL; 4597 crq->active = false; 4598 } 4599 4600 static int init_crq_queue(struct ibmvnic_adapter *adapter) 4601 { 4602 struct ibmvnic_crq_queue *crq = &adapter->crq; 4603 struct device *dev = &adapter->vdev->dev; 4604 struct vio_dev *vdev = adapter->vdev; 4605 int rc, retrc = -ENOMEM; 4606 4607 if (crq->msgs) 4608 return 0; 4609 4610 crq->msgs = (union ibmvnic_crq *)get_zeroed_page(GFP_KERNEL); 4611 /* Should we allocate more than one page? */ 4612 4613 if (!crq->msgs) 4614 return -ENOMEM; 4615 4616 crq->size = PAGE_SIZE / sizeof(*crq->msgs); 4617 crq->msg_token = dma_map_single(dev, crq->msgs, PAGE_SIZE, 4618 DMA_BIDIRECTIONAL); 4619 if (dma_mapping_error(dev, crq->msg_token)) 4620 goto map_failed; 4621 4622 rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address, 4623 crq->msg_token, PAGE_SIZE); 4624 4625 if (rc == H_RESOURCE) 4626 /* maybe kexecing and resource is busy. 
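		   (e.g. a CRQ left registered by the previous kernel), so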
try a reset */ 4627 rc = ibmvnic_reset_crq(adapter); 4628 retrc = rc; 4629 4630 if (rc == H_CLOSED) { 4631 dev_warn(dev, "Partner adapter not ready\n"); 4632 } else if (rc) { 4633 dev_warn(dev, "Error %d opening adapter\n", rc); 4634 goto reg_crq_failed; 4635 } 4636 4637 retrc = 0; 4638 4639 tasklet_init(&adapter->tasklet, (void *)ibmvnic_tasklet, 4640 (unsigned long)adapter); 4641 4642 netdev_dbg(adapter->netdev, "registering irq 0x%x\n", vdev->irq); 4643 rc = request_irq(vdev->irq, ibmvnic_interrupt, 0, IBMVNIC_NAME, 4644 adapter); 4645 if (rc) { 4646 dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n", 4647 vdev->irq, rc); 4648 goto req_irq_failed; 4649 } 4650 4651 rc = vio_enable_interrupts(vdev); 4652 if (rc) { 4653 dev_err(dev, "Error %d enabling interrupts\n", rc); 4654 goto req_irq_failed; 4655 } 4656 4657 crq->cur = 0; 4658 spin_lock_init(&crq->lock); 4659 4660 return retrc; 4661 4662 req_irq_failed: 4663 tasklet_kill(&adapter->tasklet); 4664 do { 4665 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address); 4666 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc)); 4667 reg_crq_failed: 4668 dma_unmap_single(dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL); 4669 map_failed: 4670 free_page((unsigned long)crq->msgs); 4671 crq->msgs = NULL; 4672 return retrc; 4673 } 4674 4675 static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter) 4676 { 4677 struct device *dev = &adapter->vdev->dev; 4678 unsigned long timeout = msecs_to_jiffies(30000); 4679 u64 old_num_rx_queues, old_num_tx_queues; 4680 int rc; 4681 4682 adapter->from_passive_init = false; 4683 4684 old_num_rx_queues = adapter->req_rx_queues; 4685 old_num_tx_queues = adapter->req_tx_queues; 4686 4687 init_completion(&adapter->init_done); 4688 adapter->init_done_rc = 0; 4689 ibmvnic_send_crq_init(adapter); 4690 if (!wait_for_completion_timeout(&adapter->init_done, timeout)) { 4691 dev_err(dev, "Initialization sequence timed out\n"); 4692 return -1; 4693 } 4694 4695 if (adapter->init_done_rc) { 4696 release_crq_queue(adapter); 4697 return adapter->init_done_rc; 4698 } 4699 4700 if (adapter->from_passive_init) { 4701 adapter->state = VNIC_OPEN; 4702 adapter->from_passive_init = false; 4703 return -1; 4704 } 4705 4706 if (adapter->resetting && !adapter->wait_for_reset && 4707 adapter->reset_reason != VNIC_RESET_MOBILITY) { 4708 if (adapter->req_rx_queues != old_num_rx_queues || 4709 adapter->req_tx_queues != old_num_tx_queues) { 4710 release_sub_crqs(adapter, 0); 4711 rc = init_sub_crqs(adapter); 4712 } else { 4713 rc = reset_sub_crq_queues(adapter); 4714 } 4715 } else { 4716 rc = init_sub_crqs(adapter); 4717 } 4718 4719 if (rc) { 4720 dev_err(dev, "Initialization of sub crqs failed\n"); 4721 release_crq_queue(adapter); 4722 return rc; 4723 } 4724 4725 rc = init_sub_crq_irqs(adapter); 4726 if (rc) { 4727 dev_err(dev, "Failed to initialize sub crq irqs\n"); 4728 release_crq_queue(adapter); 4729 } 4730 4731 return rc; 4732 } 4733 4734 static int ibmvnic_init(struct ibmvnic_adapter *adapter) 4735 { 4736 struct device *dev = &adapter->vdev->dev; 4737 unsigned long timeout = msecs_to_jiffies(30000); 4738 int rc; 4739 4740 adapter->from_passive_init = false; 4741 4742 init_completion(&adapter->init_done); 4743 adapter->init_done_rc = 0; 4744 ibmvnic_send_crq_init(adapter); 4745 if (!wait_for_completion_timeout(&adapter->init_done, timeout)) { 4746 dev_err(dev, "Initialization sequence timed out\n"); 4747 return -1; 4748 } 4749 4750 if (adapter->init_done_rc) { 4751 release_crq_queue(adapter); 4752 return adapter->init_done_rc; 4753 } 4754 
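	/* from_passive_init is set by the CRQ handler when the partner sent
	 * IBMVNIC_CRQ_INIT first; the failover reset scheduled there is
	 * expected to finish bring-up, so the normal init path is skipped.
	 */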
4755 if (adapter->from_passive_init) { 4756 adapter->state = VNIC_OPEN; 4757 adapter->from_passive_init = false; 4758 return -1; 4759 } 4760 4761 rc = init_sub_crqs(adapter); 4762 if (rc) { 4763 dev_err(dev, "Initialization of sub crqs failed\n"); 4764 release_crq_queue(adapter); 4765 return rc; 4766 } 4767 4768 rc = init_sub_crq_irqs(adapter); 4769 if (rc) { 4770 dev_err(dev, "Failed to initialize sub crq irqs\n"); 4771 release_crq_queue(adapter); 4772 } 4773 4774 return rc; 4775 } 4776 4777 static struct device_attribute dev_attr_failover; 4778 4779 static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id) 4780 { 4781 struct ibmvnic_adapter *adapter; 4782 struct net_device *netdev; 4783 unsigned char *mac_addr_p; 4784 int rc; 4785 4786 dev_dbg(&dev->dev, "entering ibmvnic_probe for UA 0x%x\n", 4787 dev->unit_address); 4788 4789 mac_addr_p = (unsigned char *)vio_get_attribute(dev, 4790 VETH_MAC_ADDR, NULL); 4791 if (!mac_addr_p) { 4792 dev_err(&dev->dev, 4793 "(%s:%3.3d) ERROR: Can't find MAC_ADDR attribute\n", 4794 __FILE__, __LINE__); 4795 return 0; 4796 } 4797 4798 netdev = alloc_etherdev_mq(sizeof(struct ibmvnic_adapter), 4799 IBMVNIC_MAX_QUEUES); 4800 if (!netdev) 4801 return -ENOMEM; 4802 4803 adapter = netdev_priv(netdev); 4804 adapter->state = VNIC_PROBING; 4805 dev_set_drvdata(&dev->dev, netdev); 4806 adapter->vdev = dev; 4807 adapter->netdev = netdev; 4808 4809 ether_addr_copy(adapter->mac_addr, mac_addr_p); 4810 ether_addr_copy(netdev->dev_addr, adapter->mac_addr); 4811 netdev->irq = dev->irq; 4812 netdev->netdev_ops = &ibmvnic_netdev_ops; 4813 netdev->ethtool_ops = &ibmvnic_ethtool_ops; 4814 SET_NETDEV_DEV(netdev, &dev->dev); 4815 4816 spin_lock_init(&adapter->stats_lock); 4817 4818 INIT_LIST_HEAD(&adapter->errors); 4819 spin_lock_init(&adapter->error_list_lock); 4820 4821 INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset); 4822 INIT_LIST_HEAD(&adapter->rwi_list); 4823 mutex_init(&adapter->reset_lock); 4824 mutex_init(&adapter->rwi_lock); 4825 adapter->resetting = false; 4826 4827 adapter->mac_change_pending = false; 4828 4829 do { 4830 rc = init_crq_queue(adapter); 4831 if (rc) { 4832 dev_err(&dev->dev, "Couldn't initialize crq. 
rc=%d\n", 4833 rc); 4834 goto ibmvnic_init_fail; 4835 } 4836 4837 rc = ibmvnic_init(adapter); 4838 if (rc && rc != EAGAIN) 4839 goto ibmvnic_init_fail; 4840 } while (rc == EAGAIN); 4841 4842 rc = init_stats_buffers(adapter); 4843 if (rc) 4844 goto ibmvnic_init_fail; 4845 4846 rc = init_stats_token(adapter); 4847 if (rc) 4848 goto ibmvnic_stats_fail; 4849 4850 netdev->mtu = adapter->req_mtu - ETH_HLEN; 4851 netdev->min_mtu = adapter->min_mtu - ETH_HLEN; 4852 netdev->max_mtu = adapter->max_mtu - ETH_HLEN; 4853 4854 rc = device_create_file(&dev->dev, &dev_attr_failover); 4855 if (rc) 4856 goto ibmvnic_dev_file_err; 4857 4858 netif_carrier_off(netdev); 4859 rc = register_netdev(netdev); 4860 if (rc) { 4861 dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc); 4862 goto ibmvnic_register_fail; 4863 } 4864 dev_info(&dev->dev, "ibmvnic registered\n"); 4865 4866 adapter->state = VNIC_PROBED; 4867 4868 adapter->wait_for_reset = false; 4869 4870 return 0; 4871 4872 ibmvnic_register_fail: 4873 device_remove_file(&dev->dev, &dev_attr_failover); 4874 4875 ibmvnic_dev_file_err: 4876 release_stats_token(adapter); 4877 4878 ibmvnic_stats_fail: 4879 release_stats_buffers(adapter); 4880 4881 ibmvnic_init_fail: 4882 release_sub_crqs(adapter, 1); 4883 release_crq_queue(adapter); 4884 free_netdev(netdev); 4885 4886 return rc; 4887 } 4888 4889 static int ibmvnic_remove(struct vio_dev *dev) 4890 { 4891 struct net_device *netdev = dev_get_drvdata(&dev->dev); 4892 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 4893 4894 adapter->state = VNIC_REMOVING; 4895 unregister_netdev(netdev); 4896 mutex_lock(&adapter->reset_lock); 4897 4898 release_resources(adapter); 4899 release_sub_crqs(adapter, 1); 4900 release_crq_queue(adapter); 4901 4902 release_stats_token(adapter); 4903 release_stats_buffers(adapter); 4904 4905 adapter->state = VNIC_REMOVED; 4906 4907 mutex_unlock(&adapter->reset_lock); 4908 device_remove_file(&dev->dev, &dev_attr_failover); 4909 free_netdev(netdev); 4910 dev_set_drvdata(&dev->dev, NULL); 4911 4912 return 0; 4913 } 4914 4915 static ssize_t failover_store(struct device *dev, struct device_attribute *attr, 4916 const char *buf, size_t count) 4917 { 4918 struct net_device *netdev = dev_get_drvdata(dev); 4919 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 4920 unsigned long retbuf[PLPAR_HCALL_BUFSIZE]; 4921 __be64 session_token; 4922 long rc; 4923 4924 if (!sysfs_streq(buf, "1")) 4925 return -EINVAL; 4926 4927 rc = plpar_hcall(H_VIOCTL, retbuf, adapter->vdev->unit_address, 4928 H_GET_SESSION_TOKEN, 0, 0, 0); 4929 if (rc) { 4930 netdev_err(netdev, "Couldn't retrieve session token, rc %ld\n", 4931 rc); 4932 return -EINVAL; 4933 } 4934 4935 session_token = (__be64)retbuf[0]; 4936 netdev_dbg(netdev, "Initiating client failover, session id %llx\n", 4937 be64_to_cpu(session_token)); 4938 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address, 4939 H_SESSION_ERR_DETECTED, session_token, 0, 0); 4940 if (rc) { 4941 netdev_err(netdev, "Client initiated failover failed, rc %ld\n", 4942 rc); 4943 return -EINVAL; 4944 } 4945 4946 return count; 4947 } 4948 4949 static DEVICE_ATTR_WO(failover); 4950 4951 static unsigned long ibmvnic_get_desired_dma(struct vio_dev *vdev) 4952 { 4953 struct net_device *netdev = dev_get_drvdata(&vdev->dev); 4954 struct ibmvnic_adapter *adapter; 4955 struct iommu_table *tbl; 4956 unsigned long ret = 0; 4957 int i; 4958 4959 tbl = get_iommu_table_base(&vdev->dev); 4960 4961 /* netdev inits at probe time along with the structures we need below*/ 4962 if 
(!netdev) 4963 return IOMMU_PAGE_ALIGN(IBMVNIC_IO_ENTITLEMENT_DEFAULT, tbl); 4964 4965 adapter = netdev_priv(netdev); 4966 4967 ret += PAGE_SIZE; /* the crq message queue */ 4968 ret += IOMMU_PAGE_ALIGN(sizeof(struct ibmvnic_statistics), tbl); 4969 4970 for (i = 0; i < adapter->req_tx_queues + adapter->req_rx_queues; i++) 4971 ret += 4 * PAGE_SIZE; /* the scrq message queue */ 4972 4973 for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs); 4974 i++) 4975 ret += adapter->rx_pool[i].size * 4976 IOMMU_PAGE_ALIGN(adapter->rx_pool[i].buff_size, tbl); 4977 4978 return ret; 4979 } 4980 4981 static int ibmvnic_resume(struct device *dev) 4982 { 4983 struct net_device *netdev = dev_get_drvdata(dev); 4984 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 4985 4986 if (adapter->state != VNIC_OPEN) 4987 return 0; 4988 4989 tasklet_schedule(&adapter->tasklet); 4990 4991 return 0; 4992 } 4993 4994 static const struct vio_device_id ibmvnic_device_table[] = { 4995 {"network", "IBM,vnic"}, 4996 {"", "" } 4997 }; 4998 MODULE_DEVICE_TABLE(vio, ibmvnic_device_table); 4999 5000 static const struct dev_pm_ops ibmvnic_pm_ops = { 5001 .resume = ibmvnic_resume 5002 }; 5003 5004 static struct vio_driver ibmvnic_driver = { 5005 .id_table = ibmvnic_device_table, 5006 .probe = ibmvnic_probe, 5007 .remove = ibmvnic_remove, 5008 .get_desired_dma = ibmvnic_get_desired_dma, 5009 .name = ibmvnic_driver_name, 5010 .pm = &ibmvnic_pm_ops, 5011 }; 5012 5013 /* module functions */ 5014 static int __init ibmvnic_module_init(void) 5015 { 5016 pr_info("%s: %s %s\n", ibmvnic_driver_name, ibmvnic_driver_string, 5017 IBMVNIC_DRIVER_VERSION); 5018 5019 return vio_register_driver(&ibmvnic_driver); 5020 } 5021 5022 static void __exit ibmvnic_module_exit(void) 5023 { 5024 vio_unregister_driver(&ibmvnic_driver); 5025 } 5026 5027 module_init(ibmvnic_module_init); 5028 module_exit(ibmvnic_module_exit); 5029
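/* Usage sketch (illustrative, not part of the driver): after probe, the
 * "failover" attribute created by ibmvnic_probe() lets an administrator
 * request a client-initiated failover:
 *
 *	echo 1 > /sys/bus/vio/devices/<unit-address>/failover
 *
 * where <unit-address> is the adapter's VIO unit address.  failover_store()
 * rejects anything other than "1" with -EINVAL.
 */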