/**************************************************************************/
/*                                                                        */
/*  IBM System i and System p Virtual NIC Device Driver                  */
/*  Copyright (C) 2014 IBM Corp.                                          */
/*  Santiago Leon (santi_leon@yahoo.com)                                  */
/*  Thomas Falcon (tlfalcon@linux.vnet.ibm.com)                           */
/*  John Allen (jallen@linux.vnet.ibm.com)                                */
/*                                                                        */
/*  This program is free software; you can redistribute it and/or modify  */
/*  it under the terms of the GNU General Public License as published by  */
/*  the Free Software Foundation; either version 2 of the License, or     */
/*  (at your option) any later version.                                   */
/*                                                                        */
/*  This program is distributed in the hope that it will be useful,       */
/*  but WITHOUT ANY WARRANTY; without even the implied warranty of        */
/*  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the         */
/*  GNU General Public License for more details.                          */
/*                                                                        */
/*  You should have received a copy of the GNU General Public License     */
/*  along with this program.                                              */
/*                                                                        */
/* This module contains the implementation of a virtual ethernet device   */
/* for use with IBM i/p Series LPAR Linux. It utilizes the logical LAN    */
/* option of the RS/6000 Platform Architecture to interface with virtual  */
/* ethernet NICs that are presented to the partition by the hypervisor.   */
/*                                                                        */
/* Messages are passed between the VNIC driver and the VNIC server using  */
/* Command/Response Queues (CRQs) and sub CRQs (sCRQs). CRQs are used to  */
/* issue and receive commands that initiate communication with the server */
/* on driver initialization. Sub CRQs (sCRQs) are similar to CRQs, but    */
/* are used by the driver to notify the server that a packet is           */
/* ready for transmission or that a buffer has been added to receive a    */
/* packet. Subsequently, sCRQs are used by the server to notify the       */
/* driver that a packet transmission has been completed or that a packet  */
/* has been received and placed in a waiting buffer.                      */
/*                                                                        */
/* In lieu of a more conventional "on-the-fly" DMA mapping strategy in    */
/* which skbs are DMA mapped and immediately unmapped when the transmit   */
/* or receive has been completed, the VNIC driver is required to use      */
/* "long term mapping". This entails that large, contiguous DMA mapped    */
/* buffers are allocated on driver initialization and these buffers are   */
/* then continuously reused to pass skbs to and from the VNIC server.     */
/*                                                                        */
/**************************************************************************/

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/completion.h>
#include <linux/ioport.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/ethtool.h>
#include <linux/proc_fs.h>
#include <linux/if_arp.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/irq.h>
#include <linux/kthread.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <net/net_namespace.h>
#include <asm/hvcall.h>
#include <linux/atomic.h>
#include <asm/vio.h>
#include <asm/iommu.h>
#include <linux/uaccess.h>
#include <asm/firmware.h>
#include <linux/workqueue.h>
#include <linux/if_vlan.h>
#include <linux/utsname.h>

#include "ibmvnic.h"

static const char ibmvnic_driver_name[] = "ibmvnic";
static const char ibmvnic_driver_string[] = "IBM System i/p Virtual NIC Driver";

MODULE_AUTHOR("Santiago Leon");
MODULE_DESCRIPTION("IBM System i/p Virtual NIC Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(IBMVNIC_DRIVER_VERSION);

static int ibmvnic_version = IBMVNIC_INITIAL_VERSION;
static int ibmvnic_remove(struct vio_dev *);
static void release_sub_crqs(struct ibmvnic_adapter *, bool);
static int ibmvnic_reset_crq(struct ibmvnic_adapter *);
static int ibmvnic_send_crq_init(struct ibmvnic_adapter *);
static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *);
static int ibmvnic_send_crq(struct ibmvnic_adapter *, union ibmvnic_crq *);
static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
		       union sub_crq *sub_crq);
static int send_subcrq_indirect(struct ibmvnic_adapter *, u64, u64, u64);
static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance);
static int enable_scrq_irq(struct ibmvnic_adapter *,
			   struct ibmvnic_sub_crq_queue *);
static int disable_scrq_irq(struct ibmvnic_adapter *,
			    struct ibmvnic_sub_crq_queue *);
static int pending_scrq(struct ibmvnic_adapter *,
			struct ibmvnic_sub_crq_queue *);
static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *,
					struct ibmvnic_sub_crq_queue *);
static int ibmvnic_poll(struct napi_struct *napi, int budget);
static void send_map_query(struct ibmvnic_adapter *adapter);
static void send_request_map(struct ibmvnic_adapter *, dma_addr_t, __be32, u8);
static void send_request_unmap(struct ibmvnic_adapter *, u8);
static int send_login(struct ibmvnic_adapter *adapter);
static void send_cap_queries(struct ibmvnic_adapter *adapter);
static int init_sub_crqs(struct ibmvnic_adapter *);
static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter);
static int ibmvnic_init(struct ibmvnic_adapter *);
static void release_crq_queue(struct ibmvnic_adapter *);
static int __ibmvnic_set_mac(struct net_device *netdev, struct sockaddr *p);
static int init_crq_queue(struct ibmvnic_adapter *adapter);

struct ibmvnic_stat {
	char name[ETH_GSTRING_LEN];
	int offset;
};

#define IBMVNIC_STAT_OFF(stat) (offsetof(struct ibmvnic_adapter, stats) + \
			     offsetof(struct ibmvnic_statistics, stat))
#define IBMVNIC_GET_STAT(a, off) (*((u64 *)(((unsigned long)(a)) + off)))
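/* A usage sketch for the two macros above: for the "rx_packets" entry in
 * the table below, IBMVNIC_STAT_OFF(rx_packets) records the byte offset
 * of adapter->stats.rx_packets within struct ibmvnic_adapter, so ethtool
 * code can fetch any counter generically, e.g.
 *
 *	u64 val = IBMVNIC_GET_STAT(adapter, ibmvnic_stats[0].offset);
 *
 * which evaluates to adapter->stats.rx_packets.
 */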
static const struct ibmvnic_stat ibmvnic_stats[] = {
	{"rx_packets", IBMVNIC_STAT_OFF(rx_packets)},
	{"rx_bytes", IBMVNIC_STAT_OFF(rx_bytes)},
	{"tx_packets", IBMVNIC_STAT_OFF(tx_packets)},
	{"tx_bytes", IBMVNIC_STAT_OFF(tx_bytes)},
	{"ucast_tx_packets", IBMVNIC_STAT_OFF(ucast_tx_packets)},
	{"ucast_rx_packets", IBMVNIC_STAT_OFF(ucast_rx_packets)},
	{"mcast_tx_packets", IBMVNIC_STAT_OFF(mcast_tx_packets)},
	{"mcast_rx_packets", IBMVNIC_STAT_OFF(mcast_rx_packets)},
	{"bcast_tx_packets", IBMVNIC_STAT_OFF(bcast_tx_packets)},
	{"bcast_rx_packets", IBMVNIC_STAT_OFF(bcast_rx_packets)},
	{"align_errors", IBMVNIC_STAT_OFF(align_errors)},
	{"fcs_errors", IBMVNIC_STAT_OFF(fcs_errors)},
	{"single_collision_frames", IBMVNIC_STAT_OFF(single_collision_frames)},
	{"multi_collision_frames", IBMVNIC_STAT_OFF(multi_collision_frames)},
	{"sqe_test_errors", IBMVNIC_STAT_OFF(sqe_test_errors)},
	{"deferred_tx", IBMVNIC_STAT_OFF(deferred_tx)},
	{"late_collisions", IBMVNIC_STAT_OFF(late_collisions)},
	{"excess_collisions", IBMVNIC_STAT_OFF(excess_collisions)},
	{"internal_mac_tx_errors", IBMVNIC_STAT_OFF(internal_mac_tx_errors)},
	{"carrier_sense", IBMVNIC_STAT_OFF(carrier_sense)},
	{"too_long_frames", IBMVNIC_STAT_OFF(too_long_frames)},
	{"internal_mac_rx_errors", IBMVNIC_STAT_OFF(internal_mac_rx_errors)},
};

static long h_reg_sub_crq(unsigned long unit_address, unsigned long token,
			  unsigned long length, unsigned long *number,
			  unsigned long *irq)
{
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	long rc;

	rc = plpar_hcall(H_REG_SUB_CRQ, retbuf, unit_address, token, length);
	*number = retbuf[0];
	*irq = retbuf[1];

	return rc;
}

static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb, int size)
{
	struct device *dev = &adapter->vdev->dev;

	ltb->size = size;
	ltb->buff = dma_alloc_coherent(dev, ltb->size, &ltb->addr,
				       GFP_KERNEL);

	if (!ltb->buff) {
		dev_err(dev, "Couldn't alloc long term buffer\n");
		return -ENOMEM;
	}
	ltb->map_id = adapter->map_id;
	adapter->map_id++;

	init_completion(&adapter->fw_done);
	send_request_map(adapter, ltb->addr,
			 ltb->size, ltb->map_id);
	wait_for_completion(&adapter->fw_done);

	if (adapter->fw_done_rc) {
		dev_err(dev, "Couldn't map long term buffer, rc = %d\n",
			adapter->fw_done_rc);
		dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
		return -1;
	}
	return 0;
}

static void free_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb)
{
	struct device *dev = &adapter->vdev->dev;

	if (!ltb->buff)
		return;

	if (adapter->reset_reason != VNIC_RESET_FAILOVER &&
	    adapter->reset_reason != VNIC_RESET_MOBILITY)
		send_request_unmap(adapter, ltb->map_id);
	dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
}
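/* Typical long term buffer lifecycle, as a sketch (the size below is
 * purely illustrative):
 *
 *	struct ibmvnic_long_term_buff ltb = {};
 *
 *	alloc_long_term_buff(adapter, &ltb, 64 * 2048);	// map once at init
 *	// ... copy skb data into and out of ltb.buff ...
 *	free_long_term_buff(adapter, &ltb);		// unmap at teardown
 *
 * The buffer is reused for the adapter's lifetime instead of being
 * remapped per packet, which is the "long term mapping" scheme described
 * in the header comment.
 */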
static int reset_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb)
{
	memset(ltb->buff, 0, ltb->size);

	init_completion(&adapter->fw_done);
	send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id);
	wait_for_completion(&adapter->fw_done);

	if (adapter->fw_done_rc) {
		dev_info(&adapter->vdev->dev,
			 "Reset failed, attempting to free and reallocate buffer\n");
		free_long_term_buff(adapter, ltb);
		return alloc_long_term_buff(adapter, ltb, ltb->size);
	}
	return 0;
}

static void deactivate_rx_pools(struct ibmvnic_adapter *adapter)
{
	int i;

	for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	     i++)
		adapter->rx_pool[i].active = 0;
}

static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
			      struct ibmvnic_rx_pool *pool)
{
	int count = pool->size - atomic_read(&pool->available);
	struct device *dev = &adapter->vdev->dev;
	int buffers_added = 0;
	unsigned long lpar_rc;
	union sub_crq sub_crq;
	struct sk_buff *skb;
	unsigned int offset;
	dma_addr_t dma_addr;
	unsigned char *dst;
	u64 *handle_array;
	int shift = 0;
	int index;
	int i;

	if (!pool->active)
		return;

	handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
			       be32_to_cpu(adapter->login_rsp_buf->
					   off_rxadd_subcrqs));

	for (i = 0; i < count; ++i) {
		skb = alloc_skb(pool->buff_size, GFP_ATOMIC);
		if (!skb) {
			dev_err(dev, "Couldn't replenish rx buff\n");
			adapter->replenish_no_mem++;
			break;
		}

		index = pool->free_map[pool->next_free];

		if (pool->rx_buff[index].skb)
			dev_err(dev, "Inconsistent free_map!\n");

		/* Copy the skb to the long term mapped DMA buffer */
		offset = index * pool->buff_size;
		dst = pool->long_term_buff.buff + offset;
		memset(dst, 0, pool->buff_size);
		dma_addr = pool->long_term_buff.addr + offset;
		pool->rx_buff[index].data = dst;

		pool->free_map[pool->next_free] = IBMVNIC_INVALID_MAP;
		pool->rx_buff[index].dma = dma_addr;
		pool->rx_buff[index].skb = skb;
		pool->rx_buff[index].pool_index = pool->index;
		pool->rx_buff[index].size = pool->buff_size;

		memset(&sub_crq, 0, sizeof(sub_crq));
		sub_crq.rx_add.first = IBMVNIC_CRQ_CMD;
		sub_crq.rx_add.correlator =
		    cpu_to_be64((u64)&pool->rx_buff[index]);
		sub_crq.rx_add.ioba = cpu_to_be32(dma_addr);
		sub_crq.rx_add.map_id = pool->long_term_buff.map_id;

		/* The length field of the sCRQ is defined to be 24 bits so the
		 * buffer size needs to be left shifted by a byte before it is
		 * converted to big endian to prevent the last byte from being
		 * truncated.
		 */
#ifdef __LITTLE_ENDIAN__
		shift = 8;
#endif
		sub_crq.rx_add.len = cpu_to_be32(pool->buff_size << shift);

		lpar_rc = send_subcrq(adapter, handle_array[pool->index],
				      &sub_crq);
		if (lpar_rc != H_SUCCESS)
			goto failure;

		buffers_added++;
		adapter->replenish_add_buff_success++;
		pool->next_free = (pool->next_free + 1) % pool->size;
	}
	atomic_add(buffers_added, &pool->available);
	return;

failure:
	dev_info(dev, "replenish pools failure\n");
	pool->free_map[pool->next_free] = index;
	pool->rx_buff[index].skb = NULL;

	dev_kfree_skb_any(skb);
	adapter->replenish_add_buff_failure++;
	atomic_add(buffers_added, &pool->available);

	if (lpar_rc == H_CLOSED || adapter->failover_pending) {
		/* Disable buffer pool replenishment and report carrier off if
		 * queue is closed or pending failover.
		 * Firmware guarantees that a signal will be sent to the
		 * driver, triggering a reset.
		 */
		deactivate_rx_pools(adapter);
		netif_carrier_off(adapter->netdev);
	}
}
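/* Worked example of the 24-bit length encoding above (values are
 * illustrative): with buff_size = 0x600 on a little endian LPAR,
 * shift = 8, so cpu_to_be32(0x600 << 8) lays the bytes 00 06 00 00 out
 * in memory. The 24-bit rx_add.len field covers only the first three
 * bytes, which read back as 0x000600, so the full length survives and
 * only the always-zero low byte of the shifted word is dropped.
 */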
static void replenish_pools(struct ibmvnic_adapter *adapter)
{
	int i;

	adapter->replenish_task_cycles++;
	for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	     i++) {
		if (adapter->rx_pool[i].active)
			replenish_rx_pool(adapter, &adapter->rx_pool[i]);
	}
}

static void release_stats_buffers(struct ibmvnic_adapter *adapter)
{
	kfree(adapter->tx_stats_buffers);
	kfree(adapter->rx_stats_buffers);
	adapter->tx_stats_buffers = NULL;
	adapter->rx_stats_buffers = NULL;
}

static int init_stats_buffers(struct ibmvnic_adapter *adapter)
{
	adapter->tx_stats_buffers =
		kcalloc(IBMVNIC_MAX_QUEUES,
			sizeof(struct ibmvnic_tx_queue_stats),
			GFP_KERNEL);
	if (!adapter->tx_stats_buffers)
		return -ENOMEM;

	adapter->rx_stats_buffers =
		kcalloc(IBMVNIC_MAX_QUEUES,
			sizeof(struct ibmvnic_rx_queue_stats),
			GFP_KERNEL);
	if (!adapter->rx_stats_buffers)
		return -ENOMEM;

	return 0;
}

static void release_stats_token(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;

	if (!adapter->stats_token)
		return;

	dma_unmap_single(dev, adapter->stats_token,
			 sizeof(struct ibmvnic_statistics),
			 DMA_FROM_DEVICE);
	adapter->stats_token = 0;
}

static int init_stats_token(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	dma_addr_t stok;

	stok = dma_map_single(dev, &adapter->stats,
			      sizeof(struct ibmvnic_statistics),
			      DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, stok)) {
		dev_err(dev, "Couldn't map stats buffer\n");
		return -1;
	}

	adapter->stats_token = stok;
	netdev_dbg(adapter->netdev, "Stats token initialized (%llx)\n", stok);
	return 0;
}

static int reset_rx_pools(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_rx_pool *rx_pool;
	int rx_scrqs;
	int i, j, rc;
	u64 *size_array;

	size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
		be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));

	rx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	for (i = 0; i < rx_scrqs; i++) {
		rx_pool = &adapter->rx_pool[i];

		netdev_dbg(adapter->netdev, "Re-setting rx_pool[%d]\n", i);

		if (rx_pool->buff_size != be64_to_cpu(size_array[i])) {
			free_long_term_buff(adapter, &rx_pool->long_term_buff);
			rx_pool->buff_size = be64_to_cpu(size_array[i]);
			rc = alloc_long_term_buff(adapter,
						  &rx_pool->long_term_buff,
						  rx_pool->size *
						  rx_pool->buff_size);
		} else {
			rc = reset_long_term_buff(adapter,
						  &rx_pool->long_term_buff);
		}

		if (rc)
			return rc;

		for (j = 0; j < rx_pool->size; j++)
			rx_pool->free_map[j] = j;

		memset(rx_pool->rx_buff, 0,
		       rx_pool->size * sizeof(struct ibmvnic_rx_buff));

		atomic_set(&rx_pool->available, 0);
		rx_pool->next_alloc = 0;
		rx_pool->next_free = 0;
		rx_pool->active = 1;
	}

	return 0;
}
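/* The free_map arrays reinitialized above form a simple ring of free
 * buffer indices: replenish_rx_pool() consumes an index at next_free and
 * marks the slot IBMVNIC_INVALID_MAP, while remove_buff_from_pool()
 * returns a completed buffer's index at next_alloc. After a reset, every
 * index 0..size-1 is free again, hence the identity mapping.
 */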
static void release_rx_pools(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_rx_pool *rx_pool;
	int i, j;

	if (!adapter->rx_pool)
		return;

	for (i = 0; i < adapter->num_active_rx_pools; i++) {
		rx_pool = &adapter->rx_pool[i];

		netdev_dbg(adapter->netdev, "Releasing rx_pool[%d]\n", i);

		kfree(rx_pool->free_map);
		free_long_term_buff(adapter, &rx_pool->long_term_buff);

		if (!rx_pool->rx_buff)
			continue;

		for (j = 0; j < rx_pool->size; j++) {
			if (rx_pool->rx_buff[j].skb) {
				dev_kfree_skb_any(rx_pool->rx_buff[j].skb);
				rx_pool->rx_buff[j].skb = NULL;
			}
		}

		kfree(rx_pool->rx_buff);
	}

	kfree(adapter->rx_pool);
	adapter->rx_pool = NULL;
	adapter->num_active_rx_pools = 0;
}

static int init_rx_pools(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_rx_pool *rx_pool;
	int rxadd_subcrqs;
	u64 *size_array;
	int i, j;

	rxadd_subcrqs =
		be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
		be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));

	adapter->rx_pool = kcalloc(rxadd_subcrqs,
				   sizeof(struct ibmvnic_rx_pool),
				   GFP_KERNEL);
	if (!adapter->rx_pool) {
		dev_err(dev, "Failed to allocate rx pools\n");
		return -1;
	}

	adapter->num_active_rx_pools = rxadd_subcrqs;

	for (i = 0; i < rxadd_subcrqs; i++) {
		rx_pool = &adapter->rx_pool[i];

		netdev_dbg(adapter->netdev,
			   "Initializing rx_pool[%d], %lld buffs, %lld bytes each\n",
			   i, adapter->req_rx_add_entries_per_subcrq,
			   be64_to_cpu(size_array[i]));

		rx_pool->size = adapter->req_rx_add_entries_per_subcrq;
		rx_pool->index = i;
		rx_pool->buff_size = be64_to_cpu(size_array[i]);
		rx_pool->active = 1;

		rx_pool->free_map = kcalloc(rx_pool->size, sizeof(int),
					    GFP_KERNEL);
		if (!rx_pool->free_map) {
			release_rx_pools(adapter);
			return -1;
		}

		rx_pool->rx_buff = kcalloc(rx_pool->size,
					   sizeof(struct ibmvnic_rx_buff),
					   GFP_KERNEL);
		if (!rx_pool->rx_buff) {
			dev_err(dev, "Couldn't alloc rx buffers\n");
			release_rx_pools(adapter);
			return -1;
		}

		if (alloc_long_term_buff(adapter, &rx_pool->long_term_buff,
					 rx_pool->size * rx_pool->buff_size)) {
			release_rx_pools(adapter);
			return -1;
		}

		for (j = 0; j < rx_pool->size; ++j)
			rx_pool->free_map[j] = j;

		atomic_set(&rx_pool->available, 0);
		rx_pool->next_alloc = 0;
		rx_pool->next_free = 0;
	}

	return 0;
}

static int reset_one_tx_pool(struct ibmvnic_adapter *adapter,
			     struct ibmvnic_tx_pool *tx_pool)
{
	int rc, i;

	rc = reset_long_term_buff(adapter, &tx_pool->long_term_buff);
	if (rc)
		return rc;

	memset(tx_pool->tx_buff, 0,
	       tx_pool->num_buffers *
	       sizeof(struct ibmvnic_tx_buff));

	for (i = 0; i < tx_pool->num_buffers; i++)
		tx_pool->free_map[i] = i;

	tx_pool->consumer_index = 0;
	tx_pool->producer_index = 0;

	return 0;
}

static int reset_tx_pools(struct ibmvnic_adapter *adapter)
{
	int tx_scrqs;
	int i, rc;

	tx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
	for (i = 0; i < tx_scrqs; i++) {
		rc = reset_one_tx_pool(adapter, &adapter->tso_pool[i]);
		if (rc)
			return rc;
		rc = reset_one_tx_pool(adapter, &adapter->tx_pool[i]);
		if (rc)
			return rc;
	}

	return 0;
}

static void release_vpd_data(struct ibmvnic_adapter *adapter)
{
	if (!adapter->vpd)
		return;

	kfree(adapter->vpd->buff);
	kfree(adapter->vpd);

	adapter->vpd = NULL;
}
static void release_one_tx_pool(struct ibmvnic_adapter *adapter,
				struct ibmvnic_tx_pool *tx_pool)
{
	kfree(tx_pool->tx_buff);
	kfree(tx_pool->free_map);
	free_long_term_buff(adapter, &tx_pool->long_term_buff);
}

static void release_tx_pools(struct ibmvnic_adapter *adapter)
{
	int i;

	if (!adapter->tx_pool)
		return;

	for (i = 0; i < adapter->num_active_tx_pools; i++) {
		release_one_tx_pool(adapter, &adapter->tx_pool[i]);
		release_one_tx_pool(adapter, &adapter->tso_pool[i]);
	}

	kfree(adapter->tx_pool);
	adapter->tx_pool = NULL;
	kfree(adapter->tso_pool);
	adapter->tso_pool = NULL;
	adapter->num_active_tx_pools = 0;
}

static int init_one_tx_pool(struct net_device *netdev,
			    struct ibmvnic_tx_pool *tx_pool,
			    int num_entries, int buf_size)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int i;

	tx_pool->tx_buff = kcalloc(num_entries,
				   sizeof(struct ibmvnic_tx_buff),
				   GFP_KERNEL);
	if (!tx_pool->tx_buff)
		return -1;

	if (alloc_long_term_buff(adapter, &tx_pool->long_term_buff,
				 num_entries * buf_size))
		return -1;

	tx_pool->free_map = kcalloc(num_entries, sizeof(int), GFP_KERNEL);
	if (!tx_pool->free_map)
		return -1;

	for (i = 0; i < num_entries; i++)
		tx_pool->free_map[i] = i;

	tx_pool->consumer_index = 0;
	tx_pool->producer_index = 0;
	tx_pool->num_buffers = num_entries;
	tx_pool->buf_size = buf_size;

	return 0;
}

static int init_tx_pools(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int tx_subcrqs;
	int i, rc;

	tx_subcrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
	adapter->tx_pool = kcalloc(tx_subcrqs,
				   sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
	if (!adapter->tx_pool)
		return -1;

	adapter->tso_pool = kcalloc(tx_subcrqs,
				    sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
	if (!adapter->tso_pool)
		return -1;

	adapter->num_active_tx_pools = tx_subcrqs;

	for (i = 0; i < tx_subcrqs; i++) {
		rc = init_one_tx_pool(netdev, &adapter->tx_pool[i],
				      adapter->req_tx_entries_per_subcrq,
				      adapter->req_mtu + VLAN_HLEN);
		if (rc) {
			release_tx_pools(adapter);
			return rc;
		}

		rc = init_one_tx_pool(netdev, &adapter->tso_pool[i],
				      IBMVNIC_TSO_BUFS,
				      IBMVNIC_TSO_BUF_SZ);
		if (rc) {
			release_tx_pools(adapter);
			return rc;
		}
	}

	return 0;
}
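/* Note on the pool pairing set up above: each tx queue gets two pools.
 * The regular pool holds req_tx_entries_per_subcrq buffers sized for a
 * full MTU frame plus VLAN header, while the TSO pool holds
 * IBMVNIC_TSO_BUFS buffers of IBMVNIC_TSO_BUF_SZ bytes for GSO sends;
 * ibmvnic_xmit() below picks between them with skb_is_gso().
 */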
static void release_error_buffers(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_error_buff *error_buff, *tmp;
	unsigned long flags;

	spin_lock_irqsave(&adapter->error_list_lock, flags);
	list_for_each_entry_safe(error_buff, tmp, &adapter->errors, list) {
		list_del(&error_buff->list);
		dma_unmap_single(dev, error_buff->dma, error_buff->len,
				 DMA_FROM_DEVICE);
		kfree(error_buff->buff);
		kfree(error_buff);
	}
	spin_unlock_irqrestore(&adapter->error_list_lock, flags);
}

static void ibmvnic_napi_enable(struct ibmvnic_adapter *adapter)
{
	int i;

	if (adapter->napi_enabled)
		return;

	for (i = 0; i < adapter->req_rx_queues; i++)
		napi_enable(&adapter->napi[i]);

	adapter->napi_enabled = true;
}

static void ibmvnic_napi_disable(struct ibmvnic_adapter *adapter)
{
	int i;

	if (!adapter->napi_enabled)
		return;

	for (i = 0; i < adapter->req_rx_queues; i++) {
		netdev_dbg(adapter->netdev, "Disabling napi[%d]\n", i);
		napi_disable(&adapter->napi[i]);
	}

	adapter->napi_enabled = false;
}

static int init_napi(struct ibmvnic_adapter *adapter)
{
	int i;

	adapter->napi = kcalloc(adapter->req_rx_queues,
				sizeof(struct napi_struct), GFP_KERNEL);
	if (!adapter->napi)
		return -ENOMEM;

	for (i = 0; i < adapter->req_rx_queues; i++) {
		netdev_dbg(adapter->netdev, "Adding napi[%d]\n", i);
		netif_napi_add(adapter->netdev, &adapter->napi[i],
			       ibmvnic_poll, NAPI_POLL_WEIGHT);
	}

	adapter->num_active_rx_napi = adapter->req_rx_queues;
	return 0;
}

static void release_napi(struct ibmvnic_adapter *adapter)
{
	int i;

	if (!adapter->napi)
		return;

	for (i = 0; i < adapter->num_active_rx_napi; i++) {
		netdev_dbg(adapter->netdev, "Releasing napi[%d]\n", i);
		netif_napi_del(&adapter->napi[i]);
	}

	kfree(adapter->napi);
	adapter->napi = NULL;
	adapter->num_active_rx_napi = 0;
}

static int ibmvnic_login(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	unsigned long timeout = msecs_to_jiffies(30000);
	int retry_count = 0;
	int rc;

	do {
		if (retry_count > IBMVNIC_MAX_QUEUES) {
			netdev_warn(netdev, "Login attempts exceeded\n");
			return -1;
		}

		adapter->init_done_rc = 0;
		reinit_completion(&adapter->init_done);
		rc = send_login(adapter);
		if (rc) {
			netdev_warn(netdev, "Unable to login\n");
			return rc;
		}

		if (!wait_for_completion_timeout(&adapter->init_done,
						 timeout)) {
			netdev_warn(netdev, "Login timed out\n");
			return -1;
		}

		if (adapter->init_done_rc == PARTIALSUCCESS) {
			retry_count++;
			release_sub_crqs(adapter, 1);

			adapter->init_done_rc = 0;
			reinit_completion(&adapter->init_done);
			send_cap_queries(adapter);
			if (!wait_for_completion_timeout(&adapter->init_done,
							 timeout)) {
				netdev_warn(netdev,
					    "Capabilities query timed out\n");
				return -1;
			}

			rc = init_sub_crqs(adapter);
			if (rc) {
				netdev_warn(netdev,
					    "SCRQ initialization failed\n");
				return -1;
			}

			rc = init_sub_crq_irqs(adapter);
			if (rc) {
				netdev_warn(netdev,
					    "SCRQ irq initialization failed\n");
				return -1;
			}
		} else if (adapter->init_done_rc) {
			netdev_warn(netdev, "Adapter login failed\n");
			return -1;
		}
	} while (adapter->init_done_rc == PARTIALSUCCESS);

	/* handle pending MAC address changes after successful login */
	if (adapter->mac_change_pending) {
		__ibmvnic_set_mac(netdev, &adapter->desired.mac);
		adapter->mac_change_pending = false;
	}

	return 0;
}
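/* A sketch of the login handshake implemented above: send_login() posts
 * the login CRQ and the response handler completes init_done with
 * init_done_rc set. PARTIALSUCCESS means the server could not honor the
 * requested capabilities, so the driver releases its sub-CRQs, re-queries
 * capabilities and renegotiates, retrying up to IBMVNIC_MAX_QUEUES times
 * before giving up.
 */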
static void release_login_buffer(struct ibmvnic_adapter *adapter)
{
	kfree(adapter->login_buf);
	adapter->login_buf = NULL;
}

static void release_login_rsp_buffer(struct ibmvnic_adapter *adapter)
{
	kfree(adapter->login_rsp_buf);
	adapter->login_rsp_buf = NULL;
}

static void release_resources(struct ibmvnic_adapter *adapter)
{
	release_vpd_data(adapter);

	release_tx_pools(adapter);
	release_rx_pools(adapter);

	release_error_buffers(adapter);
	release_napi(adapter);
	release_login_rsp_buffer(adapter);
}

static int set_link_state(struct ibmvnic_adapter *adapter, u8 link_state)
{
	struct net_device *netdev = adapter->netdev;
	unsigned long timeout = msecs_to_jiffies(30000);
	union ibmvnic_crq crq;
	bool resend;
	int rc;

	netdev_dbg(netdev, "setting link state %d\n", link_state);

	memset(&crq, 0, sizeof(crq));
	crq.logical_link_state.first = IBMVNIC_CRQ_CMD;
	crq.logical_link_state.cmd = LOGICAL_LINK_STATE;
	crq.logical_link_state.link_state = link_state;

	do {
		resend = false;

		reinit_completion(&adapter->init_done);
		rc = ibmvnic_send_crq(adapter, &crq);
		if (rc) {
			netdev_err(netdev, "Failed to set link state\n");
			return rc;
		}

		if (!wait_for_completion_timeout(&adapter->init_done,
						 timeout)) {
			netdev_err(netdev, "timeout setting link state\n");
			return -1;
		}

		if (adapter->init_done_rc == 1) {
			/* Partial success, delay and re-send */
			mdelay(1000);
			resend = true;
		}
	} while (resend);

	return 0;
}

static int set_real_num_queues(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int rc;

	netdev_dbg(netdev, "Setting real tx/rx queues (%llx/%llx)\n",
		   adapter->req_tx_queues, adapter->req_rx_queues);

	rc = netif_set_real_num_tx_queues(netdev, adapter->req_tx_queues);
	if (rc) {
		netdev_err(netdev, "failed to set the number of tx queues\n");
		return rc;
	}

	rc = netif_set_real_num_rx_queues(netdev, adapter->req_rx_queues);
	if (rc)
		netdev_err(netdev, "failed to set the number of rx queues\n");

	return rc;
}
static int ibmvnic_get_vpd(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	union ibmvnic_crq crq;
	int len = 0;

	if (adapter->vpd->buff)
		len = adapter->vpd->len;

	init_completion(&adapter->fw_done);
	crq.get_vpd_size.first = IBMVNIC_CRQ_CMD;
	crq.get_vpd_size.cmd = GET_VPD_SIZE;
	ibmvnic_send_crq(adapter, &crq);
	wait_for_completion(&adapter->fw_done);

	if (!adapter->vpd->len)
		return -ENODATA;

	if (!adapter->vpd->buff)
		adapter->vpd->buff = kzalloc(adapter->vpd->len, GFP_KERNEL);
	else if (adapter->vpd->len != len)
		adapter->vpd->buff =
			krealloc(adapter->vpd->buff,
				 adapter->vpd->len, GFP_KERNEL);

	if (!adapter->vpd->buff) {
		dev_err(dev, "Could not allocate VPD buffer\n");
		return -ENOMEM;
	}

	adapter->vpd->dma_addr =
		dma_map_single(dev, adapter->vpd->buff, adapter->vpd->len,
			       DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, adapter->vpd->dma_addr)) {
		dev_err(dev, "Could not map VPD buffer\n");
		kfree(adapter->vpd->buff);
		adapter->vpd->buff = NULL;
		return -ENOMEM;
	}

	reinit_completion(&adapter->fw_done);
	crq.get_vpd.first = IBMVNIC_CRQ_CMD;
	crq.get_vpd.cmd = GET_VPD;
	crq.get_vpd.ioba = cpu_to_be32(adapter->vpd->dma_addr);
	crq.get_vpd.len = cpu_to_be32((u32)adapter->vpd->len);
	ibmvnic_send_crq(adapter, &crq);
	wait_for_completion(&adapter->fw_done);

	return 0;
}

static int init_resources(struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int rc;

	rc = set_real_num_queues(netdev);
	if (rc)
		return rc;

	adapter->vpd = kzalloc(sizeof(*adapter->vpd), GFP_KERNEL);
	if (!adapter->vpd)
		return -ENOMEM;

	/* Vital Product Data (VPD) */
	rc = ibmvnic_get_vpd(adapter);
	if (rc) {
		netdev_err(netdev, "failed to initialize Vital Product Data (VPD)\n");
		return rc;
	}

	adapter->map_id = 1;

	rc = init_napi(adapter);
	if (rc)
		return rc;

	send_map_query(adapter);

	rc = init_rx_pools(netdev);
	if (rc)
		return rc;

	rc = init_tx_pools(netdev);
	return rc;
}
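/* Ordering note for init_resources(): it runs only after a successful
 * login (see ibmvnic_open() below), so the queue counts and buffer sizes
 * that init_rx_pools()/init_tx_pools() read from login_rsp_buf are valid
 * by the time the pools are built, and resetting map_id to 1 here gives
 * the pools' long term buffers a fresh set of map ids to register with
 * firmware.
 */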
static int __ibmvnic_open(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	enum vnic_state prev_state = adapter->state;
	int i, rc;

	adapter->state = VNIC_OPENING;
	replenish_pools(adapter);
	ibmvnic_napi_enable(adapter);

	/* We're ready to receive frames, enable the sub-crq interrupts and
	 * set the logical link state to up
	 */
	for (i = 0; i < adapter->req_rx_queues; i++) {
		netdev_dbg(netdev, "Enabling rx_scrq[%d] irq\n", i);
		if (prev_state == VNIC_CLOSED)
			enable_irq(adapter->rx_scrq[i]->irq);
		enable_scrq_irq(adapter, adapter->rx_scrq[i]);
	}

	for (i = 0; i < adapter->req_tx_queues; i++) {
		netdev_dbg(netdev, "Enabling tx_scrq[%d] irq\n", i);
		if (prev_state == VNIC_CLOSED)
			enable_irq(adapter->tx_scrq[i]->irq);
		enable_scrq_irq(adapter, adapter->tx_scrq[i]);
	}

	rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_UP);
	if (rc) {
		for (i = 0; i < adapter->req_rx_queues; i++)
			napi_disable(&adapter->napi[i]);
		release_resources(adapter);
		return rc;
	}

	netif_tx_start_all_queues(netdev);

	if (prev_state == VNIC_CLOSED) {
		for (i = 0; i < adapter->req_rx_queues; i++)
			napi_schedule(&adapter->napi[i]);
	}

	adapter->state = VNIC_OPEN;
	return rc;
}

static int ibmvnic_open(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int rc;

	/* If device failover is pending, just set device state and return.
	 * Device operation will be handled by reset routine.
	 */
	if (adapter->failover_pending) {
		adapter->state = VNIC_OPEN;
		return 0;
	}

	mutex_lock(&adapter->reset_lock);

	if (adapter->state != VNIC_CLOSED) {
		rc = ibmvnic_login(netdev);
		if (rc) {
			mutex_unlock(&adapter->reset_lock);
			return rc;
		}

		rc = init_resources(adapter);
		if (rc) {
			netdev_err(netdev, "failed to initialize resources\n");
			release_resources(adapter);
			mutex_unlock(&adapter->reset_lock);
			return rc;
		}
	}

	rc = __ibmvnic_open(netdev);
	netif_carrier_on(netdev);

	mutex_unlock(&adapter->reset_lock);

	return rc;
}
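/* State handling in the open path above: a pending failover
 * short-circuits open entirely, since the reset worker will reopen the
 * device. Otherwise, coming from any state other than VNIC_CLOSED
 * requires a fresh login and resource allocation, while reopening a
 * closed device reuses its existing pools and only re-enables the
 * sub-CRQ interrupts.
 */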
static void clean_rx_pools(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_rx_pool *rx_pool;
	struct ibmvnic_rx_buff *rx_buff;
	u64 rx_entries;
	int rx_scrqs;
	int i, j;

	if (!adapter->rx_pool)
		return;

	rx_scrqs = adapter->num_active_rx_pools;
	rx_entries = adapter->req_rx_add_entries_per_subcrq;

	/* Free any remaining skbs in the rx buffer pools */
	for (i = 0; i < rx_scrqs; i++) {
		rx_pool = &adapter->rx_pool[i];
		if (!rx_pool || !rx_pool->rx_buff)
			continue;

		netdev_dbg(adapter->netdev, "Cleaning rx_pool[%d]\n", i);
		for (j = 0; j < rx_entries; j++) {
			rx_buff = &rx_pool->rx_buff[j];
			if (rx_buff && rx_buff->skb) {
				dev_kfree_skb_any(rx_buff->skb);
				rx_buff->skb = NULL;
			}
		}
	}
}

static void clean_one_tx_pool(struct ibmvnic_adapter *adapter,
			      struct ibmvnic_tx_pool *tx_pool)
{
	struct ibmvnic_tx_buff *tx_buff;
	u64 tx_entries;
	int i;

	if (!tx_pool || !tx_pool->tx_buff)
		return;

	tx_entries = tx_pool->num_buffers;

	for (i = 0; i < tx_entries; i++) {
		tx_buff = &tx_pool->tx_buff[i];
		if (tx_buff && tx_buff->skb) {
			dev_kfree_skb_any(tx_buff->skb);
			tx_buff->skb = NULL;
		}
	}
}

static void clean_tx_pools(struct ibmvnic_adapter *adapter)
{
	int tx_scrqs;
	int i;

	if (!adapter->tx_pool || !adapter->tso_pool)
		return;

	tx_scrqs = adapter->num_active_tx_pools;

	/* Free any remaining skbs in the tx buffer pools */
	for (i = 0; i < tx_scrqs; i++) {
		netdev_dbg(adapter->netdev, "Cleaning tx_pool[%d]\n", i);
		clean_one_tx_pool(adapter, &adapter->tx_pool[i]);
		clean_one_tx_pool(adapter, &adapter->tso_pool[i]);
	}
}

static void ibmvnic_disable_irqs(struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	if (adapter->tx_scrq) {
		for (i = 0; i < adapter->req_tx_queues; i++)
			if (adapter->tx_scrq[i]->irq) {
				netdev_dbg(netdev,
					   "Disabling tx_scrq[%d] irq\n", i);
				disable_scrq_irq(adapter, adapter->tx_scrq[i]);
				disable_irq(adapter->tx_scrq[i]->irq);
			}
	}

	if (adapter->rx_scrq) {
		for (i = 0; i < adapter->req_rx_queues; i++) {
			if (adapter->rx_scrq[i]->irq) {
				netdev_dbg(netdev,
					   "Disabling rx_scrq[%d] irq\n", i);
				disable_scrq_irq(adapter, adapter->rx_scrq[i]);
				disable_irq(adapter->rx_scrq[i]->irq);
			}
		}
	}
}

static void ibmvnic_cleanup(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	/* ensure that transmissions are stopped if called by do_reset */
	if (adapter->resetting)
		netif_tx_disable(netdev);
	else
		netif_tx_stop_all_queues(netdev);

	ibmvnic_napi_disable(adapter);
	ibmvnic_disable_irqs(adapter);

	clean_rx_pools(adapter);
	clean_tx_pools(adapter);
}

static int __ibmvnic_close(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int rc = 0;

	adapter->state = VNIC_CLOSING;
	rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN);
	if (rc)
		return rc;
	adapter->state = VNIC_CLOSED;
	return 0;
}

static int ibmvnic_close(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int rc;

	/* If device failover is pending, just set device state and return.
	 * Device operation will be handled by reset routine.
	 */
	if (adapter->failover_pending) {
		adapter->state = VNIC_CLOSED;
		return 0;
	}

	mutex_lock(&adapter->reset_lock);
	rc = __ibmvnic_close(netdev);
	ibmvnic_cleanup(netdev);
	mutex_unlock(&adapter->reset_lock);

	return rc;
}
/**
 * build_hdr_data - creates L2/L3/L4 header data buffer
 * @hdr_field - bitfield determining needed headers
 * @skb - socket buffer
 * @hdr_len - array of header lengths to be filled in
 * @hdr_data - buffer to write the header data into
 *
 * Reads hdr_field to determine which headers are needed by firmware.
 * Builds a buffer containing these headers. Saves individual header
 * lengths and returns the total length of the buffer, which is later
 * used to build descriptors.
 */
static int build_hdr_data(u8 hdr_field, struct sk_buff *skb,
			  int *hdr_len, u8 *hdr_data)
{
	int len = 0;
	u8 *hdr;

	if (skb_vlan_tagged(skb) && !skb_vlan_tag_present(skb))
		hdr_len[0] = sizeof(struct vlan_ethhdr);
	else
		hdr_len[0] = sizeof(struct ethhdr);

	if (skb->protocol == htons(ETH_P_IP)) {
		hdr_len[1] = ip_hdr(skb)->ihl * 4;
		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
			hdr_len[2] = tcp_hdrlen(skb);
		else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
			hdr_len[2] = sizeof(struct udphdr);
	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		hdr_len[1] = sizeof(struct ipv6hdr);
		if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
			hdr_len[2] = tcp_hdrlen(skb);
		else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
			hdr_len[2] = sizeof(struct udphdr);
	} else if (skb->protocol == htons(ETH_P_ARP)) {
		hdr_len[1] = arp_hdr_len(skb->dev);
		hdr_len[2] = 0;
	}

	memset(hdr_data, 0, 120);
	if ((hdr_field >> 6) & 1) {
		hdr = skb_mac_header(skb);
		memcpy(hdr_data, hdr, hdr_len[0]);
		len += hdr_len[0];
	}

	if ((hdr_field >> 5) & 1) {
		hdr = skb_network_header(skb);
		memcpy(hdr_data + len, hdr, hdr_len[1]);
		len += hdr_len[1];
	}

	if ((hdr_field >> 4) & 1) {
		hdr = skb_transport_header(skb);
		memcpy(hdr_data + len, hdr, hdr_len[2]);
		len += hdr_len[2];
	}
	return len;
}

/**
 * create_hdr_descs - create header and header extension descriptors
 * @hdr_field - bitfield determining needed headers
 * @data - buffer containing header data
 * @len - length of data buffer
 * @hdr_len - array of individual header lengths
 * @scrq_arr - descriptor array
 *
 * Creates header and, if needed, header extension descriptors and
 * places them in a descriptor array, scrq_arr
 */
static int create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len,
			    union sub_crq *scrq_arr)
{
	union sub_crq hdr_desc;
	int tmp_len = len;
	int num_descs = 0;
	u8 *data, *cur;
	int tmp;

	while (tmp_len > 0) {
		cur = hdr_data + len - tmp_len;

		memset(&hdr_desc, 0, sizeof(hdr_desc));
		if (cur != hdr_data) {
			data = hdr_desc.hdr_ext.data;
			tmp = tmp_len > 29 ? 29 : tmp_len;
			hdr_desc.hdr_ext.first = IBMVNIC_CRQ_CMD;
			hdr_desc.hdr_ext.type = IBMVNIC_HDR_EXT_DESC;
			hdr_desc.hdr_ext.len = tmp;
		} else {
			data = hdr_desc.hdr.data;
			tmp = tmp_len > 24 ? 24 : tmp_len;
			hdr_desc.hdr.first = IBMVNIC_CRQ_CMD;
			hdr_desc.hdr.type = IBMVNIC_HDR_DESC;
			hdr_desc.hdr.len = tmp;
			hdr_desc.hdr.l2_len = (u8)hdr_len[0];
			hdr_desc.hdr.l3_len = cpu_to_be16((u16)hdr_len[1]);
			hdr_desc.hdr.l4_len = (u8)hdr_len[2];
			hdr_desc.hdr.flag = hdr_field << 1;
		}
		memcpy(data, cur, tmp);
		tmp_len -= tmp;
		*scrq_arr = hdr_desc;
		scrq_arr++;
		num_descs++;
	}

	return num_descs;
}
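/* Worked example for the two functions above (sizes are illustrative):
 * an untagged TCP/IPv4 packet gives hdr_len = {14, 20, 20}, so
 * build_hdr_data() produces 54 bytes of header data. create_hdr_descs()
 * then emits one header descriptor carrying the first 24 bytes, followed
 * by two extension descriptors carrying 29 bytes and 1 byte, i.e. three
 * descriptors in total.
 */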
/**
 * build_hdr_descs_arr - build a header descriptor array
 * @txbuff - tx buffer containing the skb and descriptor array to fill
 * @num_entries - number of descriptors to be sent, updated in place
 * @hdr_field - bit field determining which headers will be sent
 *
 * This function will build a TX descriptor array with applicable
 * L2/L3/L4 packet header descriptors to be sent by send_subcrq_indirect.
 */
static void build_hdr_descs_arr(struct ibmvnic_tx_buff *txbuff,
				int *num_entries, u8 hdr_field)
{
	int hdr_len[3] = {0, 0, 0};
	int tot_len;
	u8 *hdr_data = txbuff->hdr_data;

	tot_len = build_hdr_data(hdr_field, txbuff->skb, hdr_len,
				 txbuff->hdr_data);
	*num_entries += create_hdr_descs(hdr_field, hdr_data, tot_len, hdr_len,
					 txbuff->indir_arr + 1);
}

static int ibmvnic_xmit_workarounds(struct sk_buff *skb,
				    struct net_device *netdev)
{
	/* For some backing devices, mishandling of small packets
	 * can result in a loss of connection or TX stall. Device
	 * architects recommend that no packet should be smaller
	 * than the minimum MTU value provided to the driver, so
	 * pad any packets to that length
	 */
	if (skb->len < netdev->min_mtu)
		return skb_put_padto(skb, netdev->min_mtu);

	return 0;
}
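/* Example of the workaround above (the MTU value is hypothetical): if
 * the hypervisor reported a minimum MTU of 64, a 42-byte ARP request
 * would be zero-padded to 64 bytes by skb_put_padto(). Note that
 * skb_put_padto() frees the skb on failure, which is why a nonzero
 * return from ibmvnic_xmit_workarounds() is handled in ibmvnic_xmit()
 * by dropping the packet without a second free.
 */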
static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int queue_num = skb_get_queue_mapping(skb);
	u8 *hdrs = (u8 *)&adapter->tx_rx_desc_req;
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_tx_buff *tx_buff = NULL;
	struct ibmvnic_sub_crq_queue *tx_scrq;
	struct ibmvnic_tx_pool *tx_pool;
	unsigned int tx_send_failed = 0;
	unsigned int tx_map_failed = 0;
	unsigned int tx_dropped = 0;
	unsigned int tx_packets = 0;
	unsigned int tx_bytes = 0;
	dma_addr_t data_dma_addr;
	struct netdev_queue *txq;
	unsigned long lpar_rc;
	union sub_crq tx_crq;
	unsigned int offset;
	int num_entries = 1;
	unsigned char *dst;
	u64 *handle_array;
	int index = 0;
	u8 proto = 0;
	int ret = 0;

	if (adapter->resetting) {
		if (!netif_subqueue_stopped(netdev, skb))
			netif_stop_subqueue(netdev, queue_num);
		dev_kfree_skb_any(skb);

		tx_send_failed++;
		tx_dropped++;
		ret = NETDEV_TX_OK;
		goto out;
	}

	if (ibmvnic_xmit_workarounds(skb, netdev)) {
		tx_dropped++;
		tx_send_failed++;
		ret = NETDEV_TX_OK;
		goto out;
	}
	if (skb_is_gso(skb))
		tx_pool = &adapter->tso_pool[queue_num];
	else
		tx_pool = &adapter->tx_pool[queue_num];

	tx_scrq = adapter->tx_scrq[queue_num];
	txq = netdev_get_tx_queue(netdev, skb_get_queue_mapping(skb));
	handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
		be32_to_cpu(adapter->login_rsp_buf->off_txsubm_subcrqs));

	index = tx_pool->free_map[tx_pool->consumer_index];

	if (index == IBMVNIC_INVALID_MAP) {
		dev_kfree_skb_any(skb);
		tx_send_failed++;
		tx_dropped++;
		ret = NETDEV_TX_OK;
		goto out;
	}

	tx_pool->free_map[tx_pool->consumer_index] = IBMVNIC_INVALID_MAP;

	offset = index * tx_pool->buf_size;
	dst = tx_pool->long_term_buff.buff + offset;
	memset(dst, 0, tx_pool->buf_size);
	data_dma_addr = tx_pool->long_term_buff.addr + offset;

	if (skb_shinfo(skb)->nr_frags) {
		int cur, i;

		/* Copy the head */
		skb_copy_from_linear_data(skb, dst, skb_headlen(skb));
		cur = skb_headlen(skb);

		/* Copy the frags */
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			memcpy(dst + cur,
			       page_address(skb_frag_page(frag)) +
			       frag->page_offset, skb_frag_size(frag));
			cur += skb_frag_size(frag);
		}
	} else {
		skb_copy_from_linear_data(skb, dst, skb->len);
	}

	tx_pool->consumer_index =
	    (tx_pool->consumer_index + 1) % tx_pool->num_buffers;

	tx_buff = &tx_pool->tx_buff[index];
	tx_buff->skb = skb;
	tx_buff->data_dma[0] = data_dma_addr;
	tx_buff->data_len[0] = skb->len;
	tx_buff->index = index;
	tx_buff->pool_index = queue_num;
	tx_buff->last_frag = true;

	memset(&tx_crq, 0, sizeof(tx_crq));
	tx_crq.v1.first = IBMVNIC_CRQ_CMD;
	tx_crq.v1.type = IBMVNIC_TX_DESC;
	tx_crq.v1.n_crq_elem = 1;
	tx_crq.v1.n_sge = 1;
	tx_crq.v1.flags1 = IBMVNIC_TX_COMP_NEEDED;

	if (skb_is_gso(skb))
		tx_crq.v1.correlator =
			cpu_to_be32(index | IBMVNIC_TSO_POOL_MASK);
	else
		tx_crq.v1.correlator = cpu_to_be32(index);
	tx_crq.v1.dma_reg = cpu_to_be16(tx_pool->long_term_buff.map_id);
	tx_crq.v1.sge_len = cpu_to_be32(skb->len);
	tx_crq.v1.ioba = cpu_to_be64(data_dma_addr);

	if (adapter->vlan_header_insertion) {
		tx_crq.v1.flags2 |= IBMVNIC_TX_VLAN_INSERT;
		tx_crq.v1.vlan_id = cpu_to_be16(skb->vlan_tci);
	}

	if (skb->protocol == htons(ETH_P_IP)) {
		tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV4;
		proto = ip_hdr(skb)->protocol;
	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV6;
		proto = ipv6_hdr(skb)->nexthdr;
	}

	if (proto == IPPROTO_TCP)
		tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_TCP;
	else if (proto == IPPROTO_UDP)
		tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_UDP;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		tx_crq.v1.flags1 |= IBMVNIC_TX_CHKSUM_OFFLOAD;
		hdrs += 2;
	}
	if (skb_is_gso(skb)) {
		tx_crq.v1.flags1 |= IBMVNIC_TX_LSO;
		tx_crq.v1.mss = cpu_to_be16(skb_shinfo(skb)->gso_size);
		hdrs += 2;
	}
	/* determine if l2/3/4 headers are sent to firmware */
	if ((*hdrs >> 7) & 1) {
		build_hdr_descs_arr(tx_buff, &num_entries, *hdrs);
		tx_crq.v1.n_crq_elem = num_entries;
		tx_buff->num_entries = num_entries;
		tx_buff->indir_arr[0] = tx_crq;
		tx_buff->indir_dma = dma_map_single(dev, tx_buff->indir_arr,
						    sizeof(tx_buff->indir_arr),
						    DMA_TO_DEVICE);
		if (dma_mapping_error(dev, tx_buff->indir_dma)) {
			dev_kfree_skb_any(skb);
			tx_buff->skb = NULL;
			if (!firmware_has_feature(FW_FEATURE_CMO))
				dev_err(dev, "tx: unable to map descriptor array\n");
			tx_map_failed++;
			tx_dropped++;
			ret = NETDEV_TX_OK;
			goto tx_err_out;
		}
		lpar_rc = send_subcrq_indirect(adapter, handle_array[queue_num],
					       (u64)tx_buff->indir_dma,
					       (u64)num_entries);
	} else {
		tx_buff->num_entries = num_entries;
		lpar_rc = send_subcrq(adapter, handle_array[queue_num],
				      &tx_crq);
	}
	if (lpar_rc != H_SUCCESS) {
		dev_err(dev, "tx failed with code %ld\n", lpar_rc);
		dev_kfree_skb_any(skb);
		tx_buff->skb = NULL;

		if (lpar_rc == H_CLOSED || adapter->failover_pending) {
			/* Disable TX and report carrier off if queue is closed
			 * or pending failover.
			 * Firmware guarantees that a signal will be sent to the
			 * driver, triggering a reset or some other action.
			 */
			netif_tx_stop_all_queues(netdev);
			netif_carrier_off(netdev);
		}

		tx_send_failed++;
		tx_dropped++;
		ret = NETDEV_TX_OK;
		goto tx_err_out;
	}

	if (atomic_add_return(num_entries, &tx_scrq->used)
	    >= adapter->req_tx_entries_per_subcrq) {
		netdev_dbg(netdev, "Stopping queue %d\n", queue_num);
		netif_stop_subqueue(netdev, queue_num);
	}

	tx_packets++;
	tx_bytes += skb->len;
	txq->trans_start = jiffies;
	ret = NETDEV_TX_OK;
	goto out;

tx_err_out:
	/* roll back consumer index and map array */
	if (tx_pool->consumer_index == 0)
		tx_pool->consumer_index =
			tx_pool->num_buffers - 1;
	else
		tx_pool->consumer_index--;
	tx_pool->free_map[tx_pool->consumer_index] = index;
out:
	netdev->stats.tx_dropped += tx_dropped;
	netdev->stats.tx_bytes += tx_bytes;
	netdev->stats.tx_packets += tx_packets;
	adapter->tx_send_failed += tx_send_failed;
	adapter->tx_map_failed += tx_map_failed;
	adapter->tx_stats_buffers[queue_num].packets += tx_packets;
	adapter->tx_stats_buffers[queue_num].bytes += tx_bytes;
	adapter->tx_stats_buffers[queue_num].dropped_packets += tx_dropped;

	return ret;
}
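/* Flow control note for the transmit path above: tx_scrq->used counts
 * descriptors posted but not yet completed. Once it would reach
 * req_tx_entries_per_subcrq the subqueue is stopped; the tx completion
 * handler (elsewhere in this file) is expected to wake it again as
 * entries drain.
 */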
static void ibmvnic_set_multi(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct netdev_hw_addr *ha;
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.request_capability.first = IBMVNIC_CRQ_CMD;
	crq.request_capability.cmd = REQUEST_CAPABILITY;

	if (netdev->flags & IFF_PROMISC) {
		if (!adapter->promisc_supported)
			return;
	} else {
		if (netdev->flags & IFF_ALLMULTI) {
			/* Accept all multicast */
			memset(&crq, 0, sizeof(crq));
			crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
			crq.multicast_ctrl.cmd = MULTICAST_CTRL;
			crq.multicast_ctrl.flags = IBMVNIC_ENABLE_ALL;
			ibmvnic_send_crq(adapter, &crq);
		} else if (netdev_mc_empty(netdev)) {
			/* Reject all multicast */
			memset(&crq, 0, sizeof(crq));
			crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
			crq.multicast_ctrl.cmd = MULTICAST_CTRL;
			crq.multicast_ctrl.flags = IBMVNIC_DISABLE_ALL;
			ibmvnic_send_crq(adapter, &crq);
		} else {
			/* Accept one or more multicast(s) */
			netdev_for_each_mc_addr(ha, netdev) {
				memset(&crq, 0, sizeof(crq));
				crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
				crq.multicast_ctrl.cmd = MULTICAST_CTRL;
				crq.multicast_ctrl.flags = IBMVNIC_ENABLE_MC;
				ether_addr_copy(&crq.multicast_ctrl.mac_addr[0],
						ha->addr);
				ibmvnic_send_crq(adapter, &crq);
			}
		}
	}
}

static int __ibmvnic_set_mac(struct net_device *netdev, struct sockaddr *p)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	union ibmvnic_crq crq;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memset(&crq, 0, sizeof(crq));
	crq.change_mac_addr.first = IBMVNIC_CRQ_CMD;
	crq.change_mac_addr.cmd = CHANGE_MAC_ADDR;
	ether_addr_copy(&crq.change_mac_addr.mac_addr[0], addr->sa_data);

	init_completion(&adapter->fw_done);
	ibmvnic_send_crq(adapter, &crq);
	wait_for_completion(&adapter->fw_done);
	/* netdev->dev_addr is changed in handle_change_mac_rsp function */
	return adapter->fw_done_rc ? -EIO : 0;
}
static int ibmvnic_set_mac(struct net_device *netdev, void *p)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int rc;

	if (adapter->state == VNIC_PROBED) {
		memcpy(&adapter->desired.mac, addr, sizeof(struct sockaddr));
		adapter->mac_change_pending = true;
		return 0;
	}

	rc = __ibmvnic_set_mac(netdev, addr);

	return rc;
}

/**
 * do_reset returns zero if we are able to keep processing reset events, or
 * non-zero if we hit a fatal error and must halt.
 */
static int do_reset(struct ibmvnic_adapter *adapter,
		    struct ibmvnic_rwi *rwi, u32 reset_state)
{
	u64 old_num_rx_queues, old_num_tx_queues;
	struct net_device *netdev = adapter->netdev;
	int i, rc;

	netdev_dbg(adapter->netdev, "Re-setting driver (%d)\n",
		   rwi->reset_reason);

	netif_carrier_off(netdev);
	adapter->reset_reason = rwi->reset_reason;

	old_num_rx_queues = adapter->req_rx_queues;
	old_num_tx_queues = adapter->req_tx_queues;

	ibmvnic_cleanup(netdev);

	if (adapter->reset_reason != VNIC_RESET_MOBILITY &&
	    adapter->reset_reason != VNIC_RESET_FAILOVER) {
		rc = __ibmvnic_close(netdev);
		if (rc)
			return rc;
	}

	if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM ||
	    adapter->wait_for_reset) {
		release_resources(adapter);
		release_sub_crqs(adapter, 1);
		release_crq_queue(adapter);
	}

	if (adapter->reset_reason != VNIC_RESET_NON_FATAL) {
		/* remove the closed state so when we call open it appears
		 * we are coming from the probed state.
		 */
		adapter->state = VNIC_PROBED;

		if (adapter->wait_for_reset) {
			rc = init_crq_queue(adapter);
		} else if (adapter->reset_reason == VNIC_RESET_MOBILITY) {
			rc = ibmvnic_reenable_crq_queue(adapter);
			release_sub_crqs(adapter, 1);
		} else {
			rc = ibmvnic_reset_crq(adapter);
			if (!rc)
				rc = vio_enable_interrupts(adapter->vdev);
		}

		if (rc) {
			netdev_err(adapter->netdev,
				   "Couldn't initialize crq. rc=%d\n", rc);
			return rc;
		}

		rc = ibmvnic_init(adapter);
		if (rc)
			return IBMVNIC_INIT_FAILED;

		/* If the adapter was in PROBE state prior to the reset,
		 * exit here.
		 */
		if (reset_state == VNIC_PROBED)
			return 0;

		rc = ibmvnic_login(netdev);
		if (rc) {
			adapter->state = VNIC_PROBED;
			return 0;
		}

		if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM ||
		    adapter->wait_for_reset) {
			rc = init_resources(adapter);
			if (rc)
				return rc;
		} else if (adapter->req_rx_queues != old_num_rx_queues ||
			   adapter->req_tx_queues != old_num_tx_queues) {
			adapter->map_id = 1;
			release_rx_pools(adapter);
			release_tx_pools(adapter);
			init_rx_pools(netdev);
			init_tx_pools(netdev);

			release_napi(adapter);
			init_napi(adapter);
		} else {
			rc = reset_tx_pools(adapter);
			if (rc)
				return rc;

			rc = reset_rx_pools(adapter);
			if (rc)
				return rc;
		}
		ibmvnic_disable_irqs(adapter);
	}
	adapter->state = VNIC_CLOSED;

	if (reset_state == VNIC_CLOSED)
		return 0;

	rc = __ibmvnic_open(netdev);
	if (rc) {
		if (list_empty(&adapter->rwi_list))
			adapter->state = VNIC_CLOSED;
		else
			adapter->state = reset_state;

		return 0;
	}

	/* kick napi */
	for (i = 0; i < adapter->req_rx_queues; i++)
		napi_schedule(&adapter->napi[i]);

	if (adapter->reset_reason != VNIC_RESET_FAILOVER &&
	    adapter->reset_reason != VNIC_RESET_CHANGE_PARAM)
		netdev_notify_peers(netdev);

	netif_carrier_on(netdev);

	return 0;
}
static struct ibmvnic_rwi *get_next_rwi(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_rwi *rwi;

	mutex_lock(&adapter->rwi_lock);

	if (!list_empty(&adapter->rwi_list)) {
		rwi = list_first_entry(&adapter->rwi_list, struct ibmvnic_rwi,
				       list);
		list_del(&rwi->list);
	} else {
		rwi = NULL;
	}

	mutex_unlock(&adapter->rwi_lock);
	return rwi;
}

static void free_all_rwi(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_rwi *rwi;

	rwi = get_next_rwi(adapter);
	while (rwi) {
		kfree(rwi);
		rwi = get_next_rwi(adapter);
	}
}

static void __ibmvnic_reset(struct work_struct *work)
{
	struct ibmvnic_rwi *rwi;
	struct ibmvnic_adapter *adapter;
	struct net_device *netdev;
	u32 reset_state;
	int rc = 0;

	adapter = container_of(work, struct ibmvnic_adapter, ibmvnic_reset);
	netdev = adapter->netdev;

	mutex_lock(&adapter->reset_lock);
	adapter->resetting = true;
	reset_state = adapter->state;

	rwi = get_next_rwi(adapter);
	while (rwi) {
		rc = do_reset(adapter, rwi, reset_state);
		kfree(rwi);
		if (rc && rc != IBMVNIC_INIT_FAILED)
			break;

		rwi = get_next_rwi(adapter);
	}

	if (adapter->wait_for_reset) {
		adapter->wait_for_reset = false;
		adapter->reset_done_rc = rc;
		complete(&adapter->reset_done);
	}

	if (rc) {
		netdev_dbg(adapter->netdev, "Reset failed\n");
		free_all_rwi(adapter);
		mutex_unlock(&adapter->reset_lock);
		return;
	}

	adapter->resetting = false;
	mutex_unlock(&adapter->reset_lock);
}

static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
			 enum ibmvnic_reset_reason reason)
{
	struct ibmvnic_rwi *rwi, *tmp;
	struct net_device *netdev = adapter->netdev;
	struct list_head *entry;
	int ret;

	if (adapter->state == VNIC_REMOVING ||
	    adapter->state == VNIC_REMOVED ||
	    adapter->failover_pending) {
		ret = EBUSY;
		netdev_dbg(netdev, "Adapter removing or pending failover, skipping reset\n");
		goto err;
	}

	if (adapter->state == VNIC_PROBING) {
		netdev_warn(netdev, "Adapter reset during probe\n");
		ret = adapter->init_done_rc = EAGAIN;
		goto err;
	}

	mutex_lock(&adapter->rwi_lock);

	list_for_each(entry, &adapter->rwi_list) {
		tmp = list_entry(entry, struct ibmvnic_rwi, list);
		if (tmp->reset_reason == reason) {
			netdev_dbg(netdev, "Skipping matching reset\n");
			mutex_unlock(&adapter->rwi_lock);
			ret = EBUSY;
			goto err;
		}
	}

	rwi = kzalloc(sizeof(*rwi), GFP_KERNEL);
	if (!rwi) {
		mutex_unlock(&adapter->rwi_lock);
		ibmvnic_close(netdev);
		ret = ENOMEM;
		goto err;
	}

	rwi->reset_reason = reason;
	list_add_tail(&rwi->list, &adapter->rwi_list);
	mutex_unlock(&adapter->rwi_lock);

	netdev_dbg(adapter->netdev, "Scheduling reset (reason %d)\n", reason);
	schedule_work(&adapter->ibmvnic_reset);

	return 0;
err:
	if (adapter->wait_for_reset)
		adapter->wait_for_reset = false;
	return -ret;
}
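/* End-to-end reset flow, summarizing the functions above: callers such
 * as ibmvnic_tx_timeout() below invoke ibmvnic_reset(), which queues an
 * ibmvnic_rwi ("reset work item") unless one with the same reason is
 * already pending, then schedules __ibmvnic_reset(). The worker drains
 * the rwi list through do_reset() until the list empties or a fatal
 * error stops it.
 */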
= EBUSY; 1938 netdev_dbg(netdev, "Adapter removing or pending failover, skipping reset\n"); 1939 goto err; 1940 } 1941 1942 if (adapter->state == VNIC_PROBING) { 1943 netdev_warn(netdev, "Adapter reset during probe\n"); 1944 ret = adapter->init_done_rc = EAGAIN; 1945 goto err; 1946 } 1947 1948 mutex_lock(&adapter->rwi_lock); 1949 1950 list_for_each(entry, &adapter->rwi_list) { 1951 tmp = list_entry(entry, struct ibmvnic_rwi, list); 1952 if (tmp->reset_reason == reason) { 1953 netdev_dbg(netdev, "Skipping matching reset\n"); 1954 mutex_unlock(&adapter->rwi_lock); 1955 ret = EBUSY; 1956 goto err; 1957 } 1958 } 1959 1960 rwi = kzalloc(sizeof(*rwi), GFP_KERNEL); 1961 if (!rwi) { 1962 mutex_unlock(&adapter->rwi_lock); 1963 ibmvnic_close(netdev); 1964 ret = ENOMEM; 1965 goto err; 1966 } 1967 1968 rwi->reset_reason = reason; 1969 list_add_tail(&rwi->list, &adapter->rwi_list); 1970 mutex_unlock(&adapter->rwi_lock); 1971 1972 netdev_dbg(adapter->netdev, "Scheduling reset (reason %d)\n", reason); 1973 schedule_work(&adapter->ibmvnic_reset); 1974 1975 return 0; 1976 err: 1977 if (adapter->wait_for_reset) 1978 adapter->wait_for_reset = false; 1979 return -ret; 1980 } 1981 1982 static void ibmvnic_tx_timeout(struct net_device *dev) 1983 { 1984 struct ibmvnic_adapter *adapter = netdev_priv(dev); 1985 1986 ibmvnic_reset(adapter, VNIC_RESET_TIMEOUT); 1987 } 1988 1989 static void remove_buff_from_pool(struct ibmvnic_adapter *adapter, 1990 struct ibmvnic_rx_buff *rx_buff) 1991 { 1992 struct ibmvnic_rx_pool *pool = &adapter->rx_pool[rx_buff->pool_index]; 1993 1994 rx_buff->skb = NULL; 1995 1996 pool->free_map[pool->next_alloc] = (int)(rx_buff - pool->rx_buff); 1997 pool->next_alloc = (pool->next_alloc + 1) % pool->size; 1998 1999 atomic_dec(&pool->available); 2000 } 2001 2002 static int ibmvnic_poll(struct napi_struct *napi, int budget) 2003 { 2004 struct net_device *netdev = napi->dev; 2005 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 2006 int scrq_num = (int)(napi - adapter->napi); 2007 int frames_processed = 0; 2008 2009 restart_poll: 2010 while (frames_processed < budget) { 2011 struct sk_buff *skb; 2012 struct ibmvnic_rx_buff *rx_buff; 2013 union sub_crq *next; 2014 u32 length; 2015 u16 offset; 2016 u8 flags = 0; 2017 2018 if (unlikely(adapter->resetting && 2019 adapter->reset_reason != VNIC_RESET_NON_FATAL)) { 2020 enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]); 2021 napi_complete_done(napi, frames_processed); 2022 return frames_processed; 2023 } 2024 2025 if (!pending_scrq(adapter, adapter->rx_scrq[scrq_num])) 2026 break; 2027 next = ibmvnic_next_scrq(adapter, adapter->rx_scrq[scrq_num]); 2028 rx_buff = 2029 (struct ibmvnic_rx_buff *)be64_to_cpu(next-> 2030 rx_comp.correlator); 2031 /* do error checking */ 2032 if (next->rx_comp.rc) { 2033 netdev_dbg(netdev, "rx buffer returned with rc %x\n", 2034 be16_to_cpu(next->rx_comp.rc)); 2035 /* free the entry */ 2036 next->rx_comp.first = 0; 2037 dev_kfree_skb_any(rx_buff->skb); 2038 remove_buff_from_pool(adapter, rx_buff); 2039 continue; 2040 } else if (!rx_buff->skb) { 2041 /* free the entry */ 2042 next->rx_comp.first = 0; 2043 remove_buff_from_pool(adapter, rx_buff); 2044 continue; 2045 } 2046 2047 length = be32_to_cpu(next->rx_comp.len); 2048 offset = be16_to_cpu(next->rx_comp.off_frame_data); 2049 flags = next->rx_comp.flags; 2050 skb = rx_buff->skb; 2051 skb_copy_to_linear_data(skb, rx_buff->data + offset, 2052 length); 2053 2054 /* VLAN Header has been stripped by the system firmware and 2055 * needs to be inserted by the driver 2056 
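 * (__vlan_hwaccel_put_tag() below records the tag in the skb itself,
 * so the rest of the stack sees it as hardware-accelerated VLAN RX)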
*/ 2057 if (adapter->rx_vlan_header_insertion && 2058 (flags & IBMVNIC_VLAN_STRIPPED)) 2059 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), 2060 ntohs(next->rx_comp.vlan_tci)); 2061 2062 /* free the entry */ 2063 next->rx_comp.first = 0; 2064 remove_buff_from_pool(adapter, rx_buff); 2065 2066 skb_put(skb, length); 2067 skb->protocol = eth_type_trans(skb, netdev); 2068 skb_record_rx_queue(skb, scrq_num); 2069 2070 if (flags & IBMVNIC_IP_CHKSUM_GOOD && 2071 flags & IBMVNIC_TCP_UDP_CHKSUM_GOOD) { 2072 skb->ip_summed = CHECKSUM_UNNECESSARY; 2073 } 2074 2075 length = skb->len; 2076 napi_gro_receive(napi, skb); /* send it up */ 2077 netdev->stats.rx_packets++; 2078 netdev->stats.rx_bytes += length; 2079 adapter->rx_stats_buffers[scrq_num].packets++; 2080 adapter->rx_stats_buffers[scrq_num].bytes += length; 2081 frames_processed++; 2082 } 2083 2084 if (adapter->state != VNIC_CLOSING) 2085 replenish_rx_pool(adapter, &adapter->rx_pool[scrq_num]); 2086 2087 if (frames_processed < budget) { 2088 enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]); 2089 napi_complete_done(napi, frames_processed); 2090 if (pending_scrq(adapter, adapter->rx_scrq[scrq_num]) && 2091 napi_reschedule(napi)) { 2092 disable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]); 2093 goto restart_poll; 2094 } 2095 } 2096 return frames_processed; 2097 } 2098 2099 #ifdef CONFIG_NET_POLL_CONTROLLER 2100 static void ibmvnic_netpoll_controller(struct net_device *dev) 2101 { 2102 struct ibmvnic_adapter *adapter = netdev_priv(dev); 2103 int i; 2104 2105 replenish_pools(netdev_priv(dev)); 2106 for (i = 0; i < adapter->req_rx_queues; i++) 2107 ibmvnic_interrupt_rx(adapter->rx_scrq[i]->irq, 2108 adapter->rx_scrq[i]); 2109 } 2110 #endif 2111 2112 static int wait_for_reset(struct ibmvnic_adapter *adapter) 2113 { 2114 int rc, ret; 2115 2116 adapter->fallback.mtu = adapter->req_mtu; 2117 adapter->fallback.rx_queues = adapter->req_rx_queues; 2118 adapter->fallback.tx_queues = adapter->req_tx_queues; 2119 adapter->fallback.rx_entries = adapter->req_rx_add_entries_per_subcrq; 2120 adapter->fallback.tx_entries = adapter->req_tx_entries_per_subcrq; 2121 2122 init_completion(&adapter->reset_done); 2123 adapter->wait_for_reset = true; 2124 rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM); 2125 if (rc) 2126 return rc; 2127 wait_for_completion(&adapter->reset_done); 2128 2129 ret = 0; 2130 if (adapter->reset_done_rc) { 2131 ret = -EIO; 2132 adapter->desired.mtu = adapter->fallback.mtu; 2133 adapter->desired.rx_queues = adapter->fallback.rx_queues; 2134 adapter->desired.tx_queues = adapter->fallback.tx_queues; 2135 adapter->desired.rx_entries = adapter->fallback.rx_entries; 2136 adapter->desired.tx_entries = adapter->fallback.tx_entries; 2137 2138 init_completion(&adapter->reset_done); 2139 adapter->wait_for_reset = true; 2140 rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM); 2141 if (rc) 2142 return ret; 2143 wait_for_completion(&adapter->reset_done); 2144 } 2145 adapter->wait_for_reset = false; 2146 2147 return ret; 2148 } 2149 2150 static int ibmvnic_change_mtu(struct net_device *netdev, int new_mtu) 2151 { 2152 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 2153 2154 adapter->desired.mtu = new_mtu + ETH_HLEN; 2155 2156 return wait_for_reset(adapter); 2157 } 2158 2159 static netdev_features_t ibmvnic_features_check(struct sk_buff *skb, 2160 struct net_device *dev, 2161 netdev_features_t features) 2162 { 2163 /* Some backing hardware adapters can not 2164 * handle packets with a MSS less than 224 2165 * or with only one segment. 
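 * Clearing the GSO feature bits for such skbs makes the core
 * networking stack segment them in software instead of handing
 * them to the device.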
2166 */ 2167 if (skb_is_gso(skb)) { 2168 if (skb_shinfo(skb)->gso_size < 224 || 2169 skb_shinfo(skb)->gso_segs == 1) 2170 features &= ~NETIF_F_GSO_MASK; 2171 } 2172 2173 return features; 2174 } 2175 2176 static const struct net_device_ops ibmvnic_netdev_ops = { 2177 .ndo_open = ibmvnic_open, 2178 .ndo_stop = ibmvnic_close, 2179 .ndo_start_xmit = ibmvnic_xmit, 2180 .ndo_set_rx_mode = ibmvnic_set_multi, 2181 .ndo_set_mac_address = ibmvnic_set_mac, 2182 .ndo_validate_addr = eth_validate_addr, 2183 .ndo_tx_timeout = ibmvnic_tx_timeout, 2184 #ifdef CONFIG_NET_POLL_CONTROLLER 2185 .ndo_poll_controller = ibmvnic_netpoll_controller, 2186 #endif 2187 .ndo_change_mtu = ibmvnic_change_mtu, 2188 .ndo_features_check = ibmvnic_features_check, 2189 }; 2190 2191 /* ethtool functions */ 2192 2193 static int ibmvnic_get_link_ksettings(struct net_device *netdev, 2194 struct ethtool_link_ksettings *cmd) 2195 { 2196 u32 supported, advertising; 2197 2198 supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | 2199 SUPPORTED_FIBRE); 2200 advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg | 2201 ADVERTISED_FIBRE); 2202 cmd->base.speed = SPEED_1000; 2203 cmd->base.duplex = DUPLEX_FULL; 2204 cmd->base.port = PORT_FIBRE; 2205 cmd->base.phy_address = 0; 2206 cmd->base.autoneg = AUTONEG_ENABLE; 2207 2208 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, 2209 supported); 2210 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, 2211 advertising); 2212 2213 return 0; 2214 } 2215 2216 static void ibmvnic_get_drvinfo(struct net_device *netdev, 2217 struct ethtool_drvinfo *info) 2218 { 2219 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 2220 2221 strlcpy(info->driver, ibmvnic_driver_name, sizeof(info->driver)); 2222 strlcpy(info->version, IBMVNIC_DRIVER_VERSION, sizeof(info->version)); 2223 strlcpy(info->fw_version, adapter->fw_version, 2224 sizeof(info->fw_version)); 2225 } 2226 2227 static u32 ibmvnic_get_msglevel(struct net_device *netdev) 2228 { 2229 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 2230 2231 return adapter->msg_enable; 2232 } 2233 2234 static void ibmvnic_set_msglevel(struct net_device *netdev, u32 data) 2235 { 2236 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 2237 2238 adapter->msg_enable = data; 2239 } 2240 2241 static u32 ibmvnic_get_link(struct net_device *netdev) 2242 { 2243 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 2244 2245 /* Don't need to send a query because we request a logical link up at 2246 * init and then we wait for link state indications 2247 */ 2248 return adapter->logical_link_state; 2249 } 2250 2251 static void ibmvnic_get_ringparam(struct net_device *netdev, 2252 struct ethtool_ringparam *ring) 2253 { 2254 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 2255 2256 ring->rx_max_pending = adapter->max_rx_add_entries_per_subcrq; 2257 ring->tx_max_pending = adapter->max_tx_entries_per_subcrq; 2258 ring->rx_mini_max_pending = 0; 2259 ring->rx_jumbo_max_pending = 0; 2260 ring->rx_pending = adapter->req_rx_add_entries_per_subcrq; 2261 ring->tx_pending = adapter->req_tx_entries_per_subcrq; 2262 ring->rx_mini_pending = 0; 2263 ring->rx_jumbo_pending = 0; 2264 } 2265 2266 static int ibmvnic_set_ringparam(struct net_device *netdev, 2267 struct ethtool_ringparam *ring) 2268 { 2269 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 2270 2271 if (ring->rx_pending > adapter->max_rx_add_entries_per_subcrq || 2272 ring->tx_pending > adapter->max_tx_entries_per_subcrq) { 2273 netdev_err(netdev, "Invalid 
request.\n");
		netdev_err(netdev, "Max tx buffers = %llu\n",
			   adapter->max_tx_entries_per_subcrq);
		netdev_err(netdev, "Max rx buffers = %llu\n",
			   adapter->max_rx_add_entries_per_subcrq);
		return -EINVAL;
	}

	adapter->desired.rx_entries = ring->rx_pending;
	adapter->desired.tx_entries = ring->tx_pending;

	return wait_for_reset(adapter);
}

static void ibmvnic_get_channels(struct net_device *netdev,
				 struct ethtool_channels *channels)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	channels->max_rx = adapter->max_rx_queues;
	channels->max_tx = adapter->max_tx_queues;
	channels->max_other = 0;
	channels->max_combined = 0;
	channels->rx_count = adapter->req_rx_queues;
	channels->tx_count = adapter->req_tx_queues;
	channels->other_count = 0;
	channels->combined_count = 0;
}

static int ibmvnic_set_channels(struct net_device *netdev,
				struct ethtool_channels *channels)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	adapter->desired.rx_queues = channels->rx_count;
	adapter->desired.tx_queues = channels->tx_count;

	return wait_for_reset(adapter);
}

static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	struct ibmvnic_adapter *adapter = netdev_priv(dev);
	int i;

	if (stringset != ETH_SS_STATS)
		return;

	for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++, data += ETH_GSTRING_LEN)
		memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN);

	for (i = 0; i < adapter->req_tx_queues; i++) {
		snprintf(data, ETH_GSTRING_LEN, "tx%d_packets", i);
		data += ETH_GSTRING_LEN;

		snprintf(data, ETH_GSTRING_LEN, "tx%d_bytes", i);
		data += ETH_GSTRING_LEN;

		snprintf(data, ETH_GSTRING_LEN, "tx%d_dropped_packets", i);
		data += ETH_GSTRING_LEN;
	}

	for (i = 0; i < adapter->req_rx_queues; i++) {
		snprintf(data, ETH_GSTRING_LEN, "rx%d_packets", i);
		data += ETH_GSTRING_LEN;

		snprintf(data, ETH_GSTRING_LEN, "rx%d_bytes", i);
		data += ETH_GSTRING_LEN;

		snprintf(data, ETH_GSTRING_LEN, "rx%d_interrupts", i);
		data += ETH_GSTRING_LEN;
	}
}

static int ibmvnic_get_sset_count(struct net_device *dev, int sset)
{
	struct ibmvnic_adapter *adapter = netdev_priv(dev);

	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(ibmvnic_stats) +
		       adapter->req_tx_queues * NUM_TX_STATS +
		       adapter->req_rx_queues * NUM_RX_STATS;
	default:
		return -EOPNOTSUPP;
	}
}

static void ibmvnic_get_ethtool_stats(struct net_device *dev,
				      struct ethtool_stats *stats, u64 *data)
{
	struct ibmvnic_adapter *adapter = netdev_priv(dev);
	union ibmvnic_crq crq;
	int i, j;

	memset(&crq, 0, sizeof(crq));
	crq.request_statistics.first = IBMVNIC_CRQ_CMD;
	crq.request_statistics.cmd = REQUEST_STATISTICS;
	crq.request_statistics.ioba = cpu_to_be32(adapter->stats_token);
	crq.request_statistics.len =
	    cpu_to_be32(sizeof(struct ibmvnic_statistics));

	/* Wait for data to be written */
	init_completion(&adapter->stats_done);
	ibmvnic_send_crq(adapter, &crq);
	wait_for_completion(&adapter->stats_done);

	for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++)
		data[i] = be64_to_cpu(IBMVNIC_GET_STAT(adapter,
						       ibmvnic_stats[i].offset));

	for (j = 0; j < 
adapter->req_tx_queues; j++) { 2385 data[i] = adapter->tx_stats_buffers[j].packets; 2386 i++; 2387 data[i] = adapter->tx_stats_buffers[j].bytes; 2388 i++; 2389 data[i] = adapter->tx_stats_buffers[j].dropped_packets; 2390 i++; 2391 } 2392 2393 for (j = 0; j < adapter->req_rx_queues; j++) { 2394 data[i] = adapter->rx_stats_buffers[j].packets; 2395 i++; 2396 data[i] = adapter->rx_stats_buffers[j].bytes; 2397 i++; 2398 data[i] = adapter->rx_stats_buffers[j].interrupts; 2399 i++; 2400 } 2401 } 2402 2403 static const struct ethtool_ops ibmvnic_ethtool_ops = { 2404 .get_drvinfo = ibmvnic_get_drvinfo, 2405 .get_msglevel = ibmvnic_get_msglevel, 2406 .set_msglevel = ibmvnic_set_msglevel, 2407 .get_link = ibmvnic_get_link, 2408 .get_ringparam = ibmvnic_get_ringparam, 2409 .set_ringparam = ibmvnic_set_ringparam, 2410 .get_channels = ibmvnic_get_channels, 2411 .set_channels = ibmvnic_set_channels, 2412 .get_strings = ibmvnic_get_strings, 2413 .get_sset_count = ibmvnic_get_sset_count, 2414 .get_ethtool_stats = ibmvnic_get_ethtool_stats, 2415 .get_link_ksettings = ibmvnic_get_link_ksettings, 2416 }; 2417 2418 /* Routines for managing CRQs/sCRQs */ 2419 2420 static int reset_one_sub_crq_queue(struct ibmvnic_adapter *adapter, 2421 struct ibmvnic_sub_crq_queue *scrq) 2422 { 2423 int rc; 2424 2425 if (scrq->irq) { 2426 free_irq(scrq->irq, scrq); 2427 irq_dispose_mapping(scrq->irq); 2428 scrq->irq = 0; 2429 } 2430 2431 memset(scrq->msgs, 0, 4 * PAGE_SIZE); 2432 atomic_set(&scrq->used, 0); 2433 scrq->cur = 0; 2434 2435 rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token, 2436 4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq); 2437 return rc; 2438 } 2439 2440 static int reset_sub_crq_queues(struct ibmvnic_adapter *adapter) 2441 { 2442 int i, rc; 2443 2444 for (i = 0; i < adapter->req_tx_queues; i++) { 2445 netdev_dbg(adapter->netdev, "Re-setting tx_scrq[%d]\n", i); 2446 rc = reset_one_sub_crq_queue(adapter, adapter->tx_scrq[i]); 2447 if (rc) 2448 return rc; 2449 } 2450 2451 for (i = 0; i < adapter->req_rx_queues; i++) { 2452 netdev_dbg(adapter->netdev, "Re-setting rx_scrq[%d]\n", i); 2453 rc = reset_one_sub_crq_queue(adapter, adapter->rx_scrq[i]); 2454 if (rc) 2455 return rc; 2456 } 2457 2458 return rc; 2459 } 2460 2461 static void release_sub_crq_queue(struct ibmvnic_adapter *adapter, 2462 struct ibmvnic_sub_crq_queue *scrq, 2463 bool do_h_free) 2464 { 2465 struct device *dev = &adapter->vdev->dev; 2466 long rc; 2467 2468 netdev_dbg(adapter->netdev, "Releasing sub-CRQ\n"); 2469 2470 if (do_h_free) { 2471 /* Close the sub-crqs */ 2472 do { 2473 rc = plpar_hcall_norets(H_FREE_SUB_CRQ, 2474 adapter->vdev->unit_address, 2475 scrq->crq_num); 2476 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc)); 2477 2478 if (rc) { 2479 netdev_err(adapter->netdev, 2480 "Failed to release sub-CRQ %16lx, rc = %ld\n", 2481 scrq->crq_num, rc); 2482 } 2483 } 2484 2485 dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE, 2486 DMA_BIDIRECTIONAL); 2487 free_pages((unsigned long)scrq->msgs, 2); 2488 kfree(scrq); 2489 } 2490 2491 static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter 2492 *adapter) 2493 { 2494 struct device *dev = &adapter->vdev->dev; 2495 struct ibmvnic_sub_crq_queue *scrq; 2496 int rc; 2497 2498 scrq = kzalloc(sizeof(*scrq), GFP_KERNEL); 2499 if (!scrq) 2500 return NULL; 2501 2502 scrq->msgs = 2503 (union sub_crq *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 2); 2504 if (!scrq->msgs) { 2505 dev_warn(dev, "Couldn't allocate crq queue messages page\n"); 2506 goto zero_page_failed; 2507 } 2508 2509 
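	/* DMA-map the order-2 (4 page) message area once up front; the
	 * mapping lives for the lifetime of the sub-CRQ and is registered
	 * with the hypervisor via h_reg_sub_crq() below, in keeping with
	 * the driver's long-term mapping model.
	 */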
scrq->msg_token = dma_map_single(dev, scrq->msgs, 4 * PAGE_SIZE, 2510 DMA_BIDIRECTIONAL); 2511 if (dma_mapping_error(dev, scrq->msg_token)) { 2512 dev_warn(dev, "Couldn't map crq queue messages page\n"); 2513 goto map_failed; 2514 } 2515 2516 rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token, 2517 4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq); 2518 2519 if (rc == H_RESOURCE) 2520 rc = ibmvnic_reset_crq(adapter); 2521 2522 if (rc == H_CLOSED) { 2523 dev_warn(dev, "Partner adapter not ready, waiting.\n"); 2524 } else if (rc) { 2525 dev_warn(dev, "Error %d registering sub-crq\n", rc); 2526 goto reg_failed; 2527 } 2528 2529 scrq->adapter = adapter; 2530 scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs); 2531 spin_lock_init(&scrq->lock); 2532 2533 netdev_dbg(adapter->netdev, 2534 "sub-crq initialized, num %lx, hw_irq=%lx, irq=%x\n", 2535 scrq->crq_num, scrq->hw_irq, scrq->irq); 2536 2537 return scrq; 2538 2539 reg_failed: 2540 dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE, 2541 DMA_BIDIRECTIONAL); 2542 map_failed: 2543 free_pages((unsigned long)scrq->msgs, 2); 2544 zero_page_failed: 2545 kfree(scrq); 2546 2547 return NULL; 2548 } 2549 2550 static void release_sub_crqs(struct ibmvnic_adapter *adapter, bool do_h_free) 2551 { 2552 int i; 2553 2554 if (adapter->tx_scrq) { 2555 for (i = 0; i < adapter->num_active_tx_scrqs; i++) { 2556 if (!adapter->tx_scrq[i]) 2557 continue; 2558 2559 netdev_dbg(adapter->netdev, "Releasing tx_scrq[%d]\n", 2560 i); 2561 if (adapter->tx_scrq[i]->irq) { 2562 free_irq(adapter->tx_scrq[i]->irq, 2563 adapter->tx_scrq[i]); 2564 irq_dispose_mapping(adapter->tx_scrq[i]->irq); 2565 adapter->tx_scrq[i]->irq = 0; 2566 } 2567 2568 release_sub_crq_queue(adapter, adapter->tx_scrq[i], 2569 do_h_free); 2570 } 2571 2572 kfree(adapter->tx_scrq); 2573 adapter->tx_scrq = NULL; 2574 adapter->num_active_tx_scrqs = 0; 2575 } 2576 2577 if (adapter->rx_scrq) { 2578 for (i = 0; i < adapter->num_active_rx_scrqs; i++) { 2579 if (!adapter->rx_scrq[i]) 2580 continue; 2581 2582 netdev_dbg(adapter->netdev, "Releasing rx_scrq[%d]\n", 2583 i); 2584 if (adapter->rx_scrq[i]->irq) { 2585 free_irq(adapter->rx_scrq[i]->irq, 2586 adapter->rx_scrq[i]); 2587 irq_dispose_mapping(adapter->rx_scrq[i]->irq); 2588 adapter->rx_scrq[i]->irq = 0; 2589 } 2590 2591 release_sub_crq_queue(adapter, adapter->rx_scrq[i], 2592 do_h_free); 2593 } 2594 2595 kfree(adapter->rx_scrq); 2596 adapter->rx_scrq = NULL; 2597 adapter->num_active_rx_scrqs = 0; 2598 } 2599 } 2600 2601 static int disable_scrq_irq(struct ibmvnic_adapter *adapter, 2602 struct ibmvnic_sub_crq_queue *scrq) 2603 { 2604 struct device *dev = &adapter->vdev->dev; 2605 unsigned long rc; 2606 2607 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address, 2608 H_DISABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0); 2609 if (rc) 2610 dev_err(dev, "Couldn't disable scrq irq 0x%lx. rc=%ld\n", 2611 scrq->hw_irq, rc); 2612 return rc; 2613 } 2614 2615 static int enable_scrq_irq(struct ibmvnic_adapter *adapter, 2616 struct ibmvnic_sub_crq_queue *scrq) 2617 { 2618 struct device *dev = &adapter->vdev->dev; 2619 unsigned long rc; 2620 u64 val; 2621 2622 if (scrq->hw_irq > 0x100000000ULL) { 2623 dev_err(dev, "bad hw_irq = %lx\n", scrq->hw_irq); 2624 return 1; 2625 } 2626 2627 val = (0xff000000) | scrq->hw_irq; 2628 rc = plpar_hcall_norets(H_EOI, val); 2629 if (rc) 2630 dev_err(dev, "H_EOI FAILED irq 0x%llx. 
rc=%ld\n", 2631 val, rc); 2632 2633 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address, 2634 H_ENABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0); 2635 if (rc) 2636 dev_err(dev, "Couldn't enable scrq irq 0x%lx. rc=%ld\n", 2637 scrq->hw_irq, rc); 2638 return rc; 2639 } 2640 2641 static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter, 2642 struct ibmvnic_sub_crq_queue *scrq) 2643 { 2644 struct device *dev = &adapter->vdev->dev; 2645 struct ibmvnic_tx_pool *tx_pool; 2646 struct ibmvnic_tx_buff *txbuff; 2647 union sub_crq *next; 2648 int index; 2649 int i, j; 2650 u8 *first; 2651 2652 restart_loop: 2653 while (pending_scrq(adapter, scrq)) { 2654 unsigned int pool = scrq->pool_index; 2655 int num_entries = 0; 2656 2657 next = ibmvnic_next_scrq(adapter, scrq); 2658 for (i = 0; i < next->tx_comp.num_comps; i++) { 2659 if (next->tx_comp.rcs[i]) { 2660 dev_err(dev, "tx error %x\n", 2661 next->tx_comp.rcs[i]); 2662 continue; 2663 } 2664 index = be32_to_cpu(next->tx_comp.correlators[i]); 2665 if (index & IBMVNIC_TSO_POOL_MASK) { 2666 tx_pool = &adapter->tso_pool[pool]; 2667 index &= ~IBMVNIC_TSO_POOL_MASK; 2668 } else { 2669 tx_pool = &adapter->tx_pool[pool]; 2670 } 2671 2672 txbuff = &tx_pool->tx_buff[index]; 2673 2674 for (j = 0; j < IBMVNIC_MAX_FRAGS_PER_CRQ; j++) { 2675 if (!txbuff->data_dma[j]) 2676 continue; 2677 2678 txbuff->data_dma[j] = 0; 2679 } 2680 /* if sub_crq was sent indirectly */ 2681 first = &txbuff->indir_arr[0].generic.first; 2682 if (*first == IBMVNIC_CRQ_CMD) { 2683 dma_unmap_single(dev, txbuff->indir_dma, 2684 sizeof(txbuff->indir_arr), 2685 DMA_TO_DEVICE); 2686 *first = 0; 2687 } 2688 2689 if (txbuff->last_frag) { 2690 dev_kfree_skb_any(txbuff->skb); 2691 txbuff->skb = NULL; 2692 } 2693 2694 num_entries += txbuff->num_entries; 2695 2696 tx_pool->free_map[tx_pool->producer_index] = index; 2697 tx_pool->producer_index = 2698 (tx_pool->producer_index + 1) % 2699 tx_pool->num_buffers; 2700 } 2701 /* remove tx_comp scrq*/ 2702 next->tx_comp.first = 0; 2703 2704 if (atomic_sub_return(num_entries, &scrq->used) <= 2705 (adapter->req_tx_entries_per_subcrq / 2) && 2706 __netif_subqueue_stopped(adapter->netdev, 2707 scrq->pool_index)) { 2708 netif_wake_subqueue(adapter->netdev, scrq->pool_index); 2709 netdev_dbg(adapter->netdev, "Started queue %d\n", 2710 scrq->pool_index); 2711 } 2712 } 2713 2714 enable_scrq_irq(adapter, scrq); 2715 2716 if (pending_scrq(adapter, scrq)) { 2717 disable_scrq_irq(adapter, scrq); 2718 goto restart_loop; 2719 } 2720 2721 return 0; 2722 } 2723 2724 static irqreturn_t ibmvnic_interrupt_tx(int irq, void *instance) 2725 { 2726 struct ibmvnic_sub_crq_queue *scrq = instance; 2727 struct ibmvnic_adapter *adapter = scrq->adapter; 2728 2729 disable_scrq_irq(adapter, scrq); 2730 ibmvnic_complete_tx(adapter, scrq); 2731 2732 return IRQ_HANDLED; 2733 } 2734 2735 static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance) 2736 { 2737 struct ibmvnic_sub_crq_queue *scrq = instance; 2738 struct ibmvnic_adapter *adapter = scrq->adapter; 2739 2740 /* When booting a kdump kernel we can hit pending interrupts 2741 * prior to completing driver initialization. 
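 * Returning IRQ_NONE for any state other than VNIC_OPEN leaves those
 * early interrupts to the kernel's spurious-interrupt handling.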
 */
	if (unlikely(adapter->state != VNIC_OPEN))
		return IRQ_NONE;

	adapter->rx_stats_buffers[scrq->scrq_num].interrupts++;

	if (napi_schedule_prep(&adapter->napi[scrq->scrq_num])) {
		disable_scrq_irq(adapter, scrq);
		__napi_schedule(&adapter->napi[scrq->scrq_num]);
	}

	return IRQ_HANDLED;
}

static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_sub_crq_queue *scrq;
	int i = 0, j = 0;
	int rc = 0;

	for (i = 0; i < adapter->req_tx_queues; i++) {
		netdev_dbg(adapter->netdev, "Initializing tx_scrq[%d] irq\n",
			   i);
		scrq = adapter->tx_scrq[i];
		scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);

		if (!scrq->irq) {
			rc = -EINVAL;
			dev_err(dev, "Error mapping irq\n");
			goto req_tx_irq_failed;
		}

		rc = request_irq(scrq->irq, ibmvnic_interrupt_tx,
				 0, "ibmvnic_tx", scrq);

		if (rc) {
			dev_err(dev, "Couldn't register tx irq 0x%x. rc=%d\n",
				scrq->irq, rc);
			irq_dispose_mapping(scrq->irq);
			goto req_tx_irq_failed;
		}
	}

	for (i = 0; i < adapter->req_rx_queues; i++) {
		netdev_dbg(adapter->netdev, "Initializing rx_scrq[%d] irq\n",
			   i);
		scrq = adapter->rx_scrq[i];
		scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
		if (!scrq->irq) {
			rc = -EINVAL;
			dev_err(dev, "Error mapping irq\n");
			goto req_rx_irq_failed;
		}
		rc = request_irq(scrq->irq, ibmvnic_interrupt_rx,
				 0, "ibmvnic_rx", scrq);
		if (rc) {
			dev_err(dev, "Couldn't register rx irq 0x%x. rc=%d\n",
				scrq->irq, rc);
			irq_dispose_mapping(scrq->irq);
			goto req_rx_irq_failed;
		}
	}
	return rc;

req_rx_irq_failed:
	for (j = 0; j < i; j++) {
		free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]);
		irq_dispose_mapping(adapter->rx_scrq[j]->irq);
	}
	i = adapter->req_tx_queues;
req_tx_irq_failed:
	for (j = 0; j < i; j++) {
		free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
		irq_dispose_mapping(adapter->tx_scrq[j]->irq);
	}
	release_sub_crqs(adapter, 1);
	return rc;
}

static int init_sub_crqs(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_sub_crq_queue **allqueues;
	int registered_queues = 0;
	int total_queues;
	int more = 0;
	int i;

	total_queues = adapter->req_tx_queues + adapter->req_rx_queues;

	allqueues = kcalloc(total_queues, sizeof(*allqueues), GFP_KERNEL);
	if (!allqueues)
		return -1;

	for (i = 0; i < total_queues; i++) {
		allqueues[i] = init_sub_crq_queue(adapter);
		if (!allqueues[i]) {
			dev_warn(dev, "Couldn't allocate all sub-crqs\n");
			break;
		}
		registered_queues++;
	}

	/* Make sure we were able to register the minimum number of queues */
	if (registered_queues <
	    adapter->min_tx_queues + adapter->min_rx_queues) {
		dev_err(dev, "Fatal: Couldn't init min number of sub-crqs\n");
		goto tx_failed;
	}

	/* Distribute the shortfall from failed allocations across the
	 * rx and tx queue requests
	 */
	for (i = 0; i < total_queues - registered_queues + more; i++) {
		netdev_dbg(adapter->netdev, "Reducing number of queues\n");
		switch (i % 3) {
		case 0:
			if (adapter->req_rx_queues > adapter->min_rx_queues)
				adapter->req_rx_queues--;
			else
				more++;
			break;
		case 1:
			if 
(adapter->req_tx_queues > adapter->min_tx_queues) 2865 adapter->req_tx_queues--; 2866 else 2867 more++; 2868 break; 2869 } 2870 } 2871 2872 adapter->tx_scrq = kcalloc(adapter->req_tx_queues, 2873 sizeof(*adapter->tx_scrq), GFP_KERNEL); 2874 if (!adapter->tx_scrq) 2875 goto tx_failed; 2876 2877 for (i = 0; i < adapter->req_tx_queues; i++) { 2878 adapter->tx_scrq[i] = allqueues[i]; 2879 adapter->tx_scrq[i]->pool_index = i; 2880 adapter->num_active_tx_scrqs++; 2881 } 2882 2883 adapter->rx_scrq = kcalloc(adapter->req_rx_queues, 2884 sizeof(*adapter->rx_scrq), GFP_KERNEL); 2885 if (!adapter->rx_scrq) 2886 goto rx_failed; 2887 2888 for (i = 0; i < adapter->req_rx_queues; i++) { 2889 adapter->rx_scrq[i] = allqueues[i + adapter->req_tx_queues]; 2890 adapter->rx_scrq[i]->scrq_num = i; 2891 adapter->num_active_rx_scrqs++; 2892 } 2893 2894 kfree(allqueues); 2895 return 0; 2896 2897 rx_failed: 2898 kfree(adapter->tx_scrq); 2899 adapter->tx_scrq = NULL; 2900 tx_failed: 2901 for (i = 0; i < registered_queues; i++) 2902 release_sub_crq_queue(adapter, allqueues[i], 1); 2903 kfree(allqueues); 2904 return -1; 2905 } 2906 2907 static void ibmvnic_send_req_caps(struct ibmvnic_adapter *adapter, int retry) 2908 { 2909 struct device *dev = &adapter->vdev->dev; 2910 union ibmvnic_crq crq; 2911 int max_entries; 2912 2913 if (!retry) { 2914 /* Sub-CRQ entries are 32 byte long */ 2915 int entries_page = 4 * PAGE_SIZE / (sizeof(u64) * 4); 2916 2917 if (adapter->min_tx_entries_per_subcrq > entries_page || 2918 adapter->min_rx_add_entries_per_subcrq > entries_page) { 2919 dev_err(dev, "Fatal, invalid entries per sub-crq\n"); 2920 return; 2921 } 2922 2923 if (adapter->desired.mtu) 2924 adapter->req_mtu = adapter->desired.mtu; 2925 else 2926 adapter->req_mtu = adapter->netdev->mtu + ETH_HLEN; 2927 2928 if (!adapter->desired.tx_entries) 2929 adapter->desired.tx_entries = 2930 adapter->max_tx_entries_per_subcrq; 2931 if (!adapter->desired.rx_entries) 2932 adapter->desired.rx_entries = 2933 adapter->max_rx_add_entries_per_subcrq; 2934 2935 max_entries = IBMVNIC_MAX_LTB_SIZE / 2936 (adapter->req_mtu + IBMVNIC_BUFFER_HLEN); 2937 2938 if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) * 2939 adapter->desired.tx_entries > IBMVNIC_MAX_LTB_SIZE) { 2940 adapter->desired.tx_entries = max_entries; 2941 } 2942 2943 if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) * 2944 adapter->desired.rx_entries > IBMVNIC_MAX_LTB_SIZE) { 2945 adapter->desired.rx_entries = max_entries; 2946 } 2947 2948 if (adapter->desired.tx_entries) 2949 adapter->req_tx_entries_per_subcrq = 2950 adapter->desired.tx_entries; 2951 else 2952 adapter->req_tx_entries_per_subcrq = 2953 adapter->max_tx_entries_per_subcrq; 2954 2955 if (adapter->desired.rx_entries) 2956 adapter->req_rx_add_entries_per_subcrq = 2957 adapter->desired.rx_entries; 2958 else 2959 adapter->req_rx_add_entries_per_subcrq = 2960 adapter->max_rx_add_entries_per_subcrq; 2961 2962 if (adapter->desired.tx_queues) 2963 adapter->req_tx_queues = 2964 adapter->desired.tx_queues; 2965 else 2966 adapter->req_tx_queues = 2967 adapter->opt_tx_comp_sub_queues; 2968 2969 if (adapter->desired.rx_queues) 2970 adapter->req_rx_queues = 2971 adapter->desired.rx_queues; 2972 else 2973 adapter->req_rx_queues = 2974 adapter->opt_rx_comp_queues; 2975 2976 adapter->req_rx_add_queues = adapter->max_rx_add_queues; 2977 } 2978 2979 memset(&crq, 0, sizeof(crq)); 2980 crq.request_capability.first = IBMVNIC_CRQ_CMD; 2981 crq.request_capability.cmd = REQUEST_CAPABILITY; 2982 2983 crq.request_capability.capability = 
cpu_to_be16(REQ_TX_QUEUES); 2984 crq.request_capability.number = cpu_to_be64(adapter->req_tx_queues); 2985 atomic_inc(&adapter->running_cap_crqs); 2986 ibmvnic_send_crq(adapter, &crq); 2987 2988 crq.request_capability.capability = cpu_to_be16(REQ_RX_QUEUES); 2989 crq.request_capability.number = cpu_to_be64(adapter->req_rx_queues); 2990 atomic_inc(&adapter->running_cap_crqs); 2991 ibmvnic_send_crq(adapter, &crq); 2992 2993 crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_QUEUES); 2994 crq.request_capability.number = cpu_to_be64(adapter->req_rx_add_queues); 2995 atomic_inc(&adapter->running_cap_crqs); 2996 ibmvnic_send_crq(adapter, &crq); 2997 2998 crq.request_capability.capability = 2999 cpu_to_be16(REQ_TX_ENTRIES_PER_SUBCRQ); 3000 crq.request_capability.number = 3001 cpu_to_be64(adapter->req_tx_entries_per_subcrq); 3002 atomic_inc(&adapter->running_cap_crqs); 3003 ibmvnic_send_crq(adapter, &crq); 3004 3005 crq.request_capability.capability = 3006 cpu_to_be16(REQ_RX_ADD_ENTRIES_PER_SUBCRQ); 3007 crq.request_capability.number = 3008 cpu_to_be64(adapter->req_rx_add_entries_per_subcrq); 3009 atomic_inc(&adapter->running_cap_crqs); 3010 ibmvnic_send_crq(adapter, &crq); 3011 3012 crq.request_capability.capability = cpu_to_be16(REQ_MTU); 3013 crq.request_capability.number = cpu_to_be64(adapter->req_mtu); 3014 atomic_inc(&adapter->running_cap_crqs); 3015 ibmvnic_send_crq(adapter, &crq); 3016 3017 if (adapter->netdev->flags & IFF_PROMISC) { 3018 if (adapter->promisc_supported) { 3019 crq.request_capability.capability = 3020 cpu_to_be16(PROMISC_REQUESTED); 3021 crq.request_capability.number = cpu_to_be64(1); 3022 atomic_inc(&adapter->running_cap_crqs); 3023 ibmvnic_send_crq(adapter, &crq); 3024 } 3025 } else { 3026 crq.request_capability.capability = 3027 cpu_to_be16(PROMISC_REQUESTED); 3028 crq.request_capability.number = cpu_to_be64(0); 3029 atomic_inc(&adapter->running_cap_crqs); 3030 ibmvnic_send_crq(adapter, &crq); 3031 } 3032 } 3033 3034 static int pending_scrq(struct ibmvnic_adapter *adapter, 3035 struct ibmvnic_sub_crq_queue *scrq) 3036 { 3037 union sub_crq *entry = &scrq->msgs[scrq->cur]; 3038 3039 if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP) 3040 return 1; 3041 else 3042 return 0; 3043 } 3044 3045 static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *adapter, 3046 struct ibmvnic_sub_crq_queue *scrq) 3047 { 3048 union sub_crq *entry; 3049 unsigned long flags; 3050 3051 spin_lock_irqsave(&scrq->lock, flags); 3052 entry = &scrq->msgs[scrq->cur]; 3053 if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP) { 3054 if (++scrq->cur == scrq->size) 3055 scrq->cur = 0; 3056 } else { 3057 entry = NULL; 3058 } 3059 spin_unlock_irqrestore(&scrq->lock, flags); 3060 3061 return entry; 3062 } 3063 3064 static union ibmvnic_crq *ibmvnic_next_crq(struct ibmvnic_adapter *adapter) 3065 { 3066 struct ibmvnic_crq_queue *queue = &adapter->crq; 3067 union ibmvnic_crq *crq; 3068 3069 crq = &queue->msgs[queue->cur]; 3070 if (crq->generic.first & IBMVNIC_CRQ_CMD_RSP) { 3071 if (++queue->cur == queue->size) 3072 queue->cur = 0; 3073 } else { 3074 crq = NULL; 3075 } 3076 3077 return crq; 3078 } 3079 3080 static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle, 3081 union sub_crq *sub_crq) 3082 { 3083 unsigned int ua = adapter->vdev->unit_address; 3084 struct device *dev = &adapter->vdev->dev; 3085 u64 *u64_crq = (u64 *)sub_crq; 3086 int rc; 3087 3088 netdev_dbg(adapter->netdev, 3089 "Sending sCRQ %016lx: %016lx %016lx %016lx %016lx\n", 3090 (unsigned long 
int)cpu_to_be64(remote_handle), 3091 (unsigned long int)cpu_to_be64(u64_crq[0]), 3092 (unsigned long int)cpu_to_be64(u64_crq[1]), 3093 (unsigned long int)cpu_to_be64(u64_crq[2]), 3094 (unsigned long int)cpu_to_be64(u64_crq[3])); 3095 3096 /* Make sure the hypervisor sees the complete request */ 3097 mb(); 3098 3099 rc = plpar_hcall_norets(H_SEND_SUB_CRQ, ua, 3100 cpu_to_be64(remote_handle), 3101 cpu_to_be64(u64_crq[0]), 3102 cpu_to_be64(u64_crq[1]), 3103 cpu_to_be64(u64_crq[2]), 3104 cpu_to_be64(u64_crq[3])); 3105 3106 if (rc) { 3107 if (rc == H_CLOSED) 3108 dev_warn(dev, "CRQ Queue closed\n"); 3109 dev_err(dev, "Send error (rc=%d)\n", rc); 3110 } 3111 3112 return rc; 3113 } 3114 3115 static int send_subcrq_indirect(struct ibmvnic_adapter *adapter, 3116 u64 remote_handle, u64 ioba, u64 num_entries) 3117 { 3118 unsigned int ua = adapter->vdev->unit_address; 3119 struct device *dev = &adapter->vdev->dev; 3120 int rc; 3121 3122 /* Make sure the hypervisor sees the complete request */ 3123 mb(); 3124 rc = plpar_hcall_norets(H_SEND_SUB_CRQ_INDIRECT, ua, 3125 cpu_to_be64(remote_handle), 3126 ioba, num_entries); 3127 3128 if (rc) { 3129 if (rc == H_CLOSED) 3130 dev_warn(dev, "CRQ Queue closed\n"); 3131 dev_err(dev, "Send (indirect) error (rc=%d)\n", rc); 3132 } 3133 3134 return rc; 3135 } 3136 3137 static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter, 3138 union ibmvnic_crq *crq) 3139 { 3140 unsigned int ua = adapter->vdev->unit_address; 3141 struct device *dev = &adapter->vdev->dev; 3142 u64 *u64_crq = (u64 *)crq; 3143 int rc; 3144 3145 netdev_dbg(adapter->netdev, "Sending CRQ: %016lx %016lx\n", 3146 (unsigned long int)cpu_to_be64(u64_crq[0]), 3147 (unsigned long int)cpu_to_be64(u64_crq[1])); 3148 3149 /* Make sure the hypervisor sees the complete request */ 3150 mb(); 3151 3152 rc = plpar_hcall_norets(H_SEND_CRQ, ua, 3153 cpu_to_be64(u64_crq[0]), 3154 cpu_to_be64(u64_crq[1])); 3155 3156 if (rc) { 3157 if (rc == H_CLOSED) { 3158 dev_warn(dev, "CRQ Queue closed\n"); 3159 if (adapter->resetting) 3160 ibmvnic_reset(adapter, VNIC_RESET_FATAL); 3161 } 3162 3163 dev_warn(dev, "Send error (rc=%d)\n", rc); 3164 } 3165 3166 return rc; 3167 } 3168 3169 static int ibmvnic_send_crq_init(struct ibmvnic_adapter *adapter) 3170 { 3171 union ibmvnic_crq crq; 3172 3173 memset(&crq, 0, sizeof(crq)); 3174 crq.generic.first = IBMVNIC_CRQ_INIT_CMD; 3175 crq.generic.cmd = IBMVNIC_CRQ_INIT; 3176 netdev_dbg(adapter->netdev, "Sending CRQ init\n"); 3177 3178 return ibmvnic_send_crq(adapter, &crq); 3179 } 3180 3181 static int send_version_xchg(struct ibmvnic_adapter *adapter) 3182 { 3183 union ibmvnic_crq crq; 3184 3185 memset(&crq, 0, sizeof(crq)); 3186 crq.version_exchange.first = IBMVNIC_CRQ_CMD; 3187 crq.version_exchange.cmd = VERSION_EXCHANGE; 3188 crq.version_exchange.version = cpu_to_be16(ibmvnic_version); 3189 3190 return ibmvnic_send_crq(adapter, &crq); 3191 } 3192 3193 struct vnic_login_client_data { 3194 u8 type; 3195 __be16 len; 3196 char name[]; 3197 } __packed; 3198 3199 static int vnic_client_data_len(struct ibmvnic_adapter *adapter) 3200 { 3201 int len; 3202 3203 /* Calculate the amount of buffer space needed for the 3204 * vnic client data in the login buffer. There are four entries, 3205 * OS name, LPAR name, device name, and a null last entry. 
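 * Each entry is a type byte and a big-endian length followed by a
 * NUL-terminated name string, so the byte counts below must match
 * what vnic_add_client_data() writes into the buffer.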
3206 */ 3207 len = 4 * sizeof(struct vnic_login_client_data); 3208 len += 6; /* "Linux" plus NULL */ 3209 len += strlen(utsname()->nodename) + 1; 3210 len += strlen(adapter->netdev->name) + 1; 3211 3212 return len; 3213 } 3214 3215 static void vnic_add_client_data(struct ibmvnic_adapter *adapter, 3216 struct vnic_login_client_data *vlcd) 3217 { 3218 const char *os_name = "Linux"; 3219 int len; 3220 3221 /* Type 1 - LPAR OS */ 3222 vlcd->type = 1; 3223 len = strlen(os_name) + 1; 3224 vlcd->len = cpu_to_be16(len); 3225 strncpy(vlcd->name, os_name, len); 3226 vlcd = (struct vnic_login_client_data *)(vlcd->name + len); 3227 3228 /* Type 2 - LPAR name */ 3229 vlcd->type = 2; 3230 len = strlen(utsname()->nodename) + 1; 3231 vlcd->len = cpu_to_be16(len); 3232 strncpy(vlcd->name, utsname()->nodename, len); 3233 vlcd = (struct vnic_login_client_data *)(vlcd->name + len); 3234 3235 /* Type 3 - device name */ 3236 vlcd->type = 3; 3237 len = strlen(adapter->netdev->name) + 1; 3238 vlcd->len = cpu_to_be16(len); 3239 strncpy(vlcd->name, adapter->netdev->name, len); 3240 } 3241 3242 static int send_login(struct ibmvnic_adapter *adapter) 3243 { 3244 struct ibmvnic_login_rsp_buffer *login_rsp_buffer; 3245 struct ibmvnic_login_buffer *login_buffer; 3246 struct device *dev = &adapter->vdev->dev; 3247 dma_addr_t rsp_buffer_token; 3248 dma_addr_t buffer_token; 3249 size_t rsp_buffer_size; 3250 union ibmvnic_crq crq; 3251 size_t buffer_size; 3252 __be64 *tx_list_p; 3253 __be64 *rx_list_p; 3254 int client_data_len; 3255 struct vnic_login_client_data *vlcd; 3256 int i; 3257 3258 if (!adapter->tx_scrq || !adapter->rx_scrq) { 3259 netdev_err(adapter->netdev, 3260 "RX or TX queues are not allocated, device login failed\n"); 3261 return -1; 3262 } 3263 3264 release_login_rsp_buffer(adapter); 3265 client_data_len = vnic_client_data_len(adapter); 3266 3267 buffer_size = 3268 sizeof(struct ibmvnic_login_buffer) + 3269 sizeof(u64) * (adapter->req_tx_queues + adapter->req_rx_queues) + 3270 client_data_len; 3271 3272 login_buffer = kzalloc(buffer_size, GFP_ATOMIC); 3273 if (!login_buffer) 3274 goto buf_alloc_failed; 3275 3276 buffer_token = dma_map_single(dev, login_buffer, buffer_size, 3277 DMA_TO_DEVICE); 3278 if (dma_mapping_error(dev, buffer_token)) { 3279 dev_err(dev, "Couldn't map login buffer\n"); 3280 goto buf_map_failed; 3281 } 3282 3283 rsp_buffer_size = sizeof(struct ibmvnic_login_rsp_buffer) + 3284 sizeof(u64) * adapter->req_tx_queues + 3285 sizeof(u64) * adapter->req_rx_queues + 3286 sizeof(u64) * adapter->req_rx_queues + 3287 sizeof(u8) * IBMVNIC_TX_DESC_VERSIONS; 3288 3289 login_rsp_buffer = kmalloc(rsp_buffer_size, GFP_ATOMIC); 3290 if (!login_rsp_buffer) 3291 goto buf_rsp_alloc_failed; 3292 3293 rsp_buffer_token = dma_map_single(dev, login_rsp_buffer, 3294 rsp_buffer_size, DMA_FROM_DEVICE); 3295 if (dma_mapping_error(dev, rsp_buffer_token)) { 3296 dev_err(dev, "Couldn't map login rsp buffer\n"); 3297 goto buf_rsp_map_failed; 3298 } 3299 3300 adapter->login_buf = login_buffer; 3301 adapter->login_buf_token = buffer_token; 3302 adapter->login_buf_sz = buffer_size; 3303 adapter->login_rsp_buf = login_rsp_buffer; 3304 adapter->login_rsp_buf_token = rsp_buffer_token; 3305 adapter->login_rsp_buf_sz = rsp_buffer_size; 3306 3307 login_buffer->len = cpu_to_be32(buffer_size); 3308 login_buffer->version = cpu_to_be32(INITIAL_VERSION_LB); 3309 login_buffer->num_txcomp_subcrqs = cpu_to_be32(adapter->req_tx_queues); 3310 login_buffer->off_txcomp_subcrqs = 3311 cpu_to_be32(sizeof(struct ibmvnic_login_buffer)); 3312 
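	/* The tx queue list sits immediately after the fixed-size header,
	 * the rx queue list follows it, and the client data comes last;
	 * these offsets must stay consistent with the buffer_size
	 * computed above.
	 */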
login_buffer->num_rxcomp_subcrqs = cpu_to_be32(adapter->req_rx_queues); 3313 login_buffer->off_rxcomp_subcrqs = 3314 cpu_to_be32(sizeof(struct ibmvnic_login_buffer) + 3315 sizeof(u64) * adapter->req_tx_queues); 3316 login_buffer->login_rsp_ioba = cpu_to_be32(rsp_buffer_token); 3317 login_buffer->login_rsp_len = cpu_to_be32(rsp_buffer_size); 3318 3319 tx_list_p = (__be64 *)((char *)login_buffer + 3320 sizeof(struct ibmvnic_login_buffer)); 3321 rx_list_p = (__be64 *)((char *)login_buffer + 3322 sizeof(struct ibmvnic_login_buffer) + 3323 sizeof(u64) * adapter->req_tx_queues); 3324 3325 for (i = 0; i < adapter->req_tx_queues; i++) { 3326 if (adapter->tx_scrq[i]) { 3327 tx_list_p[i] = cpu_to_be64(adapter->tx_scrq[i]-> 3328 crq_num); 3329 } 3330 } 3331 3332 for (i = 0; i < adapter->req_rx_queues; i++) { 3333 if (adapter->rx_scrq[i]) { 3334 rx_list_p[i] = cpu_to_be64(adapter->rx_scrq[i]-> 3335 crq_num); 3336 } 3337 } 3338 3339 /* Insert vNIC login client data */ 3340 vlcd = (struct vnic_login_client_data *) 3341 ((char *)rx_list_p + (sizeof(u64) * adapter->req_rx_queues)); 3342 login_buffer->client_data_offset = 3343 cpu_to_be32((char *)vlcd - (char *)login_buffer); 3344 login_buffer->client_data_len = cpu_to_be32(client_data_len); 3345 3346 vnic_add_client_data(adapter, vlcd); 3347 3348 netdev_dbg(adapter->netdev, "Login Buffer:\n"); 3349 for (i = 0; i < (adapter->login_buf_sz - 1) / 8 + 1; i++) { 3350 netdev_dbg(adapter->netdev, "%016lx\n", 3351 ((unsigned long int *)(adapter->login_buf))[i]); 3352 } 3353 3354 memset(&crq, 0, sizeof(crq)); 3355 crq.login.first = IBMVNIC_CRQ_CMD; 3356 crq.login.cmd = LOGIN; 3357 crq.login.ioba = cpu_to_be32(buffer_token); 3358 crq.login.len = cpu_to_be32(buffer_size); 3359 ibmvnic_send_crq(adapter, &crq); 3360 3361 return 0; 3362 3363 buf_rsp_map_failed: 3364 kfree(login_rsp_buffer); 3365 buf_rsp_alloc_failed: 3366 dma_unmap_single(dev, buffer_token, buffer_size, DMA_TO_DEVICE); 3367 buf_map_failed: 3368 kfree(login_buffer); 3369 buf_alloc_failed: 3370 return -1; 3371 } 3372 3373 static void send_request_map(struct ibmvnic_adapter *adapter, dma_addr_t addr, 3374 u32 len, u8 map_id) 3375 { 3376 union ibmvnic_crq crq; 3377 3378 memset(&crq, 0, sizeof(crq)); 3379 crq.request_map.first = IBMVNIC_CRQ_CMD; 3380 crq.request_map.cmd = REQUEST_MAP; 3381 crq.request_map.map_id = map_id; 3382 crq.request_map.ioba = cpu_to_be32(addr); 3383 crq.request_map.len = cpu_to_be32(len); 3384 ibmvnic_send_crq(adapter, &crq); 3385 } 3386 3387 static void send_request_unmap(struct ibmvnic_adapter *adapter, u8 map_id) 3388 { 3389 union ibmvnic_crq crq; 3390 3391 memset(&crq, 0, sizeof(crq)); 3392 crq.request_unmap.first = IBMVNIC_CRQ_CMD; 3393 crq.request_unmap.cmd = REQUEST_UNMAP; 3394 crq.request_unmap.map_id = map_id; 3395 ibmvnic_send_crq(adapter, &crq); 3396 } 3397 3398 static void send_map_query(struct ibmvnic_adapter *adapter) 3399 { 3400 union ibmvnic_crq crq; 3401 3402 memset(&crq, 0, sizeof(crq)); 3403 crq.query_map.first = IBMVNIC_CRQ_CMD; 3404 crq.query_map.cmd = QUERY_MAP; 3405 ibmvnic_send_crq(adapter, &crq); 3406 } 3407 3408 /* Send a series of CRQs requesting various capabilities of the VNIC server */ 3409 static void send_cap_queries(struct ibmvnic_adapter *adapter) 3410 { 3411 union ibmvnic_crq crq; 3412 3413 atomic_set(&adapter->running_cap_crqs, 0); 3414 memset(&crq, 0, sizeof(crq)); 3415 crq.query_capability.first = IBMVNIC_CRQ_CMD; 3416 crq.query_capability.cmd = QUERY_CAPABILITY; 3417 3418 crq.query_capability.capability = cpu_to_be16(MIN_TX_QUEUES); 3419 
atomic_inc(&adapter->running_cap_crqs); 3420 ibmvnic_send_crq(adapter, &crq); 3421 3422 crq.query_capability.capability = cpu_to_be16(MIN_RX_QUEUES); 3423 atomic_inc(&adapter->running_cap_crqs); 3424 ibmvnic_send_crq(adapter, &crq); 3425 3426 crq.query_capability.capability = cpu_to_be16(MIN_RX_ADD_QUEUES); 3427 atomic_inc(&adapter->running_cap_crqs); 3428 ibmvnic_send_crq(adapter, &crq); 3429 3430 crq.query_capability.capability = cpu_to_be16(MAX_TX_QUEUES); 3431 atomic_inc(&adapter->running_cap_crqs); 3432 ibmvnic_send_crq(adapter, &crq); 3433 3434 crq.query_capability.capability = cpu_to_be16(MAX_RX_QUEUES); 3435 atomic_inc(&adapter->running_cap_crqs); 3436 ibmvnic_send_crq(adapter, &crq); 3437 3438 crq.query_capability.capability = cpu_to_be16(MAX_RX_ADD_QUEUES); 3439 atomic_inc(&adapter->running_cap_crqs); 3440 ibmvnic_send_crq(adapter, &crq); 3441 3442 crq.query_capability.capability = 3443 cpu_to_be16(MIN_TX_ENTRIES_PER_SUBCRQ); 3444 atomic_inc(&adapter->running_cap_crqs); 3445 ibmvnic_send_crq(adapter, &crq); 3446 3447 crq.query_capability.capability = 3448 cpu_to_be16(MIN_RX_ADD_ENTRIES_PER_SUBCRQ); 3449 atomic_inc(&adapter->running_cap_crqs); 3450 ibmvnic_send_crq(adapter, &crq); 3451 3452 crq.query_capability.capability = 3453 cpu_to_be16(MAX_TX_ENTRIES_PER_SUBCRQ); 3454 atomic_inc(&adapter->running_cap_crqs); 3455 ibmvnic_send_crq(adapter, &crq); 3456 3457 crq.query_capability.capability = 3458 cpu_to_be16(MAX_RX_ADD_ENTRIES_PER_SUBCRQ); 3459 atomic_inc(&adapter->running_cap_crqs); 3460 ibmvnic_send_crq(adapter, &crq); 3461 3462 crq.query_capability.capability = cpu_to_be16(TCP_IP_OFFLOAD); 3463 atomic_inc(&adapter->running_cap_crqs); 3464 ibmvnic_send_crq(adapter, &crq); 3465 3466 crq.query_capability.capability = cpu_to_be16(PROMISC_SUPPORTED); 3467 atomic_inc(&adapter->running_cap_crqs); 3468 ibmvnic_send_crq(adapter, &crq); 3469 3470 crq.query_capability.capability = cpu_to_be16(MIN_MTU); 3471 atomic_inc(&adapter->running_cap_crqs); 3472 ibmvnic_send_crq(adapter, &crq); 3473 3474 crq.query_capability.capability = cpu_to_be16(MAX_MTU); 3475 atomic_inc(&adapter->running_cap_crqs); 3476 ibmvnic_send_crq(adapter, &crq); 3477 3478 crq.query_capability.capability = cpu_to_be16(MAX_MULTICAST_FILTERS); 3479 atomic_inc(&adapter->running_cap_crqs); 3480 ibmvnic_send_crq(adapter, &crq); 3481 3482 crq.query_capability.capability = cpu_to_be16(VLAN_HEADER_INSERTION); 3483 atomic_inc(&adapter->running_cap_crqs); 3484 ibmvnic_send_crq(adapter, &crq); 3485 3486 crq.query_capability.capability = cpu_to_be16(RX_VLAN_HEADER_INSERTION); 3487 atomic_inc(&adapter->running_cap_crqs); 3488 ibmvnic_send_crq(adapter, &crq); 3489 3490 crq.query_capability.capability = cpu_to_be16(MAX_TX_SG_ENTRIES); 3491 atomic_inc(&adapter->running_cap_crqs); 3492 ibmvnic_send_crq(adapter, &crq); 3493 3494 crq.query_capability.capability = cpu_to_be16(RX_SG_SUPPORTED); 3495 atomic_inc(&adapter->running_cap_crqs); 3496 ibmvnic_send_crq(adapter, &crq); 3497 3498 crq.query_capability.capability = cpu_to_be16(OPT_TX_COMP_SUB_QUEUES); 3499 atomic_inc(&adapter->running_cap_crqs); 3500 ibmvnic_send_crq(adapter, &crq); 3501 3502 crq.query_capability.capability = cpu_to_be16(OPT_RX_COMP_QUEUES); 3503 atomic_inc(&adapter->running_cap_crqs); 3504 ibmvnic_send_crq(adapter, &crq); 3505 3506 crq.query_capability.capability = 3507 cpu_to_be16(OPT_RX_BUFADD_Q_PER_RX_COMP_Q); 3508 atomic_inc(&adapter->running_cap_crqs); 3509 ibmvnic_send_crq(adapter, &crq); 3510 3511 crq.query_capability.capability = 3512 
cpu_to_be16(OPT_TX_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
			cpu_to_be16(OPT_RXBA_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(TX_RX_DESC_REQ);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);
}

static void handle_vpd_size_rsp(union ibmvnic_crq *crq,
				struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;

	if (crq->get_vpd_size_rsp.rc.code) {
		dev_err(dev, "Error retrieving VPD size, rc=%x\n",
			crq->get_vpd_size_rsp.rc.code);
		complete(&adapter->fw_done);
		return;
	}

	adapter->vpd->len = be64_to_cpu(crq->get_vpd_size_rsp.len);
	complete(&adapter->fw_done);
}

static void handle_vpd_rsp(union ibmvnic_crq *crq,
			   struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	unsigned char *substr = NULL;
	u8 fw_level_len = 0;

	memset(adapter->fw_version, 0, 32);

	dma_unmap_single(dev, adapter->vpd->dma_addr, adapter->vpd->len,
			 DMA_FROM_DEVICE);

	if (crq->get_vpd_rsp.rc.code) {
		dev_err(dev, "Error retrieving VPD from device, rc=%x\n",
			crq->get_vpd_rsp.rc.code);
		goto complete;
	}

	/* get the position of the firmware version info
	 * located after the ASCII 'RM' substring in the buffer
	 */
	substr = strnstr(adapter->vpd->buff, "RM", adapter->vpd->len);
	if (!substr) {
		dev_info(dev, "Warning - No FW level has been provided in the VPD buffer by the VIOS Server\n");
		goto complete;
	}

	/* get length of firmware level ASCII substring */
	if ((substr + 2) < (adapter->vpd->buff + adapter->vpd->len)) {
		fw_level_len = *(substr + 2);
	} else {
		dev_info(dev, "Length of FW substr extends past the end of the VPD buffer\n");
		goto complete;
	}

	/* copy firmware version string from vpd into adapter */
	if ((substr + 3 + fw_level_len) <
	    (adapter->vpd->buff + adapter->vpd->len)) {
		strncpy((char *)adapter->fw_version, substr + 3, fw_level_len);
	} else {
		dev_info(dev, "FW substr extends past the end of the VPD buffer\n");
	}

complete:
	if (adapter->fw_version[0] == '\0')
		strncpy((char *)adapter->fw_version, "N/A", 3 * sizeof(char));
	complete(&adapter->fw_done);
}

static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
	union ibmvnic_crq crq;
	int i;

	dma_unmap_single(dev, adapter->ip_offload_tok,
			 sizeof(adapter->ip_offload_buf), DMA_FROM_DEVICE);

	netdev_dbg(adapter->netdev, "Query IP Offload Buffer:\n");
	for (i = 0; i < (sizeof(adapter->ip_offload_buf) - 1) / 8 + 1; i++)
		netdev_dbg(adapter->netdev, "%016lx\n",
			   ((unsigned long int *)(buf))[i]);

	netdev_dbg(adapter->netdev, "ipv4_chksum = %d\n", buf->ipv4_chksum);
	netdev_dbg(adapter->netdev, "ipv6_chksum = %d\n", buf->ipv6_chksum);
	netdev_dbg(adapter->netdev, "tcp_ipv4_chksum = %d\n",
		   buf->tcp_ipv4_chksum);
	netdev_dbg(adapter->netdev, "tcp_ipv6_chksum = %d\n",
		   buf->tcp_ipv6_chksum);
	netdev_dbg(adapter->netdev, "udp_ipv4_chksum = %d\n",
		   buf->udp_ipv4_chksum);
	
netdev_dbg(adapter->netdev, "udp_ipv6_chksum = %d\n", 3615 buf->udp_ipv6_chksum); 3616 netdev_dbg(adapter->netdev, "large_tx_ipv4 = %d\n", 3617 buf->large_tx_ipv4); 3618 netdev_dbg(adapter->netdev, "large_tx_ipv6 = %d\n", 3619 buf->large_tx_ipv6); 3620 netdev_dbg(adapter->netdev, "large_rx_ipv4 = %d\n", 3621 buf->large_rx_ipv4); 3622 netdev_dbg(adapter->netdev, "large_rx_ipv6 = %d\n", 3623 buf->large_rx_ipv6); 3624 netdev_dbg(adapter->netdev, "max_ipv4_hdr_sz = %d\n", 3625 buf->max_ipv4_header_size); 3626 netdev_dbg(adapter->netdev, "max_ipv6_hdr_sz = %d\n", 3627 buf->max_ipv6_header_size); 3628 netdev_dbg(adapter->netdev, "max_tcp_hdr_size = %d\n", 3629 buf->max_tcp_header_size); 3630 netdev_dbg(adapter->netdev, "max_udp_hdr_size = %d\n", 3631 buf->max_udp_header_size); 3632 netdev_dbg(adapter->netdev, "max_large_tx_size = %d\n", 3633 buf->max_large_tx_size); 3634 netdev_dbg(adapter->netdev, "max_large_rx_size = %d\n", 3635 buf->max_large_rx_size); 3636 netdev_dbg(adapter->netdev, "ipv6_ext_hdr = %d\n", 3637 buf->ipv6_extension_header); 3638 netdev_dbg(adapter->netdev, "tcp_pseudosum_req = %d\n", 3639 buf->tcp_pseudosum_req); 3640 netdev_dbg(adapter->netdev, "num_ipv6_ext_hd = %d\n", 3641 buf->num_ipv6_ext_headers); 3642 netdev_dbg(adapter->netdev, "off_ipv6_ext_hd = %d\n", 3643 buf->off_ipv6_ext_headers); 3644 3645 adapter->ip_offload_ctrl_tok = 3646 dma_map_single(dev, &adapter->ip_offload_ctrl, 3647 sizeof(adapter->ip_offload_ctrl), DMA_TO_DEVICE); 3648 3649 if (dma_mapping_error(dev, adapter->ip_offload_ctrl_tok)) { 3650 dev_err(dev, "Couldn't map ip offload control buffer\n"); 3651 return; 3652 } 3653 3654 adapter->ip_offload_ctrl.len = 3655 cpu_to_be32(sizeof(adapter->ip_offload_ctrl)); 3656 adapter->ip_offload_ctrl.version = cpu_to_be32(INITIAL_VERSION_IOB); 3657 adapter->ip_offload_ctrl.ipv4_chksum = buf->ipv4_chksum; 3658 adapter->ip_offload_ctrl.ipv6_chksum = buf->ipv6_chksum; 3659 adapter->ip_offload_ctrl.tcp_ipv4_chksum = buf->tcp_ipv4_chksum; 3660 adapter->ip_offload_ctrl.udp_ipv4_chksum = buf->udp_ipv4_chksum; 3661 adapter->ip_offload_ctrl.tcp_ipv6_chksum = buf->tcp_ipv6_chksum; 3662 adapter->ip_offload_ctrl.udp_ipv6_chksum = buf->udp_ipv6_chksum; 3663 adapter->ip_offload_ctrl.large_tx_ipv4 = buf->large_tx_ipv4; 3664 adapter->ip_offload_ctrl.large_tx_ipv6 = buf->large_tx_ipv6; 3665 3666 /* large_rx disabled for now, additional features needed */ 3667 adapter->ip_offload_ctrl.large_rx_ipv4 = 0; 3668 adapter->ip_offload_ctrl.large_rx_ipv6 = 0; 3669 3670 adapter->netdev->features = NETIF_F_SG | NETIF_F_GSO; 3671 3672 if (buf->tcp_ipv4_chksum || buf->udp_ipv4_chksum) 3673 adapter->netdev->features |= NETIF_F_IP_CSUM; 3674 3675 if (buf->tcp_ipv6_chksum || buf->udp_ipv6_chksum) 3676 adapter->netdev->features |= NETIF_F_IPV6_CSUM; 3677 3678 if ((adapter->netdev->features & 3679 (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))) 3680 adapter->netdev->features |= NETIF_F_RXCSUM; 3681 3682 if (buf->large_tx_ipv4) 3683 adapter->netdev->features |= NETIF_F_TSO; 3684 if (buf->large_tx_ipv6) 3685 adapter->netdev->features |= NETIF_F_TSO6; 3686 3687 adapter->netdev->hw_features |= adapter->netdev->features; 3688 3689 memset(&crq, 0, sizeof(crq)); 3690 crq.control_ip_offload.first = IBMVNIC_CRQ_CMD; 3691 crq.control_ip_offload.cmd = CONTROL_IP_OFFLOAD; 3692 crq.control_ip_offload.len = 3693 cpu_to_be32(sizeof(adapter->ip_offload_ctrl)); 3694 crq.control_ip_offload.ioba = cpu_to_be32(adapter->ip_offload_ctrl_tok); 3695 ibmvnic_send_crq(adapter, &crq); 3696 } 3697 3698 static void 
handle_error_info_rsp(union ibmvnic_crq *crq,
				  struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_error_buff *error_buff, *tmp;
	unsigned long flags;
	bool found = false;
	int i;

	if (crq->request_error_rsp.rc.code) {
		dev_info(dev, "Request Error Rsp returned with rc=%x\n",
			 crq->request_error_rsp.rc.code);
		return;
	}

	spin_lock_irqsave(&adapter->error_list_lock, flags);
	list_for_each_entry_safe(error_buff, tmp, &adapter->errors, list)
		if (error_buff->error_id == crq->request_error_rsp.error_id) {
			found = true;
			list_del(&error_buff->list);
			break;
		}
	spin_unlock_irqrestore(&adapter->error_list_lock, flags);

	if (!found) {
		dev_err(dev, "Couldn't find error id %x\n",
			be32_to_cpu(crq->request_error_rsp.error_id));
		return;
	}

	dev_err(dev, "Detailed info for error id %x:",
		be32_to_cpu(crq->request_error_rsp.error_id));

	for (i = 0; i < error_buff->len; i++) {
		pr_cont("%02x", (int)error_buff->buff[i]);
		if (i % 8 == 7)
			pr_cont(" ");
	}
	pr_cont("\n");

	dma_unmap_single(dev, error_buff->dma, error_buff->len,
			 DMA_FROM_DEVICE);
	kfree(error_buff->buff);
	kfree(error_buff);
}

static void request_error_information(struct ibmvnic_adapter *adapter,
				      union ibmvnic_crq *err_crq)
{
	struct device *dev = &adapter->vdev->dev;
	struct net_device *netdev = adapter->netdev;
	struct ibmvnic_error_buff *error_buff;
	unsigned long timeout = msecs_to_jiffies(30000);
	union ibmvnic_crq crq;
	unsigned long flags;
	int rc, detail_len;

	error_buff = kmalloc(sizeof(*error_buff), GFP_ATOMIC);
	if (!error_buff)
		return;

	detail_len = be32_to_cpu(err_crq->error_indication.detail_error_sz);
	error_buff->buff = kmalloc(detail_len, GFP_ATOMIC);
	if (!error_buff->buff) {
		kfree(error_buff);
		return;
	}

	error_buff->dma = dma_map_single(dev, error_buff->buff, detail_len,
					 DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, error_buff->dma)) {
		netdev_err(netdev, "Couldn't map error buffer\n");
		kfree(error_buff->buff);
		kfree(error_buff);
		return;
	}

	error_buff->len = detail_len;
	error_buff->error_id = err_crq->error_indication.error_id;

	spin_lock_irqsave(&adapter->error_list_lock, flags);
	list_add_tail(&error_buff->list, &adapter->errors);
	spin_unlock_irqrestore(&adapter->error_list_lock, flags);

	memset(&crq, 0, sizeof(crq));
	crq.request_error_info.first = IBMVNIC_CRQ_CMD;
	crq.request_error_info.cmd = REQUEST_ERROR_INFO;
	crq.request_error_info.ioba = cpu_to_be32(error_buff->dma);
	crq.request_error_info.len = cpu_to_be32(detail_len);
	crq.request_error_info.error_id = err_crq->error_indication.error_id;

	rc = ibmvnic_send_crq(adapter, &crq);
	if (rc) {
		netdev_err(netdev, "failed to request error information\n");
		goto err_info_fail;
	}

	if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
		netdev_err(netdev, "timeout waiting for error information\n");
		goto err_info_fail;
	}

	return;

err_info_fail:
	spin_lock_irqsave(&adapter->error_list_lock, flags);
	list_del(&error_buff->list);
	spin_unlock_irqrestore(&adapter->error_list_lock, flags);

	kfree(error_buff->buff);
	kfree(error_buff);
}
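/* Handle an ERROR_INDICATION CRQ from firmware: log the error, request
 * the detailed error buffer when an error id is supplied, and schedule
 * a fatal or non-fatal reset based on the indication flags.
 */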
static void handle_error_indication(union ibmvnic_crq *crq,
				    struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;

	dev_err(dev, "Firmware reports %serror id %x, cause %d\n",
		crq->error_indication.flags
			& IBMVNIC_FATAL_ERROR ? "FATAL " : "",
		be32_to_cpu(crq->error_indication.error_id),
		be16_to_cpu(crq->error_indication.error_cause));

	if (be32_to_cpu(crq->error_indication.error_id))
		request_error_information(adapter, crq);

	if (crq->error_indication.flags & IBMVNIC_FATAL_ERROR)
		ibmvnic_reset(adapter, VNIC_RESET_FATAL);
	else
		ibmvnic_reset(adapter, VNIC_RESET_NON_FATAL);
}

static int handle_change_mac_rsp(union ibmvnic_crq *crq,
				 struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	long rc;

	rc = crq->change_mac_addr_rsp.rc.code;
	if (rc) {
		dev_err(dev, "Error %ld in CHANGE_MAC_ADDR_RSP\n", rc);
		goto out;
	}
	memcpy(netdev->dev_addr, &crq->change_mac_addr_rsp.mac_addr[0],
	       ETH_ALEN);
out:
	complete(&adapter->fw_done);
	return rc;
}

static void handle_request_cap_rsp(union ibmvnic_crq *crq,
				   struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	u64 *req_value;
	char *name;

	atomic_dec(&adapter->running_cap_crqs);
	switch (be16_to_cpu(crq->request_capability_rsp.capability)) {
	case REQ_TX_QUEUES:
		req_value = &adapter->req_tx_queues;
		name = "tx";
		break;
	case REQ_RX_QUEUES:
		req_value = &adapter->req_rx_queues;
		name = "rx";
		break;
	case REQ_RX_ADD_QUEUES:
		req_value = &adapter->req_rx_add_queues;
		name = "rx_add";
		break;
	case REQ_TX_ENTRIES_PER_SUBCRQ:
		req_value = &adapter->req_tx_entries_per_subcrq;
		name = "tx_entries_per_subcrq";
		break;
	case REQ_RX_ADD_ENTRIES_PER_SUBCRQ:
		req_value = &adapter->req_rx_add_entries_per_subcrq;
		name = "rx_add_entries_per_subcrq";
		break;
	case REQ_MTU:
		req_value = &adapter->req_mtu;
		name = "mtu";
		break;
	case PROMISC_REQUESTED:
		req_value = &adapter->promisc;
		name = "promisc";
		break;
	default:
		dev_err(dev, "Got invalid cap request rsp %d\n",
			be16_to_cpu(crq->request_capability.capability));
		return;
	}

	switch (crq->request_capability_rsp.rc.code) {
	case SUCCESS:
		break;
	case PARTIALSUCCESS:
		dev_info(dev, "req=%lld, rsp=%ld in %s queue, retrying.\n",
			 *req_value,
			 (long int)be64_to_cpu(crq->request_capability_rsp.
					       number), name);

		if (be16_to_cpu(crq->request_capability_rsp.capability) ==
		    REQ_MTU) {
			pr_err("mtu of %llu is not supported. 
Reverting.\n", 3905 *req_value); 3906 *req_value = adapter->fallback.mtu; 3907 } else { 3908 *req_value = 3909 be64_to_cpu(crq->request_capability_rsp.number); 3910 } 3911 3912 ibmvnic_send_req_caps(adapter, 1); 3913 return; 3914 default: 3915 dev_err(dev, "Error %d in request cap rsp\n", 3916 crq->request_capability_rsp.rc.code); 3917 return; 3918 } 3919 3920 /* Done receiving requested capabilities, query IP offload support */ 3921 if (atomic_read(&adapter->running_cap_crqs) == 0) { 3922 union ibmvnic_crq newcrq; 3923 int buf_sz = sizeof(struct ibmvnic_query_ip_offload_buffer); 3924 struct ibmvnic_query_ip_offload_buffer *ip_offload_buf = 3925 &adapter->ip_offload_buf; 3926 3927 adapter->wait_capability = false; 3928 adapter->ip_offload_tok = dma_map_single(dev, ip_offload_buf, 3929 buf_sz, 3930 DMA_FROM_DEVICE); 3931 3932 if (dma_mapping_error(dev, adapter->ip_offload_tok)) { 3933 if (!firmware_has_feature(FW_FEATURE_CMO)) 3934 dev_err(dev, "Couldn't map offload buffer\n"); 3935 return; 3936 } 3937 3938 memset(&newcrq, 0, sizeof(newcrq)); 3939 newcrq.query_ip_offload.first = IBMVNIC_CRQ_CMD; 3940 newcrq.query_ip_offload.cmd = QUERY_IP_OFFLOAD; 3941 newcrq.query_ip_offload.len = cpu_to_be32(buf_sz); 3942 newcrq.query_ip_offload.ioba = 3943 cpu_to_be32(adapter->ip_offload_tok); 3944 3945 ibmvnic_send_crq(adapter, &newcrq); 3946 } 3947 } 3948 3949 static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq, 3950 struct ibmvnic_adapter *adapter) 3951 { 3952 struct device *dev = &adapter->vdev->dev; 3953 struct net_device *netdev = adapter->netdev; 3954 struct ibmvnic_login_rsp_buffer *login_rsp = adapter->login_rsp_buf; 3955 struct ibmvnic_login_buffer *login = adapter->login_buf; 3956 int i; 3957 3958 dma_unmap_single(dev, adapter->login_buf_token, adapter->login_buf_sz, 3959 DMA_TO_DEVICE); 3960 dma_unmap_single(dev, adapter->login_rsp_buf_token, 3961 adapter->login_rsp_buf_sz, DMA_FROM_DEVICE); 3962 3963 /* If the number of queues requested can't be allocated by the 3964 * server, the login response will return with code 1. We will need 3965 * to resend the login buffer with fewer queues requested. 
	 */
	if (login_rsp_crq->generic.rc.code) {
		adapter->init_done_rc = login_rsp_crq->generic.rc.code;
		complete(&adapter->init_done);
		return 0;
	}

	netdev->mtu = adapter->req_mtu - ETH_HLEN;

	netdev_dbg(adapter->netdev, "Login Response Buffer:\n");
	for (i = 0; i < (adapter->login_rsp_buf_sz - 1) / 8 + 1; i++) {
		netdev_dbg(adapter->netdev, "%016lx\n",
			   ((unsigned long int *)(adapter->login_rsp_buf))[i]);
	}

	/* Sanity checks */
	if (login->num_txcomp_subcrqs != login_rsp->num_txsubm_subcrqs ||
	    (be32_to_cpu(login->num_rxcomp_subcrqs) *
	     adapter->req_rx_add_queues !=
	     be32_to_cpu(login_rsp->num_rxadd_subcrqs))) {
		dev_err(dev, "FATAL: Inconsistent login and login rsp\n");
		ibmvnic_remove(adapter->vdev);
		return -EIO;
	}
	release_login_buffer(adapter);
	complete(&adapter->init_done);

	return 0;
}

static void handle_request_unmap_rsp(union ibmvnic_crq *crq,
				     struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	long rc;

	rc = crq->request_unmap_rsp.rc.code;
	if (rc)
		dev_err(dev, "Error %ld in REQUEST_UNMAP_RSP\n", rc);
}

static void handle_query_map_rsp(union ibmvnic_crq *crq,
				 struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	long rc;

	rc = crq->query_map_rsp.rc.code;
	if (rc) {
		dev_err(dev, "Error %ld in QUERY_MAP_RSP\n", rc);
		return;
	}
	netdev_dbg(netdev, "page_size = %d\ntot_pages = %u\nfree_pages = %u\n",
		   crq->query_map_rsp.page_size,
		   be32_to_cpu(crq->query_map_rsp.tot_pages),
		   be32_to_cpu(crq->query_map_rsp.free_pages));
}

static void handle_query_cap_rsp(union ibmvnic_crq *crq,
				 struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	long rc;

	atomic_dec(&adapter->running_cap_crqs);
	netdev_dbg(netdev, "Outstanding queries: %d\n",
		   atomic_read(&adapter->running_cap_crqs));
	rc = crq->query_capability.rc.code;
	if (rc) {
		dev_err(dev, "Error %ld in QUERY_CAP_RSP\n", rc);
		goto out;
	}

	switch (be16_to_cpu(crq->query_capability.capability)) {
	case MIN_TX_QUEUES:
		adapter->min_tx_queues =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_tx_queues = %lld\n",
			   adapter->min_tx_queues);
		break;
	case MIN_RX_QUEUES:
		adapter->min_rx_queues =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_rx_queues = %lld\n",
			   adapter->min_rx_queues);
		break;
	case MIN_RX_ADD_QUEUES:
		adapter->min_rx_add_queues =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_rx_add_queues = %lld\n",
			   adapter->min_rx_add_queues);
		break;
	case MAX_TX_QUEUES:
		adapter->max_tx_queues =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_tx_queues = %lld\n",
			   adapter->max_tx_queues);
		break;
	case MAX_RX_QUEUES:
		adapter->max_rx_queues =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_rx_queues = %lld\n",
			   adapter->max_rx_queues);
		break;
	case MAX_RX_ADD_QUEUES:
		adapter->max_rx_add_queues =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_rx_add_queues = %lld\n",
			   adapter->max_rx_add_queues);
		break;
case MIN_TX_ENTRIES_PER_SUBCRQ: 4078 adapter->min_tx_entries_per_subcrq = 4079 be64_to_cpu(crq->query_capability.number); 4080 netdev_dbg(netdev, "min_tx_entries_per_subcrq = %lld\n", 4081 adapter->min_tx_entries_per_subcrq); 4082 break; 4083 case MIN_RX_ADD_ENTRIES_PER_SUBCRQ: 4084 adapter->min_rx_add_entries_per_subcrq = 4085 be64_to_cpu(crq->query_capability.number); 4086 netdev_dbg(netdev, "min_rx_add_entrs_per_subcrq = %lld\n", 4087 adapter->min_rx_add_entries_per_subcrq); 4088 break; 4089 case MAX_TX_ENTRIES_PER_SUBCRQ: 4090 adapter->max_tx_entries_per_subcrq = 4091 be64_to_cpu(crq->query_capability.number); 4092 netdev_dbg(netdev, "max_tx_entries_per_subcrq = %lld\n", 4093 adapter->max_tx_entries_per_subcrq); 4094 break; 4095 case MAX_RX_ADD_ENTRIES_PER_SUBCRQ: 4096 adapter->max_rx_add_entries_per_subcrq = 4097 be64_to_cpu(crq->query_capability.number); 4098 netdev_dbg(netdev, "max_rx_add_entrs_per_subcrq = %lld\n", 4099 adapter->max_rx_add_entries_per_subcrq); 4100 break; 4101 case TCP_IP_OFFLOAD: 4102 adapter->tcp_ip_offload = 4103 be64_to_cpu(crq->query_capability.number); 4104 netdev_dbg(netdev, "tcp_ip_offload = %lld\n", 4105 adapter->tcp_ip_offload); 4106 break; 4107 case PROMISC_SUPPORTED: 4108 adapter->promisc_supported = 4109 be64_to_cpu(crq->query_capability.number); 4110 netdev_dbg(netdev, "promisc_supported = %lld\n", 4111 adapter->promisc_supported); 4112 break; 4113 case MIN_MTU: 4114 adapter->min_mtu = be64_to_cpu(crq->query_capability.number); 4115 netdev->min_mtu = adapter->min_mtu - ETH_HLEN; 4116 netdev_dbg(netdev, "min_mtu = %lld\n", adapter->min_mtu); 4117 break; 4118 case MAX_MTU: 4119 adapter->max_mtu = be64_to_cpu(crq->query_capability.number); 4120 netdev->max_mtu = adapter->max_mtu - ETH_HLEN; 4121 netdev_dbg(netdev, "max_mtu = %lld\n", adapter->max_mtu); 4122 break; 4123 case MAX_MULTICAST_FILTERS: 4124 adapter->max_multicast_filters = 4125 be64_to_cpu(crq->query_capability.number); 4126 netdev_dbg(netdev, "max_multicast_filters = %lld\n", 4127 adapter->max_multicast_filters); 4128 break; 4129 case VLAN_HEADER_INSERTION: 4130 adapter->vlan_header_insertion = 4131 be64_to_cpu(crq->query_capability.number); 4132 if (adapter->vlan_header_insertion) 4133 netdev->features |= NETIF_F_HW_VLAN_STAG_TX; 4134 netdev_dbg(netdev, "vlan_header_insertion = %lld\n", 4135 adapter->vlan_header_insertion); 4136 break; 4137 case RX_VLAN_HEADER_INSERTION: 4138 adapter->rx_vlan_header_insertion = 4139 be64_to_cpu(crq->query_capability.number); 4140 netdev_dbg(netdev, "rx_vlan_header_insertion = %lld\n", 4141 adapter->rx_vlan_header_insertion); 4142 break; 4143 case MAX_TX_SG_ENTRIES: 4144 adapter->max_tx_sg_entries = 4145 be64_to_cpu(crq->query_capability.number); 4146 netdev_dbg(netdev, "max_tx_sg_entries = %lld\n", 4147 adapter->max_tx_sg_entries); 4148 break; 4149 case RX_SG_SUPPORTED: 4150 adapter->rx_sg_supported = 4151 be64_to_cpu(crq->query_capability.number); 4152 netdev_dbg(netdev, "rx_sg_supported = %lld\n", 4153 adapter->rx_sg_supported); 4154 break; 4155 case OPT_TX_COMP_SUB_QUEUES: 4156 adapter->opt_tx_comp_sub_queues = 4157 be64_to_cpu(crq->query_capability.number); 4158 netdev_dbg(netdev, "opt_tx_comp_sub_queues = %lld\n", 4159 adapter->opt_tx_comp_sub_queues); 4160 break; 4161 case OPT_RX_COMP_QUEUES: 4162 adapter->opt_rx_comp_queues = 4163 be64_to_cpu(crq->query_capability.number); 4164 netdev_dbg(netdev, "opt_rx_comp_queues = %lld\n", 4165 adapter->opt_rx_comp_queues); 4166 break; 4167 case OPT_RX_BUFADD_Q_PER_RX_COMP_Q: 4168 
		adapter->opt_rx_bufadd_q_per_rx_comp_q =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_rx_bufadd_q_per_rx_comp_q = %lld\n",
			   adapter->opt_rx_bufadd_q_per_rx_comp_q);
		break;
	case OPT_TX_ENTRIES_PER_SUBCRQ:
		adapter->opt_tx_entries_per_subcrq =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_tx_entries_per_subcrq = %lld\n",
			   adapter->opt_tx_entries_per_subcrq);
		break;
	case OPT_RXBA_ENTRIES_PER_SUBCRQ:
		adapter->opt_rxba_entries_per_subcrq =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_rxba_entries_per_subcrq = %lld\n",
			   adapter->opt_rxba_entries_per_subcrq);
		break;
	case TX_RX_DESC_REQ:
		adapter->tx_rx_desc_req = crq->query_capability.number;
		netdev_dbg(netdev, "tx_rx_desc_req = %llx\n",
			   adapter->tx_rx_desc_req);
		break;

	default:
		netdev_err(netdev, "Got invalid cap rsp %d\n",
			   be16_to_cpu(crq->query_capability.capability));
	}

out:
	if (atomic_read(&adapter->running_cap_crqs) == 0) {
		adapter->wait_capability = false;
		ibmvnic_send_req_caps(adapter, 0);
	}
}

static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
			       struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_generic_crq *gen_crq = &crq->generic;
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	u64 *u64_crq = (u64 *)crq;
	long rc;

	netdev_dbg(netdev, "Handling CRQ: %016lx %016lx\n",
		   (unsigned long int)be64_to_cpu(u64_crq[0]),
		   (unsigned long int)be64_to_cpu(u64_crq[1]));
	switch (gen_crq->first) {
	case IBMVNIC_CRQ_INIT_RSP:
		switch (gen_crq->cmd) {
		case IBMVNIC_CRQ_INIT:
			dev_info(dev, "Partner initialized\n");
			adapter->from_passive_init = true;
			adapter->failover_pending = false;
			complete(&adapter->init_done);
			ibmvnic_reset(adapter, VNIC_RESET_FAILOVER);
			break;
		case IBMVNIC_CRQ_INIT_COMPLETE:
			dev_info(dev, "Partner initialization complete\n");
			send_version_xchg(adapter);
			break;
		default:
			dev_err(dev, "Unknown crq cmd: %d\n", gen_crq->cmd);
		}
		return;
	case IBMVNIC_CRQ_XPORT_EVENT:
		netif_carrier_off(netdev);
		if (gen_crq->cmd == IBMVNIC_PARTITION_MIGRATED) {
			dev_info(dev, "Migrated, re-enabling adapter\n");
			ibmvnic_reset(adapter, VNIC_RESET_MOBILITY);
		} else if (gen_crq->cmd == IBMVNIC_DEVICE_FAILOVER) {
			dev_info(dev, "Backing device failover detected\n");
			adapter->failover_pending = true;
		} else {
			/* The adapter lost the connection */
			dev_err(dev, "Virtual Adapter failed (rc=%d)\n",
				gen_crq->cmd);
			ibmvnic_reset(adapter, VNIC_RESET_FATAL);
		}
		return;
	case IBMVNIC_CRQ_CMD_RSP:
		break;
	default:
		dev_err(dev, "Got an invalid msg type 0x%02x\n",
			gen_crq->first);
		return;
	}

	switch (gen_crq->cmd) {
	case VERSION_EXCHANGE_RSP:
		rc = crq->version_exchange_rsp.rc.code;
		if (rc) {
			dev_err(dev, "Error %ld in VERSION_EXCHG_RSP\n", rc);
			break;
		}
		dev_info(dev, "Partner protocol version is %d\n",
			 be16_to_cpu(crq->version_exchange_rsp.version));
		if (be16_to_cpu(crq->version_exchange_rsp.version) <
		    ibmvnic_version)
			ibmvnic_version =
				be16_to_cpu(crq->version_exchange_rsp.version);
		send_cap_queries(adapter);
		break;
	case QUERY_CAPABILITY_RSP:
		handle_query_cap_rsp(crq, adapter);
		break;
	case
QUERY_MAP_RSP: 4275 handle_query_map_rsp(crq, adapter); 4276 break; 4277 case REQUEST_MAP_RSP: 4278 adapter->fw_done_rc = crq->request_map_rsp.rc.code; 4279 complete(&adapter->fw_done); 4280 break; 4281 case REQUEST_UNMAP_RSP: 4282 handle_request_unmap_rsp(crq, adapter); 4283 break; 4284 case REQUEST_CAPABILITY_RSP: 4285 handle_request_cap_rsp(crq, adapter); 4286 break; 4287 case LOGIN_RSP: 4288 netdev_dbg(netdev, "Got Login Response\n"); 4289 handle_login_rsp(crq, adapter); 4290 break; 4291 case LOGICAL_LINK_STATE_RSP: 4292 netdev_dbg(netdev, 4293 "Got Logical Link State Response, state: %d rc: %d\n", 4294 crq->logical_link_state_rsp.link_state, 4295 crq->logical_link_state_rsp.rc.code); 4296 adapter->logical_link_state = 4297 crq->logical_link_state_rsp.link_state; 4298 adapter->init_done_rc = crq->logical_link_state_rsp.rc.code; 4299 complete(&adapter->init_done); 4300 break; 4301 case LINK_STATE_INDICATION: 4302 netdev_dbg(netdev, "Got Logical Link State Indication\n"); 4303 adapter->phys_link_state = 4304 crq->link_state_indication.phys_link_state; 4305 adapter->logical_link_state = 4306 crq->link_state_indication.logical_link_state; 4307 break; 4308 case CHANGE_MAC_ADDR_RSP: 4309 netdev_dbg(netdev, "Got MAC address change Response\n"); 4310 adapter->fw_done_rc = handle_change_mac_rsp(crq, adapter); 4311 break; 4312 case ERROR_INDICATION: 4313 netdev_dbg(netdev, "Got Error Indication\n"); 4314 handle_error_indication(crq, adapter); 4315 break; 4316 case REQUEST_ERROR_RSP: 4317 netdev_dbg(netdev, "Got Error Detail Response\n"); 4318 handle_error_info_rsp(crq, adapter); 4319 break; 4320 case REQUEST_STATISTICS_RSP: 4321 netdev_dbg(netdev, "Got Statistics Response\n"); 4322 complete(&adapter->stats_done); 4323 break; 4324 case QUERY_IP_OFFLOAD_RSP: 4325 netdev_dbg(netdev, "Got Query IP offload Response\n"); 4326 handle_query_ip_offload_rsp(adapter); 4327 break; 4328 case MULTICAST_CTRL_RSP: 4329 netdev_dbg(netdev, "Got multicast control Response\n"); 4330 break; 4331 case CONTROL_IP_OFFLOAD_RSP: 4332 netdev_dbg(netdev, "Got Control IP offload Response\n"); 4333 dma_unmap_single(dev, adapter->ip_offload_ctrl_tok, 4334 sizeof(adapter->ip_offload_ctrl), 4335 DMA_TO_DEVICE); 4336 complete(&adapter->init_done); 4337 break; 4338 case COLLECT_FW_TRACE_RSP: 4339 netdev_dbg(netdev, "Got Collect firmware trace Response\n"); 4340 complete(&adapter->fw_done); 4341 break; 4342 case GET_VPD_SIZE_RSP: 4343 handle_vpd_size_rsp(crq, adapter); 4344 break; 4345 case GET_VPD_RSP: 4346 handle_vpd_rsp(crq, adapter); 4347 break; 4348 default: 4349 netdev_err(netdev, "Got an invalid cmd type 0x%02x\n", 4350 gen_crq->cmd); 4351 } 4352 } 4353 4354 static irqreturn_t ibmvnic_interrupt(int irq, void *instance) 4355 { 4356 struct ibmvnic_adapter *adapter = instance; 4357 4358 tasklet_schedule(&adapter->tasklet); 4359 return IRQ_HANDLED; 4360 } 4361 4362 static void ibmvnic_tasklet(void *data) 4363 { 4364 struct ibmvnic_adapter *adapter = data; 4365 struct ibmvnic_crq_queue *queue = &adapter->crq; 4366 union ibmvnic_crq *crq; 4367 unsigned long flags; 4368 bool done = false; 4369 4370 spin_lock_irqsave(&queue->lock, flags); 4371 while (!done) { 4372 /* Pull all the valid messages off the CRQ */ 4373 while ((crq = ibmvnic_next_crq(adapter)) != NULL) { 4374 ibmvnic_handle_crq(crq, adapter); 4375 crq->generic.first = 0; 4376 } 4377 4378 /* remain in tasklet until all 4379 * capabilities responses are received 4380 */ 4381 if (!adapter->wait_capability) 4382 done = true; 4383 } 4384 /* if capabilities CRQ's were sent in 
this tasklet, the following 4385 * tasklet must wait until all responses are received 4386 */ 4387 if (atomic_read(&adapter->running_cap_crqs) != 0) 4388 adapter->wait_capability = true; 4389 spin_unlock_irqrestore(&queue->lock, flags); 4390 } 4391 4392 static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *adapter) 4393 { 4394 struct vio_dev *vdev = adapter->vdev; 4395 int rc; 4396 4397 do { 4398 rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address); 4399 } while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc)); 4400 4401 if (rc) 4402 dev_err(&vdev->dev, "Error enabling adapter (rc=%d)\n", rc); 4403 4404 return rc; 4405 } 4406 4407 static int ibmvnic_reset_crq(struct ibmvnic_adapter *adapter) 4408 { 4409 struct ibmvnic_crq_queue *crq = &adapter->crq; 4410 struct device *dev = &adapter->vdev->dev; 4411 struct vio_dev *vdev = adapter->vdev; 4412 int rc; 4413 4414 /* Close the CRQ */ 4415 do { 4416 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address); 4417 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc)); 4418 4419 /* Clean out the queue */ 4420 memset(crq->msgs, 0, PAGE_SIZE); 4421 crq->cur = 0; 4422 4423 /* And re-open it again */ 4424 rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address, 4425 crq->msg_token, PAGE_SIZE); 4426 4427 if (rc == H_CLOSED) 4428 /* Adapter is good, but other end is not ready */ 4429 dev_warn(dev, "Partner adapter not ready\n"); 4430 else if (rc != 0) 4431 dev_warn(dev, "Couldn't register crq (rc=%d)\n", rc); 4432 4433 return rc; 4434 } 4435 4436 static void release_crq_queue(struct ibmvnic_adapter *adapter) 4437 { 4438 struct ibmvnic_crq_queue *crq = &adapter->crq; 4439 struct vio_dev *vdev = adapter->vdev; 4440 long rc; 4441 4442 if (!crq->msgs) 4443 return; 4444 4445 netdev_dbg(adapter->netdev, "Releasing CRQ\n"); 4446 free_irq(vdev->irq, adapter); 4447 tasklet_kill(&adapter->tasklet); 4448 do { 4449 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address); 4450 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc)); 4451 4452 dma_unmap_single(&vdev->dev, crq->msg_token, PAGE_SIZE, 4453 DMA_BIDIRECTIONAL); 4454 free_page((unsigned long)crq->msgs); 4455 crq->msgs = NULL; 4456 } 4457 4458 static int init_crq_queue(struct ibmvnic_adapter *adapter) 4459 { 4460 struct ibmvnic_crq_queue *crq = &adapter->crq; 4461 struct device *dev = &adapter->vdev->dev; 4462 struct vio_dev *vdev = adapter->vdev; 4463 int rc, retrc = -ENOMEM; 4464 4465 if (crq->msgs) 4466 return 0; 4467 4468 crq->msgs = (union ibmvnic_crq *)get_zeroed_page(GFP_KERNEL); 4469 /* Should we allocate more than one page? */ 4470 4471 if (!crq->msgs) 4472 return -ENOMEM; 4473 4474 crq->size = PAGE_SIZE / sizeof(*crq->msgs); 4475 crq->msg_token = dma_map_single(dev, crq->msgs, PAGE_SIZE, 4476 DMA_BIDIRECTIONAL); 4477 if (dma_mapping_error(dev, crq->msg_token)) 4478 goto map_failed; 4479 4480 rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address, 4481 crq->msg_token, PAGE_SIZE); 4482 4483 if (rc == H_RESOURCE) 4484 /* maybe kexecing and resource is busy. 
try a reset */ 4485 rc = ibmvnic_reset_crq(adapter); 4486 retrc = rc; 4487 4488 if (rc == H_CLOSED) { 4489 dev_warn(dev, "Partner adapter not ready\n"); 4490 } else if (rc) { 4491 dev_warn(dev, "Error %d opening adapter\n", rc); 4492 goto reg_crq_failed; 4493 } 4494 4495 retrc = 0; 4496 4497 tasklet_init(&adapter->tasklet, (void *)ibmvnic_tasklet, 4498 (unsigned long)adapter); 4499 4500 netdev_dbg(adapter->netdev, "registering irq 0x%x\n", vdev->irq); 4501 rc = request_irq(vdev->irq, ibmvnic_interrupt, 0, IBMVNIC_NAME, 4502 adapter); 4503 if (rc) { 4504 dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n", 4505 vdev->irq, rc); 4506 goto req_irq_failed; 4507 } 4508 4509 rc = vio_enable_interrupts(vdev); 4510 if (rc) { 4511 dev_err(dev, "Error %d enabling interrupts\n", rc); 4512 goto req_irq_failed; 4513 } 4514 4515 crq->cur = 0; 4516 spin_lock_init(&crq->lock); 4517 4518 return retrc; 4519 4520 req_irq_failed: 4521 tasklet_kill(&adapter->tasklet); 4522 do { 4523 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address); 4524 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc)); 4525 reg_crq_failed: 4526 dma_unmap_single(dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL); 4527 map_failed: 4528 free_page((unsigned long)crq->msgs); 4529 crq->msgs = NULL; 4530 return retrc; 4531 } 4532 4533 static int ibmvnic_init(struct ibmvnic_adapter *adapter) 4534 { 4535 struct device *dev = &adapter->vdev->dev; 4536 unsigned long timeout = msecs_to_jiffies(30000); 4537 u64 old_num_rx_queues, old_num_tx_queues; 4538 int rc; 4539 4540 adapter->from_passive_init = false; 4541 4542 old_num_rx_queues = adapter->req_rx_queues; 4543 old_num_tx_queues = adapter->req_tx_queues; 4544 4545 init_completion(&adapter->init_done); 4546 adapter->init_done_rc = 0; 4547 ibmvnic_send_crq_init(adapter); 4548 if (!wait_for_completion_timeout(&adapter->init_done, timeout)) { 4549 dev_err(dev, "Initialization sequence timed out\n"); 4550 return -1; 4551 } 4552 4553 if (adapter->init_done_rc) { 4554 release_crq_queue(adapter); 4555 return adapter->init_done_rc; 4556 } 4557 4558 if (adapter->from_passive_init) { 4559 adapter->state = VNIC_OPEN; 4560 adapter->from_passive_init = false; 4561 return -1; 4562 } 4563 4564 if (adapter->resetting && !adapter->wait_for_reset && 4565 adapter->reset_reason != VNIC_RESET_MOBILITY) { 4566 if (adapter->req_rx_queues != old_num_rx_queues || 4567 adapter->req_tx_queues != old_num_tx_queues) { 4568 release_sub_crqs(adapter, 0); 4569 rc = init_sub_crqs(adapter); 4570 } else { 4571 rc = reset_sub_crq_queues(adapter); 4572 } 4573 } else { 4574 rc = init_sub_crqs(adapter); 4575 } 4576 4577 if (rc) { 4578 dev_err(dev, "Initialization of sub crqs failed\n"); 4579 release_crq_queue(adapter); 4580 return rc; 4581 } 4582 4583 rc = init_sub_crq_irqs(adapter); 4584 if (rc) { 4585 dev_err(dev, "Failed to initialize sub crq irqs\n"); 4586 release_crq_queue(adapter); 4587 } 4588 4589 return rc; 4590 } 4591 4592 static struct device_attribute dev_attr_failover; 4593 4594 static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id) 4595 { 4596 struct ibmvnic_adapter *adapter; 4597 struct net_device *netdev; 4598 unsigned char *mac_addr_p; 4599 int rc; 4600 4601 dev_dbg(&dev->dev, "entering ibmvnic_probe for UA 0x%x\n", 4602 dev->unit_address); 4603 4604 mac_addr_p = (unsigned char *)vio_get_attribute(dev, 4605 VETH_MAC_ADDR, NULL); 4606 if (!mac_addr_p) { 4607 dev_err(&dev->dev, 4608 "(%s:%3.3d) ERROR: Can't find MAC_ADDR attribute\n", 4609 __FILE__, __LINE__); 4610 return 0; 4611 } 4612 4613 netdev = 
alloc_etherdev_mq(sizeof(struct ibmvnic_adapter), 4614 IBMVNIC_MAX_QUEUES); 4615 if (!netdev) 4616 return -ENOMEM; 4617 4618 adapter = netdev_priv(netdev); 4619 adapter->state = VNIC_PROBING; 4620 dev_set_drvdata(&dev->dev, netdev); 4621 adapter->vdev = dev; 4622 adapter->netdev = netdev; 4623 4624 ether_addr_copy(adapter->mac_addr, mac_addr_p); 4625 ether_addr_copy(netdev->dev_addr, adapter->mac_addr); 4626 netdev->irq = dev->irq; 4627 netdev->netdev_ops = &ibmvnic_netdev_ops; 4628 netdev->ethtool_ops = &ibmvnic_ethtool_ops; 4629 SET_NETDEV_DEV(netdev, &dev->dev); 4630 4631 spin_lock_init(&adapter->stats_lock); 4632 4633 INIT_LIST_HEAD(&adapter->errors); 4634 spin_lock_init(&adapter->error_list_lock); 4635 4636 INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset); 4637 INIT_LIST_HEAD(&adapter->rwi_list); 4638 mutex_init(&adapter->reset_lock); 4639 mutex_init(&adapter->rwi_lock); 4640 adapter->resetting = false; 4641 4642 adapter->mac_change_pending = false; 4643 4644 do { 4645 rc = init_crq_queue(adapter); 4646 if (rc) { 4647 dev_err(&dev->dev, "Couldn't initialize crq. rc=%d\n", 4648 rc); 4649 goto ibmvnic_init_fail; 4650 } 4651 4652 rc = ibmvnic_init(adapter); 4653 if (rc && rc != EAGAIN) 4654 goto ibmvnic_init_fail; 4655 } while (rc == EAGAIN); 4656 4657 rc = init_stats_buffers(adapter); 4658 if (rc) 4659 goto ibmvnic_init_fail; 4660 4661 rc = init_stats_token(adapter); 4662 if (rc) 4663 goto ibmvnic_stats_fail; 4664 4665 netdev->mtu = adapter->req_mtu - ETH_HLEN; 4666 netdev->min_mtu = adapter->min_mtu - ETH_HLEN; 4667 netdev->max_mtu = adapter->max_mtu - ETH_HLEN; 4668 4669 rc = device_create_file(&dev->dev, &dev_attr_failover); 4670 if (rc) 4671 goto ibmvnic_dev_file_err; 4672 4673 netif_carrier_off(netdev); 4674 rc = register_netdev(netdev); 4675 if (rc) { 4676 dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc); 4677 goto ibmvnic_register_fail; 4678 } 4679 dev_info(&dev->dev, "ibmvnic registered\n"); 4680 4681 adapter->state = VNIC_PROBED; 4682 4683 adapter->wait_for_reset = false; 4684 4685 return 0; 4686 4687 ibmvnic_register_fail: 4688 device_remove_file(&dev->dev, &dev_attr_failover); 4689 4690 ibmvnic_dev_file_err: 4691 release_stats_token(adapter); 4692 4693 ibmvnic_stats_fail: 4694 release_stats_buffers(adapter); 4695 4696 ibmvnic_init_fail: 4697 release_sub_crqs(adapter, 1); 4698 release_crq_queue(adapter); 4699 free_netdev(netdev); 4700 4701 return rc; 4702 } 4703 4704 static int ibmvnic_remove(struct vio_dev *dev) 4705 { 4706 struct net_device *netdev = dev_get_drvdata(&dev->dev); 4707 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 4708 4709 adapter->state = VNIC_REMOVING; 4710 unregister_netdev(netdev); 4711 mutex_lock(&adapter->reset_lock); 4712 4713 release_resources(adapter); 4714 release_sub_crqs(adapter, 1); 4715 release_crq_queue(adapter); 4716 4717 release_stats_token(adapter); 4718 release_stats_buffers(adapter); 4719 4720 adapter->state = VNIC_REMOVED; 4721 4722 mutex_unlock(&adapter->reset_lock); 4723 device_remove_file(&dev->dev, &dev_attr_failover); 4724 free_netdev(netdev); 4725 dev_set_drvdata(&dev->dev, NULL); 4726 4727 return 0; 4728 } 4729 4730 static ssize_t failover_store(struct device *dev, struct device_attribute *attr, 4731 const char *buf, size_t count) 4732 { 4733 struct net_device *netdev = dev_get_drvdata(dev); 4734 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 4735 unsigned long retbuf[PLPAR_HCALL_BUFSIZE]; 4736 __be64 session_token; 4737 long rc; 4738 4739 if (!sysfs_streq(buf, "1")) 4740 return -EINVAL; 4741 4742 rc = 
plpar_hcall(H_VIOCTL, retbuf, adapter->vdev->unit_address, 4743 H_GET_SESSION_TOKEN, 0, 0, 0); 4744 if (rc) { 4745 netdev_err(netdev, "Couldn't retrieve session token, rc %ld\n", 4746 rc); 4747 return -EINVAL; 4748 } 4749 4750 session_token = (__be64)retbuf[0]; 4751 netdev_dbg(netdev, "Initiating client failover, session id %llx\n", 4752 be64_to_cpu(session_token)); 4753 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address, 4754 H_SESSION_ERR_DETECTED, session_token, 0, 0); 4755 if (rc) { 4756 netdev_err(netdev, "Client initiated failover failed, rc %ld\n", 4757 rc); 4758 return -EINVAL; 4759 } 4760 4761 return count; 4762 } 4763 4764 static DEVICE_ATTR_WO(failover); 4765 4766 static unsigned long ibmvnic_get_desired_dma(struct vio_dev *vdev) 4767 { 4768 struct net_device *netdev = dev_get_drvdata(&vdev->dev); 4769 struct ibmvnic_adapter *adapter; 4770 struct iommu_table *tbl; 4771 unsigned long ret = 0; 4772 int i; 4773 4774 tbl = get_iommu_table_base(&vdev->dev); 4775 4776 /* netdev inits at probe time along with the structures we need below*/ 4777 if (!netdev) 4778 return IOMMU_PAGE_ALIGN(IBMVNIC_IO_ENTITLEMENT_DEFAULT, tbl); 4779 4780 adapter = netdev_priv(netdev); 4781 4782 ret += PAGE_SIZE; /* the crq message queue */ 4783 ret += IOMMU_PAGE_ALIGN(sizeof(struct ibmvnic_statistics), tbl); 4784 4785 for (i = 0; i < adapter->req_tx_queues + adapter->req_rx_queues; i++) 4786 ret += 4 * PAGE_SIZE; /* the scrq message queue */ 4787 4788 for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs); 4789 i++) 4790 ret += adapter->rx_pool[i].size * 4791 IOMMU_PAGE_ALIGN(adapter->rx_pool[i].buff_size, tbl); 4792 4793 return ret; 4794 } 4795 4796 static int ibmvnic_resume(struct device *dev) 4797 { 4798 struct net_device *netdev = dev_get_drvdata(dev); 4799 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 4800 4801 if (adapter->state != VNIC_OPEN) 4802 return 0; 4803 4804 tasklet_schedule(&adapter->tasklet); 4805 4806 return 0; 4807 } 4808 4809 static const struct vio_device_id ibmvnic_device_table[] = { 4810 {"network", "IBM,vnic"}, 4811 {"", "" } 4812 }; 4813 MODULE_DEVICE_TABLE(vio, ibmvnic_device_table); 4814 4815 static const struct dev_pm_ops ibmvnic_pm_ops = { 4816 .resume = ibmvnic_resume 4817 }; 4818 4819 static struct vio_driver ibmvnic_driver = { 4820 .id_table = ibmvnic_device_table, 4821 .probe = ibmvnic_probe, 4822 .remove = ibmvnic_remove, 4823 .get_desired_dma = ibmvnic_get_desired_dma, 4824 .name = ibmvnic_driver_name, 4825 .pm = &ibmvnic_pm_ops, 4826 }; 4827 4828 /* module functions */ 4829 static int __init ibmvnic_module_init(void) 4830 { 4831 pr_info("%s: %s %s\n", ibmvnic_driver_name, ibmvnic_driver_string, 4832 IBMVNIC_DRIVER_VERSION); 4833 4834 return vio_register_driver(&ibmvnic_driver); 4835 } 4836 4837 static void __exit ibmvnic_module_exit(void) 4838 { 4839 vio_unregister_driver(&ibmvnic_driver); 4840 } 4841 4842 module_init(ibmvnic_module_init); 4843 module_exit(ibmvnic_module_exit); 4844
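
/*
 * Usage sketch (illustrative; the exact sysfs path depends on the
 * platform): once this module is loaded and a "network"/"IBM,vnic" VIO
 * device has been probed, a client-initiated failover can be requested
 * through the sysfs attribute created in ibmvnic_probe(), e.g.
 *
 *	echo 1 > /sys/devices/vio/<unit-address>/failover
 *
 * where <unit-address> is the adapter's VIO unit address. failover_store()
 * above then obtains a session token via H_VIOCTL/H_GET_SESSION_TOKEN and
 * signals H_SESSION_ERR_DETECTED so the hypervisor initiates the failover.
 */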