xref: /openbmc/linux/drivers/net/ethernet/ibm/ibmvnic.c (revision 0760aad038b5a032c31ea124feed63d88627d2f1)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /**************************************************************************/
3 /*                                                                        */
4 /*  IBM System i and System p Virtual NIC Device Driver                   */
5 /*  Copyright (C) 2014 IBM Corp.                                          */
6 /*  Santiago Leon (santi_leon@yahoo.com)                                  */
7 /*  Thomas Falcon (tlfalcon@linux.vnet.ibm.com)                           */
8 /*  John Allen (jallen@linux.vnet.ibm.com)                                */
9 /*                                                                        */
10 /*                                                                        */
11 /* This module contains the implementation of a virtual ethernet device   */
12 /* for use with IBM i/p Series LPAR Linux. It utilizes the logical LAN    */
13 /* option of the RS/6000 Platform Architecture to interface with virtual  */
14 /* ethernet NICs that are presented to the partition by the hypervisor.   */
15 /*									   */
16 /* Messages are passed between the VNIC driver and the VNIC server using  */
17 /* Command/Response Queues (CRQs) and sub CRQs (sCRQs). CRQs are used to  */
18 /* issue and receive commands that initiate communication with the server */
19 /* on driver initialization. Sub CRQs (sCRQs) are similar to CRQs, but    */
20 /* are used by the driver to notify the server that a packet is           */
21 /* ready for transmission or that a buffer has been added to receive a    */
22 /* packet. Subsequently, sCRQs are used by the server to notify the       */
23 /* driver that a packet transmission has been completed or that a packet  */
24 /* has been received and placed in a waiting buffer.                      */
25 /*                                                                        */
26 /* In lieu of a more conventional "on-the-fly" DMA mapping strategy in    */
27 /* which skbs are DMA mapped and immediately unmapped when the transmit   */
28 /* or receive has been completed, the VNIC driver is required to use      */
29 /* "long term mapping". This entails that large, continuous DMA mapped    */
30 /* buffers are allocated on driver initialization and these buffers are   */
31 /* then continuously reused to pass skbs to and from the VNIC server.     */
32 /*                                                                        */
33 /**************************************************************************/
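
/* Illustrative sketch of the "long term mapping" lifecycle described
 * above (not driver code; it simply names the helpers defined later in
 * this file):
 *
 *	struct ibmvnic_long_term_buff ltb = { 0 };
 *
 *	alloc_long_term_buff(adapter, &ltb, size);	// dma_alloc_coherent()
 *							// + map request to the
 *							// VNIC server
 *	// ... ltb.buff is then reused for many skbs; frame data is
 *	// copied in and out rather than mapped per packet ...
 *	free_long_term_buff(adapter, &ltb);		// unmap request + free
 */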
34 
35 #include <linux/module.h>
36 #include <linux/moduleparam.h>
37 #include <linux/types.h>
38 #include <linux/errno.h>
39 #include <linux/completion.h>
40 #include <linux/ioport.h>
41 #include <linux/dma-mapping.h>
42 #include <linux/kernel.h>
43 #include <linux/netdevice.h>
44 #include <linux/etherdevice.h>
45 #include <linux/skbuff.h>
46 #include <linux/init.h>
47 #include <linux/delay.h>
48 #include <linux/mm.h>
49 #include <linux/ethtool.h>
50 #include <linux/proc_fs.h>
51 #include <linux/if_arp.h>
52 #include <linux/in.h>
53 #include <linux/ip.h>
54 #include <linux/ipv6.h>
55 #include <linux/irq.h>
56 #include <linux/kthread.h>
57 #include <linux/seq_file.h>
58 #include <linux/interrupt.h>
59 #include <net/net_namespace.h>
60 #include <asm/hvcall.h>
61 #include <linux/atomic.h>
62 #include <asm/vio.h>
63 #include <asm/iommu.h>
64 #include <linux/uaccess.h>
65 #include <asm/firmware.h>
66 #include <linux/workqueue.h>
67 #include <linux/if_vlan.h>
68 #include <linux/utsname.h>
69 
70 #include "ibmvnic.h"
71 
72 static const char ibmvnic_driver_name[] = "ibmvnic";
73 static const char ibmvnic_driver_string[] = "IBM System i/p Virtual NIC Driver";
74 
75 MODULE_AUTHOR("Santiago Leon");
76 MODULE_DESCRIPTION("IBM System i/p Virtual NIC Driver");
77 MODULE_LICENSE("GPL");
78 MODULE_VERSION(IBMVNIC_DRIVER_VERSION);
79 
80 static int ibmvnic_version = IBMVNIC_INITIAL_VERSION;
81 static int ibmvnic_remove(struct vio_dev *);
82 static void release_sub_crqs(struct ibmvnic_adapter *, bool);
83 static int ibmvnic_reset_crq(struct ibmvnic_adapter *);
84 static int ibmvnic_send_crq_init(struct ibmvnic_adapter *);
85 static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *);
86 static int ibmvnic_send_crq(struct ibmvnic_adapter *, union ibmvnic_crq *);
87 static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
88 		       union sub_crq *sub_crq);
89 static int send_subcrq_indirect(struct ibmvnic_adapter *, u64, u64, u64);
90 static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance);
91 static int enable_scrq_irq(struct ibmvnic_adapter *,
92 			   struct ibmvnic_sub_crq_queue *);
93 static int disable_scrq_irq(struct ibmvnic_adapter *,
94 			    struct ibmvnic_sub_crq_queue *);
95 static int pending_scrq(struct ibmvnic_adapter *,
96 			struct ibmvnic_sub_crq_queue *);
97 static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *,
98 					struct ibmvnic_sub_crq_queue *);
99 static int ibmvnic_poll(struct napi_struct *napi, int data);
100 static void send_map_query(struct ibmvnic_adapter *adapter);
101 static int send_request_map(struct ibmvnic_adapter *, dma_addr_t, __be32, u8);
102 static int send_request_unmap(struct ibmvnic_adapter *, u8);
103 static int send_login(struct ibmvnic_adapter *adapter);
104 static void send_cap_queries(struct ibmvnic_adapter *adapter);
105 static int init_sub_crqs(struct ibmvnic_adapter *);
106 static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter);
107 static int ibmvnic_reset_init(struct ibmvnic_adapter *, bool reset);
108 static void release_crq_queue(struct ibmvnic_adapter *);
109 static int __ibmvnic_set_mac(struct net_device *, u8 *);
110 static int init_crq_queue(struct ibmvnic_adapter *adapter);
111 static int send_query_phys_parms(struct ibmvnic_adapter *adapter);
112 
113 struct ibmvnic_stat {
114 	char name[ETH_GSTRING_LEN];
115 	int offset;
116 };
117 
118 #define IBMVNIC_STAT_OFF(stat) (offsetof(struct ibmvnic_adapter, stats) + \
119 			     offsetof(struct ibmvnic_statistics, stat))
120 #define IBMVNIC_GET_STAT(a, off) (*((u64 *)(((unsigned long)(a)) + off)))
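
/* Example (ethtool stats path): a counter is read by pairing the name
 * table below with IBMVNIC_GET_STAT, e.g.
 *
 *	data[i] = IBMVNIC_GET_STAT(adapter, ibmvnic_stats[i].offset);
 */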
121 
122 static const struct ibmvnic_stat ibmvnic_stats[] = {
123 	{"rx_packets", IBMVNIC_STAT_OFF(rx_packets)},
124 	{"rx_bytes", IBMVNIC_STAT_OFF(rx_bytes)},
125 	{"tx_packets", IBMVNIC_STAT_OFF(tx_packets)},
126 	{"tx_bytes", IBMVNIC_STAT_OFF(tx_bytes)},
127 	{"ucast_tx_packets", IBMVNIC_STAT_OFF(ucast_tx_packets)},
128 	{"ucast_rx_packets", IBMVNIC_STAT_OFF(ucast_rx_packets)},
129 	{"mcast_tx_packets", IBMVNIC_STAT_OFF(mcast_tx_packets)},
130 	{"mcast_rx_packets", IBMVNIC_STAT_OFF(mcast_rx_packets)},
131 	{"bcast_tx_packets", IBMVNIC_STAT_OFF(bcast_tx_packets)},
132 	{"bcast_rx_packets", IBMVNIC_STAT_OFF(bcast_rx_packets)},
133 	{"align_errors", IBMVNIC_STAT_OFF(align_errors)},
134 	{"fcs_errors", IBMVNIC_STAT_OFF(fcs_errors)},
135 	{"single_collision_frames", IBMVNIC_STAT_OFF(single_collision_frames)},
136 	{"multi_collision_frames", IBMVNIC_STAT_OFF(multi_collision_frames)},
137 	{"sqe_test_errors", IBMVNIC_STAT_OFF(sqe_test_errors)},
138 	{"deferred_tx", IBMVNIC_STAT_OFF(deferred_tx)},
139 	{"late_collisions", IBMVNIC_STAT_OFF(late_collisions)},
140 	{"excess_collisions", IBMVNIC_STAT_OFF(excess_collisions)},
141 	{"internal_mac_tx_errors", IBMVNIC_STAT_OFF(internal_mac_tx_errors)},
142 	{"carrier_sense", IBMVNIC_STAT_OFF(carrier_sense)},
143 	{"too_long_frames", IBMVNIC_STAT_OFF(too_long_frames)},
144 	{"internal_mac_rx_errors", IBMVNIC_STAT_OFF(internal_mac_rx_errors)},
145 };
146 
147 static long h_reg_sub_crq(unsigned long unit_address, unsigned long token,
148 			  unsigned long length, unsigned long *number,
149 			  unsigned long *irq)
150 {
151 	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
152 	long rc;
153 
154 	rc = plpar_hcall(H_REG_SUB_CRQ, retbuf, unit_address, token, length);
155 	*number = retbuf[0];
156 	*irq = retbuf[1];
157 
158 	return rc;
159 }
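
/* On H_SUCCESS the hypervisor returns the new sub-CRQ's queue number in
 * retbuf[0] and its interrupt source in retbuf[1]; h_reg_sub_crq() hands
 * these back through *number and *irq for the caller to store.
 */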
160 
161 /**
162  * ibmvnic_wait_for_completion - Check device state and wait for completion
163  * @adapter: private device data
164  * @comp_done: completion structure to wait for
165  * @timeout: time to wait in milliseconds
166  *
167  * Wait for a completion signal or until the timeout limit is reached
168  * while checking that the device is still active.
169  */
170 static int ibmvnic_wait_for_completion(struct ibmvnic_adapter *adapter,
171 				       struct completion *comp_done,
172 				       unsigned long timeout)
173 {
174 	struct net_device *netdev;
175 	unsigned long div_timeout;
176 	u8 retry;
177 
178 	netdev = adapter->netdev;
179 	retry = 5;
180 	div_timeout = msecs_to_jiffies(timeout / retry);
181 	while (true) {
182 		if (!adapter->crq.active) {
183 			netdev_err(netdev, "Device down!\n");
184 			return -ENODEV;
185 		}
186 		if (!retry--)
187 			break;
188 		if (wait_for_completion_timeout(comp_done, div_timeout))
189 			return 0;
190 	}
191 	netdev_err(netdev, "Operation timed out.\n");
192 	return -ETIMEDOUT;
193 }
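
/* Callers pair this helper with the fw_done completion using a common
 * pattern (see alloc_long_term_buff() below for a concrete instance):
 *
 *	mutex_lock(&adapter->fw_lock);
 *	adapter->fw_done_rc = 0;
 *	reinit_completion(&adapter->fw_done);
 *	rc = ibmvnic_send_crq(adapter, &crq);	// or another send helper
 *	if (!rc)
 *		rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done,
 *						 10000);
 *	mutex_unlock(&adapter->fw_lock);
 */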
194 
195 static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
196 				struct ibmvnic_long_term_buff *ltb, int size)
197 {
198 	struct device *dev = &adapter->vdev->dev;
199 	int rc;
200 
201 	ltb->size = size;
202 	ltb->buff = dma_alloc_coherent(dev, ltb->size, &ltb->addr,
203 				       GFP_KERNEL);
204 
205 	if (!ltb->buff) {
206 		dev_err(dev, "Couldn't alloc long term buffer\n");
207 		return -ENOMEM;
208 	}
209 	ltb->map_id = adapter->map_id;
210 	adapter->map_id++;
211 
212 	mutex_lock(&adapter->fw_lock);
213 	adapter->fw_done_rc = 0;
214 	reinit_completion(&adapter->fw_done);
215 	rc = send_request_map(adapter, ltb->addr,
216 			      ltb->size, ltb->map_id);
217 	if (rc) {
218 		dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
219 		mutex_unlock(&adapter->fw_lock);
220 		return rc;
221 	}
222 
223 	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
224 	if (rc) {
225 		dev_err(dev,
226 			"Long term map request aborted or timed out, rc = %d\n",
227 			rc);
228 		dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
229 		mutex_unlock(&adapter->fw_lock);
230 		return rc;
231 	}
232 
233 	if (adapter->fw_done_rc) {
234 		dev_err(dev, "Couldn't map long term buffer, rc = %d\n",
235 			adapter->fw_done_rc);
236 		dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
237 		mutex_unlock(&adapter->fw_lock);
238 		return -1;
239 	}
240 	mutex_unlock(&adapter->fw_lock);
241 	return 0;
242 }
243 
244 static void free_long_term_buff(struct ibmvnic_adapter *adapter,
245 				struct ibmvnic_long_term_buff *ltb)
246 {
247 	struct device *dev = &adapter->vdev->dev;
248 
249 	if (!ltb->buff)
250 		return;
251 
252 	if (adapter->reset_reason != VNIC_RESET_FAILOVER &&
253 	    adapter->reset_reason != VNIC_RESET_MOBILITY)
254 		send_request_unmap(adapter, ltb->map_id);
255 	dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
256 }
257 
258 static int reset_long_term_buff(struct ibmvnic_adapter *adapter,
259 				struct ibmvnic_long_term_buff *ltb)
260 {
261 	struct device *dev = &adapter->vdev->dev;
262 	int rc;
263 
264 	memset(ltb->buff, 0, ltb->size);
265 
266 	mutex_lock(&adapter->fw_lock);
267 	adapter->fw_done_rc = 0;
268 
269 	reinit_completion(&adapter->fw_done);
270 	rc = send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id);
271 	if (rc) {
272 		mutex_unlock(&adapter->fw_lock);
273 		return rc;
274 	}
275 
276 	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
277 	if (rc) {
278 		dev_info(dev,
279 			 "Reset failed, long term map request timed out or aborted\n");
280 		mutex_unlock(&adapter->fw_lock);
281 		return rc;
282 	}
283 
284 	if (adapter->fw_done_rc) {
285 		dev_info(dev,
286 			 "Reset failed, attempting to free and reallocate buffer\n");
287 		free_long_term_buff(adapter, ltb);
288 		mutex_unlock(&adapter->fw_lock);
289 		return alloc_long_term_buff(adapter, ltb, ltb->size);
290 	}
291 	mutex_unlock(&adapter->fw_lock);
292 	return 0;
293 }
294 
295 static void deactivate_rx_pools(struct ibmvnic_adapter *adapter)
296 {
297 	int i;
298 
299 	for (i = 0; i < adapter->num_active_rx_pools; i++)
300 		adapter->rx_pool[i].active = 0;
301 }
302 
303 static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
304 			      struct ibmvnic_rx_pool *pool)
305 {
306 	int count = pool->size - atomic_read(&pool->available);
307 	u64 handle = adapter->rx_scrq[pool->index]->handle;
308 	struct device *dev = &adapter->vdev->dev;
309 	int buffers_added = 0;
310 	unsigned long lpar_rc;
311 	union sub_crq sub_crq;
312 	struct sk_buff *skb;
313 	unsigned int offset;
314 	dma_addr_t dma_addr;
315 	unsigned char *dst;
316 	int shift = 0;
317 	int index;
318 	int i;
319 
320 	if (!pool->active)
321 		return;
322 
323 	for (i = 0; i < count; ++i) {
324 		skb = alloc_skb(pool->buff_size, GFP_ATOMIC);
325 		if (!skb) {
326 			dev_err(dev, "Couldn't replenish rx buff\n");
327 			adapter->replenish_no_mem++;
328 			break;
329 		}
330 
331 		index = pool->free_map[pool->next_free];
332 
333 		if (pool->rx_buff[index].skb)
334 			dev_err(dev, "Inconsistent free_map!\n");
335 
336 		/* Copy the skb to the long term mapped DMA buffer */
337 		offset = index * pool->buff_size;
338 		dst = pool->long_term_buff.buff + offset;
339 		memset(dst, 0, pool->buff_size);
340 		dma_addr = pool->long_term_buff.addr + offset;
341 		pool->rx_buff[index].data = dst;
342 
343 		pool->free_map[pool->next_free] = IBMVNIC_INVALID_MAP;
344 		pool->rx_buff[index].dma = dma_addr;
345 		pool->rx_buff[index].skb = skb;
346 		pool->rx_buff[index].pool_index = pool->index;
347 		pool->rx_buff[index].size = pool->buff_size;
348 
349 		memset(&sub_crq, 0, sizeof(sub_crq));
350 		sub_crq.rx_add.first = IBMVNIC_CRQ_CMD;
351 		sub_crq.rx_add.correlator =
352 		    cpu_to_be64((u64)&pool->rx_buff[index]);
353 		sub_crq.rx_add.ioba = cpu_to_be32(dma_addr);
354 		sub_crq.rx_add.map_id = pool->long_term_buff.map_id;
355 
356 		/* The length field of the sCRQ is defined to be 24 bits so the
357 		 * buffer size needs to be left shifted by a byte before it is
358 		 * converted to big endian to prevent the last byte from being
359 		 * truncated.
360 		 */
361 #ifdef __LITTLE_ENDIAN__
362 		shift = 8;
363 #endif
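		/* e.g. on little endian with buff_size = 4096 (0x1000):
		 * 0x1000 << 8 = 0x00100000, which cpu_to_be32() stores as
		 * the bytes 00 10 00 00, so the 24-bit length field reads
		 * 0x001000 = 4096 and no byte of the size is lost.
		 */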
364 		sub_crq.rx_add.len = cpu_to_be32(pool->buff_size << shift);
365 
366 		lpar_rc = send_subcrq(adapter, handle, &sub_crq);
367 		if (lpar_rc != H_SUCCESS)
368 			goto failure;
369 
370 		buffers_added++;
371 		adapter->replenish_add_buff_success++;
372 		pool->next_free = (pool->next_free + 1) % pool->size;
373 	}
374 	atomic_add(buffers_added, &pool->available);
375 	return;
376 
377 failure:
378 	if (lpar_rc != H_PARAMETER && lpar_rc != H_CLOSED)
379 		dev_err_ratelimited(dev, "rx: replenish packet buffer failed\n");
380 	pool->free_map[pool->next_free] = index;
381 	pool->rx_buff[index].skb = NULL;
382 
383 	dev_kfree_skb_any(skb);
384 	adapter->replenish_add_buff_failure++;
385 	atomic_add(buffers_added, &pool->available);
386 
387 	if (lpar_rc == H_CLOSED || adapter->failover_pending) {
388 		/* Disable buffer pool replenishment and report carrier off if
389 		 * queue is closed or pending failover.
390 		 * Firmware guarantees that a signal will be sent to the
391 		 * driver, triggering a reset.
392 		 */
393 		deactivate_rx_pools(adapter);
394 		netif_carrier_off(adapter->netdev);
395 	}
396 }
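
/* free_map and next_free above implement a simple ring of buffer slots:
 * each free_map entry holds the index of an unused rx_buff (or
 * IBMVNIC_INVALID_MAP once the slot has been posted to the server), and
 * next_free advances modulo pool->size as buffers are replenished.
 */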
397 
398 static void replenish_pools(struct ibmvnic_adapter *adapter)
399 {
400 	int i;
401 
402 	adapter->replenish_task_cycles++;
403 	for (i = 0; i < adapter->num_active_rx_pools; i++) {
404 		if (adapter->rx_pool[i].active)
405 			replenish_rx_pool(adapter, &adapter->rx_pool[i]);
406 	}
407 }
408 
409 static void release_stats_buffers(struct ibmvnic_adapter *adapter)
410 {
411 	kfree(adapter->tx_stats_buffers);
412 	kfree(adapter->rx_stats_buffers);
413 	adapter->tx_stats_buffers = NULL;
414 	adapter->rx_stats_buffers = NULL;
415 }
416 
417 static int init_stats_buffers(struct ibmvnic_adapter *adapter)
418 {
419 	adapter->tx_stats_buffers =
420 				kcalloc(IBMVNIC_MAX_QUEUES,
421 					sizeof(struct ibmvnic_tx_queue_stats),
422 					GFP_KERNEL);
423 	if (!adapter->tx_stats_buffers)
424 		return -ENOMEM;
425 
426 	adapter->rx_stats_buffers =
427 				kcalloc(IBMVNIC_MAX_QUEUES,
428 					sizeof(struct ibmvnic_rx_queue_stats),
429 					GFP_KERNEL);
430 	if (!adapter->rx_stats_buffers)
431 		return -ENOMEM;
432 
433 	return 0;
434 }
435 
436 static void release_stats_token(struct ibmvnic_adapter *adapter)
437 {
438 	struct device *dev = &adapter->vdev->dev;
439 
440 	if (!adapter->stats_token)
441 		return;
442 
443 	dma_unmap_single(dev, adapter->stats_token,
444 			 sizeof(struct ibmvnic_statistics),
445 			 DMA_FROM_DEVICE);
446 	adapter->stats_token = 0;
447 }
448 
449 static int init_stats_token(struct ibmvnic_adapter *adapter)
450 {
451 	struct device *dev = &adapter->vdev->dev;
452 	dma_addr_t stok;
453 
454 	stok = dma_map_single(dev, &adapter->stats,
455 			      sizeof(struct ibmvnic_statistics),
456 			      DMA_FROM_DEVICE);
457 	if (dma_mapping_error(dev, stok)) {
458 		dev_err(dev, "Couldn't map stats buffer\n");
459 		return -1;
460 	}
461 
462 	adapter->stats_token = stok;
463 	netdev_dbg(adapter->netdev, "Stats token initialized (%llx)\n", stok);
464 	return 0;
465 }
466 
467 static int reset_rx_pools(struct ibmvnic_adapter *adapter)
468 {
469 	struct ibmvnic_rx_pool *rx_pool;
470 	u64 buff_size;
471 	int rx_scrqs;
472 	int i, j, rc;
473 
474 	if (!adapter->rx_pool)
475 		return -1;
476 
477 	buff_size = adapter->cur_rx_buf_sz;
478 	rx_scrqs = adapter->num_active_rx_pools;
479 	for (i = 0; i < rx_scrqs; i++) {
480 		rx_pool = &adapter->rx_pool[i];
481 
482 		netdev_dbg(adapter->netdev, "Re-setting rx_pool[%d]\n", i);
483 
484 		if (rx_pool->buff_size != buff_size) {
485 			free_long_term_buff(adapter, &rx_pool->long_term_buff);
486 			rx_pool->buff_size = buff_size;
487 			rc = alloc_long_term_buff(adapter,
488 						  &rx_pool->long_term_buff,
489 						  rx_pool->size *
490 						  rx_pool->buff_size);
491 		} else {
492 			rc = reset_long_term_buff(adapter,
493 						  &rx_pool->long_term_buff);
494 		}
495 
496 		if (rc)
497 			return rc;
498 
499 		for (j = 0; j < rx_pool->size; j++)
500 			rx_pool->free_map[j] = j;
501 
502 		memset(rx_pool->rx_buff, 0,
503 		       rx_pool->size * sizeof(struct ibmvnic_rx_buff));
504 
505 		atomic_set(&rx_pool->available, 0);
506 		rx_pool->next_alloc = 0;
507 		rx_pool->next_free = 0;
508 		rx_pool->active = 1;
509 	}
510 
511 	return 0;
512 }
513 
514 static void release_rx_pools(struct ibmvnic_adapter *adapter)
515 {
516 	struct ibmvnic_rx_pool *rx_pool;
517 	int i, j;
518 
519 	if (!adapter->rx_pool)
520 		return;
521 
522 	for (i = 0; i < adapter->num_active_rx_pools; i++) {
523 		rx_pool = &adapter->rx_pool[i];
524 
525 		netdev_dbg(adapter->netdev, "Releasing rx_pool[%d]\n", i);
526 
527 		kfree(rx_pool->free_map);
528 		free_long_term_buff(adapter, &rx_pool->long_term_buff);
529 
530 		if (!rx_pool->rx_buff)
531 			continue;
532 
533 		for (j = 0; j < rx_pool->size; j++) {
534 			if (rx_pool->rx_buff[j].skb) {
535 				dev_kfree_skb_any(rx_pool->rx_buff[j].skb);
536 				rx_pool->rx_buff[j].skb = NULL;
537 			}
538 		}
539 
540 		kfree(rx_pool->rx_buff);
541 	}
542 
543 	kfree(adapter->rx_pool);
544 	adapter->rx_pool = NULL;
545 	adapter->num_active_rx_pools = 0;
546 }
547 
548 static int init_rx_pools(struct net_device *netdev)
549 {
550 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
551 	struct device *dev = &adapter->vdev->dev;
552 	struct ibmvnic_rx_pool *rx_pool;
553 	int rxadd_subcrqs;
554 	u64 buff_size;
555 	int i, j;
556 
557 	rxadd_subcrqs = adapter->num_active_rx_scrqs;
558 	buff_size = adapter->cur_rx_buf_sz;
559 
560 	adapter->rx_pool = kcalloc(rxadd_subcrqs,
561 				   sizeof(struct ibmvnic_rx_pool),
562 				   GFP_KERNEL);
563 	if (!adapter->rx_pool) {
564 		dev_err(dev, "Failed to allocate rx pools\n");
565 		return -1;
566 	}
567 
568 	adapter->num_active_rx_pools = rxadd_subcrqs;
569 
570 	for (i = 0; i < rxadd_subcrqs; i++) {
571 		rx_pool = &adapter->rx_pool[i];
572 
573 		netdev_dbg(adapter->netdev,
574 			   "Initializing rx_pool[%d], %lld buffs, %lld bytes each\n",
575 			   i, adapter->req_rx_add_entries_per_subcrq,
576 			   buff_size);
577 
578 		rx_pool->size = adapter->req_rx_add_entries_per_subcrq;
579 		rx_pool->index = i;
580 		rx_pool->buff_size = buff_size;
581 		rx_pool->active = 1;
582 
583 		rx_pool->free_map = kcalloc(rx_pool->size, sizeof(int),
584 					    GFP_KERNEL);
585 		if (!rx_pool->free_map) {
586 			release_rx_pools(adapter);
587 			return -1;
588 		}
589 
590 		rx_pool->rx_buff = kcalloc(rx_pool->size,
591 					   sizeof(struct ibmvnic_rx_buff),
592 					   GFP_KERNEL);
593 		if (!rx_pool->rx_buff) {
594 			dev_err(dev, "Couldn't alloc rx buffers\n");
595 			release_rx_pools(adapter);
596 			return -1;
597 		}
598 
599 		if (alloc_long_term_buff(adapter, &rx_pool->long_term_buff,
600 					 rx_pool->size * rx_pool->buff_size)) {
601 			release_rx_pools(adapter);
602 			return -1;
603 		}
604 
605 		for (j = 0; j < rx_pool->size; ++j)
606 			rx_pool->free_map[j] = j;
607 
608 		atomic_set(&rx_pool->available, 0);
609 		rx_pool->next_alloc = 0;
610 		rx_pool->next_free = 0;
611 	}
612 
613 	return 0;
614 }
615 
616 static int reset_one_tx_pool(struct ibmvnic_adapter *adapter,
617 			     struct ibmvnic_tx_pool *tx_pool)
618 {
619 	int rc, i;
620 
621 	rc = reset_long_term_buff(adapter, &tx_pool->long_term_buff);
622 	if (rc)
623 		return rc;
624 
625 	memset(tx_pool->tx_buff, 0,
626 	       tx_pool->num_buffers *
627 	       sizeof(struct ibmvnic_tx_buff));
628 
629 	for (i = 0; i < tx_pool->num_buffers; i++)
630 		tx_pool->free_map[i] = i;
631 
632 	tx_pool->consumer_index = 0;
633 	tx_pool->producer_index = 0;
634 
635 	return 0;
636 }
637 
638 static int reset_tx_pools(struct ibmvnic_adapter *adapter)
639 {
640 	int tx_scrqs;
641 	int i, rc;
642 
643 	if (!adapter->tx_pool)
644 		return -1;
645 
646 	tx_scrqs = adapter->num_active_tx_pools;
647 	for (i = 0; i < tx_scrqs; i++) {
648 		rc = reset_one_tx_pool(adapter, &adapter->tso_pool[i]);
649 		if (rc)
650 			return rc;
651 		rc = reset_one_tx_pool(adapter, &adapter->tx_pool[i]);
652 		if (rc)
653 			return rc;
654 	}
655 
656 	return 0;
657 }
658 
659 static void release_vpd_data(struct ibmvnic_adapter *adapter)
660 {
661 	if (!adapter->vpd)
662 		return;
663 
664 	kfree(adapter->vpd->buff);
665 	kfree(adapter->vpd);
666 
667 	adapter->vpd = NULL;
668 }
669 
670 static void release_one_tx_pool(struct ibmvnic_adapter *adapter,
671 				struct ibmvnic_tx_pool *tx_pool)
672 {
673 	kfree(tx_pool->tx_buff);
674 	kfree(tx_pool->free_map);
675 	free_long_term_buff(adapter, &tx_pool->long_term_buff);
676 }
677 
678 static void release_tx_pools(struct ibmvnic_adapter *adapter)
679 {
680 	int i;
681 
682 	if (!adapter->tx_pool)
683 		return;
684 
685 	for (i = 0; i < adapter->num_active_tx_pools; i++) {
686 		release_one_tx_pool(adapter, &adapter->tx_pool[i]);
687 		release_one_tx_pool(adapter, &adapter->tso_pool[i]);
688 	}
689 
690 	kfree(adapter->tx_pool);
691 	adapter->tx_pool = NULL;
692 	kfree(adapter->tso_pool);
693 	adapter->tso_pool = NULL;
694 	adapter->num_active_tx_pools = 0;
695 }
696 
697 static int init_one_tx_pool(struct net_device *netdev,
698 			    struct ibmvnic_tx_pool *tx_pool,
699 			    int num_entries, int buf_size)
700 {
701 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
702 	int i;
703 
704 	tx_pool->tx_buff = kcalloc(num_entries,
705 				   sizeof(struct ibmvnic_tx_buff),
706 				   GFP_KERNEL);
707 	if (!tx_pool->tx_buff)
708 		return -1;
709 
710 	if (alloc_long_term_buff(adapter, &tx_pool->long_term_buff,
711 				 num_entries * buf_size))
712 		return -1;
713 
714 	tx_pool->free_map = kcalloc(num_entries, sizeof(int), GFP_KERNEL);
715 	if (!tx_pool->free_map)
716 		return -1;
717 
718 	for (i = 0; i < num_entries; i++)
719 		tx_pool->free_map[i] = i;
720 
721 	tx_pool->consumer_index = 0;
722 	tx_pool->producer_index = 0;
723 	tx_pool->num_buffers = num_entries;
724 	tx_pool->buf_size = buf_size;
725 
726 	return 0;
727 }
728 
729 static int init_tx_pools(struct net_device *netdev)
730 {
731 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
732 	int tx_subcrqs;
733 	int i, rc;
734 
735 	tx_subcrqs = adapter->num_active_tx_scrqs;
736 	adapter->tx_pool = kcalloc(tx_subcrqs,
737 				   sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
738 	if (!adapter->tx_pool)
739 		return -1;
740 
741 	adapter->tso_pool = kcalloc(tx_subcrqs,
742 				    sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
743 	if (!adapter->tso_pool)
744 		return -1;
745 
746 	adapter->num_active_tx_pools = tx_subcrqs;
747 
748 	for (i = 0; i < tx_subcrqs; i++) {
749 		rc = init_one_tx_pool(netdev, &adapter->tx_pool[i],
750 				      adapter->req_tx_entries_per_subcrq,
751 				      adapter->req_mtu + VLAN_HLEN);
752 		if (rc) {
753 			release_tx_pools(adapter);
754 			return rc;
755 		}
756 
757 		rc = init_one_tx_pool(netdev, &adapter->tso_pool[i],
758 				      IBMVNIC_TSO_BUFS,
759 				      IBMVNIC_TSO_BUF_SZ);
760 		if (rc) {
761 			release_tx_pools(adapter);
762 			return rc;
763 		}
764 	}
765 
766 	return 0;
767 }
768 
769 static void ibmvnic_napi_enable(struct ibmvnic_adapter *adapter)
770 {
771 	int i;
772 
773 	if (adapter->napi_enabled)
774 		return;
775 
776 	for (i = 0; i < adapter->req_rx_queues; i++)
777 		napi_enable(&adapter->napi[i]);
778 
779 	adapter->napi_enabled = true;
780 }
781 
782 static void ibmvnic_napi_disable(struct ibmvnic_adapter *adapter)
783 {
784 	int i;
785 
786 	if (!adapter->napi_enabled)
787 		return;
788 
789 	for (i = 0; i < adapter->req_rx_queues; i++) {
790 		netdev_dbg(adapter->netdev, "Disabling napi[%d]\n", i);
791 		napi_disable(&adapter->napi[i]);
792 	}
793 
794 	adapter->napi_enabled = false;
795 }
796 
797 static int init_napi(struct ibmvnic_adapter *adapter)
798 {
799 	int i;
800 
801 	adapter->napi = kcalloc(adapter->req_rx_queues,
802 				sizeof(struct napi_struct), GFP_KERNEL);
803 	if (!adapter->napi)
804 		return -ENOMEM;
805 
806 	for (i = 0; i < adapter->req_rx_queues; i++) {
807 		netdev_dbg(adapter->netdev, "Adding napi[%d]\n", i);
808 		netif_napi_add(adapter->netdev, &adapter->napi[i],
809 			       ibmvnic_poll, NAPI_POLL_WEIGHT);
810 	}
811 
812 	adapter->num_active_rx_napi = adapter->req_rx_queues;
813 	return 0;
814 }
815 
816 static void release_napi(struct ibmvnic_adapter *adapter)
817 {
818 	int i;
819 
820 	if (!adapter->napi)
821 		return;
822 
823 	for (i = 0; i < adapter->num_active_rx_napi; i++) {
824 		netdev_dbg(adapter->netdev, "Releasing napi[%d]\n", i);
825 		netif_napi_del(&adapter->napi[i]);
826 	}
827 
828 	kfree(adapter->napi);
829 	adapter->napi = NULL;
830 	adapter->num_active_rx_napi = 0;
831 	adapter->napi_enabled = false;
832 }
833 
834 static int ibmvnic_login(struct net_device *netdev)
835 {
836 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
837 	unsigned long timeout = msecs_to_jiffies(30000);
838 	int retry_count = 0;
839 	int retries = 10;
840 	bool retry;
841 	int rc;
842 
843 	do {
844 		retry = false;
845 		if (retry_count > retries) {
846 			netdev_warn(netdev, "Login attempts exceeded\n");
847 			return -1;
848 		}
849 
850 		adapter->init_done_rc = 0;
851 		reinit_completion(&adapter->init_done);
852 		rc = send_login(adapter);
853 		if (rc) {
854 			netdev_warn(netdev, "Unable to login\n");
855 			return rc;
856 		}
857 
858 		if (!wait_for_completion_timeout(&adapter->init_done,
859 						 timeout)) {
860 			netdev_warn(netdev, "Login timed out, retrying...\n");
861 			retry = true;
862 			adapter->init_done_rc = 0;
863 			retry_count++;
864 			continue;
865 		}
866 
867 		if (adapter->init_done_rc == ABORTED) {
868 			netdev_warn(netdev, "Login aborted, retrying...\n");
869 			retry = true;
870 			adapter->init_done_rc = 0;
871 			retry_count++;
872 			/* FW or device may be busy, so
873 			 * wait a bit before retrying login
874 			 */
875 			msleep(500);
876 		} else if (adapter->init_done_rc == PARTIALSUCCESS) {
877 			retry_count++;
878 			release_sub_crqs(adapter, 1);
879 
880 			retry = true;
881 			netdev_dbg(netdev,
882 				   "Received partial success, retrying...\n");
883 			adapter->init_done_rc = 0;
884 			reinit_completion(&adapter->init_done);
885 			send_cap_queries(adapter);
886 			if (!wait_for_completion_timeout(&adapter->init_done,
887 							 timeout)) {
888 				netdev_warn(netdev,
889 					    "Capabilities query timed out\n");
890 				return -1;
891 			}
892 
893 			rc = init_sub_crqs(adapter);
894 			if (rc) {
895 				netdev_warn(netdev,
896 					    "SCRQ initialization failed\n");
897 				return -1;
898 			}
899 
900 			rc = init_sub_crq_irqs(adapter);
901 			if (rc) {
902 				netdev_warn(netdev,
903 					    "SCRQ irq initialization failed\n");
904 				return -1;
905 			}
906 		} else if (adapter->init_done_rc) {
907 			netdev_warn(netdev, "Adapter login failed\n");
908 			return -1;
909 		}
910 	} while (retry);
911 
912 	__ibmvnic_set_mac(netdev, adapter->mac_addr);
913 
914 	return 0;
915 }
916 
917 static void release_login_buffer(struct ibmvnic_adapter *adapter)
918 {
919 	kfree(adapter->login_buf);
920 	adapter->login_buf = NULL;
921 }
922 
923 static void release_login_rsp_buffer(struct ibmvnic_adapter *adapter)
924 {
925 	kfree(adapter->login_rsp_buf);
926 	adapter->login_rsp_buf = NULL;
927 }
928 
929 static void release_resources(struct ibmvnic_adapter *adapter)
930 {
931 	release_vpd_data(adapter);
932 
933 	release_tx_pools(adapter);
934 	release_rx_pools(adapter);
935 
936 	release_napi(adapter);
937 	release_login_rsp_buffer(adapter);
938 }
939 
940 static int set_link_state(struct ibmvnic_adapter *adapter, u8 link_state)
941 {
942 	struct net_device *netdev = adapter->netdev;
943 	unsigned long timeout = msecs_to_jiffies(30000);
944 	union ibmvnic_crq crq;
945 	bool resend;
946 	int rc;
947 
948 	netdev_dbg(netdev, "setting link state %d\n", link_state);
949 
950 	memset(&crq, 0, sizeof(crq));
951 	crq.logical_link_state.first = IBMVNIC_CRQ_CMD;
952 	crq.logical_link_state.cmd = LOGICAL_LINK_STATE;
953 	crq.logical_link_state.link_state = link_state;
954 
955 	do {
956 		resend = false;
957 
958 		reinit_completion(&adapter->init_done);
959 		rc = ibmvnic_send_crq(adapter, &crq);
960 		if (rc) {
961 			netdev_err(netdev, "Failed to set link state\n");
962 			return rc;
963 		}
964 
965 		if (!wait_for_completion_timeout(&adapter->init_done,
966 						 timeout)) {
967 			netdev_err(netdev, "timeout setting link state\n");
968 			return -1;
969 		}
970 
971 		if (adapter->init_done_rc == PARTIALSUCCESS) {
972 			/* Partial success, delay and re-send */
973 			mdelay(1000);
974 			resend = true;
975 		} else if (adapter->init_done_rc) {
976 			netdev_warn(netdev, "Unable to set link state, rc=%d\n",
977 				    adapter->init_done_rc);
978 			return adapter->init_done_rc;
979 		}
980 	} while (resend);
981 
982 	return 0;
983 }
984 
985 static int set_real_num_queues(struct net_device *netdev)
986 {
987 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
988 	int rc;
989 
990 	netdev_dbg(netdev, "Setting real tx/rx queues (%llx/%llx)\n",
991 		   adapter->req_tx_queues, adapter->req_rx_queues);
992 
993 	rc = netif_set_real_num_tx_queues(netdev, adapter->req_tx_queues);
994 	if (rc) {
995 		netdev_err(netdev, "failed to set the number of tx queues\n");
996 		return rc;
997 	}
998 
999 	rc = netif_set_real_num_rx_queues(netdev, adapter->req_rx_queues);
1000 	if (rc)
1001 		netdev_err(netdev, "failed to set the number of rx queues\n");
1002 
1003 	return rc;
1004 }
1005 
1006 static int ibmvnic_get_vpd(struct ibmvnic_adapter *adapter)
1007 {
1008 	struct device *dev = &adapter->vdev->dev;
1009 	union ibmvnic_crq crq;
1010 	int len = 0;
1011 	int rc;
1012 
1013 	if (adapter->vpd->buff)
1014 		len = adapter->vpd->len;
1015 
1016 	mutex_lock(&adapter->fw_lock);
1017 	adapter->fw_done_rc = 0;
1018 	reinit_completion(&adapter->fw_done);
1019 
1020 	crq.get_vpd_size.first = IBMVNIC_CRQ_CMD;
1021 	crq.get_vpd_size.cmd = GET_VPD_SIZE;
1022 	rc = ibmvnic_send_crq(adapter, &crq);
1023 	if (rc) {
1024 		mutex_unlock(&adapter->fw_lock);
1025 		return rc;
1026 	}
1027 
1028 	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
1029 	if (rc) {
1030 		dev_err(dev, "Could not retrieve VPD size, rc = %d\n", rc);
1031 		mutex_unlock(&adapter->fw_lock);
1032 		return rc;
1033 	}
1034 	mutex_unlock(&adapter->fw_lock);
1035 
1036 	if (!adapter->vpd->len)
1037 		return -ENODATA;
1038 
1039 	if (!adapter->vpd->buff)
1040 		adapter->vpd->buff = kzalloc(adapter->vpd->len, GFP_KERNEL);
1041 	else if (adapter->vpd->len != len)
1042 		adapter->vpd->buff =
1043 			krealloc(adapter->vpd->buff,
1044 				 adapter->vpd->len, GFP_KERNEL);
1045 
1046 	if (!adapter->vpd->buff) {
1047 		dev_err(dev, "Could not allocate VPD buffer\n");
1048 		return -ENOMEM;
1049 	}
1050 
1051 	adapter->vpd->dma_addr =
1052 		dma_map_single(dev, adapter->vpd->buff, adapter->vpd->len,
1053 			       DMA_FROM_DEVICE);
1054 	if (dma_mapping_error(dev, adapter->vpd->dma_addr)) {
1055 		dev_err(dev, "Could not map VPD buffer\n");
1056 		kfree(adapter->vpd->buff);
1057 		adapter->vpd->buff = NULL;
1058 		return -ENOMEM;
1059 	}
1060 
1061 	mutex_lock(&adapter->fw_lock);
1062 	adapter->fw_done_rc = 0;
1063 	reinit_completion(&adapter->fw_done);
1064 
1065 	crq.get_vpd.first = IBMVNIC_CRQ_CMD;
1066 	crq.get_vpd.cmd = GET_VPD;
1067 	crq.get_vpd.ioba = cpu_to_be32(adapter->vpd->dma_addr);
1068 	crq.get_vpd.len = cpu_to_be32((u32)adapter->vpd->len);
1069 	rc = ibmvnic_send_crq(adapter, &crq);
1070 	if (rc) {
1071 		kfree(adapter->vpd->buff);
1072 		adapter->vpd->buff = NULL;
1073 		mutex_unlock(&adapter->fw_lock);
1074 		return rc;
1075 	}
1076 
1077 	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
1078 	if (rc) {
1079 		dev_err(dev, "Unable to retrieve VPD, rc = %d\n", rc);
1080 		kfree(adapter->vpd->buff);
1081 		adapter->vpd->buff = NULL;
1082 		mutex_unlock(&adapter->fw_lock);
1083 		return rc;
1084 	}
1085 
1086 	mutex_unlock(&adapter->fw_lock);
1087 	return 0;
1088 }
1089 
1090 static int init_resources(struct ibmvnic_adapter *adapter)
1091 {
1092 	struct net_device *netdev = adapter->netdev;
1093 	int rc;
1094 
1095 	rc = set_real_num_queues(netdev);
1096 	if (rc)
1097 		return rc;
1098 
1099 	adapter->vpd = kzalloc(sizeof(*adapter->vpd), GFP_KERNEL);
1100 	if (!adapter->vpd)
1101 		return -ENOMEM;
1102 
1103 	/* Vital Product Data (VPD) */
1104 	rc = ibmvnic_get_vpd(adapter);
1105 	if (rc) {
1106 		netdev_err(netdev, "failed to initialize Vital Product Data (VPD)\n");
1107 		return rc;
1108 	}
1109 
1110 	adapter->map_id = 1;
1111 
1112 	rc = init_napi(adapter);
1113 	if (rc)
1114 		return rc;
1115 
1116 	send_map_query(adapter);
1117 
1118 	rc = init_rx_pools(netdev);
1119 	if (rc)
1120 		return rc;
1121 
1122 	rc = init_tx_pools(netdev);
1123 	return rc;
1124 }
1125 
1126 static int __ibmvnic_open(struct net_device *netdev)
1127 {
1128 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1129 	enum vnic_state prev_state = adapter->state;
1130 	int i, rc;
1131 
1132 	adapter->state = VNIC_OPENING;
1133 	replenish_pools(adapter);
1134 	ibmvnic_napi_enable(adapter);
1135 
1136 	/* We're ready to receive frames, enable the sub-crq interrupts and
1137 	 * set the logical link state to up
1138 	 */
1139 	for (i = 0; i < adapter->req_rx_queues; i++) {
1140 		netdev_dbg(netdev, "Enabling rx_scrq[%d] irq\n", i);
1141 		if (prev_state == VNIC_CLOSED)
1142 			enable_irq(adapter->rx_scrq[i]->irq);
1143 		enable_scrq_irq(adapter, adapter->rx_scrq[i]);
1144 	}
1145 
1146 	for (i = 0; i < adapter->req_tx_queues; i++) {
1147 		netdev_dbg(netdev, "Enabling tx_scrq[%d] irq\n", i);
1148 		if (prev_state == VNIC_CLOSED)
1149 			enable_irq(adapter->tx_scrq[i]->irq);
1150 		enable_scrq_irq(adapter, adapter->tx_scrq[i]);
1151 	}
1152 
1153 	rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_UP);
1154 	if (rc) {
1155 		for (i = 0; i < adapter->req_rx_queues; i++)
1156 			napi_disable(&adapter->napi[i]);
1157 		release_resources(adapter);
1158 		return rc;
1159 	}
1160 
1161 	netif_tx_start_all_queues(netdev);
1162 
1163 	if (prev_state == VNIC_CLOSED) {
1164 		for (i = 0; i < adapter->req_rx_queues; i++)
1165 			napi_schedule(&adapter->napi[i]);
1166 	}
1167 
1168 	adapter->state = VNIC_OPEN;
1169 	return rc;
1170 }
1171 
1172 static int ibmvnic_open(struct net_device *netdev)
1173 {
1174 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1175 	int rc;
1176 
1177 	/* If device failover is pending, just set device state and return.
1178 	 * Device operation will be handled by reset routine.
1179 	 */
1180 	if (adapter->failover_pending) {
1181 		adapter->state = VNIC_OPEN;
1182 		return 0;
1183 	}
1184 
1185 	if (adapter->state != VNIC_CLOSED) {
1186 		rc = ibmvnic_login(netdev);
1187 		if (rc)
1188 			return rc;
1189 
1190 		rc = init_resources(adapter);
1191 		if (rc) {
1192 			netdev_err(netdev, "failed to initialize resources\n");
1193 			release_resources(adapter);
1194 			return rc;
1195 		}
1196 	}
1197 
1198 	rc = __ibmvnic_open(netdev);
1199 
1200 	return rc;
1201 }
1202 
1203 static void clean_rx_pools(struct ibmvnic_adapter *adapter)
1204 {
1205 	struct ibmvnic_rx_pool *rx_pool;
1206 	struct ibmvnic_rx_buff *rx_buff;
1207 	u64 rx_entries;
1208 	int rx_scrqs;
1209 	int i, j;
1210 
1211 	if (!adapter->rx_pool)
1212 		return;
1213 
1214 	rx_scrqs = adapter->num_active_rx_pools;
1215 	rx_entries = adapter->req_rx_add_entries_per_subcrq;
1216 
1217 	/* Free any remaining skbs in the rx buffer pools */
1218 	for (i = 0; i < rx_scrqs; i++) {
1219 		rx_pool = &adapter->rx_pool[i];
1220 		if (!rx_pool || !rx_pool->rx_buff)
1221 			continue;
1222 
1223 		netdev_dbg(adapter->netdev, "Cleaning rx_pool[%d]\n", i);
1224 		for (j = 0; j < rx_entries; j++) {
1225 			rx_buff = &rx_pool->rx_buff[j];
1226 			if (rx_buff && rx_buff->skb) {
1227 				dev_kfree_skb_any(rx_buff->skb);
1228 				rx_buff->skb = NULL;
1229 			}
1230 		}
1231 	}
1232 }
1233 
1234 static void clean_one_tx_pool(struct ibmvnic_adapter *adapter,
1235 			      struct ibmvnic_tx_pool *tx_pool)
1236 {
1237 	struct ibmvnic_tx_buff *tx_buff;
1238 	u64 tx_entries;
1239 	int i;
1240 
1241 	if (!tx_pool || !tx_pool->tx_buff)
1242 		return;
1243 
1244 	tx_entries = tx_pool->num_buffers;
1245 
1246 	for (i = 0; i < tx_entries; i++) {
1247 		tx_buff = &tx_pool->tx_buff[i];
1248 		if (tx_buff && tx_buff->skb) {
1249 			dev_kfree_skb_any(tx_buff->skb);
1250 			tx_buff->skb = NULL;
1251 		}
1252 	}
1253 }
1254 
1255 static void clean_tx_pools(struct ibmvnic_adapter *adapter)
1256 {
1257 	int tx_scrqs;
1258 	int i;
1259 
1260 	if (!adapter->tx_pool || !adapter->tso_pool)
1261 		return;
1262 
1263 	tx_scrqs = adapter->num_active_tx_pools;
1264 
1265 	/* Free any remaining skbs in the tx buffer pools */
1266 	for (i = 0; i < tx_scrqs; i++) {
1267 		netdev_dbg(adapter->netdev, "Cleaning tx_pool[%d]\n", i);
1268 		clean_one_tx_pool(adapter, &adapter->tx_pool[i]);
1269 		clean_one_tx_pool(adapter, &adapter->tso_pool[i]);
1270 	}
1271 }
1272 
1273 static void ibmvnic_disable_irqs(struct ibmvnic_adapter *adapter)
1274 {
1275 	struct net_device *netdev = adapter->netdev;
1276 	int i;
1277 
1278 	if (adapter->tx_scrq) {
1279 		for (i = 0; i < adapter->req_tx_queues; i++)
1280 			if (adapter->tx_scrq[i]->irq) {
1281 				netdev_dbg(netdev,
1282 					   "Disabling tx_scrq[%d] irq\n", i);
1283 				disable_scrq_irq(adapter, adapter->tx_scrq[i]);
1284 				disable_irq(adapter->tx_scrq[i]->irq);
1285 			}
1286 	}
1287 
1288 	if (adapter->rx_scrq) {
1289 		for (i = 0; i < adapter->req_rx_queues; i++) {
1290 			if (adapter->rx_scrq[i]->irq) {
1291 				netdev_dbg(netdev,
1292 					   "Disabling rx_scrq[%d] irq\n", i);
1293 				disable_scrq_irq(adapter, adapter->rx_scrq[i]);
1294 				disable_irq(adapter->rx_scrq[i]->irq);
1295 			}
1296 		}
1297 	}
1298 }
1299 
1300 static void ibmvnic_cleanup(struct net_device *netdev)
1301 {
1302 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1303 
1304 	/* ensure that transmissions are stopped if called by do_reset */
1305 	if (test_bit(0, &adapter->resetting))
1306 		netif_tx_disable(netdev);
1307 	else
1308 		netif_tx_stop_all_queues(netdev);
1309 
1310 	ibmvnic_napi_disable(adapter);
1311 	ibmvnic_disable_irqs(adapter);
1312 
1313 	clean_rx_pools(adapter);
1314 	clean_tx_pools(adapter);
1315 }
1316 
1317 static int __ibmvnic_close(struct net_device *netdev)
1318 {
1319 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1320 	int rc = 0;
1321 
1322 	adapter->state = VNIC_CLOSING;
1323 	rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN);
1324 	if (rc)
1325 		return rc;
1326 	adapter->state = VNIC_CLOSED;
1327 	return 0;
1328 }
1329 
1330 static int ibmvnic_close(struct net_device *netdev)
1331 {
1332 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1333 	int rc;
1334 
1335 	/* If device failover is pending, just set device state and return.
1336 	 * Device operation will be handled by reset routine.
1337 	 */
1338 	if (adapter->failover_pending) {
1339 		adapter->state = VNIC_CLOSED;
1340 		return 0;
1341 	}
1342 
1343 	rc = __ibmvnic_close(netdev);
1344 	ibmvnic_cleanup(netdev);
1345 
1346 	return rc;
1347 }
1348 
1349 /**
1350  * build_hdr_data - creates L2/L3/L4 header data buffer
1351  * @hdr_field: bitfield determining needed headers
1352  * @skb: socket buffer
1353  * @hdr_len: array of header lengths to be filled in
1354  * @hdr_data: buffer to write the header data into
1355  *
1356  * Reads hdr_field to determine which headers are needed by firmware.
1357  * Builds a buffer containing these headers.  Saves individual header
1358  * lengths and total buffer length to be used to build descriptors.
1359  */
1360 static int build_hdr_data(u8 hdr_field, struct sk_buff *skb,
1361 			  int *hdr_len, u8 *hdr_data)
1362 {
1363 	int len = 0;
1364 	u8 *hdr;
1365 
1366 	if (skb_vlan_tagged(skb) && !skb_vlan_tag_present(skb))
1367 		hdr_len[0] = sizeof(struct vlan_ethhdr);
1368 	else
1369 		hdr_len[0] = sizeof(struct ethhdr);
1370 
1371 	if (skb->protocol == htons(ETH_P_IP)) {
1372 		hdr_len[1] = ip_hdr(skb)->ihl * 4;
1373 		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
1374 			hdr_len[2] = tcp_hdrlen(skb);
1375 		else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
1376 			hdr_len[2] = sizeof(struct udphdr);
1377 	} else if (skb->protocol == htons(ETH_P_IPV6)) {
1378 		hdr_len[1] = sizeof(struct ipv6hdr);
1379 		if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
1380 			hdr_len[2] = tcp_hdrlen(skb);
1381 		else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
1382 			hdr_len[2] = sizeof(struct udphdr);
1383 	} else if (skb->protocol == htons(ETH_P_ARP)) {
1384 		hdr_len[1] = arp_hdr_len(skb->dev);
1385 		hdr_len[2] = 0;
1386 	}
1387 
1388 	memset(hdr_data, 0, 120);
1389 	if ((hdr_field >> 6) & 1) {
1390 		hdr = skb_mac_header(skb);
1391 		memcpy(hdr_data, hdr, hdr_len[0]);
1392 		len += hdr_len[0];
1393 	}
1394 
1395 	if ((hdr_field >> 5) & 1) {
1396 		hdr = skb_network_header(skb);
1397 		memcpy(hdr_data + len, hdr, hdr_len[1]);
1398 		len += hdr_len[1];
1399 	}
1400 
1401 	if ((hdr_field >> 4) & 1) {
1402 		hdr = skb_transport_header(skb);
1403 		memcpy(hdr_data + len, hdr, hdr_len[2]);
1404 		len += hdr_len[2];
1405 	}
1406 	return len;
1407 }
1408 
1409 /**
1410  * create_hdr_descs - create header and header extension descriptors
1411  * @hdr_field: bitfield determining needed headers
1412  * @hdr_data: buffer containing header data
1413  * @len: length of the hdr_data buffer
1414  * @hdr_len: array of individual header lengths
1415  * @scrq_arr: descriptor array
1416  *
1417  * Creates header and, if needed, header extension descriptors and
1418  * places them in a descriptor array, scrq_arr
1419  */
1420 
1421 static int create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len,
1422 			    union sub_crq *scrq_arr)
1423 {
1424 	union sub_crq hdr_desc;
1425 	int tmp_len = len;
1426 	int num_descs = 0;
1427 	u8 *data, *cur;
1428 	int tmp;
1429 
1430 	while (tmp_len > 0) {
1431 		cur = hdr_data + len - tmp_len;
1432 
1433 		memset(&hdr_desc, 0, sizeof(hdr_desc));
1434 		if (cur != hdr_data) {
1435 			data = hdr_desc.hdr_ext.data;
1436 			tmp = tmp_len > 29 ? 29 : tmp_len;
1437 			hdr_desc.hdr_ext.first = IBMVNIC_CRQ_CMD;
1438 			hdr_desc.hdr_ext.type = IBMVNIC_HDR_EXT_DESC;
1439 			hdr_desc.hdr_ext.len = tmp;
1440 		} else {
1441 			data = hdr_desc.hdr.data;
1442 			tmp = tmp_len > 24 ? 24 : tmp_len;
1443 			hdr_desc.hdr.first = IBMVNIC_CRQ_CMD;
1444 			hdr_desc.hdr.type = IBMVNIC_HDR_DESC;
1445 			hdr_desc.hdr.len = tmp;
1446 			hdr_desc.hdr.l2_len = (u8)hdr_len[0];
1447 			hdr_desc.hdr.l3_len = cpu_to_be16((u16)hdr_len[1]);
1448 			hdr_desc.hdr.l4_len = (u8)hdr_len[2];
1449 			hdr_desc.hdr.flag = hdr_field << 1;
1450 		}
1451 		memcpy(data, cur, tmp);
1452 		tmp_len -= tmp;
1453 		*scrq_arr = hdr_desc;
1454 		scrq_arr++;
1455 		num_descs++;
1456 	}
1457 
1458 	return num_descs;
1459 }
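
/* Sizing note, derived from the loop above: the first header descriptor
 * carries up to 24 bytes of header data and each extension descriptor up
 * to 29 bytes. A 54-byte Ethernet + IPv4 + TCP header block, for example,
 * is split 24 + 29 + 1 and therefore needs three descriptors.
 */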
1460 
1461 /**
1462  * build_hdr_descs_arr - build a header descriptor array
1463  * @txbuff: tx buffer holding the skb and the indirect descriptor array
1464  * @num_entries: pointer to the descriptor count, incremented for each
1465  *		 header descriptor added
1466  * @hdr_field: bit field determining which headers will be sent
1467  *
1468  * This function will build a TX descriptor array with applicable
1469  * L2/L3/L4 packet header descriptors to be sent by send_subcrq_indirect.
1470  */
1471 
1472 static void build_hdr_descs_arr(struct ibmvnic_tx_buff *txbuff,
1473 				int *num_entries, u8 hdr_field)
1474 {
1475 	int hdr_len[3] = {0, 0, 0};
1476 	int tot_len;
1477 	u8 *hdr_data = txbuff->hdr_data;
1478 
1479 	tot_len = build_hdr_data(hdr_field, txbuff->skb, hdr_len,
1480 				 txbuff->hdr_data);
1481 	*num_entries += create_hdr_descs(hdr_field, hdr_data, tot_len, hdr_len,
1482 			 txbuff->indir_arr + 1);
1483 }
1484 
1485 static int ibmvnic_xmit_workarounds(struct sk_buff *skb,
1486 				    struct net_device *netdev)
1487 {
1488 	/* For some backing devices, mishandling of small packets
1489 	 * can result in a loss of connection or TX stall. Device
1490 	 * architects recommend that no packet should be smaller
1491 	 * than the minimum MTU value provided to the driver, so
1492 	 * pad any packets to that length
1493 	 */
1494 	if (skb->len < netdev->min_mtu)
1495 		return skb_put_padto(skb, netdev->min_mtu);
1496 
1497 	return 0;
1498 }
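
/* Illustration: with a (hypothetical) min_mtu of 64, a 42-byte ARP
 * request would be zero-padded to 64 bytes by skb_put_padto() before
 * being copied into the long term buffer; the real minimum comes from
 * the value the device reported to the driver.
 */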
1499 
1500 static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
1501 {
1502 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1503 	int queue_num = skb_get_queue_mapping(skb);
1504 	u8 *hdrs = (u8 *)&adapter->tx_rx_desc_req;
1505 	struct device *dev = &adapter->vdev->dev;
1506 	struct ibmvnic_tx_buff *tx_buff = NULL;
1507 	struct ibmvnic_sub_crq_queue *tx_scrq;
1508 	struct ibmvnic_tx_pool *tx_pool;
1509 	unsigned int tx_send_failed = 0;
1510 	unsigned int tx_map_failed = 0;
1511 	unsigned int tx_dropped = 0;
1512 	unsigned int tx_packets = 0;
1513 	unsigned int tx_bytes = 0;
1514 	dma_addr_t data_dma_addr;
1515 	struct netdev_queue *txq;
1516 	unsigned long lpar_rc;
1517 	union sub_crq tx_crq;
1518 	unsigned int offset;
1519 	int num_entries = 1;
1520 	unsigned char *dst;
1521 	int index = 0;
1522 	u8 proto = 0;
1523 	u64 handle;
1524 	netdev_tx_t ret = NETDEV_TX_OK;
1525 
1526 	if (test_bit(0, &adapter->resetting)) {
1527 		if (!netif_subqueue_stopped(netdev, skb))
1528 			netif_stop_subqueue(netdev, queue_num);
1529 		dev_kfree_skb_any(skb);
1530 
1531 		tx_send_failed++;
1532 		tx_dropped++;
1533 		ret = NETDEV_TX_OK;
1534 		goto out;
1535 	}
1536 
1537 	if (ibmvnic_xmit_workarounds(skb, netdev)) {
1538 		tx_dropped++;
1539 		tx_send_failed++;
1540 		ret = NETDEV_TX_OK;
1541 		goto out;
1542 	}
1543 	if (skb_is_gso(skb))
1544 		tx_pool = &adapter->tso_pool[queue_num];
1545 	else
1546 		tx_pool = &adapter->tx_pool[queue_num];
1547 
1548 	tx_scrq = adapter->tx_scrq[queue_num];
1549 	txq = netdev_get_tx_queue(netdev, skb_get_queue_mapping(skb));
1550 	handle = tx_scrq->handle;
1551 
1552 	index = tx_pool->free_map[tx_pool->consumer_index];
1553 
1554 	if (index == IBMVNIC_INVALID_MAP) {
1555 		dev_kfree_skb_any(skb);
1556 		tx_send_failed++;
1557 		tx_dropped++;
1558 		ret = NETDEV_TX_OK;
1559 		goto out;
1560 	}
1561 
1562 	tx_pool->free_map[tx_pool->consumer_index] = IBMVNIC_INVALID_MAP;
1563 
1564 	offset = index * tx_pool->buf_size;
1565 	dst = tx_pool->long_term_buff.buff + offset;
1566 	memset(dst, 0, tx_pool->buf_size);
1567 	data_dma_addr = tx_pool->long_term_buff.addr + offset;
1568 
1569 	if (skb_shinfo(skb)->nr_frags) {
1570 		int cur, i;
1571 
1572 		/* Copy the head */
1573 		skb_copy_from_linear_data(skb, dst, skb_headlen(skb));
1574 		cur = skb_headlen(skb);
1575 
1576 		/* Copy the frags */
1577 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1578 			const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1579 
1580 			memcpy(dst + cur,
1581 			       page_address(skb_frag_page(frag)) +
1582 			       skb_frag_off(frag), skb_frag_size(frag));
1583 			cur += skb_frag_size(frag);
1584 		}
1585 	} else {
1586 		skb_copy_from_linear_data(skb, dst, skb->len);
1587 	}
1588 
1589 	tx_pool->consumer_index =
1590 	    (tx_pool->consumer_index + 1) % tx_pool->num_buffers;
1591 
1592 	tx_buff = &tx_pool->tx_buff[index];
1593 	tx_buff->skb = skb;
1594 	tx_buff->data_dma[0] = data_dma_addr;
1595 	tx_buff->data_len[0] = skb->len;
1596 	tx_buff->index = index;
1597 	tx_buff->pool_index = queue_num;
1598 	tx_buff->last_frag = true;
1599 
1600 	memset(&tx_crq, 0, sizeof(tx_crq));
1601 	tx_crq.v1.first = IBMVNIC_CRQ_CMD;
1602 	tx_crq.v1.type = IBMVNIC_TX_DESC;
1603 	tx_crq.v1.n_crq_elem = 1;
1604 	tx_crq.v1.n_sge = 1;
1605 	tx_crq.v1.flags1 = IBMVNIC_TX_COMP_NEEDED;
1606 
1607 	if (skb_is_gso(skb))
1608 		tx_crq.v1.correlator =
1609 			cpu_to_be32(index | IBMVNIC_TSO_POOL_MASK);
1610 	else
1611 		tx_crq.v1.correlator = cpu_to_be32(index);
1612 	tx_crq.v1.dma_reg = cpu_to_be16(tx_pool->long_term_buff.map_id);
1613 	tx_crq.v1.sge_len = cpu_to_be32(skb->len);
1614 	tx_crq.v1.ioba = cpu_to_be64(data_dma_addr);
1615 
1616 	if (adapter->vlan_header_insertion && skb_vlan_tag_present(skb)) {
1617 		tx_crq.v1.flags2 |= IBMVNIC_TX_VLAN_INSERT;
1618 		tx_crq.v1.vlan_id = cpu_to_be16(skb->vlan_tci);
1619 	}
1620 
1621 	if (skb->protocol == htons(ETH_P_IP)) {
1622 		tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV4;
1623 		proto = ip_hdr(skb)->protocol;
1624 	} else if (skb->protocol == htons(ETH_P_IPV6)) {
1625 		tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV6;
1626 		proto = ipv6_hdr(skb)->nexthdr;
1627 	}
1628 
1629 	if (proto == IPPROTO_TCP)
1630 		tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_TCP;
1631 	else if (proto == IPPROTO_UDP)
1632 		tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_UDP;
1633 
1634 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
1635 		tx_crq.v1.flags1 |= IBMVNIC_TX_CHKSUM_OFFLOAD;
1636 		hdrs += 2;
1637 	}
1638 	if (skb_is_gso(skb)) {
1639 		tx_crq.v1.flags1 |= IBMVNIC_TX_LSO;
1640 		tx_crq.v1.mss = cpu_to_be16(skb_shinfo(skb)->gso_size);
1641 		hdrs += 2;
1642 	}
1643 	/* determine if l2/3/4 headers are sent to firmware */
1644 	if ((*hdrs >> 7) & 1) {
1645 		build_hdr_descs_arr(tx_buff, &num_entries, *hdrs);
1646 		tx_crq.v1.n_crq_elem = num_entries;
1647 		tx_buff->num_entries = num_entries;
1648 		tx_buff->indir_arr[0] = tx_crq;
1649 		tx_buff->indir_dma = dma_map_single(dev, tx_buff->indir_arr,
1650 						    sizeof(tx_buff->indir_arr),
1651 						    DMA_TO_DEVICE);
1652 		if (dma_mapping_error(dev, tx_buff->indir_dma)) {
1653 			dev_kfree_skb_any(skb);
1654 			tx_buff->skb = NULL;
1655 			if (!firmware_has_feature(FW_FEATURE_CMO))
1656 				dev_err(dev, "tx: unable to map descriptor array\n");
1657 			tx_map_failed++;
1658 			tx_dropped++;
1659 			ret = NETDEV_TX_OK;
1660 			goto tx_err_out;
1661 		}
1662 		lpar_rc = send_subcrq_indirect(adapter, handle,
1663 					       (u64)tx_buff->indir_dma,
1664 					       (u64)num_entries);
1665 		dma_unmap_single(dev, tx_buff->indir_dma,
1666 				 sizeof(tx_buff->indir_arr), DMA_TO_DEVICE);
1667 	} else {
1668 		tx_buff->num_entries = num_entries;
1669 		lpar_rc = send_subcrq(adapter, handle,
1670 				      &tx_crq);
1671 	}
1672 	if (lpar_rc != H_SUCCESS) {
1673 		if (lpar_rc != H_CLOSED && lpar_rc != H_PARAMETER)
1674 			dev_err_ratelimited(dev, "tx: send failed\n");
1675 		dev_kfree_skb_any(skb);
1676 		tx_buff->skb = NULL;
1677 
1678 		if (lpar_rc == H_CLOSED || adapter->failover_pending) {
1679 			/* Disable TX and report carrier off if queue is closed
1680 			 * or pending failover.
1681 			 * Firmware guarantees that a signal will be sent to the
1682 			 * driver, triggering a reset or some other action.
1683 			 */
1684 			netif_tx_stop_all_queues(netdev);
1685 			netif_carrier_off(netdev);
1686 		}
1687 
1688 		tx_send_failed++;
1689 		tx_dropped++;
1690 		ret = NETDEV_TX_OK;
1691 		goto tx_err_out;
1692 	}
1693 
1694 	if (atomic_add_return(num_entries, &tx_scrq->used)
1695 					>= adapter->req_tx_entries_per_subcrq) {
1696 		netdev_dbg(netdev, "Stopping queue %d\n", queue_num);
1697 		netif_stop_subqueue(netdev, queue_num);
1698 	}
1699 
1700 	tx_packets++;
1701 	tx_bytes += skb->len;
1702 	txq->trans_start = jiffies;
1703 	ret = NETDEV_TX_OK;
1704 	goto out;
1705 
1706 tx_err_out:
1707 	/* roll back consumer index and map array */
1708 	if (tx_pool->consumer_index == 0)
1709 		tx_pool->consumer_index =
1710 			tx_pool->num_buffers - 1;
1711 	else
1712 		tx_pool->consumer_index--;
1713 	tx_pool->free_map[tx_pool->consumer_index] = index;
1714 out:
1715 	netdev->stats.tx_dropped += tx_dropped;
1716 	netdev->stats.tx_bytes += tx_bytes;
1717 	netdev->stats.tx_packets += tx_packets;
1718 	adapter->tx_send_failed += tx_send_failed;
1719 	adapter->tx_map_failed += tx_map_failed;
1720 	adapter->tx_stats_buffers[queue_num].packets += tx_packets;
1721 	adapter->tx_stats_buffers[queue_num].bytes += tx_bytes;
1722 	adapter->tx_stats_buffers[queue_num].dropped_packets += tx_dropped;
1723 
1724 	return ret;
1725 }
1726 
1727 static void ibmvnic_set_multi(struct net_device *netdev)
1728 {
1729 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1730 	struct netdev_hw_addr *ha;
1731 	union ibmvnic_crq crq;
1732 
1733 	memset(&crq, 0, sizeof(crq));
1734 	crq.request_capability.first = IBMVNIC_CRQ_CMD;
1735 	crq.request_capability.cmd = REQUEST_CAPABILITY;
1736 
1737 	if (netdev->flags & IFF_PROMISC) {
1738 		if (!adapter->promisc_supported)
1739 			return;
1740 	} else {
1741 		if (netdev->flags & IFF_ALLMULTI) {
1742 			/* Accept all multicast */
1743 			memset(&crq, 0, sizeof(crq));
1744 			crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
1745 			crq.multicast_ctrl.cmd = MULTICAST_CTRL;
1746 			crq.multicast_ctrl.flags = IBMVNIC_ENABLE_ALL;
1747 			ibmvnic_send_crq(adapter, &crq);
1748 		} else if (netdev_mc_empty(netdev)) {
1749 			/* Reject all multicast */
1750 			memset(&crq, 0, sizeof(crq));
1751 			crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
1752 			crq.multicast_ctrl.cmd = MULTICAST_CTRL;
1753 			crq.multicast_ctrl.flags = IBMVNIC_DISABLE_ALL;
1754 			ibmvnic_send_crq(adapter, &crq);
1755 		} else {
1756 			/* Accept one or more multicast(s) */
1757 			netdev_for_each_mc_addr(ha, netdev) {
1758 				memset(&crq, 0, sizeof(crq));
1759 				crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
1760 				crq.multicast_ctrl.cmd = MULTICAST_CTRL;
1761 				crq.multicast_ctrl.flags = IBMVNIC_ENABLE_MC;
1762 				ether_addr_copy(&crq.multicast_ctrl.mac_addr[0],
1763 						ha->addr);
1764 				ibmvnic_send_crq(adapter, &crq);
1765 			}
1766 		}
1767 	}
1768 }
1769 
1770 static int __ibmvnic_set_mac(struct net_device *netdev, u8 *dev_addr)
1771 {
1772 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1773 	union ibmvnic_crq crq;
1774 	int rc;
1775 
1776 	if (!is_valid_ether_addr(dev_addr)) {
1777 		rc = -EADDRNOTAVAIL;
1778 		goto err;
1779 	}
1780 
1781 	memset(&crq, 0, sizeof(crq));
1782 	crq.change_mac_addr.first = IBMVNIC_CRQ_CMD;
1783 	crq.change_mac_addr.cmd = CHANGE_MAC_ADDR;
1784 	ether_addr_copy(&crq.change_mac_addr.mac_addr[0], dev_addr);
1785 
1786 	mutex_lock(&adapter->fw_lock);
1787 	adapter->fw_done_rc = 0;
1788 	reinit_completion(&adapter->fw_done);
1789 
1790 	rc = ibmvnic_send_crq(adapter, &crq);
1791 	if (rc) {
1792 		rc = -EIO;
1793 		mutex_unlock(&adapter->fw_lock);
1794 		goto err;
1795 	}
1796 
1797 	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
1798 	/* netdev->dev_addr is changed in handle_change_mac_rsp function */
1799 	if (rc || adapter->fw_done_rc) {
1800 		rc = -EIO;
1801 		mutex_unlock(&adapter->fw_lock);
1802 		goto err;
1803 	}
1804 	mutex_unlock(&adapter->fw_lock);
1805 	return 0;
1806 err:
1807 	ether_addr_copy(adapter->mac_addr, netdev->dev_addr);
1808 	return rc;
1809 }
1810 
1811 static int ibmvnic_set_mac(struct net_device *netdev, void *p)
1812 {
1813 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1814 	struct sockaddr *addr = p;
1815 	int rc;
1816 
1817 	rc = 0;
1818 	ether_addr_copy(adapter->mac_addr, addr->sa_data);
1819 	if (adapter->state != VNIC_PROBED)
1820 		rc = __ibmvnic_set_mac(netdev, addr->sa_data);
1821 
1822 	return rc;
1823 }
1824 
1825 /**
1826  * do_change_param_reset returns zero if we are able to keep processing reset
1827  * events, or non-zero if we hit a fatal error and must halt.
1828  */
1829 static int do_change_param_reset(struct ibmvnic_adapter *adapter,
1830 				 struct ibmvnic_rwi *rwi,
1831 				 u32 reset_state)
1832 {
1833 	struct net_device *netdev = adapter->netdev;
1834 	int i, rc;
1835 
1836 	netdev_dbg(adapter->netdev, "Change param resetting driver (%d)\n",
1837 		   rwi->reset_reason);
1838 
1839 	netif_carrier_off(netdev);
1840 	adapter->reset_reason = rwi->reset_reason;
1841 
1842 	ibmvnic_cleanup(netdev);
1843 
1844 	if (reset_state == VNIC_OPEN) {
1845 		rc = __ibmvnic_close(netdev);
1846 		if (rc)
1847 			return rc;
1848 	}
1849 
1850 	release_resources(adapter);
1851 	release_sub_crqs(adapter, 1);
1852 	release_crq_queue(adapter);
1853 
1854 	adapter->state = VNIC_PROBED;
1855 
1856 	rc = init_crq_queue(adapter);
1857 
1858 	if (rc) {
1859 		netdev_err(adapter->netdev,
1860 			   "Couldn't initialize crq. rc=%d\n", rc);
1861 		return rc;
1862 	}
1863 
1864 	rc = ibmvnic_reset_init(adapter, true);
1865 	if (rc)
1866 		return IBMVNIC_INIT_FAILED;
1867 
1868 	/* If the adapter was in PROBE state prior to the reset,
1869 	 * exit here.
1870 	 */
1871 	if (reset_state == VNIC_PROBED)
1872 		return 0;
1873 
1874 	rc = ibmvnic_login(netdev);
1875 	if (rc) {
1876 		adapter->state = reset_state;
1877 		return rc;
1878 	}
1879 
1880 	rc = init_resources(adapter);
1881 	if (rc)
1882 		return rc;
1883 
1884 	ibmvnic_disable_irqs(adapter);
1885 
1886 	adapter->state = VNIC_CLOSED;
1887 
1888 	if (reset_state == VNIC_CLOSED)
1889 		return 0;
1890 
1891 	rc = __ibmvnic_open(netdev);
1892 	if (rc)
1893 		return IBMVNIC_OPEN_FAILED;
1894 
1895 	/* refresh device's multicast list */
1896 	ibmvnic_set_multi(netdev);
1897 
1898 	/* kick napi */
1899 	for (i = 0; i < adapter->req_rx_queues; i++)
1900 		napi_schedule(&adapter->napi[i]);
1901 
1902 	return 0;
1903 }
1904 
1905 /**
1906  * do_reset returns zero if we are able to keep processing reset events, or
1907  * non-zero if we hit a fatal error and must halt.
1908  */
1909 static int do_reset(struct ibmvnic_adapter *adapter,
1910 		    struct ibmvnic_rwi *rwi, u32 reset_state)
1911 {
1912 	u64 old_num_rx_queues, old_num_tx_queues;
1913 	u64 old_num_rx_slots, old_num_tx_slots;
1914 	struct net_device *netdev = adapter->netdev;
1915 	int i, rc;
1916 
1917 	netdev_dbg(adapter->netdev, "Re-setting driver (%d)\n",
1918 		   rwi->reset_reason);
1919 
1920 	rtnl_lock();
1921 
1922 	netif_carrier_off(netdev);
1923 	adapter->reset_reason = rwi->reset_reason;
1924 
1925 	old_num_rx_queues = adapter->req_rx_queues;
1926 	old_num_tx_queues = adapter->req_tx_queues;
1927 	old_num_rx_slots = adapter->req_rx_add_entries_per_subcrq;
1928 	old_num_tx_slots = adapter->req_tx_entries_per_subcrq;
1929 
1930 	ibmvnic_cleanup(netdev);
1931 
1932 	if (reset_state == VNIC_OPEN &&
1933 	    adapter->reset_reason != VNIC_RESET_MOBILITY &&
1934 	    adapter->reset_reason != VNIC_RESET_FAILOVER) {
1935 		adapter->state = VNIC_CLOSING;
1936 
1937 		/* Release the RTNL lock before link state change and
1938 		 * re-acquire after the link state change to allow
1939 		 * linkwatch_event to grab the RTNL lock and run during
1940 		 * a reset.
1941 		 */
1942 		rtnl_unlock();
1943 		rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN);
1944 		rtnl_lock();
1945 		if (rc)
1946 			goto out;
1947 
1948 		if (adapter->state != VNIC_CLOSING) {
1949 			rc = -1;
1950 			goto out;
1951 		}
1952 
1953 		adapter->state = VNIC_CLOSED;
1954 	}
1955 
1956 	if (adapter->reset_reason != VNIC_RESET_NON_FATAL) {
1957 		/* remove the closed state so when we call open it appears
1958 		 * we are coming from the probed state.
1959 		 */
1960 		adapter->state = VNIC_PROBED;
1961 
1962 		if (adapter->reset_reason == VNIC_RESET_MOBILITY) {
1963 			rc = ibmvnic_reenable_crq_queue(adapter);
1964 			release_sub_crqs(adapter, 1);
1965 		} else {
1966 			rc = ibmvnic_reset_crq(adapter);
1967 			if (rc == H_CLOSED || rc == H_SUCCESS) {
1968 				rc = vio_enable_interrupts(adapter->vdev);
1969 				if (rc)
1970 					netdev_err(adapter->netdev,
1971 						   "Reset failed to enable interrupts. rc=%d\n",
1972 						   rc);
1973 			}
1974 		}
1975 
1976 		if (rc) {
1977 			netdev_err(adapter->netdev,
1978 				   "Reset couldn't initialize crq. rc=%d\n", rc);
1979 			goto out;
1980 		}
1981 
1982 		rc = ibmvnic_reset_init(adapter, true);
1983 		if (rc) {
1984 			rc = IBMVNIC_INIT_FAILED;
1985 			goto out;
1986 		}
1987 
1988 		/* If the adapter was in PROBE state prior to the reset,
1989 		 * exit here.
1990 		 */
1991 		if (reset_state == VNIC_PROBED) {
1992 			rc = 0;
1993 			goto out;
1994 		}
1995 
1996 		rc = ibmvnic_login(netdev);
1997 		if (rc) {
1998 			adapter->state = reset_state;
1999 			goto out;
2000 		}
2001 
2002 		if (adapter->req_rx_queues != old_num_rx_queues ||
2003 		    adapter->req_tx_queues != old_num_tx_queues ||
2004 		    adapter->req_rx_add_entries_per_subcrq !=
2005 		    old_num_rx_slots ||
2006 		    adapter->req_tx_entries_per_subcrq !=
2007 		    old_num_tx_slots ||
2008 		    !adapter->rx_pool ||
2009 		    !adapter->tso_pool ||
2010 		    !adapter->tx_pool) {
2011 			release_rx_pools(adapter);
2012 			release_tx_pools(adapter);
2013 			release_napi(adapter);
2014 			release_vpd_data(adapter);
2015 
2016 			rc = init_resources(adapter);
2017 			if (rc)
2018 				goto out;
2019 
2020 		} else {
2021 			rc = reset_tx_pools(adapter);
			if (rc) {
				netdev_dbg(adapter->netdev, "reset tx pools failed (%d)\n",
					   rc);
				goto out;
			}
2026 
2027 			rc = reset_rx_pools(adapter);
			if (rc) {
				netdev_dbg(adapter->netdev, "reset rx pools failed (%d)\n",
					   rc);
				goto out;
			}
2032 		}
2033 		ibmvnic_disable_irqs(adapter);
2034 	}
2035 	adapter->state = VNIC_CLOSED;
2036 
2037 	if (reset_state == VNIC_CLOSED) {
2038 		rc = 0;
2039 		goto out;
2040 	}
2041 
2042 	rc = __ibmvnic_open(netdev);
2043 	if (rc) {
2044 		rc = IBMVNIC_OPEN_FAILED;
2045 		goto out;
2046 	}
2047 
2048 	/* refresh device's multicast list */
2049 	ibmvnic_set_multi(netdev);
2050 
2051 	/* kick napi */
2052 	for (i = 0; i < adapter->req_rx_queues; i++)
2053 		napi_schedule(&adapter->napi[i]);
2054 
2055 	if (adapter->reset_reason != VNIC_RESET_FAILOVER)
2056 		call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, netdev);
2057 
2058 	rc = 0;
2059 
2060 out:
2061 	rtnl_unlock();
2062 
2063 	return rc;
2064 }
2065 
2066 static int do_hard_reset(struct ibmvnic_adapter *adapter,
2067 			 struct ibmvnic_rwi *rwi, u32 reset_state)
2068 {
2069 	struct net_device *netdev = adapter->netdev;
2070 	int rc;
2071 
2072 	netdev_dbg(adapter->netdev, "Hard resetting driver (%d)\n",
2073 		   rwi->reset_reason);
2074 
2075 	netif_carrier_off(netdev);
2076 	adapter->reset_reason = rwi->reset_reason;
2077 
2078 	ibmvnic_cleanup(netdev);
2079 	release_resources(adapter);
2080 	release_sub_crqs(adapter, 0);
2081 	release_crq_queue(adapter);
2082 
2083 	/* remove the closed state so when we call open it appears
2084 	 * we are coming from the probed state.
2085 	 */
2086 	adapter->state = VNIC_PROBED;
2087 
2088 	reinit_completion(&adapter->init_done);
2089 	rc = init_crq_queue(adapter);
2090 	if (rc) {
2091 		netdev_err(adapter->netdev,
2092 			   "Couldn't initialize crq. rc=%d\n", rc);
2093 		return rc;
2094 	}
2095 
2096 	rc = ibmvnic_reset_init(adapter, false);
2097 	if (rc)
2098 		return rc;
2099 
2100 	/* If the adapter was in PROBE state prior to the reset,
2101 	 * exit here.
2102 	 */
2103 	if (reset_state == VNIC_PROBED)
2104 		return 0;
2105 
2106 	rc = ibmvnic_login(netdev);
2107 	if (rc) {
2108 		adapter->state = VNIC_PROBED;
2109 		return 0;
2110 	}
2111 
2112 	rc = init_resources(adapter);
2113 	if (rc)
2114 		return rc;
2115 
2116 	ibmvnic_disable_irqs(adapter);
2117 	adapter->state = VNIC_CLOSED;
2118 
2119 	if (reset_state == VNIC_CLOSED)
2120 		return 0;
2121 
2122 	rc = __ibmvnic_open(netdev);
2123 	if (rc)
2124 		return IBMVNIC_OPEN_FAILED;
2125 
2126 	return 0;
2127 }
2128 
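/* Pop the oldest reset work item off the adapter's queue under
 * rwi_lock, or return NULL if none are pending. The caller owns
 * (and must free) the returned item.
 */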
2129 static struct ibmvnic_rwi *get_next_rwi(struct ibmvnic_adapter *adapter)
2130 {
2131 	struct ibmvnic_rwi *rwi;
2132 	unsigned long flags;
2133 
2134 	spin_lock_irqsave(&adapter->rwi_lock, flags);
2135 
2136 	if (!list_empty(&adapter->rwi_list)) {
2137 		rwi = list_first_entry(&adapter->rwi_list, struct ibmvnic_rwi,
2138 				       list);
2139 		list_del(&rwi->list);
2140 	} else {
2141 		rwi = NULL;
2142 	}
2143 
2144 	spin_unlock_irqrestore(&adapter->rwi_lock, flags);
2145 	return rwi;
2146 }
2147 
2148 static void free_all_rwi(struct ibmvnic_adapter *adapter)
2149 {
2150 	struct ibmvnic_rwi *rwi;
2151 
2152 	rwi = get_next_rwi(adapter);
2153 	while (rwi) {
2154 		kfree(rwi);
2155 		rwi = get_next_rwi(adapter);
2156 	}
2157 }
2158 
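/* Reset worker: drain the queue of reset work items, saving the
 * adapter state once before the first reset so it can be restored
 * afterwards. CHANGE_PARAM resets run with the requestor already
 * holding the RTNL lock, and a transport event during a previous
 * reset forces a hard reset instead. If another reset is already in
 * progress, re-schedule via the delayed work path rather than spin.
 */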
2159 static void __ibmvnic_reset(struct work_struct *work)
2160 {
2161 	struct ibmvnic_rwi *rwi;
2162 	struct ibmvnic_adapter *adapter;
2163 	bool saved_state = false;
2164 	unsigned long flags;
2165 	u32 reset_state;
2166 	int rc = 0;
2167 
2168 	adapter = container_of(work, struct ibmvnic_adapter, ibmvnic_reset);
2169 
2170 	if (test_and_set_bit_lock(0, &adapter->resetting)) {
2171 		schedule_delayed_work(&adapter->ibmvnic_delayed_reset,
2172 				      IBMVNIC_RESET_DELAY);
2173 		return;
2174 	}
2175 
2176 	rwi = get_next_rwi(adapter);
2177 	while (rwi) {
2178 		spin_lock_irqsave(&adapter->state_lock, flags);
2179 
2180 		if (adapter->state == VNIC_REMOVING ||
2181 		    adapter->state == VNIC_REMOVED) {
2182 			spin_unlock_irqrestore(&adapter->state_lock, flags);
2183 			kfree(rwi);
2184 			rc = EBUSY;
2185 			break;
2186 		}
2187 
2188 		if (!saved_state) {
2189 			reset_state = adapter->state;
2190 			adapter->state = VNIC_RESETTING;
2191 			saved_state = true;
2192 		}
2193 		spin_unlock_irqrestore(&adapter->state_lock, flags);
2194 
2195 		if (rwi->reset_reason == VNIC_RESET_CHANGE_PARAM) {
2196 			/* CHANGE_PARAM requestor holds rtnl_lock */
2197 			rc = do_change_param_reset(adapter, rwi, reset_state);
2198 		} else if (adapter->force_reset_recovery) {
2199 			/* Transport event occurred during previous reset */
2200 			if (adapter->wait_for_reset) {
2201 				/* Previous was CHANGE_PARAM; caller locked */
2202 				adapter->force_reset_recovery = false;
2203 				rc = do_hard_reset(adapter, rwi, reset_state);
2204 			} else {
2205 				rtnl_lock();
2206 				adapter->force_reset_recovery = false;
2207 				rc = do_hard_reset(adapter, rwi, reset_state);
2208 				rtnl_unlock();
2209 			}
2210 		} else if (!(rwi->reset_reason == VNIC_RESET_FATAL &&
2211 				adapter->from_passive_init)) {
2212 			rc = do_reset(adapter, rwi, reset_state);
2213 		}
2214 		kfree(rwi);
2215 		if (rc == IBMVNIC_OPEN_FAILED) {
2216 			if (list_empty(&adapter->rwi_list))
2217 				adapter->state = VNIC_CLOSED;
2218 			else
2219 				adapter->state = reset_state;
2220 			rc = 0;
2221 		} else if (rc && rc != IBMVNIC_INIT_FAILED &&
2222 		    !adapter->force_reset_recovery)
2223 			break;
2224 
2225 		rwi = get_next_rwi(adapter);
2226 
2227 		if (rwi && (rwi->reset_reason == VNIC_RESET_FAILOVER ||
2228 			    rwi->reset_reason == VNIC_RESET_MOBILITY))
2229 			adapter->force_reset_recovery = true;
2230 	}
2231 
2232 	if (adapter->wait_for_reset) {
2233 		adapter->reset_done_rc = rc;
2234 		complete(&adapter->reset_done);
2235 	}
2236 
2237 	if (rc) {
2238 		netdev_dbg(adapter->netdev, "Reset failed\n");
2239 		free_all_rwi(adapter);
2240 	}
2241 
2242 	clear_bit_unlock(0, &adapter->resetting);
2243 }
2244 
2245 static void __ibmvnic_delayed_reset(struct work_struct *work)
2246 {
2247 	struct ibmvnic_adapter *adapter;
2248 
2249 	adapter = container_of(work, struct ibmvnic_adapter,
2250 			       ibmvnic_delayed_reset.work);
2251 	__ibmvnic_reset(&adapter->ibmvnic_reset);
2252 }
2253 
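/* Queue a reset work item for the given reason and kick the reset
 * worker. A reason already queued is dropped as a duplicate, and a
 * pending transport event flushes the queue so recovery is processed
 * first. Returns 0 or a negative errno.
 */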
2254 static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
2255 			 enum ibmvnic_reset_reason reason)
2256 {
2257 	struct list_head *entry, *tmp_entry;
2258 	struct ibmvnic_rwi *rwi, *tmp;
2259 	struct net_device *netdev = adapter->netdev;
2260 	unsigned long flags;
2261 	int ret;
2262 
2263 	if (adapter->state == VNIC_REMOVING ||
2264 	    adapter->state == VNIC_REMOVED ||
2265 	    adapter->failover_pending) {
2266 		ret = EBUSY;
2267 		netdev_dbg(netdev, "Adapter removing or pending failover, skipping reset\n");
2268 		goto err;
2269 	}
2270 
2271 	if (adapter->state == VNIC_PROBING) {
2272 		netdev_warn(netdev, "Adapter reset during probe\n");
2273 		ret = adapter->init_done_rc = EAGAIN;
2274 		goto err;
2275 	}
2276 
2277 	spin_lock_irqsave(&adapter->rwi_lock, flags);
2278 
2279 	list_for_each(entry, &adapter->rwi_list) {
2280 		tmp = list_entry(entry, struct ibmvnic_rwi, list);
2281 		if (tmp->reset_reason == reason) {
2282 			netdev_dbg(netdev, "Skipping matching reset\n");
2283 			spin_unlock_irqrestore(&adapter->rwi_lock, flags);
2284 			ret = EBUSY;
2285 			goto err;
2286 		}
2287 	}
2288 
2289 	rwi = kzalloc(sizeof(*rwi), GFP_ATOMIC);
2290 	if (!rwi) {
2291 		spin_unlock_irqrestore(&adapter->rwi_lock, flags);
2292 		ibmvnic_close(netdev);
2293 		ret = ENOMEM;
2294 		goto err;
2295 	}
2296 	/* if we just received a transport event,
2297 	 * flush reset queue and process this reset
2298 	 */
2299 	if (adapter->force_reset_recovery && !list_empty(&adapter->rwi_list)) {
2300 		list_for_each_safe(entry, tmp_entry, &adapter->rwi_list)
2301 			list_del(entry);
2302 	}
2303 	rwi->reset_reason = reason;
2304 	list_add_tail(&rwi->list, &adapter->rwi_list);
2305 	spin_unlock_irqrestore(&adapter->rwi_lock, flags);
2306 	netdev_dbg(adapter->netdev, "Scheduling reset (reason %d)\n", reason);
2307 	schedule_work(&adapter->ibmvnic_reset);
2308 
2309 	return 0;
2310 err:
2311 	return -ret;
2312 }
2313 
2314 static void ibmvnic_tx_timeout(struct net_device *dev, unsigned int txqueue)
2315 {
2316 	struct ibmvnic_adapter *adapter = netdev_priv(dev);
2317 
2318 	ibmvnic_reset(adapter, VNIC_RESET_TIMEOUT);
2319 }
2320 
2321 static void remove_buff_from_pool(struct ibmvnic_adapter *adapter,
2322 				  struct ibmvnic_rx_buff *rx_buff)
2323 {
2324 	struct ibmvnic_rx_pool *pool = &adapter->rx_pool[rx_buff->pool_index];
2325 
2326 	rx_buff->skb = NULL;
2327 
2328 	pool->free_map[pool->next_alloc] = (int)(rx_buff - pool->rx_buff);
2329 	pool->next_alloc = (pool->next_alloc + 1) % pool->size;
2330 
2331 	atomic_dec(&pool->available);
2332 }
2333 
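/* NAPI poll function. Completed receives are copied from the long
 * term mapped rx buffers into their skbs, VLAN tags are re-inserted
 * where firmware stripped them, and the frames handed to GRO. The
 * queue interrupt is re-enabled once we finish under budget, with a
 * recheck for completions that arrived in the meantime.
 */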
2334 static int ibmvnic_poll(struct napi_struct *napi, int budget)
2335 {
2336 	struct net_device *netdev = napi->dev;
2337 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2338 	int scrq_num = (int)(napi - adapter->napi);
2339 	int frames_processed = 0;
2340 
2341 restart_poll:
2342 	while (frames_processed < budget) {
2343 		struct sk_buff *skb;
2344 		struct ibmvnic_rx_buff *rx_buff;
2345 		union sub_crq *next;
2346 		u32 length;
2347 		u16 offset;
2348 		u8 flags = 0;
2349 
2350 		if (unlikely(test_bit(0, &adapter->resetting) &&
2351 			     adapter->reset_reason != VNIC_RESET_NON_FATAL)) {
2352 			enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
2353 			napi_complete_done(napi, frames_processed);
2354 			return frames_processed;
2355 		}
2356 
2357 		if (!pending_scrq(adapter, adapter->rx_scrq[scrq_num]))
2358 			break;
2359 		next = ibmvnic_next_scrq(adapter, adapter->rx_scrq[scrq_num]);
		rx_buff = (struct ibmvnic_rx_buff *)
			  be64_to_cpu(next->rx_comp.correlator);
2363 		/* do error checking */
2364 		if (next->rx_comp.rc) {
2365 			netdev_dbg(netdev, "rx buffer returned with rc %x\n",
2366 				   be16_to_cpu(next->rx_comp.rc));
2367 			/* free the entry */
2368 			next->rx_comp.first = 0;
2369 			dev_kfree_skb_any(rx_buff->skb);
2370 			remove_buff_from_pool(adapter, rx_buff);
2371 			continue;
2372 		} else if (!rx_buff->skb) {
2373 			/* free the entry */
2374 			next->rx_comp.first = 0;
2375 			remove_buff_from_pool(adapter, rx_buff);
2376 			continue;
2377 		}
2378 
2379 		length = be32_to_cpu(next->rx_comp.len);
2380 		offset = be16_to_cpu(next->rx_comp.off_frame_data);
2381 		flags = next->rx_comp.flags;
2382 		skb = rx_buff->skb;
2383 		skb_copy_to_linear_data(skb, rx_buff->data + offset,
2384 					length);
2385 
2386 		/* VLAN Header has been stripped by the system firmware and
2387 		 * needs to be inserted by the driver
2388 		 */
2389 		if (adapter->rx_vlan_header_insertion &&
2390 		    (flags & IBMVNIC_VLAN_STRIPPED))
2391 			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
2392 					       ntohs(next->rx_comp.vlan_tci));
2393 
2394 		/* free the entry */
2395 		next->rx_comp.first = 0;
2396 		remove_buff_from_pool(adapter, rx_buff);
2397 
2398 		skb_put(skb, length);
2399 		skb->protocol = eth_type_trans(skb, netdev);
2400 		skb_record_rx_queue(skb, scrq_num);
2401 
2402 		if (flags & IBMVNIC_IP_CHKSUM_GOOD &&
2403 		    flags & IBMVNIC_TCP_UDP_CHKSUM_GOOD) {
2404 			skb->ip_summed = CHECKSUM_UNNECESSARY;
2405 		}
2406 
2407 		length = skb->len;
2408 		napi_gro_receive(napi, skb); /* send it up */
2409 		netdev->stats.rx_packets++;
2410 		netdev->stats.rx_bytes += length;
2411 		adapter->rx_stats_buffers[scrq_num].packets++;
2412 		adapter->rx_stats_buffers[scrq_num].bytes += length;
2413 		frames_processed++;
2414 	}
2415 
2416 	if (adapter->state != VNIC_CLOSING)
2417 		replenish_rx_pool(adapter, &adapter->rx_pool[scrq_num]);
2418 
2419 	if (frames_processed < budget) {
2420 		enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
2421 		napi_complete_done(napi, frames_processed);
2422 		if (pending_scrq(adapter, adapter->rx_scrq[scrq_num]) &&
2423 		    napi_reschedule(napi)) {
2424 			disable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
2425 			goto restart_poll;
2426 		}
2427 	}
2428 	return frames_processed;
2429 }
2430 
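/* Trigger a CHANGE_PARAM reset to apply the values in
 * adapter->desired and wait up to 60 seconds for it to complete. If
 * the reset fails, restore the saved fallback parameters and reset
 * once more.
 */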
2431 static int wait_for_reset(struct ibmvnic_adapter *adapter)
2432 {
2433 	int rc, ret;
2434 
2435 	adapter->fallback.mtu = adapter->req_mtu;
2436 	adapter->fallback.rx_queues = adapter->req_rx_queues;
2437 	adapter->fallback.tx_queues = adapter->req_tx_queues;
2438 	adapter->fallback.rx_entries = adapter->req_rx_add_entries_per_subcrq;
2439 	adapter->fallback.tx_entries = adapter->req_tx_entries_per_subcrq;
2440 
2441 	reinit_completion(&adapter->reset_done);
2442 	adapter->wait_for_reset = true;
2443 	rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
2444 
2445 	if (rc) {
2446 		ret = rc;
2447 		goto out;
2448 	}
2449 	rc = ibmvnic_wait_for_completion(adapter, &adapter->reset_done, 60000);
2450 	if (rc) {
2451 		ret = -ENODEV;
2452 		goto out;
2453 	}
2454 
2455 	ret = 0;
2456 	if (adapter->reset_done_rc) {
2457 		ret = -EIO;
2458 		adapter->desired.mtu = adapter->fallback.mtu;
2459 		adapter->desired.rx_queues = adapter->fallback.rx_queues;
2460 		adapter->desired.tx_queues = adapter->fallback.tx_queues;
2461 		adapter->desired.rx_entries = adapter->fallback.rx_entries;
2462 		adapter->desired.tx_entries = adapter->fallback.tx_entries;
2463 
2464 		reinit_completion(&adapter->reset_done);
2465 		adapter->wait_for_reset = true;
2466 		rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
2467 		if (rc) {
2468 			ret = rc;
2469 			goto out;
2470 		}
2471 		rc = ibmvnic_wait_for_completion(adapter, &adapter->reset_done,
2472 						 60000);
2473 		if (rc) {
2474 			ret = -ENODEV;
2475 			goto out;
2476 		}
2477 	}
2478 out:
2479 	adapter->wait_for_reset = false;
2480 
2481 	return ret;
2482 }
2483 
2484 static int ibmvnic_change_mtu(struct net_device *netdev, int new_mtu)
2485 {
2486 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2487 
2488 	adapter->desired.mtu = new_mtu + ETH_HLEN;
2489 
2490 	return wait_for_reset(adapter);
2491 }
2492 
2493 static netdev_features_t ibmvnic_features_check(struct sk_buff *skb,
2494 						struct net_device *dev,
2495 						netdev_features_t features)
2496 {
	/* Some backing hardware adapters cannot
	 * handle packets with an MSS less than 224
	 * or with only one segment.
	 */
2501 	if (skb_is_gso(skb)) {
2502 		if (skb_shinfo(skb)->gso_size < 224 ||
2503 		    skb_shinfo(skb)->gso_segs == 1)
2504 			features &= ~NETIF_F_GSO_MASK;
2505 	}
2506 
2507 	return features;
2508 }
2509 
2510 static const struct net_device_ops ibmvnic_netdev_ops = {
2511 	.ndo_open		= ibmvnic_open,
2512 	.ndo_stop		= ibmvnic_close,
2513 	.ndo_start_xmit		= ibmvnic_xmit,
2514 	.ndo_set_rx_mode	= ibmvnic_set_multi,
2515 	.ndo_set_mac_address	= ibmvnic_set_mac,
2516 	.ndo_validate_addr	= eth_validate_addr,
2517 	.ndo_tx_timeout		= ibmvnic_tx_timeout,
2518 	.ndo_change_mtu		= ibmvnic_change_mtu,
2519 	.ndo_features_check     = ibmvnic_features_check,
2520 };
2521 
2522 /* ethtool functions */
2523 
2524 static int ibmvnic_get_link_ksettings(struct net_device *netdev,
2525 				      struct ethtool_link_ksettings *cmd)
2526 {
2527 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2528 	int rc;
2529 
2530 	rc = send_query_phys_parms(adapter);
2531 	if (rc) {
2532 		adapter->speed = SPEED_UNKNOWN;
2533 		adapter->duplex = DUPLEX_UNKNOWN;
2534 	}
2535 	cmd->base.speed = adapter->speed;
2536 	cmd->base.duplex = adapter->duplex;
2537 	cmd->base.port = PORT_FIBRE;
2538 	cmd->base.phy_address = 0;
2539 	cmd->base.autoneg = AUTONEG_ENABLE;
2540 
2541 	return 0;
2542 }
2543 
2544 static void ibmvnic_get_drvinfo(struct net_device *netdev,
2545 				struct ethtool_drvinfo *info)
2546 {
2547 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2548 
2549 	strlcpy(info->driver, ibmvnic_driver_name, sizeof(info->driver));
2550 	strlcpy(info->version, IBMVNIC_DRIVER_VERSION, sizeof(info->version));
2551 	strlcpy(info->fw_version, adapter->fw_version,
2552 		sizeof(info->fw_version));
2553 }
2554 
2555 static u32 ibmvnic_get_msglevel(struct net_device *netdev)
2556 {
2557 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2558 
2559 	return adapter->msg_enable;
2560 }
2561 
2562 static void ibmvnic_set_msglevel(struct net_device *netdev, u32 data)
2563 {
2564 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2565 
2566 	adapter->msg_enable = data;
2567 }
2568 
2569 static u32 ibmvnic_get_link(struct net_device *netdev)
2570 {
2571 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2572 
	/* No need to send a query here: we request a logical link up at
	 * init and then wait for link state indications.
2575 	 */
2576 	return adapter->logical_link_state;
2577 }
2578 
2579 static void ibmvnic_get_ringparam(struct net_device *netdev,
2580 				  struct ethtool_ringparam *ring)
2581 {
2582 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2583 
2584 	if (adapter->priv_flags & IBMVNIC_USE_SERVER_MAXES) {
2585 		ring->rx_max_pending = adapter->max_rx_add_entries_per_subcrq;
2586 		ring->tx_max_pending = adapter->max_tx_entries_per_subcrq;
2587 	} else {
2588 		ring->rx_max_pending = IBMVNIC_MAX_QUEUE_SZ;
2589 		ring->tx_max_pending = IBMVNIC_MAX_QUEUE_SZ;
2590 	}
2591 	ring->rx_mini_max_pending = 0;
2592 	ring->rx_jumbo_max_pending = 0;
2593 	ring->rx_pending = adapter->req_rx_add_entries_per_subcrq;
2594 	ring->tx_pending = adapter->req_tx_entries_per_subcrq;
2595 	ring->rx_mini_pending = 0;
2596 	ring->rx_jumbo_pending = 0;
2597 }
2598 
2599 static int ibmvnic_set_ringparam(struct net_device *netdev,
2600 				 struct ethtool_ringparam *ring)
2601 {
2602 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2603 	int ret;
2604 
2605 	ret = 0;
2606 	adapter->desired.rx_entries = ring->rx_pending;
2607 	adapter->desired.tx_entries = ring->tx_pending;
2608 
2609 	ret = wait_for_reset(adapter);
2610 
2611 	if (!ret &&
2612 	    (adapter->req_rx_add_entries_per_subcrq != ring->rx_pending ||
2613 	     adapter->req_tx_entries_per_subcrq != ring->tx_pending))
2614 		netdev_info(netdev,
2615 			    "Could not match full ringsize request. Requested: RX %d, TX %d; Allowed: RX %llu, TX %llu\n",
2616 			    ring->rx_pending, ring->tx_pending,
2617 			    adapter->req_rx_add_entries_per_subcrq,
2618 			    adapter->req_tx_entries_per_subcrq);
2619 	return ret;
2620 }
2621 
2622 static void ibmvnic_get_channels(struct net_device *netdev,
2623 				 struct ethtool_channels *channels)
2624 {
2625 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2626 
2627 	if (adapter->priv_flags & IBMVNIC_USE_SERVER_MAXES) {
2628 		channels->max_rx = adapter->max_rx_queues;
2629 		channels->max_tx = adapter->max_tx_queues;
2630 	} else {
2631 		channels->max_rx = IBMVNIC_MAX_QUEUES;
2632 		channels->max_tx = IBMVNIC_MAX_QUEUES;
2633 	}
2634 
2635 	channels->max_other = 0;
2636 	channels->max_combined = 0;
2637 	channels->rx_count = adapter->req_rx_queues;
2638 	channels->tx_count = adapter->req_tx_queues;
2639 	channels->other_count = 0;
2640 	channels->combined_count = 0;
2641 }
2642 
2643 static int ibmvnic_set_channels(struct net_device *netdev,
2644 				struct ethtool_channels *channels)
2645 {
2646 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2647 	int ret;
2648 
2649 	ret = 0;
2650 	adapter->desired.rx_queues = channels->rx_count;
2651 	adapter->desired.tx_queues = channels->tx_count;
2652 
2653 	ret = wait_for_reset(adapter);
2654 
2655 	if (!ret &&
2656 	    (adapter->req_rx_queues != channels->rx_count ||
2657 	     adapter->req_tx_queues != channels->tx_count))
2658 		netdev_info(netdev,
2659 			    "Could not match full channels request. Requested: RX %d, TX %d; Allowed: RX %llu, TX %llu\n",
2660 			    channels->rx_count, channels->tx_count,
2661 			    adapter->req_rx_queues, adapter->req_tx_queues);
2662 	return ret;
2663 
2665 
2666 static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
2667 {
2668 	struct ibmvnic_adapter *adapter = netdev_priv(dev);
2669 	int i;
2670 
2671 	switch (stringset) {
2672 	case ETH_SS_STATS:
2673 		for (i = 0; i < ARRAY_SIZE(ibmvnic_stats);
2674 				i++, data += ETH_GSTRING_LEN)
2675 			memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN);
2676 
2677 		for (i = 0; i < adapter->req_tx_queues; i++) {
2678 			snprintf(data, ETH_GSTRING_LEN, "tx%d_packets", i);
2679 			data += ETH_GSTRING_LEN;
2680 
2681 			snprintf(data, ETH_GSTRING_LEN, "tx%d_bytes", i);
2682 			data += ETH_GSTRING_LEN;
2683 
2684 			snprintf(data, ETH_GSTRING_LEN,
2685 				 "tx%d_dropped_packets", i);
2686 			data += ETH_GSTRING_LEN;
2687 		}
2688 
2689 		for (i = 0; i < adapter->req_rx_queues; i++) {
2690 			snprintf(data, ETH_GSTRING_LEN, "rx%d_packets", i);
2691 			data += ETH_GSTRING_LEN;
2692 
2693 			snprintf(data, ETH_GSTRING_LEN, "rx%d_bytes", i);
2694 			data += ETH_GSTRING_LEN;
2695 
2696 			snprintf(data, ETH_GSTRING_LEN, "rx%d_interrupts", i);
2697 			data += ETH_GSTRING_LEN;
2698 		}
2699 		break;
2700 
2701 	case ETH_SS_PRIV_FLAGS:
2702 		for (i = 0; i < ARRAY_SIZE(ibmvnic_priv_flags); i++)
2703 			strcpy(data + i * ETH_GSTRING_LEN,
2704 			       ibmvnic_priv_flags[i]);
2705 		break;
2706 	default:
2707 		return;
2708 	}
2709 }
2710 
2711 static int ibmvnic_get_sset_count(struct net_device *dev, int sset)
2712 {
2713 	struct ibmvnic_adapter *adapter = netdev_priv(dev);
2714 
2715 	switch (sset) {
2716 	case ETH_SS_STATS:
2717 		return ARRAY_SIZE(ibmvnic_stats) +
2718 		       adapter->req_tx_queues * NUM_TX_STATS +
2719 		       adapter->req_rx_queues * NUM_RX_STATS;
2720 	case ETH_SS_PRIV_FLAGS:
2721 		return ARRAY_SIZE(ibmvnic_priv_flags);
2722 	default:
2723 		return -EOPNOTSUPP;
2724 	}
2725 }
2726 
2727 static void ibmvnic_get_ethtool_stats(struct net_device *dev,
2728 				      struct ethtool_stats *stats, u64 *data)
2729 {
2730 	struct ibmvnic_adapter *adapter = netdev_priv(dev);
2731 	union ibmvnic_crq crq;
2732 	int i, j;
2733 	int rc;
2734 
2735 	memset(&crq, 0, sizeof(crq));
2736 	crq.request_statistics.first = IBMVNIC_CRQ_CMD;
2737 	crq.request_statistics.cmd = REQUEST_STATISTICS;
2738 	crq.request_statistics.ioba = cpu_to_be32(adapter->stats_token);
2739 	crq.request_statistics.len =
2740 	    cpu_to_be32(sizeof(struct ibmvnic_statistics));
2741 
2742 	/* Wait for data to be written */
2743 	reinit_completion(&adapter->stats_done);
2744 	rc = ibmvnic_send_crq(adapter, &crq);
2745 	if (rc)
2746 		return;
2747 	rc = ibmvnic_wait_for_completion(adapter, &adapter->stats_done, 10000);
2748 	if (rc)
2749 		return;
2750 
2751 	for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++)
2752 		data[i] = be64_to_cpu(IBMVNIC_GET_STAT(adapter,
2753 						ibmvnic_stats[i].offset));
2754 
2755 	for (j = 0; j < adapter->req_tx_queues; j++) {
2756 		data[i] = adapter->tx_stats_buffers[j].packets;
2757 		i++;
2758 		data[i] = adapter->tx_stats_buffers[j].bytes;
2759 		i++;
2760 		data[i] = adapter->tx_stats_buffers[j].dropped_packets;
2761 		i++;
2762 	}
2763 
2764 	for (j = 0; j < adapter->req_rx_queues; j++) {
2765 		data[i] = adapter->rx_stats_buffers[j].packets;
2766 		i++;
2767 		data[i] = adapter->rx_stats_buffers[j].bytes;
2768 		i++;
2769 		data[i] = adapter->rx_stats_buffers[j].interrupts;
2770 		i++;
2771 	}
2772 }
2773 
2774 static u32 ibmvnic_get_priv_flags(struct net_device *netdev)
2775 {
2776 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2777 
2778 	return adapter->priv_flags;
2779 }
2780 
2781 static int ibmvnic_set_priv_flags(struct net_device *netdev, u32 flags)
2782 {
2783 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2784 	bool which_maxes = !!(flags & IBMVNIC_USE_SERVER_MAXES);
2785 
2786 	if (which_maxes)
2787 		adapter->priv_flags |= IBMVNIC_USE_SERVER_MAXES;
2788 	else
2789 		adapter->priv_flags &= ~IBMVNIC_USE_SERVER_MAXES;
2790 
2791 	return 0;
}

2793 static const struct ethtool_ops ibmvnic_ethtool_ops = {
2794 	.get_drvinfo		= ibmvnic_get_drvinfo,
2795 	.get_msglevel		= ibmvnic_get_msglevel,
2796 	.set_msglevel		= ibmvnic_set_msglevel,
2797 	.get_link		= ibmvnic_get_link,
2798 	.get_ringparam		= ibmvnic_get_ringparam,
2799 	.set_ringparam		= ibmvnic_set_ringparam,
2800 	.get_channels		= ibmvnic_get_channels,
2801 	.set_channels		= ibmvnic_set_channels,
2802 	.get_strings            = ibmvnic_get_strings,
2803 	.get_sset_count         = ibmvnic_get_sset_count,
2804 	.get_ethtool_stats	= ibmvnic_get_ethtool_stats,
2805 	.get_link_ksettings	= ibmvnic_get_link_ksettings,
2806 	.get_priv_flags		= ibmvnic_get_priv_flags,
2807 	.set_priv_flags		= ibmvnic_set_priv_flags,
2808 };
2809 
/* Routines for managing CRQs/sCRQs */
2811 
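/* Reset a single sub-CRQ: release its IRQ mapping, zero the 4-page
 * message area, and re-register the queue with the hypervisor.
 */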
2812 static int reset_one_sub_crq_queue(struct ibmvnic_adapter *adapter,
2813 				   struct ibmvnic_sub_crq_queue *scrq)
2814 {
2815 	int rc;
2816 
2817 	if (scrq->irq) {
2818 		free_irq(scrq->irq, scrq);
2819 		irq_dispose_mapping(scrq->irq);
2820 		scrq->irq = 0;
2821 	}
2822 
2823 	memset(scrq->msgs, 0, 4 * PAGE_SIZE);
2824 	atomic_set(&scrq->used, 0);
2825 	scrq->cur = 0;
2826 
2827 	rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
2828 			   4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);
2829 	return rc;
2830 }
2831 
2832 static int reset_sub_crq_queues(struct ibmvnic_adapter *adapter)
2833 {
2834 	int i, rc;
2835 
2836 	for (i = 0; i < adapter->req_tx_queues; i++) {
2837 		netdev_dbg(adapter->netdev, "Re-setting tx_scrq[%d]\n", i);
2838 		rc = reset_one_sub_crq_queue(adapter, adapter->tx_scrq[i]);
2839 		if (rc)
2840 			return rc;
2841 	}
2842 
2843 	for (i = 0; i < adapter->req_rx_queues; i++) {
2844 		netdev_dbg(adapter->netdev, "Re-setting rx_scrq[%d]\n", i);
2845 		rc = reset_one_sub_crq_queue(adapter, adapter->rx_scrq[i]);
2846 		if (rc)
2847 			return rc;
2848 	}
2849 
2850 	return rc;
2851 }
2852 
2853 static void release_sub_crq_queue(struct ibmvnic_adapter *adapter,
2854 				  struct ibmvnic_sub_crq_queue *scrq,
2855 				  bool do_h_free)
2856 {
2857 	struct device *dev = &adapter->vdev->dev;
2858 	long rc;
2859 
2860 	netdev_dbg(adapter->netdev, "Releasing sub-CRQ\n");
2861 
2862 	if (do_h_free) {
2863 		/* Close the sub-crqs */
2864 		do {
2865 			rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
2866 						adapter->vdev->unit_address,
2867 						scrq->crq_num);
2868 		} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
2869 
2870 		if (rc) {
2871 			netdev_err(adapter->netdev,
2872 				   "Failed to release sub-CRQ %16lx, rc = %ld\n",
2873 				   scrq->crq_num, rc);
2874 		}
2875 	}
2876 
2877 	dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
2878 			 DMA_BIDIRECTIONAL);
2879 	free_pages((unsigned long)scrq->msgs, 2);
2880 	kfree(scrq);
2881 }
2882 
2883 static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
2884 							*adapter)
2885 {
2886 	struct device *dev = &adapter->vdev->dev;
2887 	struct ibmvnic_sub_crq_queue *scrq;
2888 	int rc;
2889 
2890 	scrq = kzalloc(sizeof(*scrq), GFP_KERNEL);
2891 	if (!scrq)
2892 		return NULL;
2893 
2894 	scrq->msgs =
2895 		(union sub_crq *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 2);
2896 	if (!scrq->msgs) {
2897 		dev_warn(dev, "Couldn't allocate crq queue messages page\n");
2898 		goto zero_page_failed;
2899 	}
2900 
2901 	scrq->msg_token = dma_map_single(dev, scrq->msgs, 4 * PAGE_SIZE,
2902 					 DMA_BIDIRECTIONAL);
2903 	if (dma_mapping_error(dev, scrq->msg_token)) {
2904 		dev_warn(dev, "Couldn't map crq queue messages page\n");
2905 		goto map_failed;
2906 	}
2907 
2908 	rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
2909 			   4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);
2910 
2911 	if (rc == H_RESOURCE)
2912 		rc = ibmvnic_reset_crq(adapter);
2913 
2914 	if (rc == H_CLOSED) {
2915 		dev_warn(dev, "Partner adapter not ready, waiting.\n");
2916 	} else if (rc) {
2917 		dev_warn(dev, "Error %d registering sub-crq\n", rc);
2918 		goto reg_failed;
2919 	}
2920 
2921 	scrq->adapter = adapter;
2922 	scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs);
2923 	spin_lock_init(&scrq->lock);
2924 
2925 	netdev_dbg(adapter->netdev,
2926 		   "sub-crq initialized, num %lx, hw_irq=%lx, irq=%x\n",
2927 		   scrq->crq_num, scrq->hw_irq, scrq->irq);
2928 
2929 	return scrq;
2930 
2931 reg_failed:
2932 	dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
2933 			 DMA_BIDIRECTIONAL);
2934 map_failed:
2935 	free_pages((unsigned long)scrq->msgs, 2);
2936 zero_page_failed:
2937 	kfree(scrq);
2938 
2939 	return NULL;
2940 }
2941 
2942 static void release_sub_crqs(struct ibmvnic_adapter *adapter, bool do_h_free)
2943 {
2944 	int i;
2945 
2946 	if (adapter->tx_scrq) {
2947 		for (i = 0; i < adapter->num_active_tx_scrqs; i++) {
2948 			if (!adapter->tx_scrq[i])
2949 				continue;
2950 
2951 			netdev_dbg(adapter->netdev, "Releasing tx_scrq[%d]\n",
2952 				   i);
2953 			if (adapter->tx_scrq[i]->irq) {
2954 				free_irq(adapter->tx_scrq[i]->irq,
2955 					 adapter->tx_scrq[i]);
2956 				irq_dispose_mapping(adapter->tx_scrq[i]->irq);
2957 				adapter->tx_scrq[i]->irq = 0;
2958 			}
2959 
2960 			release_sub_crq_queue(adapter, adapter->tx_scrq[i],
2961 					      do_h_free);
2962 		}
2963 
2964 		kfree(adapter->tx_scrq);
2965 		adapter->tx_scrq = NULL;
2966 		adapter->num_active_tx_scrqs = 0;
2967 	}
2968 
2969 	if (adapter->rx_scrq) {
2970 		for (i = 0; i < adapter->num_active_rx_scrqs; i++) {
2971 			if (!adapter->rx_scrq[i])
2972 				continue;
2973 
2974 			netdev_dbg(adapter->netdev, "Releasing rx_scrq[%d]\n",
2975 				   i);
2976 			if (adapter->rx_scrq[i]->irq) {
2977 				free_irq(adapter->rx_scrq[i]->irq,
2978 					 adapter->rx_scrq[i]);
2979 				irq_dispose_mapping(adapter->rx_scrq[i]->irq);
2980 				adapter->rx_scrq[i]->irq = 0;
2981 			}
2982 
2983 			release_sub_crq_queue(adapter, adapter->rx_scrq[i],
2984 					      do_h_free);
2985 		}
2986 
2987 		kfree(adapter->rx_scrq);
2988 		adapter->rx_scrq = NULL;
2989 		adapter->num_active_rx_scrqs = 0;
2990 	}
2991 }
2992 
2993 static int disable_scrq_irq(struct ibmvnic_adapter *adapter,
2994 			    struct ibmvnic_sub_crq_queue *scrq)
2995 {
2996 	struct device *dev = &adapter->vdev->dev;
2997 	unsigned long rc;
2998 
2999 	rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
3000 				H_DISABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
3001 	if (rc)
3002 		dev_err(dev, "Couldn't disable scrq irq 0x%lx. rc=%ld\n",
3003 			scrq->hw_irq, rc);
3004 	return rc;
3005 }
3006 
3007 static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
3008 			   struct ibmvnic_sub_crq_queue *scrq)
3009 {
3010 	struct device *dev = &adapter->vdev->dev;
3011 	unsigned long rc;
3012 
3013 	if (scrq->hw_irq > 0x100000000ULL) {
3014 		dev_err(dev, "bad hw_irq = %lx\n", scrq->hw_irq);
3015 		return 1;
3016 	}
3017 
3018 	if (test_bit(0, &adapter->resetting) &&
3019 	    adapter->reset_reason == VNIC_RESET_MOBILITY) {
3020 		u64 val = (0xff000000) | scrq->hw_irq;
3021 
3022 		rc = plpar_hcall_norets(H_EOI, val);
		/* H_EOI fails with rc = H_FUNCTION when running in XIVE
		 * mode; this is expected and not an error.
3025 		 */
3026 		if (rc && (rc != H_FUNCTION))
3027 			dev_err(dev, "H_EOI FAILED irq 0x%llx. rc=%ld\n",
3028 				val, rc);
3029 	}
3030 
3031 	rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
3032 				H_ENABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
3033 	if (rc)
3034 		dev_err(dev, "Couldn't enable scrq irq 0x%lx. rc=%ld\n",
3035 			scrq->hw_irq, rc);
3036 	return rc;
3037 }
3038 
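/* Process tx completions on a sub-CRQ: free completed skbs, return
 * buffer indexes to the pool's free map, and wake the subqueue once
 * usage drains below half of the requested entries. The IRQ is then
 * re-enabled and the queue rechecked for completions that arrived in
 * between.
 */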
3039 static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
3040 			       struct ibmvnic_sub_crq_queue *scrq)
3041 {
3042 	struct device *dev = &adapter->vdev->dev;
3043 	struct ibmvnic_tx_pool *tx_pool;
3044 	struct ibmvnic_tx_buff *txbuff;
3045 	union sub_crq *next;
3046 	int index;
3047 	int i, j;
3048 
3049 restart_loop:
3050 	while (pending_scrq(adapter, scrq)) {
3051 		unsigned int pool = scrq->pool_index;
3052 		int num_entries = 0;
3053 
3054 		next = ibmvnic_next_scrq(adapter, scrq);
3055 		for (i = 0; i < next->tx_comp.num_comps; i++) {
3056 			if (next->tx_comp.rcs[i]) {
3057 				dev_err(dev, "tx error %x\n",
3058 					next->tx_comp.rcs[i]);
3059 				continue;
3060 			}
3061 			index = be32_to_cpu(next->tx_comp.correlators[i]);
3062 			if (index & IBMVNIC_TSO_POOL_MASK) {
3063 				tx_pool = &adapter->tso_pool[pool];
3064 				index &= ~IBMVNIC_TSO_POOL_MASK;
3065 			} else {
3066 				tx_pool = &adapter->tx_pool[pool];
3067 			}
3068 
3069 			txbuff = &tx_pool->tx_buff[index];
3070 
3071 			for (j = 0; j < IBMVNIC_MAX_FRAGS_PER_CRQ; j++) {
3072 				if (!txbuff->data_dma[j])
3073 					continue;
3074 
3075 				txbuff->data_dma[j] = 0;
3076 			}
3077 
3078 			if (txbuff->last_frag) {
3079 				dev_kfree_skb_any(txbuff->skb);
3080 				txbuff->skb = NULL;
3081 			}
3082 
3083 			num_entries += txbuff->num_entries;
3084 
3085 			tx_pool->free_map[tx_pool->producer_index] = index;
3086 			tx_pool->producer_index =
3087 				(tx_pool->producer_index + 1) %
3088 					tx_pool->num_buffers;
3089 		}
		/* remove tx_comp scrq */
3091 		next->tx_comp.first = 0;
3092 
3093 		if (atomic_sub_return(num_entries, &scrq->used) <=
3094 		    (adapter->req_tx_entries_per_subcrq / 2) &&
3095 		    __netif_subqueue_stopped(adapter->netdev,
3096 					     scrq->pool_index)) {
3097 			netif_wake_subqueue(adapter->netdev, scrq->pool_index);
3098 			netdev_dbg(adapter->netdev, "Started queue %d\n",
3099 				   scrq->pool_index);
3100 		}
3101 	}
3102 
3103 	enable_scrq_irq(adapter, scrq);
3104 
3105 	if (pending_scrq(adapter, scrq)) {
3106 		disable_scrq_irq(adapter, scrq);
3107 		goto restart_loop;
3108 	}
3109 
3110 	return 0;
3111 }
3112 
3113 static irqreturn_t ibmvnic_interrupt_tx(int irq, void *instance)
3114 {
3115 	struct ibmvnic_sub_crq_queue *scrq = instance;
3116 	struct ibmvnic_adapter *adapter = scrq->adapter;
3117 
3118 	disable_scrq_irq(adapter, scrq);
3119 	ibmvnic_complete_tx(adapter, scrq);
3120 
3121 	return IRQ_HANDLED;
3122 }
3123 
3124 static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance)
3125 {
3126 	struct ibmvnic_sub_crq_queue *scrq = instance;
3127 	struct ibmvnic_adapter *adapter = scrq->adapter;
3128 
3129 	/* When booting a kdump kernel we can hit pending interrupts
3130 	 * prior to completing driver initialization.
3131 	 */
3132 	if (unlikely(adapter->state != VNIC_OPEN))
3133 		return IRQ_NONE;
3134 
3135 	adapter->rx_stats_buffers[scrq->scrq_num].interrupts++;
3136 
3137 	if (napi_schedule_prep(&adapter->napi[scrq->scrq_num])) {
3138 		disable_scrq_irq(adapter, scrq);
3139 		__napi_schedule(&adapter->napi[scrq->scrq_num]);
3140 	}
3141 
3142 	return IRQ_HANDLED;
3143 }
3144 
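/* Map each sub-CRQ's hardware interrupt into the Linux IRQ domain
 * and request a tx or rx handler for it, unwinding every earlier
 * registration (and releasing the sub-CRQs) on failure.
 */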
3145 static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter)
3146 {
3147 	struct device *dev = &adapter->vdev->dev;
3148 	struct ibmvnic_sub_crq_queue *scrq;
3149 	int i = 0, j = 0;
3150 	int rc = 0;
3151 
3152 	for (i = 0; i < adapter->req_tx_queues; i++) {
3153 		netdev_dbg(adapter->netdev, "Initializing tx_scrq[%d] irq\n",
3154 			   i);
3155 		scrq = adapter->tx_scrq[i];
3156 		scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
3157 
3158 		if (!scrq->irq) {
3159 			rc = -EINVAL;
3160 			dev_err(dev, "Error mapping irq\n");
3161 			goto req_tx_irq_failed;
3162 		}
3163 
3164 		snprintf(scrq->name, sizeof(scrq->name), "ibmvnic-%x-tx%d",
3165 			 adapter->vdev->unit_address, i);
3166 		rc = request_irq(scrq->irq, ibmvnic_interrupt_tx,
3167 				 0, scrq->name, scrq);
3168 
3169 		if (rc) {
3170 			dev_err(dev, "Couldn't register tx irq 0x%x. rc=%d\n",
3171 				scrq->irq, rc);
3172 			irq_dispose_mapping(scrq->irq);
3173 			goto req_tx_irq_failed;
3174 		}
3175 	}
3176 
3177 	for (i = 0; i < adapter->req_rx_queues; i++) {
3178 		netdev_dbg(adapter->netdev, "Initializing rx_scrq[%d] irq\n",
3179 			   i);
3180 		scrq = adapter->rx_scrq[i];
3181 		scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
3182 		if (!scrq->irq) {
3183 			rc = -EINVAL;
3184 			dev_err(dev, "Error mapping irq\n");
3185 			goto req_rx_irq_failed;
3186 		}
3187 		snprintf(scrq->name, sizeof(scrq->name), "ibmvnic-%x-rx%d",
3188 			 adapter->vdev->unit_address, i);
3189 		rc = request_irq(scrq->irq, ibmvnic_interrupt_rx,
3190 				 0, scrq->name, scrq);
3191 		if (rc) {
3192 			dev_err(dev, "Couldn't register rx irq 0x%x. rc=%d\n",
3193 				scrq->irq, rc);
3194 			irq_dispose_mapping(scrq->irq);
3195 			goto req_rx_irq_failed;
3196 		}
3197 	}
3198 	return rc;
3199 
3200 req_rx_irq_failed:
3201 	for (j = 0; j < i; j++) {
3202 		free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]);
3203 		irq_dispose_mapping(adapter->rx_scrq[j]->irq);
3204 	}
3205 	i = adapter->req_tx_queues;
3206 req_tx_irq_failed:
3207 	for (j = 0; j < i; j++) {
3208 		free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
3209 		irq_dispose_mapping(adapter->tx_scrq[j]->irq);
3210 	}
3211 	release_sub_crqs(adapter, 1);
3212 	return rc;
3213 }
3214 
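/* Allocate and register the tx and rx sub-CRQs. If the hypervisor
 * provides fewer queues than requested, the shortfall is distributed
 * by trimming the rx and tx requests in turn, never below the
 * negotiated minimums; the registered queues are then split between
 * tx_scrq and rx_scrq.
 */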
3215 static int init_sub_crqs(struct ibmvnic_adapter *adapter)
3216 {
3217 	struct device *dev = &adapter->vdev->dev;
3218 	struct ibmvnic_sub_crq_queue **allqueues;
3219 	int registered_queues = 0;
3220 	int total_queues;
3221 	int more = 0;
3222 	int i;
3223 
3224 	total_queues = adapter->req_tx_queues + adapter->req_rx_queues;
3225 
3226 	allqueues = kcalloc(total_queues, sizeof(*allqueues), GFP_KERNEL);
3227 	if (!allqueues)
3228 		return -1;
3229 
3230 	for (i = 0; i < total_queues; i++) {
3231 		allqueues[i] = init_sub_crq_queue(adapter);
3232 		if (!allqueues[i]) {
3233 			dev_warn(dev, "Couldn't allocate all sub-crqs\n");
3234 			break;
3235 		}
3236 		registered_queues++;
3237 	}
3238 
3239 	/* Make sure we were able to register the minimum number of queues */
3240 	if (registered_queues <
3241 	    adapter->min_tx_queues + adapter->min_rx_queues) {
		dev_err(dev, "Fatal: Couldn't init min number of sub-crqs\n");
3243 		goto tx_failed;
3244 	}
3245 
	/* Distribute the shortfall from failed queue allocations */
	for (i = 0; i < total_queues - registered_queues + more; i++) {
3248 		netdev_dbg(adapter->netdev, "Reducing number of queues\n");
3249 		switch (i % 3) {
3250 		case 0:
3251 			if (adapter->req_rx_queues > adapter->min_rx_queues)
3252 				adapter->req_rx_queues--;
3253 			else
3254 				more++;
3255 			break;
3256 		case 1:
3257 			if (adapter->req_tx_queues > adapter->min_tx_queues)
3258 				adapter->req_tx_queues--;
3259 			else
3260 				more++;
3261 			break;
3262 		}
3263 	}
3264 
3265 	adapter->tx_scrq = kcalloc(adapter->req_tx_queues,
3266 				   sizeof(*adapter->tx_scrq), GFP_KERNEL);
3267 	if (!adapter->tx_scrq)
3268 		goto tx_failed;
3269 
3270 	for (i = 0; i < adapter->req_tx_queues; i++) {
3271 		adapter->tx_scrq[i] = allqueues[i];
3272 		adapter->tx_scrq[i]->pool_index = i;
3273 		adapter->num_active_tx_scrqs++;
3274 	}
3275 
3276 	adapter->rx_scrq = kcalloc(adapter->req_rx_queues,
3277 				   sizeof(*adapter->rx_scrq), GFP_KERNEL);
3278 	if (!adapter->rx_scrq)
3279 		goto rx_failed;
3280 
3281 	for (i = 0; i < adapter->req_rx_queues; i++) {
3282 		adapter->rx_scrq[i] = allqueues[i + adapter->req_tx_queues];
3283 		adapter->rx_scrq[i]->scrq_num = i;
3284 		adapter->num_active_rx_scrqs++;
3285 	}
3286 
3287 	kfree(allqueues);
3288 	return 0;
3289 
3290 rx_failed:
3291 	kfree(adapter->tx_scrq);
3292 	adapter->tx_scrq = NULL;
3293 tx_failed:
3294 	for (i = 0; i < registered_queues; i++)
3295 		release_sub_crq_queue(adapter, allqueues[i], 1);
3296 	kfree(allqueues);
3297 	return -1;
3298 }
3299 
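/* Send the driver's requested capabilities to the VNIC server. On
 * the first (non-retry) pass, the requested queue and entry counts
 * are derived from any user-desired values, clamped so that a long
 * term buffer of req_mtu sized entries fits in IBMVNIC_MAX_LTB_SIZE.
 * Each REQUEST_CAPABILITY CRQ in flight is counted in
 * running_cap_crqs.
 */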
3300 static void ibmvnic_send_req_caps(struct ibmvnic_adapter *adapter, int retry)
3301 {
3302 	struct device *dev = &adapter->vdev->dev;
3303 	union ibmvnic_crq crq;
3304 	int max_entries;
3305 
3306 	if (!retry) {
		/* Sub-CRQ entries are 32 bytes long */
3308 		int entries_page = 4 * PAGE_SIZE / (sizeof(u64) * 4);
3309 
3310 		if (adapter->min_tx_entries_per_subcrq > entries_page ||
3311 		    adapter->min_rx_add_entries_per_subcrq > entries_page) {
3312 			dev_err(dev, "Fatal, invalid entries per sub-crq\n");
3313 			return;
3314 		}
3315 
3316 		if (adapter->desired.mtu)
3317 			adapter->req_mtu = adapter->desired.mtu;
3318 		else
3319 			adapter->req_mtu = adapter->netdev->mtu + ETH_HLEN;
3320 
3321 		if (!adapter->desired.tx_entries)
3322 			adapter->desired.tx_entries =
3323 					adapter->max_tx_entries_per_subcrq;
3324 		if (!adapter->desired.rx_entries)
3325 			adapter->desired.rx_entries =
3326 					adapter->max_rx_add_entries_per_subcrq;
3327 
3328 		max_entries = IBMVNIC_MAX_LTB_SIZE /
3329 			      (adapter->req_mtu + IBMVNIC_BUFFER_HLEN);
3330 
3331 		if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) *
3332 			adapter->desired.tx_entries > IBMVNIC_MAX_LTB_SIZE) {
3333 			adapter->desired.tx_entries = max_entries;
3334 		}
3335 
3336 		if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) *
3337 			adapter->desired.rx_entries > IBMVNIC_MAX_LTB_SIZE) {
3338 			adapter->desired.rx_entries = max_entries;
3339 		}
3340 
3341 		if (adapter->desired.tx_entries)
3342 			adapter->req_tx_entries_per_subcrq =
3343 					adapter->desired.tx_entries;
3344 		else
3345 			adapter->req_tx_entries_per_subcrq =
3346 					adapter->max_tx_entries_per_subcrq;
3347 
3348 		if (adapter->desired.rx_entries)
3349 			adapter->req_rx_add_entries_per_subcrq =
3350 					adapter->desired.rx_entries;
3351 		else
3352 			adapter->req_rx_add_entries_per_subcrq =
3353 					adapter->max_rx_add_entries_per_subcrq;
3354 
3355 		if (adapter->desired.tx_queues)
3356 			adapter->req_tx_queues =
3357 					adapter->desired.tx_queues;
3358 		else
3359 			adapter->req_tx_queues =
3360 					adapter->opt_tx_comp_sub_queues;
3361 
3362 		if (adapter->desired.rx_queues)
3363 			adapter->req_rx_queues =
3364 					adapter->desired.rx_queues;
3365 		else
3366 			adapter->req_rx_queues =
3367 					adapter->opt_rx_comp_queues;
3368 
3369 		adapter->req_rx_add_queues = adapter->max_rx_add_queues;
3370 	}
3371 
3372 	memset(&crq, 0, sizeof(crq));
3373 	crq.request_capability.first = IBMVNIC_CRQ_CMD;
3374 	crq.request_capability.cmd = REQUEST_CAPABILITY;
3375 
3376 	crq.request_capability.capability = cpu_to_be16(REQ_TX_QUEUES);
3377 	crq.request_capability.number = cpu_to_be64(adapter->req_tx_queues);
3378 	atomic_inc(&adapter->running_cap_crqs);
3379 	ibmvnic_send_crq(adapter, &crq);
3380 
3381 	crq.request_capability.capability = cpu_to_be16(REQ_RX_QUEUES);
3382 	crq.request_capability.number = cpu_to_be64(adapter->req_rx_queues);
3383 	atomic_inc(&adapter->running_cap_crqs);
3384 	ibmvnic_send_crq(adapter, &crq);
3385 
3386 	crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_QUEUES);
3387 	crq.request_capability.number = cpu_to_be64(adapter->req_rx_add_queues);
3388 	atomic_inc(&adapter->running_cap_crqs);
3389 	ibmvnic_send_crq(adapter, &crq);
3390 
3391 	crq.request_capability.capability =
3392 	    cpu_to_be16(REQ_TX_ENTRIES_PER_SUBCRQ);
3393 	crq.request_capability.number =
3394 	    cpu_to_be64(adapter->req_tx_entries_per_subcrq);
3395 	atomic_inc(&adapter->running_cap_crqs);
3396 	ibmvnic_send_crq(adapter, &crq);
3397 
3398 	crq.request_capability.capability =
3399 	    cpu_to_be16(REQ_RX_ADD_ENTRIES_PER_SUBCRQ);
3400 	crq.request_capability.number =
3401 	    cpu_to_be64(adapter->req_rx_add_entries_per_subcrq);
3402 	atomic_inc(&adapter->running_cap_crqs);
3403 	ibmvnic_send_crq(adapter, &crq);
3404 
3405 	crq.request_capability.capability = cpu_to_be16(REQ_MTU);
3406 	crq.request_capability.number = cpu_to_be64(adapter->req_mtu);
3407 	atomic_inc(&adapter->running_cap_crqs);
3408 	ibmvnic_send_crq(adapter, &crq);
3409 
3410 	if (adapter->netdev->flags & IFF_PROMISC) {
3411 		if (adapter->promisc_supported) {
3412 			crq.request_capability.capability =
3413 			    cpu_to_be16(PROMISC_REQUESTED);
3414 			crq.request_capability.number = cpu_to_be64(1);
3415 			atomic_inc(&adapter->running_cap_crqs);
3416 			ibmvnic_send_crq(adapter, &crq);
3417 		}
3418 	} else {
3419 		crq.request_capability.capability =
3420 		    cpu_to_be16(PROMISC_REQUESTED);
3421 		crq.request_capability.number = cpu_to_be64(0);
3422 		atomic_inc(&adapter->running_cap_crqs);
3423 		ibmvnic_send_crq(adapter, &crq);
3424 	}
3425 }
3426 
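/* Return 1 if the current entry in the sub-CRQ is valid to consume,
 * i.e. the sender has set IBMVNIC_CRQ_CMD_RSP in its first byte.
 */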
3427 static int pending_scrq(struct ibmvnic_adapter *adapter,
3428 			struct ibmvnic_sub_crq_queue *scrq)
3429 {
3430 	union sub_crq *entry = &scrq->msgs[scrq->cur];
3431 
3432 	if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP)
3433 		return 1;
3434 	else
3435 		return 0;
3436 }
3437 
3438 static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *adapter,
3439 					struct ibmvnic_sub_crq_queue *scrq)
3440 {
3441 	union sub_crq *entry;
3442 	unsigned long flags;
3443 
3444 	spin_lock_irqsave(&scrq->lock, flags);
3445 	entry = &scrq->msgs[scrq->cur];
3446 	if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP) {
3447 		if (++scrq->cur == scrq->size)
3448 			scrq->cur = 0;
3449 	} else {
3450 		entry = NULL;
3451 	}
3452 	spin_unlock_irqrestore(&scrq->lock, flags);
3453 
3454 	return entry;
3455 }
3456 
3457 static union ibmvnic_crq *ibmvnic_next_crq(struct ibmvnic_adapter *adapter)
3458 {
3459 	struct ibmvnic_crq_queue *queue = &adapter->crq;
3460 	union ibmvnic_crq *crq;
3461 
3462 	crq = &queue->msgs[queue->cur];
3463 	if (crq->generic.first & IBMVNIC_CRQ_CMD_RSP) {
3464 		if (++queue->cur == queue->size)
3465 			queue->cur = 0;
3466 	} else {
3467 		crq = NULL;
3468 	}
3469 
3470 	return crq;
3471 }
3472 
3473 static void print_subcrq_error(struct device *dev, int rc, const char *func)
3474 {
3475 	switch (rc) {
3476 	case H_PARAMETER:
3477 		dev_warn_ratelimited(dev,
3478 				     "%s failed: Send request is malformed or adapter failover pending. (rc=%d)\n",
3479 				     func, rc);
3480 		break;
3481 	case H_CLOSED:
3482 		dev_warn_ratelimited(dev,
3483 				     "%s failed: Backing queue closed. Adapter is down or failover pending. (rc=%d)\n",
3484 				     func, rc);
3485 		break;
3486 	default:
3487 		dev_err_ratelimited(dev, "%s failed: (rc=%d)\n", func, rc);
3488 		break;
3489 	}
3490 }
3491 
3492 static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
3493 		       union sub_crq *sub_crq)
3494 {
3495 	unsigned int ua = adapter->vdev->unit_address;
3496 	struct device *dev = &adapter->vdev->dev;
3497 	u64 *u64_crq = (u64 *)sub_crq;
3498 	int rc;
3499 
3500 	netdev_dbg(adapter->netdev,
3501 		   "Sending sCRQ %016lx: %016lx %016lx %016lx %016lx\n",
3502 		   (unsigned long int)cpu_to_be64(remote_handle),
3503 		   (unsigned long int)cpu_to_be64(u64_crq[0]),
3504 		   (unsigned long int)cpu_to_be64(u64_crq[1]),
3505 		   (unsigned long int)cpu_to_be64(u64_crq[2]),
3506 		   (unsigned long int)cpu_to_be64(u64_crq[3]));
3507 
3508 	/* Make sure the hypervisor sees the complete request */
3509 	mb();
3510 
3511 	rc = plpar_hcall_norets(H_SEND_SUB_CRQ, ua,
3512 				cpu_to_be64(remote_handle),
3513 				cpu_to_be64(u64_crq[0]),
3514 				cpu_to_be64(u64_crq[1]),
3515 				cpu_to_be64(u64_crq[2]),
3516 				cpu_to_be64(u64_crq[3]));
3517 
3518 	if (rc)
3519 		print_subcrq_error(dev, rc, __func__);
3520 
3521 	return rc;
3522 }
3523 
3524 static int send_subcrq_indirect(struct ibmvnic_adapter *adapter,
3525 				u64 remote_handle, u64 ioba, u64 num_entries)
3526 {
3527 	unsigned int ua = adapter->vdev->unit_address;
3528 	struct device *dev = &adapter->vdev->dev;
3529 	int rc;
3530 
3531 	/* Make sure the hypervisor sees the complete request */
3532 	mb();
3533 	rc = plpar_hcall_norets(H_SEND_SUB_CRQ_INDIRECT, ua,
3534 				cpu_to_be64(remote_handle),
3535 				ioba, num_entries);
3536 
3537 	if (rc)
3538 		print_subcrq_error(dev, rc, __func__);
3539 
3540 	return rc;
3541 }
3542 
3543 static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter,
3544 			    union ibmvnic_crq *crq)
3545 {
3546 	unsigned int ua = adapter->vdev->unit_address;
3547 	struct device *dev = &adapter->vdev->dev;
3548 	u64 *u64_crq = (u64 *)crq;
3549 	int rc;
3550 
3551 	netdev_dbg(adapter->netdev, "Sending CRQ: %016lx %016lx\n",
3552 		   (unsigned long int)cpu_to_be64(u64_crq[0]),
3553 		   (unsigned long int)cpu_to_be64(u64_crq[1]));
3554 
3555 	if (!adapter->crq.active &&
3556 	    crq->generic.first != IBMVNIC_CRQ_INIT_CMD) {
3557 		dev_warn(dev, "Invalid request detected while CRQ is inactive, possible device state change during reset\n");
3558 		return -EINVAL;
3559 	}
3560 
3561 	/* Make sure the hypervisor sees the complete request */
3562 	mb();
3563 
3564 	rc = plpar_hcall_norets(H_SEND_CRQ, ua,
3565 				cpu_to_be64(u64_crq[0]),
3566 				cpu_to_be64(u64_crq[1]));
3567 
3568 	if (rc) {
3569 		if (rc == H_CLOSED) {
3570 			dev_warn(dev, "CRQ Queue closed\n");
			/* Do not reset; report the failure and wait for a passive init from the server */
3572 		}
3573 
3574 		dev_warn(dev, "Send error (rc=%d)\n", rc);
3575 	}
3576 
3577 	return rc;
3578 }
3579 
3580 static int ibmvnic_send_crq_init(struct ibmvnic_adapter *adapter)
3581 {
3582 	struct device *dev = &adapter->vdev->dev;
3583 	union ibmvnic_crq crq;
3584 	int retries = 100;
3585 	int rc;
3586 
3587 	memset(&crq, 0, sizeof(crq));
3588 	crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
3589 	crq.generic.cmd = IBMVNIC_CRQ_INIT;
3590 	netdev_dbg(adapter->netdev, "Sending CRQ init\n");
3591 
3592 	do {
3593 		rc = ibmvnic_send_crq(adapter, &crq);
3594 		if (rc != H_CLOSED)
3595 			break;
3596 		retries--;
3597 		msleep(50);
3598 
3599 	} while (retries > 0);
3600 
3601 	if (rc) {
3602 		dev_err(dev, "Failed to send init request, rc = %d\n", rc);
3603 		return rc;
3604 	}
3605 
3606 	return 0;
3607 }
3608 
3609 static int send_version_xchg(struct ibmvnic_adapter *adapter)
3610 {
3611 	union ibmvnic_crq crq;
3612 
3613 	memset(&crq, 0, sizeof(crq));
3614 	crq.version_exchange.first = IBMVNIC_CRQ_CMD;
3615 	crq.version_exchange.cmd = VERSION_EXCHANGE;
3616 	crq.version_exchange.version = cpu_to_be16(ibmvnic_version);
3617 
3618 	return ibmvnic_send_crq(adapter, &crq);
3619 }
3620 
3621 struct vnic_login_client_data {
3622 	u8	type;
3623 	__be16	len;
3624 	char	name[];
3625 } __packed;
3626 
3627 static int vnic_client_data_len(struct ibmvnic_adapter *adapter)
3628 {
3629 	int len;
3630 
3631 	/* Calculate the amount of buffer space needed for the
	 * vnic client data in the login buffer. There are four entries:
	 * OS name, LPAR name, device name, and a null final entry.
3634 	 */
3635 	len = 4 * sizeof(struct vnic_login_client_data);
3636 	len += 6; /* "Linux" plus NULL */
3637 	len += strlen(utsname()->nodename) + 1;
3638 	len += strlen(adapter->netdev->name) + 1;
3639 
3640 	return len;
3641 }
3642 
3643 static void vnic_add_client_data(struct ibmvnic_adapter *adapter,
3644 				 struct vnic_login_client_data *vlcd)
3645 {
3646 	const char *os_name = "Linux";
3647 	int len;
3648 
3649 	/* Type 1 - LPAR OS */
3650 	vlcd->type = 1;
3651 	len = strlen(os_name) + 1;
3652 	vlcd->len = cpu_to_be16(len);
3653 	strncpy(vlcd->name, os_name, len);
3654 	vlcd = (struct vnic_login_client_data *)(vlcd->name + len);
3655 
3656 	/* Type 2 - LPAR name */
3657 	vlcd->type = 2;
3658 	len = strlen(utsname()->nodename) + 1;
3659 	vlcd->len = cpu_to_be16(len);
3660 	strncpy(vlcd->name, utsname()->nodename, len);
3661 	vlcd = (struct vnic_login_client_data *)(vlcd->name + len);
3662 
3663 	/* Type 3 - device name */
3664 	vlcd->type = 3;
3665 	len = strlen(adapter->netdev->name) + 1;
3666 	vlcd->len = cpu_to_be16(len);
3667 	strncpy(vlcd->name, adapter->netdev->name, len);
3668 }
3669 
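/* Build and send the LOGIN request.  The DMA-mapped login buffer is
 * laid out as:
 *
 *   struct ibmvnic_login_buffer   (length, version, counts, offsets)
 *   __be64 tx sub-CRQ numbers     (req_tx_queues entries)
 *   __be64 rx sub-CRQ numbers     (req_rx_queues entries)
 *   client data TLVs              (see vnic_add_client_data())
 *
 * The response is written by the server into a separately mapped
 * buffer whose ioba and length are advertised in the header.
 */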
3670 static int send_login(struct ibmvnic_adapter *adapter)
3671 {
3672 	struct ibmvnic_login_rsp_buffer *login_rsp_buffer;
3673 	struct ibmvnic_login_buffer *login_buffer;
3674 	struct device *dev = &adapter->vdev->dev;
3675 	dma_addr_t rsp_buffer_token;
3676 	dma_addr_t buffer_token;
3677 	size_t rsp_buffer_size;
3678 	union ibmvnic_crq crq;
3679 	size_t buffer_size;
3680 	__be64 *tx_list_p;
3681 	__be64 *rx_list_p;
3682 	int client_data_len;
3683 	struct vnic_login_client_data *vlcd;
3684 	int i;
3685 
3686 	if (!adapter->tx_scrq || !adapter->rx_scrq) {
3687 		netdev_err(adapter->netdev,
3688 			   "RX or TX queues are not allocated, device login failed\n");
3689 		return -1;
3690 	}
3691 
3692 	release_login_rsp_buffer(adapter);
3693 	client_data_len = vnic_client_data_len(adapter);
3694 
3695 	buffer_size =
3696 	    sizeof(struct ibmvnic_login_buffer) +
3697 	    sizeof(u64) * (adapter->req_tx_queues + adapter->req_rx_queues) +
3698 	    client_data_len;
3699 
3700 	login_buffer = kzalloc(buffer_size, GFP_ATOMIC);
3701 	if (!login_buffer)
3702 		goto buf_alloc_failed;
3703 
3704 	buffer_token = dma_map_single(dev, login_buffer, buffer_size,
3705 				      DMA_TO_DEVICE);
3706 	if (dma_mapping_error(dev, buffer_token)) {
3707 		dev_err(dev, "Couldn't map login buffer\n");
3708 		goto buf_map_failed;
3709 	}
3710 
3711 	rsp_buffer_size = sizeof(struct ibmvnic_login_rsp_buffer) +
3712 			  sizeof(u64) * adapter->req_tx_queues +
3713 			  sizeof(u64) * adapter->req_rx_queues +
3714 			  sizeof(u64) * adapter->req_rx_queues +
3715 			  sizeof(u8) * IBMVNIC_TX_DESC_VERSIONS;
3716 
3717 	login_rsp_buffer = kmalloc(rsp_buffer_size, GFP_ATOMIC);
3718 	if (!login_rsp_buffer)
3719 		goto buf_rsp_alloc_failed;
3720 
3721 	rsp_buffer_token = dma_map_single(dev, login_rsp_buffer,
3722 					  rsp_buffer_size, DMA_FROM_DEVICE);
3723 	if (dma_mapping_error(dev, rsp_buffer_token)) {
3724 		dev_err(dev, "Couldn't map login rsp buffer\n");
3725 		goto buf_rsp_map_failed;
3726 	}
3727 
3728 	adapter->login_buf = login_buffer;
3729 	adapter->login_buf_token = buffer_token;
3730 	adapter->login_buf_sz = buffer_size;
3731 	adapter->login_rsp_buf = login_rsp_buffer;
3732 	adapter->login_rsp_buf_token = rsp_buffer_token;
3733 	adapter->login_rsp_buf_sz = rsp_buffer_size;
3734 
3735 	login_buffer->len = cpu_to_be32(buffer_size);
3736 	login_buffer->version = cpu_to_be32(INITIAL_VERSION_LB);
3737 	login_buffer->num_txcomp_subcrqs = cpu_to_be32(adapter->req_tx_queues);
3738 	login_buffer->off_txcomp_subcrqs =
3739 	    cpu_to_be32(sizeof(struct ibmvnic_login_buffer));
3740 	login_buffer->num_rxcomp_subcrqs = cpu_to_be32(adapter->req_rx_queues);
3741 	login_buffer->off_rxcomp_subcrqs =
3742 	    cpu_to_be32(sizeof(struct ibmvnic_login_buffer) +
3743 			sizeof(u64) * adapter->req_tx_queues);
3744 	login_buffer->login_rsp_ioba = cpu_to_be32(rsp_buffer_token);
3745 	login_buffer->login_rsp_len = cpu_to_be32(rsp_buffer_size);
3746 
3747 	tx_list_p = (__be64 *)((char *)login_buffer +
3748 				      sizeof(struct ibmvnic_login_buffer));
3749 	rx_list_p = (__be64 *)((char *)login_buffer +
3750 				      sizeof(struct ibmvnic_login_buffer) +
3751 				      sizeof(u64) * adapter->req_tx_queues);
3752 
3753 	for (i = 0; i < adapter->req_tx_queues; i++) {
3754 		if (adapter->tx_scrq[i]) {
3755 			tx_list_p[i] = cpu_to_be64(adapter->tx_scrq[i]->
3756 						   crq_num);
3757 		}
3758 	}
3759 
3760 	for (i = 0; i < adapter->req_rx_queues; i++) {
3761 		if (adapter->rx_scrq[i]) {
3762 			rx_list_p[i] = cpu_to_be64(adapter->rx_scrq[i]->
3763 						   crq_num);
3764 		}
3765 	}
3766 
3767 	/* Insert vNIC login client data */
3768 	vlcd = (struct vnic_login_client_data *)
3769 		((char *)rx_list_p + (sizeof(u64) * adapter->req_rx_queues));
3770 	login_buffer->client_data_offset =
3771 			cpu_to_be32((char *)vlcd - (char *)login_buffer);
3772 	login_buffer->client_data_len = cpu_to_be32(client_data_len);
3773 
3774 	vnic_add_client_data(adapter, vlcd);
3775 
3776 	netdev_dbg(adapter->netdev, "Login Buffer:\n");
3777 	for (i = 0; i < (adapter->login_buf_sz - 1) / 8 + 1; i++) {
3778 		netdev_dbg(adapter->netdev, "%016lx\n",
3779 			   ((unsigned long *)(adapter->login_buf))[i]);
3780 	}
3781 
3782 	memset(&crq, 0, sizeof(crq));
3783 	crq.login.first = IBMVNIC_CRQ_CMD;
3784 	crq.login.cmd = LOGIN;
3785 	crq.login.ioba = cpu_to_be32(buffer_token);
3786 	crq.login.len = cpu_to_be32(buffer_size);
3787 	ibmvnic_send_crq(adapter, &crq);
3788 
3789 	return 0;
3790 
3791 buf_rsp_map_failed:
3792 	kfree(login_rsp_buffer);
3793 buf_rsp_alloc_failed:
3794 	dma_unmap_single(dev, buffer_token, buffer_size, DMA_TO_DEVICE);
3795 buf_map_failed:
3796 	kfree(login_buffer);
3797 buf_alloc_failed:
3798 	return -1;
3799 }
3800 
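/* REQUEST_MAP and REQUEST_UNMAP register and unregister a DMA region
 * (ioba + len) with the VNIC server under a driver-chosen map_id;
 * later commands refer to the region by that id.
 */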
3801 static int send_request_map(struct ibmvnic_adapter *adapter, dma_addr_t addr,
3802 			    u32 len, u8 map_id)
3803 {
3804 	union ibmvnic_crq crq;
3805 
3806 	memset(&crq, 0, sizeof(crq));
3807 	crq.request_map.first = IBMVNIC_CRQ_CMD;
3808 	crq.request_map.cmd = REQUEST_MAP;
3809 	crq.request_map.map_id = map_id;
3810 	crq.request_map.ioba = cpu_to_be32(addr);
3811 	crq.request_map.len = cpu_to_be32(len);
3812 	return ibmvnic_send_crq(adapter, &crq);
3813 }
3814 
3815 static int send_request_unmap(struct ibmvnic_adapter *adapter, u8 map_id)
3816 {
3817 	union ibmvnic_crq crq;
3818 
3819 	memset(&crq, 0, sizeof(crq));
3820 	crq.request_unmap.first = IBMVNIC_CRQ_CMD;
3821 	crq.request_unmap.cmd = REQUEST_UNMAP;
3822 	crq.request_unmap.map_id = map_id;
3823 	return ibmvnic_send_crq(adapter, &crq);
3824 }
3825 
3826 static void send_map_query(struct ibmvnic_adapter *adapter)
3827 {
3828 	union ibmvnic_crq crq;
3829 
3830 	memset(&crq, 0, sizeof(crq));
3831 	crq.query_map.first = IBMVNIC_CRQ_CMD;
3832 	crq.query_map.cmd = QUERY_MAP;
3833 	ibmvnic_send_crq(adapter, &crq);
3834 }
3835 
3836 /* Send a series of CRQs requesting various capabilities of the VNIC server */
3837 static void send_cap_queries(struct ibmvnic_adapter *adapter)
3838 {
3839 	union ibmvnic_crq crq;
3840 
3841 	atomic_set(&adapter->running_cap_crqs, 0);
3842 	memset(&crq, 0, sizeof(crq));
3843 	crq.query_capability.first = IBMVNIC_CRQ_CMD;
3844 	crq.query_capability.cmd = QUERY_CAPABILITY;
3845 
3846 	crq.query_capability.capability = cpu_to_be16(MIN_TX_QUEUES);
3847 	atomic_inc(&adapter->running_cap_crqs);
3848 	ibmvnic_send_crq(adapter, &crq);
3849 
3850 	crq.query_capability.capability = cpu_to_be16(MIN_RX_QUEUES);
3851 	atomic_inc(&adapter->running_cap_crqs);
3852 	ibmvnic_send_crq(adapter, &crq);
3853 
3854 	crq.query_capability.capability = cpu_to_be16(MIN_RX_ADD_QUEUES);
3855 	atomic_inc(&adapter->running_cap_crqs);
3856 	ibmvnic_send_crq(adapter, &crq);
3857 
3858 	crq.query_capability.capability = cpu_to_be16(MAX_TX_QUEUES);
3859 	atomic_inc(&adapter->running_cap_crqs);
3860 	ibmvnic_send_crq(adapter, &crq);
3861 
3862 	crq.query_capability.capability = cpu_to_be16(MAX_RX_QUEUES);
3863 	atomic_inc(&adapter->running_cap_crqs);
3864 	ibmvnic_send_crq(adapter, &crq);
3865 
3866 	crq.query_capability.capability = cpu_to_be16(MAX_RX_ADD_QUEUES);
3867 	atomic_inc(&adapter->running_cap_crqs);
3868 	ibmvnic_send_crq(adapter, &crq);
3869 
3870 	crq.query_capability.capability =
3871 	    cpu_to_be16(MIN_TX_ENTRIES_PER_SUBCRQ);
3872 	atomic_inc(&adapter->running_cap_crqs);
3873 	ibmvnic_send_crq(adapter, &crq);
3874 
3875 	crq.query_capability.capability =
3876 	    cpu_to_be16(MIN_RX_ADD_ENTRIES_PER_SUBCRQ);
3877 	atomic_inc(&adapter->running_cap_crqs);
3878 	ibmvnic_send_crq(adapter, &crq);
3879 
3880 	crq.query_capability.capability =
3881 	    cpu_to_be16(MAX_TX_ENTRIES_PER_SUBCRQ);
3882 	atomic_inc(&adapter->running_cap_crqs);
3883 	ibmvnic_send_crq(adapter, &crq);
3884 
3885 	crq.query_capability.capability =
3886 	    cpu_to_be16(MAX_RX_ADD_ENTRIES_PER_SUBCRQ);
3887 	atomic_inc(&adapter->running_cap_crqs);
3888 	ibmvnic_send_crq(adapter, &crq);
3889 
3890 	crq.query_capability.capability = cpu_to_be16(TCP_IP_OFFLOAD);
3891 	atomic_inc(&adapter->running_cap_crqs);
3892 	ibmvnic_send_crq(adapter, &crq);
3893 
3894 	crq.query_capability.capability = cpu_to_be16(PROMISC_SUPPORTED);
3895 	atomic_inc(&adapter->running_cap_crqs);
3896 	ibmvnic_send_crq(adapter, &crq);
3897 
3898 	crq.query_capability.capability = cpu_to_be16(MIN_MTU);
3899 	atomic_inc(&adapter->running_cap_crqs);
3900 	ibmvnic_send_crq(adapter, &crq);
3901 
3902 	crq.query_capability.capability = cpu_to_be16(MAX_MTU);
3903 	atomic_inc(&adapter->running_cap_crqs);
3904 	ibmvnic_send_crq(adapter, &crq);
3905 
3906 	crq.query_capability.capability = cpu_to_be16(MAX_MULTICAST_FILTERS);
3907 	atomic_inc(&adapter->running_cap_crqs);
3908 	ibmvnic_send_crq(adapter, &crq);
3909 
3910 	crq.query_capability.capability = cpu_to_be16(VLAN_HEADER_INSERTION);
3911 	atomic_inc(&adapter->running_cap_crqs);
3912 	ibmvnic_send_crq(adapter, &crq);
3913 
3914 	crq.query_capability.capability = cpu_to_be16(RX_VLAN_HEADER_INSERTION);
3915 	atomic_inc(&adapter->running_cap_crqs);
3916 	ibmvnic_send_crq(adapter, &crq);
3917 
3918 	crq.query_capability.capability = cpu_to_be16(MAX_TX_SG_ENTRIES);
3919 	atomic_inc(&adapter->running_cap_crqs);
3920 	ibmvnic_send_crq(adapter, &crq);
3921 
3922 	crq.query_capability.capability = cpu_to_be16(RX_SG_SUPPORTED);
3923 	atomic_inc(&adapter->running_cap_crqs);
3924 	ibmvnic_send_crq(adapter, &crq);
3925 
3926 	crq.query_capability.capability = cpu_to_be16(OPT_TX_COMP_SUB_QUEUES);
3927 	atomic_inc(&adapter->running_cap_crqs);
3928 	ibmvnic_send_crq(adapter, &crq);
3929 
3930 	crq.query_capability.capability = cpu_to_be16(OPT_RX_COMP_QUEUES);
3931 	atomic_inc(&adapter->running_cap_crqs);
3932 	ibmvnic_send_crq(adapter, &crq);
3933 
3934 	crq.query_capability.capability =
3935 			cpu_to_be16(OPT_RX_BUFADD_Q_PER_RX_COMP_Q);
3936 	atomic_inc(&adapter->running_cap_crqs);
3937 	ibmvnic_send_crq(adapter, &crq);
3938 
3939 	crq.query_capability.capability =
3940 			cpu_to_be16(OPT_TX_ENTRIES_PER_SUBCRQ);
3941 	atomic_inc(&adapter->running_cap_crqs);
3942 	ibmvnic_send_crq(adapter, &crq);
3943 
3944 	crq.query_capability.capability =
3945 			cpu_to_be16(OPT_RXBA_ENTRIES_PER_SUBCRQ);
3946 	atomic_inc(&adapter->running_cap_crqs);
3947 	ibmvnic_send_crq(adapter, &crq);
3948 
3949 	crq.query_capability.capability = cpu_to_be16(TX_RX_DESC_REQ);
3950 	atomic_inc(&adapter->running_cap_crqs);
3951 	ibmvnic_send_crq(adapter, &crq);
3952 }
3953 
3954 static void handle_vpd_size_rsp(union ibmvnic_crq *crq,
3955 				struct ibmvnic_adapter *adapter)
3956 {
3957 	struct device *dev = &adapter->vdev->dev;
3958 
3959 	if (crq->get_vpd_size_rsp.rc.code) {
3960 		dev_err(dev, "Error retrieving VPD size, rc=%x\n",
3961 			crq->get_vpd_size_rsp.rc.code);
3962 		complete(&adapter->fw_done);
3963 		return;
3964 	}
3965 
3966 	adapter->vpd->len = be64_to_cpu(crq->get_vpd_size_rsp.len);
3967 	complete(&adapter->fw_done);
3968 }
3969 
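/* Extract the firmware level from the VPD response.  The level is an
 * ASCII string that follows the "RM" keyword: one length byte at
 * substr + 2, then the string itself at substr + 3.  Each access is
 * bounds-checked against the end of the VPD buffer, and fw_version
 * falls back to "N/A" if no level can be recovered.
 */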
3970 static void handle_vpd_rsp(union ibmvnic_crq *crq,
3971 			   struct ibmvnic_adapter *adapter)
3972 {
3973 	struct device *dev = &adapter->vdev->dev;
3974 	unsigned char *substr = NULL;
3975 	u8 fw_level_len = 0;
3976 
3977 	memset(adapter->fw_version, 0, 32);
3978 
3979 	dma_unmap_single(dev, adapter->vpd->dma_addr, adapter->vpd->len,
3980 			 DMA_FROM_DEVICE);
3981 
3982 	if (crq->get_vpd_rsp.rc.code) {
3983 		dev_err(dev, "Error retrieving VPD from device, rc=%x\n",
3984 			crq->get_vpd_rsp.rc.code);
3985 		goto complete;
3986 	}
3987 
3988 	/* get the position of the firmware version info
3989 	 * located after the ASCII 'RM' substring in the buffer
3990 	 */
3991 	substr = strnstr(adapter->vpd->buff, "RM", adapter->vpd->len);
3992 	if (!substr) {
3993 		dev_info(dev, "Warning - No FW level has been provided in the VPD buffer by the VIOS Server\n");
3994 		goto complete;
3995 	}
3996 
3997 	/* get length of firmware level ASCII substring */
3998 	if ((substr + 2) < (adapter->vpd->buff + adapter->vpd->len)) {
3999 		fw_level_len = *(substr + 2);
4000 	} else {
4001 		dev_info(dev, "Length of FW substr extrapolated VDP buff\n");
4002 		goto complete;
4003 	}
4004 
4005 	/* copy firmware version string from vpd into adapter */
4006 	if ((substr + 3 + fw_level_len) <
4007 	    (adapter->vpd->buff + adapter->vpd->len)) {
4008 		strncpy((char *)adapter->fw_version, substr + 3, fw_level_len);
4009 	} else {
4010 		dev_info(dev, "FW substr extrapolated VPD buff\n");
4011 	}
4012 
4013 complete:
4014 	if (adapter->fw_version[0] == '\0')
4015 		strncpy((char *)adapter->fw_version, "N/A", 3);
4016 	complete(&adapter->fw_done);
4017 }
4018 
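/* Consume the QUERY_IP_OFFLOAD response: translate the server's
 * checksum and TSO capabilities into netdev hw_features (reconciling
 * them with the currently enabled features when not probing), then
 * fill in the ip_offload_ctrl buffer and send CONTROL_IP_OFFLOAD to
 * commit the selection.
 */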
4019 static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
4020 {
4021 	struct device *dev = &adapter->vdev->dev;
4022 	struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
4023 	netdev_features_t old_hw_features = 0;
4024 	union ibmvnic_crq crq;
4025 	int i;
4026 
4027 	dma_unmap_single(dev, adapter->ip_offload_tok,
4028 			 sizeof(adapter->ip_offload_buf), DMA_FROM_DEVICE);
4029 
4030 	netdev_dbg(adapter->netdev, "Query IP Offload Buffer:\n");
4031 	for (i = 0; i < (sizeof(adapter->ip_offload_buf) - 1) / 8 + 1; i++)
4032 		netdev_dbg(adapter->netdev, "%016lx\n",
4033 			   ((unsigned long *)(buf))[i]);
4034 
4035 	netdev_dbg(adapter->netdev, "ipv4_chksum = %d\n", buf->ipv4_chksum);
4036 	netdev_dbg(adapter->netdev, "ipv6_chksum = %d\n", buf->ipv6_chksum);
4037 	netdev_dbg(adapter->netdev, "tcp_ipv4_chksum = %d\n",
4038 		   buf->tcp_ipv4_chksum);
4039 	netdev_dbg(adapter->netdev, "tcp_ipv6_chksum = %d\n",
4040 		   buf->tcp_ipv6_chksum);
4041 	netdev_dbg(adapter->netdev, "udp_ipv4_chksum = %d\n",
4042 		   buf->udp_ipv4_chksum);
4043 	netdev_dbg(adapter->netdev, "udp_ipv6_chksum = %d\n",
4044 		   buf->udp_ipv6_chksum);
4045 	netdev_dbg(adapter->netdev, "large_tx_ipv4 = %d\n",
4046 		   buf->large_tx_ipv4);
4047 	netdev_dbg(adapter->netdev, "large_tx_ipv6 = %d\n",
4048 		   buf->large_tx_ipv6);
4049 	netdev_dbg(adapter->netdev, "large_rx_ipv4 = %d\n",
4050 		   buf->large_rx_ipv4);
4051 	netdev_dbg(adapter->netdev, "large_rx_ipv6 = %d\n",
4052 		   buf->large_rx_ipv6);
4053 	netdev_dbg(adapter->netdev, "max_ipv4_hdr_sz = %d\n",
4054 		   buf->max_ipv4_header_size);
4055 	netdev_dbg(adapter->netdev, "max_ipv6_hdr_sz = %d\n",
4056 		   buf->max_ipv6_header_size);
4057 	netdev_dbg(adapter->netdev, "max_tcp_hdr_size = %d\n",
4058 		   buf->max_tcp_header_size);
4059 	netdev_dbg(adapter->netdev, "max_udp_hdr_size = %d\n",
4060 		   buf->max_udp_header_size);
4061 	netdev_dbg(adapter->netdev, "max_large_tx_size = %d\n",
4062 		   buf->max_large_tx_size);
4063 	netdev_dbg(adapter->netdev, "max_large_rx_size = %d\n",
4064 		   buf->max_large_rx_size);
4065 	netdev_dbg(adapter->netdev, "ipv6_ext_hdr = %d\n",
4066 		   buf->ipv6_extension_header);
4067 	netdev_dbg(adapter->netdev, "tcp_pseudosum_req = %d\n",
4068 		   buf->tcp_pseudosum_req);
4069 	netdev_dbg(adapter->netdev, "num_ipv6_ext_hd = %d\n",
4070 		   buf->num_ipv6_ext_headers);
4071 	netdev_dbg(adapter->netdev, "off_ipv6_ext_hd = %d\n",
4072 		   buf->off_ipv6_ext_headers);
4073 
4074 	adapter->ip_offload_ctrl_tok =
4075 	    dma_map_single(dev, &adapter->ip_offload_ctrl,
4076 			   sizeof(adapter->ip_offload_ctrl), DMA_TO_DEVICE);
4077 
4078 	if (dma_mapping_error(dev, adapter->ip_offload_ctrl_tok)) {
4079 		dev_err(dev, "Couldn't map ip offload control buffer\n");
4080 		return;
4081 	}
4082 
4083 	adapter->ip_offload_ctrl.len =
4084 	    cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
4085 	adapter->ip_offload_ctrl.version = cpu_to_be32(INITIAL_VERSION_IOB);
4086 	adapter->ip_offload_ctrl.ipv4_chksum = buf->ipv4_chksum;
4087 	adapter->ip_offload_ctrl.ipv6_chksum = buf->ipv6_chksum;
4088 	adapter->ip_offload_ctrl.tcp_ipv4_chksum = buf->tcp_ipv4_chksum;
4089 	adapter->ip_offload_ctrl.udp_ipv4_chksum = buf->udp_ipv4_chksum;
4090 	adapter->ip_offload_ctrl.tcp_ipv6_chksum = buf->tcp_ipv6_chksum;
4091 	adapter->ip_offload_ctrl.udp_ipv6_chksum = buf->udp_ipv6_chksum;
4092 	adapter->ip_offload_ctrl.large_tx_ipv4 = buf->large_tx_ipv4;
4093 	adapter->ip_offload_ctrl.large_tx_ipv6 = buf->large_tx_ipv6;
4094 
4095 	/* large_rx disabled for now, additional features needed */
4096 	adapter->ip_offload_ctrl.large_rx_ipv4 = 0;
4097 	adapter->ip_offload_ctrl.large_rx_ipv6 = 0;
4098 
4099 	if (adapter->state != VNIC_PROBING) {
4100 		old_hw_features = adapter->netdev->hw_features;
4101 		adapter->netdev->hw_features = 0;
4102 	}
4103 
4104 	adapter->netdev->hw_features = NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO;
4105 
4106 	if (buf->tcp_ipv4_chksum || buf->udp_ipv4_chksum)
4107 		adapter->netdev->hw_features |= NETIF_F_IP_CSUM;
4108 
4109 	if (buf->tcp_ipv6_chksum || buf->udp_ipv6_chksum)
4110 		adapter->netdev->hw_features |= NETIF_F_IPV6_CSUM;
4111 
4112 	if ((adapter->netdev->features &
4113 	    (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)))
4114 		adapter->netdev->hw_features |= NETIF_F_RXCSUM;
4115 
4116 	if (buf->large_tx_ipv4)
4117 		adapter->netdev->hw_features |= NETIF_F_TSO;
4118 	if (buf->large_tx_ipv6)
4119 		adapter->netdev->hw_features |= NETIF_F_TSO6;
4120 
4121 	if (adapter->state == VNIC_PROBING) {
4122 		adapter->netdev->features |= adapter->netdev->hw_features;
4123 	} else if (old_hw_features != adapter->netdev->hw_features) {
4124 		netdev_features_t tmp = 0;
4125 
4126 		/* disable features no longer supported */
4127 		adapter->netdev->features &= adapter->netdev->hw_features;
4128 		/* turn on features now supported if previously enabled */
4129 		tmp = (old_hw_features ^ adapter->netdev->hw_features) &
4130 			adapter->netdev->hw_features;
4131 		adapter->netdev->features |=
4132 				tmp & adapter->netdev->wanted_features;
4133 	}
4134 
4135 	memset(&crq, 0, sizeof(crq));
4136 	crq.control_ip_offload.first = IBMVNIC_CRQ_CMD;
4137 	crq.control_ip_offload.cmd = CONTROL_IP_OFFLOAD;
4138 	crq.control_ip_offload.len =
4139 	    cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
4140 	crq.control_ip_offload.ioba = cpu_to_be32(adapter->ip_offload_ctrl_tok);
4141 	ibmvnic_send_crq(adapter, &crq);
4142 }
4143 
4144 static const char *ibmvnic_fw_err_cause(u16 cause)
4145 {
4146 	switch (cause) {
4147 	case ADAPTER_PROBLEM:
4148 		return "adapter problem";
4149 	case BUS_PROBLEM:
4150 		return "bus problem";
4151 	case FW_PROBLEM:
4152 		return "firmware problem";
4153 	case DD_PROBLEM:
4154 		return "device driver problem";
4155 	case EEH_RECOVERY:
4156 		return "EEH recovery";
4157 	case FW_UPDATED:
4158 		return "firmware updated";
4159 	case LOW_MEMORY:
4160 		return "low Memory";
4161 	default:
4162 		return "unknown";
4163 	}
4164 }
4165 
4166 static void handle_error_indication(union ibmvnic_crq *crq,
4167 				    struct ibmvnic_adapter *adapter)
4168 {
4169 	struct device *dev = &adapter->vdev->dev;
4170 	u16 cause;
4171 
4172 	cause = be16_to_cpu(crq->error_indication.error_cause);
4173 
4174 	dev_warn_ratelimited(dev,
4175 			     "Firmware reports %serror, cause: %s. Starting recovery...\n",
4176 			     crq->error_indication.flags
4177 				& IBMVNIC_FATAL_ERROR ? "FATAL " : "",
4178 			     ibmvnic_fw_err_cause(cause));
4179 
4180 	if (crq->error_indication.flags & IBMVNIC_FATAL_ERROR)
4181 		ibmvnic_reset(adapter, VNIC_RESET_FATAL);
4182 	else
4183 		ibmvnic_reset(adapter, VNIC_RESET_NON_FATAL);
4184 }
4185 
4186 static int handle_change_mac_rsp(union ibmvnic_crq *crq,
4187 				 struct ibmvnic_adapter *adapter)
4188 {
4189 	struct net_device *netdev = adapter->netdev;
4190 	struct device *dev = &adapter->vdev->dev;
4191 	long rc;
4192 
4193 	rc = crq->change_mac_addr_rsp.rc.code;
4194 	if (rc) {
4195 		dev_err(dev, "Error %ld in CHANGE_MAC_ADDR_RSP\n", rc);
4196 		goto out;
4197 	}
4198 	ether_addr_copy(netdev->dev_addr,
4199 			&crq->change_mac_addr_rsp.mac_addr[0]);
4200 out:
4201 	complete(&adapter->fw_done);
4202 	return rc;
4203 }
4204 
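/* Handle a REQUEST_CAPABILITY response.  On PARTIALSUCCESS the
 * server's counter-offer (or the fallback MTU) is adopted and the
 * capability requests are resent.  Once the last outstanding response
 * arrives, the IP offload query buffer is mapped and QUERY_IP_OFFLOAD
 * is sent to continue initialization.
 */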
4205 static void handle_request_cap_rsp(union ibmvnic_crq *crq,
4206 				   struct ibmvnic_adapter *adapter)
4207 {
4208 	struct device *dev = &adapter->vdev->dev;
4209 	u64 *req_value;
4210 	char *name;
4211 
4212 	atomic_dec(&adapter->running_cap_crqs);
4213 	switch (be16_to_cpu(crq->request_capability_rsp.capability)) {
4214 	case REQ_TX_QUEUES:
4215 		req_value = &adapter->req_tx_queues;
4216 		name = "tx";
4217 		break;
4218 	case REQ_RX_QUEUES:
4219 		req_value = &adapter->req_rx_queues;
4220 		name = "rx";
4221 		break;
4222 	case REQ_RX_ADD_QUEUES:
4223 		req_value = &adapter->req_rx_add_queues;
4224 		name = "rx_add";
4225 		break;
4226 	case REQ_TX_ENTRIES_PER_SUBCRQ:
4227 		req_value = &adapter->req_tx_entries_per_subcrq;
4228 		name = "tx_entries_per_subcrq";
4229 		break;
4230 	case REQ_RX_ADD_ENTRIES_PER_SUBCRQ:
4231 		req_value = &adapter->req_rx_add_entries_per_subcrq;
4232 		name = "rx_add_entries_per_subcrq";
4233 		break;
4234 	case REQ_MTU:
4235 		req_value = &adapter->req_mtu;
4236 		name = "mtu";
4237 		break;
4238 	case PROMISC_REQUESTED:
4239 		req_value = &adapter->promisc;
4240 		name = "promisc";
4241 		break;
4242 	default:
4243 		dev_err(dev, "Got invalid cap request rsp %d\n",
4244 			be16_to_cpu(crq->request_capability.capability));
4245 		return;
4246 	}
4247 
4248 	switch (crq->request_capability_rsp.rc.code) {
4249 	case SUCCESS:
4250 		break;
4251 	case PARTIALSUCCESS:
4252 		dev_info(dev, "req=%lld, rsp=%ld in %s queue, retrying.\n",
4253 			 *req_value,
4254 			 (long)be64_to_cpu(crq->request_capability_rsp.
4255 					       number), name);
4256 
4257 		if (be16_to_cpu(crq->request_capability_rsp.capability) ==
4258 		    REQ_MTU) {
4259 			pr_err("mtu of %llu is not supported. Reverting.\n",
4260 			       *req_value);
4261 			*req_value = adapter->fallback.mtu;
4262 		} else {
4263 			*req_value =
4264 				be64_to_cpu(crq->request_capability_rsp.number);
4265 		}
4266 
4267 		ibmvnic_send_req_caps(adapter, 1);
4268 		return;
4269 	default:
4270 		dev_err(dev, "Error %d in request cap rsp\n",
4271 			crq->request_capability_rsp.rc.code);
4272 		return;
4273 	}
4274 
4275 	/* Done receiving requested capabilities, query IP offload support */
4276 	if (atomic_read(&adapter->running_cap_crqs) == 0) {
4277 		union ibmvnic_crq newcrq;
4278 		int buf_sz = sizeof(struct ibmvnic_query_ip_offload_buffer);
4279 		struct ibmvnic_query_ip_offload_buffer *ip_offload_buf =
4280 		    &adapter->ip_offload_buf;
4281 
4282 		adapter->wait_capability = false;
4283 		adapter->ip_offload_tok = dma_map_single(dev, ip_offload_buf,
4284 							 buf_sz,
4285 							 DMA_FROM_DEVICE);
4286 
4287 		if (dma_mapping_error(dev, adapter->ip_offload_tok)) {
4288 			if (!firmware_has_feature(FW_FEATURE_CMO))
4289 				dev_err(dev, "Couldn't map offload buffer\n");
4290 			return;
4291 		}
4292 
4293 		memset(&newcrq, 0, sizeof(newcrq));
4294 		newcrq.query_ip_offload.first = IBMVNIC_CRQ_CMD;
4295 		newcrq.query_ip_offload.cmd = QUERY_IP_OFFLOAD;
4296 		newcrq.query_ip_offload.len = cpu_to_be32(buf_sz);
4297 		newcrq.query_ip_offload.ioba =
4298 		    cpu_to_be32(adapter->ip_offload_tok);
4299 
4300 		ibmvnic_send_crq(adapter, &newcrq);
4301 	}
4302 }
4303 
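/* Handle the LOGIN response.  At offsets recorded in its header, the
 * rsp buffer carries the negotiated rx buffer size and the sub-CRQ
 * handles used for later tx/rx hcalls; these are copied into the
 * tx/rx sub-CRQ structures here.
 */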
4304 static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
4305 			    struct ibmvnic_adapter *adapter)
4306 {
4307 	struct device *dev = &adapter->vdev->dev;
4308 	struct net_device *netdev = adapter->netdev;
4309 	struct ibmvnic_login_rsp_buffer *login_rsp = adapter->login_rsp_buf;
4310 	struct ibmvnic_login_buffer *login = adapter->login_buf;
4311 	u64 *tx_handle_array;
4312 	u64 *rx_handle_array;
4313 	int num_tx_pools;
4314 	int num_rx_pools;
4315 	u64 *size_array;
4316 	int i;
4317 
4318 	dma_unmap_single(dev, adapter->login_buf_token, adapter->login_buf_sz,
4319 			 DMA_TO_DEVICE);
4320 	dma_unmap_single(dev, adapter->login_rsp_buf_token,
4321 			 adapter->login_rsp_buf_sz, DMA_FROM_DEVICE);
4322 
4323 	/* If the number of queues requested can't be allocated by the
4324 	 * server, the login response will return with code 1. We will need
4325 	 * to resend the login buffer with fewer queues requested.
4326 	 */
4327 	if (login_rsp_crq->generic.rc.code) {
4328 		adapter->init_done_rc = login_rsp_crq->generic.rc.code;
4329 		complete(&adapter->init_done);
4330 		return 0;
4331 	}
4332 
4333 	netdev->mtu = adapter->req_mtu - ETH_HLEN;
4334 
4335 	netdev_dbg(adapter->netdev, "Login Response Buffer:\n");
4336 	for (i = 0; i < (adapter->login_rsp_buf_sz - 1) / 8 + 1; i++) {
4337 		netdev_dbg(adapter->netdev, "%016lx\n",
4338 			   ((unsigned long *)(adapter->login_rsp_buf))[i]);
4339 	}
4340 
4341 	/* Sanity checks */
4342 	if (login->num_txcomp_subcrqs != login_rsp->num_txsubm_subcrqs ||
4343 	    (be32_to_cpu(login->num_rxcomp_subcrqs) *
4344 	     adapter->req_rx_add_queues !=
4345 	     be32_to_cpu(login_rsp->num_rxadd_subcrqs))) {
4346 		dev_err(dev, "FATAL: Inconsistent login and login rsp\n");
4347 		ibmvnic_remove(adapter->vdev);
4348 		return -EIO;
4349 	}
4350 	size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
4351 		be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));
4352 	/* variable buffer sizes are not supported, so just read the
4353 	 * first entry.
4354 	 */
4355 	adapter->cur_rx_buf_sz = be64_to_cpu(size_array[0]);
4356 
4357 	num_tx_pools = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
4358 	num_rx_pools = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
4359 
4360 	tx_handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
4361 				  be32_to_cpu(adapter->login_rsp_buf->off_txsubm_subcrqs));
4362 	rx_handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
4363 				  be32_to_cpu(adapter->login_rsp_buf->off_rxadd_subcrqs));
4364 
4365 	for (i = 0; i < num_tx_pools; i++)
4366 		adapter->tx_scrq[i]->handle = tx_handle_array[i];
4367 
4368 	for (i = 0; i < num_rx_pools; i++)
4369 		adapter->rx_scrq[i]->handle = rx_handle_array[i];
4370 
4371 	adapter->num_active_tx_scrqs = num_tx_pools;
4372 	adapter->num_active_rx_scrqs = num_rx_pools;
4373 	release_login_rsp_buffer(adapter);
4374 	release_login_buffer(adapter);
4375 	complete(&adapter->init_done);
4376 
4377 	return 0;
4378 }
4379 
4380 static void handle_request_unmap_rsp(union ibmvnic_crq *crq,
4381 				     struct ibmvnic_adapter *adapter)
4382 {
4383 	struct device *dev = &adapter->vdev->dev;
4384 	long rc;
4385 
4386 	rc = crq->request_unmap_rsp.rc.code;
4387 	if (rc)
4388 		dev_err(dev, "Error %ld in REQUEST_UNMAP_RSP\n", rc);
4389 }
4390 
4391 static void handle_query_map_rsp(union ibmvnic_crq *crq,
4392 				 struct ibmvnic_adapter *adapter)
4393 {
4394 	struct net_device *netdev = adapter->netdev;
4395 	struct device *dev = &adapter->vdev->dev;
4396 	long rc;
4397 
4398 	rc = crq->query_map_rsp.rc.code;
4399 	if (rc) {
4400 		dev_err(dev, "Error %ld in QUERY_MAP_RSP\n", rc);
4401 		return;
4402 	}
4403 	netdev_dbg(netdev, "page_size = %d\ntot_pages = %d\nfree_pages = %d\n",
4404 		   crq->query_map_rsp.page_size, crq->query_map_rsp.tot_pages,
4405 		   crq->query_map_rsp.free_pages);
4406 }
4407 
4408 static void handle_query_cap_rsp(union ibmvnic_crq *crq,
4409 				 struct ibmvnic_adapter *adapter)
4410 {
4411 	struct net_device *netdev = adapter->netdev;
4412 	struct device *dev = &adapter->vdev->dev;
4413 	long rc;
4414 
4415 	atomic_dec(&adapter->running_cap_crqs);
4416 	netdev_dbg(netdev, "Outstanding queries: %d\n",
4417 		   atomic_read(&adapter->running_cap_crqs));
4418 	rc = crq->query_capability.rc.code;
4419 	if (rc) {
4420 		dev_err(dev, "Error %ld in QUERY_CAP_RSP\n", rc);
4421 		goto out;
4422 	}
4423 
4424 	switch (be16_to_cpu(crq->query_capability.capability)) {
4425 	case MIN_TX_QUEUES:
4426 		adapter->min_tx_queues =
4427 		    be64_to_cpu(crq->query_capability.number);
4428 		netdev_dbg(netdev, "min_tx_queues = %lld\n",
4429 			   adapter->min_tx_queues);
4430 		break;
4431 	case MIN_RX_QUEUES:
4432 		adapter->min_rx_queues =
4433 		    be64_to_cpu(crq->query_capability.number);
4434 		netdev_dbg(netdev, "min_rx_queues = %lld\n",
4435 			   adapter->min_rx_queues);
4436 		break;
4437 	case MIN_RX_ADD_QUEUES:
4438 		adapter->min_rx_add_queues =
4439 		    be64_to_cpu(crq->query_capability.number);
4440 		netdev_dbg(netdev, "min_rx_add_queues = %lld\n",
4441 			   adapter->min_rx_add_queues);
4442 		break;
4443 	case MAX_TX_QUEUES:
4444 		adapter->max_tx_queues =
4445 		    be64_to_cpu(crq->query_capability.number);
4446 		netdev_dbg(netdev, "max_tx_queues = %lld\n",
4447 			   adapter->max_tx_queues);
4448 		break;
4449 	case MAX_RX_QUEUES:
4450 		adapter->max_rx_queues =
4451 		    be64_to_cpu(crq->query_capability.number);
4452 		netdev_dbg(netdev, "max_rx_queues = %lld\n",
4453 			   adapter->max_rx_queues);
4454 		break;
4455 	case MAX_RX_ADD_QUEUES:
4456 		adapter->max_rx_add_queues =
4457 		    be64_to_cpu(crq->query_capability.number);
4458 		netdev_dbg(netdev, "max_rx_add_queues = %lld\n",
4459 			   adapter->max_rx_add_queues);
4460 		break;
4461 	case MIN_TX_ENTRIES_PER_SUBCRQ:
4462 		adapter->min_tx_entries_per_subcrq =
4463 		    be64_to_cpu(crq->query_capability.number);
4464 		netdev_dbg(netdev, "min_tx_entries_per_subcrq = %lld\n",
4465 			   adapter->min_tx_entries_per_subcrq);
4466 		break;
4467 	case MIN_RX_ADD_ENTRIES_PER_SUBCRQ:
4468 		adapter->min_rx_add_entries_per_subcrq =
4469 		    be64_to_cpu(crq->query_capability.number);
4470 		netdev_dbg(netdev, "min_rx_add_entrs_per_subcrq = %lld\n",
4471 			   adapter->min_rx_add_entries_per_subcrq);
4472 		break;
4473 	case MAX_TX_ENTRIES_PER_SUBCRQ:
4474 		adapter->max_tx_entries_per_subcrq =
4475 		    be64_to_cpu(crq->query_capability.number);
4476 		netdev_dbg(netdev, "max_tx_entries_per_subcrq = %lld\n",
4477 			   adapter->max_tx_entries_per_subcrq);
4478 		break;
4479 	case MAX_RX_ADD_ENTRIES_PER_SUBCRQ:
4480 		adapter->max_rx_add_entries_per_subcrq =
4481 		    be64_to_cpu(crq->query_capability.number);
4482 		netdev_dbg(netdev, "max_rx_add_entrs_per_subcrq = %lld\n",
4483 			   adapter->max_rx_add_entries_per_subcrq);
4484 		break;
4485 	case TCP_IP_OFFLOAD:
4486 		adapter->tcp_ip_offload =
4487 		    be64_to_cpu(crq->query_capability.number);
4488 		netdev_dbg(netdev, "tcp_ip_offload = %lld\n",
4489 			   adapter->tcp_ip_offload);
4490 		break;
4491 	case PROMISC_SUPPORTED:
4492 		adapter->promisc_supported =
4493 		    be64_to_cpu(crq->query_capability.number);
4494 		netdev_dbg(netdev, "promisc_supported = %lld\n",
4495 			   adapter->promisc_supported);
4496 		break;
4497 	case MIN_MTU:
4498 		adapter->min_mtu = be64_to_cpu(crq->query_capability.number);
4499 		netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
4500 		netdev_dbg(netdev, "min_mtu = %lld\n", adapter->min_mtu);
4501 		break;
4502 	case MAX_MTU:
4503 		adapter->max_mtu = be64_to_cpu(crq->query_capability.number);
4504 		netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
4505 		netdev_dbg(netdev, "max_mtu = %lld\n", adapter->max_mtu);
4506 		break;
4507 	case MAX_MULTICAST_FILTERS:
4508 		adapter->max_multicast_filters =
4509 		    be64_to_cpu(crq->query_capability.number);
4510 		netdev_dbg(netdev, "max_multicast_filters = %lld\n",
4511 			   adapter->max_multicast_filters);
4512 		break;
4513 	case VLAN_HEADER_INSERTION:
4514 		adapter->vlan_header_insertion =
4515 		    be64_to_cpu(crq->query_capability.number);
4516 		if (adapter->vlan_header_insertion)
4517 			netdev->features |= NETIF_F_HW_VLAN_STAG_TX;
4518 		netdev_dbg(netdev, "vlan_header_insertion = %lld\n",
4519 			   adapter->vlan_header_insertion);
4520 		break;
4521 	case RX_VLAN_HEADER_INSERTION:
4522 		adapter->rx_vlan_header_insertion =
4523 		    be64_to_cpu(crq->query_capability.number);
4524 		netdev_dbg(netdev, "rx_vlan_header_insertion = %lld\n",
4525 			   adapter->rx_vlan_header_insertion);
4526 		break;
4527 	case MAX_TX_SG_ENTRIES:
4528 		adapter->max_tx_sg_entries =
4529 		    be64_to_cpu(crq->query_capability.number);
4530 		netdev_dbg(netdev, "max_tx_sg_entries = %lld\n",
4531 			   adapter->max_tx_sg_entries);
4532 		break;
4533 	case RX_SG_SUPPORTED:
4534 		adapter->rx_sg_supported =
4535 		    be64_to_cpu(crq->query_capability.number);
4536 		netdev_dbg(netdev, "rx_sg_supported = %lld\n",
4537 			   adapter->rx_sg_supported);
4538 		break;
4539 	case OPT_TX_COMP_SUB_QUEUES:
4540 		adapter->opt_tx_comp_sub_queues =
4541 		    be64_to_cpu(crq->query_capability.number);
4542 		netdev_dbg(netdev, "opt_tx_comp_sub_queues = %lld\n",
4543 			   adapter->opt_tx_comp_sub_queues);
4544 		break;
4545 	case OPT_RX_COMP_QUEUES:
4546 		adapter->opt_rx_comp_queues =
4547 		    be64_to_cpu(crq->query_capability.number);
4548 		netdev_dbg(netdev, "opt_rx_comp_queues = %lld\n",
4549 			   adapter->opt_rx_comp_queues);
4550 		break;
4551 	case OPT_RX_BUFADD_Q_PER_RX_COMP_Q:
4552 		adapter->opt_rx_bufadd_q_per_rx_comp_q =
4553 		    be64_to_cpu(crq->query_capability.number);
4554 		netdev_dbg(netdev, "opt_rx_bufadd_q_per_rx_comp_q = %lld\n",
4555 			   adapter->opt_rx_bufadd_q_per_rx_comp_q);
4556 		break;
4557 	case OPT_TX_ENTRIES_PER_SUBCRQ:
4558 		adapter->opt_tx_entries_per_subcrq =
4559 		    be64_to_cpu(crq->query_capability.number);
4560 		netdev_dbg(netdev, "opt_tx_entries_per_subcrq = %lld\n",
4561 			   adapter->opt_tx_entries_per_subcrq);
4562 		break;
4563 	case OPT_RXBA_ENTRIES_PER_SUBCRQ:
4564 		adapter->opt_rxba_entries_per_subcrq =
4565 		    be64_to_cpu(crq->query_capability.number);
4566 		netdev_dbg(netdev, "opt_rxba_entries_per_subcrq = %lld\n",
4567 			   adapter->opt_rxba_entries_per_subcrq);
4568 		break;
4569 	case TX_RX_DESC_REQ:
4570 		adapter->tx_rx_desc_req = crq->query_capability.number;
4571 		netdev_dbg(netdev, "tx_rx_desc_req = %llx\n",
4572 			   adapter->tx_rx_desc_req);
4573 		break;
4574 
4575 	default:
4576 		netdev_err(netdev, "Got invalid cap rsp %d\n",
4577 			   be16_to_cpu(crq->query_capability.capability));
4578 	}
4579 
4580 out:
4581 	if (atomic_read(&adapter->running_cap_crqs) == 0) {
4582 		adapter->wait_capability = false;
4583 		ibmvnic_send_req_caps(adapter, 0);
4584 	}
4585 }
4586 
4587 static int send_query_phys_parms(struct ibmvnic_adapter *adapter)
4588 {
4589 	union ibmvnic_crq crq;
4590 	int rc;
4591 
4592 	memset(&crq, 0, sizeof(crq));
4593 	crq.query_phys_parms.first = IBMVNIC_CRQ_CMD;
4594 	crq.query_phys_parms.cmd = QUERY_PHYS_PARMS;
4595 
4596 	mutex_lock(&adapter->fw_lock);
4597 	adapter->fw_done_rc = 0;
4598 	reinit_completion(&adapter->fw_done);
4599 
4600 	rc = ibmvnic_send_crq(adapter, &crq);
4601 	if (rc) {
4602 		mutex_unlock(&adapter->fw_lock);
4603 		return rc;
4604 	}
4605 
4606 	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
4607 	if (rc) {
4608 		mutex_unlock(&adapter->fw_lock);
4609 		return rc;
4610 	}
4611 
4612 	mutex_unlock(&adapter->fw_lock);
4613 	return adapter->fw_done_rc ? -EIO : 0;
4614 }
4615 
4616 static int handle_query_phys_parms_rsp(union ibmvnic_crq *crq,
4617 				       struct ibmvnic_adapter *adapter)
4618 {
4619 	struct net_device *netdev = adapter->netdev;
4620 	int rc;
4621 	u32 rspeed = be32_to_cpu(crq->query_phys_parms_rsp.speed);
4622 
4623 	rc = crq->query_phys_parms_rsp.rc.code;
4624 	if (rc) {
4625 		netdev_err(netdev, "Error %d in QUERY_PHYS_PARMS\n", rc);
4626 		return rc;
4627 	}
4628 	switch (rspeed) {
4629 	case IBMVNIC_10MBPS:
4630 		adapter->speed = SPEED_10;
4631 		break;
4632 	case IBMVNIC_100MBPS:
4633 		adapter->speed = SPEED_100;
4634 		break;
4635 	case IBMVNIC_1GBPS:
4636 		adapter->speed = SPEED_1000;
4637 		break;
4638 	case IBMVNIC_10GBP:
4639 		adapter->speed = SPEED_10000;
4640 		break;
4641 	case IBMVNIC_25GBPS:
4642 		adapter->speed = SPEED_25000;
4643 		break;
4644 	case IBMVNIC_40GBPS:
4645 		adapter->speed = SPEED_40000;
4646 		break;
4647 	case IBMVNIC_50GBPS:
4648 		adapter->speed = SPEED_50000;
4649 		break;
4650 	case IBMVNIC_100GBPS:
4651 		adapter->speed = SPEED_100000;
4652 		break;
4653 	default:
4654 		if (netif_carrier_ok(netdev))
4655 			netdev_warn(netdev, "Unknown speed 0x%08x\n", rspeed);
4656 		adapter->speed = SPEED_UNKNOWN;
4657 	}
4658 	if (crq->query_phys_parms_rsp.flags1 & IBMVNIC_FULL_DUPLEX)
4659 		adapter->duplex = DUPLEX_FULL;
4660 	else if (crq->query_phys_parms_rsp.flags1 & IBMVNIC_HALF_DUPLEX)
4661 		adapter->duplex = DUPLEX_HALF;
4662 	else
4663 		adapter->duplex = DUPLEX_UNKNOWN;
4664 
4665 	return rc;
4666 }
4667 
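/* Top-level CRQ dispatcher.  The first byte of a message selects
 * between the INIT handshake, transport events (migration, failover,
 * adapter failure) and ordinary command responses; the latter are
 * fanned out by command code below.
 */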
4668 static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
4669 			       struct ibmvnic_adapter *adapter)
4670 {
4671 	struct ibmvnic_generic_crq *gen_crq = &crq->generic;
4672 	struct net_device *netdev = adapter->netdev;
4673 	struct device *dev = &adapter->vdev->dev;
4674 	u64 *u64_crq = (u64 *)crq;
4675 	long rc;
4676 
4677 	netdev_dbg(netdev, "Handling CRQ: %016lx %016lx\n",
4678 		   (unsigned long)cpu_to_be64(u64_crq[0]),
4679 		   (unsigned long)cpu_to_be64(u64_crq[1]));
4680 	switch (gen_crq->first) {
4681 	case IBMVNIC_CRQ_INIT_RSP:
4682 		switch (gen_crq->cmd) {
4683 		case IBMVNIC_CRQ_INIT:
4684 			dev_info(dev, "Partner initialized\n");
4685 			adapter->from_passive_init = true;
4686 			adapter->failover_pending = false;
4687 			if (!completion_done(&adapter->init_done)) {
4688 				complete(&adapter->init_done);
4689 				adapter->init_done_rc = -EIO;
4690 			}
4691 			ibmvnic_reset(adapter, VNIC_RESET_FAILOVER);
4692 			break;
4693 		case IBMVNIC_CRQ_INIT_COMPLETE:
4694 			dev_info(dev, "Partner initialization complete\n");
4695 			adapter->crq.active = true;
4696 			send_version_xchg(adapter);
4697 			break;
4698 		default:
4699 			dev_err(dev, "Unknown crq cmd: %d\n", gen_crq->cmd);
4700 		}
4701 		return;
4702 	case IBMVNIC_CRQ_XPORT_EVENT:
4703 		netif_carrier_off(netdev);
4704 		adapter->crq.active = false;
4705 		/* terminate any thread waiting for a response
4706 		 * from the device
4707 		 */
4708 		if (!completion_done(&adapter->fw_done)) {
4709 			adapter->fw_done_rc = -EIO;
4710 			complete(&adapter->fw_done);
4711 		}
4712 		if (!completion_done(&adapter->stats_done))
4713 			complete(&adapter->stats_done);
4714 		if (test_bit(0, &adapter->resetting))
4715 			adapter->force_reset_recovery = true;
4716 		if (gen_crq->cmd == IBMVNIC_PARTITION_MIGRATED) {
4717 			dev_info(dev, "Migrated, re-enabling adapter\n");
4718 			ibmvnic_reset(adapter, VNIC_RESET_MOBILITY);
4719 		} else if (gen_crq->cmd == IBMVNIC_DEVICE_FAILOVER) {
4720 			dev_info(dev, "Backing device failover detected\n");
4721 			adapter->failover_pending = true;
4722 		} else {
4723 			/* The adapter lost the connection */
4724 			dev_err(dev, "Virtual Adapter failed (rc=%d)\n",
4725 				gen_crq->cmd);
4726 			ibmvnic_reset(adapter, VNIC_RESET_FATAL);
4727 		}
4728 		return;
4729 	case IBMVNIC_CRQ_CMD_RSP:
4730 		break;
4731 	default:
4732 		dev_err(dev, "Got an invalid msg type 0x%02x\n",
4733 			gen_crq->first);
4734 		return;
4735 	}
4736 
4737 	switch (gen_crq->cmd) {
4738 	case VERSION_EXCHANGE_RSP:
4739 		rc = crq->version_exchange_rsp.rc.code;
4740 		if (rc) {
4741 			dev_err(dev, "Error %ld in VERSION_EXCHG_RSP\n", rc);
4742 			break;
4743 		}
4744 		ibmvnic_version =
4745 			    be16_to_cpu(crq->version_exchange_rsp.version);
4746 		dev_info(dev, "Partner protocol version is %d\n",
4747 			 ibmvnic_version);
4748 		send_cap_queries(adapter);
4749 		break;
4750 	case QUERY_CAPABILITY_RSP:
4751 		handle_query_cap_rsp(crq, adapter);
4752 		break;
4753 	case QUERY_MAP_RSP:
4754 		handle_query_map_rsp(crq, adapter);
4755 		break;
4756 	case REQUEST_MAP_RSP:
4757 		adapter->fw_done_rc = crq->request_map_rsp.rc.code;
4758 		complete(&adapter->fw_done);
4759 		break;
4760 	case REQUEST_UNMAP_RSP:
4761 		handle_request_unmap_rsp(crq, adapter);
4762 		break;
4763 	case REQUEST_CAPABILITY_RSP:
4764 		handle_request_cap_rsp(crq, adapter);
4765 		break;
4766 	case LOGIN_RSP:
4767 		netdev_dbg(netdev, "Got Login Response\n");
4768 		handle_login_rsp(crq, adapter);
4769 		break;
4770 	case LOGICAL_LINK_STATE_RSP:
4771 		netdev_dbg(netdev,
4772 			   "Got Logical Link State Response, state: %d rc: %d\n",
4773 			   crq->logical_link_state_rsp.link_state,
4774 			   crq->logical_link_state_rsp.rc.code);
4775 		adapter->logical_link_state =
4776 		    crq->logical_link_state_rsp.link_state;
4777 		adapter->init_done_rc = crq->logical_link_state_rsp.rc.code;
4778 		complete(&adapter->init_done);
4779 		break;
4780 	case LINK_STATE_INDICATION:
4781 		netdev_dbg(netdev, "Got Logical Link State Indication\n");
4782 		adapter->phys_link_state =
4783 		    crq->link_state_indication.phys_link_state;
4784 		adapter->logical_link_state =
4785 		    crq->link_state_indication.logical_link_state;
4786 		if (adapter->phys_link_state && adapter->logical_link_state)
4787 			netif_carrier_on(netdev);
4788 		else
4789 			netif_carrier_off(netdev);
4790 		break;
4791 	case CHANGE_MAC_ADDR_RSP:
4792 		netdev_dbg(netdev, "Got MAC address change Response\n");
4793 		adapter->fw_done_rc = handle_change_mac_rsp(crq, adapter);
4794 		break;
4795 	case ERROR_INDICATION:
4796 		netdev_dbg(netdev, "Got Error Indication\n");
4797 		handle_error_indication(crq, adapter);
4798 		break;
4799 	case REQUEST_STATISTICS_RSP:
4800 		netdev_dbg(netdev, "Got Statistics Response\n");
4801 		complete(&adapter->stats_done);
4802 		break;
4803 	case QUERY_IP_OFFLOAD_RSP:
4804 		netdev_dbg(netdev, "Got Query IP offload Response\n");
4805 		handle_query_ip_offload_rsp(adapter);
4806 		break;
4807 	case MULTICAST_CTRL_RSP:
4808 		netdev_dbg(netdev, "Got multicast control Response\n");
4809 		break;
4810 	case CONTROL_IP_OFFLOAD_RSP:
4811 		netdev_dbg(netdev, "Got Control IP offload Response\n");
4812 		dma_unmap_single(dev, adapter->ip_offload_ctrl_tok,
4813 				 sizeof(adapter->ip_offload_ctrl),
4814 				 DMA_TO_DEVICE);
4815 		complete(&adapter->init_done);
4816 		break;
4817 	case COLLECT_FW_TRACE_RSP:
4818 		netdev_dbg(netdev, "Got Collect firmware trace Response\n");
4819 		complete(&adapter->fw_done);
4820 		break;
4821 	case GET_VPD_SIZE_RSP:
4822 		handle_vpd_size_rsp(crq, adapter);
4823 		break;
4824 	case GET_VPD_RSP:
4825 		handle_vpd_rsp(crq, adapter);
4826 		break;
4827 	case QUERY_PHYS_PARMS_RSP:
4828 		adapter->fw_done_rc = handle_query_phys_parms_rsp(crq, adapter);
4829 		complete(&adapter->fw_done);
4830 		break;
4831 	default:
4832 		netdev_err(netdev, "Got an invalid cmd type 0x%02x\n",
4833 			   gen_crq->cmd);
4834 	}
4835 }
4836 
4837 static irqreturn_t ibmvnic_interrupt(int irq, void *instance)
4838 {
4839 	struct ibmvnic_adapter *adapter = instance;
4840 
4841 	tasklet_schedule(&adapter->tasklet);
4842 	return IRQ_HANDLED;
4843 }
4844 
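/* Deferred half of ibmvnic_interrupt(): drain every valid message off
 * the CRQ under queue->lock and dispatch it, and keep polling while
 * capability responses are still outstanding so late arrivals are
 * handled in the same pass.
 */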
4845 static void ibmvnic_tasklet(void *data)
4846 {
4847 	struct ibmvnic_adapter *adapter = data;
4848 	struct ibmvnic_crq_queue *queue = &adapter->crq;
4849 	union ibmvnic_crq *crq;
4850 	unsigned long flags;
4851 	bool done = false;
4852 
4853 	spin_lock_irqsave(&queue->lock, flags);
4854 	while (!done) {
4855 		/* Pull all the valid messages off the CRQ */
4856 		while ((crq = ibmvnic_next_crq(adapter)) != NULL) {
4857 			ibmvnic_handle_crq(crq, adapter);
4858 			crq->generic.first = 0;
4859 		}
4860 
4861 		/* remain in tasklet until all
4862 		 * capability responses are received
4863 		 */
4864 		if (!adapter->wait_capability)
4865 			done = true;
4866 	}
4867 	/* if capability CRQs were sent in this tasklet, the following
4868 	 * tasklet run must wait until all responses are received
4869 	 */
4870 	if (atomic_read(&adapter->running_cap_crqs) != 0)
4871 		adapter->wait_capability = true;
4872 	spin_unlock_irqrestore(&queue->lock, flags);
4873 }
4874 
4875 static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *adapter)
4876 {
4877 	struct vio_dev *vdev = adapter->vdev;
4878 	int rc;
4879 
4880 	do {
4881 		rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
4882 	} while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));
4883 
4884 	if (rc)
4885 		dev_err(&vdev->dev, "Error enabling adapter (rc=%d)\n", rc);
4886 
4887 	return rc;
4888 }
4889 
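/* Reset the main CRQ: free it with H_FREE_CRQ (retrying while the
 * hypervisor reports busy), wipe the message page, and register it
 * again with H_REG_CRQ.  H_CLOSED here only means the partner side is
 * not ready yet; a warning is logged and the rc goes to the caller.
 */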
4890 static int ibmvnic_reset_crq(struct ibmvnic_adapter *adapter)
4891 {
4892 	struct ibmvnic_crq_queue *crq = &adapter->crq;
4893 	struct device *dev = &adapter->vdev->dev;
4894 	struct vio_dev *vdev = adapter->vdev;
4895 	int rc;
4896 
4897 	/* Close the CRQ */
4898 	do {
4899 		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
4900 	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
4901 
4902 	/* Clean out the queue */
4903 	memset(crq->msgs, 0, PAGE_SIZE);
4904 	crq->cur = 0;
4905 	crq->active = false;
4906 
4907 	/* And re-open it again */
4908 	rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
4909 				crq->msg_token, PAGE_SIZE);
4910 
4911 	if (rc == H_CLOSED)
4912 		/* Adapter is good, but other end is not ready */
4913 		dev_warn(dev, "Partner adapter not ready\n");
4914 	else if (rc != 0)
4915 		dev_warn(dev, "Couldn't register crq (rc=%d)\n", rc);
4916 
4917 	return rc;
4918 }
4919 
4920 static void release_crq_queue(struct ibmvnic_adapter *adapter)
4921 {
4922 	struct ibmvnic_crq_queue *crq = &adapter->crq;
4923 	struct vio_dev *vdev = adapter->vdev;
4924 	long rc;
4925 
4926 	if (!crq->msgs)
4927 		return;
4928 
4929 	netdev_dbg(adapter->netdev, "Releasing CRQ\n");
4930 	free_irq(vdev->irq, adapter);
4931 	tasklet_kill(&adapter->tasklet);
4932 	do {
4933 		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
4934 	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
4935 
4936 	dma_unmap_single(&vdev->dev, crq->msg_token, PAGE_SIZE,
4937 			 DMA_BIDIRECTIONAL);
4938 	free_page((unsigned long)crq->msgs);
4939 	crq->msgs = NULL;
4940 	crq->active = false;
4941 }
4942 
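/* First-time CRQ setup: allocate one zeroed page of descriptors, map
 * it bidirectionally and register it with H_REG_CRQ (falling back to
 * a CRQ reset if H_RESOURCE hints at a stale registration, e.g. after
 * a kexec), then wire up the tasklet and the CRQ interrupt.
 */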
4943 static int init_crq_queue(struct ibmvnic_adapter *adapter)
4944 {
4945 	struct ibmvnic_crq_queue *crq = &adapter->crq;
4946 	struct device *dev = &adapter->vdev->dev;
4947 	struct vio_dev *vdev = adapter->vdev;
4948 	int rc, retrc = -ENOMEM;
4949 
4950 	if (crq->msgs)
4951 		return 0;
4952 
4953 	crq->msgs = (union ibmvnic_crq *)get_zeroed_page(GFP_KERNEL);
4954 	/* Should we allocate more than one page? */
4955 
4956 	if (!crq->msgs)
4957 		return -ENOMEM;
4958 
4959 	crq->size = PAGE_SIZE / sizeof(*crq->msgs);
4960 	crq->msg_token = dma_map_single(dev, crq->msgs, PAGE_SIZE,
4961 					DMA_BIDIRECTIONAL);
4962 	if (dma_mapping_error(dev, crq->msg_token))
4963 		goto map_failed;
4964 
4965 	rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
4966 				crq->msg_token, PAGE_SIZE);
4967 
4968 	if (rc == H_RESOURCE)
4969 		/* maybe kexecing and resource is busy. try a reset */
4970 		rc = ibmvnic_reset_crq(adapter);
4971 	retrc = rc;
4972 
4973 	if (rc == H_CLOSED) {
4974 		dev_warn(dev, "Partner adapter not ready\n");
4975 	} else if (rc) {
4976 		dev_warn(dev, "Error %d opening adapter\n", rc);
4977 		goto reg_crq_failed;
4978 	}
4979 
4980 	retrc = 0;
4981 
4982 	tasklet_init(&adapter->tasklet, (void *)ibmvnic_tasklet,
4983 		     (unsigned long)adapter);
4984 
4985 	netdev_dbg(adapter->netdev, "registering irq 0x%x\n", vdev->irq);
4986 	snprintf(crq->name, sizeof(crq->name), "ibmvnic-%x",
4987 		 adapter->vdev->unit_address);
4988 	rc = request_irq(vdev->irq, ibmvnic_interrupt, 0, crq->name, adapter);
4989 	if (rc) {
4990 		dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n",
4991 			vdev->irq, rc);
4992 		goto req_irq_failed;
4993 	}
4994 
4995 	rc = vio_enable_interrupts(vdev);
4996 	if (rc) {
4997 		dev_err(dev, "Error %d enabling interrupts\n", rc);
4998 		goto req_irq_failed;
4999 	}
5000 
5001 	crq->cur = 0;
5002 	spin_lock_init(&crq->lock);
5003 
5004 	return retrc;
5005 
5006 req_irq_failed:
5007 	tasklet_kill(&adapter->tasklet);
5008 	do {
5009 		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
5010 	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
5011 reg_crq_failed:
5012 	dma_unmap_single(dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
5013 map_failed:
5014 	free_page((unsigned long)crq->msgs);
5015 	crq->msgs = NULL;
5016 	return retrc;
5017 }
5018 
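/* Run the CRQ-level initialization sequence: send the INIT command
 * and wait up to 30 seconds for init_done, which completes once the
 * version, capability and IP offload exchange has finished.  On a
 * reset, existing sub-CRQs are reset or rebuilt depending on whether
 * the negotiated queue counts changed.
 */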
5019 static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter, bool reset)
5020 {
5021 	struct device *dev = &adapter->vdev->dev;
5022 	unsigned long timeout = msecs_to_jiffies(30000);
5023 	u64 old_num_rx_queues, old_num_tx_queues;
5024 	int rc;
5025 
5026 	adapter->from_passive_init = false;
5027 
5028 	if (reset) {
5029 		old_num_rx_queues = adapter->req_rx_queues;
5030 		old_num_tx_queues = adapter->req_tx_queues;
5031 		reinit_completion(&adapter->init_done);
5032 	}
5033 
5034 	adapter->init_done_rc = 0;
5035 	rc = ibmvnic_send_crq_init(adapter);
5036 	if (rc) {
5037 		dev_err(dev, "Send crq init failed with error %d\n", rc);
5038 		return rc;
5039 	}
5040 
5041 	if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
5042 		dev_err(dev, "Initialization sequence timed out\n");
5043 		return -1;
5044 	}
5045 
5046 	if (adapter->init_done_rc) {
5047 		release_crq_queue(adapter);
5048 		return adapter->init_done_rc;
5049 	}
5050 
5051 	if (reset &&
5052 	    test_bit(0, &adapter->resetting) && !adapter->wait_for_reset &&
5053 	    adapter->reset_reason != VNIC_RESET_MOBILITY) {
5054 		if (adapter->req_rx_queues != old_num_rx_queues ||
5055 		    adapter->req_tx_queues != old_num_tx_queues) {
5056 			release_sub_crqs(adapter, 0);
5057 			rc = init_sub_crqs(adapter);
5058 		} else {
5059 			rc = reset_sub_crq_queues(adapter);
5060 		}
5061 	} else {
5062 		rc = init_sub_crqs(adapter);
5063 	}
5064 
5065 	if (rc) {
5066 		dev_err(dev, "Initialization of sub crqs failed\n");
5067 		release_crq_queue(adapter);
5068 		return rc;
5069 	}
5070 
5071 	rc = init_sub_crq_irqs(adapter);
5072 	if (rc) {
5073 		dev_err(dev, "Failed to initialize sub crq irqs\n");
5074 		release_crq_queue(adapter);
5075 	}
5076 
5077 	return rc;
5078 }
5079 
5080 static struct device_attribute dev_attr_failover;
5081 
5082 static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
5083 {
5084 	struct ibmvnic_adapter *adapter;
5085 	struct net_device *netdev;
5086 	unsigned char *mac_addr_p;
5087 	int rc;
5088 
5089 	dev_dbg(&dev->dev, "entering ibmvnic_probe for UA 0x%x\n",
5090 		dev->unit_address);
5091 
5092 	mac_addr_p = (unsigned char *)vio_get_attribute(dev,
5093 							VETH_MAC_ADDR, NULL);
5094 	if (!mac_addr_p) {
5095 		dev_err(&dev->dev,
5096 			"(%s:%3.3d) ERROR: Can't find MAC_ADDR attribute\n",
5097 			__FILE__, __LINE__);
5098 		return 0;
5099 	}
5100 
5101 	netdev = alloc_etherdev_mq(sizeof(struct ibmvnic_adapter),
5102 				   IBMVNIC_MAX_QUEUES);
5103 	if (!netdev)
5104 		return -ENOMEM;
5105 
5106 	adapter = netdev_priv(netdev);
5107 	adapter->state = VNIC_PROBING;
5108 	dev_set_drvdata(&dev->dev, netdev);
5109 	adapter->vdev = dev;
5110 	adapter->netdev = netdev;
5111 
5112 	ether_addr_copy(adapter->mac_addr, mac_addr_p);
5113 	ether_addr_copy(netdev->dev_addr, adapter->mac_addr);
5114 	netdev->irq = dev->irq;
5115 	netdev->netdev_ops = &ibmvnic_netdev_ops;
5116 	netdev->ethtool_ops = &ibmvnic_ethtool_ops;
5117 	SET_NETDEV_DEV(netdev, &dev->dev);
5118 
5119 	spin_lock_init(&adapter->stats_lock);
5120 
5121 	INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset);
5122 	INIT_DELAYED_WORK(&adapter->ibmvnic_delayed_reset,
5123 			  __ibmvnic_delayed_reset);
5124 	INIT_LIST_HEAD(&adapter->rwi_list);
5125 	spin_lock_init(&adapter->rwi_lock);
5126 	spin_lock_init(&adapter->state_lock);
5127 	mutex_init(&adapter->fw_lock);
5128 	init_completion(&adapter->init_done);
5129 	init_completion(&adapter->fw_done);
5130 	init_completion(&adapter->reset_done);
5131 	init_completion(&adapter->stats_done);
5132 	clear_bit(0, &adapter->resetting);
5133 
5134 	do {
5135 		rc = init_crq_queue(adapter);
5136 		if (rc) {
5137 			dev_err(&dev->dev, "Couldn't initialize crq. rc=%d\n",
5138 				rc);
5139 			goto ibmvnic_init_fail;
5140 		}
5141 
5142 		rc = ibmvnic_reset_init(adapter, false);
5143 		if (rc && rc != EAGAIN)
5144 			goto ibmvnic_init_fail;
5145 	} while (rc == EAGAIN);
5146 
5147 	rc = init_stats_buffers(adapter);
5148 	if (rc)
5149 		goto ibmvnic_init_fail;
5150 
5151 	rc = init_stats_token(adapter);
5152 	if (rc)
5153 		goto ibmvnic_stats_fail;
5154 
5155 	netdev->mtu = adapter->req_mtu - ETH_HLEN;
5156 	netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
5157 	netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
5158 
5159 	rc = device_create_file(&dev->dev, &dev_attr_failover);
5160 	if (rc)
5161 		goto ibmvnic_dev_file_err;
5162 
5163 	netif_carrier_off(netdev);
5164 	rc = register_netdev(netdev);
5165 	if (rc) {
5166 		dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc);
5167 		goto ibmvnic_register_fail;
5168 	}
5169 	dev_info(&dev->dev, "ibmvnic registered\n");
5170 
5171 	adapter->state = VNIC_PROBED;
5172 
5173 	adapter->wait_for_reset = false;
5174 
5175 	return 0;
5176 
5177 ibmvnic_register_fail:
5178 	device_remove_file(&dev->dev, &dev_attr_failover);
5179 
5180 ibmvnic_dev_file_err:
5181 	release_stats_token(adapter);
5182 
5183 ibmvnic_stats_fail:
5184 	release_stats_buffers(adapter);
5185 
5186 ibmvnic_init_fail:
5187 	release_sub_crqs(adapter, 1);
5188 	release_crq_queue(adapter);
5189 	mutex_destroy(&adapter->fw_lock);
5190 	free_netdev(netdev);
5191 
5192 	return rc;
5193 }
5194 
5195 static int ibmvnic_remove(struct vio_dev *dev)
5196 {
5197 	struct net_device *netdev = dev_get_drvdata(&dev->dev);
5198 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
5199 	unsigned long flags;
5200 
5201 	spin_lock_irqsave(&adapter->state_lock, flags);
5202 	if (adapter->state == VNIC_RESETTING) {
5203 		spin_unlock_irqrestore(&adapter->state_lock, flags);
5204 		return -EBUSY;
5205 	}
5206 
5207 	adapter->state = VNIC_REMOVING;
5208 	spin_unlock_irqrestore(&adapter->state_lock, flags);
5209 
5210 	flush_work(&adapter->ibmvnic_reset);
5211 	flush_delayed_work(&adapter->ibmvnic_delayed_reset);
5212 
5213 	rtnl_lock();
5214 	unregister_netdevice(netdev);
5215 
5216 	release_resources(adapter);
5217 	release_sub_crqs(adapter, 1);
5218 	release_crq_queue(adapter);
5219 
5220 	release_stats_token(adapter);
5221 	release_stats_buffers(adapter);
5222 
5223 	adapter->state = VNIC_REMOVED;
5224 
5225 	rtnl_unlock();
5226 	mutex_destroy(&adapter->fw_lock);
5227 	device_remove_file(&dev->dev, &dev_attr_failover);
5228 	free_netdev(netdev);
5229 	dev_set_drvdata(&dev->dev, NULL);
5230 
5231 	return 0;
5232 }
5233 
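/* sysfs "failover" attribute.  Writing "1" fetches the current
 * session token with H_VIOCTL(H_GET_SESSION_TOKEN) and then raises
 * H_SESSION_ERR_DETECTED against that token, asking the platform to
 * fail the connection over to the backing device.
 */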
5234 static ssize_t failover_store(struct device *dev, struct device_attribute *attr,
5235 			      const char *buf, size_t count)
5236 {
5237 	struct net_device *netdev = dev_get_drvdata(dev);
5238 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
5239 	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
5240 	__be64 session_token;
5241 	long rc;
5242 
5243 	if (!sysfs_streq(buf, "1"))
5244 		return -EINVAL;
5245 
5246 	rc = plpar_hcall(H_VIOCTL, retbuf, adapter->vdev->unit_address,
5247 			 H_GET_SESSION_TOKEN, 0, 0, 0);
5248 	if (rc) {
5249 		netdev_err(netdev, "Couldn't retrieve session token, rc %ld\n",
5250 			   rc);
5251 		return -EINVAL;
5252 	}
5253 
5254 	session_token = (__be64)retbuf[0];
5255 	netdev_dbg(netdev, "Initiating client failover, session id %llx\n",
5256 		   be64_to_cpu(session_token));
5257 	rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
5258 				H_SESSION_ERR_DETECTED, session_token, 0, 0);
5259 	if (rc) {
5260 		netdev_err(netdev, "Client initiated failover failed, rc %ld\n",
5261 			   rc);
5262 		return -EINVAL;
5263 	}
5264 
5265 	return count;
5266 }
5267 
5268 static DEVICE_ATTR_WO(failover);
5269 
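/* Estimate the IOMMU entitlement this device wants: one page for the
 * CRQ, the statistics buffer, four pages per sub-CRQ, and the mapped
 * buffers of every active rx pool.
 */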
5270 static unsigned long ibmvnic_get_desired_dma(struct vio_dev *vdev)
5271 {
5272 	struct net_device *netdev = dev_get_drvdata(&vdev->dev);
5273 	struct ibmvnic_adapter *adapter;
5274 	struct iommu_table *tbl;
5275 	unsigned long ret = 0;
5276 	int i;
5277 
5278 	tbl = get_iommu_table_base(&vdev->dev);
5279 
5280 	/* netdev inits at probe time along with the structures we need below */
5281 	if (!netdev)
5282 		return IOMMU_PAGE_ALIGN(IBMVNIC_IO_ENTITLEMENT_DEFAULT, tbl);
5283 
5284 	adapter = netdev_priv(netdev);
5285 
5286 	ret += PAGE_SIZE; /* the crq message queue */
5287 	ret += IOMMU_PAGE_ALIGN(sizeof(struct ibmvnic_statistics), tbl);
5288 
5289 	for (i = 0; i < adapter->req_tx_queues + adapter->req_rx_queues; i++)
5290 		ret += 4 * PAGE_SIZE; /* the scrq message queue */
5291 
5292 	for (i = 0; i < adapter->num_active_rx_pools; i++)
5293 		ret += adapter->rx_pool[i].size *
5294 		    IOMMU_PAGE_ALIGN(adapter->rx_pool[i].buff_size, tbl);
5295 
5296 	return ret;
5297 }
5298 
5299 static int ibmvnic_resume(struct device *dev)
5300 {
5301 	struct net_device *netdev = dev_get_drvdata(dev);
5302 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
5303 
5304 	if (adapter->state != VNIC_OPEN)
5305 		return 0;
5306 
5307 	tasklet_schedule(&adapter->tasklet);
5308 
5309 	return 0;
5310 }
5311 
5312 static const struct vio_device_id ibmvnic_device_table[] = {
5313 	{"network", "IBM,vnic"},
5314 	{"", "" }
5315 };
5316 MODULE_DEVICE_TABLE(vio, ibmvnic_device_table);
5317 
5318 static const struct dev_pm_ops ibmvnic_pm_ops = {
5319 	.resume = ibmvnic_resume
5320 };
5321 
5322 static struct vio_driver ibmvnic_driver = {
5323 	.id_table       = ibmvnic_device_table,
5324 	.probe          = ibmvnic_probe,
5325 	.remove         = ibmvnic_remove,
5326 	.get_desired_dma = ibmvnic_get_desired_dma,
5327 	.name		= ibmvnic_driver_name,
5328 	.pm		= &ibmvnic_pm_ops,
5329 };
5330 
5331 /* module functions */
5332 static int __init ibmvnic_module_init(void)
5333 {
5334 	pr_info("%s: %s %s\n", ibmvnic_driver_name, ibmvnic_driver_string,
5335 		IBMVNIC_DRIVER_VERSION);
5336 
5337 	return vio_register_driver(&ibmvnic_driver);
5338 }
5339 
5340 static void __exit ibmvnic_module_exit(void)
5341 {
5342 	vio_unregister_driver(&ibmvnic_driver);
5343 }
5344 
5345 module_init(ibmvnic_module_init);
5346 module_exit(ibmvnic_module_exit);
5347