xref: /openbmc/linux/drivers/net/ethernet/ibm/ibmvnic.c (revision 84b102f5)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /**************************************************************************/
3 /*                                                                        */
4 /*  IBM System i and System p Virtual NIC Device Driver                   */
5 /*  Copyright (C) 2014 IBM Corp.                                          */
6 /*  Santiago Leon (santi_leon@yahoo.com)                                  */
7 /*  Thomas Falcon (tlfalcon@linux.vnet.ibm.com)                           */
8 /*  John Allen (jallen@linux.vnet.ibm.com)                                */
9 /*                                                                        */
10 /*                                                                        */
11 /* This module contains the implementation of a virtual ethernet device   */
12 /* for use with IBM i/p Series LPAR Linux. It utilizes the logical LAN    */
13 /* option of the RS/6000 Platform Architecture to interface with virtual  */
14 /* ethernet NICs that are presented to the partition by the hypervisor.   */
15 /*									   */
16 /* Messages are passed between the VNIC driver and the VNIC server using  */
17 /* Command/Response Queues (CRQs) and sub CRQs (sCRQs). CRQs are used to  */
18 /* issue and receive commands that initiate communication with the server */
19 /* on driver initialization. Sub CRQs (sCRQs) are similar to CRQs, but    */
20 /* are used by the driver to notify the server that a packet is           */
21 /* ready for transmission or that a buffer has been added to receive a    */
22 /* packet. Subsequently, sCRQs are used by the server to notify the       */
23 /* driver that a packet transmission has been completed or that a packet  */
24 /* has been received and placed in a waiting buffer.                      */
25 /*                                                                        */
26 /* In lieu of a more conventional "on-the-fly" DMA mapping strategy in    */
27 /* which skbs are DMA mapped and immediately unmapped when the transmit   */
28 /* or receive has been completed, the VNIC driver is required to use      */
29 /* "long term mapping". This entails that large, continuous DMA mapped    */
30 /* buffers are allocated on driver initialization and these buffers are   */
31 /* then continuously reused to pass skbs to and from the VNIC server.     */
32 /*                                                                        */
33 /**************************************************************************/
34 
35 #include <linux/module.h>
36 #include <linux/moduleparam.h>
37 #include <linux/types.h>
38 #include <linux/errno.h>
39 #include <linux/completion.h>
40 #include <linux/ioport.h>
41 #include <linux/dma-mapping.h>
42 #include <linux/kernel.h>
43 #include <linux/netdevice.h>
44 #include <linux/etherdevice.h>
45 #include <linux/skbuff.h>
46 #include <linux/init.h>
47 #include <linux/delay.h>
48 #include <linux/mm.h>
49 #include <linux/ethtool.h>
50 #include <linux/proc_fs.h>
51 #include <linux/if_arp.h>
52 #include <linux/in.h>
53 #include <linux/ip.h>
54 #include <linux/ipv6.h>
55 #include <linux/irq.h>
56 #include <linux/kthread.h>
57 #include <linux/seq_file.h>
58 #include <linux/interrupt.h>
59 #include <net/net_namespace.h>
60 #include <asm/hvcall.h>
61 #include <linux/atomic.h>
62 #include <asm/vio.h>
63 #include <asm/iommu.h>
64 #include <linux/uaccess.h>
65 #include <asm/firmware.h>
66 #include <linux/workqueue.h>
67 #include <linux/if_vlan.h>
68 #include <linux/utsname.h>
69 
70 #include "ibmvnic.h"
71 
72 static const char ibmvnic_driver_name[] = "ibmvnic";
73 static const char ibmvnic_driver_string[] = "IBM System i/p Virtual NIC Driver";
74 
75 MODULE_AUTHOR("Santiago Leon");
76 MODULE_DESCRIPTION("IBM System i/p Virtual NIC Driver");
77 MODULE_LICENSE("GPL");
78 MODULE_VERSION(IBMVNIC_DRIVER_VERSION);
79 
80 static int ibmvnic_version = IBMVNIC_INITIAL_VERSION;
81 static int ibmvnic_remove(struct vio_dev *);
82 static void release_sub_crqs(struct ibmvnic_adapter *, bool);
83 static int ibmvnic_reset_crq(struct ibmvnic_adapter *);
84 static int ibmvnic_send_crq_init(struct ibmvnic_adapter *);
85 static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *);
86 static int ibmvnic_send_crq(struct ibmvnic_adapter *, union ibmvnic_crq *);
87 static int send_subcrq_indirect(struct ibmvnic_adapter *, u64, u64, u64);
88 static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance);
89 static int enable_scrq_irq(struct ibmvnic_adapter *,
90 			   struct ibmvnic_sub_crq_queue *);
91 static int disable_scrq_irq(struct ibmvnic_adapter *,
92 			    struct ibmvnic_sub_crq_queue *);
93 static int pending_scrq(struct ibmvnic_adapter *,
94 			struct ibmvnic_sub_crq_queue *);
95 static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *,
96 					struct ibmvnic_sub_crq_queue *);
97 static int ibmvnic_poll(struct napi_struct *napi, int data);
98 static void send_query_map(struct ibmvnic_adapter *adapter);
99 static int send_request_map(struct ibmvnic_adapter *, dma_addr_t, __be32, u8);
100 static int send_request_unmap(struct ibmvnic_adapter *, u8);
101 static int send_login(struct ibmvnic_adapter *adapter);
102 static void send_query_cap(struct ibmvnic_adapter *adapter);
103 static int init_sub_crqs(struct ibmvnic_adapter *);
104 static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter);
105 static int ibmvnic_reset_init(struct ibmvnic_adapter *, bool reset);
106 static void release_crq_queue(struct ibmvnic_adapter *);
107 static int __ibmvnic_set_mac(struct net_device *, u8 *);
108 static int init_crq_queue(struct ibmvnic_adapter *adapter);
109 static int send_query_phys_parms(struct ibmvnic_adapter *adapter);
110 
111 struct ibmvnic_stat {
112 	char name[ETH_GSTRING_LEN];
113 	int offset;
114 };
115 
116 #define IBMVNIC_STAT_OFF(stat) (offsetof(struct ibmvnic_adapter, stats) + \
117 			     offsetof(struct ibmvnic_statistics, stat))
118 #define IBMVNIC_GET_STAT(a, off) (*((u64 *)(((unsigned long)(a)) + off)))
119 
120 static const struct ibmvnic_stat ibmvnic_stats[] = {
121 	{"rx_packets", IBMVNIC_STAT_OFF(rx_packets)},
122 	{"rx_bytes", IBMVNIC_STAT_OFF(rx_bytes)},
123 	{"tx_packets", IBMVNIC_STAT_OFF(tx_packets)},
124 	{"tx_bytes", IBMVNIC_STAT_OFF(tx_bytes)},
125 	{"ucast_tx_packets", IBMVNIC_STAT_OFF(ucast_tx_packets)},
126 	{"ucast_rx_packets", IBMVNIC_STAT_OFF(ucast_rx_packets)},
127 	{"mcast_tx_packets", IBMVNIC_STAT_OFF(mcast_tx_packets)},
128 	{"mcast_rx_packets", IBMVNIC_STAT_OFF(mcast_rx_packets)},
129 	{"bcast_tx_packets", IBMVNIC_STAT_OFF(bcast_tx_packets)},
130 	{"bcast_rx_packets", IBMVNIC_STAT_OFF(bcast_rx_packets)},
131 	{"align_errors", IBMVNIC_STAT_OFF(align_errors)},
132 	{"fcs_errors", IBMVNIC_STAT_OFF(fcs_errors)},
133 	{"single_collision_frames", IBMVNIC_STAT_OFF(single_collision_frames)},
134 	{"multi_collision_frames", IBMVNIC_STAT_OFF(multi_collision_frames)},
135 	{"sqe_test_errors", IBMVNIC_STAT_OFF(sqe_test_errors)},
136 	{"deferred_tx", IBMVNIC_STAT_OFF(deferred_tx)},
137 	{"late_collisions", IBMVNIC_STAT_OFF(late_collisions)},
138 	{"excess_collisions", IBMVNIC_STAT_OFF(excess_collisions)},
139 	{"internal_mac_tx_errors", IBMVNIC_STAT_OFF(internal_mac_tx_errors)},
140 	{"carrier_sense", IBMVNIC_STAT_OFF(carrier_sense)},
141 	{"too_long_frames", IBMVNIC_STAT_OFF(too_long_frames)},
142 	{"internal_mac_rx_errors", IBMVNIC_STAT_OFF(internal_mac_rx_errors)},
143 };
144 
145 static long h_reg_sub_crq(unsigned long unit_address, unsigned long token,
146 			  unsigned long length, unsigned long *number,
147 			  unsigned long *irq)
148 {
149 	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
150 	long rc;
151 
152 	rc = plpar_hcall(H_REG_SUB_CRQ, retbuf, unit_address, token, length);
153 	*number = retbuf[0];
154 	*irq = retbuf[1];
155 
156 	return rc;
157 }
158 
159 /**
160  * ibmvnic_wait_for_completion - Check device state and wait for completion
161  * @adapter: private device data
162  * @comp_done: completion structure to wait for
163  * @timeout: time to wait in milliseconds
164  *
165  * Wait for a completion signal or until the timeout limit is reached
166  * while checking that the device is still active.
167  */
168 static int ibmvnic_wait_for_completion(struct ibmvnic_adapter *adapter,
169 				       struct completion *comp_done,
170 				       unsigned long timeout)
171 {
172 	struct net_device *netdev;
173 	unsigned long div_timeout;
174 	u8 retry;
175 
176 	netdev = adapter->netdev;
177 	retry = 5;
178 	div_timeout = msecs_to_jiffies(timeout / retry);
179 	while (true) {
180 		if (!adapter->crq.active) {
181 			netdev_err(netdev, "Device down!\n");
182 			return -ENODEV;
183 		}
184 		if (!retry--)
185 			break;
186 		if (wait_for_completion_timeout(comp_done, div_timeout))
187 			return 0;
188 	}
189 	netdev_err(netdev, "Operation timed out.\n");
190 	return -ETIMEDOUT;
191 }
192 
193 static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
194 				struct ibmvnic_long_term_buff *ltb, int size)
195 {
196 	struct device *dev = &adapter->vdev->dev;
197 	int rc;
198 
199 	ltb->size = size;
200 	ltb->buff = dma_alloc_coherent(dev, ltb->size, &ltb->addr,
201 				       GFP_KERNEL);
202 
203 	if (!ltb->buff) {
204 		dev_err(dev, "Couldn't alloc long term buffer\n");
205 		return -ENOMEM;
206 	}
207 	ltb->map_id = adapter->map_id;
208 	adapter->map_id++;
209 
210 	mutex_lock(&adapter->fw_lock);
211 	adapter->fw_done_rc = 0;
212 	reinit_completion(&adapter->fw_done);
213 	rc = send_request_map(adapter, ltb->addr,
214 			      ltb->size, ltb->map_id);
215 	if (rc) {
216 		dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
217 		mutex_unlock(&adapter->fw_lock);
218 		return rc;
219 	}
220 
221 	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
222 	if (rc) {
223 		dev_err(dev,
224 			"Long term map request aborted or timed out,rc = %d\n",
225 			rc);
226 		dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
227 		mutex_unlock(&adapter->fw_lock);
228 		return rc;
229 	}
230 
231 	if (adapter->fw_done_rc) {
232 		dev_err(dev, "Couldn't map long term buffer,rc = %d\n",
233 			adapter->fw_done_rc);
234 		dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
235 		mutex_unlock(&adapter->fw_lock);
236 		return -1;
237 	}
238 	mutex_unlock(&adapter->fw_lock);
239 	return 0;
240 }
241 
242 static void free_long_term_buff(struct ibmvnic_adapter *adapter,
243 				struct ibmvnic_long_term_buff *ltb)
244 {
245 	struct device *dev = &adapter->vdev->dev;
246 
247 	if (!ltb->buff)
248 		return;
249 
250 	if (adapter->reset_reason != VNIC_RESET_FAILOVER &&
251 	    adapter->reset_reason != VNIC_RESET_MOBILITY)
252 		send_request_unmap(adapter, ltb->map_id);
253 	dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
254 }
255 
256 static int reset_long_term_buff(struct ibmvnic_adapter *adapter,
257 				struct ibmvnic_long_term_buff *ltb)
258 {
259 	struct device *dev = &adapter->vdev->dev;
260 	int rc;
261 
262 	memset(ltb->buff, 0, ltb->size);
263 
264 	mutex_lock(&adapter->fw_lock);
265 	adapter->fw_done_rc = 0;
266 
267 	reinit_completion(&adapter->fw_done);
268 	rc = send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id);
269 	if (rc) {
270 		mutex_unlock(&adapter->fw_lock);
271 		return rc;
272 	}
273 
274 	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
275 	if (rc) {
276 		dev_info(dev,
277 			 "Reset failed, long term map request timed out or aborted\n");
278 		mutex_unlock(&adapter->fw_lock);
279 		return rc;
280 	}
281 
282 	if (adapter->fw_done_rc) {
283 		dev_info(dev,
284 			 "Reset failed, attempting to free and reallocate buffer\n");
285 		free_long_term_buff(adapter, ltb);
286 		mutex_unlock(&adapter->fw_lock);
287 		return alloc_long_term_buff(adapter, ltb, ltb->size);
288 	}
289 	mutex_unlock(&adapter->fw_lock);
290 	return 0;
291 }
292 
293 static void deactivate_rx_pools(struct ibmvnic_adapter *adapter)
294 {
295 	int i;
296 
297 	for (i = 0; i < adapter->num_active_rx_pools; i++)
298 		adapter->rx_pool[i].active = 0;
299 }
300 
301 static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
302 			      struct ibmvnic_rx_pool *pool)
303 {
304 	int count = pool->size - atomic_read(&pool->available);
305 	u64 handle = adapter->rx_scrq[pool->index]->handle;
306 	struct device *dev = &adapter->vdev->dev;
307 	struct ibmvnic_ind_xmit_queue *ind_bufp;
308 	struct ibmvnic_sub_crq_queue *rx_scrq;
309 	union sub_crq *sub_crq;
310 	int buffers_added = 0;
311 	unsigned long lpar_rc;
312 	struct sk_buff *skb;
313 	unsigned int offset;
314 	dma_addr_t dma_addr;
315 	unsigned char *dst;
316 	int shift = 0;
317 	int index;
318 	int i;
319 
320 	if (!pool->active)
321 		return;
322 
323 	rx_scrq = adapter->rx_scrq[pool->index];
324 	ind_bufp = &rx_scrq->ind_buf;
325 	for (i = 0; i < count; ++i) {
326 		skb = netdev_alloc_skb(adapter->netdev, pool->buff_size);
327 		if (!skb) {
328 			dev_err(dev, "Couldn't replenish rx buff\n");
329 			adapter->replenish_no_mem++;
330 			break;
331 		}
332 
333 		index = pool->free_map[pool->next_free];
334 
335 		if (pool->rx_buff[index].skb)
336 			dev_err(dev, "Inconsistent free_map!\n");
337 
338 		/* Copy the skb to the long term mapped DMA buffer */
339 		offset = index * pool->buff_size;
340 		dst = pool->long_term_buff.buff + offset;
341 		memset(dst, 0, pool->buff_size);
342 		dma_addr = pool->long_term_buff.addr + offset;
343 		pool->rx_buff[index].data = dst;
344 
345 		pool->free_map[pool->next_free] = IBMVNIC_INVALID_MAP;
346 		pool->rx_buff[index].dma = dma_addr;
347 		pool->rx_buff[index].skb = skb;
348 		pool->rx_buff[index].pool_index = pool->index;
349 		pool->rx_buff[index].size = pool->buff_size;
350 
351 		sub_crq = &ind_bufp->indir_arr[ind_bufp->index++];
352 		memset(sub_crq, 0, sizeof(*sub_crq));
353 		sub_crq->rx_add.first = IBMVNIC_CRQ_CMD;
354 		sub_crq->rx_add.correlator =
355 		    cpu_to_be64((u64)&pool->rx_buff[index]);
356 		sub_crq->rx_add.ioba = cpu_to_be32(dma_addr);
357 		sub_crq->rx_add.map_id = pool->long_term_buff.map_id;
358 
359 		/* The length field of the sCRQ is defined to be 24 bits so the
360 		 * buffer size needs to be left shifted by a byte before it is
361 		 * converted to big endian to prevent the last byte from being
362 		 * truncated.
363 		 */
364 #ifdef __LITTLE_ENDIAN__
365 		shift = 8;
366 #endif
367 		sub_crq->rx_add.len = cpu_to_be32(pool->buff_size << shift);
368 		pool->next_free = (pool->next_free + 1) % pool->size;
369 		if (ind_bufp->index == IBMVNIC_MAX_IND_DESCS ||
370 		    i == count - 1) {
371 			lpar_rc =
372 				send_subcrq_indirect(adapter, handle,
373 						     (u64)ind_bufp->indir_dma,
374 						     (u64)ind_bufp->index);
375 			if (lpar_rc != H_SUCCESS)
376 				goto failure;
377 			buffers_added += ind_bufp->index;
378 			adapter->replenish_add_buff_success += ind_bufp->index;
379 			ind_bufp->index = 0;
380 		}
381 	}
382 	atomic_add(buffers_added, &pool->available);
383 	return;
384 
385 failure:
386 	if (lpar_rc != H_PARAMETER && lpar_rc != H_CLOSED)
387 		dev_err_ratelimited(dev, "rx: replenish packet buffer failed\n");
388 	for (i = ind_bufp->index - 1; i >= 0; --i) {
389 		struct ibmvnic_rx_buff *rx_buff;
390 
391 		pool->next_free = pool->next_free == 0 ?
392 				  pool->size - 1 : pool->next_free - 1;
393 		sub_crq = &ind_bufp->indir_arr[i];
394 		rx_buff = (struct ibmvnic_rx_buff *)
395 				be64_to_cpu(sub_crq->rx_add.correlator);
396 		index = (int)(rx_buff - pool->rx_buff);
397 		pool->free_map[pool->next_free] = index;
398 		dev_kfree_skb_any(pool->rx_buff[index].skb);
399 		pool->rx_buff[index].skb = NULL;
400 	}
401 	adapter->replenish_add_buff_failure += ind_bufp->index;
402 	atomic_add(buffers_added, &pool->available);
403 	ind_bufp->index = 0;
404 	if (lpar_rc == H_CLOSED || adapter->failover_pending) {
405 		/* Disable buffer pool replenishment and report carrier off if
406 		 * queue is closed or pending failover.
407 		 * Firmware guarantees that a signal will be sent to the
408 		 * driver, triggering a reset.
409 		 */
410 		deactivate_rx_pools(adapter);
411 		netif_carrier_off(adapter->netdev);
412 	}
413 }
414 
415 static void replenish_pools(struct ibmvnic_adapter *adapter)
416 {
417 	int i;
418 
419 	adapter->replenish_task_cycles++;
420 	for (i = 0; i < adapter->num_active_rx_pools; i++) {
421 		if (adapter->rx_pool[i].active)
422 			replenish_rx_pool(adapter, &adapter->rx_pool[i]);
423 	}
424 
425 	netdev_dbg(adapter->netdev, "Replenished %d pools\n", i);
426 }
427 
428 static void release_stats_buffers(struct ibmvnic_adapter *adapter)
429 {
430 	kfree(adapter->tx_stats_buffers);
431 	kfree(adapter->rx_stats_buffers);
432 	adapter->tx_stats_buffers = NULL;
433 	adapter->rx_stats_buffers = NULL;
434 }
435 
436 static int init_stats_buffers(struct ibmvnic_adapter *adapter)
437 {
438 	adapter->tx_stats_buffers =
439 				kcalloc(IBMVNIC_MAX_QUEUES,
440 					sizeof(struct ibmvnic_tx_queue_stats),
441 					GFP_KERNEL);
442 	if (!adapter->tx_stats_buffers)
443 		return -ENOMEM;
444 
445 	adapter->rx_stats_buffers =
446 				kcalloc(IBMVNIC_MAX_QUEUES,
447 					sizeof(struct ibmvnic_rx_queue_stats),
448 					GFP_KERNEL);
449 	if (!adapter->rx_stats_buffers)
450 		return -ENOMEM;
451 
452 	return 0;
453 }
454 
455 static void release_stats_token(struct ibmvnic_adapter *adapter)
456 {
457 	struct device *dev = &adapter->vdev->dev;
458 
459 	if (!adapter->stats_token)
460 		return;
461 
462 	dma_unmap_single(dev, adapter->stats_token,
463 			 sizeof(struct ibmvnic_statistics),
464 			 DMA_FROM_DEVICE);
465 	adapter->stats_token = 0;
466 }
467 
468 static int init_stats_token(struct ibmvnic_adapter *adapter)
469 {
470 	struct device *dev = &adapter->vdev->dev;
471 	dma_addr_t stok;
472 
473 	stok = dma_map_single(dev, &adapter->stats,
474 			      sizeof(struct ibmvnic_statistics),
475 			      DMA_FROM_DEVICE);
476 	if (dma_mapping_error(dev, stok)) {
477 		dev_err(dev, "Couldn't map stats buffer\n");
478 		return -1;
479 	}
480 
481 	adapter->stats_token = stok;
482 	netdev_dbg(adapter->netdev, "Stats token initialized (%llx)\n", stok);
483 	return 0;
484 }
485 
486 static int reset_rx_pools(struct ibmvnic_adapter *adapter)
487 {
488 	struct ibmvnic_rx_pool *rx_pool;
489 	u64 buff_size;
490 	int rx_scrqs;
491 	int i, j, rc;
492 
493 	if (!adapter->rx_pool)
494 		return -1;
495 
496 	buff_size = adapter->cur_rx_buf_sz;
497 	rx_scrqs = adapter->num_active_rx_pools;
498 	for (i = 0; i < rx_scrqs; i++) {
499 		rx_pool = &adapter->rx_pool[i];
500 
501 		netdev_dbg(adapter->netdev, "Re-setting rx_pool[%d]\n", i);
502 
503 		if (rx_pool->buff_size != buff_size) {
504 			free_long_term_buff(adapter, &rx_pool->long_term_buff);
505 			rx_pool->buff_size = ALIGN(buff_size, L1_CACHE_BYTES);
506 			rc = alloc_long_term_buff(adapter,
507 						  &rx_pool->long_term_buff,
508 						  rx_pool->size *
509 						  rx_pool->buff_size);
510 		} else {
511 			rc = reset_long_term_buff(adapter,
512 						  &rx_pool->long_term_buff);
513 		}
514 
515 		if (rc)
516 			return rc;
517 
518 		for (j = 0; j < rx_pool->size; j++)
519 			rx_pool->free_map[j] = j;
520 
521 		memset(rx_pool->rx_buff, 0,
522 		       rx_pool->size * sizeof(struct ibmvnic_rx_buff));
523 
524 		atomic_set(&rx_pool->available, 0);
525 		rx_pool->next_alloc = 0;
526 		rx_pool->next_free = 0;
527 		rx_pool->active = 1;
528 	}
529 
530 	return 0;
531 }
532 
533 static void release_rx_pools(struct ibmvnic_adapter *adapter)
534 {
535 	struct ibmvnic_rx_pool *rx_pool;
536 	int i, j;
537 
538 	if (!adapter->rx_pool)
539 		return;
540 
541 	for (i = 0; i < adapter->num_active_rx_pools; i++) {
542 		rx_pool = &adapter->rx_pool[i];
543 
544 		netdev_dbg(adapter->netdev, "Releasing rx_pool[%d]\n", i);
545 
546 		kfree(rx_pool->free_map);
547 		free_long_term_buff(adapter, &rx_pool->long_term_buff);
548 
549 		if (!rx_pool->rx_buff)
550 			continue;
551 
552 		for (j = 0; j < rx_pool->size; j++) {
553 			if (rx_pool->rx_buff[j].skb) {
554 				dev_kfree_skb_any(rx_pool->rx_buff[j].skb);
555 				rx_pool->rx_buff[j].skb = NULL;
556 			}
557 		}
558 
559 		kfree(rx_pool->rx_buff);
560 	}
561 
562 	kfree(adapter->rx_pool);
563 	adapter->rx_pool = NULL;
564 	adapter->num_active_rx_pools = 0;
565 }
566 
567 static int init_rx_pools(struct net_device *netdev)
568 {
569 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
570 	struct device *dev = &adapter->vdev->dev;
571 	struct ibmvnic_rx_pool *rx_pool;
572 	int rxadd_subcrqs;
573 	u64 buff_size;
574 	int i, j;
575 
576 	rxadd_subcrqs = adapter->num_active_rx_scrqs;
577 	buff_size = adapter->cur_rx_buf_sz;
578 
579 	adapter->rx_pool = kcalloc(rxadd_subcrqs,
580 				   sizeof(struct ibmvnic_rx_pool),
581 				   GFP_KERNEL);
582 	if (!adapter->rx_pool) {
583 		dev_err(dev, "Failed to allocate rx pools\n");
584 		return -1;
585 	}
586 
587 	adapter->num_active_rx_pools = rxadd_subcrqs;
588 
589 	for (i = 0; i < rxadd_subcrqs; i++) {
590 		rx_pool = &adapter->rx_pool[i];
591 
592 		netdev_dbg(adapter->netdev,
593 			   "Initializing rx_pool[%d], %lld buffs, %lld bytes each\n",
594 			   i, adapter->req_rx_add_entries_per_subcrq,
595 			   buff_size);
596 
597 		rx_pool->size = adapter->req_rx_add_entries_per_subcrq;
598 		rx_pool->index = i;
599 		rx_pool->buff_size = ALIGN(buff_size, L1_CACHE_BYTES);
600 		rx_pool->active = 1;
601 
602 		rx_pool->free_map = kcalloc(rx_pool->size, sizeof(int),
603 					    GFP_KERNEL);
604 		if (!rx_pool->free_map) {
605 			release_rx_pools(adapter);
606 			return -1;
607 		}
608 
609 		rx_pool->rx_buff = kcalloc(rx_pool->size,
610 					   sizeof(struct ibmvnic_rx_buff),
611 					   GFP_KERNEL);
612 		if (!rx_pool->rx_buff) {
613 			dev_err(dev, "Couldn't alloc rx buffers\n");
614 			release_rx_pools(adapter);
615 			return -1;
616 		}
617 
618 		if (alloc_long_term_buff(adapter, &rx_pool->long_term_buff,
619 					 rx_pool->size * rx_pool->buff_size)) {
620 			release_rx_pools(adapter);
621 			return -1;
622 		}
623 
624 		for (j = 0; j < rx_pool->size; ++j)
625 			rx_pool->free_map[j] = j;
626 
627 		atomic_set(&rx_pool->available, 0);
628 		rx_pool->next_alloc = 0;
629 		rx_pool->next_free = 0;
630 	}
631 
632 	return 0;
633 }
634 
635 static int reset_one_tx_pool(struct ibmvnic_adapter *adapter,
636 			     struct ibmvnic_tx_pool *tx_pool)
637 {
638 	int rc, i;
639 
640 	rc = reset_long_term_buff(adapter, &tx_pool->long_term_buff);
641 	if (rc)
642 		return rc;
643 
644 	memset(tx_pool->tx_buff, 0,
645 	       tx_pool->num_buffers *
646 	       sizeof(struct ibmvnic_tx_buff));
647 
648 	for (i = 0; i < tx_pool->num_buffers; i++)
649 		tx_pool->free_map[i] = i;
650 
651 	tx_pool->consumer_index = 0;
652 	tx_pool->producer_index = 0;
653 
654 	return 0;
655 }
656 
657 static int reset_tx_pools(struct ibmvnic_adapter *adapter)
658 {
659 	int tx_scrqs;
660 	int i, rc;
661 
662 	if (!adapter->tx_pool)
663 		return -1;
664 
665 	tx_scrqs = adapter->num_active_tx_pools;
666 	for (i = 0; i < tx_scrqs; i++) {
667 		rc = reset_one_tx_pool(adapter, &adapter->tso_pool[i]);
668 		if (rc)
669 			return rc;
670 		rc = reset_one_tx_pool(adapter, &adapter->tx_pool[i]);
671 		if (rc)
672 			return rc;
673 	}
674 
675 	return 0;
676 }
677 
678 static void release_vpd_data(struct ibmvnic_adapter *adapter)
679 {
680 	if (!adapter->vpd)
681 		return;
682 
683 	kfree(adapter->vpd->buff);
684 	kfree(adapter->vpd);
685 
686 	adapter->vpd = NULL;
687 }
688 
689 static void release_one_tx_pool(struct ibmvnic_adapter *adapter,
690 				struct ibmvnic_tx_pool *tx_pool)
691 {
692 	kfree(tx_pool->tx_buff);
693 	kfree(tx_pool->free_map);
694 	free_long_term_buff(adapter, &tx_pool->long_term_buff);
695 }
696 
697 static void release_tx_pools(struct ibmvnic_adapter *adapter)
698 {
699 	int i;
700 
701 	if (!adapter->tx_pool)
702 		return;
703 
704 	for (i = 0; i < adapter->num_active_tx_pools; i++) {
705 		release_one_tx_pool(adapter, &adapter->tx_pool[i]);
706 		release_one_tx_pool(adapter, &adapter->tso_pool[i]);
707 	}
708 
709 	kfree(adapter->tx_pool);
710 	adapter->tx_pool = NULL;
711 	kfree(adapter->tso_pool);
712 	adapter->tso_pool = NULL;
713 	adapter->num_active_tx_pools = 0;
714 }
715 
716 static int init_one_tx_pool(struct net_device *netdev,
717 			    struct ibmvnic_tx_pool *tx_pool,
718 			    int num_entries, int buf_size)
719 {
720 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
721 	int i;
722 
723 	tx_pool->tx_buff = kcalloc(num_entries,
724 				   sizeof(struct ibmvnic_tx_buff),
725 				   GFP_KERNEL);
726 	if (!tx_pool->tx_buff)
727 		return -1;
728 
729 	if (alloc_long_term_buff(adapter, &tx_pool->long_term_buff,
730 				 num_entries * buf_size))
731 		return -1;
732 
733 	tx_pool->free_map = kcalloc(num_entries, sizeof(int), GFP_KERNEL);
734 	if (!tx_pool->free_map)
735 		return -1;
736 
737 	for (i = 0; i < num_entries; i++)
738 		tx_pool->free_map[i] = i;
739 
740 	tx_pool->consumer_index = 0;
741 	tx_pool->producer_index = 0;
742 	tx_pool->num_buffers = num_entries;
743 	tx_pool->buf_size = buf_size;
744 
745 	return 0;
746 }
747 
748 static int init_tx_pools(struct net_device *netdev)
749 {
750 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
751 	int tx_subcrqs;
752 	u64 buff_size;
753 	int i, rc;
754 
755 	tx_subcrqs = adapter->num_active_tx_scrqs;
756 	adapter->tx_pool = kcalloc(tx_subcrqs,
757 				   sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
758 	if (!adapter->tx_pool)
759 		return -1;
760 
761 	adapter->tso_pool = kcalloc(tx_subcrqs,
762 				    sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
763 	if (!adapter->tso_pool)
764 		return -1;
765 
766 	adapter->num_active_tx_pools = tx_subcrqs;
767 
768 	for (i = 0; i < tx_subcrqs; i++) {
769 		buff_size = adapter->req_mtu + VLAN_HLEN;
770 		buff_size = ALIGN(buff_size, L1_CACHE_BYTES);
771 		rc = init_one_tx_pool(netdev, &adapter->tx_pool[i],
772 				      adapter->req_tx_entries_per_subcrq,
773 				      buff_size);
774 		if (rc) {
775 			release_tx_pools(adapter);
776 			return rc;
777 		}
778 
779 		rc = init_one_tx_pool(netdev, &adapter->tso_pool[i],
780 				      IBMVNIC_TSO_BUFS,
781 				      IBMVNIC_TSO_BUF_SZ);
782 		if (rc) {
783 			release_tx_pools(adapter);
784 			return rc;
785 		}
786 	}
787 
788 	return 0;
789 }
790 
791 static void ibmvnic_napi_enable(struct ibmvnic_adapter *adapter)
792 {
793 	int i;
794 
795 	if (adapter->napi_enabled)
796 		return;
797 
798 	for (i = 0; i < adapter->req_rx_queues; i++)
799 		napi_enable(&adapter->napi[i]);
800 
801 	adapter->napi_enabled = true;
802 }
803 
804 static void ibmvnic_napi_disable(struct ibmvnic_adapter *adapter)
805 {
806 	int i;
807 
808 	if (!adapter->napi_enabled)
809 		return;
810 
811 	for (i = 0; i < adapter->req_rx_queues; i++) {
812 		netdev_dbg(adapter->netdev, "Disabling napi[%d]\n", i);
813 		napi_disable(&adapter->napi[i]);
814 	}
815 
816 	adapter->napi_enabled = false;
817 }
818 
819 static int init_napi(struct ibmvnic_adapter *adapter)
820 {
821 	int i;
822 
823 	adapter->napi = kcalloc(adapter->req_rx_queues,
824 				sizeof(struct napi_struct), GFP_KERNEL);
825 	if (!adapter->napi)
826 		return -ENOMEM;
827 
828 	for (i = 0; i < adapter->req_rx_queues; i++) {
829 		netdev_dbg(adapter->netdev, "Adding napi[%d]\n", i);
830 		netif_napi_add(adapter->netdev, &adapter->napi[i],
831 			       ibmvnic_poll, NAPI_POLL_WEIGHT);
832 	}
833 
834 	adapter->num_active_rx_napi = adapter->req_rx_queues;
835 	return 0;
836 }
837 
838 static void release_napi(struct ibmvnic_adapter *adapter)
839 {
840 	int i;
841 
842 	if (!adapter->napi)
843 		return;
844 
845 	for (i = 0; i < adapter->num_active_rx_napi; i++) {
846 		netdev_dbg(adapter->netdev, "Releasing napi[%d]\n", i);
847 		netif_napi_del(&adapter->napi[i]);
848 	}
849 
850 	kfree(adapter->napi);
851 	adapter->napi = NULL;
852 	adapter->num_active_rx_napi = 0;
853 	adapter->napi_enabled = false;
854 }
855 
856 static int ibmvnic_login(struct net_device *netdev)
857 {
858 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
859 	unsigned long timeout = msecs_to_jiffies(20000);
860 	int retry_count = 0;
861 	int retries = 10;
862 	bool retry;
863 	int rc;
864 
865 	do {
866 		retry = false;
867 		if (retry_count > retries) {
868 			netdev_warn(netdev, "Login attempts exceeded\n");
869 			return -1;
870 		}
871 
872 		adapter->init_done_rc = 0;
873 		reinit_completion(&adapter->init_done);
874 		rc = send_login(adapter);
875 		if (rc)
876 			return rc;
877 
878 		if (!wait_for_completion_timeout(&adapter->init_done,
879 						 timeout)) {
880 			netdev_warn(netdev, "Login timed out, retrying...\n");
881 			retry = true;
882 			adapter->init_done_rc = 0;
883 			retry_count++;
884 			continue;
885 		}
886 
887 		if (adapter->init_done_rc == ABORTED) {
888 			netdev_warn(netdev, "Login aborted, retrying...\n");
889 			retry = true;
890 			adapter->init_done_rc = 0;
891 			retry_count++;
892 			/* FW or device may be busy, so
893 			 * wait a bit before retrying login
894 			 */
895 			msleep(500);
896 		} else if (adapter->init_done_rc == PARTIALSUCCESS) {
897 			retry_count++;
898 			release_sub_crqs(adapter, 1);
899 
900 			retry = true;
901 			netdev_dbg(netdev,
902 				   "Received partial success, retrying...\n");
903 			adapter->init_done_rc = 0;
904 			reinit_completion(&adapter->init_done);
905 			send_query_cap(adapter);
906 			if (!wait_for_completion_timeout(&adapter->init_done,
907 							 timeout)) {
908 				netdev_warn(netdev,
909 					    "Capabilities query timed out\n");
910 				return -1;
911 			}
912 
913 			rc = init_sub_crqs(adapter);
914 			if (rc) {
915 				netdev_warn(netdev,
916 					    "SCRQ initialization failed\n");
917 				return -1;
918 			}
919 
920 			rc = init_sub_crq_irqs(adapter);
921 			if (rc) {
922 				netdev_warn(netdev,
923 					    "SCRQ irq initialization failed\n");
924 				return -1;
925 			}
926 		} else if (adapter->init_done_rc) {
927 			netdev_warn(netdev, "Adapter login failed\n");
928 			return -1;
929 		}
930 	} while (retry);
931 
932 	__ibmvnic_set_mac(netdev, adapter->mac_addr);
933 
934 	netdev_dbg(netdev, "[S:%d] Login succeeded\n", adapter->state);
935 	return 0;
936 }
937 
938 static void release_login_buffer(struct ibmvnic_adapter *adapter)
939 {
940 	kfree(adapter->login_buf);
941 	adapter->login_buf = NULL;
942 }
943 
944 static void release_login_rsp_buffer(struct ibmvnic_adapter *adapter)
945 {
946 	kfree(adapter->login_rsp_buf);
947 	adapter->login_rsp_buf = NULL;
948 }
949 
950 static void release_resources(struct ibmvnic_adapter *adapter)
951 {
952 	release_vpd_data(adapter);
953 
954 	release_tx_pools(adapter);
955 	release_rx_pools(adapter);
956 
957 	release_napi(adapter);
958 	release_login_buffer(adapter);
959 	release_login_rsp_buffer(adapter);
960 }
961 
962 static int set_link_state(struct ibmvnic_adapter *adapter, u8 link_state)
963 {
964 	struct net_device *netdev = adapter->netdev;
965 	unsigned long timeout = msecs_to_jiffies(20000);
966 	union ibmvnic_crq crq;
967 	bool resend;
968 	int rc;
969 
970 	netdev_dbg(netdev, "setting link state %d\n", link_state);
971 
972 	memset(&crq, 0, sizeof(crq));
973 	crq.logical_link_state.first = IBMVNIC_CRQ_CMD;
974 	crq.logical_link_state.cmd = LOGICAL_LINK_STATE;
975 	crq.logical_link_state.link_state = link_state;
976 
977 	do {
978 		resend = false;
979 
980 		reinit_completion(&adapter->init_done);
981 		rc = ibmvnic_send_crq(adapter, &crq);
982 		if (rc) {
983 			netdev_err(netdev, "Failed to set link state\n");
984 			return rc;
985 		}
986 
987 		if (!wait_for_completion_timeout(&adapter->init_done,
988 						 timeout)) {
989 			netdev_err(netdev, "timeout setting link state\n");
990 			return -1;
991 		}
992 
993 		if (adapter->init_done_rc == PARTIALSUCCESS) {
994 			/* Partuial success, delay and re-send */
995 			mdelay(1000);
996 			resend = true;
997 		} else if (adapter->init_done_rc) {
998 			netdev_warn(netdev, "Unable to set link state, rc=%d\n",
999 				    adapter->init_done_rc);
1000 			return adapter->init_done_rc;
1001 		}
1002 	} while (resend);
1003 
1004 	return 0;
1005 }
1006 
1007 static int set_real_num_queues(struct net_device *netdev)
1008 {
1009 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1010 	int rc;
1011 
1012 	netdev_dbg(netdev, "Setting real tx/rx queues (%llx/%llx)\n",
1013 		   adapter->req_tx_queues, adapter->req_rx_queues);
1014 
1015 	rc = netif_set_real_num_tx_queues(netdev, adapter->req_tx_queues);
1016 	if (rc) {
1017 		netdev_err(netdev, "failed to set the number of tx queues\n");
1018 		return rc;
1019 	}
1020 
1021 	rc = netif_set_real_num_rx_queues(netdev, adapter->req_rx_queues);
1022 	if (rc)
1023 		netdev_err(netdev, "failed to set the number of rx queues\n");
1024 
1025 	return rc;
1026 }
1027 
1028 static int ibmvnic_get_vpd(struct ibmvnic_adapter *adapter)
1029 {
1030 	struct device *dev = &adapter->vdev->dev;
1031 	union ibmvnic_crq crq;
1032 	int len = 0;
1033 	int rc;
1034 
1035 	if (adapter->vpd->buff)
1036 		len = adapter->vpd->len;
1037 
1038 	mutex_lock(&adapter->fw_lock);
1039 	adapter->fw_done_rc = 0;
1040 	reinit_completion(&adapter->fw_done);
1041 
1042 	crq.get_vpd_size.first = IBMVNIC_CRQ_CMD;
1043 	crq.get_vpd_size.cmd = GET_VPD_SIZE;
1044 	rc = ibmvnic_send_crq(adapter, &crq);
1045 	if (rc) {
1046 		mutex_unlock(&adapter->fw_lock);
1047 		return rc;
1048 	}
1049 
1050 	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
1051 	if (rc) {
1052 		dev_err(dev, "Could not retrieve VPD size, rc = %d\n", rc);
1053 		mutex_unlock(&adapter->fw_lock);
1054 		return rc;
1055 	}
1056 	mutex_unlock(&adapter->fw_lock);
1057 
1058 	if (!adapter->vpd->len)
1059 		return -ENODATA;
1060 
1061 	if (!adapter->vpd->buff)
1062 		adapter->vpd->buff = kzalloc(adapter->vpd->len, GFP_KERNEL);
1063 	else if (adapter->vpd->len != len)
1064 		adapter->vpd->buff =
1065 			krealloc(adapter->vpd->buff,
1066 				 adapter->vpd->len, GFP_KERNEL);
1067 
1068 	if (!adapter->vpd->buff) {
1069 		dev_err(dev, "Could allocate VPD buffer\n");
1070 		return -ENOMEM;
1071 	}
1072 
1073 	adapter->vpd->dma_addr =
1074 		dma_map_single(dev, adapter->vpd->buff, adapter->vpd->len,
1075 			       DMA_FROM_DEVICE);
1076 	if (dma_mapping_error(dev, adapter->vpd->dma_addr)) {
1077 		dev_err(dev, "Could not map VPD buffer\n");
1078 		kfree(adapter->vpd->buff);
1079 		adapter->vpd->buff = NULL;
1080 		return -ENOMEM;
1081 	}
1082 
1083 	mutex_lock(&adapter->fw_lock);
1084 	adapter->fw_done_rc = 0;
1085 	reinit_completion(&adapter->fw_done);
1086 
1087 	crq.get_vpd.first = IBMVNIC_CRQ_CMD;
1088 	crq.get_vpd.cmd = GET_VPD;
1089 	crq.get_vpd.ioba = cpu_to_be32(adapter->vpd->dma_addr);
1090 	crq.get_vpd.len = cpu_to_be32((u32)adapter->vpd->len);
1091 	rc = ibmvnic_send_crq(adapter, &crq);
1092 	if (rc) {
1093 		kfree(adapter->vpd->buff);
1094 		adapter->vpd->buff = NULL;
1095 		mutex_unlock(&adapter->fw_lock);
1096 		return rc;
1097 	}
1098 
1099 	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
1100 	if (rc) {
1101 		dev_err(dev, "Unable to retrieve VPD, rc = %d\n", rc);
1102 		kfree(adapter->vpd->buff);
1103 		adapter->vpd->buff = NULL;
1104 		mutex_unlock(&adapter->fw_lock);
1105 		return rc;
1106 	}
1107 
1108 	mutex_unlock(&adapter->fw_lock);
1109 	return 0;
1110 }
1111 
1112 static int init_resources(struct ibmvnic_adapter *adapter)
1113 {
1114 	struct net_device *netdev = adapter->netdev;
1115 	int rc;
1116 
1117 	rc = set_real_num_queues(netdev);
1118 	if (rc)
1119 		return rc;
1120 
1121 	adapter->vpd = kzalloc(sizeof(*adapter->vpd), GFP_KERNEL);
1122 	if (!adapter->vpd)
1123 		return -ENOMEM;
1124 
1125 	/* Vital Product Data (VPD) */
1126 	rc = ibmvnic_get_vpd(adapter);
1127 	if (rc) {
1128 		netdev_err(netdev, "failed to initialize Vital Product Data (VPD)\n");
1129 		return rc;
1130 	}
1131 
1132 	adapter->map_id = 1;
1133 
1134 	rc = init_napi(adapter);
1135 	if (rc)
1136 		return rc;
1137 
1138 	send_query_map(adapter);
1139 
1140 	rc = init_rx_pools(netdev);
1141 	if (rc)
1142 		return rc;
1143 
1144 	rc = init_tx_pools(netdev);
1145 	return rc;
1146 }
1147 
1148 static int __ibmvnic_open(struct net_device *netdev)
1149 {
1150 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1151 	enum vnic_state prev_state = adapter->state;
1152 	int i, rc;
1153 
1154 	adapter->state = VNIC_OPENING;
1155 	replenish_pools(adapter);
1156 	ibmvnic_napi_enable(adapter);
1157 
1158 	/* We're ready to receive frames, enable the sub-crq interrupts and
1159 	 * set the logical link state to up
1160 	 */
1161 	for (i = 0; i < adapter->req_rx_queues; i++) {
1162 		netdev_dbg(netdev, "Enabling rx_scrq[%d] irq\n", i);
1163 		if (prev_state == VNIC_CLOSED)
1164 			enable_irq(adapter->rx_scrq[i]->irq);
1165 		enable_scrq_irq(adapter, adapter->rx_scrq[i]);
1166 	}
1167 
1168 	for (i = 0; i < adapter->req_tx_queues; i++) {
1169 		netdev_dbg(netdev, "Enabling tx_scrq[%d] irq\n", i);
1170 		if (prev_state == VNIC_CLOSED)
1171 			enable_irq(adapter->tx_scrq[i]->irq);
1172 		enable_scrq_irq(adapter, adapter->tx_scrq[i]);
1173 		netdev_tx_reset_queue(netdev_get_tx_queue(netdev, i));
1174 	}
1175 
1176 	rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_UP);
1177 	if (rc) {
1178 		for (i = 0; i < adapter->req_rx_queues; i++)
1179 			napi_disable(&adapter->napi[i]);
1180 		release_resources(adapter);
1181 		return rc;
1182 	}
1183 
1184 	netif_tx_start_all_queues(netdev);
1185 
1186 	if (prev_state == VNIC_CLOSED) {
1187 		for (i = 0; i < adapter->req_rx_queues; i++)
1188 			napi_schedule(&adapter->napi[i]);
1189 	}
1190 
1191 	adapter->state = VNIC_OPEN;
1192 	return rc;
1193 }
1194 
1195 static int ibmvnic_open(struct net_device *netdev)
1196 {
1197 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1198 	int rc;
1199 
1200 	/* If device failover is pending, just set device state and return.
1201 	 * Device operation will be handled by reset routine.
1202 	 */
1203 	if (adapter->failover_pending) {
1204 		adapter->state = VNIC_OPEN;
1205 		return 0;
1206 	}
1207 
1208 	if (adapter->state != VNIC_CLOSED) {
1209 		rc = ibmvnic_login(netdev);
1210 		if (rc)
1211 			goto out;
1212 
1213 		rc = init_resources(adapter);
1214 		if (rc) {
1215 			netdev_err(netdev, "failed to initialize resources\n");
1216 			release_resources(adapter);
1217 			goto out;
1218 		}
1219 	}
1220 
1221 	rc = __ibmvnic_open(netdev);
1222 
1223 out:
1224 	/*
1225 	 * If open fails due to a pending failover, set device state and
1226 	 * return. Device operation will be handled by reset routine.
1227 	 */
1228 	if (rc && adapter->failover_pending) {
1229 		adapter->state = VNIC_OPEN;
1230 		rc = 0;
1231 	}
1232 	return rc;
1233 }
1234 
1235 static void clean_rx_pools(struct ibmvnic_adapter *adapter)
1236 {
1237 	struct ibmvnic_rx_pool *rx_pool;
1238 	struct ibmvnic_rx_buff *rx_buff;
1239 	u64 rx_entries;
1240 	int rx_scrqs;
1241 	int i, j;
1242 
1243 	if (!adapter->rx_pool)
1244 		return;
1245 
1246 	rx_scrqs = adapter->num_active_rx_pools;
1247 	rx_entries = adapter->req_rx_add_entries_per_subcrq;
1248 
1249 	/* Free any remaining skbs in the rx buffer pools */
1250 	for (i = 0; i < rx_scrqs; i++) {
1251 		rx_pool = &adapter->rx_pool[i];
1252 		if (!rx_pool || !rx_pool->rx_buff)
1253 			continue;
1254 
1255 		netdev_dbg(adapter->netdev, "Cleaning rx_pool[%d]\n", i);
1256 		for (j = 0; j < rx_entries; j++) {
1257 			rx_buff = &rx_pool->rx_buff[j];
1258 			if (rx_buff && rx_buff->skb) {
1259 				dev_kfree_skb_any(rx_buff->skb);
1260 				rx_buff->skb = NULL;
1261 			}
1262 		}
1263 	}
1264 }
1265 
1266 static void clean_one_tx_pool(struct ibmvnic_adapter *adapter,
1267 			      struct ibmvnic_tx_pool *tx_pool)
1268 {
1269 	struct ibmvnic_tx_buff *tx_buff;
1270 	u64 tx_entries;
1271 	int i;
1272 
1273 	if (!tx_pool || !tx_pool->tx_buff)
1274 		return;
1275 
1276 	tx_entries = tx_pool->num_buffers;
1277 
1278 	for (i = 0; i < tx_entries; i++) {
1279 		tx_buff = &tx_pool->tx_buff[i];
1280 		if (tx_buff && tx_buff->skb) {
1281 			dev_kfree_skb_any(tx_buff->skb);
1282 			tx_buff->skb = NULL;
1283 		}
1284 	}
1285 }
1286 
1287 static void clean_tx_pools(struct ibmvnic_adapter *adapter)
1288 {
1289 	int tx_scrqs;
1290 	int i;
1291 
1292 	if (!adapter->tx_pool || !adapter->tso_pool)
1293 		return;
1294 
1295 	tx_scrqs = adapter->num_active_tx_pools;
1296 
1297 	/* Free any remaining skbs in the tx buffer pools */
1298 	for (i = 0; i < tx_scrqs; i++) {
1299 		netdev_dbg(adapter->netdev, "Cleaning tx_pool[%d]\n", i);
1300 		clean_one_tx_pool(adapter, &adapter->tx_pool[i]);
1301 		clean_one_tx_pool(adapter, &adapter->tso_pool[i]);
1302 	}
1303 }
1304 
1305 static void ibmvnic_disable_irqs(struct ibmvnic_adapter *adapter)
1306 {
1307 	struct net_device *netdev = adapter->netdev;
1308 	int i;
1309 
1310 	if (adapter->tx_scrq) {
1311 		for (i = 0; i < adapter->req_tx_queues; i++)
1312 			if (adapter->tx_scrq[i]->irq) {
1313 				netdev_dbg(netdev,
1314 					   "Disabling tx_scrq[%d] irq\n", i);
1315 				disable_scrq_irq(adapter, adapter->tx_scrq[i]);
1316 				disable_irq(adapter->tx_scrq[i]->irq);
1317 			}
1318 	}
1319 
1320 	if (adapter->rx_scrq) {
1321 		for (i = 0; i < adapter->req_rx_queues; i++) {
1322 			if (adapter->rx_scrq[i]->irq) {
1323 				netdev_dbg(netdev,
1324 					   "Disabling rx_scrq[%d] irq\n", i);
1325 				disable_scrq_irq(adapter, adapter->rx_scrq[i]);
1326 				disable_irq(adapter->rx_scrq[i]->irq);
1327 			}
1328 		}
1329 	}
1330 }
1331 
1332 static void ibmvnic_cleanup(struct net_device *netdev)
1333 {
1334 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1335 
1336 	/* ensure that transmissions are stopped if called by do_reset */
1337 	if (test_bit(0, &adapter->resetting))
1338 		netif_tx_disable(netdev);
1339 	else
1340 		netif_tx_stop_all_queues(netdev);
1341 
1342 	ibmvnic_napi_disable(adapter);
1343 	ibmvnic_disable_irqs(adapter);
1344 
1345 	clean_rx_pools(adapter);
1346 	clean_tx_pools(adapter);
1347 }
1348 
1349 static int __ibmvnic_close(struct net_device *netdev)
1350 {
1351 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1352 	int rc = 0;
1353 
1354 	adapter->state = VNIC_CLOSING;
1355 	rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN);
1356 	if (rc)
1357 		return rc;
1358 	adapter->state = VNIC_CLOSED;
1359 	return 0;
1360 }
1361 
1362 static int ibmvnic_close(struct net_device *netdev)
1363 {
1364 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1365 	int rc;
1366 
1367 	netdev_dbg(netdev, "[S:%d FOP:%d FRR:%d] Closing\n",
1368 		   adapter->state, adapter->failover_pending,
1369 		   adapter->force_reset_recovery);
1370 
1371 	/* If device failover is pending, just set device state and return.
1372 	 * Device operation will be handled by reset routine.
1373 	 */
1374 	if (adapter->failover_pending) {
1375 		adapter->state = VNIC_CLOSED;
1376 		return 0;
1377 	}
1378 
1379 	rc = __ibmvnic_close(netdev);
1380 	ibmvnic_cleanup(netdev);
1381 
1382 	return rc;
1383 }
1384 
1385 /**
1386  * build_hdr_data - creates L2/L3/L4 header data buffer
1387  * @hdr_field - bitfield determining needed headers
1388  * @skb - socket buffer
1389  * @hdr_len - array of header lengths
1390  * @tot_len - total length of data
1391  *
1392  * Reads hdr_field to determine which headers are needed by firmware.
1393  * Builds a buffer containing these headers.  Saves individual header
1394  * lengths and total buffer length to be used to build descriptors.
1395  */
1396 static int build_hdr_data(u8 hdr_field, struct sk_buff *skb,
1397 			  int *hdr_len, u8 *hdr_data)
1398 {
1399 	int len = 0;
1400 	u8 *hdr;
1401 
1402 	if (skb_vlan_tagged(skb) && !skb_vlan_tag_present(skb))
1403 		hdr_len[0] = sizeof(struct vlan_ethhdr);
1404 	else
1405 		hdr_len[0] = sizeof(struct ethhdr);
1406 
1407 	if (skb->protocol == htons(ETH_P_IP)) {
1408 		hdr_len[1] = ip_hdr(skb)->ihl * 4;
1409 		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
1410 			hdr_len[2] = tcp_hdrlen(skb);
1411 		else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
1412 			hdr_len[2] = sizeof(struct udphdr);
1413 	} else if (skb->protocol == htons(ETH_P_IPV6)) {
1414 		hdr_len[1] = sizeof(struct ipv6hdr);
1415 		if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
1416 			hdr_len[2] = tcp_hdrlen(skb);
1417 		else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
1418 			hdr_len[2] = sizeof(struct udphdr);
1419 	} else if (skb->protocol == htons(ETH_P_ARP)) {
1420 		hdr_len[1] = arp_hdr_len(skb->dev);
1421 		hdr_len[2] = 0;
1422 	}
1423 
1424 	memset(hdr_data, 0, 120);
1425 	if ((hdr_field >> 6) & 1) {
1426 		hdr = skb_mac_header(skb);
1427 		memcpy(hdr_data, hdr, hdr_len[0]);
1428 		len += hdr_len[0];
1429 	}
1430 
1431 	if ((hdr_field >> 5) & 1) {
1432 		hdr = skb_network_header(skb);
1433 		memcpy(hdr_data + len, hdr, hdr_len[1]);
1434 		len += hdr_len[1];
1435 	}
1436 
1437 	if ((hdr_field >> 4) & 1) {
1438 		hdr = skb_transport_header(skb);
1439 		memcpy(hdr_data + len, hdr, hdr_len[2]);
1440 		len += hdr_len[2];
1441 	}
1442 	return len;
1443 }
1444 
1445 /**
1446  * create_hdr_descs - create header and header extension descriptors
1447  * @hdr_field - bitfield determining needed headers
1448  * @data - buffer containing header data
1449  * @len - length of data buffer
1450  * @hdr_len - array of individual header lengths
1451  * @scrq_arr - descriptor array
1452  *
1453  * Creates header and, if needed, header extension descriptors and
1454  * places them in a descriptor array, scrq_arr
1455  */
1456 
1457 static int create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len,
1458 			    union sub_crq *scrq_arr)
1459 {
1460 	union sub_crq hdr_desc;
1461 	int tmp_len = len;
1462 	int num_descs = 0;
1463 	u8 *data, *cur;
1464 	int tmp;
1465 
1466 	while (tmp_len > 0) {
1467 		cur = hdr_data + len - tmp_len;
1468 
1469 		memset(&hdr_desc, 0, sizeof(hdr_desc));
1470 		if (cur != hdr_data) {
1471 			data = hdr_desc.hdr_ext.data;
1472 			tmp = tmp_len > 29 ? 29 : tmp_len;
1473 			hdr_desc.hdr_ext.first = IBMVNIC_CRQ_CMD;
1474 			hdr_desc.hdr_ext.type = IBMVNIC_HDR_EXT_DESC;
1475 			hdr_desc.hdr_ext.len = tmp;
1476 		} else {
1477 			data = hdr_desc.hdr.data;
1478 			tmp = tmp_len > 24 ? 24 : tmp_len;
1479 			hdr_desc.hdr.first = IBMVNIC_CRQ_CMD;
1480 			hdr_desc.hdr.type = IBMVNIC_HDR_DESC;
1481 			hdr_desc.hdr.len = tmp;
1482 			hdr_desc.hdr.l2_len = (u8)hdr_len[0];
1483 			hdr_desc.hdr.l3_len = cpu_to_be16((u16)hdr_len[1]);
1484 			hdr_desc.hdr.l4_len = (u8)hdr_len[2];
1485 			hdr_desc.hdr.flag = hdr_field << 1;
1486 		}
1487 		memcpy(data, cur, tmp);
1488 		tmp_len -= tmp;
1489 		*scrq_arr = hdr_desc;
1490 		scrq_arr++;
1491 		num_descs++;
1492 	}
1493 
1494 	return num_descs;
1495 }
1496 
1497 /**
1498  * build_hdr_descs_arr - build a header descriptor array
1499  * @skb - socket buffer
1500  * @num_entries - number of descriptors to be sent
1501  * @subcrq - first TX descriptor
1502  * @hdr_field - bit field determining which headers will be sent
1503  *
1504  * This function will build a TX descriptor array with applicable
1505  * L2/L3/L4 packet header descriptors to be sent by send_subcrq_indirect.
1506  */
1507 
1508 static void build_hdr_descs_arr(struct sk_buff *skb,
1509 				union sub_crq *indir_arr,
1510 				int *num_entries, u8 hdr_field)
1511 {
1512 	int hdr_len[3] = {0, 0, 0};
1513 	u8 hdr_data[140] = {0};
1514 	int tot_len;
1515 
1516 	tot_len = build_hdr_data(hdr_field, skb, hdr_len,
1517 				 hdr_data);
1518 	*num_entries += create_hdr_descs(hdr_field, hdr_data, tot_len, hdr_len,
1519 					 indir_arr + 1);
1520 }
1521 
1522 static int ibmvnic_xmit_workarounds(struct sk_buff *skb,
1523 				    struct net_device *netdev)
1524 {
1525 	/* For some backing devices, mishandling of small packets
1526 	 * can result in a loss of connection or TX stall. Device
1527 	 * architects recommend that no packet should be smaller
1528 	 * than the minimum MTU value provided to the driver, so
1529 	 * pad any packets to that length
1530 	 */
1531 	if (skb->len < netdev->min_mtu)
1532 		return skb_put_padto(skb, netdev->min_mtu);
1533 
1534 	return 0;
1535 }
1536 
1537 static void ibmvnic_tx_scrq_clean_buffer(struct ibmvnic_adapter *adapter,
1538 					 struct ibmvnic_sub_crq_queue *tx_scrq)
1539 {
1540 	struct ibmvnic_ind_xmit_queue *ind_bufp;
1541 	struct ibmvnic_tx_buff *tx_buff;
1542 	struct ibmvnic_tx_pool *tx_pool;
1543 	union sub_crq tx_scrq_entry;
1544 	int queue_num;
1545 	int entries;
1546 	int index;
1547 	int i;
1548 
1549 	ind_bufp = &tx_scrq->ind_buf;
1550 	entries = (u64)ind_bufp->index;
1551 	queue_num = tx_scrq->pool_index;
1552 
1553 	for (i = entries - 1; i >= 0; --i) {
1554 		tx_scrq_entry = ind_bufp->indir_arr[i];
1555 		if (tx_scrq_entry.v1.type != IBMVNIC_TX_DESC)
1556 			continue;
1557 		index = be32_to_cpu(tx_scrq_entry.v1.correlator);
1558 		if (index & IBMVNIC_TSO_POOL_MASK) {
1559 			tx_pool = &adapter->tso_pool[queue_num];
1560 			index &= ~IBMVNIC_TSO_POOL_MASK;
1561 		} else {
1562 			tx_pool = &adapter->tx_pool[queue_num];
1563 		}
1564 		tx_pool->free_map[tx_pool->consumer_index] = index;
1565 		tx_pool->consumer_index = tx_pool->consumer_index == 0 ?
1566 					  tx_pool->num_buffers - 1 :
1567 					  tx_pool->consumer_index - 1;
1568 		tx_buff = &tx_pool->tx_buff[index];
1569 		adapter->netdev->stats.tx_packets--;
1570 		adapter->netdev->stats.tx_bytes -= tx_buff->skb->len;
1571 		adapter->tx_stats_buffers[queue_num].packets--;
1572 		adapter->tx_stats_buffers[queue_num].bytes -=
1573 						tx_buff->skb->len;
1574 		dev_kfree_skb_any(tx_buff->skb);
1575 		tx_buff->skb = NULL;
1576 		adapter->netdev->stats.tx_dropped++;
1577 	}
1578 	ind_bufp->index = 0;
1579 	if (atomic_sub_return(entries, &tx_scrq->used) <=
1580 	    (adapter->req_tx_entries_per_subcrq / 2) &&
1581 	    __netif_subqueue_stopped(adapter->netdev, queue_num)) {
1582 		netif_wake_subqueue(adapter->netdev, queue_num);
1583 		netdev_dbg(adapter->netdev, "Started queue %d\n",
1584 			   queue_num);
1585 	}
1586 }
1587 
1588 static int ibmvnic_tx_scrq_flush(struct ibmvnic_adapter *adapter,
1589 				 struct ibmvnic_sub_crq_queue *tx_scrq)
1590 {
1591 	struct ibmvnic_ind_xmit_queue *ind_bufp;
1592 	u64 dma_addr;
1593 	u64 entries;
1594 	u64 handle;
1595 	int rc;
1596 
1597 	ind_bufp = &tx_scrq->ind_buf;
1598 	dma_addr = (u64)ind_bufp->indir_dma;
1599 	entries = (u64)ind_bufp->index;
1600 	handle = tx_scrq->handle;
1601 
1602 	if (!entries)
1603 		return 0;
1604 	rc = send_subcrq_indirect(adapter, handle, dma_addr, entries);
1605 	if (rc)
1606 		ibmvnic_tx_scrq_clean_buffer(adapter, tx_scrq);
1607 	else
1608 		ind_bufp->index = 0;
1609 	return 0;
1610 }
1611 
1612 static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
1613 {
1614 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1615 	int queue_num = skb_get_queue_mapping(skb);
1616 	u8 *hdrs = (u8 *)&adapter->tx_rx_desc_req;
1617 	struct device *dev = &adapter->vdev->dev;
1618 	struct ibmvnic_ind_xmit_queue *ind_bufp;
1619 	struct ibmvnic_tx_buff *tx_buff = NULL;
1620 	struct ibmvnic_sub_crq_queue *tx_scrq;
1621 	struct ibmvnic_tx_pool *tx_pool;
1622 	unsigned int tx_send_failed = 0;
1623 	netdev_tx_t ret = NETDEV_TX_OK;
1624 	unsigned int tx_map_failed = 0;
1625 	union sub_crq indir_arr[16];
1626 	unsigned int tx_dropped = 0;
1627 	unsigned int tx_packets = 0;
1628 	unsigned int tx_bytes = 0;
1629 	dma_addr_t data_dma_addr;
1630 	struct netdev_queue *txq;
1631 	unsigned long lpar_rc;
1632 	union sub_crq tx_crq;
1633 	unsigned int offset;
1634 	int num_entries = 1;
1635 	unsigned char *dst;
1636 	int index = 0;
1637 	u8 proto = 0;
1638 
1639 	tx_scrq = adapter->tx_scrq[queue_num];
1640 	txq = netdev_get_tx_queue(netdev, queue_num);
1641 	ind_bufp = &tx_scrq->ind_buf;
1642 
1643 	if (test_bit(0, &adapter->resetting)) {
1644 		if (!netif_subqueue_stopped(netdev, skb))
1645 			netif_stop_subqueue(netdev, queue_num);
1646 		dev_kfree_skb_any(skb);
1647 
1648 		tx_send_failed++;
1649 		tx_dropped++;
1650 		ret = NETDEV_TX_OK;
1651 		ibmvnic_tx_scrq_flush(adapter, tx_scrq);
1652 		goto out;
1653 	}
1654 
1655 	if (ibmvnic_xmit_workarounds(skb, netdev)) {
1656 		tx_dropped++;
1657 		tx_send_failed++;
1658 		ret = NETDEV_TX_OK;
1659 		ibmvnic_tx_scrq_flush(adapter, tx_scrq);
1660 		goto out;
1661 	}
1662 	if (skb_is_gso(skb))
1663 		tx_pool = &adapter->tso_pool[queue_num];
1664 	else
1665 		tx_pool = &adapter->tx_pool[queue_num];
1666 
1667 	index = tx_pool->free_map[tx_pool->consumer_index];
1668 
1669 	if (index == IBMVNIC_INVALID_MAP) {
1670 		dev_kfree_skb_any(skb);
1671 		tx_send_failed++;
1672 		tx_dropped++;
1673 		ret = NETDEV_TX_OK;
1674 		ibmvnic_tx_scrq_flush(adapter, tx_scrq);
1675 		goto out;
1676 	}
1677 
1678 	tx_pool->free_map[tx_pool->consumer_index] = IBMVNIC_INVALID_MAP;
1679 
1680 	offset = index * tx_pool->buf_size;
1681 	dst = tx_pool->long_term_buff.buff + offset;
1682 	memset(dst, 0, tx_pool->buf_size);
1683 	data_dma_addr = tx_pool->long_term_buff.addr + offset;
1684 
1685 	if (skb_shinfo(skb)->nr_frags) {
1686 		int cur, i;
1687 
1688 		/* Copy the head */
1689 		skb_copy_from_linear_data(skb, dst, skb_headlen(skb));
1690 		cur = skb_headlen(skb);
1691 
1692 		/* Copy the frags */
1693 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1694 			const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1695 
1696 			memcpy(dst + cur,
1697 			       page_address(skb_frag_page(frag)) +
1698 			       skb_frag_off(frag), skb_frag_size(frag));
1699 			cur += skb_frag_size(frag);
1700 		}
1701 	} else {
1702 		skb_copy_from_linear_data(skb, dst, skb->len);
1703 	}
1704 
1705 	tx_pool->consumer_index =
1706 	    (tx_pool->consumer_index + 1) % tx_pool->num_buffers;
1707 
1708 	tx_buff = &tx_pool->tx_buff[index];
1709 	tx_buff->skb = skb;
1710 	tx_buff->index = index;
1711 	tx_buff->pool_index = queue_num;
1712 
1713 	memset(&tx_crq, 0, sizeof(tx_crq));
1714 	tx_crq.v1.first = IBMVNIC_CRQ_CMD;
1715 	tx_crq.v1.type = IBMVNIC_TX_DESC;
1716 	tx_crq.v1.n_crq_elem = 1;
1717 	tx_crq.v1.n_sge = 1;
1718 	tx_crq.v1.flags1 = IBMVNIC_TX_COMP_NEEDED;
1719 
1720 	if (skb_is_gso(skb))
1721 		tx_crq.v1.correlator =
1722 			cpu_to_be32(index | IBMVNIC_TSO_POOL_MASK);
1723 	else
1724 		tx_crq.v1.correlator = cpu_to_be32(index);
1725 	tx_crq.v1.dma_reg = cpu_to_be16(tx_pool->long_term_buff.map_id);
1726 	tx_crq.v1.sge_len = cpu_to_be32(skb->len);
1727 	tx_crq.v1.ioba = cpu_to_be64(data_dma_addr);
1728 
1729 	if (adapter->vlan_header_insertion && skb_vlan_tag_present(skb)) {
1730 		tx_crq.v1.flags2 |= IBMVNIC_TX_VLAN_INSERT;
1731 		tx_crq.v1.vlan_id = cpu_to_be16(skb->vlan_tci);
1732 	}
1733 
1734 	if (skb->protocol == htons(ETH_P_IP)) {
1735 		tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV4;
1736 		proto = ip_hdr(skb)->protocol;
1737 	} else if (skb->protocol == htons(ETH_P_IPV6)) {
1738 		tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV6;
1739 		proto = ipv6_hdr(skb)->nexthdr;
1740 	}
1741 
1742 	if (proto == IPPROTO_TCP)
1743 		tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_TCP;
1744 	else if (proto == IPPROTO_UDP)
1745 		tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_UDP;
1746 
1747 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
1748 		tx_crq.v1.flags1 |= IBMVNIC_TX_CHKSUM_OFFLOAD;
1749 		hdrs += 2;
1750 	}
1751 	if (skb_is_gso(skb)) {
1752 		tx_crq.v1.flags1 |= IBMVNIC_TX_LSO;
1753 		tx_crq.v1.mss = cpu_to_be16(skb_shinfo(skb)->gso_size);
1754 		hdrs += 2;
1755 	}
1756 
1757 	if ((*hdrs >> 7) & 1)
1758 		build_hdr_descs_arr(skb, indir_arr, &num_entries, *hdrs);
1759 
1760 	tx_crq.v1.n_crq_elem = num_entries;
1761 	tx_buff->num_entries = num_entries;
1762 	/* flush buffer if current entry can not fit */
1763 	if (num_entries + ind_bufp->index > IBMVNIC_MAX_IND_DESCS) {
1764 		lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq);
1765 		if (lpar_rc != H_SUCCESS)
1766 			goto tx_flush_err;
1767 	}
1768 
1769 	indir_arr[0] = tx_crq;
1770 	memcpy(&ind_bufp->indir_arr[ind_bufp->index], &indir_arr[0],
1771 	       num_entries * sizeof(struct ibmvnic_generic_scrq));
1772 	ind_bufp->index += num_entries;
1773 	if (__netdev_tx_sent_queue(txq, skb->len,
1774 				   netdev_xmit_more() &&
1775 				   ind_bufp->index < IBMVNIC_MAX_IND_DESCS)) {
1776 		lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq);
1777 		if (lpar_rc != H_SUCCESS)
1778 			goto tx_err;
1779 	}
1780 
1781 	if (atomic_add_return(num_entries, &tx_scrq->used)
1782 					>= adapter->req_tx_entries_per_subcrq) {
1783 		netdev_dbg(netdev, "Stopping queue %d\n", queue_num);
1784 		netif_stop_subqueue(netdev, queue_num);
1785 	}
1786 
1787 	tx_packets++;
1788 	tx_bytes += skb->len;
1789 	txq->trans_start = jiffies;
1790 	ret = NETDEV_TX_OK;
1791 	goto out;
1792 
1793 tx_flush_err:
1794 	dev_kfree_skb_any(skb);
1795 	tx_buff->skb = NULL;
1796 	tx_pool->consumer_index = tx_pool->consumer_index == 0 ?
1797 				  tx_pool->num_buffers - 1 :
1798 				  tx_pool->consumer_index - 1;
1799 	tx_dropped++;
1800 tx_err:
1801 	if (lpar_rc != H_CLOSED && lpar_rc != H_PARAMETER)
1802 		dev_err_ratelimited(dev, "tx: send failed\n");
1803 
1804 	if (lpar_rc == H_CLOSED || adapter->failover_pending) {
1805 		/* Disable TX and report carrier off if queue is closed
1806 		 * or pending failover.
1807 		 * Firmware guarantees that a signal will be sent to the
1808 		 * driver, triggering a reset or some other action.
1809 		 */
1810 		netif_tx_stop_all_queues(netdev);
1811 		netif_carrier_off(netdev);
1812 	}
1813 out:
1814 	netdev->stats.tx_dropped += tx_dropped;
1815 	netdev->stats.tx_bytes += tx_bytes;
1816 	netdev->stats.tx_packets += tx_packets;
1817 	adapter->tx_send_failed += tx_send_failed;
1818 	adapter->tx_map_failed += tx_map_failed;
1819 	adapter->tx_stats_buffers[queue_num].packets += tx_packets;
1820 	adapter->tx_stats_buffers[queue_num].bytes += tx_bytes;
1821 	adapter->tx_stats_buffers[queue_num].dropped_packets += tx_dropped;
1822 
1823 	return ret;
1824 }
1825 
1826 static void ibmvnic_set_multi(struct net_device *netdev)
1827 {
1828 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1829 	struct netdev_hw_addr *ha;
1830 	union ibmvnic_crq crq;
1831 
1832 	memset(&crq, 0, sizeof(crq));
1833 	crq.request_capability.first = IBMVNIC_CRQ_CMD;
1834 	crq.request_capability.cmd = REQUEST_CAPABILITY;
1835 
1836 	if (netdev->flags & IFF_PROMISC) {
1837 		if (!adapter->promisc_supported)
1838 			return;
1839 	} else {
1840 		if (netdev->flags & IFF_ALLMULTI) {
1841 			/* Accept all multicast */
1842 			memset(&crq, 0, sizeof(crq));
1843 			crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
1844 			crq.multicast_ctrl.cmd = MULTICAST_CTRL;
1845 			crq.multicast_ctrl.flags = IBMVNIC_ENABLE_ALL;
1846 			ibmvnic_send_crq(adapter, &crq);
1847 		} else if (netdev_mc_empty(netdev)) {
1848 			/* Reject all multicast */
1849 			memset(&crq, 0, sizeof(crq));
1850 			crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
1851 			crq.multicast_ctrl.cmd = MULTICAST_CTRL;
1852 			crq.multicast_ctrl.flags = IBMVNIC_DISABLE_ALL;
1853 			ibmvnic_send_crq(adapter, &crq);
1854 		} else {
1855 			/* Accept one or more multicast(s) */
1856 			netdev_for_each_mc_addr(ha, netdev) {
1857 				memset(&crq, 0, sizeof(crq));
1858 				crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
1859 				crq.multicast_ctrl.cmd = MULTICAST_CTRL;
1860 				crq.multicast_ctrl.flags = IBMVNIC_ENABLE_MC;
1861 				ether_addr_copy(&crq.multicast_ctrl.mac_addr[0],
1862 						ha->addr);
1863 				ibmvnic_send_crq(adapter, &crq);
1864 			}
1865 		}
1866 	}
1867 }
1868 
1869 static int __ibmvnic_set_mac(struct net_device *netdev, u8 *dev_addr)
1870 {
1871 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1872 	union ibmvnic_crq crq;
1873 	int rc;
1874 
1875 	if (!is_valid_ether_addr(dev_addr)) {
1876 		rc = -EADDRNOTAVAIL;
1877 		goto err;
1878 	}
1879 
1880 	memset(&crq, 0, sizeof(crq));
1881 	crq.change_mac_addr.first = IBMVNIC_CRQ_CMD;
1882 	crq.change_mac_addr.cmd = CHANGE_MAC_ADDR;
1883 	ether_addr_copy(&crq.change_mac_addr.mac_addr[0], dev_addr);
1884 
1885 	mutex_lock(&adapter->fw_lock);
1886 	adapter->fw_done_rc = 0;
1887 	reinit_completion(&adapter->fw_done);
1888 
1889 	rc = ibmvnic_send_crq(adapter, &crq);
1890 	if (rc) {
1891 		rc = -EIO;
1892 		mutex_unlock(&adapter->fw_lock);
1893 		goto err;
1894 	}
1895 
1896 	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
1897 	/* netdev->dev_addr is changed in handle_change_mac_rsp function */
1898 	if (rc || adapter->fw_done_rc) {
1899 		rc = -EIO;
1900 		mutex_unlock(&adapter->fw_lock);
1901 		goto err;
1902 	}
1903 	mutex_unlock(&adapter->fw_lock);
1904 	return 0;
1905 err:
1906 	ether_addr_copy(adapter->mac_addr, netdev->dev_addr);
1907 	return rc;
1908 }
1909 
1910 static int ibmvnic_set_mac(struct net_device *netdev, void *p)
1911 {
1912 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1913 	struct sockaddr *addr = p;
1914 	int rc;
1915 
1916 	rc = 0;
1917 	if (!is_valid_ether_addr(addr->sa_data))
1918 		return -EADDRNOTAVAIL;
1919 
1920 	if (adapter->state != VNIC_PROBED) {
1921 		ether_addr_copy(adapter->mac_addr, addr->sa_data);
1922 		rc = __ibmvnic_set_mac(netdev, addr->sa_data);
1923 	}
1924 
1925 	return rc;
1926 }
1927 
1928 /**
1929  * do_change_param_reset returns zero if we are able to keep processing reset
1930  * events, or non-zero if we hit a fatal error and must halt.
1931  */
1932 static int do_change_param_reset(struct ibmvnic_adapter *adapter,
1933 				 struct ibmvnic_rwi *rwi,
1934 				 u32 reset_state)
1935 {
1936 	struct net_device *netdev = adapter->netdev;
1937 	int i, rc;
1938 
1939 	netdev_dbg(adapter->netdev, "Change param resetting driver (%d)\n",
1940 		   rwi->reset_reason);
1941 
1942 	netif_carrier_off(netdev);
1943 	adapter->reset_reason = rwi->reset_reason;
1944 
1945 	ibmvnic_cleanup(netdev);
1946 
1947 	if (reset_state == VNIC_OPEN) {
1948 		rc = __ibmvnic_close(netdev);
1949 		if (rc)
1950 			goto out;
1951 	}
1952 
1953 	release_resources(adapter);
1954 	release_sub_crqs(adapter, 1);
1955 	release_crq_queue(adapter);
1956 
1957 	adapter->state = VNIC_PROBED;
1958 
1959 	rc = init_crq_queue(adapter);
1960 
1961 	if (rc) {
1962 		netdev_err(adapter->netdev,
1963 			   "Couldn't initialize crq. rc=%d\n", rc);
1964 		return rc;
1965 	}
1966 
1967 	rc = ibmvnic_reset_init(adapter, true);
1968 	if (rc) {
1969 		rc = IBMVNIC_INIT_FAILED;
1970 		goto out;
1971 	}
1972 
1973 	/* If the adapter was in PROBE state prior to the reset,
1974 	 * exit here.
1975 	 */
1976 	if (reset_state == VNIC_PROBED)
1977 		goto out;
1978 
1979 	rc = ibmvnic_login(netdev);
1980 	if (rc) {
1981 		goto out;
1982 	}
1983 
1984 	rc = init_resources(adapter);
1985 	if (rc)
1986 		goto out;
1987 
1988 	ibmvnic_disable_irqs(adapter);
1989 
1990 	adapter->state = VNIC_CLOSED;
1991 
1992 	if (reset_state == VNIC_CLOSED)
1993 		return 0;
1994 
1995 	rc = __ibmvnic_open(netdev);
1996 	if (rc) {
1997 		rc = IBMVNIC_OPEN_FAILED;
1998 		goto out;
1999 	}
2000 
2001 	/* refresh device's multicast list */
2002 	ibmvnic_set_multi(netdev);
2003 
2004 	/* kick napi */
2005 	for (i = 0; i < adapter->req_rx_queues; i++)
2006 		napi_schedule(&adapter->napi[i]);
2007 
2008 out:
2009 	if (rc)
2010 		adapter->state = reset_state;
2011 	return rc;
2012 }
2013 
2014 /**
2015  * do_reset returns zero if we are able to keep processing reset events, or
2016  * non-zero if we hit a fatal error and must halt.
2017  */
2018 static int do_reset(struct ibmvnic_adapter *adapter,
2019 		    struct ibmvnic_rwi *rwi, u32 reset_state)
2020 {
2021 	u64 old_num_rx_queues, old_num_tx_queues;
2022 	u64 old_num_rx_slots, old_num_tx_slots;
2023 	struct net_device *netdev = adapter->netdev;
2024 	int i, rc;
2025 
2026 	netdev_dbg(adapter->netdev,
2027 		   "[S:%d FOP:%d] Reset reason %d, reset_state %d\n",
2028 		   adapter->state, adapter->failover_pending,
2029 		   rwi->reset_reason, reset_state);
2030 
2031 	rtnl_lock();
2032 	/*
2033 	 * Now that we have the rtnl lock, clear any pending failover.
2034 	 * This will ensure ibmvnic_open() has either completed or will
2035 	 * block until failover is complete.
2036 	 */
2037 	if (rwi->reset_reason == VNIC_RESET_FAILOVER)
2038 		adapter->failover_pending = false;
2039 
2040 	netif_carrier_off(netdev);
2041 	adapter->reset_reason = rwi->reset_reason;
2042 
2043 	old_num_rx_queues = adapter->req_rx_queues;
2044 	old_num_tx_queues = adapter->req_tx_queues;
2045 	old_num_rx_slots = adapter->req_rx_add_entries_per_subcrq;
2046 	old_num_tx_slots = adapter->req_tx_entries_per_subcrq;
2047 
2048 	ibmvnic_cleanup(netdev);
2049 
2050 	if (reset_state == VNIC_OPEN &&
2051 	    adapter->reset_reason != VNIC_RESET_MOBILITY &&
2052 	    adapter->reset_reason != VNIC_RESET_FAILOVER) {
2053 		adapter->state = VNIC_CLOSING;
2054 
2055 		/* Release the RTNL lock before link state change and
2056 		 * re-acquire after the link state change to allow
2057 		 * linkwatch_event to grab the RTNL lock and run during
2058 		 * a reset.
2059 		 */
2060 		rtnl_unlock();
2061 		rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN);
2062 		rtnl_lock();
2063 		if (rc)
2064 			goto out;
2065 
2066 		if (adapter->state != VNIC_CLOSING) {
2067 			rc = -1;
2068 			goto out;
2069 		}
2070 
2071 		adapter->state = VNIC_CLOSED;
2072 	}
2073 
2074 	if (adapter->reset_reason != VNIC_RESET_NON_FATAL) {
2075 		/* remove the closed state so when we call open it appears
2076 		 * we are coming from the probed state.
2077 		 */
2078 		adapter->state = VNIC_PROBED;
2079 
2080 		if (adapter->reset_reason == VNIC_RESET_MOBILITY) {
2081 			rc = ibmvnic_reenable_crq_queue(adapter);
2082 			release_sub_crqs(adapter, 1);
2083 		} else {
2084 			rc = ibmvnic_reset_crq(adapter);
2085 			if (rc == H_CLOSED || rc == H_SUCCESS) {
2086 				rc = vio_enable_interrupts(adapter->vdev);
2087 				if (rc)
2088 					netdev_err(adapter->netdev,
2089 						   "Reset failed to enable interrupts. rc=%d\n",
2090 						   rc);
2091 			}
2092 		}
2093 
2094 		if (rc) {
2095 			netdev_err(adapter->netdev,
2096 				   "Reset couldn't initialize crq. rc=%d\n", rc);
2097 			goto out;
2098 		}
2099 
2100 		rc = ibmvnic_reset_init(adapter, true);
2101 		if (rc) {
2102 			rc = IBMVNIC_INIT_FAILED;
2103 			goto out;
2104 		}
2105 
2106 		/* If the adapter was in PROBE state prior to the reset,
2107 		 * exit here.
2108 		 */
2109 		if (reset_state == VNIC_PROBED) {
2110 			rc = 0;
2111 			goto out;
2112 		}
2113 
2114 		rc = ibmvnic_login(netdev);
2115 		if (rc) {
2116 			goto out;
2117 		}
2118 
2119 		if (adapter->req_rx_queues != old_num_rx_queues ||
2120 		    adapter->req_tx_queues != old_num_tx_queues ||
2121 		    adapter->req_rx_add_entries_per_subcrq !=
2122 		    old_num_rx_slots ||
2123 		    adapter->req_tx_entries_per_subcrq !=
2124 		    old_num_tx_slots ||
2125 		    !adapter->rx_pool ||
2126 		    !adapter->tso_pool ||
2127 		    !adapter->tx_pool) {
2128 			release_rx_pools(adapter);
2129 			release_tx_pools(adapter);
2130 			release_napi(adapter);
2131 			release_vpd_data(adapter);
2132 
2133 			rc = init_resources(adapter);
2134 			if (rc)
2135 				goto out;
2136 
2137 		} else {
2138 			rc = reset_tx_pools(adapter);
2139 			if (rc) {
2140 				netdev_dbg(adapter->netdev, "reset tx pools failed (%d)\n",
2141 						rc);
2142 				goto out;
2143 			}
2144 
2145 			rc = reset_rx_pools(adapter);
2146 			if (rc) {
2147 				netdev_dbg(adapter->netdev, "reset rx pools failed (%d)\n",
2148 						rc);
2149 				goto out;
2150 			}
2151 		}
2152 		ibmvnic_disable_irqs(adapter);
2153 	}
2154 	adapter->state = VNIC_CLOSED;
2155 
2156 	if (reset_state == VNIC_CLOSED) {
2157 		rc = 0;
2158 		goto out;
2159 	}
2160 
2161 	rc = __ibmvnic_open(netdev);
2162 	if (rc) {
2163 		rc = IBMVNIC_OPEN_FAILED;
2164 		goto out;
2165 	}
2166 
2167 	/* refresh device's multicast list */
2168 	ibmvnic_set_multi(netdev);
2169 
2170 	/* kick napi */
2171 	for (i = 0; i < adapter->req_rx_queues; i++)
2172 		napi_schedule(&adapter->napi[i]);
2173 
2174 	if (adapter->reset_reason == VNIC_RESET_FAILOVER ||
2175 	    adapter->reset_reason == VNIC_RESET_MOBILITY)
2176 		__netdev_notify_peers(netdev);
2177 
2178 	rc = 0;
2179 
2180 out:
2181 	/* restore the adapter state if reset failed */
2182 	if (rc)
2183 		adapter->state = reset_state;
2184 	rtnl_unlock();
2185 
2186 	netdev_dbg(adapter->netdev, "[S:%d FOP:%d] Reset done, rc %d\n",
2187 		   adapter->state, adapter->failover_pending, rc);
2188 	return rc;
2189 }
2190 
2191 static int do_hard_reset(struct ibmvnic_adapter *adapter,
2192 			 struct ibmvnic_rwi *rwi, u32 reset_state)
2193 {
2194 	struct net_device *netdev = adapter->netdev;
2195 	int rc;
2196 
2197 	netdev_dbg(adapter->netdev, "Hard resetting driver (%d)\n",
2198 		   rwi->reset_reason);
2199 
2200 	netif_carrier_off(netdev);
2201 	adapter->reset_reason = rwi->reset_reason;
2202 
2203 	ibmvnic_cleanup(netdev);
2204 	release_resources(adapter);
2205 	release_sub_crqs(adapter, 0);
2206 	release_crq_queue(adapter);
2207 
2208 	/* remove the closed state so when we call open it appears
2209 	 * we are coming from the probed state.
2210 	 */
2211 	adapter->state = VNIC_PROBED;
2212 
2213 	reinit_completion(&adapter->init_done);
2214 	rc = init_crq_queue(adapter);
2215 	if (rc) {
2216 		netdev_err(adapter->netdev,
2217 			   "Couldn't initialize crq. rc=%d\n", rc);
2218 		goto out;
2219 	}
2220 
2221 	rc = ibmvnic_reset_init(adapter, false);
2222 	if (rc)
2223 		goto out;
2224 
2225 	/* If the adapter was in PROBE state prior to the reset,
2226 	 * exit here.
2227 	 */
2228 	if (reset_state == VNIC_PROBED)
2229 		goto out;
2230 
2231 	rc = ibmvnic_login(netdev);
2232 	if (rc)
2233 		goto out;
2234 
2235 	rc = init_resources(adapter);
2236 	if (rc)
2237 		goto out;
2238 
2239 	ibmvnic_disable_irqs(adapter);
2240 	adapter->state = VNIC_CLOSED;
2241 
2242 	if (reset_state == VNIC_CLOSED)
2243 		goto out;
2244 
2245 	rc = __ibmvnic_open(netdev);
2246 	if (rc) {
2247 		rc = IBMVNIC_OPEN_FAILED;
2248 		goto out;
2249 	}
2250 
2251 	__netdev_notify_peers(netdev);
2252 out:
2253 	/* restore adapter state if reset failed */
2254 	if (rc)
2255 		adapter->state = reset_state;
2256 	netdev_dbg(adapter->netdev, "[S:%d FOP:%d] Hard reset done, rc %d\n",
2257 		   adapter->state, adapter->failover_pending, rc);
2258 	return rc;
2259 }
2260 
2261 static struct ibmvnic_rwi *get_next_rwi(struct ibmvnic_adapter *adapter)
2262 {
2263 	struct ibmvnic_rwi *rwi;
2264 	unsigned long flags;
2265 
2266 	spin_lock_irqsave(&adapter->rwi_lock, flags);
2267 
2268 	if (!list_empty(&adapter->rwi_list)) {
2269 		rwi = list_first_entry(&adapter->rwi_list, struct ibmvnic_rwi,
2270 				       list);
2271 		list_del(&rwi->list);
2272 	} else {
2273 		rwi = NULL;
2274 	}
2275 
2276 	spin_unlock_irqrestore(&adapter->rwi_lock, flags);
2277 	return rwi;
2278 }
2279 
2280 static void __ibmvnic_reset(struct work_struct *work)
2281 {
2282 	struct ibmvnic_rwi *rwi;
2283 	struct ibmvnic_adapter *adapter;
2284 	bool saved_state = false;
2285 	unsigned long flags;
2286 	u32 reset_state;
2287 	int rc = 0;
2288 
2289 	adapter = container_of(work, struct ibmvnic_adapter, ibmvnic_reset);
2290 
2291 	if (test_and_set_bit_lock(0, &adapter->resetting)) {
2292 		schedule_delayed_work(&adapter->ibmvnic_delayed_reset,
2293 				      IBMVNIC_RESET_DELAY);
2294 		return;
2295 	}
2296 
2297 	rwi = get_next_rwi(adapter);
2298 	while (rwi) {
2299 		spin_lock_irqsave(&adapter->state_lock, flags);
2300 
2301 		if (adapter->state == VNIC_REMOVING ||
2302 		    adapter->state == VNIC_REMOVED) {
2303 			spin_unlock_irqrestore(&adapter->state_lock, flags);
2304 			kfree(rwi);
2305 			rc = EBUSY;
2306 			break;
2307 		}
2308 
2309 		if (!saved_state) {
2310 			reset_state = adapter->state;
2311 			saved_state = true;
2312 		}
2313 		spin_unlock_irqrestore(&adapter->state_lock, flags);
2314 
2315 		if (rwi->reset_reason == VNIC_RESET_CHANGE_PARAM) {
2316 			/* CHANGE_PARAM requestor holds rtnl_lock */
2317 			rc = do_change_param_reset(adapter, rwi, reset_state);
2318 		} else if (adapter->force_reset_recovery) {
2319 			/*
2320 			 * Since we are doing a hard reset now, clear the
2321 			 * failover_pending flag so we don't ignore any
2322 			 * future MOBILITY or other resets.
2323 			 */
2324 			adapter->failover_pending = false;
2325 
2326 			/* Transport event occurred during previous reset */
2327 			if (adapter->wait_for_reset) {
2328 				/* Previous was CHANGE_PARAM; caller locked */
2329 				adapter->force_reset_recovery = false;
2330 				rc = do_hard_reset(adapter, rwi, reset_state);
2331 			} else {
2332 				rtnl_lock();
2333 				adapter->force_reset_recovery = false;
2334 				rc = do_hard_reset(adapter, rwi, reset_state);
2335 				rtnl_unlock();
2336 			}
2337 			if (rc) {
2338 				/* give backing device time to settle down */
2339 				netdev_dbg(adapter->netdev,
2340 					   "[S:%d] Hard reset failed, waiting 60 secs\n",
2341 					   adapter->state);
2342 				set_current_state(TASK_UNINTERRUPTIBLE);
2343 				schedule_timeout(60 * HZ);
2344 			}
2345 		} else {
2346 			rc = do_reset(adapter, rwi, reset_state);
2347 		}
2348 		kfree(rwi);
2349 		adapter->last_reset_time = jiffies;
2350 
2351 		if (rc)
2352 			netdev_dbg(adapter->netdev, "Reset failed, rc=%d\n", rc);
2353 
2354 		rwi = get_next_rwi(adapter);
2355 
2356 		if (rwi && (rwi->reset_reason == VNIC_RESET_FAILOVER ||
2357 			    rwi->reset_reason == VNIC_RESET_MOBILITY))
2358 			adapter->force_reset_recovery = true;
2359 	}
2360 
2361 	if (adapter->wait_for_reset) {
2362 		adapter->reset_done_rc = rc;
2363 		complete(&adapter->reset_done);
2364 	}
2365 
2366 	clear_bit_unlock(0, &adapter->resetting);
2367 
2368 	netdev_dbg(adapter->netdev,
2369 		   "[S:%d FRR:%d WFR:%d] Done processing resets\n",
2370 		   adapter->state, adapter->force_reset_recovery,
2371 		   adapter->wait_for_reset);
2372 }
2373 
2374 static void __ibmvnic_delayed_reset(struct work_struct *work)
2375 {
2376 	struct ibmvnic_adapter *adapter;
2377 
2378 	adapter = container_of(work, struct ibmvnic_adapter,
2379 			       ibmvnic_delayed_reset.work);
2380 	__ibmvnic_reset(&adapter->ibmvnic_reset);
2381 }
2382 
2383 static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
2384 			 enum ibmvnic_reset_reason reason)
2385 {
2386 	struct list_head *entry, *tmp_entry;
2387 	struct ibmvnic_rwi *rwi, *tmp;
2388 	struct net_device *netdev = adapter->netdev;
2389 	unsigned long flags;
2390 	int ret;
2391 
2392 	/*
2393 	 * If failover is pending don't schedule any other reset.
2394 	 * Instead let the failover complete. If there is already a
2395 	 * a failover reset scheduled, we will detect and drop the
2396 	 * duplicate reset when walking the ->rwi_list below.
2397 	 */
2398 	if (adapter->state == VNIC_REMOVING ||
2399 	    adapter->state == VNIC_REMOVED ||
2400 	    (adapter->failover_pending && reason != VNIC_RESET_FAILOVER)) {
2401 		ret = EBUSY;
2402 		netdev_dbg(netdev, "Adapter removing or pending failover, skipping reset\n");
2403 		goto err;
2404 	}
2405 
2406 	if (adapter->state == VNIC_PROBING) {
2407 		netdev_warn(netdev, "Adapter reset during probe\n");
2408 		ret = adapter->init_done_rc = EAGAIN;
2409 		goto err;
2410 	}
2411 
2412 	spin_lock_irqsave(&adapter->rwi_lock, flags);
2413 
2414 	list_for_each(entry, &adapter->rwi_list) {
2415 		tmp = list_entry(entry, struct ibmvnic_rwi, list);
2416 		if (tmp->reset_reason == reason) {
2417 			netdev_dbg(netdev, "Skipping matching reset, reason=%d\n",
2418 				   reason);
2419 			spin_unlock_irqrestore(&adapter->rwi_lock, flags);
2420 			ret = EBUSY;
2421 			goto err;
2422 		}
2423 	}
2424 
2425 	rwi = kzalloc(sizeof(*rwi), GFP_ATOMIC);
2426 	if (!rwi) {
2427 		spin_unlock_irqrestore(&adapter->rwi_lock, flags);
2428 		ibmvnic_close(netdev);
2429 		ret = ENOMEM;
2430 		goto err;
2431 	}
2432 	/* if we just received a transport event,
2433 	 * flush reset queue and process this reset
2434 	 */
2435 	if (adapter->force_reset_recovery && !list_empty(&adapter->rwi_list)) {
2436 		list_for_each_safe(entry, tmp_entry, &adapter->rwi_list)
2437 			list_del(entry);
2438 	}
2439 	rwi->reset_reason = reason;
2440 	list_add_tail(&rwi->list, &adapter->rwi_list);
2441 	spin_unlock_irqrestore(&adapter->rwi_lock, flags);
2442 	netdev_dbg(adapter->netdev, "Scheduling reset (reason %d)\n", reason);
2443 	schedule_work(&adapter->ibmvnic_reset);
2444 
2445 	return 0;
2446 err:
2447 	return -ret;
2448 }
2449 
2450 static void ibmvnic_tx_timeout(struct net_device *dev, unsigned int txqueue)
2451 {
2452 	struct ibmvnic_adapter *adapter = netdev_priv(dev);
2453 
2454 	if (test_bit(0, &adapter->resetting)) {
2455 		netdev_err(adapter->netdev,
2456 			   "Adapter is resetting, skip timeout reset\n");
2457 		return;
2458 	}
2459 	/* No queuing up reset until at least 5 seconds (default watchdog val)
2460 	 * after last reset
2461 	 */
2462 	if (time_before(jiffies, (adapter->last_reset_time + dev->watchdog_timeo))) {
2463 		netdev_dbg(dev, "Not yet time to tx timeout.\n");
2464 		return;
2465 	}
2466 	ibmvnic_reset(adapter, VNIC_RESET_TIMEOUT);
2467 }
2468 
2469 static void remove_buff_from_pool(struct ibmvnic_adapter *adapter,
2470 				  struct ibmvnic_rx_buff *rx_buff)
2471 {
2472 	struct ibmvnic_rx_pool *pool = &adapter->rx_pool[rx_buff->pool_index];
2473 
2474 	rx_buff->skb = NULL;
2475 
2476 	pool->free_map[pool->next_alloc] = (int)(rx_buff - pool->rx_buff);
2477 	pool->next_alloc = (pool->next_alloc + 1) % pool->size;
2478 
2479 	atomic_dec(&pool->available);
2480 }
2481 
2482 static int ibmvnic_poll(struct napi_struct *napi, int budget)
2483 {
2484 	struct ibmvnic_sub_crq_queue *rx_scrq;
2485 	struct ibmvnic_adapter *adapter;
2486 	struct net_device *netdev;
2487 	int frames_processed;
2488 	int scrq_num;
2489 
2490 	netdev = napi->dev;
2491 	adapter = netdev_priv(netdev);
2492 	scrq_num = (int)(napi - adapter->napi);
2493 	frames_processed = 0;
2494 	rx_scrq = adapter->rx_scrq[scrq_num];
2495 
2496 restart_poll:
2497 	while (frames_processed < budget) {
2498 		struct sk_buff *skb;
2499 		struct ibmvnic_rx_buff *rx_buff;
2500 		union sub_crq *next;
2501 		u32 length;
2502 		u16 offset;
2503 		u8 flags = 0;
2504 
2505 		if (unlikely(test_bit(0, &adapter->resetting) &&
2506 			     adapter->reset_reason != VNIC_RESET_NON_FATAL)) {
2507 			enable_scrq_irq(adapter, rx_scrq);
2508 			napi_complete_done(napi, frames_processed);
2509 			return frames_processed;
2510 		}
2511 
2512 		if (!pending_scrq(adapter, rx_scrq))
2513 			break;
2514 		/* The queue entry at the current index is peeked at above
2515 		 * to determine that there is a valid descriptor awaiting
2516 		 * processing. We want to be sure that the current slot
2517 		 * holds a valid descriptor before reading its contents.
2518 		 */
2519 		dma_rmb();
2520 		next = ibmvnic_next_scrq(adapter, rx_scrq);
2521 		rx_buff =
2522 		    (struct ibmvnic_rx_buff *)be64_to_cpu(next->
2523 							  rx_comp.correlator);
2524 		/* do error checking */
2525 		if (next->rx_comp.rc) {
2526 			netdev_dbg(netdev, "rx buffer returned with rc %x\n",
2527 				   be16_to_cpu(next->rx_comp.rc));
2528 			/* free the entry */
2529 			next->rx_comp.first = 0;
2530 			dev_kfree_skb_any(rx_buff->skb);
2531 			remove_buff_from_pool(adapter, rx_buff);
2532 			continue;
2533 		} else if (!rx_buff->skb) {
2534 			/* free the entry */
2535 			next->rx_comp.first = 0;
2536 			remove_buff_from_pool(adapter, rx_buff);
2537 			continue;
2538 		}
2539 
2540 		length = be32_to_cpu(next->rx_comp.len);
2541 		offset = be16_to_cpu(next->rx_comp.off_frame_data);
2542 		flags = next->rx_comp.flags;
2543 		skb = rx_buff->skb;
2544 		skb_copy_to_linear_data(skb, rx_buff->data + offset,
2545 					length);
2546 
2547 		/* VLAN Header has been stripped by the system firmware and
2548 		 * needs to be inserted by the driver
2549 		 */
2550 		if (adapter->rx_vlan_header_insertion &&
2551 		    (flags & IBMVNIC_VLAN_STRIPPED))
2552 			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
2553 					       ntohs(next->rx_comp.vlan_tci));
2554 
2555 		/* free the entry */
2556 		next->rx_comp.first = 0;
2557 		remove_buff_from_pool(adapter, rx_buff);
2558 
2559 		skb_put(skb, length);
2560 		skb->protocol = eth_type_trans(skb, netdev);
2561 		skb_record_rx_queue(skb, scrq_num);
2562 
2563 		if (flags & IBMVNIC_IP_CHKSUM_GOOD &&
2564 		    flags & IBMVNIC_TCP_UDP_CHKSUM_GOOD) {
2565 			skb->ip_summed = CHECKSUM_UNNECESSARY;
2566 		}
2567 
2568 		length = skb->len;
2569 		napi_gro_receive(napi, skb); /* send it up */
2570 		netdev->stats.rx_packets++;
2571 		netdev->stats.rx_bytes += length;
2572 		adapter->rx_stats_buffers[scrq_num].packets++;
2573 		adapter->rx_stats_buffers[scrq_num].bytes += length;
2574 		frames_processed++;
2575 	}
2576 
2577 	if (adapter->state != VNIC_CLOSING &&
2578 	    ((atomic_read(&adapter->rx_pool[scrq_num].available) <
2579 	      adapter->req_rx_add_entries_per_subcrq / 2) ||
2580 	      frames_processed < budget))
2581 		replenish_rx_pool(adapter, &adapter->rx_pool[scrq_num]);
2582 	if (frames_processed < budget) {
2583 		if (napi_complete_done(napi, frames_processed)) {
2584 			enable_scrq_irq(adapter, rx_scrq);
2585 			if (pending_scrq(adapter, rx_scrq)) {
2586 				rmb();
2587 				if (napi_reschedule(napi)) {
2588 					disable_scrq_irq(adapter, rx_scrq);
2589 					goto restart_poll;
2590 				}
2591 			}
2592 		}
2593 	}
2594 	return frames_processed;
2595 }
2596 
2597 static int wait_for_reset(struct ibmvnic_adapter *adapter)
2598 {
2599 	int rc, ret;
2600 
2601 	adapter->fallback.mtu = adapter->req_mtu;
2602 	adapter->fallback.rx_queues = adapter->req_rx_queues;
2603 	adapter->fallback.tx_queues = adapter->req_tx_queues;
2604 	adapter->fallback.rx_entries = adapter->req_rx_add_entries_per_subcrq;
2605 	adapter->fallback.tx_entries = adapter->req_tx_entries_per_subcrq;
2606 
2607 	reinit_completion(&adapter->reset_done);
2608 	adapter->wait_for_reset = true;
2609 	rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
2610 
2611 	if (rc) {
2612 		ret = rc;
2613 		goto out;
2614 	}
2615 	rc = ibmvnic_wait_for_completion(adapter, &adapter->reset_done, 60000);
2616 	if (rc) {
2617 		ret = -ENODEV;
2618 		goto out;
2619 	}
2620 
2621 	ret = 0;
2622 	if (adapter->reset_done_rc) {
2623 		ret = -EIO;
2624 		adapter->desired.mtu = adapter->fallback.mtu;
2625 		adapter->desired.rx_queues = adapter->fallback.rx_queues;
2626 		adapter->desired.tx_queues = adapter->fallback.tx_queues;
2627 		adapter->desired.rx_entries = adapter->fallback.rx_entries;
2628 		adapter->desired.tx_entries = adapter->fallback.tx_entries;
2629 
2630 		reinit_completion(&adapter->reset_done);
2631 		adapter->wait_for_reset = true;
2632 		rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
2633 		if (rc) {
2634 			ret = rc;
2635 			goto out;
2636 		}
2637 		rc = ibmvnic_wait_for_completion(adapter, &adapter->reset_done,
2638 						 60000);
2639 		if (rc) {
2640 			ret = -ENODEV;
2641 			goto out;
2642 		}
2643 	}
2644 out:
2645 	adapter->wait_for_reset = false;
2646 
2647 	return ret;
2648 }
2649 
2650 static int ibmvnic_change_mtu(struct net_device *netdev, int new_mtu)
2651 {
2652 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2653 
2654 	adapter->desired.mtu = new_mtu + ETH_HLEN;
2655 
2656 	return wait_for_reset(adapter);
2657 }
2658 
2659 static netdev_features_t ibmvnic_features_check(struct sk_buff *skb,
2660 						struct net_device *dev,
2661 						netdev_features_t features)
2662 {
2663 	/* Some backing hardware adapters can not
2664 	 * handle packets with a MSS less than 224
2665 	 * or with only one segment.
2666 	 */
2667 	if (skb_is_gso(skb)) {
2668 		if (skb_shinfo(skb)->gso_size < 224 ||
2669 		    skb_shinfo(skb)->gso_segs == 1)
2670 			features &= ~NETIF_F_GSO_MASK;
2671 	}
2672 
2673 	return features;
2674 }
2675 
2676 static const struct net_device_ops ibmvnic_netdev_ops = {
2677 	.ndo_open		= ibmvnic_open,
2678 	.ndo_stop		= ibmvnic_close,
2679 	.ndo_start_xmit		= ibmvnic_xmit,
2680 	.ndo_set_rx_mode	= ibmvnic_set_multi,
2681 	.ndo_set_mac_address	= ibmvnic_set_mac,
2682 	.ndo_validate_addr	= eth_validate_addr,
2683 	.ndo_tx_timeout		= ibmvnic_tx_timeout,
2684 	.ndo_change_mtu		= ibmvnic_change_mtu,
2685 	.ndo_features_check     = ibmvnic_features_check,
2686 };
2687 
2688 /* ethtool functions */
2689 
2690 static int ibmvnic_get_link_ksettings(struct net_device *netdev,
2691 				      struct ethtool_link_ksettings *cmd)
2692 {
2693 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2694 	int rc;
2695 
2696 	rc = send_query_phys_parms(adapter);
2697 	if (rc) {
2698 		adapter->speed = SPEED_UNKNOWN;
2699 		adapter->duplex = DUPLEX_UNKNOWN;
2700 	}
2701 	cmd->base.speed = adapter->speed;
2702 	cmd->base.duplex = adapter->duplex;
2703 	cmd->base.port = PORT_FIBRE;
2704 	cmd->base.phy_address = 0;
2705 	cmd->base.autoneg = AUTONEG_ENABLE;
2706 
2707 	return 0;
2708 }
2709 
2710 static void ibmvnic_get_drvinfo(struct net_device *netdev,
2711 				struct ethtool_drvinfo *info)
2712 {
2713 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2714 
2715 	strlcpy(info->driver, ibmvnic_driver_name, sizeof(info->driver));
2716 	strlcpy(info->version, IBMVNIC_DRIVER_VERSION, sizeof(info->version));
2717 	strlcpy(info->fw_version, adapter->fw_version,
2718 		sizeof(info->fw_version));
2719 }
2720 
2721 static u32 ibmvnic_get_msglevel(struct net_device *netdev)
2722 {
2723 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2724 
2725 	return adapter->msg_enable;
2726 }
2727 
2728 static void ibmvnic_set_msglevel(struct net_device *netdev, u32 data)
2729 {
2730 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2731 
2732 	adapter->msg_enable = data;
2733 }
2734 
2735 static u32 ibmvnic_get_link(struct net_device *netdev)
2736 {
2737 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2738 
2739 	/* Don't need to send a query because we request a logical link up at
2740 	 * init and then we wait for link state indications
2741 	 */
2742 	return adapter->logical_link_state;
2743 }
2744 
2745 static void ibmvnic_get_ringparam(struct net_device *netdev,
2746 				  struct ethtool_ringparam *ring)
2747 {
2748 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2749 
2750 	if (adapter->priv_flags & IBMVNIC_USE_SERVER_MAXES) {
2751 		ring->rx_max_pending = adapter->max_rx_add_entries_per_subcrq;
2752 		ring->tx_max_pending = adapter->max_tx_entries_per_subcrq;
2753 	} else {
2754 		ring->rx_max_pending = IBMVNIC_MAX_QUEUE_SZ;
2755 		ring->tx_max_pending = IBMVNIC_MAX_QUEUE_SZ;
2756 	}
2757 	ring->rx_mini_max_pending = 0;
2758 	ring->rx_jumbo_max_pending = 0;
2759 	ring->rx_pending = adapter->req_rx_add_entries_per_subcrq;
2760 	ring->tx_pending = adapter->req_tx_entries_per_subcrq;
2761 	ring->rx_mini_pending = 0;
2762 	ring->rx_jumbo_pending = 0;
2763 }
2764 
2765 static int ibmvnic_set_ringparam(struct net_device *netdev,
2766 				 struct ethtool_ringparam *ring)
2767 {
2768 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2769 	int ret;
2770 
2771 	ret = 0;
2772 	adapter->desired.rx_entries = ring->rx_pending;
2773 	adapter->desired.tx_entries = ring->tx_pending;
2774 
2775 	ret = wait_for_reset(adapter);
2776 
2777 	if (!ret &&
2778 	    (adapter->req_rx_add_entries_per_subcrq != ring->rx_pending ||
2779 	     adapter->req_tx_entries_per_subcrq != ring->tx_pending))
2780 		netdev_info(netdev,
2781 			    "Could not match full ringsize request. Requested: RX %d, TX %d; Allowed: RX %llu, TX %llu\n",
2782 			    ring->rx_pending, ring->tx_pending,
2783 			    adapter->req_rx_add_entries_per_subcrq,
2784 			    adapter->req_tx_entries_per_subcrq);
2785 	return ret;
2786 }
2787 
2788 static void ibmvnic_get_channels(struct net_device *netdev,
2789 				 struct ethtool_channels *channels)
2790 {
2791 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2792 
2793 	if (adapter->priv_flags & IBMVNIC_USE_SERVER_MAXES) {
2794 		channels->max_rx = adapter->max_rx_queues;
2795 		channels->max_tx = adapter->max_tx_queues;
2796 	} else {
2797 		channels->max_rx = IBMVNIC_MAX_QUEUES;
2798 		channels->max_tx = IBMVNIC_MAX_QUEUES;
2799 	}
2800 
2801 	channels->max_other = 0;
2802 	channels->max_combined = 0;
2803 	channels->rx_count = adapter->req_rx_queues;
2804 	channels->tx_count = adapter->req_tx_queues;
2805 	channels->other_count = 0;
2806 	channels->combined_count = 0;
2807 }
2808 
2809 static int ibmvnic_set_channels(struct net_device *netdev,
2810 				struct ethtool_channels *channels)
2811 {
2812 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2813 	int ret;
2814 
2815 	ret = 0;
2816 	adapter->desired.rx_queues = channels->rx_count;
2817 	adapter->desired.tx_queues = channels->tx_count;
2818 
2819 	ret = wait_for_reset(adapter);
2820 
2821 	if (!ret &&
2822 	    (adapter->req_rx_queues != channels->rx_count ||
2823 	     adapter->req_tx_queues != channels->tx_count))
2824 		netdev_info(netdev,
2825 			    "Could not match full channels request. Requested: RX %d, TX %d; Allowed: RX %llu, TX %llu\n",
2826 			    channels->rx_count, channels->tx_count,
2827 			    adapter->req_rx_queues, adapter->req_tx_queues);
2828 	return ret;
2829 
2830 }
2831 
2832 static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
2833 {
2834 	struct ibmvnic_adapter *adapter = netdev_priv(dev);
2835 	int i;
2836 
2837 	switch (stringset) {
2838 	case ETH_SS_STATS:
2839 		for (i = 0; i < ARRAY_SIZE(ibmvnic_stats);
2840 				i++, data += ETH_GSTRING_LEN)
2841 			memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN);
2842 
2843 		for (i = 0; i < adapter->req_tx_queues; i++) {
2844 			snprintf(data, ETH_GSTRING_LEN, "tx%d_packets", i);
2845 			data += ETH_GSTRING_LEN;
2846 
2847 			snprintf(data, ETH_GSTRING_LEN, "tx%d_bytes", i);
2848 			data += ETH_GSTRING_LEN;
2849 
2850 			snprintf(data, ETH_GSTRING_LEN,
2851 				 "tx%d_dropped_packets", i);
2852 			data += ETH_GSTRING_LEN;
2853 		}
2854 
2855 		for (i = 0; i < adapter->req_rx_queues; i++) {
2856 			snprintf(data, ETH_GSTRING_LEN, "rx%d_packets", i);
2857 			data += ETH_GSTRING_LEN;
2858 
2859 			snprintf(data, ETH_GSTRING_LEN, "rx%d_bytes", i);
2860 			data += ETH_GSTRING_LEN;
2861 
2862 			snprintf(data, ETH_GSTRING_LEN, "rx%d_interrupts", i);
2863 			data += ETH_GSTRING_LEN;
2864 		}
2865 		break;
2866 
2867 	case ETH_SS_PRIV_FLAGS:
2868 		for (i = 0; i < ARRAY_SIZE(ibmvnic_priv_flags); i++)
2869 			strcpy(data + i * ETH_GSTRING_LEN,
2870 			       ibmvnic_priv_flags[i]);
2871 		break;
2872 	default:
2873 		return;
2874 	}
2875 }
2876 
2877 static int ibmvnic_get_sset_count(struct net_device *dev, int sset)
2878 {
2879 	struct ibmvnic_adapter *adapter = netdev_priv(dev);
2880 
2881 	switch (sset) {
2882 	case ETH_SS_STATS:
2883 		return ARRAY_SIZE(ibmvnic_stats) +
2884 		       adapter->req_tx_queues * NUM_TX_STATS +
2885 		       adapter->req_rx_queues * NUM_RX_STATS;
2886 	case ETH_SS_PRIV_FLAGS:
2887 		return ARRAY_SIZE(ibmvnic_priv_flags);
2888 	default:
2889 		return -EOPNOTSUPP;
2890 	}
2891 }
2892 
2893 static void ibmvnic_get_ethtool_stats(struct net_device *dev,
2894 				      struct ethtool_stats *stats, u64 *data)
2895 {
2896 	struct ibmvnic_adapter *adapter = netdev_priv(dev);
2897 	union ibmvnic_crq crq;
2898 	int i, j;
2899 	int rc;
2900 
2901 	memset(&crq, 0, sizeof(crq));
2902 	crq.request_statistics.first = IBMVNIC_CRQ_CMD;
2903 	crq.request_statistics.cmd = REQUEST_STATISTICS;
2904 	crq.request_statistics.ioba = cpu_to_be32(adapter->stats_token);
2905 	crq.request_statistics.len =
2906 	    cpu_to_be32(sizeof(struct ibmvnic_statistics));
2907 
2908 	/* Wait for data to be written */
2909 	reinit_completion(&adapter->stats_done);
2910 	rc = ibmvnic_send_crq(adapter, &crq);
2911 	if (rc)
2912 		return;
2913 	rc = ibmvnic_wait_for_completion(adapter, &adapter->stats_done, 10000);
2914 	if (rc)
2915 		return;
2916 
2917 	for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++)
2918 		data[i] = be64_to_cpu(IBMVNIC_GET_STAT(adapter,
2919 						ibmvnic_stats[i].offset));
2920 
2921 	for (j = 0; j < adapter->req_tx_queues; j++) {
2922 		data[i] = adapter->tx_stats_buffers[j].packets;
2923 		i++;
2924 		data[i] = adapter->tx_stats_buffers[j].bytes;
2925 		i++;
2926 		data[i] = adapter->tx_stats_buffers[j].dropped_packets;
2927 		i++;
2928 	}
2929 
2930 	for (j = 0; j < adapter->req_rx_queues; j++) {
2931 		data[i] = adapter->rx_stats_buffers[j].packets;
2932 		i++;
2933 		data[i] = adapter->rx_stats_buffers[j].bytes;
2934 		i++;
2935 		data[i] = adapter->rx_stats_buffers[j].interrupts;
2936 		i++;
2937 	}
2938 }
2939 
2940 static u32 ibmvnic_get_priv_flags(struct net_device *netdev)
2941 {
2942 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2943 
2944 	return adapter->priv_flags;
2945 }
2946 
2947 static int ibmvnic_set_priv_flags(struct net_device *netdev, u32 flags)
2948 {
2949 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2950 	bool which_maxes = !!(flags & IBMVNIC_USE_SERVER_MAXES);
2951 
2952 	if (which_maxes)
2953 		adapter->priv_flags |= IBMVNIC_USE_SERVER_MAXES;
2954 	else
2955 		adapter->priv_flags &= ~IBMVNIC_USE_SERVER_MAXES;
2956 
2957 	return 0;
2958 }
2959 static const struct ethtool_ops ibmvnic_ethtool_ops = {
2960 	.get_drvinfo		= ibmvnic_get_drvinfo,
2961 	.get_msglevel		= ibmvnic_get_msglevel,
2962 	.set_msglevel		= ibmvnic_set_msglevel,
2963 	.get_link		= ibmvnic_get_link,
2964 	.get_ringparam		= ibmvnic_get_ringparam,
2965 	.set_ringparam		= ibmvnic_set_ringparam,
2966 	.get_channels		= ibmvnic_get_channels,
2967 	.set_channels		= ibmvnic_set_channels,
2968 	.get_strings            = ibmvnic_get_strings,
2969 	.get_sset_count         = ibmvnic_get_sset_count,
2970 	.get_ethtool_stats	= ibmvnic_get_ethtool_stats,
2971 	.get_link_ksettings	= ibmvnic_get_link_ksettings,
2972 	.get_priv_flags		= ibmvnic_get_priv_flags,
2973 	.set_priv_flags		= ibmvnic_set_priv_flags,
2974 };
2975 
2976 /* Routines for managing CRQs/sCRQs  */
2977 
2978 static int reset_one_sub_crq_queue(struct ibmvnic_adapter *adapter,
2979 				   struct ibmvnic_sub_crq_queue *scrq)
2980 {
2981 	int rc;
2982 
2983 	if (!scrq) {
2984 		netdev_dbg(adapter->netdev, "Invalid scrq reset.\n");
2985 		return -EINVAL;
2986 	}
2987 
2988 	if (scrq->irq) {
2989 		free_irq(scrq->irq, scrq);
2990 		irq_dispose_mapping(scrq->irq);
2991 		scrq->irq = 0;
2992 	}
2993 
2994 	if (scrq->msgs) {
2995 		memset(scrq->msgs, 0, 4 * PAGE_SIZE);
2996 		atomic_set(&scrq->used, 0);
2997 		scrq->cur = 0;
2998 		scrq->ind_buf.index = 0;
2999 	} else {
3000 		netdev_dbg(adapter->netdev, "Invalid scrq reset\n");
3001 		return -EINVAL;
3002 	}
3003 
3004 	rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
3005 			   4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);
3006 	return rc;
3007 }
3008 
3009 static int reset_sub_crq_queues(struct ibmvnic_adapter *adapter)
3010 {
3011 	int i, rc;
3012 
3013 	if (!adapter->tx_scrq || !adapter->rx_scrq)
3014 		return -EINVAL;
3015 
3016 	for (i = 0; i < adapter->req_tx_queues; i++) {
3017 		netdev_dbg(adapter->netdev, "Re-setting tx_scrq[%d]\n", i);
3018 		rc = reset_one_sub_crq_queue(adapter, adapter->tx_scrq[i]);
3019 		if (rc)
3020 			return rc;
3021 	}
3022 
3023 	for (i = 0; i < adapter->req_rx_queues; i++) {
3024 		netdev_dbg(adapter->netdev, "Re-setting rx_scrq[%d]\n", i);
3025 		rc = reset_one_sub_crq_queue(adapter, adapter->rx_scrq[i]);
3026 		if (rc)
3027 			return rc;
3028 	}
3029 
3030 	return rc;
3031 }
3032 
3033 static void release_sub_crq_queue(struct ibmvnic_adapter *adapter,
3034 				  struct ibmvnic_sub_crq_queue *scrq,
3035 				  bool do_h_free)
3036 {
3037 	struct device *dev = &adapter->vdev->dev;
3038 	long rc;
3039 
3040 	netdev_dbg(adapter->netdev, "Releasing sub-CRQ\n");
3041 
3042 	if (do_h_free) {
3043 		/* Close the sub-crqs */
3044 		do {
3045 			rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
3046 						adapter->vdev->unit_address,
3047 						scrq->crq_num);
3048 		} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
3049 
3050 		if (rc) {
3051 			netdev_err(adapter->netdev,
3052 				   "Failed to release sub-CRQ %16lx, rc = %ld\n",
3053 				   scrq->crq_num, rc);
3054 		}
3055 	}
3056 
3057 	dma_free_coherent(dev,
3058 			  IBMVNIC_IND_ARR_SZ,
3059 			  scrq->ind_buf.indir_arr,
3060 			  scrq->ind_buf.indir_dma);
3061 
3062 	dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
3063 			 DMA_BIDIRECTIONAL);
3064 	free_pages((unsigned long)scrq->msgs, 2);
3065 	kfree(scrq);
3066 }
3067 
3068 static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
3069 							*adapter)
3070 {
3071 	struct device *dev = &adapter->vdev->dev;
3072 	struct ibmvnic_sub_crq_queue *scrq;
3073 	int rc;
3074 
3075 	scrq = kzalloc(sizeof(*scrq), GFP_KERNEL);
3076 	if (!scrq)
3077 		return NULL;
3078 
3079 	scrq->msgs =
3080 		(union sub_crq *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 2);
3081 	if (!scrq->msgs) {
3082 		dev_warn(dev, "Couldn't allocate crq queue messages page\n");
3083 		goto zero_page_failed;
3084 	}
3085 
3086 	scrq->msg_token = dma_map_single(dev, scrq->msgs, 4 * PAGE_SIZE,
3087 					 DMA_BIDIRECTIONAL);
3088 	if (dma_mapping_error(dev, scrq->msg_token)) {
3089 		dev_warn(dev, "Couldn't map crq queue messages page\n");
3090 		goto map_failed;
3091 	}
3092 
3093 	rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
3094 			   4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);
3095 
3096 	if (rc == H_RESOURCE)
3097 		rc = ibmvnic_reset_crq(adapter);
3098 
3099 	if (rc == H_CLOSED) {
3100 		dev_warn(dev, "Partner adapter not ready, waiting.\n");
3101 	} else if (rc) {
3102 		dev_warn(dev, "Error %d registering sub-crq\n", rc);
3103 		goto reg_failed;
3104 	}
3105 
3106 	scrq->adapter = adapter;
3107 	scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs);
3108 	scrq->ind_buf.index = 0;
3109 
3110 	scrq->ind_buf.indir_arr =
3111 		dma_alloc_coherent(dev,
3112 				   IBMVNIC_IND_ARR_SZ,
3113 				   &scrq->ind_buf.indir_dma,
3114 				   GFP_KERNEL);
3115 
3116 	if (!scrq->ind_buf.indir_arr)
3117 		goto indir_failed;
3118 
3119 	spin_lock_init(&scrq->lock);
3120 
3121 	netdev_dbg(adapter->netdev,
3122 		   "sub-crq initialized, num %lx, hw_irq=%lx, irq=%x\n",
3123 		   scrq->crq_num, scrq->hw_irq, scrq->irq);
3124 
3125 	return scrq;
3126 
3127 indir_failed:
3128 	do {
3129 		rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
3130 					adapter->vdev->unit_address,
3131 					scrq->crq_num);
3132 	} while (rc == H_BUSY || rc == H_IS_LONG_BUSY(rc));
3133 reg_failed:
3134 	dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
3135 			 DMA_BIDIRECTIONAL);
3136 map_failed:
3137 	free_pages((unsigned long)scrq->msgs, 2);
3138 zero_page_failed:
3139 	kfree(scrq);
3140 
3141 	return NULL;
3142 }
3143 
3144 static void release_sub_crqs(struct ibmvnic_adapter *adapter, bool do_h_free)
3145 {
3146 	int i;
3147 
3148 	if (adapter->tx_scrq) {
3149 		for (i = 0; i < adapter->num_active_tx_scrqs; i++) {
3150 			if (!adapter->tx_scrq[i])
3151 				continue;
3152 
3153 			netdev_dbg(adapter->netdev, "Releasing tx_scrq[%d]\n",
3154 				   i);
3155 			if (adapter->tx_scrq[i]->irq) {
3156 				free_irq(adapter->tx_scrq[i]->irq,
3157 					 adapter->tx_scrq[i]);
3158 				irq_dispose_mapping(adapter->tx_scrq[i]->irq);
3159 				adapter->tx_scrq[i]->irq = 0;
3160 			}
3161 
3162 			release_sub_crq_queue(adapter, adapter->tx_scrq[i],
3163 					      do_h_free);
3164 		}
3165 
3166 		kfree(adapter->tx_scrq);
3167 		adapter->tx_scrq = NULL;
3168 		adapter->num_active_tx_scrqs = 0;
3169 	}
3170 
3171 	if (adapter->rx_scrq) {
3172 		for (i = 0; i < adapter->num_active_rx_scrqs; i++) {
3173 			if (!adapter->rx_scrq[i])
3174 				continue;
3175 
3176 			netdev_dbg(adapter->netdev, "Releasing rx_scrq[%d]\n",
3177 				   i);
3178 			if (adapter->rx_scrq[i]->irq) {
3179 				free_irq(adapter->rx_scrq[i]->irq,
3180 					 adapter->rx_scrq[i]);
3181 				irq_dispose_mapping(adapter->rx_scrq[i]->irq);
3182 				adapter->rx_scrq[i]->irq = 0;
3183 			}
3184 
3185 			release_sub_crq_queue(adapter, adapter->rx_scrq[i],
3186 					      do_h_free);
3187 		}
3188 
3189 		kfree(adapter->rx_scrq);
3190 		adapter->rx_scrq = NULL;
3191 		adapter->num_active_rx_scrqs = 0;
3192 	}
3193 }
3194 
3195 static int disable_scrq_irq(struct ibmvnic_adapter *adapter,
3196 			    struct ibmvnic_sub_crq_queue *scrq)
3197 {
3198 	struct device *dev = &adapter->vdev->dev;
3199 	unsigned long rc;
3200 
3201 	rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
3202 				H_DISABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
3203 	if (rc)
3204 		dev_err(dev, "Couldn't disable scrq irq 0x%lx. rc=%ld\n",
3205 			scrq->hw_irq, rc);
3206 	return rc;
3207 }
3208 
3209 static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
3210 			   struct ibmvnic_sub_crq_queue *scrq)
3211 {
3212 	struct device *dev = &adapter->vdev->dev;
3213 	unsigned long rc;
3214 
3215 	if (scrq->hw_irq > 0x100000000ULL) {
3216 		dev_err(dev, "bad hw_irq = %lx\n", scrq->hw_irq);
3217 		return 1;
3218 	}
3219 
3220 	if (test_bit(0, &adapter->resetting) &&
3221 	    adapter->reset_reason == VNIC_RESET_MOBILITY) {
3222 		u64 val = (0xff000000) | scrq->hw_irq;
3223 
3224 		rc = plpar_hcall_norets(H_EOI, val);
3225 		/* H_EOI would fail with rc = H_FUNCTION when running
3226 		 * in XIVE mode which is expected, but not an error.
3227 		 */
3228 		if (rc && (rc != H_FUNCTION))
3229 			dev_err(dev, "H_EOI FAILED irq 0x%llx. rc=%ld\n",
3230 				val, rc);
3231 	}
3232 
3233 	rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
3234 				H_ENABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
3235 	if (rc)
3236 		dev_err(dev, "Couldn't enable scrq irq 0x%lx. rc=%ld\n",
3237 			scrq->hw_irq, rc);
3238 	return rc;
3239 }
3240 
3241 static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
3242 			       struct ibmvnic_sub_crq_queue *scrq)
3243 {
3244 	struct device *dev = &adapter->vdev->dev;
3245 	struct ibmvnic_tx_pool *tx_pool;
3246 	struct ibmvnic_tx_buff *txbuff;
3247 	struct netdev_queue *txq;
3248 	union sub_crq *next;
3249 	int index;
3250 	int i;
3251 
3252 restart_loop:
3253 	while (pending_scrq(adapter, scrq)) {
3254 		unsigned int pool = scrq->pool_index;
3255 		int num_entries = 0;
3256 		int total_bytes = 0;
3257 		int num_packets = 0;
3258 
3259 		/* The queue entry at the current index is peeked at above
3260 		 * to determine that there is a valid descriptor awaiting
3261 		 * processing. We want to be sure that the current slot
3262 		 * holds a valid descriptor before reading its contents.
3263 		 */
3264 		dma_rmb();
3265 
3266 		next = ibmvnic_next_scrq(adapter, scrq);
3267 		for (i = 0; i < next->tx_comp.num_comps; i++) {
3268 			if (next->tx_comp.rcs[i])
3269 				dev_err(dev, "tx error %x\n",
3270 					next->tx_comp.rcs[i]);
3271 			index = be32_to_cpu(next->tx_comp.correlators[i]);
3272 			if (index & IBMVNIC_TSO_POOL_MASK) {
3273 				tx_pool = &adapter->tso_pool[pool];
3274 				index &= ~IBMVNIC_TSO_POOL_MASK;
3275 			} else {
3276 				tx_pool = &adapter->tx_pool[pool];
3277 			}
3278 
3279 			txbuff = &tx_pool->tx_buff[index];
3280 			num_packets++;
3281 			num_entries += txbuff->num_entries;
3282 			if (txbuff->skb) {
3283 				total_bytes += txbuff->skb->len;
3284 				dev_consume_skb_irq(txbuff->skb);
3285 				txbuff->skb = NULL;
3286 			} else {
3287 				netdev_warn(adapter->netdev,
3288 					    "TX completion received with NULL socket buffer\n");
3289 			}
3290 			tx_pool->free_map[tx_pool->producer_index] = index;
3291 			tx_pool->producer_index =
3292 				(tx_pool->producer_index + 1) %
3293 					tx_pool->num_buffers;
3294 		}
3295 		/* remove tx_comp scrq*/
3296 		next->tx_comp.first = 0;
3297 
3298 		txq = netdev_get_tx_queue(adapter->netdev, scrq->pool_index);
3299 		netdev_tx_completed_queue(txq, num_packets, total_bytes);
3300 
3301 		if (atomic_sub_return(num_entries, &scrq->used) <=
3302 		    (adapter->req_tx_entries_per_subcrq / 2) &&
3303 		    __netif_subqueue_stopped(adapter->netdev,
3304 					     scrq->pool_index)) {
3305 			netif_wake_subqueue(adapter->netdev, scrq->pool_index);
3306 			netdev_dbg(adapter->netdev, "Started queue %d\n",
3307 				   scrq->pool_index);
3308 		}
3309 	}
3310 
3311 	enable_scrq_irq(adapter, scrq);
3312 
3313 	if (pending_scrq(adapter, scrq)) {
3314 		disable_scrq_irq(adapter, scrq);
3315 		goto restart_loop;
3316 	}
3317 
3318 	return 0;
3319 }
3320 
3321 static irqreturn_t ibmvnic_interrupt_tx(int irq, void *instance)
3322 {
3323 	struct ibmvnic_sub_crq_queue *scrq = instance;
3324 	struct ibmvnic_adapter *adapter = scrq->adapter;
3325 
3326 	disable_scrq_irq(adapter, scrq);
3327 	ibmvnic_complete_tx(adapter, scrq);
3328 
3329 	return IRQ_HANDLED;
3330 }
3331 
3332 static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance)
3333 {
3334 	struct ibmvnic_sub_crq_queue *scrq = instance;
3335 	struct ibmvnic_adapter *adapter = scrq->adapter;
3336 
3337 	/* When booting a kdump kernel we can hit pending interrupts
3338 	 * prior to completing driver initialization.
3339 	 */
3340 	if (unlikely(adapter->state != VNIC_OPEN))
3341 		return IRQ_NONE;
3342 
3343 	adapter->rx_stats_buffers[scrq->scrq_num].interrupts++;
3344 
3345 	if (napi_schedule_prep(&adapter->napi[scrq->scrq_num])) {
3346 		disable_scrq_irq(adapter, scrq);
3347 		__napi_schedule(&adapter->napi[scrq->scrq_num]);
3348 	}
3349 
3350 	return IRQ_HANDLED;
3351 }
3352 
3353 static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter)
3354 {
3355 	struct device *dev = &adapter->vdev->dev;
3356 	struct ibmvnic_sub_crq_queue *scrq;
3357 	int i = 0, j = 0;
3358 	int rc = 0;
3359 
3360 	for (i = 0; i < adapter->req_tx_queues; i++) {
3361 		netdev_dbg(adapter->netdev, "Initializing tx_scrq[%d] irq\n",
3362 			   i);
3363 		scrq = adapter->tx_scrq[i];
3364 		scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
3365 
3366 		if (!scrq->irq) {
3367 			rc = -EINVAL;
3368 			dev_err(dev, "Error mapping irq\n");
3369 			goto req_tx_irq_failed;
3370 		}
3371 
3372 		snprintf(scrq->name, sizeof(scrq->name), "ibmvnic-%x-tx%d",
3373 			 adapter->vdev->unit_address, i);
3374 		rc = request_irq(scrq->irq, ibmvnic_interrupt_tx,
3375 				 0, scrq->name, scrq);
3376 
3377 		if (rc) {
3378 			dev_err(dev, "Couldn't register tx irq 0x%x. rc=%d\n",
3379 				scrq->irq, rc);
3380 			irq_dispose_mapping(scrq->irq);
3381 			goto req_tx_irq_failed;
3382 		}
3383 	}
3384 
3385 	for (i = 0; i < adapter->req_rx_queues; i++) {
3386 		netdev_dbg(adapter->netdev, "Initializing rx_scrq[%d] irq\n",
3387 			   i);
3388 		scrq = adapter->rx_scrq[i];
3389 		scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
3390 		if (!scrq->irq) {
3391 			rc = -EINVAL;
3392 			dev_err(dev, "Error mapping irq\n");
3393 			goto req_rx_irq_failed;
3394 		}
3395 		snprintf(scrq->name, sizeof(scrq->name), "ibmvnic-%x-rx%d",
3396 			 adapter->vdev->unit_address, i);
3397 		rc = request_irq(scrq->irq, ibmvnic_interrupt_rx,
3398 				 0, scrq->name, scrq);
3399 		if (rc) {
3400 			dev_err(dev, "Couldn't register rx irq 0x%x. rc=%d\n",
3401 				scrq->irq, rc);
3402 			irq_dispose_mapping(scrq->irq);
3403 			goto req_rx_irq_failed;
3404 		}
3405 	}
3406 	return rc;
3407 
3408 req_rx_irq_failed:
3409 	for (j = 0; j < i; j++) {
3410 		free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]);
3411 		irq_dispose_mapping(adapter->rx_scrq[j]->irq);
3412 	}
3413 	i = adapter->req_tx_queues;
3414 req_tx_irq_failed:
3415 	for (j = 0; j < i; j++) {
3416 		free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
3417 		irq_dispose_mapping(adapter->tx_scrq[j]->irq);
3418 	}
3419 	release_sub_crqs(adapter, 1);
3420 	return rc;
3421 }
3422 
3423 static int init_sub_crqs(struct ibmvnic_adapter *adapter)
3424 {
3425 	struct device *dev = &adapter->vdev->dev;
3426 	struct ibmvnic_sub_crq_queue **allqueues;
3427 	int registered_queues = 0;
3428 	int total_queues;
3429 	int more = 0;
3430 	int i;
3431 
3432 	total_queues = adapter->req_tx_queues + adapter->req_rx_queues;
3433 
3434 	allqueues = kcalloc(total_queues, sizeof(*allqueues), GFP_KERNEL);
3435 	if (!allqueues)
3436 		return -1;
3437 
3438 	for (i = 0; i < total_queues; i++) {
3439 		allqueues[i] = init_sub_crq_queue(adapter);
3440 		if (!allqueues[i]) {
3441 			dev_warn(dev, "Couldn't allocate all sub-crqs\n");
3442 			break;
3443 		}
3444 		registered_queues++;
3445 	}
3446 
3447 	/* Make sure we were able to register the minimum number of queues */
3448 	if (registered_queues <
3449 	    adapter->min_tx_queues + adapter->min_rx_queues) {
3450 		dev_err(dev, "Fatal: Couldn't init  min number of sub-crqs\n");
3451 		goto tx_failed;
3452 	}
3453 
3454 	/* Distribute the failed allocated queues*/
3455 	for (i = 0; i < total_queues - registered_queues + more ; i++) {
3456 		netdev_dbg(adapter->netdev, "Reducing number of queues\n");
3457 		switch (i % 3) {
3458 		case 0:
3459 			if (adapter->req_rx_queues > adapter->min_rx_queues)
3460 				adapter->req_rx_queues--;
3461 			else
3462 				more++;
3463 			break;
3464 		case 1:
3465 			if (adapter->req_tx_queues > adapter->min_tx_queues)
3466 				adapter->req_tx_queues--;
3467 			else
3468 				more++;
3469 			break;
3470 		}
3471 	}
3472 
3473 	adapter->tx_scrq = kcalloc(adapter->req_tx_queues,
3474 				   sizeof(*adapter->tx_scrq), GFP_KERNEL);
3475 	if (!adapter->tx_scrq)
3476 		goto tx_failed;
3477 
3478 	for (i = 0; i < adapter->req_tx_queues; i++) {
3479 		adapter->tx_scrq[i] = allqueues[i];
3480 		adapter->tx_scrq[i]->pool_index = i;
3481 		adapter->num_active_tx_scrqs++;
3482 	}
3483 
3484 	adapter->rx_scrq = kcalloc(adapter->req_rx_queues,
3485 				   sizeof(*adapter->rx_scrq), GFP_KERNEL);
3486 	if (!adapter->rx_scrq)
3487 		goto rx_failed;
3488 
3489 	for (i = 0; i < adapter->req_rx_queues; i++) {
3490 		adapter->rx_scrq[i] = allqueues[i + adapter->req_tx_queues];
3491 		adapter->rx_scrq[i]->scrq_num = i;
3492 		adapter->num_active_rx_scrqs++;
3493 	}
3494 
3495 	kfree(allqueues);
3496 	return 0;
3497 
3498 rx_failed:
3499 	kfree(adapter->tx_scrq);
3500 	adapter->tx_scrq = NULL;
3501 tx_failed:
3502 	for (i = 0; i < registered_queues; i++)
3503 		release_sub_crq_queue(adapter, allqueues[i], 1);
3504 	kfree(allqueues);
3505 	return -1;
3506 }
3507 
3508 static void send_request_cap(struct ibmvnic_adapter *adapter, int retry)
3509 {
3510 	struct device *dev = &adapter->vdev->dev;
3511 	union ibmvnic_crq crq;
3512 	int max_entries;
3513 
3514 	if (!retry) {
3515 		/* Sub-CRQ entries are 32 byte long */
3516 		int entries_page = 4 * PAGE_SIZE / (sizeof(u64) * 4);
3517 
3518 		if (adapter->min_tx_entries_per_subcrq > entries_page ||
3519 		    adapter->min_rx_add_entries_per_subcrq > entries_page) {
3520 			dev_err(dev, "Fatal, invalid entries per sub-crq\n");
3521 			return;
3522 		}
3523 
3524 		if (adapter->desired.mtu)
3525 			adapter->req_mtu = adapter->desired.mtu;
3526 		else
3527 			adapter->req_mtu = adapter->netdev->mtu + ETH_HLEN;
3528 
3529 		if (!adapter->desired.tx_entries)
3530 			adapter->desired.tx_entries =
3531 					adapter->max_tx_entries_per_subcrq;
3532 		if (!adapter->desired.rx_entries)
3533 			adapter->desired.rx_entries =
3534 					adapter->max_rx_add_entries_per_subcrq;
3535 
3536 		max_entries = IBMVNIC_MAX_LTB_SIZE /
3537 			      (adapter->req_mtu + IBMVNIC_BUFFER_HLEN);
3538 
3539 		if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) *
3540 			adapter->desired.tx_entries > IBMVNIC_MAX_LTB_SIZE) {
3541 			adapter->desired.tx_entries = max_entries;
3542 		}
3543 
3544 		if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) *
3545 			adapter->desired.rx_entries > IBMVNIC_MAX_LTB_SIZE) {
3546 			adapter->desired.rx_entries = max_entries;
3547 		}
3548 
3549 		if (adapter->desired.tx_entries)
3550 			adapter->req_tx_entries_per_subcrq =
3551 					adapter->desired.tx_entries;
3552 		else
3553 			adapter->req_tx_entries_per_subcrq =
3554 					adapter->max_tx_entries_per_subcrq;
3555 
3556 		if (adapter->desired.rx_entries)
3557 			adapter->req_rx_add_entries_per_subcrq =
3558 					adapter->desired.rx_entries;
3559 		else
3560 			adapter->req_rx_add_entries_per_subcrq =
3561 					adapter->max_rx_add_entries_per_subcrq;
3562 
3563 		if (adapter->desired.tx_queues)
3564 			adapter->req_tx_queues =
3565 					adapter->desired.tx_queues;
3566 		else
3567 			adapter->req_tx_queues =
3568 					adapter->opt_tx_comp_sub_queues;
3569 
3570 		if (adapter->desired.rx_queues)
3571 			adapter->req_rx_queues =
3572 					adapter->desired.rx_queues;
3573 		else
3574 			adapter->req_rx_queues =
3575 					adapter->opt_rx_comp_queues;
3576 
3577 		adapter->req_rx_add_queues = adapter->max_rx_add_queues;
3578 	}
3579 
3580 	memset(&crq, 0, sizeof(crq));
3581 	crq.request_capability.first = IBMVNIC_CRQ_CMD;
3582 	crq.request_capability.cmd = REQUEST_CAPABILITY;
3583 
3584 	crq.request_capability.capability = cpu_to_be16(REQ_TX_QUEUES);
3585 	crq.request_capability.number = cpu_to_be64(adapter->req_tx_queues);
3586 	atomic_inc(&adapter->running_cap_crqs);
3587 	ibmvnic_send_crq(adapter, &crq);
3588 
3589 	crq.request_capability.capability = cpu_to_be16(REQ_RX_QUEUES);
3590 	crq.request_capability.number = cpu_to_be64(adapter->req_rx_queues);
3591 	atomic_inc(&adapter->running_cap_crqs);
3592 	ibmvnic_send_crq(adapter, &crq);
3593 
3594 	crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_QUEUES);
3595 	crq.request_capability.number = cpu_to_be64(adapter->req_rx_add_queues);
3596 	atomic_inc(&adapter->running_cap_crqs);
3597 	ibmvnic_send_crq(adapter, &crq);
3598 
3599 	crq.request_capability.capability =
3600 	    cpu_to_be16(REQ_TX_ENTRIES_PER_SUBCRQ);
3601 	crq.request_capability.number =
3602 	    cpu_to_be64(adapter->req_tx_entries_per_subcrq);
3603 	atomic_inc(&adapter->running_cap_crqs);
3604 	ibmvnic_send_crq(adapter, &crq);
3605 
3606 	crq.request_capability.capability =
3607 	    cpu_to_be16(REQ_RX_ADD_ENTRIES_PER_SUBCRQ);
3608 	crq.request_capability.number =
3609 	    cpu_to_be64(adapter->req_rx_add_entries_per_subcrq);
3610 	atomic_inc(&adapter->running_cap_crqs);
3611 	ibmvnic_send_crq(adapter, &crq);
3612 
3613 	crq.request_capability.capability = cpu_to_be16(REQ_MTU);
3614 	crq.request_capability.number = cpu_to_be64(adapter->req_mtu);
3615 	atomic_inc(&adapter->running_cap_crqs);
3616 	ibmvnic_send_crq(adapter, &crq);
3617 
3618 	if (adapter->netdev->flags & IFF_PROMISC) {
3619 		if (adapter->promisc_supported) {
3620 			crq.request_capability.capability =
3621 			    cpu_to_be16(PROMISC_REQUESTED);
3622 			crq.request_capability.number = cpu_to_be64(1);
3623 			atomic_inc(&adapter->running_cap_crqs);
3624 			ibmvnic_send_crq(adapter, &crq);
3625 		}
3626 	} else {
3627 		crq.request_capability.capability =
3628 		    cpu_to_be16(PROMISC_REQUESTED);
3629 		crq.request_capability.number = cpu_to_be64(0);
3630 		atomic_inc(&adapter->running_cap_crqs);
3631 		ibmvnic_send_crq(adapter, &crq);
3632 	}
3633 }
3634 
3635 static int pending_scrq(struct ibmvnic_adapter *adapter,
3636 			struct ibmvnic_sub_crq_queue *scrq)
3637 {
3638 	union sub_crq *entry = &scrq->msgs[scrq->cur];
3639 
3640 	if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP)
3641 		return 1;
3642 	else
3643 		return 0;
3644 }
3645 
3646 static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *adapter,
3647 					struct ibmvnic_sub_crq_queue *scrq)
3648 {
3649 	union sub_crq *entry;
3650 	unsigned long flags;
3651 
3652 	spin_lock_irqsave(&scrq->lock, flags);
3653 	entry = &scrq->msgs[scrq->cur];
3654 	if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP) {
3655 		if (++scrq->cur == scrq->size)
3656 			scrq->cur = 0;
3657 	} else {
3658 		entry = NULL;
3659 	}
3660 	spin_unlock_irqrestore(&scrq->lock, flags);
3661 
3662 	/* Ensure that the entire buffer descriptor has been
3663 	 * loaded before reading its contents
3664 	 */
3665 	dma_rmb();
3666 
3667 	return entry;
3668 }
3669 
3670 static union ibmvnic_crq *ibmvnic_next_crq(struct ibmvnic_adapter *adapter)
3671 {
3672 	struct ibmvnic_crq_queue *queue = &adapter->crq;
3673 	union ibmvnic_crq *crq;
3674 
3675 	crq = &queue->msgs[queue->cur];
3676 	if (crq->generic.first & IBMVNIC_CRQ_CMD_RSP) {
3677 		if (++queue->cur == queue->size)
3678 			queue->cur = 0;
3679 	} else {
3680 		crq = NULL;
3681 	}
3682 
3683 	return crq;
3684 }
3685 
3686 static void print_subcrq_error(struct device *dev, int rc, const char *func)
3687 {
3688 	switch (rc) {
3689 	case H_PARAMETER:
3690 		dev_warn_ratelimited(dev,
3691 				     "%s failed: Send request is malformed or adapter failover pending. (rc=%d)\n",
3692 				     func, rc);
3693 		break;
3694 	case H_CLOSED:
3695 		dev_warn_ratelimited(dev,
3696 				     "%s failed: Backing queue closed. Adapter is down or failover pending. (rc=%d)\n",
3697 				     func, rc);
3698 		break;
3699 	default:
3700 		dev_err_ratelimited(dev, "%s failed: (rc=%d)\n", func, rc);
3701 		break;
3702 	}
3703 }
3704 
3705 static int send_subcrq_indirect(struct ibmvnic_adapter *adapter,
3706 				u64 remote_handle, u64 ioba, u64 num_entries)
3707 {
3708 	unsigned int ua = adapter->vdev->unit_address;
3709 	struct device *dev = &adapter->vdev->dev;
3710 	int rc;
3711 
3712 	/* Make sure the hypervisor sees the complete request */
3713 	mb();
3714 	rc = plpar_hcall_norets(H_SEND_SUB_CRQ_INDIRECT, ua,
3715 				cpu_to_be64(remote_handle),
3716 				ioba, num_entries);
3717 
3718 	if (rc)
3719 		print_subcrq_error(dev, rc, __func__);
3720 
3721 	return rc;
3722 }
3723 
3724 static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter,
3725 			    union ibmvnic_crq *crq)
3726 {
3727 	unsigned int ua = adapter->vdev->unit_address;
3728 	struct device *dev = &adapter->vdev->dev;
3729 	u64 *u64_crq = (u64 *)crq;
3730 	int rc;
3731 
3732 	netdev_dbg(adapter->netdev, "Sending CRQ: %016lx %016lx\n",
3733 		   (unsigned long int)cpu_to_be64(u64_crq[0]),
3734 		   (unsigned long int)cpu_to_be64(u64_crq[1]));
3735 
3736 	if (!adapter->crq.active &&
3737 	    crq->generic.first != IBMVNIC_CRQ_INIT_CMD) {
3738 		dev_warn(dev, "Invalid request detected while CRQ is inactive, possible device state change during reset\n");
3739 		return -EINVAL;
3740 	}
3741 
3742 	/* Make sure the hypervisor sees the complete request */
3743 	mb();
3744 
3745 	rc = plpar_hcall_norets(H_SEND_CRQ, ua,
3746 				cpu_to_be64(u64_crq[0]),
3747 				cpu_to_be64(u64_crq[1]));
3748 
3749 	if (rc) {
3750 		if (rc == H_CLOSED) {
3751 			dev_warn(dev, "CRQ Queue closed\n");
3752 			/* do not reset, report the fail, wait for passive init from server */
3753 		}
3754 
3755 		dev_warn(dev, "Send error (rc=%d)\n", rc);
3756 	}
3757 
3758 	return rc;
3759 }
3760 
3761 static int ibmvnic_send_crq_init(struct ibmvnic_adapter *adapter)
3762 {
3763 	struct device *dev = &adapter->vdev->dev;
3764 	union ibmvnic_crq crq;
3765 	int retries = 100;
3766 	int rc;
3767 
3768 	memset(&crq, 0, sizeof(crq));
3769 	crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
3770 	crq.generic.cmd = IBMVNIC_CRQ_INIT;
3771 	netdev_dbg(adapter->netdev, "Sending CRQ init\n");
3772 
3773 	do {
3774 		rc = ibmvnic_send_crq(adapter, &crq);
3775 		if (rc != H_CLOSED)
3776 			break;
3777 		retries--;
3778 		msleep(50);
3779 
3780 	} while (retries > 0);
3781 
3782 	if (rc) {
3783 		dev_err(dev, "Failed to send init request, rc = %d\n", rc);
3784 		return rc;
3785 	}
3786 
3787 	return 0;
3788 }
3789 
3790 static int send_version_xchg(struct ibmvnic_adapter *adapter)
3791 {
3792 	union ibmvnic_crq crq;
3793 
3794 	memset(&crq, 0, sizeof(crq));
3795 	crq.version_exchange.first = IBMVNIC_CRQ_CMD;
3796 	crq.version_exchange.cmd = VERSION_EXCHANGE;
3797 	crq.version_exchange.version = cpu_to_be16(ibmvnic_version);
3798 
3799 	return ibmvnic_send_crq(adapter, &crq);
3800 }
3801 
3802 struct vnic_login_client_data {
3803 	u8	type;
3804 	__be16	len;
3805 	char	name[];
3806 } __packed;
3807 
3808 static int vnic_client_data_len(struct ibmvnic_adapter *adapter)
3809 {
3810 	int len;
3811 
3812 	/* Calculate the amount of buffer space needed for the
3813 	 * vnic client data in the login buffer. There are four entries,
3814 	 * OS name, LPAR name, device name, and a null last entry.
3815 	 */
3816 	len = 4 * sizeof(struct vnic_login_client_data);
3817 	len += 6; /* "Linux" plus NULL */
3818 	len += strlen(utsname()->nodename) + 1;
3819 	len += strlen(adapter->netdev->name) + 1;
3820 
3821 	return len;
3822 }
3823 
3824 static void vnic_add_client_data(struct ibmvnic_adapter *adapter,
3825 				 struct vnic_login_client_data *vlcd)
3826 {
3827 	const char *os_name = "Linux";
3828 	int len;
3829 
3830 	/* Type 1 - LPAR OS */
3831 	vlcd->type = 1;
3832 	len = strlen(os_name) + 1;
3833 	vlcd->len = cpu_to_be16(len);
3834 	strncpy(vlcd->name, os_name, len);
3835 	vlcd = (struct vnic_login_client_data *)(vlcd->name + len);
3836 
3837 	/* Type 2 - LPAR name */
3838 	vlcd->type = 2;
3839 	len = strlen(utsname()->nodename) + 1;
3840 	vlcd->len = cpu_to_be16(len);
3841 	strncpy(vlcd->name, utsname()->nodename, len);
3842 	vlcd = (struct vnic_login_client_data *)(vlcd->name + len);
3843 
3844 	/* Type 3 - device name */
3845 	vlcd->type = 3;
3846 	len = strlen(adapter->netdev->name) + 1;
3847 	vlcd->len = cpu_to_be16(len);
3848 	strncpy(vlcd->name, adapter->netdev->name, len);
3849 }
3850 
3851 static int send_login(struct ibmvnic_adapter *adapter)
3852 {
3853 	struct ibmvnic_login_rsp_buffer *login_rsp_buffer;
3854 	struct ibmvnic_login_buffer *login_buffer;
3855 	struct device *dev = &adapter->vdev->dev;
3856 	struct vnic_login_client_data *vlcd;
3857 	dma_addr_t rsp_buffer_token;
3858 	dma_addr_t buffer_token;
3859 	size_t rsp_buffer_size;
3860 	union ibmvnic_crq crq;
3861 	int client_data_len;
3862 	size_t buffer_size;
3863 	__be64 *tx_list_p;
3864 	__be64 *rx_list_p;
3865 	int rc;
3866 	int i;
3867 
3868 	if (!adapter->tx_scrq || !adapter->rx_scrq) {
3869 		netdev_err(adapter->netdev,
3870 			   "RX or TX queues are not allocated, device login failed\n");
3871 		return -1;
3872 	}
3873 
3874 	release_login_buffer(adapter);
3875 	release_login_rsp_buffer(adapter);
3876 
3877 	client_data_len = vnic_client_data_len(adapter);
3878 
3879 	buffer_size =
3880 	    sizeof(struct ibmvnic_login_buffer) +
3881 	    sizeof(u64) * (adapter->req_tx_queues + adapter->req_rx_queues) +
3882 	    client_data_len;
3883 
3884 	login_buffer = kzalloc(buffer_size, GFP_ATOMIC);
3885 	if (!login_buffer)
3886 		goto buf_alloc_failed;
3887 
3888 	buffer_token = dma_map_single(dev, login_buffer, buffer_size,
3889 				      DMA_TO_DEVICE);
3890 	if (dma_mapping_error(dev, buffer_token)) {
3891 		dev_err(dev, "Couldn't map login buffer\n");
3892 		goto buf_map_failed;
3893 	}
3894 
3895 	rsp_buffer_size = sizeof(struct ibmvnic_login_rsp_buffer) +
3896 			  sizeof(u64) * adapter->req_tx_queues +
3897 			  sizeof(u64) * adapter->req_rx_queues +
3898 			  sizeof(u64) * adapter->req_rx_queues +
3899 			  sizeof(u8) * IBMVNIC_TX_DESC_VERSIONS;
3900 
3901 	login_rsp_buffer = kmalloc(rsp_buffer_size, GFP_ATOMIC);
3902 	if (!login_rsp_buffer)
3903 		goto buf_rsp_alloc_failed;
3904 
3905 	rsp_buffer_token = dma_map_single(dev, login_rsp_buffer,
3906 					  rsp_buffer_size, DMA_FROM_DEVICE);
3907 	if (dma_mapping_error(dev, rsp_buffer_token)) {
3908 		dev_err(dev, "Couldn't map login rsp buffer\n");
3909 		goto buf_rsp_map_failed;
3910 	}
3911 
3912 	adapter->login_buf = login_buffer;
3913 	adapter->login_buf_token = buffer_token;
3914 	adapter->login_buf_sz = buffer_size;
3915 	adapter->login_rsp_buf = login_rsp_buffer;
3916 	adapter->login_rsp_buf_token = rsp_buffer_token;
3917 	adapter->login_rsp_buf_sz = rsp_buffer_size;
3918 
3919 	login_buffer->len = cpu_to_be32(buffer_size);
3920 	login_buffer->version = cpu_to_be32(INITIAL_VERSION_LB);
3921 	login_buffer->num_txcomp_subcrqs = cpu_to_be32(adapter->req_tx_queues);
3922 	login_buffer->off_txcomp_subcrqs =
3923 	    cpu_to_be32(sizeof(struct ibmvnic_login_buffer));
3924 	login_buffer->num_rxcomp_subcrqs = cpu_to_be32(adapter->req_rx_queues);
3925 	login_buffer->off_rxcomp_subcrqs =
3926 	    cpu_to_be32(sizeof(struct ibmvnic_login_buffer) +
3927 			sizeof(u64) * adapter->req_tx_queues);
3928 	login_buffer->login_rsp_ioba = cpu_to_be32(rsp_buffer_token);
3929 	login_buffer->login_rsp_len = cpu_to_be32(rsp_buffer_size);
3930 
3931 	tx_list_p = (__be64 *)((char *)login_buffer +
3932 				      sizeof(struct ibmvnic_login_buffer));
3933 	rx_list_p = (__be64 *)((char *)login_buffer +
3934 				      sizeof(struct ibmvnic_login_buffer) +
3935 				      sizeof(u64) * adapter->req_tx_queues);
3936 
3937 	for (i = 0; i < adapter->req_tx_queues; i++) {
3938 		if (adapter->tx_scrq[i]) {
3939 			tx_list_p[i] = cpu_to_be64(adapter->tx_scrq[i]->
3940 						   crq_num);
3941 		}
3942 	}
3943 
3944 	for (i = 0; i < adapter->req_rx_queues; i++) {
3945 		if (adapter->rx_scrq[i]) {
3946 			rx_list_p[i] = cpu_to_be64(adapter->rx_scrq[i]->
3947 						   crq_num);
3948 		}
3949 	}
3950 
3951 	/* Insert vNIC login client data */
3952 	vlcd = (struct vnic_login_client_data *)
3953 		((char *)rx_list_p + (sizeof(u64) * adapter->req_rx_queues));
3954 	login_buffer->client_data_offset =
3955 			cpu_to_be32((char *)vlcd - (char *)login_buffer);
3956 	login_buffer->client_data_len = cpu_to_be32(client_data_len);
3957 
3958 	vnic_add_client_data(adapter, vlcd);
3959 
3960 	netdev_dbg(adapter->netdev, "Login Buffer:\n");
3961 	for (i = 0; i < (adapter->login_buf_sz - 1) / 8 + 1; i++) {
3962 		netdev_dbg(adapter->netdev, "%016lx\n",
3963 			   ((unsigned long int *)(adapter->login_buf))[i]);
3964 	}
3965 
3966 	memset(&crq, 0, sizeof(crq));
3967 	crq.login.first = IBMVNIC_CRQ_CMD;
3968 	crq.login.cmd = LOGIN;
3969 	crq.login.ioba = cpu_to_be32(buffer_token);
3970 	crq.login.len = cpu_to_be32(buffer_size);
3971 
3972 	adapter->login_pending = true;
3973 	rc = ibmvnic_send_crq(adapter, &crq);
3974 	if (rc) {
3975 		adapter->login_pending = false;
3976 		netdev_err(adapter->netdev, "Failed to send login, rc=%d\n", rc);
3977 		goto buf_rsp_map_failed;
3978 	}
3979 
3980 	return 0;
3981 
3982 buf_rsp_map_failed:
3983 	kfree(login_rsp_buffer);
3984 	adapter->login_rsp_buf = NULL;
3985 buf_rsp_alloc_failed:
3986 	dma_unmap_single(dev, buffer_token, buffer_size, DMA_TO_DEVICE);
3987 buf_map_failed:
3988 	kfree(login_buffer);
3989 	adapter->login_buf = NULL;
3990 buf_alloc_failed:
3991 	return -1;
3992 }
3993 
3994 static int send_request_map(struct ibmvnic_adapter *adapter, dma_addr_t addr,
3995 			    u32 len, u8 map_id)
3996 {
3997 	union ibmvnic_crq crq;
3998 
3999 	memset(&crq, 0, sizeof(crq));
4000 	crq.request_map.first = IBMVNIC_CRQ_CMD;
4001 	crq.request_map.cmd = REQUEST_MAP;
4002 	crq.request_map.map_id = map_id;
4003 	crq.request_map.ioba = cpu_to_be32(addr);
4004 	crq.request_map.len = cpu_to_be32(len);
4005 	return ibmvnic_send_crq(adapter, &crq);
4006 }
4007 
4008 static int send_request_unmap(struct ibmvnic_adapter *adapter, u8 map_id)
4009 {
4010 	union ibmvnic_crq crq;
4011 
4012 	memset(&crq, 0, sizeof(crq));
4013 	crq.request_unmap.first = IBMVNIC_CRQ_CMD;
4014 	crq.request_unmap.cmd = REQUEST_UNMAP;
4015 	crq.request_unmap.map_id = map_id;
4016 	return ibmvnic_send_crq(adapter, &crq);
4017 }
4018 
4019 static void send_query_map(struct ibmvnic_adapter *adapter)
4020 {
4021 	union ibmvnic_crq crq;
4022 
4023 	memset(&crq, 0, sizeof(crq));
4024 	crq.query_map.first = IBMVNIC_CRQ_CMD;
4025 	crq.query_map.cmd = QUERY_MAP;
4026 	ibmvnic_send_crq(adapter, &crq);
4027 }
4028 
4029 /* Send a series of CRQs requesting various capabilities of the VNIC server */
4030 static void send_query_cap(struct ibmvnic_adapter *adapter)
4031 {
4032 	union ibmvnic_crq crq;
4033 
4034 	atomic_set(&adapter->running_cap_crqs, 0);
4035 	memset(&crq, 0, sizeof(crq));
4036 	crq.query_capability.first = IBMVNIC_CRQ_CMD;
4037 	crq.query_capability.cmd = QUERY_CAPABILITY;
4038 
4039 	crq.query_capability.capability = cpu_to_be16(MIN_TX_QUEUES);
4040 	atomic_inc(&adapter->running_cap_crqs);
4041 	ibmvnic_send_crq(adapter, &crq);
4042 
4043 	crq.query_capability.capability = cpu_to_be16(MIN_RX_QUEUES);
4044 	atomic_inc(&adapter->running_cap_crqs);
4045 	ibmvnic_send_crq(adapter, &crq);
4046 
4047 	crq.query_capability.capability = cpu_to_be16(MIN_RX_ADD_QUEUES);
4048 	atomic_inc(&adapter->running_cap_crqs);
4049 	ibmvnic_send_crq(adapter, &crq);
4050 
4051 	crq.query_capability.capability = cpu_to_be16(MAX_TX_QUEUES);
4052 	atomic_inc(&adapter->running_cap_crqs);
4053 	ibmvnic_send_crq(adapter, &crq);
4054 
4055 	crq.query_capability.capability = cpu_to_be16(MAX_RX_QUEUES);
4056 	atomic_inc(&adapter->running_cap_crqs);
4057 	ibmvnic_send_crq(adapter, &crq);
4058 
4059 	crq.query_capability.capability = cpu_to_be16(MAX_RX_ADD_QUEUES);
4060 	atomic_inc(&adapter->running_cap_crqs);
4061 	ibmvnic_send_crq(adapter, &crq);
4062 
4063 	crq.query_capability.capability =
4064 	    cpu_to_be16(MIN_TX_ENTRIES_PER_SUBCRQ);
4065 	atomic_inc(&adapter->running_cap_crqs);
4066 	ibmvnic_send_crq(adapter, &crq);
4067 
4068 	crq.query_capability.capability =
4069 	    cpu_to_be16(MIN_RX_ADD_ENTRIES_PER_SUBCRQ);
4070 	atomic_inc(&adapter->running_cap_crqs);
4071 	ibmvnic_send_crq(adapter, &crq);
4072 
4073 	crq.query_capability.capability =
4074 	    cpu_to_be16(MAX_TX_ENTRIES_PER_SUBCRQ);
4075 	atomic_inc(&adapter->running_cap_crqs);
4076 	ibmvnic_send_crq(adapter, &crq);
4077 
4078 	crq.query_capability.capability =
4079 	    cpu_to_be16(MAX_RX_ADD_ENTRIES_PER_SUBCRQ);
4080 	atomic_inc(&adapter->running_cap_crqs);
4081 	ibmvnic_send_crq(adapter, &crq);
4082 
4083 	crq.query_capability.capability = cpu_to_be16(TCP_IP_OFFLOAD);
4084 	atomic_inc(&adapter->running_cap_crqs);
4085 	ibmvnic_send_crq(adapter, &crq);
4086 
4087 	crq.query_capability.capability = cpu_to_be16(PROMISC_SUPPORTED);
4088 	atomic_inc(&adapter->running_cap_crqs);
4089 	ibmvnic_send_crq(adapter, &crq);
4090 
4091 	crq.query_capability.capability = cpu_to_be16(MIN_MTU);
4092 	atomic_inc(&adapter->running_cap_crqs);
4093 	ibmvnic_send_crq(adapter, &crq);
4094 
4095 	crq.query_capability.capability = cpu_to_be16(MAX_MTU);
4096 	atomic_inc(&adapter->running_cap_crqs);
4097 	ibmvnic_send_crq(adapter, &crq);
4098 
4099 	crq.query_capability.capability = cpu_to_be16(MAX_MULTICAST_FILTERS);
4100 	atomic_inc(&adapter->running_cap_crqs);
4101 	ibmvnic_send_crq(adapter, &crq);
4102 
4103 	crq.query_capability.capability = cpu_to_be16(VLAN_HEADER_INSERTION);
4104 	atomic_inc(&adapter->running_cap_crqs);
4105 	ibmvnic_send_crq(adapter, &crq);
4106 
4107 	crq.query_capability.capability = cpu_to_be16(RX_VLAN_HEADER_INSERTION);
4108 	atomic_inc(&adapter->running_cap_crqs);
4109 	ibmvnic_send_crq(adapter, &crq);
4110 
4111 	crq.query_capability.capability = cpu_to_be16(MAX_TX_SG_ENTRIES);
4112 	atomic_inc(&adapter->running_cap_crqs);
4113 	ibmvnic_send_crq(adapter, &crq);
4114 
4115 	crq.query_capability.capability = cpu_to_be16(RX_SG_SUPPORTED);
4116 	atomic_inc(&adapter->running_cap_crqs);
4117 	ibmvnic_send_crq(adapter, &crq);
4118 
4119 	crq.query_capability.capability = cpu_to_be16(OPT_TX_COMP_SUB_QUEUES);
4120 	atomic_inc(&adapter->running_cap_crqs);
4121 	ibmvnic_send_crq(adapter, &crq);
4122 
4123 	crq.query_capability.capability = cpu_to_be16(OPT_RX_COMP_QUEUES);
4124 	atomic_inc(&adapter->running_cap_crqs);
4125 	ibmvnic_send_crq(adapter, &crq);
4126 
4127 	crq.query_capability.capability =
4128 			cpu_to_be16(OPT_RX_BUFADD_Q_PER_RX_COMP_Q);
4129 	atomic_inc(&adapter->running_cap_crqs);
4130 	ibmvnic_send_crq(adapter, &crq);
4131 
4132 	crq.query_capability.capability =
4133 			cpu_to_be16(OPT_TX_ENTRIES_PER_SUBCRQ);
4134 	atomic_inc(&adapter->running_cap_crqs);
4135 	ibmvnic_send_crq(adapter, &crq);
4136 
4137 	crq.query_capability.capability =
4138 			cpu_to_be16(OPT_RXBA_ENTRIES_PER_SUBCRQ);
4139 	atomic_inc(&adapter->running_cap_crqs);
4140 	ibmvnic_send_crq(adapter, &crq);
4141 
4142 	crq.query_capability.capability = cpu_to_be16(TX_RX_DESC_REQ);
4143 	atomic_inc(&adapter->running_cap_crqs);
4144 	ibmvnic_send_crq(adapter, &crq);
4145 }
4146 
4147 static void send_query_ip_offload(struct ibmvnic_adapter *adapter)
4148 {
4149 	int buf_sz = sizeof(struct ibmvnic_query_ip_offload_buffer);
4150 	struct device *dev = &adapter->vdev->dev;
4151 	union ibmvnic_crq crq;
4152 
4153 	adapter->ip_offload_tok =
4154 		dma_map_single(dev,
4155 			       &adapter->ip_offload_buf,
4156 			       buf_sz,
4157 			       DMA_FROM_DEVICE);
4158 
4159 	if (dma_mapping_error(dev, adapter->ip_offload_tok)) {
4160 		if (!firmware_has_feature(FW_FEATURE_CMO))
4161 			dev_err(dev, "Couldn't map offload buffer\n");
4162 		return;
4163 	}
4164 
4165 	memset(&crq, 0, sizeof(crq));
4166 	crq.query_ip_offload.first = IBMVNIC_CRQ_CMD;
4167 	crq.query_ip_offload.cmd = QUERY_IP_OFFLOAD;
4168 	crq.query_ip_offload.len = cpu_to_be32(buf_sz);
4169 	crq.query_ip_offload.ioba =
4170 	    cpu_to_be32(adapter->ip_offload_tok);
4171 
4172 	ibmvnic_send_crq(adapter, &crq);
4173 }
4174 
4175 static void send_control_ip_offload(struct ibmvnic_adapter *adapter)
4176 {
4177 	struct ibmvnic_control_ip_offload_buffer *ctrl_buf = &adapter->ip_offload_ctrl;
4178 	struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
4179 	struct device *dev = &adapter->vdev->dev;
4180 	netdev_features_t old_hw_features = 0;
4181 	union ibmvnic_crq crq;
4182 
4183 	adapter->ip_offload_ctrl_tok =
4184 		dma_map_single(dev,
4185 			       ctrl_buf,
4186 			       sizeof(adapter->ip_offload_ctrl),
4187 			       DMA_TO_DEVICE);
4188 
4189 	if (dma_mapping_error(dev, adapter->ip_offload_ctrl_tok)) {
4190 		dev_err(dev, "Couldn't map ip offload control buffer\n");
4191 		return;
4192 	}
4193 
4194 	ctrl_buf->len = cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
4195 	ctrl_buf->version = cpu_to_be32(INITIAL_VERSION_IOB);
4196 	ctrl_buf->ipv4_chksum = buf->ipv4_chksum;
4197 	ctrl_buf->ipv6_chksum = buf->ipv6_chksum;
4198 	ctrl_buf->tcp_ipv4_chksum = buf->tcp_ipv4_chksum;
4199 	ctrl_buf->udp_ipv4_chksum = buf->udp_ipv4_chksum;
4200 	ctrl_buf->tcp_ipv6_chksum = buf->tcp_ipv6_chksum;
4201 	ctrl_buf->udp_ipv6_chksum = buf->udp_ipv6_chksum;
4202 	ctrl_buf->large_tx_ipv4 = buf->large_tx_ipv4;
4203 	ctrl_buf->large_tx_ipv6 = buf->large_tx_ipv6;
4204 
4205 	/* large_rx disabled for now, additional features needed */
4206 	ctrl_buf->large_rx_ipv4 = 0;
4207 	ctrl_buf->large_rx_ipv6 = 0;
4208 
4209 	if (adapter->state != VNIC_PROBING) {
4210 		old_hw_features = adapter->netdev->hw_features;
4211 		adapter->netdev->hw_features = 0;
4212 	}
4213 
4214 	adapter->netdev->hw_features = NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO;
4215 
4216 	if (buf->tcp_ipv4_chksum || buf->udp_ipv4_chksum)
4217 		adapter->netdev->hw_features |= NETIF_F_IP_CSUM;
4218 
4219 	if (buf->tcp_ipv6_chksum || buf->udp_ipv6_chksum)
4220 		adapter->netdev->hw_features |= NETIF_F_IPV6_CSUM;
4221 
4222 	if ((adapter->netdev->features &
4223 	    (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)))
4224 		adapter->netdev->hw_features |= NETIF_F_RXCSUM;
4225 
4226 	if (buf->large_tx_ipv4)
4227 		adapter->netdev->hw_features |= NETIF_F_TSO;
4228 	if (buf->large_tx_ipv6)
4229 		adapter->netdev->hw_features |= NETIF_F_TSO6;
4230 
4231 	if (adapter->state == VNIC_PROBING) {
4232 		adapter->netdev->features |= adapter->netdev->hw_features;
4233 	} else if (old_hw_features != adapter->netdev->hw_features) {
4234 		netdev_features_t tmp = 0;
4235 
4236 		/* disable features no longer supported */
4237 		adapter->netdev->features &= adapter->netdev->hw_features;
4238 		/* turn on features now supported if previously enabled */
4239 		tmp = (old_hw_features ^ adapter->netdev->hw_features) &
4240 			adapter->netdev->hw_features;
4241 		adapter->netdev->features |=
4242 				tmp & adapter->netdev->wanted_features;
4243 	}
4244 
4245 	memset(&crq, 0, sizeof(crq));
4246 	crq.control_ip_offload.first = IBMVNIC_CRQ_CMD;
4247 	crq.control_ip_offload.cmd = CONTROL_IP_OFFLOAD;
4248 	crq.control_ip_offload.len =
4249 	    cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
4250 	crq.control_ip_offload.ioba = cpu_to_be32(adapter->ip_offload_ctrl_tok);
4251 	ibmvnic_send_crq(adapter, &crq);
4252 }
4253 
4254 static void handle_vpd_size_rsp(union ibmvnic_crq *crq,
4255 				struct ibmvnic_adapter *adapter)
4256 {
4257 	struct device *dev = &adapter->vdev->dev;
4258 
4259 	if (crq->get_vpd_size_rsp.rc.code) {
4260 		dev_err(dev, "Error retrieving VPD size, rc=%x\n",
4261 			crq->get_vpd_size_rsp.rc.code);
4262 		complete(&adapter->fw_done);
4263 		return;
4264 	}
4265 
4266 	adapter->vpd->len = be64_to_cpu(crq->get_vpd_size_rsp.len);
4267 	complete(&adapter->fw_done);
4268 }
4269 
4270 static void handle_vpd_rsp(union ibmvnic_crq *crq,
4271 			   struct ibmvnic_adapter *adapter)
4272 {
4273 	struct device *dev = &adapter->vdev->dev;
4274 	unsigned char *substr = NULL;
4275 	u8 fw_level_len = 0;
4276 
4277 	memset(adapter->fw_version, 0, 32);
4278 
4279 	dma_unmap_single(dev, adapter->vpd->dma_addr, adapter->vpd->len,
4280 			 DMA_FROM_DEVICE);
4281 
4282 	if (crq->get_vpd_rsp.rc.code) {
4283 		dev_err(dev, "Error retrieving VPD from device, rc=%x\n",
4284 			crq->get_vpd_rsp.rc.code);
4285 		goto complete;
4286 	}
4287 
4288 	/* get the position of the firmware version info
4289 	 * located after the ASCII 'RM' substring in the buffer
4290 	 */
4291 	substr = strnstr(adapter->vpd->buff, "RM", adapter->vpd->len);
4292 	if (!substr) {
4293 		dev_info(dev, "Warning - No FW level has been provided in the VPD buffer by the VIOS Server\n");
4294 		goto complete;
4295 	}
4296 
4297 	/* get length of firmware level ASCII substring */
4298 	if ((substr + 2) < (adapter->vpd->buff + adapter->vpd->len)) {
4299 		fw_level_len = *(substr + 2);
4300 	} else {
4301 		dev_info(dev, "Length of FW substr extrapolated VDP buff\n");
4302 		goto complete;
4303 	}
4304 
4305 	/* copy firmware version string from vpd into adapter */
4306 	if ((substr + 3 + fw_level_len) <
4307 	    (adapter->vpd->buff + adapter->vpd->len)) {
4308 		strncpy((char *)adapter->fw_version, substr + 3, fw_level_len);
4309 	} else {
4310 		dev_info(dev, "FW substr extrapolated VPD buff\n");
4311 	}
4312 
4313 complete:
4314 	if (adapter->fw_version[0] == '\0')
4315 		strncpy((char *)adapter->fw_version, "N/A", 3 * sizeof(char));
4316 	complete(&adapter->fw_done);
4317 }
4318 
4319 static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
4320 {
4321 	struct device *dev = &adapter->vdev->dev;
4322 	struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
4323 	int i;
4324 
4325 	dma_unmap_single(dev, adapter->ip_offload_tok,
4326 			 sizeof(adapter->ip_offload_buf), DMA_FROM_DEVICE);
4327 
4328 	netdev_dbg(adapter->netdev, "Query IP Offload Buffer:\n");
4329 	for (i = 0; i < (sizeof(adapter->ip_offload_buf) - 1) / 8 + 1; i++)
4330 		netdev_dbg(adapter->netdev, "%016lx\n",
4331 			   ((unsigned long int *)(buf))[i]);
4332 
4333 	netdev_dbg(adapter->netdev, "ipv4_chksum = %d\n", buf->ipv4_chksum);
4334 	netdev_dbg(adapter->netdev, "ipv6_chksum = %d\n", buf->ipv6_chksum);
4335 	netdev_dbg(adapter->netdev, "tcp_ipv4_chksum = %d\n",
4336 		   buf->tcp_ipv4_chksum);
4337 	netdev_dbg(adapter->netdev, "tcp_ipv6_chksum = %d\n",
4338 		   buf->tcp_ipv6_chksum);
4339 	netdev_dbg(adapter->netdev, "udp_ipv4_chksum = %d\n",
4340 		   buf->udp_ipv4_chksum);
4341 	netdev_dbg(adapter->netdev, "udp_ipv6_chksum = %d\n",
4342 		   buf->udp_ipv6_chksum);
4343 	netdev_dbg(adapter->netdev, "large_tx_ipv4 = %d\n",
4344 		   buf->large_tx_ipv4);
4345 	netdev_dbg(adapter->netdev, "large_tx_ipv6 = %d\n",
4346 		   buf->large_tx_ipv6);
4347 	netdev_dbg(adapter->netdev, "large_rx_ipv4 = %d\n",
4348 		   buf->large_rx_ipv4);
4349 	netdev_dbg(adapter->netdev, "large_rx_ipv6 = %d\n",
4350 		   buf->large_rx_ipv6);
4351 	netdev_dbg(adapter->netdev, "max_ipv4_hdr_sz = %d\n",
4352 		   buf->max_ipv4_header_size);
4353 	netdev_dbg(adapter->netdev, "max_ipv6_hdr_sz = %d\n",
4354 		   buf->max_ipv6_header_size);
4355 	netdev_dbg(adapter->netdev, "max_tcp_hdr_size = %d\n",
4356 		   buf->max_tcp_header_size);
4357 	netdev_dbg(adapter->netdev, "max_udp_hdr_size = %d\n",
4358 		   buf->max_udp_header_size);
4359 	netdev_dbg(adapter->netdev, "max_large_tx_size = %d\n",
4360 		   buf->max_large_tx_size);
4361 	netdev_dbg(adapter->netdev, "max_large_rx_size = %d\n",
4362 		   buf->max_large_rx_size);
4363 	netdev_dbg(adapter->netdev, "ipv6_ext_hdr = %d\n",
4364 		   buf->ipv6_extension_header);
4365 	netdev_dbg(adapter->netdev, "tcp_pseudosum_req = %d\n",
4366 		   buf->tcp_pseudosum_req);
4367 	netdev_dbg(adapter->netdev, "num_ipv6_ext_hd = %d\n",
4368 		   buf->num_ipv6_ext_headers);
4369 	netdev_dbg(adapter->netdev, "off_ipv6_ext_hd = %d\n",
4370 		   buf->off_ipv6_ext_headers);
4371 
4372 	send_control_ip_offload(adapter);
4373 }
4374 
4375 static const char *ibmvnic_fw_err_cause(u16 cause)
4376 {
4377 	switch (cause) {
4378 	case ADAPTER_PROBLEM:
4379 		return "adapter problem";
4380 	case BUS_PROBLEM:
4381 		return "bus problem";
4382 	case FW_PROBLEM:
4383 		return "firmware problem";
4384 	case DD_PROBLEM:
4385 		return "device driver problem";
4386 	case EEH_RECOVERY:
4387 		return "EEH recovery";
4388 	case FW_UPDATED:
4389 		return "firmware updated";
4390 	case LOW_MEMORY:
4391 		return "low Memory";
4392 	default:
4393 		return "unknown";
4394 	}
4395 }
4396 
4397 static void handle_error_indication(union ibmvnic_crq *crq,
4398 				    struct ibmvnic_adapter *adapter)
4399 {
4400 	struct device *dev = &adapter->vdev->dev;
4401 	u16 cause;
4402 
4403 	cause = be16_to_cpu(crq->error_indication.error_cause);
4404 
4405 	dev_warn_ratelimited(dev,
4406 			     "Firmware reports %serror, cause: %s. Starting recovery...\n",
4407 			     crq->error_indication.flags
4408 				& IBMVNIC_FATAL_ERROR ? "FATAL " : "",
4409 			     ibmvnic_fw_err_cause(cause));
4410 
4411 	if (crq->error_indication.flags & IBMVNIC_FATAL_ERROR)
4412 		ibmvnic_reset(adapter, VNIC_RESET_FATAL);
4413 	else
4414 		ibmvnic_reset(adapter, VNIC_RESET_NON_FATAL);
4415 }
4416 
4417 static int handle_change_mac_rsp(union ibmvnic_crq *crq,
4418 				 struct ibmvnic_adapter *adapter)
4419 {
4420 	struct net_device *netdev = adapter->netdev;
4421 	struct device *dev = &adapter->vdev->dev;
4422 	long rc;
4423 
4424 	rc = crq->change_mac_addr_rsp.rc.code;
4425 	if (rc) {
4426 		dev_err(dev, "Error %ld in CHANGE_MAC_ADDR_RSP\n", rc);
4427 		goto out;
4428 	}
4429 	/* crq->change_mac_addr.mac_addr is the requested one
4430 	 * crq->change_mac_addr_rsp.mac_addr is the returned valid one.
4431 	 */
4432 	ether_addr_copy(netdev->dev_addr,
4433 			&crq->change_mac_addr_rsp.mac_addr[0]);
4434 	ether_addr_copy(adapter->mac_addr,
4435 			&crq->change_mac_addr_rsp.mac_addr[0]);
4436 out:
4437 	complete(&adapter->fw_done);
4438 	return rc;
4439 }
4440 
4441 static void handle_request_cap_rsp(union ibmvnic_crq *crq,
4442 				   struct ibmvnic_adapter *adapter)
4443 {
4444 	struct device *dev = &adapter->vdev->dev;
4445 	u64 *req_value;
4446 	char *name;
4447 
4448 	atomic_dec(&adapter->running_cap_crqs);
4449 	switch (be16_to_cpu(crq->request_capability_rsp.capability)) {
4450 	case REQ_TX_QUEUES:
4451 		req_value = &adapter->req_tx_queues;
4452 		name = "tx";
4453 		break;
4454 	case REQ_RX_QUEUES:
4455 		req_value = &adapter->req_rx_queues;
4456 		name = "rx";
4457 		break;
4458 	case REQ_RX_ADD_QUEUES:
4459 		req_value = &adapter->req_rx_add_queues;
4460 		name = "rx_add";
4461 		break;
4462 	case REQ_TX_ENTRIES_PER_SUBCRQ:
4463 		req_value = &adapter->req_tx_entries_per_subcrq;
4464 		name = "tx_entries_per_subcrq";
4465 		break;
4466 	case REQ_RX_ADD_ENTRIES_PER_SUBCRQ:
4467 		req_value = &adapter->req_rx_add_entries_per_subcrq;
4468 		name = "rx_add_entries_per_subcrq";
4469 		break;
4470 	case REQ_MTU:
4471 		req_value = &adapter->req_mtu;
4472 		name = "mtu";
4473 		break;
4474 	case PROMISC_REQUESTED:
4475 		req_value = &adapter->promisc;
4476 		name = "promisc";
4477 		break;
4478 	default:
4479 		dev_err(dev, "Got invalid cap request rsp %d\n",
4480 			crq->request_capability.capability);
4481 		return;
4482 	}
4483 
4484 	switch (crq->request_capability_rsp.rc.code) {
4485 	case SUCCESS:
4486 		break;
4487 	case PARTIALSUCCESS:
4488 		dev_info(dev, "req=%lld, rsp=%ld in %s queue, retrying.\n",
4489 			 *req_value,
4490 			 (long int)be64_to_cpu(crq->request_capability_rsp.
4491 					       number), name);
4492 
4493 		if (be16_to_cpu(crq->request_capability_rsp.capability) ==
4494 		    REQ_MTU) {
4495 			pr_err("mtu of %llu is not supported. Reverting.\n",
4496 			       *req_value);
4497 			*req_value = adapter->fallback.mtu;
4498 		} else {
4499 			*req_value =
4500 				be64_to_cpu(crq->request_capability_rsp.number);
4501 		}
4502 
4503 		send_request_cap(adapter, 1);
4504 		return;
4505 	default:
4506 		dev_err(dev, "Error %d in request cap rsp\n",
4507 			crq->request_capability_rsp.rc.code);
4508 		return;
4509 	}
4510 
4511 	/* Done receiving requested capabilities, query IP offload support */
4512 	if (atomic_read(&adapter->running_cap_crqs) == 0) {
4513 		adapter->wait_capability = false;
4514 		send_query_ip_offload(adapter);
4515 	}
4516 }
4517 
4518 static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
4519 			    struct ibmvnic_adapter *adapter)
4520 {
4521 	struct device *dev = &adapter->vdev->dev;
4522 	struct net_device *netdev = adapter->netdev;
4523 	struct ibmvnic_login_rsp_buffer *login_rsp = adapter->login_rsp_buf;
4524 	struct ibmvnic_login_buffer *login = adapter->login_buf;
4525 	u64 *tx_handle_array;
4526 	u64 *rx_handle_array;
4527 	int num_tx_pools;
4528 	int num_rx_pools;
4529 	u64 *size_array;
4530 	int i;
4531 
4532 	/* CHECK: Test/set of login_pending does not need to be atomic
4533 	 * because only ibmvnic_tasklet tests/clears this.
4534 	 */
4535 	if (!adapter->login_pending) {
4536 		netdev_warn(netdev, "Ignoring unexpected login response\n");
4537 		return 0;
4538 	}
4539 	adapter->login_pending = false;
4540 
4541 	dma_unmap_single(dev, adapter->login_buf_token, adapter->login_buf_sz,
4542 			 DMA_TO_DEVICE);
4543 	dma_unmap_single(dev, adapter->login_rsp_buf_token,
4544 			 adapter->login_rsp_buf_sz, DMA_FROM_DEVICE);
4545 
4546 	/* If the number of queues requested can't be allocated by the
4547 	 * server, the login response will return with code 1. We will need
4548 	 * to resend the login buffer with fewer queues requested.
4549 	 */
4550 	if (login_rsp_crq->generic.rc.code) {
4551 		adapter->init_done_rc = login_rsp_crq->generic.rc.code;
4552 		complete(&adapter->init_done);
4553 		return 0;
4554 	}
4555 
4556 	netdev->mtu = adapter->req_mtu - ETH_HLEN;
4557 
4558 	netdev_dbg(adapter->netdev, "Login Response Buffer:\n");
4559 	for (i = 0; i < (adapter->login_rsp_buf_sz - 1) / 8 + 1; i++) {
4560 		netdev_dbg(adapter->netdev, "%016lx\n",
4561 			   ((unsigned long int *)(adapter->login_rsp_buf))[i]);
4562 	}
4563 
4564 	/* Sanity checks */
4565 	if (login->num_txcomp_subcrqs != login_rsp->num_txsubm_subcrqs ||
4566 	    (be32_to_cpu(login->num_rxcomp_subcrqs) *
4567 	     adapter->req_rx_add_queues !=
4568 	     be32_to_cpu(login_rsp->num_rxadd_subcrqs))) {
4569 		dev_err(dev, "FATAL: Inconsistent login and login rsp\n");
4570 		ibmvnic_reset(adapter, VNIC_RESET_FATAL);
4571 		return -EIO;
4572 	}
4573 	size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
4574 		be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));
4575 	/* variable buffer sizes are not supported, so just read the
4576 	 * first entry.
4577 	 */
4578 	adapter->cur_rx_buf_sz = be64_to_cpu(size_array[0]);
4579 
4580 	num_tx_pools = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
4581 	num_rx_pools = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
4582 
4583 	tx_handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
4584 				  be32_to_cpu(adapter->login_rsp_buf->off_txsubm_subcrqs));
4585 	rx_handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
4586 				  be32_to_cpu(adapter->login_rsp_buf->off_rxadd_subcrqs));
4587 
4588 	for (i = 0; i < num_tx_pools; i++)
4589 		adapter->tx_scrq[i]->handle = tx_handle_array[i];
4590 
4591 	for (i = 0; i < num_rx_pools; i++)
4592 		adapter->rx_scrq[i]->handle = rx_handle_array[i];
4593 
4594 	adapter->num_active_tx_scrqs = num_tx_pools;
4595 	adapter->num_active_rx_scrqs = num_rx_pools;
4596 	release_login_rsp_buffer(adapter);
4597 	release_login_buffer(adapter);
4598 	complete(&adapter->init_done);
4599 
4600 	return 0;
4601 }
4602 
4603 static void handle_request_unmap_rsp(union ibmvnic_crq *crq,
4604 				     struct ibmvnic_adapter *adapter)
4605 {
4606 	struct device *dev = &adapter->vdev->dev;
4607 	long rc;
4608 
4609 	rc = crq->request_unmap_rsp.rc.code;
4610 	if (rc)
4611 		dev_err(dev, "Error %ld in REQUEST_UNMAP_RSP\n", rc);
4612 }
4613 
4614 static void handle_query_map_rsp(union ibmvnic_crq *crq,
4615 				 struct ibmvnic_adapter *adapter)
4616 {
4617 	struct net_device *netdev = adapter->netdev;
4618 	struct device *dev = &adapter->vdev->dev;
4619 	long rc;
4620 
4621 	rc = crq->query_map_rsp.rc.code;
4622 	if (rc) {
4623 		dev_err(dev, "Error %ld in QUERY_MAP_RSP\n", rc);
4624 		return;
4625 	}
4626 	netdev_dbg(netdev, "page_size = %d\ntot_pages = %d\nfree_pages = %d\n",
4627 		   crq->query_map_rsp.page_size, crq->query_map_rsp.tot_pages,
4628 		   crq->query_map_rsp.free_pages);
4629 }
4630 
4631 static void handle_query_cap_rsp(union ibmvnic_crq *crq,
4632 				 struct ibmvnic_adapter *adapter)
4633 {
4634 	struct net_device *netdev = adapter->netdev;
4635 	struct device *dev = &adapter->vdev->dev;
4636 	long rc;
4637 
4638 	atomic_dec(&adapter->running_cap_crqs);
4639 	netdev_dbg(netdev, "Outstanding queries: %d\n",
4640 		   atomic_read(&adapter->running_cap_crqs));
4641 	rc = crq->query_capability.rc.code;
4642 	if (rc) {
4643 		dev_err(dev, "Error %ld in QUERY_CAP_RSP\n", rc);
4644 		goto out;
4645 	}
4646 
4647 	switch (be16_to_cpu(crq->query_capability.capability)) {
4648 	case MIN_TX_QUEUES:
4649 		adapter->min_tx_queues =
4650 		    be64_to_cpu(crq->query_capability.number);
4651 		netdev_dbg(netdev, "min_tx_queues = %lld\n",
4652 			   adapter->min_tx_queues);
4653 		break;
4654 	case MIN_RX_QUEUES:
4655 		adapter->min_rx_queues =
4656 		    be64_to_cpu(crq->query_capability.number);
4657 		netdev_dbg(netdev, "min_rx_queues = %lld\n",
4658 			   adapter->min_rx_queues);
4659 		break;
4660 	case MIN_RX_ADD_QUEUES:
4661 		adapter->min_rx_add_queues =
4662 		    be64_to_cpu(crq->query_capability.number);
4663 		netdev_dbg(netdev, "min_rx_add_queues = %lld\n",
4664 			   adapter->min_rx_add_queues);
4665 		break;
4666 	case MAX_TX_QUEUES:
4667 		adapter->max_tx_queues =
4668 		    be64_to_cpu(crq->query_capability.number);
4669 		netdev_dbg(netdev, "max_tx_queues = %lld\n",
4670 			   adapter->max_tx_queues);
4671 		break;
4672 	case MAX_RX_QUEUES:
4673 		adapter->max_rx_queues =
4674 		    be64_to_cpu(crq->query_capability.number);
4675 		netdev_dbg(netdev, "max_rx_queues = %lld\n",
4676 			   adapter->max_rx_queues);
4677 		break;
4678 	case MAX_RX_ADD_QUEUES:
4679 		adapter->max_rx_add_queues =
4680 		    be64_to_cpu(crq->query_capability.number);
4681 		netdev_dbg(netdev, "max_rx_add_queues = %lld\n",
4682 			   adapter->max_rx_add_queues);
4683 		break;
4684 	case MIN_TX_ENTRIES_PER_SUBCRQ:
4685 		adapter->min_tx_entries_per_subcrq =
4686 		    be64_to_cpu(crq->query_capability.number);
4687 		netdev_dbg(netdev, "min_tx_entries_per_subcrq = %lld\n",
4688 			   adapter->min_tx_entries_per_subcrq);
4689 		break;
4690 	case MIN_RX_ADD_ENTRIES_PER_SUBCRQ:
4691 		adapter->min_rx_add_entries_per_subcrq =
4692 		    be64_to_cpu(crq->query_capability.number);
4693 		netdev_dbg(netdev, "min_rx_add_entrs_per_subcrq = %lld\n",
4694 			   adapter->min_rx_add_entries_per_subcrq);
4695 		break;
4696 	case MAX_TX_ENTRIES_PER_SUBCRQ:
4697 		adapter->max_tx_entries_per_subcrq =
4698 		    be64_to_cpu(crq->query_capability.number);
4699 		netdev_dbg(netdev, "max_tx_entries_per_subcrq = %lld\n",
4700 			   adapter->max_tx_entries_per_subcrq);
4701 		break;
4702 	case MAX_RX_ADD_ENTRIES_PER_SUBCRQ:
4703 		adapter->max_rx_add_entries_per_subcrq =
4704 		    be64_to_cpu(crq->query_capability.number);
4705 		netdev_dbg(netdev, "max_rx_add_entrs_per_subcrq = %lld\n",
4706 			   adapter->max_rx_add_entries_per_subcrq);
4707 		break;
4708 	case TCP_IP_OFFLOAD:
4709 		adapter->tcp_ip_offload =
4710 		    be64_to_cpu(crq->query_capability.number);
4711 		netdev_dbg(netdev, "tcp_ip_offload = %lld\n",
4712 			   adapter->tcp_ip_offload);
4713 		break;
4714 	case PROMISC_SUPPORTED:
4715 		adapter->promisc_supported =
4716 		    be64_to_cpu(crq->query_capability.number);
4717 		netdev_dbg(netdev, "promisc_supported = %lld\n",
4718 			   adapter->promisc_supported);
4719 		break;
4720 	case MIN_MTU:
4721 		adapter->min_mtu = be64_to_cpu(crq->query_capability.number);
4722 		netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
4723 		netdev_dbg(netdev, "min_mtu = %lld\n", adapter->min_mtu);
4724 		break;
4725 	case MAX_MTU:
4726 		adapter->max_mtu = be64_to_cpu(crq->query_capability.number);
4727 		netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
4728 		netdev_dbg(netdev, "max_mtu = %lld\n", adapter->max_mtu);
4729 		break;
4730 	case MAX_MULTICAST_FILTERS:
4731 		adapter->max_multicast_filters =
4732 		    be64_to_cpu(crq->query_capability.number);
4733 		netdev_dbg(netdev, "max_multicast_filters = %lld\n",
4734 			   adapter->max_multicast_filters);
4735 		break;
4736 	case VLAN_HEADER_INSERTION:
4737 		adapter->vlan_header_insertion =
4738 		    be64_to_cpu(crq->query_capability.number);
4739 		if (adapter->vlan_header_insertion)
4740 			netdev->features |= NETIF_F_HW_VLAN_STAG_TX;
4741 		netdev_dbg(netdev, "vlan_header_insertion = %lld\n",
4742 			   adapter->vlan_header_insertion);
4743 		break;
4744 	case RX_VLAN_HEADER_INSERTION:
4745 		adapter->rx_vlan_header_insertion =
4746 		    be64_to_cpu(crq->query_capability.number);
4747 		netdev_dbg(netdev, "rx_vlan_header_insertion = %lld\n",
4748 			   adapter->rx_vlan_header_insertion);
4749 		break;
4750 	case MAX_TX_SG_ENTRIES:
4751 		adapter->max_tx_sg_entries =
4752 		    be64_to_cpu(crq->query_capability.number);
4753 		netdev_dbg(netdev, "max_tx_sg_entries = %lld\n",
4754 			   adapter->max_tx_sg_entries);
4755 		break;
4756 	case RX_SG_SUPPORTED:
4757 		adapter->rx_sg_supported =
4758 		    be64_to_cpu(crq->query_capability.number);
4759 		netdev_dbg(netdev, "rx_sg_supported = %lld\n",
4760 			   adapter->rx_sg_supported);
4761 		break;
4762 	case OPT_TX_COMP_SUB_QUEUES:
4763 		adapter->opt_tx_comp_sub_queues =
4764 		    be64_to_cpu(crq->query_capability.number);
4765 		netdev_dbg(netdev, "opt_tx_comp_sub_queues = %lld\n",
4766 			   adapter->opt_tx_comp_sub_queues);
4767 		break;
4768 	case OPT_RX_COMP_QUEUES:
4769 		adapter->opt_rx_comp_queues =
4770 		    be64_to_cpu(crq->query_capability.number);
4771 		netdev_dbg(netdev, "opt_rx_comp_queues = %lld\n",
4772 			   adapter->opt_rx_comp_queues);
4773 		break;
4774 	case OPT_RX_BUFADD_Q_PER_RX_COMP_Q:
4775 		adapter->opt_rx_bufadd_q_per_rx_comp_q =
4776 		    be64_to_cpu(crq->query_capability.number);
4777 		netdev_dbg(netdev, "opt_rx_bufadd_q_per_rx_comp_q = %lld\n",
4778 			   adapter->opt_rx_bufadd_q_per_rx_comp_q);
4779 		break;
4780 	case OPT_TX_ENTRIES_PER_SUBCRQ:
4781 		adapter->opt_tx_entries_per_subcrq =
4782 		    be64_to_cpu(crq->query_capability.number);
4783 		netdev_dbg(netdev, "opt_tx_entries_per_subcrq = %lld\n",
4784 			   adapter->opt_tx_entries_per_subcrq);
4785 		break;
4786 	case OPT_RXBA_ENTRIES_PER_SUBCRQ:
4787 		adapter->opt_rxba_entries_per_subcrq =
4788 		    be64_to_cpu(crq->query_capability.number);
4789 		netdev_dbg(netdev, "opt_rxba_entries_per_subcrq = %lld\n",
4790 			   adapter->opt_rxba_entries_per_subcrq);
4791 		break;
4792 	case TX_RX_DESC_REQ:
4793 		adapter->tx_rx_desc_req = crq->query_capability.number;
4794 		netdev_dbg(netdev, "tx_rx_desc_req = %llx\n",
4795 			   adapter->tx_rx_desc_req);
4796 		break;
4797 
4798 	default:
4799 		netdev_err(netdev, "Got invalid cap rsp %d\n",
4800 			   crq->query_capability.capability);
4801 	}
4802 
4803 out:
4804 	if (atomic_read(&adapter->running_cap_crqs) == 0) {
4805 		adapter->wait_capability = false;
4806 		send_request_cap(adapter, 0);
4807 	}
4808 }
4809 
4810 static int send_query_phys_parms(struct ibmvnic_adapter *adapter)
4811 {
4812 	union ibmvnic_crq crq;
4813 	int rc;
4814 
4815 	memset(&crq, 0, sizeof(crq));
4816 	crq.query_phys_parms.first = IBMVNIC_CRQ_CMD;
4817 	crq.query_phys_parms.cmd = QUERY_PHYS_PARMS;
4818 
4819 	mutex_lock(&adapter->fw_lock);
4820 	adapter->fw_done_rc = 0;
4821 	reinit_completion(&adapter->fw_done);
4822 
4823 	rc = ibmvnic_send_crq(adapter, &crq);
4824 	if (rc) {
4825 		mutex_unlock(&adapter->fw_lock);
4826 		return rc;
4827 	}
4828 
4829 	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
4830 	if (rc) {
4831 		mutex_unlock(&adapter->fw_lock);
4832 		return rc;
4833 	}
4834 
4835 	mutex_unlock(&adapter->fw_lock);
4836 	return adapter->fw_done_rc ? -EIO : 0;
4837 }
4838 
4839 static int handle_query_phys_parms_rsp(union ibmvnic_crq *crq,
4840 				       struct ibmvnic_adapter *adapter)
4841 {
4842 	struct net_device *netdev = adapter->netdev;
4843 	int rc;
4844 	__be32 rspeed = cpu_to_be32(crq->query_phys_parms_rsp.speed);
4845 
4846 	rc = crq->query_phys_parms_rsp.rc.code;
4847 	if (rc) {
4848 		netdev_err(netdev, "Error %d in QUERY_PHYS_PARMS\n", rc);
4849 		return rc;
4850 	}
4851 	switch (rspeed) {
4852 	case IBMVNIC_10MBPS:
4853 		adapter->speed = SPEED_10;
4854 		break;
4855 	case IBMVNIC_100MBPS:
4856 		adapter->speed = SPEED_100;
4857 		break;
4858 	case IBMVNIC_1GBPS:
4859 		adapter->speed = SPEED_1000;
4860 		break;
4861 	case IBMVNIC_10GBPS:
4862 		adapter->speed = SPEED_10000;
4863 		break;
4864 	case IBMVNIC_25GBPS:
4865 		adapter->speed = SPEED_25000;
4866 		break;
4867 	case IBMVNIC_40GBPS:
4868 		adapter->speed = SPEED_40000;
4869 		break;
4870 	case IBMVNIC_50GBPS:
4871 		adapter->speed = SPEED_50000;
4872 		break;
4873 	case IBMVNIC_100GBPS:
4874 		adapter->speed = SPEED_100000;
4875 		break;
4876 	case IBMVNIC_200GBPS:
4877 		adapter->speed = SPEED_200000;
4878 		break;
4879 	default:
4880 		if (netif_carrier_ok(netdev))
4881 			netdev_warn(netdev, "Unknown speed 0x%08x\n", rspeed);
4882 		adapter->speed = SPEED_UNKNOWN;
4883 	}
4884 	if (crq->query_phys_parms_rsp.flags1 & IBMVNIC_FULL_DUPLEX)
4885 		adapter->duplex = DUPLEX_FULL;
4886 	else if (crq->query_phys_parms_rsp.flags1 & IBMVNIC_HALF_DUPLEX)
4887 		adapter->duplex = DUPLEX_HALF;
4888 	else
4889 		adapter->duplex = DUPLEX_UNKNOWN;
4890 
4891 	return rc;
4892 }
4893 
4894 static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
4895 			       struct ibmvnic_adapter *adapter)
4896 {
4897 	struct ibmvnic_generic_crq *gen_crq = &crq->generic;
4898 	struct net_device *netdev = adapter->netdev;
4899 	struct device *dev = &adapter->vdev->dev;
4900 	u64 *u64_crq = (u64 *)crq;
4901 	long rc;
4902 
4903 	netdev_dbg(netdev, "Handling CRQ: %016lx %016lx\n",
4904 		   (unsigned long int)cpu_to_be64(u64_crq[0]),
4905 		   (unsigned long int)cpu_to_be64(u64_crq[1]));
4906 	switch (gen_crq->first) {
4907 	case IBMVNIC_CRQ_INIT_RSP:
4908 		switch (gen_crq->cmd) {
4909 		case IBMVNIC_CRQ_INIT:
4910 			dev_info(dev, "Partner initialized\n");
4911 			adapter->from_passive_init = true;
4912 			/* Discard any stale login responses from prev reset.
4913 			 * CHECK: should we clear even on INIT_COMPLETE?
4914 			 */
4915 			adapter->login_pending = false;
4916 
4917 			if (!completion_done(&adapter->init_done)) {
4918 				complete(&adapter->init_done);
4919 				adapter->init_done_rc = -EIO;
4920 			}
4921 			rc = ibmvnic_reset(adapter, VNIC_RESET_FAILOVER);
4922 			if (rc && rc != -EBUSY) {
4923 				/* We were unable to schedule the failover
4924 				 * reset either because the adapter was still
4925 				 * probing (eg: during kexec) or we could not
4926 				 * allocate memory. Clear the failover_pending
4927 				 * flag since no one else will. We ignore
4928 				 * EBUSY because it means either FAILOVER reset
4929 				 * is already scheduled or the adapter is
4930 				 * being removed.
4931 				 */
4932 				netdev_err(netdev,
4933 					   "Error %ld scheduling failover reset\n",
4934 					   rc);
4935 				adapter->failover_pending = false;
4936 			}
4937 			break;
4938 		case IBMVNIC_CRQ_INIT_COMPLETE:
4939 			dev_info(dev, "Partner initialization complete\n");
4940 			adapter->crq.active = true;
4941 			send_version_xchg(adapter);
4942 			break;
4943 		default:
4944 			dev_err(dev, "Unknown crq cmd: %d\n", gen_crq->cmd);
4945 		}
4946 		return;
4947 	case IBMVNIC_CRQ_XPORT_EVENT:
4948 		netif_carrier_off(netdev);
4949 		adapter->crq.active = false;
4950 		/* terminate any thread waiting for a response
4951 		 * from the device
4952 		 */
4953 		if (!completion_done(&adapter->fw_done)) {
4954 			adapter->fw_done_rc = -EIO;
4955 			complete(&adapter->fw_done);
4956 		}
4957 		if (!completion_done(&adapter->stats_done))
4958 			complete(&adapter->stats_done);
4959 		if (test_bit(0, &adapter->resetting))
4960 			adapter->force_reset_recovery = true;
4961 		if (gen_crq->cmd == IBMVNIC_PARTITION_MIGRATED) {
4962 			dev_info(dev, "Migrated, re-enabling adapter\n");
4963 			ibmvnic_reset(adapter, VNIC_RESET_MOBILITY);
4964 		} else if (gen_crq->cmd == IBMVNIC_DEVICE_FAILOVER) {
4965 			dev_info(dev, "Backing device failover detected\n");
4966 			adapter->failover_pending = true;
4967 		} else {
4968 			/* The adapter lost the connection */
4969 			dev_err(dev, "Virtual Adapter failed (rc=%d)\n",
4970 				gen_crq->cmd);
4971 			ibmvnic_reset(adapter, VNIC_RESET_FATAL);
4972 		}
4973 		return;
4974 	case IBMVNIC_CRQ_CMD_RSP:
4975 		break;
4976 	default:
4977 		dev_err(dev, "Got an invalid msg type 0x%02x\n",
4978 			gen_crq->first);
4979 		return;
4980 	}
4981 
4982 	switch (gen_crq->cmd) {
4983 	case VERSION_EXCHANGE_RSP:
4984 		rc = crq->version_exchange_rsp.rc.code;
4985 		if (rc) {
4986 			dev_err(dev, "Error %ld in VERSION_EXCHG_RSP\n", rc);
4987 			break;
4988 		}
4989 		ibmvnic_version =
4990 			    be16_to_cpu(crq->version_exchange_rsp.version);
4991 		dev_info(dev, "Partner protocol version is %d\n",
4992 			 ibmvnic_version);
4993 		send_query_cap(adapter);
4994 		break;
4995 	case QUERY_CAPABILITY_RSP:
4996 		handle_query_cap_rsp(crq, adapter);
4997 		break;
4998 	case QUERY_MAP_RSP:
4999 		handle_query_map_rsp(crq, adapter);
5000 		break;
5001 	case REQUEST_MAP_RSP:
5002 		adapter->fw_done_rc = crq->request_map_rsp.rc.code;
5003 		complete(&adapter->fw_done);
5004 		break;
5005 	case REQUEST_UNMAP_RSP:
5006 		handle_request_unmap_rsp(crq, adapter);
5007 		break;
5008 	case REQUEST_CAPABILITY_RSP:
5009 		handle_request_cap_rsp(crq, adapter);
5010 		break;
5011 	case LOGIN_RSP:
5012 		netdev_dbg(netdev, "Got Login Response\n");
5013 		handle_login_rsp(crq, adapter);
5014 		break;
5015 	case LOGICAL_LINK_STATE_RSP:
5016 		netdev_dbg(netdev,
5017 			   "Got Logical Link State Response, state: %d rc: %d\n",
5018 			   crq->logical_link_state_rsp.link_state,
5019 			   crq->logical_link_state_rsp.rc.code);
5020 		adapter->logical_link_state =
5021 		    crq->logical_link_state_rsp.link_state;
5022 		adapter->init_done_rc = crq->logical_link_state_rsp.rc.code;
5023 		complete(&adapter->init_done);
5024 		break;
5025 	case LINK_STATE_INDICATION:
5026 		netdev_dbg(netdev, "Got Logical Link State Indication\n");
5027 		adapter->phys_link_state =
5028 		    crq->link_state_indication.phys_link_state;
5029 		adapter->logical_link_state =
5030 		    crq->link_state_indication.logical_link_state;
5031 		if (adapter->phys_link_state && adapter->logical_link_state)
5032 			netif_carrier_on(netdev);
5033 		else
5034 			netif_carrier_off(netdev);
5035 		break;
5036 	case CHANGE_MAC_ADDR_RSP:
5037 		netdev_dbg(netdev, "Got MAC address change Response\n");
5038 		adapter->fw_done_rc = handle_change_mac_rsp(crq, adapter);
5039 		break;
5040 	case ERROR_INDICATION:
5041 		netdev_dbg(netdev, "Got Error Indication\n");
5042 		handle_error_indication(crq, adapter);
5043 		break;
5044 	case REQUEST_STATISTICS_RSP:
5045 		netdev_dbg(netdev, "Got Statistics Response\n");
5046 		complete(&adapter->stats_done);
5047 		break;
5048 	case QUERY_IP_OFFLOAD_RSP:
5049 		netdev_dbg(netdev, "Got Query IP offload Response\n");
5050 		handle_query_ip_offload_rsp(adapter);
5051 		break;
5052 	case MULTICAST_CTRL_RSP:
5053 		netdev_dbg(netdev, "Got multicast control Response\n");
5054 		break;
5055 	case CONTROL_IP_OFFLOAD_RSP:
5056 		netdev_dbg(netdev, "Got Control IP offload Response\n");
5057 		dma_unmap_single(dev, adapter->ip_offload_ctrl_tok,
5058 				 sizeof(adapter->ip_offload_ctrl),
5059 				 DMA_TO_DEVICE);
5060 		complete(&adapter->init_done);
5061 		break;
5062 	case COLLECT_FW_TRACE_RSP:
5063 		netdev_dbg(netdev, "Got Collect firmware trace Response\n");
5064 		complete(&adapter->fw_done);
5065 		break;
5066 	case GET_VPD_SIZE_RSP:
5067 		handle_vpd_size_rsp(crq, adapter);
5068 		break;
5069 	case GET_VPD_RSP:
5070 		handle_vpd_rsp(crq, adapter);
5071 		break;
5072 	case QUERY_PHYS_PARMS_RSP:
5073 		adapter->fw_done_rc = handle_query_phys_parms_rsp(crq, adapter);
5074 		complete(&adapter->fw_done);
5075 		break;
5076 	default:
5077 		netdev_err(netdev, "Got an invalid cmd type 0x%02x\n",
5078 			   gen_crq->cmd);
5079 	}
5080 }
5081 
5082 static irqreturn_t ibmvnic_interrupt(int irq, void *instance)
5083 {
5084 	struct ibmvnic_adapter *adapter = instance;
5085 
5086 	tasklet_schedule(&adapter->tasklet);
5087 	return IRQ_HANDLED;
5088 }
5089 
5090 static void ibmvnic_tasklet(struct tasklet_struct *t)
5091 {
5092 	struct ibmvnic_adapter *adapter = from_tasklet(adapter, t, tasklet);
5093 	struct ibmvnic_crq_queue *queue = &adapter->crq;
5094 	union ibmvnic_crq *crq;
5095 	unsigned long flags;
5096 	bool done = false;
5097 
5098 	spin_lock_irqsave(&queue->lock, flags);
5099 	while (!done) {
5100 		/* Pull all the valid messages off the CRQ */
5101 		while ((crq = ibmvnic_next_crq(adapter)) != NULL) {
5102 			/* This barrier makes sure ibmvnic_next_crq()'s
5103 			 * crq->generic.first & IBMVNIC_CRQ_CMD_RSP is loaded
5104 			 * before ibmvnic_handle_crq()'s
5105 			 * switch(gen_crq->first) and switch(gen_crq->cmd).
5106 			 */
5107 			dma_rmb();
5108 			ibmvnic_handle_crq(crq, adapter);
5109 			crq->generic.first = 0;
5110 		}
5111 
5112 		/* remain in tasklet until all
5113 		 * capabilities responses are received
5114 		 */
5115 		if (!adapter->wait_capability)
5116 			done = true;
5117 	}
5118 	/* if capabilities CRQ's were sent in this tasklet, the following
5119 	 * tasklet must wait until all responses are received
5120 	 */
5121 	if (atomic_read(&adapter->running_cap_crqs) != 0)
5122 		adapter->wait_capability = true;
5123 	spin_unlock_irqrestore(&queue->lock, flags);
5124 }
5125 
5126 static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *adapter)
5127 {
5128 	struct vio_dev *vdev = adapter->vdev;
5129 	int rc;
5130 
5131 	do {
5132 		rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
5133 	} while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));
5134 
5135 	if (rc)
5136 		dev_err(&vdev->dev, "Error enabling adapter (rc=%d)\n", rc);
5137 
5138 	return rc;
5139 }
5140 
5141 static int ibmvnic_reset_crq(struct ibmvnic_adapter *adapter)
5142 {
5143 	struct ibmvnic_crq_queue *crq = &adapter->crq;
5144 	struct device *dev = &adapter->vdev->dev;
5145 	struct vio_dev *vdev = adapter->vdev;
5146 	int rc;
5147 
5148 	/* Close the CRQ */
5149 	do {
5150 		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
5151 	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
5152 
5153 	/* Clean out the queue */
5154 	if (!crq->msgs)
5155 		return -EINVAL;
5156 
5157 	memset(crq->msgs, 0, PAGE_SIZE);
5158 	crq->cur = 0;
5159 	crq->active = false;
5160 
5161 	/* And re-open it again */
5162 	rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
5163 				crq->msg_token, PAGE_SIZE);
5164 
5165 	if (rc == H_CLOSED)
5166 		/* Adapter is good, but other end is not ready */
5167 		dev_warn(dev, "Partner adapter not ready\n");
5168 	else if (rc != 0)
5169 		dev_warn(dev, "Couldn't register crq (rc=%d)\n", rc);
5170 
5171 	return rc;
5172 }
5173 
5174 static void release_crq_queue(struct ibmvnic_adapter *adapter)
5175 {
5176 	struct ibmvnic_crq_queue *crq = &adapter->crq;
5177 	struct vio_dev *vdev = adapter->vdev;
5178 	long rc;
5179 
5180 	if (!crq->msgs)
5181 		return;
5182 
5183 	netdev_dbg(adapter->netdev, "Releasing CRQ\n");
5184 	free_irq(vdev->irq, adapter);
5185 	tasklet_kill(&adapter->tasklet);
5186 	do {
5187 		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
5188 	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
5189 
5190 	dma_unmap_single(&vdev->dev, crq->msg_token, PAGE_SIZE,
5191 			 DMA_BIDIRECTIONAL);
5192 	free_page((unsigned long)crq->msgs);
5193 	crq->msgs = NULL;
5194 	crq->active = false;
5195 }
5196 
5197 static int init_crq_queue(struct ibmvnic_adapter *adapter)
5198 {
5199 	struct ibmvnic_crq_queue *crq = &adapter->crq;
5200 	struct device *dev = &adapter->vdev->dev;
5201 	struct vio_dev *vdev = adapter->vdev;
5202 	int rc, retrc = -ENOMEM;
5203 
5204 	if (crq->msgs)
5205 		return 0;
5206 
5207 	crq->msgs = (union ibmvnic_crq *)get_zeroed_page(GFP_KERNEL);
5208 	/* Should we allocate more than one page? */
5209 
5210 	if (!crq->msgs)
5211 		return -ENOMEM;
5212 
5213 	crq->size = PAGE_SIZE / sizeof(*crq->msgs);
5214 	crq->msg_token = dma_map_single(dev, crq->msgs, PAGE_SIZE,
5215 					DMA_BIDIRECTIONAL);
5216 	if (dma_mapping_error(dev, crq->msg_token))
5217 		goto map_failed;
5218 
5219 	rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
5220 				crq->msg_token, PAGE_SIZE);
5221 
5222 	if (rc == H_RESOURCE)
5223 		/* maybe kexecing and resource is busy. try a reset */
5224 		rc = ibmvnic_reset_crq(adapter);
5225 	retrc = rc;
5226 
5227 	if (rc == H_CLOSED) {
5228 		dev_warn(dev, "Partner adapter not ready\n");
5229 	} else if (rc) {
5230 		dev_warn(dev, "Error %d opening adapter\n", rc);
5231 		goto reg_crq_failed;
5232 	}
5233 
5234 	retrc = 0;
5235 
5236 	tasklet_setup(&adapter->tasklet, (void *)ibmvnic_tasklet);
5237 
5238 	netdev_dbg(adapter->netdev, "registering irq 0x%x\n", vdev->irq);
5239 	snprintf(crq->name, sizeof(crq->name), "ibmvnic-%x",
5240 		 adapter->vdev->unit_address);
5241 	rc = request_irq(vdev->irq, ibmvnic_interrupt, 0, crq->name, adapter);
5242 	if (rc) {
5243 		dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n",
5244 			vdev->irq, rc);
5245 		goto req_irq_failed;
5246 	}
5247 
5248 	rc = vio_enable_interrupts(vdev);
5249 	if (rc) {
5250 		dev_err(dev, "Error %d enabling interrupts\n", rc);
5251 		goto req_irq_failed;
5252 	}
5253 
5254 	crq->cur = 0;
5255 	spin_lock_init(&crq->lock);
5256 
5257 	return retrc;
5258 
5259 req_irq_failed:
5260 	tasklet_kill(&adapter->tasklet);
5261 	do {
5262 		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
5263 	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
5264 reg_crq_failed:
5265 	dma_unmap_single(dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
5266 map_failed:
5267 	free_page((unsigned long)crq->msgs);
5268 	crq->msgs = NULL;
5269 	return retrc;
5270 }
5271 
5272 static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter, bool reset)
5273 {
5274 	struct device *dev = &adapter->vdev->dev;
5275 	unsigned long timeout = msecs_to_jiffies(20000);
5276 	u64 old_num_rx_queues, old_num_tx_queues;
5277 	int rc;
5278 
5279 	adapter->from_passive_init = false;
5280 
5281 	if (reset) {
5282 		old_num_rx_queues = adapter->req_rx_queues;
5283 		old_num_tx_queues = adapter->req_tx_queues;
5284 		reinit_completion(&adapter->init_done);
5285 	}
5286 
5287 	adapter->init_done_rc = 0;
5288 	rc = ibmvnic_send_crq_init(adapter);
5289 	if (rc) {
5290 		dev_err(dev, "Send crq init failed with error %d\n", rc);
5291 		return rc;
5292 	}
5293 
5294 	if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
5295 		dev_err(dev, "Initialization sequence timed out\n");
5296 		return -1;
5297 	}
5298 
5299 	if (adapter->init_done_rc) {
5300 		release_crq_queue(adapter);
5301 		return adapter->init_done_rc;
5302 	}
5303 
5304 	if (adapter->from_passive_init) {
5305 		adapter->state = VNIC_OPEN;
5306 		adapter->from_passive_init = false;
5307 		return -1;
5308 	}
5309 
5310 	if (reset &&
5311 	    test_bit(0, &adapter->resetting) && !adapter->wait_for_reset &&
5312 	    adapter->reset_reason != VNIC_RESET_MOBILITY) {
5313 		if (adapter->req_rx_queues != old_num_rx_queues ||
5314 		    adapter->req_tx_queues != old_num_tx_queues) {
5315 			release_sub_crqs(adapter, 0);
5316 			rc = init_sub_crqs(adapter);
5317 		} else {
5318 			rc = reset_sub_crq_queues(adapter);
5319 		}
5320 	} else {
5321 		rc = init_sub_crqs(adapter);
5322 	}
5323 
5324 	if (rc) {
5325 		dev_err(dev, "Initialization of sub crqs failed\n");
5326 		release_crq_queue(adapter);
5327 		return rc;
5328 	}
5329 
5330 	rc = init_sub_crq_irqs(adapter);
5331 	if (rc) {
5332 		dev_err(dev, "Failed to initialize sub crq irqs\n");
5333 		release_crq_queue(adapter);
5334 	}
5335 
5336 	return rc;
5337 }
5338 
5339 static struct device_attribute dev_attr_failover;
5340 
5341 static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
5342 {
5343 	struct ibmvnic_adapter *adapter;
5344 	struct net_device *netdev;
5345 	unsigned char *mac_addr_p;
5346 	int rc;
5347 
5348 	dev_dbg(&dev->dev, "entering ibmvnic_probe for UA 0x%x\n",
5349 		dev->unit_address);
5350 
5351 	mac_addr_p = (unsigned char *)vio_get_attribute(dev,
5352 							VETH_MAC_ADDR, NULL);
5353 	if (!mac_addr_p) {
5354 		dev_err(&dev->dev,
5355 			"(%s:%3.3d) ERROR: Can't find MAC_ADDR attribute\n",
5356 			__FILE__, __LINE__);
5357 		return 0;
5358 	}
5359 
5360 	netdev = alloc_etherdev_mq(sizeof(struct ibmvnic_adapter),
5361 				   IBMVNIC_MAX_QUEUES);
5362 	if (!netdev)
5363 		return -ENOMEM;
5364 
5365 	adapter = netdev_priv(netdev);
5366 	adapter->state = VNIC_PROBING;
5367 	dev_set_drvdata(&dev->dev, netdev);
5368 	adapter->vdev = dev;
5369 	adapter->netdev = netdev;
5370 	adapter->login_pending = false;
5371 
5372 	ether_addr_copy(adapter->mac_addr, mac_addr_p);
5373 	ether_addr_copy(netdev->dev_addr, adapter->mac_addr);
5374 	netdev->irq = dev->irq;
5375 	netdev->netdev_ops = &ibmvnic_netdev_ops;
5376 	netdev->ethtool_ops = &ibmvnic_ethtool_ops;
5377 	SET_NETDEV_DEV(netdev, &dev->dev);
5378 
5379 	spin_lock_init(&adapter->stats_lock);
5380 
5381 	INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset);
5382 	INIT_DELAYED_WORK(&adapter->ibmvnic_delayed_reset,
5383 			  __ibmvnic_delayed_reset);
5384 	INIT_LIST_HEAD(&adapter->rwi_list);
5385 	spin_lock_init(&adapter->rwi_lock);
5386 	spin_lock_init(&adapter->state_lock);
5387 	mutex_init(&adapter->fw_lock);
5388 	init_completion(&adapter->init_done);
5389 	init_completion(&adapter->fw_done);
5390 	init_completion(&adapter->reset_done);
5391 	init_completion(&adapter->stats_done);
5392 	clear_bit(0, &adapter->resetting);
5393 
5394 	do {
5395 		rc = init_crq_queue(adapter);
5396 		if (rc) {
5397 			dev_err(&dev->dev, "Couldn't initialize crq. rc=%d\n",
5398 				rc);
5399 			goto ibmvnic_init_fail;
5400 		}
5401 
5402 		rc = ibmvnic_reset_init(adapter, false);
5403 		if (rc && rc != EAGAIN)
5404 			goto ibmvnic_init_fail;
5405 	} while (rc == EAGAIN);
5406 
5407 	rc = init_stats_buffers(adapter);
5408 	if (rc)
5409 		goto ibmvnic_init_fail;
5410 
5411 	rc = init_stats_token(adapter);
5412 	if (rc)
5413 		goto ibmvnic_stats_fail;
5414 
5415 	netdev->mtu = adapter->req_mtu - ETH_HLEN;
5416 	netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
5417 	netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
5418 
5419 	rc = device_create_file(&dev->dev, &dev_attr_failover);
5420 	if (rc)
5421 		goto ibmvnic_dev_file_err;
5422 
5423 	netif_carrier_off(netdev);
5424 	rc = register_netdev(netdev);
5425 	if (rc) {
5426 		dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc);
5427 		goto ibmvnic_register_fail;
5428 	}
5429 	dev_info(&dev->dev, "ibmvnic registered\n");
5430 
5431 	adapter->state = VNIC_PROBED;
5432 
5433 	adapter->wait_for_reset = false;
5434 	adapter->last_reset_time = jiffies;
5435 	return 0;
5436 
5437 ibmvnic_register_fail:
5438 	device_remove_file(&dev->dev, &dev_attr_failover);
5439 
5440 ibmvnic_dev_file_err:
5441 	release_stats_token(adapter);
5442 
5443 ibmvnic_stats_fail:
5444 	release_stats_buffers(adapter);
5445 
5446 ibmvnic_init_fail:
5447 	release_sub_crqs(adapter, 1);
5448 	release_crq_queue(adapter);
5449 	mutex_destroy(&adapter->fw_lock);
5450 	free_netdev(netdev);
5451 
5452 	return rc;
5453 }
5454 
5455 static int ibmvnic_remove(struct vio_dev *dev)
5456 {
5457 	struct net_device *netdev = dev_get_drvdata(&dev->dev);
5458 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
5459 	unsigned long flags;
5460 
5461 	spin_lock_irqsave(&adapter->state_lock, flags);
5462 	adapter->state = VNIC_REMOVING;
5463 	spin_unlock_irqrestore(&adapter->state_lock, flags);
5464 
5465 	flush_work(&adapter->ibmvnic_reset);
5466 	flush_delayed_work(&adapter->ibmvnic_delayed_reset);
5467 
5468 	rtnl_lock();
5469 	unregister_netdevice(netdev);
5470 
5471 	release_resources(adapter);
5472 	release_sub_crqs(adapter, 1);
5473 	release_crq_queue(adapter);
5474 
5475 	release_stats_token(adapter);
5476 	release_stats_buffers(adapter);
5477 
5478 	adapter->state = VNIC_REMOVED;
5479 
5480 	rtnl_unlock();
5481 	mutex_destroy(&adapter->fw_lock);
5482 	device_remove_file(&dev->dev, &dev_attr_failover);
5483 	free_netdev(netdev);
5484 	dev_set_drvdata(&dev->dev, NULL);
5485 
5486 	return 0;
5487 }
5488 
5489 static ssize_t failover_store(struct device *dev, struct device_attribute *attr,
5490 			      const char *buf, size_t count)
5491 {
5492 	struct net_device *netdev = dev_get_drvdata(dev);
5493 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
5494 	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
5495 	__be64 session_token;
5496 	long rc;
5497 
5498 	if (!sysfs_streq(buf, "1"))
5499 		return -EINVAL;
5500 
5501 	rc = plpar_hcall(H_VIOCTL, retbuf, adapter->vdev->unit_address,
5502 			 H_GET_SESSION_TOKEN, 0, 0, 0);
5503 	if (rc) {
5504 		netdev_err(netdev, "Couldn't retrieve session token, rc %ld\n",
5505 			   rc);
5506 		return -EINVAL;
5507 	}
5508 
5509 	session_token = (__be64)retbuf[0];
5510 	netdev_dbg(netdev, "Initiating client failover, session id %llx\n",
5511 		   be64_to_cpu(session_token));
5512 	rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
5513 				H_SESSION_ERR_DETECTED, session_token, 0, 0);
5514 	if (rc) {
5515 		netdev_err(netdev, "Client initiated failover failed, rc %ld\n",
5516 			   rc);
5517 		return -EINVAL;
5518 	}
5519 
5520 	return count;
5521 }
5522 
5523 static DEVICE_ATTR_WO(failover);
5524 
5525 static unsigned long ibmvnic_get_desired_dma(struct vio_dev *vdev)
5526 {
5527 	struct net_device *netdev = dev_get_drvdata(&vdev->dev);
5528 	struct ibmvnic_adapter *adapter;
5529 	struct iommu_table *tbl;
5530 	unsigned long ret = 0;
5531 	int i;
5532 
5533 	tbl = get_iommu_table_base(&vdev->dev);
5534 
5535 	/* netdev inits at probe time along with the structures we need below*/
5536 	if (!netdev)
5537 		return IOMMU_PAGE_ALIGN(IBMVNIC_IO_ENTITLEMENT_DEFAULT, tbl);
5538 
5539 	adapter = netdev_priv(netdev);
5540 
5541 	ret += PAGE_SIZE; /* the crq message queue */
5542 	ret += IOMMU_PAGE_ALIGN(sizeof(struct ibmvnic_statistics), tbl);
5543 
5544 	for (i = 0; i < adapter->req_tx_queues + adapter->req_rx_queues; i++)
5545 		ret += 4 * PAGE_SIZE; /* the scrq message queue */
5546 
5547 	for (i = 0; i < adapter->num_active_rx_pools; i++)
5548 		ret += adapter->rx_pool[i].size *
5549 		    IOMMU_PAGE_ALIGN(adapter->rx_pool[i].buff_size, tbl);
5550 
5551 	return ret;
5552 }
5553 
5554 static int ibmvnic_resume(struct device *dev)
5555 {
5556 	struct net_device *netdev = dev_get_drvdata(dev);
5557 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
5558 
5559 	if (adapter->state != VNIC_OPEN)
5560 		return 0;
5561 
5562 	tasklet_schedule(&adapter->tasklet);
5563 
5564 	return 0;
5565 }
5566 
5567 static const struct vio_device_id ibmvnic_device_table[] = {
5568 	{"network", "IBM,vnic"},
5569 	{"", "" }
5570 };
5571 MODULE_DEVICE_TABLE(vio, ibmvnic_device_table);
5572 
5573 static const struct dev_pm_ops ibmvnic_pm_ops = {
5574 	.resume = ibmvnic_resume
5575 };
5576 
5577 static struct vio_driver ibmvnic_driver = {
5578 	.id_table       = ibmvnic_device_table,
5579 	.probe          = ibmvnic_probe,
5580 	.remove         = ibmvnic_remove,
5581 	.get_desired_dma = ibmvnic_get_desired_dma,
5582 	.name		= ibmvnic_driver_name,
5583 	.pm		= &ibmvnic_pm_ops,
5584 };
5585 
5586 /* module functions */
5587 static int __init ibmvnic_module_init(void)
5588 {
5589 	pr_info("%s: %s %s\n", ibmvnic_driver_name, ibmvnic_driver_string,
5590 		IBMVNIC_DRIVER_VERSION);
5591 
5592 	return vio_register_driver(&ibmvnic_driver);
5593 }
5594 
5595 static void __exit ibmvnic_module_exit(void)
5596 {
5597 	vio_unregister_driver(&ibmvnic_driver);
5598 }
5599 
5600 module_init(ibmvnic_module_init);
5601 module_exit(ibmvnic_module_exit);
5602