xref: /openbmc/linux/drivers/net/ethernet/ibm/ibmvnic.c (revision 8ebc80a25f9d9bf7a8e368b266d5b740c485c362)
1d5bb994bSThomas Gleixner // SPDX-License-Identifier: GPL-2.0-or-later
2032c5e82SThomas Falcon /**************************************************************************/
3032c5e82SThomas Falcon /*                                                                        */
4032c5e82SThomas Falcon /*  IBM System i and System p Virtual NIC Device Driver                   */
5032c5e82SThomas Falcon /*  Copyright (C) 2014 IBM Corp.                                          */
6032c5e82SThomas Falcon /*  Santiago Leon (santi_leon@yahoo.com)                                  */
7032c5e82SThomas Falcon /*  Thomas Falcon (tlfalcon@linux.vnet.ibm.com)                           */
8032c5e82SThomas Falcon /*  John Allen (jallen@linux.vnet.ibm.com)                                */
9032c5e82SThomas Falcon /*                                                                        */
10032c5e82SThomas Falcon /*                                                                        */
11032c5e82SThomas Falcon /* This module contains the implementation of a virtual ethernet device   */
12032c5e82SThomas Falcon /* for use with IBM i/p Series LPAR Linux. It utilizes the logical LAN    */
13032c5e82SThomas Falcon /* option of the RS/6000 Platform Architecture to interface with virtual  */
14032c5e82SThomas Falcon /* ethernet NICs that are presented to the partition by the hypervisor.   */
15032c5e82SThomas Falcon /*									   */
16032c5e82SThomas Falcon /* Messages are passed between the VNIC driver and the VNIC server using  */
17032c5e82SThomas Falcon /* Command/Response Queues (CRQs) and sub CRQs (sCRQs). CRQs are used to  */
18032c5e82SThomas Falcon /* issue and receive commands that initiate communication with the server */
19032c5e82SThomas Falcon /* on driver initialization. Sub CRQs (sCRQs) are similar to CRQs, but    */
20032c5e82SThomas Falcon /* are used by the driver to notify the server that a packet is           */
21032c5e82SThomas Falcon /* ready for transmission or that a buffer has been added to receive a    */
22032c5e82SThomas Falcon /* packet. Subsequently, sCRQs are used by the server to notify the       */
23032c5e82SThomas Falcon /* driver that a packet transmission has been completed or that a packet  */
24032c5e82SThomas Falcon /* has been received and placed in a waiting buffer.                      */
25032c5e82SThomas Falcon /*                                                                        */
26032c5e82SThomas Falcon /* In lieu of a more conventional "on-the-fly" DMA mapping strategy in    */
27032c5e82SThomas Falcon /* which skbs are DMA mapped and immediately unmapped when the transmit   */
28032c5e82SThomas Falcon /* or receive has been completed, the VNIC driver is required to use      */
29032c5e82SThomas Falcon /* "long term mapping". This entails that large, continuous DMA mapped    */
30032c5e82SThomas Falcon /* buffers are allocated on driver initialization and these buffers are   */
31032c5e82SThomas Falcon /* then continuously reused to pass skbs to and from the VNIC server.     */
32032c5e82SThomas Falcon /*                                                                        */
33032c5e82SThomas Falcon /**************************************************************************/
34032c5e82SThomas Falcon 
35032c5e82SThomas Falcon #include <linux/module.h>
36032c5e82SThomas Falcon #include <linux/moduleparam.h>
37032c5e82SThomas Falcon #include <linux/types.h>
38032c5e82SThomas Falcon #include <linux/errno.h>
39032c5e82SThomas Falcon #include <linux/completion.h>
40032c5e82SThomas Falcon #include <linux/ioport.h>
41032c5e82SThomas Falcon #include <linux/dma-mapping.h>
42032c5e82SThomas Falcon #include <linux/kernel.h>
43032c5e82SThomas Falcon #include <linux/netdevice.h>
44032c5e82SThomas Falcon #include <linux/etherdevice.h>
45032c5e82SThomas Falcon #include <linux/skbuff.h>
46032c5e82SThomas Falcon #include <linux/init.h>
47032c5e82SThomas Falcon #include <linux/delay.h>
48032c5e82SThomas Falcon #include <linux/mm.h>
49032c5e82SThomas Falcon #include <linux/ethtool.h>
50032c5e82SThomas Falcon #include <linux/proc_fs.h>
514eb50cebSThomas Falcon #include <linux/if_arp.h>
52032c5e82SThomas Falcon #include <linux/in.h>
53032c5e82SThomas Falcon #include <linux/ip.h>
54ad7775dcSThomas Falcon #include <linux/ipv6.h>
55032c5e82SThomas Falcon #include <linux/irq.h>
566bff3ffcSChristophe Leroy #include <linux/irqdomain.h>
57032c5e82SThomas Falcon #include <linux/kthread.h>
58032c5e82SThomas Falcon #include <linux/seq_file.h>
59032c5e82SThomas Falcon #include <linux/interrupt.h>
60032c5e82SThomas Falcon #include <net/net_namespace.h>
61032c5e82SThomas Falcon #include <asm/hvcall.h>
62032c5e82SThomas Falcon #include <linux/atomic.h>
63032c5e82SThomas Falcon #include <asm/vio.h>
647ea0c16aSCédric Le Goater #include <asm/xive.h>
65032c5e82SThomas Falcon #include <asm/iommu.h>
66032c5e82SThomas Falcon #include <linux/uaccess.h>
67032c5e82SThomas Falcon #include <asm/firmware.h>
6865dc6891SThomas Falcon #include <linux/workqueue.h>
696052d5e2SMurilo Fossa Vicentini #include <linux/if_vlan.h>
7037798d02SNathan Fontenot #include <linux/utsname.h>
7144fbc1b6SNick Child #include <linux/cpu.h>
72032c5e82SThomas Falcon 
73032c5e82SThomas Falcon #include "ibmvnic.h"
74032c5e82SThomas Falcon 
75032c5e82SThomas Falcon static const char ibmvnic_driver_name[] = "ibmvnic";
76032c5e82SThomas Falcon static const char ibmvnic_driver_string[] = "IBM System i/p Virtual NIC Driver";
77032c5e82SThomas Falcon 
7878b07ac1SThomas Falcon MODULE_AUTHOR("Santiago Leon");
79032c5e82SThomas Falcon MODULE_DESCRIPTION("IBM System i/p Virtual NIC Driver");
80032c5e82SThomas Falcon MODULE_LICENSE("GPL");
81032c5e82SThomas Falcon MODULE_VERSION(IBMVNIC_DRIVER_VERSION);
82032c5e82SThomas Falcon 
83032c5e82SThomas Falcon static int ibmvnic_version = IBMVNIC_INITIAL_VERSION;
84d7c0ef36SNathan Fontenot static void release_sub_crqs(struct ibmvnic_adapter *, bool);
85032c5e82SThomas Falcon static int ibmvnic_reset_crq(struct ibmvnic_adapter *);
86032c5e82SThomas Falcon static int ibmvnic_send_crq_init(struct ibmvnic_adapter *);
87032c5e82SThomas Falcon static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *);
88032c5e82SThomas Falcon static int ibmvnic_send_crq(struct ibmvnic_adapter *, union ibmvnic_crq *);
89ad7775dcSThomas Falcon static int send_subcrq_indirect(struct ibmvnic_adapter *, u64, u64, u64);
90032c5e82SThomas Falcon static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance);
91032c5e82SThomas Falcon static int enable_scrq_irq(struct ibmvnic_adapter *,
92032c5e82SThomas Falcon 			   struct ibmvnic_sub_crq_queue *);
93032c5e82SThomas Falcon static int disable_scrq_irq(struct ibmvnic_adapter *,
94032c5e82SThomas Falcon 			    struct ibmvnic_sub_crq_queue *);
95032c5e82SThomas Falcon static int pending_scrq(struct ibmvnic_adapter *,
96032c5e82SThomas Falcon 			struct ibmvnic_sub_crq_queue *);
97032c5e82SThomas Falcon static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *,
98032c5e82SThomas Falcon 					struct ibmvnic_sub_crq_queue *);
99032c5e82SThomas Falcon static int ibmvnic_poll(struct napi_struct *napi, int data);
10023cc5f66SNick Child static int reset_sub_crq_queues(struct ibmvnic_adapter *adapter);
10123cc5f66SNick Child static inline void reinit_init_done(struct ibmvnic_adapter *adapter);
10269980d02SLijun Pan static void send_query_map(struct ibmvnic_adapter *adapter);
103673ead24SLijun Pan static int send_request_map(struct ibmvnic_adapter *, dma_addr_t, u32, u8);
1049c4eaabdSThomas Falcon static int send_request_unmap(struct ibmvnic_adapter *, u8);
10520a8ab74SThomas Falcon static int send_login(struct ibmvnic_adapter *adapter);
106491099adSLijun Pan static void send_query_cap(struct ibmvnic_adapter *adapter);
1074d96f12aSThomas Falcon static int init_sub_crqs(struct ibmvnic_adapter *);
108bd0b6723SJohn Allen static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter);
109635e442fSLijun Pan static int ibmvnic_reset_init(struct ibmvnic_adapter *, bool reset);
110f992887cSNathan Fontenot static void release_crq_queue(struct ibmvnic_adapter *);
11162740e97SThomas Falcon static int __ibmvnic_set_mac(struct net_device *, u8 *);
11230f79625SNathan Fontenot static int init_crq_queue(struct ibmvnic_adapter *adapter);
113f8d6ae0dSMurilo Fossa Vicentini static int send_query_phys_parms(struct ibmvnic_adapter *adapter);
11465d6470dSSukadev Bhattiprolu static void ibmvnic_tx_scrq_clean_buffer(struct ibmvnic_adapter *adapter,
11565d6470dSSukadev Bhattiprolu 					 struct ibmvnic_sub_crq_queue *tx_scrq);
116f8ac0bfaSSukadev Bhattiprolu static void free_long_term_buff(struct ibmvnic_adapter *adapter,
117f8ac0bfaSSukadev Bhattiprolu 				struct ibmvnic_long_term_buff *ltb);
11861772b09SSukadev Bhattiprolu static void ibmvnic_disable_irqs(struct ibmvnic_adapter *adapter);
1196db541aeSNick Child static void flush_reset_queue(struct ibmvnic_adapter *adapter);
1204b1555f9SNick Child static void print_subcrq_error(struct device *dev, int rc, const char *func);
121032c5e82SThomas Falcon 
/* Descriptor tying an ethtool statistic name to the byte offset of its
 * backing counter inside struct ibmvnic_adapter.
 */
struct ibmvnic_stat {
	char name[ETH_GSTRING_LEN];	/* name reported via ethtool -S */
	int offset;			/* byte offset within the adapter */
};

/* Offset of member @stat (in struct ibmvnic_statistics) from the start of
 * struct ibmvnic_adapter, reached through the embedded 'stats' member.
 */
#define IBMVNIC_STAT_OFF(stat) (offsetof(struct ibmvnic_adapter, stats) + \
			     offsetof(struct ibmvnic_statistics, stat))
/* Read the u64 counter located @off bytes into object @a. */
#define IBMVNIC_GET_STAT(a, off) (*((u64 *)(((unsigned long)(a)) + (off))))

/* Device statistics exposed through ethtool -S. */
static const struct ibmvnic_stat ibmvnic_stats[] = {
	{"rx_packets", IBMVNIC_STAT_OFF(rx_packets)},
	{"rx_bytes", IBMVNIC_STAT_OFF(rx_bytes)},
	{"tx_packets", IBMVNIC_STAT_OFF(tx_packets)},
	{"tx_bytes", IBMVNIC_STAT_OFF(tx_bytes)},
	{"ucast_tx_packets", IBMVNIC_STAT_OFF(ucast_tx_packets)},
	{"ucast_rx_packets", IBMVNIC_STAT_OFF(ucast_rx_packets)},
	{"mcast_tx_packets", IBMVNIC_STAT_OFF(mcast_tx_packets)},
	{"mcast_rx_packets", IBMVNIC_STAT_OFF(mcast_rx_packets)},
	{"bcast_tx_packets", IBMVNIC_STAT_OFF(bcast_tx_packets)},
	{"bcast_rx_packets", IBMVNIC_STAT_OFF(bcast_rx_packets)},
	{"align_errors", IBMVNIC_STAT_OFF(align_errors)},
	{"fcs_errors", IBMVNIC_STAT_OFF(fcs_errors)},
	{"single_collision_frames", IBMVNIC_STAT_OFF(single_collision_frames)},
	{"multi_collision_frames", IBMVNIC_STAT_OFF(multi_collision_frames)},
	{"sqe_test_errors", IBMVNIC_STAT_OFF(sqe_test_errors)},
	{"deferred_tx", IBMVNIC_STAT_OFF(deferred_tx)},
	{"late_collisions", IBMVNIC_STAT_OFF(late_collisions)},
	{"excess_collisions", IBMVNIC_STAT_OFF(excess_collisions)},
	{"internal_mac_tx_errors", IBMVNIC_STAT_OFF(internal_mac_tx_errors)},
	{"carrier_sense", IBMVNIC_STAT_OFF(carrier_sense)},
	{"too_long_frames", IBMVNIC_STAT_OFF(too_long_frames)},
	{"internal_mac_rx_errors", IBMVNIC_STAT_OFF(internal_mac_rx_errors)},
};
155032c5e82SThomas Falcon 
send_crq_init_complete(struct ibmvnic_adapter * adapter)15653f8b1b2SCristobal Forno static int send_crq_init_complete(struct ibmvnic_adapter *adapter)
15753f8b1b2SCristobal Forno {
15853f8b1b2SCristobal Forno 	union ibmvnic_crq crq;
15953f8b1b2SCristobal Forno 
16053f8b1b2SCristobal Forno 	memset(&crq, 0, sizeof(crq));
16153f8b1b2SCristobal Forno 	crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
16253f8b1b2SCristobal Forno 	crq.generic.cmd = IBMVNIC_CRQ_INIT_COMPLETE;
16353f8b1b2SCristobal Forno 
16453f8b1b2SCristobal Forno 	return ibmvnic_send_crq(adapter, &crq);
16553f8b1b2SCristobal Forno }
16653f8b1b2SCristobal Forno 
send_version_xchg(struct ibmvnic_adapter * adapter)16753f8b1b2SCristobal Forno static int send_version_xchg(struct ibmvnic_adapter *adapter)
16853f8b1b2SCristobal Forno {
16953f8b1b2SCristobal Forno 	union ibmvnic_crq crq;
17053f8b1b2SCristobal Forno 
17153f8b1b2SCristobal Forno 	memset(&crq, 0, sizeof(crq));
17253f8b1b2SCristobal Forno 	crq.version_exchange.first = IBMVNIC_CRQ_CMD;
17353f8b1b2SCristobal Forno 	crq.version_exchange.cmd = VERSION_EXCHANGE;
17453f8b1b2SCristobal Forno 	crq.version_exchange.version = cpu_to_be16(ibmvnic_version);
17553f8b1b2SCristobal Forno 
17653f8b1b2SCristobal Forno 	return ibmvnic_send_crq(adapter, &crq);
17753f8b1b2SCristobal Forno }
17853f8b1b2SCristobal Forno 
ibmvnic_clean_queue_affinity(struct ibmvnic_adapter * adapter,struct ibmvnic_sub_crq_queue * queue)17944fbc1b6SNick Child static void ibmvnic_clean_queue_affinity(struct ibmvnic_adapter *adapter,
18044fbc1b6SNick Child 					 struct ibmvnic_sub_crq_queue *queue)
18144fbc1b6SNick Child {
18244fbc1b6SNick Child 	if (!(queue && queue->irq))
18344fbc1b6SNick Child 		return;
18444fbc1b6SNick Child 
18544fbc1b6SNick Child 	cpumask_clear(queue->affinity_mask);
18644fbc1b6SNick Child 
18744fbc1b6SNick Child 	if (irq_set_affinity_and_hint(queue->irq, NULL))
18844fbc1b6SNick Child 		netdev_warn(adapter->netdev,
18944fbc1b6SNick Child 			    "%s: Clear affinity failed, queue addr = %p, IRQ = %d\n",
19044fbc1b6SNick Child 			    __func__, queue, queue->irq);
19144fbc1b6SNick Child }
19244fbc1b6SNick Child 
ibmvnic_clean_affinity(struct ibmvnic_adapter * adapter)19344fbc1b6SNick Child static void ibmvnic_clean_affinity(struct ibmvnic_adapter *adapter)
19444fbc1b6SNick Child {
19544fbc1b6SNick Child 	struct ibmvnic_sub_crq_queue **rxqs;
19644fbc1b6SNick Child 	struct ibmvnic_sub_crq_queue **txqs;
19744fbc1b6SNick Child 	int num_rxqs, num_txqs;
198813f3662SYu Liao 	int i;
19944fbc1b6SNick Child 
20044fbc1b6SNick Child 	rxqs = adapter->rx_scrq;
20144fbc1b6SNick Child 	txqs = adapter->tx_scrq;
20244fbc1b6SNick Child 	num_txqs = adapter->num_active_tx_scrqs;
20344fbc1b6SNick Child 	num_rxqs = adapter->num_active_rx_scrqs;
20444fbc1b6SNick Child 
20544fbc1b6SNick Child 	netdev_dbg(adapter->netdev, "%s: Cleaning irq affinity hints", __func__);
20644fbc1b6SNick Child 	if (txqs) {
20744fbc1b6SNick Child 		for (i = 0; i < num_txqs; i++)
20844fbc1b6SNick Child 			ibmvnic_clean_queue_affinity(adapter, txqs[i]);
20944fbc1b6SNick Child 	}
21044fbc1b6SNick Child 	if (rxqs) {
21144fbc1b6SNick Child 		for (i = 0; i < num_rxqs; i++)
21244fbc1b6SNick Child 			ibmvnic_clean_queue_affinity(adapter, rxqs[i]);
21344fbc1b6SNick Child 	}
21444fbc1b6SNick Child }
21544fbc1b6SNick Child 
ibmvnic_set_queue_affinity(struct ibmvnic_sub_crq_queue * queue,unsigned int * cpu,int * stragglers,int stride)21644fbc1b6SNick Child static int ibmvnic_set_queue_affinity(struct ibmvnic_sub_crq_queue *queue,
21744fbc1b6SNick Child 				      unsigned int *cpu, int *stragglers,
21844fbc1b6SNick Child 				      int stride)
21944fbc1b6SNick Child {
22044fbc1b6SNick Child 	cpumask_var_t mask;
22144fbc1b6SNick Child 	int i;
22244fbc1b6SNick Child 	int rc = 0;
22344fbc1b6SNick Child 
22444fbc1b6SNick Child 	if (!(queue && queue->irq))
22544fbc1b6SNick Child 		return rc;
22644fbc1b6SNick Child 
22744fbc1b6SNick Child 	/* cpumask_var_t is either a pointer or array, allocation works here */
22844fbc1b6SNick Child 	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
22944fbc1b6SNick Child 		return -ENOMEM;
23044fbc1b6SNick Child 
23144fbc1b6SNick Child 	/* while we have extra cpu give one extra to this irq */
23244fbc1b6SNick Child 	if (*stragglers) {
23344fbc1b6SNick Child 		stride++;
23444fbc1b6SNick Child 		(*stragglers)--;
23544fbc1b6SNick Child 	}
23644fbc1b6SNick Child 	/* atomic write is safer than writing bit by bit directly */
23744fbc1b6SNick Child 	for (i = 0; i < stride; i++) {
23844fbc1b6SNick Child 		cpumask_set_cpu(*cpu, mask);
23944fbc1b6SNick Child 		*cpu = cpumask_next_wrap(*cpu, cpu_online_mask,
24044fbc1b6SNick Child 					 nr_cpu_ids, false);
24144fbc1b6SNick Child 	}
24244fbc1b6SNick Child 	/* set queue affinity mask */
24344fbc1b6SNick Child 	cpumask_copy(queue->affinity_mask, mask);
24444fbc1b6SNick Child 	rc = irq_set_affinity_and_hint(queue->irq, queue->affinity_mask);
24544fbc1b6SNick Child 	free_cpumask_var(mask);
24644fbc1b6SNick Child 
24744fbc1b6SNick Child 	return rc;
24844fbc1b6SNick Child }
24944fbc1b6SNick Child 
/**
 * ibmvnic_set_affinity() - Spread queue IRQ affinity across online CPUs
 * @adapter: private device data
 *
 * Divide the online CPUs evenly among all active tx and rx sub-CRQ queue
 * IRQs, alternating tx/rx assignments to balance core load, and install a
 * matching XPS map for each tx queue. On any affinity failure all hints
 * set so far are cleaned up again; XPS failures are only warned about.
 *
 * Caller must hold the cpus read lock (see cpu hotplug callbacks below).
 */
static void ibmvnic_set_affinity(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_sub_crq_queue **rxqs = adapter->rx_scrq;
	struct ibmvnic_sub_crq_queue **txqs = adapter->tx_scrq;
	struct ibmvnic_sub_crq_queue *queue;
	int num_rxqs = adapter->num_active_rx_scrqs, i_rxqs = 0;
	int num_txqs = adapter->num_active_tx_scrqs, i_txqs = 0;
	int total_queues, stride, stragglers, i;
	unsigned int num_cpu, cpu;
	bool is_rx_queue;
	int rc = 0;

	netdev_dbg(adapter->netdev, "%s: Setting irq affinity hints", __func__);
	if (!(adapter->rx_scrq && adapter->tx_scrq)) {
		netdev_warn(adapter->netdev,
			    "%s: Set affinity failed, queues not allocated\n",
			    __func__);
		return;
	}

	total_queues = num_rxqs + num_txqs;
	num_cpu = num_online_cpus();
	/* number of cpu's assigned per irq */
	stride = max_t(int, num_cpu / total_queues, 1);
	/* number of leftover cpu's */
	stragglers = num_cpu >= total_queues ? num_cpu % total_queues : 0;
	/* next available cpu to assign irq to */
	cpu = cpumask_next(-1, cpu_online_mask);

	for (i = 0; i < total_queues; i++) {
		is_rx_queue = false;
		/* balance core load by alternating rx and tx assignments
		 * ex: TX0 -> RX0 -> TX1 -> RX1 etc.
		 */
		if ((i % 2 == 1 && i_rxqs < num_rxqs) || i_txqs == num_txqs) {
			queue = rxqs[i_rxqs++];
			is_rx_queue = true;
		} else {
			queue = txqs[i_txqs++];
		}

		rc = ibmvnic_set_queue_affinity(queue, &cpu, &stragglers,
						stride);
		if (rc)
			goto out;

		/* XPS maps apply to tx queues only */
		if (!queue || is_rx_queue)
			continue;

		rc = __netif_set_xps_queue(adapter->netdev,
					   cpumask_bits(queue->affinity_mask),
					   i_txqs - 1, XPS_CPUS);
		if (rc)
			netdev_warn(adapter->netdev, "%s: Set XPS on queue %d failed, rc = %d.\n",
				    __func__, i_txqs - 1, rc);
	}

out:
	if (rc) {
		netdev_warn(adapter->netdev,
			    "%s: Set affinity failed, queue addr = %p, IRQ = %d, rc = %d.\n",
			    __func__, queue, queue->irq, rc);
		ibmvnic_clean_affinity(adapter);
	}
}
31644fbc1b6SNick Child 
ibmvnic_cpu_online(unsigned int cpu,struct hlist_node * node)31792125c3aSNick Child static int ibmvnic_cpu_online(unsigned int cpu, struct hlist_node *node)
31892125c3aSNick Child {
31992125c3aSNick Child 	struct ibmvnic_adapter *adapter;
32092125c3aSNick Child 
32192125c3aSNick Child 	adapter = hlist_entry_safe(node, struct ibmvnic_adapter, node);
32292125c3aSNick Child 	ibmvnic_set_affinity(adapter);
32392125c3aSNick Child 	return 0;
32492125c3aSNick Child }
32592125c3aSNick Child 
ibmvnic_cpu_dead(unsigned int cpu,struct hlist_node * node)32692125c3aSNick Child static int ibmvnic_cpu_dead(unsigned int cpu, struct hlist_node *node)
32792125c3aSNick Child {
32892125c3aSNick Child 	struct ibmvnic_adapter *adapter;
32992125c3aSNick Child 
33092125c3aSNick Child 	adapter = hlist_entry_safe(node, struct ibmvnic_adapter, node_dead);
33192125c3aSNick Child 	ibmvnic_set_affinity(adapter);
33292125c3aSNick Child 	return 0;
33392125c3aSNick Child }
33492125c3aSNick Child 
ibmvnic_cpu_down_prep(unsigned int cpu,struct hlist_node * node)33592125c3aSNick Child static int ibmvnic_cpu_down_prep(unsigned int cpu, struct hlist_node *node)
33692125c3aSNick Child {
33792125c3aSNick Child 	struct ibmvnic_adapter *adapter;
33892125c3aSNick Child 
33992125c3aSNick Child 	adapter = hlist_entry_safe(node, struct ibmvnic_adapter, node);
34092125c3aSNick Child 	ibmvnic_clean_affinity(adapter);
34192125c3aSNick Child 	return 0;
34292125c3aSNick Child }
34392125c3aSNick Child 
34492125c3aSNick Child static enum cpuhp_state ibmvnic_online;
34592125c3aSNick Child 
ibmvnic_cpu_notif_add(struct ibmvnic_adapter * adapter)34692125c3aSNick Child static int ibmvnic_cpu_notif_add(struct ibmvnic_adapter *adapter)
34792125c3aSNick Child {
34892125c3aSNick Child 	int ret;
34992125c3aSNick Child 
35092125c3aSNick Child 	ret = cpuhp_state_add_instance_nocalls(ibmvnic_online, &adapter->node);
35192125c3aSNick Child 	if (ret)
35292125c3aSNick Child 		return ret;
35392125c3aSNick Child 	ret = cpuhp_state_add_instance_nocalls(CPUHP_IBMVNIC_DEAD,
35492125c3aSNick Child 					       &adapter->node_dead);
35592125c3aSNick Child 	if (!ret)
35692125c3aSNick Child 		return ret;
35792125c3aSNick Child 	cpuhp_state_remove_instance_nocalls(ibmvnic_online, &adapter->node);
35892125c3aSNick Child 	return ret;
35992125c3aSNick Child }
36092125c3aSNick Child 
ibmvnic_cpu_notif_remove(struct ibmvnic_adapter * adapter)36192125c3aSNick Child static void ibmvnic_cpu_notif_remove(struct ibmvnic_adapter *adapter)
36292125c3aSNick Child {
36392125c3aSNick Child 	cpuhp_state_remove_instance_nocalls(ibmvnic_online, &adapter->node);
36492125c3aSNick Child 	cpuhp_state_remove_instance_nocalls(CPUHP_IBMVNIC_DEAD,
36592125c3aSNick Child 					    &adapter->node_dead);
36692125c3aSNick Child }
36792125c3aSNick Child 
h_reg_sub_crq(unsigned long unit_address,unsigned long token,unsigned long length,unsigned long * number,unsigned long * irq)368032c5e82SThomas Falcon static long h_reg_sub_crq(unsigned long unit_address, unsigned long token,
369032c5e82SThomas Falcon 			  unsigned long length, unsigned long *number,
370032c5e82SThomas Falcon 			  unsigned long *irq)
371032c5e82SThomas Falcon {
372032c5e82SThomas Falcon 	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
373032c5e82SThomas Falcon 	long rc;
374032c5e82SThomas Falcon 
375032c5e82SThomas Falcon 	rc = plpar_hcall(H_REG_SUB_CRQ, retbuf, unit_address, token, length);
376032c5e82SThomas Falcon 	*number = retbuf[0];
377032c5e82SThomas Falcon 	*irq = retbuf[1];
378032c5e82SThomas Falcon 
379032c5e82SThomas Falcon 	return rc;
380032c5e82SThomas Falcon }
381032c5e82SThomas Falcon 
382476d96caSThomas Falcon /**
383476d96caSThomas Falcon  * ibmvnic_wait_for_completion - Check device state and wait for completion
384476d96caSThomas Falcon  * @adapter: private device data
385476d96caSThomas Falcon  * @comp_done: completion structure to wait for
386476d96caSThomas Falcon  * @timeout: time to wait in milliseconds
387476d96caSThomas Falcon  *
388476d96caSThomas Falcon  * Wait for a completion signal or until the timeout limit is reached
389476d96caSThomas Falcon  * while checking that the device is still active.
390476d96caSThomas Falcon  */
ibmvnic_wait_for_completion(struct ibmvnic_adapter * adapter,struct completion * comp_done,unsigned long timeout)391476d96caSThomas Falcon static int ibmvnic_wait_for_completion(struct ibmvnic_adapter *adapter,
392476d96caSThomas Falcon 				       struct completion *comp_done,
393476d96caSThomas Falcon 				       unsigned long timeout)
394476d96caSThomas Falcon {
395476d96caSThomas Falcon 	struct net_device *netdev;
396476d96caSThomas Falcon 	unsigned long div_timeout;
397476d96caSThomas Falcon 	u8 retry;
398476d96caSThomas Falcon 
399476d96caSThomas Falcon 	netdev = adapter->netdev;
400476d96caSThomas Falcon 	retry = 5;
401476d96caSThomas Falcon 	div_timeout = msecs_to_jiffies(timeout / retry);
402476d96caSThomas Falcon 	while (true) {
403476d96caSThomas Falcon 		if (!adapter->crq.active) {
404476d96caSThomas Falcon 			netdev_err(netdev, "Device down!\n");
405476d96caSThomas Falcon 			return -ENODEV;
406476d96caSThomas Falcon 		}
4078f9cc1eeSThomas Falcon 		if (!retry--)
408476d96caSThomas Falcon 			break;
409476d96caSThomas Falcon 		if (wait_for_completion_timeout(comp_done, div_timeout))
410476d96caSThomas Falcon 			return 0;
411476d96caSThomas Falcon 	}
412476d96caSThomas Falcon 	netdev_err(netdev, "Operation timed out.\n");
413476d96caSThomas Falcon 	return -ETIMEDOUT;
414476d96caSThomas Falcon }
415476d96caSThomas Falcon 
416f8ac0bfaSSukadev Bhattiprolu /**
417f8ac0bfaSSukadev Bhattiprolu  * reuse_ltb() - Check if a long term buffer can be reused
418f8ac0bfaSSukadev Bhattiprolu  * @ltb:  The long term buffer to be checked
419f8ac0bfaSSukadev Bhattiprolu  * @size: The size of the long term buffer.
420f8ac0bfaSSukadev Bhattiprolu  *
421f8ac0bfaSSukadev Bhattiprolu  * An LTB can be reused unless its size has changed.
422f8ac0bfaSSukadev Bhattiprolu  *
423f8ac0bfaSSukadev Bhattiprolu  * Return: Return true if the LTB can be reused, false otherwise.
424f8ac0bfaSSukadev Bhattiprolu  */
reuse_ltb(struct ibmvnic_long_term_buff * ltb,int size)425f8ac0bfaSSukadev Bhattiprolu static bool reuse_ltb(struct ibmvnic_long_term_buff *ltb, int size)
426f8ac0bfaSSukadev Bhattiprolu {
427f8ac0bfaSSukadev Bhattiprolu 	return (ltb->buff && ltb->size == size);
428f8ac0bfaSSukadev Bhattiprolu }
429f8ac0bfaSSukadev Bhattiprolu 
430f8ac0bfaSSukadev Bhattiprolu /**
431f8ac0bfaSSukadev Bhattiprolu  * alloc_long_term_buff() - Allocate a long term buffer (LTB)
432f8ac0bfaSSukadev Bhattiprolu  *
433f8ac0bfaSSukadev Bhattiprolu  * @adapter: ibmvnic adapter associated to the LTB
434f8ac0bfaSSukadev Bhattiprolu  * @ltb:     container object for the LTB
435f8ac0bfaSSukadev Bhattiprolu  * @size:    size of the LTB
436f8ac0bfaSSukadev Bhattiprolu  *
437f8ac0bfaSSukadev Bhattiprolu  * Allocate an LTB of the specified size and notify VIOS.
438f8ac0bfaSSukadev Bhattiprolu  *
439f8ac0bfaSSukadev Bhattiprolu  * If the given @ltb already has the correct size, reuse it. Otherwise if
440f8ac0bfaSSukadev Bhattiprolu  * its non-NULL, free it. Then allocate a new one of the correct size.
441f8ac0bfaSSukadev Bhattiprolu  * Notify the VIOS either way since we may now be working with a new VIOS.
442f8ac0bfaSSukadev Bhattiprolu  *
443f8ac0bfaSSukadev Bhattiprolu  * Allocating larger chunks of memory during resets, specially LPM or under
444f8ac0bfaSSukadev Bhattiprolu  * low memory situations can cause resets to fail/timeout and for LPAR to
445f8ac0bfaSSukadev Bhattiprolu  * lose connectivity. So hold onto the LTB even if we fail to communicate
446f8ac0bfaSSukadev Bhattiprolu  * with the VIOS and reuse it on next open. Free LTB when adapter is closed.
447f8ac0bfaSSukadev Bhattiprolu  *
448f8ac0bfaSSukadev Bhattiprolu  * Return: 0 if we were able to allocate the LTB and notify the VIOS and
449f8ac0bfaSSukadev Bhattiprolu  *	   a negative value otherwise.
450f8ac0bfaSSukadev Bhattiprolu  */
static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb, int size)
{
	struct device *dev = &adapter->vdev->dev;
	u64 prev = 0;	/* old size, logged when the LTB is reallocated */
	int rc;

	/* Size changed (or never allocated): drop the old buffer first. */
	if (!reuse_ltb(ltb, size)) {
		dev_dbg(dev,
			"LTB size changed from 0x%llx to 0x%x, reallocating\n",
			 ltb->size, size);
		prev = ltb->size;
		free_long_term_buff(adapter, ltb);
	}

	if (ltb->buff) {
		/* Same size as before: keep the existing DMA mapping. */
		dev_dbg(dev, "Reusing LTB [map %d, size 0x%llx]\n",
			ltb->map_id, ltb->size);
	} else {
		ltb->buff = dma_alloc_coherent(dev, size, &ltb->addr,
					       GFP_KERNEL);
		if (!ltb->buff) {
			dev_err(dev, "Couldn't alloc long term buffer\n");
			return -ENOMEM;
		}
		ltb->size = size;

		/* Claim the lowest free map id for this buffer. */
		ltb->map_id = find_first_zero_bit(adapter->map_ids,
						  MAX_MAP_ID);
		bitmap_set(adapter->map_ids, ltb->map_id, 1);

		dev_dbg(dev,
			"Allocated new LTB [map %d, size 0x%llx was 0x%llx]\n",
			 ltb->map_id, ltb->size, prev);
	}

	/* Ensure ltb is zeroed - specially when reusing it. */
	memset(ltb->buff, 0, ltb->size);

	/* Serialize the map request/response exchange with firmware. */
	mutex_lock(&adapter->fw_lock);
	adapter->fw_done_rc = 0;
	reinit_completion(&adapter->fw_done);

	rc = send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id);
	if (rc) {
		dev_err(dev, "send_request_map failed, rc = %d\n", rc);
		goto out;
	}

	/* fw_done is completed by the REQUEST_MAP response handler. */
	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
	if (rc) {
		dev_err(dev, "LTB map request aborted or timed out, rc = %d\n",
			rc);
		goto out;
	}

	/* VIOS rejected the mapping even though the exchange completed. */
	if (adapter->fw_done_rc) {
		dev_err(dev, "Couldn't map LTB, rc = %d\n",
			adapter->fw_done_rc);
		rc = -EIO;
		goto out;
	}
	rc = 0;
out:
	/* don't free LTB on communication error - see function header */
	mutex_unlock(&adapter->fw_lock);
	return rc;
}
519032c5e82SThomas Falcon 
free_long_term_buff(struct ibmvnic_adapter * adapter,struct ibmvnic_long_term_buff * ltb)520032c5e82SThomas Falcon static void free_long_term_buff(struct ibmvnic_adapter *adapter,
521032c5e82SThomas Falcon 				struct ibmvnic_long_term_buff *ltb)
522032c5e82SThomas Falcon {
523032c5e82SThomas Falcon 	struct device *dev = &adapter->vdev->dev;
524032c5e82SThomas Falcon 
525c657e32cSNathan Fontenot 	if (!ltb->buff)
526c657e32cSNathan Fontenot 		return;
527c657e32cSNathan Fontenot 
5287d3a7b9eSLijun Pan 	/* VIOS automatically unmaps the long term buffer at remote
5297d3a7b9eSLijun Pan 	 * end for the following resets:
5307d3a7b9eSLijun Pan 	 * FAILOVER, MOBILITY, TIMEOUT.
5317d3a7b9eSLijun Pan 	 */
532ed651a10SNathan Fontenot 	if (adapter->reset_reason != VNIC_RESET_FAILOVER &&
5337d3a7b9eSLijun Pan 	    adapter->reset_reason != VNIC_RESET_MOBILITY &&
5347d3a7b9eSLijun Pan 	    adapter->reset_reason != VNIC_RESET_TIMEOUT)
535032c5e82SThomas Falcon 		send_request_unmap(adapter, ltb->map_id);
5360f2bf318SSukadev Bhattiprolu 
53759af56c2SBrian King 	dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
5380f2bf318SSukadev Bhattiprolu 
539552a3372SSukadev Bhattiprolu 	ltb->buff = NULL;
540129854f0SSukadev Bhattiprolu 	/* mark this map_id free */
541129854f0SSukadev Bhattiprolu 	bitmap_clear(adapter->map_ids, ltb->map_id, 1);
542552a3372SSukadev Bhattiprolu 	ltb->map_id = 0;
543032c5e82SThomas Falcon }
544032c5e82SThomas Falcon 
545a75de820SSukadev Bhattiprolu /**
546a75de820SSukadev Bhattiprolu  * free_ltb_set - free the given set of long term buffers (LTBS)
547a75de820SSukadev Bhattiprolu  * @adapter: The ibmvnic adapter containing this ltb set
548a75de820SSukadev Bhattiprolu  * @ltb_set: The ltb_set to be freed
549a75de820SSukadev Bhattiprolu  *
550a75de820SSukadev Bhattiprolu  * Free the set of LTBs in the given set.
551a75de820SSukadev Bhattiprolu  */
552a75de820SSukadev Bhattiprolu 
free_ltb_set(struct ibmvnic_adapter * adapter,struct ibmvnic_ltb_set * ltb_set)553d6b45850SSukadev Bhattiprolu static void free_ltb_set(struct ibmvnic_adapter *adapter,
554d6b45850SSukadev Bhattiprolu 			 struct ibmvnic_ltb_set *ltb_set)
555d6b45850SSukadev Bhattiprolu {
556d6b45850SSukadev Bhattiprolu 	int i;
557d6b45850SSukadev Bhattiprolu 
558d6b45850SSukadev Bhattiprolu 	for (i = 0; i < ltb_set->num_ltbs; i++)
559d6b45850SSukadev Bhattiprolu 		free_long_term_buff(adapter, &ltb_set->ltbs[i]);
560d6b45850SSukadev Bhattiprolu 
561d6b45850SSukadev Bhattiprolu 	kfree(ltb_set->ltbs);
562d6b45850SSukadev Bhattiprolu 	ltb_set->ltbs = NULL;
563d6b45850SSukadev Bhattiprolu 	ltb_set->num_ltbs = 0;
564d6b45850SSukadev Bhattiprolu }
565d6b45850SSukadev Bhattiprolu 
/**
 * alloc_ltb_set() - Allocate a set of long term buffers (LTBs)
 *
 * @adapter: ibmvnic adapter associated to the LTB
 * @ltb_set: container object for the set of LTBs
 * @num_buffs: Number of buffers in the LTB
 * @buff_size: Size of each buffer in the LTB
 *
 * Allocate a set of LTBs to accommodate @num_buffs buffers of @buff_size
 * each. We currently cap size each LTB to IBMVNIC_ONE_LTB_SIZE. If the
 * new set of LTBs have fewer LTBs than the old set, free the excess LTBs.
 * If new set needs more than in old set, allocate the remaining ones.
 * Try and reuse as many LTBs as possible and avoid reallocation.
 *
 * Any changes to this allocation strategy must be reflected in
 * map_rxpool_buff_to_ltb() and map_txpool_buff_to_ltb().
 *
 * Return: 0 on success, -ENOMEM (or the error from alloc_long_term_buff())
 * on failure. On failure, already-allocated LTBs are kept in @ltb_set so
 * they can be reused on a later retry.
 */

static int alloc_ltb_set(struct ibmvnic_adapter *adapter,
			 struct ibmvnic_ltb_set *ltb_set, int num_buffs,
			 int buff_size)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_ltb_set old_set;
	struct ibmvnic_ltb_set new_set;
	int rem_size;
	int tot_size;		/* size of all ltbs */
	int ltb_size;		/* size of one ltb */
	int nltbs;
	int rc;
	int n;
	int i;

	dev_dbg(dev, "%s() num_buffs %d, buff_size %d\n", __func__, num_buffs,
		buff_size);

	/* Round the per-LTB cap down to a whole number of buffers so that
	 * no buffer ever straddles an LTB boundary.
	 */
	ltb_size = rounddown(IBMVNIC_ONE_LTB_SIZE, buff_size);
	tot_size = num_buffs * buff_size;

	if (ltb_size > tot_size)
		ltb_size = tot_size;

	/* nltbs = ceil(tot_size / ltb_size); the last LTB may be smaller */
	nltbs = tot_size / ltb_size;
	if (tot_size % ltb_size)
		nltbs++;

	old_set = *ltb_set;

	if (old_set.num_ltbs == nltbs) {
		/* Same LTB count as before - reuse the container as-is */
		new_set = old_set;
	} else {
		int tmp = nltbs * sizeof(struct ibmvnic_long_term_buff);

		new_set.ltbs = kzalloc(tmp, GFP_KERNEL);
		if (!new_set.ltbs)
			return -ENOMEM;

		new_set.num_ltbs = nltbs;

		/* Free any excess ltbs in old set */
		for (i = new_set.num_ltbs; i < old_set.num_ltbs; i++)
			free_long_term_buff(adapter, &old_set.ltbs[i]);

		/* Copy remaining ltbs to new set. All LTBs except the
		 * last one are of the same size. alloc_long_term_buff()
		 * will realloc if the size changes.
		 */
		n = min(old_set.num_ltbs, new_set.num_ltbs);
		for (i = 0; i < n; i++)
			new_set.ltbs[i] = old_set.ltbs[i];

		/* Any additional ltbs in new set will have NULL ltbs for
		 * now and will be allocated in alloc_long_term_buff().
		 */

		/* We no longer need the old_set so free it. Note that we
		 * may have reused some ltbs from old set and freed excess
		 * ltbs above. So we only need to free the container now
		 * not the LTBs themselves. (i.e. dont free_ltb_set()!)
		 */
		kfree(old_set.ltbs);
		old_set.ltbs = NULL;
		old_set.num_ltbs = 0;

		/* Install the new set. If allocations fail below, we will
		 * retry later and know what size LTBs we need.
		 */
		*ltb_set = new_set;
	}

	/* (Re)allocate/map each LTB; the final iteration covers whatever
	 * remainder is left, so only the last LTB can be short.
	 */
	i = 0;
	rem_size = tot_size;
	while (rem_size) {
		if (ltb_size > rem_size)
			ltb_size = rem_size;

		rem_size -= ltb_size;

		rc = alloc_long_term_buff(adapter, &new_set.ltbs[i], ltb_size);
		if (rc)
			goto out;
		i++;
	}

	WARN_ON(i != new_set.num_ltbs);

	return 0;
out:
	/* We may have allocated one/more LTBs before failing and we
	 * want to try and reuse on next reset. So don't free ltb set.
	 */
	return rc;
}
678d6b45850SSukadev Bhattiprolu 
6792872a67cSSukadev Bhattiprolu /**
6802872a67cSSukadev Bhattiprolu  * map_rxpool_buf_to_ltb - Map given rxpool buffer to offset in an LTB.
6812872a67cSSukadev Bhattiprolu  * @rxpool: The receive buffer pool containing buffer
6822872a67cSSukadev Bhattiprolu  * @bufidx: Index of buffer in rxpool
6832872a67cSSukadev Bhattiprolu  * @ltbp: (Output) pointer to the long term buffer containing the buffer
6842872a67cSSukadev Bhattiprolu  * @offset: (Output) offset of buffer in the LTB from @ltbp
6852872a67cSSukadev Bhattiprolu  *
6862872a67cSSukadev Bhattiprolu  * Map the given buffer identified by [rxpool, bufidx] to an LTB in the
687a75de820SSukadev Bhattiprolu  * pool and its corresponding offset. Assume for now that each LTB is of
688a75de820SSukadev Bhattiprolu  * different size but could possibly be optimized based on the allocation
689a75de820SSukadev Bhattiprolu  * strategy in alloc_ltb_set().
6902872a67cSSukadev Bhattiprolu  */
map_rxpool_buf_to_ltb(struct ibmvnic_rx_pool * rxpool,unsigned int bufidx,struct ibmvnic_long_term_buff ** ltbp,unsigned int * offset)6912872a67cSSukadev Bhattiprolu static void map_rxpool_buf_to_ltb(struct ibmvnic_rx_pool *rxpool,
6922872a67cSSukadev Bhattiprolu 				  unsigned int bufidx,
6932872a67cSSukadev Bhattiprolu 				  struct ibmvnic_long_term_buff **ltbp,
6942872a67cSSukadev Bhattiprolu 				  unsigned int *offset)
6952872a67cSSukadev Bhattiprolu {
696a75de820SSukadev Bhattiprolu 	struct ibmvnic_long_term_buff *ltb;
697a75de820SSukadev Bhattiprolu 	int nbufs;	/* # of buffers in one ltb */
698a75de820SSukadev Bhattiprolu 	int i;
699a75de820SSukadev Bhattiprolu 
700a75de820SSukadev Bhattiprolu 	WARN_ON(bufidx >= rxpool->size);
701a75de820SSukadev Bhattiprolu 
702a75de820SSukadev Bhattiprolu 	for (i = 0; i < rxpool->ltb_set.num_ltbs; i++) {
703a75de820SSukadev Bhattiprolu 		ltb = &rxpool->ltb_set.ltbs[i];
704a75de820SSukadev Bhattiprolu 		nbufs = ltb->size / rxpool->buff_size;
705a75de820SSukadev Bhattiprolu 		if (bufidx < nbufs)
706a75de820SSukadev Bhattiprolu 			break;
707a75de820SSukadev Bhattiprolu 		bufidx -= nbufs;
708a75de820SSukadev Bhattiprolu 	}
709a75de820SSukadev Bhattiprolu 
710a75de820SSukadev Bhattiprolu 	*ltbp = ltb;
7112872a67cSSukadev Bhattiprolu 	*offset = bufidx * rxpool->buff_size;
7122872a67cSSukadev Bhattiprolu }
7132872a67cSSukadev Bhattiprolu 
7140c91bf9cSSukadev Bhattiprolu /**
7150c91bf9cSSukadev Bhattiprolu  * map_txpool_buf_to_ltb - Map given txpool buffer to offset in an LTB.
7160c91bf9cSSukadev Bhattiprolu  * @txpool: The transmit buffer pool containing buffer
7170c91bf9cSSukadev Bhattiprolu  * @bufidx: Index of buffer in txpool
7180c91bf9cSSukadev Bhattiprolu  * @ltbp: (Output) pointer to the long term buffer (LTB) containing the buffer
7190c91bf9cSSukadev Bhattiprolu  * @offset: (Output) offset of buffer in the LTB from @ltbp
7200c91bf9cSSukadev Bhattiprolu  *
7210c91bf9cSSukadev Bhattiprolu  * Map the given buffer identified by [txpool, bufidx] to an LTB in the
7220c91bf9cSSukadev Bhattiprolu  * pool and its corresponding offset.
7230c91bf9cSSukadev Bhattiprolu  */
map_txpool_buf_to_ltb(struct ibmvnic_tx_pool * txpool,unsigned int bufidx,struct ibmvnic_long_term_buff ** ltbp,unsigned int * offset)7240c91bf9cSSukadev Bhattiprolu static void map_txpool_buf_to_ltb(struct ibmvnic_tx_pool *txpool,
7250c91bf9cSSukadev Bhattiprolu 				  unsigned int bufidx,
7260c91bf9cSSukadev Bhattiprolu 				  struct ibmvnic_long_term_buff **ltbp,
7270c91bf9cSSukadev Bhattiprolu 				  unsigned int *offset)
7280c91bf9cSSukadev Bhattiprolu {
72993b1ebb3SSukadev Bhattiprolu 	struct ibmvnic_long_term_buff *ltb;
73093b1ebb3SSukadev Bhattiprolu 	int nbufs;	/* # of buffers in one ltb */
73193b1ebb3SSukadev Bhattiprolu 	int i;
73293b1ebb3SSukadev Bhattiprolu 
73393b1ebb3SSukadev Bhattiprolu 	WARN_ON_ONCE(bufidx >= txpool->num_buffers);
73493b1ebb3SSukadev Bhattiprolu 
73593b1ebb3SSukadev Bhattiprolu 	for (i = 0; i < txpool->ltb_set.num_ltbs; i++) {
73693b1ebb3SSukadev Bhattiprolu 		ltb = &txpool->ltb_set.ltbs[i];
73793b1ebb3SSukadev Bhattiprolu 		nbufs = ltb->size / txpool->buf_size;
73893b1ebb3SSukadev Bhattiprolu 		if (bufidx < nbufs)
73993b1ebb3SSukadev Bhattiprolu 			break;
74093b1ebb3SSukadev Bhattiprolu 		bufidx -= nbufs;
74193b1ebb3SSukadev Bhattiprolu 	}
74293b1ebb3SSukadev Bhattiprolu 
74393b1ebb3SSukadev Bhattiprolu 	*ltbp = ltb;
7440c91bf9cSSukadev Bhattiprolu 	*offset = bufidx * txpool->buf_size;
7450c91bf9cSSukadev Bhattiprolu }
7460c91bf9cSSukadev Bhattiprolu 
deactivate_rx_pools(struct ibmvnic_adapter * adapter)747f185a49aSThomas Falcon static void deactivate_rx_pools(struct ibmvnic_adapter *adapter)
748f185a49aSThomas Falcon {
749f185a49aSThomas Falcon 	int i;
750f185a49aSThomas Falcon 
751507ebe64SThomas Falcon 	for (i = 0; i < adapter->num_active_rx_pools; i++)
752f185a49aSThomas Falcon 		adapter->rx_pool[i].active = 0;
753f185a49aSThomas Falcon }
754f185a49aSThomas Falcon 
/* Refill the given rx pool up to its capacity with skb-backed buffers.
 *
 * For each free slot, (re)use or allocate an skb, bind it to its spot in
 * the pool's long term buffer (LTB), and queue an RX_ADD descriptor in
 * the sub-CRQ's indirect buffer. Descriptors are flushed to the VIOS in
 * batches via send_subcrq_indirect(). On a flush failure, every queued
 * but unsent descriptor is unwound: its skb is freed and its index is
 * returned to the free map.
 */
static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
			      struct ibmvnic_rx_pool *pool)
{
	int count = pool->size - atomic_read(&pool->available);
	u64 handle = adapter->rx_scrq[pool->index]->handle;
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_ind_xmit_queue *ind_bufp;
	struct ibmvnic_sub_crq_queue *rx_scrq;
	struct ibmvnic_long_term_buff *ltb;
	union sub_crq *sub_crq;
	int buffers_added = 0;
	unsigned long lpar_rc;
	struct sk_buff *skb;
	unsigned int offset;
	dma_addr_t dma_addr;
	unsigned char *dst;
	int shift = 0;
	int bufidx;
	int i;

	/* Pool may have been deactivated by a previous failure/reset */
	if (!pool->active)
		return;

	rx_scrq = adapter->rx_scrq[pool->index];
	ind_bufp = &rx_scrq->ind_buf;

	/* netdev_skb_alloc() could have failed after we saved a few skbs
	 * in the indir_buf and we would not have sent them to VIOS yet.
	 * To account for them, start the loop at ind_bufp->index rather
	 * than 0. If we pushed all the skbs to VIOS, ind_bufp->index will
	 * be 0.
	 */
	for (i = ind_bufp->index; i < count; ++i) {
		bufidx = pool->free_map[pool->next_free];

		/* We maybe reusing the skb from earlier resets. Allocate
		 * only if necessary. But since the LTB may have changed
		 * during reset (see init_rx_pools()), update LTB below
		 * even if reusing skb.
		 */
		skb = pool->rx_buff[bufidx].skb;
		if (!skb) {
			skb = netdev_alloc_skb(adapter->netdev,
					       pool->buff_size);
			if (!skb) {
				dev_err(dev, "Couldn't replenish rx buff\n");
				adapter->replenish_no_mem++;
				break;
			}
		}

		/* Claim the slot; free_map entries advance modulo pool size */
		pool->free_map[pool->next_free] = IBMVNIC_INVALID_MAP;
		pool->next_free = (pool->next_free + 1) % pool->size;

		/* Copy the skb to the long term mapped DMA buffer */
		map_rxpool_buf_to_ltb(pool, bufidx, &ltb, &offset);
		dst = ltb->buff + offset;
		memset(dst, 0, pool->buff_size);
		dma_addr = ltb->addr + offset;

		/* add the skb to an rx_buff in the pool */
		pool->rx_buff[bufidx].data = dst;
		pool->rx_buff[bufidx].dma = dma_addr;
		pool->rx_buff[bufidx].skb = skb;
		pool->rx_buff[bufidx].pool_index = pool->index;
		pool->rx_buff[bufidx].size = pool->buff_size;

		/* queue the rx_buff for the next send_subcrq_indirect */
		sub_crq = &ind_bufp->indir_arr[ind_bufp->index++];
		memset(sub_crq, 0, sizeof(*sub_crq));
		sub_crq->rx_add.first = IBMVNIC_CRQ_CMD;
		sub_crq->rx_add.correlator =
		    cpu_to_be64((u64)&pool->rx_buff[bufidx]);
		sub_crq->rx_add.ioba = cpu_to_be32(dma_addr);
		sub_crq->rx_add.map_id = ltb->map_id;

		/* The length field of the sCRQ is defined to be 24 bits so the
		 * buffer size needs to be left shifted by a byte before it is
		 * converted to big endian to prevent the last byte from being
		 * truncated.
		 */
#ifdef __LITTLE_ENDIAN__
		shift = 8;
#endif
		sub_crq->rx_add.len = cpu_to_be32(pool->buff_size << shift);

		/* if send_subcrq_indirect queue is full, flush to VIOS */
		if (ind_bufp->index == IBMVNIC_MAX_IND_DESCS ||
		    i == count - 1) {
			lpar_rc =
				send_subcrq_indirect(adapter, handle,
						     (u64)ind_bufp->indir_dma,
						     (u64)ind_bufp->index);
			if (lpar_rc != H_SUCCESS)
				goto failure;
			buffers_added += ind_bufp->index;
			adapter->replenish_add_buff_success += ind_bufp->index;
			ind_bufp->index = 0;
		}
	}
	atomic_add(buffers_added, &pool->available);
	return;

failure:
	if (lpar_rc != H_PARAMETER && lpar_rc != H_CLOSED)
		dev_err_ratelimited(dev, "rx: replenish packet buffer failed\n");
	/* Unwind the descriptors still sitting in the indirect buffer:
	 * walk them in reverse, rewinding next_free, returning each
	 * buffer index to the free map and freeing its skb.
	 */
	for (i = ind_bufp->index - 1; i >= 0; --i) {
		struct ibmvnic_rx_buff *rx_buff;

		pool->next_free = pool->next_free == 0 ?
				  pool->size - 1 : pool->next_free - 1;
		sub_crq = &ind_bufp->indir_arr[i];
		rx_buff = (struct ibmvnic_rx_buff *)
				be64_to_cpu(sub_crq->rx_add.correlator);
		bufidx = (int)(rx_buff - pool->rx_buff);
		pool->free_map[pool->next_free] = bufidx;
		dev_kfree_skb_any(pool->rx_buff[bufidx].skb);
		pool->rx_buff[bufidx].skb = NULL;
	}
	adapter->replenish_add_buff_failure += ind_bufp->index;
	atomic_add(buffers_added, &pool->available);
	ind_bufp->index = 0;
	if (lpar_rc == H_CLOSED || adapter->failover_pending) {
		/* Disable buffer pool replenishment and report carrier off if
		 * queue is closed or pending failover.
		 * Firmware guarantees that a signal will be sent to the
		 * driver, triggering a reset.
		 */
		deactivate_rx_pools(adapter);
		netif_carrier_off(adapter->netdev);
	}
}
887032c5e82SThomas Falcon 
replenish_pools(struct ibmvnic_adapter * adapter)888032c5e82SThomas Falcon static void replenish_pools(struct ibmvnic_adapter *adapter)
889032c5e82SThomas Falcon {
890032c5e82SThomas Falcon 	int i;
891032c5e82SThomas Falcon 
892032c5e82SThomas Falcon 	adapter->replenish_task_cycles++;
893507ebe64SThomas Falcon 	for (i = 0; i < adapter->num_active_rx_pools; i++) {
894032c5e82SThomas Falcon 		if (adapter->rx_pool[i].active)
895032c5e82SThomas Falcon 			replenish_rx_pool(adapter, &adapter->rx_pool[i]);
896032c5e82SThomas Falcon 	}
89738bd5cecSSukadev Bhattiprolu 
89838bd5cecSSukadev Bhattiprolu 	netdev_dbg(adapter->netdev, "Replenished %d pools\n", i);
899032c5e82SThomas Falcon }
900032c5e82SThomas Falcon 
release_stats_buffers(struct ibmvnic_adapter * adapter)9013d52b594SJohn Allen static void release_stats_buffers(struct ibmvnic_adapter *adapter)
9023d52b594SJohn Allen {
9033d52b594SJohn Allen 	kfree(adapter->tx_stats_buffers);
9043d52b594SJohn Allen 	kfree(adapter->rx_stats_buffers);
905b0992ecaSThomas Falcon 	adapter->tx_stats_buffers = NULL;
906b0992ecaSThomas Falcon 	adapter->rx_stats_buffers = NULL;
9073d52b594SJohn Allen }
9083d52b594SJohn Allen 
init_stats_buffers(struct ibmvnic_adapter * adapter)9093d52b594SJohn Allen static int init_stats_buffers(struct ibmvnic_adapter *adapter)
9103d52b594SJohn Allen {
9113d52b594SJohn Allen 	adapter->tx_stats_buffers =
912abcae546SNathan Fontenot 				kcalloc(IBMVNIC_MAX_QUEUES,
9133d52b594SJohn Allen 					sizeof(struct ibmvnic_tx_queue_stats),
9143d52b594SJohn Allen 					GFP_KERNEL);
9153d52b594SJohn Allen 	if (!adapter->tx_stats_buffers)
9163d52b594SJohn Allen 		return -ENOMEM;
9173d52b594SJohn Allen 
9183d52b594SJohn Allen 	adapter->rx_stats_buffers =
919abcae546SNathan Fontenot 				kcalloc(IBMVNIC_MAX_QUEUES,
9203d52b594SJohn Allen 					sizeof(struct ibmvnic_rx_queue_stats),
9213d52b594SJohn Allen 					GFP_KERNEL);
9223d52b594SJohn Allen 	if (!adapter->rx_stats_buffers)
9233d52b594SJohn Allen 		return -ENOMEM;
9243d52b594SJohn Allen 
9253d52b594SJohn Allen 	return 0;
9263d52b594SJohn Allen }
9273d52b594SJohn Allen 
release_stats_token(struct ibmvnic_adapter * adapter)9287bbc27a4SNathan Fontenot static void release_stats_token(struct ibmvnic_adapter *adapter)
9297bbc27a4SNathan Fontenot {
9307bbc27a4SNathan Fontenot 	struct device *dev = &adapter->vdev->dev;
9317bbc27a4SNathan Fontenot 
9327bbc27a4SNathan Fontenot 	if (!adapter->stats_token)
9337bbc27a4SNathan Fontenot 		return;
9347bbc27a4SNathan Fontenot 
9357bbc27a4SNathan Fontenot 	dma_unmap_single(dev, adapter->stats_token,
9367bbc27a4SNathan Fontenot 			 sizeof(struct ibmvnic_statistics),
9377bbc27a4SNathan Fontenot 			 DMA_FROM_DEVICE);
9387bbc27a4SNathan Fontenot 	adapter->stats_token = 0;
9397bbc27a4SNathan Fontenot }
9407bbc27a4SNathan Fontenot 
init_stats_token(struct ibmvnic_adapter * adapter)9417bbc27a4SNathan Fontenot static int init_stats_token(struct ibmvnic_adapter *adapter)
9427bbc27a4SNathan Fontenot {
9437bbc27a4SNathan Fontenot 	struct device *dev = &adapter->vdev->dev;
9447bbc27a4SNathan Fontenot 	dma_addr_t stok;
945b6ee566cSDany Madden 	int rc;
9467bbc27a4SNathan Fontenot 
9477bbc27a4SNathan Fontenot 	stok = dma_map_single(dev, &adapter->stats,
9487bbc27a4SNathan Fontenot 			      sizeof(struct ibmvnic_statistics),
9497bbc27a4SNathan Fontenot 			      DMA_FROM_DEVICE);
950b6ee566cSDany Madden 	rc = dma_mapping_error(dev, stok);
951b6ee566cSDany Madden 	if (rc) {
952b6ee566cSDany Madden 		dev_err(dev, "Couldn't map stats buffer, rc = %d\n", rc);
953b6ee566cSDany Madden 		return rc;
9547bbc27a4SNathan Fontenot 	}
9557bbc27a4SNathan Fontenot 
9567bbc27a4SNathan Fontenot 	adapter->stats_token = stok;
957d1cf33d9SNathan Fontenot 	netdev_dbg(adapter->netdev, "Stats token initialized (%llx)\n", stok);
9587bbc27a4SNathan Fontenot 	return 0;
9597bbc27a4SNathan Fontenot }
9607bbc27a4SNathan Fontenot 
9610f2bf318SSukadev Bhattiprolu /**
9620f2bf318SSukadev Bhattiprolu  * release_rx_pools() - Release any rx pools attached to @adapter.
9630f2bf318SSukadev Bhattiprolu  * @adapter: ibmvnic adapter
9640f2bf318SSukadev Bhattiprolu  *
9650f2bf318SSukadev Bhattiprolu  * Safe to call this multiple times - even if no pools are attached.
9660f2bf318SSukadev Bhattiprolu  */
release_rx_pools(struct ibmvnic_adapter * adapter)9670ffe2cb7SNathan Fontenot static void release_rx_pools(struct ibmvnic_adapter *adapter)
968032c5e82SThomas Falcon {
9690ffe2cb7SNathan Fontenot 	struct ibmvnic_rx_pool *rx_pool;
9700ffe2cb7SNathan Fontenot 	int i, j;
971032c5e82SThomas Falcon 
9720ffe2cb7SNathan Fontenot 	if (!adapter->rx_pool)
973032c5e82SThomas Falcon 		return;
974032c5e82SThomas Falcon 
97582e3be32SNathan Fontenot 	for (i = 0; i < adapter->num_active_rx_pools; i++) {
9760ffe2cb7SNathan Fontenot 		rx_pool = &adapter->rx_pool[i];
9770ffe2cb7SNathan Fontenot 
978d1cf33d9SNathan Fontenot 		netdev_dbg(adapter->netdev, "Releasing rx_pool[%d]\n", i);
979d1cf33d9SNathan Fontenot 
9800ffe2cb7SNathan Fontenot 		kfree(rx_pool->free_map);
981489de956SSukadev Bhattiprolu 
982d6b45850SSukadev Bhattiprolu 		free_ltb_set(adapter, &rx_pool->ltb_set);
9830ffe2cb7SNathan Fontenot 
9840ffe2cb7SNathan Fontenot 		if (!rx_pool->rx_buff)
9850ffe2cb7SNathan Fontenot 			continue;
9860ffe2cb7SNathan Fontenot 
9870ffe2cb7SNathan Fontenot 		for (j = 0; j < rx_pool->size; j++) {
9880ffe2cb7SNathan Fontenot 			if (rx_pool->rx_buff[j].skb) {
989b7cdec3dSThomas Falcon 				dev_kfree_skb_any(rx_pool->rx_buff[j].skb);
990b7cdec3dSThomas Falcon 				rx_pool->rx_buff[j].skb = NULL;
991032c5e82SThomas Falcon 			}
992032c5e82SThomas Falcon 		}
9930ffe2cb7SNathan Fontenot 
9940ffe2cb7SNathan Fontenot 		kfree(rx_pool->rx_buff);
9950ffe2cb7SNathan Fontenot 	}
9960ffe2cb7SNathan Fontenot 
9970ffe2cb7SNathan Fontenot 	kfree(adapter->rx_pool);
9980ffe2cb7SNathan Fontenot 	adapter->rx_pool = NULL;
99982e3be32SNathan Fontenot 	adapter->num_active_rx_pools = 0;
1000489de956SSukadev Bhattiprolu 	adapter->prev_rx_pool_size = 0;
10010ffe2cb7SNathan Fontenot }
10020ffe2cb7SNathan Fontenot 
1003489de956SSukadev Bhattiprolu /**
1004489de956SSukadev Bhattiprolu  * reuse_rx_pools() - Check if the existing rx pools can be reused.
1005489de956SSukadev Bhattiprolu  * @adapter: ibmvnic adapter
1006489de956SSukadev Bhattiprolu  *
1007489de956SSukadev Bhattiprolu  * Check if the existing rx pools in the adapter can be reused. The
1008489de956SSukadev Bhattiprolu  * pools can be reused if the pool parameters (number of pools,
1009489de956SSukadev Bhattiprolu  * number of buffers in the pool and size of each buffer) have not
1010489de956SSukadev Bhattiprolu  * changed.
1011489de956SSukadev Bhattiprolu  *
1012489de956SSukadev Bhattiprolu  * NOTE: This assumes that all pools have the same number of buffers
1013489de956SSukadev Bhattiprolu  *       which is the case currently. If that changes, we must fix this.
1014489de956SSukadev Bhattiprolu  *
1015489de956SSukadev Bhattiprolu  * Return: true if the rx pools can be reused, false otherwise.
1016489de956SSukadev Bhattiprolu  */
reuse_rx_pools(struct ibmvnic_adapter * adapter)1017489de956SSukadev Bhattiprolu static bool reuse_rx_pools(struct ibmvnic_adapter *adapter)
1018489de956SSukadev Bhattiprolu {
1019489de956SSukadev Bhattiprolu 	u64 old_num_pools, new_num_pools;
1020489de956SSukadev Bhattiprolu 	u64 old_pool_size, new_pool_size;
1021489de956SSukadev Bhattiprolu 	u64 old_buff_size, new_buff_size;
1022489de956SSukadev Bhattiprolu 
1023489de956SSukadev Bhattiprolu 	if (!adapter->rx_pool)
1024489de956SSukadev Bhattiprolu 		return false;
1025489de956SSukadev Bhattiprolu 
1026489de956SSukadev Bhattiprolu 	old_num_pools = adapter->num_active_rx_pools;
1027489de956SSukadev Bhattiprolu 	new_num_pools = adapter->req_rx_queues;
1028489de956SSukadev Bhattiprolu 
1029489de956SSukadev Bhattiprolu 	old_pool_size = adapter->prev_rx_pool_size;
1030489de956SSukadev Bhattiprolu 	new_pool_size = adapter->req_rx_add_entries_per_subcrq;
1031489de956SSukadev Bhattiprolu 
1032489de956SSukadev Bhattiprolu 	old_buff_size = adapter->prev_rx_buf_sz;
1033489de956SSukadev Bhattiprolu 	new_buff_size = adapter->cur_rx_buf_sz;
1034489de956SSukadev Bhattiprolu 
10350584f494SSukadev Bhattiprolu 	if (old_buff_size != new_buff_size ||
10360584f494SSukadev Bhattiprolu 	    old_num_pools != new_num_pools ||
10370584f494SSukadev Bhattiprolu 	    old_pool_size != new_pool_size)
1038489de956SSukadev Bhattiprolu 		return false;
1039489de956SSukadev Bhattiprolu 
1040489de956SSukadev Bhattiprolu 	return true;
1041489de956SSukadev Bhattiprolu }
1042489de956SSukadev Bhattiprolu 
1043489de956SSukadev Bhattiprolu /**
1044489de956SSukadev Bhattiprolu  * init_rx_pools(): Initialize the set of receiver pools in the adapter.
1045489de956SSukadev Bhattiprolu  * @netdev: net device associated with the vnic interface
1046489de956SSukadev Bhattiprolu  *
1047489de956SSukadev Bhattiprolu  * Initialize the set of receiver pools in the ibmvnic adapter associated
1048489de956SSukadev Bhattiprolu  * with the net_device @netdev. If possible, reuse the existing rx pools.
1049489de956SSukadev Bhattiprolu  * Otherwise free any existing pools and  allocate a new set of pools
1050489de956SSukadev Bhattiprolu  * before initializing them.
1051489de956SSukadev Bhattiprolu  *
1052489de956SSukadev Bhattiprolu  * Return: 0 on success and negative value on error.
1053489de956SSukadev Bhattiprolu  */
static int init_rx_pools(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_rx_pool *rx_pool;
	u64 num_pools;
	u64 pool_size;		/* # of buffers in one pool */
	u64 buff_size;
	int i, j, rc;

	pool_size = adapter->req_rx_add_entries_per_subcrq;
	num_pools = adapter->req_rx_queues;
	buff_size = adapter->cur_rx_buf_sz;

	/* If the pool geometry (count, size, buffer size) is unchanged from
	 * the previous open, keep the existing pools and only refresh the
	 * long term buffer (LTB) mappings with the VIOS below.
	 */
	if (reuse_rx_pools(adapter)) {
		dev_dbg(dev, "Reusing rx pools\n");
		goto update_ltb;
	}

	/* Allocate/populate the pools. */
	release_rx_pools(adapter);

	adapter->rx_pool = kcalloc(num_pools,
				   sizeof(struct ibmvnic_rx_pool),
				   GFP_KERNEL);
	if (!adapter->rx_pool) {
		dev_err(dev, "Failed to allocate rx pools\n");
		return -ENOMEM;
	}

	/* Set num_active_rx_pools early. If we fail below after partial
	 * allocation, release_rx_pools() will know how many to look for.
	 */
	adapter->num_active_rx_pools = num_pools;

	for (i = 0; i < num_pools; i++) {
		rx_pool = &adapter->rx_pool[i];

		netdev_dbg(adapter->netdev,
			   "Initializing rx_pool[%d], %lld buffs, %lld bytes each\n",
			   i, pool_size, buff_size);

		rx_pool->size = pool_size;
		rx_pool->index = i;
		/* Round each buffer up to a cache-line multiple */
		rx_pool->buff_size = ALIGN(buff_size, L1_CACHE_BYTES);

		rx_pool->free_map = kcalloc(rx_pool->size, sizeof(int),
					    GFP_KERNEL);
		if (!rx_pool->free_map) {
			dev_err(dev, "Couldn't alloc free_map %d\n", i);
			rc = -ENOMEM;
			goto out_release;
		}

		rx_pool->rx_buff = kcalloc(rx_pool->size,
					   sizeof(struct ibmvnic_rx_buff),
					   GFP_KERNEL);
		if (!rx_pool->rx_buff) {
			dev_err(dev, "Couldn't alloc rx buffers\n");
			rc = -ENOMEM;
			goto out_release;
		}
	}

	/* Remember the geometry we just allocated so a later call can
	 * decide (via reuse_rx_pools()) whether the pools can be reused.
	 */
	adapter->prev_rx_pool_size = pool_size;
	adapter->prev_rx_buf_sz = adapter->cur_rx_buf_sz;

update_ltb:
	/* (Re)map the long term buffers with the VIOS for every pool.
	 * This runs on every reset, even when the pools themselves were
	 * reused.
	 */
	for (i = 0; i < num_pools; i++) {
		rx_pool = &adapter->rx_pool[i];
		dev_dbg(dev, "Updating LTB for rx pool %d [%d, %d]\n",
			i, rx_pool->size, rx_pool->buff_size);

		rc = alloc_ltb_set(adapter, &rx_pool->ltb_set,
				   rx_pool->size, rx_pool->buff_size);
		if (rc)
			goto out;

		for (j = 0; j < rx_pool->size; ++j) {
			struct ibmvnic_rx_buff *rx_buff;

			rx_pool->free_map[j] = j;

			/* NOTE: Don't clear rx_buff->skb here - will leak
			 * memory! replenish_rx_pool() will reuse skbs or
			 * allocate as necessary.
			 */
			rx_buff = &rx_pool->rx_buff[j];
			rx_buff->dma = 0;
			rx_buff->data = 0;
			rx_buff->size = 0;
			rx_buff->pool_index = 0;
		}

		/* Mark pool "empty" so replenish_rx_pools() will
		 * update the LTB info for each buffer
		 */
		atomic_set(&rx_pool->available, 0);
		rx_pool->next_alloc = 0;
		rx_pool->next_free = 0;
		/* replenish_rx_pool() may have called deactivate_rx_pools()
		 * on failover. Ensure pool is active now.
		 */
		rx_pool->active = 1;
	}
	return 0;
out_release:
	release_rx_pools(adapter);
out:
	/* We failed to allocate one or more LTBs or map them on the VIOS.
	 * Hold onto the pools and any LTBs that we did allocate/map.
	 */
	return rc;
}
1168032c5e82SThomas Falcon 
release_vpd_data(struct ibmvnic_adapter * adapter)11694e6759beSDesnes Augusto Nunes do Rosario static void release_vpd_data(struct ibmvnic_adapter *adapter)
11704e6759beSDesnes Augusto Nunes do Rosario {
11714e6759beSDesnes Augusto Nunes do Rosario 	if (!adapter->vpd)
11724e6759beSDesnes Augusto Nunes do Rosario 		return;
11734e6759beSDesnes Augusto Nunes do Rosario 
11744e6759beSDesnes Augusto Nunes do Rosario 	kfree(adapter->vpd->buff);
11754e6759beSDesnes Augusto Nunes do Rosario 	kfree(adapter->vpd);
1176b0992ecaSThomas Falcon 
1177b0992ecaSThomas Falcon 	adapter->vpd = NULL;
11784e6759beSDesnes Augusto Nunes do Rosario }
11794e6759beSDesnes Augusto Nunes do Rosario 
release_one_tx_pool(struct ibmvnic_adapter * adapter,struct ibmvnic_tx_pool * tx_pool)1180fb79421cSThomas Falcon static void release_one_tx_pool(struct ibmvnic_adapter *adapter,
1181fb79421cSThomas Falcon 				struct ibmvnic_tx_pool *tx_pool)
1182fb79421cSThomas Falcon {
1183fb79421cSThomas Falcon 	kfree(tx_pool->tx_buff);
1184fb79421cSThomas Falcon 	kfree(tx_pool->free_map);
118593b1ebb3SSukadev Bhattiprolu 	free_ltb_set(adapter, &tx_pool->ltb_set);
1186fb79421cSThomas Falcon }
1187fb79421cSThomas Falcon 
11880f2bf318SSukadev Bhattiprolu /**
11890f2bf318SSukadev Bhattiprolu  * release_tx_pools() - Release any tx pools attached to @adapter.
11900f2bf318SSukadev Bhattiprolu  * @adapter: ibmvnic adapter
11910f2bf318SSukadev Bhattiprolu  *
11920f2bf318SSukadev Bhattiprolu  * Safe to call this multiple times - even if no pools are attached.
11930f2bf318SSukadev Bhattiprolu  */
release_tx_pools(struct ibmvnic_adapter * adapter)1194c657e32cSNathan Fontenot static void release_tx_pools(struct ibmvnic_adapter *adapter)
1195c657e32cSNathan Fontenot {
1196896d8695SJohn Allen 	int i;
1197c657e32cSNathan Fontenot 
11980f2bf318SSukadev Bhattiprolu 	/* init_tx_pools() ensures that ->tx_pool and ->tso_pool are
11990f2bf318SSukadev Bhattiprolu 	 * both NULL or both non-NULL. So we only need to check one.
12000f2bf318SSukadev Bhattiprolu 	 */
1201c657e32cSNathan Fontenot 	if (!adapter->tx_pool)
1202c657e32cSNathan Fontenot 		return;
1203c657e32cSNathan Fontenot 
120482e3be32SNathan Fontenot 	for (i = 0; i < adapter->num_active_tx_pools; i++) {
1205fb79421cSThomas Falcon 		release_one_tx_pool(adapter, &adapter->tx_pool[i]);
1206fb79421cSThomas Falcon 		release_one_tx_pool(adapter, &adapter->tso_pool[i]);
1207c657e32cSNathan Fontenot 	}
1208c657e32cSNathan Fontenot 
1209c657e32cSNathan Fontenot 	kfree(adapter->tx_pool);
1210c657e32cSNathan Fontenot 	adapter->tx_pool = NULL;
1211fb79421cSThomas Falcon 	kfree(adapter->tso_pool);
1212fb79421cSThomas Falcon 	adapter->tso_pool = NULL;
121382e3be32SNathan Fontenot 	adapter->num_active_tx_pools = 0;
1214bbd80930SSukadev Bhattiprolu 	adapter->prev_tx_pool_size = 0;
1215c657e32cSNathan Fontenot }
1216c657e32cSNathan Fontenot 
init_one_tx_pool(struct net_device * netdev,struct ibmvnic_tx_pool * tx_pool,int pool_size,int buf_size)12173205306cSThomas Falcon static int init_one_tx_pool(struct net_device *netdev,
12183205306cSThomas Falcon 			    struct ibmvnic_tx_pool *tx_pool,
12198243c7edSSukadev Bhattiprolu 			    int pool_size, int buf_size)
12203205306cSThomas Falcon {
12213205306cSThomas Falcon 	int i;
12223205306cSThomas Falcon 
12238243c7edSSukadev Bhattiprolu 	tx_pool->tx_buff = kcalloc(pool_size,
12243205306cSThomas Falcon 				   sizeof(struct ibmvnic_tx_buff),
12253205306cSThomas Falcon 				   GFP_KERNEL);
12263205306cSThomas Falcon 	if (!tx_pool->tx_buff)
1227b6ee566cSDany Madden 		return -ENOMEM;
12283205306cSThomas Falcon 
12298243c7edSSukadev Bhattiprolu 	tx_pool->free_map = kcalloc(pool_size, sizeof(int), GFP_KERNEL);
1230bbd80930SSukadev Bhattiprolu 	if (!tx_pool->free_map) {
1231bbd80930SSukadev Bhattiprolu 		kfree(tx_pool->tx_buff);
1232bbd80930SSukadev Bhattiprolu 		tx_pool->tx_buff = NULL;
1233b6ee566cSDany Madden 		return -ENOMEM;
1234bbd80930SSukadev Bhattiprolu 	}
12353205306cSThomas Falcon 
12368243c7edSSukadev Bhattiprolu 	for (i = 0; i < pool_size; i++)
12373205306cSThomas Falcon 		tx_pool->free_map[i] = i;
12383205306cSThomas Falcon 
12393205306cSThomas Falcon 	tx_pool->consumer_index = 0;
12403205306cSThomas Falcon 	tx_pool->producer_index = 0;
12418243c7edSSukadev Bhattiprolu 	tx_pool->num_buffers = pool_size;
12423205306cSThomas Falcon 	tx_pool->buf_size = buf_size;
12433205306cSThomas Falcon 
12443205306cSThomas Falcon 	return 0;
12453205306cSThomas Falcon }
12463205306cSThomas Falcon 
1247bbd80930SSukadev Bhattiprolu /**
1248bbd80930SSukadev Bhattiprolu  * reuse_tx_pools() - Check if the existing tx pools can be reused.
1249bbd80930SSukadev Bhattiprolu  * @adapter: ibmvnic adapter
1250bbd80930SSukadev Bhattiprolu  *
1251bbd80930SSukadev Bhattiprolu  * Check if the existing tx pools in the adapter can be reused. The
1252bbd80930SSukadev Bhattiprolu  * pools can be reused if the pool parameters (number of pools,
1253bbd80930SSukadev Bhattiprolu  * number of buffers in the pool and mtu) have not changed.
1254bbd80930SSukadev Bhattiprolu  *
1255bbd80930SSukadev Bhattiprolu  * NOTE: This assumes that all pools have the same number of buffers
1256bbd80930SSukadev Bhattiprolu  *       which is the case currently. If that changes, we must fix this.
1257bbd80930SSukadev Bhattiprolu  *
1258bbd80930SSukadev Bhattiprolu  * Return: true if the tx pools can be reused, false otherwise.
1259bbd80930SSukadev Bhattiprolu  */
reuse_tx_pools(struct ibmvnic_adapter * adapter)1260bbd80930SSukadev Bhattiprolu static bool reuse_tx_pools(struct ibmvnic_adapter *adapter)
1261bbd80930SSukadev Bhattiprolu {
1262bbd80930SSukadev Bhattiprolu 	u64 old_num_pools, new_num_pools;
1263bbd80930SSukadev Bhattiprolu 	u64 old_pool_size, new_pool_size;
1264bbd80930SSukadev Bhattiprolu 	u64 old_mtu, new_mtu;
1265bbd80930SSukadev Bhattiprolu 
1266bbd80930SSukadev Bhattiprolu 	if (!adapter->tx_pool)
1267bbd80930SSukadev Bhattiprolu 		return false;
1268bbd80930SSukadev Bhattiprolu 
1269bbd80930SSukadev Bhattiprolu 	old_num_pools = adapter->num_active_tx_pools;
1270bbd80930SSukadev Bhattiprolu 	new_num_pools = adapter->num_active_tx_scrqs;
1271bbd80930SSukadev Bhattiprolu 	old_pool_size = adapter->prev_tx_pool_size;
1272bbd80930SSukadev Bhattiprolu 	new_pool_size = adapter->req_tx_entries_per_subcrq;
1273bbd80930SSukadev Bhattiprolu 	old_mtu = adapter->prev_mtu;
1274bbd80930SSukadev Bhattiprolu 	new_mtu = adapter->req_mtu;
1275bbd80930SSukadev Bhattiprolu 
12765b085601SSukadev Bhattiprolu 	if (old_mtu != new_mtu ||
12775b085601SSukadev Bhattiprolu 	    old_num_pools != new_num_pools ||
12785b085601SSukadev Bhattiprolu 	    old_pool_size != new_pool_size)
1279bbd80930SSukadev Bhattiprolu 		return false;
1280bbd80930SSukadev Bhattiprolu 
1281bbd80930SSukadev Bhattiprolu 	return true;
1282bbd80930SSukadev Bhattiprolu }
1283bbd80930SSukadev Bhattiprolu 
1284bbd80930SSukadev Bhattiprolu /**
1285bbd80930SSukadev Bhattiprolu  * init_tx_pools(): Initialize the set of transmit pools in the adapter.
1286bbd80930SSukadev Bhattiprolu  * @netdev: net device associated with the vnic interface
1287bbd80930SSukadev Bhattiprolu  *
1288bbd80930SSukadev Bhattiprolu  * Initialize the set of transmit pools in the ibmvnic adapter associated
1289bbd80930SSukadev Bhattiprolu  * with the net_device @netdev. If possible, reuse the existing tx pools.
1290bbd80930SSukadev Bhattiprolu  * Otherwise free any existing pools and  allocate a new set of pools
1291bbd80930SSukadev Bhattiprolu  * before initializing them.
1292bbd80930SSukadev Bhattiprolu  *
1293bbd80930SSukadev Bhattiprolu  * Return: 0 on success and negative value on error.
1294bbd80930SSukadev Bhattiprolu  */
static int init_tx_pools(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->vdev->dev;
	int num_pools;
	u64 pool_size;		/* # of buffers in pool */
	u64 buff_size;
	int i, j, rc;

	num_pools = adapter->req_tx_queues;

	/* We must notify the VIOS about the LTB on all resets - but we only
	 * need to alloc/populate pools if either the number of buffers or
	 * size of each buffer in the pool has changed.
	 */
	if (reuse_tx_pools(adapter)) {
		netdev_dbg(netdev, "Reusing tx pools\n");
		goto update_ltb;
	}

	/* Allocate/populate the pools. */
	release_tx_pools(adapter);

	pool_size = adapter->req_tx_entries_per_subcrq;
	num_pools = adapter->num_active_tx_scrqs;

	adapter->tx_pool = kcalloc(num_pools,
				   sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
	if (!adapter->tx_pool)
		return -ENOMEM;

	adapter->tso_pool = kcalloc(num_pools,
				    sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
	/* To simplify release_tx_pools() ensure that ->tx_pool and
	 * ->tso_pool are either both NULL or both non-NULL.
	 */
	if (!adapter->tso_pool) {
		kfree(adapter->tx_pool);
		adapter->tx_pool = NULL;
		return -ENOMEM;
	}

	/* Set num_active_tx_pools early. If we fail below after partial
	 * allocation, release_tx_pools() will know how many to look for.
	 */
	adapter->num_active_tx_pools = num_pools;

	/* Each buffer must hold a full MTU-sized frame plus the VLAN tag,
	 * rounded up to a cache-line multiple.
	 */
	buff_size = adapter->req_mtu + VLAN_HLEN;
	buff_size = ALIGN(buff_size, L1_CACHE_BYTES);

	for (i = 0; i < num_pools; i++) {
		dev_dbg(dev, "Init tx pool %d [%llu, %llu]\n",
			i, adapter->req_tx_entries_per_subcrq, buff_size);

		rc = init_one_tx_pool(netdev, &adapter->tx_pool[i],
				      pool_size, buff_size);
		if (rc)
			goto out_release;

		rc = init_one_tx_pool(netdev, &adapter->tso_pool[i],
				      IBMVNIC_TSO_BUFS,
				      IBMVNIC_TSO_BUF_SZ);
		if (rc)
			goto out_release;
	}

	/* Record the geometry so reuse_tx_pools() can match against it on
	 * the next reset.
	 */
	adapter->prev_tx_pool_size = pool_size;
	adapter->prev_mtu = adapter->req_mtu;

update_ltb:
	/* NOTE: All tx_pools have the same number of buffers (which is
	 *       same as pool_size). All tso_pools have IBMVNIC_TSO_BUFS
	 *       buffers (see calls init_one_tx_pool() for these).
	 *       For consistency, we use tx_pool->num_buffers and
	 *       tso_pool->num_buffers below.
	 */
	rc = -1;	/* pessimistic init; set by alloc_ltb_set() below */
	for (i = 0; i < num_pools; i++) {
		struct ibmvnic_tx_pool *tso_pool;
		struct ibmvnic_tx_pool *tx_pool;

		tx_pool = &adapter->tx_pool[i];

		dev_dbg(dev, "Updating LTB for tx pool %d [%d, %d]\n",
			i, tx_pool->num_buffers, tx_pool->buf_size);

		rc = alloc_ltb_set(adapter, &tx_pool->ltb_set,
				   tx_pool->num_buffers, tx_pool->buf_size);
		if (rc)
			goto out;

		tx_pool->consumer_index = 0;
		tx_pool->producer_index = 0;

		/* Reset the free map: every slot free, in index order */
		for (j = 0; j < tx_pool->num_buffers; j++)
			tx_pool->free_map[j] = j;

		tso_pool = &adapter->tso_pool[i];

		dev_dbg(dev, "Updating LTB for tso pool %d [%d, %d]\n",
			i, tso_pool->num_buffers, tso_pool->buf_size);

		rc = alloc_ltb_set(adapter, &tso_pool->ltb_set,
				   tso_pool->num_buffers, tso_pool->buf_size);
		if (rc)
			goto out;

		tso_pool->consumer_index = 0;
		tso_pool->producer_index = 0;

		for (j = 0; j < tso_pool->num_buffers; j++)
			tso_pool->free_map[j] = j;
	}

	return 0;
out_release:
	release_tx_pools(adapter);
out:
	/* We failed to allocate one or more LTBs or map them on the VIOS.
	 * Hold onto the pools and any LTBs that we did allocate/map.
	 */
	return rc;
}
1418c657e32cSNathan Fontenot 
ibmvnic_napi_enable(struct ibmvnic_adapter * adapter)1419d944c3d6SJohn Allen static void ibmvnic_napi_enable(struct ibmvnic_adapter *adapter)
1420d944c3d6SJohn Allen {
1421d944c3d6SJohn Allen 	int i;
1422d944c3d6SJohn Allen 
1423d944c3d6SJohn Allen 	if (adapter->napi_enabled)
1424d944c3d6SJohn Allen 		return;
1425d944c3d6SJohn Allen 
1426d944c3d6SJohn Allen 	for (i = 0; i < adapter->req_rx_queues; i++)
1427d944c3d6SJohn Allen 		napi_enable(&adapter->napi[i]);
1428d944c3d6SJohn Allen 
1429d944c3d6SJohn Allen 	adapter->napi_enabled = true;
1430d944c3d6SJohn Allen }
1431d944c3d6SJohn Allen 
ibmvnic_napi_disable(struct ibmvnic_adapter * adapter)1432d944c3d6SJohn Allen static void ibmvnic_napi_disable(struct ibmvnic_adapter *adapter)
1433d944c3d6SJohn Allen {
1434d944c3d6SJohn Allen 	int i;
1435d944c3d6SJohn Allen 
1436d944c3d6SJohn Allen 	if (!adapter->napi_enabled)
1437d944c3d6SJohn Allen 		return;
1438d944c3d6SJohn Allen 
1439d1cf33d9SNathan Fontenot 	for (i = 0; i < adapter->req_rx_queues; i++) {
1440d1cf33d9SNathan Fontenot 		netdev_dbg(adapter->netdev, "Disabling napi[%d]\n", i);
1441d944c3d6SJohn Allen 		napi_disable(&adapter->napi[i]);
1442d1cf33d9SNathan Fontenot 	}
1443d944c3d6SJohn Allen 
1444d944c3d6SJohn Allen 	adapter->napi_enabled = false;
1445d944c3d6SJohn Allen }
1446d944c3d6SJohn Allen 
init_napi(struct ibmvnic_adapter * adapter)144786f669b2SNathan Fontenot static int init_napi(struct ibmvnic_adapter *adapter)
144886f669b2SNathan Fontenot {
144986f669b2SNathan Fontenot 	int i;
145086f669b2SNathan Fontenot 
145186f669b2SNathan Fontenot 	adapter->napi = kcalloc(adapter->req_rx_queues,
145286f669b2SNathan Fontenot 				sizeof(struct napi_struct), GFP_KERNEL);
145386f669b2SNathan Fontenot 	if (!adapter->napi)
145486f669b2SNathan Fontenot 		return -ENOMEM;
145586f669b2SNathan Fontenot 
145686f669b2SNathan Fontenot 	for (i = 0; i < adapter->req_rx_queues; i++) {
145786f669b2SNathan Fontenot 		netdev_dbg(adapter->netdev, "Adding napi[%d]\n", i);
145886f669b2SNathan Fontenot 		netif_napi_add(adapter->netdev, &adapter->napi[i],
1459b48b89f9SJakub Kicinski 			       ibmvnic_poll);
146086f669b2SNathan Fontenot 	}
146186f669b2SNathan Fontenot 
146282e3be32SNathan Fontenot 	adapter->num_active_rx_napi = adapter->req_rx_queues;
146386f669b2SNathan Fontenot 	return 0;
146486f669b2SNathan Fontenot }
146586f669b2SNathan Fontenot 
release_napi(struct ibmvnic_adapter * adapter)146686f669b2SNathan Fontenot static void release_napi(struct ibmvnic_adapter *adapter)
146786f669b2SNathan Fontenot {
146886f669b2SNathan Fontenot 	int i;
146986f669b2SNathan Fontenot 
147086f669b2SNathan Fontenot 	if (!adapter->napi)
147186f669b2SNathan Fontenot 		return;
147286f669b2SNathan Fontenot 
147382e3be32SNathan Fontenot 	for (i = 0; i < adapter->num_active_rx_napi; i++) {
1474390de194SWen Yang 		netdev_dbg(adapter->netdev, "Releasing napi[%d]\n", i);
147586f669b2SNathan Fontenot 		netif_napi_del(&adapter->napi[i]);
147686f669b2SNathan Fontenot 	}
147786f669b2SNathan Fontenot 
147886f669b2SNathan Fontenot 	kfree(adapter->napi);
147986f669b2SNathan Fontenot 	adapter->napi = NULL;
148082e3be32SNathan Fontenot 	adapter->num_active_rx_napi = 0;
1481c3f22415SThomas Falcon 	adapter->napi_enabled = false;
148286f669b2SNathan Fontenot }
148386f669b2SNathan Fontenot 
/* Map a vnic_state value to a human-readable name for log messages.
 * The fall-through return after the switch (instead of a default case)
 * keeps the compiler's enum-coverage warning useful: adding a new
 * vnic_state value without handling it here produces a warning.
 */
static const char *adapter_state_to_string(enum vnic_state state)
{
	switch (state) {
	case VNIC_PROBING:
		return "PROBING";
	case VNIC_PROBED:
		return "PROBED";
	case VNIC_OPENING:
		return "OPENING";
	case VNIC_OPEN:
		return "OPEN";
	case VNIC_CLOSING:
		return "CLOSING";
	case VNIC_CLOSED:
		return "CLOSED";
	case VNIC_REMOVING:
		return "REMOVING";
	case VNIC_REMOVED:
		return "REMOVED";
	case VNIC_DOWN:
		return "DOWN";
	}
	return "UNKNOWN";
}
15080666ef7fSLijun Pan 
/**
 * ibmvnic_login() - Log the adapter in with the VNIC server (VIOS).
 * @netdev: net device associated with the vnic interface
 *
 * Sends a login request and waits (up to 20s) for the server's reply,
 * retrying up to 10 times. An ABORTED reply triggers a simple delayed
 * retry; a PARTIALSUCCESS reply re-negotiates capabilities and
 * re-initializes the sub-CRQs before retrying; any other failure (or a
 * timeout) frees and re-registers the CRQs entirely before retrying,
 * so the VIOS does not consider us already logged in.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int ibmvnic_login(struct net_device *netdev)
{
	unsigned long flags, timeout = msecs_to_jiffies(20000);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int retry_count = 0;
	int retries = 10;
	bool retry;
	int rc;

	do {
		retry = false;
		if (retry_count > retries) {
			netdev_warn(netdev, "Login attempts exceeded\n");
			return -EACCES;
		}

		adapter->init_done_rc = 0;
		reinit_completion(&adapter->init_done);
		rc = send_login(adapter);
		if (rc)
			return rc;

		if (!wait_for_completion_timeout(&adapter->init_done,
						 timeout)) {
			netdev_warn(netdev, "Login timed out\n");
			/* No reply arrived; clear the pending flag and fall
			 * into the full CRQ re-registration path below.
			 */
			adapter->login_pending = false;
			goto partial_reset;
		}

		if (adapter->init_done_rc == ABORTED) {
			netdev_warn(netdev, "Login aborted, retrying...\n");
			retry = true;
			adapter->init_done_rc = 0;
			retry_count++;
			/* FW or device may be busy, so
			 * wait a bit before retrying login
			 */
			msleep(500);
		} else if (adapter->init_done_rc == PARTIALSUCCESS) {
			retry_count++;
			release_sub_crqs(adapter, 1);

			retry = true;
			netdev_dbg(netdev,
				   "Received partial success, retrying...\n");
			adapter->init_done_rc = 0;
			reinit_completion(&adapter->init_done);
			/* Re-query capabilities and rebuild the sub-CRQs
			 * before the next login attempt.
			 */
			send_query_cap(adapter);
			if (!wait_for_completion_timeout(&adapter->init_done,
							 timeout)) {
				netdev_warn(netdev,
					    "Capabilities query timed out\n");
				return -ETIMEDOUT;
			}

			rc = init_sub_crqs(adapter);
			if (rc) {
				netdev_warn(netdev,
					    "SCRQ initialization failed\n");
				return rc;
			}

			rc = init_sub_crq_irqs(adapter);
			if (rc) {
				netdev_warn(netdev,
					    "SCRQ irq initialization failed\n");
				return rc;
			}
		/* Default/timeout error handling, reset and start fresh */
		} else if (adapter->init_done_rc) {
			netdev_warn(netdev, "Adapter login failed, init_done_rc = %d\n",
				    adapter->init_done_rc);

partial_reset:
			/* adapter login failed, so free any CRQs or sub-CRQs
			 * and register again before attempting to login again.
			 * If we don't do this then the VIOS may think that
			 * we are already logged in and reject any subsequent
			 * attempts
			 */
			netdev_warn(netdev,
				    "Freeing and re-registering CRQs before attempting to login again\n");
			retry = true;
			adapter->init_done_rc = 0;
			release_sub_crqs(adapter, true);
			/* Much of this is similar logic as ibmvnic_probe(),
			 * we are essentially re-initializing communication
			 * with the server. We really should not run any
			 * resets/failovers here because this is already a form
			 * of reset and we do not want parallel resets occurring
			 */
			do {
				reinit_init_done(adapter);
				/* Clear any failovers we got in the previous
				 * pass since we are re-initializing the CRQ
				 */
				adapter->failover_pending = false;
				release_crq_queue(adapter);
				/* If we don't sleep here then we risk an
				 * unnecessary failover event from the VIOS.
				 * This is a known VIOS issue caused by a vnic
				 * device freeing and registering a CRQ too
				 * quickly.
				 */
				msleep(1500);
				/* Avoid any resets, since we are currently
				 * resetting.
				 */
				spin_lock_irqsave(&adapter->rwi_lock, flags);
				flush_reset_queue(adapter);
				spin_unlock_irqrestore(&adapter->rwi_lock,
						       flags);

				rc = init_crq_queue(adapter);
				if (rc) {
					netdev_err(netdev, "login recovery: init CRQ failed %d\n",
						   rc);
					return -EIO;
				}

				rc = ibmvnic_reset_init(adapter, false);
				if (rc)
					netdev_err(netdev, "login recovery: Reset init failed %d\n",
						   rc);
				/* IBMVNIC_CRQ_INIT will return EAGAIN if it
				 * fails, since ibmvnic_reset_init will free
				 * irq's in failure, we won't be able to receive
				 * new CRQs so we need to keep trying. probe()
				 * handles this similarly.
				 */
			} while (rc == -EAGAIN && retry_count++ < retries);
		}
	} while (retry);

	__ibmvnic_set_mac(netdev, adapter->mac_addr);

	netdev_dbg(netdev, "[S:%s] Login succeeded\n", adapter_state_to_string(adapter->state));
	return 0;
}
1648a57a5d25SJohn Allen 
release_login_buffer(struct ibmvnic_adapter * adapter)164934f0f4e3SThomas Falcon static void release_login_buffer(struct ibmvnic_adapter *adapter)
165034f0f4e3SThomas Falcon {
1651d78a671eSNick Child 	if (!adapter->login_buf)
1652d78a671eSNick Child 		return;
1653d78a671eSNick Child 
1654d78a671eSNick Child 	dma_unmap_single(&adapter->vdev->dev, adapter->login_buf_token,
1655d78a671eSNick Child 			 adapter->login_buf_sz, DMA_TO_DEVICE);
165634f0f4e3SThomas Falcon 	kfree(adapter->login_buf);
165734f0f4e3SThomas Falcon 	adapter->login_buf = NULL;
165834f0f4e3SThomas Falcon }
165934f0f4e3SThomas Falcon 
release_login_rsp_buffer(struct ibmvnic_adapter * adapter)166034f0f4e3SThomas Falcon static void release_login_rsp_buffer(struct ibmvnic_adapter *adapter)
166134f0f4e3SThomas Falcon {
1662d78a671eSNick Child 	if (!adapter->login_rsp_buf)
1663d78a671eSNick Child 		return;
1664d78a671eSNick Child 
1665d78a671eSNick Child 	dma_unmap_single(&adapter->vdev->dev, adapter->login_rsp_buf_token,
1666d78a671eSNick Child 			 adapter->login_rsp_buf_sz, DMA_FROM_DEVICE);
166734f0f4e3SThomas Falcon 	kfree(adapter->login_rsp_buf);
166834f0f4e3SThomas Falcon 	adapter->login_rsp_buf = NULL;
166934f0f4e3SThomas Falcon }
167034f0f4e3SThomas Falcon 
/* Release adapter resources that are not part of the rx/tx pools:
 * cached VPD data, the NAPI contexts and both login buffers.
 * Each helper below is a no-op when its resource was never allocated,
 * so this is safe to call on partially initialized adapters.
 */
static void release_resources(struct ibmvnic_adapter *adapter)
{
	release_vpd_data(adapter);

	release_napi(adapter);
	release_login_buffer(adapter);
	release_login_rsp_buffer(adapter);
}
16791b8955eeSNathan Fontenot 
/* Send a LOGICAL_LINK_STATE CRQ to the server and wait (up to 20s per
 * attempt) for the completion posted by the response handler into
 * adapter->init_done.  A PARTIALSUCCESS reply means the server is not
 * ready yet: delay one second and resend the same command.
 *
 * Return: 0 on success, -ETIMEDOUT if no response arrives, or the
 * error from the CRQ send path / the server's init_done_rc.
 */
static int set_link_state(struct ibmvnic_adapter *adapter, u8 link_state)
{
	struct net_device *netdev = adapter->netdev;
	unsigned long timeout = msecs_to_jiffies(20000);
	union ibmvnic_crq crq;
	bool resend;
	int rc;

	netdev_dbg(netdev, "setting link state %d\n", link_state);

	memset(&crq, 0, sizeof(crq));
	crq.logical_link_state.first = IBMVNIC_CRQ_CMD;
	crq.logical_link_state.cmd = LOGICAL_LINK_STATE;
	crq.logical_link_state.link_state = link_state;

	do {
		resend = false;

		reinit_completion(&adapter->init_done);
		rc = ibmvnic_send_crq(adapter, &crq);
		if (rc) {
			netdev_err(netdev, "Failed to set link state\n");
			return rc;
		}

		if (!wait_for_completion_timeout(&adapter->init_done,
						 timeout)) {
			netdev_err(netdev, "timeout setting link state\n");
			return -ETIMEDOUT;
		}

		if (adapter->init_done_rc == PARTIALSUCCESS) {
			/* Partial success, delay and re-send */
			mdelay(1000);
			resend = true;
		} else if (adapter->init_done_rc) {
			netdev_warn(netdev, "Unable to set link state, rc=%d\n",
				    adapter->init_done_rc);
			return adapter->init_done_rc;
		}
	} while (resend);

	return 0;
}
172453da09e9SNathan Fontenot 
set_real_num_queues(struct net_device * netdev)17257f3c6e6bSThomas Falcon static int set_real_num_queues(struct net_device *netdev)
17267f3c6e6bSThomas Falcon {
17277f3c6e6bSThomas Falcon 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
17287f3c6e6bSThomas Falcon 	int rc;
17297f3c6e6bSThomas Falcon 
1730d1cf33d9SNathan Fontenot 	netdev_dbg(netdev, "Setting real tx/rx queues (%llx/%llx)\n",
1731d1cf33d9SNathan Fontenot 		   adapter->req_tx_queues, adapter->req_rx_queues);
1732d1cf33d9SNathan Fontenot 
17337f3c6e6bSThomas Falcon 	rc = netif_set_real_num_tx_queues(netdev, adapter->req_tx_queues);
17347f3c6e6bSThomas Falcon 	if (rc) {
17357f3c6e6bSThomas Falcon 		netdev_err(netdev, "failed to set the number of tx queues\n");
17367f3c6e6bSThomas Falcon 		return rc;
17377f3c6e6bSThomas Falcon 	}
17387f3c6e6bSThomas Falcon 
17397f3c6e6bSThomas Falcon 	rc = netif_set_real_num_rx_queues(netdev, adapter->req_rx_queues);
17407f3c6e6bSThomas Falcon 	if (rc)
17417f3c6e6bSThomas Falcon 		netdev_err(netdev, "failed to set the number of rx queues\n");
17427f3c6e6bSThomas Falcon 
17437f3c6e6bSThomas Falcon 	return rc;
17447f3c6e6bSThomas Falcon }
17457f3c6e6bSThomas Falcon 
ibmvnic_get_vpd(struct ibmvnic_adapter * adapter)17464e6759beSDesnes Augusto Nunes do Rosario static int ibmvnic_get_vpd(struct ibmvnic_adapter *adapter)
17474e6759beSDesnes Augusto Nunes do Rosario {
17484e6759beSDesnes Augusto Nunes do Rosario 	struct device *dev = &adapter->vdev->dev;
17494e6759beSDesnes Augusto Nunes do Rosario 	union ibmvnic_crq crq;
17504e6759beSDesnes Augusto Nunes do Rosario 	int len = 0;
17519c4eaabdSThomas Falcon 	int rc;
17524e6759beSDesnes Augusto Nunes do Rosario 
17534e6759beSDesnes Augusto Nunes do Rosario 	if (adapter->vpd->buff)
17544e6759beSDesnes Augusto Nunes do Rosario 		len = adapter->vpd->len;
17554e6759beSDesnes Augusto Nunes do Rosario 
1756ff25dcb9SThomas Falcon 	mutex_lock(&adapter->fw_lock);
1757ff25dcb9SThomas Falcon 	adapter->fw_done_rc = 0;
1758070eca95SThomas Falcon 	reinit_completion(&adapter->fw_done);
1759ff25dcb9SThomas Falcon 
17604e6759beSDesnes Augusto Nunes do Rosario 	crq.get_vpd_size.first = IBMVNIC_CRQ_CMD;
17614e6759beSDesnes Augusto Nunes do Rosario 	crq.get_vpd_size.cmd = GET_VPD_SIZE;
17629c4eaabdSThomas Falcon 	rc = ibmvnic_send_crq(adapter, &crq);
1763ff25dcb9SThomas Falcon 	if (rc) {
1764ff25dcb9SThomas Falcon 		mutex_unlock(&adapter->fw_lock);
17659c4eaabdSThomas Falcon 		return rc;
1766ff25dcb9SThomas Falcon 	}
1767476d96caSThomas Falcon 
1768476d96caSThomas Falcon 	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
1769476d96caSThomas Falcon 	if (rc) {
1770476d96caSThomas Falcon 		dev_err(dev, "Could not retrieve VPD size, rc = %d\n", rc);
1771ff25dcb9SThomas Falcon 		mutex_unlock(&adapter->fw_lock);
1772476d96caSThomas Falcon 		return rc;
1773476d96caSThomas Falcon 	}
1774ff25dcb9SThomas Falcon 	mutex_unlock(&adapter->fw_lock);
17754e6759beSDesnes Augusto Nunes do Rosario 
17764e6759beSDesnes Augusto Nunes do Rosario 	if (!adapter->vpd->len)
17774e6759beSDesnes Augusto Nunes do Rosario 		return -ENODATA;
17784e6759beSDesnes Augusto Nunes do Rosario 
17794e6759beSDesnes Augusto Nunes do Rosario 	if (!adapter->vpd->buff)
17804e6759beSDesnes Augusto Nunes do Rosario 		adapter->vpd->buff = kzalloc(adapter->vpd->len, GFP_KERNEL);
17814e6759beSDesnes Augusto Nunes do Rosario 	else if (adapter->vpd->len != len)
17824e6759beSDesnes Augusto Nunes do Rosario 		adapter->vpd->buff =
17834e6759beSDesnes Augusto Nunes do Rosario 			krealloc(adapter->vpd->buff,
17844e6759beSDesnes Augusto Nunes do Rosario 				 adapter->vpd->len, GFP_KERNEL);
17854e6759beSDesnes Augusto Nunes do Rosario 
17864e6759beSDesnes Augusto Nunes do Rosario 	if (!adapter->vpd->buff) {
17874e6759beSDesnes Augusto Nunes do Rosario 		dev_err(dev, "Could allocate VPD buffer\n");
17884e6759beSDesnes Augusto Nunes do Rosario 		return -ENOMEM;
17894e6759beSDesnes Augusto Nunes do Rosario 	}
17904e6759beSDesnes Augusto Nunes do Rosario 
17914e6759beSDesnes Augusto Nunes do Rosario 	adapter->vpd->dma_addr =
17924e6759beSDesnes Augusto Nunes do Rosario 		dma_map_single(dev, adapter->vpd->buff, adapter->vpd->len,
17934e6759beSDesnes Augusto Nunes do Rosario 			       DMA_FROM_DEVICE);
1794f743106eSDesnes Augusto Nunes do Rosario 	if (dma_mapping_error(dev, adapter->vpd->dma_addr)) {
17954e6759beSDesnes Augusto Nunes do Rosario 		dev_err(dev, "Could not map VPD buffer\n");
17964e6759beSDesnes Augusto Nunes do Rosario 		kfree(adapter->vpd->buff);
1797b0992ecaSThomas Falcon 		adapter->vpd->buff = NULL;
17984e6759beSDesnes Augusto Nunes do Rosario 		return -ENOMEM;
17994e6759beSDesnes Augusto Nunes do Rosario 	}
18004e6759beSDesnes Augusto Nunes do Rosario 
1801ff25dcb9SThomas Falcon 	mutex_lock(&adapter->fw_lock);
1802ff25dcb9SThomas Falcon 	adapter->fw_done_rc = 0;
18034e6759beSDesnes Augusto Nunes do Rosario 	reinit_completion(&adapter->fw_done);
1804ff25dcb9SThomas Falcon 
18054e6759beSDesnes Augusto Nunes do Rosario 	crq.get_vpd.first = IBMVNIC_CRQ_CMD;
18064e6759beSDesnes Augusto Nunes do Rosario 	crq.get_vpd.cmd = GET_VPD;
18074e6759beSDesnes Augusto Nunes do Rosario 	crq.get_vpd.ioba = cpu_to_be32(adapter->vpd->dma_addr);
18084e6759beSDesnes Augusto Nunes do Rosario 	crq.get_vpd.len = cpu_to_be32((u32)adapter->vpd->len);
18099c4eaabdSThomas Falcon 	rc = ibmvnic_send_crq(adapter, &crq);
18109c4eaabdSThomas Falcon 	if (rc) {
18119c4eaabdSThomas Falcon 		kfree(adapter->vpd->buff);
18129c4eaabdSThomas Falcon 		adapter->vpd->buff = NULL;
1813ff25dcb9SThomas Falcon 		mutex_unlock(&adapter->fw_lock);
18149c4eaabdSThomas Falcon 		return rc;
18159c4eaabdSThomas Falcon 	}
1816476d96caSThomas Falcon 
1817476d96caSThomas Falcon 	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
1818476d96caSThomas Falcon 	if (rc) {
1819476d96caSThomas Falcon 		dev_err(dev, "Unable to retrieve VPD, rc = %d\n", rc);
1820476d96caSThomas Falcon 		kfree(adapter->vpd->buff);
1821476d96caSThomas Falcon 		adapter->vpd->buff = NULL;
1822ff25dcb9SThomas Falcon 		mutex_unlock(&adapter->fw_lock);
1823476d96caSThomas Falcon 		return rc;
1824476d96caSThomas Falcon 	}
18254e6759beSDesnes Augusto Nunes do Rosario 
1826ff25dcb9SThomas Falcon 	mutex_unlock(&adapter->fw_lock);
18274e6759beSDesnes Augusto Nunes do Rosario 	return 0;
18284e6759beSDesnes Augusto Nunes do Rosario }
18294e6759beSDesnes Augusto Nunes do Rosario 
init_resources(struct ibmvnic_adapter * adapter)1830bfc32f29SNathan Fontenot static int init_resources(struct ibmvnic_adapter *adapter)
1831a57a5d25SJohn Allen {
1832bfc32f29SNathan Fontenot 	struct net_device *netdev = adapter->netdev;
183386f669b2SNathan Fontenot 	int rc;
1834a57a5d25SJohn Allen 
18357f3c6e6bSThomas Falcon 	rc = set_real_num_queues(netdev);
18367f3c6e6bSThomas Falcon 	if (rc)
18377f3c6e6bSThomas Falcon 		return rc;
1838bd0b6723SJohn Allen 
18394e6759beSDesnes Augusto Nunes do Rosario 	adapter->vpd = kzalloc(sizeof(*adapter->vpd), GFP_KERNEL);
18404e6759beSDesnes Augusto Nunes do Rosario 	if (!adapter->vpd)
18414e6759beSDesnes Augusto Nunes do Rosario 		return -ENOMEM;
18424e6759beSDesnes Augusto Nunes do Rosario 
184369d08dcbSJohn Allen 	/* Vital Product Data (VPD) */
184469d08dcbSJohn Allen 	rc = ibmvnic_get_vpd(adapter);
184569d08dcbSJohn Allen 	if (rc) {
184669d08dcbSJohn Allen 		netdev_err(netdev, "failed to initialize Vital Product Data (VPD)\n");
184769d08dcbSJohn Allen 		return rc;
184869d08dcbSJohn Allen 	}
184969d08dcbSJohn Allen 
185086f669b2SNathan Fontenot 	rc = init_napi(adapter);
185186f669b2SNathan Fontenot 	if (rc)
185286f669b2SNathan Fontenot 		return rc;
1853032c5e82SThomas Falcon 
185469980d02SLijun Pan 	send_query_map(adapter);
18550ffe2cb7SNathan Fontenot 
18560ffe2cb7SNathan Fontenot 	rc = init_rx_pools(netdev);
18570ffe2cb7SNathan Fontenot 	if (rc)
1858bfc32f29SNathan Fontenot 		return rc;
1859032c5e82SThomas Falcon 
1860c657e32cSNathan Fontenot 	rc = init_tx_pools(netdev);
1861bfc32f29SNathan Fontenot 	return rc;
1862bfc32f29SNathan Fontenot }
1863bfc32f29SNathan Fontenot 
/* Bring the (already logged-in, resource-initialized) device up:
 * replenish the rx pools, enable NAPI and the sub-CRQ interrupts,
 * raise the logical link and start the tx queues.  The hardware irqs
 * are only re-enabled when coming from VNIC_CLOSED, because close
 * leaves them disabled via disable_irq().
 *
 * Return: 0 on success, or the error from set_link_state(), in which
 * case NAPI and the irqs are rolled back to their disabled state.
 */
static int __ibmvnic_open(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	enum vnic_state prev_state = adapter->state;
	int i, rc;

	adapter->state = VNIC_OPENING;
	replenish_pools(adapter);
	ibmvnic_napi_enable(adapter);

	/* We're ready to receive frames, enable the sub-crq interrupts and
	 * set the logical link state to up
	 */
	for (i = 0; i < adapter->req_rx_queues; i++) {
		netdev_dbg(netdev, "Enabling rx_scrq[%d] irq\n", i);
		if (prev_state == VNIC_CLOSED)
			enable_irq(adapter->rx_scrq[i]->irq);
		enable_scrq_irq(adapter, adapter->rx_scrq[i]);
	}

	for (i = 0; i < adapter->req_tx_queues; i++) {
		netdev_dbg(netdev, "Enabling tx_scrq[%d] irq\n", i);
		if (prev_state == VNIC_CLOSED)
			enable_irq(adapter->tx_scrq[i]->irq);
		enable_scrq_irq(adapter, adapter->tx_scrq[i]);
		/* netdev_tx_reset_queue will reset dql stats. During NON_FATAL
		 * resets, don't reset the stats because there could be batched
		 * skb's waiting to be sent. If we reset dql stats, we risk
		 * num_completed being greater than num_queued. This will cause
		 * a BUG_ON in dql_completed().
		 */
		if (adapter->reset_reason != VNIC_RESET_NON_FATAL)
			netdev_tx_reset_queue(netdev_get_tx_queue(netdev, i));
	}

	rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_UP);
	if (rc) {
		/* Undo the napi/irq enables above before bailing out */
		ibmvnic_napi_disable(adapter);
		ibmvnic_disable_irqs(adapter);
		return rc;
	}

	adapter->tx_queues_active = true;

	/* Since queues were stopped until now, there shouldn't be any
	 * one in ibmvnic_complete_tx() or ibmvnic_xmit() so maybe we
	 * don't need the synchronize_rcu()? Leaving it for consistency
	 * with setting ->tx_queues_active = false.
	 */
	synchronize_rcu();

	netif_tx_start_all_queues(netdev);

	/* Kick NAPI to drain anything received while we were closed */
	if (prev_state == VNIC_CLOSED) {
		for (i = 0; i < adapter->req_rx_queues; i++)
			napi_schedule(&adapter->napi[i]);
	}

	adapter->state = VNIC_OPEN;
	return rc;
}
1925ed651a10SNathan Fontenot 
/* ndo_open handler.  If a failover or reset is in flight, only record
 * the desired VNIC_OPEN state and let the reset path do the work;
 * otherwise log in (when not coming from VNIC_CLOSED), allocate
 * resources and call __ibmvnic_open().  On unrecoverable failure all
 * resources and pools are released.
 */
static int ibmvnic_open(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int rc;

	ASSERT_RTNL();

	/* If device failover is pending or we are about to reset, just set
	 * device state and return. Device operation will be handled by reset
	 * routine.
	 *
	 * It should be safe to overwrite the adapter->state here. Since
	 * we hold the rtnl, either the reset has not actually started or
	 * the rtnl got dropped during the set_link_state() in do_reset().
	 * In the former case, no one else is changing the state (again we
	 * have the rtnl) and in the latter case, do_reset() will detect and
	 * honor our setting below.
	 */
	if (adapter->failover_pending || (test_bit(0, &adapter->resetting))) {
		netdev_dbg(netdev, "[S:%s FOP:%d] Resetting, deferring open\n",
			   adapter_state_to_string(adapter->state),
			   adapter->failover_pending);
		adapter->state = VNIC_OPEN;
		rc = 0;
		goto out;
	}

	if (adapter->state != VNIC_CLOSED) {
		rc = ibmvnic_login(netdev);
		if (rc)
			goto out;

		rc = init_resources(adapter);
		if (rc) {
			netdev_err(netdev, "failed to initialize resources\n");
			goto out;
		}
	}

	rc = __ibmvnic_open(netdev);

out:
	/* If open failed and there is a pending failover or in-progress reset,
	 * set device state and return. Device operation will be handled by
	 * reset routine. See also comments above regarding rtnl.
	 */
	if (rc &&
	    (adapter->failover_pending || (test_bit(0, &adapter->resetting)))) {
		adapter->state = VNIC_OPEN;
		rc = 0;
	}

	if (rc) {
		/* Genuine failure: tear down whatever was set up above */
		release_resources(adapter);
		release_rx_pools(adapter);
		release_tx_pools(adapter);
	}

	return rc;
}
1986032c5e82SThomas Falcon 
clean_rx_pools(struct ibmvnic_adapter * adapter)1987d0869c00SThomas Falcon static void clean_rx_pools(struct ibmvnic_adapter *adapter)
1988d0869c00SThomas Falcon {
1989d0869c00SThomas Falcon 	struct ibmvnic_rx_pool *rx_pool;
1990637f81d1SThomas Falcon 	struct ibmvnic_rx_buff *rx_buff;
1991d0869c00SThomas Falcon 	u64 rx_entries;
1992d0869c00SThomas Falcon 	int rx_scrqs;
1993d0869c00SThomas Falcon 	int i, j;
1994d0869c00SThomas Falcon 
1995d0869c00SThomas Falcon 	if (!adapter->rx_pool)
1996d0869c00SThomas Falcon 		return;
1997d0869c00SThomas Falcon 
1998660e309dSThomas Falcon 	rx_scrqs = adapter->num_active_rx_pools;
1999d0869c00SThomas Falcon 	rx_entries = adapter->req_rx_add_entries_per_subcrq;
2000d0869c00SThomas Falcon 
2001d0869c00SThomas Falcon 	/* Free any remaining skbs in the rx buffer pools */
2002d0869c00SThomas Falcon 	for (i = 0; i < rx_scrqs; i++) {
2003d0869c00SThomas Falcon 		rx_pool = &adapter->rx_pool[i];
2004637f81d1SThomas Falcon 		if (!rx_pool || !rx_pool->rx_buff)
2005d0869c00SThomas Falcon 			continue;
2006d0869c00SThomas Falcon 
2007d0869c00SThomas Falcon 		netdev_dbg(adapter->netdev, "Cleaning rx_pool[%d]\n", i);
2008d0869c00SThomas Falcon 		for (j = 0; j < rx_entries; j++) {
2009637f81d1SThomas Falcon 			rx_buff = &rx_pool->rx_buff[j];
2010637f81d1SThomas Falcon 			if (rx_buff && rx_buff->skb) {
2011637f81d1SThomas Falcon 				dev_kfree_skb_any(rx_buff->skb);
2012637f81d1SThomas Falcon 				rx_buff->skb = NULL;
2013d0869c00SThomas Falcon 			}
2014d0869c00SThomas Falcon 		}
2015d0869c00SThomas Falcon 	}
2016d0869c00SThomas Falcon }
2017d0869c00SThomas Falcon 
clean_one_tx_pool(struct ibmvnic_adapter * adapter,struct ibmvnic_tx_pool * tx_pool)2018e9e1e978SThomas Falcon static void clean_one_tx_pool(struct ibmvnic_adapter *adapter,
2019e9e1e978SThomas Falcon 			      struct ibmvnic_tx_pool *tx_pool)
2020b41b83e9SNathan Fontenot {
2021637f81d1SThomas Falcon 	struct ibmvnic_tx_buff *tx_buff;
2022b41b83e9SNathan Fontenot 	u64 tx_entries;
2023e9e1e978SThomas Falcon 	int i;
2024b41b83e9SNathan Fontenot 
2025050e85c9SDan Carpenter 	if (!tx_pool || !tx_pool->tx_buff)
2026b41b83e9SNathan Fontenot 		return;
2027b41b83e9SNathan Fontenot 
2028e9e1e978SThomas Falcon 	tx_entries = tx_pool->num_buffers;
2029b41b83e9SNathan Fontenot 
2030e9e1e978SThomas Falcon 	for (i = 0; i < tx_entries; i++) {
2031e9e1e978SThomas Falcon 		tx_buff = &tx_pool->tx_buff[i];
2032637f81d1SThomas Falcon 		if (tx_buff && tx_buff->skb) {
2033637f81d1SThomas Falcon 			dev_kfree_skb_any(tx_buff->skb);
2034637f81d1SThomas Falcon 			tx_buff->skb = NULL;
2035b41b83e9SNathan Fontenot 		}
2036b41b83e9SNathan Fontenot 	}
2037b41b83e9SNathan Fontenot }
2038e9e1e978SThomas Falcon 
clean_tx_pools(struct ibmvnic_adapter * adapter)2039e9e1e978SThomas Falcon static void clean_tx_pools(struct ibmvnic_adapter *adapter)
2040e9e1e978SThomas Falcon {
2041e9e1e978SThomas Falcon 	int tx_scrqs;
2042e9e1e978SThomas Falcon 	int i;
2043e9e1e978SThomas Falcon 
2044e9e1e978SThomas Falcon 	if (!adapter->tx_pool || !adapter->tso_pool)
2045e9e1e978SThomas Falcon 		return;
2046e9e1e978SThomas Falcon 
2047660e309dSThomas Falcon 	tx_scrqs = adapter->num_active_tx_pools;
2048e9e1e978SThomas Falcon 
2049e9e1e978SThomas Falcon 	/* Free any remaining skbs in the tx buffer pools */
2050e9e1e978SThomas Falcon 	for (i = 0; i < tx_scrqs; i++) {
2051e9e1e978SThomas Falcon 		netdev_dbg(adapter->netdev, "Cleaning tx_pool[%d]\n", i);
2052e9e1e978SThomas Falcon 		clean_one_tx_pool(adapter, &adapter->tx_pool[i]);
2053e9e1e978SThomas Falcon 		clean_one_tx_pool(adapter, &adapter->tso_pool[i]);
2054e9e1e978SThomas Falcon 	}
2055b41b83e9SNathan Fontenot }
2056b41b83e9SNathan Fontenot 
ibmvnic_disable_irqs(struct ibmvnic_adapter * adapter)20576095e590SJohn Allen static void ibmvnic_disable_irqs(struct ibmvnic_adapter *adapter)
2058ea5509f5SJohn Allen {
20596095e590SJohn Allen 	struct net_device *netdev = adapter->netdev;
2060ea5509f5SJohn Allen 	int i;
2061ea5509f5SJohn Allen 
206246293b94SNathan Fontenot 	if (adapter->tx_scrq) {
206346293b94SNathan Fontenot 		for (i = 0; i < adapter->req_tx_queues; i++)
2064d1cf33d9SNathan Fontenot 			if (adapter->tx_scrq[i]->irq) {
2065f873866aSThomas Falcon 				netdev_dbg(netdev,
2066d1cf33d9SNathan Fontenot 					   "Disabling tx_scrq[%d] irq\n", i);
2067f23e0643SThomas Falcon 				disable_scrq_irq(adapter, adapter->tx_scrq[i]);
206846293b94SNathan Fontenot 				disable_irq(adapter->tx_scrq[i]->irq);
206946293b94SNathan Fontenot 			}
2070d1cf33d9SNathan Fontenot 	}
207146293b94SNathan Fontenot 
207246293b94SNathan Fontenot 	if (adapter->rx_scrq) {
207346293b94SNathan Fontenot 		for (i = 0; i < adapter->req_rx_queues; i++) {
2074d1cf33d9SNathan Fontenot 			if (adapter->rx_scrq[i]->irq) {
2075f873866aSThomas Falcon 				netdev_dbg(netdev,
2076d1cf33d9SNathan Fontenot 					   "Disabling rx_scrq[%d] irq\n", i);
2077f23e0643SThomas Falcon 				disable_scrq_irq(adapter, adapter->rx_scrq[i]);
207846293b94SNathan Fontenot 				disable_irq(adapter->rx_scrq[i]->irq);
207946293b94SNathan Fontenot 			}
208046293b94SNathan Fontenot 		}
2081d1cf33d9SNathan Fontenot 	}
20826095e590SJohn Allen }
20836095e590SJohn Allen 
/* Quiesce the data path: stop the tx queues, NAPI and sub-CRQ irqs.
 * Called from both close and the reset paths.  The ordering matters:
 * tx_queues_active must be cleared and published (synchronize_rcu)
 * before the queues are stopped, so no xmit/completion path can
 * restart a queue afterwards.
 */
static void ibmvnic_cleanup(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	/* ensure that transmissions are stopped if called by do_reset */

	adapter->tx_queues_active = false;

	/* Ensure complete_tx() and ibmvnic_xmit() see ->tx_queues_active
	 * update so they don't restart a queue after we stop it below.
	 */
	synchronize_rcu();

	/* In a reset, netif_tx_disable() takes the tx locks itself since
	 * we may not be running under RTNL here.
	 */
	if (test_bit(0, &adapter->resetting))
		netif_tx_disable(netdev);
	else
		netif_tx_stop_all_queues(netdev);

	ibmvnic_napi_disable(adapter);
	ibmvnic_disable_irqs(adapter);
}
210501d9bd79SThomas Falcon 
__ibmvnic_close(struct net_device * netdev)210601d9bd79SThomas Falcon static int __ibmvnic_close(struct net_device *netdev)
210701d9bd79SThomas Falcon {
210801d9bd79SThomas Falcon 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
210901d9bd79SThomas Falcon 	int rc = 0;
211001d9bd79SThomas Falcon 
211101d9bd79SThomas Falcon 	adapter->state = VNIC_CLOSING;
211201d9bd79SThomas Falcon 	rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN);
211301d9bd79SThomas Falcon 	adapter->state = VNIC_CLOSED;
2114d4083d3cSSukadev Bhattiprolu 	return rc;
2115032c5e82SThomas Falcon }
2116032c5e82SThomas Falcon 
/* ndo_stop handler.  When a failover is pending, only record the
 * VNIC_CLOSED state and let the reset path do the actual teardown;
 * otherwise take the link down, quiesce the data path and free any
 * skbs still held in the rx/tx pools.
 */
static int ibmvnic_close(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int rc;

	netdev_dbg(netdev, "[S:%s FOP:%d FRR:%d] Closing\n",
		   adapter_state_to_string(adapter->state),
		   adapter->failover_pending,
		   adapter->force_reset_recovery);

	/* If device failover is pending, just set device state and return.
	 * Device operation will be handled by reset routine.
	 */
	if (adapter->failover_pending) {
		adapter->state = VNIC_CLOSED;
		return 0;
	}

	rc = __ibmvnic_close(netdev);
	ibmvnic_cleanup(netdev);
	clean_rx_pools(adapter);
	clean_tx_pools(adapter);

	return rc;
}
2142ed651a10SNathan Fontenot 
2143ad7775dcSThomas Falcon /**
2144ad7775dcSThomas Falcon  * build_hdr_data - creates L2/L3/L4 header data buffer
214580708602SLee Jones  * @hdr_field: bitfield determining needed headers
214680708602SLee Jones  * @skb: socket buffer
214780708602SLee Jones  * @hdr_len: array of header lengths
214880708602SLee Jones  * @hdr_data: buffer to write the header to
2149ad7775dcSThomas Falcon  *
2150ad7775dcSThomas Falcon  * Reads hdr_field to determine which headers are needed by firmware.
2151ad7775dcSThomas Falcon  * Builds a buffer containing these headers.  Saves individual header
2152ad7775dcSThomas Falcon  * lengths and total buffer length to be used to build descriptors.
2153ad7775dcSThomas Falcon  */
static int build_hdr_data(u8 hdr_field, struct sk_buff *skb,
			  int *hdr_len, u8 *hdr_data)
{
	int len = 0;

	/* L2 length: VLAN-sized when the frame is tagged but the tag is
	 * not yet present in the header data.
	 */
	if (skb_vlan_tagged(skb) && !skb_vlan_tag_present(skb))
		hdr_len[0] = sizeof(struct vlan_ethhdr);
	else
		hdr_len[0] = sizeof(struct ethhdr);

	/* L3/L4 lengths depend on the network and transport protocols */
	if (skb->protocol == htons(ETH_P_IP)) {
		hdr_len[1] = ip_hdr(skb)->ihl * 4;
		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
			hdr_len[2] = tcp_hdrlen(skb);
		else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
			hdr_len[2] = sizeof(struct udphdr);
	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		hdr_len[1] = sizeof(struct ipv6hdr);
		if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
			hdr_len[2] = tcp_hdrlen(skb);
		else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
			hdr_len[2] = sizeof(struct udphdr);
	} else if (skb->protocol == htons(ETH_P_ARP)) {
		hdr_len[1] = arp_hdr_len(skb->dev);
		hdr_len[2] = 0;
	}

	memset(hdr_data, 0, 120);

	/* Copy each header the firmware asked for (bits 6/5/4 of
	 * hdr_field select L2/L3/L4 respectively), back to back.
	 */
	if ((hdr_field >> 6) & 1) {
		memcpy(hdr_data, skb_mac_header(skb), hdr_len[0]);
		len += hdr_len[0];
	}

	if ((hdr_field >> 5) & 1) {
		memcpy(hdr_data + len, skb_network_header(skb), hdr_len[1]);
		len += hdr_len[1];
	}

	if ((hdr_field >> 4) & 1) {
		memcpy(hdr_data + len, skb_transport_header(skb), hdr_len[2]);
		len += hdr_len[2];
	}

	return len;
}
2202ad7775dcSThomas Falcon 
2203ad7775dcSThomas Falcon /**
2204ad7775dcSThomas Falcon  * create_hdr_descs - create header and header extension descriptors
220580708602SLee Jones  * @hdr_field: bitfield determining needed headers
220680708602SLee Jones  * @hdr_data: buffer containing header data
220780708602SLee Jones  * @len: length of data buffer
220880708602SLee Jones  * @hdr_len: array of individual header lengths
220980708602SLee Jones  * @scrq_arr: descriptor array
2210ad7775dcSThomas Falcon  *
2211ad7775dcSThomas Falcon  * Creates header and, if needed, header extension descriptors and
2212ad7775dcSThomas Falcon  * places them in a descriptor array, scrq_arr
2213ad7775dcSThomas Falcon  */
2214ad7775dcSThomas Falcon 
static int create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len,
			    union sub_crq *scrq_arr)
{
	union sub_crq hdr_desc;
	int tmp_len = len;
	int num_descs = 0;
	u8 *data, *cur;
	int tmp;

	/* Walk the header buffer, carving it into descriptors.  The first
	 * descriptor is a full header descriptor (24 data bytes, carries
	 * the per-layer lengths and flags); any remainder goes into header
	 * extension descriptors (29 data bytes each).
	 */
	while (tmp_len > 0) {
		cur = hdr_data + len - tmp_len;

		memset(&hdr_desc, 0, sizeof(hdr_desc));
		if (cur != hdr_data) {
			/* Continuation: header extension descriptor */
			data = hdr_desc.hdr_ext.data;
			tmp = tmp_len > 29 ? 29 : tmp_len;
			hdr_desc.hdr_ext.first = IBMVNIC_CRQ_CMD;
			hdr_desc.hdr_ext.type = IBMVNIC_HDR_EXT_DESC;
			hdr_desc.hdr_ext.len = tmp;
		} else {
			/* First chunk: primary header descriptor with the
			 * individual L2/L3/L4 lengths and the header bitfield
			 * (shifted into the descriptor's flag position).
			 */
			data = hdr_desc.hdr.data;
			tmp = tmp_len > 24 ? 24 : tmp_len;
			hdr_desc.hdr.first = IBMVNIC_CRQ_CMD;
			hdr_desc.hdr.type = IBMVNIC_HDR_DESC;
			hdr_desc.hdr.len = tmp;
			hdr_desc.hdr.l2_len = (u8)hdr_len[0];
			hdr_desc.hdr.l3_len = cpu_to_be16((u16)hdr_len[1]);
			hdr_desc.hdr.l4_len = (u8)hdr_len[2];
			hdr_desc.hdr.flag = hdr_field << 1;
		}
		memcpy(data, cur, tmp);
		tmp_len -= tmp;
		*scrq_arr = hdr_desc;
		scrq_arr++;
		num_descs++;
	}

	/* Number of descriptors written into scrq_arr */
	return num_descs;
}
2254ad7775dcSThomas Falcon 
2255ad7775dcSThomas Falcon /**
2256ad7775dcSThomas Falcon  * build_hdr_descs_arr - build a header descriptor array
225773214a69SLijun Pan  * @skb: tx socket buffer
225873214a69SLijun Pan  * @indir_arr: indirect array
225980708602SLee Jones  * @num_entries: number of descriptors to be sent
226080708602SLee Jones  * @hdr_field: bit field determining which headers will be sent
2261ad7775dcSThomas Falcon  *
2262ad7775dcSThomas Falcon  * This function will build a TX descriptor array with applicable
2263ad7775dcSThomas Falcon  * L2/L3/L4 packet header descriptors to be sent by send_subcrq_indirect.
2264ad7775dcSThomas Falcon  */
2265ad7775dcSThomas Falcon 
static void build_hdr_descs_arr(struct sk_buff *skb,
				union sub_crq *indir_arr,
				int *num_entries, u8 hdr_field)
{
	int hdr_len[3] = {0, 0, 0};
	u8 hdr_data[140] = {0};
	int hdr_bytes;

	/* Extract the requested L2/L3/L4 headers from the skb, then pack
	 * them into header descriptors starting at indir_arr[1] (slot 0 is
	 * reserved for the TX descriptor itself).  num_entries grows by the
	 * number of header descriptors produced.
	 */
	hdr_bytes = build_hdr_data(hdr_field, skb, hdr_len, hdr_data);
	*num_entries += create_hdr_descs(hdr_field, hdr_data, hdr_bytes,
					 hdr_len, indir_arr + 1);
}
2279ad7775dcSThomas Falcon 
ibmvnic_xmit_workarounds(struct sk_buff * skb,struct net_device * netdev)22801f247a6fSThomas Falcon static int ibmvnic_xmit_workarounds(struct sk_buff *skb,
22811f247a6fSThomas Falcon 				    struct net_device *netdev)
22821f247a6fSThomas Falcon {
22831f247a6fSThomas Falcon 	/* For some backing devices, mishandling of small packets
22841f247a6fSThomas Falcon 	 * can result in a loss of connection or TX stall. Device
22851f247a6fSThomas Falcon 	 * architects recommend that no packet should be smaller
22861f247a6fSThomas Falcon 	 * than the minimum MTU value provided to the driver, so
22871f247a6fSThomas Falcon 	 * pad any packets to that length
22881f247a6fSThomas Falcon 	 */
22891f247a6fSThomas Falcon 	if (skb->len < netdev->min_mtu)
22901f247a6fSThomas Falcon 		return skb_put_padto(skb, netdev->min_mtu);
22917083a45aSThomas Falcon 
22927083a45aSThomas Falcon 	return 0;
22931f247a6fSThomas Falcon }
22941f247a6fSThomas Falcon 
/* Unwind every descriptor still queued in the sub-CRQ's indirect buffer
 * after a failed flush: free the skbs, return their buffer indices to the
 * pool's free map, back out the stats that were charged optimistically at
 * xmit time, and count them as dropped.  May restart a stopped TX queue
 * once enough entries have been released.
 */
static void ibmvnic_tx_scrq_clean_buffer(struct ibmvnic_adapter *adapter,
					 struct ibmvnic_sub_crq_queue *tx_scrq)
{
	struct ibmvnic_ind_xmit_queue *ind_bufp;
	struct ibmvnic_tx_buff *tx_buff;
	struct ibmvnic_tx_pool *tx_pool;
	union sub_crq tx_scrq_entry;
	int queue_num;
	int entries;
	int index;
	int i;

	ind_bufp = &tx_scrq->ind_buf;
	entries = (u64)ind_bufp->index;
	queue_num = tx_scrq->pool_index;

	/* Walk backwards so consumer_index can be rewound one slot per
	 * reclaimed buffer.
	 */
	for (i = entries - 1; i >= 0; --i) {
		tx_scrq_entry = ind_bufp->indir_arr[i];
		/* Header/extension descriptors carry no buffer to reclaim */
		if (tx_scrq_entry.v1.type != IBMVNIC_TX_DESC)
			continue;
		index = be32_to_cpu(tx_scrq_entry.v1.correlator);
		/* The TSO pool mask in the correlator tells us which pool
		 * the buffer came from.
		 */
		if (index & IBMVNIC_TSO_POOL_MASK) {
			tx_pool = &adapter->tso_pool[queue_num];
			index &= ~IBMVNIC_TSO_POOL_MASK;
		} else {
			tx_pool = &adapter->tx_pool[queue_num];
		}
		/* Return the index to the free map and step consumer_index
		 * back (with wrap-around) to undo the xmit-path advance.
		 */
		tx_pool->free_map[tx_pool->consumer_index] = index;
		tx_pool->consumer_index = tx_pool->consumer_index == 0 ?
					  tx_pool->num_buffers - 1 :
					  tx_pool->consumer_index - 1;
		tx_buff = &tx_pool->tx_buff[index];
		/* Back out the packet/byte stats charged when queued */
		adapter->netdev->stats.tx_packets--;
		adapter->netdev->stats.tx_bytes -= tx_buff->skb->len;
		adapter->tx_stats_buffers[queue_num].batched_packets--;
		adapter->tx_stats_buffers[queue_num].bytes -=
						tx_buff->skb->len;
		dev_kfree_skb_any(tx_buff->skb);
		tx_buff->skb = NULL;
		adapter->netdev->stats.tx_dropped++;
	}

	ind_bufp->index = 0;

	/* If releasing these entries brings the queue below half capacity
	 * and it was stopped, wake it — but only while TX queues are active
	 * (rcu guards against a concurrent reset tearing the queues down).
	 */
	if (atomic_sub_return(entries, &tx_scrq->used) <=
	    (adapter->req_tx_entries_per_subcrq / 2) &&
	    __netif_subqueue_stopped(adapter->netdev, queue_num)) {
		rcu_read_lock();

		if (adapter->tx_queues_active) {
			netif_wake_subqueue(adapter->netdev, queue_num);
			netdev_dbg(adapter->netdev, "Started queue %d\n",
				   queue_num);
		}

		rcu_read_unlock();
	}
}
23530d973388SThomas Falcon 
/* Submit a single 4-word descriptor to the hypervisor via H_SEND_SUB_CRQ,
 * bypassing the indirect (DMA) submission path.  entry points at the four
 * 64-bit words of one sub-CRQ descriptor.  Returns the hcall rc
 * (H_SUCCESS on success); failures are logged via print_subcrq_error().
 */
static int send_subcrq_direct(struct ibmvnic_adapter *adapter,
			      u64 remote_handle, u64 *entry)
{
	unsigned int ua = adapter->vdev->unit_address;
	struct device *dev = &adapter->vdev->dev;
	int rc;

	/* Make sure the hypervisor sees the complete request */
	dma_wmb();
	rc = plpar_hcall_norets(H_SEND_SUB_CRQ, ua,
				cpu_to_be64(remote_handle),
				cpu_to_be64(entry[0]), cpu_to_be64(entry[1]),
				cpu_to_be64(entry[2]), cpu_to_be64(entry[3]));

	if (rc)
		print_subcrq_error(dev, rc, __func__);

	return rc;
}
23734b1555f9SNick Child 
/* Push any descriptors accumulated in the sub-CRQ's indirect buffer to the
 * hypervisor.  With indirect=true the whole batch is handed over by DMA
 * address (H_SEND_SUB_CRQ_INDIRECT); with indirect=false a single
 * descriptor is sent inline via send_subcrq_direct().  On failure the
 * pending buffers are unwound; on success the indirect buffer is reset.
 * Returns 0 when there is nothing to flush, otherwise the hcall rc.
 */
static int ibmvnic_tx_scrq_flush(struct ibmvnic_adapter *adapter,
				 struct ibmvnic_sub_crq_queue *tx_scrq,
				 bool indirect)
{
	struct ibmvnic_ind_xmit_queue *ind_bufp;
	u64 dma_addr;
	u64 entries;
	u64 handle;
	int rc;

	ind_bufp = &tx_scrq->ind_buf;
	dma_addr = (u64)ind_bufp->indir_dma;
	entries = (u64)ind_bufp->index;
	handle = tx_scrq->handle;

	/* Nothing queued — nothing to do */
	if (!entries)
		return 0;

	if (indirect)
		rc = send_subcrq_indirect(adapter, handle, dma_addr, entries);
	else
		rc = send_subcrq_direct(adapter, handle,
					(u64 *)ind_bufp->indir_arr);

	if (rc)
		/* Reclaim skbs and stats for everything that failed to send */
		ibmvnic_tx_scrq_clean_buffer(adapter, tx_scrq);
	else
		ind_bufp->index = 0;
	return rc;
}
24040d973388SThomas Falcon 
/* ndo_start_xmit handler.  Copies the skb into a pre-mapped long-term
 * buffer, builds a TX sub-CRQ descriptor (plus header descriptors when the
 * firmware requires them), and either batches it in the queue's indirect
 * buffer or sends it directly.  Stats are accumulated in locals and folded
 * into netdev/adapter counters in one place at 'out'.
 */
static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int queue_num = skb_get_queue_mapping(skb);
	u8 *hdrs = (u8 *)&adapter->tx_rx_desc_req;
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_ind_xmit_queue *ind_bufp;
	struct ibmvnic_tx_buff *tx_buff = NULL;
	struct ibmvnic_sub_crq_queue *tx_scrq;
	struct ibmvnic_long_term_buff *ltb;
	struct ibmvnic_tx_pool *tx_pool;
	unsigned int tx_send_failed = 0;
	netdev_tx_t ret = NETDEV_TX_OK;
	unsigned int tx_map_failed = 0;
	union sub_crq indir_arr[16];
	unsigned int tx_dropped = 0;
	unsigned int tx_dpackets = 0;	/* sent via direct hcall */
	unsigned int tx_bpackets = 0;	/* sent via batched/indirect path */
	unsigned int tx_bytes = 0;
	dma_addr_t data_dma_addr;
	struct netdev_queue *txq;
	unsigned long lpar_rc;
	unsigned int skblen;
	union sub_crq tx_crq;
	unsigned int offset;
	bool use_scrq_send_direct = false;
	int num_entries = 1;
	unsigned char *dst;
	int bufidx = 0;
	u8 proto = 0;

	/* If a reset is in progress, drop the packet since
	 * the scrqs may get torn down. Otherwise use the
	 * rcu to ensure reset waits for us to complete.
	 */
	rcu_read_lock();
	if (!adapter->tx_queues_active) {
		dev_kfree_skb_any(skb);

		tx_send_failed++;
		tx_dropped++;
		ret = NETDEV_TX_OK;
		goto out;
	}

	tx_scrq = adapter->tx_scrq[queue_num];
	txq = netdev_get_tx_queue(netdev, queue_num);
	ind_bufp = &tx_scrq->ind_buf;

	/* Pad runt frames; on failure drop the skb but still flush any
	 * previously batched descriptors so they are not stranded.
	 */
	if (ibmvnic_xmit_workarounds(skb, netdev)) {
		tx_dropped++;
		tx_send_failed++;
		ret = NETDEV_TX_OK;
		lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq, true);
		if (lpar_rc != H_SUCCESS)
			goto tx_err;
		goto out;
	}

	/* GSO packets draw from the dedicated TSO buffer pool */
	if (skb_is_gso(skb))
		tx_pool = &adapter->tso_pool[queue_num];
	else
		tx_pool = &adapter->tx_pool[queue_num];

	bufidx = tx_pool->free_map[tx_pool->consumer_index];

	/* Pool exhausted — drop, but flush what is already batched */
	if (bufidx == IBMVNIC_INVALID_MAP) {
		dev_kfree_skb_any(skb);
		tx_send_failed++;
		tx_dropped++;
		ret = NETDEV_TX_OK;
		lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq, true);
		if (lpar_rc != H_SUCCESS)
			goto tx_err;
		goto out;
	}

	/* Claim the slot before copying data into it */
	tx_pool->free_map[tx_pool->consumer_index] = IBMVNIC_INVALID_MAP;

	map_txpool_buf_to_ltb(tx_pool, bufidx, &ltb, &offset);

	dst = ltb->buff + offset;
	memset(dst, 0, tx_pool->buf_size);
	data_dma_addr = ltb->addr + offset;

	/* if we are going to send_subcrq_direct this then we need to
	 * update the checksum before copying the data into ltb. Essentially
	 * these packets force disable CSO so that we can guarantee that
	 * FW does not need header info and we can send direct. Also, vnic
	 * server must be able to xmit standard packets without header data
	 */
	if (*hdrs == 0 && !skb_is_gso(skb) &&
	    !ind_bufp->index && !netdev_xmit_more()) {
		use_scrq_send_direct = true;
		if (skb->ip_summed == CHECKSUM_PARTIAL &&
		    skb_checksum_help(skb))
			use_scrq_send_direct = false;
	}

	/* Linearize the skb (head + frags) into the long-term buffer */
	if (skb_shinfo(skb)->nr_frags) {
		int cur, i;

		/* Copy the head */
		skb_copy_from_linear_data(skb, dst, skb_headlen(skb));
		cur = skb_headlen(skb);

		/* Copy the frags */
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			memcpy(dst + cur, skb_frag_address(frag),
			       skb_frag_size(frag));
			cur += skb_frag_size(frag);
		}
	} else {
		skb_copy_from_linear_data(skb, dst, skb->len);
	}

	/* post changes to long_term_buff *dst before VIOS accessing it */
	dma_wmb();

	tx_pool->consumer_index =
	    (tx_pool->consumer_index + 1) % tx_pool->num_buffers;

	tx_buff = &tx_pool->tx_buff[bufidx];

	/* Sanity checks on our free map to make sure it points to an index
	 * that is not being occupied by another skb. If skb memory is
	 * not freed then we see congestion control kick in and halt tx.
	 */
	if (unlikely(tx_buff->skb)) {
		dev_warn_ratelimited(dev, "TX free map points to untracked skb (%s %d idx=%d)\n",
				     skb_is_gso(skb) ? "tso_pool" : "tx_pool",
				     queue_num, bufidx);
		dev_kfree_skb_any(tx_buff->skb);
	}

	tx_buff->skb = skb;
	tx_buff->index = bufidx;
	tx_buff->pool_index = queue_num;
	/* Cache len now: skb may be freed by completion before stats use */
	skblen = skb->len;

	/* Build the v1 TX descriptor */
	memset(&tx_crq, 0, sizeof(tx_crq));
	tx_crq.v1.first = IBMVNIC_CRQ_CMD;
	tx_crq.v1.type = IBMVNIC_TX_DESC;
	tx_crq.v1.n_crq_elem = 1;
	tx_crq.v1.n_sge = 1;
	tx_crq.v1.flags1 = IBMVNIC_TX_COMP_NEEDED;

	/* The correlator encodes the pool (TSO bit) plus buffer index so the
	 * completion handler can find the right buffer.
	 */
	if (skb_is_gso(skb))
		tx_crq.v1.correlator =
			cpu_to_be32(bufidx | IBMVNIC_TSO_POOL_MASK);
	else
		tx_crq.v1.correlator = cpu_to_be32(bufidx);
	tx_crq.v1.dma_reg = cpu_to_be16(ltb->map_id);
	tx_crq.v1.sge_len = cpu_to_be32(skb->len);
	tx_crq.v1.ioba = cpu_to_be64(data_dma_addr);

	if (adapter->vlan_header_insertion && skb_vlan_tag_present(skb)) {
		tx_crq.v1.flags2 |= IBMVNIC_TX_VLAN_INSERT;
		tx_crq.v1.vlan_id = cpu_to_be16(skb->vlan_tci);
	}

	if (skb->protocol == htons(ETH_P_IP)) {
		tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV4;
		proto = ip_hdr(skb)->protocol;
	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV6;
		proto = ipv6_hdr(skb)->nexthdr;
	}

	if (proto == IPPROTO_TCP)
		tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_TCP;
	else if (proto == IPPROTO_UDP)
		tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_UDP;

	/* hdrs advances within tx_rx_desc_req so *hdrs below reflects the
	 * header-descriptor requirements for CSO/LSO packets.
	 */
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		tx_crq.v1.flags1 |= IBMVNIC_TX_CHKSUM_OFFLOAD;
		hdrs += 2;
	}
	if (skb_is_gso(skb)) {
		tx_crq.v1.flags1 |= IBMVNIC_TX_LSO;
		tx_crq.v1.mss = cpu_to_be16(skb_shinfo(skb)->gso_size);
		hdrs += 2;
	} else if (use_scrq_send_direct) {
		/* See above comment, CSO disabled with direct xmit */
		tx_crq.v1.flags1 &= ~(IBMVNIC_TX_CHKSUM_OFFLOAD);
		ind_bufp->index = 1;
		tx_buff->num_entries = 1;
		netdev_tx_sent_queue(txq, skb->len);
		ind_bufp->indir_arr[0] = tx_crq;
		lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq, false);
		if (lpar_rc != H_SUCCESS)
			goto tx_err;

		tx_dpackets++;
		goto early_exit;
	}

	/* Bit 7 of the firmware's request byte: header descriptors needed */
	if ((*hdrs >> 7) & 1)
		build_hdr_descs_arr(skb, indir_arr, &num_entries, *hdrs);

	tx_crq.v1.n_crq_elem = num_entries;
	tx_buff->num_entries = num_entries;
	/* flush buffer if current entry can not fit */
	if (num_entries + ind_bufp->index > IBMVNIC_MAX_IND_DESCS) {
		lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq, true);
		if (lpar_rc != H_SUCCESS)
			goto tx_flush_err;
	}

	/* Append TX descriptor + header descriptors to the batch */
	indir_arr[0] = tx_crq;
	memcpy(&ind_bufp->indir_arr[ind_bufp->index], &indir_arr[0],
	       num_entries * sizeof(struct ibmvnic_generic_scrq));

	ind_bufp->index += num_entries;
	/* Defer the flush only when more packets are coming and the batch
	 * still has room; otherwise flush now.
	 */
	if (__netdev_tx_sent_queue(txq, skb->len,
				   netdev_xmit_more() &&
				   ind_bufp->index < IBMVNIC_MAX_IND_DESCS)) {
		lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq, true);
		if (lpar_rc != H_SUCCESS)
			goto tx_err;
	}

	tx_bpackets++;

early_exit:
	/* Stop the subqueue when the sub-CRQ is (nearly) full */
	if (atomic_add_return(num_entries, &tx_scrq->used)
					>= adapter->req_tx_entries_per_subcrq) {
		netdev_dbg(netdev, "Stopping queue %d\n", queue_num);
		netif_stop_subqueue(netdev, queue_num);
	}

	tx_bytes += skblen;
	txq_trans_cond_update(txq);
	ret = NETDEV_TX_OK;
	goto out;

tx_flush_err:
	/* The skb was never queued: free it and rewind consumer_index */
	dev_kfree_skb_any(skb);
	tx_buff->skb = NULL;
	tx_pool->consumer_index = tx_pool->consumer_index == 0 ?
				  tx_pool->num_buffers - 1 :
				  tx_pool->consumer_index - 1;
	tx_dropped++;
tx_err:
	if (lpar_rc != H_CLOSED && lpar_rc != H_PARAMETER)
		dev_err_ratelimited(dev, "tx: send failed\n");

	if (lpar_rc == H_CLOSED || adapter->failover_pending) {
		/* Disable TX and report carrier off if queue is closed
		 * or pending failover.
		 * Firmware guarantees that a signal will be sent to the
		 * driver, triggering a reset or some other action.
		 */
		netif_tx_stop_all_queues(netdev);
		netif_carrier_off(netdev);
	}
out:
	rcu_read_unlock();
	/* Fold the per-call counters into the global and per-queue stats */
	netdev->stats.tx_dropped += tx_dropped;
	netdev->stats.tx_bytes += tx_bytes;
	netdev->stats.tx_packets += tx_bpackets + tx_dpackets;
	adapter->tx_send_failed += tx_send_failed;
	adapter->tx_map_failed += tx_map_failed;
	adapter->tx_stats_buffers[queue_num].batched_packets += tx_bpackets;
	adapter->tx_stats_buffers[queue_num].direct_packets += tx_dpackets;
	adapter->tx_stats_buffers[queue_num].bytes += tx_bytes;
	adapter->tx_stats_buffers[queue_num].dropped_packets += tx_dropped;

	return ret;
}
2677032c5e82SThomas Falcon 
/* ndo_set_rx_mode handler: program promiscuous/multicast filtering by
 * sending MULTICAST_CTRL CRQs to the VNIC server.
 */
static void ibmvnic_set_multi(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct netdev_hw_addr *ha;
	union ibmvnic_crq crq;

	/* NOTE(review): this request_capability setup is overwritten by the
	 * memsets below before any send in every branch — appears vestigial.
	 */
	memset(&crq, 0, sizeof(crq));
	crq.request_capability.first = IBMVNIC_CRQ_CMD;
	crq.request_capability.cmd = REQUEST_CAPABILITY;

	if (netdev->flags & IFF_PROMISC) {
		/* Promiscuous requested but unsupported: nothing to send */
		if (!adapter->promisc_supported)
			return;
	} else {
		if (netdev->flags & IFF_ALLMULTI) {
			/* Accept all multicast */
			memset(&crq, 0, sizeof(crq));
			crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
			crq.multicast_ctrl.cmd = MULTICAST_CTRL;
			crq.multicast_ctrl.flags = IBMVNIC_ENABLE_ALL;
			ibmvnic_send_crq(adapter, &crq);
		} else if (netdev_mc_empty(netdev)) {
			/* Reject all multicast */
			memset(&crq, 0, sizeof(crq));
			crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
			crq.multicast_ctrl.cmd = MULTICAST_CTRL;
			crq.multicast_ctrl.flags = IBMVNIC_DISABLE_ALL;
			ibmvnic_send_crq(adapter, &crq);
		} else {
			/* Accept one or more multicast(s) */
			netdev_for_each_mc_addr(ha, netdev) {
				memset(&crq, 0, sizeof(crq));
				crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
				crq.multicast_ctrl.cmd = MULTICAST_CTRL;
				crq.multicast_ctrl.flags = IBMVNIC_ENABLE_MC;
				ether_addr_copy(&crq.multicast_ctrl.mac_addr[0],
						ha->addr);
				ibmvnic_send_crq(adapter, &crq);
			}
		}
	}
}
2720032c5e82SThomas Falcon 
/* Send a CHANGE_MAC_ADDR CRQ and wait (up to 10s) for firmware to ack.
 * netdev->dev_addr itself is updated by handle_change_mac_rsp() when the
 * response arrives.  On any failure, adapter->mac_addr is resynced from
 * netdev->dev_addr so the cached copy never diverges from the device.
 * Returns 0 on success or a negative errno.
 */
static int __ibmvnic_set_mac(struct net_device *netdev, u8 *dev_addr)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	union ibmvnic_crq crq;
	int rc;

	if (!is_valid_ether_addr(dev_addr)) {
		rc = -EADDRNOTAVAIL;
		goto err;
	}

	memset(&crq, 0, sizeof(crq));
	crq.change_mac_addr.first = IBMVNIC_CRQ_CMD;
	crq.change_mac_addr.cmd = CHANGE_MAC_ADDR;
	ether_addr_copy(&crq.change_mac_addr.mac_addr[0], dev_addr);

	/* fw_lock serializes firmware commands that share the fw_done
	 * completion; it must be held across send + wait.
	 */
	mutex_lock(&adapter->fw_lock);
	adapter->fw_done_rc = 0;
	reinit_completion(&adapter->fw_done);

	rc = ibmvnic_send_crq(adapter, &crq);
	if (rc) {
		rc = -EIO;
		mutex_unlock(&adapter->fw_lock);
		goto err;
	}

	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
	/* netdev->dev_addr is changed in handle_change_mac_rsp function */
	if (rc || adapter->fw_done_rc) {
		rc = -EIO;
		mutex_unlock(&adapter->fw_lock);
		goto err;
	}
	mutex_unlock(&adapter->fw_lock);
	return 0;
err:
	/* Roll the cached MAC back to what the device actually has */
	ether_addr_copy(adapter->mac_addr, netdev->dev_addr);
	return rc;
}
2761032c5e82SThomas Falcon 
ibmvnic_set_mac(struct net_device * netdev,void * p)2762c26eba03SJohn Allen static int ibmvnic_set_mac(struct net_device *netdev, void *p)
2763c26eba03SJohn Allen {
2764c26eba03SJohn Allen 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2765c26eba03SJohn Allen 	struct sockaddr *addr = p;
2766f813614fSThomas Falcon 	int rc;
2767c26eba03SJohn Allen 
276862740e97SThomas Falcon 	rc = 0;
27698fc3672aSLijun Pan 	if (!is_valid_ether_addr(addr->sa_data))
27708fc3672aSLijun Pan 		return -EADDRNOTAVAIL;
27718fc3672aSLijun Pan 
277262740e97SThomas Falcon 	ether_addr_copy(adapter->mac_addr, addr->sa_data);
277367eb2114SJiri Wiesner 	if (adapter->state != VNIC_PROBED)
277462740e97SThomas Falcon 		rc = __ibmvnic_set_mac(netdev, addr->sa_data);
2775c26eba03SJohn Allen 
2776f813614fSThomas Falcon 	return rc;
2777c26eba03SJohn Allen }
2778c26eba03SJohn Allen 
reset_reason_to_string(enum ibmvnic_reset_reason reason)2779caee7bf5SLijun Pan static const char *reset_reason_to_string(enum ibmvnic_reset_reason reason)
2780caee7bf5SLijun Pan {
2781caee7bf5SLijun Pan 	switch (reason) {
2782caee7bf5SLijun Pan 	case VNIC_RESET_FAILOVER:
2783caee7bf5SLijun Pan 		return "FAILOVER";
2784caee7bf5SLijun Pan 	case VNIC_RESET_MOBILITY:
2785caee7bf5SLijun Pan 		return "MOBILITY";
2786caee7bf5SLijun Pan 	case VNIC_RESET_FATAL:
2787caee7bf5SLijun Pan 		return "FATAL";
2788caee7bf5SLijun Pan 	case VNIC_RESET_NON_FATAL:
2789caee7bf5SLijun Pan 		return "NON_FATAL";
2790caee7bf5SLijun Pan 	case VNIC_RESET_TIMEOUT:
2791caee7bf5SLijun Pan 		return "TIMEOUT";
2792caee7bf5SLijun Pan 	case VNIC_RESET_CHANGE_PARAM:
2793caee7bf5SLijun Pan 		return "CHANGE_PARAM";
2794822ebc2cSLijun Pan 	case VNIC_RESET_PASSIVE_INIT:
2795822ebc2cSLijun Pan 		return "PASSIVE_INIT";
2796caee7bf5SLijun Pan 	}
279707b5dc1dSMichal Suchanek 	return "UNKNOWN";
2798caee7bf5SLijun Pan }
2799caee7bf5SLijun Pan 
/*
 * Initialize the init_done completion and return code values. We
 * can get a transport event just after registering the CRQ and the
 * tasklet will use this to communicate the transport event. To ensure
 * we don't miss the notification/error, initialize these _before_
 * registering the CRQ.
 */
static inline void reinit_init_done(struct ibmvnic_adapter *adapter)
{
	reinit_completion(&adapter->init_done);
	adapter->init_done_rc = 0;
}
2812ae16bf15SSukadev Bhattiprolu 
/*
 * do_reset - process one queued reset work item (rwi).
 *
 * Runs under rtnl: taken here, except for VNIC_RESET_CHANGE_PARAM where
 * the requestor already holds it (see the checks below).  Depending on
 * the reset reason this closes the device, re-initializes the CRQ and
 * sub-CRQs, logs back in, rebuilds resources if the negotiated queue
 * geometry changed, and re-opens.
 *
 * do_reset returns zero if we are able to keep processing reset events, or
 * non-zero if we hit a fatal error and must halt.
 */
static int do_reset(struct ibmvnic_adapter *adapter,
		    struct ibmvnic_rwi *rwi, u32 reset_state)
{
	struct net_device *netdev = adapter->netdev;
	u64 old_num_rx_queues, old_num_tx_queues;
	u64 old_num_rx_slots, old_num_tx_slots;
	int rc;

	netdev_dbg(adapter->netdev,
		   "[S:%s FOP:%d] Reset reason: %s, reset_state: %s\n",
		   adapter_state_to_string(adapter->state),
		   adapter->failover_pending,
		   reset_reason_to_string(rwi->reset_reason),
		   adapter_state_to_string(reset_state));

	adapter->reset_reason = rwi->reset_reason;
	/* requestor of VNIC_RESET_CHANGE_PARAM already has the rtnl lock */
	if (!(adapter->reset_reason == VNIC_RESET_CHANGE_PARAM))
		rtnl_lock();

	/* Now that we have the rtnl lock, clear any pending failover.
	 * This will ensure ibmvnic_open() has either completed or will
	 * block until failover is complete.
	 */
	if (rwi->reset_reason == VNIC_RESET_FAILOVER)
		adapter->failover_pending = false;

	/* read the state and check (again) after getting rtnl */
	reset_state = adapter->state;

	if (reset_state == VNIC_REMOVING || reset_state == VNIC_REMOVED) {
		rc = -EBUSY;
		goto out;
	}

	netif_carrier_off(netdev);

	/* snapshot queue geometry so we can tell after re-login whether
	 * the negotiated values changed and resources must be rebuilt
	 */
	old_num_rx_queues = adapter->req_rx_queues;
	old_num_tx_queues = adapter->req_tx_queues;
	old_num_rx_slots = adapter->req_rx_add_entries_per_subcrq;
	old_num_tx_slots = adapter->req_tx_entries_per_subcrq;

	ibmvnic_cleanup(netdev);

	/* An open device must be closed first, except for MOBILITY and
	 * FAILOVER resets which skip the explicit close here.
	 */
	if (reset_state == VNIC_OPEN &&
	    adapter->reset_reason != VNIC_RESET_MOBILITY &&
	    adapter->reset_reason != VNIC_RESET_FAILOVER) {
		if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) {
			rc = __ibmvnic_close(netdev);
			if (rc)
				goto out;
		} else {
			adapter->state = VNIC_CLOSING;

			/* Release the RTNL lock before link state change and
			 * re-acquire after the link state change to allow
			 * linkwatch_event to grab the RTNL lock and run during
			 * a reset.
			 */
			rtnl_unlock();
			rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN);
			rtnl_lock();
			if (rc)
				goto out;

			if (adapter->state == VNIC_OPEN) {
				/* When we dropped rtnl, ibmvnic_open() got
				 * it and noticed that we are resetting and
				 * set the adapter state to OPEN. Update our
				 * new "target" state, and resume the reset
				 * from VNIC_CLOSING state.
				 */
				netdev_dbg(netdev,
					   "Open changed state from %s, updating.\n",
					   adapter_state_to_string(reset_state));
				reset_state = VNIC_OPEN;
				adapter->state = VNIC_CLOSING;
			}

			if (adapter->state != VNIC_CLOSING) {
				/* If someone else changed the adapter state
				 * when we dropped the rtnl, fail the reset
				 */
				rc = -EAGAIN;
				goto out;
			}
			adapter->state = VNIC_CLOSED;
		}
	}

	/* CHANGE_PARAM rebuilds everything, including the CRQ itself */
	if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) {
		release_resources(adapter);
		release_sub_crqs(adapter, 1);
		release_crq_queue(adapter);
	}

	/* NON_FATAL resets keep the existing CRQ/login; everything else
	 * re-initializes the firmware channel below.
	 */
	if (adapter->reset_reason != VNIC_RESET_NON_FATAL) {
		/* remove the closed state so when we call open it appears
		 * we are coming from the probed state.
		 */
		adapter->state = VNIC_PROBED;

		reinit_init_done(adapter);

		if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) {
			rc = init_crq_queue(adapter);
		} else if (adapter->reset_reason == VNIC_RESET_MOBILITY) {
			rc = ibmvnic_reenable_crq_queue(adapter);
			release_sub_crqs(adapter, 1);
		} else {
			rc = ibmvnic_reset_crq(adapter);
			if (rc == H_CLOSED || rc == H_SUCCESS) {
				rc = vio_enable_interrupts(adapter->vdev);
				if (rc)
					netdev_err(adapter->netdev,
						   "Reset failed to enable interrupts. rc=%d\n",
						   rc);
			}
		}

		if (rc) {
			netdev_err(adapter->netdev,
				   "Reset couldn't initialize crq. rc=%d\n", rc);
			goto out;
		}

		rc = ibmvnic_reset_init(adapter, true);
		if (rc)
			goto out;

		/* If the adapter was in PROBE or DOWN state prior to the reset,
		 * exit here.
		 */
		if (reset_state == VNIC_PROBED || reset_state == VNIC_DOWN) {
			rc = 0;
			goto out;
		}

		rc = ibmvnic_login(netdev);
		if (rc)
			goto out;

		/* Rebuild everything if CHANGE_PARAM, or if login negotiated
		 * different queue counts/ring sizes, or if any pool is
		 * missing; otherwise just re-init the existing pools.
		 */
		if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) {
			rc = init_resources(adapter);
			if (rc)
				goto out;
		} else if (adapter->req_rx_queues != old_num_rx_queues ||
		    adapter->req_tx_queues != old_num_tx_queues ||
		    adapter->req_rx_add_entries_per_subcrq !=
		    old_num_rx_slots ||
		    adapter->req_tx_entries_per_subcrq !=
		    old_num_tx_slots ||
		    !adapter->rx_pool ||
		    !adapter->tso_pool ||
		    !adapter->tx_pool) {
			release_napi(adapter);
			release_vpd_data(adapter);

			rc = init_resources(adapter);
			if (rc)
				goto out;

		} else {
			rc = init_tx_pools(netdev);
			if (rc) {
				netdev_dbg(netdev,
					   "init tx pools failed (%d)\n",
					   rc);
				goto out;
			}

			rc = init_rx_pools(netdev);
			if (rc) {
				netdev_dbg(netdev,
					   "init rx pools failed (%d)\n",
					   rc);
				goto out;
			}
		}
		ibmvnic_disable_irqs(adapter);
	}
	adapter->state = VNIC_CLOSED;

	if (reset_state == VNIC_CLOSED) {
		rc = 0;
		goto out;
	}

	rc = __ibmvnic_open(netdev);
	if (rc) {
		rc = IBMVNIC_OPEN_FAILED;
		goto out;
	}

	/* refresh device's multicast list */
	ibmvnic_set_multi(netdev);

	if (adapter->reset_reason == VNIC_RESET_FAILOVER ||
	    adapter->reset_reason == VNIC_RESET_MOBILITY)
		__netdev_notify_peers(netdev);

	rc = 0;

out:
	/* restore the adapter state if reset failed */
	if (rc)
		adapter->state = reset_state;
	/* requestor of VNIC_RESET_CHANGE_PARAM should still hold the rtnl lock */
	if (!(adapter->reset_reason == VNIC_RESET_CHANGE_PARAM))
		rtnl_unlock();

	netdev_dbg(adapter->netdev, "[S:%s FOP:%d] Reset done, rc %d\n",
		   adapter_state_to_string(adapter->state),
		   adapter->failover_pending, rc);
	return rc;
}
3033ed651a10SNathan Fontenot 
/*
 * do_hard_reset - tear the adapter fully down and re-initialize it.
 *
 * Unlike do_reset(), this unconditionally releases all resources,
 * sub-CRQs and the CRQ before rebuilding from scratch.  The caller
 * (__ibmvnic_reset()) holds rtnl around this call, except when the
 * previous reset was CHANGE_PARAM and the requestor already holds it.
 *
 * Returns 0 on success or a negative/driver error code; on failure the
 * adapter state is restored to what it was before the reset.
 */
static int do_hard_reset(struct ibmvnic_adapter *adapter,
			 struct ibmvnic_rwi *rwi, u32 reset_state)
{
	struct net_device *netdev = adapter->netdev;
	int rc;

	netdev_dbg(adapter->netdev, "Hard resetting driver (%s)\n",
		   reset_reason_to_string(rwi->reset_reason));

	/* read the state and check (again) after getting rtnl */
	reset_state = adapter->state;

	if (reset_state == VNIC_REMOVING || reset_state == VNIC_REMOVED) {
		rc = -EBUSY;
		goto out;
	}

	netif_carrier_off(netdev);
	adapter->reset_reason = rwi->reset_reason;

	/* full teardown: stop traffic, then drop resources and the CRQ */
	ibmvnic_cleanup(netdev);
	release_resources(adapter);
	release_sub_crqs(adapter, 0);
	release_crq_queue(adapter);

	/* remove the closed state so when we call open it appears
	 * we are coming from the probed state.
	 */
	adapter->state = VNIC_PROBED;

	/* must happen before init_crq_queue() registers the CRQ */
	reinit_init_done(adapter);

	rc = init_crq_queue(adapter);
	if (rc) {
		netdev_err(adapter->netdev,
			   "Couldn't initialize crq. rc=%d\n", rc);
		goto out;
	}

	rc = ibmvnic_reset_init(adapter, false);
	if (rc)
		goto out;

	/* If the adapter was in PROBE or DOWN state prior to the reset,
	 * exit here.
	 */
	if (reset_state == VNIC_PROBED || reset_state == VNIC_DOWN)
		goto out;

	rc = ibmvnic_login(netdev);
	if (rc)
		goto out;

	rc = init_resources(adapter);
	if (rc)
		goto out;

	ibmvnic_disable_irqs(adapter);
	adapter->state = VNIC_CLOSED;

	/* target state was CLOSED: nothing more to do */
	if (reset_state == VNIC_CLOSED)
		goto out;

	rc = __ibmvnic_open(netdev);
	if (rc) {
		rc = IBMVNIC_OPEN_FAILED;
		goto out;
	}

	__netdev_notify_peers(netdev);
out:
	/* restore adapter state if reset failed */
	if (rc)
		adapter->state = reset_state;
	netdev_dbg(adapter->netdev, "[S:%s FOP:%d] Hard reset done, rc %d\n",
		   adapter_state_to_string(adapter->state),
		   adapter->failover_pending, rc);
	return rc;
}
31132770a798SThomas Falcon 
get_next_rwi(struct ibmvnic_adapter * adapter)3114ed651a10SNathan Fontenot static struct ibmvnic_rwi *get_next_rwi(struct ibmvnic_adapter *adapter)
3115ed651a10SNathan Fontenot {
3116ed651a10SNathan Fontenot 	struct ibmvnic_rwi *rwi;
31176c5c7489SThomas Falcon 	unsigned long flags;
3118ed651a10SNathan Fontenot 
31196c5c7489SThomas Falcon 	spin_lock_irqsave(&adapter->rwi_lock, flags);
3120ed651a10SNathan Fontenot 
3121ed651a10SNathan Fontenot 	if (!list_empty(&adapter->rwi_list)) {
3122ed651a10SNathan Fontenot 		rwi = list_first_entry(&adapter->rwi_list, struct ibmvnic_rwi,
3123ed651a10SNathan Fontenot 				       list);
3124ed651a10SNathan Fontenot 		list_del(&rwi->list);
3125ed651a10SNathan Fontenot 	} else {
3126ed651a10SNathan Fontenot 		rwi = NULL;
3127ed651a10SNathan Fontenot 	}
3128ed651a10SNathan Fontenot 
31296c5c7489SThomas Falcon 	spin_unlock_irqrestore(&adapter->rwi_lock, flags);
3130ed651a10SNathan Fontenot 	return rwi;
3131ed651a10SNathan Fontenot }
3132ed651a10SNathan Fontenot 
313353f8b1b2SCristobal Forno /**
313453f8b1b2SCristobal Forno  * do_passive_init - complete probing when partner device is detected.
313553f8b1b2SCristobal Forno  * @adapter: ibmvnic_adapter struct
313653f8b1b2SCristobal Forno  *
313753f8b1b2SCristobal Forno  * If the ibmvnic device does not have a partner device to communicate with at boot
313853f8b1b2SCristobal Forno  * and that partner device comes online at a later time, this function is called
313953f8b1b2SCristobal Forno  * to complete the initialization process of ibmvnic device.
314053f8b1b2SCristobal Forno  * Caller is expected to hold rtnl_lock().
314153f8b1b2SCristobal Forno  *
314253f8b1b2SCristobal Forno  * Returns non-zero if sub-CRQs are not initialized properly leaving the device
314353f8b1b2SCristobal Forno  * in the down state.
314453f8b1b2SCristobal Forno  * Returns 0 upon success and the device is in PROBED state.
314553f8b1b2SCristobal Forno  */
314653f8b1b2SCristobal Forno 
do_passive_init(struct ibmvnic_adapter * adapter)314753f8b1b2SCristobal Forno static int do_passive_init(struct ibmvnic_adapter *adapter)
314853f8b1b2SCristobal Forno {
314953f8b1b2SCristobal Forno 	unsigned long timeout = msecs_to_jiffies(30000);
315053f8b1b2SCristobal Forno 	struct net_device *netdev = adapter->netdev;
315153f8b1b2SCristobal Forno 	struct device *dev = &adapter->vdev->dev;
315253f8b1b2SCristobal Forno 	int rc;
315353f8b1b2SCristobal Forno 
315453f8b1b2SCristobal Forno 	netdev_dbg(netdev, "Partner device found, probing.\n");
315553f8b1b2SCristobal Forno 
315653f8b1b2SCristobal Forno 	adapter->state = VNIC_PROBING;
315753f8b1b2SCristobal Forno 	reinit_completion(&adapter->init_done);
315853f8b1b2SCristobal Forno 	adapter->init_done_rc = 0;
315953f8b1b2SCristobal Forno 	adapter->crq.active = true;
316053f8b1b2SCristobal Forno 
316153f8b1b2SCristobal Forno 	rc = send_crq_init_complete(adapter);
316253f8b1b2SCristobal Forno 	if (rc)
316353f8b1b2SCristobal Forno 		goto out;
316453f8b1b2SCristobal Forno 
316553f8b1b2SCristobal Forno 	rc = send_version_xchg(adapter);
316653f8b1b2SCristobal Forno 	if (rc)
316753f8b1b2SCristobal Forno 		netdev_dbg(adapter->netdev, "send_version_xchg failed, rc=%d\n", rc);
316853f8b1b2SCristobal Forno 
316953f8b1b2SCristobal Forno 	if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
317053f8b1b2SCristobal Forno 		dev_err(dev, "Initialization sequence timed out\n");
317153f8b1b2SCristobal Forno 		rc = -ETIMEDOUT;
317253f8b1b2SCristobal Forno 		goto out;
317353f8b1b2SCristobal Forno 	}
317453f8b1b2SCristobal Forno 
317553f8b1b2SCristobal Forno 	rc = init_sub_crqs(adapter);
317653f8b1b2SCristobal Forno 	if (rc) {
317753f8b1b2SCristobal Forno 		dev_err(dev, "Initialization of sub crqs failed, rc=%d\n", rc);
317853f8b1b2SCristobal Forno 		goto out;
317953f8b1b2SCristobal Forno 	}
318053f8b1b2SCristobal Forno 
318153f8b1b2SCristobal Forno 	rc = init_sub_crq_irqs(adapter);
318253f8b1b2SCristobal Forno 	if (rc) {
318353f8b1b2SCristobal Forno 		dev_err(dev, "Failed to initialize sub crq irqs\n, rc=%d", rc);
318453f8b1b2SCristobal Forno 		goto init_failed;
318553f8b1b2SCristobal Forno 	}
318653f8b1b2SCristobal Forno 
318753f8b1b2SCristobal Forno 	netdev->mtu = adapter->req_mtu - ETH_HLEN;
318853f8b1b2SCristobal Forno 	netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
318953f8b1b2SCristobal Forno 	netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
319053f8b1b2SCristobal Forno 
319153f8b1b2SCristobal Forno 	adapter->state = VNIC_PROBED;
319253f8b1b2SCristobal Forno 	netdev_dbg(netdev, "Probed successfully. Waiting for signal from partner device.\n");
319353f8b1b2SCristobal Forno 
319453f8b1b2SCristobal Forno 	return 0;
319553f8b1b2SCristobal Forno 
319653f8b1b2SCristobal Forno init_failed:
319753f8b1b2SCristobal Forno 	release_sub_crqs(adapter, 1);
319853f8b1b2SCristobal Forno out:
319953f8b1b2SCristobal Forno 	adapter->state = VNIC_DOWN;
320053f8b1b2SCristobal Forno 	return rc;
320153f8b1b2SCristobal Forno }
320253f8b1b2SCristobal Forno 
__ibmvnic_reset(struct work_struct * work)3203ed651a10SNathan Fontenot static void __ibmvnic_reset(struct work_struct *work)
3204ed651a10SNathan Fontenot {
3205ed651a10SNathan Fontenot 	struct ibmvnic_adapter *adapter;
3206fd98693cSSukadev Bhattiprolu 	unsigned int timeout = 5000;
32074f408e1fSSukadev Bhattiprolu 	struct ibmvnic_rwi *tmprwi;
3208fd98693cSSukadev Bhattiprolu 	bool saved_state = false;
32094f408e1fSSukadev Bhattiprolu 	struct ibmvnic_rwi *rwi;
32107d7195a0SJuliet Kim 	unsigned long flags;
3211fd98693cSSukadev Bhattiprolu 	struct device *dev;
3212fd98693cSSukadev Bhattiprolu 	bool need_reset;
3213db9f0e8bSSukadev Bhattiprolu 	int num_fails = 0;
3214fd98693cSSukadev Bhattiprolu 	u32 reset_state;
3215c26eba03SJohn Allen 	int rc = 0;
3216ed651a10SNathan Fontenot 
3217ed651a10SNathan Fontenot 	adapter = container_of(work, struct ibmvnic_adapter, ibmvnic_reset);
3218fd98693cSSukadev Bhattiprolu 		dev = &adapter->vdev->dev;
3219ed651a10SNathan Fontenot 
3220fd98693cSSukadev Bhattiprolu 	/* Wait for ibmvnic_probe() to complete. If probe is taking too long
3221fd98693cSSukadev Bhattiprolu 	 * or if another reset is in progress, defer work for now. If probe
3222fd98693cSSukadev Bhattiprolu 	 * eventually fails it will flush and terminate our work.
3223fd98693cSSukadev Bhattiprolu 	 *
3224fd98693cSSukadev Bhattiprolu 	 * Three possibilities here:
3225fd98693cSSukadev Bhattiprolu 	 * 1. Adpater being removed  - just return
3226fd98693cSSukadev Bhattiprolu 	 * 2. Timed out on probe or another reset in progress - delay the work
3227fd98693cSSukadev Bhattiprolu 	 * 3. Completed probe - perform any resets in queue
3228fd98693cSSukadev Bhattiprolu 	 */
3229fd98693cSSukadev Bhattiprolu 	if (adapter->state == VNIC_PROBING &&
3230fd98693cSSukadev Bhattiprolu 	    !wait_for_completion_timeout(&adapter->probe_done, timeout)) {
3231fd98693cSSukadev Bhattiprolu 		dev_err(dev, "Reset thread timed out on probe");
3232870e04aeSLijun Pan 		queue_delayed_work(system_long_wq,
3233870e04aeSLijun Pan 				   &adapter->ibmvnic_delayed_reset,
32347ed5b31fSJuliet Kim 				   IBMVNIC_RESET_DELAY);
32357ed5b31fSJuliet Kim 		return;
32367ed5b31fSJuliet Kim 	}
32377ed5b31fSJuliet Kim 
3238fd98693cSSukadev Bhattiprolu 	/* adapter is done with probe (i.e state is never VNIC_PROBING now) */
3239fd98693cSSukadev Bhattiprolu 	if (adapter->state == VNIC_REMOVING)
3240fd98693cSSukadev Bhattiprolu 		return;
3241fd98693cSSukadev Bhattiprolu 
3242fd98693cSSukadev Bhattiprolu 	/* ->rwi_list is stable now (no one else is removing entries) */
3243fd98693cSSukadev Bhattiprolu 
3244fd98693cSSukadev Bhattiprolu 	/* ibmvnic_probe() may have purged the reset queue after we were
3245fd98693cSSukadev Bhattiprolu 	 * scheduled to process a reset so there maybe no resets to process.
3246fd98693cSSukadev Bhattiprolu 	 * Before setting the ->resetting bit though, we have to make sure
3247fd98693cSSukadev Bhattiprolu 	 * that there is infact a reset to process. Otherwise we may race
3248fd98693cSSukadev Bhattiprolu 	 * with ibmvnic_open() and end up leaving the vnic down:
3249fd98693cSSukadev Bhattiprolu 	 *
3250fd98693cSSukadev Bhattiprolu 	 *	__ibmvnic_reset()	    ibmvnic_open()
3251fd98693cSSukadev Bhattiprolu 	 *	-----------------	    --------------
3252fd98693cSSukadev Bhattiprolu 	 *
3253fd98693cSSukadev Bhattiprolu 	 *  set ->resetting bit
3254fd98693cSSukadev Bhattiprolu 	 *  				find ->resetting bit is set
3255fd98693cSSukadev Bhattiprolu 	 *  				set ->state to IBMVNIC_OPEN (i.e
3256fd98693cSSukadev Bhattiprolu 	 *  				assume reset will open device)
3257fd98693cSSukadev Bhattiprolu 	 *  				return
3258fd98693cSSukadev Bhattiprolu 	 *  find reset queue empty
3259fd98693cSSukadev Bhattiprolu 	 *  return
3260fd98693cSSukadev Bhattiprolu 	 *
3261fd98693cSSukadev Bhattiprolu 	 *  	Neither performed vnic login/open and vnic stays down
3262fd98693cSSukadev Bhattiprolu 	 *
3263fd98693cSSukadev Bhattiprolu 	 * If we hold the lock and conditionally set the bit, either we
3264fd98693cSSukadev Bhattiprolu 	 * or ibmvnic_open() will complete the open.
3265fd98693cSSukadev Bhattiprolu 	 */
3266fd98693cSSukadev Bhattiprolu 	need_reset = false;
3267fd98693cSSukadev Bhattiprolu 	spin_lock(&adapter->rwi_lock);
3268fd98693cSSukadev Bhattiprolu 	if (!list_empty(&adapter->rwi_list)) {
3269fd98693cSSukadev Bhattiprolu 		if (test_and_set_bit_lock(0, &adapter->resetting)) {
3270fd98693cSSukadev Bhattiprolu 			queue_delayed_work(system_long_wq,
3271fd98693cSSukadev Bhattiprolu 					   &adapter->ibmvnic_delayed_reset,
3272fd98693cSSukadev Bhattiprolu 					   IBMVNIC_RESET_DELAY);
3273fd98693cSSukadev Bhattiprolu 		} else {
3274fd98693cSSukadev Bhattiprolu 			need_reset = true;
3275fd98693cSSukadev Bhattiprolu 		}
3276fd98693cSSukadev Bhattiprolu 	}
3277fd98693cSSukadev Bhattiprolu 	spin_unlock(&adapter->rwi_lock);
3278fd98693cSSukadev Bhattiprolu 
3279fd98693cSSukadev Bhattiprolu 	if (!need_reset)
3280fd98693cSSukadev Bhattiprolu 		return;
3281fd98693cSSukadev Bhattiprolu 
3282ed651a10SNathan Fontenot 	rwi = get_next_rwi(adapter);
3283ed651a10SNathan Fontenot 	while (rwi) {
32847d7195a0SJuliet Kim 		spin_lock_irqsave(&adapter->state_lock, flags);
32857d7195a0SJuliet Kim 
328636f1031cSThomas Falcon 		if (adapter->state == VNIC_REMOVING ||
3287c8dc5595SMichal Suchanek 		    adapter->state == VNIC_REMOVED) {
32887d7195a0SJuliet Kim 			spin_unlock_irqrestore(&adapter->state_lock, flags);
32891c2977c0SJuliet Kim 			kfree(rwi);
32901c2977c0SJuliet Kim 			rc = EBUSY;
32911c2977c0SJuliet Kim 			break;
32921c2977c0SJuliet Kim 		}
329336f1031cSThomas Falcon 
32947d7195a0SJuliet Kim 		if (!saved_state) {
32957d7195a0SJuliet Kim 			reset_state = adapter->state;
32967d7195a0SJuliet Kim 			saved_state = true;
32977d7195a0SJuliet Kim 		}
32987d7195a0SJuliet Kim 		spin_unlock_irqrestore(&adapter->state_lock, flags);
32997d7195a0SJuliet Kim 
330053f8b1b2SCristobal Forno 		if (rwi->reset_reason == VNIC_RESET_PASSIVE_INIT) {
330153f8b1b2SCristobal Forno 			rtnl_lock();
330253f8b1b2SCristobal Forno 			rc = do_passive_init(adapter);
330353f8b1b2SCristobal Forno 			rtnl_unlock();
330453f8b1b2SCristobal Forno 			if (!rc)
330553f8b1b2SCristobal Forno 				netif_carrier_on(adapter->netdev);
330653f8b1b2SCristobal Forno 		} else if (adapter->force_reset_recovery) {
3307bab08bedSLijun Pan 			/* Since we are doing a hard reset now, clear the
33081d850493SSukadev Bhattiprolu 			 * failover_pending flag so we don't ignore any
33091d850493SSukadev Bhattiprolu 			 * future MOBILITY or other resets.
33101d850493SSukadev Bhattiprolu 			 */
33111d850493SSukadev Bhattiprolu 			adapter->failover_pending = false;
33121d850493SSukadev Bhattiprolu 
3313b27507bbSJuliet Kim 			/* Transport event occurred during previous reset */
3314b27507bbSJuliet Kim 			if (adapter->wait_for_reset) {
3315b27507bbSJuliet Kim 				/* Previous was CHANGE_PARAM; caller locked */
33162770a798SThomas Falcon 				adapter->force_reset_recovery = false;
33172770a798SThomas Falcon 				rc = do_hard_reset(adapter, rwi, reset_state);
33182770a798SThomas Falcon 			} else {
3319b27507bbSJuliet Kim 				rtnl_lock();
3320b27507bbSJuliet Kim 				adapter->force_reset_recovery = false;
3321b27507bbSJuliet Kim 				rc = do_hard_reset(adapter, rwi, reset_state);
3322b27507bbSJuliet Kim 				rtnl_unlock();
3323b27507bbSJuliet Kim 			}
3324db9f0e8bSSukadev Bhattiprolu 			if (rc)
3325db9f0e8bSSukadev Bhattiprolu 				num_fails++;
3326db9f0e8bSSukadev Bhattiprolu 			else
3327db9f0e8bSSukadev Bhattiprolu 				num_fails = 0;
3328db9f0e8bSSukadev Bhattiprolu 
3329db9f0e8bSSukadev Bhattiprolu 			/* If auto-priority-failover is enabled we can get
3330db9f0e8bSSukadev Bhattiprolu 			 * back to back failovers during resets, resulting
3331db9f0e8bSSukadev Bhattiprolu 			 * in at least two failed resets (from high-priority
3332db9f0e8bSSukadev Bhattiprolu 			 * backing device to low-priority one and then back)
3333db9f0e8bSSukadev Bhattiprolu 			 * If resets continue to fail beyond that, give the
3334db9f0e8bSSukadev Bhattiprolu 			 * adapter some time to settle down before retrying.
3335db9f0e8bSSukadev Bhattiprolu 			 */
3336db9f0e8bSSukadev Bhattiprolu 			if (num_fails >= 3) {
3337f15fde9dSSukadev Bhattiprolu 				netdev_dbg(adapter->netdev,
3338db9f0e8bSSukadev Bhattiprolu 					   "[S:%s] Hard reset failed %d times, waiting 60 secs\n",
3339db9f0e8bSSukadev Bhattiprolu 					   adapter_state_to_string(adapter->state),
3340db9f0e8bSSukadev Bhattiprolu 					   num_fails);
3341f15fde9dSSukadev Bhattiprolu 				set_current_state(TASK_UNINTERRUPTIBLE);
3342f15fde9dSSukadev Bhattiprolu 				schedule_timeout(60 * HZ);
3343f15fde9dSSukadev Bhattiprolu 			}
33441f45dc22SLijun Pan 		} else {
3345ed651a10SNathan Fontenot 			rc = do_reset(adapter, rwi, reset_state);
33462770a798SThomas Falcon 		}
33474f408e1fSSukadev Bhattiprolu 		tmprwi = rwi;
3348a86d5c68SDany Madden 		adapter->last_reset_time = jiffies;
33490cb4bc66SDany Madden 
335018f141bfSDany Madden 		if (rc)
335118f141bfSDany Madden 			netdev_dbg(adapter->netdev, "Reset failed, rc=%d\n", rc);
3352ed651a10SNathan Fontenot 
3353ed651a10SNathan Fontenot 		rwi = get_next_rwi(adapter);
33547ed5b31fSJuliet Kim 
33554f408e1fSSukadev Bhattiprolu 		/*
33564f408e1fSSukadev Bhattiprolu 		 * If there are no resets queued and the previous reset failed,
33574f408e1fSSukadev Bhattiprolu 		 * the adapter would be in an undefined state. So retry the
33584f408e1fSSukadev Bhattiprolu 		 * previous reset as a hard reset.
3359d6dd2fe7SNick Child 		 *
3360d6dd2fe7SNick Child 		 * Else, free the previous rwi and, if there is another reset
3361d6dd2fe7SNick Child 		 * queued, process the new reset even if previous reset failed
3362d6dd2fe7SNick Child 		 * (the previous reset could have failed because of a fail
3363d6dd2fe7SNick Child 		 * over for instance, so process the fail over).
33644f408e1fSSukadev Bhattiprolu 		 */
3365d6dd2fe7SNick Child 		if (!rwi && rc)
33664f408e1fSSukadev Bhattiprolu 			rwi = tmprwi;
3367d6dd2fe7SNick Child 		else
3368d6dd2fe7SNick Child 			kfree(tmprwi);
33694f408e1fSSukadev Bhattiprolu 
33707ed5b31fSJuliet Kim 		if (rwi && (rwi->reset_reason == VNIC_RESET_FAILOVER ||
33714f408e1fSSukadev Bhattiprolu 			    rwi->reset_reason == VNIC_RESET_MOBILITY || rc))
33727ed5b31fSJuliet Kim 			adapter->force_reset_recovery = true;
3373ed651a10SNathan Fontenot 	}
3374ed651a10SNathan Fontenot 
3375c26eba03SJohn Allen 	if (adapter->wait_for_reset) {
3376c26eba03SJohn Allen 		adapter->reset_done_rc = rc;
3377c26eba03SJohn Allen 		complete(&adapter->reset_done);
3378c26eba03SJohn Allen 	}
3379c26eba03SJohn Allen 
33807ed5b31fSJuliet Kim 	clear_bit_unlock(0, &adapter->resetting);
338138bd5cecSSukadev Bhattiprolu 
338238bd5cecSSukadev Bhattiprolu 	netdev_dbg(adapter->netdev,
33830666ef7fSLijun Pan 		   "[S:%s FRR:%d WFR:%d] Done processing resets\n",
33840666ef7fSLijun Pan 		   adapter_state_to_string(adapter->state),
33850666ef7fSLijun Pan 		   adapter->force_reset_recovery,
338638bd5cecSSukadev Bhattiprolu 		   adapter->wait_for_reset);
33877ed5b31fSJuliet Kim }
33887ed5b31fSJuliet Kim 
__ibmvnic_delayed_reset(struct work_struct * work)33897ed5b31fSJuliet Kim static void __ibmvnic_delayed_reset(struct work_struct *work)
33907ed5b31fSJuliet Kim {
33917ed5b31fSJuliet Kim 	struct ibmvnic_adapter *adapter;
33927ed5b31fSJuliet Kim 
33937ed5b31fSJuliet Kim 	adapter = container_of(work, struct ibmvnic_adapter,
33947ed5b31fSJuliet Kim 			       ibmvnic_delayed_reset.work);
33957ed5b31fSJuliet Kim 	__ibmvnic_reset(&adapter->ibmvnic_reset);
3396ed651a10SNathan Fontenot }
3397ed651a10SNathan Fontenot 
flush_reset_queue(struct ibmvnic_adapter * adapter)339883da53f7SSukadev Bhattiprolu static void flush_reset_queue(struct ibmvnic_adapter *adapter)
339983da53f7SSukadev Bhattiprolu {
340083da53f7SSukadev Bhattiprolu 	struct list_head *entry, *tmp_entry;
340183da53f7SSukadev Bhattiprolu 
340283da53f7SSukadev Bhattiprolu 	if (!list_empty(&adapter->rwi_list)) {
340383da53f7SSukadev Bhattiprolu 		list_for_each_safe(entry, tmp_entry, &adapter->rwi_list) {
340483da53f7SSukadev Bhattiprolu 			list_del(entry);
340583da53f7SSukadev Bhattiprolu 			kfree(list_entry(entry, struct ibmvnic_rwi, list));
340683da53f7SSukadev Bhattiprolu 		}
340783da53f7SSukadev Bhattiprolu 	}
340883da53f7SSukadev Bhattiprolu }
340983da53f7SSukadev Bhattiprolu 
/* Queue a reset work item for @reason and kick the reset worker.
 *
 * Returns 0 on success or a negative errno: -EBUSY when the request is
 * dropped (adapter removing, failover pending, or a duplicate reason is
 * already queued), -ENOMEM on allocation failure — in which case the
 * netdev is also closed, since the adapter cannot be recovered.
 */
static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
			 enum ibmvnic_reset_reason reason)
{
	struct net_device *netdev = adapter->netdev;
	struct ibmvnic_rwi *rwi, *tmp;
	unsigned long flags;
	int ret;

	/* ->rwi_lock protects both the rwi_list and the scheduling checks
	 * below; held with interrupts off until just before the close path.
	 */
	spin_lock_irqsave(&adapter->rwi_lock, flags);

	/* If failover is pending don't schedule any other reset.
	 * Instead let the failover complete. If there is already a
	 * failover reset scheduled, we will detect and drop the
	 * duplicate reset when walking the ->rwi_list below.
	 */
	if (adapter->state == VNIC_REMOVING ||
	    adapter->state == VNIC_REMOVED ||
	    (adapter->failover_pending && reason != VNIC_RESET_FAILOVER)) {
		ret = EBUSY;	/* kept positive; negated at return */
		netdev_dbg(netdev, "Adapter removing or pending failover, skipping reset\n");
		goto err;
	}

	/* Drop the request if a reset with the same reason is already queued */
	list_for_each_entry(tmp, &adapter->rwi_list, list) {
		if (tmp->reset_reason == reason) {
			netdev_dbg(netdev, "Skipping matching reset, reason=%s\n",
				   reset_reason_to_string(reason));
			ret = EBUSY;
			goto err;
		}
	}

	/* GFP_ATOMIC: we hold a spinlock with interrupts disabled */
	rwi = kzalloc(sizeof(*rwi), GFP_ATOMIC);
	if (!rwi) {
		ret = ENOMEM;
		goto err;
	}
	/* if we just received a transport event,
	 * flush reset queue and process this reset
	 */
	if (adapter->force_reset_recovery)
		flush_reset_queue(adapter);

	rwi->reset_reason = reason;
	list_add_tail(&rwi->list, &adapter->rwi_list);
	netdev_dbg(adapter->netdev, "Scheduling reset (reason %s)\n",
		   reset_reason_to_string(reason));
	queue_work(system_long_wq, &adapter->ibmvnic_reset);

	ret = 0;
err:
	/* ibmvnic_close() below can block, so drop the lock first */
	spin_unlock_irqrestore(&adapter->rwi_lock, flags);

	if (ret == ENOMEM)
		ibmvnic_close(netdev);

	return -ret;
}
3469ed651a10SNathan Fontenot 
ibmvnic_tx_timeout(struct net_device * dev,unsigned int txqueue)34700290bd29SMichael S. Tsirkin static void ibmvnic_tx_timeout(struct net_device *dev, unsigned int txqueue)
3471032c5e82SThomas Falcon {
3472032c5e82SThomas Falcon 	struct ibmvnic_adapter *adapter = netdev_priv(dev);
3473032c5e82SThomas Falcon 
3474855a631aSLijun Pan 	if (test_bit(0, &adapter->resetting)) {
3475855a631aSLijun Pan 		netdev_err(adapter->netdev,
3476855a631aSLijun Pan 			   "Adapter is resetting, skip timeout reset\n");
3477855a631aSLijun Pan 		return;
3478855a631aSLijun Pan 	}
3479a86d5c68SDany Madden 	/* No queuing up reset until at least 5 seconds (default watchdog val)
3480a86d5c68SDany Madden 	 * after last reset
3481a86d5c68SDany Madden 	 */
3482a86d5c68SDany Madden 	if (time_before(jiffies, (adapter->last_reset_time + dev->watchdog_timeo))) {
3483a86d5c68SDany Madden 		netdev_dbg(dev, "Not yet time to tx timeout.\n");
3484a86d5c68SDany Madden 		return;
3485a86d5c68SDany Madden 	}
3486ed651a10SNathan Fontenot 	ibmvnic_reset(adapter, VNIC_RESET_TIMEOUT);
3487032c5e82SThomas Falcon }
3488032c5e82SThomas Falcon 
remove_buff_from_pool(struct ibmvnic_adapter * adapter,struct ibmvnic_rx_buff * rx_buff)3489032c5e82SThomas Falcon static void remove_buff_from_pool(struct ibmvnic_adapter *adapter,
3490032c5e82SThomas Falcon 				  struct ibmvnic_rx_buff *rx_buff)
3491032c5e82SThomas Falcon {
3492032c5e82SThomas Falcon 	struct ibmvnic_rx_pool *pool = &adapter->rx_pool[rx_buff->pool_index];
3493032c5e82SThomas Falcon 
3494032c5e82SThomas Falcon 	rx_buff->skb = NULL;
3495032c5e82SThomas Falcon 
3496032c5e82SThomas Falcon 	pool->free_map[pool->next_alloc] = (int)(rx_buff - pool->rx_buff);
3497032c5e82SThomas Falcon 	pool->next_alloc = (pool->next_alloc + 1) % pool->size;
3498032c5e82SThomas Falcon 
3499032c5e82SThomas Falcon 	atomic_dec(&pool->available);
3500032c5e82SThomas Falcon }
3501032c5e82SThomas Falcon 
/* NAPI poll callback for one RX sub-CRQ.
 *
 * Drains up to @budget RX completions from the sub-CRQ this napi
 * instance maps to, copies each frame out of the long term buffer into
 * its skb, pushes it up via napi_gro_receive(), and updates netdev and
 * per-queue statistics. Returns the number of frames processed.
 */
static int ibmvnic_poll(struct napi_struct *napi, int budget)
{
	struct ibmvnic_sub_crq_queue *rx_scrq;
	struct ibmvnic_adapter *adapter;
	struct net_device *netdev;
	int frames_processed;
	int scrq_num;

	netdev = napi->dev;
	adapter = netdev_priv(netdev);
	/* napi instances parallel the rx_scrq array, so pointer distance
	 * from the base gives this queue's index.
	 */
	scrq_num = (int)(napi - adapter->napi);
	frames_processed = 0;
	rx_scrq = adapter->rx_scrq[scrq_num];

restart_poll:
	while (frames_processed < budget) {
		struct sk_buff *skb;
		struct ibmvnic_rx_buff *rx_buff;
		union sub_crq *next;
		u32 length;
		u16 offset;
		u8 flags = 0;

		/* Bail out mid-poll if a reset is in progress, unless it is
		 * a NON_FATAL reset which leaves RX processing usable.
		 */
		if (unlikely(test_bit(0, &adapter->resetting) &&
			     adapter->reset_reason != VNIC_RESET_NON_FATAL)) {
			enable_scrq_irq(adapter, rx_scrq);
			napi_complete_done(napi, frames_processed);
			return frames_processed;
		}

		if (!pending_scrq(adapter, rx_scrq))
			break;
		next = ibmvnic_next_scrq(adapter, rx_scrq);
		/* correlator carries the driver's rx_buff pointer round-trip
		 * through the firmware
		 */
		rx_buff = (struct ibmvnic_rx_buff *)
			  be64_to_cpu(next->rx_comp.correlator);
		/* do error checking */
		if (next->rx_comp.rc) {
			netdev_dbg(netdev, "rx buffer returned with rc %x\n",
				   be16_to_cpu(next->rx_comp.rc));
			/* free the entry */
			next->rx_comp.first = 0;
			dev_kfree_skb_any(rx_buff->skb);
			remove_buff_from_pool(adapter, rx_buff);
			continue;
		} else if (!rx_buff->skb) {
			/* free the entry */
			next->rx_comp.first = 0;
			remove_buff_from_pool(adapter, rx_buff);
			continue;
		}

		length = be32_to_cpu(next->rx_comp.len);
		offset = be16_to_cpu(next->rx_comp.off_frame_data);
		flags = next->rx_comp.flags;
		skb = rx_buff->skb;
		/* load long_term_buff before copying to skb */
		dma_rmb();
		skb_copy_to_linear_data(skb, rx_buff->data + offset,
					length);

		/* VLAN Header has been stripped by the system firmware and
		 * needs to be inserted by the driver
		 */
		if (adapter->rx_vlan_header_insertion &&
		    (flags & IBMVNIC_VLAN_STRIPPED))
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       ntohs(next->rx_comp.vlan_tci));

		/* free the entry */
		next->rx_comp.first = 0;
		remove_buff_from_pool(adapter, rx_buff);

		skb_put(skb, length);
		skb->protocol = eth_type_trans(skb, netdev);
		skb_record_rx_queue(skb, scrq_num);

		if (flags & IBMVNIC_IP_CHKSUM_GOOD &&
		    flags & IBMVNIC_TCP_UDP_CHKSUM_GOOD) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		}

		/* cache len before GRO may consume the skb */
		length = skb->len;
		napi_gro_receive(napi, skb); /* send it up */
		netdev->stats.rx_packets++;
		netdev->stats.rx_bytes += length;
		adapter->rx_stats_buffers[scrq_num].packets++;
		adapter->rx_stats_buffers[scrq_num].bytes += length;
		frames_processed++;
	}

	/* Refill the RX pool when it has drained below half capacity, or
	 * whenever we came in under budget (queue is quiet), unless the
	 * adapter is closing.
	 */
	if (adapter->state != VNIC_CLOSING &&
	    ((atomic_read(&adapter->rx_pool[scrq_num].available) <
	      adapter->req_rx_add_entries_per_subcrq / 2) ||
	      frames_processed < budget))
		replenish_rx_pool(adapter, &adapter->rx_pool[scrq_num]);
	if (frames_processed < budget) {
		/* Re-enable the queue IRQ; if completions raced in between
		 * the pending check and napi_complete_done, reschedule and
		 * keep polling with the IRQ masked again.
		 */
		if (napi_complete_done(napi, frames_processed)) {
			enable_scrq_irq(adapter, rx_scrq);
			if (pending_scrq(adapter, rx_scrq)) {
				if (napi_reschedule(napi)) {
					disable_scrq_irq(adapter, rx_scrq);
					goto restart_poll;
				}
			}
		}
	}
	return frames_processed;
}
3610032c5e82SThomas Falcon 
/* Trigger a CHANGE_PARAM reset to apply adapter->desired.* settings and
 * wait (up to 60 seconds) for it to complete.
 *
 * The current req_* values are saved in adapter->fallback first; if the
 * reset completes but reports failure (reset_done_rc), the fallback
 * values are restored by issuing a second CHANGE_PARAM reset.
 *
 * Return: 0 on success; -EIO if the requested parameters were rejected
 * (even when the fallback reset succeeds); -ENODEV if waiting for a
 * completion times out; otherwise the negative errno from
 * ibmvnic_reset().
 */
static int wait_for_reset(struct ibmvnic_adapter *adapter)
{
	int rc, ret;

	/* Snapshot current settings so we can roll back on failure */
	adapter->fallback.mtu = adapter->req_mtu;
	adapter->fallback.rx_queues = adapter->req_rx_queues;
	adapter->fallback.tx_queues = adapter->req_tx_queues;
	adapter->fallback.rx_entries = adapter->req_rx_add_entries_per_subcrq;
	adapter->fallback.tx_entries = adapter->req_tx_entries_per_subcrq;

	/* Arm the completion before scheduling the reset so the worker's
	 * complete() cannot be missed.
	 */
	reinit_completion(&adapter->reset_done);
	adapter->wait_for_reset = true;
	rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);

	if (rc) {
		ret = rc;
		goto out;
	}
	rc = ibmvnic_wait_for_completion(adapter, &adapter->reset_done, 60000);
	if (rc) {
		ret = -ENODEV;
		goto out;
	}

	ret = 0;
	if (adapter->reset_done_rc) {
		/* Reset ran but rejected the new parameters: restore the
		 * saved settings with another CHANGE_PARAM reset.
		 */
		ret = -EIO;
		adapter->desired.mtu = adapter->fallback.mtu;
		adapter->desired.rx_queues = adapter->fallback.rx_queues;
		adapter->desired.tx_queues = adapter->fallback.tx_queues;
		adapter->desired.rx_entries = adapter->fallback.rx_entries;
		adapter->desired.tx_entries = adapter->fallback.tx_entries;

		reinit_completion(&adapter->reset_done);
		adapter->wait_for_reset = true;
		rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
		if (rc) {
			ret = rc;
			goto out;
		}
		rc = ibmvnic_wait_for_completion(adapter, &adapter->reset_done,
						 60000);
		if (rc) {
			ret = -ENODEV;
			goto out;
		}
	}
out:
	adapter->wait_for_reset = false;

	return ret;
}
3663c26eba03SJohn Allen 
ibmvnic_change_mtu(struct net_device * netdev,int new_mtu)36643a807b75SJohn Allen static int ibmvnic_change_mtu(struct net_device *netdev, int new_mtu)
36653a807b75SJohn Allen {
3666c26eba03SJohn Allen 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3667c26eba03SJohn Allen 
3668c26eba03SJohn Allen 	adapter->desired.mtu = new_mtu + ETH_HLEN;
3669c26eba03SJohn Allen 
3670c26eba03SJohn Allen 	return wait_for_reset(adapter);
36713a807b75SJohn Allen }
36723a807b75SJohn Allen 
/* ndo_features_check: mask out GSO for packets the backing hardware
 * cannot segment, forcing software segmentation instead.
 */
static netdev_features_t ibmvnic_features_check(struct sk_buff *skb,
						struct net_device *dev,
						netdev_features_t features)
{
	/* Some backing hardware adapters can not handle packets with a
	 * MSS less than 224 or with only one segment.
	 */
	if (skb_is_gso(skb) &&
	    (skb_shinfo(skb)->gso_size < 224 ||
	     skb_shinfo(skb)->gso_segs == 1))
		features &= ~NETIF_F_GSO_MASK;

	return features;
}
3689f10b09efSThomas Falcon 
/* net_device callbacks implemented by this driver */
static const struct net_device_ops ibmvnic_netdev_ops = {
	.ndo_open		= ibmvnic_open,
	.ndo_stop		= ibmvnic_close,
	.ndo_start_xmit		= ibmvnic_xmit,
	.ndo_set_rx_mode	= ibmvnic_set_multi,
	.ndo_set_mac_address	= ibmvnic_set_mac,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_tx_timeout		= ibmvnic_tx_timeout,
	.ndo_change_mtu		= ibmvnic_change_mtu,
	.ndo_features_check     = ibmvnic_features_check,
};
3701032c5e82SThomas Falcon 
3702032c5e82SThomas Falcon /* ethtool functions */
3703032c5e82SThomas Falcon 
ibmvnic_get_link_ksettings(struct net_device * netdev,struct ethtool_link_ksettings * cmd)37048a43379fSPhilippe Reynes static int ibmvnic_get_link_ksettings(struct net_device *netdev,
37058a43379fSPhilippe Reynes 				      struct ethtool_link_ksettings *cmd)
3706032c5e82SThomas Falcon {
3707f8d6ae0dSMurilo Fossa Vicentini 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3708f8d6ae0dSMurilo Fossa Vicentini 	int rc;
37098a43379fSPhilippe Reynes 
3710f8d6ae0dSMurilo Fossa Vicentini 	rc = send_query_phys_parms(adapter);
3711f8d6ae0dSMurilo Fossa Vicentini 	if (rc) {
3712f8d6ae0dSMurilo Fossa Vicentini 		adapter->speed = SPEED_UNKNOWN;
3713f8d6ae0dSMurilo Fossa Vicentini 		adapter->duplex = DUPLEX_UNKNOWN;
3714f8d6ae0dSMurilo Fossa Vicentini 	}
3715f8d6ae0dSMurilo Fossa Vicentini 	cmd->base.speed = adapter->speed;
3716f8d6ae0dSMurilo Fossa Vicentini 	cmd->base.duplex = adapter->duplex;
37178a43379fSPhilippe Reynes 	cmd->base.port = PORT_FIBRE;
37188a43379fSPhilippe Reynes 	cmd->base.phy_address = 0;
37198a43379fSPhilippe Reynes 	cmd->base.autoneg = AUTONEG_ENABLE;
37208a43379fSPhilippe Reynes 
3721032c5e82SThomas Falcon 	return 0;
3722032c5e82SThomas Falcon }
3723032c5e82SThomas Falcon 
ibmvnic_get_drvinfo(struct net_device * netdev,struct ethtool_drvinfo * info)37244e6759beSDesnes Augusto Nunes do Rosario static void ibmvnic_get_drvinfo(struct net_device *netdev,
3725032c5e82SThomas Falcon 				struct ethtool_drvinfo *info)
3726032c5e82SThomas Falcon {
37274e6759beSDesnes Augusto Nunes do Rosario 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
37284e6759beSDesnes Augusto Nunes do Rosario 
37298a96c80eSLijun Pan 	strscpy(info->driver, ibmvnic_driver_name, sizeof(info->driver));
37308a96c80eSLijun Pan 	strscpy(info->version, IBMVNIC_DRIVER_VERSION, sizeof(info->version));
37318a96c80eSLijun Pan 	strscpy(info->fw_version, adapter->fw_version,
37324e6759beSDesnes Augusto Nunes do Rosario 		sizeof(info->fw_version));
3733032c5e82SThomas Falcon }
3734032c5e82SThomas Falcon 
/* ethtool get_msglevel: report the driver's message-level bitmask */
static u32 ibmvnic_get_msglevel(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	return adapter->msg_enable;
}
3741032c5e82SThomas Falcon 
/* ethtool set_msglevel: store the requested message-level bitmask */
static void ibmvnic_set_msglevel(struct net_device *netdev, u32 data)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	adapter->msg_enable = data;
}
3748032c5e82SThomas Falcon 
/* ethtool get_link: report the cached logical link state */
static u32 ibmvnic_get_link(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	/* Don't need to send a query because we request a logical link up at
	 * init and then we wait for link state indications
	 */
	return adapter->logical_link_state;
}
3758032c5e82SThomas Falcon 
ibmvnic_get_ringparam(struct net_device * netdev,struct ethtool_ringparam * ring,struct kernel_ethtool_ringparam * kernel_ring,struct netlink_ext_ack * extack)3759032c5e82SThomas Falcon static void ibmvnic_get_ringparam(struct net_device *netdev,
376074624944SHao Chen 				  struct ethtool_ringparam *ring,
376174624944SHao Chen 				  struct kernel_ethtool_ringparam *kernel_ring,
376274624944SHao Chen 				  struct netlink_ext_ack *extack)
3763032c5e82SThomas Falcon {
3764bc131b3aSJohn Allen 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3765bc131b3aSJohn Allen 
3766bc131b3aSJohn Allen 	ring->rx_max_pending = adapter->max_rx_add_entries_per_subcrq;
3767bc131b3aSJohn Allen 	ring->tx_max_pending = adapter->max_tx_entries_per_subcrq;
3768032c5e82SThomas Falcon 	ring->rx_mini_max_pending = 0;
3769032c5e82SThomas Falcon 	ring->rx_jumbo_max_pending = 0;
3770bc131b3aSJohn Allen 	ring->rx_pending = adapter->req_rx_add_entries_per_subcrq;
3771bc131b3aSJohn Allen 	ring->tx_pending = adapter->req_tx_entries_per_subcrq;
3772032c5e82SThomas Falcon 	ring->rx_mini_pending = 0;
3773032c5e82SThomas Falcon 	ring->rx_jumbo_pending = 0;
3774032c5e82SThomas Falcon }
3775032c5e82SThomas Falcon 
ibmvnic_set_ringparam(struct net_device * netdev,struct ethtool_ringparam * ring,struct kernel_ethtool_ringparam * kernel_ring,struct netlink_ext_ack * extack)3776c26eba03SJohn Allen static int ibmvnic_set_ringparam(struct net_device *netdev,
377774624944SHao Chen 				 struct ethtool_ringparam *ring,
377874624944SHao Chen 				 struct kernel_ethtool_ringparam *kernel_ring,
377974624944SHao Chen 				 struct netlink_ext_ack *extack)
3780c26eba03SJohn Allen {
3781c26eba03SJohn Allen 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3782c26eba03SJohn Allen 
3783aeaf59b7SDany Madden 	if (ring->rx_pending > adapter->max_rx_add_entries_per_subcrq  ||
3784aeaf59b7SDany Madden 	    ring->tx_pending > adapter->max_tx_entries_per_subcrq) {
3785aeaf59b7SDany Madden 		netdev_err(netdev, "Invalid request.\n");
3786aeaf59b7SDany Madden 		netdev_err(netdev, "Max tx buffers = %llu\n",
3787aeaf59b7SDany Madden 			   adapter->max_rx_add_entries_per_subcrq);
3788aeaf59b7SDany Madden 		netdev_err(netdev, "Max rx buffers = %llu\n",
3789aeaf59b7SDany Madden 			   adapter->max_tx_entries_per_subcrq);
3790aeaf59b7SDany Madden 		return -EINVAL;
3791aeaf59b7SDany Madden 	}
3792aeaf59b7SDany Madden 
3793c26eba03SJohn Allen 	adapter->desired.rx_entries = ring->rx_pending;
3794c26eba03SJohn Allen 	adapter->desired.tx_entries = ring->tx_pending;
3795c26eba03SJohn Allen 
3796aeaf59b7SDany Madden 	return wait_for_reset(adapter);
3797c26eba03SJohn Allen }
3798c26eba03SJohn Allen 
ibmvnic_get_channels(struct net_device * netdev,struct ethtool_channels * channels)3799c2dbeb67SJohn Allen static void ibmvnic_get_channels(struct net_device *netdev,
3800c2dbeb67SJohn Allen 				 struct ethtool_channels *channels)
3801c2dbeb67SJohn Allen {
3802c2dbeb67SJohn Allen 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3803c2dbeb67SJohn Allen 
3804c2dbeb67SJohn Allen 	channels->max_rx = adapter->max_rx_queues;
3805c2dbeb67SJohn Allen 	channels->max_tx = adapter->max_tx_queues;
3806c2dbeb67SJohn Allen 	channels->max_other = 0;
3807c2dbeb67SJohn Allen 	channels->max_combined = 0;
3808c2dbeb67SJohn Allen 	channels->rx_count = adapter->req_rx_queues;
3809c2dbeb67SJohn Allen 	channels->tx_count = adapter->req_tx_queues;
3810c2dbeb67SJohn Allen 	channels->other_count = 0;
3811c2dbeb67SJohn Allen 	channels->combined_count = 0;
3812c2dbeb67SJohn Allen }
3813c2dbeb67SJohn Allen 
ibmvnic_set_channels(struct net_device * netdev,struct ethtool_channels * channels)3814c26eba03SJohn Allen static int ibmvnic_set_channels(struct net_device *netdev,
3815c26eba03SJohn Allen 				struct ethtool_channels *channels)
3816c26eba03SJohn Allen {
3817c26eba03SJohn Allen 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3818c26eba03SJohn Allen 
3819c26eba03SJohn Allen 	adapter->desired.rx_queues = channels->rx_count;
3820c26eba03SJohn Allen 	adapter->desired.tx_queues = channels->tx_count;
3821c26eba03SJohn Allen 
3822aeaf59b7SDany Madden 	return wait_for_reset(adapter);
3823c26eba03SJohn Allen }
3824c26eba03SJohn Allen 
/* ethtool get_strings: emit the statistic names — device-wide stats
 * first, then the per-TX-queue and per-RX-queue counter names. Each
 * name occupies one ETH_GSTRING_LEN slot; the order must match
 * ibmvnic_get_ethtool_stats() and ibmvnic_get_sset_count().
 */
static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	static const char * const tx_fmts[] = {
		"tx%d_batched_packets",
		"tx%d_direct_packets",
		"tx%d_bytes",
		"tx%d_dropped_packets",
	};
	static const char * const rx_fmts[] = {
		"rx%d_packets",
		"rx%d_bytes",
		"rx%d_interrupts",
	};
	struct ibmvnic_adapter *adapter = netdev_priv(dev);
	int i, j;

	if (stringset != ETH_SS_STATS)
		return;

	for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++, data += ETH_GSTRING_LEN)
		memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN);

	for (i = 0; i < adapter->req_tx_queues; i++)
		for (j = 0; j < ARRAY_SIZE(tx_fmts); j++, data += ETH_GSTRING_LEN)
			snprintf(data, ETH_GSTRING_LEN, tx_fmts[j], i);

	for (i = 0; i < adapter->req_rx_queues; i++)
		for (j = 0; j < ARRAY_SIZE(rx_fmts); j++, data += ETH_GSTRING_LEN)
			snprintf(data, ETH_GSTRING_LEN, rx_fmts[j], i);
}
3861032c5e82SThomas Falcon 
ibmvnic_get_sset_count(struct net_device * dev,int sset)3862032c5e82SThomas Falcon static int ibmvnic_get_sset_count(struct net_device *dev, int sset)
3863032c5e82SThomas Falcon {
38643d52b594SJohn Allen 	struct ibmvnic_adapter *adapter = netdev_priv(dev);
38653d52b594SJohn Allen 
3866032c5e82SThomas Falcon 	switch (sset) {
3867032c5e82SThomas Falcon 	case ETH_SS_STATS:
38683d52b594SJohn Allen 		return ARRAY_SIZE(ibmvnic_stats) +
38693d52b594SJohn Allen 		       adapter->req_tx_queues * NUM_TX_STATS +
38703d52b594SJohn Allen 		       adapter->req_rx_queues * NUM_RX_STATS;
3871032c5e82SThomas Falcon 	default:
3872032c5e82SThomas Falcon 		return -EOPNOTSUPP;
3873032c5e82SThomas Falcon 	}
3874032c5e82SThomas Falcon }
3875032c5e82SThomas Falcon 
/* ethtool get_stats: request fresh statistics from the VNIC server,
 * wait for them to land in the stats buffer, then copy device-wide
 * stats followed by per-TX-queue and per-RX-queue counters into @data.
 * Order must match ibmvnic_get_strings().
 */
static void ibmvnic_get_ethtool_stats(struct net_device *dev,
				      struct ethtool_stats *stats, u64 *data)
{
	struct ibmvnic_adapter *adapter = netdev_priv(dev);
	union ibmvnic_crq crq;
	int i, j;
	int rc;

	/* Ask the server to DMA statistics into the buffer identified by
	 * stats_token.
	 */
	memset(&crq, 0, sizeof(crq));
	crq.request_statistics.first = IBMVNIC_CRQ_CMD;
	crq.request_statistics.cmd = REQUEST_STATISTICS;
	crq.request_statistics.ioba = cpu_to_be32(adapter->stats_token);
	crq.request_statistics.len =
	    cpu_to_be32(sizeof(struct ibmvnic_statistics));

	/* Wait for data to be written; bail silently on send or timeout
	 * failure, leaving @data untouched.
	 */
	reinit_completion(&adapter->stats_done);
	rc = ibmvnic_send_crq(adapter, &crq);
	if (rc)
		return;
	rc = ibmvnic_wait_for_completion(adapter, &adapter->stats_done, 10000);
	if (rc)
		return;

	i = 0;
	while (i < ARRAY_SIZE(ibmvnic_stats)) {
		data[i] = be64_to_cpu(IBMVNIC_GET_STAT
				      (adapter, ibmvnic_stats[i].offset));
		i++;
	}

	for (j = 0; j < adapter->req_tx_queues; j++) {
		data[i++] = adapter->tx_stats_buffers[j].batched_packets;
		data[i++] = adapter->tx_stats_buffers[j].direct_packets;
		data[i++] = adapter->tx_stats_buffers[j].bytes;
		data[i++] = adapter->tx_stats_buffers[j].dropped_packets;
	}

	for (j = 0; j < adapter->req_rx_queues; j++) {
		data[i++] = adapter->rx_stats_buffers[j].packets;
		data[i++] = adapter->rx_stats_buffers[j].bytes;
		data[i++] = adapter->rx_stats_buffers[j].interrupts;
	}
}
3924032c5e82SThomas Falcon 
/* ethtool operations supported by the ibmvnic driver; anything not listed
 * here falls back to the ethtool core defaults.
 */
static const struct ethtool_ops ibmvnic_ethtool_ops = {
	.get_drvinfo		= ibmvnic_get_drvinfo,
	.get_msglevel		= ibmvnic_get_msglevel,
	.set_msglevel		= ibmvnic_set_msglevel,
	.get_link		= ibmvnic_get_link,
	.get_ringparam		= ibmvnic_get_ringparam,
	.set_ringparam		= ibmvnic_set_ringparam,
	.get_channels		= ibmvnic_get_channels,
	.set_channels		= ibmvnic_set_channels,
	.get_strings            = ibmvnic_get_strings,
	.get_sset_count         = ibmvnic_get_sset_count,
	.get_ethtool_stats	= ibmvnic_get_ethtool_stats,
	.get_link_ksettings	= ibmvnic_get_link_ksettings,
};
3939032c5e82SThomas Falcon 
3940032c5e82SThomas Falcon /* Routines for managing CRQs/sCRQs  */
3941032c5e82SThomas Falcon 
reset_one_sub_crq_queue(struct ibmvnic_adapter * adapter,struct ibmvnic_sub_crq_queue * scrq)394257a49436SNathan Fontenot static int reset_one_sub_crq_queue(struct ibmvnic_adapter *adapter,
394357a49436SNathan Fontenot 				   struct ibmvnic_sub_crq_queue *scrq)
394457a49436SNathan Fontenot {
394557a49436SNathan Fontenot 	int rc;
394657a49436SNathan Fontenot 
39479281cf2dSDany Madden 	if (!scrq) {
3948862aecbdSYANG LI 		netdev_dbg(adapter->netdev, "Invalid scrq reset.\n");
39499281cf2dSDany Madden 		return -EINVAL;
39509281cf2dSDany Madden 	}
39519281cf2dSDany Madden 
395257a49436SNathan Fontenot 	if (scrq->irq) {
395357a49436SNathan Fontenot 		free_irq(scrq->irq, scrq);
395457a49436SNathan Fontenot 		irq_dispose_mapping(scrq->irq);
395557a49436SNathan Fontenot 		scrq->irq = 0;
395657a49436SNathan Fontenot 	}
395757a49436SNathan Fontenot 
39589281cf2dSDany Madden 	if (scrq->msgs) {
3959c8b2ad0aSThomas Falcon 		memset(scrq->msgs, 0, 4 * PAGE_SIZE);
396041f71467SThomas Falcon 		atomic_set(&scrq->used, 0);
396157a49436SNathan Fontenot 		scrq->cur = 0;
3962f019fb63SThomas Falcon 		scrq->ind_buf.index = 0;
39639281cf2dSDany Madden 	} else {
39649281cf2dSDany Madden 		netdev_dbg(adapter->netdev, "Invalid scrq reset\n");
39659281cf2dSDany Madden 		return -EINVAL;
39669281cf2dSDany Madden 	}
396757a49436SNathan Fontenot 
396857a49436SNathan Fontenot 	rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
396957a49436SNathan Fontenot 			   4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);
397057a49436SNathan Fontenot 	return rc;
397157a49436SNathan Fontenot }
397257a49436SNathan Fontenot 
reset_sub_crq_queues(struct ibmvnic_adapter * adapter)397357a49436SNathan Fontenot static int reset_sub_crq_queues(struct ibmvnic_adapter *adapter)
397457a49436SNathan Fontenot {
397557a49436SNathan Fontenot 	int i, rc;
397657a49436SNathan Fontenot 
3977a0faaa27SLijun Pan 	if (!adapter->tx_scrq || !adapter->rx_scrq)
3978a0faaa27SLijun Pan 		return -EINVAL;
3979a0faaa27SLijun Pan 
398044fbc1b6SNick Child 	ibmvnic_clean_affinity(adapter);
398144fbc1b6SNick Child 
398257a49436SNathan Fontenot 	for (i = 0; i < adapter->req_tx_queues; i++) {
3983d1cf33d9SNathan Fontenot 		netdev_dbg(adapter->netdev, "Re-setting tx_scrq[%d]\n", i);
398457a49436SNathan Fontenot 		rc = reset_one_sub_crq_queue(adapter, adapter->tx_scrq[i]);
398557a49436SNathan Fontenot 		if (rc)
398657a49436SNathan Fontenot 			return rc;
398757a49436SNathan Fontenot 	}
398857a49436SNathan Fontenot 
398957a49436SNathan Fontenot 	for (i = 0; i < adapter->req_rx_queues; i++) {
3990d1cf33d9SNathan Fontenot 		netdev_dbg(adapter->netdev, "Re-setting rx_scrq[%d]\n", i);
399157a49436SNathan Fontenot 		rc = reset_one_sub_crq_queue(adapter, adapter->rx_scrq[i]);
399257a49436SNathan Fontenot 		if (rc)
399357a49436SNathan Fontenot 			return rc;
399457a49436SNathan Fontenot 	}
399557a49436SNathan Fontenot 
399657a49436SNathan Fontenot 	return rc;
399757a49436SNathan Fontenot }
399857a49436SNathan Fontenot 
/* Tear down one sub-CRQ queue and free all its resources.
 *
 * @do_h_free: when true, also ask the hypervisor to free the sub-CRQ via
 *	       H_FREE_SUB_CRQ, retrying while it reports (long) busy;
 *	       callers pass false when the hypervisor-side free is not
 *	       wanted/needed (see call sites).
 *
 * Ordering matters: the coherent indirect buffer is freed, then the
 * 4-page message area is unmapped and freed, then the queue struct itself.
 */
static void release_sub_crq_queue(struct ibmvnic_adapter *adapter,
				  struct ibmvnic_sub_crq_queue *scrq,
				  bool do_h_free)
{
	struct device *dev = &adapter->vdev->dev;
	long rc;

	netdev_dbg(adapter->netdev, "Releasing sub-CRQ\n");

	if (do_h_free) {
		/* Close the sub-crqs */
		do {
			rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
						adapter->vdev->unit_address,
						scrq->crq_num);
		} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

		if (rc) {
			netdev_err(adapter->netdev,
				   "Failed to release sub-CRQ %16lx, rc = %ld\n",
				   scrq->crq_num, rc);
		}
	}

	dma_free_coherent(dev,
			  IBMVNIC_IND_ARR_SZ,
			  scrq->ind_buf.indir_arr,
			  scrq->ind_buf.indir_dma);

	/* order-2 pages == 4 * PAGE_SIZE, matching the dma mapping above */
	dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
			 DMA_BIDIRECTIONAL);
	free_pages((unsigned long)scrq->msgs, 2);
	free_cpumask_var(scrq->affinity_mask);
	kfree(scrq);
}
4034032c5e82SThomas Falcon 
init_sub_crq_queue(struct ibmvnic_adapter * adapter)4035032c5e82SThomas Falcon static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
4036032c5e82SThomas Falcon 							*adapter)
4037032c5e82SThomas Falcon {
4038032c5e82SThomas Falcon 	struct device *dev = &adapter->vdev->dev;
4039032c5e82SThomas Falcon 	struct ibmvnic_sub_crq_queue *scrq;
4040032c5e82SThomas Falcon 	int rc;
4041032c5e82SThomas Falcon 
40421bb3c739SNathan Fontenot 	scrq = kzalloc(sizeof(*scrq), GFP_KERNEL);
4043032c5e82SThomas Falcon 	if (!scrq)
4044032c5e82SThomas Falcon 		return NULL;
4045032c5e82SThomas Falcon 
40467f7adc50SNathan Fontenot 	scrq->msgs =
40471bb3c739SNathan Fontenot 		(union sub_crq *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 2);
4048032c5e82SThomas Falcon 	if (!scrq->msgs) {
4049032c5e82SThomas Falcon 		dev_warn(dev, "Couldn't allocate crq queue messages page\n");
4050032c5e82SThomas Falcon 		goto zero_page_failed;
4051032c5e82SThomas Falcon 	}
405244fbc1b6SNick Child 	if (!zalloc_cpumask_var(&scrq->affinity_mask, GFP_KERNEL))
405344fbc1b6SNick Child 		goto cpumask_alloc_failed;
4054032c5e82SThomas Falcon 
4055032c5e82SThomas Falcon 	scrq->msg_token = dma_map_single(dev, scrq->msgs, 4 * PAGE_SIZE,
4056032c5e82SThomas Falcon 					 DMA_BIDIRECTIONAL);
4057032c5e82SThomas Falcon 	if (dma_mapping_error(dev, scrq->msg_token)) {
4058032c5e82SThomas Falcon 		dev_warn(dev, "Couldn't map crq queue messages page\n");
4059032c5e82SThomas Falcon 		goto map_failed;
4060032c5e82SThomas Falcon 	}
4061032c5e82SThomas Falcon 
4062032c5e82SThomas Falcon 	rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
4063032c5e82SThomas Falcon 			   4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);
4064032c5e82SThomas Falcon 
4065032c5e82SThomas Falcon 	if (rc == H_RESOURCE)
4066032c5e82SThomas Falcon 		rc = ibmvnic_reset_crq(adapter);
4067032c5e82SThomas Falcon 
4068032c5e82SThomas Falcon 	if (rc == H_CLOSED) {
4069032c5e82SThomas Falcon 		dev_warn(dev, "Partner adapter not ready, waiting.\n");
4070032c5e82SThomas Falcon 	} else if (rc) {
4071032c5e82SThomas Falcon 		dev_warn(dev, "Error %d registering sub-crq\n", rc);
4072032c5e82SThomas Falcon 		goto reg_failed;
4073032c5e82SThomas Falcon 	}
4074032c5e82SThomas Falcon 
4075032c5e82SThomas Falcon 	scrq->adapter = adapter;
4076032c5e82SThomas Falcon 	scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs);
4077f019fb63SThomas Falcon 	scrq->ind_buf.index = 0;
4078f019fb63SThomas Falcon 
4079f019fb63SThomas Falcon 	scrq->ind_buf.indir_arr =
4080f019fb63SThomas Falcon 		dma_alloc_coherent(dev,
4081f019fb63SThomas Falcon 				   IBMVNIC_IND_ARR_SZ,
4082f019fb63SThomas Falcon 				   &scrq->ind_buf.indir_dma,
4083f019fb63SThomas Falcon 				   GFP_KERNEL);
4084f019fb63SThomas Falcon 
4085f019fb63SThomas Falcon 	if (!scrq->ind_buf.indir_arr)
4086f019fb63SThomas Falcon 		goto indir_failed;
4087f019fb63SThomas Falcon 
4088032c5e82SThomas Falcon 	spin_lock_init(&scrq->lock);
4089032c5e82SThomas Falcon 
4090032c5e82SThomas Falcon 	netdev_dbg(adapter->netdev,
4091032c5e82SThomas Falcon 		   "sub-crq initialized, num %lx, hw_irq=%lx, irq=%x\n",
4092032c5e82SThomas Falcon 		   scrq->crq_num, scrq->hw_irq, scrq->irq);
4093032c5e82SThomas Falcon 
4094032c5e82SThomas Falcon 	return scrq;
4095032c5e82SThomas Falcon 
4096f019fb63SThomas Falcon indir_failed:
4097f019fb63SThomas Falcon 	do {
4098f019fb63SThomas Falcon 		rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
4099f019fb63SThomas Falcon 					adapter->vdev->unit_address,
4100f019fb63SThomas Falcon 					scrq->crq_num);
4101f019fb63SThomas Falcon 	} while (rc == H_BUSY || rc == H_IS_LONG_BUSY(rc));
4102032c5e82SThomas Falcon reg_failed:
4103032c5e82SThomas Falcon 	dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
4104032c5e82SThomas Falcon 			 DMA_BIDIRECTIONAL);
4105032c5e82SThomas Falcon map_failed:
410644fbc1b6SNick Child 	free_cpumask_var(scrq->affinity_mask);
410744fbc1b6SNick Child cpumask_alloc_failed:
4108032c5e82SThomas Falcon 	free_pages((unsigned long)scrq->msgs, 2);
4109032c5e82SThomas Falcon zero_page_failed:
4110032c5e82SThomas Falcon 	kfree(scrq);
4111032c5e82SThomas Falcon 
4112032c5e82SThomas Falcon 	return NULL;
4113032c5e82SThomas Falcon }
4114032c5e82SThomas Falcon 
/* Release every tx and rx sub-CRQ queue.
 *
 * For each queue: free its irq (so no more completions arrive), flush any
 * in-flight tx buffers, then release the queue itself (optionally freeing
 * it hypervisor-side per @do_h_free).  The queue arrays and active counts
 * are cleared afterwards so a later re-init starts from a clean state.
 */
static void release_sub_crqs(struct ibmvnic_adapter *adapter, bool do_h_free)
{
	int i;

	ibmvnic_clean_affinity(adapter);
	if (adapter->tx_scrq) {
		for (i = 0; i < adapter->num_active_tx_scrqs; i++) {
			if (!adapter->tx_scrq[i])
				continue;

			netdev_dbg(adapter->netdev, "Releasing tx_scrq[%d]\n",
				   i);
			/* flush pending tx work before the irq goes away */
			ibmvnic_tx_scrq_clean_buffer(adapter, adapter->tx_scrq[i]);
			if (adapter->tx_scrq[i]->irq) {
				free_irq(adapter->tx_scrq[i]->irq,
					 adapter->tx_scrq[i]);
				irq_dispose_mapping(adapter->tx_scrq[i]->irq);
				adapter->tx_scrq[i]->irq = 0;
			}

			release_sub_crq_queue(adapter, adapter->tx_scrq[i],
					      do_h_free);
		}

		kfree(adapter->tx_scrq);
		adapter->tx_scrq = NULL;
		adapter->num_active_tx_scrqs = 0;
	}

	/* Clean any remaining outstanding SKBs
	 * we freed the irq so we won't be hearing
	 * from them
	 */
	clean_tx_pools(adapter);

	if (adapter->rx_scrq) {
		for (i = 0; i < adapter->num_active_rx_scrqs; i++) {
			if (!adapter->rx_scrq[i])
				continue;

			netdev_dbg(adapter->netdev, "Releasing rx_scrq[%d]\n",
				   i);
			if (adapter->rx_scrq[i]->irq) {
				free_irq(adapter->rx_scrq[i]->irq,
					 adapter->rx_scrq[i]);
				irq_dispose_mapping(adapter->rx_scrq[i]->irq);
				adapter->rx_scrq[i]->irq = 0;
			}

			release_sub_crq_queue(adapter, adapter->rx_scrq[i],
					      do_h_free);
		}

		kfree(adapter->rx_scrq);
		adapter->rx_scrq = NULL;
		adapter->num_active_rx_scrqs = 0;
	}
}
4173032c5e82SThomas Falcon 
disable_scrq_irq(struct ibmvnic_adapter * adapter,struct ibmvnic_sub_crq_queue * scrq)4174032c5e82SThomas Falcon static int disable_scrq_irq(struct ibmvnic_adapter *adapter,
4175032c5e82SThomas Falcon 			    struct ibmvnic_sub_crq_queue *scrq)
4176032c5e82SThomas Falcon {
4177032c5e82SThomas Falcon 	struct device *dev = &adapter->vdev->dev;
4178032c5e82SThomas Falcon 	unsigned long rc;
4179032c5e82SThomas Falcon 
4180032c5e82SThomas Falcon 	rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
4181032c5e82SThomas Falcon 				H_DISABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
4182032c5e82SThomas Falcon 	if (rc)
4183032c5e82SThomas Falcon 		dev_err(dev, "Couldn't disable scrq irq 0x%lx. rc=%ld\n",
4184032c5e82SThomas Falcon 			scrq->hw_irq, rc);
4185032c5e82SThomas Falcon 	return rc;
4186032c5e82SThomas Falcon }
4187032c5e82SThomas Falcon 
41887ea0c16aSCédric Le Goater /* We can not use the IRQ chip EOI handler because that has the
41897ea0c16aSCédric Le Goater  * unintended effect of changing the interrupt priority.
41907ea0c16aSCédric Le Goater  */
ibmvnic_xics_eoi(struct device * dev,struct ibmvnic_sub_crq_queue * scrq)41917ea0c16aSCédric Le Goater static void ibmvnic_xics_eoi(struct device *dev, struct ibmvnic_sub_crq_queue *scrq)
41927ea0c16aSCédric Le Goater {
41937ea0c16aSCédric Le Goater 	u64 val = 0xff000000 | scrq->hw_irq;
41947ea0c16aSCédric Le Goater 	unsigned long rc;
41957ea0c16aSCédric Le Goater 
41967ea0c16aSCédric Le Goater 	rc = plpar_hcall_norets(H_EOI, val);
41977ea0c16aSCédric Le Goater 	if (rc)
41987ea0c16aSCédric Le Goater 		dev_err(dev, "H_EOI FAILED irq 0x%llx. rc=%ld\n", val, rc);
41997ea0c16aSCédric Le Goater }
42007ea0c16aSCédric Le Goater 
/* Due to a firmware bug, the hypervisor can send an interrupt to a
 * transmit or receive queue just prior to a partition migration.
 * Force an EOI after migration.  Only needed on XICS; XIVE does not
 * require it.
 */
static void ibmvnic_clear_pending_interrupt(struct device *dev,
					    struct ibmvnic_sub_crq_queue *scrq)
{
	if (xive_enabled())
		return;

	ibmvnic_xics_eoi(dev, scrq);
}
42117ea0c16aSCédric Le Goater 
enable_scrq_irq(struct ibmvnic_adapter * adapter,struct ibmvnic_sub_crq_queue * scrq)4212032c5e82SThomas Falcon static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
4213032c5e82SThomas Falcon 			   struct ibmvnic_sub_crq_queue *scrq)
4214032c5e82SThomas Falcon {
4215032c5e82SThomas Falcon 	struct device *dev = &adapter->vdev->dev;
4216032c5e82SThomas Falcon 	unsigned long rc;
4217032c5e82SThomas Falcon 
4218032c5e82SThomas Falcon 	if (scrq->hw_irq > 0x100000000ULL) {
4219032c5e82SThomas Falcon 		dev_err(dev, "bad hw_irq = %lx\n", scrq->hw_irq);
4220032c5e82SThomas Falcon 		return 1;
4221032c5e82SThomas Falcon 	}
4222032c5e82SThomas Falcon 
42237ed5b31fSJuliet Kim 	if (test_bit(0, &adapter->resetting) &&
422473f9d364SNathan Fontenot 	    adapter->reset_reason == VNIC_RESET_MOBILITY) {
42257ea0c16aSCédric Le Goater 		ibmvnic_clear_pending_interrupt(dev, scrq);
422673f9d364SNathan Fontenot 	}
4227f23e0643SThomas Falcon 
4228032c5e82SThomas Falcon 	rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
4229032c5e82SThomas Falcon 				H_ENABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
4230032c5e82SThomas Falcon 	if (rc)
4231032c5e82SThomas Falcon 		dev_err(dev, "Couldn't enable scrq irq 0x%lx. rc=%ld\n",
4232032c5e82SThomas Falcon 			scrq->hw_irq, rc);
4233032c5e82SThomas Falcon 	return rc;
4234032c5e82SThomas Falcon }
4235032c5e82SThomas Falcon 
/* Reap tx completions from a transmit sub-CRQ.
 *
 * For every completion entry: locate the tx buffer by its correlator
 * (high bit selects the TSO pool), free or consume its skb depending on
 * the per-entry return code, and put the buffer index back on the pool's
 * free map.  BQL is informed of completed packets/bytes, and a subqueue
 * that was stopped for lack of descriptors is woken once usage drops to
 * half the ring.  Always returns 0.
 */
static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
			       struct ibmvnic_sub_crq_queue *scrq)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_tx_pool *tx_pool;
	struct ibmvnic_tx_buff *txbuff;
	struct netdev_queue *txq;
	union sub_crq *next;
	int index;
	int i;

restart_loop:
	while (pending_scrq(adapter, scrq)) {
		unsigned int pool = scrq->pool_index;
		int num_entries = 0;
		int total_bytes = 0;
		int num_packets = 0;

		next = ibmvnic_next_scrq(adapter, scrq);
		for (i = 0; i < next->tx_comp.num_comps; i++) {
			index = be32_to_cpu(next->tx_comp.correlators[i]);
			/* correlator high bit distinguishes the TSO pool */
			if (index & IBMVNIC_TSO_POOL_MASK) {
				tx_pool = &adapter->tso_pool[pool];
				index &= ~IBMVNIC_TSO_POOL_MASK;
			} else {
				tx_pool = &adapter->tx_pool[pool];
			}

			txbuff = &tx_pool->tx_buff[index];
			num_packets++;
			num_entries += txbuff->num_entries;
			if (txbuff->skb) {
				total_bytes += txbuff->skb->len;
				/* non-zero rc means the hw reported a tx
				 * error for this buffer
				 */
				if (next->tx_comp.rcs[i]) {
					dev_err(dev, "tx error %x\n",
						next->tx_comp.rcs[i]);
					dev_kfree_skb_irq(txbuff->skb);
				} else {
					dev_consume_skb_irq(txbuff->skb);
				}
				txbuff->skb = NULL;
			} else {
				netdev_warn(adapter->netdev,
					    "TX completion received with NULL socket buffer\n");
			}
			/* return the buffer index to the pool's free list */
			tx_pool->free_map[tx_pool->producer_index] = index;
			tx_pool->producer_index =
				(tx_pool->producer_index + 1) %
					tx_pool->num_buffers;
		}
		/* remove tx_comp scrq*/
		next->tx_comp.first = 0;

		/* report completed work to byte queue limits */
		txq = netdev_get_tx_queue(adapter->netdev, scrq->pool_index);
		netdev_tx_completed_queue(txq, num_packets, total_bytes);

		if (atomic_sub_return(num_entries, &scrq->used) <=
		    (adapter->req_tx_entries_per_subcrq / 2) &&
		    __netif_subqueue_stopped(adapter->netdev,
					     scrq->pool_index)) {
			/* tx_queues_active is checked under RCU so a wake
			 * does not race with the queues being deactivated
			 */
			rcu_read_lock();
			if (adapter->tx_queues_active) {
				netif_wake_subqueue(adapter->netdev,
						    scrq->pool_index);
				netdev_dbg(adapter->netdev,
					   "Started queue %d\n",
					   scrq->pool_index);
			}
			rcu_read_unlock();
		}
	}

	enable_scrq_irq(adapter, scrq);

	/* close the race where a completion arrived between draining the
	 * queue and re-enabling the interrupt
	 */
	if (pending_scrq(adapter, scrq)) {
		disable_scrq_irq(adapter, scrq);
		goto restart_loop;
	}

	return 0;
}
4317032c5e82SThomas Falcon 
ibmvnic_interrupt_tx(int irq,void * instance)4318032c5e82SThomas Falcon static irqreturn_t ibmvnic_interrupt_tx(int irq, void *instance)
4319032c5e82SThomas Falcon {
4320032c5e82SThomas Falcon 	struct ibmvnic_sub_crq_queue *scrq = instance;
4321032c5e82SThomas Falcon 	struct ibmvnic_adapter *adapter = scrq->adapter;
4322032c5e82SThomas Falcon 
4323032c5e82SThomas Falcon 	disable_scrq_irq(adapter, scrq);
4324032c5e82SThomas Falcon 	ibmvnic_complete_tx(adapter, scrq);
4325032c5e82SThomas Falcon 
4326032c5e82SThomas Falcon 	return IRQ_HANDLED;
4327032c5e82SThomas Falcon }
4328032c5e82SThomas Falcon 
ibmvnic_interrupt_rx(int irq,void * instance)4329032c5e82SThomas Falcon static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance)
4330032c5e82SThomas Falcon {
4331032c5e82SThomas Falcon 	struct ibmvnic_sub_crq_queue *scrq = instance;
4332032c5e82SThomas Falcon 	struct ibmvnic_adapter *adapter = scrq->adapter;
4333032c5e82SThomas Falcon 
433409fb35eaSNathan Fontenot 	/* When booting a kdump kernel we can hit pending interrupts
433509fb35eaSNathan Fontenot 	 * prior to completing driver initialization.
433609fb35eaSNathan Fontenot 	 */
433709fb35eaSNathan Fontenot 	if (unlikely(adapter->state != VNIC_OPEN))
433809fb35eaSNathan Fontenot 		return IRQ_NONE;
433909fb35eaSNathan Fontenot 
43403d52b594SJohn Allen 	adapter->rx_stats_buffers[scrq->scrq_num].interrupts++;
43413d52b594SJohn Allen 
4342032c5e82SThomas Falcon 	if (napi_schedule_prep(&adapter->napi[scrq->scrq_num])) {
4343032c5e82SThomas Falcon 		disable_scrq_irq(adapter, scrq);
4344032c5e82SThomas Falcon 		__napi_schedule(&adapter->napi[scrq->scrq_num]);
4345032c5e82SThomas Falcon 	}
4346032c5e82SThomas Falcon 
4347032c5e82SThomas Falcon 	return IRQ_HANDLED;
4348032c5e82SThomas Falcon }
4349032c5e82SThomas Falcon 
/* Map and request an irq for every tx and rx sub-CRQ.
 *
 * On any failure the already-registered irqs are unwound: the rx loop
 * falls into req_rx_irq_failed (freeing rx irqs 0..i-1), then i is set to
 * req_tx_queues so req_tx_irq_failed frees all tx irqs, and finally the
 * sub-CRQs themselves are released.  Returns 0 on success or a negative
 * errno / request_irq() error.
 */
static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_sub_crq_queue *scrq;
	int i = 0, j = 0;
	int rc = 0;

	for (i = 0; i < adapter->req_tx_queues; i++) {
		netdev_dbg(adapter->netdev, "Initializing tx_scrq[%d] irq\n",
			   i);
		scrq = adapter->tx_scrq[i];
		scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);

		if (!scrq->irq) {
			rc = -EINVAL;
			dev_err(dev, "Error mapping irq\n");
			goto req_tx_irq_failed;
		}

		snprintf(scrq->name, sizeof(scrq->name), "ibmvnic-%x-tx%d",
			 adapter->vdev->unit_address, i);
		rc = request_irq(scrq->irq, ibmvnic_interrupt_tx,
				 0, scrq->name, scrq);

		if (rc) {
			dev_err(dev, "Couldn't register tx irq 0x%x. rc=%d\n",
				scrq->irq, rc);
			irq_dispose_mapping(scrq->irq);
			goto req_tx_irq_failed;
		}
	}

	for (i = 0; i < adapter->req_rx_queues; i++) {
		netdev_dbg(adapter->netdev, "Initializing rx_scrq[%d] irq\n",
			   i);
		scrq = adapter->rx_scrq[i];
		scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
		if (!scrq->irq) {
			rc = -EINVAL;
			dev_err(dev, "Error mapping irq\n");
			goto req_rx_irq_failed;
		}
		snprintf(scrq->name, sizeof(scrq->name), "ibmvnic-%x-rx%d",
			 adapter->vdev->unit_address, i);
		rc = request_irq(scrq->irq, ibmvnic_interrupt_rx,
				 0, scrq->name, scrq);
		if (rc) {
			dev_err(dev, "Couldn't register rx irq 0x%x. rc=%d\n",
				scrq->irq, rc);
			irq_dispose_mapping(scrq->irq);
			goto req_rx_irq_failed;
		}
	}

	/* spread queue irqs across CPUs; done under cpus_read_lock so the
	 * online cpu set is stable
	 */
	cpus_read_lock();
	ibmvnic_set_affinity(adapter);
	cpus_read_unlock();

	return rc;

req_rx_irq_failed:
	for (j = 0; j < i; j++) {
		free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]);
		irq_dispose_mapping(adapter->rx_scrq[j]->irq);
	}
	/* all tx irqs were registered; free them all below */
	i = adapter->req_tx_queues;
req_tx_irq_failed:
	for (j = 0; j < i; j++) {
		free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
		irq_dispose_mapping(adapter->tx_scrq[j]->irq);
	}
	release_sub_crqs(adapter, 1);
	return rc;
}
4424ea22d51aSThomas Falcon 
/* Allocate and register all sub-CRQ queues.
 *
 * Tries to create req_tx_queues + req_rx_queues sub-CRQs.  If fewer were
 * registered than the configured minimums, the whole attempt fails;
 * otherwise the requested tx/rx counts are trimmed down to match what was
 * actually registered, and the queues are partitioned into tx_scrq[] and
 * rx_scrq[].  Returns 0 on success or -ENOMEM.
 */
static int init_sub_crqs(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_sub_crq_queue **allqueues;
	int registered_queues = 0;
	int total_queues;
	int more = 0;
	int i;

	total_queues = adapter->req_tx_queues + adapter->req_rx_queues;

	allqueues = kcalloc(total_queues, sizeof(*allqueues), GFP_KERNEL);
	if (!allqueues)
		return -ENOMEM;

	for (i = 0; i < total_queues; i++) {
		allqueues[i] = init_sub_crq_queue(adapter);
		if (!allqueues[i]) {
			dev_warn(dev, "Couldn't allocate all sub-crqs\n");
			break;
		}
		registered_queues++;
	}

	/* Make sure we were able to register the minimum number of queues */
	if (registered_queues <
	    adapter->min_tx_queues + adapter->min_rx_queues) {
		dev_err(dev, "Fatal: Couldn't init  min number of sub-crqs\n");
		goto tx_failed;
	}

	/* Distribute the failed allocated queues*/
	/* Alternate between shrinking rx and tx requests; when a side is
	 * already at its minimum, 'more' extends the loop so the shortfall
	 * lands on the other side.
	 * NOTE(review): the switch is on i % 3 but only handles cases 0
	 * and 1, so every third iteration adjusts nothing — confirm this
	 * skew is intentional.
	 */
	for (i = 0; i < total_queues - registered_queues + more ; i++) {
		netdev_dbg(adapter->netdev, "Reducing number of queues\n");
		switch (i % 3) {
		case 0:
			if (adapter->req_rx_queues > adapter->min_rx_queues)
				adapter->req_rx_queues--;
			else
				more++;
			break;
		case 1:
			if (adapter->req_tx_queues > adapter->min_tx_queues)
				adapter->req_tx_queues--;
			else
				more++;
			break;
		}
	}

	adapter->tx_scrq = kcalloc(adapter->req_tx_queues,
				   sizeof(*adapter->tx_scrq), GFP_KERNEL);
	if (!adapter->tx_scrq)
		goto tx_failed;

	/* first req_tx_queues entries become the tx queues... */
	for (i = 0; i < adapter->req_tx_queues; i++) {
		adapter->tx_scrq[i] = allqueues[i];
		adapter->tx_scrq[i]->pool_index = i;
		adapter->num_active_tx_scrqs++;
	}

	adapter->rx_scrq = kcalloc(adapter->req_rx_queues,
				   sizeof(*adapter->rx_scrq), GFP_KERNEL);
	if (!adapter->rx_scrq)
		goto rx_failed;

	/* ...and the remainder become the rx queues */
	for (i = 0; i < adapter->req_rx_queues; i++) {
		adapter->rx_scrq[i] = allqueues[i + adapter->req_tx_queues];
		adapter->rx_scrq[i]->scrq_num = i;
		adapter->num_active_rx_scrqs++;
	}

	kfree(allqueues);
	return 0;

rx_failed:
	kfree(adapter->tx_scrq);
	adapter->tx_scrq = NULL;
tx_failed:
	for (i = 0; i < registered_queues; i++)
		release_sub_crq_queue(adapter, allqueues[i], 1);
	kfree(allqueues);
	return -ENOMEM;
}
4509d346b9bcSNathan Fontenot 
send_request_cap(struct ibmvnic_adapter * adapter,int retry)451009081b9dSLijun Pan static void send_request_cap(struct ibmvnic_adapter *adapter, int retry)
4511d346b9bcSNathan Fontenot {
4512d346b9bcSNathan Fontenot 	struct device *dev = &adapter->vdev->dev;
4513d346b9bcSNathan Fontenot 	union ibmvnic_crq crq;
4514c26eba03SJohn Allen 	int max_entries;
4515151b6a5cSSukadev Bhattiprolu 	int cap_reqs;
4516151b6a5cSSukadev Bhattiprolu 
4517151b6a5cSSukadev Bhattiprolu 	/* We send out 6 or 7 REQUEST_CAPABILITY CRQs below (depending on
4518151b6a5cSSukadev Bhattiprolu 	 * the PROMISC flag). Initialize this count upfront. When the tasklet
4519151b6a5cSSukadev Bhattiprolu 	 * receives a response to all of these, it will send the next protocol
4520151b6a5cSSukadev Bhattiprolu 	 * message (QUERY_IP_OFFLOAD).
4521151b6a5cSSukadev Bhattiprolu 	 */
4522151b6a5cSSukadev Bhattiprolu 	if (!(adapter->netdev->flags & IFF_PROMISC) ||
4523151b6a5cSSukadev Bhattiprolu 	    adapter->promisc_supported)
4524151b6a5cSSukadev Bhattiprolu 		cap_reqs = 7;
4525151b6a5cSSukadev Bhattiprolu 	else
4526151b6a5cSSukadev Bhattiprolu 		cap_reqs = 6;
4527d346b9bcSNathan Fontenot 
4528d346b9bcSNathan Fontenot 	if (!retry) {
4529d346b9bcSNathan Fontenot 		/* Sub-CRQ entries are 32 byte long */
4530d346b9bcSNathan Fontenot 		int entries_page = 4 * PAGE_SIZE / (sizeof(u64) * 4);
4531d346b9bcSNathan Fontenot 
4532151b6a5cSSukadev Bhattiprolu 		atomic_set(&adapter->running_cap_crqs, cap_reqs);
4533151b6a5cSSukadev Bhattiprolu 
4534d346b9bcSNathan Fontenot 		if (adapter->min_tx_entries_per_subcrq > entries_page ||
4535d346b9bcSNathan Fontenot 		    adapter->min_rx_add_entries_per_subcrq > entries_page) {
4536d346b9bcSNathan Fontenot 			dev_err(dev, "Fatal, invalid entries per sub-crq\n");
4537d346b9bcSNathan Fontenot 			return;
4538d346b9bcSNathan Fontenot 		}
4539d346b9bcSNathan Fontenot 
4540c26eba03SJohn Allen 		if (adapter->desired.mtu)
4541c26eba03SJohn Allen 			adapter->req_mtu = adapter->desired.mtu;
4542c26eba03SJohn Allen 		else
4543d346b9bcSNathan Fontenot 			adapter->req_mtu = adapter->netdev->mtu + ETH_HLEN;
4544c26eba03SJohn Allen 
4545c26eba03SJohn Allen 		if (!adapter->desired.tx_entries)
4546c26eba03SJohn Allen 			adapter->desired.tx_entries =
4547c26eba03SJohn Allen 					adapter->max_tx_entries_per_subcrq;
4548c26eba03SJohn Allen 		if (!adapter->desired.rx_entries)
4549c26eba03SJohn Allen 			adapter->desired.rx_entries =
4550c26eba03SJohn Allen 					adapter->max_rx_add_entries_per_subcrq;
4551c26eba03SJohn Allen 
4552a75de820SSukadev Bhattiprolu 		max_entries = IBMVNIC_LTB_SET_SIZE /
4553c26eba03SJohn Allen 			      (adapter->req_mtu + IBMVNIC_BUFFER_HLEN);
4554c26eba03SJohn Allen 
4555c26eba03SJohn Allen 		if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) *
4556a75de820SSukadev Bhattiprolu 			adapter->desired.tx_entries > IBMVNIC_LTB_SET_SIZE) {
4557c26eba03SJohn Allen 			adapter->desired.tx_entries = max_entries;
4558c26eba03SJohn Allen 		}
4559c26eba03SJohn Allen 
4560c26eba03SJohn Allen 		if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) *
4561a75de820SSukadev Bhattiprolu 			adapter->desired.rx_entries > IBMVNIC_LTB_SET_SIZE) {
4562c26eba03SJohn Allen 			adapter->desired.rx_entries = max_entries;
4563c26eba03SJohn Allen 		}
4564c26eba03SJohn Allen 
4565c26eba03SJohn Allen 		if (adapter->desired.tx_entries)
4566c26eba03SJohn Allen 			adapter->req_tx_entries_per_subcrq =
4567c26eba03SJohn Allen 					adapter->desired.tx_entries;
4568c26eba03SJohn Allen 		else
4569c26eba03SJohn Allen 			adapter->req_tx_entries_per_subcrq =
4570c26eba03SJohn Allen 					adapter->max_tx_entries_per_subcrq;
4571c26eba03SJohn Allen 
4572c26eba03SJohn Allen 		if (adapter->desired.rx_entries)
4573c26eba03SJohn Allen 			adapter->req_rx_add_entries_per_subcrq =
4574c26eba03SJohn Allen 					adapter->desired.rx_entries;
4575c26eba03SJohn Allen 		else
4576c26eba03SJohn Allen 			adapter->req_rx_add_entries_per_subcrq =
4577c26eba03SJohn Allen 					adapter->max_rx_add_entries_per_subcrq;
4578c26eba03SJohn Allen 
4579c26eba03SJohn Allen 		if (adapter->desired.tx_queues)
4580c26eba03SJohn Allen 			adapter->req_tx_queues =
4581c26eba03SJohn Allen 					adapter->desired.tx_queues;
4582c26eba03SJohn Allen 		else
4583c26eba03SJohn Allen 			adapter->req_tx_queues =
4584c26eba03SJohn Allen 					adapter->opt_tx_comp_sub_queues;
4585c26eba03SJohn Allen 
4586c26eba03SJohn Allen 		if (adapter->desired.rx_queues)
4587c26eba03SJohn Allen 			adapter->req_rx_queues =
4588c26eba03SJohn Allen 					adapter->desired.rx_queues;
4589c26eba03SJohn Allen 		else
4590c26eba03SJohn Allen 			adapter->req_rx_queues =
4591c26eba03SJohn Allen 					adapter->opt_rx_comp_queues;
4592c26eba03SJohn Allen 
4593c26eba03SJohn Allen 		adapter->req_rx_add_queues = adapter->max_rx_add_queues;
4594151b6a5cSSukadev Bhattiprolu 	} else {
4595151b6a5cSSukadev Bhattiprolu 		atomic_add(cap_reqs, &adapter->running_cap_crqs);
4596d346b9bcSNathan Fontenot 	}
4597032c5e82SThomas Falcon 	memset(&crq, 0, sizeof(crq));
4598032c5e82SThomas Falcon 	crq.request_capability.first = IBMVNIC_CRQ_CMD;
4599032c5e82SThomas Falcon 	crq.request_capability.cmd = REQUEST_CAPABILITY;
4600032c5e82SThomas Falcon 
4601032c5e82SThomas Falcon 	crq.request_capability.capability = cpu_to_be16(REQ_TX_QUEUES);
4602de89e854SThomas Falcon 	crq.request_capability.number = cpu_to_be64(adapter->req_tx_queues);
4603151b6a5cSSukadev Bhattiprolu 	cap_reqs--;
4604032c5e82SThomas Falcon 	ibmvnic_send_crq(adapter, &crq);
4605032c5e82SThomas Falcon 
4606032c5e82SThomas Falcon 	crq.request_capability.capability = cpu_to_be16(REQ_RX_QUEUES);
4607de89e854SThomas Falcon 	crq.request_capability.number = cpu_to_be64(adapter->req_rx_queues);
4608151b6a5cSSukadev Bhattiprolu 	cap_reqs--;
4609032c5e82SThomas Falcon 	ibmvnic_send_crq(adapter, &crq);
4610032c5e82SThomas Falcon 
4611032c5e82SThomas Falcon 	crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_QUEUES);
4612de89e854SThomas Falcon 	crq.request_capability.number = cpu_to_be64(adapter->req_rx_add_queues);
4613151b6a5cSSukadev Bhattiprolu 	cap_reqs--;
4614032c5e82SThomas Falcon 	ibmvnic_send_crq(adapter, &crq);
4615032c5e82SThomas Falcon 
4616032c5e82SThomas Falcon 	crq.request_capability.capability =
4617032c5e82SThomas Falcon 	    cpu_to_be16(REQ_TX_ENTRIES_PER_SUBCRQ);
4618032c5e82SThomas Falcon 	crq.request_capability.number =
4619de89e854SThomas Falcon 	    cpu_to_be64(adapter->req_tx_entries_per_subcrq);
4620151b6a5cSSukadev Bhattiprolu 	cap_reqs--;
4621032c5e82SThomas Falcon 	ibmvnic_send_crq(adapter, &crq);
4622032c5e82SThomas Falcon 
4623032c5e82SThomas Falcon 	crq.request_capability.capability =
4624032c5e82SThomas Falcon 	    cpu_to_be16(REQ_RX_ADD_ENTRIES_PER_SUBCRQ);
4625032c5e82SThomas Falcon 	crq.request_capability.number =
4626de89e854SThomas Falcon 	    cpu_to_be64(adapter->req_rx_add_entries_per_subcrq);
4627151b6a5cSSukadev Bhattiprolu 	cap_reqs--;
4628032c5e82SThomas Falcon 	ibmvnic_send_crq(adapter, &crq);
4629032c5e82SThomas Falcon 
4630032c5e82SThomas Falcon 	crq.request_capability.capability = cpu_to_be16(REQ_MTU);
4631de89e854SThomas Falcon 	crq.request_capability.number = cpu_to_be64(adapter->req_mtu);
4632151b6a5cSSukadev Bhattiprolu 	cap_reqs--;
4633032c5e82SThomas Falcon 	ibmvnic_send_crq(adapter, &crq);
4634032c5e82SThomas Falcon 
4635032c5e82SThomas Falcon 	if (adapter->netdev->flags & IFF_PROMISC) {
4636032c5e82SThomas Falcon 		if (adapter->promisc_supported) {
4637032c5e82SThomas Falcon 			crq.request_capability.capability =
4638032c5e82SThomas Falcon 			    cpu_to_be16(PROMISC_REQUESTED);
4639de89e854SThomas Falcon 			crq.request_capability.number = cpu_to_be64(1);
4640151b6a5cSSukadev Bhattiprolu 			cap_reqs--;
4641032c5e82SThomas Falcon 			ibmvnic_send_crq(adapter, &crq);
4642032c5e82SThomas Falcon 		}
4643032c5e82SThomas Falcon 	} else {
4644032c5e82SThomas Falcon 		crq.request_capability.capability =
4645032c5e82SThomas Falcon 		    cpu_to_be16(PROMISC_REQUESTED);
4646de89e854SThomas Falcon 		crq.request_capability.number = cpu_to_be64(0);
4647151b6a5cSSukadev Bhattiprolu 		cap_reqs--;
4648032c5e82SThomas Falcon 		ibmvnic_send_crq(adapter, &crq);
4649032c5e82SThomas Falcon 	}
4650151b6a5cSSukadev Bhattiprolu 
4651151b6a5cSSukadev Bhattiprolu 	/* Keep at end to catch any discrepancy between expected and actual
4652151b6a5cSSukadev Bhattiprolu 	 * CRQs sent.
4653151b6a5cSSukadev Bhattiprolu 	 */
4654151b6a5cSSukadev Bhattiprolu 	WARN_ON(cap_reqs != 0);
4655032c5e82SThomas Falcon }
4656032c5e82SThomas Falcon 
/* pending_scrq() - test whether the next sub-CRQ descriptor is valid.
 *
 * @adapter: private adapter structure (unused here, kept for call symmetry)
 * @scrq:    sub-CRQ queue whose cursor entry is inspected
 *
 * Return: non-zero if the descriptor at scrq->cur has its valid
 * (IBMVNIC_CRQ_CMD_RSP) flag set, i.e. an entry is waiting to be processed.
 * Does not advance the cursor; ibmvnic_next_scrq() consumes entries.
 */
pending_scrq(struct ibmvnic_adapter * adapter,struct ibmvnic_sub_crq_queue * scrq)4657032c5e82SThomas Falcon static int pending_scrq(struct ibmvnic_adapter *adapter,
4658032c5e82SThomas Falcon 			struct ibmvnic_sub_crq_queue *scrq)
4659032c5e82SThomas Falcon {
4660032c5e82SThomas Falcon 	union sub_crq *entry = &scrq->msgs[scrq->cur];
4661665ab1ebSLijun Pan 	int rc;
4662032c5e82SThomas Falcon 
4663665ab1ebSLijun Pan 	rc = !!(entry->generic.first & IBMVNIC_CRQ_CMD_RSP);
4664665ab1ebSLijun Pan 
4665665ab1ebSLijun Pan 	/* Ensure that the SCRQ valid flag is loaded prior to loading the
4666665ab1ebSLijun Pan 	 * contents of the SCRQ descriptor
4667665ab1ebSLijun Pan 	 */
4668665ab1ebSLijun Pan 	dma_rmb();
4669665ab1ebSLijun Pan 
4670665ab1ebSLijun Pan 	return rc;
4671032c5e82SThomas Falcon }
4672032c5e82SThomas Falcon 
/* ibmvnic_next_scrq() - consume the next valid sub-CRQ descriptor.
 *
 * @adapter: private adapter structure (unused here, kept for call symmetry)
 * @scrq:    sub-CRQ queue to pull from
 *
 * Under scrq->lock (IRQ-safe), checks the descriptor at the cursor; if its
 * valid flag is set, advances the cursor (wrapping at scrq->size) and
 * returns the entry, otherwise returns NULL.  The dma_rmb() after the
 * unlock orders the valid-flag load before any subsequent loads of the
 * descriptor contents by the caller.
 */
ibmvnic_next_scrq(struct ibmvnic_adapter * adapter,struct ibmvnic_sub_crq_queue * scrq)4673032c5e82SThomas Falcon static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *adapter,
4674032c5e82SThomas Falcon 					struct ibmvnic_sub_crq_queue *scrq)
4675032c5e82SThomas Falcon {
4676032c5e82SThomas Falcon 	union sub_crq *entry;
4677032c5e82SThomas Falcon 	unsigned long flags;
4678032c5e82SThomas Falcon 
4679032c5e82SThomas Falcon 	spin_lock_irqsave(&scrq->lock, flags);
4680032c5e82SThomas Falcon 	entry = &scrq->msgs[scrq->cur];
4681032c5e82SThomas Falcon 	if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP) {
4682032c5e82SThomas Falcon 		if (++scrq->cur == scrq->size)
4683032c5e82SThomas Falcon 			scrq->cur = 0;
4684032c5e82SThomas Falcon 	} else {
4685032c5e82SThomas Falcon 		entry = NULL;
4686032c5e82SThomas Falcon 	}
4687032c5e82SThomas Falcon 	spin_unlock_irqrestore(&scrq->lock, flags);
4688032c5e82SThomas Falcon 
4689665ab1ebSLijun Pan 	/* Ensure that the SCRQ valid flag is loaded prior to loading the
4690665ab1ebSLijun Pan 	 * contents of the SCRQ descriptor
4691b71ec952SThomas Falcon 	 */
4692b71ec952SThomas Falcon 	dma_rmb();
4693b71ec952SThomas Falcon 
4694032c5e82SThomas Falcon 	return entry;
4695032c5e82SThomas Falcon }
4696032c5e82SThomas Falcon 
/* ibmvnic_next_crq() - consume the next valid entry from the main CRQ.
 *
 * @adapter: private adapter structure whose crq queue is read
 *
 * Return: pointer to the entry at the cursor if its valid flag
 * (IBMVNIC_CRQ_CMD_RSP) is set, advancing and wrapping the cursor;
 * NULL when no entry is pending.  No locking here - NOTE(review):
 * appears to rely on the caller serializing CRQ consumption; confirm
 * against the CRQ tasklet.
 */
ibmvnic_next_crq(struct ibmvnic_adapter * adapter)4697032c5e82SThomas Falcon static union ibmvnic_crq *ibmvnic_next_crq(struct ibmvnic_adapter *adapter)
4698032c5e82SThomas Falcon {
4699032c5e82SThomas Falcon 	struct ibmvnic_crq_queue *queue = &adapter->crq;
4700032c5e82SThomas Falcon 	union ibmvnic_crq *crq;
4701032c5e82SThomas Falcon 
4702032c5e82SThomas Falcon 	crq = &queue->msgs[queue->cur];
4703032c5e82SThomas Falcon 	if (crq->generic.first & IBMVNIC_CRQ_CMD_RSP) {
4704032c5e82SThomas Falcon 		if (++queue->cur == queue->size)
4705032c5e82SThomas Falcon 			queue->cur = 0;
4706032c5e82SThomas Falcon 	} else {
4707032c5e82SThomas Falcon 		crq = NULL;
4708032c5e82SThomas Falcon 	}
4709032c5e82SThomas Falcon 
4710032c5e82SThomas Falcon 	return crq;
4711032c5e82SThomas Falcon }
4712032c5e82SThomas Falcon 
/* print_subcrq_error() - log a sub-CRQ hcall failure at an appropriate level.
 *
 * @dev:  device to log against
 * @rc:   hypervisor return code from the failed sub-CRQ hcall
 * @func: name of the calling function, for the log message
 *
 * H_PARAMETER and H_CLOSED are expected during failover/reset, so they are
 * logged as rate-limited warnings; anything else is a rate-limited error.
 */
print_subcrq_error(struct device * dev,int rc,const char * func)47132d14d379SThomas Falcon static void print_subcrq_error(struct device *dev, int rc, const char *func)
47142d14d379SThomas Falcon {
47152d14d379SThomas Falcon 	switch (rc) {
47162d14d379SThomas Falcon 	case H_PARAMETER:
47172d14d379SThomas Falcon 		dev_warn_ratelimited(dev,
47182d14d379SThomas Falcon 				     "%s failed: Send request is malformed or adapter failover pending. (rc=%d)\n",
47192d14d379SThomas Falcon 				     func, rc);
47202d14d379SThomas Falcon 		break;
47212d14d379SThomas Falcon 	case H_CLOSED:
47222d14d379SThomas Falcon 		dev_warn_ratelimited(dev,
47232d14d379SThomas Falcon 				     "%s failed: Backing queue closed. Adapter is down or failover pending. (rc=%d)\n",
47242d14d379SThomas Falcon 				     func, rc);
47252d14d379SThomas Falcon 		break;
47262d14d379SThomas Falcon 	default:
47272d14d379SThomas Falcon 		dev_err_ratelimited(dev, "%s failed: (rc=%d)\n", func, rc);
47282d14d379SThomas Falcon 		break;
47292d14d379SThomas Falcon 	}
47302d14d379SThomas Falcon }
47312d14d379SThomas Falcon 
/* send_subcrq_indirect() - post a batch of sub-CRQ entries to the server.
 *
 * @adapter:       private adapter structure
 * @remote_handle: server-side handle of the target sub-CRQ
 * @ioba:          I/O bus address of the descriptor array
 * @num_entries:   number of descriptors at @ioba
 *
 * Issues the H_SEND_SUB_CRQ_INDIRECT hcall.  The dma_wmb() guarantees the
 * descriptor writes are visible to the hypervisor before the hcall.
 * Return: hcall return code; failures are logged via print_subcrq_error().
 */
send_subcrq_indirect(struct ibmvnic_adapter * adapter,u64 remote_handle,u64 ioba,u64 num_entries)4732ad7775dcSThomas Falcon static int send_subcrq_indirect(struct ibmvnic_adapter *adapter,
4733ad7775dcSThomas Falcon 				u64 remote_handle, u64 ioba, u64 num_entries)
4734ad7775dcSThomas Falcon {
4735ad7775dcSThomas Falcon 	unsigned int ua = adapter->vdev->unit_address;
4736ad7775dcSThomas Falcon 	struct device *dev = &adapter->vdev->dev;
4737ad7775dcSThomas Falcon 	int rc;
4738ad7775dcSThomas Falcon 
4739ad7775dcSThomas Falcon 	/* Make sure the hypervisor sees the complete request */
47401a42156fSLijun Pan 	dma_wmb();
4741ad7775dcSThomas Falcon 	rc = plpar_hcall_norets(H_SEND_SUB_CRQ_INDIRECT, ua,
4742ad7775dcSThomas Falcon 				cpu_to_be64(remote_handle),
4743ad7775dcSThomas Falcon 				ioba, num_entries);
4744ad7775dcSThomas Falcon 
47452d14d379SThomas Falcon 	if (rc)
47462d14d379SThomas Falcon 		print_subcrq_error(dev, rc, __func__);
4747ad7775dcSThomas Falcon 
4748ad7775dcSThomas Falcon 	return rc;
4749ad7775dcSThomas Falcon }
4750ad7775dcSThomas Falcon 
/* ibmvnic_send_crq() - send one 16-byte CRQ message to the VNIC server.
 *
 * @adapter: private adapter structure
 * @crq:     message to send (treated as two big-endian u64s on the wire)
 *
 * Rejects with -EINVAL any message other than IBMVNIC_CRQ_INIT_CMD while
 * the CRQ is inactive (e.g. mid-reset), since the server would not see it.
 * The dma_wmb() orders the message contents before the H_SEND_CRQ hcall.
 * H_CLOSED is logged but not acted on here - the driver waits for a
 * passive init from the server instead of resetting.
 * Return: 0 on success, hcall return code or -EINVAL on failure.
 */
ibmvnic_send_crq(struct ibmvnic_adapter * adapter,union ibmvnic_crq * crq)4751032c5e82SThomas Falcon static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter,
4752032c5e82SThomas Falcon 			    union ibmvnic_crq *crq)
4753032c5e82SThomas Falcon {
4754032c5e82SThomas Falcon 	unsigned int ua = adapter->vdev->unit_address;
4755032c5e82SThomas Falcon 	struct device *dev = &adapter->vdev->dev;
4756032c5e82SThomas Falcon 	u64 *u64_crq = (u64 *)crq;
4757032c5e82SThomas Falcon 	int rc;
4758032c5e82SThomas Falcon 
4759032c5e82SThomas Falcon 	netdev_dbg(adapter->netdev, "Sending CRQ: %016lx %016lx\n",
4760429aa364SLijun Pan 		   (unsigned long)cpu_to_be64(u64_crq[0]),
4761429aa364SLijun Pan 		   (unsigned long)cpu_to_be64(u64_crq[1]));
4762032c5e82SThomas Falcon 
47635153698eSThomas Falcon 	if (!adapter->crq.active &&
47645153698eSThomas Falcon 	    crq->generic.first != IBMVNIC_CRQ_INIT_CMD) {
47655153698eSThomas Falcon 		dev_warn(dev, "Invalid request detected while CRQ is inactive, possible device state change during reset\n");
47665153698eSThomas Falcon 		return -EINVAL;
47675153698eSThomas Falcon 	}
47685153698eSThomas Falcon 
4769032c5e82SThomas Falcon 	/* Make sure the hypervisor sees the complete request */
47701a42156fSLijun Pan 	dma_wmb();
4771032c5e82SThomas Falcon 
4772032c5e82SThomas Falcon 	rc = plpar_hcall_norets(H_SEND_CRQ, ua,
4773032c5e82SThomas Falcon 				cpu_to_be64(u64_crq[0]),
4774032c5e82SThomas Falcon 				cpu_to_be64(u64_crq[1]));
4775032c5e82SThomas Falcon 
4776032c5e82SThomas Falcon 	if (rc) {
4777ec95dffaSNathan Fontenot 		if (rc == H_CLOSED) {
4778032c5e82SThomas Falcon 			dev_warn(dev, "CRQ Queue closed\n");
4779fa68bfabSLijun Pan 			/* do not reset, report the fail, wait for passive init from server */
4780ec95dffaSNathan Fontenot 		}
4781ec95dffaSNathan Fontenot 
4782032c5e82SThomas Falcon 		dev_warn(dev, "Send error (rc=%d)\n", rc);
4783032c5e82SThomas Falcon 	}
4784032c5e82SThomas Falcon 
4785032c5e82SThomas Falcon 	return rc;
4786032c5e82SThomas Falcon }
4787032c5e82SThomas Falcon 
/* ibmvnic_send_crq_init() - send the CRQ INIT handshake message.
 *
 * @adapter: private adapter structure
 *
 * Sends IBMVNIC_CRQ_INIT_CMD/IBMVNIC_CRQ_INIT, retrying up to 100 times
 * with a 50 ms sleep while the hypervisor reports H_CLOSED (e.g. the
 * partner side is not yet ready after a failover).
 * Return: 0 on success, last non-zero rc from ibmvnic_send_crq() otherwise.
 */
ibmvnic_send_crq_init(struct ibmvnic_adapter * adapter)4788032c5e82SThomas Falcon static int ibmvnic_send_crq_init(struct ibmvnic_adapter *adapter)
4789032c5e82SThomas Falcon {
479036a782fdSThomas Falcon 	struct device *dev = &adapter->vdev->dev;
4791032c5e82SThomas Falcon 	union ibmvnic_crq crq;
479236a782fdSThomas Falcon 	int retries = 100;
479336a782fdSThomas Falcon 	int rc;
4794032c5e82SThomas Falcon 
4795032c5e82SThomas Falcon 	memset(&crq, 0, sizeof(crq));
4796032c5e82SThomas Falcon 	crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
4797032c5e82SThomas Falcon 	crq.generic.cmd = IBMVNIC_CRQ_INIT;
4798032c5e82SThomas Falcon 	netdev_dbg(adapter->netdev, "Sending CRQ init\n");
4799032c5e82SThomas Falcon 
480036a782fdSThomas Falcon 	do {
480136a782fdSThomas Falcon 		rc = ibmvnic_send_crq(adapter, &crq);
480236a782fdSThomas Falcon 		if (rc != H_CLOSED)
480336a782fdSThomas Falcon 			break;
480436a782fdSThomas Falcon 		retries--;
480536a782fdSThomas Falcon 		msleep(50);
480636a782fdSThomas Falcon 
480736a782fdSThomas Falcon 	} while (retries > 0);
480836a782fdSThomas Falcon 
480936a782fdSThomas Falcon 	if (rc) {
481036a782fdSThomas Falcon 		dev_err(dev, "Failed to send init request, rc = %d\n", rc);
481136a782fdSThomas Falcon 		return rc;
481236a782fdSThomas Falcon 	}
481336a782fdSThomas Falcon 
481436a782fdSThomas Falcon 	return 0;
4815032c5e82SThomas Falcon }
4816032c5e82SThomas Falcon 
/* TLV entry for the client-data region of the login buffer sent to the
 * VNIC server: a one-byte type, big-endian length, then @len bytes of
 * NUL-terminated name data.  __packed because this is wire format.
 */
481737798d02SNathan Fontenot struct vnic_login_client_data {
481837798d02SNathan Fontenot 	u8	type;
481937798d02SNathan Fontenot 	__be16	len;
482008ea556eSKees Cook 	char	name[];
482137798d02SNathan Fontenot } __packed;
482237798d02SNathan Fontenot 
/* vnic_client_data_len() - size of the client-data TLV region for login.
 *
 * @adapter: adapter whose netdev name contributes to the length
 *
 * Must stay in sync with vnic_add_client_data(), which fills the region.
 * Return: byte count for four TLV headers plus the three NUL-terminated
 * strings (OS name "Linux", LPAR nodename, netdev name); the fourth entry
 * is the all-zero terminator.
 */
vnic_client_data_len(struct ibmvnic_adapter * adapter)482337798d02SNathan Fontenot static int vnic_client_data_len(struct ibmvnic_adapter *adapter)
482437798d02SNathan Fontenot {
482537798d02SNathan Fontenot 	int len;
482637798d02SNathan Fontenot 
482737798d02SNathan Fontenot 	/* Calculate the amount of buffer space needed for the
482837798d02SNathan Fontenot 	 * vnic client data in the login buffer. There are four entries,
482937798d02SNathan Fontenot 	 * OS name, LPAR name, device name, and a null last entry.
483037798d02SNathan Fontenot 	 */
483137798d02SNathan Fontenot 	len = 4 * sizeof(struct vnic_login_client_data);
483237798d02SNathan Fontenot 	len += 6; /* "Linux" plus NULL */
483337798d02SNathan Fontenot 	len += strlen(utsname()->nodename) + 1;
483437798d02SNathan Fontenot 	len += strlen(adapter->netdev->name) + 1;
483537798d02SNathan Fontenot 
483637798d02SNathan Fontenot 	return len;
483737798d02SNathan Fontenot }
483837798d02SNathan Fontenot 
/* vnic_add_client_data() - fill the client-data TLVs in the login buffer.
 *
 * @adapter: source of the LPAR nodename and netdev name
 * @vlcd:    start of the client-data region (sized by vnic_client_data_len())
 *
 * Writes three back-to-back TLVs (type 1 OS name, type 2 LPAR name,
 * type 3 device name), advancing @vlcd past each entry's flexible name[]
 * array.  The terminating all-zero fourth entry is not written here; it
 * comes from the caller's zero-initialized (kzalloc'd) login buffer.
 */
vnic_add_client_data(struct ibmvnic_adapter * adapter,struct vnic_login_client_data * vlcd)483937798d02SNathan Fontenot static void vnic_add_client_data(struct ibmvnic_adapter *adapter,
484037798d02SNathan Fontenot 				 struct vnic_login_client_data *vlcd)
484137798d02SNathan Fontenot {
484237798d02SNathan Fontenot 	const char *os_name = "Linux";
484337798d02SNathan Fontenot 	int len;
484437798d02SNathan Fontenot 
484537798d02SNathan Fontenot 	/* Type 1 - LPAR OS */
484637798d02SNathan Fontenot 	vlcd->type = 1;
484737798d02SNathan Fontenot 	len = strlen(os_name) + 1;
484837798d02SNathan Fontenot 	vlcd->len = cpu_to_be16(len);
4849ef2c3ddaSKees Cook 	strscpy(vlcd->name, os_name, len);
485008ea556eSKees Cook 	vlcd = (struct vnic_login_client_data *)(vlcd->name + len);
485137798d02SNathan Fontenot 
485237798d02SNathan Fontenot 	/* Type 2 - LPAR name */
485337798d02SNathan Fontenot 	vlcd->type = 2;
485437798d02SNathan Fontenot 	len = strlen(utsname()->nodename) + 1;
485537798d02SNathan Fontenot 	vlcd->len = cpu_to_be16(len);
4856ef2c3ddaSKees Cook 	strscpy(vlcd->name, utsname()->nodename, len);
485708ea556eSKees Cook 	vlcd = (struct vnic_login_client_data *)(vlcd->name + len);
485837798d02SNathan Fontenot 
485937798d02SNathan Fontenot 	/* Type 3 - device name */
486037798d02SNathan Fontenot 	vlcd->type = 3;
486137798d02SNathan Fontenot 	len = strlen(adapter->netdev->name) + 1;
486237798d02SNathan Fontenot 	vlcd->len = cpu_to_be16(len);
4863ef2c3ddaSKees Cook 	strscpy(vlcd->name, adapter->netdev->name, len);
486437798d02SNathan Fontenot }
486537798d02SNathan Fontenot 
/* send_login() - build, DMA-map and send the LOGIN request to the server.
 *
 * @adapter: private adapter structure; req_tx/rx_queues and the tx/rx
 *           sub-CRQs must already be allocated
 *
 * Allocates a login buffer (queue CRQ-number lists plus the client-data
 * TLVs) and a response buffer, maps both for DMA, stashes them on the
 * adapter for the response handler, and sends the LOGIN CRQ.  On failure
 * the goto chain unwinds mappings/allocations in reverse order and the
 * adapter->login_buf/login_rsp_buf pointers are cleared.
 * Return: 0 on success, -ENOMEM on any allocation/mapping/send failure.
 */
send_login(struct ibmvnic_adapter * adapter)486620a8ab74SThomas Falcon static int send_login(struct ibmvnic_adapter *adapter)
4867032c5e82SThomas Falcon {
4868032c5e82SThomas Falcon 	struct ibmvnic_login_rsp_buffer *login_rsp_buffer;
4869032c5e82SThomas Falcon 	struct ibmvnic_login_buffer *login_buffer;
4870032c5e82SThomas Falcon 	struct device *dev = &adapter->vdev->dev;
4871c98d9cc4SDany Madden 	struct vnic_login_client_data *vlcd;
4872032c5e82SThomas Falcon 	dma_addr_t rsp_buffer_token;
4873032c5e82SThomas Falcon 	dma_addr_t buffer_token;
4874032c5e82SThomas Falcon 	size_t rsp_buffer_size;
4875032c5e82SThomas Falcon 	union ibmvnic_crq crq;
4876c98d9cc4SDany Madden 	int client_data_len;
4877032c5e82SThomas Falcon 	size_t buffer_size;
4878032c5e82SThomas Falcon 	__be64 *tx_list_p;
4879032c5e82SThomas Falcon 	__be64 *rx_list_p;
4880c98d9cc4SDany Madden 	int rc;
4881032c5e82SThomas Falcon 	int i;
4882032c5e82SThomas Falcon 
488320a8ab74SThomas Falcon 	if (!adapter->tx_scrq || !adapter->rx_scrq) {
488420a8ab74SThomas Falcon 		netdev_err(adapter->netdev,
488520a8ab74SThomas Falcon 			   "RX or TX queues are not allocated, device login failed\n");
4886b6ee566cSDany Madden 		return -ENOMEM;
488720a8ab74SThomas Falcon 	}
488820a8ab74SThomas Falcon 
	/* Drop any buffers left over from a previous (re)login attempt. */
4889a0c8be56SLijun Pan 	release_login_buffer(adapter);
489034f0f4e3SThomas Falcon 	release_login_rsp_buffer(adapter);
4891a0c8be56SLijun Pan 
489237798d02SNathan Fontenot 	client_data_len = vnic_client_data_len(adapter);
489337798d02SNathan Fontenot 
4894032c5e82SThomas Falcon 	buffer_size =
4895032c5e82SThomas Falcon 	    sizeof(struct ibmvnic_login_buffer) +
489637798d02SNathan Fontenot 	    sizeof(u64) * (adapter->req_tx_queues + adapter->req_rx_queues) +
489737798d02SNathan Fontenot 	    client_data_len;
4898032c5e82SThomas Falcon 
	/* kzalloc: the zeroed tail doubles as the null terminating TLV of
	 * the client-data region (see vnic_add_client_data()).
	 */
489937798d02SNathan Fontenot 	login_buffer = kzalloc(buffer_size, GFP_ATOMIC);
4900032c5e82SThomas Falcon 	if (!login_buffer)
4901032c5e82SThomas Falcon 		goto buf_alloc_failed;
4902032c5e82SThomas Falcon 
4903032c5e82SThomas Falcon 	buffer_token = dma_map_single(dev, login_buffer, buffer_size,
4904032c5e82SThomas Falcon 				      DMA_TO_DEVICE);
4905032c5e82SThomas Falcon 	if (dma_mapping_error(dev, buffer_token)) {
4906032c5e82SThomas Falcon 		dev_err(dev, "Couldn't map login buffer\n");
4907032c5e82SThomas Falcon 		goto buf_map_failed;
4908032c5e82SThomas Falcon 	}
4909032c5e82SThomas Falcon 
4910498cd8e4SJohn Allen 	rsp_buffer_size = sizeof(struct ibmvnic_login_rsp_buffer) +
4911498cd8e4SJohn Allen 			  sizeof(u64) * adapter->req_tx_queues +
4912498cd8e4SJohn Allen 			  sizeof(u64) * adapter->req_rx_queues +
4913498cd8e4SJohn Allen 			  sizeof(u64) * adapter->req_rx_queues +
4914498cd8e4SJohn Allen 			  sizeof(u8) * IBMVNIC_TX_DESC_VERSIONS;
4915032c5e82SThomas Falcon 
4916032c5e82SThomas Falcon 	login_rsp_buffer = kmalloc(rsp_buffer_size, GFP_ATOMIC);
4917032c5e82SThomas Falcon 	if (!login_rsp_buffer)
4918032c5e82SThomas Falcon 		goto buf_rsp_alloc_failed;
4919032c5e82SThomas Falcon 
4920032c5e82SThomas Falcon 	rsp_buffer_token = dma_map_single(dev, login_rsp_buffer,
4921032c5e82SThomas Falcon 					  rsp_buffer_size, DMA_FROM_DEVICE);
4922032c5e82SThomas Falcon 	if (dma_mapping_error(dev, rsp_buffer_token)) {
4923032c5e82SThomas Falcon 		dev_err(dev, "Couldn't map login rsp buffer\n");
4924032c5e82SThomas Falcon 		goto buf_rsp_map_failed;
4925032c5e82SThomas Falcon 	}
4926661a2622SNathan Fontenot 
	/* Stash buffers/tokens so the LOGIN_RSP handler can find them. */
4927032c5e82SThomas Falcon 	adapter->login_buf = login_buffer;
4928032c5e82SThomas Falcon 	adapter->login_buf_token = buffer_token;
4929032c5e82SThomas Falcon 	adapter->login_buf_sz = buffer_size;
4930032c5e82SThomas Falcon 	adapter->login_rsp_buf = login_rsp_buffer;
4931032c5e82SThomas Falcon 	adapter->login_rsp_buf_token = rsp_buffer_token;
4932032c5e82SThomas Falcon 	adapter->login_rsp_buf_sz = rsp_buffer_size;
4933032c5e82SThomas Falcon 
4934032c5e82SThomas Falcon 	login_buffer->len = cpu_to_be32(buffer_size);
4935032c5e82SThomas Falcon 	login_buffer->version = cpu_to_be32(INITIAL_VERSION_LB);
4936032c5e82SThomas Falcon 	login_buffer->num_txcomp_subcrqs = cpu_to_be32(adapter->req_tx_queues);
4937032c5e82SThomas Falcon 	login_buffer->off_txcomp_subcrqs =
4938032c5e82SThomas Falcon 	    cpu_to_be32(sizeof(struct ibmvnic_login_buffer));
4939032c5e82SThomas Falcon 	login_buffer->num_rxcomp_subcrqs = cpu_to_be32(adapter->req_rx_queues);
4940032c5e82SThomas Falcon 	login_buffer->off_rxcomp_subcrqs =
4941032c5e82SThomas Falcon 	    cpu_to_be32(sizeof(struct ibmvnic_login_buffer) +
4942032c5e82SThomas Falcon 			sizeof(u64) * adapter->req_tx_queues);
4943032c5e82SThomas Falcon 	login_buffer->login_rsp_ioba = cpu_to_be32(rsp_buffer_token);
4944032c5e82SThomas Falcon 	login_buffer->login_rsp_len = cpu_to_be32(rsp_buffer_size);
4945032c5e82SThomas Falcon 
	/* The tx and rx CRQ-number arrays live immediately after the fixed
	 * header, tx list first.
	 */
4946032c5e82SThomas Falcon 	tx_list_p = (__be64 *)((char *)login_buffer +
4947032c5e82SThomas Falcon 				      sizeof(struct ibmvnic_login_buffer));
4948032c5e82SThomas Falcon 	rx_list_p = (__be64 *)((char *)login_buffer +
4949032c5e82SThomas Falcon 				      sizeof(struct ibmvnic_login_buffer) +
4950032c5e82SThomas Falcon 				      sizeof(u64) * adapter->req_tx_queues);
4951032c5e82SThomas Falcon 
4952032c5e82SThomas Falcon 	for (i = 0; i < adapter->req_tx_queues; i++) {
4953032c5e82SThomas Falcon 		if (adapter->tx_scrq[i]) {
4954914789acSLijun Pan 			tx_list_p[i] =
4955914789acSLijun Pan 				cpu_to_be64(adapter->tx_scrq[i]->crq_num);
4956032c5e82SThomas Falcon 		}
4957032c5e82SThomas Falcon 	}
4958032c5e82SThomas Falcon 
4959032c5e82SThomas Falcon 	for (i = 0; i < adapter->req_rx_queues; i++) {
4960032c5e82SThomas Falcon 		if (adapter->rx_scrq[i]) {
4961914789acSLijun Pan 			rx_list_p[i] =
4962914789acSLijun Pan 				cpu_to_be64(adapter->rx_scrq[i]->crq_num);
4963032c5e82SThomas Falcon 		}
4964032c5e82SThomas Falcon 	}
4965032c5e82SThomas Falcon 
496637798d02SNathan Fontenot 	/* Insert vNIC login client data */
496737798d02SNathan Fontenot 	vlcd = (struct vnic_login_client_data *)
496837798d02SNathan Fontenot 		((char *)rx_list_p + (sizeof(u64) * adapter->req_rx_queues));
496937798d02SNathan Fontenot 	login_buffer->client_data_offset =
497037798d02SNathan Fontenot 			cpu_to_be32((char *)vlcd - (char *)login_buffer);
497137798d02SNathan Fontenot 	login_buffer->client_data_len = cpu_to_be32(client_data_len);
497237798d02SNathan Fontenot 
497337798d02SNathan Fontenot 	vnic_add_client_data(adapter, vlcd);
497437798d02SNathan Fontenot 
4975032c5e82SThomas Falcon 	netdev_dbg(adapter->netdev, "Login Buffer:\n");
4976032c5e82SThomas Falcon 	for (i = 0; i < (adapter->login_buf_sz - 1) / 8 + 1; i++) {
4977032c5e82SThomas Falcon 		netdev_dbg(adapter->netdev, "%016lx\n",
4978429aa364SLijun Pan 			   ((unsigned long *)(adapter->login_buf))[i]);
4979032c5e82SThomas Falcon 	}
4980032c5e82SThomas Falcon 
4981032c5e82SThomas Falcon 	memset(&crq, 0, sizeof(crq));
4982032c5e82SThomas Falcon 	crq.login.first = IBMVNIC_CRQ_CMD;
4983032c5e82SThomas Falcon 	crq.login.cmd = LOGIN;
4984032c5e82SThomas Falcon 	crq.login.ioba = cpu_to_be32(buffer_token);
4985032c5e82SThomas Falcon 	crq.login.len = cpu_to_be32(buffer_size);
498676cdc5c5SSukadev Bhattiprolu 
	/* Set before sending so the response handler accepts the LOGIN_RSP;
	 * cleared again if the send itself fails.
	 */
498776cdc5c5SSukadev Bhattiprolu 	adapter->login_pending = true;
4988c98d9cc4SDany Madden 	rc = ibmvnic_send_crq(adapter, &crq);
4989c98d9cc4SDany Madden 	if (rc) {
4990c98d9cc4SDany Madden 		adapter->login_pending = false;
4991c98d9cc4SDany Madden 		netdev_err(adapter->netdev, "Failed to send login, rc=%d\n", rc);
4992411c565bSNick Child 		goto buf_send_failed;
4993c98d9cc4SDany Madden 	}
4994032c5e82SThomas Falcon 
499520a8ab74SThomas Falcon 	return 0;
4996032c5e82SThomas Falcon 
4997411c565bSNick Child buf_send_failed:
4998411c565bSNick Child 	dma_unmap_single(dev, rsp_buffer_token, rsp_buffer_size,
4999411c565bSNick Child 			 DMA_FROM_DEVICE);
5000032c5e82SThomas Falcon buf_rsp_map_failed:
5001032c5e82SThomas Falcon 	kfree(login_rsp_buffer);
5002c98d9cc4SDany Madden 	adapter->login_rsp_buf = NULL;
5003032c5e82SThomas Falcon buf_rsp_alloc_failed:
5004032c5e82SThomas Falcon 	dma_unmap_single(dev, buffer_token, buffer_size, DMA_TO_DEVICE);
5005032c5e82SThomas Falcon buf_map_failed:
5006032c5e82SThomas Falcon 	kfree(login_buffer);
5007c98d9cc4SDany Madden 	adapter->login_buf = NULL;
5008032c5e82SThomas Falcon buf_alloc_failed:
5009b6ee566cSDany Madden 	return -ENOMEM;
5010032c5e82SThomas Falcon }
5011032c5e82SThomas Falcon 
/* send_request_map() - ask the server to map a long-term buffer.
 *
 * @adapter: private adapter structure
 * @addr:    DMA address of the buffer to register
 * @len:     buffer length in bytes
 * @map_id:  driver-chosen identifier later used to unmap it
 *
 * Return: result of ibmvnic_send_crq() for the REQUEST_MAP message.
 */
send_request_map(struct ibmvnic_adapter * adapter,dma_addr_t addr,u32 len,u8 map_id)50129c4eaabdSThomas Falcon static int send_request_map(struct ibmvnic_adapter *adapter, dma_addr_t addr,
5013032c5e82SThomas Falcon 			    u32 len, u8 map_id)
5014032c5e82SThomas Falcon {
5015032c5e82SThomas Falcon 	union ibmvnic_crq crq;
5016032c5e82SThomas Falcon 
5017032c5e82SThomas Falcon 	memset(&crq, 0, sizeof(crq));
5018032c5e82SThomas Falcon 	crq.request_map.first = IBMVNIC_CRQ_CMD;
5019032c5e82SThomas Falcon 	crq.request_map.cmd = REQUEST_MAP;
5020032c5e82SThomas Falcon 	crq.request_map.map_id = map_id;
5021032c5e82SThomas Falcon 	crq.request_map.ioba = cpu_to_be32(addr);
5022032c5e82SThomas Falcon 	crq.request_map.len = cpu_to_be32(len);
50239c4eaabdSThomas Falcon 	return ibmvnic_send_crq(adapter, &crq);
5024032c5e82SThomas Falcon }
5025032c5e82SThomas Falcon 
/* send_request_unmap() - ask the server to release a mapped buffer.
 *
 * @adapter: private adapter structure
 * @map_id:  identifier previously passed to send_request_map()
 *
 * Return: result of ibmvnic_send_crq() for the REQUEST_UNMAP message.
 */
send_request_unmap(struct ibmvnic_adapter * adapter,u8 map_id)50269c4eaabdSThomas Falcon static int send_request_unmap(struct ibmvnic_adapter *adapter, u8 map_id)
5027032c5e82SThomas Falcon {
5028032c5e82SThomas Falcon 	union ibmvnic_crq crq;
5029032c5e82SThomas Falcon 
5030032c5e82SThomas Falcon 	memset(&crq, 0, sizeof(crq));
5031032c5e82SThomas Falcon 	crq.request_unmap.first = IBMVNIC_CRQ_CMD;
5032032c5e82SThomas Falcon 	crq.request_unmap.cmd = REQUEST_UNMAP;
5033032c5e82SThomas Falcon 	crq.request_unmap.map_id = map_id;
50349c4eaabdSThomas Falcon 	return ibmvnic_send_crq(adapter, &crq);
5035032c5e82SThomas Falcon }
5036032c5e82SThomas Falcon 
/* send_query_map() - send a QUERY_MAP CRQ to the VNIC server.
 *
 * @adapter: private adapter structure
 *
 * Fire-and-forget: the send return code is ignored; the reply is handled
 * asynchronously by the CRQ response path.
 */
send_query_map(struct ibmvnic_adapter * adapter)503769980d02SLijun Pan static void send_query_map(struct ibmvnic_adapter *adapter)
5038032c5e82SThomas Falcon {
5039032c5e82SThomas Falcon 	union ibmvnic_crq crq;
5040032c5e82SThomas Falcon 
5041032c5e82SThomas Falcon 	memset(&crq, 0, sizeof(crq));
5042032c5e82SThomas Falcon 	crq.query_map.first = IBMVNIC_CRQ_CMD;
5043032c5e82SThomas Falcon 	crq.query_map.cmd = QUERY_MAP;
5044032c5e82SThomas Falcon 	ibmvnic_send_crq(adapter, &crq);
5045032c5e82SThomas Falcon }
5046032c5e82SThomas Falcon 
5047032c5e82SThomas Falcon /* Send a series of CRQs requesting various capabilities of the VNIC server */
send_query_cap(struct ibmvnic_adapter * adapter)5048491099adSLijun Pan static void send_query_cap(struct ibmvnic_adapter *adapter)
5049032c5e82SThomas Falcon {
5050032c5e82SThomas Falcon 	union ibmvnic_crq crq;
5051151b6a5cSSukadev Bhattiprolu 	int cap_reqs;
5052032c5e82SThomas Falcon 
5053151b6a5cSSukadev Bhattiprolu 	/* We send out 25 QUERY_CAPABILITY CRQs below.  Initialize this count
5054151b6a5cSSukadev Bhattiprolu 	 * upfront. When the tasklet receives a response to all of these, it
5055151b6a5cSSukadev Bhattiprolu 	 * can send out the next protocol messaage (REQUEST_CAPABILITY).
5056151b6a5cSSukadev Bhattiprolu 	 */
5057151b6a5cSSukadev Bhattiprolu 	cap_reqs = 25;
5058151b6a5cSSukadev Bhattiprolu 
5059151b6a5cSSukadev Bhattiprolu 	atomic_set(&adapter->running_cap_crqs, cap_reqs);
5060151b6a5cSSukadev Bhattiprolu 
5061032c5e82SThomas Falcon 	memset(&crq, 0, sizeof(crq));
5062032c5e82SThomas Falcon 	crq.query_capability.first = IBMVNIC_CRQ_CMD;
5063032c5e82SThomas Falcon 	crq.query_capability.cmd = QUERY_CAPABILITY;
5064032c5e82SThomas Falcon 
5065032c5e82SThomas Falcon 	crq.query_capability.capability = cpu_to_be16(MIN_TX_QUEUES);
5066032c5e82SThomas Falcon 	ibmvnic_send_crq(adapter, &crq);
5067151b6a5cSSukadev Bhattiprolu 	cap_reqs--;
5068032c5e82SThomas Falcon 
5069032c5e82SThomas Falcon 	crq.query_capability.capability = cpu_to_be16(MIN_RX_QUEUES);
5070032c5e82SThomas Falcon 	ibmvnic_send_crq(adapter, &crq);
5071151b6a5cSSukadev Bhattiprolu 	cap_reqs--;
5072032c5e82SThomas Falcon 
5073032c5e82SThomas Falcon 	crq.query_capability.capability = cpu_to_be16(MIN_RX_ADD_QUEUES);
5074032c5e82SThomas Falcon 	ibmvnic_send_crq(adapter, &crq);
5075151b6a5cSSukadev Bhattiprolu 	cap_reqs--;
5076032c5e82SThomas Falcon 
5077032c5e82SThomas Falcon 	crq.query_capability.capability = cpu_to_be16(MAX_TX_QUEUES);
5078032c5e82SThomas Falcon 	ibmvnic_send_crq(adapter, &crq);
5079151b6a5cSSukadev Bhattiprolu 	cap_reqs--;
5080032c5e82SThomas Falcon 
5081032c5e82SThomas Falcon 	crq.query_capability.capability = cpu_to_be16(MAX_RX_QUEUES);
5082032c5e82SThomas Falcon 	ibmvnic_send_crq(adapter, &crq);
5083151b6a5cSSukadev Bhattiprolu 	cap_reqs--;
5084032c5e82SThomas Falcon 
5085032c5e82SThomas Falcon 	crq.query_capability.capability = cpu_to_be16(MAX_RX_ADD_QUEUES);
5086032c5e82SThomas Falcon 	ibmvnic_send_crq(adapter, &crq);
5087151b6a5cSSukadev Bhattiprolu 	cap_reqs--;
5088032c5e82SThomas Falcon 
5089032c5e82SThomas Falcon 	crq.query_capability.capability =
5090032c5e82SThomas Falcon 	    cpu_to_be16(MIN_TX_ENTRIES_PER_SUBCRQ);
5091032c5e82SThomas Falcon 	ibmvnic_send_crq(adapter, &crq);
5092151b6a5cSSukadev Bhattiprolu 	cap_reqs--;
5093032c5e82SThomas Falcon 
5094032c5e82SThomas Falcon 	crq.query_capability.capability =
5095032c5e82SThomas Falcon 	    cpu_to_be16(MIN_RX_ADD_ENTRIES_PER_SUBCRQ);
5096032c5e82SThomas Falcon 	ibmvnic_send_crq(adapter, &crq);
5097151b6a5cSSukadev Bhattiprolu 	cap_reqs--;
5098032c5e82SThomas Falcon 
5099032c5e82SThomas Falcon 	crq.query_capability.capability =
5100032c5e82SThomas Falcon 	    cpu_to_be16(MAX_TX_ENTRIES_PER_SUBCRQ);
5101032c5e82SThomas Falcon 	ibmvnic_send_crq(adapter, &crq);
5102151b6a5cSSukadev Bhattiprolu 	cap_reqs--;
5103032c5e82SThomas Falcon 
5104032c5e82SThomas Falcon 	crq.query_capability.capability =
5105032c5e82SThomas Falcon 	    cpu_to_be16(MAX_RX_ADD_ENTRIES_PER_SUBCRQ);
5106032c5e82SThomas Falcon 	ibmvnic_send_crq(adapter, &crq);
5107151b6a5cSSukadev Bhattiprolu 	cap_reqs--;
5108032c5e82SThomas Falcon 
5109032c5e82SThomas Falcon 	crq.query_capability.capability = cpu_to_be16(TCP_IP_OFFLOAD);
5110032c5e82SThomas Falcon 	ibmvnic_send_crq(adapter, &crq);
5111151b6a5cSSukadev Bhattiprolu 	cap_reqs--;
5112032c5e82SThomas Falcon 
5113032c5e82SThomas Falcon 	crq.query_capability.capability = cpu_to_be16(PROMISC_SUPPORTED);
5114032c5e82SThomas Falcon 	ibmvnic_send_crq(adapter, &crq);
5115151b6a5cSSukadev Bhattiprolu 	cap_reqs--;
5116032c5e82SThomas Falcon 
5117032c5e82SThomas Falcon 	crq.query_capability.capability = cpu_to_be16(MIN_MTU);
5118032c5e82SThomas Falcon 	ibmvnic_send_crq(adapter, &crq);
5119151b6a5cSSukadev Bhattiprolu 	cap_reqs--;
5120032c5e82SThomas Falcon 
5121032c5e82SThomas Falcon 	crq.query_capability.capability = cpu_to_be16(MAX_MTU);
5122032c5e82SThomas Falcon 	ibmvnic_send_crq(adapter, &crq);
5123151b6a5cSSukadev Bhattiprolu 	cap_reqs--;
5124032c5e82SThomas Falcon 
5125032c5e82SThomas Falcon 	crq.query_capability.capability = cpu_to_be16(MAX_MULTICAST_FILTERS);
5126032c5e82SThomas Falcon 	ibmvnic_send_crq(adapter, &crq);
5127151b6a5cSSukadev Bhattiprolu 	cap_reqs--;
5128032c5e82SThomas Falcon 
5129032c5e82SThomas Falcon 	crq.query_capability.capability = cpu_to_be16(VLAN_HEADER_INSERTION);
5130032c5e82SThomas Falcon 	ibmvnic_send_crq(adapter, &crq);
5131151b6a5cSSukadev Bhattiprolu 	cap_reqs--;
5132032c5e82SThomas Falcon 
51336052d5e2SMurilo Fossa Vicentini 	crq.query_capability.capability = cpu_to_be16(RX_VLAN_HEADER_INSERTION);
51346052d5e2SMurilo Fossa Vicentini 	ibmvnic_send_crq(adapter, &crq);
5135151b6a5cSSukadev Bhattiprolu 	cap_reqs--;
51366052d5e2SMurilo Fossa Vicentini 
5137032c5e82SThomas Falcon 	crq.query_capability.capability = cpu_to_be16(MAX_TX_SG_ENTRIES);
5138032c5e82SThomas Falcon 	ibmvnic_send_crq(adapter, &crq);
5139151b6a5cSSukadev Bhattiprolu 	cap_reqs--;
5140032c5e82SThomas Falcon 
5141032c5e82SThomas Falcon 	crq.query_capability.capability = cpu_to_be16(RX_SG_SUPPORTED);
5142032c5e82SThomas Falcon 	ibmvnic_send_crq(adapter, &crq);
5143151b6a5cSSukadev Bhattiprolu 	cap_reqs--;
5144032c5e82SThomas Falcon 
5145032c5e82SThomas Falcon 	crq.query_capability.capability = cpu_to_be16(OPT_TX_COMP_SUB_QUEUES);
5146032c5e82SThomas Falcon 	ibmvnic_send_crq(adapter, &crq);
5147151b6a5cSSukadev Bhattiprolu 	cap_reqs--;
5148032c5e82SThomas Falcon 
5149032c5e82SThomas Falcon 	crq.query_capability.capability = cpu_to_be16(OPT_RX_COMP_QUEUES);
5150032c5e82SThomas Falcon 	ibmvnic_send_crq(adapter, &crq);
5151151b6a5cSSukadev Bhattiprolu 	cap_reqs--;
5152032c5e82SThomas Falcon 
5153032c5e82SThomas Falcon 	crq.query_capability.capability =
5154032c5e82SThomas Falcon 			cpu_to_be16(OPT_RX_BUFADD_Q_PER_RX_COMP_Q);
5155032c5e82SThomas Falcon 	ibmvnic_send_crq(adapter, &crq);
5156151b6a5cSSukadev Bhattiprolu 	cap_reqs--;
5157032c5e82SThomas Falcon 
5158032c5e82SThomas Falcon 	crq.query_capability.capability =
5159032c5e82SThomas Falcon 			cpu_to_be16(OPT_TX_ENTRIES_PER_SUBCRQ);
5160032c5e82SThomas Falcon 	ibmvnic_send_crq(adapter, &crq);
5161151b6a5cSSukadev Bhattiprolu 	cap_reqs--;
5162032c5e82SThomas Falcon 
5163032c5e82SThomas Falcon 	crq.query_capability.capability =
5164032c5e82SThomas Falcon 			cpu_to_be16(OPT_RXBA_ENTRIES_PER_SUBCRQ);
5165032c5e82SThomas Falcon 	ibmvnic_send_crq(adapter, &crq);
5166151b6a5cSSukadev Bhattiprolu 	cap_reqs--;
5167032c5e82SThomas Falcon 
5168032c5e82SThomas Falcon 	crq.query_capability.capability = cpu_to_be16(TX_RX_DESC_REQ);
5169151b6a5cSSukadev Bhattiprolu 
5170032c5e82SThomas Falcon 	ibmvnic_send_crq(adapter, &crq);
5171151b6a5cSSukadev Bhattiprolu 	cap_reqs--;
5172151b6a5cSSukadev Bhattiprolu 
5173151b6a5cSSukadev Bhattiprolu 	/* Keep at end to catch any discrepancy between expected and actual
5174151b6a5cSSukadev Bhattiprolu 	 * CRQs sent.
5175151b6a5cSSukadev Bhattiprolu 	 */
5176151b6a5cSSukadev Bhattiprolu 	WARN_ON(cap_reqs != 0);
5177032c5e82SThomas Falcon }
5178032c5e82SThomas Falcon 
send_query_ip_offload(struct ibmvnic_adapter * adapter)517916e811feSLijun Pan static void send_query_ip_offload(struct ibmvnic_adapter *adapter)
518016e811feSLijun Pan {
518116e811feSLijun Pan 	int buf_sz = sizeof(struct ibmvnic_query_ip_offload_buffer);
518216e811feSLijun Pan 	struct device *dev = &adapter->vdev->dev;
518316e811feSLijun Pan 	union ibmvnic_crq crq;
518416e811feSLijun Pan 
518516e811feSLijun Pan 	adapter->ip_offload_tok =
518616e811feSLijun Pan 		dma_map_single(dev,
518716e811feSLijun Pan 			       &adapter->ip_offload_buf,
518816e811feSLijun Pan 			       buf_sz,
518916e811feSLijun Pan 			       DMA_FROM_DEVICE);
519016e811feSLijun Pan 
519116e811feSLijun Pan 	if (dma_mapping_error(dev, adapter->ip_offload_tok)) {
519216e811feSLijun Pan 		if (!firmware_has_feature(FW_FEATURE_CMO))
519316e811feSLijun Pan 			dev_err(dev, "Couldn't map offload buffer\n");
519416e811feSLijun Pan 		return;
519516e811feSLijun Pan 	}
519616e811feSLijun Pan 
519716e811feSLijun Pan 	memset(&crq, 0, sizeof(crq));
519816e811feSLijun Pan 	crq.query_ip_offload.first = IBMVNIC_CRQ_CMD;
519916e811feSLijun Pan 	crq.query_ip_offload.cmd = QUERY_IP_OFFLOAD;
520016e811feSLijun Pan 	crq.query_ip_offload.len = cpu_to_be32(buf_sz);
520116e811feSLijun Pan 	crq.query_ip_offload.ioba =
520216e811feSLijun Pan 	    cpu_to_be32(adapter->ip_offload_tok);
520316e811feSLijun Pan 
520416e811feSLijun Pan 	ibmvnic_send_crq(adapter, &crq);
520516e811feSLijun Pan }
520616e811feSLijun Pan 
send_control_ip_offload(struct ibmvnic_adapter * adapter)520746899bdeSLijun Pan static void send_control_ip_offload(struct ibmvnic_adapter *adapter)
520846899bdeSLijun Pan {
520946899bdeSLijun Pan 	struct ibmvnic_control_ip_offload_buffer *ctrl_buf = &adapter->ip_offload_ctrl;
521046899bdeSLijun Pan 	struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
521146899bdeSLijun Pan 	struct device *dev = &adapter->vdev->dev;
521246899bdeSLijun Pan 	netdev_features_t old_hw_features = 0;
521346899bdeSLijun Pan 	union ibmvnic_crq crq;
521446899bdeSLijun Pan 
521546899bdeSLijun Pan 	adapter->ip_offload_ctrl_tok =
521646899bdeSLijun Pan 		dma_map_single(dev,
521746899bdeSLijun Pan 			       ctrl_buf,
521846899bdeSLijun Pan 			       sizeof(adapter->ip_offload_ctrl),
521946899bdeSLijun Pan 			       DMA_TO_DEVICE);
522046899bdeSLijun Pan 
522146899bdeSLijun Pan 	if (dma_mapping_error(dev, adapter->ip_offload_ctrl_tok)) {
522246899bdeSLijun Pan 		dev_err(dev, "Couldn't map ip offload control buffer\n");
522346899bdeSLijun Pan 		return;
522446899bdeSLijun Pan 	}
522546899bdeSLijun Pan 
522646899bdeSLijun Pan 	ctrl_buf->len = cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
522746899bdeSLijun Pan 	ctrl_buf->version = cpu_to_be32(INITIAL_VERSION_IOB);
522846899bdeSLijun Pan 	ctrl_buf->ipv4_chksum = buf->ipv4_chksum;
522946899bdeSLijun Pan 	ctrl_buf->ipv6_chksum = buf->ipv6_chksum;
523046899bdeSLijun Pan 	ctrl_buf->tcp_ipv4_chksum = buf->tcp_ipv4_chksum;
523146899bdeSLijun Pan 	ctrl_buf->udp_ipv4_chksum = buf->udp_ipv4_chksum;
523246899bdeSLijun Pan 	ctrl_buf->tcp_ipv6_chksum = buf->tcp_ipv6_chksum;
523346899bdeSLijun Pan 	ctrl_buf->udp_ipv6_chksum = buf->udp_ipv6_chksum;
523446899bdeSLijun Pan 	ctrl_buf->large_tx_ipv4 = buf->large_tx_ipv4;
523546899bdeSLijun Pan 	ctrl_buf->large_tx_ipv6 = buf->large_tx_ipv6;
523646899bdeSLijun Pan 
523746899bdeSLijun Pan 	/* large_rx disabled for now, additional features needed */
523846899bdeSLijun Pan 	ctrl_buf->large_rx_ipv4 = 0;
523946899bdeSLijun Pan 	ctrl_buf->large_rx_ipv6 = 0;
524046899bdeSLijun Pan 
524146899bdeSLijun Pan 	if (adapter->state != VNIC_PROBING) {
524246899bdeSLijun Pan 		old_hw_features = adapter->netdev->hw_features;
524346899bdeSLijun Pan 		adapter->netdev->hw_features = 0;
524446899bdeSLijun Pan 	}
524546899bdeSLijun Pan 
524646899bdeSLijun Pan 	adapter->netdev->hw_features = NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO;
524746899bdeSLijun Pan 
524846899bdeSLijun Pan 	if (buf->tcp_ipv4_chksum || buf->udp_ipv4_chksum)
524946899bdeSLijun Pan 		adapter->netdev->hw_features |= NETIF_F_IP_CSUM;
525046899bdeSLijun Pan 
525146899bdeSLijun Pan 	if (buf->tcp_ipv6_chksum || buf->udp_ipv6_chksum)
525246899bdeSLijun Pan 		adapter->netdev->hw_features |= NETIF_F_IPV6_CSUM;
525346899bdeSLijun Pan 
525446899bdeSLijun Pan 	if ((adapter->netdev->features &
525546899bdeSLijun Pan 	    (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)))
525646899bdeSLijun Pan 		adapter->netdev->hw_features |= NETIF_F_RXCSUM;
525746899bdeSLijun Pan 
525846899bdeSLijun Pan 	if (buf->large_tx_ipv4)
525946899bdeSLijun Pan 		adapter->netdev->hw_features |= NETIF_F_TSO;
526046899bdeSLijun Pan 	if (buf->large_tx_ipv6)
526146899bdeSLijun Pan 		adapter->netdev->hw_features |= NETIF_F_TSO6;
526246899bdeSLijun Pan 
526346899bdeSLijun Pan 	if (adapter->state == VNIC_PROBING) {
526446899bdeSLijun Pan 		adapter->netdev->features |= adapter->netdev->hw_features;
526546899bdeSLijun Pan 	} else if (old_hw_features != adapter->netdev->hw_features) {
526646899bdeSLijun Pan 		netdev_features_t tmp = 0;
526746899bdeSLijun Pan 
526846899bdeSLijun Pan 		/* disable features no longer supported */
526946899bdeSLijun Pan 		adapter->netdev->features &= adapter->netdev->hw_features;
527046899bdeSLijun Pan 		/* turn on features now supported if previously enabled */
527146899bdeSLijun Pan 		tmp = (old_hw_features ^ adapter->netdev->hw_features) &
527246899bdeSLijun Pan 			adapter->netdev->hw_features;
527346899bdeSLijun Pan 		adapter->netdev->features |=
527446899bdeSLijun Pan 				tmp & adapter->netdev->wanted_features;
527546899bdeSLijun Pan 	}
527646899bdeSLijun Pan 
527746899bdeSLijun Pan 	memset(&crq, 0, sizeof(crq));
527846899bdeSLijun Pan 	crq.control_ip_offload.first = IBMVNIC_CRQ_CMD;
527946899bdeSLijun Pan 	crq.control_ip_offload.cmd = CONTROL_IP_OFFLOAD;
528046899bdeSLijun Pan 	crq.control_ip_offload.len =
528146899bdeSLijun Pan 	    cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
528246899bdeSLijun Pan 	crq.control_ip_offload.ioba = cpu_to_be32(adapter->ip_offload_ctrl_tok);
528346899bdeSLijun Pan 	ibmvnic_send_crq(adapter, &crq);
528446899bdeSLijun Pan }
528546899bdeSLijun Pan 
handle_vpd_size_rsp(union ibmvnic_crq * crq,struct ibmvnic_adapter * adapter)52864e6759beSDesnes Augusto Nunes do Rosario static void handle_vpd_size_rsp(union ibmvnic_crq *crq,
52874e6759beSDesnes Augusto Nunes do Rosario 				struct ibmvnic_adapter *adapter)
52884e6759beSDesnes Augusto Nunes do Rosario {
52894e6759beSDesnes Augusto Nunes do Rosario 	struct device *dev = &adapter->vdev->dev;
52904e6759beSDesnes Augusto Nunes do Rosario 
52914e6759beSDesnes Augusto Nunes do Rosario 	if (crq->get_vpd_size_rsp.rc.code) {
52924e6759beSDesnes Augusto Nunes do Rosario 		dev_err(dev, "Error retrieving VPD size, rc=%x\n",
52934e6759beSDesnes Augusto Nunes do Rosario 			crq->get_vpd_size_rsp.rc.code);
52944e6759beSDesnes Augusto Nunes do Rosario 		complete(&adapter->fw_done);
52954e6759beSDesnes Augusto Nunes do Rosario 		return;
52964e6759beSDesnes Augusto Nunes do Rosario 	}
52974e6759beSDesnes Augusto Nunes do Rosario 
52984e6759beSDesnes Augusto Nunes do Rosario 	adapter->vpd->len = be64_to_cpu(crq->get_vpd_size_rsp.len);
52994e6759beSDesnes Augusto Nunes do Rosario 	complete(&adapter->fw_done);
53004e6759beSDesnes Augusto Nunes do Rosario }
53014e6759beSDesnes Augusto Nunes do Rosario 
handle_vpd_rsp(union ibmvnic_crq * crq,struct ibmvnic_adapter * adapter)53024e6759beSDesnes Augusto Nunes do Rosario static void handle_vpd_rsp(union ibmvnic_crq *crq,
53034e6759beSDesnes Augusto Nunes do Rosario 			   struct ibmvnic_adapter *adapter)
53044e6759beSDesnes Augusto Nunes do Rosario {
53054e6759beSDesnes Augusto Nunes do Rosario 	struct device *dev = &adapter->vdev->dev;
530621a2545bSDesnes Augusto Nunes do Rosario 	unsigned char *substr = NULL;
53074e6759beSDesnes Augusto Nunes do Rosario 	u8 fw_level_len = 0;
53084e6759beSDesnes Augusto Nunes do Rosario 
53094e6759beSDesnes Augusto Nunes do Rosario 	memset(adapter->fw_version, 0, 32);
53104e6759beSDesnes Augusto Nunes do Rosario 
53114e6759beSDesnes Augusto Nunes do Rosario 	dma_unmap_single(dev, adapter->vpd->dma_addr, adapter->vpd->len,
53124e6759beSDesnes Augusto Nunes do Rosario 			 DMA_FROM_DEVICE);
53134e6759beSDesnes Augusto Nunes do Rosario 
53144e6759beSDesnes Augusto Nunes do Rosario 	if (crq->get_vpd_rsp.rc.code) {
53154e6759beSDesnes Augusto Nunes do Rosario 		dev_err(dev, "Error retrieving VPD from device, rc=%x\n",
53164e6759beSDesnes Augusto Nunes do Rosario 			crq->get_vpd_rsp.rc.code);
53174e6759beSDesnes Augusto Nunes do Rosario 		goto complete;
53184e6759beSDesnes Augusto Nunes do Rosario 	}
53194e6759beSDesnes Augusto Nunes do Rosario 
53204e6759beSDesnes Augusto Nunes do Rosario 	/* get the position of the firmware version info
53214e6759beSDesnes Augusto Nunes do Rosario 	 * located after the ASCII 'RM' substring in the buffer
53224e6759beSDesnes Augusto Nunes do Rosario 	 */
53234e6759beSDesnes Augusto Nunes do Rosario 	substr = strnstr(adapter->vpd->buff, "RM", adapter->vpd->len);
53244e6759beSDesnes Augusto Nunes do Rosario 	if (!substr) {
5325a107311dSDesnes Augusto Nunes do Rosario 		dev_info(dev, "Warning - No FW level has been provided in the VPD buffer by the VIOS Server\n");
53264e6759beSDesnes Augusto Nunes do Rosario 		goto complete;
53274e6759beSDesnes Augusto Nunes do Rosario 	}
53284e6759beSDesnes Augusto Nunes do Rosario 
53294e6759beSDesnes Augusto Nunes do Rosario 	/* get length of firmware level ASCII substring */
53304e6759beSDesnes Augusto Nunes do Rosario 	if ((substr + 2) < (adapter->vpd->buff + adapter->vpd->len)) {
53314e6759beSDesnes Augusto Nunes do Rosario 		fw_level_len = *(substr + 2);
53324e6759beSDesnes Augusto Nunes do Rosario 	} else {
53334e6759beSDesnes Augusto Nunes do Rosario 		dev_info(dev, "Length of FW substr extrapolated VDP buff\n");
53344e6759beSDesnes Augusto Nunes do Rosario 		goto complete;
53354e6759beSDesnes Augusto Nunes do Rosario 	}
53364e6759beSDesnes Augusto Nunes do Rosario 
53374e6759beSDesnes Augusto Nunes do Rosario 	/* copy firmware version string from vpd into adapter */
53384e6759beSDesnes Augusto Nunes do Rosario 	if ((substr + 3 + fw_level_len) <
53394e6759beSDesnes Augusto Nunes do Rosario 	    (adapter->vpd->buff + adapter->vpd->len)) {
534021a2545bSDesnes Augusto Nunes do Rosario 		strncpy((char *)adapter->fw_version, substr + 3, fw_level_len);
53414e6759beSDesnes Augusto Nunes do Rosario 	} else {
53424e6759beSDesnes Augusto Nunes do Rosario 		dev_info(dev, "FW substr extrapolated VPD buff\n");
53434e6759beSDesnes Augusto Nunes do Rosario 	}
53444e6759beSDesnes Augusto Nunes do Rosario 
53454e6759beSDesnes Augusto Nunes do Rosario complete:
534621a2545bSDesnes Augusto Nunes do Rosario 	if (adapter->fw_version[0] == '\0')
53470b217d3dSLijun Pan 		strscpy((char *)adapter->fw_version, "N/A", sizeof(adapter->fw_version));
53484e6759beSDesnes Augusto Nunes do Rosario 	complete(&adapter->fw_done);
53494e6759beSDesnes Augusto Nunes do Rosario }
53504e6759beSDesnes Augusto Nunes do Rosario 
handle_query_ip_offload_rsp(struct ibmvnic_adapter * adapter)5351032c5e82SThomas Falcon static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
5352032c5e82SThomas Falcon {
5353032c5e82SThomas Falcon 	struct device *dev = &adapter->vdev->dev;
5354032c5e82SThomas Falcon 	struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
5355032c5e82SThomas Falcon 	int i;
5356032c5e82SThomas Falcon 
5357032c5e82SThomas Falcon 	dma_unmap_single(dev, adapter->ip_offload_tok,
5358032c5e82SThomas Falcon 			 sizeof(adapter->ip_offload_buf), DMA_FROM_DEVICE);
5359032c5e82SThomas Falcon 
5360032c5e82SThomas Falcon 	netdev_dbg(adapter->netdev, "Query IP Offload Buffer:\n");
5361032c5e82SThomas Falcon 	for (i = 0; i < (sizeof(adapter->ip_offload_buf) - 1) / 8 + 1; i++)
5362032c5e82SThomas Falcon 		netdev_dbg(adapter->netdev, "%016lx\n",
5363429aa364SLijun Pan 			   ((unsigned long *)(buf))[i]);
5364032c5e82SThomas Falcon 
5365032c5e82SThomas Falcon 	netdev_dbg(adapter->netdev, "ipv4_chksum = %d\n", buf->ipv4_chksum);
5366032c5e82SThomas Falcon 	netdev_dbg(adapter->netdev, "ipv6_chksum = %d\n", buf->ipv6_chksum);
5367032c5e82SThomas Falcon 	netdev_dbg(adapter->netdev, "tcp_ipv4_chksum = %d\n",
5368032c5e82SThomas Falcon 		   buf->tcp_ipv4_chksum);
5369032c5e82SThomas Falcon 	netdev_dbg(adapter->netdev, "tcp_ipv6_chksum = %d\n",
5370032c5e82SThomas Falcon 		   buf->tcp_ipv6_chksum);
5371032c5e82SThomas Falcon 	netdev_dbg(adapter->netdev, "udp_ipv4_chksum = %d\n",
5372032c5e82SThomas Falcon 		   buf->udp_ipv4_chksum);
5373032c5e82SThomas Falcon 	netdev_dbg(adapter->netdev, "udp_ipv6_chksum = %d\n",
5374032c5e82SThomas Falcon 		   buf->udp_ipv6_chksum);
5375032c5e82SThomas Falcon 	netdev_dbg(adapter->netdev, "large_tx_ipv4 = %d\n",
5376032c5e82SThomas Falcon 		   buf->large_tx_ipv4);
5377032c5e82SThomas Falcon 	netdev_dbg(adapter->netdev, "large_tx_ipv6 = %d\n",
5378032c5e82SThomas Falcon 		   buf->large_tx_ipv6);
5379032c5e82SThomas Falcon 	netdev_dbg(adapter->netdev, "large_rx_ipv4 = %d\n",
5380032c5e82SThomas Falcon 		   buf->large_rx_ipv4);
5381032c5e82SThomas Falcon 	netdev_dbg(adapter->netdev, "large_rx_ipv6 = %d\n",
5382032c5e82SThomas Falcon 		   buf->large_rx_ipv6);
5383032c5e82SThomas Falcon 	netdev_dbg(adapter->netdev, "max_ipv4_hdr_sz = %d\n",
5384032c5e82SThomas Falcon 		   buf->max_ipv4_header_size);
5385032c5e82SThomas Falcon 	netdev_dbg(adapter->netdev, "max_ipv6_hdr_sz = %d\n",
5386032c5e82SThomas Falcon 		   buf->max_ipv6_header_size);
5387032c5e82SThomas Falcon 	netdev_dbg(adapter->netdev, "max_tcp_hdr_size = %d\n",
5388032c5e82SThomas Falcon 		   buf->max_tcp_header_size);
5389032c5e82SThomas Falcon 	netdev_dbg(adapter->netdev, "max_udp_hdr_size = %d\n",
5390032c5e82SThomas Falcon 		   buf->max_udp_header_size);
5391032c5e82SThomas Falcon 	netdev_dbg(adapter->netdev, "max_large_tx_size = %d\n",
5392032c5e82SThomas Falcon 		   buf->max_large_tx_size);
5393032c5e82SThomas Falcon 	netdev_dbg(adapter->netdev, "max_large_rx_size = %d\n",
5394032c5e82SThomas Falcon 		   buf->max_large_rx_size);
5395032c5e82SThomas Falcon 	netdev_dbg(adapter->netdev, "ipv6_ext_hdr = %d\n",
5396032c5e82SThomas Falcon 		   buf->ipv6_extension_header);
5397032c5e82SThomas Falcon 	netdev_dbg(adapter->netdev, "tcp_pseudosum_req = %d\n",
5398032c5e82SThomas Falcon 		   buf->tcp_pseudosum_req);
5399032c5e82SThomas Falcon 	netdev_dbg(adapter->netdev, "num_ipv6_ext_hd = %d\n",
5400032c5e82SThomas Falcon 		   buf->num_ipv6_ext_headers);
5401032c5e82SThomas Falcon 	netdev_dbg(adapter->netdev, "off_ipv6_ext_hd = %d\n",
5402032c5e82SThomas Falcon 		   buf->off_ipv6_ext_headers);
5403032c5e82SThomas Falcon 
540446899bdeSLijun Pan 	send_control_ip_offload(adapter);
5405032c5e82SThomas Falcon }
5406032c5e82SThomas Falcon 
/* Translate a firmware ERROR_INDICATION cause code into a human-readable
 * string for logging; unrecognized codes map to "unknown".
 */
static const char *ibmvnic_fw_err_cause(u16 cause)
{
	static const struct {
		u16 cause;
		const char *desc;
	} causes[] = {
		{ ADAPTER_PROBLEM, "adapter problem" },
		{ BUS_PROBLEM, "bus problem" },
		{ FW_PROBLEM, "firmware problem" },
		{ DD_PROBLEM, "device driver problem" },
		{ EEH_RECOVERY, "EEH recovery" },
		{ FW_UPDATED, "firmware updated" },
		{ LOW_MEMORY, "low Memory" },
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(causes); i++)
		if (causes[i].cause == cause)
			return causes[i].desc;

	return "unknown";
}
5428c9008d33SThomas Falcon 
handle_error_indication(union ibmvnic_crq * crq,struct ibmvnic_adapter * adapter)54292f9de9baSNathan Fontenot static void handle_error_indication(union ibmvnic_crq *crq,
54302f9de9baSNathan Fontenot 				    struct ibmvnic_adapter *adapter)
54312f9de9baSNathan Fontenot {
54322f9de9baSNathan Fontenot 	struct device *dev = &adapter->vdev->dev;
5433c9008d33SThomas Falcon 	u16 cause;
54342f9de9baSNathan Fontenot 
5435c9008d33SThomas Falcon 	cause = be16_to_cpu(crq->error_indication.error_cause);
5436c9008d33SThomas Falcon 
5437c9008d33SThomas Falcon 	dev_warn_ratelimited(dev,
5438c9008d33SThomas Falcon 			     "Firmware reports %serror, cause: %s. Starting recovery...\n",
54392f9de9baSNathan Fontenot 			     crq->error_indication.flags
54402f9de9baSNathan Fontenot 				& IBMVNIC_FATAL_ERROR ? "FATAL " : "",
5441c9008d33SThomas Falcon 			     ibmvnic_fw_err_cause(cause));
54422f9de9baSNathan Fontenot 
5443ed651a10SNathan Fontenot 	if (crq->error_indication.flags & IBMVNIC_FATAL_ERROR)
5444ed651a10SNathan Fontenot 		ibmvnic_reset(adapter, VNIC_RESET_FATAL);
54458cb31cfcSJohn Allen 	else
54468cb31cfcSJohn Allen 		ibmvnic_reset(adapter, VNIC_RESET_NON_FATAL);
5447032c5e82SThomas Falcon }
5448032c5e82SThomas Falcon 
handle_change_mac_rsp(union ibmvnic_crq * crq,struct ibmvnic_adapter * adapter)5449f813614fSThomas Falcon static int handle_change_mac_rsp(union ibmvnic_crq *crq,
5450032c5e82SThomas Falcon 				 struct ibmvnic_adapter *adapter)
5451032c5e82SThomas Falcon {
5452032c5e82SThomas Falcon 	struct net_device *netdev = adapter->netdev;
5453032c5e82SThomas Falcon 	struct device *dev = &adapter->vdev->dev;
5454032c5e82SThomas Falcon 	long rc;
5455032c5e82SThomas Falcon 
5456032c5e82SThomas Falcon 	rc = crq->change_mac_addr_rsp.rc.code;
5457032c5e82SThomas Falcon 	if (rc) {
5458032c5e82SThomas Falcon 		dev_err(dev, "Error %ld in CHANGE_MAC_ADDR_RSP\n", rc);
5459f813614fSThomas Falcon 		goto out;
5460032c5e82SThomas Falcon 	}
5461d9b0e599SLijun Pan 	/* crq->change_mac_addr.mac_addr is the requested one
5462d9b0e599SLijun Pan 	 * crq->change_mac_addr_rsp.mac_addr is the returned valid one.
5463d9b0e599SLijun Pan 	 */
5464f3956ebbSJakub Kicinski 	eth_hw_addr_set(netdev, &crq->change_mac_addr_rsp.mac_addr[0]);
5465d9b0e599SLijun Pan 	ether_addr_copy(adapter->mac_addr,
5466d9b0e599SLijun Pan 			&crq->change_mac_addr_rsp.mac_addr[0]);
5467f813614fSThomas Falcon out:
5468f813614fSThomas Falcon 	complete(&adapter->fw_done);
5469f813614fSThomas Falcon 	return rc;
5470032c5e82SThomas Falcon }
5471032c5e82SThomas Falcon 
/* Handle a REQUEST_CAPABILITY response from the VNIC server.
 *
 * One response arrives per capability requested by send_request_cap();
 * running_cap_crqs counts how many are still outstanding.  On
 * PARTIALSUCCESS the server proposes a different value, which is adopted
 * (except for MTU, which reverts to the saved fallback) and the whole
 * request sequence is resent.  Once all responses are in, the init
 * sequence advances to querying IP offload support.
 */
static void handle_request_cap_rsp(union ibmvnic_crq *crq,
				   struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	u64 *req_value;
	char *name;

	atomic_dec(&adapter->running_cap_crqs);
	netdev_dbg(adapter->netdev, "Outstanding request-caps: %d\n",
		   atomic_read(&adapter->running_cap_crqs));
	/* map the echoed capability id to the adapter field that holds
	 * the value we asked for
	 */
	switch (be16_to_cpu(crq->request_capability_rsp.capability)) {
	case REQ_TX_QUEUES:
		req_value = &adapter->req_tx_queues;
		name = "tx";
		break;
	case REQ_RX_QUEUES:
		req_value = &adapter->req_rx_queues;
		name = "rx";
		break;
	case REQ_RX_ADD_QUEUES:
		req_value = &adapter->req_rx_add_queues;
		name = "rx_add";
		break;
	case REQ_TX_ENTRIES_PER_SUBCRQ:
		req_value = &adapter->req_tx_entries_per_subcrq;
		name = "tx_entries_per_subcrq";
		break;
	case REQ_RX_ADD_ENTRIES_PER_SUBCRQ:
		req_value = &adapter->req_rx_add_entries_per_subcrq;
		name = "rx_add_entries_per_subcrq";
		break;
	case REQ_MTU:
		req_value = &adapter->req_mtu;
		name = "mtu";
		break;
	case PROMISC_REQUESTED:
		req_value = &adapter->promisc;
		name = "promisc";
		break;
	default:
		dev_err(dev, "Got invalid cap request rsp %d\n",
			crq->request_capability.capability);
		return;
	}

	switch (crq->request_capability_rsp.rc.code) {
	case SUCCESS:
		break;
	case PARTIALSUCCESS:
		dev_info(dev, "req=%lld, rsp=%ld in %s queue, retrying.\n",
			 *req_value,
			 (long)be64_to_cpu(crq->request_capability_rsp.number),
			 name);

		/* server counter-proposed a value: take it, except for MTU
		 * where we fall back to the value saved before negotiation
		 */
		if (be16_to_cpu(crq->request_capability_rsp.capability) ==
		    REQ_MTU) {
			pr_err("mtu of %llu is not supported. Reverting.\n",
			       *req_value);
			*req_value = adapter->fallback.mtu;
		} else {
			*req_value =
				be64_to_cpu(crq->request_capability_rsp.number);
		}

		/* resend the full request sequence; this re-arms
		 * running_cap_crqs, so do not fall through below
		 */
		send_request_cap(adapter, 1);
		return;
	default:
		dev_err(dev, "Error %d in request cap rsp\n",
			crq->request_capability_rsp.rc.code);
		return;
	}

	/* Done receiving requested capabilities, query IP offload support */
	if (atomic_read(&adapter->running_cap_crqs) == 0)
		send_query_ip_offload(adapter);
}
5548032c5e82SThomas Falcon 
/* Handle a LOGIN_RSP CRQ from the VNIC server.
 *
 * Validates the response buffer against the login request that was sent,
 * records the negotiated rx buffer size and the tx/rx sub-CRQ handles on
 * the adapter, then completes init_done to wake the thread waiting on the
 * login.  Returns 0 (including for ignored/failed responses that are
 * reported through init_done_rc), or -EIO after scheduling a fatal reset
 * when the response is inconsistent or malformed.
 */
static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
			    struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct net_device *netdev = adapter->netdev;
	struct ibmvnic_login_rsp_buffer *login_rsp = adapter->login_rsp_buf;
	struct ibmvnic_login_buffer *login = adapter->login_buf;
	u64 *tx_handle_array;
	u64 *rx_handle_array;
	int num_tx_pools;
	int num_rx_pools;
	u64 *size_array;
	u32 rsp_len;
	int i;

	/* CHECK: Test/set of login_pending does not need to be atomic
	 * because only ibmvnic_tasklet tests/clears this.
	 */
	if (!adapter->login_pending) {
		/* Stale response (e.g. from before a reset) — drop it */
		netdev_warn(netdev, "Ignoring unexpected login response\n");
		return 0;
	}
	adapter->login_pending = false;

	/* If the number of queues requested can't be allocated by the
	 * server, the login response will return with code 1. We will need
	 * to resend the login buffer with fewer queues requested.
	 */
	if (login_rsp_crq->generic.rc.code) {
		/* Hand the failure code to the waiter via init_done_rc */
		adapter->init_done_rc = login_rsp_crq->generic.rc.code;
		complete(&adapter->init_done);
		return 0;
	}

	if (adapter->failover_pending) {
		/* A failover reset will redo the login; don't parse this one */
		adapter->init_done_rc = -EAGAIN;
		netdev_dbg(netdev, "Failover pending, ignoring login response\n");
		complete(&adapter->init_done);
		/* login response buffer will be released on reset */
		return 0;
	}

	netdev->mtu = adapter->req_mtu - ETH_HLEN;

	/* Dump the response buffer one 64-bit word at a time */
	netdev_dbg(adapter->netdev, "Login Response Buffer:\n");
	for (i = 0; i < (adapter->login_rsp_buf_sz - 1) / 8 + 1; i++) {
		netdev_dbg(adapter->netdev, "%016lx\n",
			   ((unsigned long *)(adapter->login_rsp_buf))[i]);
	}

	/* Sanity checks */
	if (login->num_txcomp_subcrqs != login_rsp->num_txsubm_subcrqs ||
	    (be32_to_cpu(login->num_rxcomp_subcrqs) *
	     adapter->req_rx_add_queues !=
	     be32_to_cpu(login_rsp->num_rxadd_subcrqs))) {
		/* Response doesn't match what we asked for — unrecoverable
		 * without a full reset.
		 */
		dev_err(dev, "FATAL: Inconsistent login and login rsp\n");
		ibmvnic_reset(adapter, VNIC_RESET_FATAL);
		return -EIO;
	}

	/* Every advertised sub-buffer offset must lie inside the response */
	rsp_len = be32_to_cpu(login_rsp->len);
	if (be32_to_cpu(login->login_rsp_len) < rsp_len ||
	    rsp_len <= be32_to_cpu(login_rsp->off_txsubm_subcrqs) ||
	    rsp_len <= be32_to_cpu(login_rsp->off_rxadd_subcrqs) ||
	    rsp_len <= be32_to_cpu(login_rsp->off_rxadd_buff_size) ||
	    rsp_len <= be32_to_cpu(login_rsp->off_supp_tx_desc)) {
		/* This can happen if a login request times out and there are
		 * 2 outstanding login requests sent, the LOGIN_RSP crq
		 * could have been for the older login request. So we are
		 * parsing the newer response buffer which may be incomplete
		 */
		dev_err(dev, "FATAL: Login rsp offsets/lengths invalid\n");
		ibmvnic_reset(adapter, VNIC_RESET_FATAL);
		return -EIO;
	}

	size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
		be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));
	/* variable buffer sizes are not supported, so just read the
	 * first entry.
	 */
	adapter->cur_rx_buf_sz = be64_to_cpu(size_array[0]);

	num_tx_pools = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
	num_rx_pools = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);

	/* Locate the tx/rx sub-CRQ handle lists at their advertised offsets
	 * and copy each handle into its sub-CRQ structure.
	 */
	tx_handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
				  be32_to_cpu(adapter->login_rsp_buf->off_txsubm_subcrqs));
	rx_handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
				  be32_to_cpu(adapter->login_rsp_buf->off_rxadd_subcrqs));

	for (i = 0; i < num_tx_pools; i++)
		adapter->tx_scrq[i]->handle = tx_handle_array[i];

	for (i = 0; i < num_rx_pools; i++)
		adapter->rx_scrq[i]->handle = rx_handle_array[i];

	adapter->num_active_tx_scrqs = num_tx_pools;
	adapter->num_active_rx_scrqs = num_rx_pools;
	/* Done with both login buffers; wake the waiter */
	release_login_rsp_buffer(adapter);
	release_login_buffer(adapter);
	complete(&adapter->init_done);

	return 0;
}
5654032c5e82SThomas Falcon 
handle_request_unmap_rsp(union ibmvnic_crq * crq,struct ibmvnic_adapter * adapter)5655032c5e82SThomas Falcon static void handle_request_unmap_rsp(union ibmvnic_crq *crq,
5656032c5e82SThomas Falcon 				     struct ibmvnic_adapter *adapter)
5657032c5e82SThomas Falcon {
5658032c5e82SThomas Falcon 	struct device *dev = &adapter->vdev->dev;
5659032c5e82SThomas Falcon 	long rc;
5660032c5e82SThomas Falcon 
5661032c5e82SThomas Falcon 	rc = crq->request_unmap_rsp.rc.code;
5662032c5e82SThomas Falcon 	if (rc)
5663032c5e82SThomas Falcon 		dev_err(dev, "Error %ld in REQUEST_UNMAP_RSP\n", rc);
5664032c5e82SThomas Falcon }
5665032c5e82SThomas Falcon 
handle_query_map_rsp(union ibmvnic_crq * crq,struct ibmvnic_adapter * adapter)5666032c5e82SThomas Falcon static void handle_query_map_rsp(union ibmvnic_crq *crq,
5667032c5e82SThomas Falcon 				 struct ibmvnic_adapter *adapter)
5668032c5e82SThomas Falcon {
5669032c5e82SThomas Falcon 	struct net_device *netdev = adapter->netdev;
5670032c5e82SThomas Falcon 	struct device *dev = &adapter->vdev->dev;
5671032c5e82SThomas Falcon 	long rc;
5672032c5e82SThomas Falcon 
5673032c5e82SThomas Falcon 	rc = crq->query_map_rsp.rc.code;
5674032c5e82SThomas Falcon 	if (rc) {
5675032c5e82SThomas Falcon 		dev_err(dev, "Error %ld in QUERY_MAP_RSP\n", rc);
5676032c5e82SThomas Falcon 		return;
5677032c5e82SThomas Falcon 	}
56780f2bf318SSukadev Bhattiprolu 	netdev_dbg(netdev, "page_size = %d\ntot_pages = %u\nfree_pages = %u\n",
56790f2bf318SSukadev Bhattiprolu 		   crq->query_map_rsp.page_size,
56800f2bf318SSukadev Bhattiprolu 		   __be32_to_cpu(crq->query_map_rsp.tot_pages),
56810f2bf318SSukadev Bhattiprolu 		   __be32_to_cpu(crq->query_map_rsp.free_pages));
5682032c5e82SThomas Falcon }
5683032c5e82SThomas Falcon 
handle_query_cap_rsp(union ibmvnic_crq * crq,struct ibmvnic_adapter * adapter)5684032c5e82SThomas Falcon static void handle_query_cap_rsp(union ibmvnic_crq *crq,
5685032c5e82SThomas Falcon 				 struct ibmvnic_adapter *adapter)
5686032c5e82SThomas Falcon {
5687032c5e82SThomas Falcon 	struct net_device *netdev = adapter->netdev;
5688032c5e82SThomas Falcon 	struct device *dev = &adapter->vdev->dev;
5689032c5e82SThomas Falcon 	long rc;
5690032c5e82SThomas Falcon 
5691901e040aSThomas Falcon 	atomic_dec(&adapter->running_cap_crqs);
5692032c5e82SThomas Falcon 	netdev_dbg(netdev, "Outstanding queries: %d\n",
5693901e040aSThomas Falcon 		   atomic_read(&adapter->running_cap_crqs));
5694032c5e82SThomas Falcon 	rc = crq->query_capability.rc.code;
5695032c5e82SThomas Falcon 	if (rc) {
5696032c5e82SThomas Falcon 		dev_err(dev, "Error %ld in QUERY_CAP_RSP\n", rc);
5697032c5e82SThomas Falcon 		goto out;
5698032c5e82SThomas Falcon 	}
5699032c5e82SThomas Falcon 
5700032c5e82SThomas Falcon 	switch (be16_to_cpu(crq->query_capability.capability)) {
5701032c5e82SThomas Falcon 	case MIN_TX_QUEUES:
5702032c5e82SThomas Falcon 		adapter->min_tx_queues =
5703de89e854SThomas Falcon 		    be64_to_cpu(crq->query_capability.number);
5704032c5e82SThomas Falcon 		netdev_dbg(netdev, "min_tx_queues = %lld\n",
5705032c5e82SThomas Falcon 			   adapter->min_tx_queues);
5706032c5e82SThomas Falcon 		break;
5707032c5e82SThomas Falcon 	case MIN_RX_QUEUES:
5708032c5e82SThomas Falcon 		adapter->min_rx_queues =
5709de89e854SThomas Falcon 		    be64_to_cpu(crq->query_capability.number);
5710032c5e82SThomas Falcon 		netdev_dbg(netdev, "min_rx_queues = %lld\n",
5711032c5e82SThomas Falcon 			   adapter->min_rx_queues);
5712032c5e82SThomas Falcon 		break;
5713032c5e82SThomas Falcon 	case MIN_RX_ADD_QUEUES:
5714032c5e82SThomas Falcon 		adapter->min_rx_add_queues =
5715de89e854SThomas Falcon 		    be64_to_cpu(crq->query_capability.number);
5716032c5e82SThomas Falcon 		netdev_dbg(netdev, "min_rx_add_queues = %lld\n",
5717032c5e82SThomas Falcon 			   adapter->min_rx_add_queues);
5718032c5e82SThomas Falcon 		break;
5719032c5e82SThomas Falcon 	case MAX_TX_QUEUES:
5720032c5e82SThomas Falcon 		adapter->max_tx_queues =
5721de89e854SThomas Falcon 		    be64_to_cpu(crq->query_capability.number);
5722032c5e82SThomas Falcon 		netdev_dbg(netdev, "max_tx_queues = %lld\n",
5723032c5e82SThomas Falcon 			   adapter->max_tx_queues);
5724032c5e82SThomas Falcon 		break;
5725032c5e82SThomas Falcon 	case MAX_RX_QUEUES:
5726032c5e82SThomas Falcon 		adapter->max_rx_queues =
5727de89e854SThomas Falcon 		    be64_to_cpu(crq->query_capability.number);
5728032c5e82SThomas Falcon 		netdev_dbg(netdev, "max_rx_queues = %lld\n",
5729032c5e82SThomas Falcon 			   adapter->max_rx_queues);
5730032c5e82SThomas Falcon 		break;
5731032c5e82SThomas Falcon 	case MAX_RX_ADD_QUEUES:
5732032c5e82SThomas Falcon 		adapter->max_rx_add_queues =
5733de89e854SThomas Falcon 		    be64_to_cpu(crq->query_capability.number);
5734032c5e82SThomas Falcon 		netdev_dbg(netdev, "max_rx_add_queues = %lld\n",
5735032c5e82SThomas Falcon 			   adapter->max_rx_add_queues);
5736032c5e82SThomas Falcon 		break;
5737032c5e82SThomas Falcon 	case MIN_TX_ENTRIES_PER_SUBCRQ:
5738032c5e82SThomas Falcon 		adapter->min_tx_entries_per_subcrq =
5739de89e854SThomas Falcon 		    be64_to_cpu(crq->query_capability.number);
5740032c5e82SThomas Falcon 		netdev_dbg(netdev, "min_tx_entries_per_subcrq = %lld\n",
5741032c5e82SThomas Falcon 			   adapter->min_tx_entries_per_subcrq);
5742032c5e82SThomas Falcon 		break;
5743032c5e82SThomas Falcon 	case MIN_RX_ADD_ENTRIES_PER_SUBCRQ:
5744032c5e82SThomas Falcon 		adapter->min_rx_add_entries_per_subcrq =
5745de89e854SThomas Falcon 		    be64_to_cpu(crq->query_capability.number);
5746032c5e82SThomas Falcon 		netdev_dbg(netdev, "min_rx_add_entrs_per_subcrq = %lld\n",
5747032c5e82SThomas Falcon 			   adapter->min_rx_add_entries_per_subcrq);
5748032c5e82SThomas Falcon 		break;
5749032c5e82SThomas Falcon 	case MAX_TX_ENTRIES_PER_SUBCRQ:
5750032c5e82SThomas Falcon 		adapter->max_tx_entries_per_subcrq =
5751de89e854SThomas Falcon 		    be64_to_cpu(crq->query_capability.number);
5752032c5e82SThomas Falcon 		netdev_dbg(netdev, "max_tx_entries_per_subcrq = %lld\n",
5753032c5e82SThomas Falcon 			   adapter->max_tx_entries_per_subcrq);
5754032c5e82SThomas Falcon 		break;
5755032c5e82SThomas Falcon 	case MAX_RX_ADD_ENTRIES_PER_SUBCRQ:
5756032c5e82SThomas Falcon 		adapter->max_rx_add_entries_per_subcrq =
5757de89e854SThomas Falcon 		    be64_to_cpu(crq->query_capability.number);
5758032c5e82SThomas Falcon 		netdev_dbg(netdev, "max_rx_add_entrs_per_subcrq = %lld\n",
5759032c5e82SThomas Falcon 			   adapter->max_rx_add_entries_per_subcrq);
5760032c5e82SThomas Falcon 		break;
5761032c5e82SThomas Falcon 	case TCP_IP_OFFLOAD:
5762032c5e82SThomas Falcon 		adapter->tcp_ip_offload =
5763de89e854SThomas Falcon 		    be64_to_cpu(crq->query_capability.number);
5764032c5e82SThomas Falcon 		netdev_dbg(netdev, "tcp_ip_offload = %lld\n",
5765032c5e82SThomas Falcon 			   adapter->tcp_ip_offload);
5766032c5e82SThomas Falcon 		break;
5767032c5e82SThomas Falcon 	case PROMISC_SUPPORTED:
5768032c5e82SThomas Falcon 		adapter->promisc_supported =
5769de89e854SThomas Falcon 		    be64_to_cpu(crq->query_capability.number);
5770032c5e82SThomas Falcon 		netdev_dbg(netdev, "promisc_supported = %lld\n",
5771032c5e82SThomas Falcon 			   adapter->promisc_supported);
5772032c5e82SThomas Falcon 		break;
5773032c5e82SThomas Falcon 	case MIN_MTU:
5774de89e854SThomas Falcon 		adapter->min_mtu = be64_to_cpu(crq->query_capability.number);
5775f39f0d1eSThomas Falcon 		netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
5776032c5e82SThomas Falcon 		netdev_dbg(netdev, "min_mtu = %lld\n", adapter->min_mtu);
5777032c5e82SThomas Falcon 		break;
5778032c5e82SThomas Falcon 	case MAX_MTU:
5779de89e854SThomas Falcon 		adapter->max_mtu = be64_to_cpu(crq->query_capability.number);
5780f39f0d1eSThomas Falcon 		netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
5781032c5e82SThomas Falcon 		netdev_dbg(netdev, "max_mtu = %lld\n", adapter->max_mtu);
5782032c5e82SThomas Falcon 		break;
5783032c5e82SThomas Falcon 	case MAX_MULTICAST_FILTERS:
5784032c5e82SThomas Falcon 		adapter->max_multicast_filters =
5785de89e854SThomas Falcon 		    be64_to_cpu(crq->query_capability.number);
5786032c5e82SThomas Falcon 		netdev_dbg(netdev, "max_multicast_filters = %lld\n",
5787032c5e82SThomas Falcon 			   adapter->max_multicast_filters);
5788032c5e82SThomas Falcon 		break;
5789032c5e82SThomas Falcon 	case VLAN_HEADER_INSERTION:
5790032c5e82SThomas Falcon 		adapter->vlan_header_insertion =
5791de89e854SThomas Falcon 		    be64_to_cpu(crq->query_capability.number);
5792032c5e82SThomas Falcon 		if (adapter->vlan_header_insertion)
5793032c5e82SThomas Falcon 			netdev->features |= NETIF_F_HW_VLAN_STAG_TX;
5794032c5e82SThomas Falcon 		netdev_dbg(netdev, "vlan_header_insertion = %lld\n",
5795032c5e82SThomas Falcon 			   adapter->vlan_header_insertion);
5796032c5e82SThomas Falcon 		break;
57976052d5e2SMurilo Fossa Vicentini 	case RX_VLAN_HEADER_INSERTION:
57986052d5e2SMurilo Fossa Vicentini 		adapter->rx_vlan_header_insertion =
57996052d5e2SMurilo Fossa Vicentini 		    be64_to_cpu(crq->query_capability.number);
58006052d5e2SMurilo Fossa Vicentini 		netdev_dbg(netdev, "rx_vlan_header_insertion = %lld\n",
58016052d5e2SMurilo Fossa Vicentini 			   adapter->rx_vlan_header_insertion);
58026052d5e2SMurilo Fossa Vicentini 		break;
5803032c5e82SThomas Falcon 	case MAX_TX_SG_ENTRIES:
5804032c5e82SThomas Falcon 		adapter->max_tx_sg_entries =
5805de89e854SThomas Falcon 		    be64_to_cpu(crq->query_capability.number);
5806032c5e82SThomas Falcon 		netdev_dbg(netdev, "max_tx_sg_entries = %lld\n",
5807032c5e82SThomas Falcon 			   adapter->max_tx_sg_entries);
5808032c5e82SThomas Falcon 		break;
5809032c5e82SThomas Falcon 	case RX_SG_SUPPORTED:
5810032c5e82SThomas Falcon 		adapter->rx_sg_supported =
5811de89e854SThomas Falcon 		    be64_to_cpu(crq->query_capability.number);
5812032c5e82SThomas Falcon 		netdev_dbg(netdev, "rx_sg_supported = %lld\n",
5813032c5e82SThomas Falcon 			   adapter->rx_sg_supported);
5814032c5e82SThomas Falcon 		break;
5815032c5e82SThomas Falcon 	case OPT_TX_COMP_SUB_QUEUES:
5816032c5e82SThomas Falcon 		adapter->opt_tx_comp_sub_queues =
5817de89e854SThomas Falcon 		    be64_to_cpu(crq->query_capability.number);
5818032c5e82SThomas Falcon 		netdev_dbg(netdev, "opt_tx_comp_sub_queues = %lld\n",
5819032c5e82SThomas Falcon 			   adapter->opt_tx_comp_sub_queues);
5820032c5e82SThomas Falcon 		break;
5821032c5e82SThomas Falcon 	case OPT_RX_COMP_QUEUES:
5822032c5e82SThomas Falcon 		adapter->opt_rx_comp_queues =
5823de89e854SThomas Falcon 		    be64_to_cpu(crq->query_capability.number);
5824032c5e82SThomas Falcon 		netdev_dbg(netdev, "opt_rx_comp_queues = %lld\n",
5825032c5e82SThomas Falcon 			   adapter->opt_rx_comp_queues);
5826032c5e82SThomas Falcon 		break;
5827032c5e82SThomas Falcon 	case OPT_RX_BUFADD_Q_PER_RX_COMP_Q:
5828032c5e82SThomas Falcon 		adapter->opt_rx_bufadd_q_per_rx_comp_q =
5829de89e854SThomas Falcon 		    be64_to_cpu(crq->query_capability.number);
5830032c5e82SThomas Falcon 		netdev_dbg(netdev, "opt_rx_bufadd_q_per_rx_comp_q = %lld\n",
5831032c5e82SThomas Falcon 			   adapter->opt_rx_bufadd_q_per_rx_comp_q);
5832032c5e82SThomas Falcon 		break;
5833032c5e82SThomas Falcon 	case OPT_TX_ENTRIES_PER_SUBCRQ:
5834032c5e82SThomas Falcon 		adapter->opt_tx_entries_per_subcrq =
5835de89e854SThomas Falcon 		    be64_to_cpu(crq->query_capability.number);
5836032c5e82SThomas Falcon 		netdev_dbg(netdev, "opt_tx_entries_per_subcrq = %lld\n",
5837032c5e82SThomas Falcon 			   adapter->opt_tx_entries_per_subcrq);
5838032c5e82SThomas Falcon 		break;
5839032c5e82SThomas Falcon 	case OPT_RXBA_ENTRIES_PER_SUBCRQ:
5840032c5e82SThomas Falcon 		adapter->opt_rxba_entries_per_subcrq =
5841de89e854SThomas Falcon 		    be64_to_cpu(crq->query_capability.number);
5842032c5e82SThomas Falcon 		netdev_dbg(netdev, "opt_rxba_entries_per_subcrq = %lld\n",
5843032c5e82SThomas Falcon 			   adapter->opt_rxba_entries_per_subcrq);
5844032c5e82SThomas Falcon 		break;
5845032c5e82SThomas Falcon 	case TX_RX_DESC_REQ:
5846032c5e82SThomas Falcon 		adapter->tx_rx_desc_req = crq->query_capability.number;
5847032c5e82SThomas Falcon 		netdev_dbg(netdev, "tx_rx_desc_req = %llx\n",
5848032c5e82SThomas Falcon 			   adapter->tx_rx_desc_req);
5849032c5e82SThomas Falcon 		break;
5850032c5e82SThomas Falcon 
5851032c5e82SThomas Falcon 	default:
5852032c5e82SThomas Falcon 		netdev_err(netdev, "Got invalid cap rsp %d\n",
5853032c5e82SThomas Falcon 			   crq->query_capability.capability);
5854032c5e82SThomas Falcon 	}
5855032c5e82SThomas Falcon 
5856032c5e82SThomas Falcon out:
58573a5d9db7SSukadev Bhattiprolu 	if (atomic_read(&adapter->running_cap_crqs) == 0)
585809081b9dSLijun Pan 		send_request_cap(adapter, 0);
5859032c5e82SThomas Falcon }
5860032c5e82SThomas Falcon 
send_query_phys_parms(struct ibmvnic_adapter * adapter)5861f8d6ae0dSMurilo Fossa Vicentini static int send_query_phys_parms(struct ibmvnic_adapter *adapter)
5862f8d6ae0dSMurilo Fossa Vicentini {
5863f8d6ae0dSMurilo Fossa Vicentini 	union ibmvnic_crq crq;
5864f8d6ae0dSMurilo Fossa Vicentini 	int rc;
5865f8d6ae0dSMurilo Fossa Vicentini 
5866f8d6ae0dSMurilo Fossa Vicentini 	memset(&crq, 0, sizeof(crq));
5867f8d6ae0dSMurilo Fossa Vicentini 	crq.query_phys_parms.first = IBMVNIC_CRQ_CMD;
5868f8d6ae0dSMurilo Fossa Vicentini 	crq.query_phys_parms.cmd = QUERY_PHYS_PARMS;
5869ff25dcb9SThomas Falcon 
5870ff25dcb9SThomas Falcon 	mutex_lock(&adapter->fw_lock);
5871ff25dcb9SThomas Falcon 	adapter->fw_done_rc = 0;
5872070eca95SThomas Falcon 	reinit_completion(&adapter->fw_done);
5873ff25dcb9SThomas Falcon 
5874f8d6ae0dSMurilo Fossa Vicentini 	rc = ibmvnic_send_crq(adapter, &crq);
5875ff25dcb9SThomas Falcon 	if (rc) {
5876ff25dcb9SThomas Falcon 		mutex_unlock(&adapter->fw_lock);
5877f8d6ae0dSMurilo Fossa Vicentini 		return rc;
5878ff25dcb9SThomas Falcon 	}
5879476d96caSThomas Falcon 
5880476d96caSThomas Falcon 	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
5881ff25dcb9SThomas Falcon 	if (rc) {
5882ff25dcb9SThomas Falcon 		mutex_unlock(&adapter->fw_lock);
5883476d96caSThomas Falcon 		return rc;
5884ff25dcb9SThomas Falcon 	}
5885476d96caSThomas Falcon 
5886ff25dcb9SThomas Falcon 	mutex_unlock(&adapter->fw_lock);
5887f8d6ae0dSMurilo Fossa Vicentini 	return adapter->fw_done_rc ? -EIO : 0;
5888f8d6ae0dSMurilo Fossa Vicentini }
5889f8d6ae0dSMurilo Fossa Vicentini 
handle_query_phys_parms_rsp(union ibmvnic_crq * crq,struct ibmvnic_adapter * adapter)5890f8d6ae0dSMurilo Fossa Vicentini static int handle_query_phys_parms_rsp(union ibmvnic_crq *crq,
5891f8d6ae0dSMurilo Fossa Vicentini 				       struct ibmvnic_adapter *adapter)
5892f8d6ae0dSMurilo Fossa Vicentini {
5893f8d6ae0dSMurilo Fossa Vicentini 	struct net_device *netdev = adapter->netdev;
5894f8d6ae0dSMurilo Fossa Vicentini 	int rc;
5895dd0f9d89SMurilo Fossa Vicentini 	__be32 rspeed = cpu_to_be32(crq->query_phys_parms_rsp.speed);
5896f8d6ae0dSMurilo Fossa Vicentini 
5897f8d6ae0dSMurilo Fossa Vicentini 	rc = crq->query_phys_parms_rsp.rc.code;
5898f8d6ae0dSMurilo Fossa Vicentini 	if (rc) {
5899f8d6ae0dSMurilo Fossa Vicentini 		netdev_err(netdev, "Error %d in QUERY_PHYS_PARMS\n", rc);
5900f8d6ae0dSMurilo Fossa Vicentini 		return rc;
5901f8d6ae0dSMurilo Fossa Vicentini 	}
5902dd0f9d89SMurilo Fossa Vicentini 	switch (rspeed) {
5903f8d6ae0dSMurilo Fossa Vicentini 	case IBMVNIC_10MBPS:
5904f8d6ae0dSMurilo Fossa Vicentini 		adapter->speed = SPEED_10;
5905f8d6ae0dSMurilo Fossa Vicentini 		break;
5906f8d6ae0dSMurilo Fossa Vicentini 	case IBMVNIC_100MBPS:
5907f8d6ae0dSMurilo Fossa Vicentini 		adapter->speed = SPEED_100;
5908f8d6ae0dSMurilo Fossa Vicentini 		break;
5909f8d6ae0dSMurilo Fossa Vicentini 	case IBMVNIC_1GBPS:
5910f8d6ae0dSMurilo Fossa Vicentini 		adapter->speed = SPEED_1000;
5911f8d6ae0dSMurilo Fossa Vicentini 		break;
5912b9cd795bSLijun Pan 	case IBMVNIC_10GBPS:
5913f8d6ae0dSMurilo Fossa Vicentini 		adapter->speed = SPEED_10000;
5914f8d6ae0dSMurilo Fossa Vicentini 		break;
5915f8d6ae0dSMurilo Fossa Vicentini 	case IBMVNIC_25GBPS:
5916f8d6ae0dSMurilo Fossa Vicentini 		adapter->speed = SPEED_25000;
5917f8d6ae0dSMurilo Fossa Vicentini 		break;
5918f8d6ae0dSMurilo Fossa Vicentini 	case IBMVNIC_40GBPS:
5919f8d6ae0dSMurilo Fossa Vicentini 		adapter->speed = SPEED_40000;
5920f8d6ae0dSMurilo Fossa Vicentini 		break;
5921f8d6ae0dSMurilo Fossa Vicentini 	case IBMVNIC_50GBPS:
5922f8d6ae0dSMurilo Fossa Vicentini 		adapter->speed = SPEED_50000;
5923f8d6ae0dSMurilo Fossa Vicentini 		break;
5924f8d6ae0dSMurilo Fossa Vicentini 	case IBMVNIC_100GBPS:
5925f8d6ae0dSMurilo Fossa Vicentini 		adapter->speed = SPEED_100000;
5926f8d6ae0dSMurilo Fossa Vicentini 		break;
5927b9cd795bSLijun Pan 	case IBMVNIC_200GBPS:
5928b9cd795bSLijun Pan 		adapter->speed = SPEED_200000;
5929b9cd795bSLijun Pan 		break;
5930f8d6ae0dSMurilo Fossa Vicentini 	default:
5931dd0f9d89SMurilo Fossa Vicentini 		if (netif_carrier_ok(netdev))
5932dd0f9d89SMurilo Fossa Vicentini 			netdev_warn(netdev, "Unknown speed 0x%08x\n", rspeed);
5933f8d6ae0dSMurilo Fossa Vicentini 		adapter->speed = SPEED_UNKNOWN;
5934f8d6ae0dSMurilo Fossa Vicentini 	}
5935f8d6ae0dSMurilo Fossa Vicentini 	if (crq->query_phys_parms_rsp.flags1 & IBMVNIC_FULL_DUPLEX)
5936f8d6ae0dSMurilo Fossa Vicentini 		adapter->duplex = DUPLEX_FULL;
5937f8d6ae0dSMurilo Fossa Vicentini 	else if (crq->query_phys_parms_rsp.flags1 & IBMVNIC_HALF_DUPLEX)
5938f8d6ae0dSMurilo Fossa Vicentini 		adapter->duplex = DUPLEX_HALF;
5939f8d6ae0dSMurilo Fossa Vicentini 	else
5940f8d6ae0dSMurilo Fossa Vicentini 		adapter->duplex = DUPLEX_UNKNOWN;
5941f8d6ae0dSMurilo Fossa Vicentini 
5942f8d6ae0dSMurilo Fossa Vicentini 	return rc;
5943f8d6ae0dSMurilo Fossa Vicentini }
5944f8d6ae0dSMurilo Fossa Vicentini 
ibmvnic_handle_crq(union ibmvnic_crq * crq,struct ibmvnic_adapter * adapter)5945032c5e82SThomas Falcon static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
5946032c5e82SThomas Falcon 			       struct ibmvnic_adapter *adapter)
5947032c5e82SThomas Falcon {
5948032c5e82SThomas Falcon 	struct ibmvnic_generic_crq *gen_crq = &crq->generic;
5949032c5e82SThomas Falcon 	struct net_device *netdev = adapter->netdev;
5950032c5e82SThomas Falcon 	struct device *dev = &adapter->vdev->dev;
5951993a82b0SMurilo Fossa Vicentini 	u64 *u64_crq = (u64 *)crq;
5952032c5e82SThomas Falcon 	long rc;
5953032c5e82SThomas Falcon 
5954032c5e82SThomas Falcon 	netdev_dbg(netdev, "Handling CRQ: %016lx %016lx\n",
5955429aa364SLijun Pan 		   (unsigned long)cpu_to_be64(u64_crq[0]),
5956429aa364SLijun Pan 		   (unsigned long)cpu_to_be64(u64_crq[1]));
5957032c5e82SThomas Falcon 	switch (gen_crq->first) {
5958032c5e82SThomas Falcon 	case IBMVNIC_CRQ_INIT_RSP:
5959032c5e82SThomas Falcon 		switch (gen_crq->cmd) {
5960032c5e82SThomas Falcon 		case IBMVNIC_CRQ_INIT:
5961032c5e82SThomas Falcon 			dev_info(dev, "Partner initialized\n");
5962017892c1SJohn Allen 			adapter->from_passive_init = true;
596376cdc5c5SSukadev Bhattiprolu 			/* Discard any stale login responses from prev reset.
596476cdc5c5SSukadev Bhattiprolu 			 * CHECK: should we clear even on INIT_COMPLETE?
596576cdc5c5SSukadev Bhattiprolu 			 */
596676cdc5c5SSukadev Bhattiprolu 			adapter->login_pending = false;
596776cdc5c5SSukadev Bhattiprolu 
596853f8b1b2SCristobal Forno 			if (adapter->state == VNIC_DOWN)
596953f8b1b2SCristobal Forno 				rc = ibmvnic_reset(adapter, VNIC_RESET_PASSIVE_INIT);
597053f8b1b2SCristobal Forno 			else
5971ef66a1eaSSukadev Bhattiprolu 				rc = ibmvnic_reset(adapter, VNIC_RESET_FAILOVER);
597253f8b1b2SCristobal Forno 
5973ef66a1eaSSukadev Bhattiprolu 			if (rc && rc != -EBUSY) {
5974ef66a1eaSSukadev Bhattiprolu 				/* We were unable to schedule the failover
5975ef66a1eaSSukadev Bhattiprolu 				 * reset either because the adapter was still
5976ef66a1eaSSukadev Bhattiprolu 				 * probing (eg: during kexec) or we could not
5977ef66a1eaSSukadev Bhattiprolu 				 * allocate memory. Clear the failover_pending
5978ef66a1eaSSukadev Bhattiprolu 				 * flag since no one else will. We ignore
5979ef66a1eaSSukadev Bhattiprolu 				 * EBUSY because it means either FAILOVER reset
5980ef66a1eaSSukadev Bhattiprolu 				 * is already scheduled or the adapter is
5981ef66a1eaSSukadev Bhattiprolu 				 * being removed.
5982ef66a1eaSSukadev Bhattiprolu 				 */
5983ef66a1eaSSukadev Bhattiprolu 				netdev_err(netdev,
5984ef66a1eaSSukadev Bhattiprolu 					   "Error %ld scheduling failover reset\n",
5985ef66a1eaSSukadev Bhattiprolu 					   rc);
5986ef66a1eaSSukadev Bhattiprolu 				adapter->failover_pending = false;
5987ef66a1eaSSukadev Bhattiprolu 			}
59886b278c0cSSukadev Bhattiprolu 
59896b278c0cSSukadev Bhattiprolu 			if (!completion_done(&adapter->init_done)) {
59906b278c0cSSukadev Bhattiprolu 				if (!adapter->init_done_rc)
59916b278c0cSSukadev Bhattiprolu 					adapter->init_done_rc = -EAGAIN;
5992765559b1SSukadev Bhattiprolu 				complete(&adapter->init_done);
59936b278c0cSSukadev Bhattiprolu 			}
59946b278c0cSSukadev Bhattiprolu 
5995032c5e82SThomas Falcon 			break;
5996032c5e82SThomas Falcon 		case IBMVNIC_CRQ_INIT_COMPLETE:
5997032c5e82SThomas Falcon 			dev_info(dev, "Partner initialization complete\n");
59985153698eSThomas Falcon 			adapter->crq.active = true;
5999032c5e82SThomas Falcon 			send_version_xchg(adapter);
6000032c5e82SThomas Falcon 			break;
6001032c5e82SThomas Falcon 		default:
6002032c5e82SThomas Falcon 			dev_err(dev, "Unknown crq cmd: %d\n", gen_crq->cmd);
6003032c5e82SThomas Falcon 		}
6004032c5e82SThomas Falcon 		return;
6005032c5e82SThomas Falcon 	case IBMVNIC_CRQ_XPORT_EVENT:
6006ed651a10SNathan Fontenot 		netif_carrier_off(netdev);
60075153698eSThomas Falcon 		adapter->crq.active = false;
60082147e3d0SThomas Falcon 		/* terminate any thread waiting for a response
60092147e3d0SThomas Falcon 		 * from the device
60102147e3d0SThomas Falcon 		 */
60112147e3d0SThomas Falcon 		if (!completion_done(&adapter->fw_done)) {
60122147e3d0SThomas Falcon 			adapter->fw_done_rc = -EIO;
60132147e3d0SThomas Falcon 			complete(&adapter->fw_done);
60142147e3d0SThomas Falcon 		}
601536491f2dSSukadev Bhattiprolu 
601636491f2dSSukadev Bhattiprolu 		/* if we got here during crq-init, retry crq-init */
601736491f2dSSukadev Bhattiprolu 		if (!completion_done(&adapter->init_done)) {
601836491f2dSSukadev Bhattiprolu 			adapter->init_done_rc = -EAGAIN;
601936491f2dSSukadev Bhattiprolu 			complete(&adapter->init_done);
602036491f2dSSukadev Bhattiprolu 		}
602136491f2dSSukadev Bhattiprolu 
60222147e3d0SThomas Falcon 		if (!completion_done(&adapter->stats_done))
60232147e3d0SThomas Falcon 			complete(&adapter->stats_done);
60247ed5b31fSJuliet Kim 		if (test_bit(0, &adapter->resetting))
60252770a798SThomas Falcon 			adapter->force_reset_recovery = true;
6026032c5e82SThomas Falcon 		if (gen_crq->cmd == IBMVNIC_PARTITION_MIGRATED) {
6027ed651a10SNathan Fontenot 			dev_info(dev, "Migrated, re-enabling adapter\n");
6028ed651a10SNathan Fontenot 			ibmvnic_reset(adapter, VNIC_RESET_MOBILITY);
6029dfad09a6SThomas Falcon 		} else if (gen_crq->cmd == IBMVNIC_DEVICE_FAILOVER) {
6030dfad09a6SThomas Falcon 			dev_info(dev, "Backing device failover detected\n");
60315a18e1e0SThomas Falcon 			adapter->failover_pending = true;
6032032c5e82SThomas Falcon 		} else {
6033032c5e82SThomas Falcon 			/* The adapter lost the connection */
6034032c5e82SThomas Falcon 			dev_err(dev, "Virtual Adapter failed (rc=%d)\n",
6035032c5e82SThomas Falcon 				gen_crq->cmd);
6036ed651a10SNathan Fontenot 			ibmvnic_reset(adapter, VNIC_RESET_FATAL);
6037032c5e82SThomas Falcon 		}
6038032c5e82SThomas Falcon 		return;
6039032c5e82SThomas Falcon 	case IBMVNIC_CRQ_CMD_RSP:
6040032c5e82SThomas Falcon 		break;
6041032c5e82SThomas Falcon 	default:
6042032c5e82SThomas Falcon 		dev_err(dev, "Got an invalid msg type 0x%02x\n",
6043032c5e82SThomas Falcon 			gen_crq->first);
6044032c5e82SThomas Falcon 		return;
6045032c5e82SThomas Falcon 	}
6046032c5e82SThomas Falcon 
6047032c5e82SThomas Falcon 	switch (gen_crq->cmd) {
6048032c5e82SThomas Falcon 	case VERSION_EXCHANGE_RSP:
6049032c5e82SThomas Falcon 		rc = crq->version_exchange_rsp.rc.code;
6050032c5e82SThomas Falcon 		if (rc) {
6051032c5e82SThomas Falcon 			dev_err(dev, "Error %ld in VERSION_EXCHG_RSP\n", rc);
6052032c5e82SThomas Falcon 			break;
6053032c5e82SThomas Falcon 		}
6054032c5e82SThomas Falcon 		ibmvnic_version =
6055032c5e82SThomas Falcon 			    be16_to_cpu(crq->version_exchange_rsp.version);
605678468899SThomas Falcon 		dev_info(dev, "Partner protocol version is %d\n",
605778468899SThomas Falcon 			 ibmvnic_version);
6058491099adSLijun Pan 		send_query_cap(adapter);
6059032c5e82SThomas Falcon 		break;
6060032c5e82SThomas Falcon 	case QUERY_CAPABILITY_RSP:
6061032c5e82SThomas Falcon 		handle_query_cap_rsp(crq, adapter);
6062032c5e82SThomas Falcon 		break;
6063032c5e82SThomas Falcon 	case QUERY_MAP_RSP:
6064032c5e82SThomas Falcon 		handle_query_map_rsp(crq, adapter);
6065032c5e82SThomas Falcon 		break;
6066032c5e82SThomas Falcon 	case REQUEST_MAP_RSP:
6067f3be0cbcSThomas Falcon 		adapter->fw_done_rc = crq->request_map_rsp.rc.code;
6068f3be0cbcSThomas Falcon 		complete(&adapter->fw_done);
6069032c5e82SThomas Falcon 		break;
6070032c5e82SThomas Falcon 	case REQUEST_UNMAP_RSP:
6071032c5e82SThomas Falcon 		handle_request_unmap_rsp(crq, adapter);
6072032c5e82SThomas Falcon 		break;
6073032c5e82SThomas Falcon 	case REQUEST_CAPABILITY_RSP:
6074032c5e82SThomas Falcon 		handle_request_cap_rsp(crq, adapter);
6075032c5e82SThomas Falcon 		break;
6076032c5e82SThomas Falcon 	case LOGIN_RSP:
6077032c5e82SThomas Falcon 		netdev_dbg(netdev, "Got Login Response\n");
6078032c5e82SThomas Falcon 		handle_login_rsp(crq, adapter);
6079032c5e82SThomas Falcon 		break;
6080032c5e82SThomas Falcon 	case LOGICAL_LINK_STATE_RSP:
608153da09e9SNathan Fontenot 		netdev_dbg(netdev,
608253da09e9SNathan Fontenot 			   "Got Logical Link State Response, state: %d rc: %d\n",
608353da09e9SNathan Fontenot 			   crq->logical_link_state_rsp.link_state,
608453da09e9SNathan Fontenot 			   crq->logical_link_state_rsp.rc.code);
6085032c5e82SThomas Falcon 		adapter->logical_link_state =
6086032c5e82SThomas Falcon 		    crq->logical_link_state_rsp.link_state;
608753da09e9SNathan Fontenot 		adapter->init_done_rc = crq->logical_link_state_rsp.rc.code;
608853da09e9SNathan Fontenot 		complete(&adapter->init_done);
6089032c5e82SThomas Falcon 		break;
6090032c5e82SThomas Falcon 	case LINK_STATE_INDICATION:
6091032c5e82SThomas Falcon 		netdev_dbg(netdev, "Got Logical Link State Indication\n");
6092032c5e82SThomas Falcon 		adapter->phys_link_state =
6093032c5e82SThomas Falcon 		    crq->link_state_indication.phys_link_state;
6094032c5e82SThomas Falcon 		adapter->logical_link_state =
6095032c5e82SThomas Falcon 		    crq->link_state_indication.logical_link_state;
60960655f994SThomas Falcon 		if (adapter->phys_link_state && adapter->logical_link_state)
60970655f994SThomas Falcon 			netif_carrier_on(netdev);
60980655f994SThomas Falcon 		else
60990655f994SThomas Falcon 			netif_carrier_off(netdev);
6100032c5e82SThomas Falcon 		break;
6101032c5e82SThomas Falcon 	case CHANGE_MAC_ADDR_RSP:
6102032c5e82SThomas Falcon 		netdev_dbg(netdev, "Got MAC address change Response\n");
6103f813614fSThomas Falcon 		adapter->fw_done_rc = handle_change_mac_rsp(crq, adapter);
6104032c5e82SThomas Falcon 		break;
6105032c5e82SThomas Falcon 	case ERROR_INDICATION:
6106032c5e82SThomas Falcon 		netdev_dbg(netdev, "Got Error Indication\n");
6107032c5e82SThomas Falcon 		handle_error_indication(crq, adapter);
6108032c5e82SThomas Falcon 		break;
6109032c5e82SThomas Falcon 	case REQUEST_STATISTICS_RSP:
6110032c5e82SThomas Falcon 		netdev_dbg(netdev, "Got Statistics Response\n");
6111032c5e82SThomas Falcon 		complete(&adapter->stats_done);
6112032c5e82SThomas Falcon 		break;
6113032c5e82SThomas Falcon 	case QUERY_IP_OFFLOAD_RSP:
6114032c5e82SThomas Falcon 		netdev_dbg(netdev, "Got Query IP offload Response\n");
6115032c5e82SThomas Falcon 		handle_query_ip_offload_rsp(adapter);
6116032c5e82SThomas Falcon 		break;
6117032c5e82SThomas Falcon 	case MULTICAST_CTRL_RSP:
6118032c5e82SThomas Falcon 		netdev_dbg(netdev, "Got multicast control Response\n");
6119032c5e82SThomas Falcon 		break;
6120032c5e82SThomas Falcon 	case CONTROL_IP_OFFLOAD_RSP:
6121032c5e82SThomas Falcon 		netdev_dbg(netdev, "Got Control IP offload Response\n");
6122032c5e82SThomas Falcon 		dma_unmap_single(dev, adapter->ip_offload_ctrl_tok,
6123032c5e82SThomas Falcon 				 sizeof(adapter->ip_offload_ctrl),
6124032c5e82SThomas Falcon 				 DMA_TO_DEVICE);
6125bd0b6723SJohn Allen 		complete(&adapter->init_done);
6126032c5e82SThomas Falcon 		break;
6127032c5e82SThomas Falcon 	case COLLECT_FW_TRACE_RSP:
6128032c5e82SThomas Falcon 		netdev_dbg(netdev, "Got Collect firmware trace Response\n");
6129032c5e82SThomas Falcon 		complete(&adapter->fw_done);
6130032c5e82SThomas Falcon 		break;
61314e6759beSDesnes Augusto Nunes do Rosario 	case GET_VPD_SIZE_RSP:
61324e6759beSDesnes Augusto Nunes do Rosario 		handle_vpd_size_rsp(crq, adapter);
61334e6759beSDesnes Augusto Nunes do Rosario 		break;
61344e6759beSDesnes Augusto Nunes do Rosario 	case GET_VPD_RSP:
61354e6759beSDesnes Augusto Nunes do Rosario 		handle_vpd_rsp(crq, adapter);
61364e6759beSDesnes Augusto Nunes do Rosario 		break;
6137f8d6ae0dSMurilo Fossa Vicentini 	case QUERY_PHYS_PARMS_RSP:
6138f8d6ae0dSMurilo Fossa Vicentini 		adapter->fw_done_rc = handle_query_phys_parms_rsp(crq, adapter);
6139f8d6ae0dSMurilo Fossa Vicentini 		complete(&adapter->fw_done);
6140f8d6ae0dSMurilo Fossa Vicentini 		break;
6141032c5e82SThomas Falcon 	default:
6142032c5e82SThomas Falcon 		netdev_err(netdev, "Got an invalid cmd type 0x%02x\n",
6143032c5e82SThomas Falcon 			   gen_crq->cmd);
6144032c5e82SThomas Falcon 	}
6145032c5e82SThomas Falcon }
6146032c5e82SThomas Falcon 
ibmvnic_interrupt(int irq,void * instance)6147032c5e82SThomas Falcon static irqreturn_t ibmvnic_interrupt(int irq, void *instance)
6148032c5e82SThomas Falcon {
6149032c5e82SThomas Falcon 	struct ibmvnic_adapter *adapter = instance;
61506c267b3dSThomas Falcon 
61516c267b3dSThomas Falcon 	tasklet_schedule(&adapter->tasklet);
61526c267b3dSThomas Falcon 	return IRQ_HANDLED;
61536c267b3dSThomas Falcon }
61546c267b3dSThomas Falcon 
ibmvnic_tasklet(struct tasklet_struct * t)6155aa7c3feeSAllen Pais static void ibmvnic_tasklet(struct tasklet_struct *t)
61566c267b3dSThomas Falcon {
6157aa7c3feeSAllen Pais 	struct ibmvnic_adapter *adapter = from_tasklet(adapter, t, tasklet);
6158032c5e82SThomas Falcon 	struct ibmvnic_crq_queue *queue = &adapter->crq;
6159032c5e82SThomas Falcon 	union ibmvnic_crq *crq;
6160032c5e82SThomas Falcon 	unsigned long flags;
6161032c5e82SThomas Falcon 
6162032c5e82SThomas Falcon 	spin_lock_irqsave(&queue->lock, flags);
61633a5d9db7SSukadev Bhattiprolu 
6164032c5e82SThomas Falcon 	/* Pull all the valid messages off the CRQ */
6165032c5e82SThomas Falcon 	while ((crq = ibmvnic_next_crq(adapter)) != NULL) {
6166e41aec79SLijun Pan 		/* This barrier makes sure ibmvnic_next_crq()'s
6167e41aec79SLijun Pan 		 * crq->generic.first & IBMVNIC_CRQ_CMD_RSP is loaded
6168e41aec79SLijun Pan 		 * before ibmvnic_handle_crq()'s
6169e41aec79SLijun Pan 		 * switch(gen_crq->first) and switch(gen_crq->cmd).
6170e41aec79SLijun Pan 		 */
6171e41aec79SLijun Pan 		dma_rmb();
6172032c5e82SThomas Falcon 		ibmvnic_handle_crq(crq, adapter);
6173032c5e82SThomas Falcon 		crq->generic.first = 0;
6174032c5e82SThomas Falcon 	}
61753a5d9db7SSukadev Bhattiprolu 
6176032c5e82SThomas Falcon 	spin_unlock_irqrestore(&queue->lock, flags);
6177032c5e82SThomas Falcon }
6178032c5e82SThomas Falcon 
ibmvnic_reenable_crq_queue(struct ibmvnic_adapter * adapter)6179032c5e82SThomas Falcon static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *adapter)
6180032c5e82SThomas Falcon {
6181032c5e82SThomas Falcon 	struct vio_dev *vdev = adapter->vdev;
6182032c5e82SThomas Falcon 	int rc;
6183032c5e82SThomas Falcon 
6184032c5e82SThomas Falcon 	do {
6185032c5e82SThomas Falcon 		rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
6186032c5e82SThomas Falcon 	} while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));
6187032c5e82SThomas Falcon 
6188032c5e82SThomas Falcon 	if (rc)
6189032c5e82SThomas Falcon 		dev_err(&vdev->dev, "Error enabling adapter (rc=%d)\n", rc);
6190032c5e82SThomas Falcon 
6191032c5e82SThomas Falcon 	return rc;
6192032c5e82SThomas Falcon }
6193032c5e82SThomas Falcon 
ibmvnic_reset_crq(struct ibmvnic_adapter * adapter)6194032c5e82SThomas Falcon static int ibmvnic_reset_crq(struct ibmvnic_adapter *adapter)
6195032c5e82SThomas Falcon {
6196032c5e82SThomas Falcon 	struct ibmvnic_crq_queue *crq = &adapter->crq;
6197032c5e82SThomas Falcon 	struct device *dev = &adapter->vdev->dev;
6198032c5e82SThomas Falcon 	struct vio_dev *vdev = adapter->vdev;
6199032c5e82SThomas Falcon 	int rc;
6200032c5e82SThomas Falcon 
6201032c5e82SThomas Falcon 	/* Close the CRQ */
6202032c5e82SThomas Falcon 	do {
6203032c5e82SThomas Falcon 		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
6204032c5e82SThomas Falcon 	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
6205032c5e82SThomas Falcon 
6206032c5e82SThomas Falcon 	/* Clean out the queue */
62070e435befSLijun Pan 	if (!crq->msgs)
62080e435befSLijun Pan 		return -EINVAL;
62090e435befSLijun Pan 
6210032c5e82SThomas Falcon 	memset(crq->msgs, 0, PAGE_SIZE);
6211032c5e82SThomas Falcon 	crq->cur = 0;
62125153698eSThomas Falcon 	crq->active = false;
6213032c5e82SThomas Falcon 
6214032c5e82SThomas Falcon 	/* And re-open it again */
6215032c5e82SThomas Falcon 	rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
6216032c5e82SThomas Falcon 				crq->msg_token, PAGE_SIZE);
6217032c5e82SThomas Falcon 
6218032c5e82SThomas Falcon 	if (rc == H_CLOSED)
6219032c5e82SThomas Falcon 		/* Adapter is good, but other end is not ready */
6220032c5e82SThomas Falcon 		dev_warn(dev, "Partner adapter not ready\n");
6221032c5e82SThomas Falcon 	else if (rc != 0)
6222032c5e82SThomas Falcon 		dev_warn(dev, "Couldn't register crq (rc=%d)\n", rc);
6223032c5e82SThomas Falcon 
6224032c5e82SThomas Falcon 	return rc;
6225032c5e82SThomas Falcon }
6226032c5e82SThomas Falcon 
release_crq_queue(struct ibmvnic_adapter * adapter)6227f992887cSNathan Fontenot static void release_crq_queue(struct ibmvnic_adapter *adapter)
6228032c5e82SThomas Falcon {
6229032c5e82SThomas Falcon 	struct ibmvnic_crq_queue *crq = &adapter->crq;
6230032c5e82SThomas Falcon 	struct vio_dev *vdev = adapter->vdev;
6231032c5e82SThomas Falcon 	long rc;
6232032c5e82SThomas Falcon 
6233f992887cSNathan Fontenot 	if (!crq->msgs)
6234f992887cSNathan Fontenot 		return;
6235f992887cSNathan Fontenot 
6236032c5e82SThomas Falcon 	netdev_dbg(adapter->netdev, "Releasing CRQ\n");
6237032c5e82SThomas Falcon 	free_irq(vdev->irq, adapter);
62386c267b3dSThomas Falcon 	tasklet_kill(&adapter->tasklet);
6239032c5e82SThomas Falcon 	do {
6240032c5e82SThomas Falcon 		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
6241032c5e82SThomas Falcon 	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
6242032c5e82SThomas Falcon 
6243032c5e82SThomas Falcon 	dma_unmap_single(&vdev->dev, crq->msg_token, PAGE_SIZE,
6244032c5e82SThomas Falcon 			 DMA_BIDIRECTIONAL);
6245032c5e82SThomas Falcon 	free_page((unsigned long)crq->msgs);
6246f992887cSNathan Fontenot 	crq->msgs = NULL;
62475153698eSThomas Falcon 	crq->active = false;
6248032c5e82SThomas Falcon }
6249032c5e82SThomas Falcon 
init_crq_queue(struct ibmvnic_adapter * adapter)6250f992887cSNathan Fontenot static int init_crq_queue(struct ibmvnic_adapter *adapter)
6251032c5e82SThomas Falcon {
6252032c5e82SThomas Falcon 	struct ibmvnic_crq_queue *crq = &adapter->crq;
6253032c5e82SThomas Falcon 	struct device *dev = &adapter->vdev->dev;
6254032c5e82SThomas Falcon 	struct vio_dev *vdev = adapter->vdev;
6255032c5e82SThomas Falcon 	int rc, retrc = -ENOMEM;
6256032c5e82SThomas Falcon 
6257f992887cSNathan Fontenot 	if (crq->msgs)
6258f992887cSNathan Fontenot 		return 0;
6259f992887cSNathan Fontenot 
6260032c5e82SThomas Falcon 	crq->msgs = (union ibmvnic_crq *)get_zeroed_page(GFP_KERNEL);
6261032c5e82SThomas Falcon 	/* Should we allocate more than one page? */
6262032c5e82SThomas Falcon 
6263032c5e82SThomas Falcon 	if (!crq->msgs)
6264032c5e82SThomas Falcon 		return -ENOMEM;
6265032c5e82SThomas Falcon 
6266032c5e82SThomas Falcon 	crq->size = PAGE_SIZE / sizeof(*crq->msgs);
6267032c5e82SThomas Falcon 	crq->msg_token = dma_map_single(dev, crq->msgs, PAGE_SIZE,
6268032c5e82SThomas Falcon 					DMA_BIDIRECTIONAL);
6269032c5e82SThomas Falcon 	if (dma_mapping_error(dev, crq->msg_token))
6270032c5e82SThomas Falcon 		goto map_failed;
6271032c5e82SThomas Falcon 
6272032c5e82SThomas Falcon 	rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
6273032c5e82SThomas Falcon 				crq->msg_token, PAGE_SIZE);
6274032c5e82SThomas Falcon 
6275032c5e82SThomas Falcon 	if (rc == H_RESOURCE)
6276032c5e82SThomas Falcon 		/* maybe kexecing and resource is busy. try a reset */
6277032c5e82SThomas Falcon 		rc = ibmvnic_reset_crq(adapter);
6278032c5e82SThomas Falcon 	retrc = rc;
6279032c5e82SThomas Falcon 
6280032c5e82SThomas Falcon 	if (rc == H_CLOSED) {
6281032c5e82SThomas Falcon 		dev_warn(dev, "Partner adapter not ready\n");
6282032c5e82SThomas Falcon 	} else if (rc) {
6283032c5e82SThomas Falcon 		dev_warn(dev, "Error %d opening adapter\n", rc);
6284032c5e82SThomas Falcon 		goto reg_crq_failed;
6285032c5e82SThomas Falcon 	}
6286032c5e82SThomas Falcon 
6287032c5e82SThomas Falcon 	retrc = 0;
6288032c5e82SThomas Falcon 
6289aa7c3feeSAllen Pais 	tasklet_setup(&adapter->tasklet, (void *)ibmvnic_tasklet);
62906c267b3dSThomas Falcon 
6291032c5e82SThomas Falcon 	netdev_dbg(adapter->netdev, "registering irq 0x%x\n", vdev->irq);
6292e56e2515SMurilo Fossa Vicentini 	snprintf(crq->name, sizeof(crq->name), "ibmvnic-%x",
6293e56e2515SMurilo Fossa Vicentini 		 adapter->vdev->unit_address);
6294e56e2515SMurilo Fossa Vicentini 	rc = request_irq(vdev->irq, ibmvnic_interrupt, 0, crq->name, adapter);
6295032c5e82SThomas Falcon 	if (rc) {
6296032c5e82SThomas Falcon 		dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n",
6297032c5e82SThomas Falcon 			vdev->irq, rc);
6298032c5e82SThomas Falcon 		goto req_irq_failed;
6299032c5e82SThomas Falcon 	}
6300032c5e82SThomas Falcon 
6301032c5e82SThomas Falcon 	rc = vio_enable_interrupts(vdev);
6302032c5e82SThomas Falcon 	if (rc) {
6303032c5e82SThomas Falcon 		dev_err(dev, "Error %d enabling interrupts\n", rc);
6304032c5e82SThomas Falcon 		goto req_irq_failed;
6305032c5e82SThomas Falcon 	}
6306032c5e82SThomas Falcon 
6307032c5e82SThomas Falcon 	crq->cur = 0;
6308032c5e82SThomas Falcon 	spin_lock_init(&crq->lock);
6309032c5e82SThomas Falcon 
63106e20d001SSukadev Bhattiprolu 	/* process any CRQs that were queued before we enabled interrupts */
63116e20d001SSukadev Bhattiprolu 	tasklet_schedule(&adapter->tasklet);
63126e20d001SSukadev Bhattiprolu 
6313032c5e82SThomas Falcon 	return retrc;
6314032c5e82SThomas Falcon 
6315032c5e82SThomas Falcon req_irq_failed:
63166c267b3dSThomas Falcon 	tasklet_kill(&adapter->tasklet);
6317032c5e82SThomas Falcon 	do {
6318032c5e82SThomas Falcon 		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
6319032c5e82SThomas Falcon 	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
6320032c5e82SThomas Falcon reg_crq_failed:
6321032c5e82SThomas Falcon 	dma_unmap_single(dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
6322032c5e82SThomas Falcon map_failed:
6323032c5e82SThomas Falcon 	free_page((unsigned long)crq->msgs);
6324f992887cSNathan Fontenot 	crq->msgs = NULL;
6325032c5e82SThomas Falcon 	return retrc;
6326032c5e82SThomas Falcon }
6327032c5e82SThomas Falcon 
/* Drive the CRQ-level (re)initialization handshake with the VNIC server
 * and (re)build the sub-CRQs.
 *
 * @adapter: driver private data (CRQ must already be registered)
 * @reset:   true when called from the reset path; enables reuse of the
 *           existing sub-CRQs when the queue counts did not change
 *
 * Returns 0 on success, -ETIMEDOUT if the server never answered the
 * init CRQ, -EINVAL if a passive init raced us, or a negative error
 * from sub-CRQ setup. On most failures the CRQ is released before
 * returning.
 */
static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter, bool reset)
{
	struct device *dev = &adapter->vdev->dev;
	unsigned long timeout = msecs_to_jiffies(20000);
	/* Snapshot queue counts before the handshake so we can tell below
	 * whether the server's capability exchange changed them.
	 */
	u64 old_num_rx_queues = adapter->req_rx_queues;
	u64 old_num_tx_queues = adapter->req_tx_queues;
	int rc;

	adapter->from_passive_init = false;

	/* Kick off the init handshake; completion is signalled from the
	 * CRQ response handler via adapter->init_done.
	 */
	rc = ibmvnic_send_crq_init(adapter);
	if (rc) {
		dev_err(dev, "Send crq init failed with error %d\n", rc);
		return rc;
	}

	if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
		dev_err(dev, "Initialization sequence timed out\n");
		return -ETIMEDOUT;
	}

	/* init_done_rc carries the response handler's verdict */
	if (adapter->init_done_rc) {
		release_crq_queue(adapter);
		dev_err(dev, "CRQ-init failed, %d\n", adapter->init_done_rc);
		return adapter->init_done_rc;
	}

	/* A server-driven (passive) init beat us to it; treat as failure
	 * here and let the passive-init reset path take over.
	 */
	if (adapter->from_passive_init) {
		adapter->state = VNIC_OPEN;
		adapter->from_passive_init = false;
		dev_err(dev, "CRQ-init failed, passive-init\n");
		return -EINVAL;
	}

	/* In the (non-mobility) reset path we may be able to reuse the
	 * existing sub-CRQs instead of rebuilding them from scratch.
	 */
	if (reset &&
	    test_bit(0, &adapter->resetting) && !adapter->wait_for_reset &&
	    adapter->reset_reason != VNIC_RESET_MOBILITY) {
		if (adapter->req_rx_queues != old_num_rx_queues ||
		    adapter->req_tx_queues != old_num_tx_queues) {
			/* Queue counts changed: full sub-CRQ rebuild */
			release_sub_crqs(adapter, 0);
			rc = init_sub_crqs(adapter);
		} else {
			/* no need to reinitialize completely, but we do
			 * need to clean up transmits that were in flight
			 * when we processed the reset.  Failure to do so
			 * will confound the upper layer, usually TCP, by
			 * creating the illusion of transmits that are
			 * awaiting completion.
			 */
			clean_tx_pools(adapter);

			rc = reset_sub_crq_queues(adapter);
		}
	} else {
		rc = init_sub_crqs(adapter);
	}

	if (rc) {
		dev_err(dev, "Initialization of sub crqs failed\n");
		release_crq_queue(adapter);
		return rc;
	}

	rc = init_sub_crq_irqs(adapter);
	if (rc) {
		dev_err(dev, "Failed to initialize sub crq irqs\n");
		release_crq_queue(adapter);
	}

	return rc;
}
6399f6ef6408SJohn Allen 
640040c9db8aSThomas Falcon static struct device_attribute dev_attr_failover;
640140c9db8aSThomas Falcon 
/* VIO bus probe callback: allocate the net_device, initialize all
 * adapter state (locks, completions, work items), bring up the CRQ and
 * perform the initial handshake with the VNIC server, then register the
 * netdev.
 *
 * NOTE(review): returns 0 (probe "success") when the MAC address
 * attribute is missing — presumably deliberate so the device is not
 * re-probed; confirm against VIO bus expectations.
 *
 * Returns 0 on success or a negative errno, unwinding through the
 * labelled cleanup chain on failure.
 */
static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
{
	struct ibmvnic_adapter *adapter;
	struct net_device *netdev;
	unsigned char *mac_addr_p;
	unsigned long flags;
	bool init_success;
	int rc;

	dev_dbg(&dev->dev, "entering ibmvnic_probe for UA 0x%x\n",
		dev->unit_address);

	/* The MAC address comes from the device-tree VETH_MAC_ADDR
	 * property supplied by firmware.
	 */
	mac_addr_p = (unsigned char *)vio_get_attribute(dev,
							VETH_MAC_ADDR, NULL);
	if (!mac_addr_p) {
		dev_err(&dev->dev,
			"(%s:%3.3d) ERROR: Can't find MAC_ADDR attribute\n",
			__FILE__, __LINE__);
		return 0;
	}

	netdev = alloc_etherdev_mq(sizeof(struct ibmvnic_adapter),
				   IBMVNIC_MAX_QUEUES);
	if (!netdev)
		return -ENOMEM;

	adapter = netdev_priv(netdev);
	adapter->state = VNIC_PROBING;
	dev_set_drvdata(&dev->dev, netdev);
	adapter->vdev = dev;
	adapter->netdev = netdev;
	adapter->login_pending = false;
	memset(&adapter->map_ids, 0, sizeof(adapter->map_ids));
	/* map_ids start at 1, so ensure map_id 0 is always "in-use" */
	bitmap_set(adapter->map_ids, 0, 1);

	ether_addr_copy(adapter->mac_addr, mac_addr_p);
	eth_hw_addr_set(netdev, adapter->mac_addr);
	netdev->irq = dev->irq;
	netdev->netdev_ops = &ibmvnic_netdev_ops;
	netdev->ethtool_ops = &ibmvnic_ethtool_ops;
	SET_NETDEV_DEV(netdev, &dev->dev);

	/* Reset machinery: immediate and delayed reset workers plus the
	 * reset-work-item (rwi) list they consume.
	 */
	INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset);
	INIT_DELAYED_WORK(&adapter->ibmvnic_delayed_reset,
			  __ibmvnic_delayed_reset);
	INIT_LIST_HEAD(&adapter->rwi_list);
	spin_lock_init(&adapter->rwi_lock);
	spin_lock_init(&adapter->state_lock);
	mutex_init(&adapter->fw_lock);
	init_completion(&adapter->probe_done);
	init_completion(&adapter->init_done);
	init_completion(&adapter->fw_done);
	init_completion(&adapter->reset_done);
	init_completion(&adapter->stats_done);
	clear_bit(0, &adapter->resetting);
	adapter->prev_rx_buf_sz = 0;
	adapter->prev_mtu = 0;

	init_success = false;
	/* Retry the CRQ init handshake for as long as it reports -EAGAIN */
	do {
		reinit_init_done(adapter);

		/* clear any failovers we got in the previous pass
		 * since we are reinitializing the CRQ
		 */
		adapter->failover_pending = false;

		/* If we had already initialized CRQ, we may have one or
		 * more resets queued already. Discard those and release
		 * the CRQ before initializing the CRQ again.
		 */
		release_crq_queue(adapter);

		/* Since we are still in PROBING state, __ibmvnic_reset()
		 * will not access the ->rwi_list and since we released CRQ,
		 * we won't get _new_ transport events. But there maybe an
		 * ongoing ibmvnic_reset() call. So serialize access to
		 * rwi_list. If we win the race, ibvmnic_reset() could add
		 * a reset after we purged but thats ok - we just may end
		 * up with an extra reset (i.e similar to having two or more
		 * resets in the queue at once).
		 * CHECK.
		 */
		spin_lock_irqsave(&adapter->rwi_lock, flags);
		flush_reset_queue(adapter);
		spin_unlock_irqrestore(&adapter->rwi_lock, flags);

		rc = init_crq_queue(adapter);
		if (rc) {
			dev_err(&dev->dev, "Couldn't initialize crq. rc=%d\n",
				rc);
			goto ibmvnic_init_fail;
		}

		rc = ibmvnic_reset_init(adapter, false);
	} while (rc == -EAGAIN);

	/* We are ignoring the error from ibmvnic_reset_init() assuming that the
	 * partner is not ready. CRQ is not active. When the partner becomes
	 * ready, we will do the passive init reset.
	 */

	if (!rc)
		init_success = true;

	rc = init_stats_buffers(adapter);
	if (rc)
		goto ibmvnic_init_fail;

	rc = init_stats_token(adapter);
	if (rc)
		goto ibmvnic_stats_fail;

	rc = device_create_file(&dev->dev, &dev_attr_failover);
	if (rc)
		goto ibmvnic_dev_file_err;

	netif_carrier_off(netdev);

	/* MTU limits are only valid once the server handshake succeeded */
	if (init_success) {
		adapter->state = VNIC_PROBED;
		netdev->mtu = adapter->req_mtu - ETH_HLEN;
		netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
		netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
	} else {
		adapter->state = VNIC_DOWN;
	}

	adapter->wait_for_reset = false;
	adapter->last_reset_time = jiffies;

	rc = register_netdev(netdev);
	if (rc) {
		dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc);
		goto ibmvnic_register_fail;
	}
	dev_info(&dev->dev, "ibmvnic registered\n");

	rc = ibmvnic_cpu_notif_add(adapter);
	if (rc) {
		netdev_err(netdev, "Registering cpu notifier failed\n");
		goto cpu_notif_add_failed;
	}

	/* Let any waiters (e.g. reset paths) know probing is finished */
	complete(&adapter->probe_done);

	return 0;

cpu_notif_add_failed:
	unregister_netdev(netdev);

ibmvnic_register_fail:
	device_remove_file(&dev->dev, &dev_attr_failover);

ibmvnic_dev_file_err:
	release_stats_token(adapter);

ibmvnic_stats_fail:
	release_stats_buffers(adapter);

ibmvnic_init_fail:
	release_sub_crqs(adapter, 1);
	release_crq_queue(adapter);

	/* cleanup worker thread after releasing CRQ so we don't get
	 * transport events (i.e new work items for the worker thread).
	 */
	adapter->state = VNIC_REMOVING;
	complete(&adapter->probe_done);
	flush_work(&adapter->ibmvnic_reset);
	flush_delayed_work(&adapter->ibmvnic_delayed_reset);

	flush_reset_queue(adapter);

	mutex_destroy(&adapter->fw_lock);
	free_netdev(netdev);

	return rc;
}
6582032c5e82SThomas Falcon 
/* VIO bus remove callback: fence off the reset machinery, unregister
 * the netdev, and release every resource acquired during probe/open in
 * reverse order.
 */
static void ibmvnic_remove(struct vio_dev *dev)
{
	struct net_device *netdev = dev_get_drvdata(&dev->dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	unsigned long flags;

	spin_lock_irqsave(&adapter->state_lock, flags);

	/* If ibmvnic_reset() is scheduling a reset, wait for it to
	 * finish. Then, set the state to REMOVING to prevent it from
	 * scheduling any more work and to have reset functions ignore
	 * any resets that have already been scheduled. Drop the lock
	 * after setting state, so __ibmvnic_reset() which is called
	 * from the flush_work() below, can make progress.
	 */
	spin_lock(&adapter->rwi_lock);
	adapter->state = VNIC_REMOVING;
	spin_unlock(&adapter->rwi_lock);

	spin_unlock_irqrestore(&adapter->state_lock, flags);

	ibmvnic_cpu_notif_remove(adapter);

	/* Wait out any reset work already in flight */
	flush_work(&adapter->ibmvnic_reset);
	flush_delayed_work(&adapter->ibmvnic_delayed_reset);

	rtnl_lock();
	unregister_netdevice(netdev);

	/* Release data-path resources, then the CRQ transport itself */
	release_resources(adapter);
	release_rx_pools(adapter);
	release_tx_pools(adapter);
	release_sub_crqs(adapter, 1);
	release_crq_queue(adapter);

	release_stats_token(adapter);
	release_stats_buffers(adapter);

	adapter->state = VNIC_REMOVED;

	rtnl_unlock();
	mutex_destroy(&adapter->fw_lock);
	device_remove_file(&dev->dev, &dev_attr_failover);
	free_netdev(netdev);
	dev_set_drvdata(&dev->dev, NULL);
}
6629032c5e82SThomas Falcon 
failover_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t count)663040c9db8aSThomas Falcon static ssize_t failover_store(struct device *dev, struct device_attribute *attr,
663140c9db8aSThomas Falcon 			      const char *buf, size_t count)
663240c9db8aSThomas Falcon {
663340c9db8aSThomas Falcon 	struct net_device *netdev = dev_get_drvdata(dev);
663440c9db8aSThomas Falcon 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
663540c9db8aSThomas Falcon 	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
663640c9db8aSThomas Falcon 	__be64 session_token;
663740c9db8aSThomas Falcon 	long rc;
663840c9db8aSThomas Falcon 
663940c9db8aSThomas Falcon 	if (!sysfs_streq(buf, "1"))
664040c9db8aSThomas Falcon 		return -EINVAL;
664140c9db8aSThomas Falcon 
664240c9db8aSThomas Falcon 	rc = plpar_hcall(H_VIOCTL, retbuf, adapter->vdev->unit_address,
664340c9db8aSThomas Falcon 			 H_GET_SESSION_TOKEN, 0, 0, 0);
664440c9db8aSThomas Falcon 	if (rc) {
664540c9db8aSThomas Falcon 		netdev_err(netdev, "Couldn't retrieve session token, rc %ld\n",
664640c9db8aSThomas Falcon 			   rc);
6647334c4241SLijun Pan 		goto last_resort;
664840c9db8aSThomas Falcon 	}
664940c9db8aSThomas Falcon 
665040c9db8aSThomas Falcon 	session_token = (__be64)retbuf[0];
665140c9db8aSThomas Falcon 	netdev_dbg(netdev, "Initiating client failover, session id %llx\n",
665240c9db8aSThomas Falcon 		   be64_to_cpu(session_token));
665340c9db8aSThomas Falcon 	rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
665440c9db8aSThomas Falcon 				H_SESSION_ERR_DETECTED, session_token, 0, 0);
6655277f2bb1SSukadev Bhattiprolu 	if (rc) {
6656334c4241SLijun Pan 		netdev_err(netdev,
6657334c4241SLijun Pan 			   "H_VIOCTL initiated failover failed, rc %ld\n",
665840c9db8aSThomas Falcon 			   rc);
6659277f2bb1SSukadev Bhattiprolu 		goto last_resort;
6660277f2bb1SSukadev Bhattiprolu 	}
6661277f2bb1SSukadev Bhattiprolu 
6662277f2bb1SSukadev Bhattiprolu 	return count;
6663334c4241SLijun Pan 
6664334c4241SLijun Pan last_resort:
6665334c4241SLijun Pan 	netdev_dbg(netdev, "Trying to send CRQ_CMD, the last resort\n");
6666334c4241SLijun Pan 	ibmvnic_reset(adapter, VNIC_RESET_FAILOVER);
666740c9db8aSThomas Falcon 
666840c9db8aSThomas Falcon 	return count;
666940c9db8aSThomas Falcon }
66706cbaefb4SJoe Perches static DEVICE_ATTR_WO(failover);
667140c9db8aSThomas Falcon 
ibmvnic_get_desired_dma(struct vio_dev * vdev)6672032c5e82SThomas Falcon static unsigned long ibmvnic_get_desired_dma(struct vio_dev *vdev)
6673032c5e82SThomas Falcon {
6674032c5e82SThomas Falcon 	struct net_device *netdev = dev_get_drvdata(&vdev->dev);
6675032c5e82SThomas Falcon 	struct ibmvnic_adapter *adapter;
6676032c5e82SThomas Falcon 	struct iommu_table *tbl;
6677032c5e82SThomas Falcon 	unsigned long ret = 0;
6678032c5e82SThomas Falcon 	int i;
6679032c5e82SThomas Falcon 
6680032c5e82SThomas Falcon 	tbl = get_iommu_table_base(&vdev->dev);
6681032c5e82SThomas Falcon 
6682032c5e82SThomas Falcon 	/* netdev inits at probe time along with the structures we need below*/
6683032c5e82SThomas Falcon 	if (!netdev)
6684032c5e82SThomas Falcon 		return IOMMU_PAGE_ALIGN(IBMVNIC_IO_ENTITLEMENT_DEFAULT, tbl);
6685032c5e82SThomas Falcon 
6686032c5e82SThomas Falcon 	adapter = netdev_priv(netdev);
6687032c5e82SThomas Falcon 
6688032c5e82SThomas Falcon 	ret += PAGE_SIZE; /* the crq message queue */
6689032c5e82SThomas Falcon 	ret += IOMMU_PAGE_ALIGN(sizeof(struct ibmvnic_statistics), tbl);
6690032c5e82SThomas Falcon 
6691032c5e82SThomas Falcon 	for (i = 0; i < adapter->req_tx_queues + adapter->req_rx_queues; i++)
6692032c5e82SThomas Falcon 		ret += 4 * PAGE_SIZE; /* the scrq message queue */
6693032c5e82SThomas Falcon 
6694507ebe64SThomas Falcon 	for (i = 0; i < adapter->num_active_rx_pools; i++)
6695032c5e82SThomas Falcon 		ret += adapter->rx_pool[i].size *
6696032c5e82SThomas Falcon 		    IOMMU_PAGE_ALIGN(adapter->rx_pool[i].buff_size, tbl);
6697032c5e82SThomas Falcon 
6698032c5e82SThomas Falcon 	return ret;
6699032c5e82SThomas Falcon }
6700032c5e82SThomas Falcon 
ibmvnic_resume(struct device * dev)6701032c5e82SThomas Falcon static int ibmvnic_resume(struct device *dev)
6702032c5e82SThomas Falcon {
6703032c5e82SThomas Falcon 	struct net_device *netdev = dev_get_drvdata(dev);
6704032c5e82SThomas Falcon 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
6705032c5e82SThomas Falcon 
6706cb89ba24SJohn Allen 	if (adapter->state != VNIC_OPEN)
6707cb89ba24SJohn Allen 		return 0;
6708cb89ba24SJohn Allen 
6709a248878dSJohn Allen 	tasklet_schedule(&adapter->tasklet);
6710032c5e82SThomas Falcon 
6711032c5e82SThomas Falcon 	return 0;
6712032c5e82SThomas Falcon }
6713032c5e82SThomas Falcon 
/* VIO device IDs this driver binds to: the hypervisor presents virtual
 * NICs as "network" nodes of type "IBM,vnic".
 */
static const struct vio_device_id ibmvnic_device_table[] = {
	{"network", "IBM,vnic"},
	{"", "" }	/* terminating entry */
};
MODULE_DEVICE_TABLE(vio, ibmvnic_device_table);
6719032c5e82SThomas Falcon 
/* Power-management callbacks; only a resume hook is needed (no suspend
 * handler — see ibmvnic_resume()).
 */
static const struct dev_pm_ops ibmvnic_pm_ops = {
	.resume = ibmvnic_resume
};
6723032c5e82SThomas Falcon 
/* VIO bus driver descriptor tying together the device table, the
 * probe/remove entry points, the DMA-entitlement estimator and the PM ops.
 */
static struct vio_driver ibmvnic_driver = {
	.id_table       = ibmvnic_device_table,
	.probe          = ibmvnic_probe,
	.remove         = ibmvnic_remove,
	.get_desired_dma = ibmvnic_get_desired_dma,
	.name		= ibmvnic_driver_name,
	.pm		= &ibmvnic_pm_ops,
};
6732032c5e82SThomas Falcon 
6733032c5e82SThomas Falcon /* module functions */
ibmvnic_module_init(void)6734032c5e82SThomas Falcon static int __init ibmvnic_module_init(void)
6735032c5e82SThomas Falcon {
673692125c3aSNick Child 	int ret;
673792125c3aSNick Child 
673892125c3aSNick Child 	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "net/ibmvnic:online",
673992125c3aSNick Child 				      ibmvnic_cpu_online,
674092125c3aSNick Child 				      ibmvnic_cpu_down_prep);
674192125c3aSNick Child 	if (ret < 0)
674292125c3aSNick Child 		goto out;
674392125c3aSNick Child 	ibmvnic_online = ret;
674492125c3aSNick Child 	ret = cpuhp_setup_state_multi(CPUHP_IBMVNIC_DEAD, "net/ibmvnic:dead",
674592125c3aSNick Child 				      NULL, ibmvnic_cpu_dead);
674692125c3aSNick Child 	if (ret)
674792125c3aSNick Child 		goto err_dead;
674892125c3aSNick Child 
674992125c3aSNick Child 	ret = vio_register_driver(&ibmvnic_driver);
675092125c3aSNick Child 	if (ret)
675192125c3aSNick Child 		goto err_vio_register;
675292125c3aSNick Child 
6753032c5e82SThomas Falcon 	pr_info("%s: %s %s\n", ibmvnic_driver_name, ibmvnic_driver_string,
6754032c5e82SThomas Falcon 		IBMVNIC_DRIVER_VERSION);
6755032c5e82SThomas Falcon 
675692125c3aSNick Child 	return 0;
675792125c3aSNick Child err_vio_register:
675892125c3aSNick Child 	cpuhp_remove_multi_state(CPUHP_IBMVNIC_DEAD);
675992125c3aSNick Child err_dead:
676092125c3aSNick Child 	cpuhp_remove_multi_state(ibmvnic_online);
676192125c3aSNick Child out:
676292125c3aSNick Child 	return ret;
6763032c5e82SThomas Falcon }
6764032c5e82SThomas Falcon 
/* Module exit: unregister the VIO driver, then remove the CPU hotplug
 * states in reverse order of their registration at init time.
 */
static void __exit ibmvnic_module_exit(void)
{
	vio_unregister_driver(&ibmvnic_driver);
	cpuhp_remove_multi_state(CPUHP_IBMVNIC_DEAD);
	cpuhp_remove_multi_state(ibmvnic_online);
}
6771032c5e82SThomas Falcon 
/* Hook the init/exit functions into the module load/unload machinery. */
module_init(ibmvnic_module_init);
module_exit(ibmvnic_module_exit);
6774