xref: /openbmc/linux/drivers/net/ethernet/ibm/ibmvnic.c (revision fa7f32422ea1ac276b45b96a540ed5981caaa61f)
1 /**************************************************************************/
2 /*                                                                        */
3 /*  IBM System i and System p Virtual NIC Device Driver                   */
4 /*  Copyright (C) 2014 IBM Corp.                                          */
5 /*  Santiago Leon (santi_leon@yahoo.com)                                  */
6 /*  Thomas Falcon (tlfalcon@linux.vnet.ibm.com)                           */
7 /*  John Allen (jallen@linux.vnet.ibm.com)                                */
8 /*                                                                        */
9 /*  This program is free software; you can redistribute it and/or modify  */
10 /*  it under the terms of the GNU General Public License as published by  */
11 /*  the Free Software Foundation; either version 2 of the License, or     */
12 /*  (at your option) any later version.                                   */
13 /*                                                                        */
14 /*  This program is distributed in the hope that it will be useful,       */
15 /*  but WITHOUT ANY WARRANTY; without even the implied warranty of        */
16 /*  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the         */
17 /*  GNU General Public License for more details.                          */
18 /*                                                                        */
19 /*  You should have received a copy of the GNU General Public License     */
20 /*  along with this program.                                              */
21 /*                                                                        */
22 /* This module contains the implementation of a virtual ethernet device   */
23 /* for use with IBM i/p Series LPAR Linux. It utilizes the logical LAN    */
24 /* option of the RS/6000 Platform Architecture to interface with virtual  */
25 /* ethernet NICs that are presented to the partition by the hypervisor.   */
26 /*                                                                        */
27 /* Messages are passed between the VNIC driver and the VNIC server using  */
28 /* Command/Response Queues (CRQs) and sub CRQs (sCRQs). CRQs are used to  */
29 /* issue and receive commands that initiate communication with the server */
30 /* on driver initialization. Sub CRQs (sCRQs) are similar to CRQs, but    */
31 /* are used by the driver to notify the server that a packet is           */
32 /* ready for transmission or that a buffer has been added to receive a    */
33 /* packet. Subsequently, sCRQs are used by the server to notify the       */
34 /* driver that a packet transmission has been completed or that a packet  */
35 /* has been received and placed in a waiting buffer.                      */
36 /*                                                                        */
37 /* In lieu of a more conventional "on-the-fly" DMA mapping strategy in    */
38 /* which skbs are DMA mapped and immediately unmapped when the transmit   */
39 /* or receive has been completed, the VNIC driver is required to use      */
40 /*  "long term mapping". This entails that large, contiguous DMA mapped  */
41 /* buffers are allocated on driver initialization and these buffers are   */
42 /* then continuously reused to pass skbs to and from the VNIC server.     */
43 /*                                                                        */
44 /**************************************************************************/
45 
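/* Illustrative sketch (not part of the driver): with long term mapping,
 * every rx/tx buffer is a fixed-size slot carved out of one coherent DMA
 * region, so the CPU and device views of slot 'index' are derived with
 * the same offset arithmetic that replenish_rx_pool() and ibmvnic_xmit()
 * use below:
 *
 *	offset   = index * buff_size;
 *	dst      = ltb->buff + offset;	(CPU address of the slot)
 *	dma_addr = ltb->addr + offset;	(DMA address handed to the server)
 *
 * The slots are reused for the lifetime of the device rather than being
 * mapped and unmapped per packet.
 */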
46 #include <linux/module.h>
47 #include <linux/moduleparam.h>
48 #include <linux/types.h>
49 #include <linux/errno.h>
50 #include <linux/completion.h>
51 #include <linux/ioport.h>
52 #include <linux/dma-mapping.h>
53 #include <linux/kernel.h>
54 #include <linux/netdevice.h>
55 #include <linux/etherdevice.h>
56 #include <linux/skbuff.h>
57 #include <linux/init.h>
58 #include <linux/delay.h>
59 #include <linux/mm.h>
60 #include <linux/ethtool.h>
61 #include <linux/proc_fs.h>
62 #include <linux/in.h>
63 #include <linux/ip.h>
64 #include <linux/ipv6.h>
65 #include <linux/irq.h>
66 #include <linux/kthread.h>
67 #include <linux/seq_file.h>
68 #include <linux/debugfs.h>
69 #include <linux/interrupt.h>
70 #include <net/net_namespace.h>
71 #include <asm/hvcall.h>
72 #include <linux/atomic.h>
73 #include <asm/vio.h>
74 #include <asm/iommu.h>
75 #include <linux/uaccess.h>
76 #include <asm/firmware.h>
78 #include <linux/workqueue.h>
79 
80 #include "ibmvnic.h"
81 
82 static const char ibmvnic_driver_name[] = "ibmvnic";
83 static const char ibmvnic_driver_string[] = "IBM System i/p Virtual NIC Driver";
84 
85 MODULE_AUTHOR("Santiago Leon <santi_leon@yahoo.com>");
86 MODULE_DESCRIPTION("IBM System i/p Virtual NIC Driver");
87 MODULE_LICENSE("GPL");
88 MODULE_VERSION(IBMVNIC_DRIVER_VERSION);
89 
90 static int ibmvnic_version = IBMVNIC_INITIAL_VERSION;
91 static int ibmvnic_remove(struct vio_dev *);
92 static void release_sub_crqs(struct ibmvnic_adapter *);
93 static void release_sub_crqs_no_irqs(struct ibmvnic_adapter *);
94 static int ibmvnic_reset_crq(struct ibmvnic_adapter *);
95 static int ibmvnic_send_crq_init(struct ibmvnic_adapter *);
96 static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *);
97 static int ibmvnic_send_crq(struct ibmvnic_adapter *, union ibmvnic_crq *);
98 static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
99 		       union sub_crq *sub_crq);
100 static int send_subcrq_indirect(struct ibmvnic_adapter *, u64, u64, u64);
101 static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance);
102 static int enable_scrq_irq(struct ibmvnic_adapter *,
103 			   struct ibmvnic_sub_crq_queue *);
104 static int disable_scrq_irq(struct ibmvnic_adapter *,
105 			    struct ibmvnic_sub_crq_queue *);
106 static int pending_scrq(struct ibmvnic_adapter *,
107 			struct ibmvnic_sub_crq_queue *);
108 static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *,
109 					struct ibmvnic_sub_crq_queue *);
110 static int ibmvnic_poll(struct napi_struct *napi, int budget);
111 static void send_map_query(struct ibmvnic_adapter *adapter);
112 static void send_request_map(struct ibmvnic_adapter *, dma_addr_t, __be32, u8);
113 static void send_request_unmap(struct ibmvnic_adapter *, u8);
114 
115 struct ibmvnic_stat {
116 	char name[ETH_GSTRING_LEN];
117 	int offset;
118 };
119 
120 #define IBMVNIC_STAT_OFF(stat) (offsetof(struct ibmvnic_adapter, stats) + \
121 			     offsetof(struct ibmvnic_statistics, stat))
122 #define IBMVNIC_GET_STAT(a, off) (*((u64 *)(((unsigned long)(a)) + off)))
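/* Example (illustrative): for the "rx_packets" entry below, the two
 * macros cooperate like this:
 *
 *	int off = IBMVNIC_STAT_OFF(rx_packets);
 *		= offsetof(struct ibmvnic_adapter, stats) +
 *		  offsetof(struct ibmvnic_statistics, rx_packets);
 *	u64 val = IBMVNIC_GET_STAT(adapter, off);
 *		= *(u64 *)((unsigned long)adapter + off);
 *
 * i.e. each table entry stores a byte offset into the adapter struct and
 * the getter reads the u64 counter found at that offset.
 */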
123 
124 static const struct ibmvnic_stat ibmvnic_stats[] = {
125 	{"rx_packets", IBMVNIC_STAT_OFF(rx_packets)},
126 	{"rx_bytes", IBMVNIC_STAT_OFF(rx_bytes)},
127 	{"tx_packets", IBMVNIC_STAT_OFF(tx_packets)},
128 	{"tx_bytes", IBMVNIC_STAT_OFF(tx_bytes)},
129 	{"ucast_tx_packets", IBMVNIC_STAT_OFF(ucast_tx_packets)},
130 	{"ucast_rx_packets", IBMVNIC_STAT_OFF(ucast_rx_packets)},
131 	{"mcast_tx_packets", IBMVNIC_STAT_OFF(mcast_tx_packets)},
132 	{"mcast_rx_packets", IBMVNIC_STAT_OFF(mcast_rx_packets)},
133 	{"bcast_tx_packets", IBMVNIC_STAT_OFF(bcast_tx_packets)},
134 	{"bcast_rx_packets", IBMVNIC_STAT_OFF(bcast_rx_packets)},
135 	{"align_errors", IBMVNIC_STAT_OFF(align_errors)},
136 	{"fcs_errors", IBMVNIC_STAT_OFF(fcs_errors)},
137 	{"single_collision_frames", IBMVNIC_STAT_OFF(single_collision_frames)},
138 	{"multi_collision_frames", IBMVNIC_STAT_OFF(multi_collision_frames)},
139 	{"sqe_test_errors", IBMVNIC_STAT_OFF(sqe_test_errors)},
140 	{"deferred_tx", IBMVNIC_STAT_OFF(deferred_tx)},
141 	{"late_collisions", IBMVNIC_STAT_OFF(late_collisions)},
142 	{"excess_collisions", IBMVNIC_STAT_OFF(excess_collisions)},
143 	{"internal_mac_tx_errors", IBMVNIC_STAT_OFF(internal_mac_tx_errors)},
144 	{"carrier_sense", IBMVNIC_STAT_OFF(carrier_sense)},
145 	{"too_long_frames", IBMVNIC_STAT_OFF(too_long_frames)},
146 	{"internal_mac_rx_errors", IBMVNIC_STAT_OFF(internal_mac_rx_errors)},
147 };
148 
149 static long h_reg_sub_crq(unsigned long unit_address, unsigned long token,
150 			  unsigned long length, unsigned long *number,
151 			  unsigned long *irq)
152 {
153 	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
154 	long rc;
155 
156 	rc = plpar_hcall(H_REG_SUB_CRQ, retbuf, unit_address, token, length);
157 	*number = retbuf[0];
158 	*irq = retbuf[1];
159 
160 	return rc;
161 }
162 
163 /* net_device_ops functions */
164 
165 static void init_rx_pool(struct ibmvnic_adapter *adapter,
166 			 struct ibmvnic_rx_pool *rx_pool, int num, int index,
167 			 int buff_size, int active)
168 {
169 	netdev_dbg(adapter->netdev,
170 		   "Initializing rx_pool %d, %d buffs, %d bytes each\n",
171 		   index, num, buff_size);
172 	rx_pool->size = num;
173 	rx_pool->index = index;
174 	rx_pool->buff_size = buff_size;
175 	rx_pool->active = active;
176 }
177 
178 static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
179 				struct ibmvnic_long_term_buff *ltb, int size)
180 {
181 	struct device *dev = &adapter->vdev->dev;
182 
183 	ltb->size = size;
184 	ltb->buff = dma_alloc_coherent(dev, ltb->size, &ltb->addr,
185 				       GFP_KERNEL);
186 
187 	if (!ltb->buff) {
188 		dev_err(dev, "Couldn't alloc long term buffer\n");
189 		return -ENOMEM;
190 	}
191 	ltb->map_id = adapter->map_id;
192 	adapter->map_id++;
193 	init_completion(&adapter->fw_done);
194 	send_request_map(adapter, ltb->addr,
195 			 ltb->size, ltb->map_id);
196 	wait_for_completion(&adapter->fw_done);
197 	return 0;
198 }
199 
200 static void free_long_term_buff(struct ibmvnic_adapter *adapter,
201 				struct ibmvnic_long_term_buff *ltb)
202 {
203 	struct device *dev = &adapter->vdev->dev;
204 
205 	dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
206 	if (!adapter->failover)
207 		send_request_unmap(adapter, ltb->map_id);
208 }
209 
210 static int alloc_rx_pool(struct ibmvnic_adapter *adapter,
211 			 struct ibmvnic_rx_pool *pool)
212 {
213 	struct device *dev = &adapter->vdev->dev;
214 	int i;
215 
216 	pool->free_map = kcalloc(pool->size, sizeof(int), GFP_KERNEL);
217 	if (!pool->free_map)
218 		return -ENOMEM;
219 
220 	pool->rx_buff = kcalloc(pool->size, sizeof(struct ibmvnic_rx_buff),
221 				GFP_KERNEL);
222 
223 	if (!pool->rx_buff) {
224 		dev_err(dev, "Couldn't alloc rx buffers\n");
225 		kfree(pool->free_map);
226 		return -ENOMEM;
227 	}
228 
229 	if (alloc_long_term_buff(adapter, &pool->long_term_buff,
230 				 pool->size * pool->buff_size)) {
231 		kfree(pool->free_map);
232 		kfree(pool->rx_buff);
233 		return -ENOMEM;
234 	}
235 
236 	for (i = 0; i < pool->size; ++i)
237 		pool->free_map[i] = i;
238 
239 	atomic_set(&pool->available, 0);
240 	pool->next_alloc = 0;
241 	pool->next_free = 0;
242 
243 	return 0;
244 }
245 
246 static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
247 			      struct ibmvnic_rx_pool *pool)
248 {
249 	int count = pool->size - atomic_read(&pool->available);
250 	struct device *dev = &adapter->vdev->dev;
251 	int buffers_added = 0;
252 	unsigned long lpar_rc;
253 	union sub_crq sub_crq;
254 	struct sk_buff *skb;
255 	unsigned int offset;
256 	dma_addr_t dma_addr;
257 	unsigned char *dst;
258 	u64 *handle_array;
259 	int shift = 0;
260 	int index;
261 	int i;
262 
263 	handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
264 				      be32_to_cpu(adapter->login_rsp_buf->
265 				      off_rxadd_subcrqs));
266 
267 	for (i = 0; i < count; ++i) {
268 		skb = alloc_skb(pool->buff_size, GFP_ATOMIC);
269 		if (!skb) {
270 			dev_err(dev, "Couldn't replenish rx buff\n");
271 			adapter->replenish_no_mem++;
272 			break;
273 		}
274 
275 		index = pool->free_map[pool->next_free];
276 
277 		if (pool->rx_buff[index].skb)
278 			dev_err(dev, "Inconsistent free_map!\n");
279 
280 		/* Copy the skb to the long term mapped DMA buffer */
281 		offset = index * pool->buff_size;
282 		dst = pool->long_term_buff.buff + offset;
283 		memset(dst, 0, pool->buff_size);
284 		dma_addr = pool->long_term_buff.addr + offset;
285 		pool->rx_buff[index].data = dst;
286 
287 		pool->free_map[pool->next_free] = IBMVNIC_INVALID_MAP;
288 		pool->rx_buff[index].dma = dma_addr;
289 		pool->rx_buff[index].skb = skb;
290 		pool->rx_buff[index].pool_index = pool->index;
291 		pool->rx_buff[index].size = pool->buff_size;
292 
293 		memset(&sub_crq, 0, sizeof(sub_crq));
294 		sub_crq.rx_add.first = IBMVNIC_CRQ_CMD;
295 		sub_crq.rx_add.correlator =
296 		    cpu_to_be64((u64)&pool->rx_buff[index]);
297 		sub_crq.rx_add.ioba = cpu_to_be32(dma_addr);
298 		sub_crq.rx_add.map_id = pool->long_term_buff.map_id;
299 
300 		/* The length field of the sCRQ is defined to be 24 bits so the
301 		 * buffer size needs to be left shifted by a byte before it is
302 		 * converted to big endian to prevent the last byte from being
303 		 * truncated.
304 		 */
305 #ifdef __LITTLE_ENDIAN__
306 		shift = 8;
307 #endif
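		/* Worked example (illustrative): on a little-endian host a
		 * buff_size of 0x600 becomes 0x00060000 after the shift, so
		 * cpu_to_be32() lays it out as the bytes 00 06 00 00 and the
		 * 24-bit value lands in the first three bytes of the field;
		 * only the always-zero low byte is cut off.
		 */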
308 		sub_crq.rx_add.len = cpu_to_be32(pool->buff_size << shift);
309 
310 		lpar_rc = send_subcrq(adapter, handle_array[pool->index],
311 				      &sub_crq);
312 		if (lpar_rc != H_SUCCESS)
313 			goto failure;
314 
315 		buffers_added++;
316 		adapter->replenish_add_buff_success++;
317 		pool->next_free = (pool->next_free + 1) % pool->size;
318 	}
319 	atomic_add(buffers_added, &pool->available);
320 	return;
321 
322 failure:
323 	dev_info(dev, "replenish pools failure\n");
324 	pool->free_map[pool->next_free] = index;
325 	pool->rx_buff[index].skb = NULL;
326 	if (!dma_mapping_error(dev, dma_addr))
327 		dma_unmap_single(dev, dma_addr, pool->buff_size,
328 				 DMA_FROM_DEVICE);
329 
330 	dev_kfree_skb_any(skb);
331 	adapter->replenish_add_buff_failure++;
332 	atomic_add(buffers_added, &pool->available);
333 }
334 
335 static void replenish_pools(struct ibmvnic_adapter *adapter)
336 {
337 	int i;
338 
339 	if (adapter->migrated)
340 		return;
341 
342 	adapter->replenish_task_cycles++;
343 	for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
344 	     i++) {
345 		if (adapter->rx_pool[i].active)
346 			replenish_rx_pool(adapter, &adapter->rx_pool[i]);
347 	}
348 }
349 
350 static void free_rx_pool(struct ibmvnic_adapter *adapter,
351 			 struct ibmvnic_rx_pool *pool)
352 {
353 	int i;
354 
355 	kfree(pool->free_map);
356 	pool->free_map = NULL;
357 
358 	if (!pool->rx_buff)
359 		return;
360 
361 	for (i = 0; i < pool->size; i++) {
362 		if (pool->rx_buff[i].skb) {
363 			dev_kfree_skb_any(pool->rx_buff[i].skb);
364 			pool->rx_buff[i].skb = NULL;
365 		}
366 	}
367 	kfree(pool->rx_buff);
368 	pool->rx_buff = NULL;
369 }
370 
371 static int ibmvnic_open(struct net_device *netdev)
372 {
373 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
374 	struct device *dev = &adapter->vdev->dev;
375 	struct ibmvnic_tx_pool *tx_pool;
376 	union ibmvnic_crq crq;
377 	int rxadd_subcrqs;
378 	u64 *size_array;
379 	int tx_subcrqs;
380 	int i, j;
381 
382 	rxadd_subcrqs =
383 	    be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
384 	tx_subcrqs =
385 	    be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
386 	size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
387 				  be32_to_cpu(adapter->login_rsp_buf->
388 					      off_rxadd_buff_size));
389 	adapter->map_id = 1;
390 	adapter->napi = kcalloc(adapter->req_rx_queues,
391 				sizeof(struct napi_struct), GFP_KERNEL);
392 	if (!adapter->napi)
393 		goto alloc_napi_failed;
394 	for (i = 0; i < adapter->req_rx_queues; i++) {
395 		netif_napi_add(netdev, &adapter->napi[i], ibmvnic_poll,
396 			       NAPI_POLL_WEIGHT);
397 		napi_enable(&adapter->napi[i]);
398 	}
399 	adapter->rx_pool =
400 	    kcalloc(rxadd_subcrqs, sizeof(struct ibmvnic_rx_pool), GFP_KERNEL);
401 
402 	if (!adapter->rx_pool)
403 		goto rx_pool_arr_alloc_failed;
404 	send_map_query(adapter);
405 	for (i = 0; i < rxadd_subcrqs; i++) {
406 		init_rx_pool(adapter, &adapter->rx_pool[i],
407 			     IBMVNIC_BUFFS_PER_POOL, i,
408 			     be64_to_cpu(size_array[i]), 1);
409 		if (alloc_rx_pool(adapter, &adapter->rx_pool[i])) {
410 			dev_err(dev, "Couldn't alloc rx pool\n");
411 			goto rx_pool_alloc_failed;
412 		}
413 	}
414 	adapter->tx_pool =
415 	    kcalloc(tx_subcrqs, sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
416 
417 	if (!adapter->tx_pool)
418 		goto tx_pool_arr_alloc_failed;
419 	for (i = 0; i < tx_subcrqs; i++) {
420 		tx_pool = &adapter->tx_pool[i];
421 		tx_pool->tx_buff =
422 		    kcalloc(adapter->max_tx_entries_per_subcrq,
423 			    sizeof(struct ibmvnic_tx_buff), GFP_KERNEL);
424 		if (!tx_pool->tx_buff)
425 			goto tx_pool_alloc_failed;
426 
427 		if (alloc_long_term_buff(adapter, &tx_pool->long_term_buff,
428 					 adapter->max_tx_entries_per_subcrq *
429 					 adapter->req_mtu))
430 			goto tx_ltb_alloc_failed;
431 
432 		tx_pool->free_map =
433 		    kcalloc(adapter->max_tx_entries_per_subcrq,
434 			    sizeof(int), GFP_KERNEL);
435 		if (!tx_pool->free_map)
436 			goto tx_fm_alloc_failed;
437 
438 		for (j = 0; j < adapter->max_tx_entries_per_subcrq; j++)
439 			tx_pool->free_map[j] = j;
440 
441 		tx_pool->consumer_index = 0;
442 		tx_pool->producer_index = 0;
443 	}
444 	adapter->bounce_buffer_size =
445 	    ((netdev->mtu + ETH_HLEN - 1) / PAGE_SIZE + 1) * PAGE_SIZE;
446 	adapter->bounce_buffer = kmalloc(adapter->bounce_buffer_size,
447 					 GFP_KERNEL);
448 	if (!adapter->bounce_buffer)
449 		goto bounce_alloc_failed;
450 
451 	adapter->bounce_buffer_dma = dma_map_single(dev, adapter->bounce_buffer,
452 						    adapter->bounce_buffer_size,
453 						    DMA_TO_DEVICE);
454 	if (dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
455 		dev_err(dev, "Couldn't map tx bounce buffer\n");
456 		goto bounce_map_failed;
457 	}
458 	replenish_pools(adapter);
459 
460 	/* We're ready to receive frames; enable the sub-crq interrupts and
461 	 * set the logical link state to up
462 	 */
463 	for (i = 0; i < adapter->req_rx_queues; i++)
464 		enable_scrq_irq(adapter, adapter->rx_scrq[i]);
465 
466 	for (i = 0; i < adapter->req_tx_queues; i++)
467 		enable_scrq_irq(adapter, adapter->tx_scrq[i]);
468 
469 	memset(&crq, 0, sizeof(crq));
470 	crq.logical_link_state.first = IBMVNIC_CRQ_CMD;
471 	crq.logical_link_state.cmd = LOGICAL_LINK_STATE;
472 	crq.logical_link_state.link_state = IBMVNIC_LOGICAL_LNK_UP;
473 	ibmvnic_send_crq(adapter, &crq);
474 
475 	netif_tx_start_all_queues(netdev);
476 
477 	return 0;
478 
479 bounce_map_failed:
480 	kfree(adapter->bounce_buffer);
481 bounce_alloc_failed:
482 	i = tx_subcrqs - 1;
483 	kfree(adapter->tx_pool[i].free_map);
484 tx_fm_alloc_failed:
485 	free_long_term_buff(adapter, &adapter->tx_pool[i].long_term_buff);
486 tx_ltb_alloc_failed:
487 	kfree(adapter->tx_pool[i].tx_buff);
488 tx_pool_alloc_failed:
489 	for (j = 0; j < i; j++) {
490 		kfree(adapter->tx_pool[j].tx_buff);
491 		free_long_term_buff(adapter,
492 				    &adapter->tx_pool[j].long_term_buff);
493 		kfree(adapter->tx_pool[j].free_map);
494 	}
495 	kfree(adapter->tx_pool);
496 	adapter->tx_pool = NULL;
497 tx_pool_arr_alloc_failed:
498 	i = rxadd_subcrqs;
499 rx_pool_alloc_failed:
500 	for (j = 0; j < i; j++) {
501 		free_rx_pool(adapter, &adapter->rx_pool[j]);
502 		free_long_term_buff(adapter,
503 				    &adapter->rx_pool[j].long_term_buff);
504 	}
505 	kfree(adapter->rx_pool);
506 	adapter->rx_pool = NULL;
507 rx_pool_arr_alloc_failed:
508 	for (i = 0; i < adapter->req_rx_queues; i++)
509 		napi_disable(&adapter->napi[i]);
510 alloc_napi_failed:
511 	return -ENOMEM;
512 }
513 
514 static int ibmvnic_close(struct net_device *netdev)
515 {
516 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
517 	struct device *dev = &adapter->vdev->dev;
518 	union ibmvnic_crq crq;
519 	int i;
520 
521 	adapter->closing = true;
522 
523 	for (i = 0; i < adapter->req_rx_queues; i++)
524 		napi_disable(&adapter->napi[i]);
525 
526 	if (!adapter->failover)
527 		netif_tx_stop_all_queues(netdev);
528 
529 	if (adapter->bounce_buffer) {
530 		if (!dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
531 			dma_unmap_single(&adapter->vdev->dev,
532 					 adapter->bounce_buffer_dma,
533 					 adapter->bounce_buffer_size,
534 					 DMA_TO_DEVICE);
535 			adapter->bounce_buffer_dma = DMA_ERROR_CODE;
536 		}
537 		kfree(adapter->bounce_buffer);
538 		adapter->bounce_buffer = NULL;
539 	}
540 
541 	memset(&crq, 0, sizeof(crq));
542 	crq.logical_link_state.first = IBMVNIC_CRQ_CMD;
543 	crq.logical_link_state.cmd = LOGICAL_LINK_STATE;
544 	crq.logical_link_state.link_state = IBMVNIC_LOGICAL_LNK_DN;
545 	ibmvnic_send_crq(adapter, &crq);
546 
547 	for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
548 	     i++) {
549 		kfree(adapter->tx_pool[i].tx_buff);
550 		free_long_term_buff(adapter,
551 				    &adapter->tx_pool[i].long_term_buff);
552 		kfree(adapter->tx_pool[i].free_map);
553 	}
554 	kfree(adapter->tx_pool);
555 	adapter->tx_pool = NULL;
556 
557 	for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
558 	     i++) {
559 		free_rx_pool(adapter, &adapter->rx_pool[i]);
560 		free_long_term_buff(adapter,
561 				    &adapter->rx_pool[i].long_term_buff);
562 	}
563 	kfree(adapter->rx_pool);
564 	adapter->rx_pool = NULL;
565 
566 	adapter->closing = false;
567 
568 	return 0;
569 }
570 
571 /**
572  * build_hdr_data - creates L2/L3/L4 header data buffer
573  * @hdr_field - bitfield determining needed headers
574  * @skb - socket buffer
575  * @hdr_len - array of header lengths
576  * @hdr_data - buffer to write the header data into
577  *
578  * Reads hdr_field to determine which headers are needed by firmware.
579  * Builds a buffer containing these headers.  Saves individual header
580  * lengths and total buffer length to be used to build descriptors.
581  */
582 static int build_hdr_data(u8 hdr_field, struct sk_buff *skb,
583 			  int *hdr_len, u8 *hdr_data)
584 {
585 	int len = 0;
586 	u8 *hdr;
587 
588 	hdr_len[0] = sizeof(struct ethhdr);
589 
590 	if (skb->protocol == htons(ETH_P_IP)) {
591 		hdr_len[1] = ip_hdr(skb)->ihl * 4;
592 		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
593 			hdr_len[2] = tcp_hdrlen(skb);
594 		else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
595 			hdr_len[2] = sizeof(struct udphdr);
596 	} else if (skb->protocol == htons(ETH_P_IPV6)) {
597 		hdr_len[1] = sizeof(struct ipv6hdr);
598 		if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
599 			hdr_len[2] = tcp_hdrlen(skb);
600 		else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
601 			hdr_len[2] = sizeof(struct udphdr);
602 	}
603 
604 	memset(hdr_data, 0, 120);
605 	if ((hdr_field >> 6) & 1) {
606 		hdr = skb_mac_header(skb);
607 		memcpy(hdr_data, hdr, hdr_len[0]);
608 		len += hdr_len[0];
609 	}
610 
611 	if ((hdr_field >> 5) & 1) {
612 		hdr = skb_network_header(skb);
613 		memcpy(hdr_data + len, hdr, hdr_len[1]);
614 		len += hdr_len[1];
615 	}
616 
617 	if ((hdr_field >> 4) & 1) {
618 		hdr = skb_transport_header(skb);
619 		memcpy(hdr_data + len, hdr, hdr_len[2]);
620 		len += hdr_len[2];
621 	}
622 	return len;
623 }
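/* Sketch of the hdr_field bit usage as inferred from this function,
 * create_hdr_descs() and ibmvnic_xmit() (not from a specification):
 * bit 7 requests header descriptors at all, and bits 6/5/4 select the
 * L2/L3/L4 headers respectively.  For a plain TCP/IPv4 frame:
 *
 *	hdr_len[0] = sizeof(struct ethhdr);	// 14
 *	hdr_len[1] = ip_hdr(skb)->ihl * 4;	// 20 without IP options
 *	hdr_len[2] = tcp_hdrlen(skb);		// 20 without TCP options
 *
 * so with all three headers requested build_hdr_data() returns 54.
 */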
624 
625 /**
626  * create_hdr_descs - create header and header extension descriptors
627  * @hdr_field - bitfield determining needed headers
628  * @hdr_data - buffer containing header data
629  * @len - length of data buffer
630  * @hdr_len - array of individual header lengths
631  * @scrq_arr - descriptor array
632  *
633  * Creates header and, if needed, header extension descriptors and
634  * places them in a descriptor array, scrq_arr
635  */
636 
637 static void create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len,
638 			     union sub_crq *scrq_arr)
639 {
640 	union sub_crq hdr_desc;
641 	int tmp_len = len;
642 	u8 *data, *cur;
643 	int tmp;
644 
645 	while (tmp_len > 0) {
646 		cur = hdr_data + len - tmp_len;
647 
648 		memset(&hdr_desc, 0, sizeof(hdr_desc));
649 		if (cur != hdr_data) {
650 			data = hdr_desc.hdr_ext.data;
651 			tmp = tmp_len > 29 ? 29 : tmp_len;
652 			hdr_desc.hdr_ext.first = IBMVNIC_CRQ_CMD;
653 			hdr_desc.hdr_ext.type = IBMVNIC_HDR_EXT_DESC;
654 			hdr_desc.hdr_ext.len = tmp;
655 		} else {
656 			data = hdr_desc.hdr.data;
657 			tmp = tmp_len > 24 ? 24 : tmp_len;
658 			hdr_desc.hdr.first = IBMVNIC_CRQ_CMD;
659 			hdr_desc.hdr.type = IBMVNIC_HDR_DESC;
660 			hdr_desc.hdr.len = tmp;
661 			hdr_desc.hdr.l2_len = (u8)hdr_len[0];
662 			hdr_desc.hdr.l3_len = cpu_to_be16((u16)hdr_len[1]);
663 			hdr_desc.hdr.l4_len = (u8)hdr_len[2];
664 			hdr_desc.hdr.flag = hdr_field << 1;
665 		}
666 		memcpy(data, cur, tmp);
667 		tmp_len -= tmp;
668 		*scrq_arr = hdr_desc;
669 		scrq_arr++;
670 	}
671 }
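/* Worked example (illustrative): the 54 byte header buffer from the
 * TCP/IPv4 case above is emitted as three descriptors, because a header
 * descriptor carries at most 24 bytes of data and each extension
 * descriptor at most 29:
 *
 *	pass 1: header desc,    tmp = 24, 30 bytes remaining
 *	pass 2: extension desc, tmp = 29,  1 byte  remaining
 *	pass 3: extension desc, tmp =  1,  0 bytes remaining
 */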
672 
673 /**
674  * build_hdr_descs_arr - build a header descriptor array
675  * @txbuff - tx buffer holding the skb and the indirect
676  *	descriptor array to be filled in
677  * @num_entries - pointer to the number of descriptors to be sent
678  * @hdr_field - bit field determining which headers will be sent
679  *
680  * This function will build a TX descriptor array with applicable
681  * L2/L3/L4 packet header descriptors to be sent by send_subcrq_indirect.
682  */
683 
684 static void build_hdr_descs_arr(struct ibmvnic_tx_buff *txbuff,
685 				int *num_entries, u8 hdr_field)
686 {
687 	int hdr_len[3] = {0, 0, 0};
688 	int tot_len, len;
689 	u8 *hdr_data = txbuff->hdr_data;
690 
691 	tot_len = build_hdr_data(hdr_field, txbuff->skb, hdr_len,
692 				 txbuff->hdr_data);
693 	len = tot_len;
694 	len -= 24;
695 	if (len > 0)
696 		*num_entries += len % 29 ? len / 29 + 1 : len / 29;
697 	create_hdr_descs(hdr_field, hdr_data, tot_len, hdr_len,
698 			 txbuff->indir_arr + 1);
699 }
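/* Tracing the arithmetic above for the 54 byte example (illustrative):
 * len = 54 - 24 = 30, and 30 % 29 != 0, so *num_entries grows by
 * 30 / 29 + 1 = 2 on top of the single TX descriptor the caller starts
 * with before tx_crq.v1.n_crq_elem is filled in from it.
 */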
700 
701 static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
702 {
703 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
704 	int queue_num = skb_get_queue_mapping(skb);
705 	u8 *hdrs = (u8 *)&adapter->tx_rx_desc_req;
706 	struct device *dev = &adapter->vdev->dev;
707 	struct ibmvnic_tx_buff *tx_buff = NULL;
708 	struct ibmvnic_tx_pool *tx_pool;
709 	unsigned int tx_send_failed = 0;
710 	unsigned int tx_map_failed = 0;
711 	unsigned int tx_dropped = 0;
712 	unsigned int tx_packets = 0;
713 	unsigned int tx_bytes = 0;
714 	dma_addr_t data_dma_addr;
715 	struct netdev_queue *txq;
716 	bool used_bounce = false;
717 	unsigned long lpar_rc;
718 	union sub_crq tx_crq;
719 	unsigned int offset;
720 	int num_entries = 1;
721 	unsigned char *dst;
722 	u64 *handle_array;
723 	int index = 0;
724 	int ret = 0;
725 
726 	tx_pool = &adapter->tx_pool[queue_num];
727 	txq = netdev_get_tx_queue(netdev, skb_get_queue_mapping(skb));
728 	handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
729 				   be32_to_cpu(adapter->login_rsp_buf->
730 					       off_txsubm_subcrqs));
731 	if (adapter->migrated) {
732 		tx_send_failed++;
733 		tx_dropped++;
734 		ret = NETDEV_TX_BUSY;
735 		goto out;
736 	}
737 
738 	index = tx_pool->free_map[tx_pool->consumer_index];
739 	offset = index * adapter->req_mtu;
740 	dst = tx_pool->long_term_buff.buff + offset;
741 	memset(dst, 0, adapter->req_mtu);
742 	skb_copy_from_linear_data(skb, dst, skb->len);
743 	data_dma_addr = tx_pool->long_term_buff.addr + offset;
744 
745 	tx_pool->consumer_index =
746 	    (tx_pool->consumer_index + 1) %
747 		adapter->max_tx_entries_per_subcrq;
748 
749 	tx_buff = &tx_pool->tx_buff[index];
750 	tx_buff->skb = skb;
751 	tx_buff->data_dma[0] = data_dma_addr;
752 	tx_buff->data_len[0] = skb->len;
753 	tx_buff->index = index;
754 	tx_buff->pool_index = queue_num;
755 	tx_buff->last_frag = true;
756 	tx_buff->used_bounce = used_bounce;
757 
758 	memset(&tx_crq, 0, sizeof(tx_crq));
759 	tx_crq.v1.first = IBMVNIC_CRQ_CMD;
760 	tx_crq.v1.type = IBMVNIC_TX_DESC;
761 	tx_crq.v1.n_crq_elem = 1;
762 	tx_crq.v1.n_sge = 1;
763 	tx_crq.v1.flags1 = IBMVNIC_TX_COMP_NEEDED;
764 	tx_crq.v1.correlator = cpu_to_be32(index);
765 	tx_crq.v1.dma_reg = cpu_to_be16(tx_pool->long_term_buff.map_id);
766 	tx_crq.v1.sge_len = cpu_to_be32(skb->len);
767 	tx_crq.v1.ioba = cpu_to_be64(data_dma_addr);
768 
769 	if (adapter->vlan_header_insertion) {
770 		tx_crq.v1.flags2 |= IBMVNIC_TX_VLAN_INSERT;
771 		tx_crq.v1.vlan_id = cpu_to_be16(skb->vlan_tci);
772 	}
773 
774 	if (skb->protocol == htons(ETH_P_IP)) {
775 		if (ip_hdr(skb)->version == 4)
776 			tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV4;
777 		else if (ip_hdr(skb)->version == 6)
778 			tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV6;
779 
780 		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
781 			tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_TCP;
782 		else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
783 			tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_UDP;
784 	}
785 
786 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
787 		tx_crq.v1.flags1 |= IBMVNIC_TX_CHKSUM_OFFLOAD;
788 		hdrs += 2;
789 	}
790 	/* determine if l2/3/4 headers are sent to firmware */
791 	if ((*hdrs >> 7) & 1 &&
792 	    (skb->protocol == htons(ETH_P_IP) ||
793 	     skb->protocol == htons(ETH_P_IPV6))) {
794 		build_hdr_descs_arr(tx_buff, &num_entries, *hdrs);
795 		tx_crq.v1.n_crq_elem = num_entries;
796 		tx_buff->indir_arr[0] = tx_crq;
797 		tx_buff->indir_dma = dma_map_single(dev, tx_buff->indir_arr,
798 						    sizeof(tx_buff->indir_arr),
799 						    DMA_TO_DEVICE);
800 		if (dma_mapping_error(dev, tx_buff->indir_dma)) {
801 			if (!firmware_has_feature(FW_FEATURE_CMO))
802 				dev_err(dev, "tx: unable to map descriptor array\n");
803 			tx_map_failed++;
804 			tx_dropped++;
805 			ret = NETDEV_TX_BUSY;
806 			goto out;
807 		}
808 		lpar_rc = send_subcrq_indirect(adapter, handle_array[queue_num],
809 					       (u64)tx_buff->indir_dma,
810 					       (u64)num_entries);
811 	} else {
812 		lpar_rc = send_subcrq(adapter, handle_array[queue_num],
813 				      &tx_crq);
814 	}
815 	if (lpar_rc != H_SUCCESS) {
816 		dev_err(dev, "tx failed with code %ld\n", lpar_rc);
817 
818 		if (tx_pool->consumer_index == 0)
819 			tx_pool->consumer_index =
820 				adapter->max_tx_entries_per_subcrq - 1;
821 		else
822 			tx_pool->consumer_index--;
823 
824 		tx_send_failed++;
825 		tx_dropped++;
826 		ret = NETDEV_TX_BUSY;
827 		goto out;
828 	}
829 	tx_packets++;
830 	tx_bytes += skb->len;
831 	txq->trans_start = jiffies;
832 	ret = NETDEV_TX_OK;
833 
834 out:
835 	netdev->stats.tx_dropped += tx_dropped;
836 	netdev->stats.tx_bytes += tx_bytes;
837 	netdev->stats.tx_packets += tx_packets;
838 	adapter->tx_send_failed += tx_send_failed;
839 	adapter->tx_map_failed += tx_map_failed;
840 
841 	return ret;
842 }
843 
844 static void ibmvnic_set_multi(struct net_device *netdev)
845 {
846 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
847 	struct netdev_hw_addr *ha;
848 	union ibmvnic_crq crq;
849 
850 	memset(&crq, 0, sizeof(crq));
851 	crq.request_capability.first = IBMVNIC_CRQ_CMD;
852 	crq.request_capability.cmd = REQUEST_CAPABILITY;
853 
854 	if (netdev->flags & IFF_PROMISC) {
855 		if (!adapter->promisc_supported)
856 			return;
857 	} else {
858 		if (netdev->flags & IFF_ALLMULTI) {
859 			/* Accept all multicast */
860 			memset(&crq, 0, sizeof(crq));
861 			crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
862 			crq.multicast_ctrl.cmd = MULTICAST_CTRL;
863 			crq.multicast_ctrl.flags = IBMVNIC_ENABLE_ALL;
864 			ibmvnic_send_crq(adapter, &crq);
865 		} else if (netdev_mc_empty(netdev)) {
866 			/* Reject all multicast */
867 			memset(&crq, 0, sizeof(crq));
868 			crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
869 			crq.multicast_ctrl.cmd = MULTICAST_CTRL;
870 			crq.multicast_ctrl.flags = IBMVNIC_DISABLE_ALL;
871 			ibmvnic_send_crq(adapter, &crq);
872 		} else {
873 			/* Accept one or more multicast(s) */
874 			netdev_for_each_mc_addr(ha, netdev) {
875 				memset(&crq, 0, sizeof(crq));
876 				crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
877 				crq.multicast_ctrl.cmd = MULTICAST_CTRL;
878 				crq.multicast_ctrl.flags = IBMVNIC_ENABLE_MC;
879 				ether_addr_copy(&crq.multicast_ctrl.mac_addr[0],
880 						ha->addr);
881 				ibmvnic_send_crq(adapter, &crq);
882 			}
883 		}
884 	}
885 }
886 
887 static int ibmvnic_set_mac(struct net_device *netdev, void *p)
888 {
889 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
890 	struct sockaddr *addr = p;
891 	union ibmvnic_crq crq;
892 
893 	if (!is_valid_ether_addr(addr->sa_data))
894 		return -EADDRNOTAVAIL;
895 
896 	memset(&crq, 0, sizeof(crq));
897 	crq.change_mac_addr.first = IBMVNIC_CRQ_CMD;
898 	crq.change_mac_addr.cmd = CHANGE_MAC_ADDR;
899 	ether_addr_copy(&crq.change_mac_addr.mac_addr[0], addr->sa_data);
900 	ibmvnic_send_crq(adapter, &crq);
901 	/* netdev->dev_addr is changed in handle_change_mac_rsp function */
902 	return 0;
903 }
904 
905 static int ibmvnic_change_mtu(struct net_device *netdev, int new_mtu)
906 {
907 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
908 
909 	if (new_mtu > adapter->req_mtu || new_mtu < adapter->min_mtu)
910 		return -EINVAL;
911 
912 	netdev->mtu = new_mtu;
913 	return 0;
914 }
915 
916 static void ibmvnic_tx_timeout(struct net_device *dev)
917 {
918 	struct ibmvnic_adapter *adapter = netdev_priv(dev);
919 	int rc;
920 
921 	/* Adapter timed out, resetting it */
922 	release_sub_crqs(adapter);
923 	rc = ibmvnic_reset_crq(adapter);
924 	if (rc)
925 		dev_err(&adapter->vdev->dev, "Adapter timeout, reset failed\n");
926 	else
927 		ibmvnic_send_crq_init(adapter);
928 }
929 
930 static void remove_buff_from_pool(struct ibmvnic_adapter *adapter,
931 				  struct ibmvnic_rx_buff *rx_buff)
932 {
933 	struct ibmvnic_rx_pool *pool = &adapter->rx_pool[rx_buff->pool_index];
934 
935 	rx_buff->skb = NULL;
936 
937 	pool->free_map[pool->next_alloc] = (int)(rx_buff - pool->rx_buff);
938 	pool->next_alloc = (pool->next_alloc + 1) % pool->size;
939 
940 	atomic_dec(&pool->available);
941 }
942 
943 static int ibmvnic_poll(struct napi_struct *napi, int budget)
944 {
945 	struct net_device *netdev = napi->dev;
946 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
947 	int scrq_num = (int)(napi - adapter->napi);
948 	int frames_processed = 0;
949 restart_poll:
950 	while (frames_processed < budget) {
951 		struct sk_buff *skb;
952 		struct ibmvnic_rx_buff *rx_buff;
953 		union sub_crq *next;
954 		u32 length;
955 		u16 offset;
956 		u8 flags = 0;
957 
958 		if (!pending_scrq(adapter, adapter->rx_scrq[scrq_num]))
959 			break;
960 		next = ibmvnic_next_scrq(adapter, adapter->rx_scrq[scrq_num]);
961 		rx_buff =
962 		    (struct ibmvnic_rx_buff *)be64_to_cpu(next->
963 							  rx_comp.correlator);
964 		/* do error checking */
965 		if (next->rx_comp.rc) {
966 			netdev_err(netdev, "rx error %x\n", next->rx_comp.rc);
967 			/* free the entry */
968 			next->rx_comp.first = 0;
969 			remove_buff_from_pool(adapter, rx_buff);
970 			break;
971 		}
972 
973 		length = be32_to_cpu(next->rx_comp.len);
974 		offset = be16_to_cpu(next->rx_comp.off_frame_data);
975 		flags = next->rx_comp.flags;
976 		skb = rx_buff->skb;
977 		skb_copy_to_linear_data(skb, rx_buff->data + offset,
978 					length);
979 		skb->vlan_tci = be16_to_cpu(next->rx_comp.vlan_tci);
980 		/* free the entry */
981 		next->rx_comp.first = 0;
982 		remove_buff_from_pool(adapter, rx_buff);
983 
984 		skb_put(skb, length);
985 		skb->protocol = eth_type_trans(skb, netdev);
986 
987 		if (flags & IBMVNIC_IP_CHKSUM_GOOD &&
988 		    flags & IBMVNIC_TCP_UDP_CHKSUM_GOOD) {
989 			skb->ip_summed = CHECKSUM_UNNECESSARY;
990 		}
991 
992 		length = skb->len;
993 		napi_gro_receive(napi, skb); /* send it up */
994 		netdev->stats.rx_packets++;
995 		netdev->stats.rx_bytes += length;
996 		frames_processed++;
997 	}
998 	replenish_rx_pool(adapter, &adapter->rx_pool[scrq_num]);
999 
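	/* Re-arm sequence: the interrupt is re-enabled before
	 * napi_complete(), then the queue is checked once more; a frame
	 * that slipped in between is picked up via napi_reschedule()
	 * instead of waiting for the next interrupt.
	 */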
1000 	if (frames_processed < budget) {
1001 		enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
1002 		napi_complete(napi);
1003 		if (pending_scrq(adapter, adapter->rx_scrq[scrq_num]) &&
1004 		    napi_reschedule(napi)) {
1005 			disable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
1006 			goto restart_poll;
1007 		}
1008 	}
1009 	return frames_processed;
1010 }
1011 
1012 #ifdef CONFIG_NET_POLL_CONTROLLER
1013 static void ibmvnic_netpoll_controller(struct net_device *dev)
1014 {
1015 	struct ibmvnic_adapter *adapter = netdev_priv(dev);
1016 	int i;
1017 
1018 	replenish_pools(netdev_priv(dev));
1019 	for (i = 0; i < adapter->req_rx_queues; i++)
1020 		ibmvnic_interrupt_rx(adapter->rx_scrq[i]->irq,
1021 				     adapter->rx_scrq[i]);
1022 }
1023 #endif
1024 
1025 static const struct net_device_ops ibmvnic_netdev_ops = {
1026 	.ndo_open		= ibmvnic_open,
1027 	.ndo_stop		= ibmvnic_close,
1028 	.ndo_start_xmit		= ibmvnic_xmit,
1029 	.ndo_set_rx_mode	= ibmvnic_set_multi,
1030 	.ndo_set_mac_address	= ibmvnic_set_mac,
1031 	.ndo_validate_addr	= eth_validate_addr,
1032 	.ndo_change_mtu		= ibmvnic_change_mtu,
1033 	.ndo_tx_timeout		= ibmvnic_tx_timeout,
1034 #ifdef CONFIG_NET_POLL_CONTROLLER
1035 	.ndo_poll_controller	= ibmvnic_netpoll_controller,
1036 #endif
1037 };
1038 
1039 /* ethtool functions */
1040 
1041 static int ibmvnic_get_settings(struct net_device *netdev,
1042 				struct ethtool_cmd *cmd)
1043 {
1044 	cmd->supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg |
1045 			  SUPPORTED_FIBRE);
1046 	cmd->advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg |
1047 			    ADVERTISED_FIBRE);
1048 	ethtool_cmd_speed_set(cmd, SPEED_1000);
1049 	cmd->duplex = DUPLEX_FULL;
1050 	cmd->port = PORT_FIBRE;
1051 	cmd->phy_address = 0;
1052 	cmd->transceiver = XCVR_INTERNAL;
1053 	cmd->autoneg = AUTONEG_ENABLE;
1054 	cmd->maxtxpkt = 0;
1055 	cmd->maxrxpkt = 1;
1056 	return 0;
1057 }
1058 
1059 static void ibmvnic_get_drvinfo(struct net_device *dev,
1060 				struct ethtool_drvinfo *info)
1061 {
1062 	strlcpy(info->driver, ibmvnic_driver_name, sizeof(info->driver));
1063 	strlcpy(info->version, IBMVNIC_DRIVER_VERSION, sizeof(info->version));
1064 }
1065 
1066 static u32 ibmvnic_get_msglevel(struct net_device *netdev)
1067 {
1068 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1069 
1070 	return adapter->msg_enable;
1071 }
1072 
1073 static void ibmvnic_set_msglevel(struct net_device *netdev, u32 data)
1074 {
1075 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1076 
1077 	adapter->msg_enable = data;
1078 }
1079 
1080 static u32 ibmvnic_get_link(struct net_device *netdev)
1081 {
1082 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1083 
1084 	/* Don't need to send a query because we request a logical link up at
1085 	 * init and then we wait for link state indications
1086 	 */
1087 	return adapter->logical_link_state;
1088 }
1089 
1090 static void ibmvnic_get_ringparam(struct net_device *netdev,
1091 				  struct ethtool_ringparam *ring)
1092 {
1093 	ring->rx_max_pending = 0;
1094 	ring->tx_max_pending = 0;
1095 	ring->rx_mini_max_pending = 0;
1096 	ring->rx_jumbo_max_pending = 0;
1097 	ring->rx_pending = 0;
1098 	ring->tx_pending = 0;
1099 	ring->rx_mini_pending = 0;
1100 	ring->rx_jumbo_pending = 0;
1101 }
1102 
1103 static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
1104 {
1105 	int i;
1106 
1107 	if (stringset != ETH_SS_STATS)
1108 		return;
1109 
1110 	for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++, data += ETH_GSTRING_LEN)
1111 		memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN);
1112 }
1113 
1114 static int ibmvnic_get_sset_count(struct net_device *dev, int sset)
1115 {
1116 	switch (sset) {
1117 	case ETH_SS_STATS:
1118 		return ARRAY_SIZE(ibmvnic_stats);
1119 	default:
1120 		return -EOPNOTSUPP;
1121 	}
1122 }
1123 
1124 static void ibmvnic_get_ethtool_stats(struct net_device *dev,
1125 				      struct ethtool_stats *stats, u64 *data)
1126 {
1127 	struct ibmvnic_adapter *adapter = netdev_priv(dev);
1128 	union ibmvnic_crq crq;
1129 	int i;
1130 
1131 	memset(&crq, 0, sizeof(crq));
1132 	crq.request_statistics.first = IBMVNIC_CRQ_CMD;
1133 	crq.request_statistics.cmd = REQUEST_STATISTICS;
1134 	crq.request_statistics.ioba = cpu_to_be32(adapter->stats_token);
1135 	crq.request_statistics.len =
1136 	    cpu_to_be32(sizeof(struct ibmvnic_statistics));
1137 	init_completion(&adapter->stats_done);
1138 	ibmvnic_send_crq(adapter, &crq);
1139 
1140 	/* Wait for data to be written */
1141 	wait_for_completion(&adapter->stats_done);
1142 
1143 	for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++)
1144 		data[i] = IBMVNIC_GET_STAT(adapter, ibmvnic_stats[i].offset);
1145 }
1146 
1147 static const struct ethtool_ops ibmvnic_ethtool_ops = {
1148 	.get_settings		= ibmvnic_get_settings,
1149 	.get_drvinfo		= ibmvnic_get_drvinfo,
1150 	.get_msglevel		= ibmvnic_get_msglevel,
1151 	.set_msglevel		= ibmvnic_set_msglevel,
1152 	.get_link		= ibmvnic_get_link,
1153 	.get_ringparam		= ibmvnic_get_ringparam,
1154 	.get_strings            = ibmvnic_get_strings,
1155 	.get_sset_count         = ibmvnic_get_sset_count,
1156 	.get_ethtool_stats	= ibmvnic_get_ethtool_stats,
1157 };
1158 
1159 /* Routines for managing CRQs/sCRQs  */
1160 
1161 static void release_sub_crq_queue(struct ibmvnic_adapter *adapter,
1162 				  struct ibmvnic_sub_crq_queue *scrq)
1163 {
1164 	struct device *dev = &adapter->vdev->dev;
1165 	long rc;
1166 
1167 	netdev_dbg(adapter->netdev, "Releasing sub-CRQ\n");
1168 
1169 	/* Close the sub-crqs */
1170 	do {
1171 		rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
1172 					adapter->vdev->unit_address,
1173 					scrq->crq_num);
1174 	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
1175 
1176 	dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
1177 			 DMA_BIDIRECTIONAL);
1178 	free_pages((unsigned long)scrq->msgs, 2);
1179 	kfree(scrq);
1180 }
1181 
1182 static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
1183 							*adapter)
1184 {
1185 	struct device *dev = &adapter->vdev->dev;
1186 	struct ibmvnic_sub_crq_queue *scrq;
1187 	int rc;
1188 
1189 	scrq = kmalloc(sizeof(*scrq), GFP_ATOMIC);
1190 	if (!scrq)
1191 		return NULL;
1192 
1193 	scrq->msgs = (union sub_crq *)__get_free_pages(GFP_KERNEL, 2);
1194 	if (!scrq->msgs) {
1195 		dev_warn(dev, "Couldn't allocate crq queue messages page\n");
1196 		goto zero_page_failed;
1197 	}
1198 	memset(scrq->msgs, 0, 4 * PAGE_SIZE);
1199 
1200 	scrq->msg_token = dma_map_single(dev, scrq->msgs, 4 * PAGE_SIZE,
1201 					 DMA_BIDIRECTIONAL);
1202 	if (dma_mapping_error(dev, scrq->msg_token)) {
1203 		dev_warn(dev, "Couldn't map crq queue messages page\n");
1204 		goto map_failed;
1205 	}
1206 
1207 	rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
1208 			   4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);
1209 
1210 	if (rc == H_RESOURCE)
1211 		rc = ibmvnic_reset_crq(adapter);
1212 
1213 	if (rc == H_CLOSED) {
1214 		dev_warn(dev, "Partner adapter not ready, waiting.\n");
1215 	} else if (rc) {
1216 		dev_warn(dev, "Error %d registering sub-crq\n", rc);
1217 		goto reg_failed;
1218 	}
1219 
1220 	scrq->adapter = adapter;
1221 	scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs);
1222 	scrq->cur = 0;
1223 	scrq->rx_skb_top = NULL;
1224 	spin_lock_init(&scrq->lock);
1225 
1226 	netdev_dbg(adapter->netdev,
1227 		   "sub-crq initialized, num %lx, hw_irq=%lx, irq=%x\n",
1228 		   scrq->crq_num, scrq->hw_irq, scrq->irq);
1229 
1230 	return scrq;
1231 
1232 reg_failed:
1233 	dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
1234 			 DMA_BIDIRECTIONAL);
1235 map_failed:
1236 	free_pages((unsigned long)scrq->msgs, 2);
1237 zero_page_failed:
1238 	kfree(scrq);
1239 
1240 	return NULL;
1241 }
1242 
1243 static void release_sub_crqs(struct ibmvnic_adapter *adapter)
1244 {
1245 	int i;
1246 
1247 	if (adapter->tx_scrq) {
1248 		for (i = 0; i < adapter->req_tx_queues; i++)
1249 			if (adapter->tx_scrq[i]) {
1250 				free_irq(adapter->tx_scrq[i]->irq,
1251 					 adapter->tx_scrq[i]);
1252 				irq_dispose_mapping(adapter->tx_scrq[i]->irq);
1253 				release_sub_crq_queue(adapter,
1254 						      adapter->tx_scrq[i]);
1255 			}
1256 		adapter->tx_scrq = NULL;
1257 	}
1258 
1259 	if (adapter->rx_scrq) {
1260 		for (i = 0; i < adapter->req_rx_queues; i++)
1261 			if (adapter->rx_scrq[i]) {
1262 				free_irq(adapter->rx_scrq[i]->irq,
1263 					 adapter->rx_scrq[i]);
1264 				irq_dispose_mapping(adapter->rx_scrq[i]->irq);
1265 				release_sub_crq_queue(adapter,
1266 						      adapter->rx_scrq[i]);
1267 			}
1268 		adapter->rx_scrq = NULL;
1269 	}
1270 
1271 	adapter->requested_caps = 0;
1272 }
1273 
1274 static void release_sub_crqs_no_irqs(struct ibmvnic_adapter *adapter)
1275 {
1276 	int i;
1277 
1278 	if (adapter->tx_scrq) {
1279 		for (i = 0; i < adapter->req_tx_queues; i++)
1280 			if (adapter->tx_scrq[i])
1281 				release_sub_crq_queue(adapter,
1282 						      adapter->tx_scrq[i]);
1283 		adapter->tx_scrq = NULL;
1284 	}
1285 
1286 	if (adapter->rx_scrq) {
1287 		for (i = 0; i < adapter->req_rx_queues; i++)
1288 			if (adapter->rx_scrq[i])
1289 				release_sub_crq_queue(adapter,
1290 						      adapter->rx_scrq[i]);
1291 		adapter->rx_scrq = NULL;
1292 	}
1293 
1294 	adapter->requested_caps = 0;
1295 }
1296 
1297 static int disable_scrq_irq(struct ibmvnic_adapter *adapter,
1298 			    struct ibmvnic_sub_crq_queue *scrq)
1299 {
1300 	struct device *dev = &adapter->vdev->dev;
1301 	unsigned long rc;
1302 
1303 	rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
1304 				H_DISABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
1305 	if (rc)
1306 		dev_err(dev, "Couldn't disable scrq irq 0x%lx. rc=%ld\n",
1307 			scrq->hw_irq, rc);
1308 	return rc;
1309 }
1310 
1311 static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
1312 			   struct ibmvnic_sub_crq_queue *scrq)
1313 {
1314 	struct device *dev = &adapter->vdev->dev;
1315 	unsigned long rc;
1316 
1317 	if (scrq->hw_irq > 0x100000000ULL) {
1318 		dev_err(dev, "bad hw_irq = %lx\n", scrq->hw_irq);
1319 		return 1;
1320 	}
1321 
1322 	rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
1323 				H_ENABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
1324 	if (rc)
1325 		dev_err(dev, "Couldn't enable scrq irq 0x%lx. rc=%ld\n",
1326 			scrq->hw_irq, rc);
1327 	return rc;
1328 }
1329 
1330 static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
1331 			       struct ibmvnic_sub_crq_queue *scrq)
1332 {
1333 	struct device *dev = &adapter->vdev->dev;
1334 	struct ibmvnic_tx_buff *txbuff;
1335 	union sub_crq *next;
1336 	int index;
1337 	int i, j;
1338 	u8 first;
1339 
1340 restart_loop:
1341 	while (pending_scrq(adapter, scrq)) {
1342 		unsigned int pool = scrq->pool_index;
1343 
1344 		next = ibmvnic_next_scrq(adapter, scrq);
1345 		for (i = 0; i < next->tx_comp.num_comps; i++) {
1346 			if (next->tx_comp.rcs[i]) {
1347 				dev_err(dev, "tx error %x\n",
1348 					next->tx_comp.rcs[i]);
1349 				continue;
1350 			}
1351 			index = be32_to_cpu(next->tx_comp.correlators[i]);
1352 			txbuff = &adapter->tx_pool[pool].tx_buff[index];
1353 
1354 			for (j = 0; j < IBMVNIC_MAX_FRAGS_PER_CRQ; j++) {
1355 				if (!txbuff->data_dma[j])
1356 					continue;
1357 
1358 				txbuff->data_dma[j] = 0;
1359 				txbuff->used_bounce = false;
1360 			}
1361 			/* if sub_crq was sent indirectly */
1362 			first = txbuff->indir_arr[0].generic.first;
1363 			if (first == IBMVNIC_CRQ_CMD) {
1364 				dma_unmap_single(dev, txbuff->indir_dma,
1365 						 sizeof(txbuff->indir_arr),
1366 						 DMA_TO_DEVICE);
1367 			}
1368 
1369 			if (txbuff->last_frag)
1370 				dev_kfree_skb_any(txbuff->skb);
1371 
1372 			adapter->tx_pool[pool].free_map[adapter->tx_pool[pool].
1373 						     producer_index] = index;
1374 			adapter->tx_pool[pool].producer_index =
1375 			    (adapter->tx_pool[pool].producer_index + 1) %
1376 			    adapter->max_tx_entries_per_subcrq;
1377 		}
1378 		/* remove tx_comp scrq */
1379 		next->tx_comp.first = 0;
1380 	}
1381 
1382 	enable_scrq_irq(adapter, scrq);
1383 
1384 	if (pending_scrq(adapter, scrq)) {
1385 		disable_scrq_irq(adapter, scrq);
1386 		goto restart_loop;
1387 	}
1388 
1389 	return 0;
1390 }
1391 
1392 static irqreturn_t ibmvnic_interrupt_tx(int irq, void *instance)
1393 {
1394 	struct ibmvnic_sub_crq_queue *scrq = instance;
1395 	struct ibmvnic_adapter *adapter = scrq->adapter;
1396 
1397 	disable_scrq_irq(adapter, scrq);
1398 	ibmvnic_complete_tx(adapter, scrq);
1399 
1400 	return IRQ_HANDLED;
1401 }
1402 
1403 static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance)
1404 {
1405 	struct ibmvnic_sub_crq_queue *scrq = instance;
1406 	struct ibmvnic_adapter *adapter = scrq->adapter;
1407 
1408 	if (napi_schedule_prep(&adapter->napi[scrq->scrq_num])) {
1409 		disable_scrq_irq(adapter, scrq);
1410 		__napi_schedule(&adapter->napi[scrq->scrq_num]);
1411 	}
1412 
1413 	return IRQ_HANDLED;
1414 }
1415 
1416 static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter)
1417 {
1418 	struct device *dev = &adapter->vdev->dev;
1419 	struct ibmvnic_sub_crq_queue *scrq;
1420 	int i = 0, j = 0;
1421 	int rc = 0;
1422 
1423 	for (i = 0; i < adapter->req_tx_queues; i++) {
1424 		scrq = adapter->tx_scrq[i];
1425 		scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
1426 
1427 		if (!scrq->irq) {
1428 			rc = -EINVAL;
1429 			dev_err(dev, "Error mapping irq\n");
1430 			goto req_tx_irq_failed;
1431 		}
1432 
1433 		rc = request_irq(scrq->irq, ibmvnic_interrupt_tx,
1434 				 0, "ibmvnic_tx", scrq);
1435 
1436 		if (rc) {
1437 			dev_err(dev, "Couldn't register tx irq 0x%x. rc=%d\n",
1438 				scrq->irq, rc);
1439 			irq_dispose_mapping(scrq->irq);
1440 			goto req_rx_irq_failed;
1441 		}
1442 	}
1443 
1444 	for (i = 0; i < adapter->req_rx_queues; i++) {
1445 		scrq = adapter->rx_scrq[i];
1446 		scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
1447 		if (!scrq->irq) {
1448 			rc = -EINVAL;
1449 			dev_err(dev, "Error mapping irq\n");
1450 			goto req_rx_irq_failed;
1451 		}
1452 		rc = request_irq(scrq->irq, ibmvnic_interrupt_rx,
1453 				 0, "ibmvnic_rx", scrq);
1454 		if (rc) {
1455 			dev_err(dev, "Couldn't register rx irq 0x%x. rc=%d\n",
1456 				scrq->irq, rc);
1457 			irq_dispose_mapping(scrq->irq);
1458 			goto req_rx_irq_failed;
1459 		}
1460 	}
1461 	return rc;
1462 
req_rx_irq_failed:
	for (j = 0; j < i; j++) {
		free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]);
		irq_dispose_mapping(adapter->rx_scrq[j]->irq);
	}
	i = adapter->req_tx_queues;
req_tx_irq_failed:
	for (j = 0; j < i; j++) {
		free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
		irq_dispose_mapping(adapter->tx_scrq[j]->irq);
	}
1472 	release_sub_crqs_no_irqs(adapter);
1473 	return rc;
1474 }
1475 
1476 static void init_sub_crqs(struct ibmvnic_adapter *adapter, int retry)
1477 {
1478 	struct device *dev = &adapter->vdev->dev;
1479 	struct ibmvnic_sub_crq_queue **allqueues;
1480 	int registered_queues = 0;
1481 	union ibmvnic_crq crq;
1482 	int total_queues;
1483 	int more = 0;
1484 	int i;
1485 
1486 	if (!retry) {
1487 		/* Sub-CRQ entries are 32 bytes long */
1488 		int entries_page = 4 * PAGE_SIZE / (sizeof(u64) * 4);
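		/* e.g. with a 4K PAGE_SIZE: 4 * 4096 / 32 = 512 entries,
		 * matching the four-page queue allocated per sub-CRQ in
		 * init_sub_crq_queue().
		 */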
1489 
1490 		if (adapter->min_tx_entries_per_subcrq > entries_page ||
1491 		    adapter->min_rx_add_entries_per_subcrq > entries_page) {
1492 			dev_err(dev, "Fatal, invalid entries per sub-crq\n");
1493 			goto allqueues_failed;
1494 		}
1495 
1496 		/* Use the smaller of the queried maximum and the number of
1497 		 * entries that fit in the four-page sub-CRQ allocation
1498 		 */
1499 		adapter->req_tx_entries_per_subcrq =
1500 		    adapter->max_tx_entries_per_subcrq > entries_page ?
1501 		    entries_page : adapter->max_tx_entries_per_subcrq;
1502 		adapter->req_rx_add_entries_per_subcrq =
1503 		    adapter->max_rx_add_entries_per_subcrq > entries_page ?
1504 		    entries_page : adapter->max_rx_add_entries_per_subcrq;
1505 
1506 		/* Choose the maximum number of queues supported by firmware */
1507 		adapter->req_tx_queues = adapter->max_tx_queues;
1508 		adapter->req_rx_queues = adapter->max_rx_queues;
1509 		adapter->req_rx_add_queues = adapter->max_rx_add_queues;
1510 
1511 		adapter->req_mtu = adapter->max_mtu;
1512 	}
1513 
1514 	total_queues = adapter->req_tx_queues + adapter->req_rx_queues;
1515 
1516 	allqueues = kcalloc(total_queues, sizeof(*allqueues), GFP_ATOMIC);
1517 	if (!allqueues)
1518 		goto allqueues_failed;
1519 
1520 	for (i = 0; i < total_queues; i++) {
1521 		allqueues[i] = init_sub_crq_queue(adapter);
1522 		if (!allqueues[i]) {
1523 			dev_warn(dev, "Couldn't allocate all sub-crqs\n");
1524 			break;
1525 		}
1526 		registered_queues++;
1527 	}
1528 
1529 	/* Make sure we were able to register the minimum number of queues */
1530 	if (registered_queues <
1531 	    adapter->min_tx_queues + adapter->min_rx_queues) {
1532 		dev_err(dev, "Fatal: Couldn't init min number of sub-crqs\n");
1533 		goto tx_failed;
1534 	}
1535 
1536 	/* Distribute the failed allocated queues*/
1537 	/* Distribute the shortfall of registered queues across rx and tx */
1538 	for (i = 0; i < total_queues - registered_queues + more; i++) {
1539 		netdev_dbg(adapter->netdev, "Reducing number of queues\n");
1540 		switch (i % 2) {
1541 			if (adapter->req_rx_queues > adapter->min_rx_queues)
1542 				adapter->req_rx_queues--;
1543 			else
1544 				more++;
1545 			break;
1546 		case 1:
1547 			if (adapter->req_tx_queues > adapter->min_tx_queues)
1548 				adapter->req_tx_queues--;
1549 			else
1550 				more++;
1551 			break;
1552 		}
1553 	}
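	/* Example (illustrative): 12 queues requested but only 10
	 * registered leaves a shortfall of 2, so the loop above trims one
	 * rx queue and one tx queue, honoring min_rx_queues/min_tx_queues
	 * and bumping 'more' to retry on the other side when one side has
	 * no slack left.
	 */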
1554 
1555 	adapter->tx_scrq = kcalloc(adapter->req_tx_queues,
1556 				   sizeof(*adapter->tx_scrq), GFP_ATOMIC);
1557 	if (!adapter->tx_scrq)
1558 		goto tx_failed;
1559 
1560 	for (i = 0; i < adapter->req_tx_queues; i++) {
1561 		adapter->tx_scrq[i] = allqueues[i];
1562 		adapter->tx_scrq[i]->pool_index = i;
1563 	}
1564 
1565 	adapter->rx_scrq = kcalloc(adapter->req_rx_queues,
1566 				   sizeof(*adapter->rx_scrq), GFP_ATOMIC);
1567 	if (!adapter->rx_scrq)
1568 		goto rx_failed;
1569 
1570 	for (i = 0; i < adapter->req_rx_queues; i++) {
1571 		adapter->rx_scrq[i] = allqueues[i + adapter->req_tx_queues];
1572 		adapter->rx_scrq[i]->scrq_num = i;
1573 	}
1574 
1575 	memset(&crq, 0, sizeof(crq));
1576 	crq.request_capability.first = IBMVNIC_CRQ_CMD;
1577 	crq.request_capability.cmd = REQUEST_CAPABILITY;
1578 
1579 	crq.request_capability.capability = cpu_to_be16(REQ_TX_QUEUES);
1580 	crq.request_capability.number = cpu_to_be64(adapter->req_tx_queues);
1581 	ibmvnic_send_crq(adapter, &crq);
1582 
1583 	crq.request_capability.capability = cpu_to_be16(REQ_RX_QUEUES);
1584 	crq.request_capability.number = cpu_to_be64(adapter->req_rx_queues);
1585 	ibmvnic_send_crq(adapter, &crq);
1586 
1587 	crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_QUEUES);
1588 	crq.request_capability.number = cpu_to_be64(adapter->req_rx_add_queues);
1589 	ibmvnic_send_crq(adapter, &crq);
1590 
1591 	crq.request_capability.capability =
1592 	    cpu_to_be16(REQ_TX_ENTRIES_PER_SUBCRQ);
1593 	crq.request_capability.number =
1594 	    cpu_to_be64(adapter->req_tx_entries_per_subcrq);
1595 	ibmvnic_send_crq(adapter, &crq);
1596 
1597 	crq.request_capability.capability =
1598 	    cpu_to_be16(REQ_RX_ADD_ENTRIES_PER_SUBCRQ);
1599 	crq.request_capability.number =
1600 	    cpu_to_be64(adapter->req_rx_add_entries_per_subcrq);
1601 	ibmvnic_send_crq(adapter, &crq);
1602 
1603 	crq.request_capability.capability = cpu_to_be16(REQ_MTU);
1604 	crq.request_capability.number = cpu_to_be64(adapter->req_mtu);
1605 	ibmvnic_send_crq(adapter, &crq);
1606 
1607 	if (adapter->netdev->flags & IFF_PROMISC) {
1608 		if (adapter->promisc_supported) {
1609 			crq.request_capability.capability =
1610 			    cpu_to_be16(PROMISC_REQUESTED);
1611 			crq.request_capability.number = cpu_to_be64(1);
1612 			ibmvnic_send_crq(adapter, &crq);
1613 		}
1614 	} else {
1615 		crq.request_capability.capability =
1616 		    cpu_to_be16(PROMISC_REQUESTED);
1617 		crq.request_capability.number = cpu_to_be64(0);
1618 		ibmvnic_send_crq(adapter, &crq);
1619 	}
1620 
1621 	kfree(allqueues);
1622 
1623 	return;
1624 
1625 rx_failed:
1626 	kfree(adapter->tx_scrq);
1627 	adapter->tx_scrq = NULL;
1628 tx_failed:
1629 	for (i = 0; i < registered_queues; i++)
1630 		release_sub_crq_queue(adapter, allqueues[i]);
1631 	kfree(allqueues);
1632 allqueues_failed:
1633 	ibmvnic_remove(adapter->vdev);
1634 }
1635 
1636 static int pending_scrq(struct ibmvnic_adapter *adapter,
1637 			struct ibmvnic_sub_crq_queue *scrq)
1638 {
1639 	union sub_crq *entry = &scrq->msgs[scrq->cur];
1640 
1641 	if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP || adapter->closing)
1642 		return 1;
1643 	else
1644 		return 0;
1645 }
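/* The leading 'first' byte of an entry doubles as its valid flag: the
 * producer sets IBMVNIC_CRQ_CMD_RSP when posting and the consumer
 * writes 0 back once the entry is handled (e.g. "next->rx_comp.first = 0"
 * in ibmvnic_poll()), releasing the slot.  pending_scrq() also returns 1
 * while the adapter is closing, apparently so that teardown paths are
 * not left waiting (inferred from the code, not documented).
 */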
1646 
1647 static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *adapter,
1648 					struct ibmvnic_sub_crq_queue *scrq)
1649 {
1650 	union sub_crq *entry;
1651 	unsigned long flags;
1652 
1653 	spin_lock_irqsave(&scrq->lock, flags);
1654 	entry = &scrq->msgs[scrq->cur];
1655 	if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP) {
1656 		if (++scrq->cur == scrq->size)
1657 			scrq->cur = 0;
1658 	} else {
1659 		entry = NULL;
1660 	}
1661 	spin_unlock_irqrestore(&scrq->lock, flags);
1662 
1663 	return entry;
1664 }
1665 
1666 static union ibmvnic_crq *ibmvnic_next_crq(struct ibmvnic_adapter *adapter)
1667 {
1668 	struct ibmvnic_crq_queue *queue = &adapter->crq;
1669 	union ibmvnic_crq *crq;
1670 
1671 	crq = &queue->msgs[queue->cur];
1672 	if (crq->generic.first & IBMVNIC_CRQ_CMD_RSP) {
1673 		if (++queue->cur == queue->size)
1674 			queue->cur = 0;
1675 	} else {
1676 		crq = NULL;
1677 	}
1678 
1679 	return crq;
1680 }
1681 
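     /* Hand one four-longword sub-CRQ descriptor to the hypervisor via
      * the H_SEND_SUB_CRQ hcall.  remote_handle identifies the target
      * sub-CRQ on the VNIC server side.
      */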
1682 static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
1683 		       union sub_crq *sub_crq)
1684 {
1685 	unsigned int ua = adapter->vdev->unit_address;
1686 	struct device *dev = &adapter->vdev->dev;
1687 	u64 *u64_crq = (u64 *)sub_crq;
1688 	int rc;
1689 
1690 	netdev_dbg(adapter->netdev,
1691 		   "Sending sCRQ %016lx: %016lx %016lx %016lx %016lx\n",
1692 		   (unsigned long int)cpu_to_be64(remote_handle),
1693 		   (unsigned long int)cpu_to_be64(u64_crq[0]),
1694 		   (unsigned long int)cpu_to_be64(u64_crq[1]),
1695 		   (unsigned long int)cpu_to_be64(u64_crq[2]),
1696 		   (unsigned long int)cpu_to_be64(u64_crq[3]));
1697 
1698 	/* Make sure the hypervisor sees the complete request */
1699 	mb();
1700 
1701 	rc = plpar_hcall_norets(H_SEND_SUB_CRQ, ua,
1702 				cpu_to_be64(remote_handle),
1703 				cpu_to_be64(u64_crq[0]),
1704 				cpu_to_be64(u64_crq[1]),
1705 				cpu_to_be64(u64_crq[2]),
1706 				cpu_to_be64(u64_crq[3]));
1707 
1708 	if (rc) {
1709 		if (rc == H_CLOSED)
1710 			dev_warn(dev, "CRQ Queue closed\n");
1711 		dev_err(dev, "Send error (rc=%d)\n", rc);
1712 	}
1713 
1714 	return rc;
1715 }
1716 
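     /* Submit a block of num_entries descriptors that is already DMA
      * mapped at ioba, using the H_SEND_SUB_CRQ_INDIRECT hcall.
      */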
1717 static int send_subcrq_indirect(struct ibmvnic_adapter *adapter,
1718 				u64 remote_handle, u64 ioba, u64 num_entries)
1719 {
1720 	unsigned int ua = adapter->vdev->unit_address;
1721 	struct device *dev = &adapter->vdev->dev;
1722 	int rc;
1723 
1724 	/* Make sure the hypervisor sees the complete request */
1725 	mb();
1726 	rc = plpar_hcall_norets(H_SEND_SUB_CRQ_INDIRECT, ua,
1727 				cpu_to_be64(remote_handle),
1728 				ioba, num_entries);
1729 
1730 	if (rc) {
1731 		if (rc == H_CLOSED)
1732 			dev_warn(dev, "CRQ Queue closed\n");
1733 		dev_err(dev, "Send (indirect) error (rc=%d)\n", rc);
1734 	}
1735 
1736 	return rc;
1737 }
1738 
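     /* Send a single two-longword command on the main CRQ via the
      * H_SEND_CRQ hcall.  H_CLOSED means the partner's queue is not
      * open (yet); any other non-zero rc is a genuine send failure.
      */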
1739 static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter,
1740 			    union ibmvnic_crq *crq)
1741 {
1742 	unsigned int ua = adapter->vdev->unit_address;
1743 	struct device *dev = &adapter->vdev->dev;
1744 	u64 *u64_crq = (u64 *)crq;
1745 	int rc;
1746 
1747 	netdev_dbg(adapter->netdev, "Sending CRQ: %016lx %016lx\n",
1748 		   (unsigned long int)cpu_to_be64(u64_crq[0]),
1749 		   (unsigned long int)cpu_to_be64(u64_crq[1]));
1750 
1751 	/* Make sure the hypervisor sees the complete request */
1752 	mb();
1753 
1754 	rc = plpar_hcall_norets(H_SEND_CRQ, ua,
1755 				cpu_to_be64(u64_crq[0]),
1756 				cpu_to_be64(u64_crq[1]));
1757 
1758 	if (rc) {
1759 		if (rc == H_CLOSED)
1760 			dev_warn(dev, "CRQ Queue closed\n");
1761 		dev_warn(dev, "Send error (rc=%d)\n", rc);
1762 	}
1763 
1764 	return rc;
1765 }
1766 
1767 static int ibmvnic_send_crq_init(struct ibmvnic_adapter *adapter)
1768 {
1769 	union ibmvnic_crq crq;
1770 
1771 	memset(&crq, 0, sizeof(crq));
1772 	crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
1773 	crq.generic.cmd = IBMVNIC_CRQ_INIT;
1774 	netdev_dbg(adapter->netdev, "Sending CRQ init\n");
1775 
1776 	return ibmvnic_send_crq(adapter, &crq);
1777 }
1778 
1779 static int ibmvnic_send_crq_init_complete(struct ibmvnic_adapter *adapter)
1780 {
1781 	union ibmvnic_crq crq;
1782 
1783 	memset(&crq, 0, sizeof(crq));
1784 	crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
1785 	crq.generic.cmd = IBMVNIC_CRQ_INIT_COMPLETE;
1786 	netdev_dbg(adapter->netdev, "Sending CRQ init complete\n");
1787 
1788 	return ibmvnic_send_crq(adapter, &crq);
1789 }
1790 
1791 static int send_version_xchg(struct ibmvnic_adapter *adapter)
1792 {
1793 	union ibmvnic_crq crq;
1794 
1795 	memset(&crq, 0, sizeof(crq));
1796 	crq.version_exchange.first = IBMVNIC_CRQ_CMD;
1797 	crq.version_exchange.cmd = VERSION_EXCHANGE;
1798 	crq.version_exchange.version = cpu_to_be16(ibmvnic_version);
1799 
1800 	return ibmvnic_send_crq(adapter, &crq);
1801 }
1802 
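     /* Build and send the LOGIN request.  The login buffer tells the
      * server how many tx/rx sub-CRQs were settled on and where to DMA
      * its response; the request is also queued on the inflight list so
      * the buffers can be reclaimed if the adapter goes away before the
      * response arrives.
      */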
1803 static void send_login(struct ibmvnic_adapter *adapter)
1804 {
1805 	struct ibmvnic_login_rsp_buffer *login_rsp_buffer;
1806 	struct ibmvnic_login_buffer *login_buffer;
1807 	struct ibmvnic_inflight_cmd *inflight_cmd;
1808 	struct device *dev = &adapter->vdev->dev;
1809 	dma_addr_t rsp_buffer_token;
1810 	dma_addr_t buffer_token;
1811 	size_t rsp_buffer_size;
1812 	union ibmvnic_crq crq;
1813 	unsigned long flags;
1814 	size_t buffer_size;
1815 	__be64 *tx_list_p;
1816 	__be64 *rx_list_p;
1817 	int i;
1818 
1819 	buffer_size =
1820 	    sizeof(struct ibmvnic_login_buffer) +
1821 	    sizeof(u64) * (adapter->req_tx_queues + adapter->req_rx_queues);
1822 
1823 	login_buffer = kmalloc(buffer_size, GFP_ATOMIC);
1824 	if (!login_buffer)
1825 		goto buf_alloc_failed;
1826 
1827 	buffer_token = dma_map_single(dev, login_buffer, buffer_size,
1828 				      DMA_TO_DEVICE);
1829 	if (dma_mapping_error(dev, buffer_token)) {
1830 		dev_err(dev, "Couldn't map login buffer\n");
1831 		goto buf_map_failed;
1832 	}
1833 
1834 	rsp_buffer_size = sizeof(struct ibmvnic_login_rsp_buffer) +
1835 			  sizeof(u64) * adapter->req_tx_queues +
1836 			  sizeof(u64) * adapter->req_rx_queues +
1837 			  sizeof(u64) * adapter->req_rx_queues +
1838 			  sizeof(u8) * IBMVNIC_TX_DESC_VERSIONS;
1839 
1840 	login_rsp_buffer = kmalloc(rsp_buffer_size, GFP_ATOMIC);
1841 	if (!login_rsp_buffer)
1842 		goto buf_rsp_alloc_failed;
1843 
1844 	rsp_buffer_token = dma_map_single(dev, login_rsp_buffer,
1845 					  rsp_buffer_size, DMA_FROM_DEVICE);
1846 	if (dma_mapping_error(dev, rsp_buffer_token)) {
1847 		dev_err(dev, "Couldn't map login rsp buffer\n");
1848 		goto buf_rsp_map_failed;
1849 	}
1850 	inflight_cmd = kmalloc(sizeof(*inflight_cmd), GFP_ATOMIC);
1851 	if (!inflight_cmd) {
1852 		dev_err(dev, "Couldn't allocate inflight_cmd\n");
1853 		goto inflight_alloc_failed;
1854 	}
1855 	adapter->login_buf = login_buffer;
1856 	adapter->login_buf_token = buffer_token;
1857 	adapter->login_buf_sz = buffer_size;
1858 	adapter->login_rsp_buf = login_rsp_buffer;
1859 	adapter->login_rsp_buf_token = rsp_buffer_token;
1860 	adapter->login_rsp_buf_sz = rsp_buffer_size;
1861 
1862 	login_buffer->len = cpu_to_be32(buffer_size);
1863 	login_buffer->version = cpu_to_be32(INITIAL_VERSION_LB);
1864 	login_buffer->num_txcomp_subcrqs = cpu_to_be32(adapter->req_tx_queues);
1865 	login_buffer->off_txcomp_subcrqs =
1866 	    cpu_to_be32(sizeof(struct ibmvnic_login_buffer));
1867 	login_buffer->num_rxcomp_subcrqs = cpu_to_be32(adapter->req_rx_queues);
1868 	login_buffer->off_rxcomp_subcrqs =
1869 	    cpu_to_be32(sizeof(struct ibmvnic_login_buffer) +
1870 			sizeof(u64) * adapter->req_tx_queues);
1871 	login_buffer->login_rsp_ioba = cpu_to_be32(rsp_buffer_token);
1872 	login_buffer->login_rsp_len = cpu_to_be32(rsp_buffer_size);
1873 
1874 	tx_list_p = (__be64 *)((char *)login_buffer +
1875 				      sizeof(struct ibmvnic_login_buffer));
1876 	rx_list_p = (__be64 *)((char *)login_buffer +
1877 				      sizeof(struct ibmvnic_login_buffer) +
1878 				      sizeof(u64) * adapter->req_tx_queues);
1879 
1880 	for (i = 0; i < adapter->req_tx_queues; i++) {
1881 		if (adapter->tx_scrq[i]) {
1882 			tx_list_p[i] = cpu_to_be64(adapter->tx_scrq[i]->
1883 						   crq_num);
1884 		}
1885 	}
1886 
1887 	for (i = 0; i < adapter->req_rx_queues; i++) {
1888 		if (adapter->rx_scrq[i]) {
1889 			rx_list_p[i] = cpu_to_be64(adapter->rx_scrq[i]->
1890 						   crq_num);
1891 		}
1892 	}
1893 
1894 	netdev_dbg(adapter->netdev, "Login Buffer:\n");
1895 	for (i = 0; i < (adapter->login_buf_sz - 1) / 8 + 1; i++) {
1896 		netdev_dbg(adapter->netdev, "%016lx\n",
1897 			   ((unsigned long int *)(adapter->login_buf))[i]);
1898 	}
1899 
1900 	memset(&crq, 0, sizeof(crq));
1901 	crq.login.first = IBMVNIC_CRQ_CMD;
1902 	crq.login.cmd = LOGIN;
1903 	crq.login.ioba = cpu_to_be32(buffer_token);
1904 	crq.login.len = cpu_to_be32(buffer_size);
1905 
1906 	memcpy(&inflight_cmd->crq, &crq, sizeof(crq));
1907 
1908 	spin_lock_irqsave(&adapter->inflight_lock, flags);
1909 	list_add_tail(&inflight_cmd->list, &adapter->inflight);
1910 	spin_unlock_irqrestore(&adapter->inflight_lock, flags);
1911 
1912 	ibmvnic_send_crq(adapter, &crq);
1913 
1914 	return;
1915 
1916 inflight_alloc_failed:
1917 	dma_unmap_single(dev, rsp_buffer_token, rsp_buffer_size,
1918 			 DMA_FROM_DEVICE);
1919 buf_rsp_map_failed:
1920 	kfree(login_rsp_buffer);
1921 buf_rsp_alloc_failed:
1922 	dma_unmap_single(dev, buffer_token, buffer_size, DMA_TO_DEVICE);
1923 buf_map_failed:
1924 	kfree(login_buffer);
1925 buf_alloc_failed:
1926 	return;
1927 }
1928 
1929 static void send_request_map(struct ibmvnic_adapter *adapter, dma_addr_t addr,
1930 			     u32 len, u8 map_id)
1931 {
1932 	union ibmvnic_crq crq;
1933 
1934 	memset(&crq, 0, sizeof(crq));
1935 	crq.request_map.first = IBMVNIC_CRQ_CMD;
1936 	crq.request_map.cmd = REQUEST_MAP;
1937 	crq.request_map.map_id = map_id;
1938 	crq.request_map.ioba = cpu_to_be32(addr);
1939 	crq.request_map.len = cpu_to_be32(len);
1940 	ibmvnic_send_crq(adapter, &crq);
1941 }
1942 
1943 static void send_request_unmap(struct ibmvnic_adapter *adapter, u8 map_id)
1944 {
1945 	union ibmvnic_crq crq;
1946 
1947 	memset(&crq, 0, sizeof(crq));
1948 	crq.request_unmap.first = IBMVNIC_CRQ_CMD;
1949 	crq.request_unmap.cmd = REQUEST_UNMAP;
1950 	crq.request_unmap.map_id = map_id;
1951 	ibmvnic_send_crq(adapter, &crq);
1952 }
1953 
1954 static void send_map_query(struct ibmvnic_adapter *adapter)
1955 {
1956 	union ibmvnic_crq crq;
1957 
1958 	memset(&crq, 0, sizeof(crq));
1959 	crq.query_map.first = IBMVNIC_CRQ_CMD;
1960 	crq.query_map.cmd = QUERY_MAP;
1961 	ibmvnic_send_crq(adapter, &crq);
1962 }
1963 
1964 /* Send a series of CRQs requesting various capabilities of the VNIC server */
1965 static void send_cap_queries(struct ibmvnic_adapter *adapter)
1966 {
1967 	union ibmvnic_crq crq;
1968 
1969 	atomic_set(&adapter->running_cap_queries, 0);
1970 	memset(&crq, 0, sizeof(crq));
1971 	crq.query_capability.first = IBMVNIC_CRQ_CMD;
1972 	crq.query_capability.cmd = QUERY_CAPABILITY;
1973 
1974 	crq.query_capability.capability = cpu_to_be16(MIN_TX_QUEUES);
1975 	atomic_inc(&adapter->running_cap_queries);
1976 	ibmvnic_send_crq(adapter, &crq);
1977 
1978 	crq.query_capability.capability = cpu_to_be16(MIN_RX_QUEUES);
1979 	atomic_inc(&adapter->running_cap_queries);
1980 	ibmvnic_send_crq(adapter, &crq);
1981 
1982 	crq.query_capability.capability = cpu_to_be16(MIN_RX_ADD_QUEUES);
1983 	atomic_inc(&adapter->running_cap_queries);
1984 	ibmvnic_send_crq(adapter, &crq);
1985 
1986 	crq.query_capability.capability = cpu_to_be16(MAX_TX_QUEUES);
1987 	atomic_inc(&adapter->running_cap_queries);
1988 	ibmvnic_send_crq(adapter, &crq);
1989 
1990 	crq.query_capability.capability = cpu_to_be16(MAX_RX_QUEUES);
1991 	atomic_inc(&adapter->running_cap_queries);
1992 	ibmvnic_send_crq(adapter, &crq);
1993 
1994 	crq.query_capability.capability = cpu_to_be16(MAX_RX_ADD_QUEUES);
1995 	atomic_inc(&adapter->running_cap_queries);
1996 	ibmvnic_send_crq(adapter, &crq);
1997 
1998 	crq.query_capability.capability =
1999 	    cpu_to_be16(MIN_TX_ENTRIES_PER_SUBCRQ);
2000 	atomic_inc(&adapter->running_cap_queries);
2001 	ibmvnic_send_crq(adapter, &crq);
2002 
2003 	crq.query_capability.capability =
2004 	    cpu_to_be16(MIN_RX_ADD_ENTRIES_PER_SUBCRQ);
2005 	atomic_inc(&adapter->running_cap_queries);
2006 	ibmvnic_send_crq(adapter, &crq);
2007 
2008 	crq.query_capability.capability =
2009 	    cpu_to_be16(MAX_TX_ENTRIES_PER_SUBCRQ);
2010 	atomic_inc(&adapter->running_cap_queries);
2011 	ibmvnic_send_crq(adapter, &crq);
2012 
2013 	crq.query_capability.capability =
2014 	    cpu_to_be16(MAX_RX_ADD_ENTRIES_PER_SUBCRQ);
2015 	atomic_inc(&adapter->running_cap_queries);
2016 	ibmvnic_send_crq(adapter, &crq);
2017 
2018 	crq.query_capability.capability = cpu_to_be16(TCP_IP_OFFLOAD);
2019 	atomic_inc(&adapter->running_cap_queries);
2020 	ibmvnic_send_crq(adapter, &crq);
2021 
2022 	crq.query_capability.capability = cpu_to_be16(PROMISC_SUPPORTED);
2023 	atomic_inc(&adapter->running_cap_queries);
2024 	ibmvnic_send_crq(adapter, &crq);
2025 
2026 	crq.query_capability.capability = cpu_to_be16(MIN_MTU);
2027 	atomic_inc(&adapter->running_cap_queries);
2028 	ibmvnic_send_crq(adapter, &crq);
2029 
2030 	crq.query_capability.capability = cpu_to_be16(MAX_MTU);
2031 	atomic_inc(&adapter->running_cap_queries);
2032 	ibmvnic_send_crq(adapter, &crq);
2033 
2034 	crq.query_capability.capability = cpu_to_be16(MAX_MULTICAST_FILTERS);
2035 	atomic_inc(&adapter->running_cap_queries);
2036 	ibmvnic_send_crq(adapter, &crq);
2037 
2038 	crq.query_capability.capability = cpu_to_be16(VLAN_HEADER_INSERTION);
2039 	atomic_inc(&adapter->running_cap_queries);
2040 	ibmvnic_send_crq(adapter, &crq);
2041 
2042 	crq.query_capability.capability = cpu_to_be16(MAX_TX_SG_ENTRIES);
2043 	atomic_inc(&adapter->running_cap_queries);
2044 	ibmvnic_send_crq(adapter, &crq);
2045 
2046 	crq.query_capability.capability = cpu_to_be16(RX_SG_SUPPORTED);
2047 	atomic_inc(&adapter->running_cap_queries);
2048 	ibmvnic_send_crq(adapter, &crq);
2049 
2050 	crq.query_capability.capability = cpu_to_be16(OPT_TX_COMP_SUB_QUEUES);
2051 	atomic_inc(&adapter->running_cap_queries);
2052 	ibmvnic_send_crq(adapter, &crq);
2053 
2054 	crq.query_capability.capability = cpu_to_be16(OPT_RX_COMP_QUEUES);
2055 	atomic_inc(&adapter->running_cap_queries);
2056 	ibmvnic_send_crq(adapter, &crq);
2057 
2058 	crq.query_capability.capability =
2059 			cpu_to_be16(OPT_RX_BUFADD_Q_PER_RX_COMP_Q);
2060 	atomic_inc(&adapter->running_cap_queries);
2061 	ibmvnic_send_crq(adapter, &crq);
2062 
2063 	crq.query_capability.capability =
2064 			cpu_to_be16(OPT_TX_ENTRIES_PER_SUBCRQ);
2065 	atomic_inc(&adapter->running_cap_queries);
2066 	ibmvnic_send_crq(adapter, &crq);
2067 
2068 	crq.query_capability.capability =
2069 			cpu_to_be16(OPT_RXBA_ENTRIES_PER_SUBCRQ);
2070 	atomic_inc(&adapter->running_cap_queries);
2071 	ibmvnic_send_crq(adapter, &crq);
2072 
2073 	crq.query_capability.capability = cpu_to_be16(TX_RX_DESC_REQ);
2074 	atomic_inc(&adapter->running_cap_queries);
2075 	ibmvnic_send_crq(adapter, &crq);
2076 }
2077 
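     /* Process the QUERY_IP_OFFLOAD response: log the capabilities the
      * server reported, derive the netdev feature flags we can honor,
      * and send CONTROL_IP_OFFLOAD to enable the chosen subset.
      */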
2078 static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
2079 {
2080 	struct device *dev = &adapter->vdev->dev;
2081 	struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
2082 	union ibmvnic_crq crq;
2083 	int i;
2084 
2085 	dma_unmap_single(dev, adapter->ip_offload_tok,
2086 			 sizeof(adapter->ip_offload_buf), DMA_FROM_DEVICE);
2087 
2088 	netdev_dbg(adapter->netdev, "Query IP Offload Buffer:\n");
2089 	for (i = 0; i < (sizeof(adapter->ip_offload_buf) - 1) / 8 + 1; i++)
2090 		netdev_dbg(adapter->netdev, "%016lx\n",
2091 			   ((unsigned long int *)(buf))[i]);
2092 
2093 	netdev_dbg(adapter->netdev, "ipv4_chksum = %d\n", buf->ipv4_chksum);
2094 	netdev_dbg(adapter->netdev, "ipv6_chksum = %d\n", buf->ipv6_chksum);
2095 	netdev_dbg(adapter->netdev, "tcp_ipv4_chksum = %d\n",
2096 		   buf->tcp_ipv4_chksum);
2097 	netdev_dbg(adapter->netdev, "tcp_ipv6_chksum = %d\n",
2098 		   buf->tcp_ipv6_chksum);
2099 	netdev_dbg(adapter->netdev, "udp_ipv4_chksum = %d\n",
2100 		   buf->udp_ipv4_chksum);
2101 	netdev_dbg(adapter->netdev, "udp_ipv6_chksum = %d\n",
2102 		   buf->udp_ipv6_chksum);
2103 	netdev_dbg(adapter->netdev, "large_tx_ipv4 = %d\n",
2104 		   buf->large_tx_ipv4);
2105 	netdev_dbg(adapter->netdev, "large_tx_ipv6 = %d\n",
2106 		   buf->large_tx_ipv6);
2107 	netdev_dbg(adapter->netdev, "large_rx_ipv4 = %d\n",
2108 		   buf->large_rx_ipv4);
2109 	netdev_dbg(adapter->netdev, "large_rx_ipv6 = %d\n",
2110 		   buf->large_rx_ipv6);
2111 	netdev_dbg(adapter->netdev, "max_ipv4_hdr_sz = %d\n",
2112 		   buf->max_ipv4_header_size);
2113 	netdev_dbg(adapter->netdev, "max_ipv6_hdr_sz = %d\n",
2114 		   buf->max_ipv6_header_size);
2115 	netdev_dbg(adapter->netdev, "max_tcp_hdr_size = %d\n",
2116 		   buf->max_tcp_header_size);
2117 	netdev_dbg(adapter->netdev, "max_udp_hdr_size = %d\n",
2118 		   buf->max_udp_header_size);
2119 	netdev_dbg(adapter->netdev, "max_large_tx_size = %d\n",
2120 		   buf->max_large_tx_size);
2121 	netdev_dbg(adapter->netdev, "max_large_rx_size = %d\n",
2122 		   buf->max_large_rx_size);
2123 	netdev_dbg(adapter->netdev, "ipv6_ext_hdr = %d\n",
2124 		   buf->ipv6_extension_header);
2125 	netdev_dbg(adapter->netdev, "tcp_pseudosum_req = %d\n",
2126 		   buf->tcp_pseudosum_req);
2127 	netdev_dbg(adapter->netdev, "num_ipv6_ext_hd = %d\n",
2128 		   buf->num_ipv6_ext_headers);
2129 	netdev_dbg(adapter->netdev, "off_ipv6_ext_hd = %d\n",
2130 		   buf->off_ipv6_ext_headers);
2131 
2132 	adapter->ip_offload_ctrl_tok =
2133 	    dma_map_single(dev, &adapter->ip_offload_ctrl,
2134 			   sizeof(adapter->ip_offload_ctrl), DMA_TO_DEVICE);
2135 
2136 	if (dma_mapping_error(dev, adapter->ip_offload_ctrl_tok)) {
2137 		dev_err(dev, "Couldn't map ip offload control buffer\n");
2138 		return;
2139 	}
2140 
2141 	adapter->ip_offload_ctrl.version = cpu_to_be32(INITIAL_VERSION_IOB);
2142 	adapter->ip_offload_ctrl.tcp_ipv4_chksum = buf->tcp_ipv4_chksum;
2143 	adapter->ip_offload_ctrl.udp_ipv4_chksum = buf->udp_ipv4_chksum;
2144 	adapter->ip_offload_ctrl.tcp_ipv6_chksum = buf->tcp_ipv6_chksum;
2145 	adapter->ip_offload_ctrl.udp_ipv6_chksum = buf->udp_ipv6_chksum;
2146 
2147 	/* large_tx/rx disabled for now, additional features needed */
2148 	adapter->ip_offload_ctrl.large_tx_ipv4 = 0;
2149 	adapter->ip_offload_ctrl.large_tx_ipv6 = 0;
2150 	adapter->ip_offload_ctrl.large_rx_ipv4 = 0;
2151 	adapter->ip_offload_ctrl.large_rx_ipv6 = 0;
2152 
2153 	adapter->netdev->features = NETIF_F_GSO;
2154 
2155 	if (buf->tcp_ipv4_chksum || buf->udp_ipv4_chksum)
2156 		adapter->netdev->features |= NETIF_F_IP_CSUM;
2157 
2158 	if (buf->tcp_ipv6_chksum || buf->udp_ipv6_chksum)
2159 		adapter->netdev->features |= NETIF_F_IPV6_CSUM;
2160 
2161 	if ((adapter->netdev->features &
2162 	    (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)))
2163 		adapter->netdev->features |= NETIF_F_RXCSUM;
2164 
2165 	memset(&crq, 0, sizeof(crq));
2166 	crq.control_ip_offload.first = IBMVNIC_CRQ_CMD;
2167 	crq.control_ip_offload.cmd = CONTROL_IP_OFFLOAD;
2168 	crq.control_ip_offload.len =
2169 	    cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
2170 	crq.control_ip_offload.ioba = cpu_to_be32(adapter->ip_offload_ctrl_tok);
2171 	ibmvnic_send_crq(adapter, &crq);
2172 }
2173 
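     /* Completion handler for REQUEST_ERROR_INFO: locate the matching
      * error buffer by error_id, dump the detailed error data, and
      * release the buffer.
      */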
2174 static void handle_error_info_rsp(union ibmvnic_crq *crq,
2175 				  struct ibmvnic_adapter *adapter)
2176 {
2177 	struct device *dev = &adapter->vdev->dev;
2178 	struct ibmvnic_error_buff *error_buff, *tmp;
2179 	unsigned long flags;
2180 	bool found = false;
2181 	int i;
2182 
2183 	if (crq->request_error_rsp.rc.code) {
2184 		dev_info(dev, "Request Error Rsp returned with rc=%x\n",
2185 			 crq->request_error_rsp.rc.code);
2186 		return;
2187 	}
2188 
2189 	spin_lock_irqsave(&adapter->error_list_lock, flags);
2190 	list_for_each_entry_safe(error_buff, tmp, &adapter->errors, list)
2191 		if (error_buff->error_id == crq->request_error_rsp.error_id) {
2192 			found = true;
2193 			list_del(&error_buff->list);
2194 			break;
2195 		}
2196 	spin_unlock_irqrestore(&adapter->error_list_lock, flags);
2197 
2198 	if (!found) {
2199 		dev_err(dev, "Couldn't find error id %x\n",
2200 			be32_to_cpu(crq->request_error_rsp.error_id));
2201 		return;
2202 	}
2203 
2204 	dev_err(dev, "Detailed info for error id %x:",
2205 		be32_to_cpu(crq->request_error_rsp.error_id));
2206 
2207 	for (i = 0; i < error_buff->len; i++) {
2208 		pr_cont("%02x", (int)error_buff->buff[i]);
2209 		if (i % 8 == 7)
2210 			pr_cont(" ");
2211 	}
2212 	pr_cont("\n");
2213 
2214 	dma_unmap_single(dev, error_buff->dma, error_buff->len,
2215 			 DMA_FROM_DEVICE);
2216 	kfree(error_buff->buff);
2217 	kfree(error_buff);
2218 }
2219 
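     /* The server replied with the size of a firmware dump; allocate
      * and map a buffer of that size, then send REQUEST_DUMP to have
      * the dump written into it.
      */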
2220 static void handle_dump_size_rsp(union ibmvnic_crq *crq,
2221 				 struct ibmvnic_adapter *adapter)
2222 {
2223 	int len = be32_to_cpu(crq->request_dump_size_rsp.len);
2224 	struct ibmvnic_inflight_cmd *inflight_cmd;
2225 	struct device *dev = &adapter->vdev->dev;
2226 	union ibmvnic_crq newcrq;
2227 	unsigned long flags;
2228 
2229 	/* allocate and map buffer */
2230 	adapter->dump_data = kmalloc(len, GFP_KERNEL);
2231 	if (!adapter->dump_data) {
2232 		complete(&adapter->fw_done);
2233 		return;
2234 	}
2235 
2236 	adapter->dump_data_token = dma_map_single(dev, adapter->dump_data, len,
2237 						  DMA_FROM_DEVICE);
2238 
2239 	if (dma_mapping_error(dev, adapter->dump_data_token)) {
2240 		if (!firmware_has_feature(FW_FEATURE_CMO))
2241 			dev_err(dev, "Couldn't map dump data\n");
2242 		kfree(adapter->dump_data);
2243 		complete(&adapter->fw_done);
2244 		return;
2245 	}
2246 
2247 	inflight_cmd = kmalloc(sizeof(*inflight_cmd), GFP_ATOMIC);
2248 	if (!inflight_cmd) {
2249 		dma_unmap_single(dev, adapter->dump_data_token, len,
2250 				 DMA_FROM_DEVICE);
2251 		kfree(adapter->dump_data);
2252 		complete(&adapter->fw_done);
2253 		return;
2254 	}
2255 
2256 	memset(&newcrq, 0, sizeof(newcrq));
2257 	newcrq.request_dump.first = IBMVNIC_CRQ_CMD;
2258 	newcrq.request_dump.cmd = REQUEST_DUMP;
2259 	newcrq.request_dump.ioba = cpu_to_be32(adapter->dump_data_token);
2260 	newcrq.request_dump.len = cpu_to_be32(adapter->dump_data_size);
2261 
2262 	memcpy(&inflight_cmd->crq, &newcrq, sizeof(newcrq));
2263 
2264 	spin_lock_irqsave(&adapter->inflight_lock, flags);
2265 	list_add_tail(&inflight_cmd->list, &adapter->inflight);
2266 	spin_unlock_irqrestore(&adapter->inflight_lock, flags);
2267 
2268 	ibmvnic_send_crq(adapter, &newcrq);
2269 }
2270 
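     /* On an asynchronous error indication, allocate and map a buffer
      * for the detailed error data and send REQUEST_ERROR_INFO to
      * retrieve it; handle_error_info_rsp() completes the round trip.
      */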
2271 static void handle_error_indication(union ibmvnic_crq *crq,
2272 				    struct ibmvnic_adapter *adapter)
2273 {
2274 	int detail_len = be32_to_cpu(crq->error_indication.detail_error_sz);
2275 	struct ibmvnic_inflight_cmd *inflight_cmd;
2276 	struct device *dev = &adapter->vdev->dev;
2277 	struct ibmvnic_error_buff *error_buff;
2278 	union ibmvnic_crq new_crq;
2279 	unsigned long flags;
2280 
2281 	dev_err(dev, "Firmware reports %serror id %x, cause %d\n",
2282 		crq->error_indication.
2283 		    flags & IBMVNIC_FATAL_ERROR ? "FATAL " : "",
2284 		be32_to_cpu(crq->error_indication.error_id),
2285 		be16_to_cpu(crq->error_indication.error_cause));
2286 
2287 	error_buff = kmalloc(sizeof(*error_buff), GFP_ATOMIC);
2288 	if (!error_buff)
2289 		return;
2290 
2291 	error_buff->buff = kmalloc(detail_len, GFP_ATOMIC);
2292 	if (!error_buff->buff) {
2293 		kfree(error_buff);
2294 		return;
2295 	}
2296 
2297 	error_buff->dma = dma_map_single(dev, error_buff->buff, detail_len,
2298 					 DMA_FROM_DEVICE);
2299 	if (dma_mapping_error(dev, error_buff->dma)) {
2300 		if (!firmware_has_feature(FW_FEATURE_CMO))
2301 			dev_err(dev, "Couldn't map error buffer\n");
2302 		kfree(error_buff->buff);
2303 		kfree(error_buff);
2304 		return;
2305 	}
2306 
2307 	inflight_cmd = kmalloc(sizeof(*inflight_cmd), GFP_ATOMIC);
2308 	if (!inflight_cmd) {
2309 		dma_unmap_single(dev, error_buff->dma, detail_len,
2310 				 DMA_FROM_DEVICE);
2311 		kfree(error_buff->buff);
2312 		kfree(error_buff);
2313 		return;
2314 	}
2315 
2316 	error_buff->len = detail_len;
2317 	error_buff->error_id = crq->error_indication.error_id;
2318 
2319 	spin_lock_irqsave(&adapter->error_list_lock, flags);
2320 	list_add_tail(&error_buff->list, &adapter->errors);
2321 	spin_unlock_irqrestore(&adapter->error_list_lock, flags);
2322 
2323 	memset(&new_crq, 0, sizeof(new_crq));
2324 	new_crq.request_error_info.first = IBMVNIC_CRQ_CMD;
2325 	new_crq.request_error_info.cmd = REQUEST_ERROR_INFO;
2326 	new_crq.request_error_info.ioba = cpu_to_be32(error_buff->dma);
2327 	new_crq.request_error_info.len = cpu_to_be32(detail_len);
2328 	new_crq.request_error_info.error_id = crq->error_indication.error_id;
2329 
2330 	memcpy(&inflight_cmd->crq, &new_crq, sizeof(new_crq));
2331 
2332 	spin_lock_irqsave(&adapter->inflight_lock, flags);
2333 	list_add_tail(&inflight_cmd->list, &adapter->inflight);
2334 	spin_unlock_irqrestore(&adapter->inflight_lock, flags);
2335 
2336 	ibmvnic_send_crq(adapter, &new_crq);
2337 }
2338 
2339 static void handle_change_mac_rsp(union ibmvnic_crq *crq,
2340 				  struct ibmvnic_adapter *adapter)
2341 {
2342 	struct net_device *netdev = adapter->netdev;
2343 	struct device *dev = &adapter->vdev->dev;
2344 	long rc;
2345 
2346 	rc = crq->change_mac_addr_rsp.rc.code;
2347 	if (rc) {
2348 		dev_err(dev, "Error %ld in CHANGE_MAC_ADDR_RSP\n", rc);
2349 		return;
2350 	}
2351 	memcpy(netdev->dev_addr, &crq->change_mac_addr_rsp.mac_addr[0],
2352 	       ETH_ALEN);
2353 }
2354 
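     /* Record the server's verdict on one REQUEST_CAPABILITY.  On
      * partial success the sub-CRQs are released and renegotiated with
      * the value the server offered.  Once all seven responses are in,
      * the IP offload query is kicked off.
      */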
2355 static void handle_request_cap_rsp(union ibmvnic_crq *crq,
2356 				   struct ibmvnic_adapter *adapter)
2357 {
2358 	struct device *dev = &adapter->vdev->dev;
2359 	u64 *req_value;
2360 	char *name;
2361 
2362 	switch (be16_to_cpu(crq->request_capability_rsp.capability)) {
2363 	case REQ_TX_QUEUES:
2364 		req_value = &adapter->req_tx_queues;
2365 		name = "tx";
2366 		break;
2367 	case REQ_RX_QUEUES:
2368 		req_value = &adapter->req_rx_queues;
2369 		name = "rx";
2370 		break;
2371 	case REQ_RX_ADD_QUEUES:
2372 		req_value = &adapter->req_rx_add_queues;
2373 		name = "rx_add";
2374 		break;
2375 	case REQ_TX_ENTRIES_PER_SUBCRQ:
2376 		req_value = &adapter->req_tx_entries_per_subcrq;
2377 		name = "tx_entries_per_subcrq";
2378 		break;
2379 	case REQ_RX_ADD_ENTRIES_PER_SUBCRQ:
2380 		req_value = &adapter->req_rx_add_entries_per_subcrq;
2381 		name = "rx_add_entries_per_subcrq";
2382 		break;
2383 	case REQ_MTU:
2384 		req_value = &adapter->req_mtu;
2385 		name = "mtu";
2386 		break;
2387 	case PROMISC_REQUESTED:
2388 		req_value = &adapter->promisc;
2389 		name = "promisc";
2390 		break;
2391 	default:
2392 		dev_err(dev, "Got invalid cap request rsp %d\n",
2393 			be16_to_cpu(crq->request_capability.capability));
2394 		return;
2395 	}
2396 
2397 	switch (crq->request_capability_rsp.rc.code) {
2398 	case SUCCESS:
2399 		break;
2400 	case PARTIALSUCCESS:
2401 		dev_info(dev, "req=%lld, rsp=%lld in %s queue, retrying.\n",
2402 			 *req_value,
2403 			 (long long)be64_to_cpu(crq->request_capability_rsp.
2404 						number), name);
2405 		release_sub_crqs_no_irqs(adapter);
2406 		*req_value = be64_to_cpu(crq->request_capability_rsp.number);
2407 		init_sub_crqs(adapter, 1);
2408 		return;
2409 	default:
2410 		dev_err(dev, "Error %d in request cap rsp\n",
2411 			crq->request_capability_rsp.rc.code);
2412 		return;
2413 	}
2414 
2415 	/* Done receiving requested capabilities, query IP offload support */
2416 	if (++adapter->requested_caps == 7) {
2417 		union ibmvnic_crq newcrq;
2418 		int buf_sz = sizeof(struct ibmvnic_query_ip_offload_buffer);
2419 		struct ibmvnic_query_ip_offload_buffer *ip_offload_buf =
2420 		    &adapter->ip_offload_buf;
2421 
2422 		adapter->ip_offload_tok = dma_map_single(dev, ip_offload_buf,
2423 							 buf_sz,
2424 							 DMA_FROM_DEVICE);
2425 
2426 		if (dma_mapping_error(dev, adapter->ip_offload_tok)) {
2427 			if (!firmware_has_feature(FW_FEATURE_CMO))
2428 				dev_err(dev, "Couldn't map offload buffer\n");
2429 			return;
2430 		}
2431 
2432 		memset(&newcrq, 0, sizeof(newcrq));
2433 		newcrq.query_ip_offload.first = IBMVNIC_CRQ_CMD;
2434 		newcrq.query_ip_offload.cmd = QUERY_IP_OFFLOAD;
2435 		newcrq.query_ip_offload.len = cpu_to_be32(buf_sz);
2436 		newcrq.query_ip_offload.ioba =
2437 		    cpu_to_be32(adapter->ip_offload_tok);
2438 
2439 		ibmvnic_send_crq(adapter, &newcrq);
2440 	}
2441 }
2442 
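     /* Process the LOGIN response.  A non-zero return code means the
      * server could not satisfy the requested queue counts, so flag a
      * renegotiation; otherwise sanity-check the response against the
      * login buffer before letting initialization proceed.
      */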
2443 static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
2444 			    struct ibmvnic_adapter *adapter)
2445 {
2446 	struct device *dev = &adapter->vdev->dev;
2447 	struct ibmvnic_login_rsp_buffer *login_rsp = adapter->login_rsp_buf;
2448 	struct ibmvnic_login_buffer *login = adapter->login_buf;
2449 	union ibmvnic_crq crq;
2450 	int i;
2451 
2452 	dma_unmap_single(dev, adapter->login_buf_token, adapter->login_buf_sz,
2453 			 DMA_TO_DEVICE);
2454 	dma_unmap_single(dev, adapter->login_rsp_buf_token,
2455 			 adapter->login_rsp_buf_sz, DMA_FROM_DEVICE);
2456 
2457 	/* If the number of queues requested can't be allocated by the
2458 	 * server, the login response will return with code 1. We will need
2459 	 * to resend the login buffer with fewer queues requested.
2460 	 */
2461 	if (login_rsp_crq->generic.rc.code) {
2462 		adapter->renegotiate = true;
2463 		complete(&adapter->init_done);
2464 		return 0;
2465 	}
2466 
2467 	netdev_dbg(adapter->netdev, "Login Response Buffer:\n");
2468 	for (i = 0; i < (adapter->login_rsp_buf_sz - 1) / 8 + 1; i++) {
2469 		netdev_dbg(adapter->netdev, "%016lx\n",
2470 			   ((unsigned long int *)(adapter->login_rsp_buf))[i]);
2471 	}
2472 
2473 	/* Sanity checks */
2474 	if (login->num_txcomp_subcrqs != login_rsp->num_txsubm_subcrqs ||
2475 	    (be32_to_cpu(login->num_rxcomp_subcrqs) *
2476 	     adapter->req_rx_add_queues !=
2477 	     be32_to_cpu(login_rsp->num_rxadd_subcrqs))) {
2478 		dev_err(dev, "FATAL: Inconsistent login and login rsp\n");
2479 		ibmvnic_remove(adapter->vdev);
2480 		return -EIO;
2481 	}
2482 	complete(&adapter->init_done);
2483 
2484 	memset(&crq, 0, sizeof(crq));
2485 	crq.request_ras_comp_num.first = IBMVNIC_CRQ_CMD;
2486 	crq.request_ras_comp_num.cmd = REQUEST_RAS_COMP_NUM;
2487 	ibmvnic_send_crq(adapter, &crq);
2488 
2489 	return 0;
2490 }
2491 
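     /* Completion handler for REQUEST_MAP: on failure, retire the
      * map_id and clear it from whichever tx/rx pool long term buffer
      * was using it, then wake anyone waiting on fw_done.
      */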
2492 static void handle_request_map_rsp(union ibmvnic_crq *crq,
2493 				   struct ibmvnic_adapter *adapter)
2494 {
2495 	struct device *dev = &adapter->vdev->dev;
2496 	u8 map_id = crq->request_map_rsp.map_id;
2497 	int tx_subcrqs;
2498 	int rx_subcrqs;
2499 	long rc;
2500 	int i;
2501 
2502 	tx_subcrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
2503 	rx_subcrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
2504 
2505 	rc = crq->request_map_rsp.rc.code;
2506 	if (rc) {
2507 		dev_err(dev, "Error %ld in REQUEST_MAP_RSP\n", rc);
2508 		adapter->map_id--;
2509 		/* need to find and zero tx/rx_pool map_id */
2510 		for (i = 0; i < tx_subcrqs; i++) {
2511 			if (adapter->tx_pool[i].long_term_buff.map_id == map_id)
2512 				adapter->tx_pool[i].long_term_buff.map_id = 0;
2513 		}
2514 		for (i = 0; i < rx_subcrqs; i++) {
2515 			if (adapter->rx_pool[i].long_term_buff.map_id == map_id)
2516 				adapter->rx_pool[i].long_term_buff.map_id = 0;
2517 		}
2518 	}
2519 	complete(&adapter->fw_done);
2520 }
2521 
2522 static void handle_request_unmap_rsp(union ibmvnic_crq *crq,
2523 				     struct ibmvnic_adapter *adapter)
2524 {
2525 	struct device *dev = &adapter->vdev->dev;
2526 	long rc;
2527 
2528 	rc = crq->request_unmap_rsp.rc.code;
2529 	if (rc)
2530 		dev_err(dev, "Error %ld in REQUEST_UNMAP_RSP\n", rc);
2531 }
2532 
2533 static void handle_query_map_rsp(union ibmvnic_crq *crq,
2534 				 struct ibmvnic_adapter *adapter)
2535 {
2536 	struct net_device *netdev = adapter->netdev;
2537 	struct device *dev = &adapter->vdev->dev;
2538 	long rc;
2539 
2540 	rc = crq->query_map_rsp.rc.code;
2541 	if (rc) {
2542 		dev_err(dev, "Error %ld in QUERY_MAP_RSP\n", rc);
2543 		return;
2544 	}
2545 	netdev_dbg(netdev, "page_size = %d\ntot_pages = %d\nfree_pages = %d\n",
2546 		   crq->query_map_rsp.page_size, crq->query_map_rsp.tot_pages,
2547 		   crq->query_map_rsp.free_pages);
2548 }
2549 
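     /* Record one QUERY_CAPABILITY response in the adapter structure.
      * When the outstanding-query counter drops to zero, every
      * capability has been collected and sub-CRQ allocation can begin.
      */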
2550 static void handle_query_cap_rsp(union ibmvnic_crq *crq,
2551 				 struct ibmvnic_adapter *adapter)
2552 {
2553 	struct net_device *netdev = adapter->netdev;
2554 	struct device *dev = &adapter->vdev->dev;
2555 	long rc;
2556 
2557 	atomic_dec(&adapter->running_cap_queries);
2558 	netdev_dbg(netdev, "Outstanding queries: %d\n",
2559 		   atomic_read(&adapter->running_cap_queries));
2560 	rc = crq->query_capability.rc.code;
2561 	if (rc) {
2562 		dev_err(dev, "Error %ld in QUERY_CAP_RSP\n", rc);
2563 		goto out;
2564 	}
2565 
2566 	switch (be16_to_cpu(crq->query_capability.capability)) {
2567 	case MIN_TX_QUEUES:
2568 		adapter->min_tx_queues =
2569 		    be64_to_cpu(crq->query_capability.number);
2570 		netdev_dbg(netdev, "min_tx_queues = %lld\n",
2571 			   adapter->min_tx_queues);
2572 		break;
2573 	case MIN_RX_QUEUES:
2574 		adapter->min_rx_queues =
2575 		    be64_to_cpu(crq->query_capability.number);
2576 		netdev_dbg(netdev, "min_rx_queues = %lld\n",
2577 			   adapter->min_rx_queues);
2578 		break;
2579 	case MIN_RX_ADD_QUEUES:
2580 		adapter->min_rx_add_queues =
2581 		    be64_to_cpu(crq->query_capability.number);
2582 		netdev_dbg(netdev, "min_rx_add_queues = %lld\n",
2583 			   adapter->min_rx_add_queues);
2584 		break;
2585 	case MAX_TX_QUEUES:
2586 		adapter->max_tx_queues =
2587 		    be64_to_cpu(crq->query_capability.number);
2588 		netdev_dbg(netdev, "max_tx_queues = %lld\n",
2589 			   adapter->max_tx_queues);
2590 		break;
2591 	case MAX_RX_QUEUES:
2592 		adapter->max_rx_queues =
2593 		    be64_to_cpu(crq->query_capability.number);
2594 		netdev_dbg(netdev, "max_rx_queues = %lld\n",
2595 			   adapter->max_rx_queues);
2596 		break;
2597 	case MAX_RX_ADD_QUEUES:
2598 		adapter->max_rx_add_queues =
2599 		    be64_to_cpu(crq->query_capability.number);
2600 		netdev_dbg(netdev, "max_rx_add_queues = %lld\n",
2601 			   adapter->max_rx_add_queues);
2602 		break;
2603 	case MIN_TX_ENTRIES_PER_SUBCRQ:
2604 		adapter->min_tx_entries_per_subcrq =
2605 		    be64_to_cpu(crq->query_capability.number);
2606 		netdev_dbg(netdev, "min_tx_entries_per_subcrq = %lld\n",
2607 			   adapter->min_tx_entries_per_subcrq);
2608 		break;
2609 	case MIN_RX_ADD_ENTRIES_PER_SUBCRQ:
2610 		adapter->min_rx_add_entries_per_subcrq =
2611 		    be64_to_cpu(crq->query_capability.number);
2612 		netdev_dbg(netdev, "min_rx_add_entrs_per_subcrq = %lld\n",
2613 			   adapter->min_rx_add_entries_per_subcrq);
2614 		break;
2615 	case MAX_TX_ENTRIES_PER_SUBCRQ:
2616 		adapter->max_tx_entries_per_subcrq =
2617 		    be64_to_cpu(crq->query_capability.number);
2618 		netdev_dbg(netdev, "max_tx_entries_per_subcrq = %lld\n",
2619 			   adapter->max_tx_entries_per_subcrq);
2620 		break;
2621 	case MAX_RX_ADD_ENTRIES_PER_SUBCRQ:
2622 		adapter->max_rx_add_entries_per_subcrq =
2623 		    be64_to_cpu(crq->query_capability.number);
2624 		netdev_dbg(netdev, "max_rx_add_entrs_per_subcrq = %lld\n",
2625 			   adapter->max_rx_add_entries_per_subcrq);
2626 		break;
2627 	case TCP_IP_OFFLOAD:
2628 		adapter->tcp_ip_offload =
2629 		    be64_to_cpu(crq->query_capability.number);
2630 		netdev_dbg(netdev, "tcp_ip_offload = %lld\n",
2631 			   adapter->tcp_ip_offload);
2632 		break;
2633 	case PROMISC_SUPPORTED:
2634 		adapter->promisc_supported =
2635 		    be64_to_cpu(crq->query_capability.number);
2636 		netdev_dbg(netdev, "promisc_supported = %lld\n",
2637 			   adapter->promisc_supported);
2638 		break;
2639 	case MIN_MTU:
2640 		adapter->min_mtu = be64_to_cpu(crq->query_capability.number);
2641 		netdev_dbg(netdev, "min_mtu = %lld\n", adapter->min_mtu);
2642 		break;
2643 	case MAX_MTU:
2644 		adapter->max_mtu = be64_to_cpu(crq->query_capability.number);
2645 		netdev_dbg(netdev, "max_mtu = %lld\n", adapter->max_mtu);
2646 		break;
2647 	case MAX_MULTICAST_FILTERS:
2648 		adapter->max_multicast_filters =
2649 		    be64_to_cpu(crq->query_capability.number);
2650 		netdev_dbg(netdev, "max_multicast_filters = %lld\n",
2651 			   adapter->max_multicast_filters);
2652 		break;
2653 	case VLAN_HEADER_INSERTION:
2654 		adapter->vlan_header_insertion =
2655 		    be64_to_cpu(crq->query_capability.number);
2656 		if (adapter->vlan_header_insertion)
2657 			netdev->features |= NETIF_F_HW_VLAN_STAG_TX;
2658 		netdev_dbg(netdev, "vlan_header_insertion = %lld\n",
2659 			   adapter->vlan_header_insertion);
2660 		break;
2661 	case MAX_TX_SG_ENTRIES:
2662 		adapter->max_tx_sg_entries =
2663 		    be64_to_cpu(crq->query_capability.number);
2664 		netdev_dbg(netdev, "max_tx_sg_entries = %lld\n",
2665 			   adapter->max_tx_sg_entries);
2666 		break;
2667 	case RX_SG_SUPPORTED:
2668 		adapter->rx_sg_supported =
2669 		    be64_to_cpu(crq->query_capability.number);
2670 		netdev_dbg(netdev, "rx_sg_supported = %lld\n",
2671 			   adapter->rx_sg_supported);
2672 		break;
2673 	case OPT_TX_COMP_SUB_QUEUES:
2674 		adapter->opt_tx_comp_sub_queues =
2675 		    be64_to_cpu(crq->query_capability.number);
2676 		netdev_dbg(netdev, "opt_tx_comp_sub_queues = %lld\n",
2677 			   adapter->opt_tx_comp_sub_queues);
2678 		break;
2679 	case OPT_RX_COMP_QUEUES:
2680 		adapter->opt_rx_comp_queues =
2681 		    be64_to_cpu(crq->query_capability.number);
2682 		netdev_dbg(netdev, "opt_rx_comp_queues = %lld\n",
2683 			   adapter->opt_rx_comp_queues);
2684 		break;
2685 	case OPT_RX_BUFADD_Q_PER_RX_COMP_Q:
2686 		adapter->opt_rx_bufadd_q_per_rx_comp_q =
2687 		    be64_to_cpu(crq->query_capability.number);
2688 		netdev_dbg(netdev, "opt_rx_bufadd_q_per_rx_comp_q = %lld\n",
2689 			   adapter->opt_rx_bufadd_q_per_rx_comp_q);
2690 		break;
2691 	case OPT_TX_ENTRIES_PER_SUBCRQ:
2692 		adapter->opt_tx_entries_per_subcrq =
2693 		    be64_to_cpu(crq->query_capability.number);
2694 		netdev_dbg(netdev, "opt_tx_entries_per_subcrq = %lld\n",
2695 			   adapter->opt_tx_entries_per_subcrq);
2696 		break;
2697 	case OPT_RXBA_ENTRIES_PER_SUBCRQ:
2698 		adapter->opt_rxba_entries_per_subcrq =
2699 		    be64_to_cpu(crq->query_capability.number);
2700 		netdev_dbg(netdev, "opt_rxba_entries_per_subcrq = %lld\n",
2701 			   adapter->opt_rxba_entries_per_subcrq);
2702 		break;
2703 	case TX_RX_DESC_REQ:
2704 		adapter->tx_rx_desc_req = crq->query_capability.number;
2705 		netdev_dbg(netdev, "tx_rx_desc_req = %llx\n",
2706 			   adapter->tx_rx_desc_req);
2707 		break;
2708 
2709 	default:
2710 		netdev_err(netdev, "Got invalid cap rsp %d\n",
2711 			   be16_to_cpu(crq->query_capability.capability));
2712 	}
2713 
2714 out:
2715 	/* We're done querying the capabilities, initialize sub-crqs */
2716 	if (atomic_read(&adapter->running_cap_queries) == 0)
2717 		init_sub_crqs(adapter, 0);
2718 }
2719 
2720 static void handle_control_ras_rsp(union ibmvnic_crq *crq,
2721 				   struct ibmvnic_adapter *adapter)
2722 {
2723 	u8 correlator = crq->control_ras_rsp.correlator;
2724 	struct device *dev = &adapter->vdev->dev;
2725 	bool found = false;
2726 	int i;
2727 
2728 	if (crq->control_ras_rsp.rc.code) {
2729 		dev_warn(dev, "Control ras failed rc=%d\n",
2730 			 crq->control_ras_rsp.rc.code);
2731 		return;
2732 	}
2733 
2734 	for (i = 0; i < adapter->ras_comp_num; i++) {
2735 		if (adapter->ras_comps[i].correlator == correlator) {
2736 			found = true;
2737 			break;
2738 		}
2739 	}
2740 
2741 	if (!found) {
2742 		dev_warn(dev, "Correlator not found on control_ras_rsp\n");
2743 		return;
2744 	}
2745 
2746 	switch (crq->control_ras_rsp.op) {
2747 	case IBMVNIC_TRACE_LEVEL:
2748 		adapter->ras_comps[i].trace_level = crq->control_ras.level;
2749 		break;
2750 	case IBMVNIC_ERROR_LEVEL:
2751 		adapter->ras_comps[i].error_check_level =
2752 		    crq->control_ras.level;
2753 		break;
2754 	case IBMVNIC_TRACE_PAUSE:
2755 		adapter->ras_comp_int[i].paused = 1;
2756 		break;
2757 	case IBMVNIC_TRACE_RESUME:
2758 		adapter->ras_comp_int[i].paused = 0;
2759 		break;
2760 	case IBMVNIC_TRACE_ON:
2761 		adapter->ras_comps[i].trace_on = 1;
2762 		break;
2763 	case IBMVNIC_TRACE_OFF:
2764 		adapter->ras_comps[i].trace_on = 0;
2765 		break;
2766 	case IBMVNIC_CHG_TRACE_BUFF_SZ:
2767 		/* trace_buff_sz is 3 bytes, stuff it into an int */
2768 		((u8 *)(&adapter->ras_comps[i].trace_buff_size))[0] = 0;
2769 		((u8 *)(&adapter->ras_comps[i].trace_buff_size))[1] =
2770 		    crq->control_ras_rsp.trace_buff_sz[0];
2771 		((u8 *)(&adapter->ras_comps[i].trace_buff_size))[2] =
2772 		    crq->control_ras_rsp.trace_buff_sz[1];
2773 		((u8 *)(&adapter->ras_comps[i].trace_buff_size))[3] =
2774 		    crq->control_ras_rsp.trace_buff_sz[2];
2775 		break;
2776 	default:
2777 		dev_err(dev, "invalid op %d on control_ras_rsp\n",
2778 			crq->control_ras_rsp.op);
2779 	}
2780 }
2781 
2782 static ssize_t trace_read(struct file *file, char __user *user_buf, size_t len,
2783 			  loff_t *ppos)
2784 {
2785 	struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
2786 	struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
2787 	struct device *dev = &adapter->vdev->dev;
2788 	struct ibmvnic_fw_trace_entry *trace;
2789 	int num = ras_comp_int->num;
2790 	union ibmvnic_crq crq;
2791 	dma_addr_t trace_tok;
2792 
2793 	if (*ppos >= be32_to_cpu(adapter->ras_comps[num].trace_buff_size))
2794 		return 0;
2795 
2796 	trace =
2797 	    dma_alloc_coherent(dev,
2798 			       be32_to_cpu(adapter->ras_comps[num].
2799 					   trace_buff_size), &trace_tok,
2800 			       GFP_KERNEL);
2801 	if (!trace) {
2802 		dev_err(dev, "Couldn't alloc trace buffer\n");
2803 		return -ENOMEM;
2804 	}
2805 
2806 	memset(&crq, 0, sizeof(crq));
2807 	crq.collect_fw_trace.first = IBMVNIC_CRQ_CMD;
2808 	crq.collect_fw_trace.cmd = COLLECT_FW_TRACE;
2809 	crq.collect_fw_trace.correlator = adapter->ras_comps[num].correlator;
2810 	crq.collect_fw_trace.ioba = cpu_to_be32(trace_tok);
2811 	crq.collect_fw_trace.len = adapter->ras_comps[num].trace_buff_size;
2812 	/* Initialize the completion before sending the CRQ so that the
     	 * response cannot race with wait_for_completion() below.
     	 */
2813 	init_completion(&adapter->fw_done);
2814 	ibmvnic_send_crq(adapter, &crq);
2815 	wait_for_completion(&adapter->fw_done);
2816 
2817 	if (*ppos + len > be32_to_cpu(adapter->ras_comps[num].trace_buff_size))
2818 		len =
2819 		    be32_to_cpu(adapter->ras_comps[num].trace_buff_size) -
2820 		    *ppos;
2821 
2822 	if (copy_to_user(user_buf, &((u8 *)trace)[*ppos], len)) {
     		dma_free_coherent(dev,
     				  be32_to_cpu(adapter->ras_comps[num].trace_buff_size),
     				  trace, trace_tok);
     		return -EFAULT;
     	}
2823 
2824 	dma_free_coherent(dev,
2825 			  be32_to_cpu(adapter->ras_comps[num].trace_buff_size),
2826 			  trace, trace_tok);
2827 	*ppos += len;
2828 	return len;
2829 }
2830 
2831 static const struct file_operations trace_ops = {
2832 	.owner		= THIS_MODULE,
2833 	.open		= simple_open,
2834 	.read		= trace_read,
2835 };
2836 
2837 static ssize_t paused_read(struct file *file, char __user *user_buf, size_t len,
2838 			   loff_t *ppos)
2839 {
2840 	struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
2841 	struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
2842 	int num = ras_comp_int->num;
2843 	char buff[5]; /*  1 or 0 plus \n and \0 */
2844 	int size;
2845 
2846 	size = sprintf(buff, "%d\n", adapter->ras_comp_int[num].paused);
2847 
2848 	if (*ppos >= size)
2849 		return 0;
2850 
2851 	if (copy_to_user(user_buf, buff, size))
     		return -EFAULT;
2852 	*ppos += size;
2853 	return size;
2854 }
2855 
2856 static ssize_t paused_write(struct file *file, const char __user *user_buf,
2857 			    size_t len, loff_t *ppos)
2858 {
2859 	struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
2860 	struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
2861 	int num = ras_comp_int->num;
2862 	union ibmvnic_crq crq;
2863 	unsigned long val;
2865 
2866 	if (kstrtoul_from_user(user_buf, len, 10, &val))
2867 		return -EINVAL;
2868 
2869 	adapter->ras_comp_int[num].paused = val ? 1 : 0;
2870 
2871 	memset(&crq, 0, sizeof(crq));
2872 	crq.control_ras.first = IBMVNIC_CRQ_CMD;
2873 	crq.control_ras.cmd = CONTROL_RAS;
2874 	crq.control_ras.correlator = adapter->ras_comps[num].correlator;
2875 	crq.control_ras.op = val ? IBMVNIC_TRACE_PAUSE : IBMVNIC_TRACE_RESUME;
2876 	ibmvnic_send_crq(adapter, &crq);
2877 
2878 	return len;
2879 }
2880 
2881 static const struct file_operations paused_ops = {
2882 	.owner		= THIS_MODULE,
2883 	.open		= simple_open,
2884 	.read		= paused_read,
2885 	.write		= paused_write,
2886 };
2887 
2888 static ssize_t tracing_read(struct file *file, char __user *user_buf,
2889 			    size_t len, loff_t *ppos)
2890 {
2891 	struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
2892 	struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
2893 	int num = ras_comp_int->num;
2894 	char buff[5]; /*  1 or 0 plus \n and \0 */
2895 	int size;
2896 
2897 	size = sprintf(buff, "%d\n", adapter->ras_comps[num].trace_on);
2898 
2899 	if (*ppos >= size)
2900 		return 0;
2901 
2902 	if (copy_to_user(user_buf, buff, size))
     		return -EFAULT;
2903 	*ppos += size;
2904 	return size;
2905 }
2906 
2907 static ssize_t tracing_write(struct file *file, const char __user *user_buf,
2908 			     size_t len, loff_t *ppos)
2909 {
2910 	struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
2911 	struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
2912 	int num = ras_comp_int->num;
2913 	union ibmvnic_crq crq;
2914 	unsigned long val;
2916 
2917 	if (kstrtoul_from_user(user_buf, len, 10, &val))
2918 		return -EINVAL;
2919 
2920 	memset(&crq, 0, sizeof(crq));
2921 	crq.control_ras.first = IBMVNIC_CRQ_CMD;
2922 	crq.control_ras.cmd = CONTROL_RAS;
2923 	crq.control_ras.correlator = adapter->ras_comps[num].correlator;
2924 	crq.control_ras.op = val ? IBMVNIC_TRACE_ON : IBMVNIC_TRACE_OFF;
     	ibmvnic_send_crq(adapter, &crq);
2925 
2926 	return len;
2927 }
2928 
2929 static const struct file_operations tracing_ops = {
2930 	.owner		= THIS_MODULE,
2931 	.open		= simple_open,
2932 	.read		= tracing_read,
2933 	.write		= tracing_write,
2934 };
2935 
2936 static ssize_t error_level_read(struct file *file, char __user *user_buf,
2937 				size_t len, loff_t *ppos)
2938 {
2939 	struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
2940 	struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
2941 	int num = ras_comp_int->num;
2942 	char buff[5]; /* decimal max char plus \n and \0 */
2943 	int size;
2944 
2945 	size = sprintf(buff, "%d\n", adapter->ras_comps[num].error_check_level);
2946 
2947 	if (*ppos >= size)
2948 		return 0;
2949 
2950 	if (copy_to_user(user_buf, buff, size))
     		return -EFAULT;
2951 	*ppos += size;
2952 	return size;
2953 }
2954 
2955 static ssize_t error_level_write(struct file *file, const char __user *user_buf,
2956 				 size_t len, loff_t *ppos)
2957 {
2958 	struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
2959 	struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
2960 	int num = ras_comp_int->num;
2961 	union ibmvnic_crq crq;
2962 	unsigned long val;
2964 
2965 	if (kstrtoul_from_user(user_buf, len, 10, &val))
2966 		return -EINVAL;
2967 
2968 	if (val > 9)
2969 		val = 9;
2970 
2971 	memset(&crq, 0, sizeof(crq));
2972 	crq.control_ras.first = IBMVNIC_CRQ_CMD;
2973 	crq.control_ras.cmd = CONTROL_RAS;
2974 	crq.control_ras.correlator = adapter->ras_comps[num].correlator;
2975 	crq.control_ras.op = IBMVNIC_ERROR_LEVEL;
2976 	crq.control_ras.level = val;
2977 	ibmvnic_send_crq(adapter, &crq);
2978 
2979 	return len;
2980 }
2981 
2982 static const struct file_operations error_level_ops = {
2983 	.owner		= THIS_MODULE,
2984 	.open		= simple_open,
2985 	.read		= error_level_read,
2986 	.write		= error_level_write,
2987 };
2988 
2989 static ssize_t trace_level_read(struct file *file, char __user *user_buf,
2990 				size_t len, loff_t *ppos)
2991 {
2992 	struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
2993 	struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
2994 	int num = ras_comp_int->num;
2995 	char buff[5]; /* decimal max char plus \n and \0 */
2996 	int size;
2997 
2998 	size = sprintf(buff, "%d\n", adapter->ras_comps[num].trace_level);
2999 	if (*ppos >= size)
3000 		return 0;
3001 
3002 	if (copy_to_user(user_buf, buff, size))
     		return -EFAULT;
3003 	*ppos += size;
3004 	return size;
3005 }
3006 
3007 static ssize_t trace_level_write(struct file *file, const char __user *user_buf,
3008 				 size_t len, loff_t *ppos)
3009 {
3010 	struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
3011 	struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
3012 	union ibmvnic_crq crq;
3013 	unsigned long val;
3015 
3016 	if (kstrtoul_from_user(user_buf, len, 10, &val))
3017 		return -EINVAL;
3018 	if (val > 9)
3019 		val = 9;
3020 
3021 	memset(&crq, 0, sizeof(crq));
3022 	crq.control_ras.first = IBMVNIC_CRQ_CMD;
3023 	crq.control_ras.cmd = CONTROL_RAS;
3024 	crq.control_ras.correlator =
3025 	    adapter->ras_comps[ras_comp_int->num].correlator;
3026 	crq.control_ras.op = IBMVNIC_TRACE_LEVEL;
3027 	crq.control_ras.level = val;
3028 	ibmvnic_send_crq(adapter, &crq);
3029 
3030 	return len;
3031 }
3032 
3033 static const struct file_operations trace_level_ops = {
3034 	.owner		= THIS_MODULE,
3035 	.open		= simple_open,
3036 	.read		= trace_level_read,
3037 	.write		= trace_level_write,
3038 };
3039 
3040 static ssize_t trace_buff_size_read(struct file *file, char __user *user_buf,
3041 				    size_t len, loff_t *ppos)
3042 {
3043 	struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
3044 	struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
3045 	int num = ras_comp_int->num;
3046 	char buff[12]; /* decimal max u32 plus \n and \0 */
3047 	int size;
3048 
3049 	size = sprintf(buff, "%u\n",
     		       be32_to_cpu(adapter->ras_comps[num].trace_buff_size));
3050 	if (*ppos >= size)
3051 		return 0;
3052 
3053 	if (copy_to_user(user_buf, buff, size))
     		return -EFAULT;
3054 	*ppos += size;
3055 	return size;
3056 }
3057 
3058 static ssize_t trace_buff_size_write(struct file *file,
3059 				     const char __user *user_buf, size_t len,
3060 				     loff_t *ppos)
3061 {
3062 	struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
3063 	struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
3064 	union ibmvnic_crq crq;
3065 	unsigned long val;
3067 
3068 	if (kstrtoul_from_user(user_buf, len, 10, &val))
3069 		return -EINVAL;
3070 
3071 	memset(&crq, 0, sizeof(crq));
3072 	crq.control_ras.first = IBMVNIC_CRQ_CMD;
3073 	crq.control_ras.cmd = CONTROL_RAS;
3074 	crq.control_ras.correlator =
3075 	    adapter->ras_comps[ras_comp_int->num].correlator;
3076 	crq.control_ras.op = IBMVNIC_CHG_TRACE_BUFF_SZ;
3077 	/* trace_buff_sz is a 3-byte big-endian field; pack it explicitly
     	 * rather than peeking at the host byte order of an unsigned long.
     	 */
3078 	crq.control_ras.trace_buff_sz[0] = (val >> 16) & 0xff;
3079 	crq.control_ras.trace_buff_sz[1] = (val >> 8) & 0xff;
3080 	crq.control_ras.trace_buff_sz[2] = val & 0xff;
3081 	ibmvnic_send_crq(adapter, &crq);
3082 
3083 	return len;
3084 }
3085 
3086 static const struct file_operations trace_size_ops = {
3087 	.owner		= THIS_MODULE,
3088 	.open		= simple_open,
3089 	.read		= trace_buff_size_read,
3090 	.write		= trace_buff_size_write,
3091 };
3092 
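     /* (Re)build the debugfs tree exposing the firmware RAS components:
      * one directory per component containing its description, trace
      * and error levels, pause/trace toggles, and the trace buffer.
      */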
3093 static void handle_request_ras_comps_rsp(union ibmvnic_crq *crq,
3094 					 struct ibmvnic_adapter *adapter)
3095 {
3096 	struct device *dev = &adapter->vdev->dev;
3097 	struct dentry *dir_ent;
3098 	struct dentry *ent;
3099 	int i;
3100 
3101 	debugfs_remove_recursive(adapter->ras_comps_ent);
3102 
3103 	adapter->ras_comps_ent = debugfs_create_dir("ras_comps",
3104 						    adapter->debugfs_dir);
3105 	if (!adapter->ras_comps_ent || IS_ERR(adapter->ras_comps_ent)) {
3106 		dev_info(dev, "debugfs create ras_comps dir failed\n");
3107 		return;
3108 	}
3109 
3110 	for (i = 0; i < adapter->ras_comp_num; i++) {
3111 		dir_ent = debugfs_create_dir(adapter->ras_comps[i].name,
3112 					     adapter->ras_comps_ent);
3113 		if (!dir_ent || IS_ERR(dir_ent)) {
3114 			dev_info(dev, "debugfs create %s dir failed\n",
3115 				 adapter->ras_comps[i].name);
3116 			continue;
3117 		}
3118 
3119 		adapter->ras_comp_int[i].adapter = adapter;
3120 		adapter->ras_comp_int[i].num = i;
3121 		adapter->ras_comp_int[i].desc_blob.data =
3122 		    &adapter->ras_comps[i].description;
3123 		adapter->ras_comp_int[i].desc_blob.size =
3124 		    sizeof(adapter->ras_comps[i].description);
3125 
3126 		/* Don't need to remember the dentry's because the debugfs dir
3127 		 * gets removed recursively
3128 		 */
3129 		ent = debugfs_create_blob("description", S_IRUGO, dir_ent,
3130 					  &adapter->ras_comp_int[i].desc_blob);
3131 		ent = debugfs_create_file("trace_buf_size", S_IRUGO | S_IWUSR,
3132 					  dir_ent, &adapter->ras_comp_int[i],
3133 					  &trace_size_ops);
3134 		ent = debugfs_create_file("trace_level",
3135 					  S_IRUGO |
3136 					  (adapter->ras_comps[i].trace_level !=
3137 					   0xFF  ? S_IWUSR : 0),
3138 					   dir_ent, &adapter->ras_comp_int[i],
3139 					   &trace_level_ops);
3140 		ent = debugfs_create_file("error_level",
3141 					  S_IRUGO |
3142 					  (adapter->
3143 					   ras_comps[i].error_check_level !=
3144 					   0xFF ? S_IWUSR : 0),
3145 					  dir_ent, &adapter->ras_comp_int[i],
3146 					  &error_level_ops);
3147 		ent = debugfs_create_file("tracing", S_IRUGO | S_IWUSR,
3148 					  dir_ent, &adapter->ras_comp_int[i],
3149 					  &tracing_ops);
3150 		ent = debugfs_create_file("paused", S_IRUGO | S_IWUSR,
3151 					  dir_ent, &adapter->ras_comp_int[i],
3152 					  &paused_ops);
3153 		ent = debugfs_create_file("trace", S_IRUGO, dir_ent,
3154 					  &adapter->ras_comp_int[i],
3155 					  &trace_ops);
3156 	}
3157 }
3158 
3159 static void handle_request_ras_comp_num_rsp(union ibmvnic_crq *crq,
3160 					    struct ibmvnic_adapter *adapter)
3161 {
3162 	int len = adapter->ras_comp_num * sizeof(struct ibmvnic_fw_component);
3163 	struct device *dev = &adapter->vdev->dev;
3164 	union ibmvnic_crq newcrq;
3165 
3166 	adapter->ras_comps = dma_alloc_coherent(dev, len,
3167 						&adapter->ras_comps_tok,
3168 						GFP_KERNEL);
3169 	if (!adapter->ras_comps) {
3170 		if (!firmware_has_feature(FW_FEATURE_CMO))
3171 			dev_err(dev, "Couldn't alloc fw comps buffer\n");
3172 		return;
3173 	}
3174 
3175 	adapter->ras_comp_int = kmalloc(adapter->ras_comp_num *
3176 					sizeof(struct ibmvnic_fw_comp_internal),
3177 					GFP_KERNEL);
3178 	if (!adapter->ras_comp_int) {
3179 		dma_free_coherent(dev, len, adapter->ras_comps,
3180 				  adapter->ras_comps_tok);
     		return;
     	}
3181 
3182 	memset(&newcrq, 0, sizeof(newcrq));
3183 	newcrq.request_ras_comps.first = IBMVNIC_CRQ_CMD;
3184 	newcrq.request_ras_comps.cmd = REQUEST_RAS_COMPS;
3185 	newcrq.request_ras_comps.ioba = cpu_to_be32(adapter->ras_comps_tok);
3186 	newcrq.request_ras_comps.len = cpu_to_be32(len);
3187 	ibmvnic_send_crq(adapter, &newcrq);
3188 }
3189 
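     /* Walk the inflight command list and release the resources tied to
      * each pending request (login buffers, dump completion, error
      * buffers) when the adapter is reset or torn down before the
      * responses arrive.
      */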
3190 static void ibmvnic_free_inflight(struct ibmvnic_adapter *adapter)
3191 {
3192 	struct ibmvnic_inflight_cmd *inflight_cmd, *tmp1;
3193 	struct device *dev = &adapter->vdev->dev;
3194 	struct ibmvnic_error_buff *error_buff, *tmp2;
3195 	unsigned long flags;
3196 	unsigned long flags2;
3197 
3198 	spin_lock_irqsave(&adapter->inflight_lock, flags);
3199 	list_for_each_entry_safe(inflight_cmd, tmp1, &adapter->inflight, list) {
3200 		switch (inflight_cmd->crq.generic.cmd) {
3201 		case LOGIN:
3202 			dma_unmap_single(dev, adapter->login_buf_token,
3203 					 adapter->login_buf_sz,
3204 					 DMA_TO_DEVICE);
3205 			dma_unmap_single(dev, adapter->login_rsp_buf_token,
3206 					 adapter->login_rsp_buf_sz,
3207 					 DMA_FROM_DEVICE);
3208 			kfree(adapter->login_rsp_buf);
3209 			kfree(adapter->login_buf);
3210 			break;
3211 		case REQUEST_DUMP:
3212 			complete(&adapter->fw_done);
3213 			break;
3214 		case REQUEST_ERROR_INFO:
3215 			spin_lock_irqsave(&adapter->error_list_lock, flags2);
3216 			list_for_each_entry_safe(error_buff, tmp2,
3217 						 &adapter->errors, list) {
3218 				dma_unmap_single(dev, error_buff->dma,
3219 						 error_buff->len,
3220 						 DMA_FROM_DEVICE);
3221 				kfree(error_buff->buff);
3222 				list_del(&error_buff->list);
3223 				kfree(error_buff);
3224 			}
3225 			spin_unlock_irqrestore(&adapter->error_list_lock,
3226 					       flags2);
3227 			break;
3228 		}
3229 		list_del(&inflight_cmd->list);
3230 		kfree(inflight_cmd);
3231 	}
3232 	spin_unlock_irqrestore(&adapter->inflight_lock, flags);
3233 }
3234 
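     /* Top-level CRQ dispatcher: transport events (initialization
      * handshake, migration, failover) are handled here directly, while
      * command responses fan out to the handle_*_rsp() helpers above.
      */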
static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
			       struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_generic_crq *gen_crq = &crq->generic;
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	long rc;

	netdev_dbg(netdev, "Handling CRQ: %016lx %016lx\n",
		   ((unsigned long int *)crq)[0],
		   ((unsigned long int *)crq)[1]);
	switch (gen_crq->first) {
	case IBMVNIC_CRQ_INIT_RSP:
		switch (gen_crq->cmd) {
		case IBMVNIC_CRQ_INIT:
			dev_info(dev, "Partner initialized\n");
			/* Send back a response */
			rc = ibmvnic_send_crq_init_complete(adapter);
			if (!rc)
				schedule_work(&adapter->vnic_crq_init);
			else
				dev_err(dev, "Can't send initrsp rc=%ld\n", rc);
			break;
		case IBMVNIC_CRQ_INIT_COMPLETE:
			dev_info(dev, "Partner initialization complete\n");
			send_version_xchg(adapter);
			break;
		default:
			dev_err(dev, "Unknown crq cmd: %d\n", gen_crq->cmd);
		}
		return;
	case IBMVNIC_CRQ_XPORT_EVENT:
		if (gen_crq->cmd == IBMVNIC_PARTITION_MIGRATED) {
			dev_info(dev, "Re-enabling adapter\n");
			adapter->migrated = true;
			ibmvnic_free_inflight(adapter);
			release_sub_crqs(adapter);
			rc = ibmvnic_reenable_crq_queue(adapter);
			if (rc)
				dev_err(dev, "Error after enable rc=%ld\n", rc);
			adapter->migrated = false;
			rc = ibmvnic_send_crq_init(adapter);
			if (rc)
				dev_err(dev, "Error sending init rc=%ld\n", rc);
		} else if (gen_crq->cmd == IBMVNIC_DEVICE_FAILOVER) {
			dev_info(dev, "Backing device failover detected\n");
			netif_carrier_off(netdev);
			adapter->failover = true;
		} else {
			/* The adapter lost the connection */
			dev_err(dev, "Virtual Adapter failed (rc=%d)\n",
				gen_crq->cmd);
			ibmvnic_free_inflight(adapter);
			release_sub_crqs(adapter);
		}
		return;
	case IBMVNIC_CRQ_CMD_RSP:
		break;
	default:
		dev_err(dev, "Got an invalid msg type 0x%02x\n",
			gen_crq->first);
		return;
	}

	switch (gen_crq->cmd) {
	case VERSION_EXCHANGE_RSP:
		rc = crq->version_exchange_rsp.rc.code;
		if (rc) {
			dev_err(dev, "Error %ld in VERSION_EXCHG_RSP\n", rc);
			break;
		}
		dev_info(dev, "Partner protocol version is %d\n",
			 be16_to_cpu(crq->version_exchange_rsp.version));
		if (be16_to_cpu(crq->version_exchange_rsp.version) <
		    ibmvnic_version)
			ibmvnic_version =
			    be16_to_cpu(crq->version_exchange_rsp.version);
		send_cap_queries(adapter);
		break;
	case QUERY_CAPABILITY_RSP:
		handle_query_cap_rsp(crq, adapter);
		break;
	case QUERY_MAP_RSP:
		handle_query_map_rsp(crq, adapter);
		break;
	case REQUEST_MAP_RSP:
		handle_request_map_rsp(crq, adapter);
		break;
	case REQUEST_UNMAP_RSP:
		handle_request_unmap_rsp(crq, adapter);
		break;
	case REQUEST_CAPABILITY_RSP:
		handle_request_cap_rsp(crq, adapter);
		break;
	case LOGIN_RSP:
		netdev_dbg(netdev, "Got Login Response\n");
		handle_login_rsp(crq, adapter);
		break;
	case LOGICAL_LINK_STATE_RSP:
		netdev_dbg(netdev, "Got Logical Link State Response\n");
		adapter->logical_link_state =
		    crq->logical_link_state_rsp.link_state;
		break;
	case LINK_STATE_INDICATION:
		netdev_dbg(netdev, "Got Logical Link State Indication\n");
		adapter->phys_link_state =
		    crq->link_state_indication.phys_link_state;
		adapter->logical_link_state =
		    crq->link_state_indication.logical_link_state;
		break;
	case CHANGE_MAC_ADDR_RSP:
		netdev_dbg(netdev, "Got MAC address change Response\n");
		handle_change_mac_rsp(crq, adapter);
		break;
	case ERROR_INDICATION:
		netdev_dbg(netdev, "Got Error Indication\n");
		handle_error_indication(crq, adapter);
		break;
	case REQUEST_ERROR_RSP:
		netdev_dbg(netdev, "Got Error Detail Response\n");
		handle_error_info_rsp(crq, adapter);
		break;
	case REQUEST_STATISTICS_RSP:
		netdev_dbg(netdev, "Got Statistics Response\n");
		complete(&adapter->stats_done);
		break;
	case REQUEST_DUMP_SIZE_RSP:
		netdev_dbg(netdev, "Got Request Dump Size Response\n");
		handle_dump_size_rsp(crq, adapter);
		break;
	case REQUEST_DUMP_RSP:
		netdev_dbg(netdev, "Got Request Dump Response\n");
		complete(&adapter->fw_done);
		break;
	case QUERY_IP_OFFLOAD_RSP:
		netdev_dbg(netdev, "Got Query IP offload Response\n");
		handle_query_ip_offload_rsp(adapter);
		break;
	case MULTICAST_CTRL_RSP:
		netdev_dbg(netdev, "Got multicast control Response\n");
		break;
	case CONTROL_IP_OFFLOAD_RSP:
		netdev_dbg(netdev, "Got Control IP offload Response\n");
		dma_unmap_single(dev, adapter->ip_offload_ctrl_tok,
				 sizeof(adapter->ip_offload_ctrl),
				 DMA_TO_DEVICE);
		/* We're done with the queries, perform the login */
		send_login(adapter);
		break;
	case REQUEST_RAS_COMP_NUM_RSP:
		netdev_dbg(netdev, "Got Request RAS Comp Num Response\n");
		if (crq->request_ras_comp_num_rsp.rc.code == 10) {
			netdev_dbg(netdev, "Request RAS Comp Num not supported\n");
			break;
		}
		adapter->ras_comp_num =
		    be32_to_cpu(crq->request_ras_comp_num_rsp.num_components);
		handle_request_ras_comp_num_rsp(crq, adapter);
		break;
	case REQUEST_RAS_COMPS_RSP:
		netdev_dbg(netdev, "Got Request RAS Comps Response\n");
		handle_request_ras_comps_rsp(crq, adapter);
		break;
	case CONTROL_RAS_RSP:
		netdev_dbg(netdev, "Got Control RAS Response\n");
		handle_control_ras_rsp(crq, adapter);
		break;
	case COLLECT_FW_TRACE_RSP:
		netdev_dbg(netdev, "Got Collect firmware trace Response\n");
		complete(&adapter->fw_done);
		break;
	default:
		netdev_err(netdev, "Got an invalid cmd type 0x%02x\n",
			   gen_crq->cmd);
	}
}

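/* CRQ interrupt handler. The queue is drained with interrupts disabled,
 * then interrupts are re-enabled and the queue is checked once more so
 * that a message arriving in that window is not left unserviced.
 */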
static irqreturn_t ibmvnic_interrupt(int irq, void *instance)
{
	struct ibmvnic_adapter *adapter = instance;
	struct ibmvnic_crq_queue *queue = &adapter->crq;
	struct vio_dev *vdev = adapter->vdev;
	union ibmvnic_crq *crq;
	unsigned long flags;
	bool done = false;

	spin_lock_irqsave(&queue->lock, flags);
	vio_disable_interrupts(vdev);
	while (!done) {
		/* Pull all the valid messages off the CRQ */
		while ((crq = ibmvnic_next_crq(adapter)) != NULL) {
			ibmvnic_handle_crq(crq, adapter);
			crq->generic.first = 0;
		}
		vio_enable_interrupts(vdev);
		crq = ibmvnic_next_crq(adapter);
		if (crq) {
			vio_disable_interrupts(vdev);
			ibmvnic_handle_crq(crq, adapter);
			crq->generic.first = 0;
		} else {
			done = true;
		}
	}
	spin_unlock_irqrestore(&queue->lock, flags);
	return IRQ_HANDLED;
}

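/* Re-enable the CRQ with the hypervisor after a partition migration,
 * retrying for as long as the hcall reports busy or in-progress.
 */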
static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct vio_dev *vdev = adapter->vdev;
	int rc;

	do {
		rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
	} while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));

	if (rc)
		dev_err(&vdev->dev, "Error enabling adapter (rc=%d)\n", rc);

	return rc;
}

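/* Close and re-register the CRQ, reusing the already-mapped message
 * page. H_CLOSED from H_REG_CRQ means our side registered fine but the
 * partner has not opened its end yet.
 */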
static int ibmvnic_reset_crq(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct device *dev = &adapter->vdev->dev;
	struct vio_dev *vdev = adapter->vdev;
	int rc;

	/* Close the CRQ */
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	/* Clean out the queue */
	memset(crq->msgs, 0, PAGE_SIZE);
	crq->cur = 0;

	/* And re-open it again */
	rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
				crq->msg_token, PAGE_SIZE);

	if (rc == H_CLOSED)
		/* Adapter is good, but other end is not ready */
		dev_warn(dev, "Partner adapter not ready\n");
	else if (rc != 0)
		dev_warn(dev, "Couldn't register crq (rc=%d)\n", rc);

	return rc;
}

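/* Undo ibmvnic_init_crq_queue(): free the irq, deregister the queue
 * from the hypervisor, unmap it, and release the message page.
 */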
static void ibmvnic_release_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct vio_dev *vdev = adapter->vdev;
	long rc;

	netdev_dbg(adapter->netdev, "Releasing CRQ\n");
	free_irq(vdev->irq, adapter);
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	dma_unmap_single(&vdev->dev, crq->msg_token, PAGE_SIZE,
			 DMA_BIDIRECTIONAL);
	free_page((unsigned long)crq->msgs);
}

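/* Allocate one zeroed page of CRQ messages, DMA-map it and register it
 * with the hypervisor. H_RESOURCE can mean a previous owner (e.g. a
 * kexec'ed kernel) still holds the queue, so a reset is attempted
 * before failing.
 */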
static int ibmvnic_init_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct device *dev = &adapter->vdev->dev;
	struct vio_dev *vdev = adapter->vdev;
	int rc, retrc = -ENOMEM;

	crq->msgs = (union ibmvnic_crq *)get_zeroed_page(GFP_KERNEL);
	/* Should we allocate more than one page? */

	if (!crq->msgs)
		return -ENOMEM;

	crq->size = PAGE_SIZE / sizeof(*crq->msgs);
	crq->msg_token = dma_map_single(dev, crq->msgs, PAGE_SIZE,
					DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, crq->msg_token))
		goto map_failed;

	rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
				crq->msg_token, PAGE_SIZE);

	if (rc == H_RESOURCE)
		/* maybe kexecing and resource is busy. try a reset */
		rc = ibmvnic_reset_crq(adapter);
	retrc = rc;

	if (rc == H_CLOSED) {
		dev_warn(dev, "Partner adapter not ready\n");
	} else if (rc) {
		dev_warn(dev, "Error %d opening adapter\n", rc);
		goto reg_crq_failed;
	}

	retrc = 0;

	netdev_dbg(adapter->netdev, "registering irq 0x%x\n", vdev->irq);
	rc = request_irq(vdev->irq, ibmvnic_interrupt, 0, IBMVNIC_NAME,
			 adapter);
	if (rc) {
		dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n",
			vdev->irq, rc);
		retrc = rc;
		goto req_irq_failed;
	}

	rc = vio_enable_interrupts(vdev);
	if (rc) {
		dev_err(dev, "Error %d enabling interrupts\n", rc);
		retrc = rc;
		goto req_irq_failed;
	}

	crq->cur = 0;
	spin_lock_init(&crq->lock);

	return retrc;

req_irq_failed:
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
reg_crq_failed:
	dma_unmap_single(dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
map_failed:
	free_page((unsigned long)crq->msgs);
	return retrc;
}

/* debugfs for dump */
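/* Reading the "dump" file sends REQUEST_DUMP_SIZE to the server and
 * sleeps on fw_done, which is completed once REQUEST_DUMP_RSP arrives
 * (the size response handler is expected to issue the actual dump
 * request); the collected buffer is then written to the seq_file.
 */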
static int ibmvnic_dump_show(struct seq_file *seq, void *v)
{
	struct net_device *netdev = seq->private;
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->vdev->dev;
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.request_dump_size.first = IBMVNIC_CRQ_CMD;
	crq.request_dump_size.cmd = REQUEST_DUMP_SIZE;

	/* Initialize the completion before sending the request so a fast
	 * response cannot complete it before we start waiting.
	 */
	init_completion(&adapter->fw_done);
	ibmvnic_send_crq(adapter, &crq);
	wait_for_completion(&adapter->fw_done);

	seq_write(seq, adapter->dump_data, adapter->dump_data_size);

	dma_unmap_single(dev, adapter->dump_data_token, adapter->dump_data_size,
			 DMA_BIDIRECTIONAL);

	kfree(adapter->dump_data);

	return 0;
}

static int ibmvnic_dump_open(struct inode *inode, struct file *file)
{
	return single_open(file, ibmvnic_dump_show, inode->i_private);
}

static const struct file_operations ibmvnic_dump_ops = {
	.owner          = THIS_MODULE,
	.open           = ibmvnic_dump_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = single_release,
};

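/* Worker scheduled when the partner (re)initializes its end of the CRQ.
 * It redoes the version/capability handshake, rebuilds the sub-CRQ
 * irqs, and then either resumes a failed-over device or performs the
 * deferred netdev registration ("passive init").
 */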
static void handle_crq_init_rsp(struct work_struct *work)
{
	struct ibmvnic_adapter *adapter = container_of(work,
						       struct ibmvnic_adapter,
						       vnic_crq_init);
	struct device *dev = &adapter->vdev->dev;
	struct net_device *netdev = adapter->netdev;
	unsigned long timeout = msecs_to_jiffies(30000);
	bool restart = false;
	int rc;

	if (adapter->failover) {
		release_sub_crqs(adapter);
		if (netif_running(netdev)) {
			netif_tx_disable(netdev);
			ibmvnic_close(netdev);
			restart = true;
		}
	}

	/* Re-arm the completion before kicking off the exchange so the
	 * response cannot race with reinit_completion().
	 */
	reinit_completion(&adapter->init_done);
	send_version_xchg(adapter);
	if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
		dev_err(dev, "Passive init timeout\n");
		goto task_failed;
	}

	do {
		if (adapter->renegotiate) {
			adapter->renegotiate = false;
			release_sub_crqs_no_irqs(adapter);

			reinit_completion(&adapter->init_done);
			send_cap_queries(adapter);
			if (!wait_for_completion_timeout(&adapter->init_done,
							 timeout)) {
				dev_err(dev, "Passive init timeout\n");
				goto task_failed;
			}
		}
	} while (adapter->renegotiate);

	rc = init_sub_crq_irqs(adapter);
	if (rc)
		goto task_failed;

	netdev->real_num_tx_queues = adapter->req_tx_queues;

	if (adapter->failover) {
		adapter->failover = false;
		if (restart) {
			rc = ibmvnic_open(netdev);
			if (rc)
				goto restart_failed;
		}
		netif_carrier_on(netdev);
		return;
	}

	rc = register_netdev(netdev);
	if (rc) {
		dev_err(dev, "failed to register netdev rc=%d\n", rc);
		goto register_failed;
	}
	dev_info(dev, "ibmvnic registered\n");

	return;

restart_failed:
	dev_err(dev, "Failed to restart ibmvnic, rc=%d\n", rc);
register_failed:
	release_sub_crqs(adapter);
task_failed:
	dev_err(dev, "Passive initialization was not successful\n");
}

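/* Probe: allocate the netdev, bring up the CRQ, map the statistics
 * buffer, create the debugfs entries, and start the init handshake. If
 * the server does not answer within the timeout, probe still returns 0
 * and registration is finished later by the handle_crq_init_rsp worker.
 */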
static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
{
	unsigned long timeout = msecs_to_jiffies(30000);
	struct ibmvnic_adapter *adapter;
	struct net_device *netdev;
	unsigned char *mac_addr_p;
	struct dentry *ent;
	char buf[16]; /* debugfs name buf */
	int rc;

	dev_dbg(&dev->dev, "entering ibmvnic_probe for UA 0x%x\n",
		dev->unit_address);

	mac_addr_p = (unsigned char *)vio_get_attribute(dev,
							VETH_MAC_ADDR, NULL);
	if (!mac_addr_p) {
		dev_err(&dev->dev,
			"(%s:%3.3d) ERROR: Can't find MAC_ADDR attribute\n",
			__FILE__, __LINE__);
		return 0;
	}

	netdev = alloc_etherdev_mq(sizeof(struct ibmvnic_adapter),
				   IBMVNIC_MAX_TX_QUEUES);
	if (!netdev)
		return -ENOMEM;

	adapter = netdev_priv(netdev);
	dev_set_drvdata(&dev->dev, netdev);
	adapter->vdev = dev;
	adapter->netdev = netdev;
	adapter->failover = false;

	ether_addr_copy(adapter->mac_addr, mac_addr_p);
	ether_addr_copy(netdev->dev_addr, adapter->mac_addr);
	netdev->irq = dev->irq;
	netdev->netdev_ops = &ibmvnic_netdev_ops;
	netdev->ethtool_ops = &ibmvnic_ethtool_ops;
	SET_NETDEV_DEV(netdev, &dev->dev);

	INIT_WORK(&adapter->vnic_crq_init, handle_crq_init_rsp);

	spin_lock_init(&adapter->stats_lock);

	rc = ibmvnic_init_crq_queue(adapter);
	if (rc) {
		dev_err(&dev->dev, "Couldn't initialize crq. rc=%d\n", rc);
		goto free_netdev;
	}

	INIT_LIST_HEAD(&adapter->errors);
	INIT_LIST_HEAD(&adapter->inflight);
	spin_lock_init(&adapter->error_list_lock);
	spin_lock_init(&adapter->inflight_lock);

	adapter->stats_token = dma_map_single(&dev->dev, &adapter->stats,
					      sizeof(struct ibmvnic_statistics),
					      DMA_FROM_DEVICE);
	if (dma_mapping_error(&dev->dev, adapter->stats_token)) {
		if (!firmware_has_feature(FW_FEATURE_CMO))
			dev_err(&dev->dev, "Couldn't map stats buffer\n");
		rc = -ENOMEM;
		goto free_crq;
	}

	snprintf(buf, sizeof(buf), "ibmvnic_%x", dev->unit_address);
	ent = debugfs_create_dir(buf, NULL);
	if (!ent || IS_ERR(ent)) {
		dev_info(&dev->dev, "debugfs create directory failed\n");
		adapter->debugfs_dir = NULL;
	} else {
		adapter->debugfs_dir = ent;
		ent = debugfs_create_file("dump", S_IRUGO, adapter->debugfs_dir,
					  netdev, &ibmvnic_dump_ops);
		if (!ent || IS_ERR(ent)) {
			dev_info(&dev->dev,
				 "debugfs create dump file failed\n");
			adapter->debugfs_dump = NULL;
		} else {
			adapter->debugfs_dump = ent;
		}
	}

	/* Initialize the completion before sending the init CRQ so the
	 * handshake cannot race with init_completion().
	 */
	init_completion(&adapter->init_done);
	ibmvnic_send_crq_init(adapter);
	if (!wait_for_completion_timeout(&adapter->init_done, timeout))
		return 0;

	do {
		if (adapter->renegotiate) {
			adapter->renegotiate = false;
			release_sub_crqs_no_irqs(adapter);

			reinit_completion(&adapter->init_done);
			send_cap_queries(adapter);
			if (!wait_for_completion_timeout(&adapter->init_done,
							 timeout))
				return 0;
		}
	} while (adapter->renegotiate);

	rc = init_sub_crq_irqs(adapter);
	if (rc) {
		dev_err(&dev->dev, "failed to initialize sub crq irqs\n");
		goto free_debugfs;
	}

	netdev->real_num_tx_queues = adapter->req_tx_queues;

	rc = register_netdev(netdev);
	if (rc) {
		dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc);
		goto free_sub_crqs;
	}
	dev_info(&dev->dev, "ibmvnic registered\n");

	return 0;

free_sub_crqs:
	release_sub_crqs(adapter);
free_debugfs:
	if (adapter->debugfs_dir && !IS_ERR(adapter->debugfs_dir))
		debugfs_remove_recursive(adapter->debugfs_dir);
free_crq:
	ibmvnic_release_crq_queue(adapter);
free_netdev:
	free_netdev(netdev);
	return rc;
}

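/* Unwind probe in reverse: unregister the netdev, release the sub-CRQs
 * and the CRQ itself, remove debugfs, and free the long-lived RAS
 * component buffers.
 */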
static int ibmvnic_remove(struct vio_dev *dev)
{
	struct net_device *netdev = dev_get_drvdata(&dev->dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	unregister_netdev(netdev);

	release_sub_crqs(adapter);

	ibmvnic_release_crq_queue(adapter);

	if (adapter->debugfs_dir && !IS_ERR(adapter->debugfs_dir))
		debugfs_remove_recursive(adapter->debugfs_dir);

	if (adapter->ras_comps)
		dma_free_coherent(&dev->dev,
				  adapter->ras_comp_num *
				  sizeof(struct ibmvnic_fw_component),
				  adapter->ras_comps, adapter->ras_comps_tok);

	kfree(adapter->ras_comp_int);

	free_netdev(netdev);
	dev_set_drvdata(&dev->dev, NULL);

	return 0;
}

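/* Report the IO entitlement (DMA-mappable memory) this device wants to
 * the VIO bus: the CRQ page, the bounce buffer, the statistics buffer,
 * the sub-CRQ queues, and every buffer in the rx pools.
 */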
static unsigned long ibmvnic_get_desired_dma(struct vio_dev *vdev)
{
	struct net_device *netdev = dev_get_drvdata(&vdev->dev);
	struct ibmvnic_adapter *adapter;
	struct iommu_table *tbl;
	unsigned long ret = 0;
	int i;

	tbl = get_iommu_table_base(&vdev->dev);

	/* netdev inits at probe time along with the structures we need below */
	if (!netdev)
		return IOMMU_PAGE_ALIGN(IBMVNIC_IO_ENTITLEMENT_DEFAULT, tbl);

	adapter = netdev_priv(netdev);

	ret += PAGE_SIZE; /* the crq message queue */
	ret += adapter->bounce_buffer_size;
	ret += IOMMU_PAGE_ALIGN(sizeof(struct ibmvnic_statistics), tbl);

	for (i = 0; i < adapter->req_tx_queues + adapter->req_rx_queues; i++)
		ret += 4 * PAGE_SIZE; /* the scrq message queue */

	for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	     i++)
		ret += adapter->rx_pool[i].size *
		    IOMMU_PAGE_ALIGN(adapter->rx_pool[i].buff_size, tbl);

	return ret;
}

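/* PM resume callback, wired into ibmvnic_pm_ops below. Each rx sub-CRQ
 * handler is invoked once by hand in case an interrupt was lost while
 * the partition was suspended.
 */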
static int ibmvnic_resume(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int i;

	/* kick the interrupt handlers just in case we lost an interrupt */
	for (i = 0; i < adapter->req_rx_queues; i++)
		ibmvnic_interrupt_rx(adapter->rx_scrq[i]->irq,
				     adapter->rx_scrq[i]);

	return 0;
}

static struct vio_device_id ibmvnic_device_table[] = {
	{"network", "IBM,vnic"},
	{"", ""}
};
MODULE_DEVICE_TABLE(vio, ibmvnic_device_table);

static const struct dev_pm_ops ibmvnic_pm_ops = {
	.resume = ibmvnic_resume
};

static struct vio_driver ibmvnic_driver = {
	.id_table       = ibmvnic_device_table,
	.probe          = ibmvnic_probe,
	.remove         = ibmvnic_remove,
	.get_desired_dma = ibmvnic_get_desired_dma,
	.name		= ibmvnic_driver_name,
	.pm		= &ibmvnic_pm_ops,
};

/* module functions */
static int __init ibmvnic_module_init(void)
{
	pr_info("%s: %s %s\n", ibmvnic_driver_name, ibmvnic_driver_string,
		IBMVNIC_DRIVER_VERSION);

	return vio_register_driver(&ibmvnic_driver);
}

static void __exit ibmvnic_module_exit(void)
{
	vio_unregister_driver(&ibmvnic_driver);
}

module_init(ibmvnic_module_init);
module_exit(ibmvnic_module_exit);