/*
 * IBM Power Virtual Ethernet Device Driver
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2003, 2010
 *
 * Authors: Dave Larson <larson1@us.ibm.com>
 *	    Santiago Leon <santil@linux.vnet.ibm.com>
 *	    Brian King <brking@linux.vnet.ibm.com>
 *	    Robert Jennings <rcj@linux.vnet.ibm.com>
 *	    Anton Blanchard <anton@au.ibm.com>
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/pm.h>
#include <linux/ethtool.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <asm/hvcall.h>
#include <linux/atomic.h>
#include <asm/vio.h>
#include <asm/iommu.h>
#include <asm/firmware.h>

#include "ibmveth.h"

static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance);
static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter);
static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev);

static struct kobj_type ktype_veth_pool;


static const char ibmveth_driver_name[] = "ibmveth";
static const char ibmveth_driver_string[] = "IBM Power Virtual Ethernet Driver";
#define ibmveth_driver_version "1.04"

MODULE_AUTHOR("Santiago Leon <santil@linux.vnet.ibm.com>");
MODULE_DESCRIPTION("IBM Power Virtual Ethernet Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(ibmveth_driver_version);

static unsigned int tx_copybreak __read_mostly = 128;
module_param(tx_copybreak, uint, 0644);
MODULE_PARM_DESC(tx_copybreak,
	"Maximum size of packet that is copied to a new buffer on transmit");

static unsigned int rx_copybreak __read_mostly = 128;
module_param(rx_copybreak, uint, 0644);
MODULE_PARM_DESC(rx_copybreak,
	"Maximum size of packet that is copied to a new buffer on receive");

static unsigned int rx_flush __read_mostly = 0;
module_param(rx_flush, uint, 0644);
MODULE_PARM_DESC(rx_flush, "Flush receive buffers before use");

struct ibmveth_stat {
	char name[ETH_GSTRING_LEN];
	int offset;
};

#define IBMVETH_STAT_OFF(stat) offsetof(struct ibmveth_adapter, stat)
#define IBMVETH_GET_STAT(a, off) *((u64 *)(((unsigned long)(a)) + off))

static struct ibmveth_stat ibmveth_stats[] = {
	{ "replenish_task_cycles", IBMVETH_STAT_OFF(replenish_task_cycles) },
	{ "replenish_no_mem", IBMVETH_STAT_OFF(replenish_no_mem) },
	{ "replenish_add_buff_failure",
			IBMVETH_STAT_OFF(replenish_add_buff_failure) },
	{ "replenish_add_buff_success",
			IBMVETH_STAT_OFF(replenish_add_buff_success) },
	{ "rx_invalid_buffer", IBMVETH_STAT_OFF(rx_invalid_buffer) },
	{ "rx_no_buffer", IBMVETH_STAT_OFF(rx_no_buffer) },
	{ "tx_map_failed", IBMVETH_STAT_OFF(tx_map_failed) },
	{ "tx_send_failed", IBMVETH_STAT_OFF(tx_send_failed) },
	{ "fw_enabled_ipv4_csum", IBMVETH_STAT_OFF(fw_ipv4_csum_support) },
	{ "fw_enabled_ipv6_csum", IBMVETH_STAT_OFF(fw_ipv6_csum_support) },
};

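/*
 * The receive queue is a ring of ibmveth_rx_q_entry structures that the
 * hypervisor fills in. A toggle bit in each entry's flags, compared
 * against the driver's rx_queue.toggle, distinguishes new entries from
 * stale ones: the driver flips its copy of the toggle every time the
 * index wraps, so an entry whose toggle matches ours was written by the
 * hypervisor since we last passed this slot.
 */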
/* simple methods of getting data from the current rxq entry */
static inline u32 ibmveth_rxq_flags(struct ibmveth_adapter *adapter)
{
	return adapter->rx_queue.queue_addr[adapter->rx_queue.index].flags_off;
}

static inline int ibmveth_rxq_toggle(struct ibmveth_adapter *adapter)
{
	return (ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_TOGGLE) >>
			IBMVETH_RXQ_TOGGLE_SHIFT;
}

static inline int ibmveth_rxq_pending_buffer(struct ibmveth_adapter *adapter)
{
	return ibmveth_rxq_toggle(adapter) == adapter->rx_queue.toggle;
}

static inline int ibmveth_rxq_buffer_valid(struct ibmveth_adapter *adapter)
{
	return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_VALID;
}

static inline int ibmveth_rxq_frame_offset(struct ibmveth_adapter *adapter)
{
	return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_OFF_MASK;
}

static inline int ibmveth_rxq_frame_length(struct ibmveth_adapter *adapter)
{
	return adapter->rx_queue.queue_addr[adapter->rx_queue.index].length;
}

static inline int ibmveth_rxq_csum_good(struct ibmveth_adapter *adapter)
{
	return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_CSUM_GOOD;
}

/* setup the initial settings for a buffer pool */
static void ibmveth_init_buffer_pool(struct ibmveth_buff_pool *pool,
				     u32 pool_index, u32 pool_size,
				     u32 buff_size, u32 pool_active)
{
	pool->size = pool_size;
	pool->index = pool_index;
	pool->buff_size = buff_size;
	pool->threshold = pool_size * 7 / 8;
	pool->active = pool_active;
}

/* allocate and setup a buffer pool - called during open */
static int ibmveth_alloc_buffer_pool(struct ibmveth_buff_pool *pool)
{
	int i;

	pool->free_map = kmalloc(sizeof(u16) * pool->size, GFP_KERNEL);

	if (!pool->free_map)
		return -1;

	pool->dma_addr = kmalloc(sizeof(dma_addr_t) * pool->size, GFP_KERNEL);
	if (!pool->dma_addr) {
		kfree(pool->free_map);
		pool->free_map = NULL;
		return -1;
	}

	pool->skbuff = kcalloc(pool->size, sizeof(void *), GFP_KERNEL);

	if (!pool->skbuff) {
		kfree(pool->dma_addr);
		pool->dma_addr = NULL;

		kfree(pool->free_map);
		pool->free_map = NULL;
		return -1;
	}

	memset(pool->dma_addr, 0, sizeof(dma_addr_t) * pool->size);

	for (i = 0; i < pool->size; ++i)
		pool->free_map[i] = i;

	atomic_set(&pool->available, 0);
	pool->producer_index = 0;
	pool->consumer_index = 0;

	return 0;
}

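/*
 * Flush the given region from the data cache one line (SMP_CACHE_BYTES)
 * at a time using dcbfl. Only used when the rx_flush module parameter is
 * set; flushing receive buffers before they are posted can be a
 * performance win on some machines.
 */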
static inline void ibmveth_flush_buffer(void *addr, unsigned long length)
{
	unsigned long offset;

	for (offset = 0; offset < length; offset += SMP_CACHE_BYTES)
		asm("dcbfl %0,%1" :: "b" (addr), "r" (offset));
}

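/*
 * Each posted buffer carries a 64-bit correlator in its first 8 bytes:
 * the pool index in the upper 32 bits and the buffer's index within that
 * pool in the lower 32 bits. The hypervisor hands the correlator back in
 * the receive queue entry, which is how ibmveth_rxq_get_buffer() and
 * ibmveth_remove_buffer_from_pool() find the skb again. For example,
 * correlator 0x0000000200000005 names buffer 5 of pool 2.
 */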
/* replenish the buffers for a pool.  note that we don't need to
 * skb_reserve these since they are used for incoming...
 */
static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter,
					  struct ibmveth_buff_pool *pool)
{
	u32 i;
	u32 count = pool->size - atomic_read(&pool->available);
	u32 buffers_added = 0;
	struct sk_buff *skb;
	unsigned int free_index, index;
	u64 correlator;
	unsigned long lpar_rc;
	dma_addr_t dma_addr;

	mb();

	for (i = 0; i < count; ++i) {
		union ibmveth_buf_desc desc;

		skb = netdev_alloc_skb(adapter->netdev, pool->buff_size);

		if (!skb) {
			netdev_dbg(adapter->netdev,
				   "replenish: unable to allocate skb\n");
			adapter->replenish_no_mem++;
			break;
		}

		free_index = pool->consumer_index;
		pool->consumer_index++;
		if (pool->consumer_index >= pool->size)
			pool->consumer_index = 0;
		index = pool->free_map[free_index];

		BUG_ON(index == IBM_VETH_INVALID_MAP);
		BUG_ON(pool->skbuff[index] != NULL);

		dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
				pool->buff_size, DMA_FROM_DEVICE);

		if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
			goto failure;

		pool->free_map[free_index] = IBM_VETH_INVALID_MAP;
		pool->dma_addr[index] = dma_addr;
		pool->skbuff[index] = skb;

		correlator = ((u64)pool->index << 32) | index;
		*(u64 *)skb->data = correlator;

		desc.fields.flags_len = IBMVETH_BUF_VALID | pool->buff_size;
		desc.fields.address = dma_addr;

		if (rx_flush) {
			unsigned int len = min(pool->buff_size,
						adapter->netdev->mtu +
						IBMVETH_BUFF_OH);
			ibmveth_flush_buffer(skb->data, len);
		}
		lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address,
						   desc.desc);

		if (lpar_rc != H_SUCCESS) {
			goto failure;
		} else {
			buffers_added++;
			adapter->replenish_add_buff_success++;
		}
	}

	mb();
	atomic_add(buffers_added, &(pool->available));
	return;

failure:
	pool->free_map[free_index] = index;
	pool->skbuff[index] = NULL;
	if (pool->consumer_index == 0)
		pool->consumer_index = pool->size - 1;
	else
		pool->consumer_index--;
	if (!dma_mapping_error(&adapter->vdev->dev, dma_addr))
		dma_unmap_single(&adapter->vdev->dev,
				 pool->dma_addr[index], pool->buff_size,
				 DMA_FROM_DEVICE);
	dev_kfree_skb_any(skb);
	adapter->replenish_add_buff_failure++;

	mb();
	atomic_add(buffers_added, &(pool->available));
}

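/*
 * Pools are replenished from the largest buffer size down. The last 8
 * bytes of the 4K buffer list page hold a firmware-maintained count of
 * frames dropped because no receive buffer was available; the replenish
 * task snapshots it into the rx_no_buffer statistic.
 */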
/* replenish all active pools that have dropped below their threshold */
static void ibmveth_replenish_task(struct ibmveth_adapter *adapter)
{
	int i;

	adapter->replenish_task_cycles++;

	for (i = (IBMVETH_NUM_BUFF_POOLS - 1); i >= 0; i--) {
		struct ibmveth_buff_pool *pool = &adapter->rx_buff_pool[i];

		if (pool->active &&
		    (atomic_read(&pool->available) < pool->threshold))
			ibmveth_replenish_buffer_pool(adapter, pool);
	}

	adapter->rx_no_buffer = *(u64 *)(((char *)adapter->buffer_list_addr) +
						4096 - 8);
}

/* empty and free a buffer pool - also used to do cleanup in error paths */
static void ibmveth_free_buffer_pool(struct ibmveth_adapter *adapter,
				     struct ibmveth_buff_pool *pool)
{
	int i;

	kfree(pool->free_map);
	pool->free_map = NULL;

	if (pool->skbuff && pool->dma_addr) {
		for (i = 0; i < pool->size; ++i) {
			struct sk_buff *skb = pool->skbuff[i];
			if (skb) {
				dma_unmap_single(&adapter->vdev->dev,
						 pool->dma_addr[i],
						 pool->buff_size,
						 DMA_FROM_DEVICE);
				dev_kfree_skb_any(skb);
				pool->skbuff[i] = NULL;
			}
		}
	}

	if (pool->dma_addr) {
		kfree(pool->dma_addr);
		pool->dma_addr = NULL;
	}

	if (pool->skbuff) {
		kfree(pool->skbuff);
		pool->skbuff = NULL;
	}
}

/* remove a buffer from a pool */
static void ibmveth_remove_buffer_from_pool(struct ibmveth_adapter *adapter,
					    u64 correlator)
{
	unsigned int pool  = correlator >> 32;
	unsigned int index = correlator & 0xffffffffUL;
	unsigned int free_index;
	struct sk_buff *skb;

	BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
	BUG_ON(index >= adapter->rx_buff_pool[pool].size);

	skb = adapter->rx_buff_pool[pool].skbuff[index];

	BUG_ON(skb == NULL);

	adapter->rx_buff_pool[pool].skbuff[index] = NULL;

	dma_unmap_single(&adapter->vdev->dev,
			 adapter->rx_buff_pool[pool].dma_addr[index],
			 adapter->rx_buff_pool[pool].buff_size,
			 DMA_FROM_DEVICE);

	free_index = adapter->rx_buff_pool[pool].producer_index;
	adapter->rx_buff_pool[pool].producer_index++;
	if (adapter->rx_buff_pool[pool].producer_index >=
	    adapter->rx_buff_pool[pool].size)
		adapter->rx_buff_pool[pool].producer_index = 0;
	adapter->rx_buff_pool[pool].free_map[free_index] = index;

	mb();

	atomic_dec(&(adapter->rx_buff_pool[pool].available));
}

/* get the current buffer on the rx queue */
static inline struct sk_buff *ibmveth_rxq_get_buffer(struct ibmveth_adapter *adapter)
{
	u64 correlator = adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator;
	unsigned int pool = correlator >> 32;
	unsigned int index = correlator & 0xffffffffUL;

	BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
	BUG_ON(index >= adapter->rx_buff_pool[pool].size);

	return adapter->rx_buff_pool[pool].skbuff[index];
}

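/*
 * A consumed buffer is either recycled (reposted to the hypervisor with
 * its existing DMA mapping intact, the cheap path) or harvested (removed
 * from the pool and unmapped, after which the replenish task will post a
 * fresh one). Recycling falls back to removal if the pool has been
 * deactivated or the hypervisor refuses the buffer.
 */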
/* recycle the current buffer on the rx queue */
static int ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter)
{
	u32 q_index = adapter->rx_queue.index;
	u64 correlator = adapter->rx_queue.queue_addr[q_index].correlator;
	unsigned int pool = correlator >> 32;
	unsigned int index = correlator & 0xffffffffUL;
	union ibmveth_buf_desc desc;
	unsigned long lpar_rc;
	int ret = 1;

	BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
	BUG_ON(index >= adapter->rx_buff_pool[pool].size);

	if (!adapter->rx_buff_pool[pool].active) {
		ibmveth_rxq_harvest_buffer(adapter);
		ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[pool]);
		goto out;
	}

	desc.fields.flags_len = IBMVETH_BUF_VALID |
		adapter->rx_buff_pool[pool].buff_size;
	desc.fields.address = adapter->rx_buff_pool[pool].dma_addr[index];

	lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc);

	if (lpar_rc != H_SUCCESS) {
		netdev_dbg(adapter->netdev, "h_add_logical_lan_buffer failed "
			   "during recycle rc=%ld", lpar_rc);
		ibmveth_remove_buffer_from_pool(adapter, correlator);
		ret = 0;
	}

	if (++adapter->rx_queue.index == adapter->rx_queue.num_slots) {
		adapter->rx_queue.index = 0;
		adapter->rx_queue.toggle = !adapter->rx_queue.toggle;
	}

out:
	return ret;
}

static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter)
{
	ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator);

	if (++adapter->rx_queue.index == adapter->rx_queue.num_slots) {
		adapter->rx_queue.index = 0;
		adapter->rx_queue.toggle = !adapter->rx_queue.toggle;
	}
}

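/*
 * Tear down everything ibmveth_open() set up. Safe to call on a
 * partially initialized adapter: every free/unmap below is guarded by a
 * NULL or DMA-mapping-error check.
 */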
static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
{
	int i;
	struct device *dev = &adapter->vdev->dev;

	if (adapter->buffer_list_addr != NULL) {
		if (!dma_mapping_error(dev, adapter->buffer_list_dma)) {
			dma_unmap_single(dev, adapter->buffer_list_dma, 4096,
					DMA_BIDIRECTIONAL);
			adapter->buffer_list_dma = DMA_ERROR_CODE;
		}
		free_page((unsigned long)adapter->buffer_list_addr);
		adapter->buffer_list_addr = NULL;
	}

	if (adapter->filter_list_addr != NULL) {
		if (!dma_mapping_error(dev, adapter->filter_list_dma)) {
			dma_unmap_single(dev, adapter->filter_list_dma, 4096,
					DMA_BIDIRECTIONAL);
			adapter->filter_list_dma = DMA_ERROR_CODE;
		}
		free_page((unsigned long)adapter->filter_list_addr);
		adapter->filter_list_addr = NULL;
	}

	if (adapter->rx_queue.queue_addr != NULL) {
		dma_free_coherent(dev, adapter->rx_queue.queue_len,
				  adapter->rx_queue.queue_addr,
				  adapter->rx_queue.queue_dma);
		adapter->rx_queue.queue_addr = NULL;
	}

	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
		if (adapter->rx_buff_pool[i].active)
			ibmveth_free_buffer_pool(adapter,
						 &adapter->rx_buff_pool[i]);

	if (adapter->bounce_buffer != NULL) {
		if (!dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
			dma_unmap_single(&adapter->vdev->dev,
					adapter->bounce_buffer_dma,
					adapter->netdev->mtu + IBMVETH_BUFF_OH,
					DMA_BIDIRECTIONAL);
			adapter->bounce_buffer_dma = DMA_ERROR_CODE;
		}
		kfree(adapter->bounce_buffer);
		adapter->bounce_buffer = NULL;
	}
}

static int ibmveth_register_logical_lan(struct ibmveth_adapter *adapter,
					union ibmveth_buf_desc rxq_desc,
					u64 mac_address)
{
	int rc, try_again = 1;

	/*
	 * After a kexec the adapter will still be open, so our attempt to
	 * open it will fail. So if we get a failure we free the adapter and
	 * try again, but only once.
	 */
retry:
	rc = h_register_logical_lan(adapter->vdev->unit_address,
				    adapter->buffer_list_dma, rxq_desc.desc,
				    adapter->filter_list_dma, mac_address);

	if (rc != H_SUCCESS && try_again) {
		do {
			rc = h_free_logical_lan(adapter->vdev->unit_address);
		} while (H_IS_LONG_BUSY(rc) || (rc == H_BUSY));

		try_again = 0;
		goto retry;
	}

	return rc;
}

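/*
 * Open: allocate the buffer list and filter list pages and the receive
 * queue, map them for DMA, register the logical LAN with the hypervisor,
 * allocate the active buffer pools, request the interrupt, and finally
 * kick the interrupt handler once to run the initial replenish cycle.
 */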
static int ibmveth_open(struct net_device *netdev)
{
	struct ibmveth_adapter *adapter = netdev_priv(netdev);
	u64 mac_address = 0;
	int rxq_entries = 1;
	unsigned long lpar_rc;
	int rc;
	union ibmveth_buf_desc rxq_desc;
	int i;
	struct device *dev;

	netdev_dbg(netdev, "open starting\n");

	napi_enable(&adapter->napi);

	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
		rxq_entries += adapter->rx_buff_pool[i].size;

	adapter->buffer_list_addr = (void *) get_zeroed_page(GFP_KERNEL);
	adapter->filter_list_addr = (void *) get_zeroed_page(GFP_KERNEL);

	if (!adapter->buffer_list_addr || !adapter->filter_list_addr) {
		netdev_err(netdev, "unable to allocate filter or buffer list "
			   "pages\n");
		rc = -ENOMEM;
		goto err_out;
	}

	dev = &adapter->vdev->dev;

	adapter->rx_queue.queue_len = sizeof(struct ibmveth_rx_q_entry) *
						rxq_entries;
	adapter->rx_queue.queue_addr =
	    dma_alloc_coherent(dev, adapter->rx_queue.queue_len,
			       &adapter->rx_queue.queue_dma, GFP_KERNEL);

	if (!adapter->rx_queue.queue_addr) {
		netdev_err(netdev, "unable to allocate rx queue pages\n");
		rc = -ENOMEM;
		goto err_out;
	}

	adapter->buffer_list_dma = dma_map_single(dev,
			adapter->buffer_list_addr, 4096, DMA_BIDIRECTIONAL);
	adapter->filter_list_dma = dma_map_single(dev,
			adapter->filter_list_addr, 4096, DMA_BIDIRECTIONAL);

	if ((dma_mapping_error(dev, adapter->buffer_list_dma)) ||
	    (dma_mapping_error(dev, adapter->filter_list_dma))) {
		netdev_err(netdev, "unable to map filter or buffer list "
			   "pages\n");
		rc = -ENOMEM;
		goto err_out;
	}

	adapter->rx_queue.index = 0;
	adapter->rx_queue.num_slots = rxq_entries;
	adapter->rx_queue.toggle = 1;

	memcpy(&mac_address, netdev->dev_addr, netdev->addr_len);
	mac_address = mac_address >> 16;

	rxq_desc.fields.flags_len = IBMVETH_BUF_VALID |
					adapter->rx_queue.queue_len;
	rxq_desc.fields.address = adapter->rx_queue.queue_dma;

	netdev_dbg(netdev, "buffer list @ 0x%p\n", adapter->buffer_list_addr);
	netdev_dbg(netdev, "filter list @ 0x%p\n", adapter->filter_list_addr);
	netdev_dbg(netdev, "receive q   @ 0x%p\n", adapter->rx_queue.queue_addr);

	h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE);

	lpar_rc = ibmveth_register_logical_lan(adapter, rxq_desc, mac_address);

	if (lpar_rc != H_SUCCESS) {
		netdev_err(netdev, "h_register_logical_lan failed with %ld\n",
			   lpar_rc);
		netdev_err(netdev, "buffer TCE:0x%llx filter TCE:0x%llx rxq "
			   "desc:0x%llx MAC:0x%llx\n",
				     adapter->buffer_list_dma,
				     adapter->filter_list_dma,
				     rxq_desc.desc,
				     mac_address);
		rc = -ENONET;
		goto err_out;
	}

	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
		if (!adapter->rx_buff_pool[i].active)
			continue;
		if (ibmveth_alloc_buffer_pool(&adapter->rx_buff_pool[i])) {
			netdev_err(netdev, "unable to alloc pool\n");
			adapter->rx_buff_pool[i].active = 0;
			rc = -ENOMEM;
			goto err_out;
		}
	}

	netdev_dbg(netdev, "registering irq 0x%x\n", netdev->irq);
	rc = request_irq(netdev->irq, ibmveth_interrupt, 0, netdev->name,
			 netdev);
	if (rc != 0) {
		netdev_err(netdev, "unable to request irq 0x%x, rc %d\n",
			   netdev->irq, rc);
		do {
			lpar_rc = h_free_logical_lan(adapter->vdev->unit_address);
		} while (H_IS_LONG_BUSY(lpar_rc) || (lpar_rc == H_BUSY));

		goto err_out;
	}

	adapter->bounce_buffer =
	    kmalloc(netdev->mtu + IBMVETH_BUFF_OH, GFP_KERNEL);
	if (!adapter->bounce_buffer) {
		rc = -ENOMEM;
		goto err_out_free_irq;
	}
	adapter->bounce_buffer_dma =
	    dma_map_single(&adapter->vdev->dev, adapter->bounce_buffer,
			   netdev->mtu + IBMVETH_BUFF_OH, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
		netdev_err(netdev, "unable to map bounce buffer\n");
		rc = -ENOMEM;
		goto err_out_free_irq;
	}

	netdev_dbg(netdev, "initial replenish cycle\n");
	ibmveth_interrupt(netdev->irq, netdev);

	netif_start_queue(netdev);

	netdev_dbg(netdev, "open complete\n");

	return 0;

err_out_free_irq:
	free_irq(netdev->irq, netdev);
err_out:
	ibmveth_cleanup(adapter);
	napi_disable(&adapter->napi);
	return rc;
}

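/*
 * Close quiesces the device and frees the hypervisor registration. The
 * pool_config flag is set by callers that are only reconfiguring buffer
 * pools; in that case the transmit queue is left running so the netdev
 * does not flap.
 */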
static int ibmveth_close(struct net_device *netdev)
{
	struct ibmveth_adapter *adapter = netdev_priv(netdev);
	long lpar_rc;

	netdev_dbg(netdev, "close starting\n");

	napi_disable(&adapter->napi);

	if (!adapter->pool_config)
		netif_stop_queue(netdev);

	h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE);

	do {
		lpar_rc = h_free_logical_lan(adapter->vdev->unit_address);
	} while (H_IS_LONG_BUSY(lpar_rc) || (lpar_rc == H_BUSY));

	if (lpar_rc != H_SUCCESS) {
		netdev_err(netdev, "h_free_logical_lan failed with %lx, "
			   "continuing with close\n", lpar_rc);
	}

	free_irq(netdev->irq, netdev);

	adapter->rx_no_buffer = *(u64 *)(((char *)adapter->buffer_list_addr) +
						4096 - 8);

	ibmveth_cleanup(adapter);

	netdev_dbg(netdev, "close complete\n");

	return 0;
}

static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	cmd->supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg |
				SUPPORTED_FIBRE);
	cmd->advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg |
				ADVERTISED_FIBRE);
	ethtool_cmd_speed_set(cmd, SPEED_1000);
	cmd->duplex = DUPLEX_FULL;
	cmd->port = PORT_FIBRE;
	cmd->phy_address = 0;
	cmd->transceiver = XCVR_INTERNAL;
	cmd->autoneg = AUTONEG_ENABLE;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 1;
	return 0;
}

static void netdev_get_drvinfo(struct net_device *dev,
			       struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, ibmveth_driver_name, sizeof(info->driver));
	strlcpy(info->version, ibmveth_driver_version, sizeof(info->version));
}

static netdev_features_t ibmveth_fix_features(struct net_device *dev,
	netdev_features_t features)
{
	/*
	 * Since the ibmveth firmware interface does not have the
	 * concept of separate tx/rx checksum offload enable, if rx
	 * checksum is disabled we also have to disable tx checksum
	 * offload. Once we disable rx checksum offload, we are no
	 * longer allowed to send tx buffers that are not properly
	 * checksummed.
	 */

	if (!(features & NETIF_F_RXCSUM))
		features &= ~NETIF_F_ALL_CSUM;

	return features;
}

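/*
 * Checksum offload is negotiated with the hypervisor through
 * h_illan_attributes: one call reads the current attributes, then a
 * clear/set pair enables or disables the IPv4 and IPv6 TCP checksum
 * bits. On failure the call is reversed so firmware and driver state
 * stay in sync.
 */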
static int ibmveth_set_csum_offload(struct net_device *dev, u32 data)
{
	struct ibmveth_adapter *adapter = netdev_priv(dev);
	unsigned long set_attr, clr_attr, ret_attr;
	unsigned long set_attr6, clr_attr6;
	long ret, ret4, ret6;
	int rc1 = 0, rc2 = 0;
	int restart = 0;

	if (netif_running(dev)) {
		restart = 1;
		adapter->pool_config = 1;
		ibmveth_close(dev);
		adapter->pool_config = 0;
	}

	set_attr = 0;
	clr_attr = 0;
	set_attr6 = 0;
	clr_attr6 = 0;

	if (data) {
		set_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM;
		set_attr6 = IBMVETH_ILLAN_IPV6_TCP_CSUM;
	} else {
		clr_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM;
		clr_attr6 = IBMVETH_ILLAN_IPV6_TCP_CSUM;
	}

	ret = h_illan_attributes(adapter->vdev->unit_address, 0, 0, &ret_attr);

	if (ret == H_SUCCESS && !(ret_attr & IBMVETH_ILLAN_ACTIVE_TRUNK) &&
	    !(ret_attr & IBMVETH_ILLAN_TRUNK_PRI_MASK) &&
	    (ret_attr & IBMVETH_ILLAN_PADDED_PKT_CSUM)) {
		ret4 = h_illan_attributes(adapter->vdev->unit_address, clr_attr,
					  set_attr, &ret_attr);

		if (ret4 != H_SUCCESS) {
			netdev_err(dev, "unable to change IPv4 checksum "
					"offload settings. %d rc=%ld\n",
					data, ret4);

			h_illan_attributes(adapter->vdev->unit_address,
					   set_attr, clr_attr, &ret_attr);

			if (data == 1)
				dev->features &= ~NETIF_F_IP_CSUM;

		} else {
			adapter->fw_ipv4_csum_support = data;
		}

		ret6 = h_illan_attributes(adapter->vdev->unit_address,
					  clr_attr6, set_attr6, &ret_attr);

		if (ret6 != H_SUCCESS) {
			netdev_err(dev, "unable to change IPv6 checksum "
					"offload settings. %d rc=%ld\n",
					data, ret6);

			h_illan_attributes(adapter->vdev->unit_address,
					   set_attr6, clr_attr6, &ret_attr);

			if (data == 1)
				dev->features &= ~NETIF_F_IPV6_CSUM;

		} else {
			adapter->fw_ipv6_csum_support = data;
		}

		if (ret4 == H_SUCCESS || ret6 == H_SUCCESS)
			adapter->rx_csum = data;
		else
			rc1 = -EIO;
	} else {
		rc1 = -EIO;
		netdev_err(dev, "unable to change checksum offload settings."
				     " %d rc=%ld ret_attr=%lx\n", data, ret,
				     ret_attr);
	}

	if (restart)
		rc2 = ibmveth_open(dev);

	return rc1 ? rc1 : rc2;
}

static int ibmveth_set_features(struct net_device *dev,
	netdev_features_t features)
{
	struct ibmveth_adapter *adapter = netdev_priv(dev);
	int rx_csum = !!(features & NETIF_F_RXCSUM);
	int rc;

	if (rx_csum == adapter->rx_csum)
		return 0;

	rc = ibmveth_set_csum_offload(dev, rx_csum);
	if (rc && !adapter->rx_csum)
		dev->features = features & ~(NETIF_F_ALL_CSUM | NETIF_F_RXCSUM);

	return rc;
}

static void ibmveth_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	int i;

	if (stringset != ETH_SS_STATS)
		return;

	for (i = 0; i < ARRAY_SIZE(ibmveth_stats); i++, data += ETH_GSTRING_LEN)
		memcpy(data, ibmveth_stats[i].name, ETH_GSTRING_LEN);
}

static int ibmveth_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(ibmveth_stats);
	default:
		return -EOPNOTSUPP;
	}
}

static void ibmveth_get_ethtool_stats(struct net_device *dev,
				      struct ethtool_stats *stats, u64 *data)
{
	int i;
	struct ibmveth_adapter *adapter = netdev_priv(dev);

	for (i = 0; i < ARRAY_SIZE(ibmveth_stats); i++)
		data[i] = IBMVETH_GET_STAT(adapter, ibmveth_stats[i].offset);
}

static const struct ethtool_ops netdev_ethtool_ops = {
	.get_drvinfo		= netdev_get_drvinfo,
	.get_settings		= netdev_get_settings,
	.get_link		= ethtool_op_get_link,
	.get_strings		= ibmveth_get_strings,
	.get_sset_count		= ibmveth_get_sset_count,
	.get_ethtool_stats	= ibmveth_get_ethtool_stats,
};

static int ibmveth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	return -EOPNOTSUPP;
}

#define page_offset(v) ((unsigned long)(v) & ((1 << 12) - 1))

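/*
 * h_send_logical_lan takes exactly six buffer descriptors, which is why
 * descs[] always has six entries (unused ones stay zero). The retry loop
 * absorbs H_BUSY returns while the hypervisor copies the frame to
 * broadcast/multicast destinations.
 */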
static int ibmveth_send(struct ibmveth_adapter *adapter,
			union ibmveth_buf_desc *descs)
{
	unsigned long correlator;
	unsigned int retry_count;
	unsigned long ret;

	/*
	 * The retry count sets a maximum for the number of broadcast and
	 * multicast destinations within the system.
	 */
	retry_count = 1024;
	correlator = 0;
	do {
		ret = h_send_logical_lan(adapter->vdev->unit_address,
					 descs[0].desc, descs[1].desc,
					 descs[2].desc, descs[3].desc,
					 descs[4].desc, descs[5].desc,
					 correlator, &correlator);
	} while ((ret == H_BUSY) && (retry_count--));

	if (ret != H_SUCCESS && ret != H_DROPPED) {
		netdev_err(adapter->netdev, "tx: h_send_logical_lan failed "
			   "with rc=%ld\n", ret);
		return 1;
	}

	return 0;
}

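/*
 * Transmit takes one of two paths: small linear skbs (under tx_copybreak
 * bytes) are copied into the pre-mapped bounce buffer, avoiding a TCE
 * map/unmap per packet; everything else is DMA-mapped in place, header
 * plus up to five fragments. If mapping fails, the skb is linearized and
 * forced through the bounce buffer.
 */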
static netdev_tx_t ibmveth_start_xmit(struct sk_buff *skb,
				      struct net_device *netdev)
{
	struct ibmveth_adapter *adapter = netdev_priv(netdev);
	unsigned int desc_flags;
	union ibmveth_buf_desc descs[6];
	int last, i;
	int force_bounce = 0;
	dma_addr_t dma_addr;

	/*
	 * veth handles a maximum of 6 segments including the header, so
	 * we have to linearize the skb if there are more than this.
	 */
	if (skb_shinfo(skb)->nr_frags > 5 && __skb_linearize(skb)) {
		netdev->stats.tx_dropped++;
		goto out;
	}

	/* veth can't checksum offload UDP */
	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    ((skb->protocol == htons(ETH_P_IP) &&
	      ip_hdr(skb)->protocol != IPPROTO_TCP) ||
	     (skb->protocol == htons(ETH_P_IPV6) &&
	      ipv6_hdr(skb)->nexthdr != IPPROTO_TCP)) &&
	    skb_checksum_help(skb)) {

		netdev_err(netdev, "tx: failed to checksum packet\n");
		netdev->stats.tx_dropped++;
		goto out;
	}

	desc_flags = IBMVETH_BUF_VALID;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		unsigned char *buf = skb_transport_header(skb) +
						skb->csum_offset;

		desc_flags |= (IBMVETH_BUF_NO_CSUM | IBMVETH_BUF_CSUM_GOOD);

		/* Need to zero out the checksum */
		buf[0] = 0;
		buf[1] = 0;
	}

retry_bounce:
	memset(descs, 0, sizeof(descs));

	/*
	 * If a linear packet is below the tx_copybreak threshold then
	 * copy it into the static bounce buffer. This avoids the
	 * cost of a TCE insert and remove.
	 */
	if (force_bounce || (!skb_is_nonlinear(skb) &&
				(skb->len < tx_copybreak))) {
		skb_copy_from_linear_data(skb, adapter->bounce_buffer,
					  skb->len);

		descs[0].fields.flags_len = desc_flags | skb->len;
		descs[0].fields.address = adapter->bounce_buffer_dma;

		if (ibmveth_send(adapter, descs)) {
			adapter->tx_send_failed++;
			netdev->stats.tx_dropped++;
		} else {
			netdev->stats.tx_packets++;
			netdev->stats.tx_bytes += skb->len;
		}

		goto out;
	}

	/* Map the header */
	dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
				  skb_headlen(skb), DMA_TO_DEVICE);
	if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
		goto map_failed;

	descs[0].fields.flags_len = desc_flags | skb_headlen(skb);
	descs[0].fields.address = dma_addr;

	/* Map the frags */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		dma_addr = skb_frag_dma_map(&adapter->vdev->dev, frag, 0,
					    skb_frag_size(frag), DMA_TO_DEVICE);

		if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
			goto map_failed_frags;

		descs[i+1].fields.flags_len = desc_flags | skb_frag_size(frag);
		descs[i+1].fields.address = dma_addr;
	}

	if (ibmveth_send(adapter, descs)) {
		adapter->tx_send_failed++;
		netdev->stats.tx_dropped++;
	} else {
		netdev->stats.tx_packets++;
		netdev->stats.tx_bytes += skb->len;
	}

	dma_unmap_single(&adapter->vdev->dev,
			 descs[0].fields.address,
			 descs[0].fields.flags_len & IBMVETH_BUF_LEN_MASK,
			 DMA_TO_DEVICE);

	for (i = 1; i < skb_shinfo(skb)->nr_frags + 1; i++)
		dma_unmap_page(&adapter->vdev->dev, descs[i].fields.address,
			       descs[i].fields.flags_len & IBMVETH_BUF_LEN_MASK,
			       DMA_TO_DEVICE);

out:
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;

map_failed_frags:
	/* descs[0] was mapped with dma_map_single and the frags with
	 * skb_frag_dma_map, so each must be unmapped with the matching
	 * unmap call.
	 */
	last = i+1;
	for (i = 1; i < last; i++)
		dma_unmap_page(&adapter->vdev->dev, descs[i].fields.address,
			       descs[i].fields.flags_len & IBMVETH_BUF_LEN_MASK,
			       DMA_TO_DEVICE);

	dma_unmap_single(&adapter->vdev->dev,
			 descs[0].fields.address,
			 descs[0].fields.flags_len & IBMVETH_BUF_LEN_MASK,
			 DMA_TO_DEVICE);

map_failed:
	if (!firmware_has_feature(FW_FEATURE_CMO))
		netdev_err(netdev, "tx: unable to map xmit buffer\n");
	adapter->tx_map_failed++;
	skb_linearize(skb);
	force_bounce = 1;
	goto retry_bounce;
}

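/*
 * NAPI poll loop: drain up to "budget" frames from the receive queue,
 * copying small frames (under rx_copybreak bytes) into fresh skbs so the
 * large mapped buffer can be recycled immediately, then replenish the
 * pools. Interrupts are re-enabled only once the queue is seen empty,
 * with a final recheck to close the race against a frame arriving in
 * between.
 */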
static int ibmveth_poll(struct napi_struct *napi, int budget)
{
	struct ibmveth_adapter *adapter =
			container_of(napi, struct ibmveth_adapter, napi);
	struct net_device *netdev = adapter->netdev;
	int frames_processed = 0;
	unsigned long lpar_rc;

restart_poll:
	do {
		if (!ibmveth_rxq_pending_buffer(adapter))
			break;

		smp_rmb();
		if (!ibmveth_rxq_buffer_valid(adapter)) {
			wmb(); /* suggested by larson1 */
			adapter->rx_invalid_buffer++;
			netdev_dbg(netdev, "recycling invalid buffer\n");
			ibmveth_rxq_recycle_buffer(adapter);
		} else {
			struct sk_buff *skb, *new_skb;
			int length = ibmveth_rxq_frame_length(adapter);
			int offset = ibmveth_rxq_frame_offset(adapter);
			int csum_good = ibmveth_rxq_csum_good(adapter);

			skb = ibmveth_rxq_get_buffer(adapter);

			new_skb = NULL;
			if (length < rx_copybreak)
				new_skb = netdev_alloc_skb(netdev, length);

			if (new_skb) {
				skb_copy_to_linear_data(new_skb,
							skb->data + offset,
							length);
				if (rx_flush)
					ibmveth_flush_buffer(skb->data,
						length + offset);
				if (!ibmveth_rxq_recycle_buffer(adapter))
					kfree_skb(skb);
				skb = new_skb;
			} else {
				ibmveth_rxq_harvest_buffer(adapter);
				skb_reserve(skb, offset);
			}

			skb_put(skb, length);
			skb->protocol = eth_type_trans(skb, netdev);

			if (csum_good)
				skb->ip_summed = CHECKSUM_UNNECESSARY;

			netif_receive_skb(skb);	/* send it up */

			netdev->stats.rx_packets++;
			netdev->stats.rx_bytes += length;
			frames_processed++;
		}
	} while (frames_processed < budget);

	ibmveth_replenish_task(adapter);

	if (frames_processed < budget) {
		/* We think we are done - reenable interrupts,
		 * then check once more to make sure we are done.
		 */
		lpar_rc = h_vio_signal(adapter->vdev->unit_address,
				       VIO_IRQ_ENABLE);

		BUG_ON(lpar_rc != H_SUCCESS);

		napi_complete(napi);

		if (ibmveth_rxq_pending_buffer(adapter) &&
		    napi_reschedule(napi)) {
			lpar_rc = h_vio_signal(adapter->vdev->unit_address,
					       VIO_IRQ_DISABLE);
			goto restart_poll;
		}
	}

	return frames_processed;
}

static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance)
{
	struct net_device *netdev = dev_instance;
	struct ibmveth_adapter *adapter = netdev_priv(netdev);
	unsigned long lpar_rc;

	if (napi_schedule_prep(&adapter->napi)) {
		lpar_rc = h_vio_signal(adapter->vdev->unit_address,
				       VIO_IRQ_DISABLE);
		BUG_ON(lpar_rc != H_SUCCESS);
		__napi_schedule(&adapter->napi);
	}
	return IRQ_HANDLED;
}

static void ibmveth_set_multicast_list(struct net_device *netdev)
{
	struct ibmveth_adapter *adapter = netdev_priv(netdev);
	unsigned long lpar_rc;

	if ((netdev->flags & IFF_PROMISC) ||
	    (netdev_mc_count(netdev) > adapter->mcastFilterSize)) {
		lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
					   IbmVethMcastEnableRecv |
					   IbmVethMcastDisableFiltering,
					   0);
		if (lpar_rc != H_SUCCESS) {
			netdev_err(netdev, "h_multicast_ctrl rc=%ld when "
				   "entering promisc mode\n", lpar_rc);
		}
	} else {
		struct netdev_hw_addr *ha;
		/* clear the filter table & disable filtering */
		lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
					   IbmVethMcastEnableRecv |
					   IbmVethMcastDisableFiltering |
					   IbmVethMcastClearFilterTable,
					   0);
		if (lpar_rc != H_SUCCESS) {
			netdev_err(netdev, "h_multicast_ctrl rc=%ld when "
				   "attempting to clear filter table\n",
				   lpar_rc);
		}
		/* add the addresses to the filter table */
		netdev_for_each_mc_addr(ha, netdev) {
			unsigned long mcast_addr = 0;
			memcpy(((char *)&mcast_addr)+2, ha->addr, 6);
			lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
						   IbmVethMcastAddFilter,
						   mcast_addr);
			if (lpar_rc != H_SUCCESS) {
				netdev_err(netdev, "h_multicast_ctrl rc=%ld "
					   "when adding an entry to the filter "
					   "table\n", lpar_rc);
			}
		}

		/* re-enable filtering */
		lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
					   IbmVethMcastEnableFiltering,
					   0);
		if (lpar_rc != H_SUCCESS) {
			netdev_err(netdev, "h_multicast_ctrl rc=%ld when "
				   "enabling filtering\n", lpar_rc);
		}
	}
}

static int ibmveth_change_mtu(struct net_device *dev, int new_mtu)
{
	struct ibmveth_adapter *adapter = netdev_priv(dev);
	struct vio_dev *viodev = adapter->vdev;
	int new_mtu_oh = new_mtu + IBMVETH_BUFF_OH;
	int i, rc;
	int need_restart = 0;

	if (new_mtu < IBMVETH_MIN_MTU)
		return -EINVAL;

	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
		if (new_mtu_oh < adapter->rx_buff_pool[i].buff_size)
			break;

	if (i == IBMVETH_NUM_BUFF_POOLS)
		return -EINVAL;

	/* Deactivate all the buffer pools so that the next loop can activate
	 * only the buffer pools necessary to hold the new MTU
	 */
	if (netif_running(adapter->netdev)) {
		need_restart = 1;
		adapter->pool_config = 1;
		ibmveth_close(adapter->netdev);
		adapter->pool_config = 0;
	}

	/* Look for an active buffer pool that can hold the new MTU */
	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
		adapter->rx_buff_pool[i].active = 1;

		if (new_mtu_oh < adapter->rx_buff_pool[i].buff_size) {
			dev->mtu = new_mtu;
			vio_cmo_set_dev_desired(viodev,
						ibmveth_get_desired_dma
						(viodev));
			if (need_restart) {
				return ibmveth_open(adapter->netdev);
			}
			return 0;
		}
	}

	if (need_restart && (rc = ibmveth_open(adapter->netdev)))
		return rc;

	return -EINVAL;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void ibmveth_poll_controller(struct net_device *dev)
{
	ibmveth_replenish_task(netdev_priv(dev));
	ibmveth_interrupt(dev->irq, dev);
}
#endif

/**
 * ibmveth_get_desired_dma - Calculate IO memory desired by the driver
 *
 * @vdev: struct vio_dev for the device whose desired IO mem is to be returned
 *
 * Return value:
 *	Number of bytes of IO data the driver will need to perform well.
 */
static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev)
{
	struct net_device *netdev = dev_get_drvdata(&vdev->dev);
	struct ibmveth_adapter *adapter;
	unsigned long ret;
	int i;
	int rxqentries = 1;

	/* netdev inits at probe time along with the structures we need below */
	if (netdev == NULL)
		return IOMMU_PAGE_ALIGN(IBMVETH_IO_ENTITLEMENT_DEFAULT);

	adapter = netdev_priv(netdev);

	ret = IBMVETH_BUFF_LIST_SIZE + IBMVETH_FILT_LIST_SIZE;
	ret += IOMMU_PAGE_ALIGN(netdev->mtu);

	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
		/* add the size of the active receive buffers */
		if (adapter->rx_buff_pool[i].active)
			ret += adapter->rx_buff_pool[i].size *
			       IOMMU_PAGE_ALIGN(adapter->rx_buff_pool[i].buff_size);
		rxqentries += adapter->rx_buff_pool[i].size;
	}
	/* add the size of the receive queue entries */
	ret += IOMMU_PAGE_ALIGN(rxqentries * sizeof(struct ibmveth_rx_q_entry));

	return ret;
}

static const struct net_device_ops ibmveth_netdev_ops = {
	.ndo_open		= ibmveth_open,
	.ndo_stop		= ibmveth_close,
	.ndo_start_xmit		= ibmveth_start_xmit,
	.ndo_set_rx_mode	= ibmveth_set_multicast_list,
	.ndo_do_ioctl		= ibmveth_ioctl,
	.ndo_change_mtu		= ibmveth_change_mtu,
	.ndo_fix_features	= ibmveth_fix_features,
	.ndo_set_features	= ibmveth_set_features,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= ibmveth_poll_controller,
#endif
};

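/*
 * Probe reads the MAC address and multicast filter size from the device
 * tree (via vio_get_attribute), sets up NAPI and the netdev ops, creates
 * a sysfs kobject per buffer pool, and registers the netdev.
 */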
static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
{
	int rc, i;
	struct net_device *netdev;
	struct ibmveth_adapter *adapter;
	unsigned char *mac_addr_p;
	unsigned int *mcastFilterSize_p;

	dev_dbg(&dev->dev, "entering ibmveth_probe for UA 0x%x\n",
		dev->unit_address);

	mac_addr_p = (unsigned char *)vio_get_attribute(dev, VETH_MAC_ADDR,
							NULL);
	if (!mac_addr_p) {
		dev_err(&dev->dev, "Can't find VETH_MAC_ADDR attribute\n");
		return -EINVAL;
	}

	mcastFilterSize_p = (unsigned int *)vio_get_attribute(dev,
						VETH_MCAST_FILTER_SIZE, NULL);
	if (!mcastFilterSize_p) {
		dev_err(&dev->dev, "Can't find VETH_MCAST_FILTER_SIZE "
			"attribute\n");
		return -EINVAL;
	}

	netdev = alloc_etherdev(sizeof(struct ibmveth_adapter));

	if (!netdev)
		return -ENOMEM;

	adapter = netdev_priv(netdev);
	dev_set_drvdata(&dev->dev, netdev);

	adapter->vdev = dev;
	adapter->netdev = netdev;
	adapter->mcastFilterSize = *mcastFilterSize_p;
	adapter->pool_config = 0;

	netif_napi_add(netdev, &adapter->napi, ibmveth_poll, 16);

	/*
	 * Some older boxes running PHYP non-natively have an OF that returns
	 * an 8-byte local-mac-address field (where the first 2 bytes have to
	 * be ignored), while newer boxes' OF returns a 6-byte field. Note
	 * that IEEE 1275 specifies that local-mac-address must be a 6-byte
	 * field. The RPA doc specifies that the first byte must be 10b, so
	 * we'll just look for it to solve this 8 vs. 6 byte field issue.
	 */
	if ((*mac_addr_p & 0x3) != 0x02)
		mac_addr_p += 2;

	adapter->mac_addr = 0;
	memcpy(&adapter->mac_addr, mac_addr_p, 6);

	netdev->irq = dev->irq;
	netdev->netdev_ops = &ibmveth_netdev_ops;
	netdev->ethtool_ops = &netdev_ethtool_ops;
	SET_NETDEV_DEV(netdev, &dev->dev);
	netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
	netdev->features |= netdev->hw_features;

	memcpy(netdev->dev_addr, &adapter->mac_addr, netdev->addr_len);

	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
		struct kobject *kobj = &adapter->rx_buff_pool[i].kobj;
		int error;

		ibmveth_init_buffer_pool(&adapter->rx_buff_pool[i], i,
					 pool_count[i], pool_size[i],
					 pool_active[i]);
		error = kobject_init_and_add(kobj, &ktype_veth_pool,
					     &dev->dev.kobj, "pool%d", i);
		if (!error)
			kobject_uevent(kobj, KOBJ_ADD);
	}

	netdev_dbg(netdev, "adapter @ 0x%p\n", adapter);

	adapter->buffer_list_dma = DMA_ERROR_CODE;
	adapter->filter_list_dma = DMA_ERROR_CODE;
	adapter->rx_queue.queue_dma = DMA_ERROR_CODE;

	netdev_dbg(netdev, "registering netdev...\n");

	ibmveth_set_features(netdev, netdev->features);

	rc = register_netdev(netdev);

	if (rc) {
		netdev_dbg(netdev, "failed to register netdev rc=%d\n", rc);
		free_netdev(netdev);
		return rc;
	}

	netdev_dbg(netdev, "registered\n");

	return 0;
}

static int ibmveth_remove(struct vio_dev *dev)
{
	struct net_device *netdev = dev_get_drvdata(&dev->dev);
	struct ibmveth_adapter *adapter = netdev_priv(netdev);
	int i;

	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
		kobject_put(&adapter->rx_buff_pool[i].kobj);

	unregister_netdev(netdev);

	free_netdev(netdev);
	dev_set_drvdata(&dev->dev, NULL);

	return 0;
}

static struct attribute veth_active_attr;
static struct attribute veth_num_attr;
static struct attribute veth_size_attr;

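/*
 * Each buffer pool is exposed under sysfs as pool<N> with three writable
 * attributes: active, num (buffer count) and size (buffer size). Writes
 * may close and reopen the interface to apply the change. Example, with
 * a hypothetical unit address:
 *
 *   echo 0 > /sys/devices/vio/30000002/pool3/active
 */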
static ssize_t veth_pool_show(struct kobject *kobj,
			      struct attribute *attr, char *buf)
{
	struct ibmveth_buff_pool *pool = container_of(kobj,
						      struct ibmveth_buff_pool,
						      kobj);

	if (attr == &veth_active_attr)
		return sprintf(buf, "%d\n", pool->active);
	else if (attr == &veth_num_attr)
		return sprintf(buf, "%d\n", pool->size);
	else if (attr == &veth_size_attr)
		return sprintf(buf, "%d\n", pool->buff_size);
	return 0;
}

static ssize_t veth_pool_store(struct kobject *kobj, struct attribute *attr,
			       const char *buf, size_t count)
{
	struct ibmveth_buff_pool *pool = container_of(kobj,
						      struct ibmveth_buff_pool,
						      kobj);
	struct net_device *netdev = dev_get_drvdata(
	    container_of(kobj->parent, struct device, kobj));
	struct ibmveth_adapter *adapter = netdev_priv(netdev);
	long value = simple_strtol(buf, NULL, 10);
	long rc;

	if (attr == &veth_active_attr) {
		if (value && !pool->active) {
			if (netif_running(netdev)) {
				if (ibmveth_alloc_buffer_pool(pool)) {
					netdev_err(netdev,
						   "unable to alloc pool\n");
					return -ENOMEM;
				}
				pool->active = 1;
				adapter->pool_config = 1;
				ibmveth_close(netdev);
				adapter->pool_config = 0;
				if ((rc = ibmveth_open(netdev)))
					return rc;
			} else {
				pool->active = 1;
			}
		} else if (!value && pool->active) {
			int mtu = netdev->mtu + IBMVETH_BUFF_OH;
			int i;
			/* Make sure there is a buffer pool with buffers that
			 * can hold a packet of the size of the MTU
			 */
			for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
				if (pool == &adapter->rx_buff_pool[i])
					continue;
				if (!adapter->rx_buff_pool[i].active)
					continue;
				if (mtu <= adapter->rx_buff_pool[i].buff_size)
					break;
			}

			if (i == IBMVETH_NUM_BUFF_POOLS) {
				netdev_err(netdev, "no active pool >= MTU\n");
				return -EPERM;
			}

			if (netif_running(netdev)) {
				adapter->pool_config = 1;
				ibmveth_close(netdev);
				pool->active = 0;
				adapter->pool_config = 0;
				if ((rc = ibmveth_open(netdev)))
					return rc;
			}
			pool->active = 0;
		}
	} else if (attr == &veth_num_attr) {
		if (value <= 0 || value > IBMVETH_MAX_POOL_COUNT) {
			return -EINVAL;
		} else {
			if (netif_running(netdev)) {
				adapter->pool_config = 1;
				ibmveth_close(netdev);
				adapter->pool_config = 0;
				pool->size = value;
				if ((rc = ibmveth_open(netdev)))
					return rc;
			} else {
				pool->size = value;
			}
		}
	} else if (attr == &veth_size_attr) {
		if (value <= IBMVETH_BUFF_OH || value > IBMVETH_MAX_BUF_SIZE) {
			return -EINVAL;
		} else {
			if (netif_running(netdev)) {
				adapter->pool_config = 1;
				ibmveth_close(netdev);
				adapter->pool_config = 0;
				pool->buff_size = value;
				if ((rc = ibmveth_open(netdev)))
					return rc;
			} else {
				pool->buff_size = value;
			}
		}
	}

	/* kick the interrupt handler to allocate/deallocate pools */
	ibmveth_interrupt(netdev->irq, netdev);
	return count;
}


#define ATTR(_name, _mode)				\
	struct attribute veth_##_name##_attr = {	\
	.name = __stringify(_name), .mode = _mode,	\
	};

static ATTR(active, 0644);
static ATTR(num, 0644);
static ATTR(size, 0644);

static struct attribute *veth_pool_attrs[] = {
	&veth_active_attr,
	&veth_num_attr,
	&veth_size_attr,
	NULL,
};

static const struct sysfs_ops veth_pool_ops = {
	.show   = veth_pool_show,
	.store  = veth_pool_store,
};

static struct kobj_type ktype_veth_pool = {
	.release        = NULL,
	.sysfs_ops      = &veth_pool_ops,
	.default_attrs  = veth_pool_attrs,
};

static int ibmveth_resume(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	ibmveth_interrupt(netdev->irq, netdev);
	return 0;
}

static struct vio_device_id ibmveth_device_table[] = {
	{ "network", "IBM,l-lan"},
	{ "", "" }
};
MODULE_DEVICE_TABLE(vio, ibmveth_device_table);

static struct dev_pm_ops ibmveth_pm_ops = {
	.resume = ibmveth_resume
};

static struct vio_driver ibmveth_driver = {
	.id_table	= ibmveth_device_table,
	.probe		= ibmveth_probe,
	.remove		= ibmveth_remove,
	.get_desired_dma = ibmveth_get_desired_dma,
	.name		= ibmveth_driver_name,
	.pm		= &ibmveth_pm_ops,
};

static int __init ibmveth_module_init(void)
{
	printk(KERN_DEBUG "%s: %s %s\n", ibmveth_driver_name,
	       ibmveth_driver_string, ibmveth_driver_version);

	return vio_register_driver(&ibmveth_driver);
}

static void __exit ibmveth_module_exit(void)
{
	vio_unregister_driver(&ibmveth_driver);
}

module_init(ibmveth_module_init);
module_exit(ibmveth_module_exit);