1 /* sunvnet.c: Sun LDOM Virtual Network Driver.
2  *
3  * Copyright (C) 2007, 2008 David S. Miller <davem@davemloft.net>
4  */
5 
6 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
7 
8 #include <linux/module.h>
9 #include <linux/kernel.h>
10 #include <linux/types.h>
11 #include <linux/slab.h>
12 #include <linux/delay.h>
13 #include <linux/init.h>
14 #include <linux/netdevice.h>
15 #include <linux/ethtool.h>
16 #include <linux/etherdevice.h>
17 #include <linux/mutex.h>
18 #include <linux/if_vlan.h>
19 
20 #include <asm/vio.h>
21 #include <asm/ldc.h>
22 
23 #include "sunvnet.h"
24 
25 #define DRV_MODULE_NAME		"sunvnet"
26 #define DRV_MODULE_VERSION	"1.0"
27 #define DRV_MODULE_RELDATE	"June 25, 2007"
28 
29 static char version[] =
30 	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
31 MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
32 MODULE_DESCRIPTION("Sun LDOM virtual network driver");
33 MODULE_LICENSE("GPL");
34 MODULE_VERSION(DRV_MODULE_VERSION);
35 
36 /* Heuristic for the number of times to exponentially back off and
37  * retry sending an LDC trigger when EAGAIN is encountered.
38  */
39 #define	VNET_MAX_RETRIES	10
40 
41 static int __vnet_tx_trigger(struct vnet_port *port, u32 start);
42 
43 /* Ordered from largest major version to smallest */
44 static struct vio_version vnet_versions[] = {
45 	{ .major = 1, .minor = 6 },
46 	{ .major = 1, .minor = 0 },
47 };
48 
49 static inline u32 vnet_tx_dring_avail(struct vio_dring_state *dr)
50 {
51 	return vio_dring_avail(dr, VNET_TX_RING_SIZE);
52 }
53 
54 static int vnet_handle_unknown(struct vnet_port *port, void *arg)
55 {
56 	struct vio_msg_tag *pkt = arg;
57 
58 	pr_err("Received unknown msg [%02x:%02x:%04x:%08x]\n",
59 	       pkt->type, pkt->stype, pkt->stype_env, pkt->sid);
60 	pr_err("Resetting connection\n");
61 
62 	ldc_disconnect(port->vio.lp);
63 
64 	return -ECONNRESET;
65 }
66 
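/* Send our ATTR INFO during the handshake: advertise descriptor-ring
 * transfer mode, our MAC address, and the MTU we can accept.  Peers
 * newer than v1.3 are offered the remote MTU learned so far, capped at
 * VNET_MAXPACKET (or VNET_MAXPACKET if none has been learned yet);
 * v1.3 peers get ETH_FRAME_LEN + VLAN_HLEN and older peers get
 * ETH_FRAME_LEN exactly.
 */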
67 static int vnet_send_attr(struct vio_driver_state *vio)
68 {
69 	struct vnet_port *port = to_vnet_port(vio);
70 	struct net_device *dev = port->vp->dev;
71 	struct vio_net_attr_info pkt;
72 	int framelen = ETH_FRAME_LEN;
73 	int i;
74 
75 	memset(&pkt, 0, sizeof(pkt));
76 	pkt.tag.type = VIO_TYPE_CTRL;
77 	pkt.tag.stype = VIO_SUBTYPE_INFO;
78 	pkt.tag.stype_env = VIO_ATTR_INFO;
79 	pkt.tag.sid = vio_send_sid(vio);
80 	if (vio_version_before(vio, 1, 2))
81 		pkt.xfer_mode = VIO_DRING_MODE;
82 	else
83 		pkt.xfer_mode = VIO_NEW_DRING_MODE;
84 	pkt.addr_type = VNET_ADDR_ETHERMAC;
85 	pkt.ack_freq = 0;
86 	for (i = 0; i < 6; i++)
87 		pkt.addr |= (u64)dev->dev_addr[i] << ((5 - i) * 8);
88 	if (vio_version_after(vio, 1, 3)) {
89 		if (port->rmtu) {
90 			port->rmtu = min(VNET_MAXPACKET, port->rmtu);
91 			pkt.mtu = port->rmtu;
92 		} else {
93 			port->rmtu = VNET_MAXPACKET;
94 			pkt.mtu = port->rmtu;
95 		}
96 		if (vio_version_after_eq(vio, 1, 6))
97 			pkt.options = VIO_TX_DRING;
98 	} else if (vio_version_before(vio, 1, 3)) {
99 		pkt.mtu = framelen;
100 	} else { /* v1.3 */
101 		pkt.mtu = framelen + VLAN_HLEN;
102 	}
103 
104 	pkt.plnk_updt = PHYSLINK_UPDATE_NONE;
105 	pkt.cflags = 0;
106 
107 	viodbg(HS, "SEND NET ATTR xmode[0x%x] atype[0x%x] addr[%llx] "
108 	       "ackfreq[%u] plnk_updt[0x%02x] opts[0x%02x] mtu[%llu] "
109 	       "cflags[0x%04x] lso_max[%u]\n",
110 	       pkt.xfer_mode, pkt.addr_type,
111 	       (unsigned long long)pkt.addr,
112 	       pkt.ack_freq, pkt.plnk_updt, pkt.options,
113 	       (unsigned long long)pkt.mtu, pkt.cflags, pkt.ipv4_lso_maxlen);
114 
115 
116 	return vio_ldc_send(vio, &pkt, sizeof(pkt));
117 }
118 
119 static int handle_attr_info(struct vio_driver_state *vio,
120 			    struct vio_net_attr_info *pkt)
121 {
122 	struct vnet_port *port = to_vnet_port(vio);
123 	u64	localmtu;
124 	u8	xfer_mode;
125 
126 	viodbg(HS, "GOT NET ATTR xmode[0x%x] atype[0x%x] addr[%llx] "
127 	       "ackfreq[%u] plnk_updt[0x%02x] opts[0x%02x] mtu[%llu] "
128 	       " (rmtu[%llu]) cflags[0x%04x] lso_max[%u]\n",
129 	       pkt->xfer_mode, pkt->addr_type,
130 	       (unsigned long long)pkt->addr,
131 	       pkt->ack_freq, pkt->plnk_updt, pkt->options,
132 	       (unsigned long long)pkt->mtu, port->rmtu, pkt->cflags,
133 	       pkt->ipv4_lso_maxlen);
134 
135 	pkt->tag.sid = vio_send_sid(vio);
136 
137 	xfer_mode = pkt->xfer_mode;
138 	/* for versions < 1.2, VIO_DRING_MODE is 0x3 and is not a bitmask */
139 	if (vio_version_before(vio, 1, 2) && xfer_mode == VIO_DRING_MODE)
140 		xfer_mode = VIO_NEW_DRING_MODE;
141 
142 	/* MTU negotiation:
143 	 *	< v1.3 - ETH_FRAME_LEN exactly
144 	 *	> v1.3 - MIN(pkt.mtu, VNET_MAXPACKET, port->rmtu) and change
145 	 *			pkt->mtu for ACK
146 	 *	= v1.3 - ETH_FRAME_LEN + VLAN_HLEN exactly
147 	 */
148 	if (vio_version_before(vio, 1, 3)) {
149 		localmtu = ETH_FRAME_LEN;
150 	} else if (vio_version_after(vio, 1, 3)) {
151 		localmtu = port->rmtu ? port->rmtu : VNET_MAXPACKET;
152 		localmtu = min(pkt->mtu, localmtu);
153 		pkt->mtu = localmtu;
154 	} else { /* v1.3 */
155 		localmtu = ETH_FRAME_LEN + VLAN_HLEN;
156 	}
157 	port->rmtu = localmtu;
158 
159 	/* for versions >= 1.6, ACK with the packet mode we support */
160 	if (vio_version_after_eq(vio, 1, 6)) {
161 		pkt->xfer_mode = VIO_NEW_DRING_MODE;
162 		pkt->options = VIO_TX_DRING;
163 	}
164 
165 	if (!(xfer_mode & VIO_NEW_DRING_MODE) ||
166 	    pkt->addr_type != VNET_ADDR_ETHERMAC ||
167 	    pkt->mtu != localmtu) {
168 		viodbg(HS, "SEND NET ATTR NACK\n");
169 
170 		pkt->tag.stype = VIO_SUBTYPE_NACK;
171 
172 		(void) vio_ldc_send(vio, pkt, sizeof(*pkt));
173 
174 		return -ECONNRESET;
175 	} else {
176 		viodbg(HS, "SEND NET ATTR ACK xmode[0x%x] atype[0x%x] "
177 		       "addr[%llx] ackfreq[%u] plnk_updt[0x%02x] opts[0x%02x] "
178 		       "mtu[%llu] (rmtu[%llu]) cflags[0x%04x] lso_max[%u]\n",
179 		       pkt->xfer_mode, pkt->addr_type,
180 		       (unsigned long long)pkt->addr,
181 		       pkt->ack_freq, pkt->plnk_updt, pkt->options,
182 		       (unsigned long long)pkt->mtu, port->rmtu, pkt->cflags,
183 		       pkt->ipv4_lso_maxlen);
184 
185 		pkt->tag.stype = VIO_SUBTYPE_ACK;
186 
187 		return vio_ldc_send(vio, pkt, sizeof(*pkt));
188 	}
189 
190 }
191 
192 static int handle_attr_ack(struct vio_driver_state *vio,
193 			   struct vio_net_attr_info *pkt)
194 {
195 	viodbg(HS, "GOT NET ATTR ACK\n");
196 
197 	return 0;
198 }
199 
200 static int handle_attr_nack(struct vio_driver_state *vio,
201 			    struct vio_net_attr_info *pkt)
202 {
203 	viodbg(HS, "GOT NET ATTR NACK\n");
204 
205 	return -ECONNRESET;
206 }
207 
208 static int vnet_handle_attr(struct vio_driver_state *vio, void *arg)
209 {
210 	struct vio_net_attr_info *pkt = arg;
211 
212 	switch (pkt->tag.stype) {
213 	case VIO_SUBTYPE_INFO:
214 		return handle_attr_info(vio, pkt);
215 
216 	case VIO_SUBTYPE_ACK:
217 		return handle_attr_ack(vio, pkt);
218 
219 	case VIO_SUBTYPE_NACK:
220 		return handle_attr_nack(vio, pkt);
221 
222 	default:
223 		return -ECONNRESET;
224 	}
225 }
226 
227 static void vnet_handshake_complete(struct vio_driver_state *vio)
228 {
229 	struct vio_dring_state *dr;
230 
231 	dr = &vio->drings[VIO_DRIVER_RX_RING];
232 	dr->snd_nxt = dr->rcv_nxt = 1;
233 
234 	dr = &vio->drings[VIO_DRIVER_TX_RING];
235 	dr->snd_nxt = dr->rcv_nxt = 1;
236 }
237 
238 /* The hypervisor interface that implements copying to/from imported
239  * memory from another domain requires that copies are done to 8-byte
240  * aligned buffers, and that the lengths of such copies are also 8-byte
241  * multiples.
242  *
243  * So we align skb->data to an 8-byte multiple and pad out the data
244  * area so we can round the copy length up to the next multiple of
245  * 8 for the copy.
246  *
247  * The transmitter puts the actual start of the packet 6 bytes into
248  * the buffer it sends over, so that the IP headers after the ethernet
249  * header are aligned properly.  These 6 bytes are not in the descriptor
250  * length, they are simply implied.  This offset is represented using
251  * the VNET_PACKET_SKIP macro.
252  */
253 static struct sk_buff *alloc_and_align_skb(struct net_device *dev,
254 					   unsigned int len)
255 {
256 	struct sk_buff *skb = netdev_alloc_skb(dev, len+VNET_PACKET_SKIP+8+8);
257 	unsigned long addr, off;
258 
259 	if (unlikely(!skb))
260 		return NULL;
261 
262 	addr = (unsigned long) skb->data;
263 	off = ((addr + 7UL) & ~7UL) - addr;
264 	if (off)
265 		skb_reserve(skb, off);
266 
267 	return skb;
268 }
269 
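/* Receive one frame described by the given LDC transfer cookies:
 * validate the length against ETH_ZLEN and the negotiated rmtu, copy
 * the data in with ldc_copy() using an 8-byte aligned buffer and a
 * rounded-up copy length, then hand the skb to the stack via
 * netif_rx().  For example, a 60-byte frame is copied as 72 bytes
 * (6 skip bytes plus 60 bytes of data, rounded up to a multiple of 8)
 * before the skip bytes are pulled and the skb is trimmed to 60 bytes.
 */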
270 static int vnet_rx_one(struct vnet_port *port, unsigned int len,
271 		       struct ldc_trans_cookie *cookies, int ncookies)
272 {
273 	struct net_device *dev = port->vp->dev;
274 	unsigned int copy_len;
275 	struct sk_buff *skb;
276 	int err;
277 
278 	err = -EMSGSIZE;
279 	if (unlikely(len < ETH_ZLEN || len > port->rmtu)) {
280 		dev->stats.rx_length_errors++;
281 		goto out_dropped;
282 	}
283 
284 	skb = alloc_and_align_skb(dev, len);
285 	err = -ENOMEM;
286 	if (unlikely(!skb)) {
287 		dev->stats.rx_missed_errors++;
288 		goto out_dropped;
289 	}
290 
291 	copy_len = (len + VNET_PACKET_SKIP + 7U) & ~7U;
292 	skb_put(skb, copy_len);
293 	err = ldc_copy(port->vio.lp, LDC_COPY_IN,
294 		       skb->data, copy_len, 0,
295 		       cookies, ncookies);
296 	if (unlikely(err < 0)) {
297 		dev->stats.rx_frame_errors++;
298 		goto out_free_skb;
299 	}
300 
301 	skb_pull(skb, VNET_PACKET_SKIP);
302 	skb_trim(skb, len);
303 	skb->protocol = eth_type_trans(skb, dev);
304 
305 	dev->stats.rx_packets++;
306 	dev->stats.rx_bytes += len;
307 
308 	netif_rx(skb);
309 
310 	return 0;
311 
312 out_free_skb:
313 	kfree_skb(skb);
314 
315 out_dropped:
316 	dev->stats.rx_dropped++;
317 	return err;
318 }
319 
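/* ACK descriptors [start, end] of the peer's dring with the given
 * state.  The send is retried with exponential backoff (delay capped
 * at 128us, bounded by VNET_MAX_RETRIES) while vio_ldc_send() keeps
 * returning -EAGAIN.  If a VIO_DRING_STOPPED ack cannot be delivered,
 * it is recorded in stop_rx/stop_rx_idx so that __vnet_tx_trigger()
 * can resend it before the next "start" trigger.
 */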
320 static int vnet_send_ack(struct vnet_port *port, struct vio_dring_state *dr,
321 			 u32 start, u32 end, u8 vio_dring_state)
322 {
323 	struct vio_dring_data hdr = {
324 		.tag = {
325 			.type		= VIO_TYPE_DATA,
326 			.stype		= VIO_SUBTYPE_ACK,
327 			.stype_env	= VIO_DRING_DATA,
328 			.sid		= vio_send_sid(&port->vio),
329 		},
330 		.dring_ident		= dr->ident,
331 		.start_idx		= start,
332 		.end_idx		= end,
333 		.state			= vio_dring_state,
334 	};
335 	int err, delay;
336 	int retries = 0;
337 
338 	hdr.seq = dr->snd_nxt;
339 	delay = 1;
340 	do {
341 		err = vio_ldc_send(&port->vio, &hdr, sizeof(hdr));
342 		if (err > 0) {
343 			dr->snd_nxt++;
344 			break;
345 		}
346 		udelay(delay);
347 		if ((delay <<= 1) > 128)
348 			delay = 128;
349 		if (retries++ > VNET_MAX_RETRIES) {
350 			pr_info("ECONNRESET %x:%x:%x:%x:%x:%x\n",
351 				port->raddr[0], port->raddr[1],
352 				port->raddr[2], port->raddr[3],
353 				port->raddr[4], port->raddr[5]);
354 			break;
355 		}
356 	} while (err == -EAGAIN);
357 
358 	if (err <= 0 && vio_dring_state == VIO_DRING_STOPPED) {
359 		port->stop_rx_idx = end;
360 		port->stop_rx = true;
361 	} else {
362 		port->stop_rx_idx = 0;
363 		port->stop_rx = false;
364 	}
365 
366 	return err;
367 }
368 
369 static u32 next_idx(u32 idx, struct vio_dring_state *dr)
370 {
371 	if (++idx == dr->num_entries)
372 		idx = 0;
373 	return idx;
374 }
375 
376 static u32 prev_idx(u32 idx, struct vio_dring_state *dr)
377 {
378 	if (idx == 0)
379 		idx = dr->num_entries - 1;
380 	else
381 		idx--;
382 
383 	return idx;
384 }
385 
386 static struct vio_net_desc *get_rx_desc(struct vnet_port *port,
387 					struct vio_dring_state *dr,
388 					u32 index)
389 {
390 	struct vio_net_desc *desc = port->vio.desc_buf;
391 	int err;
392 
393 	err = ldc_get_dring_entry(port->vio.lp, desc, dr->entry_size,
394 				  (index * dr->entry_size),
395 				  dr->cookies, dr->ncookies);
396 	if (err < 0)
397 		return ERR_PTR(err);
398 
399 	return desc;
400 }
401 
402 static int put_rx_desc(struct vnet_port *port,
403 		       struct vio_dring_state *dr,
404 		       struct vio_net_desc *desc,
405 		       u32 index)
406 {
407 	int err;
408 
409 	err = ldc_put_dring_entry(port->vio.lp, desc, dr->entry_size,
410 				  (index * dr->entry_size),
411 				  dr->cookies, dr->ncookies);
412 	if (err < 0)
413 		return err;
414 
415 	return 0;
416 }
417 
418 static int vnet_walk_rx_one(struct vnet_port *port,
419 			    struct vio_dring_state *dr,
420 			    u32 index, int *needs_ack)
421 {
422 	struct vio_net_desc *desc = get_rx_desc(port, dr, index);
423 	struct vio_driver_state *vio = &port->vio;
424 	int err;
425 
426 	if (IS_ERR(desc))
427 		return PTR_ERR(desc);
428 
429 	if (desc->hdr.state != VIO_DESC_READY)
430 		return 1;
431 
432 	rmb();
433 
434 	viodbg(DATA, "vio_walk_rx_one desc[%02x:%02x:%08x:%08x:%llx:%llx]\n",
435 	       desc->hdr.state, desc->hdr.ack,
436 	       desc->size, desc->ncookies,
437 	       desc->cookies[0].cookie_addr,
438 	       desc->cookies[0].cookie_size);
439 
440 	err = vnet_rx_one(port, desc->size, desc->cookies, desc->ncookies);
441 	if (err == -ECONNRESET)
442 		return err;
443 	desc->hdr.state = VIO_DESC_DONE;
444 	err = put_rx_desc(port, dr, desc, index);
445 	if (err < 0)
446 		return err;
447 	*needs_ack = desc->hdr.ack;
448 	return 0;
449 }
450 
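/* Walk the peer's RX dring from start through end (an end of -1 means
 * "until the first descriptor that is not READY"), receiving each
 * frame via vnet_walk_rx_one().  Intermediate ranges are acknowledged
 * with VIO_DRING_ACTIVE whenever a descriptor requests an ack, and the
 * final range is acknowledged with VIO_DRING_STOPPED.
 */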
451 static int vnet_walk_rx(struct vnet_port *port, struct vio_dring_state *dr,
452 			u32 start, u32 end)
453 {
454 	struct vio_driver_state *vio = &port->vio;
455 	int ack_start = -1, ack_end = -1;
456 
457 	end = (end == (u32) -1) ? prev_idx(start, dr) : next_idx(end, dr);
458 
459 	viodbg(DATA, "vnet_walk_rx start[%08x] end[%08x]\n", start, end);
460 
461 	while (start != end) {
462 		int ack = 0, err = vnet_walk_rx_one(port, dr, start, &ack);
463 		if (err == -ECONNRESET)
464 			return err;
465 		if (err != 0)
466 			break;
467 		if (ack_start == -1)
468 			ack_start = start;
469 		ack_end = start;
470 		start = next_idx(start, dr);
471 		if (ack && start != end) {
472 			err = vnet_send_ack(port, dr, ack_start, ack_end,
473 					    VIO_DRING_ACTIVE);
474 			if (err == -ECONNRESET)
475 				return err;
476 			ack_start = -1;
477 		}
478 	}
479 	if (unlikely(ack_start == -1))
480 		ack_start = ack_end = prev_idx(start, dr);
481 	return vnet_send_ack(port, dr, ack_start, ack_end, VIO_DRING_STOPPED);
482 }
483 
484 static int vnet_rx(struct vnet_port *port, void *msgbuf)
485 {
486 	struct vio_dring_data *pkt = msgbuf;
487 	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_RX_RING];
488 	struct vio_driver_state *vio = &port->vio;
489 
490 	viodbg(DATA, "vnet_rx stype_env[%04x] seq[%016llx] rcv_nxt[%016llx]\n",
491 	       pkt->tag.stype_env, pkt->seq, dr->rcv_nxt);
492 
493 	if (unlikely(pkt->tag.stype_env != VIO_DRING_DATA))
494 		return 0;
495 	if (unlikely(pkt->seq != dr->rcv_nxt)) {
496 		pr_err("RX out of sequence seq[0x%llx] rcv_nxt[0x%llx]\n",
497 		       pkt->seq, dr->rcv_nxt);
498 		return 0;
499 	}
500 
501 	dr->rcv_nxt++;
502 
503 	/* XXX Validate pkt->start_idx and pkt->end_idx XXX */
504 
505 	return vnet_walk_rx(port, dr, pkt->start_idx, pkt->end_idx);
506 }
507 
508 static int idx_is_pending(struct vio_dring_state *dr, u32 end)
509 {
510 	u32 idx = dr->cons;
511 	int found = 0;
512 
513 	while (idx != dr->prod) {
514 		if (idx == end) {
515 			found = 1;
516 			break;
517 		}
518 		idx = next_idx(idx, dr);
519 	}
520 	return found;
521 }
522 
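/* Handle a dring ACK from the peer for our TX ring: check that the
 * acked index is still pending, advance dr->cons past it, and if the
 * descriptor at the new dr->cons is already READY but no "start"
 * trigger has been sent for it, send one on behalf of
 * vnet_start_xmit().  Returns 1 when the queue is stopped and enough
 * ring space has become available to schedule a wakeup.
 */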
523 static int vnet_ack(struct vnet_port *port, void *msgbuf)
524 {
525 	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
526 	struct vio_dring_data *pkt = msgbuf;
527 	struct net_device *dev;
528 	struct vnet *vp;
529 	u32 end;
530 	struct vio_net_desc *desc;
531 	if (unlikely(pkt->tag.stype_env != VIO_DRING_DATA))
532 		return 0;
533 
534 	end = pkt->end_idx;
535 	if (unlikely(!idx_is_pending(dr, end)))
536 		return 0;
537 
538 	/* Synchronize with vnet_start_xmit() to avoid races, and tell
539 	 * xmit when it is time to send a trigger.
540 	 */
541 	dr->cons = next_idx(end, dr);
542 	desc = vio_dring_entry(dr, dr->cons);
543 	if (desc->hdr.state == VIO_DESC_READY && !port->start_cons) {
544 		/* vnet_start_xmit() just populated this dring but missed
545 		 * sending the "start" LDC message to the consumer.
546 		 * Send a "start" trigger on its behalf.
547 		 */
548 		if (__vnet_tx_trigger(port, dr->cons) > 0)
549 			port->start_cons = false;
550 		else
551 			port->start_cons = true;
552 	} else {
553 		port->start_cons = true;
554 	}
555 
556 
557 	vp = port->vp;
558 	dev = vp->dev;
559 	if (unlikely(netif_queue_stopped(dev) &&
560 		     vnet_tx_dring_avail(dr) >= VNET_TX_WAKEUP_THRESH(dr)))
561 		return 1;
562 
563 	return 0;
564 }
565 
566 static int vnet_nack(struct vnet_port *port, void *msgbuf)
567 {
568 	/* XXX just reset or similar XXX */
569 	return 0;
570 }
571 
572 static int handle_mcast(struct vnet_port *port, void *msgbuf)
573 {
574 	struct vio_net_mcast_info *pkt = msgbuf;
575 
576 	if (pkt->tag.stype != VIO_SUBTYPE_ACK)
577 		pr_err("%s: Got unexpected MCAST reply [%02x:%02x:%04x:%08x]\n",
578 		       port->vp->dev->name,
579 		       pkt->tag.type,
580 		       pkt->tag.stype,
581 		       pkt->tag.stype_env,
582 		       pkt->tag.sid);
583 
584 	return 0;
585 }
586 
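/* Tasklet handler (scheduled from vnet_event()) that restarts the TX
 * queue once every port's TX dring again has at least
 * VNET_TX_WAKEUP_THRESH() free entries.  Running in tasklet context
 * means taking netif_tx_lock() here cannot deadlock with
 * dev_watchdog().
 */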
587 static void maybe_tx_wakeup(unsigned long param)
588 {
589 	struct vnet *vp = (struct vnet *)param;
590 	struct net_device *dev = vp->dev;
591 
592 	netif_tx_lock(dev);
593 	if (likely(netif_queue_stopped(dev))) {
594 		struct vnet_port *port;
595 		int wake = 1;
596 
597 		list_for_each_entry(port, &vp->port_list, list) {
598 			struct vio_dring_state *dr;
599 
600 			dr = &port->vio.drings[VIO_DRIVER_TX_RING];
601 			if (vnet_tx_dring_avail(dr) <
602 			    VNET_TX_WAKEUP_THRESH(dr)) {
603 				wake = 0;
604 				break;
605 			}
606 		}
607 		if (wake)
608 			netif_wake_queue(dev);
609 	}
610 	netif_tx_unlock(dev);
611 }
612 
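/* LDC event callback for a port.  RESET and UP events are forwarded to
 * vio_link_state_change(); a RESET additionally clears the negotiated
 * rmtu and calls vio_port_up() to bring the channel back up.  For
 * DATA_READY, the loop below drains ldc_read(), validates the session
 * id, and dispatches data messages to vnet_rx()/vnet_ack()/vnet_nack()
 * and control messages to handle_mcast() or the generic VIO control
 * engine.  A positive return from vnet_ack() makes us schedule the
 * TX-wakeup tasklet after the loop, unless the connection was reset.
 */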
613 static void vnet_event(void *arg, int event)
614 {
615 	struct vnet_port *port = arg;
616 	struct vio_driver_state *vio = &port->vio;
617 	unsigned long flags;
618 	int tx_wakeup, err;
619 
620 	spin_lock_irqsave(&vio->lock, flags);
621 
622 	if (unlikely(event == LDC_EVENT_RESET ||
623 		     event == LDC_EVENT_UP)) {
624 		vio_link_state_change(vio, event);
625 		spin_unlock_irqrestore(&vio->lock, flags);
626 
627 		if (event == LDC_EVENT_RESET) {
628 			port->rmtu = 0;
629 			vio_port_up(vio);
630 		}
631 		return;
632 	}
633 
634 	if (unlikely(event != LDC_EVENT_DATA_READY)) {
635 		pr_warn("Unexpected LDC event %d\n", event);
636 		spin_unlock_irqrestore(&vio->lock, flags);
637 		return;
638 	}
639 
640 	tx_wakeup = err = 0;
641 	while (1) {
642 		union {
643 			struct vio_msg_tag tag;
644 			u64 raw[8];
645 		} msgbuf;
646 
647 		err = ldc_read(vio->lp, &msgbuf, sizeof(msgbuf));
648 		if (unlikely(err < 0)) {
649 			if (err == -ECONNRESET)
650 				vio_conn_reset(vio);
651 			break;
652 		}
653 		if (err == 0)
654 			break;
655 		viodbg(DATA, "TAG [%02x:%02x:%04x:%08x]\n",
656 		       msgbuf.tag.type,
657 		       msgbuf.tag.stype,
658 		       msgbuf.tag.stype_env,
659 		       msgbuf.tag.sid);
660 		err = vio_validate_sid(vio, &msgbuf.tag);
661 		if (err < 0)
662 			break;
663 
664 		if (likely(msgbuf.tag.type == VIO_TYPE_DATA)) {
665 			if (msgbuf.tag.stype == VIO_SUBTYPE_INFO) {
666 				err = vnet_rx(port, &msgbuf);
667 			} else if (msgbuf.tag.stype == VIO_SUBTYPE_ACK) {
668 				err = vnet_ack(port, &msgbuf);
669 				if (err > 0)
670 					tx_wakeup |= err;
671 			} else if (msgbuf.tag.stype == VIO_SUBTYPE_NACK) {
672 				err = vnet_nack(port, &msgbuf);
673 			}
674 		} else if (msgbuf.tag.type == VIO_TYPE_CTRL) {
675 			if (msgbuf.tag.stype_env == VNET_MCAST_INFO)
676 				err = handle_mcast(port, &msgbuf);
677 			else
678 				err = vio_control_pkt_engine(vio, &msgbuf);
679 			if (err)
680 				break;
681 		} else {
682 			err = vnet_handle_unknown(port, &msgbuf);
683 		}
684 		if (err == -ECONNRESET)
685 			break;
686 	}
687 	spin_unlock(&vio->lock);
688 	/* Kick off a tasklet to wake the queue.  We cannot call
689 	 * maybe_tx_wakeup directly here because we could deadlock on
690 	 * netif_tx_lock() with dev_watchdog().
691 	 */
692 	if (unlikely(tx_wakeup && err != -ECONNRESET))
693 		tasklet_schedule(&port->vp->vnet_tx_wakeup);
694 
695 	local_irq_restore(flags);
696 }
697 
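/* Send a VIO_DRING_DATA "start" message telling the peer that TX
 * descriptors beginning at index 'start' are READY.  Any pending
 * VIO_DRING_STOPPED ack recorded by vnet_send_ack() is flushed first.
 * Like vnet_send_ack(), the send is retried with exponential backoff
 * while vio_ldc_send() returns -EAGAIN.  Returns > 0 on success.
 */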
698 static int __vnet_tx_trigger(struct vnet_port *port, u32 start)
699 {
700 	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
701 	struct vio_dring_data hdr = {
702 		.tag = {
703 			.type		= VIO_TYPE_DATA,
704 			.stype		= VIO_SUBTYPE_INFO,
705 			.stype_env	= VIO_DRING_DATA,
706 			.sid		= vio_send_sid(&port->vio),
707 		},
708 		.dring_ident		= dr->ident,
709 		.start_idx		= start,
710 		.end_idx		= (u32) -1,
711 	};
712 	int err, delay;
713 	int retries = 0;
714 
715 	if (port->stop_rx) {
716 		err = vnet_send_ack(port,
717 				    &port->vio.drings[VIO_DRIVER_RX_RING],
718 				    port->stop_rx_idx, -1,
719 				    VIO_DRING_STOPPED);
720 		if (err <= 0)
721 			return err;
722 	}
723 
724 	hdr.seq = dr->snd_nxt;
725 	delay = 1;
726 	do {
727 		err = vio_ldc_send(&port->vio, &hdr, sizeof(hdr));
728 		if (err > 0) {
729 			dr->snd_nxt++;
730 			break;
731 		}
732 		udelay(delay);
733 		if ((delay <<= 1) > 128)
734 			delay = 128;
735 		if (retries++ > VNET_MAX_RETRIES)
736 			break;
737 	} while (err == -EAGAIN);
738 
739 	return err;
740 }
741 
742 static inline bool port_is_up(struct vnet_port *vnet)
743 {
744 	struct vio_driver_state *vio = &vnet->vio;
745 
746 	return !!(vio->hs_state & VIO_HS_COMPLETE);
747 }
748 
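/* Pick the destination port for an outgoing frame: first try a hash
 * lookup on the destination MAC (at skb->data) among ports whose
 * handshake has completed, then fall back to the first "up" switch
 * port, which forwards addresses we have no direct LDC channel for.
 * Returns NULL if nothing suitable is up.  Called with vp->lock held;
 * tx_port_find() is the locked wrapper.
 */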
749 struct vnet_port *__tx_port_find(struct vnet *vp, struct sk_buff *skb)
750 {
751 	unsigned int hash = vnet_hashfn(skb->data);
752 	struct hlist_head *hp = &vp->port_hash[hash];
753 	struct vnet_port *port;
754 
755 	hlist_for_each_entry(port, hp, hash) {
756 		if (!port_is_up(port))
757 			continue;
758 		if (ether_addr_equal(port->raddr, skb->data))
759 			return port;
760 	}
761 	list_for_each_entry(port, &vp->port_list, list) {
762 		if (!port->switch_port)
763 			continue;
764 		if (!port_is_up(port))
765 			continue;
766 		return port;
767 	}
768 	return NULL;
769 }
770 
771 struct vnet_port *tx_port_find(struct vnet *vp, struct sk_buff *skb)
772 {
773 	struct vnet_port *ret;
774 	unsigned long flags;
775 
776 	spin_lock_irqsave(&vp->lock, flags);
777 	ret = __tx_port_find(vp, skb);
778 	spin_unlock_irqrestore(&vp->lock, flags);
779 
780 	return ret;
781 }
782 
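/* Reclaim TX descriptors the peer has marked DONE, walking backwards
 * from the most recently produced entry and stopping at the first FREE
 * one.  Completed skbs are unmapped from the LDC channel and chained
 * onto a list that is returned so the caller can free them outside the
 * lock; *pending counts descriptors that are still READY.  Called with
 * port->vio.lock held.
 */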
783 static struct sk_buff *vnet_clean_tx_ring(struct vnet_port *port,
784 					  unsigned *pending)
785 {
786 	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
787 	struct sk_buff *skb = NULL;
788 	int i, txi;
789 
790 	*pending = 0;
791 
792 	txi = dr->prod-1;
793 	if (txi < 0)
794 		txi = VNET_TX_RING_SIZE-1;
795 
796 	for (i = 0; i < VNET_TX_RING_SIZE; ++i) {
797 		struct vio_net_desc *d;
798 
799 		d = vio_dring_entry(dr, txi);
800 
801 		if (d->hdr.state == VIO_DESC_DONE) {
802 			if (port->tx_bufs[txi].skb) {
803 				BUG_ON(port->tx_bufs[txi].skb->next);
804 
805 				port->tx_bufs[txi].skb->next = skb;
806 				skb = port->tx_bufs[txi].skb;
807 				port->tx_bufs[txi].skb = NULL;
808 
809 				ldc_unmap(port->vio.lp,
810 					  port->tx_bufs[txi].cookies,
811 					  port->tx_bufs[txi].ncookies);
812 			}
813 			d->hdr.state = VIO_DESC_FREE;
814 		} else if (d->hdr.state == VIO_DESC_READY) {
815 			(*pending)++;
816 		} else if (d->hdr.state == VIO_DESC_FREE) {
817 			break;
818 		}
819 		--txi;
820 		if (txi < 0)
821 			txi = VNET_TX_RING_SIZE-1;
822 	}
823 	return skb;
824 }
825 
826 static inline void vnet_free_skbs(struct sk_buff *skb)
827 {
828 	struct sk_buff *next;
829 
830 	while (skb) {
831 		next = skb->next;
832 		skb->next = NULL;
833 		dev_kfree_skb(skb);
834 		skb = next;
835 	}
836 }
837 
838 static void vnet_clean_timer_expire(unsigned long port0)
839 {
840 	struct vnet_port *port = (struct vnet_port *)port0;
841 	struct sk_buff *freeskbs;
842 	unsigned pending;
843 	unsigned long flags;
844 
845 	spin_lock_irqsave(&port->vio.lock, flags);
846 	freeskbs = vnet_clean_tx_ring(port, &pending);
847 	spin_unlock_irqrestore(&port->vio.lock, flags);
848 
849 	vnet_free_skbs(freeskbs);
850 
851 	if (pending)
852 		(void)mod_timer(&port->clean_timer,
853 				jiffies + VNET_CLEAN_TIMEOUT);
854 	else
855 		del_timer(&port->clean_timer);
856 }
857 
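/* Make sure an outgoing skb can be handed to ldc_map_single(): its
 * data must start VNET_PACKET_SKIP bytes past an 8-byte boundary, with
 * VNET_PACKET_SKIP bytes of headroom and enough tailroom to pad the
 * mapped length up to a multiple of 8 (and to ETH_ZLEN).  If not, the
 * data is copied into a freshly allocated, aligned skb.  On return,
 * *pstart and *plen describe the 8-byte aligned region to map; NULL is
 * returned (and the skb freed) if a replacement cannot be allocated or
 * the copy fails.
 */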
858 static inline struct sk_buff *vnet_skb_shape(struct sk_buff *skb, void **pstart,
859 					     int *plen)
860 {
861 	struct sk_buff *nskb;
862 	int len, pad;
863 
864 	len = skb->len;
865 	pad = 0;
866 	if (len < ETH_ZLEN) {
867 		pad += ETH_ZLEN - skb->len;
868 		len += pad;
869 	}
870 	len += VNET_PACKET_SKIP;
871 	pad += 8 - (len & 7);
872 	len += 8 - (len & 7);
873 
874 	if (((unsigned long)skb->data & 7) != VNET_PACKET_SKIP ||
875 	    skb_tailroom(skb) < pad ||
876 	    skb_headroom(skb) < VNET_PACKET_SKIP) {
877 		nskb = alloc_and_align_skb(skb->dev, skb->len);
		if (unlikely(!nskb)) {
			dev_kfree_skb(skb);
			return NULL;
		}
878 		skb_reserve(nskb, VNET_PACKET_SKIP);
879 		if (skb_copy_bits(skb, 0, nskb->data, skb->len)) {
880 			dev_kfree_skb(nskb);
881 			dev_kfree_skb(skb);
882 			return NULL;
883 		}
884 		(void)skb_put(nskb, skb->len);
885 		dev_kfree_skb(skb);
886 		skb = nskb;
887 	}
888 
889 	*pstart = skb->data - VNET_PACKET_SKIP;
890 	*plen = len;
891 	return skb;
892 }
893 
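/* ndo_start_xmit handler: find the destination port, reshape the skb
 * for LDC (alignment and padding), claim the next TX descriptor, map
 * the buffer with ldc_map_single(), fill in the descriptor, mark it
 * READY and, if needed, send a "start" trigger (see the start_cons
 * comment below).  DONE descriptors are reclaimed opportunistically on
 * every transmit, and the queue is stopped when fewer than two
 * descriptors remain free.
 */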
894 static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
895 {
896 	struct vnet *vp = netdev_priv(dev);
897 	struct vnet_port *port = tx_port_find(vp, skb);
898 	struct vio_dring_state *dr;
899 	struct vio_net_desc *d;
900 	unsigned long flags;
901 	unsigned int len;
902 	struct sk_buff *freeskbs = NULL;
903 	int i, err, txi;
904 	void *start = NULL;
905 	int nlen = 0;
906 	unsigned pending = 0;
907 
908 	if (unlikely(!port))
909 		goto out_dropped;
910 
911 	skb = vnet_skb_shape(skb, &start, &nlen);
912 
913 	if (unlikely(!skb))
914 		goto out_dropped;
915 
916 	spin_lock_irqsave(&port->vio.lock, flags);
917 
918 	dr = &port->vio.drings[VIO_DRIVER_TX_RING];
919 	if (unlikely(vnet_tx_dring_avail(dr) < 2)) {
920 		if (!netif_queue_stopped(dev)) {
921 			netif_stop_queue(dev);
922 
923 			/* This is a hard error, log it. */
924 			netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
925 			dev->stats.tx_errors++;
926 		}
927 		spin_unlock_irqrestore(&port->vio.lock, flags);
928 		return NETDEV_TX_BUSY;
929 	}
930 
931 	d = vio_dring_cur(dr);
932 
933 	txi = dr->prod;
934 
935 	freeskbs = vnet_clean_tx_ring(port, &pending);
936 
937 	BUG_ON(port->tx_bufs[txi].skb);
938 
939 	len = skb->len;
940 	if (len < ETH_ZLEN)
941 		len = ETH_ZLEN;
942 
943 	port->tx_bufs[txi].skb = skb;
944 	skb = NULL;
945 
946 	err = ldc_map_single(port->vio.lp, start, nlen,
947 			     port->tx_bufs[txi].cookies, 2,
948 			     (LDC_MAP_SHADOW | LDC_MAP_DIRECT | LDC_MAP_RW));
949 	if (err < 0) {
950 		netdev_info(dev, "tx buffer map error %d\n", err);
951 		goto out_dropped_unlock;
952 	}
953 	port->tx_bufs[txi].ncookies = err;
954 
955 	/* We don't rely on the ACKs to free the skb in vnet_start_xmit(),
956 	 * thus it is safe to not set VIO_ACK_ENABLE for each transmission:
957 	 * the protocol itself does not require it as long as the peer
958 	 * sends a VIO_SUBTYPE_ACK for VIO_DRING_STOPPED.
959 	 *
960 	 * An ACK for every packet in the ring is expensive as the
961 	 * sending of LDC messages is slow and affects performance.
962 	 */
963 	d->hdr.ack = VIO_ACK_DISABLE;
964 	d->size = len;
965 	d->ncookies = port->tx_bufs[txi].ncookies;
966 	for (i = 0; i < d->ncookies; i++)
967 		d->cookies[i] = port->tx_bufs[txi].cookies[i];
968 
969 	/* This has to be a non-SMP write barrier because we are writing
970 	 * to memory which is shared with the peer LDOM.
971 	 */
972 	wmb();
973 
974 	d->hdr.state = VIO_DESC_READY;
975 
976 	/* Exactly one ldc "start" trigger (for dr->cons) needs to be sent
977 	 * to notify the consumer that some descriptors are READY.
978 	 * After that "start" trigger, no additional triggers are needed until
979 	 * a DRING_STOPPED is received from the consumer. The dr->cons field
980 	 * (set up by vnet_ack()) has the value of the next dring index
981 	 * that has not yet been ack-ed. We send a "start" trigger here
982 	 * if, and only if, start_cons is true (reset it afterward). Conversely,
983 	 * vnet_ack() should check if the dring corresponding to cons
984 	 * is marked READY, but start_cons was false.
985 	 * If so, vnet_ack() should send out the missed "start" trigger.
986 	 *
987 	 * Note that the wmb() above makes sure the cookies et al. are
988 	 * not globally visible before the VIO_DESC_READY, and that the
989 	 * stores are ordered correctly by the compiler. The consumer will
990 	 * not proceed until the VIO_DESC_READY is visible, ensuring that
991 	 * the consumer does not observe anything related to descriptors
992 	 * out of order. The HV trap from the LDC start trigger is the
993 	 * producer-to-consumer announcement that work is available to the
994 	 * consumer.
995 	 */
996 	if (!port->start_cons)
997 		goto ldc_start_done; /* previous trigger suffices */
998 
999 	err = __vnet_tx_trigger(port, dr->cons);
1000 	if (unlikely(err < 0)) {
1001 		netdev_info(dev, "TX trigger error %d\n", err);
1002 		d->hdr.state = VIO_DESC_FREE;
1003 		dev->stats.tx_carrier_errors++;
1004 		goto out_dropped_unlock;
1005 	}
1006 
1007 ldc_start_done:
1008 	port->start_cons = false;
1009 
1010 	dev->stats.tx_packets++;
1011 	dev->stats.tx_bytes += port->tx_bufs[txi].skb->len;
1012 
1013 	dr->prod = (dr->prod + 1) & (VNET_TX_RING_SIZE - 1);
1014 	if (unlikely(vnet_tx_dring_avail(dr) < 2)) {
1015 		netif_stop_queue(dev);
1016 		if (vnet_tx_dring_avail(dr) > VNET_TX_WAKEUP_THRESH(dr))
1017 			netif_wake_queue(dev);
1018 	}
1019 
1020 	spin_unlock_irqrestore(&port->vio.lock, flags);
1021 
1022 	vnet_free_skbs(freeskbs);
1023 
1024 	(void)mod_timer(&port->clean_timer, jiffies + VNET_CLEAN_TIMEOUT);
1025 
1026 	return NETDEV_TX_OK;
1027 
1028 out_dropped_unlock:
1029 	spin_unlock_irqrestore(&port->vio.lock, flags);
1030 
1031 out_dropped:
1032 	if (skb)
1033 		dev_kfree_skb(skb);
1034 	vnet_free_skbs(freeskbs);
1035 	if (pending)
1036 		(void)mod_timer(&port->clean_timer,
1037 				jiffies + VNET_CLEAN_TIMEOUT);
1038 	else if (port)
1039 		del_timer(&port->clean_timer);
1040 	dev->stats.tx_dropped++;
1041 	return NETDEV_TX_OK;
1042 }
1043 
1044 static void vnet_tx_timeout(struct net_device *dev)
1045 {
1046 	/* XXX Implement me XXX */
1047 }
1048 
1049 static int vnet_open(struct net_device *dev)
1050 {
1051 	netif_carrier_on(dev);
1052 	netif_start_queue(dev);
1053 
1054 	return 0;
1055 }
1056 
1057 static int vnet_close(struct net_device *dev)
1058 {
1059 	netif_stop_queue(dev);
1060 	netif_carrier_off(dev);
1061 
1062 	return 0;
1063 }
1064 
1065 static struct vnet_mcast_entry *__vnet_mc_find(struct vnet *vp, u8 *addr)
1066 {
1067 	struct vnet_mcast_entry *m;
1068 
1069 	for (m = vp->mcast_list; m; m = m->next) {
1070 		if (ether_addr_equal(m->addr, addr))
1071 			return m;
1072 	}
1073 	return NULL;
1074 }
1075 
1076 static void __update_mc_list(struct vnet *vp, struct net_device *dev)
1077 {
1078 	struct netdev_hw_addr *ha;
1079 
1080 	netdev_for_each_mc_addr(ha, dev) {
1081 		struct vnet_mcast_entry *m;
1082 
1083 		m = __vnet_mc_find(vp, ha->addr);
1084 		if (m) {
1085 			m->hit = 1;
1086 			continue;
1087 		}
1088 
1089 		if (!m) {
1090 			m = kzalloc(sizeof(*m), GFP_ATOMIC);
1091 			if (!m)
1092 				continue;
1093 			memcpy(m->addr, ha->addr, ETH_ALEN);
1094 			m->hit = 1;
1095 
1096 			m->next = vp->mcast_list;
1097 			vp->mcast_list = m;
1098 		}
1099 	}
1100 }
1101 
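/* Push the device's multicast list to the switch port in
 * VNET_NUM_MCAST-sized VNET_MCAST_INFO batches: first a "set" pass for
 * entries not yet sent to the peer, then an "unset" pass that removes
 * and frees entries whose hit flag was not refreshed by
 * __update_mc_list().
 */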
1102 static void __send_mc_list(struct vnet *vp, struct vnet_port *port)
1103 {
1104 	struct vio_net_mcast_info info;
1105 	struct vnet_mcast_entry *m, **pp;
1106 	int n_addrs;
1107 
1108 	memset(&info, 0, sizeof(info));
1109 
1110 	info.tag.type = VIO_TYPE_CTRL;
1111 	info.tag.stype = VIO_SUBTYPE_INFO;
1112 	info.tag.stype_env = VNET_MCAST_INFO;
1113 	info.tag.sid = vio_send_sid(&port->vio);
1114 	info.set = 1;
1115 
1116 	n_addrs = 0;
1117 	for (m = vp->mcast_list; m; m = m->next) {
1118 		if (m->sent)
1119 			continue;
1120 		m->sent = 1;
1121 		memcpy(&info.mcast_addr[n_addrs * ETH_ALEN],
1122 		       m->addr, ETH_ALEN);
1123 		if (++n_addrs == VNET_NUM_MCAST) {
1124 			info.count = n_addrs;
1125 
1126 			(void) vio_ldc_send(&port->vio, &info,
1127 					    sizeof(info));
1128 			n_addrs = 0;
1129 		}
1130 	}
1131 	if (n_addrs) {
1132 		info.count = n_addrs;
1133 		(void) vio_ldc_send(&port->vio, &info, sizeof(info));
1134 	}
1135 
1136 	info.set = 0;
1137 
1138 	n_addrs = 0;
1139 	pp = &vp->mcast_list;
1140 	while ((m = *pp) != NULL) {
1141 		if (m->hit) {
1142 			m->hit = 0;
1143 			pp = &m->next;
1144 			continue;
1145 		}
1146 
1147 		memcpy(&info.mcast_addr[n_addrs * ETH_ALEN],
1148 		       m->addr, ETH_ALEN);
1149 		if (++n_addrs == VNET_NUM_MCAST) {
1150 			info.count = n_addrs;
1151 			(void) vio_ldc_send(&port->vio, &info,
1152 					    sizeof(info));
1153 			n_addrs = 0;
1154 		}
1155 
1156 		*pp = m->next;
1157 		kfree(m);
1158 	}
1159 	if (n_addrs) {
1160 		info.count = n_addrs;
1161 		(void) vio_ldc_send(&port->vio, &info, sizeof(info));
1162 	}
1163 }
1164 
1165 static void vnet_set_rx_mode(struct net_device *dev)
1166 {
1167 	struct vnet *vp = netdev_priv(dev);
1168 	struct vnet_port *port;
1169 	unsigned long flags;
1170 
1171 	spin_lock_irqsave(&vp->lock, flags);
1172 	if (!list_empty(&vp->port_list)) {
1173 		port = list_entry(vp->port_list.next, struct vnet_port, list);
1174 
1175 		if (port->switch_port) {
1176 			__update_mc_list(vp, dev);
1177 			__send_mc_list(vp, port);
1178 		}
1179 	}
1180 	spin_unlock_irqrestore(&vp->lock, flags);
1181 }
1182 
1183 static int vnet_change_mtu(struct net_device *dev, int new_mtu)
1184 {
1185 	if (new_mtu != ETH_DATA_LEN)
1186 		return -EINVAL;
1187 
1188 	dev->mtu = new_mtu;
1189 	return 0;
1190 }
1191 
1192 static int vnet_set_mac_addr(struct net_device *dev, void *p)
1193 {
1194 	return -EINVAL;
1195 }
1196 
1197 static void vnet_get_drvinfo(struct net_device *dev,
1198 			     struct ethtool_drvinfo *info)
1199 {
1200 	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
1201 	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
1202 }
1203 
1204 static u32 vnet_get_msglevel(struct net_device *dev)
1205 {
1206 	struct vnet *vp = netdev_priv(dev);
1207 	return vp->msg_enable;
1208 }
1209 
1210 static void vnet_set_msglevel(struct net_device *dev, u32 value)
1211 {
1212 	struct vnet *vp = netdev_priv(dev);
1213 	vp->msg_enable = value;
1214 }
1215 
1216 static const struct ethtool_ops vnet_ethtool_ops = {
1217 	.get_drvinfo		= vnet_get_drvinfo,
1218 	.get_msglevel		= vnet_get_msglevel,
1219 	.set_msglevel		= vnet_set_msglevel,
1220 	.get_link		= ethtool_op_get_link,
1221 };
1222 
1223 static void vnet_port_free_tx_bufs(struct vnet_port *port)
1224 {
1225 	struct vio_dring_state *dr;
1226 	int i;
1227 
1228 	dr = &port->vio.drings[VIO_DRIVER_TX_RING];
1229 	if (dr->base) {
1230 		ldc_free_exp_dring(port->vio.lp, dr->base,
1231 				   (dr->entry_size * dr->num_entries),
1232 				   dr->cookies, dr->ncookies);
1233 		dr->base = NULL;
1234 		dr->entry_size = 0;
1235 		dr->num_entries = 0;
1236 		dr->pending = 0;
1237 		dr->ncookies = 0;
1238 	}
1239 
1240 	for (i = 0; i < VNET_TX_RING_SIZE; i++) {
1241 		struct vio_net_desc *d;
1242 		void *skb = port->tx_bufs[i].skb;
1243 
1244 		if (!skb)
1245 			continue;
1246 
1247 		d = vio_dring_entry(dr, i);
1248 		if (d->hdr.state == VIO_DESC_READY)
1249 			pr_warn("active transmit buffers freed\n");
1250 
1251 		ldc_unmap(port->vio.lp,
1252 			  port->tx_bufs[i].cookies,
1253 			  port->tx_bufs[i].ncookies);
1254 		dev_kfree_skb(skb);
1255 		port->tx_bufs[i].skb = NULL;
1256 		d->hdr.state = VIO_DESC_FREE;
1257 	}
1258 }
1259 
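/* Allocate the TX descriptor ring (VNET_TX_RING_SIZE entries, each
 * with room for two transfer cookies) and export it over the LDC
 * channel.  All descriptors start out FREE, the producer/consumer
 * indices are reset, and start_cons is set so that the first transmit
 * sends a "start" trigger.
 */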
1260 static int vnet_port_alloc_tx_bufs(struct vnet_port *port)
1261 {
1262 	struct vio_dring_state *dr;
1263 	unsigned long len;
1264 	int i, err, ncookies;
1265 	void *dring;
1266 
1267 	dr = &port->vio.drings[VIO_DRIVER_TX_RING];
1268 
1269 	len = (VNET_TX_RING_SIZE *
1270 	       (sizeof(struct vio_net_desc) +
1271 		(sizeof(struct ldc_trans_cookie) * 2)));
1272 
1273 	ncookies = VIO_MAX_RING_COOKIES;
1274 	dring = ldc_alloc_exp_dring(port->vio.lp, len,
1275 				    dr->cookies, &ncookies,
1276 				    (LDC_MAP_SHADOW |
1277 				     LDC_MAP_DIRECT |
1278 				     LDC_MAP_RW));
1279 	if (IS_ERR(dring)) {
1280 		err = PTR_ERR(dring);
1281 		goto err_out;
1282 	}
1283 
1284 	dr->base = dring;
1285 	dr->entry_size = (sizeof(struct vio_net_desc) +
1286 			  (sizeof(struct ldc_trans_cookie) * 2));
1287 	dr->num_entries = VNET_TX_RING_SIZE;
1288 	dr->prod = dr->cons = 0;
1289 	port->start_cons  = true; /* need an initial trigger */
1290 	dr->pending = VNET_TX_RING_SIZE;
1291 	dr->ncookies = ncookies;
1292 
1293 	for (i = 0; i < VNET_TX_RING_SIZE; ++i) {
1294 		struct vio_net_desc *d;
1295 
1296 		d = vio_dring_entry(dr, i);
1297 		d->hdr.state = VIO_DESC_FREE;
1298 	}
1299 	return 0;
1300 
1301 err_out:
1302 	vnet_port_free_tx_bufs(port);
1303 
1304 	return err;
1305 }
1306 
1307 static LIST_HEAD(vnet_list);
1308 static DEFINE_MUTEX(vnet_list_mutex);
1309 
1310 static const struct net_device_ops vnet_ops = {
1311 	.ndo_open		= vnet_open,
1312 	.ndo_stop		= vnet_close,
1313 	.ndo_set_rx_mode	= vnet_set_rx_mode,
1314 	.ndo_set_mac_address	= vnet_set_mac_addr,
1315 	.ndo_validate_addr	= eth_validate_addr,
1316 	.ndo_tx_timeout		= vnet_tx_timeout,
1317 	.ndo_change_mtu		= vnet_change_mtu,
1318 	.ndo_start_xmit		= vnet_start_xmit,
1319 };
1320 
1321 static struct vnet *vnet_new(const u64 *local_mac)
1322 {
1323 	struct net_device *dev;
1324 	struct vnet *vp;
1325 	int err, i;
1326 
1327 	dev = alloc_etherdev(sizeof(*vp));
1328 	if (!dev)
1329 		return ERR_PTR(-ENOMEM);
1330 	dev->needed_headroom = VNET_PACKET_SKIP + 8;
1331 	dev->needed_tailroom = 8;
1332 
1333 	for (i = 0; i < ETH_ALEN; i++)
1334 		dev->dev_addr[i] = (*local_mac >> (5 - i) * 8) & 0xff;
1335 
1336 	vp = netdev_priv(dev);
1337 
1338 	spin_lock_init(&vp->lock);
1339 	tasklet_init(&vp->vnet_tx_wakeup, maybe_tx_wakeup, (unsigned long)vp);
1340 	vp->dev = dev;
1341 
1342 	INIT_LIST_HEAD(&vp->port_list);
1343 	for (i = 0; i < VNET_PORT_HASH_SIZE; i++)
1344 		INIT_HLIST_HEAD(&vp->port_hash[i]);
1345 	INIT_LIST_HEAD(&vp->list);
1346 	vp->local_mac = *local_mac;
1347 
1348 	dev->netdev_ops = &vnet_ops;
1349 	dev->ethtool_ops = &vnet_ethtool_ops;
1350 	dev->watchdog_timeo = VNET_TX_TIMEOUT;
1351 
1352 	err = register_netdev(dev);
1353 	if (err) {
1354 		pr_err("Cannot register net device, aborting\n");
1355 		goto err_out_free_dev;
1356 	}
1357 
1358 	netdev_info(dev, "Sun LDOM vnet %pM\n", dev->dev_addr);
1359 
1360 	list_add(&vp->list, &vnet_list);
1361 
1362 	return vp;
1363 
1364 err_out_free_dev:
1365 	free_netdev(dev);
1366 
1367 	return ERR_PTR(err);
1368 }
1369 
1370 static struct vnet *vnet_find_or_create(const u64 *local_mac)
1371 {
1372 	struct vnet *iter, *vp;
1373 
1374 	mutex_lock(&vnet_list_mutex);
1375 	vp = NULL;
1376 	list_for_each_entry(iter, &vnet_list, list) {
1377 		if (iter->local_mac == *local_mac) {
1378 			vp = iter;
1379 			break;
1380 		}
1381 	}
1382 	if (!vp)
1383 		vp = vnet_new(local_mac);
1384 	mutex_unlock(&vnet_list_mutex);
1385 
1386 	return vp;
1387 }
1388 
1389 static void vnet_cleanup(void)
1390 {
1391 	struct vnet *vp;
1392 	struct net_device *dev;
1393 
1394 	mutex_lock(&vnet_list_mutex);
1395 	while (!list_empty(&vnet_list)) {
1396 		vp = list_first_entry(&vnet_list, struct vnet, list);
1397 		list_del(&vp->list);
1398 		dev = vp->dev;
1399 		tasklet_kill(&vp->vnet_tx_wakeup);
1400 		/* vio_unregister_driver() should have cleaned up port_list */
1401 		BUG_ON(!list_empty(&vp->port_list));
1402 		unregister_netdev(dev);
1403 		free_netdev(dev);
1404 	}
1405 	mutex_unlock(&vnet_list_mutex);
1406 }
1407 
1408 static const char *local_mac_prop = "local-mac-address";
1409 
1410 static struct vnet *vnet_find_parent(struct mdesc_handle *hp,
1411 						u64 port_node)
1412 {
1413 	const u64 *local_mac = NULL;
1414 	u64 a;
1415 
1416 	mdesc_for_each_arc(a, hp, port_node, MDESC_ARC_TYPE_BACK) {
1417 		u64 target = mdesc_arc_target(hp, a);
1418 		const char *name;
1419 
1420 		name = mdesc_get_property(hp, target, "name", NULL);
1421 		if (!name || strcmp(name, "network"))
1422 			continue;
1423 
1424 		local_mac = mdesc_get_property(hp, target,
1425 					       local_mac_prop, NULL);
1426 		if (local_mac)
1427 			break;
1428 	}
1429 	if (!local_mac)
1430 		return ERR_PTR(-ENODEV);
1431 
1432 	return vnet_find_or_create(local_mac);
1433 }
1434 
1435 static struct ldc_channel_config vnet_ldc_cfg = {
1436 	.event		= vnet_event,
1437 	.mtu		= 64,
1438 	.mode		= LDC_MODE_UNRELIABLE,
1439 };
1440 
1441 static struct vio_driver_ops vnet_vio_ops = {
1442 	.send_attr		= vnet_send_attr,
1443 	.handle_attr		= vnet_handle_attr,
1444 	.handshake_complete	= vnet_handshake_complete,
1445 };
1446 
1447 static void print_version(void)
1448 {
1449 	printk_once(KERN_INFO "%s", version);
1450 }
1451 
1452 static const char *remote_macaddr_prop = "remote-mac-address";
1453 
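/* Probe one "vnet-port" MDESC node: locate (or create) the parent vnet
 * device from the "local-mac-address" of its network node, read the
 * port's remote MAC address, set up the VIO/LDC state and TX buffers,
 * link the port into the parent's list and MAC hash (switch ports go
 * to the head of the list), and finally bring the channel up with
 * vio_port_up().
 */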
1454 static int vnet_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
1455 {
1456 	struct mdesc_handle *hp;
1457 	struct vnet_port *port;
1458 	unsigned long flags;
1459 	struct vnet *vp;
1460 	const u64 *rmac;
1461 	int len, i, err, switch_port;
1462 
1463 	print_version();
1464 
1465 	hp = mdesc_grab();
1466 
1467 	vp = vnet_find_parent(hp, vdev->mp);
1468 	if (IS_ERR(vp)) {
1469 		pr_err("Cannot find port parent vnet\n");
1470 		err = PTR_ERR(vp);
1471 		goto err_out_put_mdesc;
1472 	}
1473 
1474 	rmac = mdesc_get_property(hp, vdev->mp, remote_macaddr_prop, &len);
1475 	err = -ENODEV;
1476 	if (!rmac) {
1477 		pr_err("Port lacks %s property\n", remote_macaddr_prop);
1478 		goto err_out_put_mdesc;
1479 	}
1480 
1481 	port = kzalloc(sizeof(*port), GFP_KERNEL);
1482 	err = -ENOMEM;
1483 	if (!port)
1484 		goto err_out_put_mdesc;
1485 
1486 	for (i = 0; i < ETH_ALEN; i++)
1487 		port->raddr[i] = (*rmac >> (5 - i) * 8) & 0xff;
1488 
1489 	port->vp = vp;
1490 
1491 	err = vio_driver_init(&port->vio, vdev, VDEV_NETWORK,
1492 			      vnet_versions, ARRAY_SIZE(vnet_versions),
1493 			      &vnet_vio_ops, vp->dev->name);
1494 	if (err)
1495 		goto err_out_free_port;
1496 
1497 	err = vio_ldc_alloc(&port->vio, &vnet_ldc_cfg, port);
1498 	if (err)
1499 		goto err_out_free_port;
1500 
1501 	err = vnet_port_alloc_tx_bufs(port);
1502 	if (err)
1503 		goto err_out_free_ldc;
1504 
1505 	INIT_HLIST_NODE(&port->hash);
1506 	INIT_LIST_HEAD(&port->list);
1507 
1508 	switch_port = 0;
1509 	if (mdesc_get_property(hp, vdev->mp, "switch-port", NULL) != NULL)
1510 		switch_port = 1;
1511 	port->switch_port = switch_port;
1512 
1513 	spin_lock_irqsave(&vp->lock, flags);
1514 	if (switch_port)
1515 		list_add(&port->list, &vp->port_list);
1516 	else
1517 		list_add_tail(&port->list, &vp->port_list);
1518 	hlist_add_head(&port->hash, &vp->port_hash[vnet_hashfn(port->raddr)]);
1519 	spin_unlock_irqrestore(&vp->lock, flags);
1520 
1521 	dev_set_drvdata(&vdev->dev, port);
1522 
1523 	pr_info("%s: PORT ( remote-mac %pM%s )\n",
1524 		vp->dev->name, port->raddr, switch_port ? " switch-port" : "");
1525 
1526 	setup_timer(&port->clean_timer, vnet_clean_timer_expire,
1527 		    (unsigned long)port);
1528 
1529 	vio_port_up(&port->vio);
1530 
1531 	mdesc_release(hp);
1532 
1533 	return 0;
1534 
1535 err_out_free_ldc:
1536 	vio_ldc_free(&port->vio);
1537 
1538 err_out_free_port:
1539 	kfree(port);
1540 
1541 err_out_put_mdesc:
1542 	mdesc_release(hp);
1543 	return err;
1544 }
1545 
1546 static int vnet_port_remove(struct vio_dev *vdev)
1547 {
1548 	struct vnet_port *port = dev_get_drvdata(&vdev->dev);
1549 
1550 	if (port) {
1551 		struct vnet *vp = port->vp;
1552 		unsigned long flags;
1553 
1554 		del_timer_sync(&port->vio.timer);
1555 		del_timer_sync(&port->clean_timer);
1556 
1557 		spin_lock_irqsave(&vp->lock, flags);
1558 		list_del(&port->list);
1559 		hlist_del(&port->hash);
1560 		spin_unlock_irqrestore(&vp->lock, flags);
1561 
1562 		vnet_port_free_tx_bufs(port);
1563 		vio_ldc_free(&port->vio);
1564 
1565 		dev_set_drvdata(&vdev->dev, NULL);
1566 
1567 		kfree(port);
1568 
1569 	}
1570 	return 0;
1571 }
1572 
1573 static const struct vio_device_id vnet_port_match[] = {
1574 	{
1575 		.type = "vnet-port",
1576 	},
1577 	{},
1578 };
1579 MODULE_DEVICE_TABLE(vio, vnet_port_match);
1580 
1581 static struct vio_driver vnet_port_driver = {
1582 	.id_table	= vnet_port_match,
1583 	.probe		= vnet_port_probe,
1584 	.remove		= vnet_port_remove,
1585 	.name		= "vnet_port",
1586 };
1587 
1588 static int __init vnet_init(void)
1589 {
1590 	return vio_register_driver(&vnet_port_driver);
1591 }
1592 
1593 static void __exit vnet_exit(void)
1594 {
1595 	vio_unregister_driver(&vnet_port_driver);
1596 	vnet_cleanup();
1597 }
1598 
1599 module_init(vnet_init);
1600 module_exit(vnet_exit);
1601