/* viohs.c: LDOM Virtual I/O handshake helper layer.
 *
 * Copyright (C) 2007 David S. Miller <davem@davemloft.net>
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/slab.h>

#include <asm/ldc.h>
#include <asm/vio.h>

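/* Write a fully formed packet to the LDC channel, retrying for roughly
 * a millisecond while the channel reports -EAGAIN.  Returns whatever
 * ldc_write() last returned: positive on success, negative errno on
 * failure.
 */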
int vio_ldc_send(struct vio_driver_state *vio, void *data, int len)
{
	int err, limit = 1000;

	err = -EINVAL;
	while (limit-- > 0) {
		err = ldc_write(vio->lp, data, len);
		if (!err || (err != -EAGAIN))
			break;
		udelay(1);
	}

	return err;
}
EXPORT_SYMBOL(vio_ldc_send);

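/* Small helpers for building control packets: send_ctrl() stamps the
 * outgoing session ID into the tag before transmitting, and init_tag()
 * fills in the type/subtype/subtype-envelope fields of a message tag.
 */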
static int send_ctrl(struct vio_driver_state *vio,
		     struct vio_msg_tag *tag, int len)
{
	tag->sid = vio_send_sid(vio);
	return vio_ldc_send(vio, tag, len);
}

static void init_tag(struct vio_msg_tag *tag, u8 type, u8 stype, u16 stype_env)
{
	tag->type = type;
	tag->stype = stype;
	tag->stype_env = stype_env;
}

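/* Send a VER_INFO control packet proposing the given protocol version.
 * A fresh local session ID is generated from sched_clock() each time a
 * new version negotiation is started.
 */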
static int send_version(struct vio_driver_state *vio, u16 major, u16 minor)
{
	struct vio_ver_info pkt;

	vio->_local_sid = (u32) sched_clock();

	memset(&pkt, 0, sizeof(pkt));
	init_tag(&pkt.tag, VIO_TYPE_CTRL, VIO_SUBTYPE_INFO, VIO_VER_INFO);
	pkt.major = major;
	pkt.minor = minor;
	pkt.dev_class = vio->dev_class;

	viodbg(HS, "SEND VERSION INFO maj[%u] min[%u] devclass[%u]\n",
	       major, minor, vio->dev_class);

	return send_ctrl(vio, &pkt.tag, sizeof(pkt));
}

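/* Kick off the handshake by proposing the first (preferred) entry in
 * the driver's version table.  Other entries are tried from
 * process_ver_nack() if the peer rejects this one.
 */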
static int start_handshake(struct vio_driver_state *vio)
{
	int err;

	viodbg(HS, "START HANDSHAKE\n");

	vio->hs_state = VIO_HS_INVALID;

	err = send_version(vio,
			   vio->ver_table[0].major,
			   vio->ver_table[0].minor);
	if (err < 0)
		return err;

	return 0;
}

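/* Drop all soft state for the registered RX descriptor ring while
 * preserving its identifier, and release the local descriptor copy
 * buffer.  Used when the link is reset with an RX ring still
 * registered.
 */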
static void flush_rx_dring(struct vio_driver_state *vio)
{
	struct vio_dring_state *dr;
	u64 ident;

	BUG_ON(!(vio->dr_state & VIO_DR_STATE_RXREG));

	dr = &vio->drings[VIO_DRIVER_RX_RING];
	ident = dr->ident;

	BUG_ON(!vio->desc_buf);
	kfree(vio->desc_buf);
	vio->desc_buf = NULL;

	memset(dr, 0, sizeof(*dr));
	dr->ident = ident;
}

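/* LDC link event hook.  On LDC_EVENT_UP, record which descriptor rings
 * are expected for this device class (TX, RX or both) and start the
 * handshake.  On LDC_EVENT_RESET, tear down ring and version state and
 * disconnect the channel so a new handshake can begin cleanly.
 */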
void vio_link_state_change(struct vio_driver_state *vio, int event)
{
	if (event == LDC_EVENT_UP) {
		vio->hs_state = VIO_HS_INVALID;

		switch (vio->dev_class) {
		case VDEV_NETWORK:
		case VDEV_NETWORK_SWITCH:
			vio->dr_state = (VIO_DR_STATE_TXREQ |
					 VIO_DR_STATE_RXREQ);
			break;

		case VDEV_DISK:
			vio->dr_state = VIO_DR_STATE_TXREQ;
			break;
		case VDEV_DISK_SERVER:
			vio->dr_state = VIO_DR_STATE_RXREQ;
			break;
		}
		start_handshake(vio);
	} else if (event == LDC_EVENT_RESET) {
		vio->hs_state = VIO_HS_INVALID;

		if (vio->dr_state & VIO_DR_STATE_RXREG)
			flush_rx_dring(vio);

		vio->dr_state = 0x00;
		memset(&vio->ver, 0, sizeof(vio->ver));

		ldc_disconnect(vio->lp);
	}
}
EXPORT_SYMBOL(vio_link_state_change);

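/* Common failure path for the handshake state machine: forget any
 * registered rings, free the descriptor copy buffer and drop back to
 * VIO_HS_INVALID.  Always returns -ECONNRESET so callers can simply
 * "return handshake_failure(vio);".
 */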
static int handshake_failure(struct vio_driver_state *vio)
{
	struct vio_dring_state *dr;

	/* XXX Put policy here...  Perhaps start a timer to fire
	 * XXX in 100 ms, which will bring the link up and retry
	 * XXX the handshake.
	 */

	viodbg(HS, "HANDSHAKE FAILURE\n");

	vio->dr_state &= ~(VIO_DR_STATE_TXREG |
			   VIO_DR_STATE_RXREG);

	dr = &vio->drings[VIO_DRIVER_RX_RING];
	memset(dr, 0, sizeof(*dr));

	kfree(vio->desc_buf);
	vio->desc_buf = NULL;
	vio->desc_buf_len = 0;

	vio->hs_state = VIO_HS_INVALID;

	return -ECONNRESET;
}

static int process_unknown(struct vio_driver_state *vio, void *arg)
{
	struct vio_msg_tag *pkt = arg;

	viodbg(HS, "UNKNOWN CONTROL [%02x:%02x:%04x:%08x]\n",
	       pkt->type, pkt->stype, pkt->stype_env, pkt->sid);

	printk(KERN_ERR "vio: ID[%lu] Resetting connection.\n",
	       vio->vdev->channel_id);

	ldc_disconnect(vio->lp);

	return -ECONNRESET;
}

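/* Register the local TX descriptor ring with the peer.  The packet is
 * variable length: the fixed vio_dring_register header is followed by
 * one ldc_trans_cookie per ring cookie, hence the union with a
 * suitably sized char array.
 */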
static int send_dreg(struct vio_driver_state *vio)
{
	struct vio_dring_state *dr = &vio->drings[VIO_DRIVER_TX_RING];
	union {
		struct vio_dring_register pkt;
		char all[sizeof(struct vio_dring_register) +
			 (sizeof(struct ldc_trans_cookie) *
			  dr->ncookies)];
	} u;
	int i;

	memset(&u, 0, sizeof(u));
	init_tag(&u.pkt.tag, VIO_TYPE_CTRL, VIO_SUBTYPE_INFO, VIO_DRING_REG);
	u.pkt.dring_ident = 0;
	u.pkt.num_descr = dr->num_entries;
	u.pkt.descr_size = dr->entry_size;
	u.pkt.options = VIO_TX_DRING;
	u.pkt.num_cookies = dr->ncookies;

	viodbg(HS, "SEND DRING_REG INFO ndesc[%u] dsz[%u] opt[0x%x] "
	       "ncookies[%u]\n",
	       u.pkt.num_descr, u.pkt.descr_size, u.pkt.options,
	       u.pkt.num_cookies);

	for (i = 0; i < dr->ncookies; i++) {
		u.pkt.cookies[i] = dr->cookies[i];

		viodbg(HS, "DRING COOKIE(%d) [%016llx:%016llx]\n",
		       i,
		       (unsigned long long) u.pkt.cookies[i].cookie_addr,
		       (unsigned long long) u.pkt.cookies[i].cookie_size);
	}

	return send_ctrl(vio, &u.pkt.tag, sizeof(u));
}

static int send_rdx(struct vio_driver_state *vio)
{
	struct vio_rdx pkt;

	memset(&pkt, 0, sizeof(pkt));

	init_tag(&pkt.tag, VIO_TYPE_CTRL, VIO_SUBTYPE_INFO, VIO_RDX);

	viodbg(HS, "SEND RDX INFO\n");

	return send_ctrl(vio, &pkt.tag, sizeof(pkt));
}

static int send_attr(struct vio_driver_state *vio)
{
	if (!vio->ops)
		return -EINVAL;

	return vio->ops->send_attr(vio);
}

static struct vio_version *find_by_major(struct vio_driver_state *vio,
					 u16 major)
{
	struct vio_version *ret = NULL;
	int i;

	for (i = 0; i < vio->ver_table_entries; i++) {
		struct vio_version *v = &vio->ver_table[i];
		if (v->major <= major) {
			ret = v;
			break;
		}
	}
	return ret;
}

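/* Handle a VER_INFO request from the peer.  We NACK with maj[0]/min[0]
 * if no usable major version exists, NACK with our closest supported
 * version if the majors differ, and otherwise ACK with the lower of
 * the two minor versions.  A successful ACK records the negotiated
 * version and moves the handshake to VIO_HS_GOTVERS.
 */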
static int process_ver_info(struct vio_driver_state *vio,
			    struct vio_ver_info *pkt)
{
	struct vio_version *vap;
	int err;

	viodbg(HS, "GOT VERSION INFO maj[%u] min[%u] devclass[%u]\n",
	       pkt->major, pkt->minor, pkt->dev_class);

	if (vio->hs_state != VIO_HS_INVALID) {
		/* XXX Perhaps invoke start_handshake? XXX */
		memset(&vio->ver, 0, sizeof(vio->ver));
		vio->hs_state = VIO_HS_INVALID;
	}

	vap = find_by_major(vio, pkt->major);

	vio->_peer_sid = pkt->tag.sid;

	if (!vap) {
		pkt->tag.stype = VIO_SUBTYPE_NACK;
		pkt->major = 0;
		pkt->minor = 0;
		viodbg(HS, "SEND VERSION NACK maj[0] min[0]\n");
		err = send_ctrl(vio, &pkt->tag, sizeof(*pkt));
	} else if (vap->major != pkt->major) {
		pkt->tag.stype = VIO_SUBTYPE_NACK;
		pkt->major = vap->major;
		pkt->minor = vap->minor;
		viodbg(HS, "SEND VERSION NACK maj[%u] min[%u]\n",
		       pkt->major, pkt->minor);
		err = send_ctrl(vio, &pkt->tag, sizeof(*pkt));
	} else {
		struct vio_version ver = {
			.major = pkt->major,
			.minor = pkt->minor,
		};
		if (ver.minor > vap->minor)
			ver.minor = vap->minor;
		pkt->minor = ver.minor;
		pkt->tag.stype = VIO_SUBTYPE_ACK;
		pkt->dev_class = vio->dev_class;
		viodbg(HS, "SEND VERSION ACK maj[%u] min[%u]\n",
		       pkt->major, pkt->minor);
		err = send_ctrl(vio, &pkt->tag, sizeof(*pkt));
		if (err > 0) {
			vio->ver = ver;
			vio->hs_state = VIO_HS_GOTVERS;
		}
	}
	if (err < 0)
		return handshake_failure(vio);

	return 0;
}

static int process_ver_ack(struct vio_driver_state *vio,
			   struct vio_ver_info *pkt)
{
	viodbg(HS, "GOT VERSION ACK maj[%u] min[%u] devclass[%u]\n",
	       pkt->major, pkt->minor, pkt->dev_class);

	if (vio->hs_state & VIO_HS_GOTVERS) {
		if (vio->ver.major != pkt->major ||
		    vio->ver.minor != pkt->minor) {
			pkt->tag.stype = VIO_SUBTYPE_NACK;
			(void) send_ctrl(vio, &pkt->tag, sizeof(*pkt));
			return handshake_failure(vio);
		}
	} else {
		vio->ver.major = pkt->major;
		vio->ver.minor = pkt->minor;
		vio->hs_state = VIO_HS_GOTVERS;
	}

	switch (vio->dev_class) {
	case VDEV_NETWORK:
	case VDEV_DISK:
		if (send_attr(vio) < 0)
			return handshake_failure(vio);
		break;

	default:
		break;
	}

	return 0;
}

static int process_ver_nack(struct vio_driver_state *vio,
			    struct vio_ver_info *pkt)
{
	struct vio_version *nver;

	viodbg(HS, "GOT VERSION NACK maj[%u] min[%u] devclass[%u]\n",
	       pkt->major, pkt->minor, pkt->dev_class);

	if (pkt->major == 0 && pkt->minor == 0)
		return handshake_failure(vio);
	nver = find_by_major(vio, pkt->major);
	if (!nver)
		return handshake_failure(vio);

	if (send_version(vio, nver->major, nver->minor) < 0)
		return handshake_failure(vio);

	return 0;
}

static int process_ver(struct vio_driver_state *vio, struct vio_ver_info *pkt)
{
	switch (pkt->tag.stype) {
	case VIO_SUBTYPE_INFO:
		return process_ver_info(vio, pkt);

	case VIO_SUBTYPE_ACK:
		return process_ver_ack(vio, pkt);

	case VIO_SUBTYPE_NACK:
		return process_ver_nack(vio, pkt);

	default:
		return handshake_failure(vio);
	}
}

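/* Attribute exchange: pass the device-specific ATTR payload to the
 * driver's handle_attr() hook.  Once attributes are accepted, send our
 * TX ring registration if this device class requires one and it has
 * not been sent yet.
 */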
static int process_attr(struct vio_driver_state *vio, void *pkt)
{
	int err;

	if (!(vio->hs_state & VIO_HS_GOTVERS))
		return handshake_failure(vio);

	if (!vio->ops)
		return 0;

	err = vio->ops->handle_attr(vio, pkt);
	if (err < 0) {
		return handshake_failure(vio);
	} else {
		vio->hs_state |= VIO_HS_GOT_ATTR;

		if ((vio->dr_state & VIO_DR_STATE_TXREQ) &&
		    !(vio->hs_state & VIO_HS_SENT_DREG)) {
			if (send_dreg(vio) < 0)
				return handshake_failure(vio);

			vio->hs_state |= VIO_HS_SENT_DREG;
		}
	}

	return 0;
}

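/* A ring is only required if the corresponding *REQ bit was set when
 * the link came up; return nonzero once every required ring has
 * reached the registered (*REG) state.
 */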
static int all_drings_registered(struct vio_driver_state *vio)
{
	int need_rx, need_tx;

	need_rx = (vio->dr_state & VIO_DR_STATE_RXREQ);
	need_tx = (vio->dr_state & VIO_DR_STATE_TXREQ);

	if (need_rx &&
	    !(vio->dr_state & VIO_DR_STATE_RXREG))
		return 0;

	if (need_tx &&
	    !(vio->dr_state & VIO_DR_STATE_TXREG))
		return 0;

	return 1;
}

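/* The peer wants to register its descriptor ring with us.  Validate
 * that we expect an RX ring and do not already have one, allocate a
 * scratch buffer big enough for one descriptor, copy the ring geometry
 * and transfer cookies, then ACK with a newly assigned ring identifier.
 * Any failure NACKs the request and aborts the handshake.
 */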
static int process_dreg_info(struct vio_driver_state *vio,
			     struct vio_dring_register *pkt)
{
	struct vio_dring_state *dr;
	int i, len;

	viodbg(HS, "GOT DRING_REG INFO ident[%llx] "
	       "ndesc[%u] dsz[%u] opt[0x%x] ncookies[%u]\n",
	       (unsigned long long) pkt->dring_ident,
	       pkt->num_descr, pkt->descr_size, pkt->options,
	       pkt->num_cookies);

	if (!(vio->dr_state & VIO_DR_STATE_RXREQ))
		goto send_nack;

	if (vio->dr_state & VIO_DR_STATE_RXREG)
		goto send_nack;

	/* v1.6 and higher, ACK with desired, supported mode, or NACK */
	if (vio_version_after_eq(vio, 1, 6)) {
		if (!(pkt->options & VIO_TX_DRING))
			goto send_nack;
		pkt->options = VIO_TX_DRING;
	}

	BUG_ON(vio->desc_buf);

	vio->desc_buf = kzalloc(pkt->descr_size, GFP_ATOMIC);
	if (!vio->desc_buf)
		goto send_nack;

	vio->desc_buf_len = pkt->descr_size;

	dr = &vio->drings[VIO_DRIVER_RX_RING];

	dr->num_entries = pkt->num_descr;
	dr->entry_size = pkt->descr_size;
	dr->ncookies = pkt->num_cookies;
	for (i = 0; i < dr->ncookies; i++) {
		dr->cookies[i] = pkt->cookies[i];

		viodbg(HS, "DRING COOKIE(%d) [%016llx:%016llx]\n",
		       i,
		       (unsigned long long)
		       pkt->cookies[i].cookie_addr,
		       (unsigned long long)
		       pkt->cookies[i].cookie_size);
	}

	pkt->tag.stype = VIO_SUBTYPE_ACK;
	pkt->dring_ident = ++dr->ident;

	viodbg(HS, "SEND DRING_REG ACK ident[%llx] "
	       "ndesc[%u] dsz[%u] opt[0x%x] ncookies[%u]\n",
	       (unsigned long long) pkt->dring_ident,
	       pkt->num_descr, pkt->descr_size, pkt->options,
	       pkt->num_cookies);

	len = (sizeof(*pkt) +
	       (dr->ncookies * sizeof(struct ldc_trans_cookie)));
	if (send_ctrl(vio, &pkt->tag, len) < 0)
		goto send_nack;

	vio->dr_state |= VIO_DR_STATE_RXREG;

	return 0;

send_nack:
	pkt->tag.stype = VIO_SUBTYPE_NACK;
	viodbg(HS, "SEND DRING_REG NACK\n");
	(void) send_ctrl(vio, &pkt->tag, sizeof(*pkt));

	return handshake_failure(vio);
}

static int process_dreg_ack(struct vio_driver_state *vio,
			    struct vio_dring_register *pkt)
{
	struct vio_dring_state *dr;

	viodbg(HS, "GOT DRING_REG ACK ident[%llx] "
	       "ndesc[%u] dsz[%u] opt[0x%x] ncookies[%u]\n",
	       (unsigned long long) pkt->dring_ident,
	       pkt->num_descr, pkt->descr_size, pkt->options,
	       pkt->num_cookies);

	dr = &vio->drings[VIO_DRIVER_TX_RING];

	if (!(vio->dr_state & VIO_DR_STATE_TXREQ))
		return handshake_failure(vio);

	dr->ident = pkt->dring_ident;
	vio->dr_state |= VIO_DR_STATE_TXREG;

	if (all_drings_registered(vio)) {
		if (send_rdx(vio) < 0)
			return handshake_failure(vio);
		vio->hs_state = VIO_HS_SENT_RDX;
	}
	return 0;
}

static int process_dreg_nack(struct vio_driver_state *vio,
			     struct vio_dring_register *pkt)
{
	viodbg(HS, "GOT DRING_REG NACK ident[%llx] "
	       "ndesc[%u] dsz[%u] opt[0x%x] ncookies[%u]\n",
	       (unsigned long long) pkt->dring_ident,
	       pkt->num_descr, pkt->descr_size, pkt->options,
	       pkt->num_cookies);

	return handshake_failure(vio);
}

static int process_dreg(struct vio_driver_state *vio,
			struct vio_dring_register *pkt)
{
	if (!(vio->hs_state & VIO_HS_GOTVERS))
		return handshake_failure(vio);

	switch (pkt->tag.stype) {
	case VIO_SUBTYPE_INFO:
		return process_dreg_info(vio, pkt);

	case VIO_SUBTYPE_ACK:
		return process_dreg_ack(vio, pkt);

	case VIO_SUBTYPE_NACK:
		return process_dreg_nack(vio, pkt);

	default:
		return handshake_failure(vio);
	}
}

static int process_dunreg(struct vio_driver_state *vio,
			  struct vio_dring_unregister *pkt)
{
	struct vio_dring_state *dr = &vio->drings[VIO_DRIVER_RX_RING];

	viodbg(HS, "GOT DRING_UNREG\n");

	if (pkt->dring_ident != dr->ident)
		return 0;

	vio->dr_state &= ~VIO_DR_STATE_RXREG;

	memset(dr, 0, sizeof(*dr));

	kfree(vio->desc_buf);
	vio->desc_buf = NULL;
	vio->desc_buf_len = 0;

	return 0;
}

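/* RDX (ready to exchange data): the final step of the handshake.  An
 * RDX INFO from the peer is ACKed, an RDX ACK is only legal after we
 * have sent our own RDX, and an RDX NACK aborts the handshake.
 */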
static int process_rdx_info(struct vio_driver_state *vio, struct vio_rdx *pkt)
{
	viodbg(HS, "GOT RDX INFO\n");

	pkt->tag.stype = VIO_SUBTYPE_ACK;
	viodbg(HS, "SEND RDX ACK\n");
	if (send_ctrl(vio, &pkt->tag, sizeof(*pkt)) < 0)
		return handshake_failure(vio);

	vio->hs_state |= VIO_HS_SENT_RDX_ACK;
	return 0;
}

static int process_rdx_ack(struct vio_driver_state *vio, struct vio_rdx *pkt)
{
	viodbg(HS, "GOT RDX ACK\n");

	if (!(vio->hs_state & VIO_HS_SENT_RDX))
		return handshake_failure(vio);

	vio->hs_state |= VIO_HS_GOT_RDX_ACK;
	return 0;
}

static int process_rdx_nack(struct vio_driver_state *vio, struct vio_rdx *pkt)
{
	viodbg(HS, "GOT RDX NACK\n");

	return handshake_failure(vio);
}

static int process_rdx(struct vio_driver_state *vio, struct vio_rdx *pkt)
{
	if (!all_drings_registered(vio))
		handshake_failure(vio);

	switch (pkt->tag.stype) {
	case VIO_SUBTYPE_INFO:
		return process_rdx_info(vio, pkt);

	case VIO_SUBTYPE_ACK:
		return process_rdx_ack(vio, pkt);

	case VIO_SUBTYPE_NACK:
		return process_rdx_nack(vio, pkt);

	default:
		return handshake_failure(vio);
	}
}

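/* Top-level dispatcher for control packets, called by drivers when a
 * control packet arrives on the channel.  Routes the packet by
 * subtype-envelope to the handlers above and, when a handler completes
 * the handshake (VIO_HS_COMPLETE newly set), notifies the driver via
 * ops->handshake_complete().
 */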
int vio_control_pkt_engine(struct vio_driver_state *vio, void *pkt)
{
	struct vio_msg_tag *tag = pkt;
	u8 prev_state = vio->hs_state;
	int err;

	switch (tag->stype_env) {
	case VIO_VER_INFO:
		err = process_ver(vio, pkt);
		break;

	case VIO_ATTR_INFO:
		err = process_attr(vio, pkt);
		break;

	case VIO_DRING_REG:
		err = process_dreg(vio, pkt);
		break;

	case VIO_DRING_UNREG:
		err = process_dunreg(vio, pkt);
		break;

	case VIO_RDX:
		err = process_rdx(vio, pkt);
		break;

	default:
		err = process_unknown(vio, pkt);
		break;
	}

	if (!err &&
	    vio->hs_state != prev_state &&
	    (vio->hs_state & VIO_HS_COMPLETE)) {
		if (vio->ops)
			vio->ops->handshake_complete(vio);
	}

	return err;
}
EXPORT_SYMBOL(vio_control_pkt_engine);

void vio_conn_reset(struct vio_driver_state *vio)
{
}
EXPORT_SYMBOL(vio_conn_reset);

/* The issue is that the Solaris virtual disk server just mirrors the
 * SID values it gets from the client peer.  So we work around that
 * here in vio_{validate,send}_sid() so that the drivers don't need
 * to be aware of this crap.
 */
int vio_validate_sid(struct vio_driver_state *vio, struct vio_msg_tag *tp)
{
	u32 sid;

	/* Always let VERSION+INFO packets through unchecked, they
	 * define the new SID.
	 */
	if (tp->type == VIO_TYPE_CTRL &&
	    tp->stype == VIO_SUBTYPE_INFO &&
	    tp->stype_env == VIO_VER_INFO)
		return 0;

	/* Ok, now figure out which SID to use.  */
	switch (vio->dev_class) {
	case VDEV_NETWORK:
	case VDEV_NETWORK_SWITCH:
	case VDEV_DISK_SERVER:
	default:
		sid = vio->_peer_sid;
		break;

	case VDEV_DISK:
		sid = vio->_local_sid;
		break;
	}

	if (sid == tp->sid)
		return 0;
	viodbg(DATA, "BAD SID tag->sid[%08x] peer_sid[%08x] local_sid[%08x]\n",
	       tp->sid, vio->_peer_sid, vio->_local_sid);
	return -EINVAL;
}
EXPORT_SYMBOL(vio_validate_sid);

u32 vio_send_sid(struct vio_driver_state *vio)
{
	switch (vio->dev_class) {
	case VDEV_NETWORK:
	case VDEV_NETWORK_SWITCH:
	case VDEV_DISK:
	default:
		return vio->_local_sid;

	case VDEV_DISK_SERVER:
		return vio->_peer_sid;
	}
}
EXPORT_SYMBOL(vio_send_sid);

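/* Allocate the LDC channel for this device.  The caller's base config
 * is copied, the TX/RX interrupts are filled in from the underlying
 * vio_dev, and the device's channel ID is handed to ldc_alloc().
 */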
int vio_ldc_alloc(struct vio_driver_state *vio,
			 struct ldc_channel_config *base_cfg,
			 void *event_arg)
{
	struct ldc_channel_config cfg = *base_cfg;
	struct ldc_channel *lp;

	cfg.tx_irq = vio->vdev->tx_irq;
	cfg.rx_irq = vio->vdev->rx_irq;

	lp = ldc_alloc(vio->vdev->channel_id, &cfg, event_arg, vio->name);
	if (IS_ERR(lp))
		return PTR_ERR(lp);

	vio->lp = lp;

	return 0;
}
EXPORT_SYMBOL(vio_ldc_alloc);

void vio_ldc_free(struct vio_driver_state *vio)
{
	ldc_free(vio->lp);
	vio->lp = NULL;

	kfree(vio->desc_buf);
	vio->desc_buf = NULL;
	vio->desc_buf_len = 0;
}
EXPORT_SYMBOL(vio_ldc_free);

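/* Bring the LDC channel up: bind it if it is still in the INIT state
 * and then connect it (RAW mode channels are simply marked connected).
 * On any failure the port timer is re-armed for roughly one second in
 * the future so the bring-up is retried.
 */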
void vio_port_up(struct vio_driver_state *vio)
{
	unsigned long flags;
	int err, state;

	spin_lock_irqsave(&vio->lock, flags);

	state = ldc_state(vio->lp);

	err = 0;
	if (state == LDC_STATE_INIT) {
		err = ldc_bind(vio->lp);
		if (err)
			printk(KERN_WARNING "%s: Port %lu bind failed, "
			       "err=%d\n",
			       vio->name, vio->vdev->channel_id, err);
	}

	if (!err) {
		if (ldc_mode(vio->lp) == LDC_MODE_RAW)
			ldc_set_state(vio->lp, LDC_STATE_CONNECTED);
		else
			err = ldc_connect(vio->lp);

		if (err)
			printk(KERN_WARNING "%s: Port %lu connect failed, "
			       "err=%d\n",
			       vio->name, vio->vdev->channel_id, err);
	}
	if (err) {
		unsigned long expires = jiffies + HZ;

		expires = round_jiffies(expires);
		mod_timer(&vio->timer, expires);
	}

	spin_unlock_irqrestore(&vio->lock, flags);
}
EXPORT_SYMBOL(vio_port_up);

static void vio_port_timer(unsigned long _arg)
{
	struct vio_driver_state *vio = (struct vio_driver_state *) _arg;

	vio_port_up(vio);
}

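/* Initialize the common VIO driver state.  Network and disk device
 * classes must supply the full set of handshake ops; the version table
 * and name are mandatory for every class.
 *
 * A port driver typically wires this layer up roughly as follows (a
 * sketch only; "port", "my_versions", "my_ops" and "my_ldc_cfg" are
 * illustrative names, not part of this API):
 *
 *	err = vio_driver_init(&port->vio, vdev, VDEV_DISK, my_versions,
 *			      ARRAY_SIZE(my_versions), &my_ops, "mydrv");
 *	if (!err)
 *		err = vio_ldc_alloc(&port->vio, &my_ldc_cfg, port);
 *	if (!err)
 *		vio_port_up(&port->vio);
 *
 * after which vio_control_pkt_engine() is fed control packets from the
 * driver's LDC event handler.
 */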
int vio_driver_init(struct vio_driver_state *vio, struct vio_dev *vdev,
		    u8 dev_class, struct vio_version *ver_table,
		    int ver_table_size, struct vio_driver_ops *ops,
		    char *name)
{
	switch (dev_class) {
	case VDEV_NETWORK:
	case VDEV_NETWORK_SWITCH:
	case VDEV_DISK:
	case VDEV_DISK_SERVER:
	case VDEV_CONSOLE_CON:
		break;

	default:
		return -EINVAL;
	}

	if (dev_class == VDEV_NETWORK ||
	    dev_class == VDEV_NETWORK_SWITCH ||
	    dev_class == VDEV_DISK ||
	    dev_class == VDEV_DISK_SERVER) {
		if (!ops || !ops->send_attr || !ops->handle_attr ||
		    !ops->handshake_complete)
			return -EINVAL;
	}

	if (!ver_table || ver_table_size < 0)
		return -EINVAL;

	if (!name)
		return -EINVAL;

	spin_lock_init(&vio->lock);

	vio->name = name;

	vio->dev_class = dev_class;
	vio->vdev = vdev;

	vio->ver_table = ver_table;
	vio->ver_table_entries = ver_table_size;

	vio->ops = ops;

	setup_timer(&vio->timer, vio_port_timer, (unsigned long) vio);

	return 0;
}
EXPORT_SYMBOL(vio_driver_init);