/* sunvdc.c: Sun LDOM Virtual Disk Client.
 *
 * Copyright (C) 2007, 2008 David S. Miller <davem@davemloft.net>
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>
#include <linux/genhd.h>
#include <linux/cdrom.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/scatterlist.h>

#include <asm/vio.h>
#include <asm/ldc.h>

#define DRV_MODULE_NAME		"sunvdc"
#define PFX DRV_MODULE_NAME	": "
#define DRV_MODULE_VERSION	"1.1"
#define DRV_MODULE_RELDATE	"February 13, 2013"

static char version[] =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
MODULE_DESCRIPTION("Sun LDOM virtual disk client driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

#define VDC_TX_RING_SIZE	512

/* Values for vio_completion.waiting_for.  */
#define WAITING_FOR_LINK_UP	0x01
#define WAITING_FOR_TX_SPACE	0x02
#define WAITING_FOR_GEN_CMD	0x04
#define WAITING_FOR_ANY		-1

struct vdc_req_entry {
	struct request		*req;
};

struct vdc_port {
	struct vio_driver_state	vio;

	struct gendisk		*disk;

	struct vdc_completion	*cmp;

	u64			req_id;
	u64			seq;
	struct vdc_req_entry	rq_arr[VDC_TX_RING_SIZE];

	unsigned long		ring_cookies;

	u64			max_xfer_size;
	u32			vdisk_block_size;

	/* The server fills these in for us in the disk attribute
	 * ACK packet.
	 */
	u64			operations;
	u32			vdisk_size;
	u8			vdisk_type;
	u8			vdisk_mtype;

	char			disk_name[32];
};

static inline struct vdc_port *to_vdc_port(struct vio_driver_state *vio)
{
	return container_of(vio, struct vdc_port, vio);
}

/* Ordered from largest major version to lowest. */
static struct vio_version vdc_versions[] = {
	{ .major = 1, .minor = 1 },
	{ .major = 1, .minor = 0 },
};

static inline int vdc_version_supported(struct vdc_port *port,
					u16 major, u16 minor)
{
	return port->vio.ver.major == major && port->vio.ver.minor >= minor;
}

#define VDCBLK_NAME	"vdisk"
static int vdc_major;	/* major number, set dynamically in vdc_init() */
#define PARTITION_SHIFT	3	/* eight minors (partitions) per disk */

static inline u32 vdc_tx_dring_avail(struct vio_dring_state *dr)
{
	return vio_dring_avail(dr, VDC_TX_RING_SIZE);
}

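/* A virtual disk has no physical geometry, so fabricate CHS values for
 * HDIO_GETGEO users: 255 heads, 63 sectors/track, and cylinders derived
 * from the capacity (clamped to 0xffff if the capacity would overflow).
 */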
static int vdc_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct gendisk *disk = bdev->bd_disk;
	sector_t nsect = get_capacity(disk);
	sector_t cylinders = nsect;

	geo->heads = 0xff;
	geo->sectors = 0x3f;
	sector_div(cylinders, geo->heads * geo->sectors);
	geo->cylinders = cylinders;
	if ((sector_t)(geo->cylinders + 1) * geo->heads * geo->sectors < nsect)
		geo->cylinders = 0xffff;

	return 0;
}

/* Add ioctl/CDROM_GET_CAPABILITY to support cdrom_id in udev
 * when vdisk_mtype is VD_MEDIA_TYPE_CD or VD_MEDIA_TYPE_DVD.
 * Needed to be able to install inside an LDOM from an ISO image.
 */
static int vdc_ioctl(struct block_device *bdev, fmode_t mode,
		     unsigned command, unsigned long argument)
{
	int i;
	struct gendisk *disk;

	switch (command) {
	case CDROMMULTISESSION:
		pr_debug(PFX "Multisession CDs not supported\n");
		for (i = 0; i < sizeof(struct cdrom_multisession); i++)
			if (put_user(0, (char __user *)(argument + i)))
				return -EFAULT;
		return 0;

	case CDROM_GET_CAPABILITY:
		disk = bdev->bd_disk;

		if (disk && (disk->flags & GENHD_FL_CD))
			return 0;
		return -EINVAL;

	default:
		pr_debug(PFX "ioctl %08x not supported\n", command);
		return -EINVAL;
	}
}

static const struct block_device_operations vdc_fops = {
	.owner		= THIS_MODULE,
	.getgeo		= vdc_getgeo,
	.ioctl		= vdc_ioctl,
};

static void vdc_finish(struct vio_driver_state *vio, int err, int waiting_for)
{
	if (vio->cmp &&
	    (waiting_for == WAITING_FOR_ANY ||
	     vio->cmp->waiting_for == waiting_for)) {
		vio->cmp->err = err;
		complete(&vio->cmp->com);
		vio->cmp = NULL;
	}
}

static void vdc_handshake_complete(struct vio_driver_state *vio)
{
	vdc_finish(vio, 0, WAITING_FOR_LINK_UP);
}

static int vdc_handle_unknown(struct vdc_port *port, void *arg)
{
	struct vio_msg_tag *pkt = arg;

	printk(KERN_ERR PFX "Received unknown msg [%02x:%02x:%04x:%08x]\n",
	       pkt->type, pkt->stype, pkt->stype_env, pkt->sid);
	printk(KERN_ERR PFX "Resetting connection.\n");

	ldc_disconnect(port->vio.lp);

	return -ECONNRESET;
}

static int vdc_send_attr(struct vio_driver_state *vio)
{
	struct vdc_port *port = to_vdc_port(vio);
	struct vio_disk_attr_info pkt;

	memset(&pkt, 0, sizeof(pkt));

	pkt.tag.type = VIO_TYPE_CTRL;
	pkt.tag.stype = VIO_SUBTYPE_INFO;
	pkt.tag.stype_env = VIO_ATTR_INFO;
	pkt.tag.sid = vio_send_sid(vio);

	pkt.xfer_mode = VIO_DRING_MODE;
	pkt.vdisk_block_size = port->vdisk_block_size;
	pkt.max_xfer_size = port->max_xfer_size;

	viodbg(HS, "SEND ATTR xfer_mode[0x%x] blksz[%u] max_xfer[%llu]\n",
	       pkt.xfer_mode, pkt.vdisk_block_size, pkt.max_xfer_size);

	return vio_ldc_send(&port->vio, &pkt, sizeof(pkt));
}

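/* Process the server's attribute ACK/NACK: validate the disk type and
 * block size, then latch the negotiated parameters into the vdc_port.
 */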
static int vdc_handle_attr(struct vio_driver_state *vio, void *arg)
{
	struct vdc_port *port = to_vdc_port(vio);
	struct vio_disk_attr_info *pkt = arg;

	viodbg(HS, "GOT ATTR stype[0x%x] ops[%llx] disk_size[%llu] disk_type[%x] "
	       "mtype[0x%x] xfer_mode[0x%x] blksz[%u] max_xfer[%llu]\n",
	       pkt->tag.stype, pkt->operations,
	       pkt->vdisk_size, pkt->vdisk_type, pkt->vdisk_mtype,
	       pkt->xfer_mode, pkt->vdisk_block_size,
	       pkt->max_xfer_size);

	if (pkt->tag.stype == VIO_SUBTYPE_ACK) {
		switch (pkt->vdisk_type) {
		case VD_DISK_TYPE_DISK:
		case VD_DISK_TYPE_SLICE:
			break;

		default:
			printk(KERN_ERR PFX "%s: Bogus vdisk_type 0x%x\n",
			       vio->name, pkt->vdisk_type);
			return -ECONNRESET;
		}

		if (pkt->vdisk_block_size > port->vdisk_block_size) {
			printk(KERN_ERR PFX "%s: BLOCK size increased %u --> %u\n",
			       vio->name,
			       port->vdisk_block_size, pkt->vdisk_block_size);
			return -ECONNRESET;
		}

		port->operations = pkt->operations;
		port->vdisk_type = pkt->vdisk_type;
		if (vdc_version_supported(port, 1, 1)) {
			port->vdisk_size = pkt->vdisk_size;
			port->vdisk_mtype = pkt->vdisk_mtype;
		}
		if (pkt->max_xfer_size < port->max_xfer_size)
			port->max_xfer_size = pkt->max_xfer_size;
		port->vdisk_block_size = pkt->vdisk_block_size;
		return 0;
	} else {
		printk(KERN_ERR PFX "%s: Attribute NACK\n", vio->name);

		return -ECONNRESET;
	}
}

static void vdc_end_special(struct vdc_port *port, struct vio_disk_desc *desc)
{
	int err = desc->status;

	vdc_finish(&port->vio, -err, WAITING_FOR_GEN_CMD);
}

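/* Retire one completed descriptor: unmap its LDC cookies, mark it free,
 * advance the consumer index, and end the block request (or complete
 * the waiter if this was a driver-internal generic command).
 */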
static void vdc_end_one(struct vdc_port *port, struct vio_dring_state *dr,
			unsigned int index)
{
	struct vio_disk_desc *desc = vio_dring_entry(dr, index);
	struct vdc_req_entry *rqe = &port->rq_arr[index];
	struct request *req;

	if (unlikely(desc->hdr.state != VIO_DESC_DONE))
		return;

	ldc_unmap(port->vio.lp, desc->cookies, desc->ncookies);
	desc->hdr.state = VIO_DESC_FREE;
	dr->cons = (index + 1) & (VDC_TX_RING_SIZE - 1);

	req = rqe->req;
	if (req == NULL) {
		vdc_end_special(port, desc);
		return;
	}

	rqe->req = NULL;

	__blk_end_request(req, (desc->status ? -EIO : 0), desc->size);

	/* Restart the blk queue once the ring is at least half empty. */
	if (blk_queue_stopped(port->disk->queue) &&
	    vdc_tx_dring_avail(dr) * 100 / VDC_TX_RING_SIZE >= 50)
		blk_start_queue(port->disk->queue);
}

static int vdc_ack(struct vdc_port *port, void *msgbuf)
{
	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
	struct vio_dring_data *pkt = msgbuf;

	if (unlikely(pkt->dring_ident != dr->ident ||
		     pkt->start_idx != pkt->end_idx ||
		     pkt->start_idx >= VDC_TX_RING_SIZE))
		return 0;

	vdc_end_one(port, dr, pkt->start_idx);

	return 0;
}

static int vdc_nack(struct vdc_port *port, void *msgbuf)
{
	/* XXX Implement me XXX */
	return 0;
}

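/* LDC event callback.  Link state changes are handed to the generic VIO
 * layer; on DATA_READY all queued messages are drained, with data
 * ACK/NACKs handled here and control packets routed to the VIO control
 * packet engine.
 */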
static void vdc_event(void *arg, int event)
{
	struct vdc_port *port = arg;
	struct vio_driver_state *vio = &port->vio;
	unsigned long flags;
	int err;

	spin_lock_irqsave(&vio->lock, flags);

	if (unlikely(event == LDC_EVENT_RESET ||
		     event == LDC_EVENT_UP)) {
		vio_link_state_change(vio, event);
		spin_unlock_irqrestore(&vio->lock, flags);
		return;
	}

	if (unlikely(event != LDC_EVENT_DATA_READY)) {
		printk(KERN_WARNING PFX "Unexpected LDC event %d\n", event);
		spin_unlock_irqrestore(&vio->lock, flags);
		return;
	}

	err = 0;
	while (1) {
		union {
			struct vio_msg_tag tag;
			u64 raw[8];
		} msgbuf;

		err = ldc_read(vio->lp, &msgbuf, sizeof(msgbuf));
		if (unlikely(err < 0)) {
			if (err == -ECONNRESET)
				vio_conn_reset(vio);
			break;
		}
		if (err == 0)
			break;
		viodbg(DATA, "TAG [%02x:%02x:%04x:%08x]\n",
		       msgbuf.tag.type,
		       msgbuf.tag.stype,
		       msgbuf.tag.stype_env,
		       msgbuf.tag.sid);
		err = vio_validate_sid(vio, &msgbuf.tag);
		if (err < 0)
			break;

		if (likely(msgbuf.tag.type == VIO_TYPE_DATA)) {
			if (msgbuf.tag.stype == VIO_SUBTYPE_ACK)
				err = vdc_ack(port, &msgbuf);
			else if (msgbuf.tag.stype == VIO_SUBTYPE_NACK)
				err = vdc_nack(port, &msgbuf);
			else
				err = vdc_handle_unknown(port, &msgbuf);
		} else if (msgbuf.tag.type == VIO_TYPE_CTRL) {
			err = vio_control_pkt_engine(vio, &msgbuf);
		} else {
			err = vdc_handle_unknown(port, &msgbuf);
		}
		if (err < 0)
			break;
	}
	if (err < 0)
		vdc_finish(&port->vio, err, WAITING_FOR_ANY);
	spin_unlock_irqrestore(&vio->lock, flags);
}

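/* Tell the server that new descriptors are ready.  vio_ldc_send() can
 * return -EAGAIN while the channel is congested, so retry with an
 * exponential backoff capped at 128 microseconds.
 */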
static int __vdc_tx_trigger(struct vdc_port *port)
{
	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
	struct vio_dring_data hdr = {
		.tag = {
			.type		= VIO_TYPE_DATA,
			.stype		= VIO_SUBTYPE_INFO,
			.stype_env	= VIO_DRING_DATA,
			.sid		= vio_send_sid(&port->vio),
		},
		.dring_ident		= dr->ident,
		.start_idx		= dr->prod,
		.end_idx		= dr->prod,
	};
	int err, delay;

	hdr.seq = dr->snd_nxt;
	delay = 1;
	do {
		err = vio_ldc_send(&port->vio, &hdr, sizeof(hdr));
		if (err > 0) {
			dr->snd_nxt++;
			break;
		}
		udelay(delay);
		if ((delay <<= 1) > 128)
			delay = 128;
	} while (err == -EAGAIN);

	return err;
}

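/* Build and publish one I/O descriptor for @req.  Runs under the queue
 * lock (which is port->vio.lock).  The on-stack scatterlist is bounded
 * by port->ring_cookies, which is sized in vdc_port_probe().
 */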
static int __send_request(struct request *req)
{
	struct vdc_port *port = req->rq_disk->private_data;
	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
	struct scatterlist sg[port->ring_cookies];
	struct vdc_req_entry *rqe;
	struct vio_disk_desc *desc;
	unsigned int map_perm;
	int nsg, err, i;
	u64 len;
	u8 op;

	map_perm = LDC_MAP_SHADOW | LDC_MAP_DIRECT | LDC_MAP_IO;

	if (rq_data_dir(req) == READ) {
		map_perm |= LDC_MAP_W;
		op = VD_OP_BREAD;
	} else {
		map_perm |= LDC_MAP_R;
		op = VD_OP_BWRITE;
	}

	sg_init_table(sg, port->ring_cookies);
	nsg = blk_rq_map_sg(req->q, req, sg);

	len = 0;
	for (i = 0; i < nsg; i++)
		len += sg[i].length;

	desc = vio_dring_cur(dr);

	err = ldc_map_sg(port->vio.lp, sg, nsg,
			 desc->cookies, port->ring_cookies,
			 map_perm);
	if (err < 0) {
		printk(KERN_ERR PFX "ldc_map_sg() failure, err=%d.\n", err);
		return err;
	}

	rqe = &port->rq_arr[dr->prod];
	rqe->req = req;

	desc->hdr.ack = VIO_ACK_ENABLE;
	desc->req_id = port->req_id;
	desc->operation = op;
	if (port->vdisk_type == VD_DISK_TYPE_DISK)
		desc->slice = 0xff;
	else
		desc->slice = 0;
	desc->status = ~0;
	/* Convert from 512-byte sectors to device blocks.  */
	desc->offset = (blk_rq_pos(req) << 9) / port->vdisk_block_size;
	desc->size = len;
	desc->ncookies = err;

	/* This has to be a non-SMP write barrier because we are writing
	 * to memory which is shared with the peer LDOM.
	 */
	wmb();
	desc->hdr.state = VIO_DESC_READY;

	err = __vdc_tx_trigger(port);
	if (err < 0) {
		printk(KERN_ERR PFX "vdc_tx_trigger() failure, err=%d\n", err);
	} else {
		port->req_id++;
		dr->prod = (dr->prod + 1) & (VDC_TX_RING_SIZE - 1);
	}

	return err;
}

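/* Request function, called with the queue lock held.  The queue is
 * stopped when the TX ring fills up; vdc_end_one() restarts it once the
 * ring is at least half empty again.
 */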
static void do_vdc_request(struct request_queue *rq)
{
	struct request *req;

	while ((req = blk_peek_request(rq)) != NULL) {
		struct vdc_port *port;
		struct vio_dring_state *dr;

		port = req->rq_disk->private_data;
		dr = &port->vio.drings[VIO_DRIVER_TX_RING];
		if (unlikely(vdc_tx_dring_avail(dr) < 1))
			goto wait;

		blk_start_request(req);

		if (__send_request(req) < 0) {
			blk_requeue_request(rq, req);
wait:
			/* Avoid pointless unplugs. */
			blk_stop_queue(rq);
			break;
		}
	}
}

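/* Synchronously issue a non-block-I/O command (VD_OP_*) and sleep until
 * the server ACKs it, for example:
 *
 *	err = generic_request(port, VD_OP_GET_DISKGEOM, &geom, sizeof(geom));
 *
 * Must be called from process context; VD_OP_BREAD/BWRITE are rejected.
 */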
static int generic_request(struct vdc_port *port, u8 op, void *buf, int len)
{
	struct vio_dring_state *dr;
	struct vio_completion comp;
	struct vio_disk_desc *desc;
	unsigned int map_perm;
	unsigned long flags;
	int op_len, err;
	void *req_buf;

	/* port->operations is a bitmask of the VD_OP_* codes the server
	 * advertised in its attribute ACK.
	 */
	if (!(((u64)1 << (u64)op) & port->operations))
		return -EOPNOTSUPP;

	switch (op) {
	case VD_OP_BREAD:
	case VD_OP_BWRITE:
	default:
		return -EINVAL;

	case VD_OP_FLUSH:
		op_len = 0;
		map_perm = 0;
		break;

	case VD_OP_GET_WCE:
		op_len = sizeof(u32);
		map_perm = LDC_MAP_W;
		break;

	case VD_OP_SET_WCE:
		op_len = sizeof(u32);
		map_perm = LDC_MAP_R;
		break;

	case VD_OP_GET_VTOC:
		op_len = sizeof(struct vio_disk_vtoc);
		map_perm = LDC_MAP_W;
		break;

	case VD_OP_SET_VTOC:
		op_len = sizeof(struct vio_disk_vtoc);
		map_perm = LDC_MAP_R;
		break;

	case VD_OP_GET_DISKGEOM:
		op_len = sizeof(struct vio_disk_geom);
		map_perm = LDC_MAP_W;
		break;

	case VD_OP_SET_DISKGEOM:
		op_len = sizeof(struct vio_disk_geom);
		map_perm = LDC_MAP_R;
		break;

	case VD_OP_SCSICMD:
		op_len = 16;
		map_perm = LDC_MAP_RW;
		break;

	case VD_OP_GET_DEVID:
		op_len = sizeof(struct vio_disk_devid);
		map_perm = LDC_MAP_W;
		break;

	case VD_OP_GET_EFI:
	case VD_OP_SET_EFI:
		return -EOPNOTSUPP;
	}

	map_perm |= LDC_MAP_SHADOW | LDC_MAP_DIRECT | LDC_MAP_IO;

	/* Round the transfer length up to an 8-byte multiple. */
	op_len = (op_len + 7) & ~7;
	req_buf = kzalloc(op_len, GFP_KERNEL);
	if (!req_buf)
		return -ENOMEM;

	if (len > op_len)
		len = op_len;

	if (map_perm & LDC_MAP_R)
		memcpy(req_buf, buf, len);

	spin_lock_irqsave(&port->vio.lock, flags);

	dr = &port->vio.drings[VIO_DRIVER_TX_RING];

	/* XXX If we want to use this code generically we have to
	 * XXX handle TX ring exhaustion etc.
	 */
	desc = vio_dring_cur(dr);

	err = ldc_map_single(port->vio.lp, req_buf, op_len,
			     desc->cookies, port->ring_cookies,
			     map_perm);
	if (err < 0) {
		spin_unlock_irqrestore(&port->vio.lock, flags);
		kfree(req_buf);
		return err;
	}

	init_completion(&comp.com);
	comp.waiting_for = WAITING_FOR_GEN_CMD;
	port->vio.cmp = &comp;

	desc->hdr.ack = VIO_ACK_ENABLE;
	desc->req_id = port->req_id;
	desc->operation = op;
	desc->slice = 0;
	desc->status = ~0;
	desc->offset = 0;
	desc->size = op_len;
	desc->ncookies = err;

	/* This has to be a non-SMP write barrier because we are writing
	 * to memory which is shared with the peer LDOM.
	 */
	wmb();
	desc->hdr.state = VIO_DESC_READY;

	err = __vdc_tx_trigger(port);
	if (err >= 0) {
		port->req_id++;
		dr->prod = (dr->prod + 1) & (VDC_TX_RING_SIZE - 1);
		spin_unlock_irqrestore(&port->vio.lock, flags);

		wait_for_completion(&comp.com);
		err = comp.err;
	} else {
		port->vio.cmp = NULL;
		spin_unlock_irqrestore(&port->vio.lock, flags);
	}

	if (map_perm & LDC_MAP_W)
		memcpy(buf, req_buf, len);

	kfree(req_buf);

	return err;
}

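/* Allocate and export the TX descriptor ring.  Each entry is a
 * vio_disk_desc followed by port->ring_cookies LDC transfer cookies,
 * so the entry size is only known at runtime.
 */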
static int vdc_alloc_tx_ring(struct vdc_port *port)
{
	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
	unsigned long len, entry_size;
	int ncookies;
	void *dring;

	entry_size = sizeof(struct vio_disk_desc) +
		(sizeof(struct ldc_trans_cookie) * port->ring_cookies);
	len = (VDC_TX_RING_SIZE * entry_size);

	ncookies = VIO_MAX_RING_COOKIES;
	dring = ldc_alloc_exp_dring(port->vio.lp, len,
				    dr->cookies, &ncookies,
				    (LDC_MAP_SHADOW |
				     LDC_MAP_DIRECT |
				     LDC_MAP_RW));
	if (IS_ERR(dring))
		return PTR_ERR(dring);

	dr->base = dring;
	dr->entry_size = entry_size;
	dr->num_entries = VDC_TX_RING_SIZE;
	dr->prod = dr->cons = 0;
	dr->pending = VDC_TX_RING_SIZE;
	dr->ncookies = ncookies;

	return 0;
}

static void vdc_free_tx_ring(struct vdc_port *port)
{
	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];

	if (dr->base) {
		ldc_free_exp_dring(port->vio.lp, dr->base,
				   (dr->entry_size * dr->num_entries),
				   dr->cookies, dr->ncookies);
		dr->base = NULL;
		dr->entry_size = 0;
		dr->num_entries = 0;
		dr->pending = 0;
		dr->ncookies = 0;
	}
}

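/* Bring the VIO link up, wait for the handshake to complete, then size
 * the virtual disk and register a gendisk for it.
 */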
static int probe_disk(struct vdc_port *port)
{
	struct vio_completion comp;
	struct request_queue *q;
	struct gendisk *g;
	int err;

	init_completion(&comp.com);
	comp.err = 0;
	comp.waiting_for = WAITING_FOR_LINK_UP;
	port->vio.cmp = &comp;

	vio_port_up(&port->vio);

	wait_for_completion(&comp.com);
	if (comp.err)
		return comp.err;

	if (vdc_version_supported(port, 1, 1)) {
		/* vdisk_size should be set during the handshake; if it
		 * wasn't, then the underlying disk is reserved by another
		 * system.
		 */
		if (port->vdisk_size == -1)
			return -ENODEV;
	} else {
		struct vio_disk_geom geom;

		err = generic_request(port, VD_OP_GET_DISKGEOM,
				      &geom, sizeof(geom));
		if (err < 0) {
			printk(KERN_ERR PFX "VD_OP_GET_DISKGEOM returns "
			       "error %d\n", err);
			return err;
		}
		port->vdisk_size = ((u64)geom.num_cyl *
				    (u64)geom.num_hd *
				    (u64)geom.num_sec);
	}

	q = blk_init_queue(do_vdc_request, &port->vio.lock);
	if (!q) {
		printk(KERN_ERR PFX "%s: Could not allocate queue.\n",
		       port->vio.name);
		return -ENOMEM;
	}
	g = alloc_disk(1 << PARTITION_SHIFT);
	if (!g) {
		printk(KERN_ERR PFX "%s: Could not allocate gendisk.\n",
		       port->vio.name);
		blk_cleanup_queue(q);
		return -ENOMEM;
	}

	port->disk = g;

	/* Each segment in a request is up to an aligned page in size. */
	blk_queue_segment_boundary(q, PAGE_SIZE - 1);
	blk_queue_max_segment_size(q, PAGE_SIZE);

	blk_queue_max_segments(q, port->ring_cookies);
	blk_queue_max_hw_sectors(q, port->max_xfer_size);
	g->major = vdc_major;
	g->first_minor = port->vio.vdev->dev_no << PARTITION_SHIFT;
	strcpy(g->disk_name, port->disk_name);

	g->fops = &vdc_fops;
	g->queue = q;
	g->private_data = port;
	g->driverfs_dev = &port->vio.vdev->dev;

	set_capacity(g, port->vdisk_size);

	if (vdc_version_supported(port, 1, 1)) {
		switch (port->vdisk_mtype) {
		case VD_MEDIA_TYPE_CD:
			pr_info(PFX "Virtual CDROM %s\n", port->disk_name);
			g->flags |= GENHD_FL_CD;
			g->flags |= GENHD_FL_REMOVABLE;
			set_disk_ro(g, 1);
			break;

		case VD_MEDIA_TYPE_DVD:
			pr_info(PFX "Virtual DVD %s\n", port->disk_name);
			g->flags |= GENHD_FL_CD;
			g->flags |= GENHD_FL_REMOVABLE;
			set_disk_ro(g, 1);
			break;

		case VD_MEDIA_TYPE_FIXED:
			pr_info(PFX "Virtual Hard disk %s\n", port->disk_name);
			break;
		}
	}

	pr_info(PFX "%s: %u sectors (%u MB) protocol %d.%d\n",
		g->disk_name,
		port->vdisk_size, (port->vdisk_size >> (20 - 9)),
		port->vio.ver.major, port->vio.ver.minor);

	add_disk(g);

	return 0;
}

796 
797 static struct ldc_channel_config vdc_ldc_cfg = {
798 	.event		= vdc_event,
799 	.mtu		= 64,
800 	.mode		= LDC_MODE_UNRELIABLE,
801 };
802 
803 static struct vio_driver_ops vdc_vio_ops = {
804 	.send_attr		= vdc_send_attr,
805 	.handle_attr		= vdc_handle_attr,
806 	.handshake_complete	= vdc_handshake_complete,
807 };
808 
809 static void print_version(void)
810 {
811 	static int version_printed;
812 
813 	if (version_printed++ == 0)
814 		printk(KERN_INFO "%s", version);
815 }
816 
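/* Probe one "vdc-port" machine-description node: allocate the port,
 * compute the transfer limits, set up the LDC channel and TX ring, and
 * register the disk.
 */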
static int vdc_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
{
	struct mdesc_handle *hp;
	struct vdc_port *port;
	int err;

	print_version();

	hp = mdesc_grab();

	err = -ENODEV;
	if ((vdev->dev_no << PARTITION_SHIFT) & ~(u64)MINORMASK) {
		printk(KERN_ERR PFX "Port id [%llu] too large.\n",
		       vdev->dev_no);
		goto err_out_release_mdesc;
	}

	port = kzalloc(sizeof(*port), GFP_KERNEL);
	err = -ENOMEM;
	if (!port) {
		printk(KERN_ERR PFX "Cannot allocate vdc_port.\n");
		goto err_out_release_mdesc;
	}

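	/* Disk names run vdiska..vdiskz, then vdiskaa onward once
	 * dev_no reaches 26.
	 */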
	if (vdev->dev_no >= 26)
		snprintf(port->disk_name, sizeof(port->disk_name),
			 VDCBLK_NAME "%c%c",
			 'a' + ((int)vdev->dev_no / 26) - 1,
			 'a' + ((int)vdev->dev_no % 26));
	else
		snprintf(port->disk_name, sizeof(port->disk_name),
			 VDCBLK_NAME "%c", 'a' + ((int)vdev->dev_no % 26));
	port->vdisk_size = -1;

	err = vio_driver_init(&port->vio, vdev, VDEV_DISK,
			      vdc_versions, ARRAY_SIZE(vdc_versions),
			      &vdc_vio_ops, port->disk_name);
	if (err)
		goto err_out_free_port;

	port->vdisk_block_size = 512;
	port->max_xfer_size = ((128 * 1024) / port->vdisk_block_size);
	/* Enough cookies for a maximal transfer, plus two in case the
	 * buffer straddles page boundaries at either end.
	 */
	port->ring_cookies = ((port->max_xfer_size *
			       port->vdisk_block_size) / PAGE_SIZE) + 2;

	err = vio_ldc_alloc(&port->vio, &vdc_ldc_cfg, port);
	if (err)
		goto err_out_free_port;

	err = vdc_alloc_tx_ring(port);
	if (err)
		goto err_out_free_ldc;

	err = probe_disk(port);
	if (err)
		goto err_out_free_tx_ring;

	dev_set_drvdata(&vdev->dev, port);

	mdesc_release(hp);

	return 0;

err_out_free_tx_ring:
	vdc_free_tx_ring(port);

err_out_free_ldc:
	vio_ldc_free(&port->vio);

err_out_free_port:
	kfree(port);

err_out_release_mdesc:
	mdesc_release(hp);
	return err;
}

893 
894 static int vdc_port_remove(struct vio_dev *vdev)
895 {
896 	struct vdc_port *port = dev_get_drvdata(&vdev->dev);
897 
898 	if (port) {
899 		del_timer_sync(&port->vio.timer);
900 
901 		vdc_free_tx_ring(port);
902 		vio_ldc_free(&port->vio);
903 
904 		dev_set_drvdata(&vdev->dev, NULL);
905 
906 		kfree(port);
907 	}
908 	return 0;
909 }
910 
911 static const struct vio_device_id vdc_port_match[] = {
912 	{
913 		.type = "vdc-port",
914 	},
915 	{},
916 };
917 MODULE_DEVICE_TABLE(vio, vdc_port_match);
918 
919 static struct vio_driver vdc_port_driver = {
920 	.id_table	= vdc_port_match,
921 	.probe		= vdc_port_probe,
922 	.remove		= vdc_port_remove,
923 	.name		= "vdc_port",
924 };
925 
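/* Passing 0 to register_blkdev() asks the block layer to allocate an
 * unused major number dynamically and return it.
 */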
static int __init vdc_init(void)
{
	int err;

	err = register_blkdev(0, VDCBLK_NAME);
	if (err < 0)
		goto out_err;

	vdc_major = err;

	err = vio_register_driver(&vdc_port_driver);
	if (err)
		goto out_unregister_blkdev;

	return 0;

out_unregister_blkdev:
	unregister_blkdev(vdc_major, VDCBLK_NAME);
	vdc_major = 0;

out_err:
	return err;
}

static void __exit vdc_exit(void)
{
	vio_unregister_driver(&vdc_port_driver);
	unregister_blkdev(vdc_major, VDCBLK_NAME);
}

module_init(vdc_init);
module_exit(vdc_exit);
958