xref: /openbmc/linux/drivers/misc/ibmvmc.c (revision 93f5715e)
1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * IBM Power Systems Virtual Management Channel Support.
4  *
5  * Copyright (c) 2004, 2018 IBM Corp.
6  *   Dave Engebretsen engebret@us.ibm.com
7  *   Steven Royer seroyer@linux.vnet.ibm.com
8  *   Adam Reznechek adreznec@linux.vnet.ibm.com
9  *   Bryant G. Ly <bryantly@linux.vnet.ibm.com>
10  */
11 
12 #include <linux/module.h>
13 #include <linux/kernel.h>
14 #include <linux/kthread.h>
15 #include <linux/major.h>
16 #include <linux/string.h>
17 #include <linux/fcntl.h>
18 #include <linux/slab.h>
19 #include <linux/poll.h>
20 #include <linux/init.h>
21 #include <linux/fs.h>
22 #include <linux/interrupt.h>
23 #include <linux/spinlock.h>
24 #include <linux/percpu.h>
25 #include <linux/delay.h>
26 #include <linux/uaccess.h>
27 #include <linux/io.h>
28 #include <linux/miscdevice.h>
29 #include <linux/sched/signal.h>
30 
31 #include <asm/byteorder.h>
32 #include <asm/irq.h>
33 #include <asm/vio.h>
34 
35 #include "ibmvmc.h"
36 
37 #define IBMVMC_DRIVER_VERSION "1.0"
38 
39 /*
40  * Static global variables
41  */
42 static DECLARE_WAIT_QUEUE_HEAD(ibmvmc_read_wait);
43 
44 static const char ibmvmc_driver_name[] = "ibmvmc";
45 
46 static struct ibmvmc_struct ibmvmc;
47 static struct ibmvmc_hmc hmcs[MAX_HMCS];
48 static struct crq_server_adapter ibmvmc_adapter;
49 
50 static int ibmvmc_max_buf_pool_size = DEFAULT_BUF_POOL_SIZE;
51 static int ibmvmc_max_hmcs = DEFAULT_HMCS;
52 static int ibmvmc_max_mtu = DEFAULT_MTU;
53 
54 static inline long h_copy_rdma(s64 length, u64 sliobn, u64 slioba,
55 			       u64 dliobn, u64 dlioba)
56 {
57 	long rc = 0;
58 
59 	/* Ensure all writes to source memory are visible before hcall */
60 	dma_wmb();
61 	pr_debug("ibmvmc: h_copy_rdma(0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx)\n",
62 		 length, sliobn, slioba, dliobn, dlioba);
63 	rc = plpar_hcall_norets(H_COPY_RDMA, length, sliobn, slioba,
64 				dliobn, dlioba);
65 	pr_debug("ibmvmc: h_copy_rdma rc = 0x%lx\n", rc);
66 
67 	return rc;
68 }
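
/*
 * A minimal usage sketch (hedged; it simply mirrors how ibmvmc_send_msg
 * later in this file calls the helper): push the local copy of a buffer
 * to the hypervisor before signaling that a message is ready.
 *
 *	rc = h_copy_rdma(msg_len,
 *			 adapter->liobn, buffer->dma_addr_local,
 *			 adapter->riobn, buffer->dma_addr_remote);
 *	if (rc)
 *		return rc;	- the data never reached the partner
 *
 * Swapping the (liobn, lioba) pairs moves data in the other direction,
 * as ibmvmc_recv_msg does when pulling a message into the partition.
 */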
69 
70 static inline void h_free_crq(uint32_t unit_address)
71 {
72 	long rc = 0;
73 
74 	do {
75 		if (H_IS_LONG_BUSY(rc))
76 			msleep(get_longbusy_msecs(rc));
77 
78 		rc = plpar_hcall_norets(H_FREE_CRQ, unit_address);
79 	} while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));
80 }
81 
82 /**
83  * h_request_vmc - request a hypervisor virtual management channel device
84  * @vmc_index: drc index of the vmc device created
85  *
86  * Requests the hypervisor create a new virtual management channel device,
87  * allowing this partition to send hypervisor virtualization control
88  * commands.
89  *
90  * Return:
91  *	0 - Success
92  *	Non-zero - Failure
93  */
94 static inline long h_request_vmc(u32 *vmc_index)
95 {
96 	long rc = 0;
97 	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
98 
99 	do {
100 		if (H_IS_LONG_BUSY(rc))
101 			msleep(get_longbusy_msecs(rc));
102 
103 		/* Call to request the VMC device from phyp */
104 		rc = plpar_hcall(H_REQUEST_VMC, retbuf);
105 		pr_debug("ibmvmc: %s rc = 0x%lx\n", __func__, rc);
106 		*vmc_index = retbuf[0];
107 	} while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));
108 
109 	return rc;
110 }
111 
112 /* routines for managing a command/response queue */
113 /**
114  * ibmvmc_handle_event - Interrupt handler for crq events
115  * @irq:        number of irq to handle, not used
116  * @dev_instance: crq_server_adapter that received interrupt
117  *
118  * Disables interrupts and schedules ibmvmc_task
119  *
120  * Always returns IRQ_HANDLED
121  */
122 static irqreturn_t ibmvmc_handle_event(int irq, void *dev_instance)
123 {
124 	struct crq_server_adapter *adapter =
125 		(struct crq_server_adapter *)dev_instance;
126 
127 	vio_disable_interrupts(to_vio_dev(adapter->dev));
128 	tasklet_schedule(&adapter->work_task);
129 
130 	return IRQ_HANDLED;
131 }
132 
133 /**
134  * ibmvmc_release_crq_queue - Release CRQ Queue
135  *
136  * @adapter:	crq_server_adapter struct
137  *
138  * Frees the CRQ interrupt and tasklet, stops the reset thread, releases
139  * the CRQ with the hypervisor via h_free_crq, and unmaps and frees the
140  * queue page. Returns nothing.
141  */
142 static void ibmvmc_release_crq_queue(struct crq_server_adapter *adapter)
143 {
144 	struct vio_dev *vdev = to_vio_dev(adapter->dev);
145 	struct crq_queue *queue = &adapter->queue;
146 
147 	free_irq(vdev->irq, (void *)adapter);
148 	tasklet_kill(&adapter->work_task);
149 
150 	if (adapter->reset_task)
151 		kthread_stop(adapter->reset_task);
152 
153 	h_free_crq(vdev->unit_address);
154 	dma_unmap_single(adapter->dev,
155 			 queue->msg_token,
156 			 queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL);
157 	free_page((unsigned long)queue->msgs);
158 }
159 
160 /**
161  * ibmvmc_reset_crq_queue - Reset CRQ Queue
162  *
163  * @adapter:	crq_server_adapter struct
164  *
165  * This function calls h_free_crq and then calls H_REG_CRQ and does all the
166  * bookkeeping to get us back to where we can communicate.
167  *
168  * Return:
169  *	0 - Success
170  *	Non-Zero - Failure
171  */
172 static int ibmvmc_reset_crq_queue(struct crq_server_adapter *adapter)
173 {
174 	struct vio_dev *vdev = to_vio_dev(adapter->dev);
175 	struct crq_queue *queue = &adapter->queue;
176 	int rc = 0;
177 
178 	/* Close the CRQ */
179 	h_free_crq(vdev->unit_address);
180 
181 	/* Clean out the queue */
182 	memset(queue->msgs, 0x00, PAGE_SIZE);
183 	queue->cur = 0;
184 
185 	/* And re-open it again */
186 	rc = plpar_hcall_norets(H_REG_CRQ,
187 				vdev->unit_address,
188 				queue->msg_token, PAGE_SIZE);
189 	if (rc == 2)
190 		/* Adapter is good, but other end is not ready */
191 		dev_warn(adapter->dev, "Partner adapter not ready\n");
192 	else if (rc != 0)
193 		dev_err(adapter->dev, "couldn't register crq--rc 0x%x\n", rc);
194 
195 	return rc;
196 }
197 
198 /**
199  * crq_queue_next_crq - Returns the next entry in message queue
200  * @queue:      crq_queue to use
201  *
202  * Returns pointer to next entry in queue, or NULL if there are no new
203  * entries in the CRQ.
204  */
205 static struct ibmvmc_crq_msg *crq_queue_next_crq(struct crq_queue *queue)
206 {
207 	struct ibmvmc_crq_msg *crq;
208 	unsigned long flags;
209 
210 	spin_lock_irqsave(&queue->lock, flags);
211 	crq = &queue->msgs[queue->cur];
212 	if (crq->valid & 0x80) {
213 		if (++queue->cur == queue->size)
214 			queue->cur = 0;
215 
216 		/* Ensure the read of the valid bit occurs before reading any
217 		 * other bits of the CRQ entry
218 		 */
219 		dma_rmb();
220 	} else {
221 		crq = NULL;
222 	}
223 
224 	spin_unlock_irqrestore(&queue->lock, flags);
225 
226 	return crq;
227 }
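
/*
 * Typical consumer loop, as a sketch: the adapter's tasklet handler
 * (defined outside this section) drains the queue in essentially this
 * shape, dispatching each message and then handing the slot back to the
 * hypervisor by clearing the valid byte.
 *
 *	struct ibmvmc_crq_msg *crq;
 *
 *	while ((crq = crq_queue_next_crq(&adapter->queue)) != NULL) {
 *		ibmvmc_crq_process(adapter, crq);
 *		crq->valid = 0x00;
 *	}
 */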
228 
229 /**
230  * ibmvmc_send_crq - Send CRQ
231  *
232  * @adapter:	crq_server_adapter struct
233  * @word1:	Word1 Data field
234  * @word2:	Word2 Data field
235  *
236  * Return:
237  *	0 - Success
238  *	Non-Zero - Failure
239  */
240 static long ibmvmc_send_crq(struct crq_server_adapter *adapter,
241 			    u64 word1, u64 word2)
242 {
243 	struct vio_dev *vdev = to_vio_dev(adapter->dev);
244 	long rc = 0;
245 
246 	dev_dbg(adapter->dev, "(0x%x, 0x%016llx, 0x%016llx)\n",
247 		vdev->unit_address, word1, word2);
248 
249 	/*
250 	 * Ensure the command buffer is flushed to memory before handing it
251 	 * over to the other side to prevent it from fetching any stale data.
252 	 */
253 	dma_wmb();
254 	rc = plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, word1, word2);
255 	dev_dbg(adapter->dev, "rc = 0x%lx\n", rc);
256 
257 	return rc;
258 }
259 
260 /**
261  * alloc_dma_buffer - Create DMA Buffer
262  *
263  * @vdev:	vio_dev struct
264  * @size:	Size field
265  * @dma_handle:	DMA address field
266  *
267  * Allocates a zeroed buffer and maps it into the adapter's I/O address
268  * space, returning the ioba through @dma_handle.
269  *
270  * Returns a pointer to the buffer
271  */
272 static void *alloc_dma_buffer(struct vio_dev *vdev, size_t size,
273 			      dma_addr_t *dma_handle)
274 {
275 	/* allocate memory */
276 	void *buffer = kzalloc(size, GFP_ATOMIC);
277 
278 	if (!buffer) {
279 		*dma_handle = 0;
280 		return NULL;
281 	}
282 
283 	/* DMA map */
284 	*dma_handle = dma_map_single(&vdev->dev, buffer, size,
285 				     DMA_BIDIRECTIONAL);
286 
287 	if (dma_mapping_error(&vdev->dev, *dma_handle)) {
288 		*dma_handle = 0;
289 		kzfree(buffer);
290 		return NULL;
291 	}
292 
293 	return buffer;
294 }
295 
296 /**
297  * free_dma_buffer - Free DMA Buffer
298  *
299  * @vdev:	vio_dev struct
300  * @size:	Size field
301  * @vaddr:	Address field
302  * @dma_handle:	DMA address field
303  *
304  * Unmaps the buffer from the adapter's I/O address space and frees it.
305  */
306 static void free_dma_buffer(struct vio_dev *vdev, size_t size, void *vaddr,
307 			    dma_addr_t dma_handle)
308 {
309 	/* DMA unmap */
310 	dma_unmap_single(&vdev->dev, dma_handle, size, DMA_BIDIRECTIONAL);
311 
312 	/* deallocate memory */
313 	kzfree(vaddr);
314 }
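
/*
 * These two helpers are always used as a pair. A sketch of the lifecycle
 * that ibmvmc_add_buffer/ibmvmc_rem_buffer implement later in this file
 * (error handling abbreviated):
 *
 *	dma_addr_t dma;
 *	void *va = alloc_dma_buffer(vdev, ibmvmc.max_mtu, &dma);
 *
 *	if (!va)
 *		return -ENOMEM;	- kzalloc or dma_map_single failed
 *
 *	- use va for local access, dma as the lioba for h_copy_rdma -
 *
 *	free_dma_buffer(vdev, ibmvmc.max_mtu, va, dma);
 */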
315 
316 /**
317  * ibmvmc_get_valid_hmc_buffer - Retrieve Valid HMC Buffer
318  *
319  * @hmc_index:	HMC Index Field
320  *
321  * Return:
322  *	Pointer to ibmvmc_buffer
323  */
324 static struct ibmvmc_buffer *ibmvmc_get_valid_hmc_buffer(u8 hmc_index)
325 {
326 	struct ibmvmc_buffer *buffer;
327 	struct ibmvmc_buffer *ret_buf = NULL;
328 	unsigned long i;
329 
330 	if (hmc_index > ibmvmc.max_hmc_index)
331 		return NULL;
332 
333 	buffer = hmcs[hmc_index].buffer;
334 
335 	for (i = 0; i < ibmvmc_max_buf_pool_size; i++) {
336 		if (buffer[i].valid && buffer[i].free &&
337 		    buffer[i].owner == VMC_BUF_OWNER_ALPHA) {
338 			buffer[i].free = 0;
339 			ret_buf = &buffer[i];
340 			break;
341 		}
342 	}
343 
344 	return ret_buf;
345 }
346 
347 /**
348  * ibmvmc_get_free_hmc_buffer - Get Free HMC Buffer
349  *
350  * @adapter:	crq_server_adapter struct
351  * @hmc_index:	Hmc Index field
352  *
353  * Return:
354  *	Pointer to ibmvmc_buffer
355  */
356 static struct ibmvmc_buffer *ibmvmc_get_free_hmc_buffer(struct crq_server_adapter *adapter,
357 							u8 hmc_index)
358 {
359 	struct ibmvmc_buffer *buffer;
360 	struct ibmvmc_buffer *ret_buf = NULL;
361 	unsigned long i;
362 
363 	if (hmc_index > ibmvmc.max_hmc_index) {
364 		dev_info(adapter->dev, "get_free_hmc_buffer: invalid hmc_index=0x%x\n",
365 			 hmc_index);
366 		return NULL;
367 	}
368 
369 	buffer = hmcs[hmc_index].buffer;
370 
371 	for (i = 0; i < ibmvmc_max_buf_pool_size; i++) {
372 		if (buffer[i].free &&
373 		    buffer[i].owner == VMC_BUF_OWNER_ALPHA) {
374 			buffer[i].free = 0;
375 			ret_buf = &buffer[i];
376 			break;
377 		}
378 	}
379 
380 	return ret_buf;
381 }
382 
383 /**
384  * ibmvmc_free_hmc_buffer - Free an HMC Buffer
385  *
386  * @hmc:	ibmvmc_hmc struct
387  * @buffer:	ibmvmc_buffer struct
388  *
389  */
390 static void ibmvmc_free_hmc_buffer(struct ibmvmc_hmc *hmc,
391 				   struct ibmvmc_buffer *buffer)
392 {
393 	unsigned long flags;
394 
395 	spin_lock_irqsave(&hmc->lock, flags);
396 	buffer->free = 1;
397 	spin_unlock_irqrestore(&hmc->lock, flags);
398 }
399 
400 /**
401  * ibmvmc_count_hmc_buffers - Count HMC Buffers
402  *
403  * @hmc_index:	HMC Index field
404  * @valid:	Valid number of buffers field
405  * @free:	Free number of buffers field
406  *
407  */
408 static void ibmvmc_count_hmc_buffers(u8 hmc_index, unsigned int *valid,
409 				     unsigned int *free)
410 {
411 	struct ibmvmc_buffer *buffer;
412 	unsigned long i;
413 	unsigned long flags;
414 
415 	if (hmc_index > ibmvmc.max_hmc_index)
416 		return;
417 
418 	if (!valid || !free)
419 		return;
420 
421 	*valid = 0; *free = 0;
422 
423 	buffer = hmcs[hmc_index].buffer;
424 	spin_lock_irqsave(&hmcs[hmc_index].lock, flags);
425 
426 	for (i = 0; i < ibmvmc_max_buf_pool_size; i++) {
427 		if (buffer[i].valid) {
428 			*valid = *valid + 1;
429 			if (buffer[i].free)
430 				*free = *free + 1;
431 		}
432 	}
433 
434 	spin_unlock_irqrestore(&hmcs[hmc_index].lock, flags);
435 }
436 
437 /**
438  * ibmvmc_get_free_hmc - Get Free HMC
439  *
440  * Return:
441  *	Pointer to an available HMC Connection
442  *	Null otherwise
443  */
444 static struct ibmvmc_hmc *ibmvmc_get_free_hmc(void)
445 {
446 	unsigned long i;
447 	unsigned long flags;
448 
449 	/*
450 	 * Find an available HMC connection.
451 	 */
452 	for (i = 0; i <= ibmvmc.max_hmc_index; i++) {
453 		spin_lock_irqsave(&hmcs[i].lock, flags);
454 		if (hmcs[i].state == ibmhmc_state_free) {
455 			hmcs[i].index = i;
456 			hmcs[i].state = ibmhmc_state_initial;
457 			spin_unlock_irqrestore(&hmcs[i].lock, flags);
458 			return &hmcs[i];
459 		}
460 		spin_unlock_irqrestore(&hmcs[i].lock, flags);
461 	}
462 
463 	return NULL;
464 }
465 
466 /**
467  * ibmvmc_return_hmc - Return an HMC Connection
468  *
469  * @hmc:		ibmvmc_hmc struct
470  * @release_readers:	if true, wake up readers blocked on this session
471  *
472  * This function releases the HMC connection back into the pool.
473  *
474  * Return:
475  *	0 - Success
476  *	Non-zero - Failure
477  */
478 static int ibmvmc_return_hmc(struct ibmvmc_hmc *hmc, bool release_readers)
479 {
480 	struct ibmvmc_buffer *buffer;
481 	struct crq_server_adapter *adapter;
482 	struct vio_dev *vdev;
483 	unsigned long i;
484 	unsigned long flags;
485 
486 	if (!hmc || !hmc->adapter)
487 		return -EIO;
488 
489 	if (release_readers) {
490 		if (hmc->file_session) {
491 			struct ibmvmc_file_session *session = hmc->file_session;
492 
493 			session->valid = 0;
494 			wake_up_interruptible(&ibmvmc_read_wait);
495 		}
496 	}
497 
498 	adapter = hmc->adapter;
499 	vdev = to_vio_dev(adapter->dev);
500 
501 	spin_lock_irqsave(&hmc->lock, flags);
502 	hmc->index = 0;
503 	hmc->state = ibmhmc_state_free;
504 	hmc->queue_head = 0;
505 	hmc->queue_tail = 0;
506 	buffer = hmc->buffer;
507 	for (i = 0; i < ibmvmc_max_buf_pool_size; i++) {
508 		if (buffer[i].valid) {
509 			free_dma_buffer(vdev,
510 					ibmvmc.max_mtu,
511 					buffer[i].real_addr_local,
512 					buffer[i].dma_addr_local);
513 			dev_dbg(adapter->dev, "Forgot buffer id 0x%lx\n", i);
514 		}
515 		memset(&buffer[i], 0, sizeof(struct ibmvmc_buffer));
516 
517 		hmc->queue_outbound_msgs[i] = VMC_INVALID_BUFFER_ID;
518 	}
519 
520 	spin_unlock_irqrestore(&hmc->lock, flags);
521 
522 	return 0;
523 }
524 
525 /**
526  * ibmvmc_send_open - Interface Open
527  * @buffer: Pointer to ibmvmc_buffer struct
528  * @hmc: Pointer to ibmvmc_hmc struct
529  *
530  * This command is sent by the management partition as the result of a
531  * management partition device request. It causes the hypervisor to
532  * prepare a set of data buffers for the management application connection
533  * indicated by the HMC index. A unique HMC index would be used if multiple
534  * management applications running concurrently were desired. Before
535  * responding to this command, the hypervisor must provide the management
536  * partition with at least one of these new buffers via the Add Buffer
537  * message, whose ownership field indicates the direction of message flow.
538  *
539  * Return:
540  *	0 - Success
541  *	Non-zero - Failure
542  */
543 static int ibmvmc_send_open(struct ibmvmc_buffer *buffer,
544 			    struct ibmvmc_hmc *hmc)
545 {
546 	struct ibmvmc_crq_msg crq_msg;
547 	struct crq_server_adapter *adapter;
548 	__be64 *crq_as_u64 = (__be64 *)&crq_msg;
549 	int rc = 0;
550 
551 	if (!hmc || !hmc->adapter)
552 		return -EIO;
553 
554 	adapter = hmc->adapter;
555 
556 	dev_dbg(adapter->dev, "send_open: 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx\n",
557 		(unsigned long)buffer->size, (unsigned long)adapter->liobn,
558 		(unsigned long)buffer->dma_addr_local,
559 		(unsigned long)adapter->riobn,
560 		(unsigned long)buffer->dma_addr_remote);
561 
562 	rc = h_copy_rdma(buffer->size,
563 			 adapter->liobn,
564 			 buffer->dma_addr_local,
565 			 adapter->riobn,
566 			 buffer->dma_addr_remote);
567 	if (rc) {
568 		dev_err(adapter->dev, "Error: In send_open, h_copy_rdma rc 0x%x\n",
569 			rc);
570 		return -EIO;
571 	}
572 
573 	hmc->state = ibmhmc_state_opening;
574 
575 	crq_msg.valid = 0x80;
576 	crq_msg.type = VMC_MSG_OPEN;
577 	crq_msg.status = 0;
578 	crq_msg.var1.rsvd = 0;
579 	crq_msg.hmc_session = hmc->session;
580 	crq_msg.hmc_index = hmc->index;
581 	crq_msg.var2.buffer_id = cpu_to_be16(buffer->id);
582 	crq_msg.rsvd = 0;
583 	crq_msg.var3.rsvd = 0;
584 
585 	ibmvmc_send_crq(adapter, be64_to_cpu(crq_as_u64[0]),
586 			be64_to_cpu(crq_as_u64[1]));
587 
588 	return rc;
589 }
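
/*
 * A note on the crq_as_u64 pattern used above and by the other senders:
 * the message is assembled in memory with its multi-byte fields already
 * converted via cpu_to_be16/cpu_to_be32, so the in-memory image is the
 * exact byte sequence the partner must observe. H_SEND_CRQ, however,
 * takes the two halves of the message as numeric register arguments, so
 * each half is loaded back with be64_to_cpu() to produce the value whose
 * big-endian representation equals those bytes:
 *
 *	__be64 *crq_as_u64 = (__be64 *)&crq_msg;
 *	ibmvmc_send_crq(adapter, be64_to_cpu(crq_as_u64[0]),
 *			be64_to_cpu(crq_as_u64[1]));
 *
 * On a big-endian kernel both conversions are no-ops.
 */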
590 
591 /**
592  * ibmvmc_send_close - Interface Close
593  * @hmc: Pointer to ibmvmc_hmc struct
594  *
595  * This command is sent by the management partition to terminate a
596  * management application to hypervisor connection. When this command is
597  * sent, the management partition has quiesced all I/O operations to all
598  * buffers associated with this management application connection, and
599  * has freed any storage for these buffers.
600  *
601  * Return:
602  *	0 - Success
603  *	Non-zero - Failure
604  */
605 static int ibmvmc_send_close(struct ibmvmc_hmc *hmc)
606 {
607 	struct ibmvmc_crq_msg crq_msg;
608 	struct crq_server_adapter *adapter;
609 	__be64 *crq_as_u64 = (__be64 *)&crq_msg;
610 	int rc = 0;
611 
612 	if (!hmc || !hmc->adapter)
613 		return -EIO;
614 
615 	adapter = hmc->adapter;
616 
617 	dev_info(adapter->dev, "CRQ send: close\n");
618 
619 	crq_msg.valid = 0x80;
620 	crq_msg.type = VMC_MSG_CLOSE;
621 	crq_msg.status = 0;
622 	crq_msg.var1.rsvd = 0;
623 	crq_msg.hmc_session = hmc->session;
624 	crq_msg.hmc_index = hmc->index;
625 	crq_msg.var2.rsvd = 0;
626 	crq_msg.rsvd = 0;
627 	crq_msg.var3.rsvd = 0;
628 
629 	ibmvmc_send_crq(adapter, be64_to_cpu(crq_as_u64[0]),
630 			be64_to_cpu(crq_as_u64[1]));
631 
632 	return rc;
633 }
634 
635 /**
636  * ibmvmc_send_capabilities - Send VMC Capabilities
637  *
638  * @adapter:	crq_server_adapter struct
639  *
640  * The capabilities message is an administrative message sent after the CRQ
641  * initialization sequence of messages and is used to exchange VMC capabilities
642  * between the management partition and the hypervisor. The management
643  * partition must send this message and the hypervisor must respond with a
644  * VMC Capabilities Response message before HMC interface messages can begin.
645  * Any HMC interface messages received before the exchange of capabilities
646  * has completed are dropped.
647  *
648  * Return:
649  *	0 - Success
650  */
651 static int ibmvmc_send_capabilities(struct crq_server_adapter *adapter)
652 {
653 	struct ibmvmc_admin_crq_msg crq_msg;
654 	__be64 *crq_as_u64 = (__be64 *)&crq_msg;
655 
656 	dev_dbg(adapter->dev, "ibmvmc: CRQ send: capabilities\n");
657 	crq_msg.valid = 0x80;
658 	crq_msg.type = VMC_MSG_CAP;
659 	crq_msg.status = 0;
660 	crq_msg.rsvd[0] = 0;
661 	crq_msg.rsvd[1] = 0;
662 	crq_msg.max_hmc = ibmvmc_max_hmcs;
663 	crq_msg.max_mtu = cpu_to_be32(ibmvmc_max_mtu);
664 	crq_msg.pool_size = cpu_to_be16(ibmvmc_max_buf_pool_size);
665 	crq_msg.crq_size = cpu_to_be16(adapter->queue.size);
666 	crq_msg.version = cpu_to_be16(IBMVMC_PROTOCOL_VERSION);
667 
668 	ibmvmc_send_crq(adapter, be64_to_cpu(crq_as_u64[0]),
669 			be64_to_cpu(crq_as_u64[1]));
670 
671 	ibmvmc.state = ibmvmc_state_capabilities;
672 
673 	return 0;
674 }
675 
676 /**
677  * ibmvmc_send_add_buffer_resp - Add Buffer Response
678  *
679  * @adapter:	crq_server_adapter struct
680  * @status:	Status field
681  * @hmc_session: HMC Session field
682  * @hmc_index:	HMC Index field
683  * @buffer_id:	Buffer Id field
684  *
685  * This command is sent by the management partition to the hypervisor in
686  * response to the Add Buffer message. The Status field indicates the result of
687  * the command.
688  *
689  * Return:
690  *	0 - Success
691  */
692 static int ibmvmc_send_add_buffer_resp(struct crq_server_adapter *adapter,
693 				       u8 status, u8 hmc_session,
694 				       u8 hmc_index, u16 buffer_id)
695 {
696 	struct ibmvmc_crq_msg crq_msg;
697 	__be64 *crq_as_u64 = (__be64 *)&crq_msg;
698 
699 	dev_dbg(adapter->dev, "CRQ send: add_buffer_resp\n");
700 	crq_msg.valid = 0x80;
701 	crq_msg.type = VMC_MSG_ADD_BUF_RESP;
702 	crq_msg.status = status;
703 	crq_msg.var1.rsvd = 0;
704 	crq_msg.hmc_session = hmc_session;
705 	crq_msg.hmc_index = hmc_index;
706 	crq_msg.var2.buffer_id = cpu_to_be16(buffer_id);
707 	crq_msg.rsvd = 0;
708 	crq_msg.var3.rsvd = 0;
709 
710 	ibmvmc_send_crq(adapter, be64_to_cpu(crq_as_u64[0]),
711 			be64_to_cpu(crq_as_u64[1]));
712 
713 	return 0;
714 }
715 
716 /**
717  * ibmvmc_send_rem_buffer_resp - Remove Buffer Response
718  *
719  * @adapter:	crq_server_adapter struct
720  * @status:	Status field
721  * @hmc_session: HMC Session field
722  * @hmc_index:	HMC Index field
723  * @buffer_id:	Buffer Id field
724  *
725  * This command is sent by the management partition to the hypervisor in
726  * response to the Remove Buffer message. The Buffer ID field indicates
727  * which buffer the management partition selected to remove. The Status
728  * field indicates the result of the command.
729  *
730  * Return:
731  *	0 - Success
732  */
733 static int ibmvmc_send_rem_buffer_resp(struct crq_server_adapter *adapter,
734 				       u8 status, u8 hmc_session,
735 				       u8 hmc_index, u16 buffer_id)
736 {
737 	struct ibmvmc_crq_msg crq_msg;
738 	__be64 *crq_as_u64 = (__be64 *)&crq_msg;
739 
740 	dev_dbg(adapter->dev, "CRQ send: rem_buffer_resp\n");
741 	crq_msg.valid = 0x80;
742 	crq_msg.type = VMC_MSG_REM_BUF_RESP;
743 	crq_msg.status = status;
744 	crq_msg.var1.rsvd = 0;
745 	crq_msg.hmc_session = hmc_session;
746 	crq_msg.hmc_index = hmc_index;
747 	crq_msg.var2.buffer_id = cpu_to_be16(buffer_id);
748 	crq_msg.rsvd = 0;
749 	crq_msg.var3.rsvd = 0;
750 
751 	ibmvmc_send_crq(adapter, be64_to_cpu(crq_as_u64[0]),
752 			be64_to_cpu(crq_as_u64[1]));
753 
754 	return 0;
755 }
756 
757 /**
758  * ibmvmc_send_msg - Signal Message
759  *
760  * @adapter:	crq_server_adapter struct
761  * @buffer:	ibmvmc_buffer struct
762  * @hmc:	ibmvmc_hmc struct
763  * @msg_len:	message length field
764  *
765  * This command is sent between the management partition and the hypervisor
766  * in order to signal the arrival of an HMC protocol message. The command
767  * can be sent by both the management partition and the hypervisor. It is
768  * used for all traffic between the management application and the hypervisor,
769  * regardless of who initiated the communication.
770  *
771  * There is no response to this message.
772  *
773  * Return:
774  *	0 - Success
775  *	Non-zero - Failure
776  */
777 static int ibmvmc_send_msg(struct crq_server_adapter *adapter,
778 			   struct ibmvmc_buffer *buffer,
779 			   struct ibmvmc_hmc *hmc, int msg_len)
780 {
781 	struct ibmvmc_crq_msg crq_msg;
782 	__be64 *crq_as_u64 = (__be64 *)&crq_msg;
783 	int rc = 0;
784 
785 	dev_dbg(adapter->dev, "CRQ send: rdma to HV\n");
786 	rc = h_copy_rdma(msg_len,
787 			 adapter->liobn,
788 			 buffer->dma_addr_local,
789 			 adapter->riobn,
790 			 buffer->dma_addr_remote);
791 	if (rc) {
792 		dev_err(adapter->dev, "Error in send_msg, h_copy_rdma rc 0x%x\n",
793 			rc);
794 		return rc;
795 	}
796 
797 	crq_msg.valid = 0x80;
798 	crq_msg.type = VMC_MSG_SIGNAL;
799 	crq_msg.status = 0;
800 	crq_msg.var1.rsvd = 0;
801 	crq_msg.hmc_session = hmc->session;
802 	crq_msg.hmc_index = hmc->index;
803 	crq_msg.var2.buffer_id = cpu_to_be16(buffer->id);
804 	crq_msg.var3.msg_len = cpu_to_be32(msg_len);
805 	dev_dbg(adapter->dev, "CRQ send: msg to HV 0x%llx 0x%llx\n",
806 		be64_to_cpu(crq_as_u64[0]), be64_to_cpu(crq_as_u64[1]));
807 
808 	buffer->owner = VMC_BUF_OWNER_HV;
809 	ibmvmc_send_crq(adapter, be64_to_cpu(crq_as_u64[0]),
810 			be64_to_cpu(crq_as_u64[1]));
811 
812 	return rc;
813 }
814 
815 /**
816  * ibmvmc_open - Open Session
817  *
818  * @inode:	inode struct
819  * @file:	file struct
820  *
821  * Return:
822  *	0 - Success
823  */
824 static int ibmvmc_open(struct inode *inode, struct file *file)
825 {
826 	struct ibmvmc_file_session *session;
827 	int rc = 0;
828 
829 	pr_debug("%s: inode = 0x%lx, file = 0x%lx, state = 0x%x\n", __func__,
830 		 (unsigned long)inode, (unsigned long)file,
831 		 ibmvmc.state);
832 
833 	session = kzalloc(sizeof(*session), GFP_KERNEL);
	if (!session)
		return -ENOMEM;
834 	session->file = file;
835 	file->private_data = session;
836 
837 	return rc;
838 }
839 
840 /**
841  * ibmvmc_close - Close Session
842  *
843  * @inode:	inode struct
844  * @file:	file struct
845  *
846  * Return:
847  *	0 - Success
848  *	Non-zero - Failure
849  */
850 static int ibmvmc_close(struct inode *inode, struct file *file)
851 {
852 	struct ibmvmc_file_session *session;
853 	struct ibmvmc_hmc *hmc;
854 	int rc = 0;
855 	unsigned long flags;
856 
857 	pr_debug("%s: file = 0x%lx, state = 0x%x\n", __func__,
858 		 (unsigned long)file, ibmvmc.state);
859 
860 	session = file->private_data;
861 	if (!session)
862 		return -EIO;
863 
864 	hmc = session->hmc;
865 	if (hmc) {
866 		if (!hmc->adapter)
867 			return -EIO;
868 
869 		if (ibmvmc.state == ibmvmc_state_failed) {
870 			dev_warn(hmc->adapter->dev, "close: state_failed\n");
871 			return -EIO;
872 		}
873 
874 		spin_lock_irqsave(&hmc->lock, flags);
875 		if (hmc->state >= ibmhmc_state_opening) {
876 			rc = ibmvmc_send_close(hmc);
877 			if (rc)
878 				dev_warn(hmc->adapter->dev, "close: send_close failed.\n");
879 		}
880 		spin_unlock_irqrestore(&hmc->lock, flags);
881 	}
882 
883 	kzfree(session);
884 
885 	return rc;
886 }
887 
888 /**
889  * ibmvmc_read - Read
890  *
891  * @file:	file struct
892  * @buf:	Character buffer
893  * @nbytes:	Size in bytes
894  * @ppos:	Offset
895  *
896  * Return:
897  *	Number of bytes read - Success
898  *	Negative errno - Failure
899  */
900 static ssize_t ibmvmc_read(struct file *file, char __user *buf, size_t nbytes,
901 			   loff_t *ppos)
902 {
903 	struct ibmvmc_file_session *session;
904 	struct ibmvmc_hmc *hmc;
905 	struct crq_server_adapter *adapter;
906 	struct ibmvmc_buffer *buffer;
907 	ssize_t n;
908 	ssize_t retval = 0;
909 	unsigned long flags;
910 	DEFINE_WAIT(wait);
911 
912 	pr_debug("ibmvmc: read: file = 0x%lx, buf = 0x%lx, nbytes = 0x%lx\n",
913 		 (unsigned long)file, (unsigned long)buf,
914 		 (unsigned long)nbytes);
915 
916 	if (nbytes == 0)
917 		return 0;
918 
919 	if (nbytes > ibmvmc.max_mtu) {
920 		pr_warn("ibmvmc: read: nbytes invalid 0x%x\n",
921 			(unsigned int)nbytes);
922 		return -EINVAL;
923 	}
924 
925 	session = file->private_data;
926 	if (!session) {
927 		pr_warn("ibmvmc: read: no session\n");
928 		return -EIO;
929 	}
930 
931 	hmc = session->hmc;
932 	if (!hmc) {
933 		pr_warn("ibmvmc: read: no hmc\n");
934 		return -EIO;
935 	}
936 
937 	adapter = hmc->adapter;
938 	if (!adapter) {
939 		pr_warn("ibmvmc: read: no adapter\n");
940 		return -EIO;
941 	}
942 
943 	do {
944 		prepare_to_wait(&ibmvmc_read_wait, &wait, TASK_INTERRUPTIBLE);
945 
946 		spin_lock_irqsave(&hmc->lock, flags);
947 		if (hmc->queue_tail != hmc->queue_head)
948 			/* Data is available */
949 			break;
950 
951 		spin_unlock_irqrestore(&hmc->lock, flags);
952 
953 		if (!session->valid) {
954 			retval = -EBADFD;
955 			goto out;
956 		}
957 		if (file->f_flags & O_NONBLOCK) {
958 			retval = -EAGAIN;
959 			goto out;
960 		}
961 
962 		schedule();
963 
964 		if (signal_pending(current)) {
965 			retval = -ERESTARTSYS;
966 			goto out;
967 		}
968 	} while (1);
969 
970 	buffer = &(hmc->buffer[hmc->queue_outbound_msgs[hmc->queue_tail]]);
971 	hmc->queue_tail++;
972 	if (hmc->queue_tail == ibmvmc_max_buf_pool_size)
973 		hmc->queue_tail = 0;
974 	spin_unlock_irqrestore(&hmc->lock, flags);
975 
976 	nbytes = min_t(size_t, nbytes, buffer->msg_len);
977 	n = copy_to_user(buf, buffer->real_addr_local, nbytes);
978 	dev_dbg(adapter->dev, "read: copy to user nbytes = 0x%lx.\n", nbytes);
979 	ibmvmc_free_hmc_buffer(hmc, buffer);
980 	retval = nbytes;
981 
982 	if (n) {
983 		dev_warn(adapter->dev, "read: copy to user failed.\n");
984 		retval = -EFAULT;
985 	}
986 
987  out:
988 	finish_wait(&ibmvmc_read_wait, &wait);
989 	dev_dbg(adapter->dev, "read: out %ld\n", retval);
990 	return retval;
991 }
992 
993 /**
994  * ibmvmc_poll - Poll
995  *
996  * @file:	file struct
997  * @wait:	Poll Table
998  *
999  * Return:
1000  *	poll.h return values
1001  */
1002 static unsigned int ibmvmc_poll(struct file *file, poll_table *wait)
1003 {
1004 	struct ibmvmc_file_session *session;
1005 	struct ibmvmc_hmc *hmc;
1006 	unsigned int mask = 0;
1007 
1008 	session = file->private_data;
1009 	if (!session)
1010 		return 0;
1011 
1012 	hmc = session->hmc;
1013 	if (!hmc)
1014 		return 0;
1015 
1016 	poll_wait(file, &ibmvmc_read_wait, wait);
1017 
1018 	if (hmc->queue_head != hmc->queue_tail)
1019 		mask |= POLLIN | POLLRDNORM;
1020 
1021 	return mask;
1022 }
1023 
1024 /**
1025  * ibmvmc_write - Write
1026  *
1027  * @file:	file struct
1028  * @buffer:	User character buffer
1029  * @count:	Count field
1030  * @ppos:	Offset
1031  *
1032  * Return:
1033  *	Number of bytes written - Success
1034  *	Negative errno - Failure
1035  */
1036 static ssize_t ibmvmc_write(struct file *file, const char __user *buffer,
1037 			    size_t count, loff_t *ppos)
1038 {
1039 	struct ibmvmc_buffer *vmc_buffer;
1040 	struct ibmvmc_file_session *session;
1041 	struct crq_server_adapter *adapter;
1042 	struct ibmvmc_hmc *hmc;
1043 	unsigned char *buf;
1044 	unsigned long flags;
1045 	size_t bytes;
1046 	const char __user *p = buffer;
1047 	size_t c = count;
1048 	int ret = 0;
1049 
1050 	session = file->private_data;
1051 	if (!session)
1052 		return -EIO;
1053 
1054 	hmc = session->hmc;
1055 	if (!hmc)
1056 		return -EIO;
1057 
1058 	spin_lock_irqsave(&hmc->lock, flags);
1059 	if (hmc->state == ibmhmc_state_free) {
1060 		/* HMC connection is not valid (possibly was reset under us). */
1061 		ret = -EIO;
1062 		goto out;
1063 	}
1064 
1065 	adapter = hmc->adapter;
1066 	if (!adapter) {
1067 		ret = -EIO;
1068 		goto out;
1069 	}
1070 
1071 	if (count > ibmvmc.max_mtu) {
1072 		dev_warn(adapter->dev, "invalid buffer size 0x%lx\n",
1073 			 (unsigned long)count);
1074 		ret = -EIO;
1075 		goto out;
1076 	}
1077 
1078 	/* Waiting for the open resp message to the ioctl(1) - retry */
1079 	if (hmc->state == ibmhmc_state_opening) {
1080 		ret = -EBUSY;
1081 		goto out;
1082 	}
1083 
1084 	/* Make sure the ioctl() was called & the open msg sent, and that
1085 	 * the HMC connection has not failed.
1086 	 */
1087 	if (hmc->state != ibmhmc_state_ready) {
1088 		ret = -EIO;
1089 		goto out;
1090 	}
1091 
1092 	vmc_buffer = ibmvmc_get_valid_hmc_buffer(hmc->index);
1093 	if (!vmc_buffer) {
1094 		/* No buffer available for the msg send, or we have not yet
1095 		 * completed the open/open_resp sequence.  Retry until this is
1096 		 * complete.
1097 		 */
1098 		ret = -EBUSY;
1099 		goto out;
1100 	}
1101 	if (!vmc_buffer->real_addr_local) {
1102 		dev_err(adapter->dev, "no buffer storage assigned\n");
1103 		ret = -EIO;
1104 		goto out;
1105 	}
1106 	buf = vmc_buffer->real_addr_local;
1107 
1108 	while (c > 0) {
1109 		bytes = min_t(size_t, c, vmc_buffer->size);
1110 
1111 		bytes -= copy_from_user(buf, p, bytes);
1112 		if (!bytes) {
1113 			ret = -EFAULT;
1114 			goto out;
1115 		}
1116 		c -= bytes;
1117 		p += bytes;
1118 	}
1119 	if (p == buffer)
1120 		goto out;
1121 
1122 	file->f_path.dentry->d_inode->i_mtime = current_time(file_inode(file));
1123 	mark_inode_dirty(file->f_path.dentry->d_inode);
1124 
1125 	dev_dbg(adapter->dev, "write: file = 0x%lx, count = 0x%lx\n",
1126 		(unsigned long)file, (unsigned long)count);
1127 
1128 	ibmvmc_send_msg(adapter, vmc_buffer, hmc, count);
1129 	ret = p - buffer;
1130  out:
1131 	spin_unlock_irqrestore(&hmc->lock, flags);
1132 	return (ssize_t)(ret);
1133 }
1134 
1135 /**
1136  * ibmvmc_setup_hmc - Setup the HMC
1137  *
1138  * @session:	ibmvmc_file_session struct
1139  *
1140  * Return:
1141  *	0 - Success
1142  *	Non-zero - Failure
1143  */
1144 static long ibmvmc_setup_hmc(struct ibmvmc_file_session *session)
1145 {
1146 	struct ibmvmc_hmc *hmc;
1147 	unsigned int valid, free, index;
1148 
1149 	if (ibmvmc.state == ibmvmc_state_failed) {
1150 		pr_warn("ibmvmc: Reserve HMC: state_failed\n");
1151 		return -EIO;
1152 	}
1153 
1154 	if (ibmvmc.state < ibmvmc_state_ready) {
1155 		pr_warn("ibmvmc: Reserve HMC: not state_ready\n");
1156 		return -EAGAIN;
1157 	}
1158 
1159 	/* Device is busy until capabilities have been exchanged and we
1160 	 * have a generic buffer for each possible HMC connection.
1161 	 */
1162 	for (index = 0; index <= ibmvmc.max_hmc_index; index++) {
1163 		valid = 0;
1164 		ibmvmc_count_hmc_buffers(index, &valid, &free);
1165 		if (valid == 0) {
1166 			pr_warn("ibmvmc: buffers not ready for index %d\n",
1167 				index);
1168 			return -ENOBUFS;
1169 		}
1170 	}
1171 
1172 	/* Get an hmc object, and transition to ibmhmc_state_initial */
1173 	hmc = ibmvmc_get_free_hmc();
1174 	if (!hmc) {
1175 		pr_warn("%s: free hmc not found\n", __func__);
1176 		return -EBUSY;
1177 	}
1178 
1179 	hmc->session = hmc->session + 1;
1180 	if (hmc->session == 0xff)
1181 		hmc->session = 1;
1182 
1183 	session->hmc = hmc;
1184 	hmc->adapter = &ibmvmc_adapter;
1185 	hmc->file_session = session;
1186 	session->valid = 1;
1187 
1188 	return 0;
1189 }
1190 
1191 /**
1192  * ibmvmc_ioctl_sethmcid - IOCTL Set HMC ID
1193  *
1194  * @session:	ibmvmc_file_session struct
1195  * @new_hmc_id:	HMC id field
1196  *
1197  * IOCTL command to setup the hmc id
1198  *
1199  * Return:
1200  *	0 - Success
1201  *	Non-zero - Failure
1202  */
1203 static long ibmvmc_ioctl_sethmcid(struct ibmvmc_file_session *session,
1204 				  unsigned char __user *new_hmc_id)
1205 {
1206 	struct ibmvmc_hmc *hmc;
1207 	struct ibmvmc_buffer *buffer;
1208 	size_t bytes;
1209 	char print_buffer[HMC_ID_LEN + 1];
1210 	unsigned long flags;
1211 	long rc = 0;
1212 
1213 	/* Reserve HMC session */
1214 	hmc = session->hmc;
1215 	if (!hmc) {
1216 		rc = ibmvmc_setup_hmc(session);
1217 		if (rc)
1218 			return rc;
1219 
1220 		hmc = session->hmc;
1221 		if (!hmc) {
1222 			pr_err("ibmvmc: setup_hmc success but no hmc\n");
1223 			return -EIO;
1224 		}
1225 	}
1226 
1227 	if (hmc->state != ibmhmc_state_initial) {
1228 		pr_warn("ibmvmc: sethmcid: invalid state to send open 0x%x\n",
1229 			hmc->state);
1230 		return -EIO;
1231 	}
1232 
1233 	bytes = copy_from_user(hmc->hmc_id, new_hmc_id, HMC_ID_LEN);
1234 	if (bytes)
1235 		return -EFAULT;
1236 
1237 	/* Send Open Session command */
1238 	spin_lock_irqsave(&hmc->lock, flags);
1239 	buffer = ibmvmc_get_valid_hmc_buffer(hmc->index);
1240 	spin_unlock_irqrestore(&hmc->lock, flags);
1241 
1242 	if (!buffer || !buffer->real_addr_local) {
1243 		pr_warn("ibmvmc: sethmcid: no buffer available\n");
1244 		return -EIO;
1245 	}
1246 
1247 	/* Make sure buffer is NULL terminated before trying to print it */
1248 	memset(print_buffer, 0, HMC_ID_LEN + 1);
1249 	strncpy(print_buffer, hmc->hmc_id, HMC_ID_LEN);
1250 	pr_info("ibmvmc: sethmcid: Set HMC ID: \"%s\"\n", print_buffer);
1251 
1252 	memcpy(buffer->real_addr_local, hmc->hmc_id, HMC_ID_LEN);
1253 	/* RDMA over ID, send open msg, change state to ibmhmc_state_opening */
1254 	rc = ibmvmc_send_open(buffer, hmc);
1255 
1256 	return rc;
1257 }
1258 
1259 /**
1260  * ibmvmc_ioctl_query - IOCTL Query
1261  *
1262  * @session:	ibmvmc_file_session struct
1263  * @ret_struct:	ibmvmc_query_struct
1264  *
1265  * Return:
1266  *	0 - Success
1267  *	Non-zero - Failure
1268  */
1269 static long ibmvmc_ioctl_query(struct ibmvmc_file_session *session,
1270 			       struct ibmvmc_query_struct __user *ret_struct)
1271 {
1272 	struct ibmvmc_query_struct query_struct;
1273 	size_t bytes;
1274 
1275 	memset(&query_struct, 0, sizeof(query_struct));
1276 	query_struct.have_vmc = (ibmvmc.state > ibmvmc_state_initial);
1277 	query_struct.state = ibmvmc.state;
1278 	query_struct.vmc_drc_index = ibmvmc.vmc_drc_index;
1279 
1280 	bytes = copy_to_user(ret_struct, &query_struct,
1281 			     sizeof(query_struct));
1282 	if (bytes)
1283 		return -EFAULT;
1284 
1285 	return 0;
1286 }
1287 
1288 /**
1289  * ibmvmc_ioctl_requestvmc - IOCTL Request VMC
1290  *
1291  * @session:	ibmvmc_file_session struct
1292  * @ret_vmc_index:	VMC Index
1293  *
1294  * Return:
1295  *	0 - Success
1296  *	Non-zero - Failure
1297  */
1298 static long ibmvmc_ioctl_requestvmc(struct ibmvmc_file_session *session,
1299 				    u32 __user *ret_vmc_index)
1300 {
1301 	/* TODO: (adreznec) Add locking to control multiple process access */
1302 	size_t bytes;
1303 	long rc;
1304 	u32 vmc_drc_index;
1305 
1306 	/* Call to request the VMC device from phyp*/
1307 	rc = h_request_vmc(&vmc_drc_index);
1308 	pr_debug("ibmvmc: requestvmc: H_REQUEST_VMC rc = 0x%lx\n", rc);
1309 
1310 	if (rc == H_SUCCESS) {
1311 		rc = 0;
1312 	} else if (rc == H_FUNCTION) {
1313 		pr_err("ibmvmc: requestvmc: h_request_vmc not supported\n");
1314 		return -EPERM;
1315 	} else if (rc == H_AUTHORITY) {
1316 		pr_err("ibmvmc: requestvmc: hypervisor denied vmc request\n");
1317 		return -EPERM;
1318 	} else if (rc == H_HARDWARE) {
1319 		pr_err("ibmvmc: requestvmc: hypervisor hardware fault\n");
1320 		return -EIO;
1321 	} else if (rc == H_RESOURCE) {
1322 		pr_err("ibmvmc: requestvmc: vmc resource unavailable\n");
1323 		return -ENODEV;
1324 	} else if (rc == H_NOT_AVAILABLE) {
1325 		pr_err("ibmvmc: requestvmc: system cannot be vmc managed\n");
1326 		return -EPERM;
1327 	} else if (rc == H_PARAMETER) {
1328 		pr_err("ibmvmc: requestvmc: invalid parameter\n");
1329 		return -EINVAL;
1330 	}
1331 
1332 	/* Success, set the vmc index in global struct */
1333 	ibmvmc.vmc_drc_index = vmc_drc_index;
1334 
1335 	bytes = copy_to_user(ret_vmc_index, &vmc_drc_index,
1336 			     sizeof(*ret_vmc_index));
1337 	if (bytes) {
1338 		pr_warn("ibmvmc: requestvmc: copy to user failed.\n");
1339 		return -EFAULT;
1340 	}
1341 	return rc;
1342 }
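
/*
 * What happens next is outside this driver: after a successful
 * VMC_IOCTL_REQUESTVMC the device exists at the hypervisor but has not
 * yet been hotplugged into the partition. Userspace typically hands the
 * returned DRC index to a DLPAR hotplug tool (for example drmgr on
 * PowerVM systems) so the vio bus can probe the device and the CRQ
 * handshake can start; the exact tooling is platform policy rather than
 * something this driver mandates.
 */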
1343 
1344 /**
1345  * ibmvmc_ioctl - IOCTL
1346  *
1347  * @file:	file struct
1348  * @cmd:	cmd field
1349  * @arg:	Argument field
1350  *
1351  * Return:
1352  *	0 - Success
1353  *	Non-zero - Failure
1354  */
1355 static long ibmvmc_ioctl(struct file *file,
1356 			 unsigned int cmd, unsigned long arg)
1357 {
1358 	struct ibmvmc_file_session *session = file->private_data;
1359 
1360 	pr_debug("ibmvmc: ioctl file=0x%lx, cmd=0x%x, arg=0x%lx, ses=0x%lx\n",
1361 		 (unsigned long)file, cmd, arg,
1362 		 (unsigned long)session);
1363 
1364 	if (!session) {
1365 		pr_warn("ibmvmc: ioctl: no session\n");
1366 		return -EIO;
1367 	}
1368 
1369 	switch (cmd) {
1370 	case VMC_IOCTL_SETHMCID:
1371 		return ibmvmc_ioctl_sethmcid(session,
1372 			(unsigned char __user *)arg);
1373 	case VMC_IOCTL_QUERY:
1374 		return ibmvmc_ioctl_query(session,
1375 			(struct ibmvmc_query_struct __user *)arg);
1376 	case VMC_IOCTL_REQUESTVMC:
1377 		return ibmvmc_ioctl_requestvmc(session,
1378 			(unsigned int __user *)arg);
1379 	default:
1380 		pr_warn("ibmvmc: unknown ioctl 0x%x\n", cmd);
1381 		return -EINVAL;
1382 	}
1383 }
1384 
1385 static const struct file_operations ibmvmc_fops = {
1386 	.owner		= THIS_MODULE,
1387 	.read		= ibmvmc_read,
1388 	.write		= ibmvmc_write,
1389 	.poll		= ibmvmc_poll,
1390 	.unlocked_ioctl	= ibmvmc_ioctl,
1391 	.open           = ibmvmc_open,
1392 	.release        = ibmvmc_close,
1393 };
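
/*
 * A hypothetical userspace session against this misc device. The device
 * node name is assumed from the miscdevice name ("ibmvmc"), and the
 * command payload is invented for illustration; the ioctl numbers,
 * HMC_ID_LEN, and the query struct come from ibmvmc.h. Error handling
 * is omitted.
 *
 *	int fd = open("/dev/ibmvmc", O_RDWR);
 *	struct ibmvmc_query_struct q;
 *	unsigned char id[HMC_ID_LEN] = "example-hmc-id";
 *	char cmd[64] = { 0 };			- an opaque HMC command
 *	char rsp[4096];
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *
 *	ioctl(fd, VMC_IOCTL_QUERY, &q);		- check have_vmc and state
 *	ioctl(fd, VMC_IOCTL_SETHMCID, id);	- reserve HMC, send open
 *	write(fd, cmd, sizeof(cmd));		- -EBUSY until open_resp
 *	poll(&pfd, 1, -1);			- wait for hypervisor data
 *	read(fd, rsp, sizeof(rsp));		- consumes one queued buffer
 */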
1394 
1395 /**
1396  * ibmvmc_add_buffer - Add Buffer
1397  *
1398  * @adapter: crq_server_adapter struct
1399  * @crq:	ibmvmc_crq_msg struct
1400  *
1401  * This message transfers a buffer from hypervisor ownership to management
1402  * partition ownership. The LIOBA is obtained from the virtual TCE table
1403  * associated with the hypervisor side of the VMC device, and points to a
1404  * buffer of size MTU (as established in the capabilities exchange).
1405  *
1406  * Typical flow for adding buffers:
1407  * 1. A new management application connection is opened by the management
1408  *	partition.
1409  * 2. The hypervisor assigns new buffers for the traffic associated with
1410  *	that connection.
1411  * 3. The hypervisor sends VMC Add Buffer messages to the management
1412  *	partition, informing it of the new buffers.
1413  * 4. The hypervisor sends an HMC protocol message (to the management
1414  *	application) notifying it of the new buffers. This informs the
1415  *	application that it has buffers available for sending HMC
1416  *	commands.
1417  *
1418  * Return:
1419  *	0 - Success
1420  *	Non-zero - Failure
1421  */
1422 static int ibmvmc_add_buffer(struct crq_server_adapter *adapter,
1423 			     struct ibmvmc_crq_msg *crq)
1424 {
1425 	struct ibmvmc_buffer *buffer;
1426 	u8 hmc_index;
1427 	u8 hmc_session;
1428 	u16 buffer_id;
1429 	unsigned long flags;
1430 	int rc = 0;
1431 
1432 	if (!crq)
1433 		return -1;
1434 
1435 	hmc_session = crq->hmc_session;
1436 	hmc_index = crq->hmc_index;
1437 	buffer_id = be16_to_cpu(crq->var2.buffer_id);
1438 
1439 	if (hmc_index > ibmvmc.max_hmc_index) {
1440 		dev_err(adapter->dev, "add_buffer: invalid hmc_index = 0x%x\n",
1441 			hmc_index);
1442 		ibmvmc_send_add_buffer_resp(adapter, VMC_MSG_INVALID_HMC_INDEX,
1443 					    hmc_session, hmc_index, buffer_id);
1444 		return -1;
1445 	}
1446 
1447 	if (buffer_id >= ibmvmc.max_buffer_pool_size) {
1448 		dev_err(adapter->dev, "add_buffer: invalid buffer_id = 0x%x\n",
1449 			buffer_id);
1450 		ibmvmc_send_add_buffer_resp(adapter, VMC_MSG_INVALID_BUFFER_ID,
1451 					    hmc_session, hmc_index, buffer_id);
1452 		return -1;
1453 	}
1454 
1455 	spin_lock_irqsave(&hmcs[hmc_index].lock, flags);
1456 	buffer = &hmcs[hmc_index].buffer[buffer_id];
1457 
1458 	if (buffer->real_addr_local || buffer->dma_addr_local) {
1459 		dev_warn(adapter->dev, "add_buffer: already allocated id = 0x%lx\n",
1460 			 (unsigned long)buffer_id);
1461 		spin_unlock_irqrestore(&hmcs[hmc_index].lock, flags);
1462 		ibmvmc_send_add_buffer_resp(adapter, VMC_MSG_INVALID_BUFFER_ID,
1463 					    hmc_session, hmc_index, buffer_id);
1464 		return -1;
1465 	}
1466 
1467 	buffer->real_addr_local = alloc_dma_buffer(to_vio_dev(adapter->dev),
1468 						   ibmvmc.max_mtu,
1469 						   &buffer->dma_addr_local);
1470 
1471 	if (!buffer->real_addr_local) {
1472 		dev_err(adapter->dev, "add_buffer: alloc_dma_buffer failed.\n");
1473 		spin_unlock_irqrestore(&hmcs[hmc_index].lock, flags);
1474 		ibmvmc_send_add_buffer_resp(adapter, VMC_MSG_INTERFACE_FAILURE,
1475 					    hmc_session, hmc_index, buffer_id);
1476 		return -1;
1477 	}
1478 
1479 	buffer->dma_addr_remote = be32_to_cpu(crq->var3.lioba);
1480 	buffer->size = ibmvmc.max_mtu;
1481 	buffer->owner = crq->var1.owner;
1482 	buffer->free = 1;
1483 	/* Must ensure valid==1 is observable only after all other fields are */
1484 	dma_wmb();
1485 	buffer->valid = 1;
1486 	buffer->id = buffer_id;
1487 
1488 	dev_dbg(adapter->dev, "add_buffer: successfully added a buffer:\n");
1489 	dev_dbg(adapter->dev, "   index: %d, session: %d, buffer: 0x%x, owner: %d\n",
1490 		hmc_index, hmc_session, buffer_id, buffer->owner);
1491 	dev_dbg(adapter->dev, "   local: 0x%x, remote: 0x%x\n",
1492 		(u32)buffer->dma_addr_local,
1493 		(u32)buffer->dma_addr_remote);
1494 	spin_unlock_irqrestore(&hmcs[hmc_index].lock, flags);
1495 
1496 	ibmvmc_send_add_buffer_resp(adapter, VMC_MSG_SUCCESS, hmc_session,
1497 				    hmc_index, buffer_id);
1498 
1499 	return rc;
1500 }
1501 
1502 /**
1503  * ibmvmc_rem_buffer - Remove Buffer
1504  *
1505  * @adapter: crq_server_adapter struct
1506  * @crq:	ibmvmc_crq_msg struct
1507  *
1508  * This message requests an HMC buffer to be transferred from management
1509  * partition ownership to hypervisor ownership. The management partition may
1510  * not be able to satisfy the request at a particular point in time if all its
1511  * buffers are in use. The management partition requires a depth of at least
1512  * one inbound buffer to allow management application commands to flow to the
1513  * hypervisor. It is, therefore, an interface error for the hypervisor to
1514  * attempt to remove the management partition's last buffer.
1515  *
1516  * The hypervisor is expected to manage buffer usage with the management
1517  * application directly and inform the management partition when buffers may be
1518  * removed. The typical flow for removing buffers:
1519  *
1520  * 1. The management application no longer needs a communication path to a
1521  *	particular hypervisor function. That function is closed.
1522  * 2. The hypervisor and the management application quiesce all traffic to that
1523  *	function. The hypervisor requests a reduction in buffer pool size.
1524  * 3. The management application acknowledges the reduction in buffer pool size.
1525  * 4. The hypervisor sends a Remove Buffer message to the management partition,
1526  *	informing it of the reduction in buffers.
1527  * 5. The management partition verifies it can remove the buffer. This is
1528  *	possible if buffers have been quiesced.
1529  *
1530  * Return:
1531  *	0 - Success
1532  *	Non-zero - Failure
1533  *
1534  * The hypervisor requested that we pick an unused buffer, and return it.
1535  * Before sending the buffer back, we free any storage associated with the
1536  * buffer.
1537  */
1539 static int ibmvmc_rem_buffer(struct crq_server_adapter *adapter,
1540 			     struct ibmvmc_crq_msg *crq)
1541 {
1542 	struct ibmvmc_buffer *buffer;
1543 	u8 hmc_index;
1544 	u8 hmc_session;
1545 	u16 buffer_id = 0;
1546 	unsigned long flags;
1547 	int rc = 0;
1548 
1549 	if (!crq)
1550 		return -1;
1551 
1552 	hmc_session = crq->hmc_session;
1553 	hmc_index = crq->hmc_index;
1554 
1555 	if (hmc_index > ibmvmc.max_hmc_index) {
1556 		dev_warn(adapter->dev, "rem_buffer: invalid hmc_index = 0x%x\n",
1557 			 hmc_index);
1558 		ibmvmc_send_rem_buffer_resp(adapter, VMC_MSG_INVALID_HMC_INDEX,
1559 					    hmc_session, hmc_index, buffer_id);
1560 		return -1;
1561 	}
1562 
1563 	spin_lock_irqsave(&hmcs[hmc_index].lock, flags);
1564 	buffer = ibmvmc_get_free_hmc_buffer(adapter, hmc_index);
1565 	if (!buffer) {
1566 		dev_info(adapter->dev, "rem_buffer: no buffer to remove\n");
1567 		spin_unlock_irqrestore(&hmcs[hmc_index].lock, flags);
1568 		ibmvmc_send_rem_buffer_resp(adapter, VMC_MSG_NO_BUFFER,
1569 					    hmc_session, hmc_index,
1570 					    VMC_INVALID_BUFFER_ID);
1571 		return -1;
1572 	}
1573 
1574 	buffer_id = buffer->id;
1575 
1576 	if (buffer->valid)
1577 		free_dma_buffer(to_vio_dev(adapter->dev),
1578 				ibmvmc.max_mtu,
1579 				buffer->real_addr_local,
1580 				buffer->dma_addr_local);
1581 
1582 	memset(buffer, 0, sizeof(struct ibmvmc_buffer));
1583 	spin_unlock_irqrestore(&hmcs[hmc_index].lock, flags);
1584 
1585 	dev_dbg(adapter->dev, "rem_buffer: removed buffer 0x%x.\n", buffer_id);
1586 	ibmvmc_send_rem_buffer_resp(adapter, VMC_MSG_SUCCESS, hmc_session,
1587 				    hmc_index, buffer_id);
1588 
1589 	return rc;
1590 }
1591 
1592 static int ibmvmc_recv_msg(struct crq_server_adapter *adapter,
1593 			   struct ibmvmc_crq_msg *crq)
1594 {
1595 	struct ibmvmc_buffer *buffer;
1596 	struct ibmvmc_hmc *hmc;
1597 	unsigned long msg_len;
1598 	u8 hmc_index;
1599 	u8 hmc_session;
1600 	u16 buffer_id;
1601 	unsigned long flags;
1602 	int rc = 0;
1603 
1604 	if (!crq)
1605 		return -1;
1606 
1607 	/* Hypervisor writes CRQs directly into our memory in big endian */
1608 	dev_dbg(adapter->dev, "Recv_msg: msg from HV 0x%016llx 0x%016llx\n",
1609 		be64_to_cpu(*((unsigned long *)crq)),
1610 		be64_to_cpu(*(((unsigned long *)crq) + 1)));
1611 
1612 	hmc_session = crq->hmc_session;
1613 	hmc_index = crq->hmc_index;
1614 	buffer_id = be16_to_cpu(crq->var2.buffer_id);
1615 	msg_len = be32_to_cpu(crq->var3.msg_len);
1616 
1617 	if (hmc_index > ibmvmc.max_hmc_index) {
1618 		dev_err(adapter->dev, "Recv_msg: invalid hmc_index = 0x%x\n",
1619 			hmc_index);
1620 		ibmvmc_send_add_buffer_resp(adapter, VMC_MSG_INVALID_HMC_INDEX,
1621 					    hmc_session, hmc_index, buffer_id);
1622 		return -1;
1623 	}
1624 
1625 	if (buffer_id >= ibmvmc.max_buffer_pool_size) {
1626 		dev_err(adapter->dev, "Recv_msg: invalid buffer_id = 0x%x\n",
1627 			buffer_id);
1628 		ibmvmc_send_add_buffer_resp(adapter, VMC_MSG_INVALID_BUFFER_ID,
1629 					    hmc_session, hmc_index, buffer_id);
1630 		return -1;
1631 	}
1632 
1633 	hmc = &hmcs[hmc_index];
1634 	spin_lock_irqsave(&hmc->lock, flags);
1635 
1636 	if (hmc->state == ibmhmc_state_free) {
1637 		dev_err(adapter->dev, "Recv_msg: invalid hmc state = 0x%x\n",
1638 			hmc->state);
1639 		/* HMC connection is not valid (possibly was reset under us). */
1640 		spin_unlock_irqrestore(&hmc->lock, flags);
1641 		return -1;
1642 	}
1643 
1644 	buffer = &hmc->buffer[buffer_id];
1645 
1646 	if (buffer->valid == 0 || buffer->owner == VMC_BUF_OWNER_ALPHA) {
1647 		dev_err(adapter->dev, "Recv_msg: not valid, or not HV.  0x%x 0x%x\n",
1648 			buffer->valid, buffer->owner);
1649 		spin_unlock_irqrestore(&hmc->lock, flags);
1650 		return -1;
1651 	}
1652 
1653 	/* RDMA the data into the partition. */
1654 	rc = h_copy_rdma(msg_len,
1655 			 adapter->riobn,
1656 			 buffer->dma_addr_remote,
1657 			 adapter->liobn,
1658 			 buffer->dma_addr_local);
1659 
1660 	dev_dbg(adapter->dev, "Recv_msg: msg_len = 0x%x, buffer_id = 0x%x, queue_head = 0x%x, hmc_idx = 0x%x\n",
1661 		(unsigned int)msg_len, (unsigned int)buffer_id,
1662 		(unsigned int)hmc->queue_head, (unsigned int)hmc_index);
1663 	buffer->msg_len = msg_len;
1664 	buffer->free = 0;
1665 	buffer->owner = VMC_BUF_OWNER_ALPHA;
1666 
1667 	if (rc) {
1668 		dev_err(adapter->dev, "Failure in recv_msg: h_copy_rdma = 0x%x\n",
1669 			rc);
1670 		spin_unlock_irqrestore(&hmc->lock, flags);
1671 		return -1;
1672 	}
1673 
1674 	/* Must be locked because read operates on the same data */
1675 	hmc->queue_outbound_msgs[hmc->queue_head] = buffer_id;
1676 	hmc->queue_head++;
1677 	if (hmc->queue_head == ibmvmc_max_buf_pool_size)
1678 		hmc->queue_head = 0;
1679 
1680 	if (hmc->queue_head == hmc->queue_tail)
1681 		dev_err(adapter->dev, "outbound buffer queue wrapped.\n");
1682 
1683 	spin_unlock_irqrestore(&hmc->lock, flags);
1684 
1685 	wake_up_interruptible(&ibmvmc_read_wait);
1686 
1687 	return 0;
1688 }
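
/*
 * The outbound message queue is a plain ring of buffer ids: recv_msg
 * advances queue_head as it enqueues, while ibmvmc_read advances
 * queue_tail as it consumes. A worked example with
 * ibmvmc_max_buf_pool_size == 4:
 *
 *	start                  head = 0, tail = 0  (empty, read blocks)
 *	enqueue ids 3 then 1   head = 2, tail = 0  (two messages pending)
 *	two reads              head = 2, tail = 2  (empty again)
 *
 * If head catches up to tail right after an enqueue, the ring has
 * wrapped and an unread buffer id was overwritten, hence the dev_err
 * above.
 */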
1689 
1690 /**
1691  * ibmvmc_process_capabilities - Process Capabilities
1692  *
1693  * @adapter:	crq_server_adapter struct
1694  * @crqp:	ibmvmc_crq_msg struct
1695  *
1696  */
1697 static void ibmvmc_process_capabilities(struct crq_server_adapter *adapter,
1698 					struct ibmvmc_crq_msg *crqp)
1699 {
1700 	struct ibmvmc_admin_crq_msg *crq = (struct ibmvmc_admin_crq_msg *)crqp;
1701 
1702 	if ((be16_to_cpu(crq->version) >> 8) !=
1703 			(IBMVMC_PROTOCOL_VERSION >> 8)) {
1704 		dev_err(adapter->dev, "init failed, incompatible versions 0x%x 0x%x\n",
1705 			be16_to_cpu(crq->version),
1706 			IBMVMC_PROTOCOL_VERSION);
1707 		ibmvmc.state = ibmvmc_state_failed;
1708 		return;
1709 	}
1710 
1711 	ibmvmc.max_mtu = min_t(u32, ibmvmc_max_mtu, be32_to_cpu(crq->max_mtu));
1712 	ibmvmc.max_buffer_pool_size = min_t(u16, ibmvmc_max_buf_pool_size,
1713 					    be16_to_cpu(crq->pool_size));
1714 	ibmvmc.max_hmc_index = min_t(u8, ibmvmc_max_hmcs, crq->max_hmc) - 1;
1715 	ibmvmc.state = ibmvmc_state_ready;
1716 
1717 	dev_info(adapter->dev, "Capabilities: mtu=0x%x, pool_size=0x%x, max_hmc=0x%x\n",
1718 		 ibmvmc.max_mtu, ibmvmc.max_buffer_pool_size,
1719 		 ibmvmc.max_hmc_index);
1720 }
1721 
1722 /**
1723  * ibmvmc_validate_hmc_session - Validate HMC Session
1724  *
1725  * @adapter:	crq_server_adapter struct
1726  * @crq:	ibmvmc_crq_msg struct
1727  *
1728  * Return:
1729  *	0 - Success
1730  *	Non-zero - Failure
1731  */
1732 static int ibmvmc_validate_hmc_session(struct crq_server_adapter *adapter,
1733 				       struct ibmvmc_crq_msg *crq)
1734 {
1735 	unsigned char hmc_index;
1736 
1737 	hmc_index = crq->hmc_index;
1738 
1739 	if (crq->hmc_session == 0)
1740 		return 0;
1741 
1742 	if (hmc_index > ibmvmc.max_hmc_index)
1743 		return -1;
1744 
1745 	if (hmcs[hmc_index].session != crq->hmc_session) {
1746 		dev_warn(adapter->dev, "Drop, bad session: expected 0x%x, recv 0x%x\n",
1747 			 hmcs[hmc_index].session, crq->hmc_session);
1748 		return -1;
1749 	}
1750 
1751 	return 0;
1752 }
1753 
1754 /**
1755  * ibmvmc_reset - Reset
1756  *
1757  * @adapter:	crq_server_adapter struct
1758  * @xport_event:	true if the partner closed their CRQ, in which case
1759  *			no reset is needed; false if we must schedule a
1760  *			CRQ reset ourselves
1761  *
1762  * Closes all HMC sessions and conditionally schedules a CRQ reset.
1763  */
1764 static void ibmvmc_reset(struct crq_server_adapter *adapter, bool xport_event)
1765 {
1766 	int i;
1767 
1768 	if (ibmvmc.state != ibmvmc_state_sched_reset) {
1769 		dev_info(adapter->dev, "*** Reset to initial state.\n");
1770 		for (i = 0; i < ibmvmc_max_hmcs; i++)
1771 			ibmvmc_return_hmc(&hmcs[i], xport_event);
1772 
1773 		if (xport_event) {
1774 			/* CRQ was closed by the partner.  We don't need to do
1775 			 * anything except set ourself to the correct state to
1776 			 * handle init msgs.
1777 			 */
1778 			ibmvmc.state = ibmvmc_state_crqinit;
1779 		} else {
1780 			/* The partner did not close their CRQ - instead, we're
1781 			 * closing the CRQ on our end. Need to schedule this
1782 			 * for process context, because CRQ reset may require a
1783 			 * sleep.
1784 			 *
1785 			 * Setting ibmvmc.state here immediately prevents
1786 			 * ibmvmc_open from completing until the reset
1787 			 * completes in process context.
1788 			 */
1789 			ibmvmc.state = ibmvmc_state_sched_reset;
1790 			dev_dbg(adapter->dev, "Device reset scheduled");
1791 			wake_up_interruptible(&adapter->reset_wait_queue);
1792 		}
1793 	}
1794 }
1795 
1796 /**
1797  * ibmvmc_reset_task - Reset Task
1798  *
1799  * @data:	Data field
1800  *
1801  * Performs a CRQ reset of the VMC device in process context.
1802  * NOTE: This function should not be called directly, use ibmvmc_reset.
1803  */
1804 static int ibmvmc_reset_task(void *data)
1805 {
1806 	struct crq_server_adapter *adapter = data;
1807 	int rc;
1808 
1809 	set_user_nice(current, -20);
1810 
1811 	while (!kthread_should_stop()) {
1812 		wait_event_interruptible(adapter->reset_wait_queue,
1813 			(ibmvmc.state == ibmvmc_state_sched_reset) ||
1814 			kthread_should_stop());
1815 
1816 		if (kthread_should_stop())
1817 			break;
1818 
1819 		dev_dbg(adapter->dev, "CRQ resetting in process context");
1820 		tasklet_disable(&adapter->work_task);
1821 
1822 		rc = ibmvmc_reset_crq_queue(adapter);
1823 
1824 		if (rc != H_SUCCESS && rc != H_RESOURCE) {
1825 			dev_err(adapter->dev, "Error initializing CRQ.  rc = 0x%x\n",
1826 				rc);
1827 			ibmvmc.state = ibmvmc_state_failed;
1828 		} else {
1829 			ibmvmc.state = ibmvmc_state_crqinit;
1830 
1831 			if (ibmvmc_send_crq(adapter, 0xC001000000000000LL, 0)
1832 			    != 0 && rc != H_RESOURCE)
1833 				dev_warn(adapter->dev, "Failed to send initialize CRQ message\n");
1834 		}
1835 
1836 		vio_enable_interrupts(to_vio_dev(adapter->dev));
1837 		tasklet_enable(&adapter->work_task);
1838 	}
1839 
1840 	return 0;
1841 }
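
/*
 * The magic number sent above belongs to the generic PAPR CRQ handshake
 * shared by the vio drivers: a first byte of 0xC0 marks an
 * administrative (init) message rather than a payload CRQ, and the
 * second byte selects the flavor.
 *
 *	0xC001000000000000	initialize           - ask partner to sync
 *	0xC002000000000000	initialize complete  - reply to a 0xC001
 *
 * ibmvmc_handle_crq_init below answers an incoming init message with
 * "initialize complete" and then starts the capabilities exchange.
 */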
1842 
1843 /**
1844  * ibmvmc_process_open_resp - Process Open Response
1845  *
1846  * @crq: ibmvmc_crq_msg struct
1847  * @adapter:    crq_server_adapter struct
1848  *
1849  * This command is sent by the hypervisor in response to the Interface
1850  * Open message. When this message is received, the indicated buffer is
1851  * again available for management partition use.
1852  */
1853 static void ibmvmc_process_open_resp(struct ibmvmc_crq_msg *crq,
1854 				     struct crq_server_adapter *adapter)
1855 {
1856 	unsigned char hmc_index;
1857 	unsigned short buffer_id;
1858 
1859 	hmc_index = crq->hmc_index;
1860 	if (hmc_index > ibmvmc.max_hmc_index) {
1861 		/* Why would PHYP give an index > max negotiated? */
1862 		ibmvmc_reset(adapter, false);
1863 		return;
1864 	}
1865 
1866 	if (crq->status) {
1867 		dev_warn(adapter->dev, "open_resp: failed - status 0x%x\n",
1868 			 crq->status);
1869 		ibmvmc_return_hmc(&hmcs[hmc_index], false);
1870 		return;
1871 	}
1872 
1873 	if (hmcs[hmc_index].state == ibmhmc_state_opening) {
1874 		buffer_id = be16_to_cpu(crq->var2.buffer_id);
1875 		if (buffer_id >= ibmvmc.max_buffer_pool_size) {
1876 			dev_err(adapter->dev, "open_resp: invalid buffer_id = 0x%x\n",
1877 				buffer_id);
1878 			hmcs[hmc_index].state = ibmhmc_state_failed;
1879 		} else {
1880 			ibmvmc_free_hmc_buffer(&hmcs[hmc_index],
1881 					       &hmcs[hmc_index].buffer[buffer_id]);
1882 			hmcs[hmc_index].state = ibmhmc_state_ready;
1883 			dev_dbg(adapter->dev, "open_resp: set hmc state = ready\n");
1884 		}
1885 	} else {
1886 		dev_warn(adapter->dev, "open_resp: invalid hmc state (0x%x)\n",
1887 			 hmcs[hmc_index].state);
1888 	}
1889 }
1890 
1891 /**
1892  * ibmvmc_process_close_resp - Process Close Response
1893  *
1894  * @crq: ibmvmc_crq_msg struct
1895  * @adapter:    crq_server_adapter struct
1896  *
1897  * This command is sent by the hypervisor in response to the management
1898  * application Interface Close message.
1899  *
1900  * If the close fails, simply reset the entire driver as the state of the VMC
1901  * must be in tough shape.
1902  */
1903 static void ibmvmc_process_close_resp(struct ibmvmc_crq_msg *crq,
1904 				      struct crq_server_adapter *adapter)
1905 {
1906 	unsigned char hmc_index;
1907 
1908 	hmc_index = crq->hmc_index;
1909 	if (hmc_index > ibmvmc.max_hmc_index) {
1910 		ibmvmc_reset(adapter, false);
1911 		return;
1912 	}
1913 
1914 	if (crq->status) {
1915 		dev_warn(adapter->dev, "close_resp: failed - status 0x%x\n",
1916 			 crq->status);
1917 		ibmvmc_reset(adapter, false);
1918 		return;
1919 	}
1920 
1921 	ibmvmc_return_hmc(&hmcs[hmc_index], false);
1922 }
1923 
1924 /**
1925  * ibmvmc_crq_process - Process CRQ
1926  *
1927  * @adapter:    crq_server_adapter struct
1928  * @crq:	ibmvmc_crq_msg struct
1929  *
1930  * Process the CRQ message based upon the type of message received.
1931  *
1932  */
1933 static void ibmvmc_crq_process(struct crq_server_adapter *adapter,
1934 			       struct ibmvmc_crq_msg *crq)
1935 {
1936 	switch (crq->type) {
1937 	case VMC_MSG_CAP_RESP:
1938 		dev_dbg(adapter->dev, "CRQ recv: capabilities resp (0x%x)\n",
1939 			crq->type);
1940 		if (ibmvmc.state == ibmvmc_state_capabilities)
1941 			ibmvmc_process_capabilities(adapter, crq);
1942 		else
1943 			dev_warn(adapter->dev, "caps msg invalid in state 0x%x\n",
1944 				 ibmvmc.state);
1945 		break;
1946 	case VMC_MSG_OPEN_RESP:
1947 		dev_dbg(adapter->dev, "CRQ recv: open resp (0x%x)\n",
1948 			crq->type);
1949 		if (ibmvmc_validate_hmc_session(adapter, crq) == 0)
1950 			ibmvmc_process_open_resp(crq, adapter);
1951 		break;
1952 	case VMC_MSG_ADD_BUF:
1953 		dev_dbg(adapter->dev, "CRQ recv: add buf (0x%x)\n",
1954 			crq->type);
1955 		if (ibmvmc_validate_hmc_session(adapter, crq) == 0)
1956 			ibmvmc_add_buffer(adapter, crq);
1957 		break;
1958 	case VMC_MSG_REM_BUF:
1959 		dev_dbg(adapter->dev, "CRQ recv: rem buf (0x%x)\n",
1960 			crq->type);
1961 		if (ibmvmc_validate_hmc_session(adapter, crq) == 0)
1962 			ibmvmc_rem_buffer(adapter, crq);
1963 		break;
1964 	case VMC_MSG_SIGNAL:
1965 		dev_dbg(adapter->dev, "CRQ recv: signal msg (0x%x)\n",
1966 			crq->type);
1967 		if (ibmvmc_validate_hmc_session(adapter, crq) == 0)
1968 			ibmvmc_recv_msg(adapter, crq);
1969 		break;
1970 	case VMC_MSG_CLOSE_RESP:
1971 		dev_dbg(adapter->dev, "CRQ recv: close resp (0x%x)\n",
1972 			crq->type);
1973 		if (ibmvmc_validate_hmc_session(adapter, crq) == 0)
1974 			ibmvmc_process_close_resp(crq, adapter);
1975 		break;
1976 	case VMC_MSG_CAP:
1977 	case VMC_MSG_OPEN:
1978 	case VMC_MSG_CLOSE:
1979 	case VMC_MSG_ADD_BUF_RESP:
1980 	case VMC_MSG_REM_BUF_RESP:
1981 		dev_warn(adapter->dev, "CRQ recv: unexpected msg (0x%x)\n",
1982 			 crq->type);
1983 		break;
1984 	default:
1985 		dev_warn(adapter->dev, "CRQ recv: unknown msg (0x%x)\n",
1986 			 crq->type);
1987 		break;
1988 	}
1989 }
1990 
1991 /**
1992  * ibmvmc_handle_crq_init - Handle CRQ Init
1993  *
1994  * @crq:	ibmvmc_crq_msg struct
1995  * @adapter:	crq_server_adapter struct
1996  *
1997  * Handle the type of crq initialization based on whether
1998  * it is a message or a response.
1999  *
2000  */
2001 static void ibmvmc_handle_crq_init(struct ibmvmc_crq_msg *crq,
2002 				   struct crq_server_adapter *adapter)
2003 {
2004 	switch (crq->type) {
2005 	case 0x01:	/* Initialization message */
2006 		dev_dbg(adapter->dev, "CRQ recv: CRQ init msg - state 0x%x\n",
2007 			ibmvmc.state);
2008 		if (ibmvmc.state == ibmvmc_state_crqinit) {
2009 			/* Send back a response */
2010 			if (ibmvmc_send_crq(adapter, 0xC002000000000000LL,
2011 					    0) == 0)
2012 				ibmvmc_send_capabilities(adapter);
2013 			else
2014 				dev_err(adapter->dev, "Unable to send init rsp\n");
2015 		} else {
2016 			dev_err(adapter->dev, "Invalid state 0x%x mtu = 0x%x\n",
2017 				ibmvmc.state, ibmvmc.max_mtu);
2018 		}
2019 
2020 		break;
2021 	case 0x02:	/* Initialization response */
2022 		dev_dbg(adapter->dev, "CRQ recv: initialization resp msg - state 0x%x\n",
2023 			ibmvmc.state);
2024 		if (ibmvmc.state == ibmvmc_state_crqinit)
2025 			ibmvmc_send_capabilities(adapter);
2026 		break;
2027 	default:
2028 		dev_warn(adapter->dev, "Unknown crq message type 0x%lx\n",
2029 			 (unsigned long)crq->type);
2030 	}
2031 }
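
/*
 * Handshake summary (illustration of the two cases above): if the
 * partner initialized first, we see its 0xC001... init message, answer
 * with 0xC002... and send capabilities; if we initialized first, the
 * partner's 0xC002... answer arrives here as type 0x02 and we send
 * capabilities directly.  Both paths converge on the capabilities
 * exchange completed by ibmvmc_process_capabilities().
 */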
2032 
2033 /**
2034  * ibmvmc_handle_crq - Handle CRQ
2035  *
2036  * @crq:	ibmvmc_crq_msg struct
2037  * @adapter:	crq_server_adapter struct
2038  *
2039  * Read the command elements from the command queue and execute the
2040  * requests based upon the type of crq message.
2041  *
2042  */
2043 static void ibmvmc_handle_crq(struct ibmvmc_crq_msg *crq,
2044 			      struct crq_server_adapter *adapter)
2045 {
2046 	switch (crq->valid) {
2047 	case 0xC0:		/* initialization */
2048 		ibmvmc_handle_crq_init(crq, adapter);
2049 		break;
2050 	case 0xFF:	/* Hypervisor telling us the connection is closed */
2051 		dev_warn(adapter->dev, "CRQ recv: virtual adapter failed - resetting.\n");
2052 		ibmvmc_reset(adapter, true);
2053 		break;
2054 	case 0x80:	/* real payload */
2055 		ibmvmc_crq_process(adapter, crq);
2056 		break;
2057 	default:
2058 		dev_warn(adapter->dev, "CRQ recv: unknown msg 0x%02x.\n",
2059 			 crq->valid);
2060 		break;
2061 	}
2062 }
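
/*
 * For reference, the first byte ("valid") of each CRQ element selects
 * the path above:
 *
 *	0xC0 - transport initialization, ibmvmc_handle_crq_init()
 *	0x80 - VMC payload, ibmvmc_crq_process()
 *	0xFF - transport event: the connection is gone, schedule a reset
 */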
2063 
2064 static void ibmvmc_task(unsigned long data)
2065 {
2066 	struct crq_server_adapter *adapter =
2067 		(struct crq_server_adapter *)data;
2068 	struct vio_dev *vdev = to_vio_dev(adapter->dev);
2069 	struct ibmvmc_crq_msg *crq;
2070 	int done = 0;
2071 
2072 	while (!done) {
2073 		/* Pull all the valid messages off the CRQ */
2074 		while ((crq = crq_queue_next_crq(&adapter->queue)) != NULL) {
2075 			ibmvmc_handle_crq(crq, adapter);
2076 			crq->valid = 0x00;
2077 			/* CRQ reset was requested, stop processing CRQs.
2078 			 * Interrupts will be re-enabled by the reset task.
2079 			 */
2080 			if (ibmvmc.state == ibmvmc_state_sched_reset)
2081 				return;
2082 		}
2083 
2084 		vio_enable_interrupts(vdev);
2085 		crq = crq_queue_next_crq(&adapter->queue);
2086 		if (crq) {
2087 			vio_disable_interrupts(vdev);
2088 			ibmvmc_handle_crq(crq, adapter);
2089 			crq->valid = 0x00;
2090 			/* CRQ reset was requested, stop processing CRQs.
2091 			 * Interrupts will be re-enabled by the reset task.
2092 			 */
2093 			if (ibmvmc.state == ibmvmc_state_sched_reset)
2094 				return;
2095 		} else {
2096 			done = 1;
2097 		}
2098 	}
2099 }
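
/*
 * Note on the drain loop above: interrupts are re-enabled *before* the
 * final emptiness check.  If an element arrived between the last
 * crq_queue_next_crq() and vio_enable_interrupts(), the recheck catches
 * it (after disabling interrupts again) rather than leaving it stranded
 * until the next interrupt.  In outline:
 *
 *	while ((crq = next()))
 *		handle(crq);
 *	enable_irqs();
 *	if ((crq = next())) {
 *		disable_irqs();
 *		handle(crq);
 *		... and loop again ...
 *	}
 */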
2100 
2101 /**
2102  * ibmvmc_init_crq_queue - Init CRQ Queue
2103  *
2104  * @adapter:	crq_server_adapter struct
2105  *
2106  * Return:
2107  *	0 - Success
2108  *	Non-zero - Failure
2109  */
2110 static int ibmvmc_init_crq_queue(struct crq_server_adapter *adapter)
2111 {
2112 	struct vio_dev *vdev = to_vio_dev(adapter->dev);
2113 	struct crq_queue *queue = &adapter->queue;
2114 	int rc = 0;
2115 	int retrc = 0;
2116 
2117 	queue->msgs = (struct ibmvmc_crq_msg *)get_zeroed_page(GFP_KERNEL);
2118 
2119 	if (!queue->msgs)
2120 		goto malloc_failed;
2121 
2122 	queue->size = PAGE_SIZE / sizeof(*queue->msgs);
2123 
2124 	queue->msg_token = dma_map_single(adapter->dev, queue->msgs,
2125 					  queue->size * sizeof(*queue->msgs),
2126 					  DMA_BIDIRECTIONAL);
2127 
2128 	if (dma_mapping_error(adapter->dev, queue->msg_token))
2129 		goto map_failed;
2130 
2131 	rc = plpar_hcall_norets(H_REG_CRQ,
2132 				vdev->unit_address,
2133 				queue->msg_token, PAGE_SIZE);
2134 	retrc = rc;
2135 
2136 	if (rc == H_RESOURCE)
2137 		rc = ibmvmc_reset_crq_queue(adapter);
2138 
2139 	if (rc == 2) {		/* H_CLOSED: partner not yet ready */
2140 		dev_warn(adapter->dev, "Partner adapter not ready\n");
2141 		retrc = 0;
2142 	} else if (rc != 0) {
2143 		dev_err(adapter->dev, "Error %d opening adapter\n", rc);
2144 		goto reg_crq_failed;
2145 	}
2146 
2147 	queue->cur = 0;
2148 	spin_lock_init(&queue->lock);
2149 
2150 	tasklet_init(&adapter->work_task, ibmvmc_task, (unsigned long)adapter);
2151 
2152 	if (request_irq(vdev->irq,
2153 			ibmvmc_handle_event,
2154 			0, "ibmvmc", (void *)adapter) != 0) {
2155 		dev_err(adapter->dev, "couldn't register irq 0x%x\n",
2156 			vdev->irq);
2157 		goto req_irq_failed;
2158 	}
2159 
2160 	rc = vio_enable_interrupts(vdev);
2161 	if (rc != 0) {
2162 		dev_err(adapter->dev, "Error %d enabling interrupts\n", rc);
2163 		goto req_irq_failed;
2164 	}
2165 
2166 	return retrc;
2167 
2168 req_irq_failed:
2169 	/* Cannot have any work since we either never got our IRQ registered,
2170 	 * or never got interrupts enabled
2171 	 */
2172 	tasklet_kill(&adapter->work_task);
2173 	h_free_crq(vdev->unit_address);
2174 reg_crq_failed:
2175 	dma_unmap_single(adapter->dev,
2176 			 queue->msg_token,
2177 			 queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL);
2178 map_failed:
2179 	free_page((unsigned long)queue->msgs);
2180 malloc_failed:
2181 	return -ENOMEM;
2182 }
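
/*
 * The unwind labels above release resources in strict reverse order of
 * acquisition: kill the tasklet and free the CRQ registration via
 * h_free_crq(), then the DMA mapping, then the page itself.  Any setup
 * step added to this function needs a matching label at the
 * corresponding depth.
 */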
2183 
2184 /* Fill in the liobn and riobn fields on the adapter */
2185 static int read_dma_window(struct vio_dev *vdev,
2186 			   struct crq_server_adapter *adapter)
2187 {
2188 	const __be32 *dma_window;
2189 	const __be32 *prop;
2190 
2191 	/* TODO Using of_parse_dma_window would be better, but it doesn't give
2192 	 * a way to read multiple windows without already knowing the size of
2193 	 * a window or the number of windows
2194 	 */
2195 	dma_window =
2196 		(const __be32 *)vio_get_attribute(vdev, "ibm,my-dma-window",
2197 						NULL);
2198 	if (!dma_window) {
2199 		dev_warn(adapter->dev, "Couldn't find ibm,my-dma-window property\n");
2200 		return -1;
2201 	}
2202 
2203 	adapter->liobn = be32_to_cpu(*dma_window);
2204 	dma_window++;
2205 
2206 	prop = (const __be32 *)vio_get_attribute(vdev, "ibm,#dma-address-cells",
2207 						NULL);
2208 	if (!prop) {
2209 		dev_warn(adapter->dev, "Couldn't find ibm,#dma-address-cells property\n");
2210 		dma_window++;
2211 	} else {
2212 		dma_window += be32_to_cpu(*prop);
2213 	}
2214 
2215 	prop = (const __be32 *)vio_get_attribute(vdev, "ibm,#dma-size-cells",
2216 						NULL);
2217 	if (!prop) {
2218 		dev_warn(adapter->dev, "Couldn't find ibm,#dma-size-cells property\n");
2219 		dma_window++;
2220 	} else {
2221 		dma_window += be32_to_cpu(*prop);
2222 	}
2223 
2224 	/* dma_window should point to the second window now */
2225 	adapter->riobn = be32_to_cpu(*dma_window);
2226 
2227 	return 0;
2228 }
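
/*
 * Illustrative property layout this parser assumes (cell counts are an
 * example; the real counts come from "ibm,#dma-address-cells" and
 * "ibm,#dma-size-cells", with a fallback of one cell each when a
 * property is absent):
 *
 *	ibm,my-dma-window = <liobn addr... size...
 *			     riobn addr... size...>;
 *
 * The first cell of each window is its IOBN: the local window supplies
 * adapter->liobn, the second (remote) window supplies adapter->riobn.
 */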
2229 
2230 static int ibmvmc_probe(struct vio_dev *vdev, const struct vio_device_id *id)
2231 {
2232 	struct crq_server_adapter *adapter = &ibmvmc_adapter;
2233 	int rc;
2234 
2235 	dev_set_drvdata(&vdev->dev, NULL);
2236 	memset(adapter, 0, sizeof(*adapter));
2237 	adapter->dev = &vdev->dev;
2238 
2239 	dev_info(adapter->dev, "Probe for UA 0x%x\n", vdev->unit_address);
2240 
2241 	rc = read_dma_window(vdev, adapter);
2242 	if (rc != 0) {
2243 		ibmvmc.state = ibmvmc_state_failed;
2244 		return -1;
2245 	}
2246 
2247 	dev_dbg(adapter->dev, "Probe: liobn 0x%x, riobn 0x%x\n",
2248 		adapter->liobn, adapter->riobn);
2249 
2250 	init_waitqueue_head(&adapter->reset_wait_queue);
2251 	adapter->reset_task = kthread_run(ibmvmc_reset_task, adapter, "ibmvmc");
2252 	if (IS_ERR(adapter->reset_task)) {
2253 		dev_err(adapter->dev, "Failed to start reset thread\n");
2254 		ibmvmc.state = ibmvmc_state_failed;
2255 		rc = PTR_ERR(adapter->reset_task);
2256 		adapter->reset_task = NULL;
2257 		return rc;
2258 	}
2259 
2260 	rc = ibmvmc_init_crq_queue(adapter);
2261 	if (rc != 0 && rc != H_RESOURCE) {
2262 		dev_err(adapter->dev, "Error initializing CRQ.  rc = 0x%x\n",
2263 			rc);
2264 		ibmvmc.state = ibmvmc_state_failed;
2265 		goto crq_failed;
2266 	}
2267 
2268 	ibmvmc.state = ibmvmc_state_crqinit;
2269 
2270 	/* Try to send an initialization message.  Note that this is allowed
2271 	 * to fail if the other end is not acive.  In that case we just wait
2272 	 * for the other side to initialize.
2273 	 */
2274 	if (ibmvmc_send_crq(adapter, 0xC001000000000000LL, 0) != 0 &&
2275 	    rc != H_RESOURCE)
2276 		dev_warn(adapter->dev, "Failed to send initialize CRQ message\n");
2277 
2278 	dev_set_drvdata(&vdev->dev, adapter);
2279 
2280 	return 0;
2281 
2282 crq_failed:
2283 	kthread_stop(adapter->reset_task);
2284 	adapter->reset_task = NULL;
2285 	return -EPERM;
2286 }
2287 
2288 static int ibmvmc_remove(struct vio_dev *vdev)
2289 {
2290 	struct crq_server_adapter *adapter = dev_get_drvdata(&vdev->dev);
2291 
2292 	dev_info(adapter->dev, "Entering remove for UA 0x%x\n",
2293 		 vdev->unit_address);
2294 	ibmvmc_release_crq_queue(adapter);
2295 
2296 	return 0;
2297 }
2298 
2299 static struct vio_device_id ibmvmc_device_table[] = {
2300 	{ "ibm,vmc", "IBM,vmc" },
2301 	{ "", "" }
2302 };
2303 MODULE_DEVICE_TABLE(vio, ibmvmc_device_table);
2304 
2305 static struct vio_driver ibmvmc_driver = {
2306 	.name        = ibmvmc_driver_name,
2307 	.id_table    = ibmvmc_device_table,
2308 	.probe       = ibmvmc_probe,
2309 	.remove      = ibmvmc_remove,
2310 };
2311 
2312 static void __init ibmvmc_scrub_module_parms(void)
2313 {
2314 	if (ibmvmc_max_mtu > MAX_MTU) {
2315 		pr_warn("ibmvmc: Max MTU reduced to %d\n", MAX_MTU);
2316 		ibmvmc_max_mtu = MAX_MTU;
2317 	} else if (ibmvmc_max_mtu < MIN_MTU) {
2318 		pr_warn("ibmvmc: Max MTU increased to %d\n", MIN_MTU);
2319 		ibmvmc_max_mtu = MIN_MTU;
2320 	}
2321 
2322 	if (ibmvmc_max_buf_pool_size > MAX_BUF_POOL_SIZE) {
2323 		pr_warn("ibmvmc: Max buffer pool size reduced to %d\n",
2324 			MAX_BUF_POOL_SIZE);
2325 		ibmvmc_max_buf_pool_size = MAX_BUF_POOL_SIZE;
2326 	} else if (ibmvmc_max_buf_pool_size < MIN_BUF_POOL_SIZE) {
2327 		pr_warn("ibmvmc: Max buffer pool size increased to %d\n",
2328 			MIN_BUF_POOL_SIZE);
2329 		ibmvmc_max_buf_pool_size = MIN_BUF_POOL_SIZE;
2330 	}
2331 
2332 	if (ibmvmc_max_hmcs > MAX_HMCS) {
2333 		pr_warn("ibmvmc: Max HMCs reduced to %d\n", MAX_HMCS);
2334 		ibmvmc_max_hmcs = MAX_HMCS;
2335 	} else if (ibmvmc_max_hmcs < MIN_HMCS) {
2336 		pr_warn("ibmvmc: Max HMCs increased to %d\n", MIN_HMCS);
2337 		ibmvmc_max_hmcs = MIN_HMCS;
2338 	}
2339 }
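
/*
 * Example (illustrative values): loading the module with out-of-range
 * parameters, e.g.
 *
 *	modprobe ibmvmc max_mtu=1 max_hmcs=1024 buf_pool_size=1
 *
 * does not fail; each value is clamped into [MIN_MTU, MAX_MTU],
 * [MIN_HMCS, MAX_HMCS] or [MIN_BUF_POOL_SIZE, MAX_BUF_POOL_SIZE] by the
 * function above, with one warning logged per adjustment.  The names
 * match the module_param_named() declarations at the end of this file.
 */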
2340 
2341 static struct miscdevice ibmvmc_miscdev = {
2342 	.name = ibmvmc_driver_name,
2343 	.minor = MISC_DYNAMIC_MINOR,
2344 	.fops = &ibmvmc_fops,
2345 };
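
/*
 * With MISC_DYNAMIC_MINOR the device registers under the misc major
 * (10, as printed by ibmvmc_module_init() below) and appears as
 * /dev/ibmvmc.  A minimal userspace sketch, assuming only the
 * character-device interface exposed through ibmvmc_fops:
 *
 *	int fd = open("/dev/ibmvmc", O_RDWR);
 *
 * read(), write() and poll() on the descriptor then map onto the fops
 * defined earlier in this file.
 */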
2346 
2347 static int __init ibmvmc_module_init(void)
2348 {
2349 	int rc, i, j;
2350 
2351 	ibmvmc.state = ibmvmc_state_initial;
2352 	pr_info("ibmvmc: version %s\n", IBMVMC_DRIVER_VERSION);
2353 
2354 	rc = misc_register(&ibmvmc_miscdev);
2355 	if (rc) {
2356 		pr_err("ibmvmc: misc registration failed\n");
2357 		goto misc_register_failed;
2358 	}
2359 	pr_info("ibmvmc: node %d:%d\n", MISC_MAJOR,
2360 		ibmvmc_miscdev.minor);
2361 
2362 	/* Initialize data structures */
2363 	memset(hmcs, 0, sizeof(struct ibmvmc_hmc) * MAX_HMCS);
2364 	for (i = 0; i < MAX_HMCS; i++) {
2365 		spin_lock_init(&hmcs[i].lock);
2366 		hmcs[i].state = ibmhmc_state_free;
2367 		for (j = 0; j < MAX_BUF_POOL_SIZE; j++)
2368 			hmcs[i].queue_outbound_msgs[j] = VMC_INVALID_BUFFER_ID;
2369 	}
2370 
2371 	/* Sanity check module parms */
2372 	ibmvmc_scrub_module_parms();
2373 
2374 	/*
2375 	 * Initialize some reasonable values.  Might be negotiated smaller
2376 	 * values during the capabilities exchange.
2377 	 */
2378 	ibmvmc.max_mtu = ibmvmc_max_mtu;
2379 	ibmvmc.max_buffer_pool_size = ibmvmc_max_buf_pool_size;
2380 	ibmvmc.max_hmc_index = ibmvmc_max_hmcs - 1;
2381 
2382 	rc = vio_register_driver(&ibmvmc_driver);
2383 
2384 	if (rc) {
2385 		pr_err("ibmvmc: rc %d from vio_register_driver\n", rc);
2386 		goto vio_reg_failed;
2387 	}
2388 
2389 	return 0;
2390 
2391 vio_reg_failed:
2392 	misc_deregister(&ibmvmc_miscdev);
2393 misc_register_failed:
2394 	return rc;
2395 }
2396 
2397 static void __exit ibmvmc_module_exit(void)
2398 {
2399 	pr_info("ibmvmc: module exit\n");
2400 	vio_unregister_driver(&ibmvmc_driver);
2401 	misc_deregister(&ibmvmc_miscdev);
2402 }
2403 
2404 module_init(ibmvmc_module_init);
2405 module_exit(ibmvmc_module_exit);
2406 
2407 module_param_named(buf_pool_size, ibmvmc_max_buf_pool_size,
2408 		   int, 0644);
2409 MODULE_PARM_DESC(buf_pool_size, "Buffer pool size");
2410 module_param_named(max_hmcs, ibmvmc_max_hmcs, int, 0644);
2411 MODULE_PARM_DESC(max_hmcs, "Max HMCs");
2412 module_param_named(max_mtu, ibmvmc_max_mtu, int, 0644);
2413 MODULE_PARM_DESC(max_mtu, "Max MTU");
2414 
2415 MODULE_AUTHOR("Steven Royer <seroyer@linux.vnet.ibm.com>");
2416 MODULE_DESCRIPTION("IBM VMC");
2417 MODULE_VERSION(IBMVMC_DRIVER_VERSION);
2418 MODULE_LICENSE("GPL v2");
2419