1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  *
4  *			Linux MegaRAID device driver
5  *
6  * Copyright (c) 2003-2004  LSI Logic Corporation.
7  *
8  * FILE		: megaraid_mm.c
9  * Version	: v2.20.2.7 (Jul 16 2006)
10  *
11  * Common management module
12  */
13 #include <linux/sched.h>
14 #include <linux/slab.h>
15 #include <linux/mutex.h>
16 #include "megaraid_mm.h"
17 
18 
19 // Entry points for char node driver
20 static DEFINE_MUTEX(mraid_mm_mutex);
21 static int mraid_mm_open(struct inode *, struct file *);
22 static long mraid_mm_unlocked_ioctl(struct file *, uint, unsigned long);
23 
24 
25 // routines to convert to and from the old the format
26 static int mimd_to_kioc(mimd_t __user *, mraid_mmadp_t *, uioc_t *);
27 static int kioc_to_mimd(uioc_t *, mimd_t __user *);
28 
29 
30 // Helper functions
31 static int handle_drvrcmd(void __user *, uint8_t, int *);
32 static int lld_ioctl(mraid_mmadp_t *, uioc_t *);
33 static void ioctl_done(uioc_t *);
34 static void lld_timedout(struct timer_list *);
35 static void hinfo_to_cinfo(mraid_hba_info_t *, mcontroller_t *);
36 static mraid_mmadp_t *mraid_mm_get_adapter(mimd_t __user *, int *);
37 static uioc_t *mraid_mm_alloc_kioc(mraid_mmadp_t *);
38 static void mraid_mm_dealloc_kioc(mraid_mmadp_t *, uioc_t *);
39 static int mraid_mm_attach_buf(mraid_mmadp_t *, uioc_t *, int);
40 static int mraid_mm_setup_dma_pools(mraid_mmadp_t *);
41 static void mraid_mm_free_adp_resources(mraid_mmadp_t *);
42 static void mraid_mm_teardown_dma_pools(mraid_mmadp_t *);
43 
44 MODULE_AUTHOR("LSI Logic Corporation");
45 MODULE_DESCRIPTION("LSI Logic Management Module");
46 MODULE_LICENSE("GPL");
47 MODULE_VERSION(LSI_COMMON_MOD_VERSION);
48 
49 static int dbglevel = CL_ANN;
50 module_param_named(dlevel, dbglevel, int, 0);
51 MODULE_PARM_DESC(dlevel, "Debug level (default=0)");
52 
53 EXPORT_SYMBOL(mraid_mm_register_adp);
54 EXPORT_SYMBOL(mraid_mm_unregister_adp);
55 EXPORT_SYMBOL(mraid_mm_adapter_app_handle);
56 
57 static uint32_t drvr_ver	= 0x02200207;
58 
59 static int adapters_count_g;
60 static struct list_head adapters_list_g;
61 
62 static wait_queue_head_t wait_q;
63 
64 static const struct file_operations lsi_fops = {
65 	.open	= mraid_mm_open,
66 	.unlocked_ioctl = mraid_mm_unlocked_ioctl,
67 	.compat_ioctl = compat_ptr_ioctl,
68 	.owner	= THIS_MODULE,
69 	.llseek = noop_llseek,
70 };
71 
72 static struct miscdevice megaraid_mm_dev = {
73 	.minor	= MISC_DYNAMIC_MINOR,
74 	.name   = "megadev0",
75 	.fops   = &lsi_fops,
76 };
77 
78 /**
79  * mraid_mm_open - open routine for char node interface
80  * @inode	: unused
81  * @filep	: unused
82  *
83  * Allow ioctl operations by apps only if they have superuser privilege.
84  */
85 static int
86 mraid_mm_open(struct inode *inode, struct file *filep)
87 {
88 	/*
89 	 * Only allow superuser to access private ioctl interface
90 	 */
91 	if (!capable(CAP_SYS_ADMIN)) return (-EACCES);
92 
93 	return 0;
94 }
95 
96 /**
97  * mraid_mm_ioctl - module entry-point for ioctls
98  * @inode	: inode (ignored)
99  * @filep	: file operations pointer (ignored)
100  * @cmd		: ioctl command
101  * @arg		: user ioctl packet
102  */
static int
mraid_mm_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
{
	uioc_t		*kioc;
	char		signature[EXT_IOCTL_SIGN_SZ]	= {0};
	int		rval;
	mraid_mmadp_t	*adp;
	uint8_t		old_ioctl;
	int		drvrcmd_rval;
	void __user *argp = (void __user *)arg;

	/*
	 * Make sure only USCSICMD are issued through this interface.
	 * MIMD application would still fire different command.
	 */

	if ((_IOC_TYPE(cmd) != MEGAIOC_MAGIC) && (cmd != USCSICMD)) {
		return (-EINVAL);
	}

	/*
	 * Look for signature to see if this is the new or old ioctl format.
	 */
	if (copy_from_user(signature, argp, EXT_IOCTL_SIGN_SZ)) {
		con_log(CL_ANN, (KERN_WARNING
			"megaraid cmm: copy from usr addr failed\n"));
		return (-EFAULT);
	}

	/* only the new (uioc_t) packet carries the signature bytes */
	if (memcmp(signature, EXT_IOCTL_SIGN, EXT_IOCTL_SIGN_SZ) == 0)
		old_ioctl = 0;
	else
		old_ioctl = 1;

	/*
	 * At present, we don't support the new ioctl packet
	 */
	if (!old_ioctl )
		return (-EINVAL);

	/*
	 * If it is a driver ioctl (as opposed to fw ioctls), then we can
	 * handle the command locally. rval > 0 means it is not a drvr cmd
	 */
	rval = handle_drvrcmd(argp, old_ioctl, &drvrcmd_rval);

	if (rval < 0)
		return rval;
	else if (rval == 0)
		return drvrcmd_rval;

	/* Firmware command: locate the adapter the packet is addressed to */
	rval = 0;
	if ((adp = mraid_mm_get_adapter(argp, &rval)) == NULL) {
		return rval;
	}

	/*
	 * Check if adapter can accept ioctl. We may have marked it offline
	 * if any previous kioc had timedout on this controller.
	 */
	if (!adp->quiescent) {
		con_log(CL_ANN, (KERN_WARNING
			"megaraid cmm: controller cannot accept cmds due to "
			"earlier errors\n" ));
		return -EFAULT;
	}

	/*
	 * The following call will block till a kioc is available
	 * or return NULL if the list head is empty for the pointer
	 * of type mraid_mmapt passed to mraid_mm_alloc_kioc
	 */
	kioc = mraid_mm_alloc_kioc(adp);
	if (!kioc)
		return -ENXIO;

	/*
	 * User sent the old mimd_t ioctl packet. Convert it to uioc_t.
	 */
	if ((rval = mimd_to_kioc(argp, adp, kioc))) {
		mraid_mm_dealloc_kioc(adp, kioc);
		return rval;
	}

	kioc->done = ioctl_done;

	/*
	 * Issue the IOCTL to the low level driver. After the IOCTL completes
	 * release the kioc if and only if it was _not_ timedout. If it was
	 * timedout, that means that resources are still with low level driver.
	 */
	if ((rval = lld_ioctl(adp, kioc))) {

		if (!kioc->timedout)
			mraid_mm_dealloc_kioc(adp, kioc);

		return rval;
	}

	/*
	 * Convert the kioc back to user space
	 */
	rval = kioc_to_mimd(kioc, argp);

	/*
	 * Return the kioc to free pool
	 */
	mraid_mm_dealloc_kioc(adp, kioc);

	return rval;
}
214 
215 static long
216 mraid_mm_unlocked_ioctl(struct file *filep, unsigned int cmd,
217 		        unsigned long arg)
218 {
219 	int err;
220 
221 	mutex_lock(&mraid_mm_mutex);
222 	err = mraid_mm_ioctl(filep, cmd, arg);
223 	mutex_unlock(&mraid_mm_mutex);
224 
225 	return err;
226 }
227 
228 /**
229  * mraid_mm_get_adapter - Returns corresponding adapters for the mimd packet
230  * @umimd	: User space mimd_t ioctl packet
231  * @rval	: returned success/error status
232  *
233  * The function return value is a pointer to the located @adapter.
234  */
235 static mraid_mmadp_t *
236 mraid_mm_get_adapter(mimd_t __user *umimd, int *rval)
237 {
238 	mraid_mmadp_t	*adapter;
239 	mimd_t		mimd;
240 	uint32_t	adapno;
241 	int		iterator;
242 
243 
244 	if (copy_from_user(&mimd, umimd, sizeof(mimd_t))) {
245 		*rval = -EFAULT;
246 		return NULL;
247 	}
248 
249 	adapno = GETADAP(mimd.ui.fcs.adapno);
250 
251 	if (adapno >= adapters_count_g) {
252 		*rval = -ENODEV;
253 		return NULL;
254 	}
255 
256 	adapter = NULL;
257 	iterator = 0;
258 
259 	list_for_each_entry(adapter, &adapters_list_g, list) {
260 		if (iterator++ == adapno) break;
261 	}
262 
263 	if (!adapter) {
264 		*rval = -ENODEV;
265 		return NULL;
266 	}
267 
268 	return adapter;
269 }
270 
271 /**
272  * handle_drvrcmd - Checks if the opcode is a driver cmd and if it is, handles it.
273  * @arg		: packet sent by the user app
274  * @old_ioctl	: mimd if 1; uioc otherwise
275  * @rval	: pointer for command's returned value (not function status)
276  */
277 static int
278 handle_drvrcmd(void __user *arg, uint8_t old_ioctl, int *rval)
279 {
280 	mimd_t		__user *umimd;
281 	mimd_t		kmimd;
282 	uint8_t		opcode;
283 	uint8_t		subopcode;
284 
285 	if (old_ioctl)
286 		goto old_packet;
287 	else
288 		goto new_packet;
289 
290 new_packet:
291 	return (-ENOTSUPP);
292 
293 old_packet:
294 	*rval = 0;
295 	umimd = arg;
296 
297 	if (copy_from_user(&kmimd, umimd, sizeof(mimd_t)))
298 		return (-EFAULT);
299 
300 	opcode		= kmimd.ui.fcs.opcode;
301 	subopcode	= kmimd.ui.fcs.subopcode;
302 
303 	/*
304 	 * If the opcode is 0x82 and the subopcode is either GET_DRVRVER or
305 	 * GET_NUMADP, then we can handle. Otherwise we should return 1 to
306 	 * indicate that we cannot handle this.
307 	 */
308 	if (opcode != 0x82)
309 		return 1;
310 
311 	switch (subopcode) {
312 
313 	case MEGAIOC_QDRVRVER:
314 
315 		if (copy_to_user(kmimd.data, &drvr_ver, sizeof(uint32_t)))
316 			return (-EFAULT);
317 
318 		return 0;
319 
320 	case MEGAIOC_QNADAP:
321 
322 		*rval = adapters_count_g;
323 
324 		if (copy_to_user(kmimd.data, &adapters_count_g,
325 				sizeof(uint32_t)))
326 			return (-EFAULT);
327 
328 		return 0;
329 
330 	default:
331 		/* cannot handle */
332 		return 1;
333 	}
334 
335 	return 0;
336 }
337 
338 
339 /**
340  * mimd_to_kioc	- Converter from old to new ioctl format
341  * @umimd	: user space old MIMD IOCTL
342  * @adp		: adapter softstate
343  * @kioc	: kernel space new format IOCTL
344  *
345  * Routine to convert MIMD interface IOCTL to new interface IOCTL packet. The
346  * new packet is in kernel space so that driver can perform operations on it
347  * freely.
348  */
349 
static int
mimd_to_kioc(mimd_t __user *umimd, mraid_mmadp_t *adp, uioc_t *kioc)
{
	mbox64_t		*mbox64;
	mbox_t			*mbox;
	mraid_passthru_t	*pthru32;
	uint32_t		adapno;
	uint8_t			opcode;
	uint8_t			subopcode;
	mimd_t			mimd;

	if (copy_from_user(&mimd, umimd, sizeof(mimd_t)))
		return (-EFAULT);

	/*
	 * Applications are not allowed to send extd pthru
	 */
	if ((mimd.mbox[0] == MBOXCMD_PASSTHRU64) ||
			(mimd.mbox[0] == MBOXCMD_EXTPTHRU))
		return (-EINVAL);

	opcode		= mimd.ui.fcs.opcode;
	subopcode	= mimd.ui.fcs.subopcode;
	adapno		= GETADAP(mimd.ui.fcs.adapno);

	/* Reject references to adapters that were never registered */
	if (adapno >= adapters_count_g)
		return (-ENODEV);

	kioc->adapno	= adapno;
	kioc->mb_type	= MBOX_LEGACY;
	kioc->app_type	= APPTYPE_MIMD;

	switch (opcode) {

	case 0x82:
		/* Driver command: only the adapter-info query is valid */

		if (subopcode == MEGAIOC_QADAPINFO) {

			kioc->opcode	= GET_ADAP_INFO;
			kioc->data_dir	= UIOC_RD;
			kioc->xferlen	= sizeof(mraid_hba_info_t);

			if (mraid_mm_attach_buf(adp, kioc, kioc->xferlen))
				return (-ENOMEM);
		}
		else {
			con_log(CL_ANN, (KERN_WARNING
					"megaraid cmm: Invalid subop\n"));
			return (-EINVAL);
		}

		break;

	case 0x81:
		/* Mailbox cmd: transfer length and buffer come from fcs */

		kioc->opcode		= MBOX_CMD;
		kioc->xferlen		= mimd.ui.fcs.length;
		kioc->user_data_len	= kioc->xferlen;
		kioc->user_data		= mimd.ui.fcs.buffer;

		if (mraid_mm_attach_buf(adp, kioc, kioc->xferlen))
			return (-ENOMEM);

		if (mimd.outlen) kioc->data_dir  = UIOC_RD;
		if (mimd.inlen) kioc->data_dir |= UIOC_WR;

		break;

	case 0x80:
		/* Mailbox cmd: transfer length is max of in/out lengths */

		kioc->opcode		= MBOX_CMD;
		kioc->xferlen		= (mimd.outlen > mimd.inlen) ?
						mimd.outlen : mimd.inlen;
		kioc->user_data_len	= kioc->xferlen;
		kioc->user_data		= mimd.data;

		if (mraid_mm_attach_buf(adp, kioc, kioc->xferlen))
			return (-ENOMEM);

		if (mimd.outlen) kioc->data_dir  = UIOC_RD;
		if (mimd.inlen) kioc->data_dir |= UIOC_WR;

		break;

	default:
		return (-EINVAL);
	}

	/*
	 * If driver command, nothing else to do
	 */
	if (opcode == 0x82)
		return 0;

	/*
	 * This is a mailbox cmd; copy the mailbox from mimd
	 */
	mbox64	= (mbox64_t *)((unsigned long)kioc->cmdbuf);
	mbox	= &mbox64->mbox32;
	memcpy(mbox, mimd.mbox, 14);	/* only the first 14 command bytes */

	if (mbox->cmd != MBOXCMD_PASSTHRU) {	// regular DCMD

		mbox->xferaddr	= (uint32_t)kioc->buf_paddr;

		if (kioc->data_dir & UIOC_WR) {
			/* stage the application's data into the dma buffer */
			if (copy_from_user(kioc->buf_vaddr, kioc->user_data,
							kioc->xferlen)) {
				return (-EFAULT);
			}
		}

		return 0;
	}

	/*
	 * This is a regular 32-bit pthru cmd; mbox points to pthru struct.
	 * Just like in above case, the beginning for memblk is treated as
	 * a mailbox. The passthru will begin at next 1K boundary. And the
	 * data will start 1K after that.
	 */
	pthru32			= kioc->pthru32;
	kioc->user_pthru	= &umimd->pthru;
	mbox->xferaddr		= (uint32_t)kioc->pthru32_h;

	if (copy_from_user(pthru32, kioc->user_pthru,
			sizeof(mraid_passthru_t))) {
		return (-EFAULT);
	}

	pthru32->dataxferaddr	= kioc->buf_paddr;
	if (kioc->data_dir & UIOC_WR) {
		/* reject lengths larger than the attached dma buffer */
		if (pthru32->dataxferlen > kioc->xferlen)
			return -EINVAL;
		if (copy_from_user(kioc->buf_vaddr, kioc->user_data,
						pthru32->dataxferlen)) {
			return (-EFAULT);
		}
	}

	return 0;
}
492 
493 /**
494  * mraid_mm_attch_buf - Attach a free dma buffer for required size
495  * @adp		: Adapter softstate
496  * @kioc	: kioc that the buffer needs to be attached to
497  * @xferlen	: required length for buffer
498  *
499  * First we search for a pool with smallest buffer that is >= @xferlen. If
500  * that pool has no free buffer, we will try for the next bigger size. If none
501  * is available, we will try to allocate the smallest buffer that is >=
502  * @xferlen and attach it the pool.
503  */
504 static int
505 mraid_mm_attach_buf(mraid_mmadp_t *adp, uioc_t *kioc, int xferlen)
506 {
507 	mm_dmapool_t	*pool;
508 	int		right_pool = -1;
509 	unsigned long	flags;
510 	int		i;
511 
512 	kioc->pool_index	= -1;
513 	kioc->buf_vaddr		= NULL;
514 	kioc->buf_paddr		= 0;
515 	kioc->free_buf		= 0;
516 
517 	/*
518 	 * We need xferlen amount of memory. See if we can get it from our
519 	 * dma pools. If we don't get exact size, we will try bigger buffer
520 	 */
521 
522 	for (i = 0; i < MAX_DMA_POOLS; i++) {
523 
524 		pool = &adp->dma_pool_list[i];
525 
526 		if (xferlen > pool->buf_size)
527 			continue;
528 
529 		if (right_pool == -1)
530 			right_pool = i;
531 
532 		spin_lock_irqsave(&pool->lock, flags);
533 
534 		if (!pool->in_use) {
535 
536 			pool->in_use		= 1;
537 			kioc->pool_index	= i;
538 			kioc->buf_vaddr		= pool->vaddr;
539 			kioc->buf_paddr		= pool->paddr;
540 
541 			spin_unlock_irqrestore(&pool->lock, flags);
542 			return 0;
543 		}
544 		else {
545 			spin_unlock_irqrestore(&pool->lock, flags);
546 			continue;
547 		}
548 	}
549 
550 	/*
551 	 * If xferlen doesn't match any of our pools, return error
552 	 */
553 	if (right_pool == -1)
554 		return -EINVAL;
555 
556 	/*
557 	 * We did not get any buffer from the preallocated pool. Let us try
558 	 * to allocate one new buffer. NOTE: This is a blocking call.
559 	 */
560 	pool = &adp->dma_pool_list[right_pool];
561 
562 	spin_lock_irqsave(&pool->lock, flags);
563 
564 	kioc->pool_index	= right_pool;
565 	kioc->free_buf		= 1;
566 	kioc->buf_vaddr		= dma_pool_alloc(pool->handle, GFP_ATOMIC,
567 							&kioc->buf_paddr);
568 	spin_unlock_irqrestore(&pool->lock, flags);
569 
570 	if (!kioc->buf_vaddr)
571 		return -ENOMEM;
572 
573 	return 0;
574 }
575 
576 /**
577  * mraid_mm_alloc_kioc - Returns a uioc_t from free list
578  * @adp	: Adapter softstate for this module
579  *
580  * The kioc_semaphore is initialized with number of kioc nodes in the
581  * free kioc pool. If the kioc pool is empty, this function blocks till
582  * a kioc becomes free.
583  */
584 static uioc_t *
585 mraid_mm_alloc_kioc(mraid_mmadp_t *adp)
586 {
587 	uioc_t			*kioc;
588 	struct list_head*	head;
589 	unsigned long		flags;
590 
591 	down(&adp->kioc_semaphore);
592 
593 	spin_lock_irqsave(&adp->kioc_pool_lock, flags);
594 
595 	head = &adp->kioc_pool;
596 
597 	if (list_empty(head)) {
598 		up(&adp->kioc_semaphore);
599 		spin_unlock_irqrestore(&adp->kioc_pool_lock, flags);
600 
601 		con_log(CL_ANN, ("megaraid cmm: kioc list empty!\n"));
602 		return NULL;
603 	}
604 
605 	kioc = list_entry(head->next, uioc_t, list);
606 	list_del_init(&kioc->list);
607 
608 	spin_unlock_irqrestore(&adp->kioc_pool_lock, flags);
609 
610 	memset((caddr_t)(unsigned long)kioc->cmdbuf, 0, sizeof(mbox64_t));
611 	memset((caddr_t) kioc->pthru32, 0, sizeof(mraid_passthru_t));
612 
613 	kioc->buf_vaddr		= NULL;
614 	kioc->buf_paddr		= 0;
615 	kioc->pool_index	=-1;
616 	kioc->free_buf		= 0;
617 	kioc->user_data		= NULL;
618 	kioc->user_data_len	= 0;
619 	kioc->user_pthru	= NULL;
620 	kioc->timedout		= 0;
621 
622 	return kioc;
623 }
624 
625 /**
626  * mraid_mm_dealloc_kioc - Return kioc to free pool
627  * @adp		: Adapter softstate
628  * @kioc	: uioc_t node to be returned to free pool
629  */
630 static void
631 mraid_mm_dealloc_kioc(mraid_mmadp_t *adp, uioc_t *kioc)
632 {
633 	mm_dmapool_t	*pool;
634 	unsigned long	flags;
635 
636 	if (kioc->pool_index != -1) {
637 		pool = &adp->dma_pool_list[kioc->pool_index];
638 
639 		/* This routine may be called in non-isr context also */
640 		spin_lock_irqsave(&pool->lock, flags);
641 
642 		/*
643 		 * While attaching the dma buffer, if we didn't get the
644 		 * required buffer from the pool, we would have allocated
645 		 * it at the run time and set the free_buf flag. We must
646 		 * free that buffer. Otherwise, just mark that the buffer is
647 		 * not in use
648 		 */
649 		if (kioc->free_buf == 1)
650 			dma_pool_free(pool->handle, kioc->buf_vaddr,
651 							kioc->buf_paddr);
652 		else
653 			pool->in_use = 0;
654 
655 		spin_unlock_irqrestore(&pool->lock, flags);
656 	}
657 
658 	/* Return the kioc to the free pool */
659 	spin_lock_irqsave(&adp->kioc_pool_lock, flags);
660 	list_add(&kioc->list, &adp->kioc_pool);
661 	spin_unlock_irqrestore(&adp->kioc_pool_lock, flags);
662 
663 	/* increment the free kioc count */
664 	up(&adp->kioc_semaphore);
665 
666 	return;
667 }
668 
669 /**
670  * lld_ioctl - Routine to issue ioctl to low level drvr
671  * @adp		: The adapter handle
672  * @kioc	: The ioctl packet with kernel addresses
673  */
static int
lld_ioctl(mraid_mmadp_t *adp, uioc_t *kioc)
{
	int			rval;
	struct uioc_timeout	timeout = { };

	/* Sentinel status: the lld's completion must overwrite -ENODATA */
	kioc->status	= -ENODATA;
	rval		= adp->issue_uioc(adp->drvr_data, kioc, IOCTL_ISSUE);

	if (rval) return rval;

	/*
	 * Start the timer
	 */
	if (adp->timeout > 0) {
		timeout.uioc = kioc;
		timer_setup_on_stack(&timeout.timer, lld_timedout, 0);

		timeout.timer.expires	= jiffies + adp->timeout * HZ;

		add_timer(&timeout.timer);
	}

	/*
	 * Wait till the low level driver completes the ioctl. After this
	 * call, the ioctl either completed successfully or timedout.
	 */
	wait_event(wait_q, (kioc->status != -ENODATA));
	/* timer.function is non-NULL only if the timer was armed above */
	if (timeout.timer.function) {
		del_timer_sync(&timeout.timer);
		destroy_timer_on_stack(&timeout.timer);
	}

	/*
	 * If the command had timedout, we mark the controller offline
	 * before returning
	 */
	if (kioc->timedout) {
		adp->quiescent = 0;
	}

	return kioc->status;
}
717 
718 
719 /**
720  * ioctl_done - callback from the low level driver
721  * @kioc	: completed ioctl packet
722  */
723 static void
724 ioctl_done(uioc_t *kioc)
725 {
726 	uint32_t	adapno;
727 	int		iterator;
728 	mraid_mmadp_t*	adapter;
729 
730 	/*
731 	 * When the kioc returns from driver, make sure it still doesn't
732 	 * have ENODATA in status. Otherwise, driver will hang on wait_event
733 	 * forever
734 	 */
735 	if (kioc->status == -ENODATA) {
736 		con_log(CL_ANN, (KERN_WARNING
737 			"megaraid cmm: lld didn't change status!\n"));
738 
739 		kioc->status = -EINVAL;
740 	}
741 
742 	/*
743 	 * Check if this kioc was timedout before. If so, nobody is waiting
744 	 * on this kioc. We don't have to wake up anybody. Instead, we just
745 	 * have to free the kioc
746 	 */
747 	if (kioc->timedout) {
748 		iterator	= 0;
749 		adapter		= NULL;
750 		adapno		= kioc->adapno;
751 
752 		con_log(CL_ANN, ( KERN_WARNING "megaraid cmm: completed "
753 					"ioctl that was timedout before\n"));
754 
755 		list_for_each_entry(adapter, &adapters_list_g, list) {
756 			if (iterator++ == adapno) break;
757 		}
758 
759 		kioc->timedout = 0;
760 
761 		if (adapter) {
762 			mraid_mm_dealloc_kioc( adapter, kioc );
763 		}
764 	}
765 	else {
766 		wake_up(&wait_q);
767 	}
768 }
769 
770 
771 /**
772  * lld_timedout	- callback from the expired timer
773  * @t		: timer that timed out
774  */
775 static void
776 lld_timedout(struct timer_list *t)
777 {
778 	struct uioc_timeout *timeout = from_timer(timeout, t, timer);
779 	uioc_t *kioc	= timeout->uioc;
780 
781 	kioc->status 	= -ETIME;
782 	kioc->timedout	= 1;
783 
784 	con_log(CL_ANN, (KERN_WARNING "megaraid cmm: ioctl timed out\n"));
785 
786 	wake_up(&wait_q);
787 }
788 
789 
790 /**
791  * kioc_to_mimd	- Converter from new back to old format
792  * @kioc	: Kernel space IOCTL packet (successfully issued)
793  * @mimd	: User space MIMD packet
794  */
795 static int
796 kioc_to_mimd(uioc_t *kioc, mimd_t __user *mimd)
797 {
798 	mimd_t			kmimd;
799 	uint8_t			opcode;
800 	uint8_t			subopcode;
801 
802 	mbox64_t		*mbox64;
803 	mraid_passthru_t	__user *upthru32;
804 	mraid_passthru_t	*kpthru32;
805 	mcontroller_t		cinfo;
806 	mraid_hba_info_t	*hinfo;
807 
808 
809 	if (copy_from_user(&kmimd, mimd, sizeof(mimd_t)))
810 		return (-EFAULT);
811 
812 	opcode		= kmimd.ui.fcs.opcode;
813 	subopcode	= kmimd.ui.fcs.subopcode;
814 
815 	if (opcode == 0x82) {
816 		switch (subopcode) {
817 
818 		case MEGAIOC_QADAPINFO:
819 
820 			hinfo = (mraid_hba_info_t *)(unsigned long)
821 					kioc->buf_vaddr;
822 
823 			hinfo_to_cinfo(hinfo, &cinfo);
824 
825 			if (copy_to_user(kmimd.data, &cinfo, sizeof(cinfo)))
826 				return (-EFAULT);
827 
828 			return 0;
829 
830 		default:
831 			return (-EINVAL);
832 		}
833 
834 		return 0;
835 	}
836 
837 	mbox64 = (mbox64_t *)(unsigned long)kioc->cmdbuf;
838 
839 	if (kioc->user_pthru) {
840 
841 		upthru32 = kioc->user_pthru;
842 		kpthru32 = kioc->pthru32;
843 
844 		if (copy_to_user(&upthru32->scsistatus,
845 					&kpthru32->scsistatus,
846 					sizeof(uint8_t))) {
847 			return (-EFAULT);
848 		}
849 	}
850 
851 	if (kioc->user_data) {
852 		if (copy_to_user(kioc->user_data, kioc->buf_vaddr,
853 					kioc->user_data_len)) {
854 			return (-EFAULT);
855 		}
856 	}
857 
858 	if (copy_to_user(&mimd->mbox[17],
859 			&mbox64->mbox32.status, sizeof(uint8_t))) {
860 		return (-EFAULT);
861 	}
862 
863 	return 0;
864 }
865 
866 
867 /**
868  * hinfo_to_cinfo - Convert new format hba info into old format
869  * @hinfo	: New format, more comprehensive adapter info
870  * @cinfo	: Old format adapter info to support mimd_t apps
871  */
872 static void
873 hinfo_to_cinfo(mraid_hba_info_t *hinfo, mcontroller_t *cinfo)
874 {
875 	if (!hinfo || !cinfo)
876 		return;
877 
878 	cinfo->base		= hinfo->baseport;
879 	cinfo->irq		= hinfo->irq;
880 	cinfo->numldrv		= hinfo->num_ldrv;
881 	cinfo->pcibus		= hinfo->pci_bus;
882 	cinfo->pcidev		= hinfo->pci_slot;
883 	cinfo->pcifun		= PCI_FUNC(hinfo->pci_dev_fn);
884 	cinfo->pciid		= hinfo->pci_device_id;
885 	cinfo->pcivendor	= hinfo->pci_vendor_id;
886 	cinfo->pcislot		= hinfo->pci_slot;
887 	cinfo->uid		= hinfo->unique_id;
888 }
889 
890 
891 /**
892  * mraid_mm_register_adp - Registration routine for low level drivers
893  * @lld_adp	: Adapter object
894  */
895 int
896 mraid_mm_register_adp(mraid_mmadp_t *lld_adp)
897 {
898 	mraid_mmadp_t	*adapter;
899 	mbox64_t	*mbox_list;
900 	uioc_t		*kioc;
901 	uint32_t	rval;
902 	int		i;
903 
904 
905 	if (lld_adp->drvr_type != DRVRTYPE_MBOX)
906 		return (-EINVAL);
907 
908 	adapter = kzalloc(sizeof(mraid_mmadp_t), GFP_KERNEL);
909 
910 	if (!adapter)
911 		return -ENOMEM;
912 
913 
914 	adapter->unique_id	= lld_adp->unique_id;
915 	adapter->drvr_type	= lld_adp->drvr_type;
916 	adapter->drvr_data	= lld_adp->drvr_data;
917 	adapter->pdev		= lld_adp->pdev;
918 	adapter->issue_uioc	= lld_adp->issue_uioc;
919 	adapter->timeout	= lld_adp->timeout;
920 	adapter->max_kioc	= lld_adp->max_kioc;
921 	adapter->quiescent	= 1;
922 
923 	/*
924 	 * Allocate single blocks of memory for all required kiocs,
925 	 * mailboxes and passthru structures.
926 	 */
927 	adapter->kioc_list	= kmalloc_array(lld_adp->max_kioc,
928 						  sizeof(uioc_t),
929 						  GFP_KERNEL);
930 	adapter->mbox_list	= kmalloc_array(lld_adp->max_kioc,
931 						  sizeof(mbox64_t),
932 						  GFP_KERNEL);
933 	adapter->pthru_dma_pool = dma_pool_create("megaraid mm pthru pool",
934 						&adapter->pdev->dev,
935 						sizeof(mraid_passthru_t),
936 						16, 0);
937 
938 	if (!adapter->kioc_list || !adapter->mbox_list ||
939 			!adapter->pthru_dma_pool) {
940 
941 		con_log(CL_ANN, (KERN_WARNING
942 			"megaraid cmm: out of memory, %s %d\n", __func__,
943 			__LINE__));
944 
945 		rval = (-ENOMEM);
946 
947 		goto memalloc_error;
948 	}
949 
950 	/*
951 	 * Slice kioc_list and make a kioc_pool with the individiual kiocs
952 	 */
953 	INIT_LIST_HEAD(&adapter->kioc_pool);
954 	spin_lock_init(&adapter->kioc_pool_lock);
955 	sema_init(&adapter->kioc_semaphore, lld_adp->max_kioc);
956 
957 	mbox_list	= (mbox64_t *)adapter->mbox_list;
958 
959 	for (i = 0; i < lld_adp->max_kioc; i++) {
960 
961 		kioc		= adapter->kioc_list + i;
962 		kioc->cmdbuf	= (uint64_t)(unsigned long)(mbox_list + i);
963 		kioc->pthru32	= dma_pool_alloc(adapter->pthru_dma_pool,
964 						GFP_KERNEL, &kioc->pthru32_h);
965 
966 		if (!kioc->pthru32) {
967 
968 			con_log(CL_ANN, (KERN_WARNING
969 				"megaraid cmm: out of memory, %s %d\n",
970 					__func__, __LINE__));
971 
972 			rval = (-ENOMEM);
973 
974 			goto pthru_dma_pool_error;
975 		}
976 
977 		list_add_tail(&kioc->list, &adapter->kioc_pool);
978 	}
979 
980 	// Setup the dma pools for data buffers
981 	if ((rval = mraid_mm_setup_dma_pools(adapter)) != 0) {
982 		goto dma_pool_error;
983 	}
984 
985 	list_add_tail(&adapter->list, &adapters_list_g);
986 
987 	adapters_count_g++;
988 
989 	return 0;
990 
991 dma_pool_error:
992 	/* Do nothing */
993 
994 pthru_dma_pool_error:
995 
996 	for (i = 0; i < lld_adp->max_kioc; i++) {
997 		kioc = adapter->kioc_list + i;
998 		if (kioc->pthru32) {
999 			dma_pool_free(adapter->pthru_dma_pool, kioc->pthru32,
1000 				kioc->pthru32_h);
1001 		}
1002 	}
1003 
1004 memalloc_error:
1005 
1006 	kfree(adapter->kioc_list);
1007 	kfree(adapter->mbox_list);
1008 
1009 	dma_pool_destroy(adapter->pthru_dma_pool);
1010 
1011 	kfree(adapter);
1012 
1013 	return rval;
1014 }
1015 
1016 
1017 /**
1018  * mraid_mm_adapter_app_handle - return the application handle for this adapter
1019  * @unique_id	: adapter unique identifier
1020  *
1021  * For the given driver data, locate the adapter in our global list and
1022  * return the corresponding handle, which is also used by applications to
1023  * uniquely identify an adapter.
1024  *
1025  * Return adapter handle if found in the list.
1026  * Return 0 if adapter could not be located, should never happen though.
1027  */
1028 uint32_t
1029 mraid_mm_adapter_app_handle(uint32_t unique_id)
1030 {
1031 	mraid_mmadp_t	*adapter;
1032 	mraid_mmadp_t	*tmp;
1033 	int		index = 0;
1034 
1035 	list_for_each_entry_safe(adapter, tmp, &adapters_list_g, list) {
1036 
1037 		if (adapter->unique_id == unique_id) {
1038 
1039 			return MKADAP(index);
1040 		}
1041 
1042 		index++;
1043 	}
1044 
1045 	return 0;
1046 }
1047 
1048 
1049 /**
1050  * mraid_mm_setup_dma_pools - Set up dma buffer pools per adapter
1051  * @adp	: Adapter softstate
1052  *
1053  * We maintain a pool of dma buffers per each adapter. Each pool has one
1054  * buffer. E.g, we may have 5 dma pools - one each for 4k, 8k ... 64k buffers.
1055  * We have just one 4k buffer in 4k pool, one 8k buffer in 8k pool etc. We
1056  * dont' want to waste too much memory by allocating more buffers per each
1057  * pool.
1058  */
1059 static int
1060 mraid_mm_setup_dma_pools(mraid_mmadp_t *adp)
1061 {
1062 	mm_dmapool_t	*pool;
1063 	int		bufsize;
1064 	int		i;
1065 
1066 	/*
1067 	 * Create MAX_DMA_POOLS number of pools
1068 	 */
1069 	bufsize = MRAID_MM_INIT_BUFF_SIZE;
1070 
1071 	for (i = 0; i < MAX_DMA_POOLS; i++){
1072 
1073 		pool = &adp->dma_pool_list[i];
1074 
1075 		pool->buf_size = bufsize;
1076 		spin_lock_init(&pool->lock);
1077 
1078 		pool->handle = dma_pool_create("megaraid mm data buffer",
1079 						&adp->pdev->dev, bufsize,
1080 						16, 0);
1081 
1082 		if (!pool->handle) {
1083 			goto dma_pool_setup_error;
1084 		}
1085 
1086 		pool->vaddr = dma_pool_alloc(pool->handle, GFP_KERNEL,
1087 							&pool->paddr);
1088 
1089 		if (!pool->vaddr)
1090 			goto dma_pool_setup_error;
1091 
1092 		bufsize = bufsize * 2;
1093 	}
1094 
1095 	return 0;
1096 
1097 dma_pool_setup_error:
1098 
1099 	mraid_mm_teardown_dma_pools(adp);
1100 	return (-ENOMEM);
1101 }
1102 
1103 
1104 /**
1105  * mraid_mm_unregister_adp - Unregister routine for low level drivers
1106  * @unique_id	: UID of the adpater
1107  *
1108  * Assumes no outstanding ioctls to llds.
1109  */
1110 int
1111 mraid_mm_unregister_adp(uint32_t unique_id)
1112 {
1113 	mraid_mmadp_t	*adapter;
1114 	mraid_mmadp_t	*tmp;
1115 
1116 	list_for_each_entry_safe(adapter, tmp, &adapters_list_g, list) {
1117 
1118 
1119 		if (adapter->unique_id == unique_id) {
1120 
1121 			adapters_count_g--;
1122 
1123 			list_del_init(&adapter->list);
1124 
1125 			mraid_mm_free_adp_resources(adapter);
1126 
1127 			kfree(adapter);
1128 
1129 			con_log(CL_ANN, (
1130 				"megaraid cmm: Unregistered one adapter:%#x\n",
1131 				unique_id));
1132 
1133 			return 0;
1134 		}
1135 	}
1136 
1137 	return (-ENODEV);
1138 }
1139 
1140 /**
1141  * mraid_mm_free_adp_resources - Free adapter softstate
1142  * @adp	: Adapter softstate
1143  */
1144 static void
1145 mraid_mm_free_adp_resources(mraid_mmadp_t *adp)
1146 {
1147 	uioc_t	*kioc;
1148 	int	i;
1149 
1150 	mraid_mm_teardown_dma_pools(adp);
1151 
1152 	for (i = 0; i < adp->max_kioc; i++) {
1153 
1154 		kioc = adp->kioc_list + i;
1155 
1156 		dma_pool_free(adp->pthru_dma_pool, kioc->pthru32,
1157 				kioc->pthru32_h);
1158 	}
1159 
1160 	kfree(adp->kioc_list);
1161 	kfree(adp->mbox_list);
1162 
1163 	dma_pool_destroy(adp->pthru_dma_pool);
1164 
1165 
1166 	return;
1167 }
1168 
1169 
1170 /**
1171  * mraid_mm_teardown_dma_pools - Free all per adapter dma buffers
1172  * @adp	: Adapter softstate
1173  */
1174 static void
1175 mraid_mm_teardown_dma_pools(mraid_mmadp_t *adp)
1176 {
1177 	int		i;
1178 	mm_dmapool_t	*pool;
1179 
1180 	for (i = 0; i < MAX_DMA_POOLS; i++) {
1181 
1182 		pool = &adp->dma_pool_list[i];
1183 
1184 		if (pool->handle) {
1185 
1186 			if (pool->vaddr)
1187 				dma_pool_free(pool->handle, pool->vaddr,
1188 							pool->paddr);
1189 
1190 			dma_pool_destroy(pool->handle);
1191 			pool->handle = NULL;
1192 		}
1193 	}
1194 
1195 	return;
1196 }
1197 
1198 /**
1199  * mraid_mm_init	- Module entry point
1200  */
1201 static int __init
1202 mraid_mm_init(void)
1203 {
1204 	int err;
1205 
1206 	// Announce the driver version
1207 	con_log(CL_ANN, (KERN_INFO "megaraid cmm: %s %s\n",
1208 		LSI_COMMON_MOD_VERSION, LSI_COMMON_MOD_EXT_VERSION));
1209 
1210 	err = misc_register(&megaraid_mm_dev);
1211 	if (err < 0) {
1212 		con_log(CL_ANN, ("megaraid cmm: cannot register misc device\n"));
1213 		return err;
1214 	}
1215 
1216 	init_waitqueue_head(&wait_q);
1217 
1218 	INIT_LIST_HEAD(&adapters_list_g);
1219 
1220 	return 0;
1221 }
1222 
1223 
1224 /**
1225  * mraid_mm_exit	- Module exit point
1226  */
1227 static void __exit
1228 mraid_mm_exit(void)
1229 {
1230 	con_log(CL_DLEVEL1 , ("exiting common mod\n"));
1231 
1232 	misc_deregister(&megaraid_mm_dev);
1233 }
1234 
1235 module_init(mraid_mm_init);
1236 module_exit(mraid_mm_exit);
1237 
1238 /* vi: set ts=8 sw=8 tw=78: */
1239