1 /*
2  * ipmi_msghandler.c
3  *
4  * Incoming and outgoing message routing for an IPMI interface.
5  *
6  * Author: MontaVista Software, Inc.
7  *         Corey Minyard <minyard@mvista.com>
8  *         source@mvista.com
9  *
10  * Copyright 2002 MontaVista Software Inc.
11  *
12  *  This program is free software; you can redistribute it and/or modify it
13  *  under the terms of the GNU General Public License as published by the
14  *  Free Software Foundation; either version 2 of the License, or (at your
15  *  option) any later version.
16  *
17  *
18  *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
19  *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
20  *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21  *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22  *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
23  *  BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
24  *  OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
25  *  ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
26  *  TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
27  *  USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28  *
29  *  You should have received a copy of the GNU General Public License along
30  *  with this program; if not, write to the Free Software Foundation, Inc.,
31  *  675 Mass Ave, Cambridge, MA 02139, USA.
32  */
33 
34 #include <linux/module.h>
35 #include <linux/errno.h>
36 #include <asm/system.h>
37 #include <linux/poll.h>
38 #include <linux/spinlock.h>
39 #include <linux/mutex.h>
40 #include <linux/slab.h>
41 #include <linux/ipmi.h>
42 #include <linux/ipmi_smi.h>
43 #include <linux/notifier.h>
44 #include <linux/init.h>
45 #include <linux/proc_fs.h>
46 #include <linux/rcupdate.h>
47 
48 #define PFX "IPMI message handler: "
49 
50 #define IPMI_DRIVER_VERSION "39.1"
51 
52 static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void);
53 static int ipmi_init_msghandler(void);
54 
55 static int initialized;
56 
57 #ifdef CONFIG_PROC_FS
58 static struct proc_dir_entry *proc_ipmi_root;
59 #endif /* CONFIG_PROC_FS */
60 
61 /* Remain in auto-maintenance mode for this amount of time (in ms). */
62 #define IPMI_MAINTENANCE_MODE_TIMEOUT 30000
63 
64 #define MAX_EVENTS_IN_QUEUE	25
65 
66 /* Don't let a message sit in a queue forever; always time it with at least
67    the max message timer.  This is in milliseconds. */
68 #define MAX_MSG_TIMEOUT		60000
69 
70 
71 /*
72  * The main "user" data structure.
73  */
74 struct ipmi_user
75 {
76 	struct list_head link;
77 
78 	/* Set to "0" when the user is destroyed. */
79 	int valid;
80 
81 	struct kref refcount;
82 
83 	/* The upper layer that handles receive messages. */
84 	struct ipmi_user_hndl *handler;
85 	void             *handler_data;
86 
87 	/* The interface this user is bound to. */
88 	ipmi_smi_t intf;
89 
90 	/* Does this user receive IPMI events? */
91 	int gets_events;
92 };
93 
94 struct cmd_rcvr
95 {
96 	struct list_head link;
97 
98 	ipmi_user_t   user;
99 	unsigned char netfn;
100 	unsigned char cmd;
101 	unsigned int  chans;
102 
103 	/*
104 	 * This is used to form a linked list during mass deletion.
105 	 * Since this is in an RCU list, we cannot use the link above
106 	 * or change any data until the RCU period completes.  So we
107 	 * use this next variable during mass deletion so we can have
108 	 * a list and don't have to wait and restart the search on
109 	 * every individual deletion of a command. */
110 	struct cmd_rcvr *next;
111 };
112 
113 struct seq_table
114 {
115 	unsigned int         inuse : 1;
116 	unsigned int         broadcast : 1;
117 
118 	unsigned long        timeout;
119 	unsigned long        orig_timeout;
120 	unsigned int         retries_left;
121 
122 	/* To verify on an incoming send message response that this is
123            the message that the response is for, we keep a sequence id
124            and increment it every time we send a message. */
125 	long                 seqid;
126 
127 	/* This is held so we can properly respond to the message on a
128            timeout, and it is used to hold the temporary data for
129            retransmission, too. */
130 	struct ipmi_recv_msg *recv_msg;
131 };
132 
133 /* Store the information in a msgid (long) to allow us to find a
134    sequence table entry from the msgid. */
135 #define STORE_SEQ_IN_MSGID(seq, seqid) (((seq&0xff)<<26) | (seqid&0x3ffffff))
136 
137 #define GET_SEQ_FROM_MSGID(msgid, seq, seqid) \
138 	do {								\
139 		seq = ((msgid >> 26) & 0x3f);				\
140 		seqid = (msgid & 0x3fffff);				\
141         } while (0)
142 
143 #define NEXT_SEQID(seqid) (((seqid) + 1) & 0x3fffff)
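/*
 * A quick illustrative sketch (not used by the driver) of how the msgid
 * packing above behaves: with seq = 5 and seqid = 0x123,
 *
 *	STORE_SEQ_IN_MSGID(5, 0x123)  produces (5 << 26) | 0x123, and
 *	GET_SEQ_FROM_MSGID(msgid, seq, seqid)  recovers seq = 5, seqid = 0x123.
 *
 * The store mask (0x3ffffff) is wider than the retrieval mask (0x3fffff),
 * but seqid values always come from NEXT_SEQID(), so they never exceed
 * 0x3fffff and the two stay consistent.
 */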
144 
145 struct ipmi_channel
146 {
147 	unsigned char medium;
148 	unsigned char protocol;
149 
150 	/* My slave address.  This is initialized to IPMI_BMC_SLAVE_ADDR,
151 	   but may be changed by the user. */
152 	unsigned char address;
153 
154 	/* My LUN.  This should generally stay the SMS LUN, but just in
155 	   case... */
156 	unsigned char lun;
157 };
158 
159 #ifdef CONFIG_PROC_FS
160 struct ipmi_proc_entry
161 {
162 	char                   *name;
163 	struct ipmi_proc_entry *next;
164 };
165 #endif
166 
167 struct bmc_device
168 {
169 	struct platform_device *dev;
170 	struct ipmi_device_id  id;
171 	unsigned char          guid[16];
172 	int                    guid_set;
173 
174 	struct kref	       refcount;
175 
176 	/* bmc device attributes */
177 	struct device_attribute device_id_attr;
178 	struct device_attribute provides_dev_sdrs_attr;
179 	struct device_attribute revision_attr;
180 	struct device_attribute firmware_rev_attr;
181 	struct device_attribute version_attr;
182 	struct device_attribute add_dev_support_attr;
183 	struct device_attribute manufacturer_id_attr;
184 	struct device_attribute product_id_attr;
185 	struct device_attribute guid_attr;
186 	struct device_attribute aux_firmware_rev_attr;
187 };
188 
189 #define IPMI_IPMB_NUM_SEQ	64
190 #define IPMI_MAX_CHANNELS       16
191 struct ipmi_smi
192 {
193 	/* What interface number are we? */
194 	int intf_num;
195 
196 	struct kref refcount;
197 
198 	/* Used for a list of interfaces. */
199 	struct list_head link;
200 
201 	/* The list of upper layers that are using me.  seq_lock
202 	 * protects this. */
203 	struct list_head users;
204 
205 	/* Information to supply to users. */
206 	unsigned char ipmi_version_major;
207 	unsigned char ipmi_version_minor;
208 
209 	/* Used for wake ups at startup. */
210 	wait_queue_head_t waitq;
211 
212 	struct bmc_device *bmc;
213 	char *my_dev_name;
214 	char *sysfs_name;
215 
216 	/* This is the lower-layer's sender routine.  Note that you
217 	 * must either be holding the ipmi_interfaces_mutex or be in
218 	 * an unpreemptible region to use this.  You must fetch the
219 	 * value into a local variable and make sure it is not NULL. */
220 	struct ipmi_smi_handlers *handlers;
221 	void                     *send_info;
222 
223 #ifdef CONFIG_PROC_FS
224 	/* A list of proc entries for this interface. */
225 	struct mutex           proc_entry_lock;
226 	struct ipmi_proc_entry *proc_entries;
227 #endif
228 
229 	/* Driver-model device for the system interface. */
230 	struct device          *si_dev;
231 
232 	/* A table of sequence numbers for this interface.  We use the
233            sequence numbers for IPMB messages that go out of the
234            interface to match them up with their responses.  A routine
235            is called periodically to time the items in this list. */
236 	spinlock_t       seq_lock;
237 	struct seq_table seq_table[IPMI_IPMB_NUM_SEQ];
238 	int curr_seq;
239 
240 	/* Messages that were delayed for some reason (out of memory,
241            for instance) will go in here to be processed later in a
242            periodic timer interrupt. */
243 	spinlock_t       waiting_msgs_lock;
244 	struct list_head waiting_msgs;
245 
246 	/* The list of command receivers that are registered for commands
247 	   on this interface. */
248 	struct mutex     cmd_rcvrs_mutex;
249 	struct list_head cmd_rcvrs;
250 
251 	/* Events that were queued because no one was there to receive
252            them. */
253 	spinlock_t       events_lock; /* For dealing with event stuff. */
254 	struct list_head waiting_events;
255 	unsigned int     waiting_events_count; /* How many events in queue? */
256 	int              delivering_events;
257 
258 	/* The event receiver for my BMC, only really used at panic
259 	   shutdown as a place to store this. */
260 	unsigned char event_receiver;
261 	unsigned char event_receiver_lun;
262 	unsigned char local_sel_device;
263 	unsigned char local_event_generator;
264 
265 	/* For handling of maintenance mode. */
266 	int maintenance_mode;
267 	int maintenance_mode_enable;
268 	int auto_maintenance_timeout;
269 	spinlock_t maintenance_mode_lock; /* Used in a timer... */
270 
271 	/* A cheap hack: if this is non-NULL and a message to an
272 	   interface comes in with a NULL user, call this routine with
273 	   it.  Note that the message will still be freed by the
274 	   caller.  This only works on the system interface. */
275 	void (*null_user_handler)(ipmi_smi_t intf, struct ipmi_recv_msg *msg);
276 
277 	/* When we are scanning the channels for an SMI, this will
278 	   tell which channel we are scanning. */
279 	int curr_channel;
280 
281 	/* Channel information */
282 	struct ipmi_channel channels[IPMI_MAX_CHANNELS];
283 
284 	/* Proc FS stuff. */
285 	struct proc_dir_entry *proc_dir;
286 	char                  proc_dir_name[10];
287 
288 	spinlock_t   counter_lock; /* For making counters atomic. */
289 
290 	/* Commands we got that were invalid. */
291 	unsigned int sent_invalid_commands;
292 
293 	/* Commands we sent to the MC. */
294 	unsigned int sent_local_commands;
295 	/* Responses from the MC that were delivered to a user. */
296 	unsigned int handled_local_responses;
297 	/* Responses from the MC that were not delivered to a user. */
298 	unsigned int unhandled_local_responses;
299 
300 	/* Commands we sent out to the IPMB bus. */
301 	unsigned int sent_ipmb_commands;
302 	/* Commands sent on the IPMB that had errors on the SEND CMD */
303 	unsigned int sent_ipmb_command_errs;
304 	/* Each retransmit increments this count. */
305 	unsigned int retransmitted_ipmb_commands;
306 	/* When a message times out (runs out of retransmits) this is
307            incremented. */
308 	unsigned int timed_out_ipmb_commands;
309 
310 	/* This is like above, but for broadcasts.  Broadcasts are
311            *not* included in the above count (they are expected to
312            time out). */
313 	unsigned int timed_out_ipmb_broadcasts;
314 
315 	/* Responses I have sent to the IPMB bus. */
316 	unsigned int sent_ipmb_responses;
317 
318 	/* The response was delivered to the user. */
319 	unsigned int handled_ipmb_responses;
320 	/* The response had invalid data in it. */
321 	unsigned int invalid_ipmb_responses;
322 	/* The response didn't have anyone waiting for it. */
323 	unsigned int unhandled_ipmb_responses;
324 
325 	/* Commands we sent out on the LAN. */
326 	unsigned int sent_lan_commands;
327 	/* Commands sent on the LAN that had errors on the SEND CMD */
328 	unsigned int sent_lan_command_errs;
329 	/* Each retransmit increments this count. */
330 	unsigned int retransmitted_lan_commands;
331 	/* When a message times out (runs out of retransmits) this is
332            incremented. */
333 	unsigned int timed_out_lan_commands;
334 
335 	/* Responses I have sent out on the LAN. */
336 	unsigned int sent_lan_responses;
337 
338 	/* The response was delivered to the user. */
339 	unsigned int handled_lan_responses;
340 	/* The response had invalid data in it. */
341 	unsigned int invalid_lan_responses;
342 	/* The response didn't have anyone waiting for it. */
343 	unsigned int unhandled_lan_responses;
344 
345 	/* The command was delivered to the user. */
346 	unsigned int handled_commands;
347 	/* The command had invalid data in it. */
348 	unsigned int invalid_commands;
349 	/* The command didn't have anyone waiting for it. */
350 	unsigned int unhandled_commands;
351 
352 	/* Invalid data in an event. */
353 	unsigned int invalid_events;
354 	/* Events that were received with the proper format. */
355 	unsigned int events;
356 };
357 #define to_si_intf_from_dev(device) container_of(device, struct ipmi_smi, dev)
358 
359 /*
360  * The driver model view of the IPMI messaging driver.
361  */
362 static struct device_driver ipmidriver = {
363 	.name = "ipmi",
364 	.bus = &platform_bus_type
365 };
366 static DEFINE_MUTEX(ipmidriver_mutex);
367 
368 static LIST_HEAD(ipmi_interfaces);
369 static DEFINE_MUTEX(ipmi_interfaces_mutex);
370 
371 /* List of watchers that want to know when smi's are added and
372    deleted. */
373 static LIST_HEAD(smi_watchers);
374 static DEFINE_MUTEX(smi_watchers_mutex);
375 
376 
377 static void free_recv_msg_list(struct list_head *q)
378 {
379 	struct ipmi_recv_msg *msg, *msg2;
380 
381 	list_for_each_entry_safe(msg, msg2, q, link) {
382 		list_del(&msg->link);
383 		ipmi_free_recv_msg(msg);
384 	}
385 }
386 
387 static void free_smi_msg_list(struct list_head *q)
388 {
389 	struct ipmi_smi_msg *msg, *msg2;
390 
391 	list_for_each_entry_safe(msg, msg2, q, link) {
392 		list_del(&msg->link);
393 		ipmi_free_smi_msg(msg);
394 	}
395 }
396 
397 static void clean_up_interface_data(ipmi_smi_t intf)
398 {
399 	int              i;
400 	struct cmd_rcvr  *rcvr, *rcvr2;
401 	struct list_head list;
402 
403 	free_smi_msg_list(&intf->waiting_msgs);
404 	free_recv_msg_list(&intf->waiting_events);
405 
406 	/*
407 	 * Wholesale remove all the entries from the list in the
408 	 * interface and wait for RCU to know that none are in use.
409 	 */
410 	mutex_lock(&intf->cmd_rcvrs_mutex);
411 	INIT_LIST_HEAD(&list);
412 	list_splice_init_rcu(&intf->cmd_rcvrs, &list, synchronize_rcu);
413 	mutex_unlock(&intf->cmd_rcvrs_mutex);
414 
415 	list_for_each_entry_safe(rcvr, rcvr2, &list, link)
416 		kfree(rcvr);
417 
418 	for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
419 		if ((intf->seq_table[i].inuse)
420 		    && (intf->seq_table[i].recv_msg))
421 		{
422 			ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
423 		}
424 	}
425 }
426 
427 static void intf_free(struct kref *ref)
428 {
429 	ipmi_smi_t intf = container_of(ref, struct ipmi_smi, refcount);
430 
431 	clean_up_interface_data(intf);
432 	kfree(intf);
433 }
434 
435 struct watcher_entry {
436 	int              intf_num;
437 	ipmi_smi_t       intf;
438 	struct list_head link;
439 };
440 
441 int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher)
442 {
443 	ipmi_smi_t intf;
444 	LIST_HEAD(to_deliver);
445 	struct watcher_entry *e, *e2;
446 
447 	mutex_lock(&smi_watchers_mutex);
448 
449 	mutex_lock(&ipmi_interfaces_mutex);
450 
451 	/* Build a list of things to deliver. */
452 	list_for_each_entry(intf, &ipmi_interfaces, link) {
453 		if (intf->intf_num == -1)
454 			continue;
455 		e = kmalloc(sizeof(*e), GFP_KERNEL);
456 		if (!e)
457 			goto out_err;
458 		kref_get(&intf->refcount);
459 		e->intf = intf;
460 		e->intf_num = intf->intf_num;
461 		list_add_tail(&e->link, &to_deliver);
462 	}
463 
464 	/* We will succeed, so add it to the list. */
465 	list_add(&watcher->link, &smi_watchers);
466 
467 	mutex_unlock(&ipmi_interfaces_mutex);
468 
469 	list_for_each_entry_safe(e, e2, &to_deliver, link) {
470 		list_del(&e->link);
471 		watcher->new_smi(e->intf_num, e->intf->si_dev);
472 		kref_put(&e->intf->refcount, intf_free);
473 		kfree(e);
474 	}
475 
476 	mutex_unlock(&smi_watchers_mutex);
477 
478 	return 0;
479 
480  out_err:
481 	mutex_unlock(&ipmi_interfaces_mutex);
482 	mutex_unlock(&smi_watchers_mutex);
483 	list_for_each_entry_safe(e, e2, &to_deliver, link) {
484 		list_del(&e->link);
485 		kref_put(&e->intf->refcount, intf_free);
486 		kfree(e);
487 	}
488 	return -ENOMEM;
489 }
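/*
 * Illustrative sketch only (callback names are hypothetical): a client
 * that wants to track interfaces as they come and go registers a
 * watcher, and new_smi() is called immediately for every interface that
 * already exists, as implemented above:
 *
 *	static struct ipmi_smi_watcher my_watcher = {
 *		.owner    = THIS_MODULE,
 *		.new_smi  = my_new_smi,
 *		.smi_gone = my_smi_gone,
 *	};
 *	rv = ipmi_smi_watcher_register(&my_watcher);
 */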
490 
491 int ipmi_smi_watcher_unregister(struct ipmi_smi_watcher *watcher)
492 {
493 	mutex_lock(&smi_watchers_mutex);
494 	list_del(&(watcher->link));
495 	mutex_unlock(&smi_watchers_mutex);
496 	return 0;
497 }
498 
499 /*
500  * Must be called with smi_watchers_mutex held.
501  */
502 static void
503 call_smi_watchers(int i, struct device *dev)
504 {
505 	struct ipmi_smi_watcher *w;
506 
507 	list_for_each_entry(w, &smi_watchers, link) {
508 		if (try_module_get(w->owner)) {
509 			w->new_smi(i, dev);
510 			module_put(w->owner);
511 		}
512 	}
513 }
514 
515 static int
516 ipmi_addr_equal(struct ipmi_addr *addr1, struct ipmi_addr *addr2)
517 {
518 	if (addr1->addr_type != addr2->addr_type)
519 		return 0;
520 
521 	if (addr1->channel != addr2->channel)
522 		return 0;
523 
524 	if (addr1->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
525 		struct ipmi_system_interface_addr *smi_addr1
526 		    = (struct ipmi_system_interface_addr *) addr1;
527 		struct ipmi_system_interface_addr *smi_addr2
528 		    = (struct ipmi_system_interface_addr *) addr2;
529 		return (smi_addr1->lun == smi_addr2->lun);
530 	}
531 
532 	if ((addr1->addr_type == IPMI_IPMB_ADDR_TYPE)
533 	    || (addr1->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
534 	{
535 		struct ipmi_ipmb_addr *ipmb_addr1
536 		    = (struct ipmi_ipmb_addr *) addr1;
537 		struct ipmi_ipmb_addr *ipmb_addr2
538 		    = (struct ipmi_ipmb_addr *) addr2;
539 
540 		return ((ipmb_addr1->slave_addr == ipmb_addr2->slave_addr)
541 			&& (ipmb_addr1->lun == ipmb_addr2->lun));
542 	}
543 
544 	if (addr1->addr_type == IPMI_LAN_ADDR_TYPE) {
545 		struct ipmi_lan_addr *lan_addr1
546 			= (struct ipmi_lan_addr *) addr1;
547 		struct ipmi_lan_addr *lan_addr2
548 		    = (struct ipmi_lan_addr *) addr2;
549 
550 		return ((lan_addr1->remote_SWID == lan_addr2->remote_SWID)
551 			&& (lan_addr1->local_SWID == lan_addr2->local_SWID)
552 			&& (lan_addr1->session_handle
553 			    == lan_addr2->session_handle)
554 			&& (lan_addr1->lun == lan_addr2->lun));
555 	}
556 
557 	return 1;
558 }
559 
560 int ipmi_validate_addr(struct ipmi_addr *addr, int len)
561 {
562 	if (len < sizeof(struct ipmi_system_interface_addr)) {
563 		return -EINVAL;
564 	}
565 
566 	if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
567 		if (addr->channel != IPMI_BMC_CHANNEL)
568 			return -EINVAL;
569 		return 0;
570 	}
571 
572 	if ((addr->channel == IPMI_BMC_CHANNEL)
573 	    || (addr->channel >= IPMI_MAX_CHANNELS)
574 	    || (addr->channel < 0))
575 		return -EINVAL;
576 
577 	if ((addr->addr_type == IPMI_IPMB_ADDR_TYPE)
578 	    || (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
579 	{
580 		if (len < sizeof(struct ipmi_ipmb_addr)) {
581 			return -EINVAL;
582 		}
583 		return 0;
584 	}
585 
586 	if (addr->addr_type == IPMI_LAN_ADDR_TYPE) {
587 		if (len < sizeof(struct ipmi_lan_addr)) {
588 			return -EINVAL;
589 		}
590 		return 0;
591 	}
592 
593 	return -EINVAL;
594 }
595 
596 unsigned int ipmi_addr_length(int addr_type)
597 {
598 	if (addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
599 		return sizeof(struct ipmi_system_interface_addr);
600 
601 	if ((addr_type == IPMI_IPMB_ADDR_TYPE)
602 	    || (addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
603 	{
604 		return sizeof(struct ipmi_ipmb_addr);
605 	}
606 
607 	if (addr_type == IPMI_LAN_ADDR_TYPE)
608 		return sizeof(struct ipmi_lan_addr);
609 
610 	return 0;
611 }
612 
613 static void deliver_response(struct ipmi_recv_msg *msg)
614 {
615 	if (!msg->user) {
616 		ipmi_smi_t    intf = msg->user_msg_data;
617 		unsigned long flags;
618 
619 		/* Special handling for NULL users. */
620 		if (intf->null_user_handler) {
621 			intf->null_user_handler(intf, msg);
622 			spin_lock_irqsave(&intf->counter_lock, flags);
623 			intf->handled_local_responses++;
624 			spin_unlock_irqrestore(&intf->counter_lock, flags);
625 		} else {
626 			/* No handler, so give up. */
627 			spin_lock_irqsave(&intf->counter_lock, flags);
628 			intf->unhandled_local_responses++;
629 			spin_unlock_irqrestore(&intf->counter_lock, flags);
630 		}
631 		ipmi_free_recv_msg(msg);
632 	} else {
633 		ipmi_user_t user = msg->user;
634 		user->handler->ipmi_recv_hndl(msg, user->handler_data);
635 	}
636 }
637 
638 static void
639 deliver_err_response(struct ipmi_recv_msg *msg, int err)
640 {
641 	msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
642 	msg->msg_data[0] = err;
643 	msg->msg.netfn |= 1; /* Convert to a response. */
644 	msg->msg.data_len = 1;
645 	msg->msg.data = msg->msg_data;
646 	deliver_response(msg);
647 }
648 
649 /* Find the next sequence number not being used and add the given
650    message with the given timeout to the sequence table.  This must be
651    called with the interface's seq_lock held. */
652 static int intf_next_seq(ipmi_smi_t           intf,
653 			 struct ipmi_recv_msg *recv_msg,
654 			 unsigned long        timeout,
655 			 int                  retries,
656 			 int                  broadcast,
657 			 unsigned char        *seq,
658 			 long                 *seqid)
659 {
660 	int          rv = 0;
661 	unsigned int i;
662 
663 	for (i = intf->curr_seq;
664 	     (i+1)%IPMI_IPMB_NUM_SEQ != intf->curr_seq;
665 	     i = (i+1)%IPMI_IPMB_NUM_SEQ)
666 	{
667 		if (!intf->seq_table[i].inuse)
668 			break;
669 	}
670 
671 	if (!intf->seq_table[i].inuse) {
672 		intf->seq_table[i].recv_msg = recv_msg;
673 
674 		/* Start with the maximum timeout, when the send response
675 		   comes in we will start the real timer. */
676 		intf->seq_table[i].timeout = MAX_MSG_TIMEOUT;
677 		intf->seq_table[i].orig_timeout = timeout;
678 		intf->seq_table[i].retries_left = retries;
679 		intf->seq_table[i].broadcast = broadcast;
680 		intf->seq_table[i].inuse = 1;
681 		intf->seq_table[i].seqid = NEXT_SEQID(intf->seq_table[i].seqid);
682 		*seq = i;
683 		*seqid = intf->seq_table[i].seqid;
684 		intf->curr_seq = (i+1)%IPMI_IPMB_NUM_SEQ;
685 	} else {
686 		rv = -EAGAIN;
687 	}
688 
689 	return rv;
690 }
691 
692 /* Return the receive message for the given sequence number and
693    release the sequence number so it can be reused.  Some other data
694    is passed in to be sure the message matches up correctly (to help
695    guard against messages coming in after their timeout and the
696    sequence number being reused). */
697 static int intf_find_seq(ipmi_smi_t           intf,
698 			 unsigned char        seq,
699 			 short                channel,
700 			 unsigned char        cmd,
701 			 unsigned char        netfn,
702 			 struct ipmi_addr     *addr,
703 			 struct ipmi_recv_msg **recv_msg)
704 {
705 	int           rv = -ENODEV;
706 	unsigned long flags;
707 
708 	if (seq >= IPMI_IPMB_NUM_SEQ)
709 		return -EINVAL;
710 
711 	spin_lock_irqsave(&(intf->seq_lock), flags);
712 	if (intf->seq_table[seq].inuse) {
713 		struct ipmi_recv_msg *msg = intf->seq_table[seq].recv_msg;
714 
715 		if ((msg->addr.channel == channel)
716 		    && (msg->msg.cmd == cmd)
717 		    && (msg->msg.netfn == netfn)
718 		    && (ipmi_addr_equal(addr, &(msg->addr))))
719 		{
720 			*recv_msg = msg;
721 			intf->seq_table[seq].inuse = 0;
722 			rv = 0;
723 		}
724 	}
725 	spin_unlock_irqrestore(&(intf->seq_lock), flags);
726 
727 	return rv;
728 }
729 
730 
731 /* Start the timer for a specific sequence table entry. */
732 static int intf_start_seq_timer(ipmi_smi_t intf,
733 				long       msgid)
734 {
735 	int           rv = -ENODEV;
736 	unsigned long flags;
737 	unsigned char seq;
738 	unsigned long seqid;
739 
740 
741 	GET_SEQ_FROM_MSGID(msgid, seq, seqid);
742 
743 	spin_lock_irqsave(&(intf->seq_lock), flags);
744 	/* We do this verification because the user can be deleted
745            while a message is outstanding. */
746 	if ((intf->seq_table[seq].inuse)
747 	    && (intf->seq_table[seq].seqid == seqid))
748 	{
749 		struct seq_table *ent = &(intf->seq_table[seq]);
750 		ent->timeout = ent->orig_timeout;
751 		rv = 0;
752 	}
753 	spin_unlock_irqrestore(&(intf->seq_lock), flags);
754 
755 	return rv;
756 }
757 
758 /* Got an error for the send message for a specific sequence number. */
759 static int intf_err_seq(ipmi_smi_t   intf,
760 			long         msgid,
761 			unsigned int err)
762 {
763 	int                  rv = -ENODEV;
764 	unsigned long        flags;
765 	unsigned char        seq;
766 	unsigned long        seqid;
767 	struct ipmi_recv_msg *msg = NULL;
768 
769 
770 	GET_SEQ_FROM_MSGID(msgid, seq, seqid);
771 
772 	spin_lock_irqsave(&(intf->seq_lock), flags);
773 	/* We do this verification because the user can be deleted
774            while a message is outstanding. */
775 	if ((intf->seq_table[seq].inuse)
776 	    && (intf->seq_table[seq].seqid == seqid))
777 	{
778 		struct seq_table *ent = &(intf->seq_table[seq]);
779 
780 		ent->inuse = 0;
781 		msg = ent->recv_msg;
782 		rv = 0;
783 	}
784 	spin_unlock_irqrestore(&(intf->seq_lock), flags);
785 
786 	if (msg)
787 		deliver_err_response(msg, err);
788 
789 	return rv;
790 }
791 
792 
793 int ipmi_create_user(unsigned int          if_num,
794 		     struct ipmi_user_hndl *handler,
795 		     void                  *handler_data,
796 		     ipmi_user_t           *user)
797 {
798 	unsigned long flags;
799 	ipmi_user_t   new_user;
800 	int           rv = 0;
801 	ipmi_smi_t    intf;
802 
803 	/* There is no module usecount here, because it's not
804            required.  Since this can only be used by and called from
805            other modules, they will implicitly use this module, and
806            thus this can't be removed unless the other modules are
807            removed. */
808 
809 	if (handler == NULL)
810 		return -EINVAL;
811 
812 	/* Make sure the driver is actually initialized; this handles
813 	   problems with initialization order. */
814 	if (!initialized) {
815 		rv = ipmi_init_msghandler();
816 		if (rv)
817 			return rv;
818 
819 		/* The init code doesn't return an error if it was turned
820 		   off, but it won't initialize.  Check that. */
821 		if (!initialized)
822 			return -ENODEV;
823 	}
824 
825 	new_user = kmalloc(sizeof(*new_user), GFP_KERNEL);
826 	if (!new_user)
827 		return -ENOMEM;
828 
829 	mutex_lock(&ipmi_interfaces_mutex);
830 	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
831 		if (intf->intf_num == if_num)
832 			goto found;
833 	}
834 	/* Not found, return an error */
835 	rv = -EINVAL;
836 	goto out_kfree;
837 
838  found:
839 	/* Note that each existing user holds a refcount to the interface. */
840 	kref_get(&intf->refcount);
841 
842 	kref_init(&new_user->refcount);
843 	new_user->handler = handler;
844 	new_user->handler_data = handler_data;
845 	new_user->intf = intf;
846 	new_user->gets_events = 0;
847 
848 	if (!try_module_get(intf->handlers->owner)) {
849 		rv = -ENODEV;
850 		goto out_kref;
851 	}
852 
853 	if (intf->handlers->inc_usecount) {
854 		rv = intf->handlers->inc_usecount(intf->send_info);
855 		if (rv) {
856 			module_put(intf->handlers->owner);
857 			goto out_kref;
858 		}
859 	}
860 
861 	/* Hold the lock so intf->handlers is guaranteed to be good
862 	 * until now */
863 	mutex_unlock(&ipmi_interfaces_mutex);
864 
865 	new_user->valid = 1;
866 	spin_lock_irqsave(&intf->seq_lock, flags);
867 	list_add_rcu(&new_user->link, &intf->users);
868 	spin_unlock_irqrestore(&intf->seq_lock, flags);
869 	*user = new_user;
870 	return 0;
871 
872 out_kref:
873 	kref_put(&intf->refcount, intf_free);
874 out_kfree:
875 	mutex_unlock(&ipmi_interfaces_mutex);
876 	kfree(new_user);
877 	return rv;
878 }
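/*
 * Illustrative sketch only (handler and data names are hypothetical): a
 * client module typically creates its user like this and destroys it
 * with ipmi_destroy_user() when done:
 *
 *	static struct ipmi_user_hndl my_hndl = {
 *		.ipmi_recv_hndl = my_recv_handler,
 *	};
 *	ipmi_user_t user;
 *	int rv = ipmi_create_user(0, &my_hndl, my_data, &user);
 *	if (rv)
 *		return rv;
 */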
879 
880 static void free_user(struct kref *ref)
881 {
882 	ipmi_user_t user = container_of(ref, struct ipmi_user, refcount);
883 	kfree(user);
884 }
885 
886 int ipmi_destroy_user(ipmi_user_t user)
887 {
888 	ipmi_smi_t       intf = user->intf;
889 	int              i;
890 	unsigned long    flags;
891 	struct cmd_rcvr  *rcvr;
892 	struct cmd_rcvr  *rcvrs = NULL;
893 
894 	user->valid = 0;
895 
896 	/* Remove the user from the interface's sequence table. */
897 	spin_lock_irqsave(&intf->seq_lock, flags);
898 	list_del_rcu(&user->link);
899 
900 	for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
901 		if (intf->seq_table[i].inuse
902 		    && (intf->seq_table[i].recv_msg->user == user))
903 		{
904 			intf->seq_table[i].inuse = 0;
905 			ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
906 		}
907 	}
908 	spin_unlock_irqrestore(&intf->seq_lock, flags);
909 
910 	/*
911 	 * Remove the user from the command receiver's table.  First
912 	 * we build a list of everything (not using the standard link,
913 	 * since other things may be using it till we do
914 	 * synchronize_rcu()) then free everything in that list.
915 	 */
916 	mutex_lock(&intf->cmd_rcvrs_mutex);
917 	list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) {
918 		if (rcvr->user == user) {
919 			list_del_rcu(&rcvr->link);
920 			rcvr->next = rcvrs;
921 			rcvrs = rcvr;
922 		}
923 	}
924 	mutex_unlock(&intf->cmd_rcvrs_mutex);
925 	synchronize_rcu();
926 	while (rcvrs) {
927 		rcvr = rcvrs;
928 		rcvrs = rcvr->next;
929 		kfree(rcvr);
930 	}
931 
932 	mutex_lock(&ipmi_interfaces_mutex);
933 	if (intf->handlers) {
934 		module_put(intf->handlers->owner);
935 		if (intf->handlers->dec_usecount)
936 			intf->handlers->dec_usecount(intf->send_info);
937 	}
938 	mutex_unlock(&ipmi_interfaces_mutex);
939 
940 	kref_put(&intf->refcount, intf_free);
941 
942 	kref_put(&user->refcount, free_user);
943 
944 	return 0;
945 }
946 
947 void ipmi_get_version(ipmi_user_t   user,
948 		      unsigned char *major,
949 		      unsigned char *minor)
950 {
951 	*major = user->intf->ipmi_version_major;
952 	*minor = user->intf->ipmi_version_minor;
953 }
954 
955 int ipmi_set_my_address(ipmi_user_t   user,
956 			unsigned int  channel,
957 			unsigned char address)
958 {
959 	if (channel >= IPMI_MAX_CHANNELS)
960 		return -EINVAL;
961 	user->intf->channels[channel].address = address;
962 	return 0;
963 }
964 
965 int ipmi_get_my_address(ipmi_user_t   user,
966 			unsigned int  channel,
967 			unsigned char *address)
968 {
969 	if (channel >= IPMI_MAX_CHANNELS)
970 		return -EINVAL;
971 	*address = user->intf->channels[channel].address;
972 	return 0;
973 }
974 
975 int ipmi_set_my_LUN(ipmi_user_t   user,
976 		    unsigned int  channel,
977 		    unsigned char LUN)
978 {
979 	if (channel >= IPMI_MAX_CHANNELS)
980 		return -EINVAL;
981 	user->intf->channels[channel].lun = LUN & 0x3;
982 	return 0;
983 }
984 
985 int ipmi_get_my_LUN(ipmi_user_t   user,
986 		    unsigned int  channel,
987 		    unsigned char *address)
988 {
989 	if (channel >= IPMI_MAX_CHANNELS)
990 		return -EINVAL;
991 	*address = user->intf->channels[channel].lun;
992 	return 0;
993 }
994 
995 int ipmi_get_maintenance_mode(ipmi_user_t user)
996 {
997 	int           mode;
998 	unsigned long flags;
999 
1000 	spin_lock_irqsave(&user->intf->maintenance_mode_lock, flags);
1001 	mode = user->intf->maintenance_mode;
1002 	spin_unlock_irqrestore(&user->intf->maintenance_mode_lock, flags);
1003 
1004 	return mode;
1005 }
1006 EXPORT_SYMBOL(ipmi_get_maintenance_mode);
1007 
1008 static void maintenance_mode_update(ipmi_smi_t intf)
1009 {
1010 	if (intf->handlers->set_maintenance_mode)
1011 		intf->handlers->set_maintenance_mode(
1012 			intf->send_info, intf->maintenance_mode_enable);
1013 }
1014 
1015 int ipmi_set_maintenance_mode(ipmi_user_t user, int mode)
1016 {
1017 	int           rv = 0;
1018 	unsigned long flags;
1019 	ipmi_smi_t    intf = user->intf;
1020 
1021 	spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
1022 	if (intf->maintenance_mode != mode) {
1023 		switch (mode) {
1024 		case IPMI_MAINTENANCE_MODE_AUTO:
1025 			intf->maintenance_mode = mode;
1026 			intf->maintenance_mode_enable
1027 				= (intf->auto_maintenance_timeout > 0);
1028 			break;
1029 
1030 		case IPMI_MAINTENANCE_MODE_OFF:
1031 			intf->maintenance_mode = mode;
1032 			intf->maintenance_mode_enable = 0;
1033 			break;
1034 
1035 		case IPMI_MAINTENANCE_MODE_ON:
1036 			intf->maintenance_mode = mode;
1037 			intf->maintenance_mode_enable = 1;
1038 			break;
1039 
1040 		default:
1041 			rv = -EINVAL;
1042 			goto out_unlock;
1043 		}
1044 
1045 		maintenance_mode_update(intf);
1046 	}
1047  out_unlock:
1048 	spin_unlock_irqrestore(&intf->maintenance_mode_lock, flags);
1049 
1050 	return rv;
1051 }
1052 EXPORT_SYMBOL(ipmi_set_maintenance_mode);
1053 
1054 int ipmi_set_gets_events(ipmi_user_t user, int val)
1055 {
1056 	unsigned long        flags;
1057 	ipmi_smi_t           intf = user->intf;
1058 	struct ipmi_recv_msg *msg, *msg2;
1059 	struct list_head     msgs;
1060 
1061 	INIT_LIST_HEAD(&msgs);
1062 
1063 	spin_lock_irqsave(&intf->events_lock, flags);
1064 	user->gets_events = val;
1065 
1066 	if (intf->delivering_events)
1067 		/*
1068 		 * Another thread is delivering events for this, so
1069 		 * let it handle any new events.
1070 		 */
1071 		goto out;
1072 
1073 	/* Deliver any queued events. */
1074 	while (user->gets_events && !list_empty(&intf->waiting_events)) {
1075 		list_for_each_entry_safe(msg, msg2, &intf->waiting_events, link)
1076 			list_move_tail(&msg->link, &msgs);
1077 		intf->waiting_events_count = 0;
1078 
1079 		intf->delivering_events = 1;
1080 		spin_unlock_irqrestore(&intf->events_lock, flags);
1081 
1082 		list_for_each_entry_safe(msg, msg2, &msgs, link) {
1083 			msg->user = user;
1084 			kref_get(&user->refcount);
1085 			deliver_response(msg);
1086 		}
1087 
1088 		spin_lock_irqsave(&intf->events_lock, flags);
1089 		intf->delivering_events = 0;
1090 	}
1091 
1092  out:
1093 	spin_unlock_irqrestore(&intf->events_lock, flags);
1094 
1095 	return 0;
1096 }
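/*
 * Illustrative note: a client that wants events simply calls
 * ipmi_set_gets_events(user, 1) after creating its user; as implemented
 * above, any events already queued on the interface are delivered to
 * its handler at that point.
 */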
1097 
1098 static struct cmd_rcvr *find_cmd_rcvr(ipmi_smi_t    intf,
1099 				      unsigned char netfn,
1100 				      unsigned char cmd,
1101 				      unsigned char chan)
1102 {
1103 	struct cmd_rcvr *rcvr;
1104 
1105 	list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) {
1106 		if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)
1107 					&& (rcvr->chans & (1 << chan)))
1108 			return rcvr;
1109 	}
1110 	return NULL;
1111 }
1112 
1113 static int is_cmd_rcvr_exclusive(ipmi_smi_t    intf,
1114 				 unsigned char netfn,
1115 				 unsigned char cmd,
1116 				 unsigned int  chans)
1117 {
1118 	struct cmd_rcvr *rcvr;
1119 
1120 	list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) {
1121 		if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)
1122 					&& (rcvr->chans & chans))
1123 			return 0;
1124 	}
1125 	return 1;
1126 }
1127 
1128 int ipmi_register_for_cmd(ipmi_user_t   user,
1129 			  unsigned char netfn,
1130 			  unsigned char cmd,
1131 			  unsigned int  chans)
1132 {
1133 	ipmi_smi_t      intf = user->intf;
1134 	struct cmd_rcvr *rcvr;
1135 	int             rv = 0;
1136 
1137 
1138 	rcvr = kmalloc(sizeof(*rcvr), GFP_KERNEL);
1139 	if (!rcvr)
1140 		return -ENOMEM;
1141 	rcvr->cmd = cmd;
1142 	rcvr->netfn = netfn;
1143 	rcvr->chans = chans;
1144 	rcvr->user = user;
1145 
1146 	mutex_lock(&intf->cmd_rcvrs_mutex);
1147 	/* Make sure the command/netfn is not already registered. */
1148 	if (!is_cmd_rcvr_exclusive(intf, netfn, cmd, chans)) {
1149 		rv = -EBUSY;
1150 		goto out_unlock;
1151 	}
1152 
1153 	list_add_rcu(&rcvr->link, &intf->cmd_rcvrs);
1154 
1155  out_unlock:
1156 	mutex_unlock(&intf->cmd_rcvrs_mutex);
1157 	if (rv)
1158 		kfree(rcvr);
1159 
1160 	return rv;
1161 }
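/*
 * Illustrative sketch only: a client that wants to see Get Device ID
 * requests arriving on channels 0 and 1 would register like this, where
 * 0x03 is a bitmask of channels (bit n set means channel n):
 *
 *	rv = ipmi_register_for_cmd(user, IPMI_NETFN_APP_REQUEST,
 *				   IPMI_GET_DEVICE_ID_CMD, 0x03);
 *
 * ipmi_unregister_for_cmd() below undoes the registration.
 */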
1162 
1163 int ipmi_unregister_for_cmd(ipmi_user_t   user,
1164 			    unsigned char netfn,
1165 			    unsigned char cmd,
1166 			    unsigned int  chans)
1167 {
1168 	ipmi_smi_t      intf = user->intf;
1169 	struct cmd_rcvr *rcvr;
1170 	struct cmd_rcvr *rcvrs = NULL;
1171 	int i, rv = -ENOENT;
1172 
1173 	mutex_lock(&intf->cmd_rcvrs_mutex);
1174 	for (i = 0; i < IPMI_NUM_CHANNELS; i++) {
1175 		if (((1 << i) & chans) == 0)
1176 			continue;
1177 		rcvr = find_cmd_rcvr(intf, netfn, cmd, i);
1178 		if (rcvr == NULL)
1179 			continue;
1180 		if (rcvr->user == user) {
1181 			rv = 0;
1182 			rcvr->chans &= ~chans;
1183 			if (rcvr->chans == 0) {
1184 				list_del_rcu(&rcvr->link);
1185 				rcvr->next = rcvrs;
1186 				rcvrs = rcvr;
1187 			}
1188 		}
1189 	}
1190 	mutex_unlock(&intf->cmd_rcvrs_mutex);
1191 	synchronize_rcu();
1192 	while (rcvrs) {
1193 		rcvr = rcvrs;
1194 		rcvrs = rcvr->next;
1195 		kfree(rcvr);
1196 	}
1197 	return rv;
1198 }
1199 
1200 void ipmi_user_set_run_to_completion(ipmi_user_t user, int val)
1201 {
1202 	ipmi_smi_t intf = user->intf;
1203 	if (intf->handlers)
1204 		intf->handlers->set_run_to_completion(intf->send_info, val);
1205 }
1206 
1207 static unsigned char
1208 ipmb_checksum(unsigned char *data, int size)
1209 {
1210 	unsigned char csum = 0;
1211 
1212 	for (; size > 0; size--, data++)
1213 		csum += *data;
1214 
1215 	return -csum;
1216 }
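/*
 * The value returned above is the standard IPMB 2's-complement checksum:
 * the sum of the covered bytes plus the checksum is 0 modulo 256.  For
 * example (illustrative only), for the bytes 0x20 and 0x18 the sum is
 * 0x38, so ipmb_checksum() returns 0xc8, and 0x20 + 0x18 + 0xc8 == 0
 * (mod 256), which is what the receiver verifies.
 */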
1217 
1218 static inline void format_ipmb_msg(struct ipmi_smi_msg   *smi_msg,
1219 				   struct kernel_ipmi_msg *msg,
1220 				   struct ipmi_ipmb_addr *ipmb_addr,
1221 				   long                  msgid,
1222 				   unsigned char         ipmb_seq,
1223 				   int                   broadcast,
1224 				   unsigned char         source_address,
1225 				   unsigned char         source_lun)
1226 {
1227 	int i = broadcast;
1228 
1229 	/* Format the IPMB header data. */
1230 	smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
1231 	smi_msg->data[1] = IPMI_SEND_MSG_CMD;
1232 	smi_msg->data[2] = ipmb_addr->channel;
1233 	if (broadcast)
1234 		smi_msg->data[3] = 0;
1235 	smi_msg->data[i+3] = ipmb_addr->slave_addr;
1236 	smi_msg->data[i+4] = (msg->netfn << 2) | (ipmb_addr->lun & 0x3);
1237 	smi_msg->data[i+5] = ipmb_checksum(&(smi_msg->data[i+3]), 2);
1238 	smi_msg->data[i+6] = source_address;
1239 	smi_msg->data[i+7] = (ipmb_seq << 2) | source_lun;
1240 	smi_msg->data[i+8] = msg->cmd;
1241 
1242 	/* Now tack on the data to the message. */
1243 	if (msg->data_len > 0)
1244 		memcpy(&(smi_msg->data[i+9]), msg->data,
1245 		       msg->data_len);
1246 	smi_msg->data_size = msg->data_len + 9;
1247 
1248 	/* Now calculate the checksum and tack it on. */
1249 	smi_msg->data[i+smi_msg->data_size]
1250 		= ipmb_checksum(&(smi_msg->data[i+6]),
1251 				smi_msg->data_size-6);
1252 
1253 	/* Add on the checksum size and the offset from the
1254 	   broadcast. */
1255 	smi_msg->data_size += 1 + i;
1256 
1257 	smi_msg->msgid = msgid;
1258 }
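/*
 * For reference, the Send Message payload built above has this layout
 * for a non-broadcast request (i == 0); a broadcast inserts one zero
 * byte after the channel and shifts everything below it down by one:
 *
 *	data[0]   IPMI_NETFN_APP_REQUEST << 2
 *	data[1]   IPMI_SEND_MSG_CMD
 *	data[2]   channel
 *	data[3]   rsSA (destination slave address)
 *	data[4]   netFn/rsLUN
 *	data[5]   checksum over data[3..4]
 *	data[6]   rqSA (source address)
 *	data[7]   rqSeq/rqLUN
 *	data[8]   cmd
 *	data[9..] message data, followed by a trailing checksum
 */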
1259 
1260 static inline void format_lan_msg(struct ipmi_smi_msg   *smi_msg,
1261 				  struct kernel_ipmi_msg *msg,
1262 				  struct ipmi_lan_addr  *lan_addr,
1263 				  long                  msgid,
1264 				  unsigned char         ipmb_seq,
1265 				  unsigned char         source_lun)
1266 {
1267 	/* Format the IPMB header data. */
1268 	smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
1269 	smi_msg->data[1] = IPMI_SEND_MSG_CMD;
1270 	smi_msg->data[2] = lan_addr->channel;
1271 	smi_msg->data[3] = lan_addr->session_handle;
1272 	smi_msg->data[4] = lan_addr->remote_SWID;
1273 	smi_msg->data[5] = (msg->netfn << 2) | (lan_addr->lun & 0x3);
1274 	smi_msg->data[6] = ipmb_checksum(&(smi_msg->data[4]), 2);
1275 	smi_msg->data[7] = lan_addr->local_SWID;
1276 	smi_msg->data[8] = (ipmb_seq << 2) | source_lun;
1277 	smi_msg->data[9] = msg->cmd;
1278 
1279 	/* Now tack on the data to the message. */
1280 	if (msg->data_len > 0)
1281 		memcpy(&(smi_msg->data[10]), msg->data,
1282 		       msg->data_len);
1283 	smi_msg->data_size = msg->data_len + 10;
1284 
1285 	/* Now calculate the checksum and tack it on. */
1286 	smi_msg->data[smi_msg->data_size]
1287 		= ipmb_checksum(&(smi_msg->data[7]),
1288 				smi_msg->data_size-7);
1289 
1290 	/* Add on the checksum size. */
1292 	smi_msg->data_size += 1;
1293 
1294 	smi_msg->msgid = msgid;
1295 }
1296 
1297 /* Separate from ipmi_request so that the user does not have to be
1298    supplied in certain circumstances (mainly at panic time).  If
1299    messages are supplied, they will be freed, even if an error
1300    occurs. */
1301 static int i_ipmi_request(ipmi_user_t          user,
1302 			  ipmi_smi_t           intf,
1303 			  struct ipmi_addr     *addr,
1304 			  long                 msgid,
1305 			  struct kernel_ipmi_msg *msg,
1306 			  void                 *user_msg_data,
1307 			  void                 *supplied_smi,
1308 			  struct ipmi_recv_msg *supplied_recv,
1309 			  int                  priority,
1310 			  unsigned char        source_address,
1311 			  unsigned char        source_lun,
1312 			  int                  retries,
1313 			  unsigned int         retry_time_ms)
1314 {
1315 	int                      rv = 0;
1316 	struct ipmi_smi_msg      *smi_msg;
1317 	struct ipmi_recv_msg     *recv_msg;
1318 	unsigned long            flags;
1319 	struct ipmi_smi_handlers *handlers;
1320 
1321 
1322 	if (supplied_recv) {
1323 		recv_msg = supplied_recv;
1324 	} else {
1325 		recv_msg = ipmi_alloc_recv_msg();
1326 		if (recv_msg == NULL) {
1327 			return -ENOMEM;
1328 		}
1329 	}
1330 	recv_msg->user_msg_data = user_msg_data;
1331 
1332 	if (supplied_smi) {
1333 		smi_msg = (struct ipmi_smi_msg *) supplied_smi;
1334 	} else {
1335 		smi_msg = ipmi_alloc_smi_msg();
1336 		if (smi_msg == NULL) {
1337 			ipmi_free_recv_msg(recv_msg);
1338 			return -ENOMEM;
1339 		}
1340 	}
1341 
1342 	rcu_read_lock();
1343 	handlers = intf->handlers;
1344 	if (!handlers) {
1345 		rv = -ENODEV;
1346 		goto out_err;
1347 	}
1348 
1349 	recv_msg->user = user;
1350 	if (user)
1351 		kref_get(&user->refcount);
1352 	recv_msg->msgid = msgid;
1353 	/* Store the message to send in the receive message so timeout
1354 	   responses can get the proper response data. */
1355 	recv_msg->msg = *msg;
1356 
1357 	if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
1358 		struct ipmi_system_interface_addr *smi_addr;
1359 
1360 		if (msg->netfn & 1) {
1361 			/* Responses are not allowed to the SMI. */
1362 			rv = -EINVAL;
1363 			goto out_err;
1364 		}
1365 
1366 		smi_addr = (struct ipmi_system_interface_addr *) addr;
1367 		if (smi_addr->lun > 3) {
1368 			spin_lock_irqsave(&intf->counter_lock, flags);
1369 			intf->sent_invalid_commands++;
1370 			spin_unlock_irqrestore(&intf->counter_lock, flags);
1371 			rv = -EINVAL;
1372 			goto out_err;
1373 		}
1374 
1375 		memcpy(&recv_msg->addr, smi_addr, sizeof(*smi_addr));
1376 
1377 		if ((msg->netfn == IPMI_NETFN_APP_REQUEST)
1378 		    && ((msg->cmd == IPMI_SEND_MSG_CMD)
1379 			|| (msg->cmd == IPMI_GET_MSG_CMD)
1380 			|| (msg->cmd == IPMI_READ_EVENT_MSG_BUFFER_CMD)))
1381 		{
1382 			/* We don't let the user do these, since we manage
1383 			   the sequence numbers. */
1384 			spin_lock_irqsave(&intf->counter_lock, flags);
1385 			intf->sent_invalid_commands++;
1386 			spin_unlock_irqrestore(&intf->counter_lock, flags);
1387 			rv = -EINVAL;
1388 			goto out_err;
1389 		}
1390 
1391 		if (((msg->netfn == IPMI_NETFN_APP_REQUEST)
1392 		      && ((msg->cmd == IPMI_COLD_RESET_CMD)
1393 			  || (msg->cmd == IPMI_WARM_RESET_CMD)))
1394 		     || (msg->netfn == IPMI_NETFN_FIRMWARE_REQUEST))
1395 		{
1396 			spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
1397 			intf->auto_maintenance_timeout
1398 				= IPMI_MAINTENANCE_MODE_TIMEOUT;
1399 			if (!intf->maintenance_mode
1400 			    && !intf->maintenance_mode_enable)
1401 			{
1402 				intf->maintenance_mode_enable = 1;
1403 				maintenance_mode_update(intf);
1404 			}
1405 			spin_unlock_irqrestore(&intf->maintenance_mode_lock,
1406 					       flags);
1407 		}
1408 
1409 		if ((msg->data_len + 2) > IPMI_MAX_MSG_LENGTH) {
1410 			spin_lock_irqsave(&intf->counter_lock, flags);
1411 			intf->sent_invalid_commands++;
1412 			spin_unlock_irqrestore(&intf->counter_lock, flags);
1413 			rv = -EMSGSIZE;
1414 			goto out_err;
1415 		}
1416 
1417 		smi_msg->data[0] = (msg->netfn << 2) | (smi_addr->lun & 0x3);
1418 		smi_msg->data[1] = msg->cmd;
1419 		smi_msg->msgid = msgid;
1420 		smi_msg->user_data = recv_msg;
1421 		if (msg->data_len > 0)
1422 			memcpy(&(smi_msg->data[2]), msg->data, msg->data_len);
1423 		smi_msg->data_size = msg->data_len + 2;
1424 		spin_lock_irqsave(&intf->counter_lock, flags);
1425 		intf->sent_local_commands++;
1426 		spin_unlock_irqrestore(&intf->counter_lock, flags);
1427 	} else if ((addr->addr_type == IPMI_IPMB_ADDR_TYPE)
1428 		   || (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
1429 	{
1430 		struct ipmi_ipmb_addr *ipmb_addr;
1431 		unsigned char         ipmb_seq;
1432 		long                  seqid;
1433 		int                   broadcast = 0;
1434 
1435 		if (addr->channel >= IPMI_MAX_CHANNELS) {
1436 		        spin_lock_irqsave(&intf->counter_lock, flags);
1437 			intf->sent_invalid_commands++;
1438 			spin_unlock_irqrestore(&intf->counter_lock, flags);
1439 			rv = -EINVAL;
1440 			goto out_err;
1441 		}
1442 
1443 		if (intf->channels[addr->channel].medium
1444 		    != IPMI_CHANNEL_MEDIUM_IPMB)
1445 		{
1446 			spin_lock_irqsave(&intf->counter_lock, flags);
1447 			intf->sent_invalid_commands++;
1448 			spin_unlock_irqrestore(&intf->counter_lock, flags);
1449 			rv = -EINVAL;
1450 			goto out_err;
1451 		}
1452 
1453 		if (retries < 0) {
1454 		    if (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE)
1455 			retries = 0; /* Don't retry broadcasts. */
1456 		    else
1457 			retries = 4;
1458 		}
1459 		if (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE) {
1460 		    /* Broadcasts add a zero at the beginning of the
1461 		       message, but otherwise are the same as an IPMB
1462 		       address. */
1463 		    addr->addr_type = IPMI_IPMB_ADDR_TYPE;
1464 		    broadcast = 1;
1465 		}
1466 
1467 
1468 		/* Default to 1 second retries. */
1469 		if (retry_time_ms == 0)
1470 		    retry_time_ms = 1000;
1471 
1472 		/* 9 for the header and 1 for the checksum, plus
1473                    possibly one for the broadcast. */
1474 		if ((msg->data_len + 10 + broadcast) > IPMI_MAX_MSG_LENGTH) {
1475 			spin_lock_irqsave(&intf->counter_lock, flags);
1476 			intf->sent_invalid_commands++;
1477 			spin_unlock_irqrestore(&intf->counter_lock, flags);
1478 			rv = -EMSGSIZE;
1479 			goto out_err;
1480 		}
1481 
1482 		ipmb_addr = (struct ipmi_ipmb_addr *) addr;
1483 		if (ipmb_addr->lun > 3) {
1484 			spin_lock_irqsave(&intf->counter_lock, flags);
1485 			intf->sent_invalid_commands++;
1486 			spin_unlock_irqrestore(&intf->counter_lock, flags);
1487 			rv = -EINVAL;
1488 			goto out_err;
1489 		}
1490 
1491 		memcpy(&recv_msg->addr, ipmb_addr, sizeof(*ipmb_addr));
1492 
1493 		if (recv_msg->msg.netfn & 0x1) {
1494 			/* It's a response, so use the user's sequence
1495                            from msgid. */
1496 			spin_lock_irqsave(&intf->counter_lock, flags);
1497 			intf->sent_ipmb_responses++;
1498 			spin_unlock_irqrestore(&intf->counter_lock, flags);
1499 			format_ipmb_msg(smi_msg, msg, ipmb_addr, msgid,
1500 					msgid, broadcast,
1501 					source_address, source_lun);
1502 
1503 			/* Save the receive message so we can use it
1504 			   to deliver the response. */
1505 			smi_msg->user_data = recv_msg;
1506 		} else {
1507 			/* It's a command, so get a sequence for it. */
1508 
1509 			spin_lock_irqsave(&(intf->seq_lock), flags);
1510 
1511 			spin_lock(&intf->counter_lock);
1512 			intf->sent_ipmb_commands++;
1513 			spin_unlock(&intf->counter_lock);
1514 
1515 			/* Create an entry in the sequence table with the
1516                            given timeout and retries (default 1 second, 4 retries). */
1517 			rv = intf_next_seq(intf,
1518 					   recv_msg,
1519 					   retry_time_ms,
1520 					   retries,
1521 					   broadcast,
1522 					   &ipmb_seq,
1523 					   &seqid);
1524 			if (rv) {
1525 				/* We have used up all the sequence numbers,
1526 				   probably, so abort. */
1527 				spin_unlock_irqrestore(&(intf->seq_lock),
1528 						       flags);
1529 				goto out_err;
1530 			}
1531 
1532 			/* Store the sequence number in the message,
1533                            so that when the send message response
1534                            comes back we can start the timer. */
1535 			format_ipmb_msg(smi_msg, msg, ipmb_addr,
1536 					STORE_SEQ_IN_MSGID(ipmb_seq, seqid),
1537 					ipmb_seq, broadcast,
1538 					source_address, source_lun);
1539 
1540 			/* Copy the message into the recv message data, so we
1541 			   can retransmit it later if necessary. */
1542 			memcpy(recv_msg->msg_data, smi_msg->data,
1543 			       smi_msg->data_size);
1544 			recv_msg->msg.data = recv_msg->msg_data;
1545 			recv_msg->msg.data_len = smi_msg->data_size;
1546 
1547 			/* We don't unlock until here, because we need
1548                            to copy the completed message into the
1549                            recv_msg before we release the lock.
1550                            Otherwise, race conditions may bite us.  I
1551                            know that's pretty paranoid, but I prefer
1552                            to be correct. */
1553 			spin_unlock_irqrestore(&(intf->seq_lock), flags);
1554 		}
1555 	} else if (addr->addr_type == IPMI_LAN_ADDR_TYPE) {
1556 		struct ipmi_lan_addr  *lan_addr;
1557 		unsigned char         ipmb_seq;
1558 		long                  seqid;
1559 
1560 		if (addr->channel >= IPMI_MAX_CHANNELS) {
1561 			spin_lock_irqsave(&intf->counter_lock, flags);
1562 			intf->sent_invalid_commands++;
1563 			spin_unlock_irqrestore(&intf->counter_lock, flags);
1564 			rv = -EINVAL;
1565 			goto out_err;
1566 		}
1567 
1568 		if ((intf->channels[addr->channel].medium
1569 		    != IPMI_CHANNEL_MEDIUM_8023LAN)
1570 		    && (intf->channels[addr->channel].medium
1571 			!= IPMI_CHANNEL_MEDIUM_ASYNC))
1572 		{
1573 			spin_lock_irqsave(&intf->counter_lock, flags);
1574 			intf->sent_invalid_commands++;
1575 			spin_unlock_irqrestore(&intf->counter_lock, flags);
1576 			rv = -EINVAL;
1577 			goto out_err;
1578 		}
1579 
1580 		/* Default to 4 retries if the caller did not specify. */
		if (retries < 0)
			retries = 4;
1581 
1582 		/* Default to 1 second retries. */
1583 		if (retry_time_ms == 0)
1584 		    retry_time_ms = 1000;
1585 
1586 		/* 11 for the header and 1 for the checksum. */
1587 		if ((msg->data_len + 12) > IPMI_MAX_MSG_LENGTH) {
1588 			spin_lock_irqsave(&intf->counter_lock, flags);
1589 			intf->sent_invalid_commands++;
1590 			spin_unlock_irqrestore(&intf->counter_lock, flags);
1591 			rv = -EMSGSIZE;
1592 			goto out_err;
1593 		}
1594 
1595 		lan_addr = (struct ipmi_lan_addr *) addr;
1596 		if (lan_addr->lun > 3) {
1597 			spin_lock_irqsave(&intf->counter_lock, flags);
1598 			intf->sent_invalid_commands++;
1599 			spin_unlock_irqrestore(&intf->counter_lock, flags);
1600 			rv = -EINVAL;
1601 			goto out_err;
1602 		}
1603 
1604 		memcpy(&recv_msg->addr, lan_addr, sizeof(*lan_addr));
1605 
1606 		if (recv_msg->msg.netfn & 0x1) {
1607 			/* It's a response, so use the user's sequence
1608                            from msgid. */
1609 			spin_lock_irqsave(&intf->counter_lock, flags);
1610 			intf->sent_lan_responses++;
1611 			spin_unlock_irqrestore(&intf->counter_lock, flags);
1612 			format_lan_msg(smi_msg, msg, lan_addr, msgid,
1613 				       msgid, source_lun);
1614 
1615 			/* Save the receive message so we can use it
1616 			   to deliver the response. */
1617 			smi_msg->user_data = recv_msg;
1618 		} else {
1619 			/* It's a command, so get a sequence for it. */
1620 
1621 			spin_lock_irqsave(&(intf->seq_lock), flags);
1622 
1623 			spin_lock(&intf->counter_lock);
1624 			intf->sent_lan_commands++;
1625 			spin_unlock(&intf->counter_lock);
1626 
1627 			/* Create an entry in the sequence table with the
1628                            given timeout and retries (default 1 second, 4 retries). */
1629 			rv = intf_next_seq(intf,
1630 					   recv_msg,
1631 					   retry_time_ms,
1632 					   retries,
1633 					   0,
1634 					   &ipmb_seq,
1635 					   &seqid);
1636 			if (rv) {
1637 				/* We have used up all the sequence numbers,
1638 				   probably, so abort. */
1639 				spin_unlock_irqrestore(&(intf->seq_lock),
1640 						       flags);
1641 				goto out_err;
1642 			}
1643 
1644 			/* Store the sequence number in the message,
1645                            so that when the send message response
1646                            comes back we can start the timer. */
1647 			format_lan_msg(smi_msg, msg, lan_addr,
1648 				       STORE_SEQ_IN_MSGID(ipmb_seq, seqid),
1649 				       ipmb_seq, source_lun);
1650 
1651 			/* Copy the message into the recv message data, so we
1652 			   can retransmit it later if necessary. */
1653 			memcpy(recv_msg->msg_data, smi_msg->data,
1654 			       smi_msg->data_size);
1655 			recv_msg->msg.data = recv_msg->msg_data;
1656 			recv_msg->msg.data_len = smi_msg->data_size;
1657 
1658 			/* We don't unlock until here, because we need
1659                            to copy the completed message into the
1660                            recv_msg before we release the lock.
1661                            Otherwise, race conditions may bite us.  I
1662                            know that's pretty paranoid, but I prefer
1663                            to be correct. */
1664 			spin_unlock_irqrestore(&(intf->seq_lock), flags);
1665 		}
1666 	} else {
1667 		/* Unknown address type. */
1668 		spin_lock_irqsave(&intf->counter_lock, flags);
1669 		intf->sent_invalid_commands++;
1670 		spin_unlock_irqrestore(&intf->counter_lock, flags);
1671 		rv = -EINVAL;
1672 		goto out_err;
1673 	}
1674 
1675 #ifdef DEBUG_MSGING
1676 	{
1677 		int m;
1678 		for (m = 0; m < smi_msg->data_size; m++)
1679 			printk(" %2.2x", smi_msg->data[m]);
1680 		printk("\n");
1681 	}
1682 #endif
1683 
1684 	handlers->sender(intf->send_info, smi_msg, priority);
1685 	rcu_read_unlock();
1686 
1687 	return 0;
1688 
1689  out_err:
1690 	rcu_read_unlock();
1691 	ipmi_free_smi_msg(smi_msg);
1692 	ipmi_free_recv_msg(recv_msg);
1693 	return rv;
1694 }
1695 
1696 static int check_addr(ipmi_smi_t       intf,
1697 		      struct ipmi_addr *addr,
1698 		      unsigned char    *saddr,
1699 		      unsigned char    *lun)
1700 {
1701 	if (addr->channel >= IPMI_MAX_CHANNELS)
1702 		return -EINVAL;
1703 	*lun = intf->channels[addr->channel].lun;
1704 	*saddr = intf->channels[addr->channel].address;
1705 	return 0;
1706 }
1707 
1708 int ipmi_request_settime(ipmi_user_t      user,
1709 			 struct ipmi_addr *addr,
1710 			 long             msgid,
1711 			 struct kernel_ipmi_msg  *msg,
1712 			 void             *user_msg_data,
1713 			 int              priority,
1714 			 int              retries,
1715 			 unsigned int     retry_time_ms)
1716 {
1717 	unsigned char saddr, lun;
1718 	int           rv;
1719 
1720 	if (!user)
1721 		return -EINVAL;
1722 	rv = check_addr(user->intf, addr, &saddr, &lun);
1723 	if (rv)
1724 		return rv;
1725 	return i_ipmi_request(user,
1726 			      user->intf,
1727 			      addr,
1728 			      msgid,
1729 			      msg,
1730 			      user_msg_data,
1731 			      NULL, NULL,
1732 			      priority,
1733 			      saddr,
1734 			      lun,
1735 			      retries,
1736 			      retry_time_ms);
1737 }
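/*
 * Illustrative sketch only (variable names are hypothetical): sending a
 * Get Device ID request to the local BMC with default retry handling
 * looks roughly like
 *
 *	struct ipmi_system_interface_addr addr = {
 *		.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE,
 *		.channel   = IPMI_BMC_CHANNEL,
 *		.lun       = 0,
 *	};
 *	struct kernel_ipmi_msg msg = {
 *		.netfn    = IPMI_NETFN_APP_REQUEST,
 *		.cmd      = IPMI_GET_DEVICE_ID_CMD,
 *		.data     = NULL,
 *		.data_len = 0,
 *	};
 *	rv = ipmi_request_settime(user, (struct ipmi_addr *) &addr, 0,
 *				  &msg, NULL, 0, -1, 0);
 *
 * Passing -1 for retries and 0 for retry_time_ms requests the default
 * retry behavior.
 */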
1738 
1739 int ipmi_request_supply_msgs(ipmi_user_t          user,
1740 			     struct ipmi_addr     *addr,
1741 			     long                 msgid,
1742 			     struct kernel_ipmi_msg *msg,
1743 			     void                 *user_msg_data,
1744 			     void                 *supplied_smi,
1745 			     struct ipmi_recv_msg *supplied_recv,
1746 			     int                  priority)
1747 {
1748 	unsigned char saddr, lun;
1749 	int           rv;
1750 
1751 	if (!user)
1752 		return -EINVAL;
1753 	rv = check_addr(user->intf, addr, &saddr, &lun);
1754 	if (rv)
1755 		return rv;
1756 	return i_ipmi_request(user,
1757 			      user->intf,
1758 			      addr,
1759 			      msgid,
1760 			      msg,
1761 			      user_msg_data,
1762 			      supplied_smi,
1763 			      supplied_recv,
1764 			      priority,
1765 			      saddr,
1766 			      lun,
1767 			      -1, 0);
1768 }
1769 
1770 #ifdef CONFIG_PROC_FS
1771 static int ipmb_file_read_proc(char *page, char **start, off_t off,
1772 			       int count, int *eof, void *data)
1773 {
1774 	char       *out = (char *) page;
1775 	ipmi_smi_t intf = data;
1776 	int        i;
1777 	int        rv = 0;
1778 
1779 	for (i = 0; i < IPMI_MAX_CHANNELS; i++)
1780 		rv += sprintf(out+rv, "%x ", intf->channels[i].address);
1781 	out[rv-1] = '\n'; /* Replace the final space with a newline */
1782 	out[rv] = '\0';
1783 	rv++;
1784 	return rv;
1785 }
1786 
1787 static int version_file_read_proc(char *page, char **start, off_t off,
1788 				  int count, int *eof, void *data)
1789 {
1790 	char       *out = (char *) page;
1791 	ipmi_smi_t intf = data;
1792 
1793 	return sprintf(out, "%d.%d\n",
1794 		       ipmi_version_major(&intf->bmc->id),
1795 		       ipmi_version_minor(&intf->bmc->id));
1796 }
1797 
1798 static int stat_file_read_proc(char *page, char **start, off_t off,
1799 			       int count, int *eof, void *data)
1800 {
1801 	char       *out = (char *) page;
1802 	ipmi_smi_t intf = data;
1803 
1804 	out += sprintf(out, "sent_invalid_commands:       %d\n",
1805 		       intf->sent_invalid_commands);
1806 	out += sprintf(out, "sent_local_commands:         %d\n",
1807 		       intf->sent_local_commands);
1808 	out += sprintf(out, "handled_local_responses:     %d\n",
1809 		       intf->handled_local_responses);
1810 	out += sprintf(out, "unhandled_local_responses:   %d\n",
1811 		       intf->unhandled_local_responses);
1812 	out += sprintf(out, "sent_ipmb_commands:          %d\n",
1813 		       intf->sent_ipmb_commands);
1814 	out += sprintf(out, "sent_ipmb_command_errs:      %d\n",
1815 		       intf->sent_ipmb_command_errs);
1816 	out += sprintf(out, "retransmitted_ipmb_commands: %d\n",
1817 		       intf->retransmitted_ipmb_commands);
1818 	out += sprintf(out, "timed_out_ipmb_commands:     %d\n",
1819 		       intf->timed_out_ipmb_commands);
1820 	out += sprintf(out, "timed_out_ipmb_broadcasts:   %d\n",
1821 		       intf->timed_out_ipmb_broadcasts);
1822 	out += sprintf(out, "sent_ipmb_responses:         %d\n",
1823 		       intf->sent_ipmb_responses);
1824 	out += sprintf(out, "handled_ipmb_responses:      %d\n",
1825 		       intf->handled_ipmb_responses);
1826 	out += sprintf(out, "invalid_ipmb_responses:      %d\n",
1827 		       intf->invalid_ipmb_responses);
1828 	out += sprintf(out, "unhandled_ipmb_responses:    %d\n",
1829 		       intf->unhandled_ipmb_responses);
1830 	out += sprintf(out, "sent_lan_commands:           %d\n",
1831 		       intf->sent_lan_commands);
1832 	out += sprintf(out, "sent_lan_command_errs:       %d\n",
1833 		       intf->sent_lan_command_errs);
1834 	out += sprintf(out, "retransmitted_lan_commands:  %d\n",
1835 		       intf->retransmitted_lan_commands);
1836 	out += sprintf(out, "timed_out_lan_commands:      %d\n",
1837 		       intf->timed_out_lan_commands);
1838 	out += sprintf(out, "sent_lan_responses:          %d\n",
1839 		       intf->sent_lan_responses);
1840 	out += sprintf(out, "handled_lan_responses:       %d\n",
1841 		       intf->handled_lan_responses);
1842 	out += sprintf(out, "invalid_lan_responses:       %d\n",
1843 		       intf->invalid_lan_responses);
1844 	out += sprintf(out, "unhandled_lan_responses:     %d\n",
1845 		       intf->unhandled_lan_responses);
1846 	out += sprintf(out, "handled_commands:            %d\n",
1847 		       intf->handled_commands);
1848 	out += sprintf(out, "invalid_commands:            %d\n",
1849 		       intf->invalid_commands);
1850 	out += sprintf(out, "unhandled_commands:          %d\n",
1851 		       intf->unhandled_commands);
1852 	out += sprintf(out, "invalid_events:              %d\n",
1853 		       intf->invalid_events);
1854 	out += sprintf(out, "events:                      %d\n",
1855 		       intf->events);
1856 
1857 	return (out - ((char *) page));
1858 }
1859 #endif /* CONFIG_PROC_FS */
1860 
1861 int ipmi_smi_add_proc_entry(ipmi_smi_t smi, char *name,
1862 			    read_proc_t *read_proc, write_proc_t *write_proc,
1863 			    void *data, struct module *owner)
1864 {
1865 	int                    rv = 0;
1866 #ifdef CONFIG_PROC_FS
1867 	struct proc_dir_entry  *file;
1868 	struct ipmi_proc_entry *entry;
1869 
1870 	/* Create a list element. */
1871 	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
1872 	if (!entry)
1873 		return -ENOMEM;
1874 	entry->name = kmalloc(strlen(name)+1, GFP_KERNEL);
1875 	if (!entry->name) {
1876 		kfree(entry);
1877 		return -ENOMEM;
1878 	}
1879 	strcpy(entry->name, name);
1880 
1881 	file = create_proc_entry(name, 0, smi->proc_dir);
1882 	if (!file) {
1883 		kfree(entry->name);
1884 		kfree(entry);
1885 		rv = -ENOMEM;
1886 	} else {
1887 		file->data = data;
1888 		file->read_proc = read_proc;
1889 		file->write_proc = write_proc;
1890 		file->owner = owner;
1891 
1892 		mutex_lock(&smi->proc_entry_lock);
1893 		/* Stick it on the list. */
1894 		entry->next = smi->proc_entries;
1895 		smi->proc_entries = entry;
1896 		mutex_unlock(&smi->proc_entry_lock);
1897 	}
1898 #endif /* CONFIG_PROC_FS */
1899 
1900 	return rv;
1901 }
1902 
1903 static int add_proc_entries(ipmi_smi_t smi, int num)
1904 {
1905 	int rv = 0;
1906 
1907 #ifdef CONFIG_PROC_FS
1908 	sprintf(smi->proc_dir_name, "%d", num);
1909 	smi->proc_dir = proc_mkdir(smi->proc_dir_name, proc_ipmi_root);
1910 	if (!smi->proc_dir)
1911 		rv = -ENOMEM;
1912 	else {
1913 		smi->proc_dir->owner = THIS_MODULE;
1914 	}
1915 
1916 	if (rv == 0)
1917 		rv = ipmi_smi_add_proc_entry(smi, "stats",
1918 					     stat_file_read_proc, NULL,
1919 					     smi, THIS_MODULE);
1920 
1921 	if (rv == 0)
1922 		rv = ipmi_smi_add_proc_entry(smi, "ipmb",
1923 					     ipmb_file_read_proc, NULL,
1924 					     smi, THIS_MODULE);
1925 
1926 	if (rv == 0)
1927 		rv = ipmi_smi_add_proc_entry(smi, "version",
1928 					     version_file_read_proc, NULL,
1929 					     smi, THIS_MODULE);
1930 #endif /* CONFIG_PROC_FS */
1931 
1932 	return rv;
1933 }
1934 
1935 static void remove_proc_entries(ipmi_smi_t smi)
1936 {
1937 #ifdef CONFIG_PROC_FS
1938 	struct ipmi_proc_entry *entry;
1939 
1940 	mutex_lock(&smi->proc_entry_lock);
1941 	while (smi->proc_entries) {
1942 		entry = smi->proc_entries;
1943 		smi->proc_entries = entry->next;
1944 
1945 		remove_proc_entry(entry->name, smi->proc_dir);
1946 		kfree(entry->name);
1947 		kfree(entry);
1948 	}
1949 	mutex_unlock(&smi->proc_entry_lock);
1950 	remove_proc_entry(smi->proc_dir_name, proc_ipmi_root);
1951 #endif /* CONFIG_PROC_FS */
1952 }
1953 
1954 static int __find_bmc_guid(struct device *dev, void *data)
1955 {
1956 	unsigned char *id = data;
1957 	struct bmc_device *bmc = dev_get_drvdata(dev);
1958 	return memcmp(bmc->guid, id, 16) == 0;
1959 }
1960 
1961 static struct bmc_device *ipmi_find_bmc_guid(struct device_driver *drv,
1962 					     unsigned char *guid)
1963 {
1964 	struct device *dev;
1965 
1966 	dev = driver_find_device(drv, NULL, guid, __find_bmc_guid);
1967 	if (dev)
1968 		return dev_get_drvdata(dev);
1969 	else
1970 		return NULL;
1971 }
1972 
1973 struct prod_dev_id {
1974 	unsigned int  product_id;
1975 	unsigned char device_id;
1976 };
1977 
1978 static int __find_bmc_prod_dev_id(struct device *dev, void *data)
1979 {
1980 	struct prod_dev_id *id = data;
1981 	struct bmc_device *bmc = dev_get_drvdata(dev);
1982 
1983 	return (bmc->id.product_id == id->product_id
1984 		&& bmc->id.device_id == id->device_id);
1985 }
1986 
1987 static struct bmc_device *ipmi_find_bmc_prod_dev_id(
1988 	struct device_driver *drv,
1989 	unsigned int product_id, unsigned char device_id)
1990 {
1991 	struct prod_dev_id id = {
1992 		.product_id = product_id,
1993 		.device_id = device_id,
1994 	};
1995 	struct device *dev;
1996 
1997 	dev = driver_find_device(drv, NULL, &id, __find_bmc_prod_dev_id);
1998 	if (dev)
1999 		return dev_get_drvdata(dev);
2000 	else
2001 		return NULL;
2002 }
2003 
2004 static ssize_t device_id_show(struct device *dev,
2005 			      struct device_attribute *attr,
2006 			      char *buf)
2007 {
2008 	struct bmc_device *bmc = dev_get_drvdata(dev);
2009 
2010 	return snprintf(buf, 10, "%u\n", bmc->id.device_id);
2011 }
2012 
2013 static ssize_t provides_dev_sdrs_show(struct device *dev,
2014 				      struct device_attribute *attr,
2015 				      char *buf)
2016 {
2017 	struct bmc_device *bmc = dev_get_drvdata(dev);
2018 
2019 	return snprintf(buf, 10, "%u\n",
2020 			(bmc->id.device_revision & 0x80) >> 7);
2021 }
2022 
2023 static ssize_t revision_show(struct device *dev, struct device_attribute *attr,
2024 			     char *buf)
2025 {
2026 	struct bmc_device *bmc = dev_get_drvdata(dev);
2027 
2028 	return snprintf(buf, 20, "%u\n",
2029 			bmc->id.device_revision & 0x0F);
2030 }
2031 
2032 static ssize_t firmware_rev_show(struct device *dev,
2033 				 struct device_attribute *attr,
2034 				 char *buf)
2035 {
2036 	struct bmc_device *bmc = dev_get_drvdata(dev);
2037 
2038 	return snprintf(buf, 20, "%u.%x\n", bmc->id.firmware_revision_1,
2039 			bmc->id.firmware_revision_2);
2040 }
2041 
2042 static ssize_t ipmi_version_show(struct device *dev,
2043 				 struct device_attribute *attr,
2044 				 char *buf)
2045 {
2046 	struct bmc_device *bmc = dev_get_drvdata(dev);
2047 
2048 	return snprintf(buf, 20, "%u.%u\n",
2049 			ipmi_version_major(&bmc->id),
2050 			ipmi_version_minor(&bmc->id));
2051 }
2052 
2053 static ssize_t add_dev_support_show(struct device *dev,
2054 				    struct device_attribute *attr,
2055 				    char *buf)
2056 {
2057 	struct bmc_device *bmc = dev_get_drvdata(dev);
2058 
2059 	return snprintf(buf, 10, "0x%02x\n",
2060 			bmc->id.additional_device_support);
2061 }
2062 
2063 static ssize_t manufacturer_id_show(struct device *dev,
2064 				    struct device_attribute *attr,
2065 				    char *buf)
2066 {
2067 	struct bmc_device *bmc = dev_get_drvdata(dev);
2068 
2069 	return snprintf(buf, 20, "0x%6.6x\n", bmc->id.manufacturer_id);
2070 }
2071 
2072 static ssize_t product_id_show(struct device *dev,
2073 			       struct device_attribute *attr,
2074 			       char *buf)
2075 {
2076 	struct bmc_device *bmc = dev_get_drvdata(dev);
2077 
2078 	return snprintf(buf, 10, "0x%4.4x\n", bmc->id.product_id);
2079 }
2080 
2081 static ssize_t aux_firmware_rev_show(struct device *dev,
2082 				     struct device_attribute *attr,
2083 				     char *buf)
2084 {
2085 	struct bmc_device *bmc = dev_get_drvdata(dev);
2086 
2087 	return snprintf(buf, 21, "0x%02x 0x%02x 0x%02x 0x%02x\n",
2088 			bmc->id.aux_firmware_revision[3],
2089 			bmc->id.aux_firmware_revision[2],
2090 			bmc->id.aux_firmware_revision[1],
2091 			bmc->id.aux_firmware_revision[0]);
2092 }
2093 
2094 static ssize_t guid_show(struct device *dev, struct device_attribute *attr,
2095 			 char *buf)
2096 {
2097 	struct bmc_device *bmc = dev_get_drvdata(dev);
2098 
2099 	return snprintf(buf, 100, "%Lx%Lx\n",
2100 			(long long) bmc->guid[0],
2101 			(long long) bmc->guid[8]);
2102 }
2103 
2104 static void remove_files(struct bmc_device *bmc)
2105 {
2106 	if (!bmc->dev)
2107 		return;
2108 
2109 	device_remove_file(&bmc->dev->dev,
2110 			   &bmc->device_id_attr);
2111 	device_remove_file(&bmc->dev->dev,
2112 			   &bmc->provides_dev_sdrs_attr);
2113 	device_remove_file(&bmc->dev->dev,
2114 			   &bmc->revision_attr);
2115 	device_remove_file(&bmc->dev->dev,
2116 			   &bmc->firmware_rev_attr);
2117 	device_remove_file(&bmc->dev->dev,
2118 			   &bmc->version_attr);
2119 	device_remove_file(&bmc->dev->dev,
2120 			   &bmc->add_dev_support_attr);
2121 	device_remove_file(&bmc->dev->dev,
2122 			   &bmc->manufacturer_id_attr);
2123 	device_remove_file(&bmc->dev->dev,
2124 			   &bmc->product_id_attr);
2125 
2126 	if (bmc->id.aux_firmware_revision_set)
2127 		device_remove_file(&bmc->dev->dev,
2128 				   &bmc->aux_firmware_rev_attr);
2129 	if (bmc->guid_set)
2130 		device_remove_file(&bmc->dev->dev,
2131 				   &bmc->guid_attr);
2132 }
2133 
2134 static void
2135 cleanup_bmc_device(struct kref *ref)
2136 {
2137 	struct bmc_device *bmc;
2138 
2139 	bmc = container_of(ref, struct bmc_device, refcount);
2140 
2141 	remove_files(bmc);
2142 	platform_device_unregister(bmc->dev);
2143 	kfree(bmc);
2144 }
2145 
2146 static void ipmi_bmc_unregister(ipmi_smi_t intf)
2147 {
2148 	struct bmc_device *bmc = intf->bmc;
2149 
2150 	if (intf->sysfs_name) {
2151 		sysfs_remove_link(&intf->si_dev->kobj, intf->sysfs_name);
2152 		kfree(intf->sysfs_name);
2153 		intf->sysfs_name = NULL;
2154 	}
2155 	if (intf->my_dev_name) {
2156 		sysfs_remove_link(&bmc->dev->dev.kobj, intf->my_dev_name);
2157 		kfree(intf->my_dev_name);
2158 		intf->my_dev_name = NULL;
2159 	}
2160 
2161 	mutex_lock(&ipmidriver_mutex);
2162 	kref_put(&bmc->refcount, cleanup_bmc_device);
2163 	intf->bmc = NULL;
2164 	mutex_unlock(&ipmidriver_mutex);
2165 }
2166 
2167 static int create_files(struct bmc_device *bmc)
2168 {
2169 	int err;
2170 
2171 	bmc->device_id_attr.attr.name = "device_id";
2172 	bmc->device_id_attr.attr.mode = S_IRUGO;
2173 	bmc->device_id_attr.show = device_id_show;
2174 
2175 	bmc->provides_dev_sdrs_attr.attr.name = "provides_device_sdrs";
2176 	bmc->provides_dev_sdrs_attr.attr.mode = S_IRUGO;
2177 	bmc->provides_dev_sdrs_attr.show = provides_dev_sdrs_show;
2178 
2179 	bmc->revision_attr.attr.name = "revision";
2180 	bmc->revision_attr.attr.mode = S_IRUGO;
2181 	bmc->revision_attr.show = revision_show;
2182 
2183 	bmc->firmware_rev_attr.attr.name = "firmware_revision";
2184 	bmc->firmware_rev_attr.attr.mode = S_IRUGO;
2185 	bmc->firmware_rev_attr.show = firmware_rev_show;
2186 
2187 	bmc->version_attr.attr.name = "ipmi_version";
2188 	bmc->version_attr.attr.mode = S_IRUGO;
2189 	bmc->version_attr.show = ipmi_version_show;
2190 
2191 	bmc->add_dev_support_attr.attr.name = "additional_device_support";
2192 	bmc->add_dev_support_attr.attr.mode = S_IRUGO;
2193 	bmc->add_dev_support_attr.show = add_dev_support_show;
2194 
2195 	bmc->manufacturer_id_attr.attr.name = "manufacturer_id";
2196 	bmc->manufacturer_id_attr.attr.mode = S_IRUGO;
2197 	bmc->manufacturer_id_attr.show = manufacturer_id_show;
2198 
2199 	bmc->product_id_attr.attr.name = "product_id";
2200 	bmc->product_id_attr.attr.mode = S_IRUGO;
2201 	bmc->product_id_attr.show = product_id_show;
2202 
2203 	bmc->guid_attr.attr.name = "guid";
2204 	bmc->guid_attr.attr.mode = S_IRUGO;
2205 	bmc->guid_attr.show = guid_show;
2206 
2207 	bmc->aux_firmware_rev_attr.attr.name = "aux_firmware_revision";
2208 	bmc->aux_firmware_rev_attr.attr.mode = S_IRUGO;
2209 	bmc->aux_firmware_rev_attr.show = aux_firmware_rev_show;
2210 
2211 	err = device_create_file(&bmc->dev->dev,
2212 			   &bmc->device_id_attr);
2213 	if (err) goto out;
2214 	err = device_create_file(&bmc->dev->dev,
2215 			   &bmc->provides_dev_sdrs_attr);
2216 	if (err) goto out_devid;
2217 	err = device_create_file(&bmc->dev->dev,
2218 			   &bmc->revision_attr);
2219 	if (err) goto out_sdrs;
2220 	err = device_create_file(&bmc->dev->dev,
2221 			   &bmc->firmware_rev_attr);
2222 	if (err) goto out_rev;
2223 	err = device_create_file(&bmc->dev->dev,
2224 			   &bmc->version_attr);
2225 	if (err) goto out_firm;
2226 	err = device_create_file(&bmc->dev->dev,
2227 			   &bmc->add_dev_support_attr);
2228 	if (err) goto out_version;
2229 	err = device_create_file(&bmc->dev->dev,
2230 			   &bmc->manufacturer_id_attr);
2231 	if (err) goto out_add_dev;
2232 	err = device_create_file(&bmc->dev->dev,
2233 			   &bmc->product_id_attr);
2234 	if (err) goto out_manu;
2235 	if (bmc->id.aux_firmware_revision_set) {
2236 		err = device_create_file(&bmc->dev->dev,
2237 				   &bmc->aux_firmware_rev_attr);
2238 		if (err) goto out_prod_id;
2239 	}
2240 	if (bmc->guid_set) {
2241 		err = device_create_file(&bmc->dev->dev,
2242 				   &bmc->guid_attr);
2243 		if (err) goto out_aux_firm;
2244 	}
2245 
2246 	return 0;
2247 
2248 out_aux_firm:
2249 	if (bmc->id.aux_firmware_revision_set)
2250 		device_remove_file(&bmc->dev->dev,
2251 				   &bmc->aux_firmware_rev_attr);
2252 out_prod_id:
2253 	device_remove_file(&bmc->dev->dev,
2254 			   &bmc->product_id_attr);
2255 out_manu:
2256 	device_remove_file(&bmc->dev->dev,
2257 			   &bmc->manufacturer_id_attr);
2258 out_add_dev:
2259 	device_remove_file(&bmc->dev->dev,
2260 			   &bmc->add_dev_support_attr);
2261 out_version:
2262 	device_remove_file(&bmc->dev->dev,
2263 			   &bmc->version_attr);
2264 out_firm:
2265 	device_remove_file(&bmc->dev->dev,
2266 			   &bmc->firmware_rev_attr);
2267 out_rev:
2268 	device_remove_file(&bmc->dev->dev,
2269 			   &bmc->revision_attr);
2270 out_sdrs:
2271 	device_remove_file(&bmc->dev->dev,
2272 			   &bmc->provides_dev_sdrs_attr);
2273 out_devid:
2274 	device_remove_file(&bmc->dev->dev,
2275 			   &bmc->device_id_attr);
2276 out:
2277 	return err;
2278 }
2279 
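/*
 * Register the BMC behind this interface with the driver model.  If a
 * bmc_device with the same GUID (or, lacking a GUID, the same product
 * and device ids) is already registered, it is reused and its refcount
 * bumped; otherwise a new platform device is allocated and its sysfs
 * attribute files are created.  Finally the system interface device
 * and the BMC device are linked to each other in sysfs.
 */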
2280 static int ipmi_bmc_register(ipmi_smi_t intf, int ifnum,
2281 			     const char *sysfs_name)
2282 {
2283 	int               rv;
2284 	struct bmc_device *bmc = intf->bmc;
2285 	struct bmc_device *old_bmc;
2286 	int               size;
2287 	char              dummy[1];
2288 
2289 	mutex_lock(&ipmidriver_mutex);
2290 
2291 	/*
2292 	 * Try to find if there is a bmc_device struct
2293 	 * representing the interfaced BMC already.
2294 	 */
2295 	if (bmc->guid_set)
2296 		old_bmc = ipmi_find_bmc_guid(&ipmidriver, bmc->guid);
2297 	else
2298 		old_bmc = ipmi_find_bmc_prod_dev_id(&ipmidriver,
2299 						    bmc->id.product_id,
2300 						    bmc->id.device_id);
2301 
2302 	/*
2303 	 * If there is already a bmc_device, free the new one;
2304 	 * otherwise register the new BMC device.
2305 	 */
2306 	if (old_bmc) {
2307 		kfree(bmc);
2308 		intf->bmc = old_bmc;
2309 		bmc = old_bmc;
2310 
2311 		kref_get(&bmc->refcount);
2312 		mutex_unlock(&ipmidriver_mutex);
2313 
2314 		printk(KERN_INFO
2315 		       "ipmi: interfacing existing BMC (man_id: 0x%6.6x,"
2316 		       " prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
2317 		       bmc->id.manufacturer_id,
2318 		       bmc->id.product_id,
2319 		       bmc->id.device_id);
2320 	} else {
2321 		char name[14];
2322 		unsigned char orig_dev_id = bmc->id.device_id;
2323 		int warn_printed = 0;
2324 
2325 		snprintf(name, sizeof(name),
2326 			 "ipmi_bmc.%4.4x", bmc->id.product_id);
2327 
2328 		while (ipmi_find_bmc_prod_dev_id(&ipmidriver,
2329 						 bmc->id.product_id,
2330 						 bmc->id.device_id)) {
2331 			if (!warn_printed) {
2332 				printk(KERN_WARNING PFX
2333 				       "This machine has two different BMCs"
2334 				       " with the same product id and device"
2335 				       " id.  This is an error in the"
2336 				       " firmware; incrementing the device"
2337 				       " id to work around the problem."
2338 				       " Prod ID = 0x%x, Dev ID = 0x%x\n",
2339 				       bmc->id.product_id, bmc->id.device_id);
2340 				warn_printed = 1;
2341 			}
2342 			bmc->id.device_id++; /* Wraps back to 0 after 255 */
2343 			if (bmc->id.device_id == orig_dev_id) {
2344 				printk(KERN_ERR PFX
2345 				       "Out of device ids!\n");
2346 				break;
2347 			}
2348 		}
2349 
2350 		bmc->dev = platform_device_alloc(name, bmc->id.device_id);
2351 		if (!bmc->dev) {
2352 			mutex_unlock(&ipmidriver_mutex);
2353 			printk(KERN_ERR
2354 			       "ipmi_msghandler:"
2355 			       " Unable to allocate platform device\n");
2356 			return -ENOMEM;
2357 		}
2358 		bmc->dev->dev.driver = &ipmidriver;
2359 		dev_set_drvdata(&bmc->dev->dev, bmc);
2360 		kref_init(&bmc->refcount);
2361 
2362 		rv = platform_device_add(bmc->dev);
2363 		mutex_unlock(&ipmidriver_mutex);
2364 		if (rv) {
2365 			platform_device_put(bmc->dev);
2366 			bmc->dev = NULL;
2367 			printk(KERN_ERR
2368 			       "ipmi_msghandler:"
2369 			       " Unable to register bmc device: %d\n",
2370 			       rv);
2371 			/* Don't go to out_err, you can only do that if
2372 			   the device is registered already. */
2373 			return rv;
2374 		}
2375 
2376 		rv = create_files(bmc);
2377 		if (rv) {
2378 			mutex_lock(&ipmidriver_mutex);
2379 			platform_device_unregister(bmc->dev);
2380 			mutex_unlock(&ipmidriver_mutex);
2381 
2382 			return rv;
2383 		}
2384 
2385 		printk(KERN_INFO
2386 		       "ipmi: Found new BMC (man_id: 0x%6.6x,"
2387 		       " prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
2388 		       bmc->id.manufacturer_id,
2389 		       bmc->id.product_id,
2390 		       bmc->id.device_id);
2391 	}
2392 
2393 	/*
2394 	 * create symlink from system interface device to bmc device
2395 	 * and back.
2396 	 */
2397 	intf->sysfs_name = kstrdup(sysfs_name, GFP_KERNEL);
2398 	if (!intf->sysfs_name) {
2399 		rv = -ENOMEM;
2400 		printk(KERN_ERR
2401 		       "ipmi_msghandler: Unable to allocate link to BMC: %d\n",
2402 		       rv);
2403 		goto out_err;
2404 	}
2405 
2406 	rv = sysfs_create_link(&intf->si_dev->kobj,
2407 			       &bmc->dev->dev.kobj, intf->sysfs_name);
2408 	if (rv) {
2409 		kfree(intf->sysfs_name);
2410 		intf->sysfs_name = NULL;
2411 		printk(KERN_ERR
2412 		       "ipmi_msghandler: Unable to create bmc symlink: %d\n",
2413 		       rv);
2414 		goto out_err;
2415 	}
2416 
2417 	size = snprintf(dummy, 0, "ipmi%d", ifnum);
2418 	intf->my_dev_name = kmalloc(size+1, GFP_KERNEL);
2419 	if (!intf->my_dev_name) {
2420 		kfree(intf->sysfs_name);
2421 		intf->sysfs_name = NULL;
2422 		rv = -ENOMEM;
2423 		printk(KERN_ERR
2424 		       "ipmi_msghandler: Unable to allocate link from BMC: %d\n",
2425 		       rv);
2426 		goto out_err;
2427 	}
2428 	snprintf(intf->my_dev_name, size+1, "ipmi%d", ifnum);
2429 
2430 	rv = sysfs_create_link(&bmc->dev->dev.kobj, &intf->si_dev->kobj,
2431 			       intf->my_dev_name);
2432 	if (rv) {
2433 		kfree(intf->sysfs_name);
2434 		intf->sysfs_name = NULL;
2435 		kfree(intf->my_dev_name);
2436 		intf->my_dev_name = NULL;
2437 		printk(KERN_ERR
2438 		       "ipmi_msghandler:"
2439 		       " Unable to create symlink to bmc: %d\n",
2440 		       rv);
2441 		goto out_err;
2442 	}
2443 
2444 	return 0;
2445 
2446 out_err:
2447 	ipmi_bmc_unregister(intf);
2448 	return rv;
2449 }
2450 
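/*
 * Ask the BMC for its GUID with a Get Device GUID command on the
 * system interface.  The request has no user, so the response comes
 * back through intf->null_user_handler, which get_guid() points at
 * guid_handler() below.
 */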
2451 static int
2452 send_guid_cmd(ipmi_smi_t intf, int chan)
2453 {
2454 	struct kernel_ipmi_msg            msg;
2455 	struct ipmi_system_interface_addr si;
2456 
2457 	si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
2458 	si.channel = IPMI_BMC_CHANNEL;
2459 	si.lun = 0;
2460 
2461 	msg.netfn = IPMI_NETFN_APP_REQUEST;
2462 	msg.cmd = IPMI_GET_DEVICE_GUID_CMD;
2463 	msg.data = NULL;
2464 	msg.data_len = 0;
2465 	return i_ipmi_request(NULL,
2466 			      intf,
2467 			      (struct ipmi_addr *) &si,
2468 			      0,
2469 			      &msg,
2470 			      intf,
2471 			      NULL,
2472 			      NULL,
2473 			      0,
2474 			      intf->channels[0].address,
2475 			      intf->channels[0].lun,
2476 			      -1, 0);
2477 }
2478 
2479 static void
2480 guid_handler(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
2481 {
2482 	if ((msg->addr.addr_type != IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
2483 	    || (msg->msg.netfn != IPMI_NETFN_APP_RESPONSE)
2484 	    || (msg->msg.cmd != IPMI_GET_DEVICE_GUID_CMD))
2485 		/* Not for me */
2486 		return;
2487 
2488 	if (msg->msg.data[0] != 0) {
2489 		/* Error from getting the GUID, the BMC doesn't have one. */
2490 		intf->bmc->guid_set = 0;
2491 		goto out;
2492 	}
2493 
2494 	if (msg->msg.data_len < 17) {
2495 		intf->bmc->guid_set = 0;
2496 		printk(KERN_WARNING PFX
2497 		       "guid_handler: The GUID response from the BMC was too"
2498 		       " short; it was %d but should have been 17.  Assuming"
2499 		       " GUID is not available.\n",
2500 		       msg->msg.data_len);
2501 		goto out;
2502 	}
2503 
2504 	memcpy(intf->bmc->guid, msg->msg.data, 16);
2505 	intf->bmc->guid_set = 1;
2506  out:
2507 	wake_up(&intf->waitq);
2508 }
2509 
2510 static void
2511 get_guid(ipmi_smi_t intf)
2512 {
2513 	int rv;
2514 
2515 	intf->bmc->guid_set = 0x2;
2516 	intf->null_user_handler = guid_handler;
2517 	rv = send_guid_cmd(intf, 0);
2518 	if (rv)
2519 		/* Send failed, no GUID available. */
2520 		intf->bmc->guid_set = 0;
2521 	wait_event(intf->waitq, intf->bmc->guid_set != 2);
2522 	intf->null_user_handler = NULL;
2523 }
2524 
2525 static int
2526 send_channel_info_cmd(ipmi_smi_t intf, int chan)
2527 {
2528 	struct kernel_ipmi_msg            msg;
2529 	unsigned char                     data[1];
2530 	struct ipmi_system_interface_addr si;
2531 
2532 	si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
2533 	si.channel = IPMI_BMC_CHANNEL;
2534 	si.lun = 0;
2535 
2536 	msg.netfn = IPMI_NETFN_APP_REQUEST;
2537 	msg.cmd = IPMI_GET_CHANNEL_INFO_CMD;
2538 	msg.data = data;
2539 	msg.data_len = 1;
2540 	data[0] = chan;
2541 	return i_ipmi_request(NULL,
2542 			      intf,
2543 			      (struct ipmi_addr *) &si,
2544 			      0,
2545 			      &msg,
2546 			      intf,
2547 			      NULL,
2548 			      NULL,
2549 			      0,
2550 			      intf->channels[0].address,
2551 			      intf->channels[0].lun,
2552 			      -1, 0);
2553 }
2554 
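/*
 * Handle Get Channel Info responses during interface registration.
 * Each response records the medium and protocol for curr_channel and
 * sends the request for the next channel.  If the BMC does not
 * support the command, a single IPMB channel at channel zero is
 * assumed.  Once the scan finishes (or fails), curr_channel reaches
 * IPMI_MAX_CHANNELS and anyone waiting on intf->waitq is woken.
 */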
2555 static void
2556 channel_handler(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
2557 {
2558 	int rv = 0;
2559 	int chan;
2560 
2561 	if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
2562 	    && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE)
2563 	    && (msg->msg.cmd == IPMI_GET_CHANNEL_INFO_CMD))
2564 	{
2565 		/* It's the one we want */
2566 		if (msg->msg.data[0] != 0) {
2567 			/* Got an error from the channel, just go on. */
2568 
2569 			if (msg->msg.data[0] == IPMI_INVALID_COMMAND_ERR) {
2570 				/* If the MC does not support this
2571 				   command, that is legal.  We just
2572 				   assume it has one IPMB at channel
2573 				   zero. */
2574 				intf->channels[0].medium
2575 					= IPMI_CHANNEL_MEDIUM_IPMB;
2576 				intf->channels[0].protocol
2577 					= IPMI_CHANNEL_PROTOCOL_IPMB;
2578 				rv = -ENOSYS;
2579 
2580 				intf->curr_channel = IPMI_MAX_CHANNELS;
2581 				wake_up(&intf->waitq);
2582 				goto out;
2583 			}
2584 			goto next_channel;
2585 		}
2586 		if (msg->msg.data_len < 4) {
2587 			/* Message not big enough, just go on. */
2588 			goto next_channel;
2589 		}
2590 		chan = intf->curr_channel;
2591 		intf->channels[chan].medium = msg->msg.data[2] & 0x7f;
2592 		intf->channels[chan].protocol = msg->msg.data[3] & 0x1f;
2593 
2594 	next_channel:
2595 		intf->curr_channel++;
2596 		if (intf->curr_channel >= IPMI_MAX_CHANNELS)
2597 			wake_up(&intf->waitq);
2598 		else
2599 			rv = send_channel_info_cmd(intf, intf->curr_channel);
2600 
2601 		if (rv) {
2602 			/* Got an error somehow, just give up. */
2603 			intf->curr_channel = IPMI_MAX_CHANNELS;
2604 			wake_up(&intf->waitq);
2605 
2606 			printk(KERN_WARNING PFX
2607 			       "Error sending channel information: %d\n",
2608 			       rv);
2609 		}
2610 	}
2611  out:
2612 	return;
2613 }
2614 
2615 void ipmi_poll_interface(ipmi_user_t user)
2616 {
2617 	ipmi_smi_t intf = user->intf;
2618 
2619 	if (intf->handlers->poll)
2620 		intf->handlers->poll(intf->send_info);
2621 }
2622 
2623 int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
2624 		      void		       *send_info,
2625 		      struct ipmi_device_id    *device_id,
2626 		      struct device            *si_dev,
2627 		      const char               *sysfs_name,
2628 		      unsigned char            slave_addr)
2629 {
2630 	int              i, j;
2631 	int              rv;
2632 	ipmi_smi_t       intf;
2633 	ipmi_smi_t       tintf;
2634 	struct list_head *link;
2635 
2636 	/* Make sure the driver is actually initialized; this handles
2637 	   problems with initialization order. */
2638 	if (!initialized) {
2639 		rv = ipmi_init_msghandler();
2640 		if (rv)
2641 			return rv;
2642 		/* The init code doesn't return an error if it was turned
2643 		   off, but it won't initialize.  Check that. */
2644 		if (!initialized)
2645 			return -ENODEV;
2646 	}
2647 
2648 	intf = kzalloc(sizeof(*intf), GFP_KERNEL);
2649 	if (!intf)
2650 		return -ENOMEM;
2651 
2652 	intf->ipmi_version_major = ipmi_version_major(device_id);
2653 	intf->ipmi_version_minor = ipmi_version_minor(device_id);
2654 
2655 	intf->bmc = kzalloc(sizeof(*intf->bmc), GFP_KERNEL);
2656 	if (!intf->bmc) {
2657 		kfree(intf);
2658 		return -ENOMEM;
2659 	}
2660 	intf->intf_num = -1; /* Mark it invalid for now. */
2661 	kref_init(&intf->refcount);
2662 	intf->bmc->id = *device_id;
2663 	intf->si_dev = si_dev;
2664 	for (j = 0; j < IPMI_MAX_CHANNELS; j++) {
2665 		intf->channels[j].address = IPMI_BMC_SLAVE_ADDR;
2666 		intf->channels[j].lun = 2;
2667 	}
2668 	if (slave_addr != 0)
2669 		intf->channels[0].address = slave_addr;
2670 	INIT_LIST_HEAD(&intf->users);
2671 	intf->handlers = handlers;
2672 	intf->send_info = send_info;
2673 	spin_lock_init(&intf->seq_lock);
2674 	for (j = 0; j < IPMI_IPMB_NUM_SEQ; j++) {
2675 		intf->seq_table[j].inuse = 0;
2676 		intf->seq_table[j].seqid = 0;
2677 	}
2678 	intf->curr_seq = 0;
2679 #ifdef CONFIG_PROC_FS
2680 	mutex_init(&intf->proc_entry_lock);
2681 #endif
2682 	spin_lock_init(&intf->waiting_msgs_lock);
2683 	INIT_LIST_HEAD(&intf->waiting_msgs);
2684 	spin_lock_init(&intf->events_lock);
2685 	INIT_LIST_HEAD(&intf->waiting_events);
2686 	intf->waiting_events_count = 0;
2687 	mutex_init(&intf->cmd_rcvrs_mutex);
2688 	spin_lock_init(&intf->maintenance_mode_lock);
2689 	INIT_LIST_HEAD(&intf->cmd_rcvrs);
2690 	init_waitqueue_head(&intf->waitq);
2691 
2692 	spin_lock_init(&intf->counter_lock);
2693 	intf->proc_dir = NULL;
2694 
2695 	mutex_lock(&smi_watchers_mutex);
2696 	mutex_lock(&ipmi_interfaces_mutex);
2697 	/* Look for a hole in the numbers. */
2698 	i = 0;
2699 	link = &ipmi_interfaces;
2700 	list_for_each_entry_rcu(tintf, &ipmi_interfaces, link) {
2701 		if (tintf->intf_num != i) {
2702 			link = &tintf->link;
2703 			break;
2704 		}
2705 		i++;
2706 	}
2707 	/* Add the new interface in numeric order. */
2708 	if (i == 0)
2709 		list_add_rcu(&intf->link, &ipmi_interfaces);
2710 	else
2711 		list_add_tail_rcu(&intf->link, link);
2712 
2713 	rv = handlers->start_processing(send_info, intf);
2714 	if (rv)
2715 		goto out;
2716 
2717 	get_guid(intf);
2718 
2719 	if ((intf->ipmi_version_major > 1)
2720 	    || ((intf->ipmi_version_major == 1)
2721 		&& (intf->ipmi_version_minor >= 5)))
2722 	{
2723 		/* Start scanning the channels to see what is
2724 		   available. */
2725 		intf->null_user_handler = channel_handler;
2726 		intf->curr_channel = 0;
2727 		rv = send_channel_info_cmd(intf, 0);
2728 		if (rv)
2729 			goto out;
2730 
2731 		/* Wait for the channel info to be read. */
2732 		wait_event(intf->waitq,
2733 			   intf->curr_channel >= IPMI_MAX_CHANNELS);
2734 		intf->null_user_handler = NULL;
2735 	} else {
2736 		/* Assume a single IPMB channel at zero. */
2737 		intf->channels[0].medium = IPMI_CHANNEL_MEDIUM_IPMB;
2738 		intf->channels[0].protocol = IPMI_CHANNEL_PROTOCOL_IPMB;
2739 	}
2740 
2741 	if (rv == 0)
2742 		rv = add_proc_entries(intf, i);
2743 
2744 	rv = ipmi_bmc_register(intf, i, sysfs_name);
2745 
2746  out:
2747 	if (rv) {
2748 		if (intf->proc_dir)
2749 			remove_proc_entries(intf);
2750 		intf->handlers = NULL;
2751 		list_del_rcu(&intf->link);
2752 		mutex_unlock(&ipmi_interfaces_mutex);
2753 		mutex_unlock(&smi_watchers_mutex);
2754 		synchronize_rcu();
2755 		kref_put(&intf->refcount, intf_free);
2756 	} else {
2757 		/*
2758 		 * Keep memory order straight for RCU readers.  Make
2759 		 * sure everything else is committed to memory before
2760 		 * setting intf_num to mark the interface valid.
2761 		 */
2762 		smp_wmb();
2763 		intf->intf_num = i;
2764 		mutex_unlock(&ipmi_interfaces_mutex);
2765 		/* After this point the interface is legal to use. */
2766 		call_smi_watchers(i, intf->si_dev);
2767 		mutex_unlock(&smi_watchers_mutex);
2768 	}
2769 
2770 	return rv;
2771 }
2772 
2773 static void cleanup_smi_msgs(ipmi_smi_t intf)
2774 {
2775 	int              i;
2776 	struct seq_table *ent;
2777 
2778 	/* No need for locks, the interface is down. */
2779 	for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
2780 		ent = &(intf->seq_table[i]);
2781 		if (!ent->inuse)
2782 			continue;
2783 		deliver_err_response(ent->recv_msg, IPMI_ERR_UNSPECIFIED);
2784 	}
2785 }
2786 
2787 int ipmi_unregister_smi(ipmi_smi_t intf)
2788 {
2789 	struct ipmi_smi_watcher *w;
2790 	int    intf_num = intf->intf_num;
2791 
2792 	ipmi_bmc_unregister(intf);
2793 
2794 	mutex_lock(&smi_watchers_mutex);
2795 	mutex_lock(&ipmi_interfaces_mutex);
2796 	intf->intf_num = -1;
2797 	intf->handlers = NULL;
2798 	list_del_rcu(&intf->link);
2799 	mutex_unlock(&ipmi_interfaces_mutex);
2800 	synchronize_rcu();
2801 
2802 	cleanup_smi_msgs(intf);
2803 
2804 	remove_proc_entries(intf);
2805 
2806 	/* Call all the watcher interfaces to tell them that
2807 	   an interface is gone. */
2808 	list_for_each_entry(w, &smi_watchers, link)
2809 		w->smi_gone(intf_num);
2810 	mutex_unlock(&smi_watchers_mutex);
2811 
2812 	kref_put(&intf->refcount, intf_free);
2813 	return 0;
2814 }
2815 
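/*
 * Handle a response to an IPMB command we sent, returned through a
 * Get Message command.  The layout used below is:
 *   rsp[2]  - completion code of the Get Message command
 *   rsp[3]  - channel (low nibble)
 *   rsp[4]  - netfn in the upper 6 bits
 *   rsp[6]  - responder's slave address
 *   rsp[7]  - sequence number (upper 6 bits) and LUN (low 2 bits)
 *   rsp[8]  - command
 *   rsp[9]+ - response data, followed by a trailing checksum
 * The sequence number is used to find the original request.
 */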
2816 static int handle_ipmb_get_msg_rsp(ipmi_smi_t          intf,
2817 				   struct ipmi_smi_msg *msg)
2818 {
2819 	struct ipmi_ipmb_addr ipmb_addr;
2820 	struct ipmi_recv_msg  *recv_msg;
2821 	unsigned long         flags;
2822 
2823 
2824 	/* This is 11, not 10, because the response must contain a
2825 	 * completion code. */
2826 	if (msg->rsp_size < 11) {
2827 		/* Message not big enough, just ignore it. */
2828 		spin_lock_irqsave(&intf->counter_lock, flags);
2829 		intf->invalid_ipmb_responses++;
2830 		spin_unlock_irqrestore(&intf->counter_lock, flags);
2831 		return 0;
2832 	}
2833 
2834 	if (msg->rsp[2] != 0) {
2835 		/* An error getting the response, just ignore it. */
2836 		return 0;
2837 	}
2838 
2839 	ipmb_addr.addr_type = IPMI_IPMB_ADDR_TYPE;
2840 	ipmb_addr.slave_addr = msg->rsp[6];
2841 	ipmb_addr.channel = msg->rsp[3] & 0x0f;
2842 	ipmb_addr.lun = msg->rsp[7] & 3;
2843 
2844 	/* It's a response from a remote entity.  Look up the sequence
2845 	   number and handle the response. */
2846 	if (intf_find_seq(intf,
2847 			  msg->rsp[7] >> 2,
2848 			  msg->rsp[3] & 0x0f,
2849 			  msg->rsp[8],
2850 			  (msg->rsp[4] >> 2) & (~1),
2851 			  (struct ipmi_addr *) &(ipmb_addr),
2852 			  &recv_msg))
2853 	{
2854 		/* We were unable to find the sequence number,
2855 		   so just nuke the message. */
2856 		spin_lock_irqsave(&intf->counter_lock, flags);
2857 		intf->unhandled_ipmb_responses++;
2858 		spin_unlock_irqrestore(&intf->counter_lock, flags);
2859 		return 0;
2860 	}
2861 
2862 	memcpy(recv_msg->msg_data,
2863 	       &(msg->rsp[9]),
2864 	       msg->rsp_size - 9);
2865 	/* The other fields matched, so no need to set them, except
2866 	   for netfn, which needs to be the response that was
2867 	   returned, not the request value. */
2868 	recv_msg->msg.netfn = msg->rsp[4] >> 2;
2869 	recv_msg->msg.data = recv_msg->msg_data;
2870 	recv_msg->msg.data_len = msg->rsp_size - 10;
2871 	recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
2872 	spin_lock_irqsave(&intf->counter_lock, flags);
2873 	intf->handled_ipmb_responses++;
2874 	spin_unlock_irqrestore(&intf->counter_lock, flags);
2875 	deliver_response(recv_msg);
2876 
2877 	return 0;
2878 }
2879 
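/*
 * Handle a command sent to us over IPMB.  Look up a registered
 * command receiver for the (netfn, cmd, channel) tuple; if one is
 * found, the message is wrapped in an ipmi_recv_msg and delivered to
 * that user.  If no receiver is registered, the SMI message is reused
 * to send an "invalid command" response back to the requester (and -1
 * is returned so it is neither freed nor requeued).  Returns 1 to
 * requeue the message if a receive buffer cannot be allocated.
 */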
2880 static int handle_ipmb_get_msg_cmd(ipmi_smi_t          intf,
2881 				   struct ipmi_smi_msg *msg)
2882 {
2883 	struct cmd_rcvr          *rcvr;
2884 	int                      rv = 0;
2885 	unsigned char            netfn;
2886 	unsigned char            cmd;
2887 	unsigned char            chan;
2888 	ipmi_user_t              user = NULL;
2889 	struct ipmi_ipmb_addr    *ipmb_addr;
2890 	struct ipmi_recv_msg     *recv_msg;
2891 	unsigned long            flags;
2892 	struct ipmi_smi_handlers *handlers;
2893 
2894 	if (msg->rsp_size < 10) {
2895 		/* Message not big enough, just ignore it. */
2896 		spin_lock_irqsave(&intf->counter_lock, flags);
2897 		intf->invalid_commands++;
2898 		spin_unlock_irqrestore(&intf->counter_lock, flags);
2899 		return 0;
2900 	}
2901 
2902 	if (msg->rsp[2] != 0) {
2903 		/* An error getting the response, just ignore it. */
2904 		return 0;
2905 	}
2906 
2907 	netfn = msg->rsp[4] >> 2;
2908 	cmd = msg->rsp[8];
2909 	chan = msg->rsp[3] & 0xf;
2910 
2911 	rcu_read_lock();
2912 	rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
2913 	if (rcvr) {
2914 		user = rcvr->user;
2915 		kref_get(&user->refcount);
2916 	} else
2917 		user = NULL;
2918 	rcu_read_unlock();
2919 
2920 	if (user == NULL) {
2921 		/* We didn't find a user, deliver an error response. */
2922 		spin_lock_irqsave(&intf->counter_lock, flags);
2923 		intf->unhandled_commands++;
2924 		spin_unlock_irqrestore(&intf->counter_lock, flags);
2925 
2926 		msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
2927 		msg->data[1] = IPMI_SEND_MSG_CMD;
2928 		msg->data[2] = msg->rsp[3];
2929 		msg->data[3] = msg->rsp[6];
2930                 msg->data[4] = ((netfn + 1) << 2) | (msg->rsp[7] & 0x3);
2931 		msg->data[5] = ipmb_checksum(&(msg->data[3]), 2);
2932 		msg->data[6] = intf->channels[msg->rsp[3] & 0xf].address;
2933                 /* rqseq/lun */
2934                 msg->data[7] = (msg->rsp[7] & 0xfc) | (msg->rsp[4] & 0x3);
2935 		msg->data[8] = msg->rsp[8]; /* cmd */
2936 		msg->data[9] = IPMI_INVALID_CMD_COMPLETION_CODE;
2937 		msg->data[10] = ipmb_checksum(&(msg->data[6]), 4);
2938 		msg->data_size = 11;
2939 
2940 #ifdef DEBUG_MSGING
2941 	{
2942 		int m;
2943 		printk("Invalid command:");
2944 		for (m = 0; m < msg->data_size; m++)
2945 			printk(" %2.2x", msg->data[m]);
2946 		printk("\n");
2947 	}
2948 #endif
2949 		rcu_read_lock();
2950 		handlers = intf->handlers;
2951 		if (handlers) {
2952 			handlers->sender(intf->send_info, msg, 0);
2953 			/* We used the message, so return the value
2954 			   that causes it to not be freed or
2955 			   queued. */
2956 			rv = -1;
2957 		}
2958 		rcu_read_unlock();
2959 	} else {
2960 		/* Deliver the message to the user. */
2961 		spin_lock_irqsave(&intf->counter_lock, flags);
2962 		intf->handled_commands++;
2963 		spin_unlock_irqrestore(&intf->counter_lock, flags);
2964 
2965 		recv_msg = ipmi_alloc_recv_msg();
2966 		if (!recv_msg) {
2967 			/* We couldn't allocate memory for the
2968                            message, so requeue it for handling
2969                            later. */
2970 			rv = 1;
2971 			kref_put(&user->refcount, free_user);
2972 		} else {
2973 			/* Extract the source address from the data. */
2974 			ipmb_addr = (struct ipmi_ipmb_addr *) &recv_msg->addr;
2975 			ipmb_addr->addr_type = IPMI_IPMB_ADDR_TYPE;
2976 			ipmb_addr->slave_addr = msg->rsp[6];
2977 			ipmb_addr->lun = msg->rsp[7] & 3;
2978 			ipmb_addr->channel = msg->rsp[3] & 0xf;
2979 
2980 			/* Extract the rest of the message information
2981 			   from the IPMB header.*/
2982 			recv_msg->user = user;
2983 			recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
2984 			recv_msg->msgid = msg->rsp[7] >> 2;
2985 			recv_msg->msg.netfn = msg->rsp[4] >> 2;
2986 			recv_msg->msg.cmd = msg->rsp[8];
2987 			recv_msg->msg.data = recv_msg->msg_data;
2988 
2989 			/* We chop off 10, not 9 bytes because the checksum
2990 			   at the end also needs to be removed. */
2991 			recv_msg->msg.data_len = msg->rsp_size - 10;
2992 			memcpy(recv_msg->msg_data,
2993 			       &(msg->rsp[9]),
2994 			       msg->rsp_size - 10);
2995 			deliver_response(recv_msg);
2996 		}
2997 	}
2998 
2999 	return rv;
3000 }
3001 
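/*
 * Handle a response to a command we sent over a LAN channel.  The
 * layout used below is:
 *   rsp[2]   - completion code of the Get Message command
 *   rsp[3]   - channel (low nibble) and privilege (high nibble)
 *   rsp[4]   - session handle
 *   rsp[5]   - local SWID
 *   rsp[6]   - netfn in the upper 6 bits
 *   rsp[8]   - remote SWID
 *   rsp[9]   - sequence number (upper 6 bits) and LUN (low 2 bits)
 *   rsp[10]  - command
 *   rsp[11]+ - response data, followed by a trailing checksum
 */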
3002 static int handle_lan_get_msg_rsp(ipmi_smi_t          intf,
3003 				  struct ipmi_smi_msg *msg)
3004 {
3005 	struct ipmi_lan_addr  lan_addr;
3006 	struct ipmi_recv_msg  *recv_msg;
3007 	unsigned long         flags;
3008 
3009 
3010 	/* This is 13, not 12, because the response must contain a
3011 	 * completion code. */
3012 	if (msg->rsp_size < 13) {
3013 		/* Message not big enough, just ignore it. */
3014 		spin_lock_irqsave(&intf->counter_lock, flags);
3015 		intf->invalid_lan_responses++;
3016 		spin_unlock_irqrestore(&intf->counter_lock, flags);
3017 		return 0;
3018 	}
3019 
3020 	if (msg->rsp[2] != 0) {
3021 		/* An error getting the response, just ignore it. */
3022 		return 0;
3023 	}
3024 
3025 	lan_addr.addr_type = IPMI_LAN_ADDR_TYPE;
3026 	lan_addr.session_handle = msg->rsp[4];
3027 	lan_addr.remote_SWID = msg->rsp[8];
3028 	lan_addr.local_SWID = msg->rsp[5];
3029 	lan_addr.channel = msg->rsp[3] & 0x0f;
3030 	lan_addr.privilege = msg->rsp[3] >> 4;
3031 	lan_addr.lun = msg->rsp[9] & 3;
3032 
3033 	/* It's a response from a remote entity.  Look up the sequence
3034 	   number and handle the response. */
3035 	if (intf_find_seq(intf,
3036 			  msg->rsp[9] >> 2,
3037 			  msg->rsp[3] & 0x0f,
3038 			  msg->rsp[10],
3039 			  (msg->rsp[6] >> 2) & (~1),
3040 			  (struct ipmi_addr *) &(lan_addr),
3041 			  &recv_msg))
3042 	{
3043 		/* We were unable to find the sequence number,
3044 		   so just nuke the message. */
3045 		spin_lock_irqsave(&intf->counter_lock, flags);
3046 		intf->unhandled_lan_responses++;
3047 		spin_unlock_irqrestore(&intf->counter_lock, flags);
3048 		return 0;
3049 	}
3050 
3051 	memcpy(recv_msg->msg_data,
3052 	       &(msg->rsp[11]),
3053 	       msg->rsp_size - 11);
3054 	/* The other fields matched, so no need to set them, except
3055            for netfn, which needs to be the response that was
3056            returned, not the request value. */
3057 	recv_msg->msg.netfn = msg->rsp[6] >> 2;
3058 	recv_msg->msg.data = recv_msg->msg_data;
3059 	recv_msg->msg.data_len = msg->rsp_size - 12;
3060 	recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
3061 	spin_lock_irqsave(&intf->counter_lock, flags);
3062 	intf->handled_lan_responses++;
3063 	spin_unlock_irqrestore(&intf->counter_lock, flags);
3064 	deliver_response(recv_msg);
3065 
3066 	return 0;
3067 }
3068 
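/*
 * Handle a command sent to us over a LAN channel.  This mirrors
 * handle_ipmb_get_msg_cmd(), except that a command with no registered
 * receiver is simply dropped instead of being answered with an error
 * response.
 */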
3069 static int handle_lan_get_msg_cmd(ipmi_smi_t          intf,
3070 				  struct ipmi_smi_msg *msg)
3071 {
3072 	struct cmd_rcvr          *rcvr;
3073 	int                      rv = 0;
3074 	unsigned char            netfn;
3075 	unsigned char            cmd;
3076 	unsigned char            chan;
3077 	ipmi_user_t              user = NULL;
3078 	struct ipmi_lan_addr     *lan_addr;
3079 	struct ipmi_recv_msg     *recv_msg;
3080 	unsigned long            flags;
3081 
3082 	if (msg->rsp_size < 12) {
3083 		/* Message not big enough, just ignore it. */
3084 		spin_lock_irqsave(&intf->counter_lock, flags);
3085 		intf->invalid_commands++;
3086 		spin_unlock_irqrestore(&intf->counter_lock, flags);
3087 		return 0;
3088 	}
3089 
3090 	if (msg->rsp[2] != 0) {
3091 		/* An error getting the response, just ignore it. */
3092 		return 0;
3093 	}
3094 
3095 	netfn = msg->rsp[6] >> 2;
3096 	cmd = msg->rsp[10];
3097 	chan = msg->rsp[3] & 0xf;
3098 
3099 	rcu_read_lock();
3100 	rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
3101 	if (rcvr) {
3102 		user = rcvr->user;
3103 		kref_get(&user->refcount);
3104 	} else
3105 		user = NULL;
3106 	rcu_read_unlock();
3107 
3108 	if (user == NULL) {
3109 		/* We didn't find a user, just give up. */
3110 		spin_lock_irqsave(&intf->counter_lock, flags);
3111 		intf->unhandled_commands++;
3112 		spin_unlock_irqrestore(&intf->counter_lock, flags);
3113 
3114 		rv = 0; /* Don't do anything with these messages, just
3115 			   allow them to be freed. */
3116 	} else {
3117 		/* Deliver the message to the user. */
3118 		spin_lock_irqsave(&intf->counter_lock, flags);
3119 		intf->handled_commands++;
3120 		spin_unlock_irqrestore(&intf->counter_lock, flags);
3121 
3122 		recv_msg = ipmi_alloc_recv_msg();
3123 		if (!recv_msg) {
3124 			/* We couldn't allocate memory for the
3125                            message, so requeue it for handling
3126                            later. */
3127 			rv = 1;
3128 			kref_put(&user->refcount, free_user);
3129 		} else {
3130 			/* Extract the source address from the data. */
3131 			lan_addr = (struct ipmi_lan_addr *) &recv_msg->addr;
3132 			lan_addr->addr_type = IPMI_LAN_ADDR_TYPE;
3133 			lan_addr->session_handle = msg->rsp[4];
3134 			lan_addr->remote_SWID = msg->rsp[8];
3135 			lan_addr->local_SWID = msg->rsp[5];
3136 			lan_addr->lun = msg->rsp[9] & 3;
3137 			lan_addr->channel = msg->rsp[3] & 0xf;
3138 			lan_addr->privilege = msg->rsp[3] >> 4;
3139 
3140 			/* Extract the rest of the message information
3141 			   from the LAN message header. */
3142 			recv_msg->user = user;
3143 			recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
3144 			recv_msg->msgid = msg->rsp[9] >> 2;
3145 			recv_msg->msg.netfn = msg->rsp[6] >> 2;
3146 			recv_msg->msg.cmd = msg->rsp[10];
3147 			recv_msg->msg.data = recv_msg->msg_data;
3148 
3149 			/* We chop off 12, not 11 bytes because the checksum
3150 			   at the end also needs to be removed. */
3151 			recv_msg->msg.data_len = msg->rsp_size - 12;
3152 			memcpy(recv_msg->msg_data,
3153 			       &(msg->rsp[11]),
3154 			       msg->rsp_size - 12);
3155 			deliver_response(recv_msg);
3156 		}
3157 	}
3158 
3159 	return rv;
3160 }
3161 
3162 static void copy_event_into_recv_msg(struct ipmi_recv_msg *recv_msg,
3163 				     struct ipmi_smi_msg  *msg)
3164 {
3165 	struct ipmi_system_interface_addr *smi_addr;
3166 
3167 	recv_msg->msgid = 0;
3168 	smi_addr = (struct ipmi_system_interface_addr *) &(recv_msg->addr);
3169 	smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
3170 	smi_addr->channel = IPMI_BMC_CHANNEL;
3171 	smi_addr->lun = msg->rsp[0] & 3;
3172 	recv_msg->recv_type = IPMI_ASYNC_EVENT_RECV_TYPE;
3173 	recv_msg->msg.netfn = msg->rsp[0] >> 2;
3174 	recv_msg->msg.cmd = msg->rsp[1];
3175 	memcpy(recv_msg->msg_data, &(msg->rsp[3]), msg->rsp_size - 3);
3176 	recv_msg->msg.data = recv_msg->msg_data;
3177 	recv_msg->msg.data_len = msg->rsp_size - 3;
3178 }
3179 
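/*
 * Handle an event read from the BMC's event message buffer.  One copy
 * of the event is allocated and delivered to every user that has
 * asked for events; if nobody is listening, the event is held on
 * intf->waiting_events (bounded by MAX_EVENTS_IN_QUEUE) for a future
 * listener.  Returns 1 to requeue the SMI message if allocation
 * fails.
 */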
3180 static int handle_read_event_rsp(ipmi_smi_t          intf,
3181 				 struct ipmi_smi_msg *msg)
3182 {
3183 	struct ipmi_recv_msg *recv_msg, *recv_msg2;
3184 	struct list_head     msgs;
3185 	ipmi_user_t          user;
3186 	int                  rv = 0;
3187 	int                  deliver_count = 0;
3188 	unsigned long        flags;
3189 
3190 	if (msg->rsp_size < 19) {
3191 		/* Message is too small to be an IPMB event. */
3192 		spin_lock_irqsave(&intf->counter_lock, flags);
3193 		intf->invalid_events++;
3194 		spin_unlock_irqrestore(&intf->counter_lock, flags);
3195 		return 0;
3196 	}
3197 
3198 	if (msg->rsp[2] != 0) {
3199 		/* An error getting the event, just ignore it. */
3200 		return 0;
3201 	}
3202 
3203 	INIT_LIST_HEAD(&msgs);
3204 
3205 	spin_lock_irqsave(&intf->events_lock, flags);
3206 
3207 	spin_lock(&intf->counter_lock);
3208 	intf->events++;
3209 	spin_unlock(&intf->counter_lock);
3210 
3211 	/* Allocate and fill in one message for every user that is getting
3212 	   events. */
3213 	rcu_read_lock();
3214 	list_for_each_entry_rcu(user, &intf->users, link) {
3215 		if (!user->gets_events)
3216 			continue;
3217 
3218 		recv_msg = ipmi_alloc_recv_msg();
3219 		if (!recv_msg) {
3220 			rcu_read_unlock();
3221 			list_for_each_entry_safe(recv_msg, recv_msg2, &msgs,
3222 						 link) {
3223 				list_del(&recv_msg->link);
3224 				ipmi_free_recv_msg(recv_msg);
3225 			}
3226 			/* We couldn't allocate memory for the
3227                            message, so requeue it for handling
3228                            later. */
3229 			rv = 1;
3230 			goto out;
3231 		}
3232 
3233 		deliver_count++;
3234 
3235 		copy_event_into_recv_msg(recv_msg, msg);
3236 		recv_msg->user = user;
3237 		kref_get(&user->refcount);
3238 		list_add_tail(&(recv_msg->link), &msgs);
3239 	}
3240 	rcu_read_unlock();
3241 
3242 	if (deliver_count) {
3243 		/* Now deliver all the messages. */
3244 		list_for_each_entry_safe(recv_msg, recv_msg2, &msgs, link) {
3245 			list_del(&recv_msg->link);
3246 			deliver_response(recv_msg);
3247 		}
3248 	} else if (intf->waiting_events_count < MAX_EVENTS_IN_QUEUE) {
3249 		/* No one to receive the message; put it in the queue if there
3250 		   are not already too many things in the queue. */
3251 		recv_msg = ipmi_alloc_recv_msg();
3252 		if (!recv_msg) {
3253 			/* We couldn't allocate memory for the
3254                            message, so requeue it for handling
3255                            later. */
3256 			rv = 1;
3257 			goto out;
3258 		}
3259 
3260 		copy_event_into_recv_msg(recv_msg, msg);
3261 		list_add_tail(&(recv_msg->link), &(intf->waiting_events));
3262 		intf->waiting_events_count++;
3263 	} else {
3264 		/* There are too many things in the queue, so discard this
3265 		   message. */
3266 		printk(KERN_WARNING PFX "Event queue full, discarding an"
3267 		       " incoming event\n");
3268 	}
3269 
3270  out:
3271 	spin_unlock_irqrestore(&(intf->events_lock), flags);
3272 
3273 	return rv;
3274 }
3275 
3276 static int handle_bmc_rsp(ipmi_smi_t          intf,
3277 			  struct ipmi_smi_msg *msg)
3278 {
3279 	struct ipmi_recv_msg *recv_msg;
3280 	unsigned long        flags;
3281 	struct ipmi_user     *user;
3282 
3283 	recv_msg = (struct ipmi_recv_msg *) msg->user_data;
3284 	if (recv_msg == NULL)
3285 	{
3286 		printk(KERN_WARNING
3287 		       "IPMI message received with no owner.  This could be"
3288 		       " because of a malformed message or a hardware error."
3289 		       "  Contact your hardware vendor for assistance.\n");
3290 		return 0;
3291 	}
3292 
3293 	user = recv_msg->user;
3294 	/* Make sure the user still exists. */
3295 	if (user && !user->valid) {
3296 		/* The user for the message went away, so give up. */
3297 		spin_lock_irqsave(&intf->counter_lock, flags);
3298 		intf->unhandled_local_responses++;
3299 		spin_unlock_irqrestore(&intf->counter_lock, flags);
3300 		ipmi_free_recv_msg(recv_msg);
3301 	} else {
3302 		struct ipmi_system_interface_addr *smi_addr;
3303 
3304 		spin_lock_irqsave(&intf->counter_lock, flags);
3305 		intf->handled_local_responses++;
3306 		spin_unlock_irqrestore(&intf->counter_lock, flags);
3307 		recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
3308 		recv_msg->msgid = msg->msgid;
3309 		smi_addr = ((struct ipmi_system_interface_addr *)
3310 			    &(recv_msg->addr));
3311 		smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
3312 		smi_addr->channel = IPMI_BMC_CHANNEL;
3313 		smi_addr->lun = msg->rsp[0] & 3;
3314 		recv_msg->msg.netfn = msg->rsp[0] >> 2;
3315 		recv_msg->msg.cmd = msg->rsp[1];
3316 		memcpy(recv_msg->msg_data,
3317 		       &(msg->rsp[2]),
3318 		       msg->rsp_size - 2);
3319 		recv_msg->msg.data = recv_msg->msg_data;
3320 		recv_msg->msg.data_len = msg->rsp_size - 2;
3321 		deliver_response(recv_msg);
3322 	}
3323 
3324 	return 0;
3325 }
3326 
3327 /* Handle a new message.  Return 1 if the message should be requeued,
3328    0 if the message should be freed, or -1 if the message should not
3329    be freed or requeued. */
3330 static int handle_new_recv_msg(ipmi_smi_t          intf,
3331 			       struct ipmi_smi_msg *msg)
3332 {
3333 	int requeue;
3334 	int chan;
3335 
3336 #ifdef DEBUG_MSGING
3337 	int m;
3338 	printk("Recv:");
3339 	for (m = 0; m < msg->rsp_size; m++)
3340 		printk(" %2.2x", msg->rsp[m]);
3341 	printk("\n");
3342 #endif
3343 	if (msg->rsp_size < 2) {
3344 		/* Message is too small to be correct. */
3345 		printk(KERN_WARNING PFX "BMC returned too small a message"
3346 		       " for netfn %x cmd %x, got %d bytes\n",
3347 		       (msg->data[0] >> 2) | 1, msg->data[1], msg->rsp_size);
3348 
3349 		/* Generate an error response for the message. */
3350 		msg->rsp[0] = msg->data[0] | (1 << 2);
3351 		msg->rsp[1] = msg->data[1];
3352 		msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
3353 		msg->rsp_size = 3;
3354 	} else if (((msg->rsp[0] >> 2) != ((msg->data[0] >> 2) | 1))/* Netfn */
3355 		   || (msg->rsp[1] != msg->data[1]))		  /* Command */
3356 	{
3357 		/* The response is not even marginally correct. */
3358 		printk(KERN_WARNING PFX "BMC returned incorrect response,"
3359 		       " expected netfn %x cmd %x, got netfn %x cmd %x\n",
3360 		       (msg->data[0] >> 2) | 1, msg->data[1],
3361 		       msg->rsp[0] >> 2, msg->rsp[1]);
3362 
3363 		/* Generate an error response for the message. */
3364 		msg->rsp[0] = msg->data[0] | (1 << 2);
3365 		msg->rsp[1] = msg->data[1];
3366 		msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
3367 		msg->rsp_size = 3;
3368 	}
3369 
3370 	if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
3371 	    && (msg->rsp[1] == IPMI_SEND_MSG_CMD)
3372 	    && (msg->user_data != NULL))
3373 	{
3374 		/* It's a response to a response we sent.  For this we
3375 		   deliver a send message response to the user. */
3376 		struct ipmi_recv_msg     *recv_msg = msg->user_data;
3377 
3378 		requeue = 0;
3379 		if (msg->rsp_size < 2)
3380 			/* Message is too small to be correct. */
3381 			goto out;
3382 
3383 		chan = msg->data[2] & 0x0f;
3384 		if (chan >= IPMI_MAX_CHANNELS)
3385 			/* Invalid channel number */
3386 			goto out;
3387 
3388 		if (!recv_msg)
3389 			goto out;
3390 
3391 		/* Make sure the user still exists. */
3392 		if (!recv_msg->user || !recv_msg->user->valid)
3393 			goto out;
3394 
3395 		recv_msg->recv_type = IPMI_RESPONSE_RESPONSE_TYPE;
3396 		recv_msg->msg.data = recv_msg->msg_data;
3397 		recv_msg->msg.data_len = 1;
3398 		recv_msg->msg_data[0] = msg->rsp[2];
3399 		deliver_response(recv_msg);
3400 	} else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
3401 		   && (msg->rsp[1] == IPMI_GET_MSG_CMD))
3402 	{
3403 		/* It's from the receive queue. */
3404 		chan = msg->rsp[3] & 0xf;
3405 		if (chan >= IPMI_MAX_CHANNELS) {
3406 			/* Invalid channel number */
3407 			requeue = 0;
3408 			goto out;
3409 		}
3410 
3411 		switch (intf->channels[chan].medium) {
3412 		case IPMI_CHANNEL_MEDIUM_IPMB:
3413 			if (msg->rsp[4] & 0x04) {
3414 				/* It's a response, so find the
3415 				   requesting message and send it up. */
3416 				requeue = handle_ipmb_get_msg_rsp(intf, msg);
3417 			} else {
3418 				/* It's a command to the SMS from some other
3419 				   entity.  Handle that. */
3420 				requeue = handle_ipmb_get_msg_cmd(intf, msg);
3421 			}
3422 			break;
3423 
3424 		case IPMI_CHANNEL_MEDIUM_8023LAN:
3425 		case IPMI_CHANNEL_MEDIUM_ASYNC:
3426 			if (msg->rsp[6] & 0x04) {
3427 				/* It's a response, so find the
3428 				   requesting message and send it up. */
3429 				requeue = handle_lan_get_msg_rsp(intf, msg);
3430 			} else {
3431 				/* It's a command to the SMS from some other
3432 				   entity.  Handle that. */
3433 				requeue = handle_lan_get_msg_cmd(intf, msg);
3434 			}
3435 			break;
3436 
3437 		default:
3438 			/* We don't handle the channel type, so just
3439 			 * free the message. */
3440 			requeue = 0;
3441 		}
3442 
3443 	} else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
3444 		   && (msg->rsp[1] == IPMI_READ_EVENT_MSG_BUFFER_CMD))
3445 	{
3446 		/* It's an asynchronous event. */
3447 		requeue = handle_read_event_rsp(intf, msg);
3448 	} else {
3449 		/* It's a response from the local BMC. */
3450 		requeue = handle_bmc_rsp(intf, msg);
3451 	}
3452 
3453  out:
3454 	return requeue;
3455 }
3456 
3457 /* Handle a new message from the lower layer. */
3458 void ipmi_smi_msg_received(ipmi_smi_t          intf,
3459 			   struct ipmi_smi_msg *msg)
3460 {
3461 	unsigned long flags;
3462 	int           rv;
3463 
3464 
3465 	if ((msg->data_size >= 2)
3466 	    && (msg->data[0] == (IPMI_NETFN_APP_REQUEST << 2))
3467 	    && (msg->data[1] == IPMI_SEND_MSG_CMD)
3468 	    && (msg->user_data == NULL))
3469 	{
3470 		/* This is the local response to a command send, start
3471                    the timer for these.  The user_data will not be
3472                    NULL if this is a response send, and we will let
3473                    response sends just go through. */
3474 
3475 		/* Check for errors, if we get certain errors (ones
3476                    that mean basically we can try again later), we
3477                    ignore them and start the timer.  Otherwise we
3478                    report the error immediately. */
3479 		if ((msg->rsp_size >= 3) && (msg->rsp[2] != 0)
3480 		    && (msg->rsp[2] != IPMI_NODE_BUSY_ERR)
3481 		    && (msg->rsp[2] != IPMI_LOST_ARBITRATION_ERR)
3482 		    && (msg->rsp[2] != IPMI_BUS_ERR)
3483 		    && (msg->rsp[2] != IPMI_NAK_ON_WRITE_ERR))
3484 		{
3485 			int chan = msg->rsp[3] & 0xf;
3486 
3487 			/* Got an error sending the message, handle it. */
3488 			spin_lock_irqsave(&intf->counter_lock, flags);
3489 			if (chan >= IPMI_MAX_CHANNELS)
3490 				; /* This shouldn't happen */
3491 			else if ((intf->channels[chan].medium
3492 				  == IPMI_CHANNEL_MEDIUM_8023LAN)
3493 				 || (intf->channels[chan].medium
3494 				     == IPMI_CHANNEL_MEDIUM_ASYNC))
3495 				intf->sent_lan_command_errs++;
3496 			else
3497 				intf->sent_ipmb_command_errs++;
3498 			spin_unlock_irqrestore(&intf->counter_lock, flags);
3499 			intf_err_seq(intf, msg->msgid, msg->rsp[2]);
3500 		} else {
3501 			/* The message was sent, start the timer. */
3502 			intf_start_seq_timer(intf, msg->msgid);
3503 		}
3504 
3505 		ipmi_free_smi_msg(msg);
3506 		goto out;
3507 	}
3508 
3509 	/* To preserve message order, if the list is not empty, we
3510            tack this message onto the end of the list. */
3511 	spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
3512 	if (!list_empty(&intf->waiting_msgs)) {
3513 		list_add_tail(&msg->link, &intf->waiting_msgs);
3514 		spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
3515 		goto out;
3516 	}
3517 	spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
3518 
3519 	rv = handle_new_recv_msg(intf, msg);
3520 	if (rv > 0) {
3521 		/* Could not handle the message now, just add it to a
3522                    list to handle later. */
3523 		spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
3524 		list_add_tail(&msg->link, &intf->waiting_msgs);
3525 		spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
3526 	} else if (rv == 0) {
3527 		ipmi_free_smi_msg(msg);
3528 	}
3529 
3530  out:
3531 	return;
3532 }
3533 
3534 void ipmi_smi_watchdog_pretimeout(ipmi_smi_t intf)
3535 {
3536 	ipmi_user_t user;
3537 
3538 	rcu_read_lock();
3539 	list_for_each_entry_rcu(user, &intf->users, link) {
3540 		if (!user->handler->ipmi_watchdog_pretimeout)
3541 			continue;
3542 
3543 		user->handler->ipmi_watchdog_pretimeout(user->handler_data);
3544 	}
3545 	rcu_read_unlock();
3546 }
3547 
3548 
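/*
 * Build a fresh SMI message from a pending receive entry so the
 * request can be retransmitted.  The sequence table slot and seqid
 * are re-encoded into the msgid so the eventual response can be
 * matched back up.
 */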
3549 static struct ipmi_smi_msg *
3550 smi_from_recv_msg(ipmi_smi_t intf, struct ipmi_recv_msg *recv_msg,
3551 		  unsigned char seq, long seqid)
3552 {
3553 	struct ipmi_smi_msg *smi_msg = ipmi_alloc_smi_msg();
3554 	if (!smi_msg)
		/* If we can't allocate the message, just return; we
		   get 4 retries, so this should be OK. */
3557 		return NULL;
3558 
3559 	memcpy(smi_msg->data, recv_msg->msg.data, recv_msg->msg.data_len);
3560 	smi_msg->data_size = recv_msg->msg.data_len;
3561 	smi_msg->msgid = STORE_SEQ_IN_MSGID(seq, seqid);
3562 
3563 #ifdef DEBUG_MSGING
3564 	{
3565 		int m;
3566 		printk("Resend: ");
3567 		for (m = 0; m < smi_msg->data_size; m++)
3568 			printk(" %2.2x", smi_msg->data[m]);
3569 		printk("\n");
3570 	}
3571 #endif
3572 	return smi_msg;
3573 }
3574 
3575 static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent,
3576 			      struct list_head *timeouts, long timeout_period,
3577 			      int slot, unsigned long *flags)
3578 {
3579 	struct ipmi_recv_msg     *msg;
3580 	struct ipmi_smi_handlers *handlers;
3581 
3582 	if (intf->intf_num == -1)
3583 		return;
3584 
3585 	if (!ent->inuse)
3586 		return;
3587 
3588 	ent->timeout -= timeout_period;
3589 	if (ent->timeout > 0)
3590 		return;
3591 
3592 	if (ent->retries_left == 0) {
3593 		/* The message has used all its retries. */
3594 		ent->inuse = 0;
3595 		msg = ent->recv_msg;
3596 		list_add_tail(&msg->link, timeouts);
3597 		spin_lock(&intf->counter_lock);
3598 		if (ent->broadcast)
3599 			intf->timed_out_ipmb_broadcasts++;
3600 		else if (ent->recv_msg->addr.addr_type == IPMI_LAN_ADDR_TYPE)
3601 			intf->timed_out_lan_commands++;
3602 		else
3603 			intf->timed_out_ipmb_commands++;
3604 		spin_unlock(&intf->counter_lock);
3605 	} else {
3606 		struct ipmi_smi_msg *smi_msg;
3607 		/* More retries, send again. */
3608 
		/* Start with the max timer; it is set to the normal
		   timer after the message is sent. */
3611 		ent->timeout = MAX_MSG_TIMEOUT;
3612 		ent->retries_left--;
3613 		spin_lock(&intf->counter_lock);
3614 		if (ent->recv_msg->addr.addr_type == IPMI_LAN_ADDR_TYPE)
3615 			intf->retransmitted_lan_commands++;
3616 		else
3617 			intf->retransmitted_ipmb_commands++;
3618 		spin_unlock(&intf->counter_lock);
3619 
3620 		smi_msg = smi_from_recv_msg(intf, ent->recv_msg, slot,
3621 					    ent->seqid);
3622 		if (!smi_msg)
3623 			return;
3624 
3625 		spin_unlock_irqrestore(&intf->seq_lock, *flags);
3626 
		/* Send the new message.  We send with a zero
		 * priority.  It timed out; I doubt time is
		 * that critical now, and high priority
		 * messages are really only for messages to the
		 * local MC, which don't get resent. */
3632 		handlers = intf->handlers;
3633 		if (handlers)
3634 			intf->handlers->sender(intf->send_info,
3635 					       smi_msg, 0);
3636 		else
3637 			ipmi_free_smi_msg(smi_msg);
3638 
3639 		spin_lock_irqsave(&intf->seq_lock, *flags);
3640 	}
3641 }
3642 
3643 static void ipmi_timeout_handler(long timeout_period)
3644 {
3645 	ipmi_smi_t           intf;
3646 	struct list_head     timeouts;
3647 	struct ipmi_recv_msg *msg, *msg2;
3648 	struct ipmi_smi_msg  *smi_msg, *smi_msg2;
3649 	unsigned long        flags;
3650 	int                  i;
3651 
3652 	rcu_read_lock();
3653 	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
3654 		/* See if any waiting messages need to be processed. */
3655 		spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
3656 		list_for_each_entry_safe(smi_msg, smi_msg2,
3657 					 &intf->waiting_msgs, link) {
3658 			if (!handle_new_recv_msg(intf, smi_msg)) {
3659 				list_del(&smi_msg->link);
3660 				ipmi_free_smi_msg(smi_msg);
3661 			} else {
3662 				/* To preserve message order, quit if we
3663 				   can't handle a message. */
3664 				break;
3665 			}
3666 		}
3667 		spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
3668 
3669 		/* Go through the seq table and find any messages that
3670 		   have timed out, putting them in the timeouts
3671 		   list. */
3672 		INIT_LIST_HEAD(&timeouts);
3673 		spin_lock_irqsave(&intf->seq_lock, flags);
3674 		for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++)
3675 			check_msg_timeout(intf, &(intf->seq_table[i]),
3676 					  &timeouts, timeout_period, i,
3677 					  &flags);
3678 		spin_unlock_irqrestore(&intf->seq_lock, flags);
3679 
3680 		list_for_each_entry_safe(msg, msg2, &timeouts, link)
3681 			deliver_err_response(msg, IPMI_TIMEOUT_COMPLETION_CODE);
3682 
3683 		/*
3684 		 * Maintenance mode handling.  Check the timeout
3685 		 * optimistically before we claim the lock.  It may
3686 		 * mean a timeout gets missed occasionally, but that
3687 		 * only means the timeout gets extended by one period
3688 		 * in that case.  No big deal, and it avoids the lock
3689 		 * most of the time.
3690 		 */
3691 		if (intf->auto_maintenance_timeout > 0) {
3692 			spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
3693 			if (intf->auto_maintenance_timeout > 0) {
3694 				intf->auto_maintenance_timeout
3695 					-= timeout_period;
3696 				if (!intf->maintenance_mode
3697 				    && (intf->auto_maintenance_timeout <= 0))
3698 				{
3699 					intf->maintenance_mode_enable = 0;
3700 					maintenance_mode_update(intf);
3701 				}
3702 			}
3703 			spin_unlock_irqrestore(&intf->maintenance_mode_lock,
3704 					       flags);
3705 		}
3706 	}
3707 	rcu_read_unlock();
3708 }
3709 
3710 static void ipmi_request_event(void)
3711 {
3712 	ipmi_smi_t               intf;
3713 	struct ipmi_smi_handlers *handlers;
3714 
3715 	rcu_read_lock();
	/* Called from the timer; handlers may be NULL if the
	 * interface is going away, so check it before use. */
3718 	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
3719 		/* No event requests when in maintenance mode. */
3720 		if (intf->maintenance_mode_enable)
3721 			continue;
3722 
3723 		handlers = intf->handlers;
3724 		if (handlers)
3725 			handlers->request_events(intf->send_info);
3726 	}
3727 	rcu_read_unlock();
3728 }
3729 
3730 static struct timer_list ipmi_timer;
3731 
3732 /* Call every ~100 ms. */
3733 #define IPMI_TIMEOUT_TIME	100
3734 
3735 /* How many jiffies does it take to get to the timeout time. */
3736 #define IPMI_TIMEOUT_JIFFIES	((IPMI_TIMEOUT_TIME * HZ) / 1000)
3737 
/* Request events from the queue every second (this is the number of
   IPMI_TIMEOUT_TIME periods between event requests).  Hopefully, in the
3740    future, IPMI will add a way to know immediately if an event is in
3741    the queue and this silliness can go away. */
3742 #define IPMI_REQUEST_EV_TIME	(1000 / (IPMI_TIMEOUT_TIME))
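/* For example, with HZ=250 and the values above, the timer fires
   every 25 jiffies (about 100 ms) and events are requested every 10
   ticks (about once a second). */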
3743 
3744 static atomic_t stop_operation;
3745 static unsigned int ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
3746 
3747 static void ipmi_timeout(unsigned long data)
3748 {
3749 	if (atomic_read(&stop_operation))
3750 		return;
3751 
3752 	ticks_to_req_ev--;
3753 	if (ticks_to_req_ev == 0) {
3754 		ipmi_request_event();
3755 		ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
3756 	}
3757 
3758 	ipmi_timeout_handler(IPMI_TIMEOUT_TIME);
3759 
3760 	mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
3761 }
3762 
3763 
3764 static atomic_t smi_msg_inuse_count = ATOMIC_INIT(0);
3765 static atomic_t recv_msg_inuse_count = ATOMIC_INIT(0);
3766 
3767 /* FIXME - convert these to slabs. */
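/* These allocators use GFP_ATOMIC because callers may hold spinlocks
   or run from timer context (e.g. the retransmit path); the inuse
   counters exist only so cleanup_ipmi() can report leaks at module
   unload. */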
3768 static void free_smi_msg(struct ipmi_smi_msg *msg)
3769 {
3770 	atomic_dec(&smi_msg_inuse_count);
3771 	kfree(msg);
3772 }
3773 
3774 struct ipmi_smi_msg *ipmi_alloc_smi_msg(void)
3775 {
3776 	struct ipmi_smi_msg *rv;
3777 	rv = kmalloc(sizeof(struct ipmi_smi_msg), GFP_ATOMIC);
3778 	if (rv) {
3779 		rv->done = free_smi_msg;
3780 		rv->user_data = NULL;
3781 		atomic_inc(&smi_msg_inuse_count);
3782 	}
3783 	return rv;
3784 }
3785 
3786 static void free_recv_msg(struct ipmi_recv_msg *msg)
3787 {
3788 	atomic_dec(&recv_msg_inuse_count);
3789 	kfree(msg);
3790 }
3791 
3792 struct ipmi_recv_msg *ipmi_alloc_recv_msg(void)
3793 {
3794 	struct ipmi_recv_msg *rv;
3795 
3796 	rv = kmalloc(sizeof(struct ipmi_recv_msg), GFP_ATOMIC);
3797 	if (rv) {
3798 		rv->user = NULL;
3799 		rv->done = free_recv_msg;
3800 		atomic_inc(&recv_msg_inuse_count);
3801 	}
3802 	return rv;
3803 }
3804 
3805 void ipmi_free_recv_msg(struct ipmi_recv_msg *msg)
3806 {
3807 	if (msg->user)
3808 		kref_put(&msg->user->refcount, free_user);
3809 	msg->done(msg);
3810 }
3811 
3812 #ifdef CONFIG_IPMI_PANIC_EVENT
3813 
3814 static void dummy_smi_done_handler(struct ipmi_smi_msg *msg)
3815 {
3816 }
3817 
3818 static void dummy_recv_done_handler(struct ipmi_recv_msg *msg)
3819 {
3820 }
3821 
3822 #ifdef CONFIG_IPMI_PANIC_STRING
3823 static void event_receiver_fetcher(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
3824 {
3825 	if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
3826 	    && (msg->msg.netfn == IPMI_NETFN_SENSOR_EVENT_RESPONSE)
3827 	    && (msg->msg.cmd == IPMI_GET_EVENT_RECEIVER_CMD)
3828 	    && (msg->msg.data[0] == IPMI_CC_NO_ERROR))
3829 	{
		/* A Get Event Receiver response; save the receiver
		   address and LUN. */
3831 		intf->event_receiver = msg->msg.data[1];
3832 		intf->event_receiver_lun = msg->msg.data[2] & 0x3;
3833 	}
3834 }
3835 
3836 static void device_id_fetcher(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
3837 {
3838 	if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
3839 	    && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE)
3840 	    && (msg->msg.cmd == IPMI_GET_DEVICE_ID_CMD)
3841 	    && (msg->msg.data[0] == IPMI_CC_NO_ERROR))
3842 	{
		/* A Get Device ID response; record whether we are an
		   SEL device and whether we are an event generator. */
3845 		intf->local_sel_device = (msg->msg.data[6] >> 2) & 1;
3846 		intf->local_event_generator = (msg->msg.data[6] >> 5) & 1;
3847 	}
3848 }
3849 #endif
3850 
3851 static void send_panic_events(char *str)
3852 {
3853 	struct kernel_ipmi_msg            msg;
3854 	ipmi_smi_t                        intf;
3855 	unsigned char                     data[16];
3856 	struct ipmi_system_interface_addr *si;
3857 	struct ipmi_addr                  addr;
3858 	struct ipmi_smi_msg               smi_msg;
3859 	struct ipmi_recv_msg              recv_msg;
3860 
3861 	si = (struct ipmi_system_interface_addr *) &addr;
3862 	si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
3863 	si->channel = IPMI_BMC_CHANNEL;
3864 	si->lun = 0;
3865 
	/* Fill in an event telling the BMC that we have failed. */
3867 	msg.netfn = 0x04; /* Sensor or Event. */
3868 	msg.cmd = 2; /* Platform event command. */
3869 	msg.data = data;
3870 	msg.data_len = 8;
3871 	data[0] = 0x41; /* Kernel generator ID, IPMI table 5-4 */
3872 	data[1] = 0x03; /* This is for IPMI 1.0. */
3873 	data[2] = 0x20; /* OS Critical Stop, IPMI table 36-3 */
3874 	data[4] = 0x6f; /* Sensor specific, IPMI table 36-1 */
3875 	data[5] = 0xa1; /* Runtime stop OEM bytes 2 & 3. */
3876 
3877 	/* Put a few breadcrumbs in.  Hopefully later we can add more things
3878 	   to make the panic events more useful. */
3879 	if (str) {
3880 		data[3] = str[0];
3881 		data[6] = str[1];
3882 		data[7] = str[2];
3883 	}
3884 
3885 	smi_msg.done = dummy_smi_done_handler;
3886 	recv_msg.done = dummy_recv_done_handler;
3887 
3888 	/* For every registered interface, send the event. */
3889 	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
3890 		if (!intf->handlers)
3891 			/* Interface is not ready. */
3892 			continue;
3893 
3894 		/* Send the event announcing the panic. */
3895 		intf->handlers->set_run_to_completion(intf->send_info, 1);
3896 		i_ipmi_request(NULL,
3897 			       intf,
3898 			       &addr,
3899 			       0,
3900 			       &msg,
3901 			       intf,
3902 			       &smi_msg,
3903 			       &recv_msg,
3904 			       0,
3905 			       intf->channels[0].address,
3906 			       intf->channels[0].lun,
3907 			       0, 1); /* Don't retry, and don't wait. */
3908 	}
3909 
3910 #ifdef CONFIG_IPMI_PANIC_STRING
	/* On every interface, dump a set of OEM events holding the
	   panic string. */
3913 	if (!str)
3914 		return;
3915 
3916 	/* For every registered interface, send the event. */
3917 	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
3918 		char                  *p = str;
3919 		struct ipmi_ipmb_addr *ipmb;
3920 		int                   j;
3921 
3922 		if (intf->intf_num == -1)
3923 			/* Interface was not ready yet. */
3924 			continue;
3925 
3926 		/*
		 * intf_num is used as a marker to tell if the
3928 		 * interface is valid.  Thus we need a read barrier to
3929 		 * make sure data fetched before checking intf_num
3930 		 * won't be used.
3931 		 */
3932 		smp_rmb();
3933 
3934 		/* First job here is to figure out where to send the
3935 		   OEM events.  There's no way in IPMI to send OEM
3936 		   events using an event send command, so we have to
3937 		   find the SEL to put them in and stick them in
3938 		   there. */
3939 
3940 		/* Get capabilities from the get device id. */
3941 		intf->local_sel_device = 0;
3942 		intf->local_event_generator = 0;
3943 		intf->event_receiver = 0;
3944 
3945 		/* Request the device info from the local MC. */
3946 		msg.netfn = IPMI_NETFN_APP_REQUEST;
3947 		msg.cmd = IPMI_GET_DEVICE_ID_CMD;
3948 		msg.data = NULL;
3949 		msg.data_len = 0;
3950 		intf->null_user_handler = device_id_fetcher;
3951 		i_ipmi_request(NULL,
3952 			       intf,
3953 			       &addr,
3954 			       0,
3955 			       &msg,
3956 			       intf,
3957 			       &smi_msg,
3958 			       &recv_msg,
3959 			       0,
3960 			       intf->channels[0].address,
3961 			       intf->channels[0].lun,
3962 			       0, 1); /* Don't retry, and don't wait. */
3963 
3964 		if (intf->local_event_generator) {
3965 			/* Request the event receiver from the local MC. */
3966 			msg.netfn = IPMI_NETFN_SENSOR_EVENT_REQUEST;
3967 			msg.cmd = IPMI_GET_EVENT_RECEIVER_CMD;
3968 			msg.data = NULL;
3969 			msg.data_len = 0;
3970 			intf->null_user_handler = event_receiver_fetcher;
3971 			i_ipmi_request(NULL,
3972 				       intf,
3973 				       &addr,
3974 				       0,
3975 				       &msg,
3976 				       intf,
3977 				       &smi_msg,
3978 				       &recv_msg,
3979 				       0,
3980 				       intf->channels[0].address,
3981 				       intf->channels[0].lun,
3982 				       0, 1); /* no retry, and no wait. */
3983 		}
3984 		intf->null_user_handler = NULL;
3985 
3986 		/* Validate the event receiver.  The low bit must not
3987 		   be 1 (it must be a valid IPMB address), it cannot
3988 		   be zero, and it must not be my address. */
		if (((intf->event_receiver & 1) == 0)
3990 		    && (intf->event_receiver != 0)
3991 		    && (intf->event_receiver != intf->channels[0].address))
3992 		{
3993 			/* The event receiver is valid, send an IPMB
3994 			   message. */
3995 			ipmb = (struct ipmi_ipmb_addr *) &addr;
3996 			ipmb->addr_type = IPMI_IPMB_ADDR_TYPE;
3997 			ipmb->channel = 0; /* FIXME - is this right? */
3998 			ipmb->lun = intf->event_receiver_lun;
3999 			ipmb->slave_addr = intf->event_receiver;
4000 		} else if (intf->local_sel_device) {
			/* The event receiver was not valid (or was
			   me), but I am an SEL device, so just dump
			   the event in my own SEL. */
4004 			si = (struct ipmi_system_interface_addr *) &addr;
4005 			si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
4006 			si->channel = IPMI_BMC_CHANNEL;
4007 			si->lun = 0;
4008 		} else
			continue; /* Nowhere to send the event. */
4010 
4011 
4012 		msg.netfn = IPMI_NETFN_STORAGE_REQUEST; /* Storage. */
4013 		msg.cmd = IPMI_ADD_SEL_ENTRY_CMD;
4014 		msg.data = data;
4015 		msg.data_len = 16;
4016 
4017 		j = 0;
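		/* Each Add SEL Entry request carries one 16-byte OEM
		   record (type 0xf0, no timestamp): bytes 0-1 are the
		   record ID, byte 2 the record type, byte 3 our slave
		   address, byte 4 a sequence number, and bytes 5-15
		   the next chunk (up to 11 bytes) of the panic
		   string. */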
4018 		while (*p) {
4019 			int size = strlen(p);
4020 
4021 			if (size > 11)
4022 				size = 11;
4023 			data[0] = 0;
4024 			data[1] = 0;
4025 			data[2] = 0xf0; /* OEM event without timestamp. */
4026 			data[3] = intf->channels[0].address;
4027 			data[4] = j++; /* sequence # */
4028 			/* Always give 11 bytes, so strncpy will fill
4029 			   it with zeroes for me. */
4030 			strncpy(data+5, p, 11);
4031 			p += size;
4032 
4033 			i_ipmi_request(NULL,
4034 				       intf,
4035 				       &addr,
4036 				       0,
4037 				       &msg,
4038 				       intf,
4039 				       &smi_msg,
4040 				       &recv_msg,
4041 				       0,
4042 				       intf->channels[0].address,
4043 				       intf->channels[0].lun,
4044 				       0, 1); /* no retry, and no wait. */
4045 		}
4046 	}
4047 #endif /* CONFIG_IPMI_PANIC_STRING */
4048 }
4049 #endif /* CONFIG_IPMI_PANIC_EVENT */
4050 
4051 static int has_panicked;
4052 
4053 static int panic_event(struct notifier_block *this,
4054 		       unsigned long         event,
		       void                  *ptr)
4056 {
4057 	ipmi_smi_t intf;
4058 
4059 	if (has_panicked)
4060 		return NOTIFY_DONE;
4061 	has_panicked = 1;
4062 
4063 	/* For every registered interface, set it to run to completion. */
4064 	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
4065 		if (!intf->handlers)
4066 			/* Interface is not ready. */
4067 			continue;
4068 
4069 		intf->handlers->set_run_to_completion(intf->send_info, 1);
4070 	}
4071 
4072 #ifdef CONFIG_IPMI_PANIC_EVENT
4073 	send_panic_events(ptr);
4074 #endif
4075 
4076 	return NOTIFY_DONE;
4077 }
4078 
4079 static struct notifier_block panic_block = {
4080 	.notifier_call	= panic_event,
4081 	.next		= NULL,
4082 	.priority	= 200	/* priority: INT_MAX >= x >= 0 */
4083 };
4084 
4085 static int ipmi_init_msghandler(void)
4086 {
4087 	int rv;
4088 
4089 	if (initialized)
4090 		return 0;
4091 
4092 	rv = driver_register(&ipmidriver);
4093 	if (rv) {
4094 		printk(KERN_ERR PFX "Could not register IPMI driver\n");
4095 		return rv;
4096 	}
4097 
4098 	printk(KERN_INFO "ipmi message handler version "
4099 	       IPMI_DRIVER_VERSION "\n");
4100 
4101 #ifdef CONFIG_PROC_FS
4102 	proc_ipmi_root = proc_mkdir("ipmi", NULL);
	if (!proc_ipmi_root) {
		printk(KERN_ERR PFX "Unable to create IPMI proc dir\n");
		/* Undo the driver registration on failure. */
		driver_unregister(&ipmidriver);
		return -ENOMEM;
	}
4107 
4108 	proc_ipmi_root->owner = THIS_MODULE;
4109 #endif /* CONFIG_PROC_FS */
4110 
4111 	setup_timer(&ipmi_timer, ipmi_timeout, 0);
4112 	mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
4113 
4114 	atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
4115 
4116 	initialized = 1;
4117 
4118 	return 0;
4119 }
4120 
4121 static __init int ipmi_init_msghandler_mod(void)
4122 {
4123 	ipmi_init_msghandler();
4124 	return 0;
4125 }
4126 
4127 static __exit void cleanup_ipmi(void)
4128 {
4129 	int count;
4130 
4131 	if (!initialized)
4132 		return;
4133 
4134 	atomic_notifier_chain_unregister(&panic_notifier_list, &panic_block);
4135 
4136 	/* This can't be called if any interfaces exist, so no worry about
4137 	   shutting down the interfaces. */
4138 
4139 	/* Tell the timer to stop, then wait for it to stop.  This avoids
4140 	   problems with race conditions removing the timer here. */
4141 	atomic_inc(&stop_operation);
4142 	del_timer_sync(&ipmi_timer);
4143 
4144 #ifdef CONFIG_PROC_FS
4145 	remove_proc_entry(proc_ipmi_root->name, NULL);
4146 #endif /* CONFIG_PROC_FS */
4147 
4148 	driver_unregister(&ipmidriver);
4149 
4150 	initialized = 0;
4151 
4152 	/* Check for buffer leaks. */
4153 	count = atomic_read(&smi_msg_inuse_count);
4154 	if (count != 0)
4155 		printk(KERN_WARNING PFX "SMI message count %d at exit\n",
4156 		       count);
4157 	count = atomic_read(&recv_msg_inuse_count);
4158 	if (count != 0)
4159 		printk(KERN_WARNING PFX "recv message count %d at exit\n",
4160 		       count);
4161 }
4162 module_exit(cleanup_ipmi);
4163 
4164 module_init(ipmi_init_msghandler_mod);
4165 MODULE_LICENSE("GPL");
4166 MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
4167 MODULE_DESCRIPTION("Incoming and outgoing message routing for an IPMI interface.");
4168 MODULE_VERSION(IPMI_DRIVER_VERSION);
4169 
4170 EXPORT_SYMBOL(ipmi_create_user);
4171 EXPORT_SYMBOL(ipmi_destroy_user);
4172 EXPORT_SYMBOL(ipmi_get_version);
4173 EXPORT_SYMBOL(ipmi_request_settime);
4174 EXPORT_SYMBOL(ipmi_request_supply_msgs);
4175 EXPORT_SYMBOL(ipmi_poll_interface);
4176 EXPORT_SYMBOL(ipmi_register_smi);
4177 EXPORT_SYMBOL(ipmi_unregister_smi);
4178 EXPORT_SYMBOL(ipmi_register_for_cmd);
4179 EXPORT_SYMBOL(ipmi_unregister_for_cmd);
4180 EXPORT_SYMBOL(ipmi_smi_msg_received);
4181 EXPORT_SYMBOL(ipmi_smi_watchdog_pretimeout);
4182 EXPORT_SYMBOL(ipmi_alloc_smi_msg);
4183 EXPORT_SYMBOL(ipmi_addr_length);
4184 EXPORT_SYMBOL(ipmi_validate_addr);
4185 EXPORT_SYMBOL(ipmi_set_gets_events);
4186 EXPORT_SYMBOL(ipmi_smi_watcher_register);
4187 EXPORT_SYMBOL(ipmi_smi_watcher_unregister);
4188 EXPORT_SYMBOL(ipmi_set_my_address);
4189 EXPORT_SYMBOL(ipmi_get_my_address);
4190 EXPORT_SYMBOL(ipmi_set_my_LUN);
4191 EXPORT_SYMBOL(ipmi_get_my_LUN);
4192 EXPORT_SYMBOL(ipmi_smi_add_proc_entry);
4193 EXPORT_SYMBOL(ipmi_user_set_run_to_completion);
4194 EXPORT_SYMBOL(ipmi_free_recv_msg);
4195