4 * The interface to the IPMI driver for the system interfaces (KCS, SMIC,
7 * Author: MontaVista Software, Inc.
8 * Corey Minyard <minyard@mvista.com>
11 * Copyright 2002 MontaVista Software Inc.
13 * This program is free software; you can redistribute it and/or modify it
14 * under the terms of the GNU General Public License as published by the
15 * Free Software Foundation; either version 2 of the License, or (at your
16 * option) any later version.
19 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
20 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
21 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
22 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
23 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
24 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
25 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
26 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
27 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
28 * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 * You should have received a copy of the GNU General Public License along
31 * with this program; if not, write to the Free Software Foundation, Inc.,
32 * 675 Mass Ave, Cambridge, MA 02139, USA.
36 * This file holds the "policy" for the interface to the SMI state
37 * machine. It does the configuration, handles timers and interrupts,
38 * and drives the real SMI state machine.
41 #include <linux/config.h>
42 #include <linux/module.h>
43 #include <linux/moduleparam.h>
44 #include <asm/system.h>
45 #include <linux/sched.h>
46 #include <linux/timer.h>
47 #include <linux/errno.h>
48 #include <linux/spinlock.h>
49 #include <linux/slab.h>
50 #include <linux/delay.h>
51 #include <linux/list.h>
52 #include <linux/pci.h>
53 #include <linux/ioport.h>
54 #include <linux/irq.h>
55 #ifdef CONFIG_HIGH_RES_TIMERS
56 #include <linux/hrtime.h>
57 # if defined(schedule_next_int)
58 /* Old high-res timer code, do translations. */
59 # define get_arch_cycles(a) quick_update_jiffies_sub(a)
60 # define arch_cycles_per_jiffy cycles_per_jiffies
62 static inline void add_usec_to_timer(struct timer_list *t, long v)
64 t->sub_expires += nsec_to_arch_cycle(v * 1000);
65 while (t->sub_expires >= arch_cycles_per_jiffy)
68 t->sub_expires -= arch_cycles_per_jiffy;
72 #include <linux/interrupt.h>
73 #include <linux/rcupdate.h>
74 #include <linux/ipmi_smi.h>
76 #include "ipmi_si_sm.h"
77 #include <linux/init.h>
79 #define IPMI_SI_VERSION "v31"
81 /* Measure times between events in the driver. */
84 /* Call every 10 ms. */
85 #define SI_TIMEOUT_TIME_USEC 10000
86 #define SI_USEC_PER_JIFFY (1000000/HZ)
87 #define SI_TIMEOUT_JIFFIES (SI_TIMEOUT_TIME_USEC/SI_USEC_PER_JIFFY)
88 #define SI_SHORT_TIMEOUT_USEC 250 /* .25ms when the SM request a
96 SI_CLEARING_FLAGS_THEN_SET_IRQ,
98 SI_ENABLE_INTERRUPTS1,
100 /* FIXME - add watchdog stuff. */
104 SI_KCS, SI_SMIC, SI_BT
110 struct si_sm_data *si_sm;
111 struct si_sm_handlers *handlers;
112 enum si_type si_type;
115 struct list_head xmit_msgs;
116 struct list_head hp_xmit_msgs;
117 struct ipmi_smi_msg *curr_msg;
118 enum si_intf_state si_state;
120 /* Used to handle the various types of I/O that can occur with
123 int (*io_setup)(struct smi_info *info);
124 void (*io_cleanup)(struct smi_info *info);
125 int (*irq_setup)(struct smi_info *info);
126 void (*irq_cleanup)(struct smi_info *info);
127 unsigned int io_size;
129 /* Flags from the last GET_MSG_FLAGS command, used when an ATTN
130 is set to hold the flags until we are done handling everything
132 #define RECEIVE_MSG_AVAIL 0x01
133 #define EVENT_MSG_BUFFER_FULL 0x02
134 #define WDT_PRE_TIMEOUT_INT 0x08
135 unsigned char msg_flags;
137 /* If set to true, this will request events the next time the
138 state machine is idle. */
141 /* If true, run the state machine to completion on every send
142 call. Generally used after a panic to make sure stuff goes
144 int run_to_completion;
146 /* The I/O port of an SI interface. */
149 /* zero if no irq; */
152 /* The timer for this si. */
153 struct timer_list si_timer;
155 /* The time (in jiffies) the last timeout occurred at. */
156 unsigned long last_timeout_jiffies;
158 /* Used to gracefully stop the timer without race conditions. */
159 volatile int stop_operation;
160 volatile int timer_stopped;
162 /* The driver will disable interrupts when it gets into a
163 situation where it cannot handle messages due to lack of
164 memory. Once that situation clears up, it will re-enable
166 int interrupt_disabled;
168 unsigned char ipmi_si_dev_rev;
169 unsigned char ipmi_si_fw_rev_major;
170 unsigned char ipmi_si_fw_rev_minor;
171 unsigned char ipmi_version_major;
172 unsigned char ipmi_version_minor;
174 /* Counters and things for the proc filesystem. */
175 spinlock_t count_lock;
176 unsigned long short_timeouts;
177 unsigned long long_timeouts;
178 unsigned long timeout_restarts;
180 unsigned long interrupts;
181 unsigned long attentions;
182 unsigned long flag_fetches;
183 unsigned long hosed_count;
184 unsigned long complete_transactions;
185 unsigned long events;
186 unsigned long watchdog_pretimeouts;
187 unsigned long incoming_messages;
190 static void si_restart_short_timer(struct smi_info *smi_info);
/* Hand a completed response up to the IPMI message handler.  Called
   with si_lock held; the lock is dropped around the upcall because
   ipmi_smi_msg_received() may re-enter this driver (e.g. to queue a
   new message). */
static void deliver_recv_msg(struct smi_info *smi_info,
			     struct ipmi_smi_msg *msg)
{
	/* Deliver the message to the upper layer with the lock
	   released. */
	spin_unlock(&(smi_info->si_lock));
	ipmi_smi_msg_received(smi_info->intf, msg);
	spin_lock(&(smi_info->si_lock));
}
/* The current transaction failed ("hosed"); synthesize an error
   response for the in-flight message and deliver it upstream so the
   caller is not left waiting forever. */
static void return_hosed_msg(struct smi_info *smi_info)
{
	struct ipmi_smi_msg *msg = smi_info->curr_msg;

	/* Make it a response: response netfn is request netfn | 1,
	   shifted into the high bits. */
	msg->rsp[0] = msg->data[0] | 4;
	msg->rsp[1] = msg->data[1];
	msg->rsp[2] = 0xFF; /* Unknown error. */
	msg->rsp_size = 3;

	smi_info->curr_msg = NULL;
	deliver_recv_msg(smi_info, msg);
}
/* Dequeue the next transmit message (high-priority queue first) and
   start it on the state machine.  Returns SI_SM_IDLE when both queues
   are empty, SI_SM_CALL_WITHOUT_DELAY when a transaction was started.
   Must be called with interrupts off and si_lock held. */
static enum si_sm_result start_next_msg(struct smi_info *smi_info)
{
	enum si_sm_result rv = SI_SM_IDLE;
	struct list_head *entry = NULL;
#ifdef DEBUG_TIMING
	struct timeval t;
#endif

	/* No need to save flags, we already have interrupts off and we
	   already hold the SMI lock. */
	spin_lock(&(smi_info->msg_lock));

	/* Pick the high priority queue first. */
	if (! list_empty(&(smi_info->hp_xmit_msgs))) {
		entry = smi_info->hp_xmit_msgs.next;
	} else if (! list_empty(&(smi_info->xmit_msgs))) {
		entry = smi_info->xmit_msgs.next;
	}

	if (!entry) {
		smi_info->curr_msg = NULL;
	} else {
		int err;

		list_del(entry);
		smi_info->curr_msg = list_entry(entry,
						struct ipmi_smi_msg,
						link);
#ifdef DEBUG_TIMING
		do_gettimeofday(&t);
		printk("**Start2: %d.%9.9d\n", t.tv_sec, t.tv_usec);
#endif
		err = smi_info->handlers->start_transaction(
			smi_info->si_sm,
			smi_info->curr_msg->data,
			smi_info->curr_msg->data_size);
		if (err) {
			/* State machine rejected it; fail it upstream. */
			return_hosed_msg(smi_info);
		}

		rv = SI_SM_CALL_WITHOUT_DELAY;
	}
	spin_unlock(&(smi_info->msg_lock));

	return rv;
}
/* Begin the interrupt-enable handshake: fetch the BMC's current
   global enables.  The result is handled in SI_ENABLE_INTERRUPTS1,
   which then sets the message-queue-interrupt bit. */
static void start_enable_irq(struct smi_info *smi_info)
{
	unsigned char msg[2];

	/* If we are enabling interrupts, we have to tell the
	   BMC to use them. */
	msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
	msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;

	smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
	smi_info->si_state = SI_ENABLE_INTERRUPTS1;
}
/* Issue a Clear Message Flags command for the watchdog pre-timeout
   bit; completion is handled in the SI_CLEARING_FLAGS* states. */
static void start_clear_flags(struct smi_info *smi_info)
{
	unsigned char msg[3];

	/* Make sure the watchdog pre-timeout flag is not set at startup. */
	msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
	msg[1] = IPMI_CLEAR_MSG_FLAGS_CMD;
	msg[2] = WDT_PRE_TIMEOUT_INT;

	smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3);
	smi_info->si_state = SI_CLEARING_FLAGS;
}
/* When we have a situation where we run out of memory and cannot
   allocate messages, we just leave them in the BMC and run the system
   polled until we can allocate some memory.  Once we have some
   memory, we will re-enable the interrupt. */
static inline void disable_si_irq(struct smi_info *smi_info)
{
	if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
		/* _nosync: may be called from the irq handler itself. */
		disable_irq_nosync(smi_info->irq);
		smi_info->interrupt_disabled = 1;
	}
}
/* Re-enable the interface interrupt once message allocation succeeds
   again (counterpart of disable_si_irq() above). */
static inline void enable_si_irq(struct smi_info *smi_info)
{
	if ((smi_info->irq) && (smi_info->interrupt_disabled)) {
		enable_irq(smi_info->irq);
		smi_info->interrupt_disabled = 0;
	}
}
/* Act on the message flags fetched from the BMC (smi_info->msg_flags):
   watchdog pre-timeout is reported upstream, pending messages and
   buffered events are fetched one at a time.  Called with si_lock
   held; may drop it via deliver paths. */
static void handle_flags(struct smi_info *smi_info)
{
	if (smi_info->msg_flags & WDT_PRE_TIMEOUT_INT) {
		/* Watchdog pre-timeout */
		spin_lock(&smi_info->count_lock);
		smi_info->watchdog_pretimeouts++;
		spin_unlock(&smi_info->count_lock);

		start_clear_flags(smi_info);
		smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT;
		/* Drop the lock around the upcall, as for delivery. */
		spin_unlock(&(smi_info->si_lock));
		ipmi_smi_watchdog_pretimeout(smi_info->intf);
		spin_lock(&(smi_info->si_lock));
	} else if (smi_info->msg_flags & RECEIVE_MSG_AVAIL) {
		/* Messages available. */
		smi_info->curr_msg = ipmi_alloc_smi_msg();
		if (!smi_info->curr_msg) {
			/* Out of memory: go polled until we can
			   allocate again. */
			disable_si_irq(smi_info);
			smi_info->si_state = SI_NORMAL;
			return;
		}
		enable_si_irq(smi_info);

		smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
		smi_info->curr_msg->data[1] = IPMI_GET_MSG_CMD;
		smi_info->curr_msg->data_size = 2;

		smi_info->handlers->start_transaction(
			smi_info->si_sm,
			smi_info->curr_msg->data,
			smi_info->curr_msg->data_size);
		smi_info->si_state = SI_GETTING_MESSAGES;
	} else if (smi_info->msg_flags & EVENT_MSG_BUFFER_FULL) {
		/* Events available. */
		smi_info->curr_msg = ipmi_alloc_smi_msg();
		if (!smi_info->curr_msg) {
			disable_si_irq(smi_info);
			smi_info->si_state = SI_NORMAL;
			return;
		}
		enable_si_irq(smi_info);

		smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
		smi_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD;
		smi_info->curr_msg->data_size = 2;

		smi_info->handlers->start_transaction(
			smi_info->si_sm,
			smi_info->curr_msg->data,
			smi_info->curr_msg->data_size);
		smi_info->si_state = SI_GETTING_EVENTS;
	} else {
		smi_info->si_state = SI_NORMAL;
	}
}
/* A state-machine transaction just completed; dispatch on the driver
   state that issued it, collect the result, and advance si_state.
   Called with si_lock held (deliver_recv_msg() drops it briefly). */
static void handle_transaction_done(struct smi_info *smi_info)
{
	struct ipmi_smi_msg *msg;
#ifdef DEBUG_TIMING
	struct timeval t;

	do_gettimeofday(&t);
	printk("**Done: %d.%9.9d\n", t.tv_sec, t.tv_usec);
#endif
	switch (smi_info->si_state) {
	case SI_NORMAL:
		if (!smi_info->curr_msg)
			break;

		smi_info->curr_msg->rsp_size
			= smi_info->handlers->get_result(
				smi_info->si_sm,
				smi_info->curr_msg->rsp,
				IPMI_MAX_MSG_LENGTH);

		/* Do this here because deliver_recv_msg() releases the
		   lock, and a new message can be put in during the
		   time the lock is released. */
		msg = smi_info->curr_msg;
		smi_info->curr_msg = NULL;
		deliver_recv_msg(smi_info, msg);
		break;

	case SI_GETTING_FLAGS:
	{
		unsigned char msg[4];
		unsigned int  len;

		/* We got the flags from the SMI, now handle them. */
		len = smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
		if (msg[2] != 0) {
			/* Error fetching flags, just give up for
			   now. */
			smi_info->si_state = SI_NORMAL;
		} else if (len < 3) {
			/* Hmm, no flags.  That's technically illegal, but
			   don't use uninitialized data. */
			smi_info->si_state = SI_NORMAL;
		} else {
			smi_info->msg_flags = msg[3];
			handle_flags(smi_info);
		}
		break;
	}

	case SI_CLEARING_FLAGS:
	case SI_CLEARING_FLAGS_THEN_SET_IRQ:
	{
		unsigned char msg[3];

		/* We cleared the flags. */
		smi_info->handlers->get_result(smi_info->si_sm, msg, 3);
		if (msg[2] != 0) {
			/* Error clearing flags */
			printk(KERN_WARNING
			       "ipmi_si: Error clearing flags: %2.2x\n",
			       msg[2]);
		}
		if (smi_info->si_state == SI_CLEARING_FLAGS_THEN_SET_IRQ)
			start_enable_irq(smi_info);
		else
			smi_info->si_state = SI_NORMAL;
		break;
	}

	case SI_GETTING_EVENTS:
	{
		smi_info->curr_msg->rsp_size
			= smi_info->handlers->get_result(
				smi_info->si_sm,
				smi_info->curr_msg->rsp,
				IPMI_MAX_MSG_LENGTH);

		/* Do this here because deliver_recv_msg() releases the
		   lock, and a new message can be put in during the
		   time the lock is released. */
		msg = smi_info->curr_msg;
		smi_info->curr_msg = NULL;
		if (msg->rsp[2] != 0) {
			/* Error getting event, probably done. */
			msg->done(msg);

			/* Take off the event flag. */
			smi_info->msg_flags &= ~EVENT_MSG_BUFFER_FULL;
		} else {
			spin_lock(&smi_info->count_lock);
			smi_info->events++;
			spin_unlock(&smi_info->count_lock);

			deliver_recv_msg(smi_info, msg);
		}
		/* More flags may still be pending; keep draining. */
		handle_flags(smi_info);
		break;
	}

	case SI_GETTING_MESSAGES:
	{
		smi_info->curr_msg->rsp_size
			= smi_info->handlers->get_result(
				smi_info->si_sm,
				smi_info->curr_msg->rsp,
				IPMI_MAX_MSG_LENGTH);

		/* Do this here because deliver_recv_msg() releases the
		   lock, and a new message can be put in during the
		   time the lock is released. */
		msg = smi_info->curr_msg;
		smi_info->curr_msg = NULL;
		if (msg->rsp[2] != 0) {
			/* Error getting message, probably done. */
			msg->done(msg);

			/* Take off the msg flag. */
			smi_info->msg_flags &= ~RECEIVE_MSG_AVAIL;
		} else {
			spin_lock(&smi_info->count_lock);
			smi_info->incoming_messages++;
			spin_unlock(&smi_info->count_lock);

			deliver_recv_msg(smi_info, msg);
		}
		handle_flags(smi_info);
		break;
	}

	case SI_ENABLE_INTERRUPTS1:
	{
		unsigned char msg[4];

		/* We got the flags from the SMI, now handle them. */
		smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
		if (msg[2] != 0) {
			printk(KERN_WARNING
			       "ipmi_si: Could not enable interrupts"
			       ", failed get, using polled mode.\n");
			smi_info->si_state = SI_NORMAL;
		} else {
			msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
			msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
			msg[2] = msg[3] | 1; /* enable msg queue int */
			smi_info->handlers->start_transaction(
				smi_info->si_sm, msg, 3);
			smi_info->si_state = SI_ENABLE_INTERRUPTS2;
		}
		break;
	}

	case SI_ENABLE_INTERRUPTS2:
	{
		unsigned char msg[4];

		/* We got the flags from the SMI, now handle them. */
		smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
		if (msg[2] != 0) {
			printk(KERN_WARNING
			       "ipmi_si: Could not enable interrupts"
			       ", failed set, using polled mode.\n");
		}
		smi_info->si_state = SI_NORMAL;
		break;
	}
	}
}
/* Called on timeouts and events.  Timeouts should pass the elapsed
   time, interrupts should pass in zero.

   Core driver loop: runs the low-level state machine until it wants a
   delay, processes completions/errors, then prefers ATTN handling,
   then starts queued messages, then (if idle and requested) fetches
   flags for the upper layer.  Must be called with si_lock held and
   interrupts off. */
static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
					   int time)
{
	enum si_sm_result si_sm_result;

 restart:
	/* There used to be a loop here that waited a little while
	   (around 25us) before giving up.  That turned out to be
	   pointless, the minimum delays I was seeing were in the 300us
	   range, which is far too long to wait in an interrupt.  So
	   we just run until the state machine tells us something
	   happened or it needs a delay. */
	si_sm_result = smi_info->handlers->event(smi_info->si_sm, time);
	time = 0;
	while (si_sm_result == SI_SM_CALL_WITHOUT_DELAY)
	{
		si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
	}

	if (si_sm_result == SI_SM_TRANSACTION_COMPLETE)
	{
		spin_lock(&smi_info->count_lock);
		smi_info->complete_transactions++;
		spin_unlock(&smi_info->count_lock);

		handle_transaction_done(smi_info);
		si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
	}
	else if (si_sm_result == SI_SM_HOSED)
	{
		spin_lock(&smi_info->count_lock);
		smi_info->hosed_count++;
		spin_unlock(&smi_info->count_lock);

		if (smi_info->curr_msg != NULL) {
			/* If we were handling a user message, format
			   a response to send to the upper layer to
			   tell it about the error. */
			return_hosed_msg(smi_info);
		}
		si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
		smi_info->si_state = SI_NORMAL;
	}

	/* We prefer handling attn over new messages. */
	if (si_sm_result == SI_SM_ATTN)
	{
		unsigned char msg[2];

		spin_lock(&smi_info->count_lock);
		smi_info->attentions++;
		spin_unlock(&smi_info->count_lock);

		/* Got a attn, send down a get message flags to see
		   what's causing it.  It would be better to handle
		   this in the upper layer, but due to the way
		   interrupts work with the SMI, that's not really
		   possible. */
		msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
		msg[1] = IPMI_GET_MSG_FLAGS_CMD;

		smi_info->handlers->start_transaction(
			smi_info->si_sm, msg, 2);
		smi_info->si_state = SI_GETTING_FLAGS;
		goto restart;
	}

	/* If we are currently idle, try to start the next message. */
	if (si_sm_result == SI_SM_IDLE) {
		spin_lock(&smi_info->count_lock);
		smi_info->idles++;
		spin_unlock(&smi_info->count_lock);

		si_sm_result = start_next_msg(smi_info);
		if (si_sm_result != SI_SM_IDLE)
			goto restart;
	}

	if ((si_sm_result == SI_SM_IDLE)
	    && (atomic_read(&smi_info->req_events)))
	{
		/* We are idle and the upper layer requested that I fetch
		   events, so do so. */
		unsigned char msg[2];

		spin_lock(&smi_info->count_lock);
		smi_info->flag_fetches++;
		spin_unlock(&smi_info->count_lock);

		atomic_set(&smi_info->req_events, 0);
		msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
		msg[1] = IPMI_GET_MSG_FLAGS_CMD;

		smi_info->handlers->start_transaction(
			smi_info->si_sm, msg, 2);
		smi_info->si_state = SI_GETTING_FLAGS;
		goto restart;
	}

	return si_sm_result;
}
/* ipmi_smi_handlers.sender entry point: queue msg for transmission
   (hp queue if priority > 0) and kick the interface if it is idle.
   In run_to_completion mode (panic path) the transaction is driven
   synchronously to completion instead. */
static void sender(void *send_info,
		   struct ipmi_smi_msg *msg,
		   int priority)
{
	struct smi_info *smi_info = send_info;
	enum si_sm_result result;
	unsigned long flags;
#ifdef DEBUG_TIMING
	struct timeval t;
#endif

	spin_lock_irqsave(&(smi_info->msg_lock), flags);
#ifdef DEBUG_TIMING
	do_gettimeofday(&t);
	printk("**Enqueue: %d.%9.9d\n", t.tv_sec, t.tv_usec);
#endif

	if (smi_info->run_to_completion) {
		/* If we are running to completion, then throw it in
		   the list and run transactions until everything is
		   clear.  Priority doesn't matter here. */
		list_add_tail(&(msg->link), &(smi_info->xmit_msgs));

		/* We have to release the msg lock and claim the smi
		   lock in this case, because of race conditions. */
		spin_unlock_irqrestore(&(smi_info->msg_lock), flags);

		spin_lock_irqsave(&(smi_info->si_lock), flags);
		result = smi_event_handler(smi_info, 0);
		while (result != SI_SM_IDLE) {
			udelay(SI_SHORT_TIMEOUT_USEC);
			result = smi_event_handler(smi_info,
						   SI_SHORT_TIMEOUT_USEC);
		}
		spin_unlock_irqrestore(&(smi_info->si_lock), flags);
		return;
	} else {
		if (priority > 0) {
			list_add_tail(&(msg->link), &(smi_info->hp_xmit_msgs));
		} else {
			list_add_tail(&(msg->link), &(smi_info->xmit_msgs));
		}
	}
	spin_unlock_irqrestore(&(smi_info->msg_lock), flags);

	spin_lock_irqsave(&(smi_info->si_lock), flags);
	if ((smi_info->si_state == SI_NORMAL)
	    && (smi_info->curr_msg == NULL))
	{
		/* Interface idle: start this message immediately and
		   shorten the poll timer so it makes progress. */
		start_next_msg(smi_info);
		si_restart_short_timer(smi_info);
	}
	spin_unlock_irqrestore(&(smi_info->si_lock), flags);
}
/* Switch the interface in/out of synchronous (polled-to-completion)
   mode; when enabling, first drain any in-progress transactions. */
static void set_run_to_completion(void *send_info, int i_run_to_completion)
{
	struct smi_info *smi_info = send_info;
	enum si_sm_result result;
	unsigned long flags;

	spin_lock_irqsave(&(smi_info->si_lock), flags);

	smi_info->run_to_completion = i_run_to_completion;
	if (i_run_to_completion) {
		result = smi_event_handler(smi_info, 0);
		while (result != SI_SM_IDLE) {
			udelay(SI_SHORT_TIMEOUT_USEC);
			result = smi_event_handler(smi_info,
						   SI_SHORT_TIMEOUT_USEC);
		}
	}

	spin_unlock_irqrestore(&(smi_info->si_lock), flags);
}
/* Upper layer asks us to fetch events; just set the flag, the event
   handler polls it when the interface next goes idle. */
static void request_events(void *send_info)
{
	struct smi_info *smi_info = send_info;

	atomic_set(&smi_info->req_events, 1);
}
722 static int initialized = 0;
/* Must be called with interrupts off and with the si_lock held.
   With high-res timers, re-arm the poll timer to fire in
   SI_SHORT_TIMEOUT_USEC instead of waiting for the long timeout;
   without them this is a no-op (the timer already fires next jiffy
   when a short delay is requested). */
static void si_restart_short_timer(struct smi_info *smi_info)
{
#if defined(CONFIG_HIGH_RES_TIMERS)
	unsigned long flags;
	unsigned long jiffies_now;

	if (del_timer(&(smi_info->si_timer))) {
		/* If we don't delete the timer, then it will go off
		   immediately, anyway.  So we only process if we
		   actually delete the timer. */

		/* We already have irqsave on, so no need for it
		   here. */
		read_lock(&xtime_lock);
		jiffies_now = jiffies;
		smi_info->si_timer.expires = jiffies_now;
		smi_info->si_timer.sub_expires = get_arch_cycles(jiffies_now);
		read_unlock(&xtime_lock);

		add_usec_to_timer(&smi_info->si_timer, SI_SHORT_TIMEOUT_USEC);

		add_timer(&(smi_info->si_timer));
		spin_lock_irqsave(&smi_info->count_lock, flags);
		smi_info->timeout_restarts++;
		spin_unlock_irqrestore(&smi_info->count_lock, flags);
	}
#endif
}
/* Periodic poll timer.  Feeds elapsed time into the state machine and
   re-arms itself: long timeout (SI_TIMEOUT_JIFFIES) normally, short
   timeout when the state machine asked for a delay, and long-only
   when running on interrupts. */
static void smi_timeout(unsigned long data)
{
	struct smi_info *smi_info = (struct smi_info *) data;
	enum si_sm_result smi_result;
	unsigned long     flags;
	unsigned long     jiffies_now;
	unsigned long     time_diff;
#ifdef DEBUG_TIMING
	struct timeval t;
#endif

	if (smi_info->stop_operation) {
		/* Shutting down: acknowledge and do not re-arm. */
		smi_info->timer_stopped = 1;
		return;
	}

	spin_lock_irqsave(&(smi_info->si_lock), flags);
#ifdef DEBUG_TIMING
	do_gettimeofday(&t);
	printk("**Timer: %d.%9.9d\n", t.tv_sec, t.tv_usec);
#endif
	jiffies_now = jiffies;
	time_diff = ((jiffies_now - smi_info->last_timeout_jiffies)
		     * SI_USEC_PER_JIFFY);
	smi_result = smi_event_handler(smi_info, time_diff);

	spin_unlock_irqrestore(&(smi_info->si_lock), flags);

	smi_info->last_timeout_jiffies = jiffies_now;

	if ((smi_info->irq) && (! smi_info->interrupt_disabled)) {
		/* Running with interrupts, only do long timeouts. */
		smi_info->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES;
		spin_lock_irqsave(&smi_info->count_lock, flags);
		smi_info->long_timeouts++;
		spin_unlock_irqrestore(&smi_info->count_lock, flags);
		goto do_add_timer;
	}

	/* If the state machine asks for a short delay, then shorten
	   the timer timeout. */
	if (smi_result == SI_SM_CALL_WITH_DELAY) {
		spin_lock_irqsave(&smi_info->count_lock, flags);
		smi_info->short_timeouts++;
		spin_unlock_irqrestore(&smi_info->count_lock, flags);
#if defined(CONFIG_HIGH_RES_TIMERS)
		read_lock(&xtime_lock);
		smi_info->si_timer.expires = jiffies;
		smi_info->si_timer.sub_expires
			= get_arch_cycles(smi_info->si_timer.expires);
		read_unlock(&xtime_lock);
		add_usec_to_timer(&smi_info->si_timer, SI_SHORT_TIMEOUT_USEC);
#else
		smi_info->si_timer.expires = jiffies + 1;
#endif
	} else {
		spin_lock_irqsave(&smi_info->count_lock, flags);
		smi_info->long_timeouts++;
		spin_unlock_irqrestore(&smi_info->count_lock, flags);
		smi_info->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES;
#if defined(CONFIG_HIGH_RES_TIMERS)
		smi_info->si_timer.sub_expires = 0;
#endif
	}

 do_add_timer:
	add_timer(&(smi_info->si_timer));
}
/* Hardware interrupt handler: count it and run the state machine,
   unless the driver is shutting down. */
static irqreturn_t si_irq_handler(int irq, void *data, struct pt_regs *regs)
{
	struct smi_info *smi_info = data;
	unsigned long   flags;
#ifdef DEBUG_TIMING
	struct timeval  t;
#endif

	spin_lock_irqsave(&(smi_info->si_lock), flags);

	spin_lock(&smi_info->count_lock);
	smi_info->interrupts++;
	spin_unlock(&smi_info->count_lock);

	if (smi_info->stop_operation)
		goto out;

#ifdef DEBUG_TIMING
	do_gettimeofday(&t);
	printk("**Interrupt: %d.%9.9d\n", t.tv_sec, t.tv_usec);
#endif
	smi_event_handler(smi_info, 0);
 out:
	spin_unlock_irqrestore(&(smi_info->si_lock), flags);
	return IRQ_HANDLED;
}
/* Driver callbacks registered with the IPMI message handler core. */
static struct ipmi_smi_handlers handlers =
{
	.owner                  = THIS_MODULE,
	.sender			= sender,
	.request_events		= request_events,
	.set_run_to_completion  = set_run_to_completion,
};
857 /* There can be 4 IO ports passed in (with or without IRQs), 4 addresses,
858 a default IO port, and 1 ACPI/SPMI address. That sets SI_MAX_DRIVERS */
860 #define SI_MAX_PARMS 4
861 #define SI_MAX_DRIVERS ((SI_MAX_PARMS * 2) + 2)
862 static struct smi_info *smi_infos[SI_MAX_DRIVERS] =
863 { NULL, NULL, NULL, NULL };
865 #define DEVICE_NAME "ipmi_si"
867 #define DEFAULT_KCS_IO_PORT 0xca2
868 #define DEFAULT_SMIC_IO_PORT 0xca9
869 #define DEFAULT_BT_IO_PORT 0xe4
871 static int si_trydefaults = 1;
872 static char *si_type[SI_MAX_PARMS] = { NULL, NULL, NULL, NULL };
873 #define MAX_SI_TYPE_STR 30
874 static char si_type_str[MAX_SI_TYPE_STR];
875 static unsigned long addrs[SI_MAX_PARMS] = { 0, 0, 0, 0 };
876 static int num_addrs = 0;
877 static unsigned int ports[SI_MAX_PARMS] = { 0, 0, 0, 0 };
878 static int num_ports = 0;
879 static int irqs[SI_MAX_PARMS] = { 0, 0, 0, 0 };
880 static int num_irqs = 0;
883 module_param_named(trydefaults, si_trydefaults, bool, 0);
884 MODULE_PARM_DESC(trydefaults, "Setting this to 'false' will disable the"
885 " default scan of the KCS and SMIC interface at the standard"
887 module_param_string(type, si_type_str, MAX_SI_TYPE_STR, 0);
888 MODULE_PARM_DESC(type, "Defines the type of each interface, each"
889 " interface separated by commas. The types are 'kcs',"
890 " 'smic', and 'bt'. For example si_type=kcs,bt will set"
891 " the first interface to kcs and the second to bt");
892 module_param_array(addrs, long, num_addrs, 0);
893 MODULE_PARM_DESC(addrs, "Sets the memory address of each interface, the"
894 " addresses separated by commas. Only use if an interface"
895 " is in memory. Otherwise, set it to zero or leave"
897 module_param_array(ports, int, num_ports, 0);
898 MODULE_PARM_DESC(ports, "Sets the port address of each interface, the"
899 " addresses separated by commas. Only use if an interface"
900 " is a port. Otherwise, set it to zero or leave"
902 module_param_array(irqs, int, num_irqs, 0);
903 MODULE_PARM_DESC(irqs, "Sets the interrupt of each interface, the"
904 " addresses separated by commas. Only use if an interface"
905 " has an interrupt. Otherwise, set it to zero or leave"
908 #define IPMI_MEM_ADDR_SPACE 1
909 #define IPMI_IO_ADDR_SPACE 2
/* FIX: the guard was misspelled "CONFIG_ACPI_INTERPETER", a symbol no
   Kconfig ever defines.  try_init_acpi() below is compiled under the
   correctly spelled CONFIG_ACPI_INTERPRETER and calls this function,
   so an ACPI-only configuration (no X86, no PCI) failed to link. */
#if defined(CONFIG_ACPI_INTERPRETER) || defined(CONFIG_X86) || defined(CONFIG_PCI)
/* Return 1 if no already-configured interface (other than the one in
   slot "intf"; pass -1 to check against all) already claims base_addr
   in the given address space, 0 if it duplicates an existing one. */
static int is_new_interface(int intf, u8 addr_space, unsigned long base_addr)
{
	int i;

	for (i = 0; i < SI_MAX_PARMS; ++i) {
		/* Don't check our address. */
		if (i == intf)
			continue;
		if (si_type[i] != NULL) {
			if ((addr_space == IPMI_MEM_ADDR_SPACE &&
			     base_addr == addrs[i]) ||
			    (addr_space == IPMI_IO_ADDR_SPACE &&
			     base_addr == ports[i]))
				return 0;
		} else
			break;
	}

	return 1;
}
#endif
/* Claim the interface's IRQ with si_irq_handler; on failure fall back
   to polled mode by clearing info->irq. */
static int std_irq_setup(struct smi_info *info)
{
	int rv;

	if (!info->irq)
		return 0;

	rv = request_irq(info->irq,
			 si_irq_handler,
			 SA_INTERRUPT,
			 DEVICE_NAME,
			 info);
	if (rv) {
		printk(KERN_WARNING
		       "ipmi_si: %s unable to claim interrupt %d,"
		       " running polled\n",
		       DEVICE_NAME, info->irq);
		info->irq = 0;
	} else {
		printk("  Using irq %d\n", info->irq);
	}

	return rv;
}
/* Release the IRQ claimed by std_irq_setup(), if any. */
static void std_irq_cleanup(struct smi_info *info)
{
	if (!info->irq)
		return;

	free_irq(info->irq, info);
}
/* I/O-port accessor: io->info points at the stored base port number. */
static unsigned char port_inb(struct si_sm_io *io, unsigned int offset)
{
	unsigned int *addr = io->info;

	return inb((*addr)+offset);
}
/* I/O-port write accessor, counterpart of port_inb(). */
static void port_outb(struct si_sm_io *io, unsigned int offset,
		      unsigned char b)
{
	unsigned int *addr = io->info;

	outb(b, (*addr)+offset);
}
/* Reserve the interface's I/O-port region.  io.info must point at a
   non-zero port number. */
static int port_setup(struct smi_info *info)
{
	unsigned int *addr = info->io.info;

	if (!addr || (!*addr))
		return -ENODEV;

	if (request_region(*addr, info->io_size, DEVICE_NAME) == NULL)
		return -EIO;
	return 0;
}
/* Release the I/O-port region and free the smi_info allocated by
   try_init_port()/try_init_acpi(). */
static void port_cleanup(struct smi_info *info)
{
	unsigned int *addr = info->io.info;

	if (addr && (*addr))
		release_region (*addr, info->io_size);
	kfree(info);
}
/* Build an smi_info for a user-specified (or default) I/O port,
   wiring in the port accessors.  Returns 0 with *new_info set on
   success, -ENODEV if no port configured or it duplicates another
   interface, -ENOMEM on allocation failure. */
static int try_init_port(int intf_num, struct smi_info **new_info)
{
	struct smi_info *info;

	if (!ports[intf_num])
		return -ENODEV;

	if (!is_new_interface(intf_num, IPMI_IO_ADDR_SPACE,
			      ports[intf_num]))
		return -ENODEV;

	info = kmalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		printk(KERN_ERR "ipmi_si: Could not allocate SI data (1)\n");
		return -ENOMEM;
	}
	memset(info, 0, sizeof(*info));

	info->io_setup = port_setup;
	info->io_cleanup = port_cleanup;
	info->io.inputb = port_inb;
	info->io.outputb = port_outb;
	info->io.info = &(ports[intf_num]);
	info->io.addr = NULL;
	info->irq = 0;
	info->irq_setup = NULL;
	*new_info = info;

	/* Default to KCS if the user did not give a type. */
	if (si_type[intf_num] == NULL)
		si_type[intf_num] = "kcs";

	printk("ipmi_si: Trying \"%s\" at I/O port 0x%x\n",
	       si_type[intf_num], ports[intf_num]);
	return 0;
}
/* Memory-mapped accessor: io->addr is the ioremap()ed base. */
static unsigned char mem_inb(struct si_sm_io *io, unsigned int offset)
{
	return readb((io->addr)+offset);
}
/* Memory-mapped write accessor, counterpart of mem_inb(). */
static void mem_outb(struct si_sm_io *io, unsigned int offset,
		     unsigned char b)
{
	writeb(b, (io->addr)+offset);
}
/* Reserve and map the interface's memory region.  io.info must point
   at the stored (non-zero) physical base address. */
static int mem_setup(struct smi_info *info)
{
	unsigned long *addr = info->io.info;

	if (!addr || (!*addr))
		return -ENODEV;

	if (request_mem_region(*addr, info->io_size, DEVICE_NAME) == NULL)
		return -EIO;

	info->io.addr = ioremap(*addr, info->io_size);
	if (info->io.addr == NULL) {
		release_mem_region(*addr, info->io_size);
		return -EIO;
	}
	return 0;
}
/* Unmap and release the memory region, then free the smi_info. */
static void mem_cleanup(struct smi_info *info)
{
	unsigned long *addr = info->io.info;

	if (info->io.addr) {
		iounmap(info->io.addr);
		release_mem_region(*addr, info->io_size);
	}
	kfree(info);
}
1080 static int try_init_mem(int intf_num, struct smi_info **new_info)
1082 struct smi_info *info;
1084 if (!addrs[intf_num])
1087 if (!is_new_interface(intf_num, IPMI_MEM_ADDR_SPACE,
1091 info = kmalloc(sizeof(*info), GFP_KERNEL);
1093 printk(KERN_ERR "ipmi_si: Could not allocate SI data (2)\n");
1096 memset(info, 0, sizeof(*info));
1098 info->io_setup = mem_setup;
1099 info->io_cleanup = mem_cleanup;
1100 info->io.inputb = mem_inb;
1101 info->io.outputb = mem_outb;
1102 info->io.info = (void *) addrs[intf_num];
1103 info->io.addr = NULL;
1105 info->irq_setup = NULL;
1108 if (si_type[intf_num] == NULL)
1109 si_type[intf_num] = "kcs";
1111 printk("ipmi_si: Trying \"%s\" at memory address 0x%lx\n",
1112 si_type[intf_num], addrs[intf_num]);
1117 #ifdef CONFIG_ACPI_INTERPRETER
1119 #include <linux/acpi.h>
1121 /* Once we get an ACPI failure, we don't try any more, because we go
1122 through the tables sequentially. Once we don't find a table, there
1124 static int acpi_failure = 0;
1126 /* For GPE-type interrupts. */
/* For GPE-type interrupts: ACPI GPE callback, mirrors
   si_irq_handler() but without an irqreturn value. */
void ipmi_acpi_gpe(void *context)
{
	struct smi_info *smi_info = context;
	unsigned long flags;
#ifdef DEBUG_TIMING
	struct timeval t;
#endif

	spin_lock_irqsave(&(smi_info->si_lock), flags);

	spin_lock(&smi_info->count_lock);
	smi_info->interrupts++;
	spin_unlock(&smi_info->count_lock);

	if (smi_info->stop_operation)
		goto out;

#ifdef DEBUG_TIMING
	do_gettimeofday(&t);
	printk("**ACPI_GPE: %d.%9.9d\n", t.tv_sec, t.tv_usec);
#endif
	smi_event_handler(smi_info, 0);
 out:
	spin_unlock_irqrestore(&(smi_info->si_lock), flags);
}
/* Install ipmi_acpi_gpe() as the handler for the GPE number stored in
   info->irq; on failure fall back to polled mode. */
static int acpi_gpe_irq_setup(struct smi_info *info)
{
	acpi_status status;

	if (!info->irq)
		return 0;

	/* FIXME - is level triggered right? */
	status = acpi_install_gpe_handler(NULL,
					  info->irq,
					  ACPI_GPE_LEVEL_TRIGGERED,
					  ipmi_acpi_gpe,
					  info);
	if (status != AE_OK) {
		printk(KERN_WARNING
		       "ipmi_si: %s unable to claim ACPI GPE %d,"
		       " running polled\n",
		       DEVICE_NAME, info->irq);
		info->irq = 0;
		return -EINVAL;
	} else {
		printk(" Using ACPI GPE %d\n", info->irq);
		return 0;
	}
}
/* Remove the GPE handler installed by acpi_gpe_irq_setup(). */
static void acpi_gpe_irq_cleanup(struct smi_info *info)
{
	if (!info->irq)
		return;

	acpi_remove_gpe_handler(NULL, info->irq, ipmi_acpi_gpe);
}
1190 * http://h21007.www2.hp.com/dspp/files/unprotected/devresource/Docs/TechPapers/IA64/hpspmi.pdf
1201 s8 CreatorRevision[4];
1204 s16 SpecificationRevision;
1207 * Bit 0 - SCI interrupt supported
1208 * Bit 1 - I/O APIC/SAPIC
1212 /* If bit 0 of InterruptType is set, then this is the SCI
1213 interrupt in the GPEx_STS register. */
1218 /* If bit 1 of InterruptType is set, then this is the I/O
1219 APIC/SAPIC interrupt. */
1220 u32 GlobalSystemInterrupt;
1222 /* The actual register address. */
1223 struct acpi_generic_address addr;
1227 s8 spmi_id[1]; /* A '\0' terminated array starts here. */
/* Probe the ACPI SPMI table for interface description: SI type,
   interrupt (GPE or APIC/SAPIC), and register address (memory or
   I/O).  Returns 0 with *new_info set, negative errno otherwise.
   On the first ACPI failure, acpi_failure latches and all further
   probes bail early (tables are scanned sequentially). */
static int try_init_acpi(int intf_num, struct smi_info **new_info)
{
	struct smi_info  *info;
	acpi_status      status;
	struct SPMITable *spmi;
	char             *io_type;
	u8               addr_space;

	if (acpi_failure)
		return -ENODEV;

	status = acpi_get_firmware_table("SPMI", intf_num+1,
					 ACPI_LOGICAL_ADDRESSING,
					 (struct acpi_table_header **) &spmi);
	if (status != AE_OK) {
		acpi_failure = 1;
		return -ENODEV;
	}

	if (spmi->IPMIlegacy != 1) {
		printk(KERN_INFO "IPMI: Bad SPMI legacy %d\n", spmi->IPMIlegacy);
		return -ENODEV;
	}

	if (spmi->addr.address_space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
		addr_space = IPMI_MEM_ADDR_SPACE;
	else
		addr_space = IPMI_IO_ADDR_SPACE;
	if (!is_new_interface(-1, addr_space, spmi->addr.address))
		return -ENODEV;

	/* Figure out the interface type. */
	switch (spmi->InterfaceType)
	{
	case 1:	/* KCS */
		si_type[intf_num] = "kcs";
		break;

	case 2:	/* SMIC */
		si_type[intf_num] = "smic";
		break;

	case 3:	/* BT */
		si_type[intf_num] = "bt";
		break;

	default:
		printk(KERN_INFO "ipmi_si: Unknown ACPI/SPMI SI type %d\n",
		       spmi->InterfaceType);
		return -EIO;
	}

	info = kmalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		printk(KERN_ERR "ipmi_si: Could not allocate SI data (3)\n");
		return -ENOMEM;
	}
	memset(info, 0, sizeof(*info));

	if (spmi->InterruptType & 1) {
		/* We've got a GPE interrupt. */
		info->irq = spmi->GPE;
		info->irq_setup = acpi_gpe_irq_setup;
		info->irq_cleanup = acpi_gpe_irq_cleanup;
	} else if (spmi->InterruptType & 2) {
		/* We've got an APIC/SAPIC interrupt. */
		info->irq = spmi->GlobalSystemInterrupt;
		info->irq_setup = std_irq_setup;
		info->irq_cleanup = std_irq_cleanup;
	} else {
		/* Use the default interrupt setting. */
		info->irq = 0;
		info->irq_setup = NULL;
	}

	if (spmi->addr.address_space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
		io_type = "memory";
		info->io_setup = mem_setup;
		info->io_cleanup = mem_cleanup;
		addrs[intf_num] = spmi->addr.address;
		info->io.inputb = mem_inb;
		info->io.outputb = mem_outb;
		info->io.info = &(addrs[intf_num]);
	} else if (spmi->addr.address_space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
		io_type = "I/O";
		info->io_setup = port_setup;
		info->io_cleanup = port_cleanup;
		ports[intf_num] = spmi->addr.address;
		info->io.inputb = port_inb;
		info->io.outputb = port_outb;
		info->io.info = &(ports[intf_num]);
	} else {
		kfree(info);
		printk("ipmi_si: Unknown ACPI I/O Address type\n");
		return -EIO;
	}

	*new_info = info;

	printk("ipmi_si: ACPI/SPMI specifies \"%s\" %s SI @ 0x%lx\n",
	       si_type[intf_num], io_type, (unsigned long) spmi->addr.address);
	return 0;
}
1337 typedef struct dmi_ipmi_data
1341 unsigned long base_addr;
1345 typedef struct dmi_header
/* Decode an SMBIOS type-38 (IPMI Device Information) record into
   ipmi_data.  Bit 0 of the base address field selects I/O (set) vs
   memory (clear) space per the SMBIOS spec.  Returns 0 on success,
   -1 (with ipmi_data zeroed) if the interface duplicates one already
   configured. */
static int decode_dmi(dmi_header_t *dm, dmi_ipmi_data_t *ipmi_data)
{
	u8            *data = (u8 *)dm;
	unsigned long base_addr;

	ipmi_data->type = data[0x04];

	memcpy(&base_addr,&data[0x08],sizeof(unsigned long));
	if (base_addr & 1) {
		/* I/O: strip the space-indicator bit. */
		base_addr &= 0xFFFE;
		ipmi_data->addr_space = IPMI_IO_ADDR_SPACE;
	}
	else {
		/* Memory */
		ipmi_data->addr_space = IPMI_MEM_ADDR_SPACE;
	}

	ipmi_data->base_addr = base_addr;
	ipmi_data->irq = data[0x11];

	if (is_new_interface(-1, ipmi_data->addr_space,ipmi_data->base_addr))
		return 0;

	memset(ipmi_data,0,sizeof(dmi_ipmi_data_t));

	return -1;
}
/*
 * Walk the DMI structure table at physical address 'base' (total
 * length 'len', 'num' structures), looking for a type-38 IPMI record
 * and decoding it into *ipmi_data.  NOTE(review): interior lines are
 * missing from this extract (loop body and cleanup not fully visible).
 */
1381 static int dmi_table(u32 base, int len, int num,
1382 dmi_ipmi_data_t *ipmi_data)
1385 struct dmi_header *dm;
/* Map the physical DMI table into kernel virtual address space. */
1390 buf = ioremap(base, len);
/* Iterate over structures, bounded by both the count and the length. */
1396 while(i<num && (data - buf) < len)
1398 dm=(dmi_header_t *)data;
/* Bail out if this structure's formatted area would overrun the table. */
1400 if((data-buf+dm->length) >= len)
/* Type 38 is the SMBIOS "IPMI Device Information" structure. */
1403 if (dm->type == 38) {
1404 if (decode_dmi(dm, ipmi_data) == 0) {
/* Skip the trailing unformatted string area: ends at a double NUL. */
1411 while((data-buf) < len && (*data || data[1]))
/* Verify the checksum of a DMI entry-point candidate ("_DMI_" block).
   Body not visible in this extract. */
1421 inline static int dmi_checksum(u8 *buf)
/*
 * Scan legacy BIOS memory (presumably the conventional 0xF0000 region
 * — confirm against the full source) for the "_DMI_" entry-point
 * anchor; on a checksum match, parse the table it describes for IPMI
 * data.  NOTE(review): interior lines missing from this extract.
 */
1431 static int dmi_iterator(dmi_ipmi_data_t *ipmi_data)
1436 #ifdef CONFIG_SIMNOW
/* Copy the 15-byte entry-point candidate out of ISA memory space. */
1442 isa_memcpy_fromio(buf, fp, 15);
1443 if(memcmp(buf, "_DMI_", 5)==0 && dmi_checksum(buf))
/* Entry-point fields are little-endian: structure count, table
   length, and the 32-bit physical table base. */
1445 u16 num=buf[13]<<8|buf[12];
1446 u16 len=buf[7]<<8|buf[6];
1447 u32 base=buf[11]<<24|buf[10]<<16|buf[9]<<8|buf[8];
1449 if(dmi_table(base, len, num, ipmi_data) == 0)
/*
 * Try to configure interface 'intf_num' from SMBIOS/DMI data.  On
 * success *new_info points at a freshly kmalloc'ed, zeroed smi_info
 * wired up with the right I/O accessors; error paths are partially
 * cut from this extract.
 */
1458 static int try_init_smbios(int intf_num, struct smi_info **new_info)
1460 struct smi_info *info;
1461 dmi_ipmi_data_t ipmi_data;
1465 status = dmi_iterator(&ipmi_data);
/* Map the SMBIOS interface-type code onto our si_type strings. */
1470 switch(ipmi_data.type) {
1471 case 0x01: /* KCS */
1472 si_type[intf_num] = "kcs";
1474 case 0x02: /* SMIC */
1475 si_type[intf_num] = "smic";
1478 si_type[intf_num] = "bt";
1481 printk("ipmi_si: Unknown SMBIOS SI type.\n");
1485 info = kmalloc(sizeof(*info), GFP_KERNEL);
1487 printk(KERN_ERR "ipmi_si: Could not allocate SI data (4)\n");
1490 memset(info, 0, sizeof(*info));
/* addr_space 1 = memory mapped: use the mem_* accessors. */
1492 if (ipmi_data.addr_space == 1) {
1494 info->io_setup = mem_setup;
1495 info->io_cleanup = mem_cleanup;
1496 addrs[intf_num] = ipmi_data.base_addr;
1497 info->io.inputb = mem_inb;
1498 info->io.outputb = mem_outb;
1499 info->io.info = &(addrs[intf_num]);
/* addr_space 2 = port I/O: use the port_* accessors. */
1500 } else if (ipmi_data.addr_space == 2) {
1502 info->io_setup = port_setup;
1503 info->io_cleanup = port_cleanup;
1504 ports[intf_num] = ipmi_data.base_addr;
1505 info->io.inputb = port_inb;
1506 info->io.outputb = port_outb;
1507 info->io.info = &(ports[intf_num]);
1510 printk("ipmi_si: Unknown SMBIOS I/O Address type.\n");
/* Remember the SMBIOS-supplied IRQ for later irq_setup. */
1514 irqs[intf_num] = ipmi_data.irq;
1518 printk("ipmi_si: Found SMBIOS-specified state machine at %s"
1520 io_type, (unsigned long)ipmi_data.base_addr);
1523 #endif /* CONFIG_X86 */
/* PCI identifiers for the HP eRMC/SMIC management controller probe:
   class code, HP vendor ID, MMC device ID, and the config-space word
   holding the controller's base address. */
1527 #define PCI_ERMC_CLASSCODE 0x0C0700
1528 #define PCI_HP_VENDOR_ID 0x103C
1529 #define PCI_MMC_DEVICE_ID 0x121A
1530 #define PCI_MMC_ADDR_CW 0x10
1532 /* Avoid more than one attempt to probe pci smic. */
1533 static int pci_smic_checked = 0;
/*
 * Probe PCI for an HP eRMC/SMIC management controller and, if found,
 * fill *new_info for interface 'intf_num'.  Only ever probes once
 * (guarded by pci_smic_checked).  NOTE(review): interior lines are
 * missing from this extract; error-return paths are not fully visible.
 */
1535 static int find_pci_smic(int intf_num, struct smi_info **new_info)
1537 struct smi_info *info;
1539 struct pci_dev *pci_dev = NULL;
1543 if (pci_smic_checked)
1546 pci_smic_checked = 1;
/* First look for the exact HP vendor/device pair... */
1548 if ((pci_dev = pci_find_device(PCI_HP_VENDOR_ID, PCI_MMC_DEVICE_ID,
/* ...then fall back to the eRMC class code with an HP subsystem. */
1551 else if ((pci_dev = pci_find_class(PCI_ERMC_CLASSCODE, NULL)) &&
1552 pci_dev->subsystem_vendor == PCI_HP_VENDOR_ID)
/* The base address lives in a config-space word, not a normal BAR. */
1557 error = pci_read_config_word(pci_dev, PCI_MMC_ADDR_CW, &base_addr);
1561 "ipmi_si: pci_read_config_word() failed (%d).\n",
1566 /* Bit 0: 1 specifies programmed I/O, 0 specifies memory mapped I/O */
1567 if (!(base_addr & 0x0001))
1570 "ipmi_si: memory mapped I/O not supported for PCI"
/* Strip the I/O-space indicator bit to get the port base. */
1575 base_addr &= 0xFFFE;
1577 /* Data register starts at base address + 1 in eRMC */
/* Skip this device if the address was already configured. */
1580 if (!is_new_interface(-1, IPMI_IO_ADDR_SPACE, base_addr))
1583 info = kmalloc(sizeof(*info), GFP_KERNEL);
1585 printk(KERN_ERR "ipmi_si: Could not allocate SI data (5)\n");
1588 memset(info, 0, sizeof(*info));
/* Wire up port-I/O accessors for the discovered base address. */
1590 info->io_setup = port_setup;
1591 info->io_cleanup = port_cleanup;
1592 ports[intf_num] = base_addr;
1593 info->io.inputb = port_inb;
1594 info->io.outputb = port_outb;
1595 info->io.info = &(ports[intf_num]);
1599 irqs[intf_num] = pci_dev->irq;
1600 si_type[intf_num] = "smic";
1602 printk("ipmi_si: Found PCI SMIC at I/O address 0x%lx\n",
1603 (long unsigned int) base_addr);
1607 #endif /* CONFIG_PCI */
/* Try the various plug-and-play discovery methods; currently only the
   PCI SMIC probe is implemented. */
1609 static int try_init_plug_and_play(int intf_num, struct smi_info **new_info)
1612 if (find_pci_smic(intf_num, new_info)==0)
1615 /* Include other methods here. */
/*
 * Issue an IPMI "Get Device ID" command synchronously through the raw
 * state machine (no interrupts or timers are running yet) to verify a
 * BMC is really present, and record its version information in
 * smi_info.  NOTE(review): interior lines (loop structure, error
 * returns, kfree of resp) are missing from this extract.
 */
1621 static int try_get_dev_id(struct smi_info *smi_info)
1623 unsigned char msg[2];
1624 unsigned char *resp;
1625 unsigned long resp_len;
1626 enum si_sm_result smi_result;
1629 resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL)ONTEXT;
1633 /* Do a Get Device ID command, since it comes back with some
/* Request = NetFn (App, request) in the top 6 bits, then the command. */
1635 msg[0] = IPMI_NETFN_APP_REQUEST << 2;
1636 msg[1] = IPMI_GET_DEVICE_ID_CMD;
1637 smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
1639 smi_result = smi_info->handlers->event(smi_info->si_sm, 0);
/* Crank the state machine by hand, sleeping one jiffy when asked. */
1642 if (smi_result == SI_SM_CALL_WITH_DELAY) {
1643 set_current_state(TASK_UNINTERRUPTIBLE);
1644 schedule_timeout(1);
1645 smi_result = smi_info->handlers->event(
1646 smi_info->si_sm, 100);
1648 else if (smi_result == SI_SM_CALL_WITHOUT_DELAY)
1650 smi_result = smi_info->handlers->event(
1651 smi_info->si_sm, 0);
1656 if (smi_result == SI_SM_HOSED) {
1657 /* We couldn't get the state machine to run, so whatever's at
1658 the port is probably not an IPMI SMI interface. */
1663 /* Otherwise, we got some data. */
1664 resp_len = smi_info->handlers->get_result(smi_info->si_sm,
1665 resp, IPMI_MAX_MSG_LENGTH);
1667 /* That's odd, it should be longer. */
/* resp[1] echoes the command, resp[2] is the completion code (0=ok). */
1672 if ((resp[1] != IPMI_GET_DEVICE_ID_CMD) || (resp[2] != 0)) {
1673 /* That's odd, it shouldn't be able to fail. */
1678 /* Record info from the get device id, in case we need it. */
1679 smi_info->ipmi_si_dev_rev = resp[4] & 0xf;
1680 smi_info->ipmi_si_fw_rev_major = resp[5] & 0x7f;
1681 smi_info->ipmi_si_fw_rev_minor = resp[6];
/* IPMI version byte: low nibble = major, high nibble = minor. */
1682 smi_info->ipmi_version_major = resp[7] & 0xf;
1683 smi_info->ipmi_version_minor = resp[7] >> 4;
/*
 * /proc read handler for the interface "type" file: prints "kcs",
 * "smic" or "bt" according to the interface's si_type.
 */
1690 static int type_file_read_proc(char *page, char **start, off_t off,
1691 int count, int *eof, void *data)
1693 char *out = (char *) page;
1694 struct smi_info *smi = data;
1696 switch (smi->si_type) {
1698 return sprintf(out, "kcs\n");
1700 return sprintf(out, "smic\n");
1702 return sprintf(out, "bt\n");
/*
 * /proc read handler for the "si_stats" file: dumps the interface's
 * counters one per line.  Returns the number of bytes written into
 * the page buffer.
 */
1708 static int stat_file_read_proc(char *page, char **start, off_t off,
1709 int count, int *eof, void *data)
1711 char *out = (char *) page;
1712 struct smi_info *smi = data;
/* Interrupts count as enabled only if an IRQ exists and is not
   currently disabled. */
1714 out += sprintf(out, "interrupts_enabled: %d\n",
1715 smi->irq && !smi->interrupt_disabled);
1716 out += sprintf(out, "short_timeouts: %ld\n",
1717 smi->short_timeouts);
1718 out += sprintf(out, "long_timeouts: %ld\n",
1719 smi->long_timeouts);
1720 out += sprintf(out, "timeout_restarts: %ld\n",
1721 smi->timeout_restarts);
1722 out += sprintf(out, "idles: %ld\n",
1724 out += sprintf(out, "interrupts: %ld\n",
1726 out += sprintf(out, "attentions: %ld\n",
1728 out += sprintf(out, "flag_fetches: %ld\n",
1730 out += sprintf(out, "hosed_count: %ld\n",
1732 out += sprintf(out, "complete_transactions: %ld\n",
1733 smi->complete_transactions);
1734 out += sprintf(out, "events: %ld\n",
1736 out += sprintf(out, "watchdog_pretimeouts: %ld\n",
1737 smi->watchdog_pretimeouts);
1738 out += sprintf(out, "incoming_messages: %ld\n",
1739 smi->incoming_messages);
1741 return (out - ((char *) page));
1744 /* Returns 0 if initialized, or negative on an error. */
/*
 * Bring up one system interface end-to-end: discover it (explicit
 * mem/port parameters, then ACPI, SMBIOS, and plug-and-play when
 * defaults are allowed), build its state machine, probe the BMC,
 * register with the IPMI core, create /proc entries, and start the
 * driving timer.  NOTE(review): error-unwind labels and several
 * interior lines are missing from this extract.
 */
1745 static int init_one_smi(int intf_num, struct smi_info **smi)
1748 struct smi_info *new_smi;
/* Discovery, in priority order; each try_* fills new_smi on success. */
1751 rv = try_init_mem(intf_num, &new_smi);
1753 rv = try_init_port(intf_num, &new_smi);
1754 #ifdef CONFIG_ACPI_INTERPRETER
1755 if ((rv) && (si_trydefaults)) {
1756 rv = try_init_acpi(intf_num, &new_smi);
1760 if ((rv) && (si_trydefaults)) {
1761 rv = try_init_smbios(intf_num, &new_smi);
1764 if ((rv) && (si_trydefaults)) {
1765 rv = try_init_plug_and_play(intf_num, &new_smi);
1772 /* So we know not to free it unless we have allocated one. */
1773 new_smi->intf = NULL;
1774 new_smi->si_sm = NULL;
1775 new_smi->handlers = 0;
/* If discovery didn't supply IRQ handling, fall back to the standard
   request_irq-based setup with the module-parameter IRQ. */
1777 if (!new_smi->irq_setup) {
1778 new_smi->irq = irqs[intf_num];
1779 new_smi->irq_setup = std_irq_setup;
1780 new_smi->irq_cleanup = std_irq_cleanup;
1783 /* Default to KCS if no type is specified. */
1784 if (si_type[intf_num] == NULL) {
1786 si_type[intf_num] = "kcs";
1793 /* Set up the state machine to use. */
1794 if (strcmp(si_type[intf_num], "kcs") == 0) {
1795 new_smi->handlers = &kcs_smi_handlers;
1796 new_smi->si_type = SI_KCS;
1797 } else if (strcmp(si_type[intf_num], "smic") == 0) {
1798 new_smi->handlers = &smic_smi_handlers;
1799 new_smi->si_type = SI_SMIC;
1800 } else if (strcmp(si_type[intf_num], "bt") == 0) {
1801 new_smi->handlers = &bt_smi_handlers;
1802 new_smi->si_type = SI_BT;
1804 /* No support for anything else yet. */
1809 /* Allocate the state machine's data and initialize it. */
1810 new_smi->si_sm = kmalloc(new_smi->handlers->size(), GFP_KERNEL);
1811 if (!new_smi->si_sm) {
1812 printk(" Could not allocate state machine memory\n");
1816 new_smi->io_size = new_smi->handlers->init_data(new_smi->si_sm,
1819 /* Now that we know the I/O size, we can set up the I/O. */
1820 rv = new_smi->io_setup(new_smi);
1822 printk(" Could not set up I/O space\n");
1826 spin_lock_init(&(new_smi->si_lock));
1827 spin_lock_init(&(new_smi->msg_lock));
1828 spin_lock_init(&(new_smi->count_lock));
1830 /* Do low-level detection first. */
1831 if (new_smi->handlers->detect(new_smi->si_sm)) {
1836 /* Attempt a get device id command. If it fails, we probably
1837 don't have a SMI here. */
1838 rv = try_get_dev_id(new_smi);
1842 /* Try to claim any interrupts. */
1843 new_smi->irq_setup(new_smi);
/* Initialize message queues and run state before registering. */
1845 INIT_LIST_HEAD(&(new_smi->xmit_msgs));
1846 INIT_LIST_HEAD(&(new_smi->hp_xmit_msgs));
1847 new_smi->curr_msg = NULL;
1848 atomic_set(&new_smi->req_events, 0);
1849 new_smi->run_to_completion = 0;
/* Register with the IPMI message-handling core. */
1851 rv = ipmi_register_smi(&handlers,
1853 new_smi->ipmi_version_major,
1854 new_smi->ipmi_version_minor,
1858 "ipmi_si: Unable to register device: error %d\n",
1863 rv = ipmi_smi_add_proc_entry(new_smi->intf, "type",
1864 type_file_read_proc, NULL,
1865 new_smi, THIS_MODULE);
1868 "ipmi_si: Unable to create proc entry: %d\n",
1873 rv = ipmi_smi_add_proc_entry(new_smi->intf, "si_stats",
1874 stat_file_read_proc, NULL,
1875 new_smi, THIS_MODULE);
1878 "ipmi_si: Unable to create proc entry: %d\n",
1883 start_clear_flags(new_smi);
1885 /* IRQ is defined to be set when non-zero. */
1887 new_smi->si_state = SI_CLEARING_FLAGS_THEN_SET_IRQ;
1889 new_smi->interrupt_disabled = 0;
1890 new_smi->timer_stopped = 0;
1891 new_smi->stop_operation = 0;
/* Arm the periodic timer that drives the state machine. */
1893 init_timer(&(new_smi->si_timer));
1894 new_smi->si_timer.data = (long) new_smi;
1895 new_smi->si_timer.function = smi_timeout;
1896 new_smi->last_timeout_jiffies = jiffies;
1897 new_smi->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES;
1898 add_timer(&(new_smi->si_timer));
1902 printk(" IPMI %s interface initialized\n", si_type[intf_num]);
/* Error unwind: undo registration, IRQ, state machine and I/O in
   reverse order of acquisition (labels not visible in this extract). */
1908 ipmi_unregister_smi(new_smi->intf);
1910 new_smi->irq_cleanup(new_smi);
1911 if (new_smi->si_sm) {
1912 if (new_smi->handlers)
1913 new_smi->handlers->cleanup(new_smi->si_sm);
1914 kfree(new_smi->si_sm);
1916 new_smi->io_cleanup(new_smi);
/*
 * Module init: parse the si_type parameter list, announce the driver
 * and compiled-in state machines, then initialize interface 0 (falling
 * back through the default KCS/SMIC/BT I/O ports when defaults are
 * allowed) and any further explicitly-configured interfaces.
 * NOTE(review): interior lines missing from this extract.
 */
1920 static __init int init_ipmi_si(void)
1931 /* Parse out the si_type string into its components. */
1934 for (i=0; (i<SI_MAX_PARMS) && (*str != '\0'); i++) {
1936 str = strchr(str, ',');
1946 printk(KERN_INFO "IPMI System Interface driver version "
1948 if (kcs_smi_handlers.version)
1949 printk(", KCS version %s", kcs_smi_handlers.version);
1950 if (smic_smi_handlers.version)
1951 printk(", SMIC version %s", smic_smi_handlers.version);
1952 if (bt_smi_handlers.version)
1953 printk(", BT version %s", bt_smi_handlers.version);
1956 rv = init_one_smi(0, &(smi_infos[pos]));
1957 if (rv && !ports[0] && si_trydefaults) {
1958 /* If we are trying defaults and the initial port is
1959 not set, then set it. */
1961 ports[0] = DEFAULT_KCS_IO_PORT;
1962 rv = init_one_smi(0, &(smi_infos[pos]));
1964 /* No KCS - try SMIC */
1965 si_type[0] = "smic";
1966 ports[0] = DEFAULT_SMIC_IO_PORT;
1967 rv = init_one_smi(0, &(smi_infos[pos]));
1970 /* No SMIC - try BT */
1972 ports[0] = DEFAULT_BT_IO_PORT;
1973 rv = init_one_smi(0, &(smi_infos[pos]));
/* Interfaces beyond the first are only tried if explicitly set up. */
1979 for (i=1; i < SI_MAX_PARMS; i++) {
1980 rv = init_one_smi(i, &(smi_infos[pos]));
1985 if (smi_infos[0] == NULL) {
1986 printk("ipmi_si: Unable to find any System Interface(s)\n");
1992 module_init(init_ipmi_si);
/*
 * Tear down one interface: flag shutdown under both locks, release the
 * IRQ, wait out any in-flight handlers and the timer, unregister from
 * the IPMI core, then free the state machine and I/O resources.
 * NOTE(review): interior lines missing from this extract.
 */
1994 void __exit cleanup_one_si(struct smi_info *to_clean)
1997 unsigned long flags;
2002 /* Tell the timer and interrupt handlers that we are shutting
/* Take si_lock then msg_lock so neither the timer nor the message
   path can observe a half-set shutdown state. */
2004 spin_lock_irqsave(&(to_clean->si_lock), flags);
2005 spin_lock(&(to_clean->msg_lock));
2007 to_clean->stop_operation = 1;
2009 to_clean->irq_cleanup(to_clean);
2011 spin_unlock(&(to_clean->msg_lock));
2012 spin_unlock_irqrestore(&(to_clean->si_lock), flags);
2014 /* Wait until we know that we are out of any interrupt
2015 handlers might have been running before we freed the
2017 synchronize_kernel();
2019 /* Wait for the timer to stop. This avoids problems with race
2020 conditions removing the timer here. */
2021 while (!to_clean->timer_stopped) {
2022 set_current_state(TASK_UNINTERRUPTIBLE);
2023 schedule_timeout(1);
2026 rv = ipmi_unregister_smi(to_clean->intf);
2029 "ipmi_si: Unable to unregister device: errno=%d\n",
2033 to_clean->handlers->cleanup(to_clean->si_sm);
2035 kfree(to_clean->si_sm);
2037 to_clean->io_cleanup(to_clean);
/* Module exit: tear down every configured interface slot. */
2040 static __exit void cleanup_ipmi_si(void)
2047 for (i=0; i<SI_MAX_DRIVERS; i++) {
2048 cleanup_one_si(smi_infos[i]);
2051 module_exit(cleanup_ipmi_si);
2053 MODULE_LICENSE("GPL");