drivers/char/ipmi/ipmi_si_intf.c  (linux-2.6, patch-2.6.7-vs1.9.1.12)
1 /*
2  * ipmi_si.c
3  *
4  * The interface to the IPMI driver for the system interfaces (KCS, SMIC,
5  * BT).
6  *
7  * Author: MontaVista Software, Inc.
8  *         Corey Minyard <minyard@mvista.com>
9  *         source@mvista.com
10  *
11  * Copyright 2002 MontaVista Software Inc.
12  *
13  *  This program is free software; you can redistribute it and/or modify it
14  *  under the terms of the GNU General Public License as published by the
15  *  Free Software Foundation; either version 2 of the License, or (at your
16  *  option) any later version.
17  *
18  *
19  *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
20  *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
21  *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
22  *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
23  *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
24  *  BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
25  *  OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
26  *  ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
27  *  TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
28  *  USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29  *
30  *  You should have received a copy of the GNU General Public License along
31  *  with this program; if not, write to the Free Software Foundation, Inc.,
32  *  675 Mass Ave, Cambridge, MA 02139, USA.
33  */
34
35 /*
36  * This file holds the "policy" for the interface to the SMI state
37  * machine.  It does the configuration, handles timers and interrupts,
38  * and drives the real SMI state machine.
39  */
40
41 #include <linux/config.h>
42 #include <linux/module.h>
43 #include <linux/moduleparam.h>
44 #include <asm/system.h>
45 #include <linux/sched.h>
46 #include <linux/timer.h>
47 #include <linux/errno.h>
48 #include <linux/spinlock.h>
49 #include <linux/slab.h>
50 #include <linux/delay.h>
51 #include <linux/list.h>
52 #include <linux/pci.h>
53 #include <linux/ioport.h>
54 #include <linux/irq.h>
55 #ifdef CONFIG_HIGH_RES_TIMERS
56 #include <linux/hrtime.h>
57 # if defined(schedule_next_int)
58 /* Old high-res timer code, do translations. */
59 #  define get_arch_cycles(a) quick_update_jiffies_sub(a)
60 #  define arch_cycles_per_jiffy cycles_per_jiffies
61 # endif
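/* Push a high-res timer's expiry forward by v microseconds, carrying
   any whole jiffies that accumulate in the sub-jiffy field into the
   normal expires field. */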
62 static inline void add_usec_to_timer(struct timer_list *t, long v)
63 {
64         t->sub_expires += nsec_to_arch_cycle(v * 1000);
65         while (t->sub_expires >= arch_cycles_per_jiffy)
66         {
67                 t->expires++;
68                 t->sub_expires -= arch_cycles_per_jiffy;
69         }
70 }
71 #endif
72 #include <linux/interrupt.h>
73 #include <linux/rcupdate.h>
74 #include <linux/ipmi_smi.h>
75 #include <asm/io.h>
76 #include "ipmi_si_sm.h"
77 #include <linux/init.h>
78
79 #define IPMI_SI_VERSION "v31"
80
81 /* Measure times between events in the driver. */
82 #undef DEBUG_TIMING
83
84 /* Call every 10 ms. */
85 #define SI_TIMEOUT_TIME_USEC    10000
86 #define SI_USEC_PER_JIFFY       (1000000/HZ)
87 #define SI_TIMEOUT_JIFFIES      (SI_TIMEOUT_TIME_USEC/SI_USEC_PER_JIFFY)
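/* SI_TIMEOUT_JIFFIES works out to 10 jiffies at HZ=1000 and a single
   jiffy at HZ=100. */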
88 #define SI_SHORT_TIMEOUT_USEC  250 /* .25ms when the SM requests a
89                                        short timeout */
90
91 enum si_intf_state {
92         SI_NORMAL,
93         SI_GETTING_FLAGS,
94         SI_GETTING_EVENTS,
95         SI_CLEARING_FLAGS,
96         SI_CLEARING_FLAGS_THEN_SET_IRQ,
97         SI_GETTING_MESSAGES,
98         SI_ENABLE_INTERRUPTS1,
99         SI_ENABLE_INTERRUPTS2
100         /* FIXME - add watchdog stuff. */
101 };
102
103 enum si_type {
104     SI_KCS, SI_SMIC, SI_BT
105 };
106
107 struct smi_info
108 {
109         ipmi_smi_t             intf;
110         struct si_sm_data      *si_sm;
111         struct si_sm_handlers  *handlers;
112         enum si_type           si_type;
113         spinlock_t             si_lock;
114         spinlock_t             msg_lock;
115         struct list_head       xmit_msgs;
116         struct list_head       hp_xmit_msgs;
117         struct ipmi_smi_msg    *curr_msg;
118         enum si_intf_state     si_state;
119
120         /* Used to handle the various types of I/O that can occur with
121            IPMI */
122         struct si_sm_io io;
123         int (*io_setup)(struct smi_info *info);
124         void (*io_cleanup)(struct smi_info *info);
125         int (*irq_setup)(struct smi_info *info);
126         void (*irq_cleanup)(struct smi_info *info);
127         unsigned int io_size;
128
129         /* Flags from the last GET_MSG_FLAGS command, used when an ATTN
130            is set to hold the flags until we are done handling everything
131            from the flags. */
132 #define RECEIVE_MSG_AVAIL       0x01
133 #define EVENT_MSG_BUFFER_FULL   0x02
134 #define WDT_PRE_TIMEOUT_INT     0x08
135         unsigned char       msg_flags;
136
137         /* If set to true, this will request events the next time the
138            state machine is idle. */
139         atomic_t            req_events;
140
141         /* If true, run the state machine to completion on every send
142            call.  Generally used after a panic to make sure stuff goes
143            out. */
144         int                 run_to_completion;
145
146         /* The I/O port of an SI interface. */
147         int                 port;
148
149         /* zero if no irq */
150         int                 irq;
151
152         /* The timer for this si. */
153         struct timer_list   si_timer;
154
155         /* The time (in jiffies) the last timeout occurred at. */
156         unsigned long       last_timeout_jiffies;
157
158         /* Used to gracefully stop the timer without race conditions. */
159         volatile int        stop_operation;
160         volatile int        timer_stopped;
161
162         /* The driver will disable interrupts when it gets into a
163            situation where it cannot handle messages due to lack of
164            memory.  Once that situation clears up, it will re-enable
165            interrupts. */
166         int interrupt_disabled;
167
168         unsigned char ipmi_si_dev_rev;
169         unsigned char ipmi_si_fw_rev_major;
170         unsigned char ipmi_si_fw_rev_minor;
171         unsigned char ipmi_version_major;
172         unsigned char ipmi_version_minor;
173
174         /* Counters and things for the proc filesystem. */
175         spinlock_t count_lock;
176         unsigned long short_timeouts;
177         unsigned long long_timeouts;
178         unsigned long timeout_restarts;
179         unsigned long idles;
180         unsigned long interrupts;
181         unsigned long attentions;
182         unsigned long flag_fetches;
183         unsigned long hosed_count;
184         unsigned long complete_transactions;
185         unsigned long events;
186         unsigned long watchdog_pretimeouts;
187         unsigned long incoming_messages;
188 };
189
190 static void si_restart_short_timer(struct smi_info *smi_info);
191
192 static void deliver_recv_msg(struct smi_info *smi_info,
193                              struct ipmi_smi_msg *msg)
194 {
195         /* Deliver the message to the upper layer with the lock
196            released. */
197         spin_unlock(&(smi_info->si_lock));
198         ipmi_smi_msg_received(smi_info->intf, msg);
199         spin_lock(&(smi_info->si_lock));
200 }
201
202 static void return_hosed_msg(struct smi_info *smi_info)
203 {
204         struct ipmi_smi_msg *msg = smi_info->curr_msg;
205
206         /* Make it a response */
207         msg->rsp[0] = msg->data[0] | 4; /* request NetFN + 1 = response NetFN */
208         msg->rsp[1] = msg->data[1];
209         msg->rsp[2] = 0xFF; /* Unknown error. */
210         msg->rsp_size = 3;
211
212         smi_info->curr_msg = NULL;
213         deliver_recv_msg(smi_info, msg);
214 }
215
216 static enum si_sm_result start_next_msg(struct smi_info *smi_info)
217 {
218         int              rv;
219         struct list_head *entry = NULL;
220 #ifdef DEBUG_TIMING
221         struct timeval t;
222 #endif
223
224         /* No need to save flags, we already have interrupts off and we
225            already hold the SMI lock. */
226         spin_lock(&(smi_info->msg_lock));
227
228         /* Pick the high priority queue first. */
229         if (! list_empty(&(smi_info->hp_xmit_msgs))) {
230                 entry = smi_info->hp_xmit_msgs.next;
231         } else if (! list_empty(&(smi_info->xmit_msgs))) {
232                 entry = smi_info->xmit_msgs.next;
233         }
234
235         if (!entry) {
236                 smi_info->curr_msg = NULL;
237                 rv = SI_SM_IDLE;
238         } else {
239                 int err;
240
241                 list_del(entry);
242                 smi_info->curr_msg = list_entry(entry,
243                                                 struct ipmi_smi_msg,
244                                                 link);
245 #ifdef DEBUG_TIMING
246                 do_gettimeofday(&t);
247                 printk("**Start2: %d.%9.9d\n", t.tv_sec, t.tv_usec);
248 #endif
249                 err = smi_info->handlers->start_transaction(
250                         smi_info->si_sm,
251                         smi_info->curr_msg->data,
252                         smi_info->curr_msg->data_size);
253                 if (err) {
254                         return_hosed_msg(smi_info);
255                 }
256
257                 rv = SI_SM_CALL_WITHOUT_DELAY;
258         }
259         spin_unlock(&(smi_info->msg_lock));
260
261         return rv;
262 }
263
264 static void start_enable_irq(struct smi_info *smi_info)
265 {
266         unsigned char msg[2];
267
268         /* If we are enabling interrupts, we have to tell the
269            BMC to use them. */
270         msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
271         msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
272
273         smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
274         smi_info->si_state = SI_ENABLE_INTERRUPTS1;
275 }
276
277 static void start_clear_flags(struct smi_info *smi_info)
278 {
279         unsigned char msg[3];
280
281         /* Make sure the watchdog pre-timeout flag is not set at startup. */
282         msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
283         msg[1] = IPMI_CLEAR_MSG_FLAGS_CMD;
284         msg[2] = WDT_PRE_TIMEOUT_INT;
285
286         smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3);
287         smi_info->si_state = SI_CLEARING_FLAGS;
288 }
289
290 /* When we have a situation where we run out of memory and cannot
291    allocate messages, we just leave them in the BMC and run the system
292    polled until we can allocate some memory.  Once we have some
293    memory, we will re-enable the interrupt. */
294 static inline void disable_si_irq(struct smi_info *smi_info)
295 {
296         if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
297                 disable_irq_nosync(smi_info->irq);
298                 smi_info->interrupt_disabled = 1;
299         }
300 }
301
302 static inline void enable_si_irq(struct smi_info *smi_info)
303 {
304         if ((smi_info->irq) && (smi_info->interrupt_disabled)) {
305                 enable_irq(smi_info->irq);
306                 smi_info->interrupt_disabled = 0;
307         }
308 }
309
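/* Act on the flags from the last GET_MSG_FLAGS response: clear and
   report a watchdog pre-timeout, then fetch pending messages and
   events one at a time, dropping back to SI_NORMAL when nothing is
   left to do. */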
310 static void handle_flags(struct smi_info *smi_info)
311 {
312         if (smi_info->msg_flags & WDT_PRE_TIMEOUT_INT) {
313                 /* Watchdog pre-timeout */
314                 spin_lock(&smi_info->count_lock);
315                 smi_info->watchdog_pretimeouts++;
316                 spin_unlock(&smi_info->count_lock);
317
318                 start_clear_flags(smi_info);
319                 smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT;
320                 spin_unlock(&(smi_info->si_lock));
321                 ipmi_smi_watchdog_pretimeout(smi_info->intf);
322                 spin_lock(&(smi_info->si_lock));
323         } else if (smi_info->msg_flags & RECEIVE_MSG_AVAIL) {
324                 /* Messages available. */
325                 smi_info->curr_msg = ipmi_alloc_smi_msg();
326                 if (!smi_info->curr_msg) {
327                         disable_si_irq(smi_info);
328                         smi_info->si_state = SI_NORMAL;
329                         return;
330                 }
331                 enable_si_irq(smi_info);
332
333                 smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
334                 smi_info->curr_msg->data[1] = IPMI_GET_MSG_CMD;
335                 smi_info->curr_msg->data_size = 2;
336
337                 smi_info->handlers->start_transaction(
338                         smi_info->si_sm,
339                         smi_info->curr_msg->data,
340                         smi_info->curr_msg->data_size);
341                 smi_info->si_state = SI_GETTING_MESSAGES;
342         } else if (smi_info->msg_flags & EVENT_MSG_BUFFER_FULL) {
343                 /* Events available. */
344                 smi_info->curr_msg = ipmi_alloc_smi_msg();
345                 if (!smi_info->curr_msg) {
346                         disable_si_irq(smi_info);
347                         smi_info->si_state = SI_NORMAL;
348                         return;
349                 }
350                 enable_si_irq(smi_info);
351
352                 smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
353                 smi_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD;
354                 smi_info->curr_msg->data_size = 2;
355
356                 smi_info->handlers->start_transaction(
357                         smi_info->si_sm,
358                         smi_info->curr_msg->data,
359                         smi_info->curr_msg->data_size);
360                 smi_info->si_state = SI_GETTING_EVENTS;
361         } else {
362                 smi_info->si_state = SI_NORMAL;
363         }
364 }
365
366 static void handle_transaction_done(struct smi_info *smi_info)
367 {
368         struct ipmi_smi_msg *msg;
369 #ifdef DEBUG_TIMING
370         struct timeval t;
371
372         do_gettimeofday(&t);
373         printk("**Done: %d.%9.9d\n", t.tv_sec, t.tv_usec);
374 #endif
375         switch (smi_info->si_state) {
376         case SI_NORMAL:
377                 if (!smi_info->curr_msg)
378                         break;
379
380                 smi_info->curr_msg->rsp_size
381                         = smi_info->handlers->get_result(
382                                 smi_info->si_sm,
383                                 smi_info->curr_msg->rsp,
384                                 IPMI_MAX_MSG_LENGTH);
385
386                 /* Do this here because deliver_recv_msg() releases the
387                    lock, and a new message can be put in during the
388                    time the lock is released. */
389                 msg = smi_info->curr_msg;
390                 smi_info->curr_msg = NULL;
391                 deliver_recv_msg(smi_info, msg);
392                 break;
393
394         case SI_GETTING_FLAGS:
395         {
396                 unsigned char msg[4];
397                 unsigned int  len;
398
399                 /* We got the flags from the SMI, now handle them. */
400                 len = smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
401                 if (msg[2] != 0) {
402                         /* Error fetching flags, just give up for
403                            now. */
404                         smi_info->si_state = SI_NORMAL;
405                 } else if (len < 4) {
406                         /* Hmm, no flags.  That's technically illegal, but
407                            don't use uninitialized data. */
408                         smi_info->si_state = SI_NORMAL;
409                 } else {
410                         smi_info->msg_flags = msg[3];
411                         handle_flags(smi_info);
412                 }
413                 break;
414         }
415
416         case SI_CLEARING_FLAGS:
417         case SI_CLEARING_FLAGS_THEN_SET_IRQ:
418         {
419                 unsigned char msg[3];
420
421                 /* We cleared the flags. */
422                 smi_info->handlers->get_result(smi_info->si_sm, msg, 3);
423                 if (msg[2] != 0) {
424                         /* Error clearing flags */
425                         printk(KERN_WARNING
426                                "ipmi_si: Error clearing flags: %2.2x\n",
427                                msg[2]);
428                 }
429                 if (smi_info->si_state == SI_CLEARING_FLAGS_THEN_SET_IRQ)
430                         start_enable_irq(smi_info);
431                 else
432                         smi_info->si_state = SI_NORMAL;
433                 break;
434         }
435
436         case SI_GETTING_EVENTS:
437         {
438                 smi_info->curr_msg->rsp_size
439                         = smi_info->handlers->get_result(
440                                 smi_info->si_sm,
441                                 smi_info->curr_msg->rsp,
442                                 IPMI_MAX_MSG_LENGTH);
443
444                 /* Do this here because deliver_recv_msg() releases the
445                    lock, and a new message can be put in during the
446                    time the lock is released. */
447                 msg = smi_info->curr_msg;
448                 smi_info->curr_msg = NULL;
449                 if (msg->rsp[2] != 0) {
450                         /* Error getting event, probably done. */
451                         msg->done(msg);
452
453                         /* Take off the event flag. */
454                         smi_info->msg_flags &= ~EVENT_MSG_BUFFER_FULL;
455                 } else {
456                         spin_lock(&smi_info->count_lock);
457                         smi_info->events++;
458                         spin_unlock(&smi_info->count_lock);
459
460                         deliver_recv_msg(smi_info, msg);
461                 }
462                 handle_flags(smi_info);
463                 break;
464         }
465
466         case SI_GETTING_MESSAGES:
467         {
468                 smi_info->curr_msg->rsp_size
469                         = smi_info->handlers->get_result(
470                                 smi_info->si_sm,
471                                 smi_info->curr_msg->rsp,
472                                 IPMI_MAX_MSG_LENGTH);
473
474                 /* Do this here because deliver_recv_msg() releases the
475                    lock, and a new message can be put in during the
476                    time the lock is released. */
477                 msg = smi_info->curr_msg;
478                 smi_info->curr_msg = NULL;
479                 if (msg->rsp[2] != 0) {
480                         /* Error getting event, probably done. */
481                         msg->done(msg);
482
483                         /* Take off the msg flag. */
484                         smi_info->msg_flags &= ~RECEIVE_MSG_AVAIL;
485                 } else {
486                         spin_lock(&smi_info->count_lock);
487                         smi_info->incoming_messages++;
488                         spin_unlock(&smi_info->count_lock);
489
490                         deliver_recv_msg(smi_info, msg);
491                 }
492                 handle_flags(smi_info);
493                 break;
494         }
495
496         case SI_ENABLE_INTERRUPTS1:
497         {
498                 unsigned char msg[4];
499
500                 /* We got the response to the Get Global Enables command, now handle it. */
501                 smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
502                 if (msg[2] != 0) {
503                         printk(KERN_WARNING
504                                "ipmi_si: Could not enable interrupts"
505                                ", failed get, using polled mode.\n");
506                         smi_info->si_state = SI_NORMAL;
507                 } else {
508                         msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
509                         msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
510                         msg[2] = msg[3] | 1; /* current enables + receive msg queue int */
511                         smi_info->handlers->start_transaction(
512                                 smi_info->si_sm, msg, 3);
513                         smi_info->si_state = SI_ENABLE_INTERRUPTS2;
514                 }
515                 break;
516         }
517
518         case SI_ENABLE_INTERRUPTS2:
519         {
520                 unsigned char msg[4];
521
522                 /* We got the response to the Set Global Enables command. */
523                 smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
524                 if (msg[2] != 0) {
525                         printk(KERN_WARNING
526                                "ipmi_si: Could not enable interrupts"
527                                ", failed set, using polled mode.\n");
528                 }
529                 smi_info->si_state = SI_NORMAL;
530                 break;
531         }
532         }
533 }
534
535 /* Called on timeouts and events.  Timeouts should pass the elapsed
536    time in microseconds, interrupts should pass in zero. */
537 static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
538                                            int time)
539 {
540         enum si_sm_result si_sm_result;
541
542  restart:
543         /* There used to be a loop here that waited a little while
544            (around 25us) before giving up.  That turned out to be
545            pointless, the minimum delays I was seeing were in the 300us
546            range, which is far too long to wait in an interrupt.  So
547            we just run until the state machine tells us something
548            happened or it needs a delay. */
549         si_sm_result = smi_info->handlers->event(smi_info->si_sm, time);
550         time = 0;
551         while (si_sm_result == SI_SM_CALL_WITHOUT_DELAY)
552         {
553                 si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
554         }
555
556         if (si_sm_result == SI_SM_TRANSACTION_COMPLETE)
557         {
558                 spin_lock(&smi_info->count_lock);
559                 smi_info->complete_transactions++;
560                 spin_unlock(&smi_info->count_lock);
561
562                 handle_transaction_done(smi_info);
563                 si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
564         }
565         else if (si_sm_result == SI_SM_HOSED)
566         {
567                 spin_lock(&smi_info->count_lock);
568                 smi_info->hosed_count++;
569                 spin_unlock(&smi_info->count_lock);
570
571                 if (smi_info->curr_msg != NULL) {
572                         /* If we were handling a user message, format
573                            a response to send to the upper layer to
574                            tell it about the error. */
575                         return_hosed_msg(smi_info);
576                 }
577                 si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
578                 smi_info->si_state = SI_NORMAL;
579         }
580
581         /* We prefer handling attn over new messages. */
582         if (si_sm_result == SI_SM_ATTN)
583         {
584                 unsigned char msg[2];
585
586                 spin_lock(&smi_info->count_lock);
587                 smi_info->attentions++;
588                 spin_unlock(&smi_info->count_lock);
589
590                 /* Got an attn, send down a get message flags to see
591                    what's causing it.  It would be better to handle
592                    this in the upper layer, but due to the way
593                    interrupts work with the SMI, that's not really
594                    possible. */
595                 msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
596                 msg[1] = IPMI_GET_MSG_FLAGS_CMD;
597
598                 smi_info->handlers->start_transaction(
599                         smi_info->si_sm, msg, 2);
600                 smi_info->si_state = SI_GETTING_FLAGS;
601                 goto restart;
602         }
603
604         /* If we are currently idle, try to start the next message. */
605         if (si_sm_result == SI_SM_IDLE) {
606                 spin_lock(&smi_info->count_lock);
607                 smi_info->idles++;
608                 spin_unlock(&smi_info->count_lock);
609
610                 si_sm_result = start_next_msg(smi_info);
611                 if (si_sm_result != SI_SM_IDLE)
612                         goto restart;
613         }
614
615         if ((si_sm_result == SI_SM_IDLE)
616             && (atomic_read(&smi_info->req_events)))
617         {
618                 /* We are idle and the upper layer requested that I fetch
619                    events, so do so. */
620                 unsigned char msg[2];
621
622                 spin_lock(&smi_info->count_lock);
623                 smi_info->flag_fetches++;
624                 spin_unlock(&smi_info->count_lock);
625
626                 atomic_set(&smi_info->req_events, 0);
627                 msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
628                 msg[1] = IPMI_GET_MSG_FLAGS_CMD;
629
630                 smi_info->handlers->start_transaction(
631                         smi_info->si_sm, msg, 2);
632                 smi_info->si_state = SI_GETTING_FLAGS;
633                 goto restart;
634         }
635
636         return si_sm_result;
637 }
638
639 static void sender(void                *send_info,
640                    struct ipmi_smi_msg *msg,
641                    int                 priority)
642 {
643         struct smi_info   *smi_info = send_info;
644         enum si_sm_result result;
645         unsigned long     flags;
646 #ifdef DEBUG_TIMING
647         struct timeval    t;
648 #endif
649
650         spin_lock_irqsave(&(smi_info->msg_lock), flags);
651 #ifdef DEBUG_TIMING
652         do_gettimeofday(&t);
653         printk("**Enqueue: %d.%9.9d\n", t.tv_sec, t.tv_usec);
654 #endif
655
656         if (smi_info->run_to_completion) {
657                 /* If we are running to completion, then throw it in
658                    the list and run transactions until everything is
659                    clear.  Priority doesn't matter here. */
660                 list_add_tail(&(msg->link), &(smi_info->xmit_msgs));
661
662                 /* We have to release the msg lock and claim the smi
663                    lock in this case, because of race conditions. */
664                 spin_unlock_irqrestore(&(smi_info->msg_lock), flags);
665
666                 spin_lock_irqsave(&(smi_info->si_lock), flags);
667                 result = smi_event_handler(smi_info, 0);
668                 while (result != SI_SM_IDLE) {
669                         udelay(SI_SHORT_TIMEOUT_USEC);
670                         result = smi_event_handler(smi_info,
671                                                    SI_SHORT_TIMEOUT_USEC);
672                 }
673                 spin_unlock_irqrestore(&(smi_info->si_lock), flags);
674                 return;
675         } else {
676                 if (priority > 0) {
677                         list_add_tail(&(msg->link), &(smi_info->hp_xmit_msgs));
678                 } else {
679                         list_add_tail(&(msg->link), &(smi_info->xmit_msgs));
680                 }
681         }
682         spin_unlock_irqrestore(&(smi_info->msg_lock), flags);
683
684         spin_lock_irqsave(&(smi_info->si_lock), flags);
685         if ((smi_info->si_state == SI_NORMAL)
686             && (smi_info->curr_msg == NULL))
687         {
688                 start_next_msg(smi_info);
689                 si_restart_short_timer(smi_info);
690         }
691         spin_unlock_irqrestore(&(smi_info->si_lock), flags);
692 }
693
694 static void set_run_to_completion(void *send_info, int i_run_to_completion)
695 {
696         struct smi_info   *smi_info = send_info;
697         enum si_sm_result result;
698         unsigned long     flags;
699
700         spin_lock_irqsave(&(smi_info->si_lock), flags);
701
702         smi_info->run_to_completion = i_run_to_completion;
703         if (i_run_to_completion) {
704                 result = smi_event_handler(smi_info, 0);
705                 while (result != SI_SM_IDLE) {
706                         udelay(SI_SHORT_TIMEOUT_USEC);
707                         result = smi_event_handler(smi_info,
708                                                    SI_SHORT_TIMEOUT_USEC);
709                 }
710         }
711
712         spin_unlock_irqrestore(&(smi_info->si_lock), flags);
713 }
714
715 static void request_events(void *send_info)
716 {
717         struct smi_info *smi_info = send_info;
718
719         atomic_set(&smi_info->req_events, 1);
720 }
721
722 static int initialized = 0;
723
724 /* Must be called with interrupts off and with the si_lock held. */
725 static void si_restart_short_timer(struct smi_info *smi_info)
726 {
727 #if defined(CONFIG_HIGH_RES_TIMERS)
728         unsigned long flags;
729         unsigned long jiffies_now;
730
731         if (del_timer(&(smi_info->si_timer))) {
732                 /* If we don't delete the timer, then it will go off
733                    immediately, anyway.  So we only process if we
734                    actually delete the timer. */
735
736                 /* We already have irqsave on, so no need for it
737                    here. */
738                 read_lock(&xtime_lock);
739                 jiffies_now = jiffies;
740                 smi_info->si_timer.expires = jiffies_now;
741                 smi_info->si_timer.sub_expires = get_arch_cycles(jiffies_now);
742
743                 add_usec_to_timer(&smi_info->si_timer, SI_SHORT_TIMEOUT_USEC);
744
745                 add_timer(&(smi_info->si_timer));
746                 spin_lock_irqsave(&smi_info->count_lock, flags);
747                 smi_info->timeout_restarts++;
748                 spin_unlock_irqrestore(&smi_info->count_lock, flags);
749         }
750 #endif
751 }
752
753 static void smi_timeout(unsigned long data)
754 {
755         struct smi_info   *smi_info = (struct smi_info *) data;
756         enum si_sm_result smi_result;
757         unsigned long     flags;
758         unsigned long     jiffies_now;
759         unsigned long     time_diff;
760 #ifdef DEBUG_TIMING
761         struct timeval    t;
762 #endif
763
764         if (smi_info->stop_operation) {
765                 smi_info->timer_stopped = 1;
766                 return;
767         }
768
769         spin_lock_irqsave(&(smi_info->si_lock), flags);
770 #ifdef DEBUG_TIMING
771         do_gettimeofday(&t);
772         printk("**Timer: %d.%9.9d\n", t.tv_sec, t.tv_usec);
773 #endif
774         jiffies_now = jiffies;
775         time_diff = ((jiffies_now - smi_info->last_timeout_jiffies)
776                      * SI_USEC_PER_JIFFY);
777         smi_result = smi_event_handler(smi_info, time_diff);
778
779         spin_unlock_irqrestore(&(smi_info->si_lock), flags);
780
781         smi_info->last_timeout_jiffies = jiffies_now;
782
783         if ((smi_info->irq) && (! smi_info->interrupt_disabled)) {
784                 /* Running with interrupts, only do long timeouts. */
785                 smi_info->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES;
786                 spin_lock_irqsave(&smi_info->count_lock, flags);
787                 smi_info->long_timeouts++;
788                 spin_unlock_irqrestore(&smi_info->count_lock, flags);
789                 goto do_add_timer;
790         }
791
792         /* If the state machine asks for a short delay, then shorten
793            the timer timeout. */
794         if (smi_result == SI_SM_CALL_WITH_DELAY) {
795                 spin_lock_irqsave(&smi_info->count_lock, flags);
796                 smi_info->short_timeouts++;
797                 spin_unlock_irqrestore(&smi_info->count_lock, flags);
798 #if defined(CONFIG_HIGH_RES_TIMERS)
799                 read_lock(&xtime_lock);
800                 smi_info->si_timer.expires = jiffies;
801                 smi_info->si_timer.sub_expires
802                         = get_arch_cycles(smi_info->si_timer.expires);
803                 read_unlock(&xtime_lock);
804                 add_usec_to_timer(&smi_info->si_timer, SI_SHORT_TIMEOUT_USEC);
805 #else
806                 smi_info->si_timer.expires = jiffies + 1;
807 #endif
808         } else {
809                 spin_lock_irqsave(&smi_info->count_lock, flags);
810                 smi_info->long_timeouts++;
811                 spin_unlock_irqrestore(&smi_info->count_lock, flags);
812                 smi_info->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES;
813 #if defined(CONFIG_HIGH_RES_TIMERS)
814                 smi_info->si_timer.sub_expires = 0;
815 #endif
816         }
817
818  do_add_timer:
819         add_timer(&(smi_info->si_timer));
820 }
821
822 static irqreturn_t si_irq_handler(int irq, void *data, struct pt_regs *regs)
823 {
824         struct smi_info *smi_info = data;
825         unsigned long   flags;
826 #ifdef DEBUG_TIMING
827         struct timeval  t;
828 #endif
829
830         spin_lock_irqsave(&(smi_info->si_lock), flags);
831
832         spin_lock(&smi_info->count_lock);
833         smi_info->interrupts++;
834         spin_unlock(&smi_info->count_lock);
835
836         if (smi_info->stop_operation)
837                 goto out;
838
839 #ifdef DEBUG_TIMING
840         do_gettimeofday(&t);
841         printk("**Interrupt: %d.%9.9d\n", t.tv_sec, t.tv_usec);
842 #endif
843         smi_event_handler(smi_info, 0);
844  out:
845         spin_unlock_irqrestore(&(smi_info->si_lock), flags);
846         return IRQ_HANDLED;
847 }
848
849 static struct ipmi_smi_handlers handlers =
850 {
851         .owner                  = THIS_MODULE,
852         .sender                 = sender,
853         .request_events         = request_events,
854         .set_run_to_completion  = set_run_to_completion
855 };
856
857 /* There can be 4 IO ports passed in (with or without IRQs), 4 addresses,
858    a default IO port, and 1 ACPI/SPMI address.  That sets SI_MAX_DRIVERS */
859
860 #define SI_MAX_PARMS 4
861 #define SI_MAX_DRIVERS ((SI_MAX_PARMS * 2) + 2)
862 static struct smi_info *smi_infos[SI_MAX_DRIVERS] =
863 { NULL, NULL, NULL, NULL };
864
865 #define DEVICE_NAME "ipmi_si"
866
867 #define DEFAULT_KCS_IO_PORT 0xca2
868 #define DEFAULT_SMIC_IO_PORT 0xca9
869 #define DEFAULT_BT_IO_PORT   0xe4
870
871 static int           si_trydefaults = 1;
872 static char          *si_type[SI_MAX_PARMS] = { NULL, NULL, NULL, NULL };
873 #define MAX_SI_TYPE_STR 30
874 static char          si_type_str[MAX_SI_TYPE_STR];
875 static unsigned long addrs[SI_MAX_PARMS] = { 0, 0, 0, 0 };
876 static int num_addrs = 0;
877 static unsigned int  ports[SI_MAX_PARMS] = { 0, 0, 0, 0 };
878 static int num_ports = 0;
879 static int           irqs[SI_MAX_PARMS] = { 0, 0, 0, 0 };
880 static int num_irqs = 0;
881
882
883 module_param_named(trydefaults, si_trydefaults, bool, 0);
884 MODULE_PARM_DESC(trydefaults, "Setting this to 'false' will disable the"
885                  " default scan of the KCS and SMIC interfaces at their standard"
886                  " addresses");
887 module_param_string(type, si_type_str, MAX_SI_TYPE_STR, 0);
888 MODULE_PARM_DESC(type, "Defines the type of each interface, each"
889                  " interface separated by commas.  The types are 'kcs',"
890                  " 'smic', and 'bt'.  For example si_type=kcs,bt will set"
891                  " the first interface to kcs and the second to bt");
892 module_param_array(addrs, long, num_addrs, 0);
893 MODULE_PARM_DESC(addrs, "Sets the memory address of each interface, the"
894                  " addresses separated by commas.  Only use if an interface"
895                  " is in memory.  Otherwise, set it to zero or leave"
896                  " it blank.");
897 module_param_array(ports, int, num_ports, 0);
898 MODULE_PARM_DESC(ports, "Sets the port address of each interface, the"
899                  " addresses separated by commas.  Only use if an interface"
900                  " is a port.  Otherwise, set it to zero or leave"
901                  " it blank.");
902 module_param_array(irqs, int, num_irqs, 0);
903 MODULE_PARM_DESC(irqs, "Sets the interrupt of each interface, the"
904                  " addresses separated by commas.  Only use if an interface"
905                  " has an interrupt.  Otherwise, set it to zero or leave"
906                  " it blank.");
907
908 #define IPMI_MEM_ADDR_SPACE 1
909 #define IPMI_IO_ADDR_SPACE  2
910
911 #if defined(CONFIG_ACPI_INTERPRETER) || defined(CONFIG_X86) || defined(CONFIG_PCI)
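/* Returns 1 if the given address is not already claimed by another
   configured interface, 0 if it duplicates one that is already set
   up. */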
912 static int is_new_interface(int intf, u8 addr_space, unsigned long base_addr)
913 {
914         int i;
915
916         for (i = 0; i < SI_MAX_PARMS; ++i) {
917                 /* Don't check our address. */
918                 if (i == intf)
919                         continue;
920                 if (si_type[i] != NULL) {
921                         if ((addr_space == IPMI_MEM_ADDR_SPACE &&
922                              base_addr == addrs[i]) ||
923                             (addr_space == IPMI_IO_ADDR_SPACE &&
924                              base_addr == ports[i]))
925                                 return 0;
926                 }
927                 else
928                         break;
929         }
930
931         return 1;
932 }
933 #endif
934
935 static int std_irq_setup(struct smi_info *info)
936 {
937         int rv;
938
939         if (!info->irq)
940                 return 0;
941
942         rv = request_irq(info->irq,
943                          si_irq_handler,
944                          SA_INTERRUPT,
945                          DEVICE_NAME,
946                          info);
947         if (rv) {
948                 printk(KERN_WARNING
949                        "ipmi_si: %s unable to claim interrupt %d,"
950                        " running polled\n",
951                        DEVICE_NAME, info->irq);
952                 info->irq = 0;
953         } else {
954                 printk("  Using irq %d\n", info->irq);
955         }
956
957         return rv;
958 }
959
960 static void std_irq_cleanup(struct smi_info *info)
961 {
962         if (!info->irq)
963                 return;
964
965         free_irq(info->irq, info);
966 }
967
968 static unsigned char port_inb(struct si_sm_io *io, unsigned int offset)
969 {
970         unsigned int *addr = io->info;
971
972         return inb((*addr)+offset);
973 }
974
975 static void port_outb(struct si_sm_io *io, unsigned int offset,
976                       unsigned char b)
977 {
978         unsigned int *addr = io->info;
979
980         outb(b, (*addr)+offset);
981 }
982
983 static int port_setup(struct smi_info *info)
984 {
985         unsigned int *addr = info->io.info;
986
987         if (!addr || (!*addr))
988                 return -ENODEV;
989
990         if (request_region(*addr, info->io_size, DEVICE_NAME) == NULL)
991                 return -EIO;
992         return 0;
993 }
994
995 static void port_cleanup(struct smi_info *info)
996 {
997         unsigned int *addr = info->io.info;
998
999         if (addr && (*addr))
1000                 release_region (*addr, info->io_size);
1001         kfree(info);
1002 }
1003
1004 static int try_init_port(int intf_num, struct smi_info **new_info)
1005 {
1006         struct smi_info *info;
1007
1008         if (!ports[intf_num])
1009                 return -ENODEV;
1010
1011         if (!is_new_interface(intf_num, IPMI_IO_ADDR_SPACE,
1012                               ports[intf_num]))
1013                 return -ENODEV;
1014
1015         info = kmalloc(sizeof(*info), GFP_KERNEL);
1016         if (!info) {
1017                 printk(KERN_ERR "ipmi_si: Could not allocate SI data (1)\n");
1018                 return -ENOMEM;
1019         }
1020         memset(info, 0, sizeof(*info));
1021
1022         info->io_setup = port_setup;
1023         info->io_cleanup = port_cleanup;
1024         info->io.inputb = port_inb;
1025         info->io.outputb = port_outb;
1026         info->io.info = &(ports[intf_num]);
1027         info->io.addr = NULL;
1028         info->irq = 0;
1029         info->irq_setup = NULL;
1030         *new_info = info;
1031
1032         if (si_type[intf_num] == NULL)
1033                 si_type[intf_num] = "kcs";
1034
1035         printk("ipmi_si: Trying \"%s\" at I/O port 0x%x\n",
1036                si_type[intf_num], ports[intf_num]);
1037         return 0;
1038 }
1039
1040 static unsigned char mem_inb(struct si_sm_io *io, unsigned int offset)
1041 {
1042         return readb((io->addr)+offset);
1043 }
1044
1045 static void mem_outb(struct si_sm_io *io, unsigned int offset,
1046                      unsigned char b)
1047 {
1048         writeb(b, (io->addr)+offset);
1049 }
1050
1051 static int mem_setup(struct smi_info *info)
1052 {
1053         unsigned long *addr = info->io.info;
1054
1055         if (!addr || (!*addr))
1056                 return -ENODEV;
1057
1058         if (request_mem_region(*addr, info->io_size, DEVICE_NAME) == NULL)
1059                 return -EIO;
1060
1061         info->io.addr = ioremap(*addr, info->io_size);
1062         if (info->io.addr == NULL) {
1063                 release_mem_region(*addr, info->io_size);
1064                 return -EIO;
1065         }
1066         return 0;
1067 }
1068
1069 static void mem_cleanup(struct smi_info *info)
1070 {
1071         unsigned long *addr = info->io.info;
1072
1073         if (info->io.addr) {
1074                 iounmap(info->io.addr);
1075                 release_mem_region(*addr, info->io_size);
1076         }
1077         kfree(info);
1078 }
1079
1080 static int try_init_mem(int intf_num, struct smi_info **new_info)
1081 {
1082         struct smi_info *info;
1083
1084         if (!addrs[intf_num])
1085                 return -ENODEV;
1086
1087         if (!is_new_interface(intf_num, IPMI_MEM_ADDR_SPACE,
1088                               addrs[intf_num]))
1089                 return -ENODEV;
1090
1091         info = kmalloc(sizeof(*info), GFP_KERNEL);
1092         if (!info) {
1093                 printk(KERN_ERR "ipmi_si: Could not allocate SI data (2)\n");
1094                 return -ENOMEM;
1095         }
1096         memset(info, 0, sizeof(*info));
1097
1098         info->io_setup = mem_setup;
1099         info->io_cleanup = mem_cleanup;
1100         info->io.inputb = mem_inb;
1101         info->io.outputb = mem_outb;
1102         info->io.info = &(addrs[intf_num]);
1103         info->io.addr = NULL;
1104         info->irq = 0;
1105         info->irq_setup = NULL;
1106         *new_info = info;
1107
1108         if (si_type[intf_num] == NULL)
1109                 si_type[intf_num] = "kcs";
1110
1111         printk("ipmi_si: Trying \"%s\" at memory address 0x%lx\n",
1112                si_type[intf_num], addrs[intf_num]);
1113         return 0;
1114 }
1115
1116
1117 #ifdef CONFIG_ACPI_INTERPRETER
1118
1119 #include <linux/acpi.h>
1120
1121 /* Once we get an ACPI failure, we don't try any more, because we go
1122    through the tables sequentially.  Once we don't find a table, there
1123    are no more. */
1124 static int acpi_failure = 0;
1125
1126 /* For GPE-type interrupts. */
1127 void ipmi_acpi_gpe(void *context)
1128 {
1129         struct smi_info *smi_info = context;
1130         unsigned long   flags;
1131 #ifdef DEBUG_TIMING
1132         struct timeval t;
1133 #endif
1134
1135         spin_lock_irqsave(&(smi_info->si_lock), flags);
1136
1137         spin_lock(&smi_info->count_lock);
1138         smi_info->interrupts++;
1139         spin_unlock(&smi_info->count_lock);
1140
1141         if (smi_info->stop_operation)
1142                 goto out;
1143
1144 #ifdef DEBUG_TIMING
1145         do_gettimeofday(&t);
1146         printk("**ACPI_GPE: %d.%9.9d\n", t.tv_sec, t.tv_usec);
1147 #endif
1148         smi_event_handler(smi_info, 0);
1149  out:
1150         spin_unlock_irqrestore(&(smi_info->si_lock), flags);
1151 }
1152
1153 static int acpi_gpe_irq_setup(struct smi_info *info)
1154 {
1155         acpi_status status;
1156
1157         if (!info->irq)
1158                 return 0;
1159
1160         /* FIXME - is level triggered right? */
1161         status = acpi_install_gpe_handler(NULL,
1162                                           info->irq,
1163                                           ACPI_GPE_LEVEL_TRIGGERED,
1164                                           ipmi_acpi_gpe,
1165                                           info);
1166         if (status != AE_OK) {
1167                 printk(KERN_WARNING
1168                        "ipmi_si: %s unable to claim ACPI GPE %d,"
1169                        " running polled\n",
1170                        DEVICE_NAME, info->irq);
1171                 info->irq = 0;
1172                 return -EINVAL;
1173         } else {
1174                 printk("  Using ACPI GPE %d\n", info->irq);
1175                 return 0;
1176         }
1177
1178 }
1179
1180 static void acpi_gpe_irq_cleanup(struct smi_info *info)
1181 {
1182         if (!info->irq)
1183                 return;
1184
1185         acpi_remove_gpe_handler(NULL, info->irq, ipmi_acpi_gpe);
1186 }
1187
1188 /*
1189  * Defined at
1190  * http://h21007.www2.hp.com/dspp/files/unprotected/devresource/Docs/TechPapers/IA64/hpspmi.pdf
1191  */
1192 struct SPMITable {
1193         s8      Signature[4];
1194         u32     Length;
1195         u8      Revision;
1196         u8      Checksum;
1197         s8      OEMID[6];
1198         s8      OEMTableID[8];
1199         s8      OEMRevision[4];
1200         s8      CreatorID[4];
1201         s8      CreatorRevision[4];
1202         u8      InterfaceType;
1203         u8      IPMIlegacy;
1204         s16     SpecificationRevision;
1205
1206         /*
1207          * Bit 0 - SCI interrupt supported
1208          * Bit 1 - I/O APIC/SAPIC
1209          */
1210         u8      InterruptType;
1211
1212         /* If bit 0 of InterruptType is set, then this is the SCI
1213            interrupt in the GPEx_STS register. */
1214         u8      GPE;
1215
1216         s16     Reserved;
1217
1218         /* If bit 1 of InterruptType is set, then this is the I/O
1219            APIC/SAPIC interrupt. */
1220         u32     GlobalSystemInterrupt;
1221
1222         /* The actual register address. */
1223         struct acpi_generic_address addr;
1224
1225         u8      UID[4];
1226
1227         s8      spmi_id[1]; /* A '\0' terminated array starts here. */
1228 };
1229
1230 static int try_init_acpi(int intf_num, struct smi_info **new_info)
1231 {
1232         struct smi_info  *info;
1233         acpi_status      status;
1234         struct SPMITable *spmi;
1235         char             *io_type;
1236         u8               addr_space;
1237
1238         if (acpi_failure)
1239                 return -ENODEV;
1240
1241         status = acpi_get_firmware_table("SPMI", intf_num+1,
1242                                          ACPI_LOGICAL_ADDRESSING,
1243                                          (struct acpi_table_header **) &spmi);
1244         if (status != AE_OK) {
1245                 acpi_failure = 1;
1246                 return -ENODEV;
1247         }
1248
1249         if (spmi->IPMIlegacy != 1) {
1250             printk(KERN_INFO "IPMI: Bad SPMI legacy %d\n", spmi->IPMIlegacy);
1251             return -ENODEV;
1252         }
1253
1254         if (spmi->addr.address_space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
1255                 addr_space = IPMI_MEM_ADDR_SPACE;
1256         else
1257                 addr_space = IPMI_IO_ADDR_SPACE;
1258         if (!is_new_interface(-1, addr_space, spmi->addr.address))
1259                 return -ENODEV;
1260
1261         /* Figure out the interface type. */
1262         switch (spmi->InterfaceType)
1263         {
1264         case 1: /* KCS */
1265                 si_type[intf_num] = "kcs";
1266                 break;
1267
1268         case 2: /* SMIC */
1269                 si_type[intf_num] = "smic";
1270                 break;
1271
1272         case 3: /* BT */
1273                 si_type[intf_num] = "bt";
1274                 break;
1275
1276         default:
1277                 printk(KERN_INFO "ipmi_si: Unknown ACPI/SPMI SI type %d\n",
1278                         spmi->InterfaceType);
1279                 return -EIO;
1280         }
1281
1282         info = kmalloc(sizeof(*info), GFP_KERNEL);
1283         if (!info) {
1284                 printk(KERN_ERR "ipmi_si: Could not allocate SI data (3)\n");
1285                 return -ENOMEM;
1286         }
1287         memset(info, 0, sizeof(*info));
1288
1289         if (spmi->InterruptType & 1) {
1290                 /* We've got a GPE interrupt. */
1291                 info->irq = spmi->GPE;
1292                 info->irq_setup = acpi_gpe_irq_setup;
1293                 info->irq_cleanup = acpi_gpe_irq_cleanup;
1294         } else if (spmi->InterruptType & 2) {
1295                 /* We've got an APIC/SAPIC interrupt. */
1296                 info->irq = spmi->GlobalSystemInterrupt;
1297                 info->irq_setup = std_irq_setup;
1298                 info->irq_cleanup = std_irq_cleanup;
1299         } else {
1300                 /* Use the default interrupt setting. */
1301                 info->irq = 0;
1302                 info->irq_setup = NULL;
1303         }
1304
1305         if (spmi->addr.address_space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
1306                 io_type = "memory";
1307                 info->io_setup = mem_setup;
1308                 info->io_cleanup = mem_cleanup;
1309                 addrs[intf_num] = spmi->addr.address;
1310                 info->io.inputb = mem_inb;
1311                 info->io.outputb = mem_outb;
1312                 info->io.info = &(addrs[intf_num]);
1313         } else if (spmi->addr.address_space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
1314                 io_type = "I/O";
1315                 info->io_setup = port_setup;
1316                 info->io_cleanup = port_cleanup;
1317                 ports[intf_num] = spmi->addr.address;
1318                 info->io.inputb = port_inb;
1319                 info->io.outputb = port_outb;
1320                 info->io.info = &(ports[intf_num]);
1321         } else {
1322                 kfree(info);
1323                 printk("ipmi_si: Unknown ACPI I/O Address type\n");
1324                 return -EIO;
1325         }
1326
1327         *new_info = info;
1328
1329         printk("ipmi_si: ACPI/SPMI specifies \"%s\" %s SI @ 0x%lx\n",
1330                si_type[intf_num], io_type, (unsigned long) spmi->addr.address);
1331         return 0;
1332 }
1333 #endif
1334
1335 #ifdef CONFIG_X86
1336
1337 typedef struct dmi_ipmi_data
1338 {
1339         u8              type;
1340         u8              addr_space;
1341         unsigned long   base_addr;
1342         u8              irq;
1343 }dmi_ipmi_data_t;
1344
1345 typedef struct dmi_header
1346 {
1347         u8      type;
1348         u8      length;
1349         u16     handle;
1350 }dmi_header_t;
1351
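/* Pull what we need out of an SMBIOS type 38 (IPMI Device Information)
   record: offset 0x04 holds the interface type, offset 0x08 the base
   address (bit 0 selects I/O versus memory space), and offset 0x11 the
   interrupt number. */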
1352 static int decode_dmi(dmi_header_t *dm, dmi_ipmi_data_t *ipmi_data)
1353 {
1354         u8              *data = (u8 *)dm;
1355         unsigned long   base_addr;
1356
1357         ipmi_data->type = data[0x04];
1358
1359         memcpy(&base_addr,&data[0x08],sizeof(unsigned long));
1360         if (base_addr & 1) {
1361                 /* I/O */
1362                 base_addr &= 0xFFFE;
1363                 ipmi_data->addr_space = IPMI_IO_ADDR_SPACE;
1364         }
1365         else {
1366                 /* Memory */
1367                 ipmi_data->addr_space = IPMI_MEM_ADDR_SPACE;
1368         }
1369
1370         ipmi_data->base_addr = base_addr;
1371         ipmi_data->irq = data[0x11];
1372
1373         if (is_new_interface(-1, ipmi_data->addr_space,ipmi_data->base_addr))
1374             return 0;
1375
1376         memset(ipmi_data,0,sizeof(dmi_ipmi_data_t));
1377
1378         return -1;
1379 }
1380
1381 static int dmi_table(u32 base, int len, int num,
1382         dmi_ipmi_data_t *ipmi_data)
1383 {
1384         u8                *buf;
1385         struct dmi_header *dm;
1386         u8                *data;
1387         int               i=1;
1388         int               status=-1;
1389
1390         buf = ioremap(base, len);
1391         if(buf==NULL)
1392                 return -1;
1393
1394         data = buf;
1395
1396         while(i<num && (data - buf) < len)
1397         {
1398                 dm=(dmi_header_t *)data;
1399
1400                 if((data-buf+dm->length) >= len)
1401                         break;
1402
1403                 if (dm->type == 38) {
1404                         if (decode_dmi(dm, ipmi_data) == 0) {
1405                                 status = 0;
1406                                 break;
1407                         }
1408                 }
1409
1410                 data+=dm->length;
1411                 while((data-buf) < len && (*data || data[1]))
1412                         data++;
1413                 data+=2;
1414                 i++;
1415         }
1416         iounmap(buf);
1417
1418         return status;
1419 }
1420
1421 static inline int dmi_checksum(u8 *buf)
1422 {
1423         u8   sum=0;
1424         int  a;
1425
1426         for(a=0; a<15; a++)
1427                 sum+=buf[a];
1428         return (sum==0);
1429 }
1430
1431 static int dmi_iterator(dmi_ipmi_data_t *ipmi_data)
1432 {
1433         u8   buf[15];
1434         u32  fp=0xF0000;
1435
1436 #ifdef CONFIG_SIMNOW
1437         return -1;
1438 #endif
1439
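        /* Scan the BIOS area 0xF0000-0xFFFFF on 16-byte boundaries for
           the "_DMI_" anchor with a valid checksum, then walk the table
           it describes. */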
1440         while(fp < 0xFFFFF)
1441         {
1442                 isa_memcpy_fromio(buf, fp, 15);
1443                 if(memcmp(buf, "_DMI_", 5)==0 && dmi_checksum(buf))
1444                 {
1445                         u16 num=buf[13]<<8|buf[12];
1446                         u16 len=buf[7]<<8|buf[6];
1447                         u32 base=buf[11]<<24|buf[10]<<16|buf[9]<<8|buf[8];
1448
1449                         if(dmi_table(base, len, num, ipmi_data) == 0)
1450                                 return 0;
1451                 }
1452                 fp+=16;
1453         }
1454
1455         return -1;
1456 }
1457
1458 static int try_init_smbios(int intf_num, struct smi_info **new_info)
1459 {
1460         struct smi_info   *info;
1461         dmi_ipmi_data_t   ipmi_data;
1462         char              *io_type;
1463         int               status;
1464
1465         status = dmi_iterator(&ipmi_data);
1466
1467         if (status < 0)
1468                 return -ENODEV;
1469
1470         switch(ipmi_data.type) {
1471                 case 0x01: /* KCS */
1472                         si_type[intf_num] = "kcs";
1473                         break;
1474                 case 0x02: /* SMIC */
1475                         si_type[intf_num] = "smic";
1476                         break;
1477                 case 0x03: /* BT */
1478                         si_type[intf_num] = "bt";
1479                         break;
1480                 default:
1481                         printk("ipmi_si: Unknown SMBIOS SI type.\n");
1482                         return -EIO;
1483         }
1484
1485         info = kmalloc(sizeof(*info), GFP_KERNEL);
1486         if (!info) {
1487                 printk(KERN_ERR "ipmi_si: Could not allocate SI data (4)\n");
1488                 return -ENOMEM;
1489         }
1490         memset(info, 0, sizeof(*info));
1491
1492         if (ipmi_data.addr_space == IPMI_MEM_ADDR_SPACE) {
1493                 io_type = "memory";
1494                 info->io_setup = mem_setup;
1495                 info->io_cleanup = mem_cleanup;
1496                 addrs[intf_num] = ipmi_data.base_addr;
1497                 info->io.inputb = mem_inb;
1498                 info->io.outputb = mem_outb;
1499                 info->io.info = &(addrs[intf_num]);
1500         } else if (ipmi_data.addr_space == IPMI_IO_ADDR_SPACE) {
1501                 io_type = "I/O";
1502                 info->io_setup = port_setup;
1503                 info->io_cleanup = port_cleanup;
1504                 ports[intf_num] = ipmi_data.base_addr;
1505                 info->io.inputb = port_inb;
1506                 info->io.outputb = port_outb;
1507                 info->io.info = &(ports[intf_num]);
1508         } else {
1509                 kfree(info);
1510                 printk("ipmi_si: Unknown SMBIOS I/O Address type.\n");
1511                 return -EIO;
1512         }
1513
1514         irqs[intf_num] = ipmi_data.irq;
1515
1516         *new_info = info;
1517
1518         printk("ipmi_si: Found SMBIOS-specified state machine at %s"
1519                " address 0x%lx\n",
1520                io_type, (unsigned long)ipmi_data.base_addr);
1521         return 0;
1522 }
1523 #endif /* CONFIG_X86 */
1524
1525 #ifdef CONFIG_PCI
1526
1527 #define PCI_ERMC_CLASSCODE  0x0C0700
1528 #define PCI_HP_VENDOR_ID    0x103C
1529 #define PCI_MMC_DEVICE_ID   0x121A
1530 #define PCI_MMC_ADDR_CW     0x10
1531
1532 /* Avoid more than one attempt to probe the PCI SMIC. */
1533 static int pci_smic_checked = 0;
1534
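/*
 * Probe strategy used below: first look for the HP management controller
 * by exact vendor/device ID; failing that, fall back to the generic
 * PCI_ERMC_CLASSCODE class and treat the device as an eRMC when its
 * subsystem vendor is HP.  The register base comes from the config word
 * at PCI_MMC_ADDR_CW; bit 0 set means port I/O, which is the only mode
 * handled here (memory-mapped I/O is rejected).
 */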
1535 static int find_pci_smic(int intf_num, struct smi_info **new_info)
1536 {
1537         struct smi_info  *info;
1538         int              error;
1539         struct pci_dev   *pci_dev = NULL;
1540         u16              base_addr;
1541         int              fe_rmc = 0;
1542
1543         if (pci_smic_checked)
1544                 return -ENODEV;
1545
1546         pci_smic_checked = 1;
1547
1548         if ((pci_dev = pci_find_device(PCI_HP_VENDOR_ID, PCI_MMC_DEVICE_ID,
1549                                        NULL)))
1550                 ;
1551         else if ((pci_dev = pci_find_class(PCI_ERMC_CLASSCODE, NULL)) &&
1552                  pci_dev->subsystem_vendor == PCI_HP_VENDOR_ID)
1553                 fe_rmc = 1;
1554         else
1555                 return -ENODEV;
1556
1557         error = pci_read_config_word(pci_dev, PCI_MMC_ADDR_CW, &base_addr);
1558         if (error)
1559         {
1560                 printk(KERN_ERR
1561                        "ipmi_si: pci_read_config_word() failed (%d).\n",
1562                        error);
1563                 return -ENODEV;
1564         }
1565
1566         /* Bit 0: 1 specifies programmed I/O, 0 specifies memory mapped I/O */
1567         if (!(base_addr & 0x0001))
1568         {
1569                 printk(KERN_ERR
1570                        "ipmi_si: memory mapped I/O not supported for PCI"
1571                        " smic.\n");
1572                 return -ENODEV;
1573         }
1574
1575         base_addr &= 0xFFFE;
1576         if (!fe_rmc)
1577                 /* Data register starts at base address + 1 in eRMC */
1578                 ++base_addr;
1579
1580         if (!is_new_interface(-1, IPMI_IO_ADDR_SPACE, base_addr))
1581             return -ENODEV;
1582
1583         info = kmalloc(sizeof(*info), GFP_KERNEL);
1584         if (!info) {
1585                 printk(KERN_ERR "ipmi_si: Could not allocate SI data (5)\n");
1586                 return -ENOMEM;
1587         }
1588         memset(info, 0, sizeof(*info));
1589
1590         info->io_setup = port_setup;
1591         info->io_cleanup = port_cleanup;
1592         ports[intf_num] = base_addr;
1593         info->io.inputb = port_inb;
1594         info->io.outputb = port_outb;
1595         info->io.info = &(ports[intf_num]);
1596
1597         *new_info = info;
1598
1599         irqs[intf_num] = pci_dev->irq;
1600         si_type[intf_num] = "smic";
1601
1602         printk(KERN_INFO "ipmi_si: Found PCI SMIC at I/O address 0x%lx\n",
1603                 (unsigned long) base_addr);
1604
1605         return 0;
1606 }
1607 #endif /* CONFIG_PCI */
1608
1609 static int try_init_plug_and_play(int intf_num, struct smi_info **new_info)
1610 {
1611 #ifdef CONFIG_PCI
1612         if (find_pci_smic(intf_num, new_info)==0)
1613                 return 0;
1614 #endif
1615         /* Include other methods here. */
1616
1617         return -ENODEV;
1618 }
1619
1620
1621 static int try_get_dev_id(struct smi_info *smi_info)
1622 {
1623         unsigned char      msg[2];
1624         unsigned char      *resp;
1625         unsigned long      resp_len;
1626         enum si_sm_result smi_result;
1627         int               rv = 0;
1628
1629         resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
1630         if (!resp)
1631                 return -ENOMEM;
1632
1633         /* Do a Get Device ID command, since it comes back with some
1634            useful info. */
1635         msg[0] = IPMI_NETFN_APP_REQUEST << 2;
1636         msg[1] = IPMI_GET_DEVICE_ID_CMD;
1637         smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
1638
1639         smi_result = smi_info->handlers->event(smi_info->si_sm, 0);
1640         for (;;)
1641         {
1642                 if (smi_result == SI_SM_CALL_WITH_DELAY) {
1643                         set_current_state(TASK_UNINTERRUPTIBLE);
1644                         schedule_timeout(1);
1645                         smi_result = smi_info->handlers->event(
1646                                 smi_info->si_sm, 100);
1647                 }
1648                 else if (smi_result == SI_SM_CALL_WITHOUT_DELAY)
1649                 {
1650                         smi_result = smi_info->handlers->event(
1651                                 smi_info->si_sm, 0);
1652                 }
1653                 else
1654                         break;
1655         }
1656         if (smi_result == SI_SM_HOSED) {
1657                 /* We couldn't get the state machine to run, so whatever's at
1658                    the port is probably not an IPMI SMI interface. */
1659                 rv = -ENODEV;
1660                 goto out;
1661         }
1662
1663         /* Otherwise, we got some data. */
1664         resp_len = smi_info->handlers->get_result(smi_info->si_sm,
1665                                                   resp, IPMI_MAX_MSG_LENGTH);
1666         if (resp_len < 8) {
1667                 /* That's odd, it should be longer; resp[4]..resp[7] are read below. */
1668                 rv = -EINVAL;
1669                 goto out;
1670         }
1671
1672         if ((resp[1] != IPMI_GET_DEVICE_ID_CMD) || (resp[2] != 0)) {
1673                 /* That's odd, it shouldn't be able to fail. */
1674                 rv = -EINVAL;
1675                 goto out;
1676         }
1677
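        /*
         * Layout of the raw response as consumed below (per the IPMI Get
         * Device ID command): resp[0] is the NetFn/LUN byte, resp[1] the
         * command, resp[2] the completion code, resp[3] the device ID,
         * resp[4] the device revision, resp[5] and resp[6] the major and
         * minor firmware revision, and resp[7] the BCD-encoded IPMI
         * version with the major version in the low nibble.
         */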
1678         /* Record info from the get device id, in case we need it. */
1679         smi_info->ipmi_si_dev_rev = resp[4] & 0xf;
1680         smi_info->ipmi_si_fw_rev_major = resp[5] & 0x7f;
1681         smi_info->ipmi_si_fw_rev_minor = resp[6];
1682         smi_info->ipmi_version_major = resp[7] & 0xf;
1683         smi_info->ipmi_version_minor = resp[7] >> 4;
1684
1685  out:
1686         kfree(resp);
1687         return rv;
1688 }
1689
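/*
 * The two read_proc handlers below back the per-interface proc files
 * ("type" and "si_stats") registered later in init_one_smi().  Where they
 * appear in /proc is decided by ipmi_smi_add_proc_entry() in the message
 * handler (typically under a per-interface ipmi directory), not by this
 * driver.
 */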
1690 static int type_file_read_proc(char *page, char **start, off_t off,
1691                                int count, int *eof, void *data)
1692 {
1693         char            *out = (char *) page;
1694         struct smi_info *smi = data;
1695
1696         switch (smi->si_type) {
1697             case SI_KCS:
1698                 return sprintf(out, "kcs\n");
1699             case SI_SMIC:
1700                 return sprintf(out, "smic\n");
1701             case SI_BT:
1702                 return sprintf(out, "bt\n");
1703             default:
1704                 return 0;
1705         }
1706 }
1707
1708 static int stat_file_read_proc(char *page, char **start, off_t off,
1709                                int count, int *eof, void *data)
1710 {
1711         char            *out = (char *) page;
1712         struct smi_info *smi = data;
1713
1714         out += sprintf(out, "interrupts_enabled:    %d\n",
1715                        smi->irq && !smi->interrupt_disabled);
1716         out += sprintf(out, "short_timeouts:        %ld\n",
1717                        smi->short_timeouts);
1718         out += sprintf(out, "long_timeouts:         %ld\n",
1719                        smi->long_timeouts);
1720         out += sprintf(out, "timeout_restarts:      %ld\n",
1721                        smi->timeout_restarts);
1722         out += sprintf(out, "idles:                 %ld\n",
1723                        smi->idles);
1724         out += sprintf(out, "interrupts:            %ld\n",
1725                        smi->interrupts);
1726         out += sprintf(out, "attentions:            %ld\n",
1727                        smi->attentions);
1728         out += sprintf(out, "flag_fetches:          %ld\n",
1729                        smi->flag_fetches);
1730         out += sprintf(out, "hosed_count:           %ld\n",
1731                        smi->hosed_count);
1732         out += sprintf(out, "complete_transactions: %ld\n",
1733                        smi->complete_transactions);
1734         out += sprintf(out, "events:                %ld\n",
1735                        smi->events);
1736         out += sprintf(out, "watchdog_pretimeouts:  %ld\n",
1737                        smi->watchdog_pretimeouts);
1738         out += sprintf(out, "incoming_messages:     %ld\n",
1739                        smi->incoming_messages);
1740
1741         return (out - ((char *) page));
1742 }
1743
1744 /* Returns 0 if initialized, or negative on an error. */
1745 static int init_one_smi(int intf_num, struct smi_info **smi)
1746 {
1747         int             rv;
1748         struct smi_info *new_smi;
1749
1750
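        /*
         * Configuration sources are tried in decreasing order of
         * explicitness: memory and port addresses supplied by the user
         * first, then (only when si_trydefaults is set) ACPI, SMBIOS/DMI,
         * and finally PCI plug-and-play probing.  The first source that
         * succeeds supplies new_smi.
         */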
1751         rv = try_init_mem(intf_num, &new_smi);
1752         if (rv)
1753                 rv = try_init_port(intf_num, &new_smi);
1754 #ifdef CONFIG_ACPI_INTERPRETER
1755         if ((rv) && (si_trydefaults)) {
1756                 rv = try_init_acpi(intf_num, &new_smi);
1757         }
1758 #endif
1759 #ifdef CONFIG_X86
1760         if ((rv) && (si_trydefaults)) {
1761                 rv = try_init_smbios(intf_num, &new_smi);
1762         }
1763 #endif
1764         if ((rv) && (si_trydefaults)) {
1765                 rv = try_init_plug_and_play(intf_num, &new_smi);
1766         }
1767
1768
1769         if (rv)
1770                 return rv;
1771
1772         /* Clear these so we know not to free them unless they were allocated. */
1773         new_smi->intf = NULL;
1774         new_smi->si_sm = NULL;
1775         new_smi->handlers = NULL;
1776
1777         if (!new_smi->irq_setup) {
1778                 new_smi->irq = irqs[intf_num];
1779                 new_smi->irq_setup = std_irq_setup;
1780                 new_smi->irq_cleanup = std_irq_cleanup;
1781         }
1782
1783         /* Default to KCS if no type is specified. */
1784         if (si_type[intf_num] == NULL) {
1785                 if (si_trydefaults)
1786                         si_type[intf_num] = "kcs";
1787                 else {
1788                         rv = -EINVAL;
1789                         goto out_err;
1790                 }
1791         }
1792
1793         /* Set up the state machine to use. */
1794         if (strcmp(si_type[intf_num], "kcs") == 0) {
1795                 new_smi->handlers = &kcs_smi_handlers;
1796                 new_smi->si_type = SI_KCS;
1797         } else if (strcmp(si_type[intf_num], "smic") == 0) {
1798                 new_smi->handlers = &smic_smi_handlers;
1799                 new_smi->si_type = SI_SMIC;
1800         } else if (strcmp(si_type[intf_num], "bt") == 0) {
1801                 new_smi->handlers = &bt_smi_handlers;
1802                 new_smi->si_type = SI_BT;
1803         } else {
1804                 /* No support for anything else yet. */
1805                 rv = -EIO;
1806                 goto out_err;
1807         }
1808
1809         /* Allocate the state machine's data and initialize it. */
1810         new_smi->si_sm = kmalloc(new_smi->handlers->size(), GFP_KERNEL);
1811         if (!new_smi->si_sm) {
1812                 printk(" Could not allocate state machine memory\n");
1813                 rv = -ENOMEM;
1814                 goto out_err;
1815         }
1816         new_smi->io_size = new_smi->handlers->init_data(new_smi->si_sm,
1817                                                         &new_smi->io);
1818
1819         /* Now that we know the I/O size, we can set up the I/O. */
1820         rv = new_smi->io_setup(new_smi);
1821         if (rv) {
1822                 printk(" Could not set up I/O space\n");
1823                 goto out_err;
1824         }
1825
1826         spin_lock_init(&(new_smi->si_lock));
1827         spin_lock_init(&(new_smi->msg_lock));
1828         spin_lock_init(&(new_smi->count_lock));
1829
1830         /* Do low-level detection first. */
1831         if (new_smi->handlers->detect(new_smi->si_sm)) {
1832                 rv = -ENODEV;
1833                 goto out_err;
1834         }
1835
1836         /* Attempt a Get Device ID command.  If it fails, we probably
1837            don't have an SMI here. */
1838         rv = try_get_dev_id(new_smi);
1839         if (rv)
1840                 goto out_err;
1841
1842         /* Try to claim any interrupts. */
1843         new_smi->irq_setup(new_smi);
1844
1845         INIT_LIST_HEAD(&(new_smi->xmit_msgs));
1846         INIT_LIST_HEAD(&(new_smi->hp_xmit_msgs));
1847         new_smi->curr_msg = NULL;
1848         atomic_set(&new_smi->req_events, 0);
1849         new_smi->run_to_completion = 0;
1850
1851         rv = ipmi_register_smi(&handlers,
1852                                new_smi,
1853                                new_smi->ipmi_version_major,
1854                                new_smi->ipmi_version_minor,
1855                                &(new_smi->intf));
1856         if (rv) {
1857                 printk(KERN_ERR
1858                        "ipmi_si: Unable to register device: error %d\n",
1859                        rv);
1860                 goto out_err;
1861         }
1862
1863         rv = ipmi_smi_add_proc_entry(new_smi->intf, "type",
1864                                      type_file_read_proc, NULL,
1865                                      new_smi, THIS_MODULE);
1866         if (rv) {
1867                 printk(KERN_ERR
1868                        "ipmi_si: Unable to create proc entry: %d\n",
1869                        rv);
1870                 goto out_err;
1871         }
1872
1873         rv = ipmi_smi_add_proc_entry(new_smi->intf, "si_stats",
1874                                      stat_file_read_proc, NULL,
1875                                      new_smi, THIS_MODULE);
1876         if (rv) {
1877                 printk(KERN_ERR
1878                        "ipmi_si: Unable to create proc entry: %d\n",
1879                        rv);
1880                 goto out_err;
1881         }
1882
1883         start_clear_flags(new_smi);
1884
1885         /* IRQ is defined to be set when non-zero. */
1886         if (new_smi->irq)
1887                 new_smi->si_state = SI_CLEARING_FLAGS_THEN_SET_IRQ;
1888
1889         new_smi->interrupt_disabled = 0;
1890         new_smi->timer_stopped = 0;
1891         new_smi->stop_operation = 0;
1892
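        /*
         * Kick off the polling timer that drives the state machine: it
         * first fires after SI_TIMEOUT_JIFFIES (10 ms) and is rearmed from
         * the timeout handler while the interface runs, so progress is
         * made even when no interrupt is available.
         */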
1893         init_timer(&(new_smi->si_timer));
1894         new_smi->si_timer.data = (long) new_smi;
1895         new_smi->si_timer.function = smi_timeout;
1896         new_smi->last_timeout_jiffies = jiffies;
1897         new_smi->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES;
1898         add_timer(&(new_smi->si_timer));
1899
1900         *smi = new_smi;
1901
1902         printk(" IPMI %s interface initialized\n", si_type[intf_num]);
1903
1904         return 0;
1905
1906  out_err:
1907         if (new_smi->intf)
1908                 ipmi_unregister_smi(new_smi->intf);
1909
1910         new_smi->irq_cleanup(new_smi);
1911         if (new_smi->si_sm) {
1912                 if (new_smi->handlers)
1913                         new_smi->handlers->cleanup(new_smi->si_sm);
1914                 kfree(new_smi->si_sm);
1915         }
1916         new_smi->io_cleanup(new_smi);
1917         return rv;
1918 }
1919
1920 static __init int init_ipmi_si(void)
1921 {
1922         int  rv = 0;
1923         int  pos = 0;
1924         int  i;
1925         char *str;
1926
1927         if (initialized)
1928                 return 0;
1929         initialized = 1;
1930
1931         /* Parse out the si_type string into its components. */
1932         str = si_type_str;
1933         if (*str != '\0') {
1934                 for (i=0; (i<SI_MAX_PARMS) && (*str != '\0'); i++) {
1935                         si_type[i] = str;
1936                         str = strchr(str, ',');
1937                         if (str) {
1938                                 *str = '\0';
1939                                 str++;
1940                         } else {
1941                                 break;
1942                         }
1943                 }
1944         }
1945
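        /*
         * A typical invocation, assuming the module parameter names
         * declared earlier in this file are "type", "ports" and "irqs"
         * (a hypothetical example; adjust to the actual declarations):
         *
         *   modprobe ipmi_si type=kcs,bt ports=0xca2,0xe4 irqs=0,0
         *
         * With that, the loop above leaves si_type[0]="kcs" and
         * si_type[1]="bt" for the per-interface init below.
         */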
1946         printk(KERN_INFO "IPMI System Interface driver version "
1947                IPMI_SI_VERSION);
1948         if (kcs_smi_handlers.version)
1949                 printk(", KCS version %s", kcs_smi_handlers.version);
1950         if (smic_smi_handlers.version)
1951                 printk(", SMIC version %s", smic_smi_handlers.version);
1952         if (bt_smi_handlers.version)
1953                 printk(", BT version %s", bt_smi_handlers.version);
1954         printk("\n");
1955
1956         rv = init_one_smi(0, &(smi_infos[pos]));
1957         if (rv && !ports[0] && si_trydefaults) {
1958                 /* If we are trying defaults and the initial port is
1959                    not set, then set it. */
1960                 si_type[0] = "kcs";
1961                 ports[0] = DEFAULT_KCS_IO_PORT;
1962                 rv = init_one_smi(0, &(smi_infos[pos]));
1963                 if (rv) {
1964                         /* No KCS - try SMIC */
1965                         si_type[0] = "smic";
1966                         ports[0] = DEFAULT_SMIC_IO_PORT;
1967                         rv = init_one_smi(0, &(smi_infos[pos]));
1968                 }
1969                 if (rv) {
1970                         /* No SMIC - try BT */
1971                         si_type[0] = "bt";
1972                         ports[0] = DEFAULT_BT_IO_PORT;
1973                         rv = init_one_smi(0, &(smi_infos[pos]));
1974                 }
1975         }
1976         if (rv == 0)
1977                 pos++;
1978
1979         for (i=1; i < SI_MAX_PARMS; i++) {
1980                 rv = init_one_smi(i, &(smi_infos[pos]));
1981                 if (rv == 0)
1982                         pos++;
1983         }
1984
1985         if (smi_infos[0] == NULL) {
1986                 printk(KERN_WARNING "ipmi_si: Unable to find any System Interface(s)\n");
1987                 return -ENODEV;
1988         }
1989
1990         return 0;
1991 }
1992 module_init(init_ipmi_si);
1993
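/*
 * Teardown mirrors init_one_smi() in reverse: flag the shutdown and
 * release the IRQ under the locks, wait out any running handlers and the
 * polling timer, unregister from the message handler, then free the state
 * machine and release the I/O region.
 */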
1994 void __exit cleanup_one_si(struct smi_info *to_clean)
1995 {
1996         int           rv;
1997         unsigned long flags;
1998
1999         if (!to_clean)
2000                 return;
2001
2002         /* Tell the timer and interrupt handlers that we are shutting
2003            down. */
2004         spin_lock_irqsave(&(to_clean->si_lock), flags);
2005         spin_lock(&(to_clean->msg_lock));
2006
2007         to_clean->stop_operation = 1;
2008
2009         to_clean->irq_cleanup(to_clean);
2010
2011         spin_unlock(&(to_clean->msg_lock));
2012         spin_unlock_irqrestore(&(to_clean->si_lock), flags);
2013
2014         /* Wait until we know that any interrupt handlers that might
2015            have been running have finished, now that the interrupt
2016            has been freed. */
2017         synchronize_kernel();
2018
2019         /* Wait for the timer to stop.  This avoids race conditions
2020            when removing the timer here. */
2021         while (!to_clean->timer_stopped) {
2022                 set_current_state(TASK_UNINTERRUPTIBLE);
2023                 schedule_timeout(1);
2024         }
2025
2026         rv = ipmi_unregister_smi(to_clean->intf);
2027         if (rv) {
2028                 printk(KERN_ERR
2029                        "ipmi_si: Unable to unregister device: errno=%d\n",
2030                        rv);
2031         }
2032
2033         to_clean->handlers->cleanup(to_clean->si_sm);
2034
2035         kfree(to_clean->si_sm);
2036
2037         to_clean->io_cleanup(to_clean);
2038 }
2039
2040 static __exit void cleanup_ipmi_si(void)
2041 {
2042         int i;
2043
2044         if (!initialized)
2045                 return;
2046
2047         for (i=0; i<SI_MAX_DRIVERS; i++) {
2048                 cleanup_one_si(smi_infos[i]);
2049         }
2050 }
2051 module_exit(cleanup_ipmi_si);
2052
2053 MODULE_LICENSE("GPL");