VServer 1.9.2 (patch-2.6.8.1-vs1.9.2.diff)
linux-2.6.git: drivers/char/ipmi/ipmi_si_intf.c
1 /*
2  * ipmi_si.c
3  *
4  * The interface to the IPMI driver for the system interfaces (KCS, SMIC,
5  * BT).
6  *
7  * Author: MontaVista Software, Inc.
8  *         Corey Minyard <minyard@mvista.com>
9  *         source@mvista.com
10  *
11  * Copyright 2002 MontaVista Software Inc.
12  *
13  *  This program is free software; you can redistribute it and/or modify it
14  *  under the terms of the GNU General Public License as published by the
15  *  Free Software Foundation; either version 2 of the License, or (at your
16  *  option) any later version.
17  *
18  *
19  *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
20  *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
21  *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
22  *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
23  *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
24  *  BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
25  *  OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
26  *  ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
27  *  TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
28  *  USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29  *
30  *  You should have received a copy of the GNU General Public License along
31  *  with this program; if not, write to the Free Software Foundation, Inc.,
32  *  675 Mass Ave, Cambridge, MA 02139, USA.
33  */
34
35 /*
36  * This file holds the "policy" for the interface to the SMI state
37  * machine.  It does the configuration, handles timers and interrupts,
38  * and drives the real SMI state machine.
39  */
40
41 #include <linux/config.h>
42 #include <linux/module.h>
43 #include <linux/moduleparam.h>
44 #include <asm/system.h>
45 #include <linux/sched.h>
46 #include <linux/timer.h>
47 #include <linux/errno.h>
48 #include <linux/spinlock.h>
49 #include <linux/slab.h>
50 #include <linux/delay.h>
51 #include <linux/list.h>
52 #include <linux/pci.h>
53 #include <linux/ioport.h>
54 #include <asm/irq.h>
55 #ifdef CONFIG_HIGH_RES_TIMERS
56 #include <linux/hrtime.h>
57 # if defined(schedule_next_int)
58 /* Old high-res timer code, do translations. */
59 #  define get_arch_cycles(a) quick_update_jiffies_sub(a)
60 #  define arch_cycles_per_jiffy cycles_per_jiffies
61 # endif
62 static inline void add_usec_to_timer(struct timer_list *t, long v)
63 {
64         t->sub_expires += nsec_to_arch_cycle(v * 1000);
65         while (t->sub_expires >= arch_cycles_per_jiffy)
66         {
67                 t->expires++;
68                 t->sub_expires -= arch_cycles_per_jiffy;
69         }
70 }
71 #endif
72 #include <linux/interrupt.h>
73 #include <linux/rcupdate.h>
74 #include <linux/ipmi_smi.h>
75 #include <asm/io.h>
76 #include "ipmi_si_sm.h"
77 #include <linux/init.h>
78
79 #define IPMI_SI_VERSION "v32"
80
81 /* Measure times between events in the driver. */
82 #undef DEBUG_TIMING
83
84 /* Call every 10 ms. */
85 #define SI_TIMEOUT_TIME_USEC    10000
86 #define SI_USEC_PER_JIFFY       (1000000/HZ)
87 #define SI_TIMEOUT_JIFFIES      (SI_TIMEOUT_TIME_USEC/SI_USEC_PER_JIFFY)
88 #define SI_SHORT_TIMEOUT_USEC  250 /* .25ms when the SM requests a
89                                        short timeout */
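/* For example, with HZ=1000 (a common 2.6 default), SI_USEC_PER_JIFFY
   is 1000 and SI_TIMEOUT_JIFFIES works out to 10 jiffies, i.e. the
   10 ms polling interval noted above. */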
90
91 enum si_intf_state {
92         SI_NORMAL,
93         SI_GETTING_FLAGS,
94         SI_GETTING_EVENTS,
95         SI_CLEARING_FLAGS,
96         SI_CLEARING_FLAGS_THEN_SET_IRQ,
97         SI_GETTING_MESSAGES,
98         SI_ENABLE_INTERRUPTS1,
99         SI_ENABLE_INTERRUPTS2
100         /* FIXME - add watchdog stuff. */
101 };
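/* Typical flow, as driven by handle_transaction_done() below: an ATTN
   or a requested event fetch moves SI_NORMAL to SI_GETTING_FLAGS, the
   returned flags then select SI_GETTING_MESSAGES or SI_GETTING_EVENTS,
   and the machine drops back to SI_NORMAL once the flags are drained.
   start_clear_flags() enters SI_CLEARING_FLAGS; the _THEN_SET_IRQ
   variant additionally chains into SI_ENABLE_INTERRUPTS1/2 via
   start_enable_irq(). */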
102
103 enum si_type {
104     SI_KCS, SI_SMIC, SI_BT
105 };
106
107 struct smi_info
108 {
109         ipmi_smi_t             intf;
110         struct si_sm_data      *si_sm;
111         struct si_sm_handlers  *handlers;
112         enum si_type           si_type;
113         spinlock_t             si_lock;
114         spinlock_t             msg_lock;
115         struct list_head       xmit_msgs;
116         struct list_head       hp_xmit_msgs;
117         struct ipmi_smi_msg    *curr_msg;
118         enum si_intf_state     si_state;
119
120         /* Used to handle the various types of I/O that can occur with
121            IPMI */
122         struct si_sm_io io;
123         int (*io_setup)(struct smi_info *info);
124         void (*io_cleanup)(struct smi_info *info);
125         int (*irq_setup)(struct smi_info *info);
126         void (*irq_cleanup)(struct smi_info *info);
127         unsigned int io_size;
128
129         /* Flags from the last GET_MSG_FLAGS command, used when an ATTN
130            is set to hold the flags until we are done handling everything
131            from the flags. */
132 #define RECEIVE_MSG_AVAIL       0x01
133 #define EVENT_MSG_BUFFER_FULL   0x02
134 #define WDT_PRE_TIMEOUT_INT     0x08
135         unsigned char       msg_flags;
136
137         /* If set to true, this will request events the next time the
138            state machine is idle. */
139         atomic_t            req_events;
140
141         /* If true, run the state machine to completion on every send
142            call.  Generally used after a panic to make sure stuff goes
143            out. */
144         int                 run_to_completion;
145
146         /* The I/O port of an SI interface. */
147         int                 port;
148
149         /* zero if no irq; */
150         int                 irq;
151
152         /* The timer for this si. */
153         struct timer_list   si_timer;
154
155         /* The time (in jiffies) the last timeout occurred at. */
156         unsigned long       last_timeout_jiffies;
157
158         /* Used to gracefully stop the timer without race conditions. */
159         volatile int        stop_operation;
160         volatile int        timer_stopped;
161
162         /* The driver will disable interrupts when it gets into a
163            situation where it cannot handle messages due to lack of
164            memory.  Once that situation clears up, it will re-enable
165            interrupts. */
166         int interrupt_disabled;
167
168         unsigned char ipmi_si_dev_rev;
169         unsigned char ipmi_si_fw_rev_major;
170         unsigned char ipmi_si_fw_rev_minor;
171         unsigned char ipmi_version_major;
172         unsigned char ipmi_version_minor;
173
174         /* Counters and things for the proc filesystem. */
175         spinlock_t count_lock;
176         unsigned long short_timeouts;
177         unsigned long long_timeouts;
178         unsigned long timeout_restarts;
179         unsigned long idles;
180         unsigned long interrupts;
181         unsigned long attentions;
182         unsigned long flag_fetches;
183         unsigned long hosed_count;
184         unsigned long complete_transactions;
185         unsigned long events;
186         unsigned long watchdog_pretimeouts;
187         unsigned long incoming_messages;
188 };
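/* Locking, as used below: si_lock guards the state machine and
   si_state, msg_lock guards the two transmit queues, and count_lock
   guards only the statistics counters.  Where both are needed (see
   start_next_msg()), si_lock is taken outside msg_lock. */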
189
190 static void si_restart_short_timer(struct smi_info *smi_info);
191
192 static void deliver_recv_msg(struct smi_info *smi_info,
193                              struct ipmi_smi_msg *msg)
194 {
195         /* Deliver the message to the upper layer with the lock
196            released. */
197         spin_unlock(&(smi_info->si_lock));
198         ipmi_smi_msg_received(smi_info->intf, msg);
199         spin_lock(&(smi_info->si_lock));
200 }
201
202 static void return_hosed_msg(struct smi_info *smi_info)
203 {
204         struct ipmi_smi_msg *msg = smi_info->curr_msg;
205
206         /* Make it a response */
207         msg->rsp[0] = msg->data[0] | 4;
208         msg->rsp[1] = msg->data[1];
209         msg->rsp[2] = 0xFF; /* Unknown error. */
210         msg->rsp_size = 3;
211
212         smi_info->curr_msg = NULL;
213         deliver_recv_msg(smi_info, msg);
214 }
215
216 static enum si_sm_result start_next_msg(struct smi_info *smi_info)
217 {
218         int              rv;
219         struct list_head *entry = NULL;
220 #ifdef DEBUG_TIMING
221         struct timeval t;
222 #endif
223
224         /* No need to save flags, we already have interrupts off and we
225            already hold the SMI lock. */
226         spin_lock(&(smi_info->msg_lock));
227
228         /* Pick the high priority queue first. */
229         if (! list_empty(&(smi_info->hp_xmit_msgs))) {
230                 entry = smi_info->hp_xmit_msgs.next;
231         } else if (! list_empty(&(smi_info->xmit_msgs))) {
232                 entry = smi_info->xmit_msgs.next;
233         }
234
235         if (!entry) {
236                 smi_info->curr_msg = NULL;
237                 rv = SI_SM_IDLE;
238         } else {
239                 int err;
240
241                 list_del(entry);
242                 smi_info->curr_msg = list_entry(entry,
243                                                 struct ipmi_smi_msg,
244                                                 link);
245 #ifdef DEBUG_TIMING
246                 do_gettimeofday(&t);
247                 printk("**Start2: %ld.%9.9ld\n", t.tv_sec, t.tv_usec);
248 #endif
249                 err = smi_info->handlers->start_transaction(
250                         smi_info->si_sm,
251                         smi_info->curr_msg->data,
252                         smi_info->curr_msg->data_size);
253                 if (err) {
254                         return_hosed_msg(smi_info);
255                 }
256
257                 rv = SI_SM_CALL_WITHOUT_DELAY;
258         }
259         spin_unlock(&(smi_info->msg_lock));
260
261         return rv;
262 }
263
264 static void start_enable_irq(struct smi_info *smi_info)
265 {
266         unsigned char msg[2];
267
268         /* If we are enabling interrupts, we have to tell the
269            BMC to use them. */
270         msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
271         msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
272
273         smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
274         smi_info->si_state = SI_ENABLE_INTERRUPTS1;
275 }
276
277 static void start_clear_flags(struct smi_info *smi_info)
278 {
279         unsigned char msg[3];
280
281         /* Make sure the watchdog pre-timeout flag is not set at startup. */
282         msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
283         msg[1] = IPMI_CLEAR_MSG_FLAGS_CMD;
284         msg[2] = WDT_PRE_TIMEOUT_INT;
285
286         smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3);
287         smi_info->si_state = SI_CLEARING_FLAGS;
288 }
289
290 /* When we have a situation where we run out of memory and cannot
291    allocate messages, we just leave them in the BMC and run the system
292    polled until we can allocate some memory.  Once we have some
293    memory, we will re-enable the interrupt. */
294 static inline void disable_si_irq(struct smi_info *smi_info)
295 {
296         if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
297                 disable_irq_nosync(smi_info->irq);
298                 smi_info->interrupt_disabled = 1;
299         }
300 }
301
302 static inline void enable_si_irq(struct smi_info *smi_info)
303 {
304         if ((smi_info->irq) && (smi_info->interrupt_disabled)) {
305                 enable_irq(smi_info->irq);
306                 smi_info->interrupt_disabled = 0;
307         }
308 }
309
310 static void handle_flags(struct smi_info *smi_info)
311 {
312         if (smi_info->msg_flags & WDT_PRE_TIMEOUT_INT) {
313                 /* Watchdog pre-timeout */
314                 spin_lock(&smi_info->count_lock);
315                 smi_info->watchdog_pretimeouts++;
316                 spin_unlock(&smi_info->count_lock);
317
318                 start_clear_flags(smi_info);
319                 smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT;
320                 spin_unlock(&(smi_info->si_lock));
321                 ipmi_smi_watchdog_pretimeout(smi_info->intf);
322                 spin_lock(&(smi_info->si_lock));
323         } else if (smi_info->msg_flags & RECEIVE_MSG_AVAIL) {
324                 /* Messages available. */
325                 smi_info->curr_msg = ipmi_alloc_smi_msg();
326                 if (!smi_info->curr_msg) {
327                         disable_si_irq(smi_info);
328                         smi_info->si_state = SI_NORMAL;
329                         return;
330                 }
331                 enable_si_irq(smi_info);
332
333                 smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
334                 smi_info->curr_msg->data[1] = IPMI_GET_MSG_CMD;
335                 smi_info->curr_msg->data_size = 2;
336
337                 smi_info->handlers->start_transaction(
338                         smi_info->si_sm,
339                         smi_info->curr_msg->data,
340                         smi_info->curr_msg->data_size);
341                 smi_info->si_state = SI_GETTING_MESSAGES;
342         } else if (smi_info->msg_flags & EVENT_MSG_BUFFER_FULL) {
343                 /* Events available. */
344                 smi_info->curr_msg = ipmi_alloc_smi_msg();
345                 if (!smi_info->curr_msg) {
346                         disable_si_irq(smi_info);
347                         smi_info->si_state = SI_NORMAL;
348                         return;
349                 }
350                 enable_si_irq(smi_info);
351
352                 smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
353                 smi_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD;
354                 smi_info->curr_msg->data_size = 2;
355
356                 smi_info->handlers->start_transaction(
357                         smi_info->si_sm,
358                         smi_info->curr_msg->data,
359                         smi_info->curr_msg->data_size);
360                 smi_info->si_state = SI_GETTING_EVENTS;
361         } else {
362                 smi_info->si_state = SI_NORMAL;
363         }
364 }
365
366 static void handle_transaction_done(struct smi_info *smi_info)
367 {
368         struct ipmi_smi_msg *msg;
369 #ifdef DEBUG_TIMING
370         struct timeval t;
371
372         do_gettimeofday(&t);
373         printk("**Done: %ld.%9.9ld\n", t.tv_sec, t.tv_usec);
374 #endif
375         switch (smi_info->si_state) {
376         case SI_NORMAL:
377                 if (!smi_info->curr_msg)
378                         break;
379
380                 smi_info->curr_msg->rsp_size
381                         = smi_info->handlers->get_result(
382                                 smi_info->si_sm,
383                                 smi_info->curr_msg->rsp,
384                                 IPMI_MAX_MSG_LENGTH);
385
386                 /* Do this here because deliver_recv_msg() releases the
387                    lock, and a new message can be put in during the
388                    time the lock is released. */
389                 msg = smi_info->curr_msg;
390                 smi_info->curr_msg = NULL;
391                 deliver_recv_msg(smi_info, msg);
392                 break;
393
394         case SI_GETTING_FLAGS:
395         {
396                 unsigned char msg[4];
397                 unsigned int  len;
398
399                 /* We got the flags from the SMI, now handle them. */
400                 len = smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
401                 if (msg[2] != 0) {
402                         /* Error fetching flags, just give up for
403                            now. */
404                         smi_info->si_state = SI_NORMAL;
405                 } else if (len < 3) {
406                         /* Hmm, no flags.  That's technically illegal, but
407                            don't use uninitialized data. */
408                         smi_info->si_state = SI_NORMAL;
409                 } else {
410                         smi_info->msg_flags = msg[3];
411                         handle_flags(smi_info);
412                 }
413                 break;
414         }
415
416         case SI_CLEARING_FLAGS:
417         case SI_CLEARING_FLAGS_THEN_SET_IRQ:
418         {
419                 unsigned char msg[3];
420
421                 /* We cleared the flags. */
422                 smi_info->handlers->get_result(smi_info->si_sm, msg, 3);
423                 if (msg[2] != 0) {
424                         /* Error clearing flags */
425                         printk(KERN_WARNING
426                                "ipmi_si: Error clearing flags: %2.2x\n",
427                                msg[2]);
428                 }
429                 if (smi_info->si_state == SI_CLEARING_FLAGS_THEN_SET_IRQ)
430                         start_enable_irq(smi_info);
431                 else
432                         smi_info->si_state = SI_NORMAL;
433                 break;
434         }
435
436         case SI_GETTING_EVENTS:
437         {
438                 smi_info->curr_msg->rsp_size
439                         = smi_info->handlers->get_result(
440                                 smi_info->si_sm,
441                                 smi_info->curr_msg->rsp,
442                                 IPMI_MAX_MSG_LENGTH);
443
444                 /* Do this here because deliver_recv_msg() releases the
445                    lock, and a new message can be put in during the
446                    time the lock is released. */
447                 msg = smi_info->curr_msg;
448                 smi_info->curr_msg = NULL;
449                 if (msg->rsp[2] != 0) {
450                         /* Error getting event, probably done. */
451                         msg->done(msg);
452
453                         /* Take off the event flag. */
454                         smi_info->msg_flags &= ~EVENT_MSG_BUFFER_FULL;
455                 } else {
456                         spin_lock(&smi_info->count_lock);
457                         smi_info->events++;
458                         spin_unlock(&smi_info->count_lock);
459
460                         deliver_recv_msg(smi_info, msg);
461                 }
462                 handle_flags(smi_info);
463                 break;
464         }
465
466         case SI_GETTING_MESSAGES:
467         {
468                 smi_info->curr_msg->rsp_size
469                         = smi_info->handlers->get_result(
470                                 smi_info->si_sm,
471                                 smi_info->curr_msg->rsp,
472                                 IPMI_MAX_MSG_LENGTH);
473
474                 /* Do this here because deliver_recv_msg() releases the
475                    lock, and a new message can be put in during the
476                    time the lock is released. */
477                 msg = smi_info->curr_msg;
478                 smi_info->curr_msg = NULL;
479                 if (msg->rsp[2] != 0) {
480                         /* Error getting the message, probably done. */
481                         msg->done(msg);
482
483                         /* Take off the msg flag. */
484                         smi_info->msg_flags &= ~RECEIVE_MSG_AVAIL;
485                 } else {
486                         spin_lock(&smi_info->count_lock);
487                         smi_info->incoming_messages++;
488                         spin_unlock(&smi_info->count_lock);
489
490                         deliver_recv_msg(smi_info, msg);
491                 }
492                 handle_flags(smi_info);
493                 break;
494         }
495
496         case SI_ENABLE_INTERRUPTS1:
497         {
498                 unsigned char msg[4];
499
500                 /* We got the flags from the SMI, now handle them. */
501                 smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
502                 if (msg[2] != 0) {
503                         printk(KERN_WARNING
504                                "ipmi_si: Could not enable interrupts"
505                                ", failed get, using polled mode.\n");
506                         smi_info->si_state = SI_NORMAL;
507                 } else {
508                         msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
509                         msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
510                         msg[2] = msg[3] | 1; /* enable msg queue int */
511                         smi_info->handlers->start_transaction(
512                                 smi_info->si_sm, msg, 3);
513                         smi_info->si_state = SI_ENABLE_INTERRUPTS2;
514                 }
515                 break;
516         }
517
518         case SI_ENABLE_INTERRUPTS2:
519         {
520                 unsigned char msg[4];
521
522                 /* We got the flags from the SMI, now handle them. */
523                 smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
524                 if (msg[2] != 0) {
525                         printk(KERN_WARNING
526                                "ipmi_si: Could not enable interrupts"
527                                ", failed set, using polled mode.\n");
528                 }
529                 smi_info->si_state = SI_NORMAL;
530                 break;
531         }
532         }
533 }
534
535 /* Called on timeouts and events.  Timeouts should pass the elapsed
536    time, interrupts should pass in zero. */
537 static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
538                                            int time)
539 {
540         enum si_sm_result si_sm_result;
541
542  restart:
543         /* There used to be a loop here that waited a little while
544            (around 25us) before giving up.  That turned out to be
545            pointless, the minimum delays I was seeing were in the 300us
546            range, which is far too long to wait in an interrupt.  So
547            we just run until the state machine tells us something
548            happened or it needs a delay. */
549         si_sm_result = smi_info->handlers->event(smi_info->si_sm, time);
550         time = 0;
551         while (si_sm_result == SI_SM_CALL_WITHOUT_DELAY)
552         {
553                 si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
554         }
555
556         if (si_sm_result == SI_SM_TRANSACTION_COMPLETE)
557         {
558                 spin_lock(&smi_info->count_lock);
559                 smi_info->complete_transactions++;
560                 spin_unlock(&smi_info->count_lock);
561
562                 handle_transaction_done(smi_info);
563                 si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
564         }
565         else if (si_sm_result == SI_SM_HOSED)
566         {
567                 spin_lock(&smi_info->count_lock);
568                 smi_info->hosed_count++;
569                 spin_unlock(&smi_info->count_lock);
570
571                 if (smi_info->curr_msg != NULL) {
572                         /* If we were handling a user message, format
573                            a response to send to the upper layer to
574                            tell it about the error. */
575                         return_hosed_msg(smi_info);
576                 }
577                 si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
578                 smi_info->si_state = SI_NORMAL;
579         }
580
581         /* We prefer handling attn over new messages. */
582         if (si_sm_result == SI_SM_ATTN)
583         {
584                 unsigned char msg[2];
585
586                 spin_lock(&smi_info->count_lock);
587                 smi_info->attentions++;
588                 spin_unlock(&smi_info->count_lock);
589
590                 /* Got an attn, send down a get message flags to see
591                    what's causing it.  It would be better to handle
592                    this in the upper layer, but due to the way
593                    interrupts work with the SMI, that's not really
594                    possible. */
595                 msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
596                 msg[1] = IPMI_GET_MSG_FLAGS_CMD;
597
598                 smi_info->handlers->start_transaction(
599                         smi_info->si_sm, msg, 2);
600                 smi_info->si_state = SI_GETTING_FLAGS;
601                 goto restart;
602         }
603
604         /* If we are currently idle, try to start the next message. */
605         if (si_sm_result == SI_SM_IDLE) {
606                 spin_lock(&smi_info->count_lock);
607                 smi_info->idles++;
608                 spin_unlock(&smi_info->count_lock);
609
610                 si_sm_result = start_next_msg(smi_info);
611                 if (si_sm_result != SI_SM_IDLE)
612                         goto restart;
613         }
614
615         if ((si_sm_result == SI_SM_IDLE)
616             && (atomic_read(&smi_info->req_events)))
617         {
618                 /* We are idle and the upper layer requested that I fetch
619                    events, so do so. */
620                 unsigned char msg[2];
621
622                 spin_lock(&smi_info->count_lock);
623                 smi_info->flag_fetches++;
624                 spin_unlock(&smi_info->count_lock);
625
626                 atomic_set(&smi_info->req_events, 0);
627                 msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
628                 msg[1] = IPMI_GET_MSG_FLAGS_CMD;
629
630                 smi_info->handlers->start_transaction(
631                         smi_info->si_sm, msg, 2);
632                 smi_info->si_state = SI_GETTING_FLAGS;
633                 goto restart;
634         }
635
636         return si_sm_result;
637 }
638
639 static void sender(void                *send_info,
640                    struct ipmi_smi_msg *msg,
641                    int                 priority)
642 {
643         struct smi_info   *smi_info = send_info;
644         enum si_sm_result result;
645         unsigned long     flags;
646 #ifdef DEBUG_TIMING
647         struct timeval    t;
648 #endif
649
650         spin_lock_irqsave(&(smi_info->msg_lock), flags);
651 #ifdef DEBUG_TIMING
652         do_gettimeofday(&t);
653         printk("**Enqueue: %ld.%9.9ld\n", t.tv_sec, t.tv_usec);
654 #endif
655
656         if (smi_info->run_to_completion) {
657                 /* If we are running to completion, then throw it in
658                    the list and run transactions until everything is
659                    clear.  Priority doesn't matter here. */
660                 list_add_tail(&(msg->link), &(smi_info->xmit_msgs));
661
662                 /* We have to release the msg lock and claim the smi
663                    lock in this case, because of race conditions. */
664                 spin_unlock_irqrestore(&(smi_info->msg_lock), flags);
665
666                 spin_lock_irqsave(&(smi_info->si_lock), flags);
667                 result = smi_event_handler(smi_info, 0);
668                 while (result != SI_SM_IDLE) {
669                         udelay(SI_SHORT_TIMEOUT_USEC);
670                         result = smi_event_handler(smi_info,
671                                                    SI_SHORT_TIMEOUT_USEC);
672                 }
673                 spin_unlock_irqrestore(&(smi_info->si_lock), flags);
674                 return;
675         } else {
676                 if (priority > 0) {
677                         list_add_tail(&(msg->link), &(smi_info->hp_xmit_msgs));
678                 } else {
679                         list_add_tail(&(msg->link), &(smi_info->xmit_msgs));
680                 }
681         }
682         spin_unlock_irqrestore(&(smi_info->msg_lock), flags);
683
684         spin_lock_irqsave(&(smi_info->si_lock), flags);
685         if ((smi_info->si_state == SI_NORMAL)
686             && (smi_info->curr_msg == NULL))
687         {
688                 start_next_msg(smi_info);
689                 si_restart_short_timer(smi_info);
690         }
691         spin_unlock_irqrestore(&(smi_info->si_lock), flags);
692 }
693
694 static void set_run_to_completion(void *send_info, int i_run_to_completion)
695 {
696         struct smi_info   *smi_info = send_info;
697         enum si_sm_result result;
698         unsigned long     flags;
699
700         spin_lock_irqsave(&(smi_info->si_lock), flags);
701
702         smi_info->run_to_completion = i_run_to_completion;
703         if (i_run_to_completion) {
704                 result = smi_event_handler(smi_info, 0);
705                 while (result != SI_SM_IDLE) {
706                         udelay(SI_SHORT_TIMEOUT_USEC);
707                         result = smi_event_handler(smi_info,
708                                                    SI_SHORT_TIMEOUT_USEC);
709                 }
710         }
711
712         spin_unlock_irqrestore(&(smi_info->si_lock), flags);
713 }
714
715 static void poll(void *send_info)
716 {
717         struct smi_info *smi_info = send_info;
718
719         smi_event_handler(smi_info, 0);
720 }
721
722 static void request_events(void *send_info)
723 {
724         struct smi_info *smi_info = send_info;
725
726         atomic_set(&smi_info->req_events, 1);
727 }
728
729 static int initialized = 0;
730
731 /* Must be called with interrupts off and with the si_lock held. */
732 static void si_restart_short_timer(struct smi_info *smi_info)
733 {
734 #if defined(CONFIG_HIGH_RES_TIMERS)
735         unsigned long flags;
736         unsigned long jiffies_now;
737
738         if (del_timer(&(smi_info->si_timer))) {
739                 /* If we don't delete the timer, then it will go off
740                    immediately, anyway.  So we only process if we
741                    actually delete the timer. */
742
743                 /* We already have irqsave on, so no need for it
744                    here. */
745                 read_lock(&xtime_lock);
746                 jiffies_now = jiffies;
747                 smi_info->si_timer.expires = jiffies_now;
748                 smi_info->si_timer.sub_expires = get_arch_cycles(jiffies_now);
749
750                 add_usec_to_timer(&smi_info->si_timer, SI_SHORT_TIMEOUT_USEC);
751
752                 add_timer(&(smi_info->si_timer));
753                 spin_lock_irqsave(&smi_info->count_lock, flags);
754                 smi_info->timeout_restarts++;
755                 spin_unlock_irqrestore(&smi_info->count_lock, flags);
756         }
757 #endif
758 }
759
760 static void smi_timeout(unsigned long data)
761 {
762         struct smi_info   *smi_info = (struct smi_info *) data;
763         enum si_sm_result smi_result;
764         unsigned long     flags;
765         unsigned long     jiffies_now;
766         unsigned long     time_diff;
767 #ifdef DEBUG_TIMING
768         struct timeval    t;
769 #endif
770
771         if (smi_info->stop_operation) {
772                 smi_info->timer_stopped = 1;
773                 return;
774         }
775
776         spin_lock_irqsave(&(smi_info->si_lock), flags);
777 #ifdef DEBUG_TIMING
778         do_gettimeofday(&t);
779         printk("**Timer: %ld.%9.9ld\n", t.tv_sec, t.tv_usec);
780 #endif
781         jiffies_now = jiffies;
782         time_diff = ((jiffies_now - smi_info->last_timeout_jiffies)
783                      * SI_USEC_PER_JIFFY);
784         smi_result = smi_event_handler(smi_info, time_diff);
785
786         spin_unlock_irqrestore(&(smi_info->si_lock), flags);
787
788         smi_info->last_timeout_jiffies = jiffies_now;
789
790         if ((smi_info->irq) && (! smi_info->interrupt_disabled)) {
791                 /* Running with interrupts, only do long timeouts. */
792                 smi_info->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES;
793                 spin_lock_irqsave(&smi_info->count_lock, flags);
794                 smi_info->long_timeouts++;
795                 spin_unlock_irqrestore(&smi_info->count_lock, flags);
796                 goto do_add_timer;
797         }
798
799         /* If the state machine asks for a short delay, then shorten
800            the timer timeout. */
801         if (smi_result == SI_SM_CALL_WITH_DELAY) {
802                 spin_lock_irqsave(&smi_info->count_lock, flags);
803                 smi_info->short_timeouts++;
804                 spin_unlock_irqrestore(&smi_info->count_lock, flags);
805 #if defined(CONFIG_HIGH_RES_TIMERS)
806                 read_lock(&xtime_lock);
807                 smi_info->si_timer.expires = jiffies;
808                 smi_info->si_timer.sub_expires
809                         = get_arch_cycles(smi_info->si_timer.expires);
810                 read_unlock(&xtime_lock);
811                 add_usec_to_timer(&smi_info->si_timer, SI_SHORT_TIMEOUT_USEC);
812 #else
813                 smi_info->si_timer.expires = jiffies + 1;
814 #endif
815         } else {
816                 spin_lock_irqsave(&smi_info->count_lock, flags);
817                 smi_info->long_timeouts++;
818                 spin_unlock_irqrestore(&smi_info->count_lock, flags);
819                 smi_info->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES;
820 #if defined(CONFIG_HIGH_RES_TIMERS)
821                 smi_info->si_timer.sub_expires = 0;
822 #endif
823         }
824
825  do_add_timer:
826         add_timer(&(smi_info->si_timer));
827 }
828
829 static irqreturn_t si_irq_handler(int irq, void *data, struct pt_regs *regs)
830 {
831         struct smi_info *smi_info = data;
832         unsigned long   flags;
833 #ifdef DEBUG_TIMING
834         struct timeval  t;
835 #endif
836
837         spin_lock_irqsave(&(smi_info->si_lock), flags);
838
839         spin_lock(&smi_info->count_lock);
840         smi_info->interrupts++;
841         spin_unlock(&smi_info->count_lock);
842
843         if (smi_info->stop_operation)
844                 goto out;
845
846 #ifdef DEBUG_TIMING
847         do_gettimeofday(&t);
848         printk("**Interrupt: %ld.%9.9ld\n", t.tv_sec, t.tv_usec);
849 #endif
850         smi_event_handler(smi_info, 0);
851  out:
852         spin_unlock_irqrestore(&(smi_info->si_lock), flags);
853         return IRQ_HANDLED;
854 }
855
856 static struct ipmi_smi_handlers handlers =
857 {
858         .owner                  = THIS_MODULE,
859         .sender                 = sender,
860         .request_events         = request_events,
861         .set_run_to_completion  = set_run_to_completion,
862         .poll                   = poll,
863 };
864
865 /* There can be 4 IO ports passed in (with or without IRQs), 4 addresses,
866    a default IO port, and 1 ACPI/SPMI address.  That sets SI_MAX_DRIVERS */
867
868 #define SI_MAX_PARMS 4
869 #define SI_MAX_DRIVERS ((SI_MAX_PARMS * 2) + 2)
870 static struct smi_info *smi_infos[SI_MAX_DRIVERS] =
871 { NULL, NULL, NULL, NULL };
872
873 #define DEVICE_NAME "ipmi_si"
874
875 #define DEFAULT_KCS_IO_PORT 0xca2
876 #define DEFAULT_SMIC_IO_PORT 0xca9
877 #define DEFAULT_BT_IO_PORT   0xe4
878
879 static int           si_trydefaults = 1;
880 static char          *si_type[SI_MAX_PARMS] = { NULL, NULL, NULL, NULL };
881 #define MAX_SI_TYPE_STR 30
882 static char          si_type_str[MAX_SI_TYPE_STR];
883 static unsigned long addrs[SI_MAX_PARMS] = { 0, 0, 0, 0 };
884 static int num_addrs = 0;
885 static unsigned int  ports[SI_MAX_PARMS] = { 0, 0, 0, 0 };
886 static int num_ports = 0;
887 static int           irqs[SI_MAX_PARMS] = { 0, 0, 0, 0 };
888 static int num_irqs = 0;
889
890
891 module_param_named(trydefaults, si_trydefaults, bool, 0);
892 MODULE_PARM_DESC(trydefaults, "Setting this to 'false' will disable the"
893                  " default scan of the KCS and SMIC interfaces at the standard"
894                  " addresses");
895 module_param_string(type, si_type_str, MAX_SI_TYPE_STR, 0);
896 MODULE_PARM_DESC(type, "Defines the type of each interface, each"
897                  " interface separated by commas.  The types are 'kcs',"
898                  " 'smic', and 'bt'.  For example si_type=kcs,bt will set"
899                  " the first interface to kcs and the second to bt");
900 module_param_array(addrs, long, num_addrs, 0);
901 MODULE_PARM_DESC(addrs, "Sets the memory address of each interface, the"
902                  " addresses separated by commas.  Only use if an interface"
903                  " is in memory.  Otherwise, set it to zero or leave"
904                  " it blank.");
905 module_param_array(ports, int, num_ports, 0);
906 MODULE_PARM_DESC(ports, "Sets the port address of each interface, the"
907                  " addresses separated by commas.  Only use if an interface"
908                  " is a port.  Otherwise, set it to zero or leave"
909                  " it blank.");
910 module_param_array(irqs, int, num_irqs, 0);
911 MODULE_PARM_DESC(irqs, "Sets the interrupt of each interface, the"
912                  " addresses separated by commas.  Only use if an interface"
913                  " has an interrupt.  Otherwise, set it to zero or leave"
914                  " it blank.");
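/* As an illustration (the values here are hypothetical, not
   recommendations): a single KCS interface at the default port,
   polled, could be requested with
       modprobe ipmi_si type=kcs ports=0xca2 irqs=0
   and a second BT interface added by listing both, comma separated:
       modprobe ipmi_si type=kcs,bt ports=0xca2,0xe4 */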
915
916 #define IPMI_MEM_ADDR_SPACE 1
917 #define IPMI_IO_ADDR_SPACE  2
918
919 #if defined(CONFIG_ACPI_INTERPRETER) || defined(CONFIG_X86) || defined(CONFIG_PCI)
920 static int is_new_interface(int intf, u8 addr_space, unsigned long base_addr)
921 {
922         int i;
923
924         for (i = 0; i < SI_MAX_PARMS; ++i) {
925                 /* Don't check our address. */
926                 if (i == intf)
927                         continue;
928                 if (si_type[i] != NULL) {
929                         if ((addr_space == IPMI_MEM_ADDR_SPACE &&
930                              base_addr == addrs[i]) ||
931                             (addr_space == IPMI_IO_ADDR_SPACE &&
932                              base_addr == ports[i]))
933                                 return 0;
934                 }
935                 else
936                         break;
937         }
938
939         return 1;
940 }
941 #endif
942
943 static int std_irq_setup(struct smi_info *info)
944 {
945         int rv;
946
947         if (!info->irq)
948                 return 0;
949
950         rv = request_irq(info->irq,
951                          si_irq_handler,
952                          SA_INTERRUPT,
953                          DEVICE_NAME,
954                          info);
955         if (rv) {
956                 printk(KERN_WARNING
957                        "ipmi_si: %s unable to claim interrupt %d,"
958                        " running polled\n",
959                        DEVICE_NAME, info->irq);
960                 info->irq = 0;
961         } else {
962                 printk("  Using irq %d\n", info->irq);
963         }
964
965         return rv;
966 }
967
968 static void std_irq_cleanup(struct smi_info *info)
969 {
970         if (!info->irq)
971                 return;
972
973         free_irq(info->irq, info);
974 }
975
976 static unsigned char port_inb(struct si_sm_io *io, unsigned int offset)
977 {
978         unsigned int *addr = io->info;
979
980         return inb((*addr)+offset);
981 }
982
983 static void port_outb(struct si_sm_io *io, unsigned int offset,
984                       unsigned char b)
985 {
986         unsigned int *addr = io->info;
987
988         outb(b, (*addr)+offset);
989 }
990
991 static int port_setup(struct smi_info *info)
992 {
993         unsigned int *addr = info->io.info;
994
995         if (!addr || (!*addr))
996                 return -ENODEV;
997
998         if (request_region(*addr, info->io_size, DEVICE_NAME) == NULL)
999                 return -EIO;
1000         return 0;
1001 }
1002
1003 static void port_cleanup(struct smi_info *info)
1004 {
1005         unsigned int *addr = info->io.info;
1006
1007         if (addr && (*addr))
1008                 release_region (*addr, info->io_size);
1009         kfree(info);
1010 }
1011
1012 static int try_init_port(int intf_num, struct smi_info **new_info)
1013 {
1014         struct smi_info *info;
1015
1016         if (!ports[intf_num])
1017                 return -ENODEV;
1018
1019         if (!is_new_interface(intf_num, IPMI_IO_ADDR_SPACE,
1020                               ports[intf_num]))
1021                 return -ENODEV;
1022
1023         info = kmalloc(sizeof(*info), GFP_KERNEL);
1024         if (!info) {
1025                 printk(KERN_ERR "ipmi_si: Could not allocate SI data (1)\n");
1026                 return -ENOMEM;
1027         }
1028         memset(info, 0, sizeof(*info));
1029
1030         info->io_setup = port_setup;
1031         info->io_cleanup = port_cleanup;
1032         info->io.inputb = port_inb;
1033         info->io.outputb = port_outb;
1034         info->io.info = &(ports[intf_num]);
1035         info->io.addr = NULL;
1036         info->irq = 0;
1037         info->irq_setup = NULL;
1038         *new_info = info;
1039
1040         if (si_type[intf_num] == NULL)
1041                 si_type[intf_num] = "kcs";
1042
1043         printk("ipmi_si: Trying \"%s\" at I/O port 0x%x\n",
1044                si_type[intf_num], ports[intf_num]);
1045         return 0;
1046 }
1047
1048 static unsigned char mem_inb(struct si_sm_io *io, unsigned int offset)
1049 {
1050         return readb((io->addr)+offset);
1051 }
1052
1053 static void mem_outb(struct si_sm_io *io, unsigned int offset,
1054                      unsigned char b)
1055 {
1056         writeb(b, (io->addr)+offset);
1057 }
1058
1059 static int mem_setup(struct smi_info *info)
1060 {
1061         unsigned long *addr = info->io.info;
1062
1063         if (!addr || (!*addr))
1064                 return -ENODEV;
1065
1066         if (request_mem_region(*addr, info->io_size, DEVICE_NAME) == NULL)
1067                 return -EIO;
1068
1069         info->io.addr = ioremap(*addr, info->io_size);
1070         if (info->io.addr == NULL) {
1071                 release_mem_region(*addr, info->io_size);
1072                 return -EIO;
1073         }
1074         return 0;
1075 }
1076
1077 static void mem_cleanup(struct smi_info *info)
1078 {
1079         unsigned long *addr = info->io.info;
1080
1081         if (info->io.addr) {
1082                 iounmap(info->io.addr);
1083                 release_mem_region(*addr, info->io_size);
1084         }
1085         kfree(info);
1086 }
1087
1088 static int try_init_mem(int intf_num, struct smi_info **new_info)
1089 {
1090         struct smi_info *info;
1091
1092         if (!addrs[intf_num])
1093                 return -ENODEV;
1094
1095         if (!is_new_interface(intf_num, IPMI_MEM_ADDR_SPACE,
1096                               addrs[intf_num]))
1097                 return -ENODEV;
1098
1099         info = kmalloc(sizeof(*info), GFP_KERNEL);
1100         if (!info) {
1101                 printk(KERN_ERR "ipmi_si: Could not allocate SI data (2)\n");
1102                 return -ENOMEM;
1103         }
1104         memset(info, 0, sizeof(*info));
1105
1106         info->io_setup = mem_setup;
1107         info->io_cleanup = mem_cleanup;
1108         info->io.inputb = mem_inb;
1109         info->io.outputb = mem_outb;
1110         info->io.info = (void *) addrs[intf_num];
1111         info->io.addr = NULL;
1112         info->irq = 0;
1113         info->irq_setup = NULL;
1114         *new_info = info;
1115
1116         if (si_type[intf_num] == NULL)
1117                 si_type[intf_num] = "kcs";
1118
1119         printk("ipmi_si: Trying \"%s\" at memory address 0x%lx\n",
1120                si_type[intf_num], addrs[intf_num]);
1121         return 0;
1122 }
1123
1124
1125 #ifdef CONFIG_ACPI_INTERPRETER
1126
1127 #include <linux/acpi.h>
1128
1129 /* Once we get an ACPI failure, we don't try any more, because we go
1130    through the tables sequentially.  Once we don't find a table, there
1131    are no more. */
1132 static int acpi_failure = 0;
1133
1134 /* For GPE-type interrupts. */
1135 void ipmi_acpi_gpe(void *context)
1136 {
1137         struct smi_info *smi_info = context;
1138         unsigned long   flags;
1139 #ifdef DEBUG_TIMING
1140         struct timeval t;
1141 #endif
1142
1143         spin_lock_irqsave(&(smi_info->si_lock), flags);
1144
1145         spin_lock(&smi_info->count_lock);
1146         smi_info->interrupts++;
1147         spin_unlock(&smi_info->count_lock);
1148
1149         if (smi_info->stop_operation)
1150                 goto out;
1151
1152 #ifdef DEBUG_TIMING
1153         do_gettimeofday(&t);
1154         printk("**ACPI_GPE: %ld.%9.9ld\n", t.tv_sec, t.tv_usec);
1155 #endif
1156         smi_event_handler(smi_info, 0);
1157  out:
1158         spin_unlock_irqrestore(&(smi_info->si_lock), flags);
1159 }
1160
1161 static int acpi_gpe_irq_setup(struct smi_info *info)
1162 {
1163         acpi_status status;
1164
1165         if (!info->irq)
1166                 return 0;
1167
1168         /* FIXME - is level triggered right? */
1169         status = acpi_install_gpe_handler(NULL,
1170                                           info->irq,
1171                                           ACPI_GPE_LEVEL_TRIGGERED,
1172                                           ipmi_acpi_gpe,
1173                                           info);
1174         if (status != AE_OK) {
1175                 printk(KERN_WARNING
1176                        "ipmi_si: %s unable to claim ACPI GPE %d,"
1177                        " running polled\n",
1178                        DEVICE_NAME, info->irq);
1179                 info->irq = 0;
1180                 return -EINVAL;
1181         } else {
1182                 printk("  Using ACPI GPE %d\n", info->irq);
1183                 return 0;
1184         }
1185
1186 }
1187
1188 static void acpi_gpe_irq_cleanup(struct smi_info *info)
1189 {
1190         if (!info->irq)
1191                 return;
1192
1193         acpi_remove_gpe_handler(NULL, info->irq, ipmi_acpi_gpe);
1194 }
1195
1196 /*
1197  * Defined at
1198  * http://h21007.www2.hp.com/dspp/files/unprotected/devresource/Docs/TechPapers/IA64/hpspmi.pdf
1199  */
1200 struct SPMITable {
1201         s8      Signature[4];
1202         u32     Length;
1203         u8      Revision;
1204         u8      Checksum;
1205         s8      OEMID[6];
1206         s8      OEMTableID[8];
1207         s8      OEMRevision[4];
1208         s8      CreatorID[4];
1209         s8      CreatorRevision[4];
1210         u8      InterfaceType;
1211         u8      IPMIlegacy;
1212         s16     SpecificationRevision;
1213
1214         /*
1215          * Bit 0 - SCI interrupt supported
1216          * Bit 1 - I/O APIC/SAPIC
1217          */
1218         u8      InterruptType;
1219
1220         /* If bit 0 of InterruptType is set, then this is the SCI
1221            interrupt in the GPEx_STS register. */
1222         u8      GPE;
1223
1224         s16     Reserved;
1225
1226         /* If bit 1 of InterruptType is set, then this is the I/O
1227            APIC/SAPIC interrupt. */
1228         u32     GlobalSystemInterrupt;
1229
1230         /* The actual register address. */
1231         struct acpi_generic_address addr;
1232
1233         u8      UID[4];
1234
1235         s8      spmi_id[1]; /* A '\0' terminated array starts here. */
1236 };
1237
1238 static int try_init_acpi(int intf_num, struct smi_info **new_info)
1239 {
1240         struct smi_info  *info;
1241         acpi_status      status;
1242         struct SPMITable *spmi;
1243         char             *io_type;
1244         u8               addr_space;
1245
1246         if (acpi_failure)
1247                 return -ENODEV;
1248
1249         status = acpi_get_firmware_table("SPMI", intf_num+1,
1250                                          ACPI_LOGICAL_ADDRESSING,
1251                                          (struct acpi_table_header **) &spmi);
1252         if (status != AE_OK) {
1253                 acpi_failure = 1;
1254                 return -ENODEV;
1255         }
1256
1257         if (spmi->IPMIlegacy != 1) {
1258             printk(KERN_INFO "IPMI: Bad SPMI legacy %d\n", spmi->IPMIlegacy);
1259             return -ENODEV;
1260         }
1261
1262         if (spmi->addr.address_space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
1263                 addr_space = IPMI_MEM_ADDR_SPACE;
1264         else
1265                 addr_space = IPMI_IO_ADDR_SPACE;
1266         if (!is_new_interface(-1, addr_space, spmi->addr.address))
1267                 return -ENODEV;
1268
1269         /* Figure out the interface type. */
1270         switch (spmi->InterfaceType)
1271         {
1272         case 1: /* KCS */
1273                 si_type[intf_num] = "kcs";
1274                 break;
1275
1276         case 2: /* SMIC */
1277                 si_type[intf_num] = "smic";
1278                 break;
1279
1280         case 3: /* BT */
1281                 si_type[intf_num] = "bt";
1282                 break;
1283
1284         default:
1285                 printk(KERN_INFO "ipmi_si: Unknown ACPI/SPMI SI type %d\n",
1286                         spmi->InterfaceType);
1287                 return -EIO;
1288         }
1289
1290         info = kmalloc(sizeof(*info), GFP_KERNEL);
1291         if (!info) {
1292                 printk(KERN_ERR "ipmi_si: Could not allocate SI data (3)\n");
1293                 return -ENOMEM;
1294         }
1295         memset(info, 0, sizeof(*info));
1296
1297         if (spmi->InterruptType & 1) {
1298                 /* We've got a GPE interrupt. */
1299                 info->irq = spmi->GPE;
1300                 info->irq_setup = acpi_gpe_irq_setup;
1301                 info->irq_cleanup = acpi_gpe_irq_cleanup;
1302         } else if (spmi->InterruptType & 2) {
1303                 /* We've got an APIC/SAPIC interrupt. */
1304                 info->irq = spmi->GlobalSystemInterrupt;
1305                 info->irq_setup = std_irq_setup;
1306                 info->irq_cleanup = std_irq_cleanup;
1307         } else {
1308                 /* Use the default interrupt setting. */
1309                 info->irq = 0;
1310                 info->irq_setup = NULL;
1311         }
1312
1313         if (spmi->addr.address_space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
1314                 io_type = "memory";
1315                 info->io_setup = mem_setup;
1316                 info->io_cleanup = mem_cleanup;
1317                 addrs[intf_num] = spmi->addr.address;
1318                 info->io.inputb = mem_inb;
1319                 info->io.outputb = mem_outb;
1320                 info->io.info = &(addrs[intf_num]);
1321         } else if (spmi->addr.address_space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
1322                 io_type = "I/O";
1323                 info->io_setup = port_setup;
1324                 info->io_cleanup = port_cleanup;
1325                 ports[intf_num] = spmi->addr.address;
1326                 info->io.inputb = port_inb;
1327                 info->io.outputb = port_outb;
1328                 info->io.info = &(ports[intf_num]);
1329         } else {
1330                 kfree(info);
1331                 printk("ipmi_si: Unknown ACPI I/O Address type\n");
1332                 return -EIO;
1333         }
1334
1335         *new_info = info;
1336
1337         printk("ipmi_si: ACPI/SPMI specifies \"%s\" %s SI @ 0x%lx\n",
1338                si_type[intf_num], io_type, (unsigned long) spmi->addr.address);
1339         return 0;
1340 }
1341 #endif
1342
1343 #ifdef CONFIG_X86
1344
1345 typedef struct dmi_ipmi_data
1346 {
1347         u8              type;
1348         u8              addr_space;
1349         unsigned long   base_addr;
1350         u8              irq;
1351 }dmi_ipmi_data_t;
1352
1353 typedef struct dmi_header
1354 {
1355         u8      type;
1356         u8      length;
1357         u16     handle;
1358 }dmi_header_t;
1359
1360 static int decode_dmi(dmi_header_t *dm, dmi_ipmi_data_t *ipmi_data)
1361 {
1362         u8              *data = (u8 *)dm;
1363         unsigned long   base_addr;
1364
1365         ipmi_data->type = data[0x04];
1366
1367         memcpy(&base_addr,&data[0x08],sizeof(unsigned long));
1368         if (base_addr & 1) {
1369                 /* I/O */
1370                 base_addr &= 0xFFFE;
1371                 ipmi_data->addr_space = IPMI_IO_ADDR_SPACE;
1372         }
1373         else {
1374                 /* Memory */
1375                 ipmi_data->addr_space = IPMI_MEM_ADDR_SPACE;
1376         }
1377
1378         ipmi_data->base_addr = base_addr;
1379         ipmi_data->irq = data[0x11];
1380
1381         if (is_new_interface(-1, ipmi_data->addr_space,ipmi_data->base_addr))
1382             return 0;
1383
1384         memset(ipmi_data,0,sizeof(dmi_ipmi_data_t));
1385
1386         return -1;
1387 }
1388
1389 static int dmi_table(u32 base, int len, int num,
1390         dmi_ipmi_data_t *ipmi_data)
1391 {
1392         u8                *buf;
1393         struct dmi_header *dm;
1394         u8                *data;
1395         int               i=1;
1396         int               status=-1;
1397
1398         buf = ioremap(base, len);
1399         if(buf==NULL)
1400                 return -1;
1401
1402         data = buf;
1403
1404         while(i<num && (data - buf) < len)
1405         {
1406                 dm=(dmi_header_t *)data;
1407
1408                 if((data-buf+dm->length) >= len)
1409                         break;
1410
1411                 if (dm->type == 38) {
1412                         if (decode_dmi(dm, ipmi_data) == 0) {
1413                                 status = 0;
1414                                 break;
1415                         }
1416                 }
1417
1418                 data+=dm->length;
1419                 while((data-buf) < len && (*data || data[1]))
1420                         data++;
1421                 data+=2;
1422                 i++;
1423         }
1424         iounmap(buf);
1425
1426         return status;
1427 }
1428
1429 inline static int dmi_checksum(u8 *buf)
1430 {
1431         u8   sum=0;
1432         int  a;
1433
1434         for(a=0; a<15; a++)
1435                 sum+=buf[a];
1436         return (sum==0);
1437 }
1438
1439 static int dmi_iterator(dmi_ipmi_data_t *ipmi_data)
1440 {
1441         u8   buf[15];
1442         u32  fp=0xF0000;
1443
1444 #ifdef CONFIG_SIMNOW
1445         return -1;
1446 #endif
1447
1448         while(fp < 0xFFFFF)
1449         {
1450                 isa_memcpy_fromio(buf, fp, 15);
1451                 if(memcmp(buf, "_DMI_", 5)==0 && dmi_checksum(buf))
1452                 {
1453                         u16 num=buf[13]<<8|buf[12];
1454                         u16 len=buf[7]<<8|buf[6];
1455                         u32 base=buf[11]<<24|buf[10]<<16|buf[9]<<8|buf[8];
1456
1457                         if(dmi_table(base, len, num, ipmi_data) == 0)
1458                                 return 0;
1459                 }
1460                 fp+=16;
1461         }
1462
1463         return -1;
1464 }
1465
1466 static int try_init_smbios(int intf_num, struct smi_info **new_info)
1467 {
1468         struct smi_info   *info;
1469         dmi_ipmi_data_t   ipmi_data;
1470         char              *io_type;
1471         int               status;
1472
1473         status = dmi_iterator(&ipmi_data);
1474
1475         if (status < 0)
1476                 return -ENODEV;
1477
1478         switch(ipmi_data.type) {
1479                 case 0x01: /* KCS */
1480                         si_type[intf_num] = "kcs";
1481                         break;
1482                 case 0x02: /* SMIC */
1483                         si_type[intf_num] = "smic";
1484                         break;
1485                 case 0x03: /* BT */
1486                         si_type[intf_num] = "bt";
1487                         break;
1488                 default:
1489                         printk("ipmi_si: Unknown SMBIOS SI type.\n");
1490                         return -EIO;
1491         }
1492
1493         info = kmalloc(sizeof(*info), GFP_KERNEL);
1494         if (!info) {
1495                 printk(KERN_ERR "ipmi_si: Could not allocate SI data (4)\n");
1496                 return -ENOMEM;
1497         }
1498         memset(info, 0, sizeof(*info));
1499
1500         if (ipmi_data.addr_space == 1) {
1501                 io_type = "memory";
1502                 info->io_setup = mem_setup;
1503                 info->io_cleanup = mem_cleanup;
1504                 addrs[intf_num] = ipmi_data.base_addr;
1505                 info->io.inputb = mem_inb;
1506                 info->io.outputb = mem_outb;
1507                 info->io.info = &(addrs[intf_num]);
1508         } else if (ipmi_data.addr_space == 2) {
1509                 io_type = "I/O";
1510                 info->io_setup = port_setup;
1511                 info->io_cleanup = port_cleanup;
1512                 ports[intf_num] = ipmi_data.base_addr;
1513                 info->io.inputb = port_inb;
1514                 info->io.outputb = port_outb;
1515                 info->io.info = &(ports[intf_num]);
1516         } else {
1517                 kfree(info);
1518                 printk("ipmi_si: Unknown SMBIOS I/O Address type.\n");
1519                 return -EIO;
1520         }
1521
1522         irqs[intf_num] = ipmi_data.irq;
1523
1524         *new_info = info;
1525
1526         printk(KERN_INFO "ipmi_si: Found SMBIOS-specified state machine at %s"
1527                " address 0x%lx\n",
1528                io_type, (unsigned long)ipmi_data.base_addr);
1529         return 0;
1530 }
1531 #endif /* CONFIG_X86 */
1532
1533 #ifdef CONFIG_PCI
1534
1535 #define PCI_ERMC_CLASSCODE  0x0C0700
1536 #define PCI_HP_VENDOR_ID    0x103C
1537 #define PCI_MMC_DEVICE_ID   0x121A
1538 #define PCI_MMC_ADDR_CW     0x10
1539
1540 /* Avoid more than one attempt to probe pci smic. */
1541 static int pci_smic_checked = 0;
1542
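/* Detect the Hewlett-Packard PCI SMIC, either by its vendor/device ID or by
   the eRMC class code with an HP subsystem vendor, and read its I/O base
   address from PCI config space. */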
1543 static int find_pci_smic(int intf_num, struct smi_info **new_info)
1544 {
1545         struct smi_info  *info;
1546         int              error;
1547         struct pci_dev   *pci_dev = NULL;
1548         u16              base_addr;
1549         int              fe_rmc = 0;
1550
1551         if (pci_smic_checked)
1552                 return -ENODEV;
1553
1554         pci_smic_checked = 1;
1555
1556         if ((pci_dev = pci_find_device(PCI_HP_VENDOR_ID, PCI_MMC_DEVICE_ID,
1557                                        NULL)))
1558                 ;
1559         else if ((pci_dev = pci_find_class(PCI_ERMC_CLASSCODE, NULL)) &&
1560                  pci_dev->subsystem_vendor == PCI_HP_VENDOR_ID)
1561                 fe_rmc = 1;
1562         else
1563                 return -ENODEV;
1564
1565         error = pci_read_config_word(pci_dev, PCI_MMC_ADDR_CW, &base_addr);
1566         if (error)
1567         {
1568                 printk(KERN_ERR
1569                        "ipmi_si: pci_read_config_word() failed (%d).\n",
1570                        error);
1571                 return -ENODEV;
1572         }
1573
1574         /* Bit 0: 1 specifies programmed I/O, 0 specifies memory mapped I/O */
1575         if (!(base_addr & 0x0001))
1576         {
1577                 printk(KERN_ERR
1578                        "ipmi_si: memory mapped I/O not supported for PCI"
1579                        " smic.\n");
1580                 return -ENODEV;
1581         }
1582
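        /* Strip the I/O-space indicator bit to get the base port address. */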
1583         base_addr &= 0xFFFE;
1584         if (!fe_rmc)
1585                 /* Data register starts at base address + 1 in eRMC */
1586                 ++base_addr;
1587
1588         if (!is_new_interface(-1, IPMI_IO_ADDR_SPACE, base_addr))
1589                 return -ENODEV;
1590
1591         info = kmalloc(sizeof(*info), GFP_KERNEL);
1592         if (!info) {
1593                 printk(KERN_ERR "ipmi_si: Could not allocate SI data (5)\n");
1594                 return -ENOMEM;
1595         }
1596         memset(info, 0, sizeof(*info));
1597
1598         info->io_setup = port_setup;
1599         info->io_cleanup = port_cleanup;
1600         ports[intf_num] = base_addr;
1601         info->io.inputb = port_inb;
1602         info->io.outputb = port_outb;
1603         info->io.info = &(ports[intf_num]);
1604
1605         *new_info = info;
1606
1607         irqs[intf_num] = pci_dev->irq;
1608         si_type[intf_num] = "smic";
1609
1610         printk(KERN_INFO "ipmi_si: Found PCI SMIC at I/O address 0x%lx\n",
1611                 (unsigned long) base_addr);
1612
1613         return 0;
1614 }
1615 #endif /* CONFIG_PCI */
1616
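/* Probe for interfaces that can be discovered automatically; currently only
   the PCI SMIC detection above is implemented. */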
1617 static int try_init_plug_and_play(int intf_num, struct smi_info **new_info)
1618 {
1619 #ifdef CONFIG_PCI
1620         if (find_pci_smic(intf_num, new_info)==0)
1621                 return 0;
1622 #endif
1623         /* Include other methods here. */
1624
1625         return -ENODEV;
1626 }
1627
1628
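/* Issue a Get Device ID command by driving the low-level state machine
   directly (the upper IPMI layer is not attached yet) and record the device
   and IPMI version information from the response. */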
1629 static int try_get_dev_id(struct smi_info *smi_info)
1630 {
1631         unsigned char      msg[2];
1632         unsigned char      *resp;
1633         unsigned long      resp_len;
1634         enum si_sm_result smi_result;
1635         int               rv = 0;
1636
1637         resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
1638         if (!resp)
1639                 return -ENOMEM;
1640
1641         /* Do a Get Device ID command, since it comes back with some
1642            useful info. */
1643         msg[0] = IPMI_NETFN_APP_REQUEST << 2;
1644         msg[1] = IPMI_GET_DEVICE_ID_CMD;
1645         smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
1646
1647         smi_result = smi_info->handlers->event(smi_info->si_sm, 0);
1648         for (;;)
1649         {
1650                 if (smi_result == SI_SM_CALL_WITH_DELAY) {
1651                         set_current_state(TASK_UNINTERRUPTIBLE);
1652                         schedule_timeout(1);
1653                         smi_result = smi_info->handlers->event(
1654                                 smi_info->si_sm, 100);
1655                 }
1656                 else if (smi_result == SI_SM_CALL_WITHOUT_DELAY)
1657                 {
1658                         smi_result = smi_info->handlers->event(
1659                                 smi_info->si_sm, 0);
1660                 }
1661                 else
1662                         break;
1663         }
1664         if (smi_result == SI_SM_HOSED) {
1665                 /* We couldn't get the state machine to run, so whatever's at
1666                    the port is probably not an IPMI SMI interface. */
1667                 rv = -ENODEV;
1668                 goto out;
1669         }
1670
1671         /* Otherwise, we got some data. */
1672         resp_len = smi_info->handlers->get_result(smi_info->si_sm,
1673                                                   resp, IPMI_MAX_MSG_LENGTH);
1674         if (resp_len < 8) {
1675                 /* Too short; we read fields up through resp[7] below. */
1676                 rv = -EINVAL;
1677                 goto out;
1678         }
1679
1680         if ((resp[1] != IPMI_GET_DEVICE_ID_CMD) || (resp[2] != 0)) {
1681                 /* That's odd, it shouldn't be able to fail. */
1682                 rv = -EINVAL;
1683                 goto out;
1684         }
1685
1686         /* Record info from the get device id, in case we need it. */
1687         smi_info->ipmi_si_dev_rev = resp[4] & 0xf;
1688         smi_info->ipmi_si_fw_rev_major = resp[5] & 0x7f;
1689         smi_info->ipmi_si_fw_rev_minor = resp[6];
1690         smi_info->ipmi_version_major = resp[7] & 0xf;
1691         smi_info->ipmi_version_minor = resp[7] >> 4;
1692
1693  out:
1694         kfree(resp);
1695         return rv;
1696 }
1697
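/* /proc read handler: report the interface type ("kcs", "smic" or "bt"). */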
1698 static int type_file_read_proc(char *page, char **start, off_t off,
1699                                int count, int *eof, void *data)
1700 {
1701         char            *out = (char *) page;
1702         struct smi_info *smi = data;
1703
1704         switch (smi->si_type) {
1705             case SI_KCS:
1706                 return sprintf(out, "kcs\n");
1707             case SI_SMIC:
1708                 return sprintf(out, "smic\n");
1709             case SI_BT:
1710                 return sprintf(out, "bt\n");
1711             default:
1712                 return 0;
1713         }
1714 }
1715
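/* /proc read handler: dump the per-interface statistics counters. */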
1716 static int stat_file_read_proc(char *page, char **start, off_t off,
1717                                int count, int *eof, void *data)
1718 {
1719         char            *out = (char *) page;
1720         struct smi_info *smi = data;
1721
1722         out += sprintf(out, "interrupts_enabled:    %d\n",
1723                        smi->irq && !smi->interrupt_disabled);
1724         out += sprintf(out, "short_timeouts:        %ld\n",
1725                        smi->short_timeouts);
1726         out += sprintf(out, "long_timeouts:         %ld\n",
1727                        smi->long_timeouts);
1728         out += sprintf(out, "timeout_restarts:      %ld\n",
1729                        smi->timeout_restarts);
1730         out += sprintf(out, "idles:                 %ld\n",
1731                        smi->idles);
1732         out += sprintf(out, "interrupts:            %ld\n",
1733                        smi->interrupts);
1734         out += sprintf(out, "attentions:            %ld\n",
1735                        smi->attentions);
1736         out += sprintf(out, "flag_fetches:          %ld\n",
1737                        smi->flag_fetches);
1738         out += sprintf(out, "hosed_count:           %ld\n",
1739                        smi->hosed_count);
1740         out += sprintf(out, "complete_transactions: %ld\n",
1741                        smi->complete_transactions);
1742         out += sprintf(out, "events:                %ld\n",
1743                        smi->events);
1744         out += sprintf(out, "watchdog_pretimeouts:  %ld\n",
1745                        smi->watchdog_pretimeouts);
1746         out += sprintf(out, "incoming_messages:     %ld\n",
1747                        smi->incoming_messages);
1748
1749         return (out - ((char *) page));
1750 }
1751
1752 /* Returns 0 if initialized, or negative on an error. */
1753 static int init_one_smi(int intf_num, struct smi_info **smi)
1754 {
1755         int             rv;
1756         struct smi_info *new_smi;
1757
1758
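        /* Probe in order: memory-mapped address, I/O port, then (when
           si_trydefaults is set) ACPI, SMBIOS and plug-and-play. */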
1759         rv = try_init_mem(intf_num, &new_smi);
1760         if (rv)
1761                 rv = try_init_port(intf_num, &new_smi);
1762 #ifdef CONFIG_ACPI_INTERPRETER
1763         if ((rv) && (si_trydefaults)) {
1764                 rv = try_init_acpi(intf_num, &new_smi);
1765         }
1766 #endif
1767 #ifdef CONFIG_X86
1768         if ((rv) && (si_trydefaults)) {
1769                 rv = try_init_smbios(intf_num, &new_smi);
1770         }
1771 #endif
1772         if ((rv) && (si_trydefaults)) {
1773                 rv = try_init_plug_and_play(intf_num, &new_smi);
1774         }
1775
1776
1777         if (rv)
1778                 return rv;
1779
1780         /* So we know not to free it unless we have allocated one. */
1781         new_smi->intf = NULL;
1782         new_smi->si_sm = NULL;
1783         new_smi->handlers = NULL;
1784
1785         if (!new_smi->irq_setup) {
1786                 new_smi->irq = irqs[intf_num];
1787                 new_smi->irq_setup = std_irq_setup;
1788                 new_smi->irq_cleanup = std_irq_cleanup;
1789         }
1790
1791         /* Default to KCS if no type is specified. */
1792         if (si_type[intf_num] == NULL) {
1793                 if (si_trydefaults)
1794                         si_type[intf_num] = "kcs";
1795                 else {
1796                         rv = -EINVAL;
1797                         goto out_err;
1798                 }
1799         }
1800
1801         /* Set up the state machine to use. */
1802         if (strcmp(si_type[intf_num], "kcs") == 0) {
1803                 new_smi->handlers = &kcs_smi_handlers;
1804                 new_smi->si_type = SI_KCS;
1805         } else if (strcmp(si_type[intf_num], "smic") == 0) {
1806                 new_smi->handlers = &smic_smi_handlers;
1807                 new_smi->si_type = SI_SMIC;
1808         } else if (strcmp(si_type[intf_num], "bt") == 0) {
1809                 new_smi->handlers = &bt_smi_handlers;
1810                 new_smi->si_type = SI_BT;
1811         } else {
1812                 /* No support for anything else yet. */
1813                 rv = -EIO;
1814                 goto out_err;
1815         }
1816
1817         /* Allocate the state machine's data and initialize it. */
1818         new_smi->si_sm = kmalloc(new_smi->handlers->size(), GFP_KERNEL);
1819         if (!new_smi->si_sm) {
1820                 printk(KERN_ERR "ipmi_si: Could not allocate state machine memory\n");
1821                 rv = -ENOMEM;
1822                 goto out_err;
1823         }
1824         new_smi->io_size = new_smi->handlers->init_data(new_smi->si_sm,
1825                                                         &new_smi->io);
1826
1827         /* Now that we know the I/O size, we can set up the I/O. */
1828         rv = new_smi->io_setup(new_smi);
1829         if (rv) {
1830                 printk(KERN_ERR "ipmi_si: Could not set up I/O space\n");
1831                 goto out_err;
1832         }
1833
1834         spin_lock_init(&(new_smi->si_lock));
1835         spin_lock_init(&(new_smi->msg_lock));
1836         spin_lock_init(&(new_smi->count_lock));
1837
1838         /* Do low-level detection first. */
1839         if (new_smi->handlers->detect(new_smi->si_sm)) {
1840                 rv = -ENODEV;
1841                 goto out_err;
1842         }
1843
1844         /* Attempt a Get Device ID command.  If it fails, we probably
1845            don't have an SMI here. */
1846         rv = try_get_dev_id(new_smi);
1847         if (rv)
1848                 goto out_err;
1849
1850         /* Try to claim any interrupts. */
1851         new_smi->irq_setup(new_smi);
1852
1853         INIT_LIST_HEAD(&(new_smi->xmit_msgs));
1854         INIT_LIST_HEAD(&(new_smi->hp_xmit_msgs));
1855         new_smi->curr_msg = NULL;
1856         atomic_set(&new_smi->req_events, 0);
1857         new_smi->run_to_completion = 0;
1858
1859         new_smi->interrupt_disabled = 0;
1860         new_smi->timer_stopped = 0;
1861         new_smi->stop_operation = 0;
1862
1863         /* The ipmi_register_smi() code does some operations to
1864            determine the channel information, so we must be ready to
1865            handle operations before it is called.  This means we have
1866            to stop the timer if we get an error after this point. */
1867         init_timer(&(new_smi->si_timer));
1868         new_smi->si_timer.data = (long) new_smi;
1869         new_smi->si_timer.function = smi_timeout;
1870         new_smi->last_timeout_jiffies = jiffies;
1871         new_smi->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES;
1872         add_timer(&(new_smi->si_timer));
1873
1874         rv = ipmi_register_smi(&handlers,
1875                                new_smi,
1876                                new_smi->ipmi_version_major,
1877                                new_smi->ipmi_version_minor,
1878                                &(new_smi->intf));
1879         if (rv) {
1880                 printk(KERN_ERR
1881                        "ipmi_si: Unable to register device: error %d\n",
1882                        rv);
1883                 goto out_err_stop_timer;
1884         }
1885
1886         rv = ipmi_smi_add_proc_entry(new_smi->intf, "type",
1887                                      type_file_read_proc, NULL,
1888                                      new_smi, THIS_MODULE);
1889         if (rv) {
1890                 printk(KERN_ERR
1891                        "ipmi_si: Unable to create proc entry: %d\n",
1892                        rv);
1893                 goto out_err_stop_timer;
1894         }
1895
1896         rv = ipmi_smi_add_proc_entry(new_smi->intf, "si_stats",
1897                                      stat_file_read_proc, NULL,
1898                                      new_smi, THIS_MODULE);
1899         if (rv) {
1900                 printk(KERN_ERR
1901                        "ipmi_si: Unable to create proc entry: %d\n",
1902                        rv);
1903                 goto out_err_stop_timer;
1904         }
1905
1906         start_clear_flags(new_smi);
1907
1908         /* IRQ is defined to be set when non-zero. */
1909         if (new_smi->irq)
1910                 new_smi->si_state = SI_CLEARING_FLAGS_THEN_SET_IRQ;
1911
1912         *smi = new_smi;
1913
1914         printk(KERN_INFO "ipmi_si: IPMI %s interface initialized\n", si_type[intf_num]);
1915
1916         return 0;
1917
1918  out_err_stop_timer:
1919         new_smi->stop_operation = 1;
1920
1921         /* Wait for the timer to stop.  This avoids problems with race
1922            conditions when removing the timer here. */
1923         while (!new_smi->timer_stopped) {
1924                 set_current_state(TASK_UNINTERRUPTIBLE);
1925                 schedule_timeout(1);
1926         }
1927
1928  out_err:
1929         if (new_smi->intf)
1930                 ipmi_unregister_smi(new_smi->intf);
1931
1932         new_smi->irq_cleanup(new_smi);
1933
1934         /* Wait until we know that we are out of any interrupt
1935            handlers that might have been running before we freed
1936            the interrupt. */
1937         synchronize_kernel();
1938
1939         if (new_smi->si_sm) {
1940                 if (new_smi->handlers)
1941                         new_smi->handlers->cleanup(new_smi->si_sm);
1942                 kfree(new_smi->si_sm);
1943         }
1944         new_smi->io_cleanup(new_smi);
1945
1946         return rv;
1947 }
1948
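/* Module entry point: split si_type_str into per-interface types, print the
   driver banner, then bring up each interface, falling back to the default
   KCS/SMIC/BT I/O ports when si_trydefaults allows it. */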
1949 static __init int init_ipmi_si(void)
1950 {
1951         int  rv = 0;
1952         int  pos = 0;
1953         int  i;
1954         char *str;
1955
1956         if (initialized)
1957                 return 0;
1958         initialized = 1;
1959
1960         /* Parse out the si_type string into its components. */
1961         str = si_type_str;
1962         if (*str != '\0') {
1963                 for (i=0; (i<SI_MAX_PARMS) && (*str != '\0'); i++) {
1964                         si_type[i] = str;
1965                         str = strchr(str, ',');
1966                         if (str) {
1967                                 *str = '\0';
1968                                 str++;
1969                         } else {
1970                                 break;
1971                         }
1972                 }
1973         }
1974
1975         printk(KERN_INFO "IPMI System Interface driver version "
1976                IPMI_SI_VERSION);
1977         if (kcs_smi_handlers.version)
1978                 printk(", KCS version %s", kcs_smi_handlers.version);
1979         if (smic_smi_handlers.version)
1980                 printk(", SMIC version %s", smic_smi_handlers.version);
1981         if (bt_smi_handlers.version)
1982                 printk(", BT version %s", bt_smi_handlers.version);
1983         printk("\n");
1984
1985         rv = init_one_smi(0, &(smi_infos[pos]));
1986         if (rv && !ports[0] && si_trydefaults) {
1987                 /* If we are trying defaults and the initial port is
1988                    not set, then set it. */
1989                 si_type[0] = "kcs";
1990                 ports[0] = DEFAULT_KCS_IO_PORT;
1991                 rv = init_one_smi(0, &(smi_infos[pos]));
1992                 if (rv) {
1993                         /* No KCS - try SMIC */
1994                         si_type[0] = "smic";
1995                         ports[0] = DEFAULT_SMIC_IO_PORT;
1996                         rv = init_one_smi(0, &(smi_infos[pos]));
1997                 }
1998                 if (rv) {
1999                         /* No SMIC - try BT */
2000                         si_type[0] = "bt";
2001                         ports[0] = DEFAULT_BT_IO_PORT;
2002                         rv = init_one_smi(0, &(smi_infos[pos]));
2003                 }
2004         }
2005         if (rv == 0)
2006                 pos++;
2007
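        /* Attempt to initialize any additional configured interfaces. */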
2008         for (i=1; i < SI_MAX_PARMS; i++) {
2009                 rv = init_one_smi(i, &(smi_infos[pos]));
2010                 if (rv == 0)
2011                         pos++;
2012         }
2013
2014         if (smi_infos[0] == NULL) {
2015                 printk(KERN_WARNING "ipmi_si: Unable to find any System Interface(s)\n");
2016                 return -ENODEV;
2017         }
2018
2019         return 0;
2020 }
2021 module_init(init_ipmi_si);
2022
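/* Tear down a single interface: stop the timer and IRQ, wait for any running
   handlers to finish, unregister from the IPMI core, then free the state
   machine and release the I/O region. */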
2023 static void __exit cleanup_one_si(struct smi_info *to_clean)
2024 {
2025         int           rv;
2026         unsigned long flags;
2027
2028         if (!to_clean)
2029                 return;
2030
2031         /* Tell the timer and interrupt handlers that we are shutting
2032            down. */
2033         spin_lock_irqsave(&(to_clean->si_lock), flags);
2034         spin_lock(&(to_clean->msg_lock));
2035
2036         to_clean->stop_operation = 1;
2037
2038         to_clean->irq_cleanup(to_clean);
2039
2040         spin_unlock(&(to_clean->msg_lock));
2041         spin_unlock_irqrestore(&(to_clean->si_lock), flags);
2042
2043         /* Wait until we know that we are out of any interrupt
2044            handlers that might have been running before we freed
2045            the interrupt. */
2046         synchronize_kernel();
2047
2048         /* Wait for the timer to stop.  This avoids problems with race
2049            conditions when removing the timer here. */
2050         while (!to_clean->timer_stopped) {
2051                 set_current_state(TASK_UNINTERRUPTIBLE);
2052                 schedule_timeout(1);
2053         }
2054
2055         rv = ipmi_unregister_smi(to_clean->intf);
2056         if (rv) {
2057                 printk(KERN_ERR
2058                        "ipmi_si: Unable to unregister device: errno=%d\n",
2059                        rv);
2060         }
2061
2062         to_clean->handlers->cleanup(to_clean->si_sm);
2063
2064         kfree(to_clean->si_sm);
2065
2066         to_clean->io_cleanup(to_clean);
2067 }
2068
2069 static __exit void cleanup_ipmi_si(void)
2070 {
2071         int i;
2072
2073         if (!initialized)
2074                 return;
2075
2076         for (i=0; i<SI_MAX_DRIVERS; i++) {
2077                 cleanup_one_si(smi_infos[i]);
2078         }
2079 }
2080 module_exit(cleanup_ipmi_si);
2081
2082 MODULE_LICENSE("GPL");