1 /*
2  * ipmi_si.c
3  *
4  * The interface to the IPMI driver for the system interfaces (KCS, SMIC,
5  * BT).
6  *
7  * Author: MontaVista Software, Inc.
8  *         Corey Minyard <minyard@mvista.com>
9  *         source@mvista.com
10  *
11  * Copyright 2002 MontaVista Software Inc.
12  *
13  *  This program is free software; you can redistribute it and/or modify it
14  *  under the terms of the GNU General Public License as published by the
15  *  Free Software Foundation; either version 2 of the License, or (at your
16  *  option) any later version.
17  *
18  *
19  *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
20  *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
21  *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
22  *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
23  *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
24  *  BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
25  *  OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
26  *  ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
27  *  TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
28  *  USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29  *
30  *  You should have received a copy of the GNU General Public License along
31  *  with this program; if not, write to the Free Software Foundation, Inc.,
32  *  675 Mass Ave, Cambridge, MA 02139, USA.
33  */
34
35 /*
36  * This file holds the "policy" for the interface to the SMI state
37  * machine.  It does the configuration, handles timers and interrupts,
38  * and drives the real SMI state machine.
39  */
40
41 #include <linux/config.h>
42 #include <linux/module.h>
43 #include <linux/moduleparam.h>
44 #include <asm/system.h>
45 #include <linux/sched.h>
46 #include <linux/timer.h>
47 #include <linux/errno.h>
48 #include <linux/spinlock.h>
49 #include <linux/slab.h>
50 #include <linux/delay.h>
51 #include <linux/list.h>
52 #include <linux/pci.h>
53 #include <linux/ioport.h>
54 #include <asm/irq.h>
55 #ifdef CONFIG_HIGH_RES_TIMERS
56 #include <linux/hrtime.h>
57 # if defined(schedule_next_int)
58 /* Old high-res timer code, do translations. */
59 #  define get_arch_cycles(a) quick_update_jiffies_sub(a)
60 #  define arch_cycles_per_jiffy cycles_per_jiffies
61 # endif
62 static inline void add_usec_to_timer(struct timer_list *t, long v)
63 {
64         t->sub_expires += nsec_to_arch_cycle(v * 1000);
65         while (t->sub_expires >= arch_cycles_per_jiffy)
66         {
67                 t->expires++;
68                 t->sub_expires -= arch_cycles_per_jiffy;
69         }
70 }
71 #endif
72 #include <linux/interrupt.h>
73 #include <linux/rcupdate.h>
74 #include <linux/ipmi_smi.h>
75 #include <asm/io.h>
76 #include "ipmi_si_sm.h"
77 #include <linux/init.h>
78
79 #define IPMI_SI_VERSION "v33"
80
81 /* Measure times between events in the driver. */
82 #undef DEBUG_TIMING
83
84 /* Call every 10 ms. */
85 #define SI_TIMEOUT_TIME_USEC    10000
86 #define SI_USEC_PER_JIFFY       (1000000/HZ)
87 #define SI_TIMEOUT_JIFFIES      (SI_TIMEOUT_TIME_USEC/SI_USEC_PER_JIFFY)
88 #define SI_SHORT_TIMEOUT_USEC  250 /* .25ms when the SM requests a
89                                        short timeout */
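/* For example, with HZ=1000 a jiffy is 1000 usec, so SI_TIMEOUT_JIFFIES
   works out to 10; with HZ=100 it is a single jiffy. */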
90
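/* Rough state flow: an ATTN or a requested event fetch moves the machine
   from SI_NORMAL to SI_GETTING_FLAGS; handle_flags() then dispatches to
   SI_CLEARING_FLAGS, SI_GETTING_MESSAGES, or SI_GETTING_EVENTS as needed,
   and each path drops back to SI_NORMAL once its transaction completes. */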
91 enum si_intf_state {
92         SI_NORMAL,
93         SI_GETTING_FLAGS,
94         SI_GETTING_EVENTS,
95         SI_CLEARING_FLAGS,
96         SI_CLEARING_FLAGS_THEN_SET_IRQ,
97         SI_GETTING_MESSAGES,
98         SI_ENABLE_INTERRUPTS1,
99         SI_ENABLE_INTERRUPTS2
100         /* FIXME - add watchdog stuff. */
101 };
102
103 enum si_type {
104     SI_KCS, SI_SMIC, SI_BT
105 };
106
107 struct smi_info
108 {
109         ipmi_smi_t             intf;
110         struct si_sm_data      *si_sm;
111         struct si_sm_handlers  *handlers;
112         enum si_type           si_type;
113         spinlock_t             si_lock;
114         spinlock_t             msg_lock;
115         struct list_head       xmit_msgs;
116         struct list_head       hp_xmit_msgs;
117         struct ipmi_smi_msg    *curr_msg;
118         enum si_intf_state     si_state;
119
120         /* Used to handle the various types of I/O that can occur with
121            IPMI */
122         struct si_sm_io io;
123         int (*io_setup)(struct smi_info *info);
124         void (*io_cleanup)(struct smi_info *info);
125         int (*irq_setup)(struct smi_info *info);
126         void (*irq_cleanup)(struct smi_info *info);
127         unsigned int io_size;
128
129         /* Flags from the last GET_MSG_FLAGS command, used when an ATTN
130            is set to hold the flags until we are done handling everything
131            from the flags. */
132 #define RECEIVE_MSG_AVAIL       0x01
133 #define EVENT_MSG_BUFFER_FULL   0x02
134 #define WDT_PRE_TIMEOUT_INT     0x08
135         unsigned char       msg_flags;
136
137         /* If set to true, this will request events the next time the
138            state machine is idle. */
139         atomic_t            req_events;
140
141         /* If true, run the state machine to completion on every send
142            call.  Generally used after a panic to make sure stuff goes
143            out. */
144         int                 run_to_completion;
145
146         /* The I/O port of an SI interface. */
147         int                 port;
148
149         /* The space between start addresses of the two ports.  For
150            instance, if the first port is 0xca2 and the spacing is 4, then
151            the second port is 0xca6. */
152         unsigned int        spacing;
153
154         /* Zero if no IRQ. */
155         int                 irq;
156
157         /* The timer for this si. */
158         struct timer_list   si_timer;
159
160         /* The time (in jiffies) the last timeout occurred at. */
161         unsigned long       last_timeout_jiffies;
162
163         /* Used to gracefully stop the timer without race conditions. */
164         volatile int        stop_operation;
165         volatile int        timer_stopped;
166
167         /* The driver will disable interrupts when it gets into a
168            situation where it cannot handle messages due to lack of
169            memory.  Once that situation clears up, it will re-enable
170            interrupts. */
171         int interrupt_disabled;
172
173         unsigned char ipmi_si_dev_rev;
174         unsigned char ipmi_si_fw_rev_major;
175         unsigned char ipmi_si_fw_rev_minor;
176         unsigned char ipmi_version_major;
177         unsigned char ipmi_version_minor;
178
179         /* Counters and things for the proc filesystem. */
180         spinlock_t count_lock;
181         unsigned long short_timeouts;
182         unsigned long long_timeouts;
183         unsigned long timeout_restarts;
184         unsigned long idles;
185         unsigned long interrupts;
186         unsigned long attentions;
187         unsigned long flag_fetches;
188         unsigned long hosed_count;
189         unsigned long complete_transactions;
190         unsigned long events;
191         unsigned long watchdog_pretimeouts;
192         unsigned long incoming_messages;
193 };
194
195 static void si_restart_short_timer(struct smi_info *smi_info);
196
197 static void deliver_recv_msg(struct smi_info *smi_info,
198                              struct ipmi_smi_msg *msg)
199 {
200         /* Deliver the message to the upper layer with the lock
201            released. */
202         spin_unlock(&(smi_info->si_lock));
203         ipmi_smi_msg_received(smi_info->intf, msg);
204         spin_lock(&(smi_info->si_lock));
205 }
206
207 static void return_hosed_msg(struct smi_info *smi_info)
208 {
209         struct ipmi_smi_msg *msg = smi_info->curr_msg;
210
211         /* Make it a response */
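        /* In IPMI a response netfn is the request netfn plus one.  Since
           data[0] holds (netfn << 2) | LUN and request netfns are even,
           OR-ing in 4 turns it into the matching response netfn. */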
212         msg->rsp[0] = msg->data[0] | 4;
213         msg->rsp[1] = msg->data[1];
214         msg->rsp[2] = 0xFF; /* Unknown error. */
215         msg->rsp_size = 3;
216
217         smi_info->curr_msg = NULL;
218         deliver_recv_msg(smi_info, msg);
219 }
220
221 static enum si_sm_result start_next_msg(struct smi_info *smi_info)
222 {
223         int              rv;
224         struct list_head *entry = NULL;
225 #ifdef DEBUG_TIMING
226         struct timeval t;
227 #endif
228
229         /* No need to save flags, we already have interrupts off and we
230            already hold the SMI lock. */
231         spin_lock(&(smi_info->msg_lock));
232
233         /* Pick the high priority queue first. */
234         if (! list_empty(&(smi_info->hp_xmit_msgs))) {
235                 entry = smi_info->hp_xmit_msgs.next;
236         } else if (! list_empty(&(smi_info->xmit_msgs))) {
237                 entry = smi_info->xmit_msgs.next;
238         }
239
240         if (!entry) {
241                 smi_info->curr_msg = NULL;
242                 rv = SI_SM_IDLE;
243         } else {
244                 int err;
245
246                 list_del(entry);
247                 smi_info->curr_msg = list_entry(entry,
248                                                 struct ipmi_smi_msg,
249                                                 link);
250 #ifdef DEBUG_TIMING
251                 do_gettimeofday(&t);
252                 printk("**Start2: %d.%9.9d\n", t.tv_sec, t.tv_usec);
253 #endif
254                 err = smi_info->handlers->start_transaction(
255                         smi_info->si_sm,
256                         smi_info->curr_msg->data,
257                         smi_info->curr_msg->data_size);
258                 if (err) {
259                         return_hosed_msg(smi_info);
260                 }
261
262                 rv = SI_SM_CALL_WITHOUT_DELAY;
263         }
264         spin_unlock(&(smi_info->msg_lock));
265
266         return rv;
267 }
268
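/* Enabling interrupts is a two-step exchange: the GET_BMC_GLOBAL_ENABLES
   request sent here gets its response handled in SI_ENABLE_INTERRUPTS1,
   which ORs bit 0 (the receive message queue interrupt enable) into the
   returned enables and sends SET_BMC_GLOBAL_ENABLES; the response to that
   is handled in SI_ENABLE_INTERRUPTS2. */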
269 static void start_enable_irq(struct smi_info *smi_info)
270 {
271         unsigned char msg[2];
272
273         /* If we are enabling interrupts, we have to tell the
274            BMC to use them. */
275         msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
276         msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
277
278         smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
279         smi_info->si_state = SI_ENABLE_INTERRUPTS1;
280 }
281
282 static void start_clear_flags(struct smi_info *smi_info)
283 {
284         unsigned char msg[3];
285
286         /* Make sure the watchdog pre-timeout flag is not set at startup. */
287         msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
288         msg[1] = IPMI_CLEAR_MSG_FLAGS_CMD;
289         msg[2] = WDT_PRE_TIMEOUT_INT;
290
291         smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3);
292         smi_info->si_state = SI_CLEARING_FLAGS;
293 }
294
295 /* When we have a situation where we run out of memory and cannot
296    allocate messages, we just leave them in the BMC and run the system
297    polled until we can allocate some memory.  Once we have some
298    memory, we will re-enable the interrupt. */
299 static inline void disable_si_irq(struct smi_info *smi_info)
300 {
301         if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
302                 disable_irq_nosync(smi_info->irq);
303                 smi_info->interrupt_disabled = 1;
304         }
305 }
306
307 static inline void enable_si_irq(struct smi_info *smi_info)
308 {
309         if ((smi_info->irq) && (smi_info->interrupt_disabled)) {
310                 enable_irq(smi_info->irq);
311                 smi_info->interrupt_disabled = 0;
312         }
313 }
314
315 static void handle_flags(struct smi_info *smi_info)
316 {
317         if (smi_info->msg_flags & WDT_PRE_TIMEOUT_INT) {
318                 /* Watchdog pre-timeout */
319                 spin_lock(&smi_info->count_lock);
320                 smi_info->watchdog_pretimeouts++;
321                 spin_unlock(&smi_info->count_lock);
322
323                 start_clear_flags(smi_info);
324                 smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT;
325                 spin_unlock(&(smi_info->si_lock));
326                 ipmi_smi_watchdog_pretimeout(smi_info->intf);
327                 spin_lock(&(smi_info->si_lock));
328         } else if (smi_info->msg_flags & RECEIVE_MSG_AVAIL) {
329                 /* Messages available. */
330                 smi_info->curr_msg = ipmi_alloc_smi_msg();
331                 if (!smi_info->curr_msg) {
332                         disable_si_irq(smi_info);
333                         smi_info->si_state = SI_NORMAL;
334                         return;
335                 }
336                 enable_si_irq(smi_info);
337
338                 smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
339                 smi_info->curr_msg->data[1] = IPMI_GET_MSG_CMD;
340                 smi_info->curr_msg->data_size = 2;
341
342                 smi_info->handlers->start_transaction(
343                         smi_info->si_sm,
344                         smi_info->curr_msg->data,
345                         smi_info->curr_msg->data_size);
346                 smi_info->si_state = SI_GETTING_MESSAGES;
347         } else if (smi_info->msg_flags & EVENT_MSG_BUFFER_FULL) {
348                 /* Events available. */
349                 smi_info->curr_msg = ipmi_alloc_smi_msg();
350                 if (!smi_info->curr_msg) {
351                         disable_si_irq(smi_info);
352                         smi_info->si_state = SI_NORMAL;
353                         return;
354                 }
355                 enable_si_irq(smi_info);
356
357                 smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
358                 smi_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD;
359                 smi_info->curr_msg->data_size = 2;
360
361                 smi_info->handlers->start_transaction(
362                         smi_info->si_sm,
363                         smi_info->curr_msg->data,
364                         smi_info->curr_msg->data_size);
365                 smi_info->si_state = SI_GETTING_EVENTS;
366         } else {
367                 smi_info->si_state = SI_NORMAL;
368         }
369 }
370
371 static void handle_transaction_done(struct smi_info *smi_info)
372 {
373         struct ipmi_smi_msg *msg;
374 #ifdef DEBUG_TIMING
375         struct timeval t;
376
377         do_gettimeofday(&t);
378         printk("**Done: %d.%9.9d\n", t.tv_sec, t.tv_usec);
379 #endif
380         switch (smi_info->si_state) {
381         case SI_NORMAL:
382                 if (!smi_info->curr_msg)
383                         break;
384
385                 smi_info->curr_msg->rsp_size
386                         = smi_info->handlers->get_result(
387                                 smi_info->si_sm,
388                                 smi_info->curr_msg->rsp,
389                                 IPMI_MAX_MSG_LENGTH);
390
391                 /* Do this here because deliver_recv_msg() releases the
392                    lock, and a new message can be put in during the
393                    time the lock is released. */
394                 msg = smi_info->curr_msg;
395                 smi_info->curr_msg = NULL;
396                 deliver_recv_msg(smi_info, msg);
397                 break;
398
399         case SI_GETTING_FLAGS:
400         {
401                 unsigned char msg[4];
402                 unsigned int  len;
403
404                 /* We got the flags from the SMI, now handle them. */
405                 len = smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
406                 if (msg[2] != 0) {
407                         /* Error fetching flags, just give up for
408                            now. */
409                         smi_info->si_state = SI_NORMAL;
410                 } else if (len < 3) {
411                         /* Hmm, no flags.  That's technically illegal, but
412                            don't use uninitialized data. */
413                         smi_info->si_state = SI_NORMAL;
414                 } else {
415                         smi_info->msg_flags = msg[3];
416                         handle_flags(smi_info);
417                 }
418                 break;
419         }
420
421         case SI_CLEARING_FLAGS:
422         case SI_CLEARING_FLAGS_THEN_SET_IRQ:
423         {
424                 unsigned char msg[3];
425
426                 /* We cleared the flags. */
427                 smi_info->handlers->get_result(smi_info->si_sm, msg, 3);
428                 if (msg[2] != 0) {
429                         /* Error clearing flags */
430                         printk(KERN_WARNING
431                                "ipmi_si: Error clearing flags: %2.2x\n",
432                                msg[2]);
433                 }
434                 if (smi_info->si_state == SI_CLEARING_FLAGS_THEN_SET_IRQ)
435                         start_enable_irq(smi_info);
436                 else
437                         smi_info->si_state = SI_NORMAL;
438                 break;
439         }
440
441         case SI_GETTING_EVENTS:
442         {
443                 smi_info->curr_msg->rsp_size
444                         = smi_info->handlers->get_result(
445                                 smi_info->si_sm,
446                                 smi_info->curr_msg->rsp,
447                                 IPMI_MAX_MSG_LENGTH);
448
449                 /* Do this here because deliver_recv_msg() releases the
450                    lock, and a new message can be put in during the
451                    time the lock is released. */
452                 msg = smi_info->curr_msg;
453                 smi_info->curr_msg = NULL;
454                 if (msg->rsp[2] != 0) {
455                         /* Error getting event, probably done. */
456                         msg->done(msg);
457
458                         /* Take off the event flag. */
459                         smi_info->msg_flags &= ~EVENT_MSG_BUFFER_FULL;
460                         handle_flags(smi_info);
461                 } else {
462                         spin_lock(&smi_info->count_lock);
463                         smi_info->events++;
464                         spin_unlock(&smi_info->count_lock);
465
466                         /* Do this before we deliver the message
467                            because delivering the message releases the
468                            lock and something else can mess with the
469                            state. */
470                         handle_flags(smi_info);
471
472                         deliver_recv_msg(smi_info, msg);
473                 }
474                 break;
475         }
476
477         case SI_GETTING_MESSAGES:
478         {
479                 smi_info->curr_msg->rsp_size
480                         = smi_info->handlers->get_result(
481                                 smi_info->si_sm,
482                                 smi_info->curr_msg->rsp,
483                                 IPMI_MAX_MSG_LENGTH);
484
485                 /* Do this here because deliver_recv_msg() releases the
486                    lock, and a new message can be put in during the
487                    time the lock is released. */
488                 msg = smi_info->curr_msg;
489                 smi_info->curr_msg = NULL;
490                 if (msg->rsp[2] != 0) {
491                         /* Error getting message, probably done. */
492                         msg->done(msg);
493
494                         /* Take off the msg flag. */
495                         smi_info->msg_flags &= ~RECEIVE_MSG_AVAIL;
496                         handle_flags(smi_info);
497                 } else {
498                         spin_lock(&smi_info->count_lock);
499                         smi_info->incoming_messages++;
500                         spin_unlock(&smi_info->count_lock);
501
502                         /* Do this before we deliver the message
503                            because delivering the message releases the
504                            lock and something else can mess with the
505                            state. */
506                         handle_flags(smi_info);
507
508                         deliver_recv_msg(smi_info, msg);
509                 }
510                 break;
511         }
512
513         case SI_ENABLE_INTERRUPTS1:
514         {
515                 unsigned char msg[4];
516
517                 /* We got the current global enables from the BMC, now handle them. */
518                 smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
519                 if (msg[2] != 0) {
520                         printk(KERN_WARNING
521                                "ipmi_si: Could not enable interrupts"
522                                ", failed get, using polled mode.\n");
523                         smi_info->si_state = SI_NORMAL;
524                 } else {
525                         msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
526                         msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
527                         msg[2] = msg[3] | 1; /* enable msg queue int */
528                         smi_info->handlers->start_transaction(
529                                 smi_info->si_sm, msg, 3);
530                         smi_info->si_state = SI_ENABLE_INTERRUPTS2;
531                 }
532                 break;
533         }
534
535         case SI_ENABLE_INTERRUPTS2:
536         {
537                 unsigned char msg[4];
538
539                 /* The response to our SET_BMC_GLOBAL_ENABLES request. */
540                 smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
541                 if (msg[2] != 0) {
542                         printk(KERN_WARNING
543                                "ipmi_si: Could not enable interrupts"
544                                ", failed set, using polled mode.\n");
545                 }
546                 smi_info->si_state = SI_NORMAL;
547                 break;
548         }
549         }
550 }
551
552 /* Called on timeouts and events.  Timeouts should pass the elapsed
553    time, interrupts should pass in zero. */
554 static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
555                                            int time)
556 {
557         enum si_sm_result si_sm_result;
558
559  restart:
560         /* There used to be a loop here that waited a little while
561            (around 25us) before giving up.  That turned out to be
562            pointless, the minimum delays I was seeing were in the 300us
563            range, which is far too long to wait in an interrupt.  So
564            we just run until the state machine tells us something
565            happened or it needs a delay. */
566         si_sm_result = smi_info->handlers->event(smi_info->si_sm, time);
567         time = 0;
568         while (si_sm_result == SI_SM_CALL_WITHOUT_DELAY)
569         {
570                 si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
571         }
572
573         if (si_sm_result == SI_SM_TRANSACTION_COMPLETE)
574         {
575                 spin_lock(&smi_info->count_lock);
576                 smi_info->complete_transactions++;
577                 spin_unlock(&smi_info->count_lock);
578
579                 handle_transaction_done(smi_info);
580                 si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
581         }
582         else if (si_sm_result == SI_SM_HOSED)
583         {
584                 spin_lock(&smi_info->count_lock);
585                 smi_info->hosed_count++;
586                 spin_unlock(&smi_info->count_lock);
587
588                 /* Do this before return_hosed_msg, because that
589                    releases the lock. */
590                 smi_info->si_state = SI_NORMAL;
591                 if (smi_info->curr_msg != NULL) {
592                         /* If we were handling a user message, format
593                            a response to send to the upper layer to
594                            tell it about the error. */
595                         return_hosed_msg(smi_info);
596                 }
597                 si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
598         }
599
600         /* We prefer handling attn over new messages. */
601         if (si_sm_result == SI_SM_ATTN)
602         {
603                 unsigned char msg[2];
604
605                 spin_lock(&smi_info->count_lock);
606                 smi_info->attentions++;
607                 spin_unlock(&smi_info->count_lock);
608
609                 /* Got an attn, send down a get message flags to see
610                    what's causing it.  It would be better to handle
611                    this in the upper layer, but due to the way
612                    interrupts work with the SMI, that's not really
613                    possible. */
614                 msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
615                 msg[1] = IPMI_GET_MSG_FLAGS_CMD;
616
617                 smi_info->handlers->start_transaction(
618                         smi_info->si_sm, msg, 2);
619                 smi_info->si_state = SI_GETTING_FLAGS;
620                 goto restart;
621         }
622
623         /* If we are currently idle, try to start the next message. */
624         if (si_sm_result == SI_SM_IDLE) {
625                 spin_lock(&smi_info->count_lock);
626                 smi_info->idles++;
627                 spin_unlock(&smi_info->count_lock);
628
629                 si_sm_result = start_next_msg(smi_info);
630                 if (si_sm_result != SI_SM_IDLE)
631                         goto restart;
632         }
633
634         if ((si_sm_result == SI_SM_IDLE)
635             && (atomic_read(&smi_info->req_events)))
636         {
637                 /* We are idle and the upper layer requested that I fetch
638                    events, so do so. */
639                 unsigned char msg[2];
640
641                 spin_lock(&smi_info->count_lock);
642                 smi_info->flag_fetches++;
643                 spin_unlock(&smi_info->count_lock);
644
645                 atomic_set(&smi_info->req_events, 0);
646                 msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
647                 msg[1] = IPMI_GET_MSG_FLAGS_CMD;
648
649                 smi_info->handlers->start_transaction(
650                         smi_info->si_sm, msg, 2);
651                 smi_info->si_state = SI_GETTING_FLAGS;
652                 goto restart;
653         }
654
655         return si_sm_result;
656 }
657
658 static void sender(void                *send_info,
659                    struct ipmi_smi_msg *msg,
660                    int                 priority)
661 {
662         struct smi_info   *smi_info = send_info;
663         enum si_sm_result result;
664         unsigned long     flags;
665 #ifdef DEBUG_TIMING
666         struct timeval    t;
667 #endif
668
669         spin_lock_irqsave(&(smi_info->msg_lock), flags);
670 #ifdef DEBUG_TIMING
671         do_gettimeofday(&t);
672         printk("**Enqueue: %d.%9.9d\n", t.tv_sec, t.tv_usec);
673 #endif
674
675         if (smi_info->run_to_completion) {
676                 /* If we are running to completion, then throw it in
677                    the list and run transactions until everything is
678                    clear.  Priority doesn't matter here. */
679                 list_add_tail(&(msg->link), &(smi_info->xmit_msgs));
680
681                 /* We have to release the msg lock and claim the smi
682                    lock in this case, because of race conditions. */
683                 spin_unlock_irqrestore(&(smi_info->msg_lock), flags);
684
685                 spin_lock_irqsave(&(smi_info->si_lock), flags);
686                 result = smi_event_handler(smi_info, 0);
687                 while (result != SI_SM_IDLE) {
688                         udelay(SI_SHORT_TIMEOUT_USEC);
689                         result = smi_event_handler(smi_info,
690                                                    SI_SHORT_TIMEOUT_USEC);
691                 }
692                 spin_unlock_irqrestore(&(smi_info->si_lock), flags);
693                 return;
694         } else {
695                 if (priority > 0) {
696                         list_add_tail(&(msg->link), &(smi_info->hp_xmit_msgs));
697                 } else {
698                         list_add_tail(&(msg->link), &(smi_info->xmit_msgs));
699                 }
700         }
701         spin_unlock_irqrestore(&(smi_info->msg_lock), flags);
702
703         spin_lock_irqsave(&(smi_info->si_lock), flags);
704         if ((smi_info->si_state == SI_NORMAL)
705             && (smi_info->curr_msg == NULL))
706         {
707                 start_next_msg(smi_info);
708                 si_restart_short_timer(smi_info);
709         }
710         spin_unlock_irqrestore(&(smi_info->si_lock), flags);
711 }
712
713 static void set_run_to_completion(void *send_info, int i_run_to_completion)
714 {
715         struct smi_info   *smi_info = send_info;
716         enum si_sm_result result;
717         unsigned long     flags;
718
719         spin_lock_irqsave(&(smi_info->si_lock), flags);
720
721         smi_info->run_to_completion = i_run_to_completion;
722         if (i_run_to_completion) {
723                 result = smi_event_handler(smi_info, 0);
724                 while (result != SI_SM_IDLE) {
725                         udelay(SI_SHORT_TIMEOUT_USEC);
726                         result = smi_event_handler(smi_info,
727                                                    SI_SHORT_TIMEOUT_USEC);
728                 }
729         }
730
731         spin_unlock_irqrestore(&(smi_info->si_lock), flags);
732 }
733
734 static void poll(void *send_info)
735 {
736         struct smi_info *smi_info = send_info;
737
738         smi_event_handler(smi_info, 0);
739 }
740
741 static void request_events(void *send_info)
742 {
743         struct smi_info *smi_info = send_info;
744
745         atomic_set(&smi_info->req_events, 1);
746 }
747
748 static int initialized = 0;
749
750 /* Must be called with interrupts off and with the si_lock held. */
751 static void si_restart_short_timer(struct smi_info *smi_info)
752 {
753 #if defined(CONFIG_HIGH_RES_TIMERS)
754         unsigned long flags;
755         unsigned long jiffies_now;
756
757         if (del_timer(&(smi_info->si_timer))) {
758                 /* If we don't delete the timer, then it will go off
759                    immediately, anyway.  So we only process if we
760                    actually delete the timer. */
761
762                 /* We already have irqsave on, so no need for it
763                    here. */
764                 read_lock(&xtime_lock);
765                 jiffies_now = jiffies;
766                 smi_info->si_timer.expires = jiffies_now;
767                 smi_info->si_timer.sub_expires = get_arch_cycles(jiffies_now);
                read_unlock(&xtime_lock);
768
769                 add_usec_to_timer(&smi_info->si_timer, SI_SHORT_TIMEOUT_USEC);
770
771                 add_timer(&(smi_info->si_timer));
772                 spin_lock_irqsave(&smi_info->count_lock, flags);
773                 smi_info->timeout_restarts++;
774                 spin_unlock_irqrestore(&smi_info->count_lock, flags);
775         }
776 #endif
777 }
778
779 static void smi_timeout(unsigned long data)
780 {
781         struct smi_info   *smi_info = (struct smi_info *) data;
782         enum si_sm_result smi_result;
783         unsigned long     flags;
784         unsigned long     jiffies_now;
785         unsigned long     time_diff;
786 #ifdef DEBUG_TIMING
787         struct timeval    t;
788 #endif
789
790         if (smi_info->stop_operation) {
791                 smi_info->timer_stopped = 1;
792                 return;
793         }
794
795         spin_lock_irqsave(&(smi_info->si_lock), flags);
796 #ifdef DEBUG_TIMING
797         do_gettimeofday(&t);
798         printk("**Timer: %d.%9.9d\n", t.tv_sec, t.tv_usec);
799 #endif
800         jiffies_now = jiffies;
801         time_diff = ((jiffies_now - smi_info->last_timeout_jiffies)
802                      * SI_USEC_PER_JIFFY);
803         smi_result = smi_event_handler(smi_info, time_diff);
804
805         spin_unlock_irqrestore(&(smi_info->si_lock), flags);
806
807         smi_info->last_timeout_jiffies = jiffies_now;
808
809         if ((smi_info->irq) && (! smi_info->interrupt_disabled)) {
810                 /* Running with interrupts, only do long timeouts. */
811                 smi_info->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES;
812                 spin_lock_irqsave(&smi_info->count_lock, flags);
813                 smi_info->long_timeouts++;
814                 spin_unlock_irqrestore(&smi_info->count_lock, flags);
815                 goto do_add_timer;
816         }
817
818         /* If the state machine asks for a short delay, then shorten
819            the timer timeout. */
820         if (smi_result == SI_SM_CALL_WITH_DELAY) {
821                 spin_lock_irqsave(&smi_info->count_lock, flags);
822                 smi_info->short_timeouts++;
823                 spin_unlock_irqrestore(&smi_info->count_lock, flags);
824 #if defined(CONFIG_HIGH_RES_TIMERS)
825                 read_lock(&xtime_lock);
826                 smi_info->si_timer.expires = jiffies;
827                 smi_info->si_timer.sub_expires
828                         = get_arch_cycles(smi_info->si_timer.expires);
829                 read_unlock(&xtime_lock);
830                 add_usec_to_timer(&smi_info->si_timer, SI_SHORT_TIMEOUT_USEC);
831 #else
832                 smi_info->si_timer.expires = jiffies + 1;
833 #endif
834         } else {
835                 spin_lock_irqsave(&smi_info->count_lock, flags);
836                 smi_info->long_timeouts++;
837                 spin_unlock_irqrestore(&smi_info->count_lock, flags);
838                 smi_info->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES;
839 #if defined(CONFIG_HIGH_RES_TIMERS)
840                 smi_info->si_timer.sub_expires = 0;
841 #endif
842         }
843
844  do_add_timer:
845         add_timer(&(smi_info->si_timer));
846 }
847
848 static irqreturn_t si_irq_handler(int irq, void *data, struct pt_regs *regs)
849 {
850         struct smi_info *smi_info = data;
851         unsigned long   flags;
852 #ifdef DEBUG_TIMING
853         struct timeval  t;
854 #endif
855
856         spin_lock_irqsave(&(smi_info->si_lock), flags);
857
858         spin_lock(&smi_info->count_lock);
859         smi_info->interrupts++;
860         spin_unlock(&smi_info->count_lock);
861
862         if (smi_info->stop_operation)
863                 goto out;
864
865 #ifdef DEBUG_TIMING
866         do_gettimeofday(&t);
867         printk("**Interrupt: %d.%9.9d\n", t.tv_sec, t.tv_usec);
868 #endif
869         smi_event_handler(smi_info, 0);
870  out:
871         spin_unlock_irqrestore(&(smi_info->si_lock), flags);
872         return IRQ_HANDLED;
873 }
874
875 static struct ipmi_smi_handlers handlers =
876 {
877         .owner                  = THIS_MODULE,
878         .sender                 = sender,
879         .request_events         = request_events,
880         .set_run_to_completion  = set_run_to_completion,
881         .poll                   = poll,
882 };
883
884 /* There can be 4 IO ports passed in (with or without IRQs), 4 addresses,
885    a default IO port, and 1 ACPI/SPMI address.  That sets SI_MAX_DRIVERS */
886
887 #define SI_MAX_PARMS 4
888 #define SI_MAX_DRIVERS ((SI_MAX_PARMS * 2) + 2)
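/* (SI_MAX_PARMS * 2) + 2 = 10 slots: 4 port-based plus 4 memory-mapped
   interfaces, one default, and one ACPI/SPMI.  Array entries beyond the
   explicit NULLs below are implicitly zero-initialized. */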
889 static struct smi_info *smi_infos[SI_MAX_DRIVERS] =
890 { NULL, NULL, NULL, NULL };
891
892 #define DEVICE_NAME "ipmi_si"
893
894 #define DEFAULT_KCS_IO_PORT     0xca2
895 #define DEFAULT_SMIC_IO_PORT    0xca9
896 #define DEFAULT_BT_IO_PORT      0xe4
897 #define DEFAULT_REGSPACING      1
898
899 static int           si_trydefaults = 1;
900 static char          *si_type[SI_MAX_PARMS] = { NULL, NULL, NULL, NULL };
901 #define MAX_SI_TYPE_STR 30
902 static char          si_type_str[MAX_SI_TYPE_STR];
903 static unsigned long addrs[SI_MAX_PARMS] = { 0, 0, 0, 0 };
904 static int num_addrs = 0;
905 static unsigned int  ports[SI_MAX_PARMS] = { 0, 0, 0, 0 };
906 static int num_ports = 0;
907 static int           irqs[SI_MAX_PARMS] = { 0, 0, 0, 0 };
908 static int num_irqs = 0;
909 static int           regspacings[SI_MAX_PARMS] = { 0, 0, 0, 0 };
910 static int num_regspacings = 0;
911 static int           regsizes[SI_MAX_PARMS] = { 0, 0, 0, 0 };
912 static int num_regsizes = 0;
913 static int           regshifts[SI_MAX_PARMS] = { 0, 0, 0, 0 };
914 static int num_regshifts = 0;
915
916
917 module_param_named(trydefaults, si_trydefaults, bool, 0);
918 MODULE_PARM_DESC(trydefaults, "Setting this to 'false' will disable the"
919                  " default scan of the KCS and SMIC interfaces at their standard"
920                  " addresses");
921 module_param_string(type, si_type_str, MAX_SI_TYPE_STR, 0);
922 MODULE_PARM_DESC(type, "Defines the type of each interface, each"
923                  " interface separated by commas.  The types are 'kcs',"
924                  " 'smic', and 'bt'.  For example si_type=kcs,bt will set"
925                  " the first interface to kcs and the second to bt");
926 module_param_array(addrs, long, &num_addrs, 0);
927 MODULE_PARM_DESC(addrs, "Sets the memory address of each interface, the"
928                  " addresses separated by commas.  Only use if an interface"
929                  " is in memory.  Otherwise, set it to zero or leave"
930                  " it blank.");
931 module_param_array(ports, int, &num_ports, 0);
932 MODULE_PARM_DESC(ports, "Sets the port address of each interface, the"
933                  " addresses separated by commas.  Only use if an interface"
934                  " is a port.  Otherwise, set it to zero or leave"
935                  " it blank.");
936 module_param_array(irqs, int, &num_irqs, 0);
937 MODULE_PARM_DESC(irqs, "Sets the interrupt of each interface, the"
938                  " IRQ numbers separated by commas.  Only use if an interface"
939                  " has an interrupt.  Otherwise, set it to zero or leave"
940                  " it blank.");
941 module_param_array(regspacings, int, &num_regspacings, 0);
942 MODULE_PARM_DESC(regspacings, "The number of bytes between the start address"
943                  " and each successive register used by the interface.  For"
944                  " instance, if the start address is 0xca2 and the spacing"
945                  " is 2, then the second address is at 0xca4.  Defaults"
946                  " to 1.");
947 module_param_array(regsizes, int, &num_regsizes, 0);
948 MODULE_PARM_DESC(regsizes, "The size of the specific IPMI register in bytes."
949                  " This should generally be 1, 2, 4, or 8 for an 8-bit,"
950                  " 16-bit, 32-bit, or 64-bit register.  Use this if the"
951                  " 8-bit IPMI register has to be read from a larger"
952                  " register.");
953 module_param_array(regshifts, int, &num_regshifts, 0);
954 MODULE_PARM_DESC(regshifts, "The amount to shift the data read from the"
955                  " IPMI register, in bits.  For instance, if the data"
956                  " is read from a 32-bit word and the IPMI data is in"
957                  " bits 8-15, then the shift would be 8.");
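
/* A hypothetical example tying these parameters together: a memory-mapped
   KCS interface whose 8-bit register sits in the low byte of a 32-bit word
   every 4 bytes could be set up with
       modprobe ipmi_si type=kcs addrs=0xd0000000 regspacings=4 regsizes=4 regshifts=0
   The address and layout above are purely illustrative, not defaults. */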
958
959 #define IPMI_MEM_ADDR_SPACE 1
960 #define IPMI_IO_ADDR_SPACE  2
961
962 #if defined(CONFIG_ACPI_INTERPRETER) || defined(CONFIG_X86) || defined(CONFIG_PCI)
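/* Returns 1 if no other configured interface already uses base_addr in the
   given address space; this keeps the various probe paths (defaults, ACPI,
   etc.) from registering the same BMC twice. */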
963 static int is_new_interface(int intf, u8 addr_space, unsigned long base_addr)
964 {
965         int i;
966
967         for (i = 0; i < SI_MAX_PARMS; ++i) {
968                 /* Don't check our address. */
969                 if (i == intf)
970                         continue;
971                 if (si_type[i] != NULL) {
972                         if ((addr_space == IPMI_MEM_ADDR_SPACE &&
973                              base_addr == addrs[i]) ||
974                             (addr_space == IPMI_IO_ADDR_SPACE &&
975                              base_addr == ports[i]))
976                                 return 0;
977                 }
978                 else
979                         break;
980         }
981
982         return 1;
983 }
984 #endif
985
986 static int std_irq_setup(struct smi_info *info)
987 {
988         int rv;
989
990         if (!info->irq)
991                 return 0;
992
993         rv = request_irq(info->irq,
994                          si_irq_handler,
995                          SA_INTERRUPT,
996                          DEVICE_NAME,
997                          info);
998         if (rv) {
999                 printk(KERN_WARNING
1000                        "ipmi_si: %s unable to claim interrupt %d,"
1001                        " running polled\n",
1002                        DEVICE_NAME, info->irq);
1003                 info->irq = 0;
1004         } else {
1005                 printk("  Using irq %d\n", info->irq);
1006         }
1007
1008         return rv;
1009 }
1010
1011 static void std_irq_cleanup(struct smi_info *info)
1012 {
1013         if (!info->irq)
1014                 return;
1015
1016         free_irq(info->irq, info);
1017 }
1018
1019 static unsigned char port_inb(struct si_sm_io *io, unsigned int offset)
1020 {
1021         unsigned int *addr = io->info;
1022
1023         return inb((*addr)+(offset*io->regspacing));
1024 }
1025
1026 static void port_outb(struct si_sm_io *io, unsigned int offset,
1027                       unsigned char b)
1028 {
1029         unsigned int *addr = io->info;
1030
1031         outb(b, (*addr)+(offset * io->regspacing));
1032 }
1033
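/* The 16- and 32-bit port accessors below read the whole word and use
   regshift to extract the 8-bit IPMI register from it; with regshift = 8,
   for instance, the value comes from bits 8-15 of the word. */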
1034 static unsigned char port_inw(struct si_sm_io *io, unsigned int offset)
1035 {
1036         unsigned int *addr = io->info;
1037
1038         return (inw((*addr)+(offset * io->regspacing)) >> io->regshift) & 0xff;
1039 }
1040
1041 static void port_outw(struct si_sm_io *io, unsigned int offset,
1042                       unsigned char b)
1043 {
1044         unsigned int *addr = io->info;
1045
1046         outw(b << io->regshift, (*addr)+(offset * io->regspacing));
1047 }
1048
1049 static unsigned char port_inl(struct si_sm_io *io, unsigned int offset)
1050 {
1051         unsigned int *addr = io->info;
1052
1053         return (inl((*addr)+(offset * io->regspacing)) >> io->regshift) & 0xff;
1054 }
1055
1056 static void port_outl(struct si_sm_io *io, unsigned int offset,
1057                       unsigned char b)
1058 {
1059         unsigned int *addr = io->info;
1060
1061         outl(b << io->regshift, (*addr)+(offset * io->regspacing));
1062 }
1063
1064 static void port_cleanup(struct smi_info *info)
1065 {
1066         unsigned int *addr = info->io.info;
1067         int           mapsize;
1068
1069         if (addr && (*addr)) {
1070                 mapsize = ((info->io_size * info->io.regspacing)
1071                            - (info->io.regspacing - info->io.regsize));
1072
1073                 release_region (*addr, mapsize);
1074         }
1075         kfree(info);
1076 }
1077
1078 static int port_setup(struct smi_info *info)
1079 {
1080         unsigned int *addr = info->io.info;
1081         int           mapsize;
1082
1083         if (!addr || (!*addr))
1084                 return -ENODEV;
1085
1086         info->io_cleanup = port_cleanup;
1087
1088         /* Figure out the actual inb/inw/inl/etc routine to use based
1089            upon the register size. */
1090         switch (info->io.regsize) {
1091         case 1:
1092                 info->io.inputb = port_inb;
1093                 info->io.outputb = port_outb;
1094                 break;
1095         case 2:
1096                 info->io.inputb = port_inw;
1097                 info->io.outputb = port_outw;
1098                 break;
1099         case 4:
1100                 info->io.inputb = port_inl;
1101                 info->io.outputb = port_outl;
1102                 break;
1103         default:
1104                 printk("ipmi_si: Invalid register size: %d\n",
1105                        info->io.regsize);
1106                 return -EINVAL;
1107         }
1108
1109         /* Calculate the total amount of memory to claim.  This is an
1110          * unusual looking calculation, but it avoids claiming any
1111          * more memory than it has to.  It will claim everything
1112          * between the first address to the end of the last full
1113          * register. */
1114         mapsize = ((info->io_size * info->io.regspacing)
1115                    - (info->io.regspacing - info->io.regsize));
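        /* For example, with io_size = 2, regspacing = 4 and regsize = 1 this
           gives (2 * 4) - (4 - 1) = 5 bytes, i.e. offsets 0-4: both one-byte
           registers are covered without claiming the bytes that follow the
           last one. */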
1116
1117         if (request_region(*addr, mapsize, DEVICE_NAME) == NULL)
1118                 return -EIO;
1119         return 0;
1120 }
1121
1122 static int try_init_port(int intf_num, struct smi_info **new_info)
1123 {
1124         struct smi_info *info;
1125
1126         if (!ports[intf_num])
1127                 return -ENODEV;
1128
1129         if (!is_new_interface(intf_num, IPMI_IO_ADDR_SPACE,
1130                               ports[intf_num]))
1131                 return -ENODEV;
1132
1133         info = kmalloc(sizeof(*info), GFP_KERNEL);
1134         if (!info) {
1135                 printk(KERN_ERR "ipmi_si: Could not allocate SI data (1)\n");
1136                 return -ENOMEM;
1137         }
1138         memset(info, 0, sizeof(*info));
1139
1140         info->io_setup = port_setup;
1141         info->io.info = &(ports[intf_num]);
1142         info->io.addr = NULL;
1143         info->io.regspacing = regspacings[intf_num];
1144         if (!info->io.regspacing)
1145                 info->io.regspacing = DEFAULT_REGSPACING;
1146         info->io.regsize = regsizes[intf_num];
1147         if (!info->io.regsize)
1148                 info->io.regsize = DEFAULT_REGSPACING;
1149         info->io.regshift = regshifts[intf_num];
1150         info->irq = 0;
1151         info->irq_setup = NULL;
1152         *new_info = info;
1153
1154         if (si_type[intf_num] == NULL)
1155                 si_type[intf_num] = "kcs";
1156
1157         printk("ipmi_si: Trying \"%s\" at I/O port 0x%x\n",
1158                si_type[intf_num], ports[intf_num]);
1159         return 0;
1160 }
1161
1162 static unsigned char mem_inb(struct si_sm_io *io, unsigned int offset)
1163 {
1164         return readb((io->addr)+(offset * io->regspacing));
1165 }
1166
1167 static void mem_outb(struct si_sm_io *io, unsigned int offset,
1168                      unsigned char b)
1169 {
1170         writeb(b, (io->addr)+(offset * io->regspacing));
1171 }
1172
1173 static unsigned char mem_inw(struct si_sm_io *io, unsigned int offset)
1174 {
1175         return (readw((io->addr)+(offset * io->regspacing)) >> io->regshift)
1176                 & 0xff;
1177 }
1178
1179 static void mem_outw(struct si_sm_io *io, unsigned int offset,
1180                      unsigned char b)
1181 {
1182         writew(b << io->regshift, (io->addr)+(offset * io->regspacing));
1183 }
1184
1185 static unsigned char mem_inl(struct si_sm_io *io, unsigned int offset)
1186 {
1187         return (readl((io->addr)+(offset * io->regspacing)) >> io->regshift)
1188                 & 0xff;
1189 }
1190
1191 static void mem_outl(struct si_sm_io *io, unsigned int offset,
1192                      unsigned char b)
1193 {
1194         writel(b << io->regshift, (io->addr)+(offset * io->regspacing));
1195 }
1196
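/* readq/writeq exist only on architectures that provide 64-bit MMIO
   accessors, hence the #ifdef guard around the 8-byte register handlers. */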
1197 #ifdef readq
1198 static unsigned char mem_inq(struct si_sm_io *io, unsigned int offset)
1199 {
1200         return (readq((io->addr)+(offset * io->regspacing)) >> io->regshift)
1201                 & 0xff;
1202 }
1203
1204 static void mem_outq(struct si_sm_io *io, unsigned int offset,
1205                      unsigned char b)
1206 {
1207         writeq(b << io->regshift, (io->addr)+(offset * io->regspacing));
1208 }
1209 #endif
1210
1211 static void mem_cleanup(struct smi_info *info)
1212 {
1213         unsigned long *addr = info->io.info;
1214         int           mapsize;
1215
1216         if (info->io.addr) {
1217                 iounmap(info->io.addr);
1218
1219                 mapsize = ((info->io_size * info->io.regspacing)
1220                            - (info->io.regspacing - info->io.regsize));
1221
1222                 release_mem_region(*addr, mapsize);
1223         }
1224         kfree(info);
1225 }
1226
1227 static int mem_setup(struct smi_info *info)
1228 {
1229         unsigned long *addr = info->io.info;
1230         int           mapsize;
1231
1232         if (!addr || (!*addr))
1233                 return -ENODEV;
1234
1235         info->io_cleanup = mem_cleanup;
1236
1237         /* Figure out the actual readb/readw/readl/etc routine to use based
1238            upon the register size. */
1239         switch (info->io.regsize) {
1240         case 1:
1241                 info->io.inputb = mem_inb;
1242                 info->io.outputb = mem_outb;
1243                 break;
1244         case 2:
1245                 info->io.inputb = mem_inw;
1246                 info->io.outputb = mem_outw;
1247                 break;
1248         case 4:
1249                 info->io.inputb = mem_inl;
1250                 info->io.outputb = mem_outl;
1251                 break;
1252 #ifdef readq
1253         case 8:
1254                 info->io.inputb = mem_inq;
1255                 info->io.outputb = mem_outq;
1256                 break;
1257 #endif
1258         default:
1259                 printk("ipmi_si: Invalid register size: %d\n",
1260                        info->io.regsize);
1261                 return -EINVAL;
1262         }
1263
1264         /* Calculate the total amount of memory to claim.  This is an
1265          * unusual looking calculation, but it avoids claiming any
1266          * more memory than it has to.  It will claim everything
1267          * between the first address to the end of the last full
1268          * register. */
1269         mapsize = ((info->io_size * info->io.regspacing)
1270                    - (info->io.regspacing - info->io.regsize));
1271
1272         if (request_mem_region(*addr, mapsize, DEVICE_NAME) == NULL)
1273                 return -EIO;
1274
1275         info->io.addr = ioremap(*addr, mapsize);
1276         if (info->io.addr == NULL) {
1277                 release_mem_region(*addr, mapsize);
1278                 return -EIO;
1279         }
1280         return 0;
1281 }
1282
1283 static int try_init_mem(int intf_num, struct smi_info **new_info)
1284 {
1285         struct smi_info *info;
1286
1287         if (!addrs[intf_num])
1288                 return -ENODEV;
1289
1290         if (!is_new_interface(intf_num, IPMI_MEM_ADDR_SPACE,
1291                               addrs[intf_num]))
1292                 return -ENODEV;
1293
1294         info = kmalloc(sizeof(*info), GFP_KERNEL);
1295         if (!info) {
1296                 printk(KERN_ERR "ipmi_si: Could not allocate SI data (2)\n");
1297                 return -ENOMEM;
1298         }
1299         memset(info, 0, sizeof(*info));
1300
1301         info->io_setup = mem_setup;
1302         info->io.info = &(addrs[intf_num]);
1303         info->io.addr = NULL;
1304         info->io.regspacing = regspacings[intf_num];
1305         if (!info->io.regspacing)
1306                 info->io.regspacing = DEFAULT_REGSPACING;
1307         info->io.regsize = regsizes[intf_num];
1308         if (!info->io.regsize)
1309                 info->io.regsize = DEFAULT_REGSPACING;
1310         info->io.regshift = regshifts[intf_num];
1311         info->irq = 0;
1312         info->irq_setup = NULL;
1313         *new_info = info;
1314
1315         if (si_type[intf_num] == NULL)
1316                 si_type[intf_num] = "kcs";
1317
1318         printk("ipmi_si: Trying \"%s\" at memory address 0x%lx\n",
1319                si_type[intf_num], addrs[intf_num]);
1320         return 0;
1321 }
1322
1323
1324 #ifdef CONFIG_ACPI_INTERPRETER
1325
1326 #include <linux/acpi.h>
1327
1328 /* Once we get an ACPI failure, we don't try any more, because we go
1329    through the tables sequentially.  Once we don't find a table, there
1330    are no more. */
1331 static int acpi_failure = 0;
1332
1333 /* For GPE-type interrupts. */
1334 u32 ipmi_acpi_gpe(void *context)
1335 {
1336         struct smi_info *smi_info = context;
1337         unsigned long   flags;
1338 #ifdef DEBUG_TIMING
1339         struct timeval t;
1340 #endif
1341
1342         spin_lock_irqsave(&(smi_info->si_lock), flags);
1343
1344         spin_lock(&smi_info->count_lock);
1345         smi_info->interrupts++;
1346         spin_unlock(&smi_info->count_lock);
1347
1348         if (smi_info->stop_operation)
1349                 goto out;
1350
1351 #ifdef DEBUG_TIMING
1352         do_gettimeofday(&t);
1353         printk("**ACPI_GPE: %d.%9.9d\n", t.tv_sec, t.tv_usec);
1354 #endif
1355         smi_event_handler(smi_info, 0);
1356  out:
1357         spin_unlock_irqrestore(&(smi_info->si_lock), flags);
1358
1359         return ACPI_INTERRUPT_HANDLED;
1360 }
1361
1362 static int acpi_gpe_irq_setup(struct smi_info *info)
1363 {
1364         acpi_status status;
1365
1366         if (!info->irq)
1367                 return 0;
1368
1369         /* FIXME - is level triggered right? */
1370         status = acpi_install_gpe_handler(NULL,
1371                                           info->irq,
1372                                           ACPI_GPE_LEVEL_TRIGGERED,
1373                                           &ipmi_acpi_gpe,
1374                                           info);
1375         if (status != AE_OK) {
1376                 printk(KERN_WARNING
1377                        "ipmi_si: %s unable to claim ACPI GPE %d,"
1378                        " running polled\n",
1379                        DEVICE_NAME, info->irq);
1380                 info->irq = 0;
1381                 return -EINVAL;
1382         } else {
1383                 printk("  Using ACPI GPE %d\n", info->irq);
1384                 return 0;
1385         }
1386 }
1387
1388 static void acpi_gpe_irq_cleanup(struct smi_info *info)
1389 {
1390         if (!info->irq)
1391                 return;
1392
1393         acpi_remove_gpe_handler(NULL, info->irq, &ipmi_acpi_gpe);
1394 }
1395
1396 /*
1397  * Defined at
1398  * http://h21007.www2.hp.com/dspp/files/unprotected/devresource/Docs/TechPapers/IA64/hpspmi.pdf
1399  */
1400 struct SPMITable {
1401         s8      Signature[4];
1402         u32     Length;
1403         u8      Revision;
1404         u8      Checksum;
1405         s8      OEMID[6];
1406         s8      OEMTableID[8];
1407         s8      OEMRevision[4];
1408         s8      CreatorID[4];
1409         s8      CreatorRevision[4];
1410         u8      InterfaceType;
1411         u8      IPMIlegacy;
1412         s16     SpecificationRevision;
1413
1414         /*
1415          * Bit 0 - SCI interrupt supported
1416          * Bit 1 - I/O APIC/SAPIC
1417          */
1418         u8      InterruptType;
1419
1420         /* If bit 0 of InterruptType is set, then this is the SCI
1421            interrupt in the GPEx_STS register. */
1422         u8      GPE;
1423
1424         s16     Reserved;
1425
1426         /* If bit 1 of InterruptType is set, then this is the I/O
1427            APIC/SAPIC interrupt. */
1428         u32     GlobalSystemInterrupt;
1429
1430         /* The actual register address. */
1431         struct acpi_generic_address addr;
1432
1433         u8      UID[4];
1434
1435         s8      spmi_id[1]; /* A '\0' terminated array starts here. */
1436 };
1437
1438 static int try_init_acpi(int intf_num, struct smi_info **new_info)
1439 {
1440         struct smi_info  *info;
1441         acpi_status      status;
1442         struct SPMITable *spmi;
1443         char             *io_type;
1444         u8               addr_space;
1445
1446         if (acpi_failure)
1447                 return -ENODEV;
1448
1449         status = acpi_get_firmware_table("SPMI", intf_num+1,
1450                                          ACPI_LOGICAL_ADDRESSING,
1451                                          (struct acpi_table_header **) &spmi);
1452         if (status != AE_OK) {
1453                 acpi_failure = 1;
1454                 return -ENODEV;
1455         }
1456
1457         if (spmi->IPMIlegacy != 1) {
1458             printk(KERN_INFO "IPMI: Bad SPMI legacy %d\n", spmi->IPMIlegacy);
1459             return -ENODEV;
1460         }
1461
1462         if (spmi->addr.address_space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
1463                 addr_space = IPMI_MEM_ADDR_SPACE;
1464         else
1465                 addr_space = IPMI_IO_ADDR_SPACE;
1466         if (!is_new_interface(-1, addr_space, spmi->addr.address))
1467                 return -ENODEV;
1468
1469         /* Figure out the interface type. */
1470         switch (spmi->InterfaceType)
1471         {
1472         case 1: /* KCS */
1473                 si_type[intf_num] = "kcs";
1474                 break;
1475
1476         case 2: /* SMIC */
1477                 si_type[intf_num] = "smic";
1478                 break;
1479
1480         case 3: /* BT */
1481                 si_type[intf_num] = "bt";
1482                 break;
1483
1484         default:
1485                 printk(KERN_INFO "ipmi_si: Unknown ACPI/SPMI SI type %d\n",
1486                         spmi->InterfaceType);
1487                 return -EIO;
1488         }
1489
1490         info = kmalloc(sizeof(*info), GFP_KERNEL);
1491         if (!info) {
1492                 printk(KERN_ERR "ipmi_si: Could not allocate SI data (3)\n");
1493                 return -ENOMEM;
1494         }
1495         memset(info, 0, sizeof(*info));
1496
1497         if (spmi->InterruptType & 1) {
1498                 /* We've got a GPE interrupt. */
1499                 info->irq = spmi->GPE;
1500                 info->irq_setup = acpi_gpe_irq_setup;
1501                 info->irq_cleanup = acpi_gpe_irq_cleanup;
1502         } else if (spmi->InterruptType & 2) {
1503                 /* We've got an APIC/SAPIC interrupt. */
1504                 info->irq = spmi->GlobalSystemInterrupt;
1505                 info->irq_setup = std_irq_setup;
1506                 info->irq_cleanup = std_irq_cleanup;
1507         } else {
1508                 /* Use the default interrupt setting. */
1509                 info->irq = 0;
1510                 info->irq_setup = NULL;
1511         }
1512
1513         regspacings[intf_num] = spmi->addr.register_bit_width / 8;
1514         info->io.regspacing = spmi->addr.register_bit_width / 8;
1515         regsizes[intf_num] = regspacings[intf_num];
1516         info->io.regsize = regsizes[intf_num];
1517         regshifts[intf_num] = spmi->addr.register_bit_offset;
1518         info->io.regshift = regshifts[intf_num];
1519
1520         if (spmi->addr.address_space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
1521                 io_type = "memory";
1522                 info->io_setup = mem_setup;
1523                 addrs[intf_num] = spmi->addr.address;
1524                 info->io.info = &(addrs[intf_num]);
1525         } else if (spmi->addr.address_space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
1526                 io_type = "I/O";
1527                 info->io_setup = port_setup;
1528                 ports[intf_num] = spmi->addr.address;
1529                 info->io.info = &(ports[intf_num]);
1530         } else {
1531                 kfree(info);
1532                 printk(KERN_WARNING "ipmi_si: Unknown ACPI I/O Address type\n");
1533                 return -EIO;
1534         }
1535
1536         *new_info = info;
1537
1538         printk("ipmi_si: ACPI/SPMI specifies \"%s\" %s SI @ 0x%lx\n",
1539                si_type[intf_num], io_type, (unsigned long) spmi->addr.address);
1540         return 0;
1541 }
1542 #endif
1543
1544 #ifdef CONFIG_X86
1545
1546 typedef struct dmi_ipmi_data
1547 {
1548         u8              type;
1549         u8              addr_space;
1550         unsigned long   base_addr;
1551         u8              irq;
1552         u8              offset;
1553 } dmi_ipmi_data_t;
1554
1555 typedef struct dmi_header
1556 {
1557         u8      type;
1558         u8      length;
1559         u16     handle;
1560 } dmi_header_t;
1561
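/*
 * Decode an SMBIOS type 38 (IPMI Device Information) record.  The
 * offsets used below follow that record's layout: 0x04 is the interface
 * type, 0x08 is the base address (bit 0 distinguishes I/O from memory),
 * 0x10 is the base address modifier (bits 7:6 give the register spacing,
 * bit 4 the low address bit), and 0x11 is the interrupt number.
 */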
1562 static int decode_dmi(dmi_header_t *dm, dmi_ipmi_data_t *ipmi_data)
1563 {
1564         u8              *data = (u8 *)dm;
1565         unsigned long   base_addr;
1566         u8              reg_spacing;
1567
1568         ipmi_data->type = data[0x04];
1569
1570         memcpy(&base_addr, &data[0x08], sizeof(unsigned long));
1571         if (base_addr & 1) {
1572                 /* I/O */
1573                 base_addr &= 0xFFFE;
1574                 ipmi_data->addr_space = IPMI_IO_ADDR_SPACE;
1575         }
1576         else {
1577                 /* Memory */
1578                 ipmi_data->addr_space = IPMI_MEM_ADDR_SPACE;
1579         }
1580
1581         /* The top two bits of byte 0x10 hold the register spacing. */
1582         reg_spacing = (data[0x10] & 0xC0) >> 6;
1583         switch (reg_spacing) {
1584         case 0x00: /* Byte boundaries */
1585                 ipmi_data->offset = 1;
1586                 break;
1587         case 0x01: /* 32-bit boundaries */
1588                 ipmi_data->offset = 4;
1589                 break;
1590         case 0x02: /* 16-bit boundaries */
1591                 ipmi_data->offset = 2;
                     break;
1592         default:
1593                 printk(KERN_WARNING "ipmi_si: Unknown SMBIOS IPMI Base Addr"
1594                        " Modifier: 0x%x\n", reg_spacing);
1595                 return -EIO;
1596         }
1597
1598         /* If bit 4 of byte 0x10 is set, then the lsb for the address
1599            is odd. */
1600         ipmi_data->base_addr = base_addr | ((data[0x10] & 0x10) >> 4);
1601
1602         ipmi_data->irq = data[0x11];
1603
1604         if (is_new_interface(-1, ipmi_data->addr_space, ipmi_data->base_addr))
1605                 return 0;
1606
1607         memset(ipmi_data, 0, sizeof(dmi_ipmi_data_t));
1608
1609         return -1;
1610 }
1611
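/*
 * Walk the SMBIOS structure table looking for a type 38 record.  Each
 * entry is a formatted area of dm->length bytes followed by its string
 * set, which ends with a double NUL; both are skipped to reach the next
 * header.
 */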
1612 static int dmi_table(u32 base, int len, int num,
1613         dmi_ipmi_data_t *ipmi_data)
1614 {
1615         u8                *buf;
1616         struct dmi_header *dm;
1617         u8                *data;
1618         int               i = 1;
1619         int               status = -1;
1620
1621         buf = ioremap(base, len);
1622         if (buf == NULL)
1623                 return -1;
1624
1625         data = buf;
1626
1627         while (i < num && (data - buf) < len)
1628         {
1629                 dm = (dmi_header_t *)data;
1630
1631                 if ((data - buf + dm->length) >= len)
1632                         break;
1633
1634                 if (dm->type == 38) {
1635                         if (decode_dmi(dm, ipmi_data) == 0) {
1636                                 status = 0;
1637                                 break;
1638                         }
1639                 }
1640
1641                 data += dm->length;
1642                 while ((data - buf) < len && (*data || data[1]))
1643                         data++;
1644                 data += 2;
1645                 i++;
1646         }
1647         iounmap(buf);
1648
1649         return status;
1650 }
1651
1652 static inline int dmi_checksum(u8 *buf)
1653 {
1654         u8   sum = 0;
1655         int  a;
1656
1657         for (a = 0; a < 15; a++)
1658                 sum += buf[a];
1659         return (sum == 0);
1660 }
1661
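/*
 * Scan the BIOS area (0xF0000-0xFFFFF) on 16-byte boundaries for the
 * "_DMI_" anchor, verify its checksum, and pull the structure-table
 * base, length, and entry count out of the anchor before handing off
 * to dmi_table().
 */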
1662 static int dmi_iterator(dmi_ipmi_data_t *ipmi_data)
1663 {
1664         u8   buf[15];
1665         u32  fp = 0xF0000;
1666
1667 #ifdef CONFIG_SIMNOW
1668         return -1;
1669 #endif
1670
1671         while (fp < 0xFFFFF)
1672         {
1673                 isa_memcpy_fromio(buf, fp, 15);
1674                 if (memcmp(buf, "_DMI_", 5) == 0 && dmi_checksum(buf))
1675                 {
1676                         u16 num = buf[13] << 8 | buf[12];
1677                         u16 len = buf[7] << 8 | buf[6];
1678                         u32 base = buf[11] << 24 | buf[10] << 16 | buf[9] << 8 | buf[8];
1679
1680                         if (dmi_table(base, len, num, ipmi_data) == 0)
1681                                 return 0;
1682                 }
1683                 fp += 16;
1684         }
1685
1686         return -1;
1687 }
1688
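/*
 * Build an smi_info from the SMBIOS record: the interface type selects
 * kcs/smic/bt, the address space selects port vs. memory setup, and the
 * register spacing and IRQ are copied into the per-interface parameter
 * arrays.
 */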
1689 static int try_init_smbios(int intf_num, struct smi_info **new_info)
1690 {
1691         struct smi_info   *info;
1692         dmi_ipmi_data_t   ipmi_data;
1693         char              *io_type;
1694         int               status;
1695
1696         status = dmi_iterator(&ipmi_data);
1697
1698         if (status < 0)
1699                 return -ENODEV;
1700
1701         switch(ipmi_data.type) {
1702                 case 0x01: /* KCS */
1703                         si_type[intf_num] = "kcs";
1704                         break;
1705                 case 0x02: /* SMIC */
1706                         si_type[intf_num] = "smic";
1707                         break;
1708                 case 0x03: /* BT */
1709                         si_type[intf_num] = "bt";
1710                         break;
1711                 default:
1712                         printk(KERN_WARNING "ipmi_si: Unknown SMBIOS SI type.\n");
1713                         return -EIO;
1714         }
1715
1716         info = kmalloc(sizeof(*info), GFP_KERNEL);
1717         if (!info) {
1718                 printk(KERN_ERR "ipmi_si: Could not allocate SI data (4)\n");
1719                 return -ENOMEM;
1720         }
1721         memset(info, 0, sizeof(*info));
1722
1723         if (ipmi_data.addr_space == IPMI_MEM_ADDR_SPACE) {
1724                 io_type = "memory";
1725                 info->io_setup = mem_setup;
1726                 addrs[intf_num] = ipmi_data.base_addr;
1727                 info->io.info = &(addrs[intf_num]);
1728         } else if (ipmi_data.addr_space == IPMI_IO_ADDR_SPACE) {
1729                 io_type = "I/O";
1730                 info->io_setup = port_setup;
1731                 ports[intf_num] = ipmi_data.base_addr;
1732                 info->io.info = &(ports[intf_num]);
1733         } else {
1734                 kfree(info);
1735                 printk(KERN_WARNING "ipmi_si: Unknown SMBIOS I/O Address type.\n");
1736                 return -EIO;
1737         }
1738
1739         regspacings[intf_num] = ipmi_data.offset;
1740         info->io.regspacing = regspacings[intf_num];
1741         if (!info->io.regspacing)
1742                 info->io.regspacing = DEFAULT_REGSPACING;
1743         info->io.regsize = DEFAULT_REGSPACING;
1744         info->io.regshift = regshifts[intf_num];
1745
1746         irqs[intf_num] = ipmi_data.irq;
1747
1748         *new_info = info;
1749
1750         printk(KERN_INFO "ipmi_si: Found SMBIOS-specified state machine at %s"
1751                " address 0x%lx\n",
1752                io_type, (unsigned long)ipmi_data.base_addr);
1753         return 0;
1754 }
1755 #endif /* CONFIG_X86 */
1756
1757 #ifdef CONFIG_PCI
1758
1759 #define PCI_ERMC_CLASSCODE  0x0C0700
1760 #define PCI_HP_VENDOR_ID    0x103C
1761 #define PCI_MMC_DEVICE_ID   0x121A
1762 #define PCI_MMC_ADDR_CW     0x10
1763
1764 /* Avoid more than one attempt to probe pci smic. */
1765 static int pci_smic_checked = 0;
1766
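/*
 * Look for an HP management controller on PCI: either the MMC device by
 * vendor/device ID, or an eRMC matched by class code with an HP
 * subsystem vendor.  The address word at config offset PCI_MMC_ADDR_CW
 * gives the SMIC base; bit 0 must be set, since only port I/O is
 * supported here.
 */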
1767 static int find_pci_smic(int intf_num, struct smi_info **new_info)
1768 {
1769         struct smi_info  *info;
1770         int              error;
1771         struct pci_dev   *pci_dev = NULL;
1772         u16              base_addr;
1773         int              fe_rmc = 0;
1774
1775         if (pci_smic_checked)
1776                 return -ENODEV;
1777
1778         pci_smic_checked = 1;
1779
1780         if ((pci_dev = pci_get_device(PCI_HP_VENDOR_ID, PCI_MMC_DEVICE_ID,
1781                                        NULL)))
1782                 ;
1783         else if ((pci_dev = pci_get_class(PCI_ERMC_CLASSCODE, NULL)) &&
1784                  pci_dev->subsystem_vendor == PCI_HP_VENDOR_ID)
1785                 fe_rmc = 1;
1786         else {
1787                 /* pci_get_class() may have returned a non-HP device;
                        drop that reference (pci_dev_put() ignores NULL). */
                     pci_dev_put(pci_dev);
                     return -ENODEV;
             }
1788
1789         error = pci_read_config_word(pci_dev, PCI_MMC_ADDR_CW, &base_addr);
1790         if (error)
1791         {
1792                 pci_dev_put(pci_dev);
1793                 printk(KERN_ERR
1794                        "ipmi_si: pci_read_config_word() failed (%d).\n",
1795                        error);
1796                 return -ENODEV;
1797         }
1798
1799         /* Bit 0: 1 specifies programmed I/O, 0 specifies memory mapped I/O */
1800         if (!(base_addr & 0x0001))
1801         {
1802                 pci_dev_put(pci_dev);
1803                 printk(KERN_ERR
1804                        "ipmi_si: memory mapped I/O not supported for PCI"
1805                        " smic.\n");
1806                 return -ENODEV;
1807         }
1808
1809         base_addr &= 0xFFFE;
1810         if (!fe_rmc)
1811                 /* Data register starts at base address + 1 in eRMC */
1812                 ++base_addr;
1813
1814         if (!is_new_interface(-1, IPMI_IO_ADDR_SPACE, base_addr)) {
1815                 pci_dev_put(pci_dev);
1816                 return -ENODEV;
1817         }
1818
1819         info = kmalloc(sizeof(*info), GFP_KERNEL);
1820         if (!info) {
1821                 pci_dev_put(pci_dev);
1822                 printk(KERN_ERR "ipmi_si: Could not allocate SI data (5)\n");
1823                 return -ENOMEM;
1824         }
1825         memset(info, 0, sizeof(*info));
1826
1827         info->io_setup = port_setup;
1828         ports[intf_num] = base_addr;
1829         info->io.info = &(ports[intf_num]);
1830         info->io.regspacing = regspacings[intf_num];
1831         if (!info->io.regspacing)
1832                 info->io.regspacing = DEFAULT_REGSPACING;
1833         info->io.regsize = DEFAULT_REGSPACING;
1834         info->io.regshift = regshifts[intf_num];
1835
1836         *new_info = info;
1837
1838         irqs[intf_num] = pci_dev->irq;
1839         si_type[intf_num] = "smic";
1840
1841         printk(KERN_INFO "ipmi_si: Found PCI SMIC at I/O address 0x%lx\n",
1842                 (unsigned long) base_addr);
1843
1844         pci_dev_put(pci_dev);
1845         return 0;
1846 }
1847 #endif /* CONFIG_PCI */
1848
1849 static int try_init_plug_and_play(int intf_num, struct smi_info **new_info)
1850 {
1851 #ifdef CONFIG_PCI
1852         if (find_pci_smic(intf_num, new_info)==0)
1853                 return 0;
1854 #endif
1855         /* Include other methods here. */
1856
1857         return -ENODEV;
1858 }
1859
1860
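/*
 * Send a Get Device ID command by driving the state machine directly
 * (polling, with short sleeps whenever it asks for a delay).  This both
 * verifies that something IPMI-like is really behind the interface and
 * records the firmware and IPMI version numbers from the response.
 */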
1861 static int try_get_dev_id(struct smi_info *smi_info)
1862 {
1863         unsigned char      msg[2];
1864         unsigned char      *resp;
1865         unsigned long      resp_len;
1866         enum si_sm_result smi_result;
1867         int               rv = 0;
1868
1869         resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
1870         if (!resp)
1871                 return -ENOMEM;
1872
1873         /* Do a Get Device ID command, since it comes back with some
1874            useful info. */
1875         msg[0] = IPMI_NETFN_APP_REQUEST << 2;
1876         msg[1] = IPMI_GET_DEVICE_ID_CMD;
1877         smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
1878
1879         smi_result = smi_info->handlers->event(smi_info->si_sm, 0);
1880         for (;;)
1881         {
1882                 if (smi_result == SI_SM_CALL_WITH_DELAY) {
1883                         set_current_state(TASK_UNINTERRUPTIBLE);
1884                         schedule_timeout(1);
1885                         smi_result = smi_info->handlers->event(
1886                                 smi_info->si_sm, 100);
1887                 }
1888                 else if (smi_result == SI_SM_CALL_WITHOUT_DELAY)
1889                 {
1890                         smi_result = smi_info->handlers->event(
1891                                 smi_info->si_sm, 0);
1892                 }
1893                 else
1894                         break;
1895         }
1896         if (smi_result == SI_SM_HOSED) {
1897                 /* We couldn't get the state machine to run, so whatever's at
1898                    the port is probably not an IPMI SMI interface. */
1899                 rv = -ENODEV;
1900                 goto out;
1901         }
1902
1903         /* Otherwise, we got some data. */
1904         resp_len = smi_info->handlers->get_result(smi_info->si_sm,
1905                                                   resp, IPMI_MAX_MSG_LENGTH);
1906         if (resp_len < 6) {
1907                 /* That's odd, it should be longer. */
1908                 rv = -EINVAL;
1909                 goto out;
1910         }
1911
1912         if ((resp[1] != IPMI_GET_DEVICE_ID_CMD) || (resp[2] != 0)) {
1913                 /* That's odd, it shouldn't be able to fail. */
1914                 rv = -EINVAL;
1915                 goto out;
1916         }
1917
1918         /* Record info from the get device id, in case we need it. */
1919         smi_info->ipmi_si_dev_rev = resp[4] & 0xf;
1920         smi_info->ipmi_si_fw_rev_major = resp[5] & 0x7f;
1921         smi_info->ipmi_si_fw_rev_minor = resp[6];
1922         smi_info->ipmi_version_major = resp[7] & 0xf;
1923         smi_info->ipmi_version_minor = resp[7] >> 4;
1924
1925  out:
1926         kfree(resp);
1927         return rv;
1928 }
1929
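/*
 * read_proc handlers backing the "type" and "si_stats" proc entries
 * that init_one_smi() registers below.
 */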
1930 static int type_file_read_proc(char *page, char **start, off_t off,
1931                                int count, int *eof, void *data)
1932 {
1933         char            *out = (char *) page;
1934         struct smi_info *smi = data;
1935
1936         switch (smi->si_type) {
1937             case SI_KCS:
1938                 return sprintf(out, "kcs\n");
1939             case SI_SMIC:
1940                 return sprintf(out, "smic\n");
1941             case SI_BT:
1942                 return sprintf(out, "bt\n");
1943             default:
1944                 return 0;
1945         }
1946 }
1947
1948 static int stat_file_read_proc(char *page, char **start, off_t off,
1949                                int count, int *eof, void *data)
1950 {
1951         char            *out = (char *) page;
1952         struct smi_info *smi = data;
1953
1954         out += sprintf(out, "interrupts_enabled:    %d\n",
1955                        smi->irq && !smi->interrupt_disabled);
1956         out += sprintf(out, "short_timeouts:        %ld\n",
1957                        smi->short_timeouts);
1958         out += sprintf(out, "long_timeouts:         %ld\n",
1959                        smi->long_timeouts);
1960         out += sprintf(out, "timeout_restarts:      %ld\n",
1961                        smi->timeout_restarts);
1962         out += sprintf(out, "idles:                 %ld\n",
1963                        smi->idles);
1964         out += sprintf(out, "interrupts:            %ld\n",
1965                        smi->interrupts);
1966         out += sprintf(out, "attentions:            %ld\n",
1967                        smi->attentions);
1968         out += sprintf(out, "flag_fetches:          %ld\n",
1969                        smi->flag_fetches);
1970         out += sprintf(out, "hosed_count:           %ld\n",
1971                        smi->hosed_count);
1972         out += sprintf(out, "complete_transactions: %ld\n",
1973                        smi->complete_transactions);
1974         out += sprintf(out, "events:                %ld\n",
1975                        smi->events);
1976         out += sprintf(out, "watchdog_pretimeouts:  %ld\n",
1977                        smi->watchdog_pretimeouts);
1978         out += sprintf(out, "incoming_messages:     %ld\n",
1979                        smi->incoming_messages);
1980
1981         return (out - ((char *) page));
1982 }
1983
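/*
 * Probe order for one interface: explicitly configured memory or port
 * parameters first (try_init_mem/try_init_port), then, when
 * si_trydefaults is set, ACPI/SPMI, SMBIOS, and finally PCI
 * plug-and-play.  Whichever source succeeds provides the smi_info that
 * the rest of this function finishes setting up.
 */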
1984 /* Returns 0 if initialized, or negative on an error. */
1985 static int init_one_smi(int intf_num, struct smi_info **smi)
1986 {
1987         int             rv;
1988         struct smi_info *new_smi;
1989
1990
1991         rv = try_init_mem(intf_num, &new_smi);
1992         if (rv)
1993                 rv = try_init_port(intf_num, &new_smi);
1994 #ifdef CONFIG_ACPI_INTERPRETER
1995         if ((rv) && (si_trydefaults)) {
1996                 rv = try_init_acpi(intf_num, &new_smi);
1997         }
1998 #endif
1999 #ifdef CONFIG_X86
2000         if ((rv) && (si_trydefaults)) {
2001                 rv = try_init_smbios(intf_num, &new_smi);
2002         }
2003 #endif
2004         if ((rv) && (si_trydefaults)) {
2005                 rv = try_init_plug_and_play(intf_num, &new_smi);
2006         }
2007
2008
2009         if (rv)
2010                 return rv;
2011
2012         /* So we know not to free it unless we have allocated one. */
2013         new_smi->intf = NULL;
2014         new_smi->si_sm = NULL;
2015         new_smi->handlers = NULL;
2016
2017         if (!new_smi->irq_setup) {
2018                 new_smi->irq = irqs[intf_num];
2019                 new_smi->irq_setup = std_irq_setup;
2020                 new_smi->irq_cleanup = std_irq_cleanup;
2021         }
2022
2023         /* Default to KCS if no type is specified. */
2024         if (si_type[intf_num] == NULL) {
2025                 if (si_trydefaults)
2026                         si_type[intf_num] = "kcs";
2027                 else {
2028                         rv = -EINVAL;
2029                         goto out_err;
2030                 }
2031         }
2032
2033         /* Set up the state machine to use. */
2034         if (strcmp(si_type[intf_num], "kcs") == 0) {
2035                 new_smi->handlers = &kcs_smi_handlers;
2036                 new_smi->si_type = SI_KCS;
2037         } else if (strcmp(si_type[intf_num], "smic") == 0) {
2038                 new_smi->handlers = &smic_smi_handlers;
2039                 new_smi->si_type = SI_SMIC;
2040         } else if (strcmp(si_type[intf_num], "bt") == 0) {
2041                 new_smi->handlers = &bt_smi_handlers;
2042                 new_smi->si_type = SI_BT;
2043         } else {
2044                 /* No support for anything else yet. */
2045                 rv = -EIO;
2046                 goto out_err;
2047         }
2048
2049         /* Allocate the state machine's data and initialize it. */
2050         new_smi->si_sm = kmalloc(new_smi->handlers->size(), GFP_KERNEL);
2051         if (!new_smi->si_sm) {
2052                 printk(" Could not allocate state machine memory\n");
2053                 rv = -ENOMEM;
2054                 goto out_err;
2055         }
2056         new_smi->io_size = new_smi->handlers->init_data(new_smi->si_sm,
2057                                                         &new_smi->io);
2058
2059         /* Now that we know the I/O size, we can set up the I/O. */
2060         rv = new_smi->io_setup(new_smi);
2061         if (rv) {
2062                 printk(" Could not set up I/O space\n");
2063                 goto out_err;
2064         }
2065
2066         spin_lock_init(&(new_smi->si_lock));
2067         spin_lock_init(&(new_smi->msg_lock));
2068         spin_lock_init(&(new_smi->count_lock));
2069
2070         /* Do low-level detection first. */
2071         if (new_smi->handlers->detect(new_smi->si_sm)) {
2072                 rv = -ENODEV;
2073                 goto out_err;
2074         }
2075
2076         /* Attempt a get device id command.  If it fails, we probably
2077            don't have a SMI here. */
2078         rv = try_get_dev_id(new_smi);
2079         if (rv)
2080                 goto out_err;
2081
2082         /* Try to claim any interrupts. */
2083         new_smi->irq_setup(new_smi);
2084
2085         INIT_LIST_HEAD(&(new_smi->xmit_msgs));
2086         INIT_LIST_HEAD(&(new_smi->hp_xmit_msgs));
2087         new_smi->curr_msg = NULL;
2088         atomic_set(&new_smi->req_events, 0);
2089         new_smi->run_to_completion = 0;
2090
2091         new_smi->interrupt_disabled = 0;
2092         new_smi->timer_stopped = 0;
2093         new_smi->stop_operation = 0;
2094
2095         /* Start clearing the flags before we enable interrupts or the
2096            timer to avoid racing with the timer. */
2097         start_clear_flags(new_smi);
2098         /* IRQ is defined to be set when non-zero. */
2099         if (new_smi->irq)
2100                 new_smi->si_state = SI_CLEARING_FLAGS_THEN_SET_IRQ;
2101
2102         /* The ipmi_register_smi() code does some operations to
2103            determine the channel information, so we must be ready to
2104            handle operations before it is called.  This means we have
2105            to stop the timer if we get an error after this point. */
2106         init_timer(&(new_smi->si_timer));
2107         new_smi->si_timer.data = (long) new_smi;
2108         new_smi->si_timer.function = smi_timeout;
2109         new_smi->last_timeout_jiffies = jiffies;
2110         new_smi->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES;
2111         add_timer(&(new_smi->si_timer));
2112
2113         rv = ipmi_register_smi(&handlers,
2114                                new_smi,
2115                                new_smi->ipmi_version_major,
2116                                new_smi->ipmi_version_minor,
2117                                &(new_smi->intf));
2118         if (rv) {
2119                 printk(KERN_ERR
2120                        "ipmi_si: Unable to register device: error %d\n",
2121                        rv);
2122                 goto out_err_stop_timer;
2123         }
2124
2125         rv = ipmi_smi_add_proc_entry(new_smi->intf, "type",
2126                                      type_file_read_proc, NULL,
2127                                      new_smi, THIS_MODULE);
2128         if (rv) {
2129                 printk(KERN_ERR
2130                        "ipmi_si: Unable to create proc entry: %d\n",
2131                        rv);
2132                 goto out_err_stop_timer;
2133         }
2134
2135         rv = ipmi_smi_add_proc_entry(new_smi->intf, "si_stats",
2136                                      stat_file_read_proc, NULL,
2137                                      new_smi, THIS_MODULE);
2138         if (rv) {
2139                 printk(KERN_ERR
2140                        "ipmi_si: Unable to create proc entry: %d\n",
2141                        rv);
2142                 goto out_err_stop_timer;
2143         }
2144
2145         *smi = new_smi;
2146
2147         printk(" IPMI %s interface initialized\n", si_type[intf_num]);
2148
2149         return 0;
2150
2151  out_err_stop_timer:
2152         new_smi->stop_operation = 1;
2153
2154         /* Wait for the timer to stop.  This avoids problems with race
2155            conditions removing the timer here. */
2156         while (!new_smi->timer_stopped) {
2157                 set_current_state(TASK_UNINTERRUPTIBLE);
2158                 schedule_timeout(1);
2159         }
2160
2161  out_err:
2162         if (new_smi->intf)
2163                 ipmi_unregister_smi(new_smi->intf);
2164
2165         new_smi->irq_cleanup(new_smi);
2166
2167         /* Wait until we know that we are out of any interrupt
2168            handlers that might have been running before we freed
2169            the interrupt. */
2170         synchronize_kernel();
2171
2172         if (new_smi->si_sm) {
2173                 if (new_smi->handlers)
2174                         new_smi->handlers->cleanup(new_smi->si_sm);
2175                 kfree(new_smi->si_sm);
2176         }
2177         /* On the early error paths io_setup() has not run yet and
                io_cleanup may still be NULL. */
             if (new_smi->io_cleanup)
                     new_smi->io_cleanup(new_smi);
2178
2179         return rv;
2180 }
2181
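/*
 * Module initialization: split the comma-separated si_type_str
 * parameter into si_type[], print the version banner, then bring up
 * interface 0 (falling back through the default KCS, SMIC, and BT I/O
 * ports when si_trydefaults is set) and any further configured
 * interfaces up to SI_MAX_PARMS.
 */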
2182 static __init int init_ipmi_si(void)
2183 {
2184         int  rv = 0;
2185         int  pos = 0;
2186         int  i;
2187         char *str;
2188
2189         if (initialized)
2190                 return 0;
2191         initialized = 1;
2192
2193         /* Parse out the si_type string into its components. */
2194         str = si_type_str;
2195         if (*str != '\0') {
2196                 for (i=0; (i<SI_MAX_PARMS) && (*str != '\0'); i++) {
2197                         si_type[i] = str;
2198                         str = strchr(str, ',');
2199                         if (str) {
2200                                 *str = '\0';
2201                                 str++;
2202                         } else {
2203                                 break;
2204                         }
2205                 }
2206         }
2207
2208         printk(KERN_INFO "IPMI System Interface driver version "
2209                IPMI_SI_VERSION);
2210         if (kcs_smi_handlers.version)
2211                 printk(", KCS version %s", kcs_smi_handlers.version);
2212         if (smic_smi_handlers.version)
2213                 printk(", SMIC version %s", smic_smi_handlers.version);
2214         if (bt_smi_handlers.version)
2215                 printk(", BT version %s", bt_smi_handlers.version);
2216         printk("\n");
2217
2218         rv = init_one_smi(0, &(smi_infos[pos]));
2219         if (rv && !ports[0] && si_trydefaults) {
2220                 /* If we are trying defaults and the initial port is
2221                    not set, then set it. */
2222                 si_type[0] = "kcs";
2223                 ports[0] = DEFAULT_KCS_IO_PORT;
2224                 rv = init_one_smi(0, &(smi_infos[pos]));
2225                 if (rv) {
2226                         /* No KCS - try SMIC */
2227                         si_type[0] = "smic";
2228                         ports[0] = DEFAULT_SMIC_IO_PORT;
2229                         rv = init_one_smi(0, &(smi_infos[pos]));
2230                 }
2231                 if (rv) {
2232                         /* No SMIC - try BT */
2233                         si_type[0] = "bt";
2234                         ports[0] = DEFAULT_BT_IO_PORT;
2235                         rv = init_one_smi(0, &(smi_infos[pos]));
2236                 }
2237         }
2238         if (rv == 0)
2239                 pos++;
2240
2241         for (i=1; i < SI_MAX_PARMS; i++) {
2242                 rv = init_one_smi(i, &(smi_infos[pos]));
2243                 if (rv == 0)
2244                         pos++;
2245         }
2246
2247         if (smi_infos[0] == NULL) {
2248                 printk(KERN_WARNING "ipmi_si: Unable to find any System Interface(s)\n");
2249                 return -ENODEV;
2250         }
2251
2252         return 0;
2253 }
2254 module_init(init_ipmi_si);
2255
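/*
 * Tear down one interface: flag stop_operation under the locks, release
 * the interrupt, wait for any running handlers and for the timer to
 * stop, poll the state machine until the current transaction drains,
 * then unregister from the IPMI core and free the state machine and
 * I/O resources.
 */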
2256 static void __exit cleanup_one_si(struct smi_info *to_clean)
2257 {
2258         int           rv;
2259         unsigned long flags;
2260
2261         if (!to_clean)
2262                 return;
2263
2264         /* Tell the timer and interrupt handlers that we are shutting
2265            down. */
2266         spin_lock_irqsave(&(to_clean->si_lock), flags);
2267         spin_lock(&(to_clean->msg_lock));
2268
2269         to_clean->stop_operation = 1;
2270
2271         to_clean->irq_cleanup(to_clean);
2272
2273         spin_unlock(&(to_clean->msg_lock));
2274         spin_unlock_irqrestore(&(to_clean->si_lock), flags);
2275
2276         /* Wait until we know that we are out of any interrupt
2277            handlers that might have been running before we freed
2278            the interrupt. */
2279         synchronize_kernel();
2280
2281         /* Wait for the timer to stop.  This avoids problems with race
2282            conditions removing the timer here. */
2283         while (!to_clean->timer_stopped) {
2284                 set_current_state(TASK_UNINTERRUPTIBLE);
2285                 schedule_timeout(1);
2286         }
2287
2288         /* Interrupts and timeouts are stopped, now make sure the
2289            interface is in a clean state. */
2290         while ((to_clean->curr_msg) || (to_clean->si_state != SI_NORMAL)) {
2291                 poll(to_clean);
2292                 set_current_state(TASK_UNINTERRUPTIBLE);
2293                 schedule_timeout(1);
2294         }
2295
2296         rv = ipmi_unregister_smi(to_clean->intf);
2297         if (rv) {
2298                 printk(KERN_ERR
2299                        "ipmi_si: Unable to unregister device: errno=%d\n",
2300                        rv);
2301         }
2302
2303         to_clean->handlers->cleanup(to_clean->si_sm);
2304
2305         kfree(to_clean->si_sm);
2306
2307         to_clean->io_cleanup(to_clean);
2308 }
2309
2310 static __exit void cleanup_ipmi_si(void)
2311 {
2312         int i;
2313
2314         if (!initialized)
2315                 return;
2316
2317         for (i=0; i<SI_MAX_DRIVERS; i++) {
2318                 cleanup_one_si(smi_infos[i]);
2319         }
2320 }
2321 module_exit(cleanup_ipmi_si);
2322
2323 MODULE_LICENSE("GPL");