1 /*
2  * Core I2O structure management 
3  * 
4  * (C) Copyright 1999-2002   Red Hat Software 
5  *
6  * Written by Alan Cox, Building Number Three Ltd 
7  * 
8  * This program is free software; you can redistribute it and/or 
9  * modify it under the terms of the GNU General Public License 
10  * as published by the Free Software Foundation; either version 
11  * 2 of the License, or (at your option) any later version.  
12  * 
13  * A lot of the I2O message side code from this is taken from the 
14  * Red Creek RCPCI45 adapter driver by Red Creek Communications 
15  * 
16  * Fixes/additions:
17  *      Philipp Rumpf
18  *      Juha Sievänen <Juha.Sievanen@cs.Helsinki.FI>
19  *      Auvo Häkkinen <Auvo.Hakkinen@cs.Helsinki.FI>
20  *      Deepak Saxena <deepak@plexity.net>
21  *      Boji T Kannanthanam <boji.t.kannanthanam@intel.com>
22  *      Alan Cox <alan@redhat.com>:
23  *              Ported to Linux 2.5.
24  *      Markus Lidel <Markus.Lidel@shadowconnect.com>:
25  *              Minor fixes for 2.6.
26  * 
27  */
28
29 #include <linux/config.h>
30 #include <linux/module.h>
31 #include <linux/kernel.h>
32 #include <linux/pci.h>
33
34 #include <linux/i2o.h>
35
36 #include <linux/errno.h>
37 #include <linux/init.h>
38 #include <linux/slab.h>
39 #include <linux/spinlock.h>
40 #include <linux/smp_lock.h>
41
42 #include <linux/bitops.h>
43 #include <linux/wait.h>
44 #include <linux/delay.h>
45 #include <linux/timer.h>
46 #include <linux/interrupt.h>
47 #include <linux/sched.h>
48 #include <asm/semaphore.h>
49 #include <linux/completion.h>
50 #include <linux/workqueue.h>
51
52 #include <asm/io.h>
53 #include <linux/reboot.h>
54 #ifdef CONFIG_MTRR
55 #include <asm/mtrr.h>
56 #endif // CONFIG_MTRR
57
58 #include "i2o_lan.h"
59
60 //#define DRIVERDEBUG
61
62 #ifdef DRIVERDEBUG
63 #define dprintk(s, args...) printk(s, ## args)
64 #else
65 #define dprintk(s, args...)
66 #endif
67
68 /* OSM table */
69 static struct i2o_handler *i2o_handlers[MAX_I2O_MODULES];
70
71 /* Controller list */
72 static struct i2o_controller *i2o_controllers[MAX_I2O_CONTROLLERS];
73 struct i2o_controller *i2o_controller_chain;
74 int i2o_num_controllers;
75
76 /* Initiator Context for Core message */
77 static int core_context;
78
79 /* Initialization && shutdown functions */
80 void i2o_sys_init(void);
81 static void i2o_sys_shutdown(void);
82 static int i2o_reset_controller(struct i2o_controller *);
83 static int i2o_reboot_event(struct notifier_block *, unsigned long , void *);
84 static int i2o_online_controller(struct i2o_controller *);
85 static int i2o_init_outbound_q(struct i2o_controller *);
86 static int i2o_post_outbound_messages(struct i2o_controller *);
87
88 /* Reply handler */
89 static void i2o_core_reply(struct i2o_handler *, struct i2o_controller *,
90                            struct i2o_message *);
91
92 /* Various helper functions */
93 static int i2o_lct_get(struct i2o_controller *);
94 static int i2o_lct_notify(struct i2o_controller *);
95 static int i2o_hrt_get(struct i2o_controller *);
96
97 static int i2o_build_sys_table(void);
98 static int i2o_systab_send(struct i2o_controller *c);
99
100 /* I2O core event handler */
101 static int i2o_core_evt(void *);
102 static int evt_pid;
103 static int evt_running;
104
105 /* Dynamic LCT update handler */
106 static int i2o_dyn_lct(void *);
107
108 void i2o_report_controller_unit(struct i2o_controller *, struct i2o_device *);
109
110 static void i2o_pci_dispose(struct i2o_controller *c);
111
112 /*
113  * I2O System Table.  Contains information about
114  * all the IOPs in the system.  Used to inform IOPs
115  * about each other's existence.
116  *
117  * sys_tbl_ver is the CurrentChangeIndicator that is
118  * used by IOPs to track changes.
119  */
120 static struct i2o_sys_tbl *sys_tbl;
121 static int sys_tbl_ind;
122 static int sys_tbl_len;
123
124 /*
125  * This spin lock is used to keep a device from being
126  * added and deleted concurrently across CPUs or interrupts.
127          * This can occur when a user creates a device and immediately
128  * deletes it before the new_dev_notify() handler is called.
129  */
130 static spinlock_t i2o_dev_lock = SPIN_LOCK_UNLOCKED;
131
132 /*
133  * Structures and definitions for synchronous message posting.
134  * See i2o_post_wait() for description.
135  */ 
136 struct i2o_post_wait_data
137 {
138         int *status;            /* Pointer to status block on caller stack */
139         int *complete;          /* Pointer to completion flag on caller stack */
140         u32 id;                 /* Unique identifier */
141         wait_queue_head_t *wq;  /* Wake up for caller (NULL for dead) */
142         struct i2o_post_wait_data *next;        /* Chain */
143         void *mem[2];           /* Memory blocks to recover on failure path */
144         dma_addr_t phys[2];     /* Physical address of blocks to recover */
145         u32 size[2];            /* Size of blocks to recover */
146 };
147
148 static struct i2o_post_wait_data *post_wait_queue;
149 static u32 post_wait_id;        // Unique ID for each post_wait
150 static spinlock_t post_wait_lock = SPIN_LOCK_UNLOCKED;
151 static void i2o_post_wait_complete(struct i2o_controller *, u32, int);
152
153 /* OSM descriptor handler */ 
154 static struct i2o_handler i2o_core_handler =
155 {
156         (void *)i2o_core_reply,
157         NULL,
158         NULL,
159         NULL,
160         "I2O core layer",
161         0,
162         I2O_CLASS_EXECUTIVE
163 };
164
165 /*
166  * Used when queueing a reply to be handled later
167  */
168  
169 struct reply_info
170 {
171         struct i2o_controller *iop;
172         u32 msg[MSG_FRAME_SIZE];
173 };
174 static struct reply_info evt_reply;
175 static struct reply_info events[I2O_EVT_Q_LEN];
176 static int evt_in;
177 static int evt_out;
178 static int evt_q_len;
179 #define MODINC(x,y) ((x) = ((x) + 1) % (y))
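/*
 * Editor's note: evt_in/evt_out/evt_q_len and MODINC() implement a simple
 * fixed-size ring buffer over events[]: evt_in is the write index advanced
 * by the reply handler, evt_out is the read index advanced by the event
 * thread, and when the queue is full the oldest entry is overwritten.
 * MODINC() is just a wrapping increment; a tiny worked example with a
 * queue length of 4:
 *
 *      int x = 3;
 *      MODINC(x, 4);   // x == 0, wrapped around
 *      MODINC(x, 4);   // x == 1
 */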
180
181 /*
182  * I2O configuration lock. Contention isn't a big deal here,
183  * so we have only one.
184  */
185
186 static DECLARE_MUTEX(i2o_configuration_lock);
187
188 /* 
189  * Event spinlock.  Used to keep the event queue sane and to prevent
190  * multiple events from being handled simultaneously.
191  */
192 static spinlock_t i2o_evt_lock = SPIN_LOCK_UNLOCKED;
193
194 /*
195  * Semaphore used to synchronize event handling thread with 
196  * interrupt handler.
197  */
198  
199 static DECLARE_MUTEX(evt_sem);
200 static DECLARE_COMPLETION(evt_dead);
201 static DECLARE_WAIT_QUEUE_HEAD(evt_wait);
202
203 static struct notifier_block i2o_reboot_notifier =
204 {
205         i2o_reboot_event,
206         NULL,
207         0
208 };
209
210 /*
211  *      Config options
212  */
213
214 static int verbose;
215
216 #if BITS_PER_LONG == 64
217 /**
218  *      i2o_context_list_add -  append a pointer to the context list and
219  *                              return a matching context id.
220  *      @ptr: pointer to add to the context list
221  *      @c: controller to which the context list belongs
222  *      Returns a context id, which can be used in the transaction context
223  *      field.
224  *
225  *      Because the context field in I2O is only 32 bits wide, on 64-bit systems
226  *      the pointer is too large to fit in the context field. The i2o_context_list
227  *      functions map pointers to context ids.
228  */
229 u32 i2o_context_list_add(void *ptr, struct i2o_controller *c) {
230         u32 context = 1;
231         struct i2o_context_list_element **entry = &c->context_list;
232         struct i2o_context_list_element *element;
233         unsigned long flags;
234
235         spin_lock_irqsave(&c->context_list_lock, flags);
236         while(*entry && ((*entry)->flags & I2O_CONTEXT_LIST_USED)) {
237                 if((*entry)->context >= context)
238                         context = (*entry)->context + 1;
239                 entry = &((*entry)->next);
240         }
241
242         if(!*entry) {
243                 if(unlikely(!context)) {
244                         spin_unlock_irqrestore(&c->context_list_lock, flags);
245                         printk(KERN_EMERG "i2o_core: context list overflow\n");
246                         return 0;
247                 }
248
249                 element = kmalloc(sizeof(struct i2o_context_list_element), GFP_KERNEL);
250                 if(!element) {
251                         printk(KERN_EMERG "i2o_core: could not allocate memory for context list element\n");
252                         return 0;
253                 }
254                 element->context = context;
255                 element->next = NULL;
256                 *entry = element;
257         } else
258                 element = *entry;
259
260         element->ptr = ptr;
261         element->flags = I2O_CONTEXT_LIST_USED;
262
263         spin_unlock_irqrestore(&c->context_list_lock, flags);
264         dprintk(KERN_DEBUG "i2o_core: add context to list %p -> %d\n", ptr, context);
265         return context;
266 }
267
268 /**
269  *      i2o_context_list_remove - remove a ptr from the context list and return
270  *                                the matching context id.
271  *      @ptr: pointer to be removed from the context list
272  *      @c: controller to which the context list belongs
273  *      Returns the context id, which can be used in the transaction context
274  *      field.
275  */
276 u32 i2o_context_list_remove(void *ptr, struct i2o_controller *c) {
277         struct i2o_context_list_element **entry = &c->context_list;
278         struct i2o_context_list_element *element;
279         u32 context;
280         unsigned long flags;
281
282         spin_lock_irqsave(&c->context_list_lock, flags);
283         while(*entry && ((*entry)->ptr != ptr))
284                 entry = &((*entry)->next);
285
286         if(unlikely(!*entry)) {
287                 spin_unlock_irqrestore(&c->context_list_lock, flags);
288                 printk(KERN_WARNING "i2o_core: could not remove nonexistent ptr %p\n", ptr);
289                 return 0;
290         }
291
292         element = *entry;
293
294         context = element->context;
295         element->ptr = NULL;
296         element->flags |= I2O_CONTEXT_LIST_DELETED;
297
298         spin_unlock_irqrestore(&c->context_list_lock, flags);
299         dprintk(KERN_DEBUG "i2o_core: marked as deleted in context list %p -> %d\n", ptr, context);
300         return context;
301 }
302
303 /**
304  *      i2o_context_list_get -  get a ptr from the context list and remove it
305  *                              from the list.
306  *      @context: context id to which the pointer belongs
307  *      @c: controller to which the context list belongs
308  *      returns pointer to the matching context id
309  */
310 void *i2o_context_list_get(u32 context, struct i2o_controller *c) {
311         struct i2o_context_list_element **entry = &c->context_list;
312         struct i2o_context_list_element *element;
313         void *ptr;
314         int count = 0;
315         unsigned long flags;
316
317         spin_lock_irqsave(&c->context_list_lock, flags);
318         while(*entry && ((*entry)->context != context)) {
319                 entry = &((*entry)->next);
320                 count ++;
321         }
322
323         if(unlikely(!*entry)) {
324                 spin_unlock_irqrestore(&c->context_list_lock, flags);
325                 printk(KERN_WARNING "i2o_core: context id %d not found\n", context);
326                 return NULL;
327         }
328
329         element = *entry;
330         ptr = element->ptr;
331         if(count >= I2O_CONTEXT_LIST_MIN_LENGTH) {
332                 *entry = (*entry)->next;
333                 kfree(element);
334         } else {
335                 element->ptr = NULL;
336                 element->flags &= ~I2O_CONTEXT_LIST_USED;
337         }
338
339         spin_unlock_irqrestore(&c->context_list_lock, flags);
340         dprintk(KERN_DEBUG "i2o_core: get ptr from context list %d -> %p\n", context, ptr);
341         return ptr;
342 }
343 #endif
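/*
 * Editor's note: a minimal usage sketch for the context list helpers above.
 * On 64-bit builds an OSM cannot stuff a kernel pointer into the 32-bit
 * transaction context field directly, so it maps the pointer to an id when
 * posting a message and maps the id back in its reply handler.  The
 * surrounding names (struct my_request, msg layout) are hypothetical:
 *
 *      struct my_request *req = ...;                   // hypothetical OSM state
 *
 *      // when building a message: put an id, not the pointer, in tcntxt
 *      msg[3] = i2o_context_list_add(req, c);
 *
 *      // in the OSM's reply handler: translate the id back
 *      struct my_request *r = i2o_context_list_get(msg[3], c);
 *      if (r == NULL)
 *              return;                                 // unknown/stale context
 */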
344
345 /*
346  * I2O Core reply handler
347  */
348 static void i2o_core_reply(struct i2o_handler *h, struct i2o_controller *c,
349                     struct i2o_message *m)
350 {
351         u32 *msg=(u32 *)m;
352         u32 status;
353         u32 context = msg[2];
354
355         if (msg[0] & MSG_FAIL) // Fail bit is set
356         {
357                 u32 *preserved_msg = (u32*)(c->msg_virt + msg[7]);
358
359                 i2o_report_status(KERN_INFO, "i2o_core", msg);
360                 i2o_dump_message(preserved_msg);
361
362                 /* If the failed request needs special treatment,
363                  * it should be done here. */
364
365                 /* Release the preserved msg by resubmitting it as a NOP */
366
367                 preserved_msg[0] = cpu_to_le32(THREE_WORD_MSG_SIZE | SGL_OFFSET_0);
368                 preserved_msg[1] = cpu_to_le32(I2O_CMD_UTIL_NOP << 24 | HOST_TID << 12 | 0);
369                 preserved_msg[2] = 0;
370                 i2o_post_message(c, msg[7]);
371
372                 /* If reply to i2o_post_wait failed, return causes a timeout */
373
374                 return;
375         }       
376
377 #ifdef DRIVERDEBUG
378         i2o_report_status(KERN_INFO, "i2o_core", msg);
379 #endif
380
381         if(msg[2]&0x80000000)   // Post wait message
382         {
383                 if (msg[4] >> 24)
384                         status = (msg[4] & 0xFFFF);
385                 else
386                         status = I2O_POST_WAIT_OK;
387         
388                 i2o_post_wait_complete(c, context, status);
389                 return;
390         }
391
392         if(m->function == I2O_CMD_UTIL_EVT_REGISTER)
393         {
394                 memcpy(events[evt_in].msg, msg, (msg[0]>>16)<<2);
395                 events[evt_in].iop = c;
396
397                 spin_lock(&i2o_evt_lock);
398                 MODINC(evt_in, I2O_EVT_Q_LEN);
399                 if(evt_q_len == I2O_EVT_Q_LEN)
400                         MODINC(evt_out, I2O_EVT_Q_LEN);
401                 else
402                         evt_q_len++;
403                 spin_unlock(&i2o_evt_lock);
404
405                 up(&evt_sem);
406                 wake_up_interruptible(&evt_wait);
407                 return;
408         }
409
410         if(m->function == I2O_CMD_LCT_NOTIFY)
411         {
412                 up(&c->lct_sem);
413                 return;
414         }
415
416         /*
417          * If this happens, we want to dump the message to the syslog so
418          * it can be sent back to the card manufacturer by the end user
419          * to aid in debugging.
420          * 
421          */
422         printk(KERN_WARNING "%s: Unsolicited message reply sent to core! "
423                         "Message dumped to syslog\n", 
424                         c->name);
425         i2o_dump_message(msg);
426
427         return;
428 }
429
430 /**
431  *      i2o_install_handler - install a message handler
432  *      @h: Handler structure
433  *
434  *      Install an I2O handler - these handle the asynchronous messaging
435  *      from the card once it has initialised. If the table of handlers is
436  *      full then -ENOSPC is returned. On a success 0 is returned and the
437  *      context field is set by the function. The structure is part of the
438  *      system from this time onwards. It must not be freed until it has
439  *      been uninstalled
440  */
441  
442 int i2o_install_handler(struct i2o_handler *h)
443 {
444         int i;
445         down(&i2o_configuration_lock);
446         for(i=0;i<MAX_I2O_MODULES;i++)
447         {
448                 if(i2o_handlers[i]==NULL)
449                 {
450                         h->context = i;
451                         i2o_handlers[i]=h;
452                         up(&i2o_configuration_lock);
453                         return 0;
454                 }
455         }
456         up(&i2o_configuration_lock);
457         return -ENOSPC;
458 }
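/*
 * Editor's note: an illustrative sketch of how an OSM might register with
 * the core, mirroring the positional i2o_core_handler initializer above.
 * The reply function and names below are hypothetical:
 *
 *      static void my_osm_reply(struct i2o_handler *h,
 *                               struct i2o_controller *c,
 *                               struct i2o_message *m)
 *      {
 *              // handle replies addressed to this OSM
 *      }
 *
 *      static struct i2o_handler my_osm_handler = {
 *              (void *)my_osm_reply,
 *              NULL, NULL, NULL,
 *              "My example OSM",
 *              0,
 *              I2O_CLASS_RANDOM_BLOCK_STORAGE
 *      };
 *
 *      if (i2o_install_handler(&my_osm_handler) < 0)
 *              return -ENOSPC;
 *      // my_osm_handler.context now holds the table slot, which the core
 *      // uses as the initiator context when dispatching replies
 */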
459
460 /**
461  *      i2o_remove_handler - remove an i2o message handler
462  *      @h: handler
463  *
464  *      Remove a message handler previously installed with i2o_install_handler.
465  *      After this function returns the handler object can be freed or re-used
466  */
467  
468 int i2o_remove_handler(struct i2o_handler *h)
469 {
470         i2o_handlers[h->context]=NULL;
471         return 0;
472 }
473         
474
475 /*
476  *      Each I2O controller has a chain of devices on it.
477  * Each device has a pointer to its LCT entry to be used
478  * for fun purposes.
479  */
480
481 /**
482  *      i2o_install_device      -       attach a device to a controller
483  *      @c: controller
484  *      @d: device
485  *      
486  *      Add a new device to an i2o controller. This can be called from
487  *      non interrupt contexts only. It adds the device and marks it as
488  *      unclaimed. The device memory becomes part of the kernel and must
489  *      be uninstalled before being freed or reused. Zero is returned
490  *      on success.
491  */
492  
493 int i2o_install_device(struct i2o_controller *c, struct i2o_device *d)
494 {
495         int i;
496
497         down(&i2o_configuration_lock);
498         d->controller=c;
499         d->owner=NULL;
500         d->next=c->devices;
501         d->prev=NULL;
502         if (c->devices != NULL)
503                 c->devices->prev=d;
504         c->devices=d;
505         *d->dev_name = 0;
506
507         for(i = 0; i < I2O_MAX_MANAGERS; i++)
508                 d->managers[i] = NULL;
509
510         up(&i2o_configuration_lock);
511         return 0;
512 }
513
514 /* we need this version to call out of i2o_delete_controller */
515
516 int __i2o_delete_device(struct i2o_device *d)
517 {
518         struct i2o_device **p;
519         int i;
520
521         p=&(d->controller->devices);
522
523         /*
524          *      Hey we have a driver!
525          * Check to see if the driver wants us to notify it of 
526          * device deletion. If it doesn't we assume that it
527          * is unsafe to delete a device with an owner and 
528          * fail.
529          */
530         if(d->owner)
531         {
532                 if(d->owner->dev_del_notify)
533                 {
534                         dprintk(KERN_INFO "Device has owner, notifying\n");
535                         d->owner->dev_del_notify(d->controller, d);
536                         if(d->owner)
537                         {
538                                 printk(KERN_WARNING 
539                                         "Driver \"%s\" did not release device!\n", d->owner->name);
540                                 return -EBUSY;
541                         }
542                 }
543                 else
544                         return -EBUSY;
545         }
546
547         /*
548          * Tell any other users who are talking to this device
549          * that it's going away.  We assume that everything works.
550          */
551         for(i=0; i < I2O_MAX_MANAGERS; i++)
552         {
553                 if(d->managers[i] && d->managers[i]->dev_del_notify)
554                         d->managers[i]->dev_del_notify(d->controller, d);
555         }
556                                 
557         while(*p!=NULL)
558         {
559                 if(*p==d)
560                 {
561                         /*
562                          *      Destroy
563                          */
564                         *p=d->next;
565                         kfree(d);
566                         return 0;
567                 }
568                 p=&((*p)->next);
569         }
570         printk(KERN_ERR "i2o_delete_device: passed invalid device.\n");
571         return -EINVAL;
572 }
573
574 /**
575  *      i2o_delete_device       -       remove an i2o device
576  *      @d: device to remove
577  *
578  *      This function unhooks a device from a controller. The device
579  *      will not be unhooked if it has an owner who does not wish to free
580  *      it, or if the owner lacks a dev_del_notify function. In that case
581  *      -EBUSY is returned. On success 0 is returned. Other errors cause
582  *      negative errno values to be returned
583  */
584  
585 int i2o_delete_device(struct i2o_device *d)
586 {
587         int ret;
588
589         down(&i2o_configuration_lock);
590
591         /*
592          *      Seek, locate
593          */
594
595         ret = __i2o_delete_device(d);
596
597         up(&i2o_configuration_lock);
598
599         return ret;
600 }
601
602 /**
603  *      i2o_install_controller  -       attach a controller
604  *      @c: controller
605  *      
606  *      Add a new controller to the i2o layer. This can be called from
607  *      non interrupt contexts only. It adds the controller and marks it as
608  *      unused with no devices. If the tables are full or memory allocations
609  *      fail then a negative errno code is returned. On success zero is
610  *      returned and the controller is bound to the system. The structure
611  *      must not be freed or reused until being uninstalled.
612  */
613  
614 int i2o_install_controller(struct i2o_controller *c)
615 {
616         int i;
617         down(&i2o_configuration_lock);
618         for(i=0;i<MAX_I2O_CONTROLLERS;i++)
619         {
620                 if(i2o_controllers[i]==NULL)
621                 {
622                         c->dlct = (i2o_lct*)pci_alloc_consistent(c->pdev, 8192, &c->dlct_phys);
623                         if(c->dlct==NULL)
624                         {
625                                 up(&i2o_configuration_lock);
626                                 return -ENOMEM;
627                         }
628                         i2o_controllers[i]=c;
629                         c->devices = NULL;
630                         c->next=i2o_controller_chain;
631                         i2o_controller_chain=c;
632                         c->unit = i;
633                         c->page_frame = NULL;
634                         c->hrt = NULL;
635                         c->hrt_len = 0;
636                         c->lct = NULL;
637                         c->status_block = NULL;
638                         sprintf(c->name, "i2o/iop%d", i);
639                         i2o_num_controllers++;
640                         init_MUTEX_LOCKED(&c->lct_sem);
641                         up(&i2o_configuration_lock);
642                         return 0;
643                 }
644         }
645         printk(KERN_ERR "No free i2o controller slots.\n");
646         up(&i2o_configuration_lock);
647         return -EBUSY;
648 }
649
650 /**
651  *      i2o_delete_controller   - delete a controller
652  *      @c: controller
653  *      
654  *      Remove an i2o controller from the system. If the controller or its
655  *      devices are busy then -EBUSY is returned. On a failure a negative
656  *      errno code is returned. On success zero is returned.
657  */
658   
659 int i2o_delete_controller(struct i2o_controller *c)
660 {
661         struct i2o_controller **p;
662         int users;
663         char name[16];
664         int stat;
665
666         dprintk(KERN_INFO "Deleting controller %s\n", c->name);
667
668         /*
669          * Clear event registration as this can cause weird behavior
670          */
671         if(c->status_block->iop_state == ADAPTER_STATE_OPERATIONAL)
672                 i2o_event_register(c, core_context, 0, 0, 0);
673
674         down(&i2o_configuration_lock);
675         if((users=atomic_read(&c->users)))
676         {
677                 dprintk(KERN_INFO "I2O: %d users for controller %s\n", users,
678                         c->name);
679                 up(&i2o_configuration_lock);
680                 return -EBUSY;
681         }
682         while(c->devices)
683         {
684                 if(__i2o_delete_device(c->devices)<0)
685                 {
686                         /* Shouldn't happen */
687                         I2O_IRQ_WRITE32(c, 0xFFFFFFFF);
688                         c->enabled = 0;
689                         up(&i2o_configuration_lock);
690                         return -EBUSY;
691                 }
692         }
693
694         /*
695          * If this is shutdown time, the thread's already been killed
696          */
697         if(c->lct_running) {
698                 stat = kill_proc(c->lct_pid, SIGKILL, 1);
699                 if(!stat) {
700                         int count = 10 * 100;
701                         while(c->lct_running && --count) {
702                                 current->state = TASK_INTERRUPTIBLE;
703                                 schedule_timeout(1);
704                         }
705                 
706                         if(!count)
707                                 printk(KERN_ERR 
708                                         "%s: LCT thread still running!\n", 
709                                         c->name);
710                 }
711         }
712
713         p=&i2o_controller_chain;
714
715         while(*p)
716         {
717                 if(*p==c)
718                 {
719                         /* Ask the IOP to switch to RESET state */
720                         i2o_reset_controller(c);
721
722                         /* Release IRQ */
723                         i2o_pci_dispose(c);
724
725                         *p=c->next;
726                         up(&i2o_configuration_lock);
727
728                         if(c->page_frame)
729                         {
730                                 pci_unmap_single(c->pdev, c->page_frame_map, MSG_POOL_SIZE, PCI_DMA_FROMDEVICE);
731                                 kfree(c->page_frame);
732                         }
733                         if(c->hrt)
734                                 pci_free_consistent(c->pdev, c->hrt_len, c->hrt, c->hrt_phys);
735                         if(c->lct)
736                                 pci_free_consistent(c->pdev, c->lct->table_size << 2, c->lct, c->lct_phys);
737                         if(c->status_block)
738                                 pci_free_consistent(c->pdev, sizeof(i2o_status_block), c->status_block, c->status_block_phys);
739                         if(c->dlct)
740                                 pci_free_consistent(c->pdev, 8192, c->dlct, c->dlct_phys);
741
742                         i2o_controllers[c->unit]=NULL;
743                         memcpy(name, c->name, strlen(c->name)+1);
744                         kfree(c);
745                         dprintk(KERN_INFO "%s: Deleted from controller chain.\n", name);
746                         
747                         i2o_num_controllers--;
748                         return 0;
749                 }
750                 p=&((*p)->next);
751         }
752         up(&i2o_configuration_lock);
753         printk(KERN_ERR "i2o_delete_controller: bad pointer!\n");
754         return -ENOENT;
755 }
756
757 /**
758  *      i2o_unlock_controller   -       unlock a controller
759  *      @c: controller to unlock
760  *
761  *      Release a lock on an i2o controller taken by i2o_find_controller().
762  *      i2o controllers are not refcounted, so deleting an in-use controller
763  *      will fail rather than take effect on the last dereference.
764  */
765  
766 void i2o_unlock_controller(struct i2o_controller *c)
767 {
768         atomic_dec(&c->users);
769 }
770
771 /**
772  *      i2o_find_controller - return a locked controller
773  *      @n: controller number
774  *
775  *      Returns a pointer to the controller object. The controller is locked
776  *      on return. NULL is returned if the controller is not found.
777  */
778  
779 struct i2o_controller *i2o_find_controller(int n)
780 {
781         struct i2o_controller *c;
782         
783         if(n<0 || n>=MAX_I2O_CONTROLLERS)
784                 return NULL;
785         
786         down(&i2o_configuration_lock);
787         c=i2o_controllers[n];
788         if(c!=NULL)
789                 atomic_inc(&c->users);
790         up(&i2o_configuration_lock);
791         return c;
792 }
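/*
 * Editor's note: i2o_find_controller() and i2o_unlock_controller() pair up
 * around any use of a controller, since the "lock" is just the users count.
 * A minimal sketch (the body is illustrative):
 *
 *      struct i2o_controller *c = i2o_find_controller(unit);
 *      if (c == NULL)
 *              return -ENODEV;
 *      // ... talk to the controller ...
 *      i2o_unlock_controller(c);
 */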
793
794 /**
795  *      i2o_issue_claim - claim or release a device
796  *      @cmd: command
797  *      @c: controller to claim for
798  *      @tid: i2o task id
799  *      @type: type of claim
800  *
801  *      Issue I2O UTIL_CLAIM and UTIL_RELEASE messages. The message to be sent
802  *      is set by cmd. The tid is the task id of the object to claim and the
803  *      type is the claim type (see the i2o standard)
804  *
805  *      Zero is returned on success.
806  */
807  
808 static int i2o_issue_claim(u32 cmd, struct i2o_controller *c, int tid, u32 type)
809 {
810         u32 msg[5];
811
812         msg[0] = FIVE_WORD_MSG_SIZE | SGL_OFFSET_0;
813         msg[1] = cmd << 24 | HOST_TID<<12 | tid;
814         msg[3] = 0;
815         msg[4] = type;
816         
817         return i2o_post_wait(c, msg, sizeof(msg), 60);
818 }
819
820 /**
821  *      i2o_claim_device - claim a device for use by an OSM
822  *      @d: device to claim
823  *      @h: handler for this device
824  *
825  *      Do the leg work to assign a device to a given OSM on Linux. The
826  *      kernel updates the internal handler data for the device and then
827  *      performs an I2O claim for the device, attempting to claim the
828  *      device as primary. If the attempt fails a negative errno code
829  *      is returned. On success zero is returned.
830  */
831  
832 int i2o_claim_device(struct i2o_device *d, struct i2o_handler *h)
833 {
834         int ret = 0;
835
836         down(&i2o_configuration_lock);
837         if (d->owner) {
838                 printk(KERN_INFO "Device claim called, but dev already owned by %s!\n",
839                        d->owner->name);
840                 ret = -EBUSY;
841                 goto out;
842         }
843         d->owner=h;
844
845         if(i2o_issue_claim(I2O_CMD_UTIL_CLAIM ,d->controller,d->lct_data.tid, 
846                            I2O_CLAIM_PRIMARY))
847         {
848                 d->owner = NULL;
849                 ret = -EBUSY;
850         }
851 out:
852         up(&i2o_configuration_lock);
853         return ret;
854 }
855
856 /**
857  *      i2o_release_device - release a device that the OSM is using
858  *      @d: device to claim
859  *      @h: handler for this device
860  *
861  *      Drop a claim by an OSM on a given I2O device. The handler is cleared
862  *      and 0 is returned on success.
863  *
864  *      AC - some devices seem to want to refuse an unclaim until they have
865  *      finished internal processing. It makes sense since you don't want a
866  *      new device to go reconfiguring the entire system until you are done.
867  *      Thus we are prepared to wait briefly.
868  */
869
870 int i2o_release_device(struct i2o_device *d, struct i2o_handler *h)
871 {
872         int err = 0;
873         int tries;
874
875         down(&i2o_configuration_lock);
876         if (d->owner != h) {
877                 printk(KERN_INFO "Claim release called, but not owned by %s!\n",
878                        h->name);
879                 up(&i2o_configuration_lock);
880                 return -ENOENT;
881         }       
882
883         for(tries=0;tries<10;tries++)
884         {
885                 d->owner = NULL;
886
887                 /*
888                  *      If the controller takes a nonblocking approach to
889                  *      releases, we have to sleep/poll a few times.
890                  */
891                  
892                 if((err=i2o_issue_claim(I2O_CMD_UTIL_RELEASE, d->controller, d->lct_data.tid, I2O_CLAIM_PRIMARY)) )
893                 {
894                         err = -ENXIO;
895                         current->state = TASK_UNINTERRUPTIBLE;
896                         schedule_timeout(HZ);
897                 }
898                 else
899                 {
900                         err=0;
901                         break;
902                 }
903         }
904         up(&i2o_configuration_lock);
905         return err;
906 }
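/*
 * Editor's note: claim/release pairing as an OSM would typically use it
 * after being handed a device via new_dev_notify().  The handler pointer
 * and error handling below are illustrative:
 *
 *      if (i2o_claim_device(d, &my_osm_handler) < 0)
 *              return -EBUSY;                  // someone else owns it
 *      // ... use the device as its primary user ...
 *      i2o_release_device(d, &my_osm_handler); // may retry internally for ~10s
 */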
907
908 /**
909  *      i2o_device_notify_on    -       Enable deletion notifiers
910  *      @d: device for notification
911  *      @h: handler to install
912  *
913  *      Called by OSMs to let the core know that they want to be
914  *      notified if the given device is deleted from the system.
915  */
916
917 int i2o_device_notify_on(struct i2o_device *d, struct i2o_handler *h)
918 {
919         int i;
920
921         if(d->num_managers == I2O_MAX_MANAGERS)
922                 return -ENOSPC;
923
924         for(i = 0; i < I2O_MAX_MANAGERS; i++)
925         {
926                 if(!d->managers[i])
927                 {
928                         d->managers[i] = h;
929                         break;
930                 }
931         }
932         
933         d->num_managers++;
934         
935         return 0;
936 }
937
938 /**
939  *      i2o_device_notify_off   -       Remove deletion notifiers
940  *      @d: device for notification
941  *      @h: handler to remove
942  *
943  * Called by OSMs to let the core know that they no longer
944  * are interested in the fate of the given device.
945  */
946 int i2o_device_notify_off(struct i2o_device *d, struct i2o_handler *h)
947 {
948         int i;
949
950         for(i=0; i < I2O_MAX_MANAGERS; i++)
951         {
952                 if(d->managers[i] == h)
953                 {
954                         d->managers[i] = NULL;
955                         d->num_managers--;
956                         return 0;
957                 }
958         }
959
960         return -ENOENT;
961 }
962
963 /**
964  *      i2o_event_register      -       register interest in an event
965  *      @c: Controller to register interest with
966  *      @tid: I2O task id
967  *      @init_context: initiator context to use with this notifier
968  *      @tr_context: transaction context to use with this notifier
969  *      @evt_mask: mask of events
970  *
971  *      Creates and posts an event registration message to the task. No reply
972  *      is waited for, or expected. Errors in posting will be reported.
973  */
974  
975 int i2o_event_register(struct i2o_controller *c, u32 tid, 
976                 u32 init_context, u32 tr_context, u32 evt_mask)
977 {
978         u32 msg[5];     // Not performance critical, so we just 
979                         // i2o_post_this it instead of building it
980                         // in IOP memory
981         
982         msg[0] = FIVE_WORD_MSG_SIZE|SGL_OFFSET_0;
983         msg[1] = I2O_CMD_UTIL_EVT_REGISTER<<24 | HOST_TID<<12 | tid;
984         msg[2] = init_context;
985         msg[3] = tr_context;
986         msg[4] = evt_mask;
987
988         return i2o_post_this(c, msg, sizeof(msg));
989 }
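/*
 * Editor's note: a sketch of how an OSM might ask a device for event
 * notifications and later cancel them.  The mask value is illustrative;
 * the core itself clears its own registration with a mask of 0 in
 * i2o_delete_controller():
 *
 *      // receive all events from the device into my_osm_handler's reply fn
 *      i2o_event_register(c, d->lct_data.tid,
 *                         my_osm_handler.context, 0, 0xffffffff);
 *
 *      // later: stop notifications for this initiator
 *      i2o_event_register(c, d->lct_data.tid,
 *                         my_osm_handler.context, 0, 0);
 */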
990
991 /**
992  *      i2o_event_ack   -       acknowledge an event
993  *      @c: controller 
994  *      @msg: pointer to the UTIL_EVENT_REGISTER reply we received
995  *
996  *      We just take a pointer to the original UTIL_EVENT_REGISTER reply
997  *      message and change the function code, since that's what the spec
998  *      says an EventAck message looks like.
999  */
1000  
1001 int i2o_event_ack(struct i2o_controller *c, u32 *msg)
1002 {
1003         struct i2o_message *m = (struct i2o_message *)msg;
1004
1005         m->function = I2O_CMD_UTIL_EVT_ACK;
1006
1007         return i2o_post_wait(c, msg, m->size * 4, 2);
1008 }
1009
1010 /*
1011  * Core event handler.  Runs as a separate thread and is woken
1012  * up whenever there is an Executive class event.
1013  */
1014 static int i2o_core_evt(void *reply_data)
1015 {
1016         struct reply_info *reply = (struct reply_info *) reply_data;
1017         u32 *msg = reply->msg;
1018         struct i2o_controller *c = NULL;
1019         unsigned long flags;
1020
1021         daemonize("i2oevtd");
1022         allow_signal(SIGKILL);
1023
1024         evt_running = 1;
1025
1026         while(1)
1027         {
1028                 if(down_interruptible(&evt_sem))
1029                 {
1030                         dprintk(KERN_INFO "I2O event thread dead\n");
1031                         printk("exiting...");
1032                         evt_running = 0;
1033                         complete_and_exit(&evt_dead, 0);
1034                 }
1035
1036                 /* 
1037                  * Copy the data out of the queue so that we don't have to lock
1038                  * around the whole function and just around the qlen update
1039                  */
1040                 spin_lock_irqsave(&i2o_evt_lock, flags);
1041                 memcpy(reply, &events[evt_out], sizeof(struct reply_info));
1042                 MODINC(evt_out, I2O_EVT_Q_LEN);
1043                 evt_q_len--;
1044                 spin_unlock_irqrestore(&i2o_evt_lock, flags);
1045         
1046                 c = reply->iop;
1047                 dprintk(KERN_INFO "I2O IRTOS EVENT: iop%d, event %#10x\n", c->unit, msg[4]);
1048
1049                 /* 
1050                  * We do not attempt to delete/quiesce/etc. the controller if
1051                  * some sort of error indication occurs.  We may want to do
1052                  * so in the future, but for now we just let the user deal with 
1053                  * it.  One reason for this is that what to do with an error
1054                  * or when to send what error is not really agreed on, so
1055                  * we get errors that may not be fatal but just look like they
1056                  * are...so let the user deal with it.
1057                  */
1058                 switch(msg[4])
1059                 {
1060                         case I2O_EVT_IND_EXEC_RESOURCE_LIMITS:
1061                                 printk(KERN_ERR "%s: Out of resources\n", c->name);
1062                                 break;
1063
1064                         case I2O_EVT_IND_EXEC_POWER_FAIL:
1065                                 printk(KERN_ERR "%s: Power failure\n", c->name);
1066                                 break;
1067
1068                         case I2O_EVT_IND_EXEC_HW_FAIL:
1069                         {
1070                                 char *fail[] = 
1071                                         { 
1072                                                 "Unknown Error",
1073                                                 "Power Lost",
1074                                                 "Code Violation",
1075                                                 "Parity Error",
1076                                                 "Code Execution Exception",
1077                                                 "Watchdog Timer Expired" 
1078                                         };
1079
1080                                 if(msg[5] < 6)
1081                                         printk(KERN_ERR "%s: Hardware Failure: %s\n", 
1082                                                 c->name, fail[msg[5]]);
1083                                 else
1084                                         printk(KERN_ERR "%s: Unknown Hardware Failure\n", c->name);
1085
1086                                 break;
1087                         }
1088
1089                         /*
1090                          * New device created
1091                          * - Create a new i2o_device entry
1092                          * - Inform all interested drivers about this device's existence
1093                          */
1094                         case I2O_EVT_IND_EXEC_NEW_LCT_ENTRY:
1095                         {
1096                                 struct i2o_device *d = (struct i2o_device *)
1097                                         kmalloc(sizeof(struct i2o_device), GFP_KERNEL);
1098                                 int i;
1099
1100                                 if (d == NULL) {
1101                                         printk(KERN_EMERG "i2oevtd: out of memory\n");
1102                                         break;
1103                                 }
1104                                 memcpy(&d->lct_data, &msg[5], sizeof(i2o_lct_entry));
1105         
1106                                 d->next = NULL;
1107                                 d->controller = c;
1108                                 d->flags = 0;
1109         
1110                                 i2o_report_controller_unit(c, d);
1111                                 i2o_install_device(c,d);
1112         
1113                                 for(i = 0; i < MAX_I2O_MODULES; i++)
1114                                 {
1115                                         if(i2o_handlers[i] && 
1116                                                 i2o_handlers[i]->new_dev_notify &&
1117                                                 (i2o_handlers[i]->class&d->lct_data.class_id))
1118                                                 {
1119                                                 spin_lock(&i2o_dev_lock);
1120                                                 i2o_handlers[i]->new_dev_notify(c,d);
1121                                                 spin_unlock(&i2o_dev_lock);
1122                                                 }
1123                                 }
1124                         
1125                                 break;
1126                         }
1127         
1128                         /*
1129                          * LCT entry for a device has been modified, so update it
1130                          * internally.
1131                          */
1132                         case I2O_EVT_IND_EXEC_MODIFIED_LCT:
1133                         {
1134                                 struct i2o_device *d;
1135                                 i2o_lct_entry *new_lct = (i2o_lct_entry *)&msg[5];
1136
1137                                 for(d = c->devices; d; d = d->next)
1138                                 {
1139                                         if(d->lct_data.tid == new_lct->tid)
1140                                         {
1141                                                 memcpy(&d->lct_data, new_lct, sizeof(i2o_lct_entry));
1142                                                 break;
1143                                         }
1144                                 }
1145                                 break;
1146                         }
1147         
1148                         case I2O_EVT_IND_CONFIGURATION_FLAG:
1149                                 printk(KERN_WARNING "%s requires user configuration\n", c->name);
1150                                 break;
1151         
1152                         case I2O_EVT_IND_GENERAL_WARNING:
1153                                 printk(KERN_WARNING "%s: Warning notification received! "
1154                                         "Check configuration for errors!\n", c->name);
1155                                 break;
1156                                 
1157                         case I2O_EVT_IND_EVT_MASK_MODIFIED:
1158                                 /* Well I guess that was us hey .. */
1159                                 break;
1160                                         
1161                         default:
1162                                 printk(KERN_WARNING "%s: No handler for event (0x%08x)\n", c->name, msg[4]);
1163                                 break;
1164                 }
1165         }
1166
1167         return 0;
1168 }
1169
1170 /*
1171  * Dynamic LCT update.  This compares the LCT with the currently
1172  * installed devices to check for device deletions. This is needed because
1173  * there is no DELETED_LCT_ENTRY EventIndicator for the Executive class, so
1174  * we can't just have the event handler do this. Annoying.
1175  *
1176  * This is a hole in the spec that will hopefully be fixed someday.
1177  */
1178 static int i2o_dyn_lct(void *foo)
1179 {
1180         struct i2o_controller *c = (struct i2o_controller *)foo;
1181         struct i2o_device *d = NULL;
1182         struct i2o_device *d1 = NULL;
1183         int i = 0;
1184         int found = 0;
1185         int entries;
1186         void *tmp;
1187
1188         daemonize("iop%d_lctd", c->unit);
1189         allow_signal(SIGKILL);
1190
1191         c->lct_running = 1;
1192
1193         while(1)
1194         {
1195                 down_interruptible(&c->lct_sem);
1196                 if(signal_pending(current))
1197                 {
1198                         dprintk(KERN_ERR "%s: LCT thread dead\n", c->name);
1199                         c->lct_running = 0;
1200                         return 0;
1201                 }
1202
1203                 entries = c->dlct->table_size;
1204                 entries -= 3;
1205                 entries /= 9;
1206
1207                 dprintk(KERN_INFO "%s: Dynamic LCT Update\n",c->name);
1208                 dprintk(KERN_INFO "%s: Dynamic LCT contains %d entries\n", c->name, entries);
1209
1210                 if(!entries)
1211                 {
1212                         printk(KERN_INFO "%s: Empty LCT???\n", c->name);
1213                         continue;
1214                 }
1215
1216                 /*
1217                  * Loop through all the devices on the IOP looking for their
1218                  * LCT data in the LCT.  We assume that TIDs are not repeated,
1219                  * as that is the only way to really tell.  It's been confirmed
1220                  * by the IRTOS vendor(s?) that TIDs are not reused until they
1221                  * wrap around (4096), and I doubt a system will be up long enough
1222                  * to create/delete that many devices.
1223                  */
1224                 for(d = c->devices; d; )
1225                 {
1226                         found = 0;
1227                         d1 = d->next;
1228                         
1229                         for(i = 0; i < entries; i++) 
1230                         { 
1231                                 if(d->lct_data.tid == c->dlct->lct_entry[i].tid) 
1232                                 { 
1233                                         found = 1; 
1234                                         break; 
1235                                 } 
1236                         } 
1237                         if(!found) 
1238                         {
1239                                 dprintk(KERN_INFO "i2o_core: Deleted device!\n"); 
1240                                 spin_lock(&i2o_dev_lock);
1241                                 i2o_delete_device(d); 
1242                                 spin_unlock(&i2o_dev_lock);
1243                         } 
1244                         d = d1; 
1245                 }
1246
1247                 /* 
1248                  * Tell LCT to renotify us next time there is a change
1249                  */
1250                 i2o_lct_notify(c);
1251
1252                 /*
1253                  * Copy new LCT into public LCT
1254                  *
1255                  * Possible race if someone is reading the LCT while we are copying
1256                  * over it. If this happens, we'll fix it then, but I doubt that
1257                  * the LCT will get updated often enough or will get read by
1258                  * a user often enough to worry.
1259                  */
1260                 if(c->lct->table_size < c->dlct->table_size)
1261                 {
1262                         dma_addr_t phys;
1263                         tmp = c->lct;
1264                         c->lct = pci_alloc_consistent(c->pdev, c->dlct->table_size<<2, &phys);
1265                         if(!c->lct)
1266                         {
1267                                 printk(KERN_ERR "%s: No memory for LCT!\n", c->name);
1268                                 c->lct = tmp;
1269                                 continue;
1270                         }
1271                         pci_free_consistent(c->pdev, ((i2o_lct*)tmp)->table_size << 2, tmp, c->lct_phys);
1272                         c->lct_phys = phys;
1273                 }
1274                 memcpy(c->lct, c->dlct, c->dlct->table_size<<2);
1275         }
1276
1277         return 0;
1278 }
1279
1280 /**
1281  *      i2o_run_queue   -       process pending events on a controller
1282  *      @c: controller to process
1283  *
1284  *      This is called by the bus specific driver layer when an interrupt
1285  *      or poll of this card interface is desired.
1286  */
1287  
1288 void i2o_run_queue(struct i2o_controller *c)
1289 {
1290         struct i2o_message *m;
1291         u32 mv;
1292         u32 *msg;
1293
1294         /*
1295          * Old 960 steppings had a bug in the I2O unit that caused
1296          * the queue to appear empty when it wasn't.
1297          */
1298         if((mv=I2O_REPLY_READ32(c))==0xFFFFFFFF)
1299                 mv=I2O_REPLY_READ32(c);
1300
1301         while(mv!=0xFFFFFFFF)
1302         {
1303                 struct i2o_handler *i;
1304                 /* Map the message from the page frame map to kernel virtual */
1305                 /* m=(struct i2o_message *)(mv - (unsigned long)c->page_frame_map + (unsigned long)c->page_frame); */
1306                 m=(struct i2o_message *)bus_to_virt(mv);
1307                 msg=(u32*)m;
1308
1309                 /*
1310                  *      Ensure this message is seen coherently but cacheably by
1311                  *      the processor 
1312                  */
1313
1314                 pci_dma_sync_single_for_cpu(c->pdev, c->page_frame_map, MSG_FRAME_SIZE, PCI_DMA_FROMDEVICE);
1315         
1316                 /*
1317                  *      Despatch it
1318                  */
1319
1320                 i=i2o_handlers[m->initiator_context&(MAX_I2O_MODULES-1)];
1321                 if(i && i->reply)
1322                         i->reply(i,c,m);
1323                 else
1324                 {
1325                         printk(KERN_WARNING "I2O: Spurious reply to handler %d\n", 
1326                                 m->initiator_context&(MAX_I2O_MODULES-1));
1327                 }       
1328                 i2o_flush_reply(c,mv);
1329                 mb();
1330
1331                 /* That 960 bug again... */     
1332                 if((mv=I2O_REPLY_READ32(c))==0xFFFFFFFF)
1333                         mv=I2O_REPLY_READ32(c);
1334         }               
1335 }
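/*
 * Editor's note: a minimal sketch of the bus-layer caller described above.
 * A PCI glue interrupt handler essentially just hands the controller to
 * i2o_run_queue(); the names and dev_id convention here are illustrative:
 *
 *      static irqreturn_t my_i2o_pci_interrupt(int irq, void *dev_id,
 *                                              struct pt_regs *regs)
 *      {
 *              struct i2o_controller *c = dev_id;
 *
 *              i2o_run_queue(c);
 *              return IRQ_HANDLED;
 *      }
 */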
1336
1337
1338 /**
1339  *      i2o_get_class_name -    do i2o class name lookup
1340  *      @class: class number
1341  *
1342  *      Return a descriptive string for an i2o class
1343  */
1344  
1345 const char *i2o_get_class_name(int class)
1346 {
1347         int idx = 16;
1348         static char *i2o_class_name[] = {
1349                 "Executive",
1350                 "Device Driver Module",
1351                 "Block Device",
1352                 "Tape Device",
1353                 "LAN Interface",
1354                 "WAN Interface",
1355                 "Fibre Channel Port",
1356                 "Fibre Channel Device",
1357                 "SCSI Device",
1358                 "ATE Port",
1359                 "ATE Device",
1360                 "Floppy Controller",
1361                 "Floppy Device",
1362                 "Secondary Bus Port",
1363                 "Peer Transport Agent",
1364                 "Peer Transport",
1365                 "Unknown"
1366         };
1367         
1368         switch(class&0xFFF)
1369         {
1370                 case I2O_CLASS_EXECUTIVE:
1371                         idx = 0; break;
1372                 case I2O_CLASS_DDM:
1373                         idx = 1; break;
1374                 case I2O_CLASS_RANDOM_BLOCK_STORAGE:
1375                         idx = 2; break;
1376                 case I2O_CLASS_SEQUENTIAL_STORAGE:
1377                         idx = 3; break;
1378                 case I2O_CLASS_LAN:
1379                         idx = 4; break;
1380                 case I2O_CLASS_WAN:
1381                         idx = 5; break;
1382                 case I2O_CLASS_FIBRE_CHANNEL_PORT:
1383                         idx = 6; break;
1384                 case I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL:
1385                         idx = 7; break;
1386                 case I2O_CLASS_SCSI_PERIPHERAL:
1387                         idx = 8; break;
1388                 case I2O_CLASS_ATE_PORT:
1389                         idx = 9; break;
1390                 case I2O_CLASS_ATE_PERIPHERAL:
1391                         idx = 10; break;
1392                 case I2O_CLASS_FLOPPY_CONTROLLER:
1393                         idx = 11; break;
1394                 case I2O_CLASS_FLOPPY_DEVICE:
1395                         idx = 12; break;
1396                 case I2O_CLASS_BUS_ADAPTER_PORT:
1397                         idx = 13; break;
1398                 case I2O_CLASS_PEER_TRANSPORT_AGENT:
1399                         idx = 14; break;
1400                 case I2O_CLASS_PEER_TRANSPORT:
1401                         idx = 15; break;
1402         }
1403
1404         return i2o_class_name[idx];
1405 }
1406
1407
1408 /**
1409  *      i2o_wait_message        -       obtain an i2o message from the IOP
1410  *      @c: controller
1411  *      @why: explanation 
1412  *
1413  *      This function waits up to 5 seconds for a message slot to be
1414  *      available. If no message is available it prints an error naming
1415  *      what the message frame was going to be used for (e.g.
1416  *      "get_status"). 0xFFFFFFFF is returned on a failure.
1417  *
1418  *      On a success the message is returned. This is the physical page
1419  *      frame offset address from the read port. (See the i2o spec)
1420  */
1421  
1422 u32 i2o_wait_message(struct i2o_controller *c, char *why)
1423 {
1424         long time=jiffies;
1425         u32 m;
1426         while((m=I2O_POST_READ32(c))==0xFFFFFFFF)
1427         {
1428                 if((jiffies-time)>=5*HZ)
1429                 {
1430                         dprintk(KERN_ERR "%s: Timeout waiting for message frame to send %s.\n", 
1431                                 c->name, why);
1432                         return 0xFFFFFFFF;
1433                 }
1434                 schedule();
1435                 barrier();
1436         }
1437         return m;
1438 }
1439         
1440 /**
1441  *      i2o_report_controller_unit - print information about a tid
1442  *      @c: controller
1443  *      @d: device
1444  *      
1445  *      Dump an information block associated with a given unit (TID). The
1446  *      tables are read and a block of text is output via printk that is
1447  *      formatted for the user.
1448  */
1449  
1450 void i2o_report_controller_unit(struct i2o_controller *c, struct i2o_device *d)
1451 {
1452         char buf[64];
1453         char str[22];
1454         int ret;
1455         int unit = d->lct_data.tid;
1456
1457         if(verbose==0)
1458                 return;
1459                 
1460         printk(KERN_INFO "Target ID %d.\n", unit);
1461         if((ret=i2o_query_scalar(c, unit, 0xF100, 3, buf, 16))>=0)
1462         {
1463                 buf[16]=0;
1464                 printk(KERN_INFO "     Vendor: %s\n", buf);
1465         }
1466         if((ret=i2o_query_scalar(c, unit, 0xF100, 4, buf, 16))>=0)
1467         {
1468                 buf[16]=0;
1469                 printk(KERN_INFO "     Device: %s\n", buf);
1470         }
1471         if(i2o_query_scalar(c, unit, 0xF100, 5, buf, 16)>=0)
1472         {
1473                 buf[16]=0;
1474                 printk(KERN_INFO "     Description: %s\n", buf);
1475         }
1476         if((ret=i2o_query_scalar(c, unit, 0xF100, 6, buf, 8))>=0)
1477         {
1478                 buf[8]=0;
1479                 printk(KERN_INFO "        Rev: %s\n", buf);
1480         }
1481
1482         printk(KERN_INFO "    Class: ");
1483         sprintf(str, "%-21s", i2o_get_class_name(d->lct_data.class_id));
1484         printk("%s\n", str);
1485                 
1486         printk(KERN_INFO "  Subclass: 0x%04X\n", d->lct_data.sub_class);
1487         printk(KERN_INFO "     Flags: ");
1488                 
1489         if(d->lct_data.device_flags&(1<<0))
1490                 printk("C");            // ConfigDialog requested
1491         if(d->lct_data.device_flags&(1<<1))
1492                 printk("U");            // Multi-user capable
1493         if(!(d->lct_data.device_flags&(1<<4)))
1494                 printk("P");            // Peer service enabled!
1495         if(!(d->lct_data.device_flags&(1<<5)))
1496                 printk("M");            // Mgmt service enabled!
1497         printk("\n");
1498                         
1499 }
1500
1501
1502 /*
1503  *      Parse the hardware resource table. Right now we print it out
1504  *      and don't do a lot with it. We should collate these and then
1505  *      interact with the Linux resource allocation block.
1506  *
1507  *      Let's prove we can read it first, eh?
1508  *
1509  *      This is full of endianisms!
1510  */
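/*
 *      HRT header layout assumed by the parser below: bytes 0-1 hold the
 *      little-endian entry count, byte 2 the per-entry length in 32-bit
 *      words, and byte 3 the HRT version (only version 0 is handled).
 */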
1511  
1512 static int i2o_parse_hrt(struct i2o_controller *c)
1513 {
1514 #ifdef DRIVERDEBUG
1515         u32 *rows=(u32*)c->hrt;
1516         u8 *p=(u8 *)c->hrt;
1517         u8 *d;
1518         int count;
1519         int length;
1520         int i;
1521         int state;
1522         
1523         if(p[3]!=0)
1524         {
1525                 printk(KERN_ERR "%s: HRT table for controller is too new a version.\n",
1526                         c->name);
1527                 return -1;
1528         }
1529                 
1530         count=p[0]|(p[1]<<8);
1531         length = p[2];
1532         
1533         printk(KERN_INFO "%s: HRT has %d entries of %d bytes each.\n",
1534                 c->name, count, length<<2);
1535
1536         rows+=2;
1537         
1538         for(i=0;i<count;i++)
1539         {
1540                 printk(KERN_INFO "Adapter %08X: ", rows[0]);
1541                 p=(u8 *)(rows+1);
1542                 d=(u8 *)(rows+2);
1543                 state=p[1]<<8|p[0];
1544                 
1545                 printk("TID %04X:[", state&0xFFF);
1546                 state>>=12;
1547                 if(state&(1<<0))
1548                         printk("H");            /* Hidden */
1549                 if(state&(1<<2))
1550                 {
1551                         printk("P");            /* Present */
1552                         if(state&(1<<1))
1553                                 printk("C");    /* Controlled */
1554                 }
1555                 if(state>9)
1556                         printk("*");            /* Hard */
1557                 
1558                 printk("]:");
1559                 
1560                 switch(p[3]&0xFFFF)
1561                 {
1562                         case 0:
1563                                 /* Adapter private bus - easy */
1564                                 printk("Local bus %d: I/O at 0x%04X Mem 0x%08X", 
1565                                         p[2], d[1]<<8|d[0], *(u32 *)(d+4));
1566                                 break;
1567                         case 1:
1568                                 /* ISA bus */
1569                                 printk("ISA %d: CSN %d I/O at 0x%04X Mem 0x%08X",
1570                                         p[2], d[2], d[1]<<8|d[0], *(u32 *)(d+4));
1571                                 break;
1572                                         
1573                         case 2: /* EISA bus */
1574                                 printk("EISA %d: Slot %d I/O at 0x%04X Mem 0x%08X",
1575                                         p[2], d[3], d[1]<<8|d[0], *(u32 *)(d+4));
1576                                 break;
1577
1578                         case 3: /* MCA bus */
1579                                 printk("MCA %d: Slot %d I/O at 0x%04X Mem 0x%08X",
1580                                         p[2], d[3], d[1]<<8|d[0], *(u32 *)(d+4));
1581                                 break;
1582
1583                         case 4: /* PCI bus */
1584                                 printk("PCI %d: Bus %d Device %d Function %d",
1585                                         p[2], d[2], d[1], d[0]);
1586                                 break;
1587
1588                         case 0x80: /* Other */
1589                         default:
1590                                 printk("Unsupported bus type.");
1591                                 break;
1592                 }
1593                 printk("\n");
1594                 rows+=length;
1595         }
1596 #endif
1597         return 0;
1598 }
1599         
1600 /*
1601  *      The logical configuration table tells us what we can talk to
1602  *      on the board. Most of the stuff isn't interesting to us. 
1603  */
1604
1605 static int i2o_parse_lct(struct i2o_controller *c)
1606 {
1607         int i;
1608         int max;
1609         int tid;
1610         struct i2o_device *d;
1611         i2o_lct *lct = c->lct;
1612
1613         if (lct == NULL) {
1614                 printk(KERN_ERR "%s: LCT is empty???\n", c->name);
1615                 return -1;
1616         }
1617
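        /*
         * table_size is in 32-bit words: a 3-word LCT header followed by
         * 9-word entries (sizeof(i2o_lct_entry) in words), hence the
         * arithmetic below.
         */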
1618         max = lct->table_size;
1619         max -= 3;
1620         max /= 9;
1621         
1622         printk(KERN_INFO "%s: LCT has %d entries.\n", c->name, max);
1623         
1624         if(lct->iop_flags&(1<<0))
1625                 printk(KERN_WARNING "%s: Configuration dialog desired.\n", c->name);
1626                 
1627         for(i=0;i<max;i++)
1628         {
1629                 d = (struct i2o_device *)kmalloc(sizeof(struct i2o_device), GFP_KERNEL);
1630                 if(d==NULL)
1631                 {
1632                         printk(KERN_CRIT "i2o_core: Out of memory for I2O device data.\n");
1633                         return -ENOMEM;
1634                 }
1635                 
1636                 d->controller = c;
1637                 d->next = NULL;
1638
1639                 memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));
1640
1641                 d->flags = 0;
1642                 tid = d->lct_data.tid;
1643                 
1644                 i2o_report_controller_unit(c, d);
1645                 
1646                 i2o_install_device(c, d);
1647         }
1648         return 0;
1649 }
1650
1651
1652 /**
1653  *      i2o_quiesce_controller - quiesce controller
1654  *      @c: controller 
1655  *
1656  *      Quiesce an IOP. Causes IOP to make external operation quiescent
1657  *      (i2o 'READY' state). Internal operation of the IOP continues normally.
1658  */
1659  
1660 int i2o_quiesce_controller(struct i2o_controller *c)
1661 {
1662         u32 msg[4];
1663         int ret;
1664
1665         i2o_status_get(c);
1666
1667         /* SysQuiesce discarded if IOP not in READY or OPERATIONAL state */
1668
1669         if ((c->status_block->iop_state != ADAPTER_STATE_READY) &&
1670                 (c->status_block->iop_state != ADAPTER_STATE_OPERATIONAL))
1671         {
1672                 return 0;
1673         }
1674
1675         msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
1676         msg[1] = I2O_CMD_SYS_QUIESCE<<24|HOST_TID<<12|ADAPTER_TID;
1677         msg[3] = 0;
1678
1679         /* Long timeout needed for quiesce if lots of devices */
1680
1681         if ((ret = i2o_post_wait(c, msg, sizeof(msg), 240)))
1682                 printk(KERN_INFO "%s: Unable to quiesce (status=%#x).\n",
1683                         c->name, -ret);
1684         else
1685                 dprintk(KERN_INFO "%s: Quiesced.\n", c->name);
1686
1687         i2o_status_get(c); // Entered READY state
1688         return ret;
1689 }
1690
1691 /**
1692  *      i2o_enable_controller - move controller from ready to operational
1693  *      @c: controller
1694  *
1695  *      Enable IOP. This allows the IOP to resume external operations and
1696  *      reverses the effect of a quiesce. In the event of an error a negative
1697  *      errno code is returned.
1698  */
1699  
1700 int i2o_enable_controller(struct i2o_controller *c)
1701 {
1702         u32 msg[4];
1703         int ret;
1704
1705         i2o_status_get(c);
1706         
1707         /* Enable only allowed on READY state */        
1708         if(c->status_block->iop_state != ADAPTER_STATE_READY)
1709                 return -EINVAL;
1710
1711         msg[0]=FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
1712         msg[1]=I2O_CMD_SYS_ENABLE<<24|HOST_TID<<12|ADAPTER_TID;
1713
1714         /* How long of a timeout do we need? */
1715
1716         if ((ret = i2o_post_wait(c, msg, sizeof(msg), 240)))
1717                 printk(KERN_ERR "%s: Could not enable (status=%#x).\n",
1718                         c->name, -ret);
1719         else
1720                 dprintk(KERN_INFO "%s: Enabled.\n", c->name);
1721
1722         i2o_status_get(c); // entered OPERATIONAL state
1723
1724         return ret;
1725 }
1726
1727 /**
1728  *      i2o_clear_controller    -       clear a controller
1729  *      @c: controller
1730  *
1731  *      Clear an IOP to HOLD state, ie. terminate external operations, clear all
1732  *      input queues and prepare for a system restart. IOP's internal operation
1733  *      continues normally and the outbound queue is alive.
1734  *      The IOP is not expected to rebuild its LCT.
1735  */
1736  
1737 int i2o_clear_controller(struct i2o_controller *c)
1738 {
1739         struct i2o_controller *iop;
1740         u32 msg[4];
1741         int ret;
1742
1743         /* Quiesce all IOPs first */
1744
1745         for (iop = i2o_controller_chain; iop; iop = iop->next)
1746                 i2o_quiesce_controller(iop);
1747
1748         msg[0]=FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
1749         msg[1]=I2O_CMD_ADAPTER_CLEAR<<24|HOST_TID<<12|ADAPTER_TID;
1750         msg[3]=0;
1751
1752         if ((ret=i2o_post_wait(c, msg, sizeof(msg), 30)))
1753                 printk(KERN_INFO "%s: Unable to clear (status=%#x).\n",
1754                         c->name, -ret);
1755         else
1756                 dprintk(KERN_INFO "%s: Cleared.\n",c->name);
1757
1758         i2o_status_get(c);
1759
1760         /* Enable other IOPs */
1761
1762         for (iop = i2o_controller_chain; iop; iop = iop->next)
1763                 if (iop != c)
1764                         i2o_enable_controller(iop);
1765
1766         return ret;
1767 }
1768
1769
1770 /**
1771  *      i2o_reset_controller    -       reset an IOP
1772  *      @c: controller to reset
1773  *
1774  *      Reset the IOP into INIT state and wait until IOP gets into RESET state.
1775  *      Terminate all external operations, clear IOP's inbound and outbound
1776  *      queues, terminate all DDMs, and reload the IOP's operating environment
1777  *      and all local DDMs. The IOP rebuilds its LCT.
1778  */
1779  
1780 static int i2o_reset_controller(struct i2o_controller *c)
1781 {
1782         struct i2o_controller *iop;
1783         u32 m;
1784         u8 *status;
1785         dma_addr_t status_phys;
1786         u32 *msg;
1787         long time;
1788
1789         /* Quiesce all IOPs first */
1790
1791         for (iop = i2o_controller_chain; iop; iop = iop->next)
1792         {
1793                 if(!iop->dpt)
1794                         i2o_quiesce_controller(iop);
1795         }
1796
1797         m=i2o_wait_message(c, "AdapterReset");
1798         if(m==0xFFFFFFFF)       
1799                 return -ETIMEDOUT;
1800         msg=(u32 *)(c->msg_virt+m);
1801         
1802         status = pci_alloc_consistent(c->pdev, 4, &status_phys);
1803         if(status == NULL) {
1804                 printk(KERN_ERR "IOP reset failed - no free memory.\n");
1805                 return -ENOMEM;
1806         }
1807         memset(status, 0, 4);
1808         
1809         msg[0]=EIGHT_WORD_MSG_SIZE|SGL_OFFSET_0;
1810         msg[1]=I2O_CMD_ADAPTER_RESET<<24|HOST_TID<<12|ADAPTER_TID;
1811         msg[2]=core_context;
1812         msg[3]=0;
1813         msg[4]=0;
1814         msg[5]=0;
1815         msg[6]=status_phys;
1816         msg[7]=0;       /* 64bit host FIXME */
1817
1818         i2o_post_message(c,m);
1819
1820         /* Wait for a reply */
1821         time=jiffies;
1822         while(*status==0)
1823         {
1824                 if((jiffies-time)>=20*HZ)
1825                 {
1826                         printk(KERN_ERR "IOP reset timeout.\n");
1827                         /* The controller still may respond and overwrite
1828                          * status_phys, LEAK it to prevent memory corruption.
1829                          */
1830                         return -ETIMEDOUT;
1831                 }
1832                 schedule();
1833                 barrier();
1834         }
1835
1836         if (*status==I2O_CMD_IN_PROGRESS)
1837         { 
1838                 /* 
1839                  * Once the reset is sent, the IOP goes into the INIT state 
1840                  * which is indeterminate.  We need to wait until the IOP 
1841                  * has rebooted before we can let the system talk to 
1842                  * it. We read the inbound Free_List until a message is 
1843  *                 available.  If we can't read one in the given amount of
1844                  * time, we assume the IOP could not reboot properly.  
1845                  */ 
1846
1847                 dprintk(KERN_INFO "%s: Reset in progress, waiting for reboot...\n",
1848                         c->name); 
1849
1850                 time = jiffies; 
1851                 m = I2O_POST_READ32(c); 
1852                 while(m == 0XFFFFFFFF) 
1853                 { 
1854                         if((jiffies-time) >= 30*HZ)
1855                         {
1856                                 printk(KERN_ERR "%s: Timeout waiting for IOP reset.\n", 
1857                                                 c->name); 
1858                                 /* The controller still may respond and
1859                                  * overwrite status_phys, LEAK it to prevent
1860                                  * memory corruption.
1861                                  */
1862                                 return -ETIMEDOUT; 
1863                         } 
1864                         schedule(); 
1865                         barrier(); 
1866                         m = I2O_POST_READ32(c); 
1867                 }
1868                 i2o_flush_reply(c,m);
1869         }
1870
1871         /* If IopReset was rejected or didn't perform reset, try IopClear */
1872
1873         i2o_status_get(c);
1874         if (status[0] == I2O_CMD_REJECTED || 
1875                 c->status_block->iop_state != ADAPTER_STATE_RESET)
1876         {
1877                 printk(KERN_WARNING "%s: Reset rejected, trying to clear\n",c->name);
1878                 i2o_clear_controller(c);
1879         }
1880         else
1881                 dprintk(KERN_INFO "%s: Reset completed.\n", c->name);
1882
1883         /* Enable other IOPs */
1884
1885         for (iop = i2o_controller_chain; iop; iop = iop->next)
1886                 if (iop != c)
1887                         i2o_enable_controller(iop);
1888
1889         pci_free_consistent(c->pdev, 4, status, status_phys);
1890         return 0;
1891 }
1892
1893
1894 /**
1895  *      i2o_status_get  -       get the status block for the IOP
1896  *      @c: controller
1897  *
1898  *      Issue a status query on the controller. This updates the
1899  *      attached status_block. If the controller fails to reply or an
1900  *      error occurs then a negative errno code is returned. On success
1901  *      zero is returned and the status_block is updated.
1902  */
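/*
 *      Typical caller pattern (a sketch; compare i2o_quiesce_controller()
 *      and i2o_enable_controller() above):
 *
 *              if (i2o_status_get(c) == 0 &&
 *                  c->status_block->iop_state == ADAPTER_STATE_OPERATIONAL)
 *                      ... the IOP is up and accepting work ...
 */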
1903  
1904 int i2o_status_get(struct i2o_controller *c)
1905 {
1906         long time;
1907         u32 m;
1908         u32 *msg;
1909         u8 *status_block;
1910
1911         if (c->status_block == NULL) 
1912         {
1913                 c->status_block = (i2o_status_block *)
1914                         pci_alloc_consistent(c->pdev, sizeof(i2o_status_block), &c->status_block_phys);
1915                 if (c->status_block == NULL)
1916                 {
1917                         printk(KERN_CRIT "%s: Get Status Block failed; Out of memory.\n",
1918                                 c->name);
1919                         return -ENOMEM;
1920                 }
1921         }
1922
1923         status_block = (u8*)c->status_block;
1924         memset(c->status_block,0,sizeof(i2o_status_block));
1925         
1926         m=i2o_wait_message(c, "StatusGet");
1927         if(m==0xFFFFFFFF)
1928                 return -ETIMEDOUT;      
1929         msg=(u32 *)(c->msg_virt+m);
1930
1931         msg[0]=NINE_WORD_MSG_SIZE|SGL_OFFSET_0;
1932         msg[1]=I2O_CMD_STATUS_GET<<24|HOST_TID<<12|ADAPTER_TID;
1933         msg[2]=core_context;
1934         msg[3]=0;
1935         msg[4]=0;
1936         msg[5]=0;
1937         msg[6]=c->status_block_phys;
1938         msg[7]=0;   /* 64bit host FIXME */
1939         msg[8]=sizeof(i2o_status_block); /* always 88 bytes */
1940
1941         i2o_post_message(c,m);
1942
1943         /* Wait for a reply */
1944
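        /*
         * The status block was zeroed above and the IOP DMAs all 88 bytes
         * back; the loop below treats byte 87 reading 0xFF as the sign that
         * the reply has landed (the last byte presumably acts as a sync
         * marker).
         */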
1945         time=jiffies;
1946         while(status_block[87]!=0xFF)
1947         {
1948                 if((jiffies-time)>=5*HZ)
1949                 {
1950                         printk(KERN_ERR "%s: Get status timeout.\n",c->name);
1951                         return -ETIMEDOUT;
1952                 }
1953                 yield();
1954                 barrier();
1955         }
1956
1957 #ifdef DRIVERDEBUG
1958         printk(KERN_INFO "%s: State = ", c->name);
1959         switch (c->status_block->iop_state) {
1960                 case 0x01:  
1961                         printk("INIT\n");
1962                         break;
1963                 case 0x02:
1964                         printk("RESET\n");
1965                         break;
1966                 case 0x04:
1967                         printk("HOLD\n");
1968                         break;
1969                 case 0x05:
1970                         printk("READY\n");
1971                         break;
1972                 case 0x08:
1973                         printk("OPERATIONAL\n");
1974                         break;
1975                 case 0x10:
1976                         printk("FAILED\n");
1977                         break;
1978                 case 0x11:
1979                         printk("FAULTED\n");
1980                         break;
1981                 default: 
1982                         printk("%x (unknown !!)\n",c->status_block->iop_state);
1983         }
1984 #endif   
1985
1986         return 0;
1987 }
1988
1989 /*
1990  * Get the Hardware Resource Table for the device.
1991  * The HRT contains information about possible hidden devices
1992  * but is mostly useless to us 
1993  */
1994 int i2o_hrt_get(struct i2o_controller *c)
1995 {
1996         u32 msg[6];
1997         int ret, size = sizeof(i2o_hrt);
1998         int loops = 3;  /* we only try 3 times to get the HRT, this should be
1999                            more than enough. Worst case should be 2 times. */
2000
2001         /* First read just the header to figure out the real size */
2002
2003         do  {
2004                 /* first we allocate the memory for the HRT */
2005                 if (c->hrt == NULL) {
2006                         c->hrt=pci_alloc_consistent(c->pdev, size, &c->hrt_phys);
2007                         if (c->hrt == NULL) {
2008                                 printk(KERN_CRIT "%s: Hrt Get failed; Out of memory.\n", c->name);
2009                                 return -ENOMEM;
2010                         }
2011                         c->hrt_len = size;
2012                 }
2013
2014                 msg[0]= SIX_WORD_MSG_SIZE| SGL_OFFSET_4;
2015                 msg[1]= I2O_CMD_HRT_GET<<24 | HOST_TID<<12 | ADAPTER_TID;
2016                 msg[3]= 0;
2017                 msg[4]= (0xD0000000 | c->hrt_len);      /* Simple transaction */
2018                 msg[5]= c->hrt_phys;            /* Dump it here */
2019
2020                 ret = i2o_post_wait_mem(c, msg, sizeof(msg), 20, c->hrt, NULL, c->hrt_phys, 0, c->hrt_len, 0);
2021                 
2022                 if(ret == -ETIMEDOUT)
2023                 {
2024                         /* The HRT block we used is in limbo somewhere. When the iop wakes up
2025                            we will recover it */
2026                         c->hrt = NULL;
2027                         c->hrt_len = 0;
2028                         return ret;
2029                 }
2030                 
2031                 if(ret<0)
2032                 {
2033                         printk(KERN_ERR "%s: Unable to get HRT (status=%#x)\n",
2034                                 c->name, -ret); 
2035                         return ret;
2036                 }
2037
2038                 if (c->hrt->num_entries * c->hrt->entry_len << 2 > c->hrt_len) {
2039                         size = c->hrt->num_entries * c->hrt->entry_len << 2;
2040                         pci_free_consistent(c->pdev, c->hrt_len, c->hrt, c->hrt_phys);
2041                         c->hrt_len = 0;
2042                         c->hrt = NULL;
2043                 }
2044                 loops --;
2045         } while (c->hrt == NULL && loops > 0);
2046
2047         if(c->hrt == NULL)
2048         {
2049                 printk(KERN_ERR "%s: Unable to get HRT after three tries, giving up\n", c->name);
2050                 return -1;
2051         }
2052
2053         i2o_parse_hrt(c); // just for debugging
2054
2055         return 0;
2056 }
2057
2058 /*
2059  * Send the I2O System Table to the specified IOP
2060  *
2061  * The system table contains information about all the IOPs in the
2062  * system.  It is built and then sent to each IOP so that IOPs can
2063  * establish connections between each other.
2064  *
2065  */
2066 static int i2o_systab_send(struct i2o_controller *iop)
2067 {
2068         u32 msg[12];
2069         dma_addr_t sys_tbl_phys;
2070         int ret;
2071         struct resource *root;
2072         u32 *privbuf = kmalloc(16, GFP_KERNEL);
2073         if(privbuf == NULL)
2074                 return -ENOMEM;
2075         
2076                 
2077         if(iop->status_block->current_mem_size < iop->status_block->desired_mem_size)
2078         {
2079                 struct resource *res = &iop->mem_resource;
2080                 res->name = iop->pdev->bus->name;
2081                 res->flags = IORESOURCE_MEM;
2082                 res->start = 0;
2083                 res->end = 0;
2084                 printk("%s: requires private memory resources.\n", iop->name);
2085                 root = pci_find_parent_resource(iop->pdev, res);
2086                 if(root==NULL)
2087                         printk("Can't find parent resource!\n");
2088                 if(root && allocate_resource(root, res, 
2089                                 iop->status_block->desired_mem_size,
2090                                 iop->status_block->desired_mem_size,
2091                                 iop->status_block->desired_mem_size,
2092                                 1<<20,  /* Unspecified, so use 1Mb and play safe */
2093                                 NULL,
2094                                 NULL)>=0)
2095                 {
2096                         iop->mem_alloc = 1;
2097                         iop->status_block->current_mem_size = 1 + res->end - res->start;
2098                         iop->status_block->current_mem_base = res->start;
2099                         printk(KERN_INFO "%s: allocated %ld bytes of PCI memory at 0x%08lX.\n", 
2100                                 iop->name, 1+res->end-res->start, res->start);
2101                 }
2102         }
2103         if(iop->status_block->current_io_size < iop->status_block->desired_io_size)
2104         {
2105                 struct resource *res = &iop->io_resource;
2106                 res->name = iop->pdev->bus->name;
2107                 res->flags = IORESOURCE_IO;
2108                 res->start = 0;
2109                 res->end = 0;
2110                 printk("%s: requires private I/O resources.\n", iop->name);
2111                 root = pci_find_parent_resource(iop->pdev, res);
2112                 if(root==NULL)
2113                         printk("Can't find parent resource!\n");
2114                 if(root &&  allocate_resource(root, res, 
2115                                 iop->status_block->desired_io_size,
2116                                 iop->status_block->desired_io_size,
2117                                 iop->status_block->desired_io_size,
2118                                 1<<20,  /* Unspecified, so use 1Mb and play safe */
2119                                 NULL,
2120                                 NULL)>=0)
2121                 {
2122                         iop->io_alloc = 1;
2123                         iop->status_block->current_io_size = 1 + res->end - res->start;
2124                         iop->status_block->current_io_base = res->start;
2125                         printk(KERN_INFO "%s: allocated %ld bytes of PCI I/O at 0x%08lX.\n", 
2126                                 iop->name, 1+res->end-res->start, res->start);
2127                 }
2128         }

        /*
         * Declare the current private memory and i/o windows to the IOP,
         * whether or not we had to allocate them above.
         */
2131         privbuf[0] = iop->status_block->current_mem_base;
2132         privbuf[1] = iop->status_block->current_mem_size;
2133         privbuf[2] = iop->status_block->current_io_base;
2134         privbuf[3] = iop->status_block->current_io_size;
2136
2137         msg[0] = I2O_MESSAGE_SIZE(12) | SGL_OFFSET_6;
2138         msg[1] = I2O_CMD_SYS_TAB_SET<<24 | HOST_TID<<12 | ADAPTER_TID;
2139         msg[3] = 0;
2140         msg[4] = (0<<16) | ((iop->unit+2) );      /* Host 0 IOP ID (unit + 2) */
2141         msg[5] = 0;                               /* Segment 0 */
2142
2143         /* 
2144          * Provide three SGL-elements:
2145          * System table (SysTab), Private memory space declaration and 
2146          * Private i/o space declaration  
2147          * 
2148          * Nasty one here. We can't use pci_alloc_consistent to send the
2149          * same table to everyone. We have to go remap it for them all
2150          */
2151          
2152         sys_tbl_phys = pci_map_single(iop->pdev, sys_tbl, sys_tbl_len, PCI_DMA_TODEVICE);
2153         msg[6] = 0x54000000 | sys_tbl_len;
2154
2155         msg[7] = sys_tbl_phys;
2156         msg[8] = 0x54000000 | privbuf[1];
2157         msg[9] = privbuf[0];
2158         msg[10] = 0xD4000000 | privbuf[3];
2159         msg[11] = privbuf[2];
2160
2161         ret=i2o_post_wait(iop, msg, sizeof(msg), 120);
2162
2163         pci_unmap_single(iop->pdev, sys_tbl_phys, sys_tbl_len, PCI_DMA_TODEVICE);
2164         
2165         if(ret==-ETIMEDOUT)
2166         {
2167                 printk(KERN_ERR "%s: SysTab setup timed out.\n", iop->name);
2168         }
2169         else if(ret<0)
2170         {
2171                 printk(KERN_ERR "%s: Unable to set SysTab (status=%#x).\n", 
2172                         iop->name, -ret);
2173         }
2174         else
2175         {
2176                 dprintk(KERN_INFO "%s: SysTab set.\n", iop->name);
2177         }
2178         i2o_status_get(iop); // Entered READY state
2179
2180         kfree(privbuf);
2181         return ret;     
2182
2183  }
2184
2185 /*
2186  * Initialize I2O subsystem.
2187  */
2188 void __init i2o_sys_init(void)
2189 {
2190         struct i2o_controller *iop, *niop = NULL;
2191
2192         printk(KERN_INFO "Activating I2O controllers...\n");
2193         printk(KERN_INFO "This may take a few minutes if there are many devices\n");
2194         
2195         /* In INIT state, Activate IOPs */
2196         for (iop = i2o_controller_chain; iop; iop = niop) {
2197                 dprintk(KERN_INFO "Calling i2o_activate_controller for %s...\n", 
2198                         iop->name);
2199                 niop = iop->next;
2200                 if (i2o_activate_controller(iop) < 0)
2201                         i2o_delete_controller(iop);
2202         }
2203
2204         /* Active IOPs in HOLD state */
2205
2206 rebuild_sys_tab:
2207         if (i2o_controller_chain == NULL)
2208                 return;
2209
2210         /*
2211          * If build_sys_table fails, we kill everything and bail
2212          * as we can't init the IOPs w/o a system table
2213          */     
2214         dprintk(KERN_INFO "i2o_core: Calling i2o_build_sys_table...\n");
2215         if (i2o_build_sys_table() < 0) {
2216                 i2o_sys_shutdown();
2217                 return;
2218         }
2219
2220         /* If an IOP doesn't come online, we need to rebuild the system table */
2221         for (iop = i2o_controller_chain; iop; iop = niop) {
2222                 niop = iop->next;
2223                 dprintk(KERN_INFO "Calling i2o_online_controller for %s...\n", iop->name);
2224                 if (i2o_online_controller(iop) < 0) {
2225                         i2o_delete_controller(iop);     
2226                         goto rebuild_sys_tab;
2227                 }
2228         }
2229         
2230         /* Active IOPs now in OPERATIONAL state */
2231
2232         /*
2233          * Register for status updates from all IOPs
2234          */
2235         for(iop = i2o_controller_chain; iop; iop=iop->next) {
2236
2237                 /* Create a kernel thread to deal with dynamic LCT updates */
2238                 iop->lct_pid = kernel_thread(i2o_dyn_lct, iop, CLONE_SIGHAND);
2239         
2240                 /* Update change ind on DLCT */
2241                 iop->dlct->change_ind = iop->lct->change_ind;
2242
2243                 /* Start dynamic LCT updates */
2244                 i2o_lct_notify(iop);
2245
2246                 /* Register for all events from IRTOS */
2247                 i2o_event_register(iop, core_context, 0, 0, 0xFFFFFFFF);
2248         }
2249 }
2250
2251 /**
2252  *      i2o_sys_shutdown - shutdown I2O system
2253  *
2254  *      Bring down each i2o controller and then return. Each controller
2255  *      is taken through an orderly shutdown
2256  */
2257  
2258 static void i2o_sys_shutdown(void)
2259 {
2260         struct i2o_controller *iop, *niop;
2261
2262         /* Delete all IOPs from the controller chain */
2263         /* that will reset all IOPs too */
2264
2265         for (iop = i2o_controller_chain; iop; iop = niop) {
2266                 niop = iop->next;
2267                 i2o_delete_controller(iop);
2268         }
2269 }
2270
2271 /**
2272  *      i2o_activate_controller -       bring controller up to HOLD
2273  *      @iop: controller
2274  *
2275  *      This function brings an I2O controller into HOLD state. The adapter
2276  *      is reset if necessary and then the queues and resource table
2277  *      are read. -1 is returned on a failure, 0 on success.
2278  *      
2279  */
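/*
 *      Bring-up order used below: fetch the status block (resetting the IOP
 *      if it will not answer), sanity check the I2O version and state, reset
 *      again if the IOP is already running, then initialise the outbound
 *      queue, post the reply frames and finally fetch the HRT.
 */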
2280  
2281 int i2o_activate_controller(struct i2o_controller *iop)
2282 {
2283         /* In INIT state, Wait Inbound Q to initialize (in i2o_status_get) */
2284         /* In READY state, Get status */
2285
2286         if (i2o_status_get(iop) < 0) {
2287                 printk(KERN_INFO "Unable to obtain status of %s, "
2288                         "attempting a reset.\n", iop->name);
2289                 if (i2o_reset_controller(iop) < 0)
2290                         return -1;
2291         }
2292
2293         if(iop->status_block->iop_state == ADAPTER_STATE_FAULTED) {
2294                 printk(KERN_CRIT "%s: hardware fault\n", iop->name);
2295                 return -1;
2296         }
2297
2298         if (iop->status_block->i2o_version > I2OVER15) {
2299                 printk(KERN_ERR "%s: Not running version 1.5 of the I2O Specification.\n",
2300                         iop->name);
2301                 return -1;
2302         }
2303
2304         if (iop->status_block->iop_state == ADAPTER_STATE_READY ||
2305             iop->status_block->iop_state == ADAPTER_STATE_OPERATIONAL ||
2306             iop->status_block->iop_state == ADAPTER_STATE_HOLD ||
2307             iop->status_block->iop_state == ADAPTER_STATE_FAILED)
2308         {
2309                 dprintk(KERN_INFO "%s: Already running, trying to reset...\n",
2310                         iop->name);
2311                 if (i2o_reset_controller(iop) < 0)
2312                         return -1;
2313         }
2314
2315         if (i2o_init_outbound_q(iop) < 0)
2316                 return -1;
2317
2318         if (i2o_post_outbound_messages(iop)) 
2319                 return -1;
2320
2321         /* In HOLD state */
2322         
2323         if (i2o_hrt_get(iop) < 0)
2324                 return -1;
2325
2326         return 0;
2327 }
2328
2329
2330 /**
2331  *      i2o_init_outbound_q - setup the outbound queue
2332  *      @c: controller
2333  *
2334  *      Clear and (re)initialize IOP's outbound queue. Returns 0 on
2335  *      success or a negative errno code on a failure.
2336  */
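/*
 *      This step configures the queue itself (host page size and outbound
 *      frame size, MSG_FRAME_SIZE in 32-bit words); the reply frames are
 *      allocated and posted afterwards by i2o_post_outbound_messages().
 */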
2337  
2338 int i2o_init_outbound_q(struct i2o_controller *c)
2339 {
2340         u8 *status;
2341         dma_addr_t status_phys;
2342         u32 m;
2343         u32 *msg;
2344         u32 time;
2345
2346         dprintk(KERN_INFO "%s: Initializing Outbound Queue...\n", c->name);
2347         m=i2o_wait_message(c, "OutboundInit");
2348         if(m==0xFFFFFFFF)
2349                 return -ETIMEDOUT;
2350         msg=(u32 *)(c->msg_virt+m);
2351
2352         status = pci_alloc_consistent(c->pdev, 4, &status_phys);
2353         if (status==NULL) {
2354                 printk(KERN_ERR "%s: Outbound Queue initialization failed - no free memory.\n",
2355                         c->name);
2356                 return -ENOMEM;
2357         }
2358         memset(status, 0, 4);
2359
2360         msg[0]= EIGHT_WORD_MSG_SIZE| TRL_OFFSET_6;
2361         msg[1]= I2O_CMD_OUTBOUND_INIT<<24 | HOST_TID<<12 | ADAPTER_TID;
2362         msg[2]= core_context;
2363         msg[3]= 0x0106;                         /* Transaction context */
2364         msg[4]= 4096;                           /* Host page frame size */
2365         /* Frame size is in words. 256 bytes a frame for now */
2366         msg[5]= MSG_FRAME_SIZE<<16|0x80;        /* Outbound msg frame size in words and Initcode */
2367         msg[6]= 0xD0000004;                     /* Simple SG LE, EOB */
2368         msg[7]= status_phys;
2369
2370         i2o_post_message(c,m);
2371         
2372         barrier();
2373         time=jiffies;
2374         while(status[0] < I2O_CMD_REJECTED)
2375         {
2376                 if((jiffies-time)>=30*HZ)
2377                 {
2378                         if(status[0]==0x00)
2379                                 printk(KERN_ERR "%s: Ignored queue initialize request.\n",
2380                                         c->name);
2381                         else  
2382                                 printk(KERN_ERR "%s: Outbound queue initialize timeout.\n",
2383                                         c->name);
2384                         pci_free_consistent(c->pdev, 4, status, status_phys);
2385                         return -ETIMEDOUT;
2386                 }  
2387                 yield();
2388                 barrier();
2389         }  
2390
2391         if(status[0] != I2O_CMD_COMPLETED)
2392         {
2393                 printk(KERN_ERR "%s: IOP outbound initialise failed.\n", c->name);
2394                 pci_free_consistent(c->pdev, 4, status, status_phys);
2395                 return -ETIMEDOUT;
2396         }
2397         pci_free_consistent(c->pdev, 4, status, status_phys);
2398         return 0;
2399 }
2400
2401 /**
2402  *      i2o_post_outbound_messages      -       fill message queue
2403  *      @c: controller
2404  *
2405  *      Allocate the outbound message frame pool and post it to the IOP. The
2406  *      function returns zero on success or a negative errno code on
2407  *      failure.
2408  */
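/*
 *      The pool is carved into NMBR_MSG_FRAMES frames of MSG_FRAME_SIZE
 *      32-bit words each (hence the "<< 2" byte stride below); each frame's
 *      bus address is handed to the IOP through the reply FIFO.
 *      MSG_POOL_SIZE is presumably sized to cover exactly that many frames.
 */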
2409
2410 int i2o_post_outbound_messages(struct i2o_controller *c)
2411 {
2412         int i;
2413         u32 m;
2414         /* Alloc space for IOP's outbound queue message frames */
2415
2416         c->page_frame = kmalloc(MSG_POOL_SIZE, GFP_KERNEL);
2417         if(c->page_frame==NULL) {
2418                 printk(KERN_ERR "%s: Outbound Q initialize failed; out of memory.\n",
2419                         c->name);
2420                 return -ENOMEM;
2421         }
2422
2423         c->page_frame_map = pci_map_single(c->pdev, c->page_frame, MSG_POOL_SIZE, PCI_DMA_FROMDEVICE);
2424
2425         if(c->page_frame_map == 0)
2426         {
2427                 kfree(c->page_frame);
2428                 printk(KERN_ERR "%s: Unable to map outbound queue.\n", c->name);
2429                 return -ENOMEM;
2430         }
2431
2432         m = c->page_frame_map;
2433
2434         /* Post frames */
2435
2436         for(i=0; i< NMBR_MSG_FRAMES; i++) {
2437                 I2O_REPLY_WRITE32(c,m);
2438                 mb();
2439                 m += (MSG_FRAME_SIZE << 2);
2440         }
2441
2442         return 0;
2443 }
2444
2445 /*
2446  * Get the IOP's Logical Configuration Table
2447  */
2448 int i2o_lct_get(struct i2o_controller *c)
2449 {
2450         u32 msg[8];
2451         int ret, size = c->status_block->expected_lct_size;
2452
2453         do {
2454                 if (c->lct == NULL) {
2455                         c->lct = pci_alloc_consistent(c->pdev, size, &c->lct_phys);
2456                         if(c->lct == NULL) {
2457                                 printk(KERN_CRIT "%s: Lct Get failed. Out of memory.\n",
2458                                         c->name);
2459                                 return -ENOMEM;
2460                         }
2461                 }
2462                 memset(c->lct, 0, size);
2463
2464                 msg[0] = EIGHT_WORD_MSG_SIZE|SGL_OFFSET_6;
2465                 msg[1] = I2O_CMD_LCT_NOTIFY<<24 | HOST_TID<<12 | ADAPTER_TID;
2466                 /* msg[2] filled in i2o_post_wait */
2467                 msg[3] = 0;
2468                 msg[4] = 0xFFFFFFFF;    /* All devices */
2469                 msg[5] = 0x00000000;    /* Report now */
2470                 msg[6] = 0xD0000000|size;
2471                 msg[7] = c->lct_phys;
2472
2473                 ret=i2o_post_wait_mem(c, msg, sizeof(msg), 120, c->lct, NULL, c->lct_phys, 0, size, 0);
2474                 
2475                 if(ret == -ETIMEDOUT)
2476                 {
2477                         c->lct = NULL;
2478                         return ret;
2479                 }
2480                 
2481                 if(ret<0)
2482                 {
2483                         printk(KERN_ERR "%s: LCT Get failed (status=%#x).\n", 
2484                                 c->name, -ret); 
2485                         return ret;
2486                 }
2487
2488                 if (c->lct->table_size << 2 > size) {
2489                         int new_size = c->lct->table_size << 2;
2490                         pci_free_consistent(c->pdev, size, c->lct, c->lct_phys);
2491                         size = new_size;
2492                         c->lct = NULL;
2493                 }
2494         } while (c->lct == NULL);
2495
2496         if ((ret=i2o_parse_lct(c)) < 0)
2497                 return ret;
2498
2499         return 0;
2500 }
2501
2502 /*
2503  * Like above, but used for async notification.  The main
2504  * difference is that we keep track of the CurrentChangeIndicator
2505  * so that we only get updates when it actually changes.
2506  *
2507  */
2508 int i2o_lct_notify(struct i2o_controller *c)
2509 {
2510         u32 msg[8];
2511
2512         msg[0] = EIGHT_WORD_MSG_SIZE|SGL_OFFSET_6;
2513         msg[1] = I2O_CMD_LCT_NOTIFY<<24 | HOST_TID<<12 | ADAPTER_TID;
2514         msg[2] = core_context;
2515         msg[3] = 0xDEADBEEF;    
2516         msg[4] = 0xFFFFFFFF;    /* All devices */
2517         msg[5] = c->dlct->change_ind+1; /* Next change */
2518         msg[6] = 0xD0000000|8192;
2519         msg[7] = c->dlct_phys;
2520
2521         return i2o_post_this(c, msg, sizeof(msg));
2522 }
2523                 
2524 /*
2525  *      Bring a controller online into OPERATIONAL state. 
2526  */
2527  
2528 int i2o_online_controller(struct i2o_controller *iop)
2529 {
2530         u32 v;
2531         
2532         if (i2o_systab_send(iop) < 0)
2533                 return -1;
2534
2535         /* In READY state */
2536
2537         dprintk(KERN_INFO "%s: Attempting to enable...\n", iop->name);
2538         if (i2o_enable_controller(iop) < 0)
2539                 return -1;
2540
2541         /* In OPERATIONAL state  */
2542
2543         dprintk(KERN_INFO "%s: Attempting to get/parse lct...\n", iop->name);
2544         if (i2o_lct_get(iop) < 0)
2545                 return -1;
2546
2547         /* Check battery status */
2548          
2549         iop->battery = 0;
2550         if(i2o_query_scalar(iop, ADAPTER_TID, 0x0000, 4, &v, 4)>=0)
2551         {
2552                 if(v&16)
2553                         iop->battery = 1;
2554         }
2555
2556         return 0;
2557 }
2558
2559 /*
2560  * Build system table
2561  *
2562  * The system table contains information about all the IOPs in the
2563  * system (duh) and is used by the Executives on the IOPs to establish
2564  * peer2peer connections.  We're not supporting peer2peer at the moment,
2565  * but this will be needed down the road for things like lan2lan forwarding.
2566  */
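/*
 * Resulting layout (a sketch): an i2o_sys_tbl header (num_entries, version,
 * change_ind) followed by one i2o_sys_tbl_entry per registered controller,
 * each carrying the IOP id (unit + 2), its state, inbound frame size and the
 * bus address of its inbound post port.
 */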
2567 static int i2o_build_sys_table(void)
2568 {
2569         struct i2o_controller *iop = NULL;
2570         struct i2o_controller *niop = NULL;
2571         int count = 0;
2572
2573         sys_tbl_len = sizeof(struct i2o_sys_tbl) +      // Header + IOPs
2574                                 (i2o_num_controllers) *
2575                                         sizeof(struct i2o_sys_tbl_entry);
2576
2577         if(sys_tbl)
2578                 kfree(sys_tbl);
2579
2580         sys_tbl = kmalloc(sys_tbl_len, GFP_KERNEL);
2581         if(!sys_tbl) {
2582                 printk(KERN_CRIT "SysTab Set failed. Out of memory.\n");
2583                 return -ENOMEM;
2584         }
2585         memset((void*)sys_tbl, 0, sys_tbl_len);
2586
2587         sys_tbl->num_entries = i2o_num_controllers;
2588         sys_tbl->version = I2OVERSION; /* TODO: Version 2.0 */
2589         sys_tbl->change_ind = sys_tbl_ind++;
2590
2591         for(iop = i2o_controller_chain; iop; iop = niop)
2592         {
2593                 niop = iop->next;
2594
2595                 /* 
2596                  * Get updated IOP state so we have the latest information
2597                  *
2598                  * We should delete the controller at this point if it
2599  *              doesn't respond, since if it's not on the system table
2600  *              it is technically not part of the I2O subsystem...
2601                  */
2602                 if(i2o_status_get(iop)) {
2603                         printk(KERN_ERR "%s: Deleting b/c could not get status while"
2604                                 " attempting to build the system table\n", iop->name);
2605                         i2o_delete_controller(iop);             
2606                         sys_tbl->num_entries--;
2607                         continue; // try the next one
2608                 }
2609
2610                 sys_tbl->iops[count].org_id = iop->status_block->org_id;
2611                 sys_tbl->iops[count].iop_id = iop->unit + 2;
2612                 sys_tbl->iops[count].seg_num = 0;
2613                 sys_tbl->iops[count].i2o_version = 
2614                                 iop->status_block->i2o_version;
2615                 sys_tbl->iops[count].iop_state = 
2616                                 iop->status_block->iop_state;
2617                 sys_tbl->iops[count].msg_type = 
2618                                 iop->status_block->msg_type;
2619                 sys_tbl->iops[count].frame_size = 
2620                                 iop->status_block->inbound_frame_size;
2621                 sys_tbl->iops[count].last_changed = sys_tbl_ind - 1; // ??
2622                 sys_tbl->iops[count].iop_capabilities = 
2623                                 iop->status_block->iop_capabilities;
2624                 sys_tbl->iops[count].inbound_low = (u32)iop->post_port;
2625                 sys_tbl->iops[count].inbound_high = 0;  // FIXME: 64-bit support
2626
2627                 count++;
2628         }
2629
2630 #ifdef DRIVERDEBUG
2631 {
2632         u32 *table;
2633         table = (u32*)sys_tbl;
2634         for(count = 0; count < (sys_tbl_len >>2); count++)
2635                 printk(KERN_INFO "sys_tbl[%d] = %0#10x\n", count, table[count]);
2636 }
2637 #endif
2638
2639         return 0;
2640 }
2641
2642
2643 /*
2644  *      Run time support routines
2645  */
2646  
2647 /*
2648  *      Generic "post and forget" helpers. This is less efficient - we do
2649  *      a memcpy for example that isn't strictly needed, but for most uses
2650  *      this is simply not worth optimising
2651  */
2652
2653 int i2o_post_this(struct i2o_controller *c, u32 *data, int len)
2654 {
2655         u32 m;
2656         u32 *msg;
2657         unsigned long t=jiffies;
2658
2659         do
2660         {
2661                 mb();
2662                 m = I2O_POST_READ32(c);
2663         }
2664         while(m==0xFFFFFFFF && (jiffies-t)<HZ);
2665         
2666         if(m==0xFFFFFFFF)
2667         {
2668                 printk(KERN_ERR "%s: Timeout waiting for message frame!\n",
2669                        c->name);
2670                 return -ETIMEDOUT;
2671         }
2672         msg = (u32 *)(c->msg_virt + m);
2673         memcpy_toio(msg, data, len);
2674         i2o_post_message(c,m);
2675         return 0;
2676 }
2677
2678 /**
2679  *      i2o_post_wait_mem       -       I2O query/reply with DMA buffers
2680  *      @c: controller
2681  *      @msg: message to send
2682  *      @len: length of message
2683  *      @timeout: time in seconds to wait
2684  *      @mem1: attached memory buffer 1
2685  *      @mem2: attached memory buffer 2
2686  *      @phys1: physical address of buffer 1
2687  *      @phys2: physical address of buffer 2
2688  *      @size1: size of buffer 1
2689  *      @size2: size of buffer 2
2690  *
2691  *      This core API allows an OSM to post a message and then be told whether
2692  *      or not the system received a successful reply. 
2693  *
2694  *      If the message times out then the value '-ETIMEDOUT' is returned. This
2695  *      is a special case. In this situation the message may (should) complete
2696  *      at an indefinite time in the future. When it completes it will use the
2697  *      memory buffers attached to the request. If -ETIMEDOUT is returned then
2698  *      the memory buffers must not be freed. Instead the event completion will
2699  *      free them for you. In all other cases the buffers are your problem.
2700  *
2701  *      Pass NULL for unneeded buffers.
2702  */
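/*
 *      Ownership example (a sketch mirroring i2o_hrt_get() above): buffers
 *      handed to this call must be left alone if it times out, since the
 *      late reply path frees them.
 *
 *              buf = pci_alloc_consistent(c->pdev, len, &buf_phys);
 *              ret = i2o_post_wait_mem(c, msg, sizeof(msg), 30,
 *                              buf, NULL, buf_phys, 0, len, 0);
 *              if (ret == -ETIMEDOUT)
 *                      return ret;     (buffer now belongs to the core)
 *              ... use and then pci_free_consistent() the buffer ...
 */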
2703  
2704 int i2o_post_wait_mem(struct i2o_controller *c, u32 *msg, int len, int timeout, void *mem1, void *mem2, dma_addr_t phys1, dma_addr_t phys2, int size1, int size2)
2705 {
2706         DECLARE_WAIT_QUEUE_HEAD(wq_i2o_post);
2707         DECLARE_WAITQUEUE(wait, current);
2708         int complete = 0;
2709         int status;
2710         unsigned long flags = 0;
2711         struct i2o_post_wait_data *wait_data =
2712                 kmalloc(sizeof(struct i2o_post_wait_data), GFP_KERNEL);
2713
2714         if(!wait_data)
2715                 return -ENOMEM;
2716
2717         /*
2718          *      Create a new notification object
2719          */
2720         wait_data->status = &status;
2721         wait_data->complete = &complete;
2722         wait_data->mem[0] = mem1;
2723         wait_data->mem[1] = mem2;
2724         wait_data->phys[0] = phys1;
2725         wait_data->phys[1] = phys2;
2726         wait_data->size[0] = size1;
2727         wait_data->size[1] = size2;
2728         
2729         /* 
2730          *      Queue the event with its unique id
2731          */
2732         spin_lock_irqsave(&post_wait_lock, flags);
2733
2734         wait_data->next = post_wait_queue;
2735         post_wait_queue = wait_data;
2736         wait_data->id = (++post_wait_id) & 0x7fff;
2737         wait_data->wq = &wq_i2o_post;
2738
2739         spin_unlock_irqrestore(&post_wait_lock, flags);
2740
2741         /*
2742          *      Fill in the message id
2743          */
2744          
2745         msg[2] = 0x80000000|(u32)core_context|((u32)wait_data->id<<16);
2746         
2747         /*
2748          *      Post the message to the controller. At some point later it 
2749          *      will return. If we time out before it returns then
2750          *      complete will be zero.  From the point post_this returns
2751          *      the wait_data may have been deleted.
2752          */
2753
2754         add_wait_queue(&wq_i2o_post, &wait);
2755         set_current_state(TASK_INTERRUPTIBLE);
2756         if ((status = i2o_post_this(c, msg, len))==0) {
2757                 schedule_timeout(HZ * timeout);
2758         }  
2759         else
2760         {
2761                 remove_wait_queue(&wq_i2o_post, &wait);
2762                 return -EIO;
2763         }
2764         remove_wait_queue(&wq_i2o_post, &wait);
2765
2766         if(signal_pending(current))
2767                 status = -EINTR;
2768                 
2769         spin_lock_irqsave(&post_wait_lock, flags);
2770         barrier();      /* Be sure we see complete as it is locked */
2771         if(!complete)
2772         {
2773                 /* 
2774                  *      Mark the entry dead. We cannot remove it. This is important.
2775  *      When it does terminate (which it must do if the controller hasn't
2776  *      died) it would otherwise scribble on freed memory.
2777                  *      !complete lets us safely check if the entry is still
2778                  *      allocated and thus we can write into it
2779                  */
2780                 wait_data->wq = NULL;
2781                 status = -ETIMEDOUT;
2782         }
2783         else
2784         {
2785                 /* Debugging check - remove me soon */
2786                 if(status == -ETIMEDOUT)
2787                 {
2788                         printk("TIMEDOUT BUG!\n");
2789                         status = -EIO;
2790                 }
2791         }
2792         /* And the wait_data is not leaked either! */    
2793         spin_unlock_irqrestore(&post_wait_lock, flags);
2794         return status;
2795 }
2796
2797 /**
2798  *      i2o_post_wait           -       I2O query/reply
2799  *      @c: controller
2800  *      @msg: message to send
2801  *      @len: length of message
2802  *      @timeout: time in seconds to wait
2803  *
2804  *      This core API allows an OSM to post a message and then be told whether
2805  *      or not the system received a successful reply. 
2806  */
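/*
 *      Usage sketch (as in i2o_quiesce_controller() above):
 *
 *              u32 msg[4];
 *              msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
 *              msg[1] = I2O_CMD_SYS_QUIESCE<<24|HOST_TID<<12|ADAPTER_TID;
 *              msg[3] = 0;
 *              if (i2o_post_wait(c, msg, sizeof(msg), 240))
 *                      ... report the failure ...
 */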
2807  
2808 int i2o_post_wait(struct i2o_controller *c, u32 *msg, int len, int timeout)
2809 {
2810         return i2o_post_wait_mem(c, msg, len, timeout, NULL, NULL, 0, 0, 0, 0);
2811 }
2812
2813 /*
2814  * i2o_post_wait is completed and we want to wake up the 
2815  * sleeping process. Called by core's reply handler.
2816  */
2817
2818 static void i2o_post_wait_complete(struct i2o_controller *c, u32 context, int status)
2819 {
2820         struct i2o_post_wait_data **p1, *q;
2821         unsigned long flags;
2822         
2823         /* 
2824          * We need to search through the post_wait 
2825          * queue to see if the given message is still
2826          * outstanding.  If not, it means that the IOP 
2827          * took longer to respond to the message than we 
2828  *      had allowed and the timer has already expired.  
2829          * Not much we can do about that except log
2830          * it for debug purposes, increase timeout, and recompile
2831          *
2832          * Lock needed to keep anyone from moving queue pointers 
2833          * around while we're looking through them.
2834          */
2835
2836         spin_lock_irqsave(&post_wait_lock, flags);
2837
2838         for(p1 = &post_wait_queue; *p1!=NULL; p1 = &((*p1)->next)) 
2839         {
2840                 q = (*p1);
2841                 if(q->id == ((context >> 16) & 0x7fff)) {
2842                         /*
2843                          *      Delete it 
2844                          */
2845                          
2846                         *p1 = q->next;
2847                         
2848                         /*
2849                          *      Live or dead ?
2850                          */
2851                          
2852                         if(q->wq)
2853                         {
2854                                 /* Live entry - wakeup and set status */
2855                                 *q->status = status;
2856                                 *q->complete = 1;
2857                                 wake_up(q->wq);
2858                         }
2859                         else
2860                         {
2861                                 /*
2862                                  *      Free resources. Caller is dead
2863                                  */
2864
2865                                 if(q->mem[0])
2866                                         pci_free_consistent(c->pdev, q->size[0], q->mem[0], q->phys[0]);
2867                                 if(q->mem[1])
2868                                         pci_free_consistent(c->pdev, q->size[1], q->mem[1], q->phys[1]);
2869
2870                                 printk(KERN_WARNING "i2o_post_wait event completed after timeout.\n");
2871                         }
2872                         kfree(q);
2873                         spin_unlock_irqrestore(&post_wait_lock, flags);
2874                         return;
2875                 }
2876         }
2877         spin_unlock_irqrestore(&post_wait_lock, flags);
2878
2879         printk(KERN_DEBUG "i2o_post_wait: Bogus reply!\n");
2880 }
2881
2882 /*      Issue UTIL_PARAMS_GET or UTIL_PARAMS_SET
2883  *
2884  *      This function can be used for all UtilParamsGet/Set operations.
2885  *      The OperationList is given in oplist-buffer, 
2886  *      and results are returned in reslist-buffer.
2887  *      Note that the minimum sized reslist is 8 bytes and contains
2888  *      ResultCount, ErrorInfoSize, BlockStatus and BlockSize.
2889  */
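/*
 *      Result list layout relied on below: the first 32-bit word carries
 *      ResultCount in its low 16 bits; each result block then begins with a
 *      header word packing ErrorInfoSize (bits 31-24), BlockStatus (bits
 *      23-16) and BlockSize in 32-bit words (bits 15-0).
 */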
2890
2891 int i2o_issue_params(int cmd, struct i2o_controller *iop, int tid, 
2892                 void *oplist, int oplen, void *reslist, int reslen)
2893 {
2894         u32 msg[9]; 
2895         u32 *res32 = (u32*)reslist;
2896         u32 *restmp = (u32*)reslist;
2897         int len = 0;
2898         int i = 0;
2899         int wait_status;
2900         u32 *opmem, *resmem;
2901         dma_addr_t opmem_phys, resmem_phys;
2902         
2903         /* Get DMAable memory */
2904         opmem = pci_alloc_consistent(iop->pdev, oplen, &opmem_phys);
2905         if(opmem == NULL)
2906                 return -ENOMEM;
2907         memcpy(opmem, oplist, oplen);
2908         
2909         resmem = pci_alloc_consistent(iop->pdev, reslen, &resmem_phys);
2910         if(resmem == NULL)
2911         {
2912                 pci_free_consistent(iop->pdev, oplen, opmem, opmem_phys);
2913                 return -ENOMEM;
2914         }
2915         
2916         msg[0] = NINE_WORD_MSG_SIZE | SGL_OFFSET_5;
2917         msg[1] = cmd << 24 | HOST_TID << 12 | tid; 
2918         msg[3] = 0;
2919         msg[4] = 0;
2920         msg[5] = 0x54000000 | oplen;    /* OperationList */
2921         msg[6] = opmem_phys;
2922         msg[7] = 0xD0000000 | reslen;   /* ResultList */
2923         msg[8] = resmem_phys;
2924
2925         wait_status = i2o_post_wait_mem(iop, msg, sizeof(msg), 10, opmem, resmem, opmem_phys, resmem_phys, oplen, reslen);
2926         
2927         /*
2928          *      This only looks like a memory leak - don't "fix" it.    
2929          */
2930         if(wait_status == -ETIMEDOUT)
2931                 return wait_status;
2932
2933         memcpy(reslist, resmem, reslen);
2934         pci_free_consistent(iop->pdev, reslen, resmem, resmem_phys);
2935         pci_free_consistent(iop->pdev, oplen, opmem, opmem_phys);
2936         
2937         /* Query failed */
2938         if(wait_status != 0)
2939                 return wait_status;             
2940         /*
2941          * Calculate number of bytes of Result LIST
2942          * We need to loop through each Result BLOCK and grab the length
2943          */
2944         restmp = res32 + 1;
2945         len = 1;
2946         for(i = 0; i < (res32[0]&0X0000FFFF); i++)
2947         {
2948                 if(restmp[0]&0x00FF0000)        /* BlockStatus != SUCCESS */
2949                 {
2950                         printk(KERN_WARNING "%s - Error:\n  ErrorInfoSize = 0x%02x, " 
2951                                         "BlockStatus = 0x%02x, BlockSize = 0x%04x\n",
2952                                         (cmd == I2O_CMD_UTIL_PARAMS_SET) ? "PARAMS_SET"
2953                                         : "PARAMS_GET",   
2954                                         restmp[0]>>24, (restmp[0]>>16)&0xFF, restmp[0]&0xFFFF);
2955         
2956                         /*
2957                          *      If this is the only request, then we return an error
2958                          */
2959                         if((res32[0]&0x0000FFFF) == 1)
2960                         {
2961                                 return -((res32[1] >> 16) & 0xFF); /* -BlockStatus */
2962                         }
2963                 }
2964                 len += restmp[0] & 0x0000FFFF;  /* Length of res BLOCK */
2965                 restmp += restmp[0] & 0x0000FFFF;       /* Skip to next BLOCK */
2966         }
2967         return (len << 2);  /* bytes used by result list */
2968 }
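/*
 * Editor's sketch (not part of the driver): how a caller of i2o_issue_params()
 * might unpack the ResultList header copied back into reslist.  The field
 * layout matches the loop above: word 0 holds ResultCount in its low 16 bits,
 * word 1 is the first result block header carrying BlockSize (in 32-bit
 * words), BlockStatus and ErrorInfoSize.
 */
#if 0
static void example_dump_resblk_header(const u32 *res32)
{
	u16 result_count = res32[0] & 0xFFFF;		/* number of result blocks */
	u16 block_size   = res32[1] & 0xFFFF;		/* first block size, 32-bit words */
	u8  block_status = (res32[1] >> 16) & 0xFF;	/* 0 means SUCCESS */
	u8  error_info   = res32[1] >> 24;		/* trailing ErrorInfo size */

	printk(KERN_DEBUG "ResultCount=%u BlockSize=%u BlockStatus=%#x "
	       "ErrorInfoSize=%u\n",
	       result_count, block_size, block_status, error_info);
}
#endif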
2969
2970 /*
2971  *       Query one scalar group value or a whole scalar group.
2972  */                     
2973 int i2o_query_scalar(struct i2o_controller *iop, int tid, 
2974                      int group, int field, void *buf, int buflen)
2975 {
2976         u16 opblk[] = { 1, 0, I2O_PARAMS_FIELD_GET, group, 1, field };
2977         u8  resblk[8+buflen]; /* 8 bytes for header */
2978         int size;
2979
2980         if (field == -1)                /* whole group */
2981                 opblk[4] = -1;
2982               
2983         size = i2o_issue_params(I2O_CMD_UTIL_PARAMS_GET, iop, tid, 
2984                 opblk, sizeof(opblk), resblk, sizeof(resblk));
2985         if(size < 0)
2986                 return size;    /* query failed - don't copy back stale data */
2987         memcpy(buf, resblk+8, buflen);  /* cut off header */
2988         if(size>buflen)
2989                 return buflen;
2990         return size;
2991 }
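/*
 * Editor's sketch (not part of the driver): a typical OSM-side read of a
 * single scalar field through i2o_query_scalar().  The group and field
 * numbers below are hypothetical, for illustration only.
 */
#if 0
static int example_read_scalar_field(struct i2o_controller *iop, int tid)
{
	u32 value;
	int ret;

	ret = i2o_query_scalar(iop, tid, 0xF100 /* group */, 3 /* field */,
			       &value, sizeof(value));
	if (ret < 0)
		return ret;	/* -BlockStatus, -ETIMEDOUT, -ENOMEM, ... */

	printk(KERN_DEBUG "scalar field = %#x (%d bytes returned)\n", value, ret);
	return 0;
}
#endif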
2992
2993 /*
2994  *      Set a scalar group value or a whole group.
2995  */
2996 int i2o_set_scalar(struct i2o_controller *iop, int tid, 
2997                    int group, int field, void *buf, int buflen)
2998 {
2999         u16 *opblk;
3000         u8  resblk[8+buflen]; /* 8 bytes for header */
3001         int size;
3002
3003         opblk = kmalloc(buflen+64, GFP_KERNEL);
3004         if (opblk == NULL)
3005         {
3006                 printk(KERN_ERR "i2o: no memory for operation buffer.\n");
3007                 return -ENOMEM;
3008         }
3009
3010         opblk[0] = 1;                        /* operation count */
3011         opblk[1] = 0;                        /* pad */
3012         opblk[2] = I2O_PARAMS_FIELD_SET;
3013         opblk[3] = group;
3014
3015         if(field == -1) {               /* whole group */
3016                 opblk[4] = -1;
3017                 memcpy(opblk+5, buf, buflen);
3018         }
3019         else                            /* single field */
3020         {
3021                 opblk[4] = 1;
3022                 opblk[5] = field;
3023                 memcpy(opblk+6, buf, buflen);
3024         }   
3025
3026         size = i2o_issue_params(I2O_CMD_UTIL_PARAMS_SET, iop, tid, 
3027                                 opblk, 12+buflen, resblk, sizeof(resblk));
3028
3029         kfree(opblk);
3030         if(size>buflen)
3031                 return buflen;
3032         return size;
3033 }
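/*
 * Editor's sketch (not part of the driver): setting one scalar field with
 * i2o_set_scalar().  Group, field and value are hypothetical.
 */
#if 0
static int example_write_scalar_field(struct i2o_controller *iop, int tid)
{
	u16 value = 0x0001;	/* hypothetical new value for the field */

	return i2o_set_scalar(iop, tid, 0xF100 /* group */, 3 /* field */,
			      &value, sizeof(value));
}
#endif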
3034
3035 /* 
3036  *      if oper == I2O_PARAMS_TABLE_GET, get from all rows 
3037  *              if fieldcount == -1 return all fields
3038  *                      ibuf and ibuflen are unused (use NULL, 0)
3039  *              else return specific fields
3040  *                      ibuf contains fieldindexes
3041  *
3042  *      if oper == I2O_PARAMS_LIST_GET, get from specific rows
3043  *              if fieldcount == -1 return all fields
3044  *                      ibuf contains rowcount, keyvalues
3045  *              else return specific fields
3046  *                      fieldcount is # of fieldindexes
3047  *                      ibuf contains fieldindexes, rowcount, keyvalues
3048  *
3049  *      You could also call i2o_issue_params() directly; see the sketch after this function.
3050  */
3051 int i2o_query_table(int oper, struct i2o_controller *iop, int tid, int group,
3052                 int fieldcount, void *ibuf, int ibuflen,
3053                 void *resblk, int reslen) 
3054 {
3055         u16 *opblk;
3056         int size;
3057
3058         opblk = kmalloc(10 + ibuflen, GFP_KERNEL);
3059         if (opblk == NULL)
3060         {
3061                 printk(KERN_ERR "i2o: no memory for query buffer.\n");
3062                 return -ENOMEM;
3063         }
3064
3065         opblk[0] = 1;                           /* operation count */
3066         opblk[1] = 0;                           /* pad */
3067         opblk[2] = oper;
3068         opblk[3] = group;               
3069         opblk[4] = fieldcount;
3070         memcpy(opblk+5, ibuf, ibuflen);         /* other params */
3071
3072         size = i2o_issue_params(I2O_CMD_UTIL_PARAMS_GET,iop, tid, 
3073                                 opblk, 10+ibuflen, resblk, reslen);
3074
3075         kfree(opblk);
3076         if(size>reslen)
3077                 return reslen;
3078         return size;
3079 }
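/*
 * Editor's sketch (not part of the driver): building the ibuf described in
 * the comment above for an I2O_PARAMS_LIST_GET of one specific row with all
 * fields (fieldcount == -1).  The group number and key value are hypothetical.
 */
#if 0
static int example_list_get_one_row(struct i2o_controller *iop, int tid,
				    void *resblk, int reslen)
{
	/* fieldcount == -1: ibuf carries RowCount followed by the key value(s) */
	u16 ibuf[] = { 1 /* RowCount */, 0x0001 /* key of the wanted row */ };

	return i2o_query_table(I2O_PARAMS_LIST_GET, iop, tid,
			       0xF000 /* group */, -1,
			       ibuf, sizeof(ibuf), resblk, reslen);
}
#endif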
3080
3081 /*
3082  *      Clear table group, i.e. delete all rows.
3083  */
3084 int i2o_clear_table(struct i2o_controller *iop, int tid, int group)
3085 {
3086         u16 opblk[] = { 1, 0, I2O_PARAMS_TABLE_CLEAR, group };
3087         u8  resblk[32]; /* min 8 bytes for result header */
3088
3089         return i2o_issue_params(I2O_CMD_UTIL_PARAMS_SET, iop, tid, 
3090                                 opblk, sizeof(opblk), resblk, sizeof(resblk));
3091 }
3092
3093 /*
3094  *      Add a new row into a table group.
3095  *
3096  *      if fieldcount==-1 then we add whole rows
3097  *              buf contains rowcount, keyvalues
3098  *      else just specific fields are given, rest use defaults
3099  *              buf contains fieldindexes, rowcount, keyvalues
3100  */     
3101 int i2o_row_add_table(struct i2o_controller *iop, int tid,
3102                     int group, int fieldcount, void *buf, int buflen)
3103 {
3104         u16 *opblk;
3105         u8  resblk[32]; /* min 8 bytes for header */
3106         int size;
3107
3108         opblk = kmalloc(buflen+64, GFP_KERNEL);
3109         if (opblk == NULL)
3110         {
3111                 printk(KERN_ERR "i2o: no memory for operation buffer.\n");
3112                 return -ENOMEM;
3113         }
3114
3115         opblk[0] = 1;                   /* operation count */
3116         opblk[1] = 0;                   /* pad */
3117         opblk[2] = I2O_PARAMS_ROW_ADD;
3118         opblk[3] = group;       
3119         opblk[4] = fieldcount;
3120         memcpy(opblk+5, buf, buflen);
3121
3122         size = i2o_issue_params(I2O_CMD_UTIL_PARAMS_SET, iop, tid, 
3123                                 opblk, 10+buflen, resblk, sizeof(resblk));
3124
3125         kfree(opblk);
3126         if(size>buflen)
3127                 return buflen;
3128         return size;
3129 }
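/*
 * Editor's sketch (not part of the driver): the buf layout described in the
 * comment above for i2o_row_add_table() when only specific fields are given.
 * Group number, field indexes and key value are hypothetical.
 */
#if 0
static int example_add_row(struct i2o_controller *iop, int tid)
{
	/* fieldcount != -1: buf carries the field indexes, then RowCount,
	 * then the key value(s) of the row being added. */
	u16 buf[] = { 0, 1,	/* field indexes */
		      1,	/* RowCount */
		      0x0002 };	/* key value of the new row */

	return i2o_row_add_table(iop, tid, 0xF000 /* group */, 2,
				 buf, sizeof(buf));
}
#endif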
3130
3131
3132 /*
3133  * Used for error reporting/debugging purposes.
3134  * The following fail status codes are common to all classes.
3135  * The preserved message must be handled in the reply handler. 
3136  */
3137 void i2o_report_fail_status(u8 req_status, u32* msg)
3138 {
3139         static char *FAIL_STATUS[] = { 
3140                 "0x80",                         /* not used */
3141                 "SERVICE_SUSPENDED",            /* 0x81 */
3142                 "SERVICE_TERMINATED",           /* 0x82 */
3143                 "CONGESTION",
3144                 "FAILURE",
3145                 "STATE_ERROR",
3146                 "TIME_OUT",
3147                 "ROUTING_FAILURE",
3148                 "INVALID_VERSION",
3149                 "INVALID_OFFSET",
3150                 "INVALID_MSG_FLAGS",
3151                 "FRAME_TOO_SMALL",
3152                 "FRAME_TOO_LARGE",
3153                 "INVALID_TARGET_ID",
3154                 "INVALID_INITIATOR_ID",
3155                 "INVALID_INITIATOR_CONTEXT",    /* 0x8F */
3156                 "UNKNOWN_FAILURE"               /* 0xFF */
3157         };
3158
3159         if (req_status == I2O_FSC_TRANSPORT_UNKNOWN_FAILURE)
3160                 printk("TRANSPORT_UNKNOWN_FAILURE (%0#2x).\n", req_status);
3161         else
3162                 printk("TRANSPORT_%s.\n", FAIL_STATUS[req_status & 0x0F]);
3163
3164         /* Dump some details */
3165
3166         printk(KERN_ERR "  InitiatorId = %d, TargetId = %d\n",
3167                 (msg[1] >> 12) & 0xFFF, msg[1] & 0xFFF); 
3168         printk(KERN_ERR "  LowestVersion = 0x%02X, HighestVersion = 0x%02X\n",
3169                 (msg[4] >> 8) & 0xFF, msg[4] & 0xFF);
3170         printk(KERN_ERR "  FailingHostUnit = 0x%04X,  FailingIOP = 0x%03X\n",
3171                 msg[5] >> 16, msg[5] & 0xFFF);
3172
3173         printk(KERN_ERR "  Severity:  0x%02X ", (msg[4] >> 16) & 0xFF); 
3174         if (msg[4] & (1<<16))
3175                 printk("(FormatError), "
3176                         "this msg can never be delivered/processed.\n");
3177         if (msg[4] & (1<<17))
3178                 printk("(PathError), "
3179                         "this msg can no longer be delivered/processed.\n");
3180         if (msg[4] & (1<<18))
3181                 printk("(PathState), "
3182                         "the system state does not allow delivery.\n");
3183         if (msg[4] & (1<<19))
3184                 printk("(Congestion), resources temporarily not available; "
3185                         "do not retry immediately.\n");
3186 }
3187
3188 /*
3189  * Used for error reporting/debugging purposes.
3190  * The following reply status codes are common to all classes.
3191  */
3192 void i2o_report_common_status(u8 req_status)
3193 {
3194         static char *REPLY_STATUS[] = { 
3195                 "SUCCESS", 
3196                 "ABORT_DIRTY", 
3197                 "ABORT_NO_DATA_TRANSFER",
3198                 "ABORT_PARTIAL_TRANSFER",
3199                 "ERROR_DIRTY",
3200                 "ERROR_NO_DATA_TRANSFER",
3201                 "ERROR_PARTIAL_TRANSFER",
3202                 "PROCESS_ABORT_DIRTY",
3203                 "PROCESS_ABORT_NO_DATA_TRANSFER",
3204                 "PROCESS_ABORT_PARTIAL_TRANSFER",
3205                 "TRANSACTION_ERROR",
3206                 "PROGRESS_REPORT"       
3207         };
3208
3209         if (req_status >= ARRAY_SIZE(REPLY_STATUS))
3210                 printk("RequestStatus = %0#2x", req_status);
3211         else
3212                 printk("%s", REPLY_STATUS[req_status]);
3213 }
3214
3215 /*
3216  * Used for error reporting/debugging purposes.
3217  * The following detailed status codes are valid for the executive class,
3218  * utility class, DDM class and for transaction error replies.
3219  */
3220 static void i2o_report_common_dsc(u16 detailed_status)
3221 {
3222         static char *COMMON_DSC[] = { 
3223                 "SUCCESS",
3224                 "0x01",                         // not used
3225                 "BAD_KEY",
3226                 "TCL_ERROR",
3227                 "REPLY_BUFFER_FULL",
3228                 "NO_SUCH_PAGE",
3229                 "INSUFFICIENT_RESOURCE_SOFT",
3230                 "INSUFFICIENT_RESOURCE_HARD",
3231                 "0x08",                         // not used
3232                 "CHAIN_BUFFER_TOO_LARGE",
3233                 "UNSUPPORTED_FUNCTION",
3234                 "DEVICE_LOCKED",
3235                 "DEVICE_RESET",
3236                 "INAPPROPRIATE_FUNCTION",
3237                 "INVALID_INITIATOR_ADDRESS",
3238                 "INVALID_MESSAGE_FLAGS",
3239                 "INVALID_OFFSET",
3240                 "INVALID_PARAMETER",
3241                 "INVALID_REQUEST",
3242                 "INVALID_TARGET_ADDRESS",
3243                 "MESSAGE_TOO_LARGE",
3244                 "MESSAGE_TOO_SMALL",
3245                 "MISSING_PARAMETER",
3246                 "TIMEOUT",
3247                 "UNKNOWN_ERROR",
3248                 "UNKNOWN_FUNCTION",
3249                 "UNSUPPORTED_VERSION",
3250                 "DEVICE_BUSY",
3251                 "DEVICE_NOT_AVAILABLE"          
3252         };
3253
3254         if (detailed_status > I2O_DSC_DEVICE_NOT_AVAILABLE)
3255                 printk(" / DetailedStatus = %0#4x.\n", detailed_status);
3256         else
3257                 printk(" / %s.\n", COMMON_DSC[detailed_status]);
3258 }
3259
3260 /*
3261  * Used for error reporting/debugging purposes
3262  */
3263 static void i2o_report_lan_dsc(u16 detailed_status)
3264 {
3265         static char *LAN_DSC[] = {      // Lan detailed status code strings
3266                 "SUCCESS",
3267                 "DEVICE_FAILURE",
3268                 "DESTINATION_NOT_FOUND",
3269                 "TRANSMIT_ERROR",
3270                 "TRANSMIT_ABORTED",
3271                 "RECEIVE_ERROR",
3272                 "RECEIVE_ABORTED",
3273                 "DMA_ERROR",
3274                 "BAD_PACKET_DETECTED",
3275                 "OUT_OF_MEMORY",
3276                 "BUCKET_OVERRUN",
3277                 "IOP_INTERNAL_ERROR",
3278                 "CANCELED",
3279                 "INVALID_TRANSACTION_CONTEXT",
3280                 "DEST_ADDRESS_DETECTED",
3281                 "DEST_ADDRESS_OMITTED",
3282                 "PARTIAL_PACKET_RETURNED",
3283                 "TEMP_SUSPENDED_STATE", // last Lan detailed status code
3284                 "INVALID_REQUEST"       // general detailed status code
3285         };
3286
3287         if (detailed_status > I2O_DSC_INVALID_REQUEST)
3288                 printk(" / %0#4x.\n", detailed_status);
3289         else
3290                 printk(" / %s.\n", LAN_DSC[detailed_status]);
3291 }
3292
3293 /*
3294  * Used for error reporting/debugging purposes
3295  */
3296 static void i2o_report_util_cmd(u8 cmd)
3297 {
3298         switch (cmd) {
3299         case I2O_CMD_UTIL_NOP:
3300                 printk("UTIL_NOP, ");
3301                 break;                  
3302         case I2O_CMD_UTIL_ABORT:
3303                 printk("UTIL_ABORT, ");
3304                 break;
3305         case I2O_CMD_UTIL_CLAIM:
3306                 printk("UTIL_CLAIM, ");
3307                 break;
3308         case I2O_CMD_UTIL_RELEASE:
3309                 printk("UTIL_CLAIM_RELEASE, ");
3310                 break;
3311         case I2O_CMD_UTIL_CONFIG_DIALOG:
3312                 printk("UTIL_CONFIG_DIALOG, ");
3313                 break;
3314         case I2O_CMD_UTIL_DEVICE_RESERVE:
3315                 printk("UTIL_DEVICE_RESERVE, ");
3316                 break;
3317         case I2O_CMD_UTIL_DEVICE_RELEASE:
3318                 printk("UTIL_DEVICE_RELEASE, ");
3319                 break;
3320         case I2O_CMD_UTIL_EVT_ACK:
3321                 printk("UTIL_EVENT_ACKNOWLEDGE, ");
3322                 break;
3323         case I2O_CMD_UTIL_EVT_REGISTER:
3324                 printk("UTIL_EVENT_REGISTER, ");
3325                 break;
3326         case I2O_CMD_UTIL_LOCK:
3327                 printk("UTIL_LOCK, ");
3328                 break;
3329         case I2O_CMD_UTIL_LOCK_RELEASE:
3330                 printk("UTIL_LOCK_RELEASE, ");
3331                 break;
3332         case I2O_CMD_UTIL_PARAMS_GET:
3333                 printk("UTIL_PARAMS_GET, ");
3334                 break;
3335         case I2O_CMD_UTIL_PARAMS_SET:
3336                 printk("UTIL_PARAMS_SET, ");
3337                 break;
3338         case I2O_CMD_UTIL_REPLY_FAULT_NOTIFY:
3339                 printk("UTIL_REPLY_FAULT_NOTIFY, ");
3340                 break;
3341         default:
3342                 printk("Cmd = %0#2x, ",cmd);    
3343         }
3344 }
3345
3346 /*
3347  * Used for error reporting/debugging purposes
3348  */
3349 static void i2o_report_exec_cmd(u8 cmd)
3350 {
3351         switch (cmd) {
3352         case I2O_CMD_ADAPTER_ASSIGN:
3353                 printk("EXEC_ADAPTER_ASSIGN, ");
3354                 break;
3355         case I2O_CMD_ADAPTER_READ:
3356                 printk("EXEC_ADAPTER_READ, ");
3357                 break;
3358         case I2O_CMD_ADAPTER_RELEASE:
3359                 printk("EXEC_ADAPTER_RELEASE, ");
3360                 break;
3361         case I2O_CMD_BIOS_INFO_SET:
3362                 printk("EXEC_BIOS_INFO_SET, ");
3363                 break;
3364         case I2O_CMD_BOOT_DEVICE_SET:
3365                 printk("EXEC_BOOT_DEVICE_SET, ");
3366                 break;
3367         case I2O_CMD_CONFIG_VALIDATE:
3368                 printk("EXEC_CONFIG_VALIDATE, ");
3369                 break;
3370         case I2O_CMD_CONN_SETUP:
3371                 printk("EXEC_CONN_SETUP, ");
3372                 break;
3373         case I2O_CMD_DDM_DESTROY:
3374                 printk("EXEC_DDM_DESTROY, ");
3375                 break;
3376         case I2O_CMD_DDM_ENABLE:
3377                 printk("EXEC_DDM_ENABLE, ");
3378                 break;
3379         case I2O_CMD_DDM_QUIESCE:
3380                 printk("EXEC_DDM_QUIESCE, ");
3381                 break;
3382         case I2O_CMD_DDM_RESET:
3383                 printk("EXEC_DDM_RESET, ");
3384                 break;
3385         case I2O_CMD_DDM_SUSPEND:
3386                 printk("EXEC_DDM_SUSPEND, ");
3387                 break;
3388         case I2O_CMD_DEVICE_ASSIGN:
3389                 printk("EXEC_DEVICE_ASSIGN, ");
3390                 break;
3391         case I2O_CMD_DEVICE_RELEASE:
3392                 printk("EXEC_DEVICE_RELEASE, ");
3393                 break;
3394         case I2O_CMD_HRT_GET:
3395                 printk("EXEC_HRT_GET, ");
3396                 break;
3397         case I2O_CMD_ADAPTER_CLEAR:
3398                 printk("EXEC_IOP_CLEAR, ");
3399                 break;
3400         case I2O_CMD_ADAPTER_CONNECT:
3401                 printk("EXEC_IOP_CONNECT, ");
3402                 break;
3403         case I2O_CMD_ADAPTER_RESET:
3404                 printk("EXEC_IOP_RESET, ");
3405                 break;
3406         case I2O_CMD_LCT_NOTIFY:
3407                 printk("EXEC_LCT_NOTIFY, ");
3408                 break;
3409         case I2O_CMD_OUTBOUND_INIT:
3410                 printk("EXEC_OUTBOUND_INIT, ");
3411                 break;
3412         case I2O_CMD_PATH_ENABLE:
3413                 printk("EXEC_PATH_ENABLE, ");
3414                 break;
3415         case I2O_CMD_PATH_QUIESCE:
3416                 printk("EXEC_PATH_QUIESCE, ");
3417                 break;
3418         case I2O_CMD_PATH_RESET:
3419                 printk("EXEC_PATH_RESET, ");
3420                 break;
3421         case I2O_CMD_STATIC_MF_CREATE:
3422                 printk("EXEC_STATIC_MF_CREATE, ");
3423                 break;
3424         case I2O_CMD_STATIC_MF_RELEASE:
3425                 printk("EXEC_STATIC_MF_RELEASE, ");
3426                 break;
3427         case I2O_CMD_STATUS_GET:
3428                 printk("EXEC_STATUS_GET, ");
3429                 break;
3430         case I2O_CMD_SW_DOWNLOAD:
3431                 printk("EXEC_SW_DOWNLOAD, ");
3432                 break;
3433         case I2O_CMD_SW_UPLOAD:
3434                 printk("EXEC_SW_UPLOAD, ");
3435                 break;
3436         case I2O_CMD_SW_REMOVE:
3437                 printk("EXEC_SW_REMOVE, ");
3438                 break;
3439         case I2O_CMD_SYS_ENABLE:
3440                 printk("EXEC_SYS_ENABLE, ");
3441                 break;
3442         case I2O_CMD_SYS_MODIFY:
3443                 printk("EXEC_SYS_MODIFY, ");
3444                 break;
3445         case I2O_CMD_SYS_QUIESCE:
3446                 printk("EXEC_SYS_QUIESCE, ");
3447                 break;
3448         case I2O_CMD_SYS_TAB_SET:
3449                 printk("EXEC_SYS_TAB_SET, ");
3450                 break;
3451         default:
3452                 printk("Cmd = %#02x, ",cmd);    
3453         }
3454 }
3455
3456 /*
3457  * Used for error reporting/debugging purposes
3458  */
3459 static void i2o_report_lan_cmd(u8 cmd)
3460 {
3461         switch (cmd) {
3462         case LAN_PACKET_SEND:
3463                 printk("LAN_PACKET_SEND, "); 
3464                 break;
3465         case LAN_SDU_SEND:
3466                 printk("LAN_SDU_SEND, ");
3467                 break;
3468         case LAN_RECEIVE_POST:
3469                 printk("LAN_RECEIVE_POST, ");
3470                 break;
3471         case LAN_RESET:
3472                 printk("LAN_RESET, ");
3473                 break;
3474         case LAN_SUSPEND:
3475                 printk("LAN_SUSPEND, ");
3476                 break;
3477         default:
3478                 printk("Cmd = %0#2x, ",cmd);    
3479         }       
3480 }
3481
3482 /*
3483  * Used for error reporting/debugging purposes.
3484  * Report Cmd name, Request status, Detailed Status.
3485  */
3486 void i2o_report_status(const char *severity, const char *str, u32 *msg)
3487 {
3488         u8 cmd = (msg[1]>>24)&0xFF;
3489         u8 req_status = (msg[4]>>24)&0xFF;
3490         u16 detailed_status = msg[4]&0xFFFF;
3491         struct i2o_handler *h = i2o_handlers[msg[2] & (MAX_I2O_MODULES-1)];
3492
3493         if (cmd == I2O_CMD_UTIL_EVT_REGISTER)
3494                 return;                         // No status in this reply
3495
3496         printk("%s%s: ", severity, str);
3497
3498         if (cmd < 0x1F)                         // Utility cmd
3499                 i2o_report_util_cmd(cmd);
3500         
3501         else if (cmd >= 0xA0 && cmd <= 0xEF)    // Executive cmd
3502                 i2o_report_exec_cmd(cmd);
3503         
3504         else if (h->class == I2O_CLASS_LAN && cmd >= 0x30 && cmd <= 0x3F)
3505                 i2o_report_lan_cmd(cmd);        // LAN cmd
3506         else
3507                 printk("Cmd = %0#2x, ", cmd);   // Other cmds
3508
3509         if (msg[0] & MSG_FAIL) {
3510                 i2o_report_fail_status(req_status, msg);
3511                 return;
3512         }
3513         
3514         i2o_report_common_status(req_status);
3515
3516         if (cmd < 0x1F || (cmd >= 0xA0 && cmd <= 0xEF))
3517                 i2o_report_common_dsc(detailed_status); 
3518         else if (h->class == I2O_CLASS_LAN && cmd >= 0x30 && cmd <= 0x3F)
3519                 i2o_report_lan_dsc(detailed_status);
3520         else
3521                 printk(" / DetailedStatus = %0#4x.\n", detailed_status); 
3522 }
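/*
 * Editor's sketch (not part of the driver): how an OSM reply handler might
 * call the exported i2o_report_status() when a reply looks unhealthy.
 * "i2o_example" is a hypothetical module name.
 */
#if 0
static void example_reply(struct i2o_handler *h, struct i2o_controller *c,
			  struct i2o_message *m)
{
	u32 *msg = (u32 *)m;

	/* ReplyStatus sits in bits 31..24 of word 4 of the reply frame. */
	if ((msg[4] >> 24) & 0xFF)
		i2o_report_status(KERN_WARNING, "i2o_example", msg);
}
#endif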
3523
3524 /* Used to dump a message to syslog during debugging */
3525 void i2o_dump_message(u32 *msg)
3526 {
3527 #ifdef DRIVERDEBUG
3528         int i;
3529         printk(KERN_INFO "Dumping I2O message size %d @ %p\n", 
3530                 msg[0]>>16&0xffff, msg);
3531         for(i = 0; i < ((msg[0]>>16)&0xffff); i++)
3532                 printk(KERN_INFO "  msg[%d] = %0#10x\n", i, msg[i]);
3533 #endif
3534 }
3535
3536 /*
3537  * I2O reboot/shutdown notification.
3538  *
3539  * - Call each OSM's reboot notifier (if one exists)
3540  * - Quiesce each IOP in the system
3541  *
3542  * Each IOP has to be quiesced before we can ensure that the system
3543  * can be properly shut down, as a transaction that has already been
3544  * acknowledged still needs to be placed in permanent store on the IOP.
3545  * The SysQuiesce causes the IOP to force all HDMs to complete their
3546  * transactions before returning, so only at that point is it safe
3547  * to continue with the shutdown.
3548  */
3549 static int i2o_reboot_event(struct notifier_block *n, unsigned long code,
3550                             void *p)
3551 {
3552         int i = 0;
3553         struct i2o_controller *c = NULL;
3554
3555         if(code != SYS_RESTART && code != SYS_HALT && code != SYS_POWER_OFF)
3556                 return NOTIFY_DONE;
3557
3558         printk(KERN_INFO "Shutting down I2O system.\n");
3559         printk(KERN_INFO 
3560                 "   This could take a few minutes if there are many devices attached\n");
3561
3562         for(i = 0; i < MAX_I2O_MODULES; i++)
3563         {
3564                 if(i2o_handlers[i] && i2o_handlers[i]->reboot_notify)
3565                         i2o_handlers[i]->reboot_notify();
3566         }
3567
3568         for(c = i2o_controller_chain; c; c = c->next)
3569         {
3570                 if(i2o_quiesce_controller(c))
3571                 {
3572                         printk(KERN_WARNING "i2o: Could not quiesce %s.\n"
3573                                "Verify setup on next system power up.\n",
3574                                c->name);
3575                 }
3576         }
3577
3578         printk(KERN_INFO "I2O system down.\n");
3579         return NOTIFY_DONE;
3580 }
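/*
 * Editor's note: i2o_reboot_event() reaches the kernel reboot chain through
 * a struct notifier_block (i2o_reboot_notifier, defined elsewhere in this
 * file and registered in i2o_core_init() below).  A minimal sketch of that
 * hookup, shown here for illustration only:
 */
#if 0
static struct notifier_block example_reboot_notifier = {
	.notifier_call	= i2o_reboot_event,
	.priority	= 0,
};
/* ... later: register_reboot_notifier(&example_reboot_notifier); */
#endif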
3581
3582
3583
3584
3585 /**
3586  *      i2o_pci_dispose         -       Free bus-specific resources
3587  *      @c: I2O controller
3588  *
3589  *      Disable interrupts and then free interrupt, I/O and mtrr resources 
3590  *      used by this controller. Called by the I2O core on unload.
3591  */
3592  
3593 static void i2o_pci_dispose(struct i2o_controller *c)
3594 {
3595         I2O_IRQ_WRITE32(c,0xFFFFFFFF);
3596         if(c->irq > 0)
3597                 free_irq(c->irq, c);
3598         iounmap(c->base_virt);
3599         if(c->raptor)
3600                 iounmap(c->msg_virt);
3601
3602 #ifdef CONFIG_MTRR
3603         if(c->mtrr_reg0 > 0)
3604                 mtrr_del(c->mtrr_reg0, 0, 0);
3605         if(c->mtrr_reg1 > 0)
3606                 mtrr_del(c->mtrr_reg1, 0, 0);
3607 #endif
3608 }
3609
3610 /**
3611  *      i2o_pci_interrupt       -       Bus specific interrupt handler
3612  *      @irq: interrupt line
3613  *      @dev_id: cookie
3614  *
3615  *      Handle an interrupt from a PCI based I2O controller. This turns out
3616  *      to be rather simple. We keep the controller pointer in the cookie.
3617  */
3618  
3619 static irqreturn_t i2o_pci_interrupt(int irq, void *dev_id, struct pt_regs *r)
3620 {
3621         struct i2o_controller *c = dev_id;
3622         i2o_run_queue(c);
3623         return IRQ_HANDLED;
3624 }       
3625
3626 /**
3627  *      i2o_pci_install         -       Install a PCI i2o controller
3628  *      @dev: PCI device of the I2O controller
3629  *
3630  *      Install a PCI (or in theory AGP) i2o controller. Devices are
3631  *      initialized, configured and registered with the i2o core subsystem. Be
3632  *      very careful with ordering. There may be pending interrupts.
3633  *
3634  *      To Do: Add support for polled controllers
3635  */
3636
3637 int __init i2o_pci_install(struct pci_dev *dev)
3638 {
3639         struct i2o_controller *c=kmalloc(sizeof(struct i2o_controller),
3640                                                 GFP_KERNEL);
3641         void *bar0_virt;
3642         void *bar1_virt;
3643         unsigned long bar0_phys = 0;
3644         unsigned long bar1_phys = 0;
3645         unsigned long bar0_size = 0;
3646         unsigned long bar1_size = 0;
3647         
3648         int i;
3649
3650         if(c==NULL)
3651         {
3652                 printk(KERN_ERR "i2o: Insufficient memory to add controller.\n");
3653                 return -ENOMEM;
3654         }
3655         memset(c, 0, sizeof(*c));
3656
3657         c->irq = -1;
3658         c->dpt = 0;
3659         c->raptor = 0;
3660         c->short_req = 0;
3661         c->pdev = dev;
3662
3663 #if BITS_PER_LONG == 64
3664         c->context_list_lock = SPIN_LOCK_UNLOCKED;
3665 #endif
3666
3667         /*
3668          *      Cards that fall apart if you hit them with large I/O
3669          *      loads...
3670          */
3671          
3672         if(dev->vendor == PCI_VENDOR_ID_NCR && dev->device == 0x0630)
3673         {
3674                 c->short_req = 1;
3675                 printk(KERN_INFO "I2O: Symbios FC920 workarounds activated.\n");
3676         }
3677
3678         if(dev->subsystem_vendor == PCI_VENDOR_ID_PROMISE)
3679         {
3680                 c->promise = 1;
3681                 printk(KERN_INFO "I2O: Promise workarounds activated.\n");
3682         }
3683
3684         /*
3685          *      Cards that go bananas if you quiesce them before you reset
3686          *      them
3687          */
3688          
3689         if(dev->vendor == PCI_VENDOR_ID_DPT) {
3690                 c->dpt=1;
3691                 if(dev->device == 0xA511)
3692                         c->raptor=1;
3693         }
3694
3695         for(i=0; i<6; i++)
3696         {
3697                 /* Skip I/O spaces */
3698                 if(!(pci_resource_flags(dev, i) & IORESOURCE_IO))
3699                 {
3700                         if(!bar0_phys)
3701                         {
3702                                 bar0_phys = pci_resource_start(dev, i);
3703                                 bar0_size = pci_resource_len(dev, i);
3704                                 if(!c->raptor)
3705                                         break;
3706                         }
3707                         else
3708                         {
3709                                 bar1_phys = pci_resource_start(dev, i);
3710                                 bar1_size = pci_resource_len(dev, i);
3711                                 break;
3712                         }
3713                 }
3714         }
3715
3716         if(i==6)
3717         {
3718                 printk(KERN_ERR "i2o: I2O controller has no memory regions defined.\n");
3719                 kfree(c);
3720                 return -EINVAL;
3721         }
3722
3723
3724         /* Map the I2O controller */
3725         if(!c->raptor)
3726                 printk(KERN_INFO "i2o: PCI I2O controller at %08lX size=%ld\n", bar0_phys, bar0_size);
3727         else
3728                 printk(KERN_INFO "i2o: PCI I2O controller\n    BAR0 at 0x%08lX size=%ld\n    BAR1 at 0x%08lX size=%ld\n", bar0_phys, bar0_size, bar1_phys, bar1_size);
3729
3730         bar0_virt = ioremap(bar0_phys, bar0_size);
3731         if(bar0_virt==0)
3732         {
3733                 printk(KERN_ERR "i2o: Unable to map controller.\n");
3734                 kfree(c);
3735                 return -EINVAL;
3736         }
3737
3738         if(c->raptor)
3739         {
3740                 bar1_virt = ioremap(bar1_phys, bar1_size);
3741                 if(bar1_virt==0)
3742                 {
3743                         printk(KERN_ERR "i2o: Unable to map controller.\n");
3744                         kfree(c);
3745                         iounmap(bar0_virt);
3746                         return -EINVAL;
3747                 }
3748         } else {
3749                 bar1_virt = bar0_virt;
3750                 bar1_phys = bar0_phys;
3751                 bar1_size = bar0_size;
3752         }
3753
3754         c->irq_mask = bar0_virt+0x34;
3755         c->post_port = bar0_virt+0x40;
3756         c->reply_port = bar0_virt+0x44;
3757
3758         c->base_phys = bar0_phys;
3759         c->base_virt = bar0_virt;
3760         c->msg_phys = bar1_phys;
3761         c->msg_virt = bar1_virt;
3762         
3763         /* 
3764          * Enable Write Combining MTRR for IOP's memory region
3765          */
3766 #ifdef CONFIG_MTRR
3767         c->mtrr_reg0 = mtrr_add(c->base_phys, bar0_size, MTRR_TYPE_WRCOMB, 1);
3768         /*
3769          * If it is an INTEL i960 I/O processor then set the first 64K to
3770          * Uncacheable since the region contains the Messaging unit which
3771          * shouldn't be cached.
3772          */
3773         c->mtrr_reg1 = -1;
3774         if(dev->vendor == PCI_VENDOR_ID_INTEL || dev->vendor == PCI_VENDOR_ID_DPT)
3775         {
3776                 printk(KERN_INFO "I2O: MTRR workaround for Intel i960 processor\n"); 
3777                 c->mtrr_reg1 =  mtrr_add(c->base_phys, 65536, MTRR_TYPE_UNCACHABLE, 1);
3778                 if(c->mtrr_reg1< 0)
3779                 {
3780                         printk(KERN_INFO "i2o_pci: Error in setting MTRR_TYPE_UNCACHABLE\n");
3781                         mtrr_del(c->mtrr_reg0, c->base_phys, bar0_size);
3782                         c->mtrr_reg0 = -1;
3783                 }
3784         }
3785         if(c->raptor)
3786                 c->mtrr_reg1 = mtrr_add(c->msg_phys, bar1_size, MTRR_TYPE_WRCOMB, 1);
3787
3788 #endif
3789
3790         I2O_IRQ_WRITE32(c,0xFFFFFFFF);
3791
3792         i = i2o_install_controller(c);
3793         
3794         if(i<0)
3795         {
3796                 printk(KERN_ERR "i2o: Unable to install controller.\n");
3797                 iounmap(bar0_virt);
3798                 if(c->raptor)
3799                         iounmap(bar1_virt);
3800                 kfree(c);       /* free c only after c->raptor has been checked */
3801                 return i;
3802         }
3803
3804         c->irq = dev->irq;
3805         if(c->irq)
3806         {
3807                 i=request_irq(dev->irq, i2o_pci_interrupt, SA_SHIRQ,
3808                         c->name, c);
3809                 if(i<0)
3810                 {
3811                         printk(KERN_ERR "%s: unable to allocate interrupt %d.\n",
3812                                 c->name, dev->irq);
3813                         c->irq = -1;
3814                         i2o_delete_controller(c);
3815                         iounmap(bar0_virt);
3816                         if(c->raptor)
3817                                 iounmap(bar1_virt);
3818                         return -EBUSY;
3819                 }
3820         }
3821
3822         printk(KERN_INFO "%s: Installed at IRQ%d\n", c->name, dev->irq);
3823         I2O_IRQ_WRITE32(c,0x0);
3824         c->enabled = 1;
3825         return 0;       
3826 }
3827
3828 /**
3829  *      i2o_pci_scan    -       Scan the pci bus for controllers
3830  *      
3831  *      Scan the PCI devices on the system looking for any device which is a 
3832  *      member of the Intelligent I/O (I2O) class. We attempt to set up each such device
3833  *      and register it with the core.
3834  *
3835  *      Returns the number of controllers registered
3836  *
3837  *      Note: Do not change this to a hot-plug interface. I2O 1.5 itself
3838  *      does not support hot plugging.
3839  */
3840  
3841 int __init i2o_pci_scan(void)
3842 {
3843         struct pci_dev *dev = NULL;
3844         int count=0;
3845         
3846         printk(KERN_INFO "i2o: Checking for PCI I2O controllers...\n");
3847
3848         while ((dev = pci_find_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL)
3849         {
3850                 if((dev->class>>8)!=PCI_CLASS_INTELLIGENT_I2O &&
3851                    (dev->vendor!=PCI_VENDOR_ID_DPT || dev->device!=0xA511))
3852                         continue;
3853
3854                 if((dev->class>>8)==PCI_CLASS_INTELLIGENT_I2O &&
3855                    (dev->class&0xFF)>1)
3856                 {
3857                         printk(KERN_INFO "i2o: I2O Controller found but does not support I2O 1.5 (skipping).\n");
3858                         continue;
3859                 }
3860                 if (pci_enable_device(dev))
3861                         continue;
3862                 printk(KERN_INFO "i2o: I2O controller on bus %d at %d.\n",
3863                         dev->bus->number, dev->devfn);
3864                 if(pci_set_dma_mask(dev, 0xffffffff))
3865                 {
3866                         printk(KERN_WARNING "I2O controller on bus %d at %d : No suitable DMA available\n", dev->bus->number, dev->devfn);
3867                         continue;
3868                 }
3869                 pci_set_master(dev);
3870                 if(i2o_pci_install(dev)==0)
3871                         count++;
3872         }
3873         if(count)
3874                 printk(KERN_INFO "i2o: %d I2O controller%s found and installed.\n", count,
3875                         count==1?"":"s");
3876         return count?count:-ENODEV;
3877 }
3878
3879 static int i2o_core_init(void)
3880 {
3881         printk(KERN_INFO "I2O Core - (C) Copyright 1999 Red Hat Software\n");
3882         if (i2o_install_handler(&i2o_core_handler) < 0)
3883         {
3884                 printk(KERN_ERR "i2o_core: Unable to install core handler.\nI2O stack not loaded!\n");
3885                 return 0;
3886         }
3887
3888         core_context = i2o_core_handler.context;
3889
3890         /*
3891          * Initialize event handling thread
3892          */     
3893
3894         init_MUTEX_LOCKED(&evt_sem);
3895         evt_pid = kernel_thread(i2o_core_evt, &evt_reply, CLONE_SIGHAND);
3896         if(evt_pid < 0)
3897         {
3898                 printk(KERN_ERR "I2O: Could not create event handler kernel thread\n");
3899                 i2o_remove_handler(&i2o_core_handler);
3900                 return 0;
3901         }
3902         else
3903                 printk(KERN_INFO "I2O: Event thread created as pid %d\n", evt_pid);
3904
3905         i2o_pci_scan();
3906         if(i2o_num_controllers)
3907                 i2o_sys_init();
3908
3909         register_reboot_notifier(&i2o_reboot_notifier);
3910
3911         return 0;
3912 }
3913
3914 static void i2o_core_exit(void)
3915 {
3916         int stat;
3917
3918         unregister_reboot_notifier(&i2o_reboot_notifier);
3919
3920         if(i2o_num_controllers)
3921                 i2o_sys_shutdown();
3922
3923         /*
3924          * If this is shutdown time, the thread has already been killed
3925          */
3926         if(evt_running) {
3927                 printk("Terminating i2o threads...");
3928                 stat = kill_proc(evt_pid, SIGKILL, 1);
3929                 if(!stat) {
3930                         printk("waiting...\n");
3931                         wait_for_completion(&evt_dead);
3932                 }
3933                 printk("done.\n");
3934         }
3935         i2o_remove_handler(&i2o_core_handler);
3936 }
3937
3938 module_init(i2o_core_init);
3939 module_exit(i2o_core_exit);
3940
3941 MODULE_PARM(verbose, "i");
3942 MODULE_PARM_DESC(verbose, "Verbose diagnostics");
3943
3944 MODULE_AUTHOR("Red Hat Software");
3945 MODULE_DESCRIPTION("I2O Core");
3946 MODULE_LICENSE("GPL");
3947
3948 EXPORT_SYMBOL(i2o_controller_chain);
3949 EXPORT_SYMBOL(i2o_num_controllers);
3950 EXPORT_SYMBOL(i2o_find_controller);
3951 EXPORT_SYMBOL(i2o_unlock_controller);
3952 EXPORT_SYMBOL(i2o_status_get);
3953 EXPORT_SYMBOL(i2o_install_handler);
3954 EXPORT_SYMBOL(i2o_remove_handler);
3955 EXPORT_SYMBOL(i2o_install_controller);
3956 EXPORT_SYMBOL(i2o_delete_controller);
3957 EXPORT_SYMBOL(i2o_run_queue);
3958 EXPORT_SYMBOL(i2o_claim_device);
3959 EXPORT_SYMBOL(i2o_release_device);
3960 EXPORT_SYMBOL(i2o_device_notify_on);
3961 EXPORT_SYMBOL(i2o_device_notify_off);
3962 EXPORT_SYMBOL(i2o_post_this);
3963 EXPORT_SYMBOL(i2o_post_wait);
3964 EXPORT_SYMBOL(i2o_post_wait_mem);
3965 EXPORT_SYMBOL(i2o_query_scalar);
3966 EXPORT_SYMBOL(i2o_set_scalar);
3967 EXPORT_SYMBOL(i2o_query_table);
3968 EXPORT_SYMBOL(i2o_clear_table);
3969 EXPORT_SYMBOL(i2o_row_add_table);
3970 EXPORT_SYMBOL(i2o_issue_params);
3971 EXPORT_SYMBOL(i2o_event_register);
3972 EXPORT_SYMBOL(i2o_event_ack);
3973 EXPORT_SYMBOL(i2o_report_status);
3974 EXPORT_SYMBOL(i2o_dump_message);
3975 EXPORT_SYMBOL(i2o_get_class_name);
3976 EXPORT_SYMBOL(i2o_context_list_add);
3977 EXPORT_SYMBOL(i2o_context_list_get);
3978 EXPORT_SYMBOL(i2o_context_list_remove);