2 * Core I2O structure management
4 * (C) Copyright 1999-2002 Red Hat Software
6 * Written by Alan Cox, Building Number Three Ltd
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version
11 * 2 of the License, or (at your option) any later version.
13 * A lot of the I2O message side code from this is taken from the
14 * Red Creek RCPCI45 adapter driver by Red Creek Communications
18 * Juha Sievänen <Juha.Sievanen@cs.Helsinki.FI>
19 * Auvo Häkkinen <Auvo.Hakkinen@cs.Helsinki.FI>
20 * Deepak Saxena <deepak@plexity.net>
21 * Boji T Kannanthanam <boji.t.kannanthanam@intel.com>
22 * Alan Cox <alan@redhat.com>:
23 * Ported to Linux 2.5.
24 * Markus Lidel <Markus.Lidel@shadowconnect.com>:
25 * Minor fixes for 2.6.
29 #include <linux/config.h>
30 #include <linux/module.h>
31 #include <linux/kernel.h>
32 #include <linux/pci.h>
34 #include <linux/i2o.h>
36 #include <linux/errno.h>
37 #include <linux/init.h>
38 #include <linux/slab.h>
39 #include <linux/spinlock.h>
40 #include <linux/smp_lock.h>
42 #include <linux/bitops.h>
43 #include <linux/wait.h>
44 #include <linux/delay.h>
45 #include <linux/timer.h>
46 #include <linux/interrupt.h>
47 #include <linux/sched.h>
48 #include <asm/semaphore.h>
49 #include <linux/completion.h>
50 #include <linux/workqueue.h>
53 #include <linux/reboot.h>
63 #define dprintk(s, args...) printk(s, ## args)
65 #define dprintk(s, args...)
69 static struct i2o_handler *i2o_handlers[MAX_I2O_MODULES];
72 static struct i2o_controller *i2o_controllers[MAX_I2O_CONTROLLERS];
73 struct i2o_controller *i2o_controller_chain;
74 int i2o_num_controllers;
76 /* Initiator Context for Core message */
77 static int core_context;
79 /* Initialization && shutdown functions */
80 void i2o_sys_init(void);
81 static void i2o_sys_shutdown(void);
82 static int i2o_reset_controller(struct i2o_controller *);
83 static int i2o_reboot_event(struct notifier_block *, unsigned long , void *);
84 static int i2o_online_controller(struct i2o_controller *);
85 static int i2o_init_outbound_q(struct i2o_controller *);
86 static int i2o_post_outbound_messages(struct i2o_controller *);
89 static void i2o_core_reply(struct i2o_handler *, struct i2o_controller *,
90 struct i2o_message *);
92 /* Various helper functions */
93 static int i2o_lct_get(struct i2o_controller *);
94 static int i2o_lct_notify(struct i2o_controller *);
95 static int i2o_hrt_get(struct i2o_controller *);
97 static int i2o_build_sys_table(void);
98 static int i2o_systab_send(struct i2o_controller *c);
100 /* I2O core event handler */
101 static int i2o_core_evt(void *);
103 static int evt_running;
105 /* Dynamic LCT update handler */
106 static int i2o_dyn_lct(void *);
108 void i2o_report_controller_unit(struct i2o_controller *, struct i2o_device *);
110 static void i2o_pci_dispose(struct i2o_controller *c);
113 * I2O System Table. Contains information about
114 * all the IOPs in the system. Used to inform IOPs
115 * about each other's existence.
117 * sys_tbl_ver is the CurrentChangeIndicator that is
118 * used by IOPs to track changes.
120 static struct i2o_sys_tbl *sys_tbl;
121 static int sys_tbl_ind;
122 static int sys_tbl_len;
125 * This spin lock is used to keep a device from being
126 * added and deleted concurrently across CPUs or interrupts.
127 * This can occur when a user creates a device and immediately
128 * deletes it before the new_dev_notify() handler is called.
130 static spinlock_t i2o_dev_lock = SPIN_LOCK_UNLOCKED;
133 * Structures and definitions for synchronous message posting.
134 * See i2o_post_wait() for description.
136 struct i2o_post_wait_data
138 int *status; /* Pointer to status block on caller stack */
139 int *complete; /* Pointer to completion flag on caller stack */
140 u32 id; /* Unique identifier */
141 wait_queue_head_t *wq; /* Wake up for caller (NULL for dead) */
142 struct i2o_post_wait_data *next; /* Chain */
143 void *mem[2]; /* Memory blocks to recover on failure path */
144 dma_addr_t phys[2]; /* Physical address of blocks to recover */
145 u32 size[2]; /* Size of blocks to recover */
148 static struct i2o_post_wait_data *post_wait_queue;
149 static u32 post_wait_id; // Unique ID for each post_wait
150 static spinlock_t post_wait_lock = SPIN_LOCK_UNLOCKED;
151 static void i2o_post_wait_complete(struct i2o_controller *, u32, int);
153 /* OSM descriptor handler */
154 static struct i2o_handler i2o_core_handler =
156 (void *)i2o_core_reply,
166 * Used when queueing a reply to be handled later
171 struct i2o_controller *iop;
172 u32 msg[MSG_FRAME_SIZE];
174 static struct reply_info evt_reply;
175 static struct reply_info events[I2O_EVT_Q_LEN];
178 static int evt_q_len;
179 #define MODINC(x,y) ((x) = ((x) + 1) % (y))
182 * I2O configuration spinlock. This isn't a big deal for contention
183 * so we have one only
186 static DECLARE_MUTEX(i2o_configuration_lock);
189 * Event spinlock. Used to keep event queue sane and from
190 * handling multiple events simultaneously.
192 static spinlock_t i2o_evt_lock = SPIN_LOCK_UNLOCKED;
195 * Semaphore used to synchronize event handling thread with
199 static DECLARE_MUTEX(evt_sem);
200 static DECLARE_COMPLETION(evt_dead);
201 static DECLARE_WAIT_QUEUE_HEAD(evt_wait);
203 static struct notifier_block i2o_reboot_notifier =
216 #if BITS_PER_LONG == 64
218 * i2o_context_list_add - append an ptr to the context list and return a
219 * matching context id.
220 * @ptr: pointer to add to the context list
221 * @c: controller to which the context list belong
222 * returns context id, which could be used in the transaction context
225 * Because the context field in I2O is only 32-bit large, on 64-bit the
226 * pointer is too large to fit in the context field. The i2o_context_list
227 * functions map pointers to context fields.
229 u32 i2o_context_list_add(void *ptr, struct i2o_controller *c) {
231 struct i2o_context_list_element **entry = &c->context_list;
232 struct i2o_context_list_element *element;
235 spin_lock_irqsave(&c->context_list_lock, flags);
236 while(*entry && ((*entry)->flags & I2O_CONTEXT_LIST_USED)) {
237 if((*entry)->context >= context)
238 context = (*entry)->context + 1;
239 entry = &((*entry)->next);
243 if(unlikely(!context)) {
244 spin_unlock_irqrestore(&c->context_list_lock, flags);
245 printk(KERN_EMERG "i2o_core: context list overflow\n");
249 element = kmalloc(sizeof(struct i2o_context_list_element), GFP_KERNEL);
251 printk(KERN_EMERG "i2o_core: could not allocate memory for context list element\n");
254 element->context = context;
255 element->next = NULL;
261 element->flags = I2O_CONTEXT_LIST_USED;
263 spin_unlock_irqrestore(&c->context_list_lock, flags);
264 dprintk(KERN_DEBUG "i2o_core: add context to list %p -> %d\n", ptr, context);
269 * i2o_context_list_remove - remove a ptr from the context list and return
270 * the matching context id.
271 * @ptr: pointer to be removed from the context list
272 * @c: controller to which the context list belong
273 * returns context id, which could be used in the transaction context
276 u32 i2o_context_list_remove(void *ptr, struct i2o_controller *c) {
277 struct i2o_context_list_element **entry = &c->context_list;
278 struct i2o_context_list_element *element;
282 spin_lock_irqsave(&c->context_list_lock, flags);
283 while(*entry && ((*entry)->ptr != ptr))
284 entry = &((*entry)->next);
286 if(unlikely(!*entry)) {
287 spin_unlock_irqrestore(&c->context_list_lock, flags);
288 printk(KERN_WARNING "i2o_core: could not remove nonexistent ptr %p\n", ptr);
294 context = element->context;
296 element->flags |= I2O_CONTEXT_LIST_DELETED;
298 spin_unlock_irqrestore(&c->context_list_lock, flags);
299 dprintk(KERN_DEBUG "i2o_core: markt as deleted in context list %p -> %d\n", ptr, context);
304 * i2o_context_list_get - get a ptr from the context list and remove it
306 * @context: context id to which the pointer belong
307 * @c: controller to which the context list belong
308 * returns pointer to the matching context id
310 void *i2o_context_list_get(u32 context, struct i2o_controller *c) {
311 struct i2o_context_list_element **entry = &c->context_list;
312 struct i2o_context_list_element *element;
317 spin_lock_irqsave(&c->context_list_lock, flags);
318 while(*entry && ((*entry)->context != context)) {
319 entry = &((*entry)->next);
323 if(unlikely(!*entry)) {
324 spin_unlock_irqrestore(&c->context_list_lock, flags);
325 printk(KERN_WARNING "i2o_core: context id %d not found\n", context);
331 if(count >= I2O_CONTEXT_LIST_MIN_LENGTH) {
332 *entry = (*entry)->next;
336 element->flags &= !I2O_CONTEXT_LIST_USED;
339 spin_unlock_irqrestore(&c->context_list_lock, flags);
340 dprintk(KERN_DEBUG "i2o_core: get ptr from context list %d -> %p\n", context, ptr);
346 * I2O Core reply handler
348 static void i2o_core_reply(struct i2o_handler *h, struct i2o_controller *c,
349 struct i2o_message *m)
353 u32 context = msg[2];
355 if (msg[0] & MSG_FAIL) // Fail bit is set
357 u32 *preserved_msg = (u32*)(c->msg_virt + msg[7]);
359 i2o_report_status(KERN_INFO, "i2o_core", msg);
360 i2o_dump_message(preserved_msg);
362 /* If the failed request needs special treatment,
363 * it should be done here. */
365 /* Release the preserved msg by resubmitting it as a NOP */
367 preserved_msg[0] = cpu_to_le32(THREE_WORD_MSG_SIZE | SGL_OFFSET_0);
368 preserved_msg[1] = cpu_to_le32(I2O_CMD_UTIL_NOP << 24 | HOST_TID << 12 | 0);
369 preserved_msg[2] = 0;
370 i2o_post_message(c, msg[7]);
372 /* If reply to i2o_post_wait failed, return causes a timeout */
378 i2o_report_status(KERN_INFO, "i2o_core", msg);
381 if(msg[2]&0x80000000) // Post wait message
384 status = (msg[4] & 0xFFFF);
386 status = I2O_POST_WAIT_OK;
388 i2o_post_wait_complete(c, context, status);
392 if(m->function == I2O_CMD_UTIL_EVT_REGISTER)
394 memcpy(events[evt_in].msg, msg, (msg[0]>>16)<<2);
395 events[evt_in].iop = c;
397 spin_lock(&i2o_evt_lock);
398 MODINC(evt_in, I2O_EVT_Q_LEN);
399 if(evt_q_len == I2O_EVT_Q_LEN)
400 MODINC(evt_out, I2O_EVT_Q_LEN);
403 spin_unlock(&i2o_evt_lock);
406 wake_up_interruptible(&evt_wait);
410 if(m->function == I2O_CMD_LCT_NOTIFY)
417 * If this happens, we want to dump the message to the syslog so
418 * it can be sent back to the card manufacturer by the end user
419 * to aid in debugging.
422 printk(KERN_WARNING "%s: Unsolicited message reply sent to core!"
423 "Message dumped to syslog\n",
425 i2o_dump_message(msg);
431 * i2o_install_handler - install a message handler
432 * @h: Handler structure
434 * Install an I2O handler - these handle the asynchronous messaging
435 * from the card once it has initialised. If the table of handlers is
436 * full then -ENOSPC is returned. On a success 0 is returned and the
437 * context field is set by the function. The structure is part of the
438 * system from this time onwards. It must not be freed until it has
442 int i2o_install_handler(struct i2o_handler *h)
445 down(&i2o_configuration_lock);
446 for(i=0;i<MAX_I2O_MODULES;i++)
448 if(i2o_handlers[i]==NULL)
452 up(&i2o_configuration_lock);
456 up(&i2o_configuration_lock);
461 * i2o_remove_handler - remove an i2o message handler
464 * Remove a message handler previously installed with i2o_install_handler.
465 * After this function returns the handler object can be freed or re-used
468 int i2o_remove_handler(struct i2o_handler *h)
470 i2o_handlers[h->context]=NULL;
476 * Each I2O controller has a chain of devices on it.
477 * Each device has a pointer to its LCT entry to be used
482 * i2o_install_device - attach a device to a controller
486 * Add a new device to an i2o controller. This can be called from
487 * non interrupt contexts only. It adds the device and marks it as
488 * unclaimed. The device memory becomes part of the kernel and must
489 * be uninstalled before being freed or reused. Zero is returned
493 int i2o_install_device(struct i2o_controller *c, struct i2o_device *d)
497 down(&i2o_configuration_lock);
502 if (c->devices != NULL)
507 for(i = 0; i < I2O_MAX_MANAGERS; i++)
508 d->managers[i] = NULL;
510 up(&i2o_configuration_lock);
514 /* we need this version to call out of i2o_delete_controller */
516 int __i2o_delete_device(struct i2o_device *d)
518 struct i2o_device **p;
521 p=&(d->controller->devices);
524 * Hey we have a driver!
525 * Check to see if the driver wants us to notify it of
526 * device deletion. If it doesn't we assume that it
527 * is unsafe to delete a device with an owner and
532 if(d->owner->dev_del_notify)
534 dprintk(KERN_INFO "Device has owner, notifying\n");
535 d->owner->dev_del_notify(d->controller, d);
539 "Driver \"%s\" did not release device!\n", d->owner->name);
548 * Tell any other users who are talking to this device
549 * that it's going away. We assume that everything works.
551 for(i=0; i < I2O_MAX_MANAGERS; i++)
553 if(d->managers[i] && d->managers[i]->dev_del_notify)
554 d->managers[i]->dev_del_notify(d->controller, d);
570 printk(KERN_ERR "i2o_delete_device: passed invalid device.\n");
575 * i2o_delete_device - remove an i2o device
576 * @d: device to remove
578 * This function unhooks a device from a controller. The device
579 * will not be unhooked if it has an owner who does not wish to free
580 * it, or if the owner lacks a dev_del_notify function. In that case
581 * -EBUSY is returned. On success 0 is returned. Other errors cause
582 * negative errno values to be returned
585 int i2o_delete_device(struct i2o_device *d)
589 down(&i2o_configuration_lock);
595 ret = __i2o_delete_device(d);
597 up(&i2o_configuration_lock);
603 * i2o_install_controller - attach a controller
606 * Add a new controller to the i2o layer. This can be called from
607 * non interrupt contexts only. It adds the controller and marks it as
608 * unused with no devices. If the tables are full or memory allocations
609 * fail then a negative errno code is returned. On success zero is
610 * returned and the controller is bound to the system. The structure
611 * must not be freed or reused until being uninstalled.
614 int i2o_install_controller(struct i2o_controller *c)
617 down(&i2o_configuration_lock);
618 for(i=0;i<MAX_I2O_CONTROLLERS;i++)
620 if(i2o_controllers[i]==NULL)
622 c->dlct = (i2o_lct*)pci_alloc_consistent(c->pdev, 8192, &c->dlct_phys);
625 up(&i2o_configuration_lock);
628 i2o_controllers[i]=c;
630 c->next=i2o_controller_chain;
631 i2o_controller_chain=c;
633 c->page_frame = NULL;
637 c->status_block = NULL;
638 sprintf(c->name, "i2o/iop%d", i);
639 i2o_num_controllers++;
640 init_MUTEX_LOCKED(&c->lct_sem);
641 up(&i2o_configuration_lock);
645 printk(KERN_ERR "No free i2o controller slots.\n");
646 up(&i2o_configuration_lock);
651 * i2o_delete_controller - delete a controller
654 * Remove an i2o controller from the system. If the controller or its
655 * devices are busy then -EBUSY is returned. On a failure a negative
656 * errno code is returned. On success zero is returned.
659 int i2o_delete_controller(struct i2o_controller *c)
661 struct i2o_controller **p;
666 dprintk(KERN_INFO "Deleting controller %s\n", c->name);
669 * Clear event registration as this can cause weird behavior
671 if(c->status_block->iop_state == ADAPTER_STATE_OPERATIONAL)
672 i2o_event_register(c, core_context, 0, 0, 0);
674 down(&i2o_configuration_lock);
675 if((users=atomic_read(&c->users)))
677 dprintk(KERN_INFO "I2O: %d users for controller %s\n", users,
679 up(&i2o_configuration_lock);
684 if(__i2o_delete_device(c->devices)<0)
686 /* Shouldnt happen */
687 I2O_IRQ_WRITE32(c, 0xFFFFFFFF);
689 up(&i2o_configuration_lock);
695 * If this is shutdown time, the thread's already been killed
698 stat = kill_proc(c->lct_pid, SIGKILL, 1);
700 int count = 10 * 100;
701 while(c->lct_running && --count) {
702 current->state = TASK_INTERRUPTIBLE;
708 "%s: LCT thread still running!\n",
713 p=&i2o_controller_chain;
719 /* Ask the IOP to switch to RESET state */
720 i2o_reset_controller(c);
726 up(&i2o_configuration_lock);
730 pci_unmap_single(c->pdev, c->page_frame_map, MSG_POOL_SIZE, PCI_DMA_FROMDEVICE);
731 kfree(c->page_frame);
734 pci_free_consistent(c->pdev, c->hrt_len, c->hrt, c->hrt_phys);
736 pci_free_consistent(c->pdev, c->lct->table_size << 2, c->lct, c->lct_phys);
738 pci_free_consistent(c->pdev, sizeof(i2o_status_block), c->status_block, c->status_block_phys);
740 pci_free_consistent(c->pdev, 8192, c->dlct, c->dlct_phys);
742 i2o_controllers[c->unit]=NULL;
743 memcpy(name, c->name, strlen(c->name)+1);
745 dprintk(KERN_INFO "%s: Deleted from controller chain.\n", name);
747 i2o_num_controllers--;
752 up(&i2o_configuration_lock);
753 printk(KERN_ERR "i2o_delete_controller: bad pointer!\n");
758 * i2o_unlock_controller - unlock a controller
759 * @c: controller to unlock
761 * Take a lock on an i2o controller. This prevents it being deleted.
762 * i2o controllers are not refcounted so a deletion of an in use device
763 * will fail, not take affect on the last dereference.
766 void i2o_unlock_controller(struct i2o_controller *c)
768 atomic_dec(&c->users);
772 * i2o_find_controller - return a locked controller
773 * @n: controller number
775 * Returns a pointer to the controller object. The controller is locked
776 * on return. NULL is returned if the controller is not found.
779 struct i2o_controller *i2o_find_controller(int n)
781 struct i2o_controller *c;
783 if(n<0 || n>=MAX_I2O_CONTROLLERS)
786 down(&i2o_configuration_lock);
787 c=i2o_controllers[n];
789 atomic_inc(&c->users);
790 up(&i2o_configuration_lock);
795 * i2o_issue_claim - claim or release a device
797 * @c: controller to claim for
799 * @type: type of claim
801 * Issue I2O UTIL_CLAIM and UTIL_RELEASE messages. The message to be sent
802 * is set by cmd. The tid is the task id of the object to claim and the
803 * type is the claim type (see the i2o standard)
805 * Zero is returned on success.
808 static int i2o_issue_claim(u32 cmd, struct i2o_controller *c, int tid, u32 type)
812 msg[0] = FIVE_WORD_MSG_SIZE | SGL_OFFSET_0;
813 msg[1] = cmd << 24 | HOST_TID<<12 | tid;
817 return i2o_post_wait(c, msg, sizeof(msg), 60);
821 * i2o_claim_device - claim a device for use by an OSM
822 * @d: device to claim
823 * @h: handler for this device
825 * Do the leg work to assign a device to a given OSM on Linux. The
826 * kernel updates the internal handler data for the device and then
827 * performs an I2O claim for the device, attempting to claim the
828 * device as primary. If the attempt fails a negative errno code
829 * is returned. On success zero is returned.
832 int i2o_claim_device(struct i2o_device *d, struct i2o_handler *h)
834 down(&i2o_configuration_lock);
836 printk(KERN_INFO "Device claim called, but dev already owned by %s!",
838 up(&i2o_configuration_lock);
843 if(i2o_issue_claim(I2O_CMD_UTIL_CLAIM ,d->controller,d->lct_data.tid,
849 up(&i2o_configuration_lock);
854 * i2o_release_device - release a device that the OSM is using
855 * @d: device to claim
856 * @h: handler for this device
858 * Drop a claim by an OSM on a given I2O device. The handler is cleared
859 * and 0 is returned on success.
861 * AC - some devices seem to want to refuse an unclaim until they have
862 * finished internal processing. It makes sense since you don't want a
863 * new device to go reconfiguring the entire system until you are done.
864 * Thus we are prepared to wait briefly.
867 int i2o_release_device(struct i2o_device *d, struct i2o_handler *h)
872 down(&i2o_configuration_lock);
874 printk(KERN_INFO "Claim release called, but not owned by %s!\n",
876 up(&i2o_configuration_lock);
880 for(tries=0;tries<10;tries++)
885 * If the controller takes a nonblocking approach to
886 * releases we have to sleep/poll for a few times.
889 if((err=i2o_issue_claim(I2O_CMD_UTIL_RELEASE, d->controller, d->lct_data.tid, I2O_CLAIM_PRIMARY)) )
892 current->state = TASK_UNINTERRUPTIBLE;
893 schedule_timeout(HZ);
901 up(&i2o_configuration_lock);
906 * i2o_device_notify_on - Enable deletion notifiers
907 * @d: device for notification
908 * @h: handler to install
910 * Called by OSMs to let the core know that they want to be
911 * notified if the given device is deleted from the system.
914 int i2o_device_notify_on(struct i2o_device *d, struct i2o_handler *h)
918 if(d->num_managers == I2O_MAX_MANAGERS)
921 for(i = 0; i < I2O_MAX_MANAGERS; i++)
936 * i2o_device_notify_off - Remove deletion notifiers
937 * @d: device for notification
938 * @h: handler to remove
940 * Called by OSMs to let the core know that they no longer
941 * are interested in the fate of the given device.
943 int i2o_device_notify_off(struct i2o_device *d, struct i2o_handler *h)
947 for(i=0; i < I2O_MAX_MANAGERS; i++)
949 if(d->managers[i] == h)
951 d->managers[i] = NULL;
961 * i2o_event_register - register interest in an event
962 * @c: Controller to register interest with
964 * @init_context: initiator context to use with this notifier
965 * @tr_context: transaction context to use with this notifier
966 * @evt_mask: mask of events
968 * Create and posts an event registration message to the task. No reply
969 * is waited for, or expected. Errors in posting will be reported.
972 int i2o_event_register(struct i2o_controller *c, u32 tid,
973 u32 init_context, u32 tr_context, u32 evt_mask)
975 u32 msg[5]; // Not performance critical, so we just
976 // i2o_post_this it instead of building it
979 msg[0] = FIVE_WORD_MSG_SIZE|SGL_OFFSET_0;
980 msg[1] = I2O_CMD_UTIL_EVT_REGISTER<<24 | HOST_TID<<12 | tid;
981 msg[2] = init_context;
985 return i2o_post_this(c, msg, sizeof(msg));
989 * i2o_event_ack - acknowledge an event
991 * @msg: pointer to the UTIL_EVENT_REGISTER reply we received
993 * We just take a pointer to the original UTIL_EVENT_REGISTER reply
994 * message and change the function code since that's what spec
995 * describes an EventAck message looking like.
998 int i2o_event_ack(struct i2o_controller *c, u32 *msg)
1000 struct i2o_message *m = (struct i2o_message *)msg;
1002 m->function = I2O_CMD_UTIL_EVT_ACK;
1004 return i2o_post_wait(c, msg, m->size * 4, 2);
1008 * Core event handler. Runs as a separate thread and is woken
1009 * up whenever there is an Executive class event.
1011 static int i2o_core_evt(void *reply_data)
1013 struct reply_info *reply = (struct reply_info *) reply_data;
1014 u32 *msg = reply->msg;
1015 struct i2o_controller *c = NULL;
1016 unsigned long flags;
1018 daemonize("i2oevtd");
1019 allow_signal(SIGKILL);
1025 if(down_interruptible(&evt_sem))
1027 dprintk(KERN_INFO "I2O event thread dead\n");
1028 printk("exiting...");
1030 complete_and_exit(&evt_dead, 0);
1034 * Copy the data out of the queue so that we don't have to lock
1035 * around the whole function and just around the qlen update
1037 spin_lock_irqsave(&i2o_evt_lock, flags);
1038 memcpy(reply, &events[evt_out], sizeof(struct reply_info));
1039 MODINC(evt_out, I2O_EVT_Q_LEN);
1041 spin_unlock_irqrestore(&i2o_evt_lock, flags);
1044 dprintk(KERN_INFO "I2O IRTOS EVENT: iop%d, event %#10x\n", c->unit, msg[4]);
1047 * We do not attempt to delete/quiesce/etc. the controller if
1048 * some sort of error indidication occurs. We may want to do
1049 * so in the future, but for now we just let the user deal with
1050 * it. One reason for this is that what to do with an error
1051 * or when to send what ærror is not really agreed on, so
1052 * we get errors that may not be fatal but just look like they
1053 * are...so let the user deal with it.
1057 case I2O_EVT_IND_EXEC_RESOURCE_LIMITS:
1058 printk(KERN_ERR "%s: Out of resources\n", c->name);
1061 case I2O_EVT_IND_EXEC_POWER_FAIL:
1062 printk(KERN_ERR "%s: Power failure\n", c->name);
1065 case I2O_EVT_IND_EXEC_HW_FAIL:
1073 "Code Execution Exception",
1074 "Watchdog Timer Expired"
1078 printk(KERN_ERR "%s: Hardware Failure: %s\n",
1079 c->name, fail[msg[5]]);
1081 printk(KERN_ERR "%s: Unknown Hardware Failure\n", c->name);
1087 * New device created
1088 * - Create a new i2o_device entry
1089 * - Inform all interested drivers about this device's existence
1091 case I2O_EVT_IND_EXEC_NEW_LCT_ENTRY:
1093 struct i2o_device *d = (struct i2o_device *)
1094 kmalloc(sizeof(struct i2o_device), GFP_KERNEL);
1098 printk(KERN_EMERG "i2oevtd: out of memory\n");
1101 memcpy(&d->lct_data, &msg[5], sizeof(i2o_lct_entry));
1107 i2o_report_controller_unit(c, d);
1108 i2o_install_device(c,d);
1110 for(i = 0; i < MAX_I2O_MODULES; i++)
1112 if(i2o_handlers[i] &&
1113 i2o_handlers[i]->new_dev_notify &&
1114 (i2o_handlers[i]->class&d->lct_data.class_id))
1116 spin_lock(&i2o_dev_lock);
1117 i2o_handlers[i]->new_dev_notify(c,d);
1118 spin_unlock(&i2o_dev_lock);
1126 * LCT entry for a device has been modified, so update it
1129 case I2O_EVT_IND_EXEC_MODIFIED_LCT:
1131 struct i2o_device *d;
1132 i2o_lct_entry *new_lct = (i2o_lct_entry *)&msg[5];
1134 for(d = c->devices; d; d = d->next)
1136 if(d->lct_data.tid == new_lct->tid)
1138 memcpy(&d->lct_data, new_lct, sizeof(i2o_lct_entry));
1145 case I2O_EVT_IND_CONFIGURATION_FLAG:
1146 printk(KERN_WARNING "%s requires user configuration\n", c->name);
1149 case I2O_EVT_IND_GENERAL_WARNING:
1150 printk(KERN_WARNING "%s: Warning notification received!"
1151 "Check configuration for errors!\n", c->name);
1154 case I2O_EVT_IND_EVT_MASK_MODIFIED:
1155 /* Well I guess that was us hey .. */
1159 printk(KERN_WARNING "%s: No handler for event (0x%08x)\n", c->name, msg[4]);
1168 * Dynamic LCT update. This compares the LCT with the currently
1169 * installed devices to check for device deletions..this needed b/c there
1170 * is no DELETED_LCT_ENTRY EventIndicator for the Executive class so
1171 * we can't just have the event handler do this...annoying
1173 * This is a hole in the spec that will hopefully be fixed someday.
1175 static int i2o_dyn_lct(void *foo)
1177 struct i2o_controller *c = (struct i2o_controller *)foo;
1178 struct i2o_device *d = NULL;
1179 struct i2o_device *d1 = NULL;
1185 daemonize("iop%d_lctd", c->unit);
1186 allow_signal(SIGKILL);
1192 down_interruptible(&c->lct_sem);
1193 if(signal_pending(current))
1195 dprintk(KERN_ERR "%s: LCT thread dead\n", c->name);
1200 entries = c->dlct->table_size;
1204 dprintk(KERN_INFO "%s: Dynamic LCT Update\n",c->name);
1205 dprintk(KERN_INFO "%s: Dynamic LCT contains %d entries\n", c->name, entries);
1209 printk(KERN_INFO "%s: Empty LCT???\n", c->name);
1214 * Loop through all the devices on the IOP looking for their
1215 * LCT data in the LCT. We assume that TIDs are not repeated.
1216 * as that is the only way to really tell. It's been confirmed
1217 * by the IRTOS vendor(s?) that TIDs are not reused until they
1218 * wrap arround(4096), and I doubt a system will up long enough
1219 * to create/delete that many devices.
1221 for(d = c->devices; d; )
1226 for(i = 0; i < entries; i++)
1228 if(d->lct_data.tid == c->dlct->lct_entry[i].tid)
1236 dprintk(KERN_INFO "i2o_core: Deleted device!\n");
1237 spin_lock(&i2o_dev_lock);
1238 i2o_delete_device(d);
1239 spin_unlock(&i2o_dev_lock);
1245 * Tell LCT to renotify us next time there is a change
1250 * Copy new LCT into public LCT
1252 * Possible race if someone is reading LCT while we are copying
1253 * over it. If this happens, we'll fix it then. but I doubt that
1254 * the LCT will get updated often enough or will get read by
1255 * a user often enough to worry.
1257 if(c->lct->table_size < c->dlct->table_size)
1261 c->lct = pci_alloc_consistent(c->pdev, c->dlct->table_size<<2, &phys);
1264 printk(KERN_ERR "%s: No memory for LCT!\n", c->name);
1268 pci_free_consistent(tmp, c->lct->table_size << 2, c->lct, c->lct_phys);
1271 memcpy(c->lct, c->dlct, c->dlct->table_size<<2);
1278 * i2o_run_queue - process pending events on a controller
1279 * @c: controller to process
1281 * This is called by the bus specific driver layer when an interrupt
1282 * or poll of this card interface is desired.
1285 void i2o_run_queue(struct i2o_controller *c)
1287 struct i2o_message *m;
1292 * Old 960 steppings had a bug in the I2O unit that caused
1293 * the queue to appear empty when it wasn't.
1295 if((mv=I2O_REPLY_READ32(c))==0xFFFFFFFF)
1296 mv=I2O_REPLY_READ32(c);
1298 while(mv!=0xFFFFFFFF)
1300 struct i2o_handler *i;
1301 /* Map the message from the page frame map to kernel virtual */
1302 /* m=(struct i2o_message *)(mv - (unsigned long)c->page_frame_map + (unsigned long)c->page_frame); */
1303 m=(struct i2o_message *)bus_to_virt(mv);
1307 * Ensure this message is seen coherently but cachably by
1311 pci_dma_sync_single_for_cpu(c->pdev, c->page_frame_map, MSG_FRAME_SIZE, PCI_DMA_FROMDEVICE);
1317 i=i2o_handlers[m->initiator_context&(MAX_I2O_MODULES-1)];
1322 printk(KERN_WARNING "I2O: Spurious reply to handler %d\n",
1323 m->initiator_context&(MAX_I2O_MODULES-1));
1325 i2o_flush_reply(c,mv);
1328 /* That 960 bug again... */
1329 if((mv=I2O_REPLY_READ32(c))==0xFFFFFFFF)
1330 mv=I2O_REPLY_READ32(c);
1336 * i2o_get_class_name - do i2o class name lookup
1337 * @class: class number
1339 * Return a descriptive string for an i2o class
1342 const char *i2o_get_class_name(int class)
1345 static char *i2o_class_name[] = {
1347 "Device Driver Module",
1352 "Fibre Channel Port",
1353 "Fibre Channel Device",
1357 "Floppy Controller",
1359 "Secondary Bus Port",
1360 "Peer Transport Agent",
1367 case I2O_CLASS_EXECUTIVE:
1371 case I2O_CLASS_RANDOM_BLOCK_STORAGE:
1373 case I2O_CLASS_SEQUENTIAL_STORAGE:
1379 case I2O_CLASS_FIBRE_CHANNEL_PORT:
1381 case I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL:
1383 case I2O_CLASS_SCSI_PERIPHERAL:
1385 case I2O_CLASS_ATE_PORT:
1387 case I2O_CLASS_ATE_PERIPHERAL:
1389 case I2O_CLASS_FLOPPY_CONTROLLER:
1391 case I2O_CLASS_FLOPPY_DEVICE:
1393 case I2O_CLASS_BUS_ADAPTER_PORT:
1395 case I2O_CLASS_PEER_TRANSPORT_AGENT:
1397 case I2O_CLASS_PEER_TRANSPORT:
1401 return i2o_class_name[idx];
1406 * i2o_wait_message - obtain an i2o message from the IOP
1410 * This function waits up to 5 seconds for a message slot to be
1411 * available. If no message is available it prints an error message
1412 * that is expected to be what the message will be used for (eg
1413 * "get_status"). 0xFFFFFFFF is returned on a failure.
1415 * On a success the message is returned. This is the physical page
1416 * frame offset address from the read port. (See the i2o spec)
1419 u32 i2o_wait_message(struct i2o_controller *c, char *why)
1423 while((m=I2O_POST_READ32(c))==0xFFFFFFFF)
1425 if((jiffies-time)>=5*HZ)
1427 dprintk(KERN_ERR "%s: Timeout waiting for message frame to send %s.\n",
1438 * i2o_report_controller_unit - print information about a tid
1442 * Dump an information block associated with a given unit (TID). The
1443 * tables are read and a block of text is output to printk that is
1444 * formatted intended for the user.
1447 void i2o_report_controller_unit(struct i2o_controller *c, struct i2o_device *d)
1452 int unit = d->lct_data.tid;
1457 printk(KERN_INFO "Target ID %d.\n", unit);
1458 if((ret=i2o_query_scalar(c, unit, 0xF100, 3, buf, 16))>=0)
1461 printk(KERN_INFO " Vendor: %s\n", buf);
1463 if((ret=i2o_query_scalar(c, unit, 0xF100, 4, buf, 16))>=0)
1466 printk(KERN_INFO " Device: %s\n", buf);
1468 if(i2o_query_scalar(c, unit, 0xF100, 5, buf, 16)>=0)
1471 printk(KERN_INFO " Description: %s\n", buf);
1473 if((ret=i2o_query_scalar(c, unit, 0xF100, 6, buf, 8))>=0)
1476 printk(KERN_INFO " Rev: %s\n", buf);
1479 printk(KERN_INFO " Class: ");
1480 sprintf(str, "%-21s", i2o_get_class_name(d->lct_data.class_id));
1481 printk("%s\n", str);
1483 printk(KERN_INFO " Subclass: 0x%04X\n", d->lct_data.sub_class);
1484 printk(KERN_INFO " Flags: ");
1486 if(d->lct_data.device_flags&(1<<0))
1487 printk("C"); // ConfigDialog requested
1488 if(d->lct_data.device_flags&(1<<1))
1489 printk("U"); // Multi-user capable
1490 if(!(d->lct_data.device_flags&(1<<4)))
1491 printk("P"); // Peer service enabled!
1492 if(!(d->lct_data.device_flags&(1<<5)))
1493 printk("M"); // Mgmt service enabled!
1500 * Parse the hardware resource table. Right now we print it out
1501 * and don't do a lot with it. We should collate these and then
1502 * interact with the Linux resource allocation block.
1504 * Lets prove we can read it first eh ?
1506 * This is full of endianisms!
/*
 * Walk the Hardware Resource Table already fetched into c->hrt and
 * log each adapter entry: TID, state flags, and per-bus-type address
 * decoding (local/ISA/EISA/MCA/PCI). Purely diagnostic output.
 * NOTE(review): excerpt elides interior lines (version check target,
 * switch header, loop advance); comments only added.
 */
1509 static int i2o_parse_hrt(struct i2o_controller *c)
1512 	u32 *rows=(u32*)c->hrt;
1522 		printk(KERN_ERR "%s: HRT table for controller is too new a version.\n",
/* Entry count is stored little-endian in the header bytes. */
1527 	count=p[0]|(p[1]<<8);
1530 	printk(KERN_INFO "%s: HRT has %d entries of %d bytes each.\n",
1531 		c->name, count, length<<2);
1535 	for(i=0;i<count;i++)
1537 		printk(KERN_INFO "Adapter %08X: ", rows[0]);
1542 		printk("TID %04X:[", state&0xFFF);
1545 			printk("H");		/* Hidden */
1548 			printk("P");		/* Present */
1550 				printk("C");	/* Controlled */
1553 			printk("*");		/* Hard */
1560 			/* Adapter private bus - easy */
1561 			printk("Local bus %d: I/O at 0x%04X Mem 0x%08X",
1562 				p[2], d[1]<<8|d[0], *(u32 *)(d+4));
1566 			printk("ISA %d: CSN %d I/O at 0x%04X Mem 0x%08X",
1567 				p[2], d[2], d[1]<<8|d[0], *(u32 *)(d+4));
1570 		case 2:	/* EISA bus */
1571 			printk("EISA %d: Slot %d I/O at 0x%04X Mem 0x%08X",
1572 				p[2], d[3], d[1]<<8|d[0], *(u32 *)(d+4));
1575 		case 3:	/* MCA bus */
1576 			printk("MCA %d: Slot %d I/O at 0x%04X Mem 0x%08X",
1577 				p[2], d[3], d[1]<<8|d[0], *(u32 *)(d+4));
1580 		case 4:	/* PCI bus */
1581 			printk("PCI %d: Bus %d Device %d Function %d",
1582 				p[2], d[2], d[1], d[0]);
1585 		case 0x80:	/* Other */
1587 			printk("Unsupported bus type.");
1598 * The logical configuration table tells us what we can talk to
1599 * on the board. Most of the stuff isn't interesting to us.
/*
 * Walk the Logical Configuration Table in c->lct: for every entry
 * allocate an i2o_device, copy the LCT entry into it, log it, and
 * register it via i2o_install_device().
 * NOTE(review): excerpt elides interior lines (loop header, error
 * returns); comments only added.
 */
1602 static int i2o_parse_lct(struct i2o_controller *c)
1607 	struct i2o_device *d;
1608 	i2o_lct *lct = c->lct;
1611 		printk(KERN_ERR "%s: LCT is empty???\n", c->name);
1615 	max = lct->table_size;
1619 	printk(KERN_INFO "%s: LCT has %d entries.\n", c->name, max);
1621 	if(lct->iop_flags&(1<<0))
1622 		printk(KERN_WARNING "%s: Configuration dialog desired.\n", c->name);
1626 		d = (struct i2o_device *)kmalloc(sizeof(struct i2o_device), GFP_KERNEL);
1629 			printk(KERN_CRIT "i2o_core: Out of memory for I2O device data.\n");
/* Snapshot the LCT entry; d owns its private copy from here on. */
1636 		memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));
1639 		tid = d->lct_data.tid;
1641 		i2o_report_controller_unit(c, d);
1643 		i2o_install_device(c, d);
1650 * i2o_quiesce_controller - quiesce controller
1653 * Quiesce an IOP. Causes IOP to make external operation quiescent
1654 * (i2o 'READY' state). Internal operation of the IOP continues normally.
/*
 * Quiesce an IOP (i2o READY state): external operation stops, internal
 * operation continues. Sends I2O_CMD_SYS_QUIESCE and refreshes the
 * cached status block on success.
 * NOTE(review): excerpt elides interior lines; comments only added.
 */
1657 int i2o_quiesce_controller(struct i2o_controller *c)
1664 	/* SysQuiesce discarded if IOP not in READY or OPERATIONAL state */
1666 	if ((c->status_block->iop_state != ADAPTER_STATE_READY) &&
1667 		(c->status_block->iop_state != ADAPTER_STATE_OPERATIONAL))
1672 	msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
1673 	msg[1] = I2O_CMD_SYS_QUIESCE<<24|HOST_TID<<12|ADAPTER_TID;
1676 	/* Long timeout needed for quiesce if lots of devices */
1678 	if ((ret = i2o_post_wait(c, msg, sizeof(msg), 240)))
1679 		printk(KERN_INFO "%s: Unable to quiesce (status=%#x).\n",
1682 		dprintk(KERN_INFO "%s: Quiesced.\n", c->name);
1684 	i2o_status_get(c); // Entered READY state
1689 * i2o_enable_controller - move controller from ready to operational
1692 * Enable IOP. This allows the IOP to resume external operations and
1693 * reverses the effect of a quiesce. In the event of an error a negative
1694 * errno code is returned.
/*
 * Move an IOP from READY back to OPERATIONAL (reverses a quiesce)
 * by sending I2O_CMD_SYS_ENABLE, then refresh the status block.
 * NOTE(review): excerpt elides interior lines; comments only added.
 */
1697 int i2o_enable_controller(struct i2o_controller *c)
1704 	/* Enable only allowed on READY state */
1705 	if(c->status_block->iop_state != ADAPTER_STATE_READY)
1708 	msg[0]=FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
1709 	msg[1]=I2O_CMD_SYS_ENABLE<<24|HOST_TID<<12|ADAPTER_TID;
1711 	/* How long of a timeout do we need? */
1713 	if ((ret = i2o_post_wait(c, msg, sizeof(msg), 240)))
1714 		printk(KERN_ERR "%s: Could not enable (status=%#x).\n",
1717 		dprintk(KERN_INFO "%s: Enabled.\n", c->name);
1719 	i2o_status_get(c); // entered OPERATIONAL state
1725 * i2o_clear_controller - clear a controller
1728 * Clear an IOP to HOLD state, ie. terminate external operations, clear all
1729 * input queues and prepare for a system restart. IOP's internal operation
1730 * continues normally and the outbound queue is alive.
1731 * The IOP is not expected to rebuild its LCT.
/*
 * Clear one IOP to HOLD state (I2O_CMD_ADAPTER_CLEAR). All other IOPs
 * on the chain are quiesced first and re-enabled afterwards, since
 * peer IOPs must not talk to the target while it is being cleared.
 * NOTE(review): excerpt elides interior lines; comments only added.
 */
1734 int i2o_clear_controller(struct i2o_controller *c)
1736 	struct i2o_controller *iop;
1740 	/* Quiesce all IOPs first */
1742 	for (iop = i2o_controller_chain; iop; iop = iop->next)
1743 		i2o_quiesce_controller(iop);
1745 	msg[0]=FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
1746 	msg[1]=I2O_CMD_ADAPTER_CLEAR<<24|HOST_TID<<12|ADAPTER_TID;
1749 	if ((ret=i2o_post_wait(c, msg, sizeof(msg), 30)))
1750 		printk(KERN_INFO "%s: Unable to clear (status=%#x).\n",
1753 		dprintk(KERN_INFO "%s: Cleared.\n",c->name);
1757 	/* Enable other IOPs */
1759 	for (iop = i2o_controller_chain; iop; iop = iop->next)
1761 		i2o_enable_controller(iop);
1768 * i2o_reset_controller - reset an IOP
1769 * @c: controller to reset
1771 * Reset the IOP into INIT state and wait until IOP gets into RESET state.
1772 * Terminate all external operations, clear IOP's inbound and outbound
1773 * queues, terminate all DDMs, and reload the IOP's operating environment
1774 * and all local DDMs. The IOP rebuilds its LCT.
/*
 * Hard-reset one IOP via I2O_CMD_ADAPTER_RESET and wait for it to
 * reboot. Other IOPs on the chain are quiesced first and re-enabled
 * at the end. The 4-byte DMA status word is deliberately LEAKED on
 * timeout paths (see comments below) because the controller may still
 * write to it later.
 * NOTE(review): excerpt elides interior lines (declarations, waits,
 * returns); comments only added.
 */
1777 static int i2o_reset_controller(struct i2o_controller *c)
1779 	struct i2o_controller *iop;
1782 	dma_addr_t status_phys;
1786 	/* Quiesce all IOPs first */
1788 	for (iop = i2o_controller_chain; iop; iop = iop->next)
1791 			i2o_quiesce_controller(iop);
1794 	m=i2o_wait_message(c, "AdapterReset");
1797 	msg=(u32 *)(c->msg_virt+m);
/* Single DMA-coherent status word the IOP writes its reply code into. */
1799 	status = pci_alloc_consistent(c->pdev, 4, &status_phys);
1800 	if(status == NULL) {
1801 		printk(KERN_ERR "IOP reset failed - no free memory.\n");
1804 	memset(status, 0, 4);
1806 	msg[0]=EIGHT_WORD_MSG_SIZE|SGL_OFFSET_0;
1807 	msg[1]=I2O_CMD_ADAPTER_RESET<<24|HOST_TID<<12|ADAPTER_TID;
1808 	msg[2]=core_context;
1813 	msg[7]=0;	/* 64bit host FIXME */
1815 	i2o_post_message(c,m);
1817 	/* Wait for a reply */
1821 		if((jiffies-time)>=20*HZ)
1823 			printk(KERN_ERR "IOP reset timeout.\n");
1824 			/* The controller still may respond and overwrite
1825 			 * status_phys, LEAK it to prevent memory corruption.
1833 	if (*status==I2O_CMD_IN_PROGRESS)
1836 	 * Once the reset is sent, the IOP goes into the INIT state
1837 	 * which is indeterminate.  We need to wait until the IOP
1838 	 * has rebooted before we can let the system talk to
1839 	 * it. We read the inbound Free_List until a message is
1840 	 * available. If we can't read one in the given amount of
1841 	 * time, we assume the IOP could not reboot properly.
1844 		dprintk(KERN_INFO "%s: Reset in progress, waiting for reboot...\n",
1848 		m = I2O_POST_READ32(c);
1849 		while(m == 0XFFFFFFFF)
1851 			if((jiffies-time) >= 30*HZ)
1853 				printk(KERN_ERR "%s: Timeout waiting for IOP reset.\n",
1855 				/* The controller still may respond and
1856 				 * overwrite status_phys, LEAK it to prevent
1857 				 * memory corruption.
1863 			m = I2O_POST_READ32(c);
/* Discard the frame we pulled; it was only used to detect liveness. */
1865 		i2o_flush_reply(c,m);
1868 	/* If IopReset was rejected or didn't perform reset, try IopClear */
1871 	if (status[0] == I2O_CMD_REJECTED ||
1872 		c->status_block->iop_state != ADAPTER_STATE_RESET)
1874 		printk(KERN_WARNING "%s: Reset rejected, trying to clear\n",c->name);
1875 		i2o_clear_controller(c);
1878 		dprintk(KERN_INFO "%s: Reset completed.\n", c->name);
1880 	/* Enable other IOPs */
1882 	for (iop = i2o_controller_chain; iop; iop = iop->next)
1884 			i2o_enable_controller(iop);
1886 	pci_free_consistent(c->pdev, 4, status, status_phys);
1892 * i2o_status_get - get the status block for the IOP
1895 * Issue a status query on the controller. This updates the
1896 * attached status_block. If the controller fails to reply or an
1897 * error occurs then a negative errno code is returned. On success
1898  *	zero is returned and the status_block is updated.
/*
 * Issue I2O_CMD_STATUS_GET and busy-wait for the IOP to DMA its
 * status block into c->status_block (allocated lazily here).
 * Completion is detected by polling the last byte of the 88-byte
 * block, which the IOP sets to 0xFF.
 * NOTE(review): excerpt elides interior lines; comments only added.
 */
1901 int i2o_status_get(struct i2o_controller *c)
/* Lazy one-time allocation of the DMA-coherent status block. */
1908 	if (c->status_block == NULL)
1910 		c->status_block = (i2o_status_block *)
1911 			pci_alloc_consistent(c->pdev, sizeof(i2o_status_block), &c->status_block_phys);
1912 		if (c->status_block == NULL)
1914 			printk(KERN_CRIT "%s: Get Status Block failed; Out of memory.\n",
1920 	status_block = (u8*)c->status_block;
1921 	memset(c->status_block,0,sizeof(i2o_status_block));
1923 	m=i2o_wait_message(c, "StatusGet");
1926 	msg=(u32 *)(c->msg_virt+m);
1928 	msg[0]=NINE_WORD_MSG_SIZE|SGL_OFFSET_0;
1929 	msg[1]=I2O_CMD_STATUS_GET<<24|HOST_TID<<12|ADAPTER_TID;
1930 	msg[2]=core_context;
1934 	msg[6]=c->status_block_phys;
1935 	msg[7]=0;   /* 64bit host FIXME */
1936 	msg[8]=sizeof(i2o_status_block); /* always 88 bytes */
1938 	i2o_post_message(c,m);
1940 	/* Wait for a reply */
/* Byte 87 (last byte of the 88-byte block) flips to 0xFF on completion. */
1943 	while(status_block[87]!=0xFF)
1945 		if((jiffies-time)>=5*HZ)
1947 			printk(KERN_ERR "%s: Get status timeout.\n",c->name);
1955 	printk(KERN_INFO "%s: State = ", c->name);
1956 	switch (c->status_block->iop_state) {
1970 			printk("OPERATIONAL\n");
1976 			printk("FAULTED\n");
1979 			printk("%x (unknown !!)\n",c->status_block->iop_state);
1987 * Get the Hardware Resource Table for the device.
1988 * The HRT contains information about possible hidden devices
1989 * but is mostly useless to us
/*
 * Fetch the Hardware Resource Table. First request uses a header-sized
 * buffer; if the IOP reports a larger table, the buffer is reallocated
 * and the request retried (up to 3 loops). On -ETIMEDOUT the buffer is
 * left to i2o_post_wait_mem's completion path, not freed here.
 * NOTE(review): excerpt elides interior lines; comments only added.
 */
1991 int i2o_hrt_get(struct i2o_controller *c)
1994 	int ret, size = sizeof(i2o_hrt);
1995 	int loops = 3;	/* we only try 3 times to get the HRT, this should be
1996 			   more than enough. Worst case should be 2 times.*/
1998 	/* First read just the header to figure out the real size */
2001 		/* first we allocate the memory for the HRT */
2002 		if (c->hrt == NULL) {
2003 			c->hrt=pci_alloc_consistent(c->pdev, size, &c->hrt_phys);
2004 			if (c->hrt == NULL) {
2005 				printk(KERN_CRIT "%s: Hrt Get failed; Out of memory.\n", c->name);
2011 		msg[0]= SIX_WORD_MSG_SIZE| SGL_OFFSET_4;
2012 		msg[1]= I2O_CMD_HRT_GET<<24 | HOST_TID<<12 | ADAPTER_TID;
2014 		msg[4]= (0xD0000000 | c->hrt_len);	/* Simple transaction */
2015 		msg[5]= c->hrt_phys;		/* Dump it here */
2017 		ret = i2o_post_wait_mem(c, msg, sizeof(msg), 20, c->hrt, NULL, c->hrt_phys, 0, c->hrt_len, 0);
2019 		if(ret == -ETIMEDOUT)
2021 			/* The HRT block we used is in limbo somewhere. When the iop wakes up
2022 			   we will recover it */
2030 			printk(KERN_ERR "%s: Unable to get HRT (status=%#x)\n",
/* Table bigger than our buffer: grow (entries * entry_len words) and retry. */
2035 		if (c->hrt->num_entries * c->hrt->entry_len << 2 > c->hrt_len) {
2036 			size = c->hrt->num_entries * c->hrt->entry_len << 2;
2037 			pci_free_consistent(c->pdev, c->hrt_len, c->hrt, c->hrt_phys);
2042 	} while (c->hrt == NULL && loops > 0);
2046 		printk(KERN_ERR "%s: Unable to get HRT after three tries, giving up\n", c->name);
2050 	i2o_parse_hrt(c); // just for debugging
2056 * Send the I2O System Table to the specified IOP
2058 * The system table contains information about all the IOPs in the
2059  *	system. It is built and then sent to each IOP so that IOPs can
2060 * establish connections between each other.
/*
 * Send the global system table (sys_tbl) to one IOP via
 * I2O_CMD_SYS_TAB_SET, together with SGL elements describing the
 * host's private memory and I/O space grants. If the IOP asked for
 * more private PCI memory/IO than it currently has, allocate it from
 * the parent PCI resource first and record it in the status block.
 * NOTE(review): excerpt elides interior lines; code changes limited to
 * the current_io_base fix flagged below.
 */
2063 static int i2o_systab_send(struct i2o_controller *iop)
2066 	dma_addr_t sys_tbl_phys;
2068 	struct resource *root;
/* privbuf[0..3] = mem base/size, io base/size handed to the IOP below. */
2069 	u32 *privbuf = kmalloc(16, GFP_KERNEL);
2074 	if(iop->status_block->current_mem_size < iop->status_block->desired_mem_size)
2076 		struct resource *res = &iop->mem_resource;
2077 		res->name = iop->pdev->bus->name;
2078 		res->flags = IORESOURCE_MEM;
2081 		printk("%s: requires private memory resources.\n", iop->name);
2082 		root = pci_find_parent_resource(iop->pdev, res);
2084 			printk("Can't find parent resource!\n");
2085 		if(root && allocate_resource(root, res,
2086 				iop->status_block->desired_mem_size,
2087 				iop->status_block->desired_mem_size,
2088 				iop->status_block->desired_mem_size,
2089 				1<<20,	/* Unspecified, so use 1Mb and play safe */
2094 			iop->status_block->current_mem_size = 1 + res->end - res->start;
2095 			iop->status_block->current_mem_base = res->start;
2096 			printk(KERN_INFO "%s: allocated %ld bytes of PCI memory at 0x%08lX.\n",
2097 				iop->name, 1+res->end-res->start, res->start);
2100 	if(iop->status_block->current_io_size < iop->status_block->desired_io_size)
2102 		struct resource *res = &iop->io_resource;
2103 		res->name = iop->pdev->bus->name;
2104 		res->flags = IORESOURCE_IO;
2107 		printk("%s: requires private memory resources.\n", iop->name);
2108 		root = pci_find_parent_resource(iop->pdev, res);
2110 			printk("Can't find parent resource!\n");
2111 		if(root &&  allocate_resource(root, res,
2112 				iop->status_block->desired_io_size,
2113 				iop->status_block->desired_io_size,
2114 				iop->status_block->desired_io_size,
2115 				1<<20,	/* Unspecified, so use 1Mb and play safe */
2120 			iop->status_block->current_io_size = 1 + res->end - res->start;
/* FIX: was current_mem_base — the I/O branch must record the I/O base,
 * otherwise the freshly allocated I/O start clobbers the memory base
 * that privbuf[0] reports to the IOP below. */
2121 			iop->status_block->current_io_base = res->start;
2122 			printk(KERN_INFO "%s: allocated %ld bytes of PCI I/O at 0x%08lX.\n",
2123 				iop->name, 1+res->end-res->start, res->start);
2128 	privbuf[0] = iop->status_block->current_mem_base;
2129 	privbuf[1] = iop->status_block->current_mem_size;
2130 	privbuf[2] = iop->status_block->current_io_base;
2131 	privbuf[3] = iop->status_block->current_io_size;
2134 	msg[0] = I2O_MESSAGE_SIZE(12) | SGL_OFFSET_6;
2135 	msg[1] = I2O_CMD_SYS_TAB_SET<<24 | HOST_TID<<12 | ADAPTER_TID;
2137 	msg[4] = (0<<16) | ((iop->unit+2) ); /* Host 0 IOP ID (unit + 2) */
2138 	msg[5] = 0;                          /* Segment 0 */
2141 	 * Provide three SGL-elements:
2142 	 * System table (SysTab), Private memory space declaration and
2143 	 * Private i/o space declaration
2145 	 * Nasty one here. We can't use pci_alloc_consistent to send the
2146 	 * same table to everyone. We have to go remap it for them all
2149 	sys_tbl_phys = pci_map_single(iop->pdev, sys_tbl, sys_tbl_len, PCI_DMA_TODEVICE);
2150 	msg[6] = 0x54000000 | sys_tbl_phys;
2152 	msg[7] = sys_tbl_phys;
2153 	msg[8] = 0x54000000 | privbuf[1];
2154 	msg[9] = privbuf[0];
2155 	msg[10] = 0xD4000000 | privbuf[3];
2156 	msg[11] = privbuf[2];
2158 	ret=i2o_post_wait(iop, msg, sizeof(msg), 120);
2160 	pci_unmap_single(iop->pdev, sys_tbl_phys, sys_tbl_len, PCI_DMA_TODEVICE);
2164 		printk(KERN_ERR "%s: SysTab setup timed out.\n", iop->name);
2168 		printk(KERN_ERR "%s: Unable to set SysTab (status=%#x).\n",
2173 		dprintk(KERN_INFO "%s: SysTab set.\n", iop->name);
2175 	i2o_status_get(iop); // Entered READY state
2183 * Initialize I2O subsystem.
/*
 * Boot-time bring-up of all discovered IOPs: activate each to HOLD,
 * build and push the system table, bring each controller online to
 * OPERATIONAL (rebuilding the table if one fails), then start the
 * per-IOP dynamic-LCT kernel thread and register for events.
 * NOTE(review): excerpt elides interior lines (labels, loop advance
 * via niop); comments only added.
 */
2185 void __init i2o_sys_init(void)
2187 	struct i2o_controller *iop, *niop = NULL;
2189 	printk(KERN_INFO "Activating I2O controllers...\n");
2190 	printk(KERN_INFO "This may take a few minutes if there are many devices\n");
2192 	/* In INIT state, Activate IOPs */
2193 	for (iop = i2o_controller_chain; iop; iop = niop) {
2194 		dprintk(KERN_INFO "Calling i2o_activate_controller for %s...\n",
/* A controller that fails to activate is dropped from the chain entirely. */
2197 		if (i2o_activate_controller(iop) < 0)
2198 			i2o_delete_controller(iop);
2201 	/* Active IOPs in HOLD state */
2204 	if (i2o_controller_chain == NULL)
2208 	 * If build_sys_table fails, we kill everything and bail
2209 	 * as we can't init the IOPs w/o a system table
2211 	dprintk(KERN_INFO "i2o_core: Calling i2o_build_sys_table...\n");
2212 	if (i2o_build_sys_table() < 0) {
2217 	/* If IOP don't get online, we need to rebuild the System table */
2218 	for (iop = i2o_controller_chain; iop; iop = niop) {
2220 		dprintk(KERN_INFO "Calling i2o_online_controller for %s...\n", iop->name);
/* Deleting an IOP invalidates the table's entry count — rebuild and retry. */
2221 		if (i2o_online_controller(iop) < 0) {
2222 			i2o_delete_controller(iop);
2223 			goto rebuild_sys_tab;
2227 	/* Active IOPs now in OPERATIONAL state */
2230 	 * Register for status updates from all IOPs
2232 	for(iop = i2o_controller_chain; iop; iop=iop->next) {
2234 		/* Create a kernel thread to deal with dynamic LCT updates */
2235 		iop->lct_pid = kernel_thread(i2o_dyn_lct, iop, CLONE_SIGHAND);
2237 		/* Update change ind on DLCT */
2238 		iop->dlct->change_ind = iop->lct->change_ind;
2240 		/* Start dynamic LCT updates */
2241 		i2o_lct_notify(iop);
2243 		/* Register for all events from IRTOS */
2244 		i2o_event_register(iop, core_context, 0, 0, 0xFFFFFFFF);
2249 * i2o_sys_shutdown - shutdown I2O system
2251 * Bring down each i2o controller and then return. Each controller
2252 * is taken through an orderly shutdown
/*
 * Orderly shutdown of the whole I2O subsystem: deleting each
 * controller from the chain also resets it.
 * NOTE(review): excerpt elides interior lines (niop advance).
 */
2255 static void i2o_sys_shutdown(void)
2257 	struct i2o_controller *iop, *niop;
2259 	/* Delete all IOPs from the controller chain */
2260 	/* that will reset all IOPs too */
2262 	for (iop = i2o_controller_chain; iop; iop = niop) {
2264 		i2o_delete_controller(iop);
2269 * i2o_activate_controller - bring controller up to HOLD
2272 * This function brings an I2O controller into HOLD state. The adapter
2273 * is reset if necessary and then the queues and resource table
2274 * are read. -1 is returned on a failure, 0 on success.
/*
 * Bring one controller from INIT up to HOLD state: get status
 * (resetting if unresponsive), reject faulted or too-new-spec IOPs,
 * reset any IOP found already running, then set up the outbound queue
 * and fetch the HRT.
 * NOTE(review): excerpt elides interior lines (error returns);
 * comments only added.
 */
2278 int i2o_activate_controller(struct i2o_controller *iop)
2280 	/* In INIT state, Wait Inbound Q to initialize (in i2o_status_get) */
2281 	/* In READY state, Get status */
2283 	if (i2o_status_get(iop) < 0) {
2284 		printk(KERN_INFO "Unable to obtain status of %s, "
2285 			"attempting a reset.\n", iop->name);
2286 		if (i2o_reset_controller(iop) < 0)
2290 	if(iop->status_block->iop_state == ADAPTER_STATE_FAULTED) {
2291 		printk(KERN_CRIT "%s: hardware fault\n", iop->name);
2295 	if (iop->status_block->i2o_version > I2OVER15) {
2296 		printk(KERN_ERR "%s: Not running vrs. 1.5. of the I2O Specification.\n",
/* An IOP already past INIT is in an unknown configuration — reset it. */
2301 	if (iop->status_block->iop_state == ADAPTER_STATE_READY ||
2302 		iop->status_block->iop_state == ADAPTER_STATE_OPERATIONAL ||
2303 		iop->status_block->iop_state == ADAPTER_STATE_HOLD ||
2304 		iop->status_block->iop_state == ADAPTER_STATE_FAILED)
2306 		dprintk(KERN_INFO "%s: Already running, trying to reset...\n",
2308 		if (i2o_reset_controller(iop) < 0)
2312 	if (i2o_init_outbound_q(iop) < 0)
2315 	if (i2o_post_outbound_messages(iop))
2320 	if (i2o_hrt_get(iop) < 0)
2328 * i2o_init_outbound_queue - setup the outbound queue
2331 * Clear and (re)initialize IOP's outbound queue. Returns 0 on
2332 * success or a negative errno code on a failure.
/*
 * Send I2O_CMD_OUTBOUND_INIT to (re)initialize the IOP's outbound
 * queue and poll a 4-byte DMA status word until the IOP acknowledges
 * (values ordered so "< I2O_CMD_REJECTED" means still pending).
 * NOTE(review): excerpt elides interior lines; comments only added.
 */
2335 int i2o_init_outbound_q(struct i2o_controller *c)
2338 	dma_addr_t status_phys;
2343 	dprintk(KERN_INFO "%s: Initializing Outbound Queue...\n", c->name);
2344 	m=i2o_wait_message(c, "OutboundInit");
2347 	msg=(u32 *)(c->msg_virt+m);
2349 	status = pci_alloc_consistent(c->pdev, 4, &status_phys);
2351 		printk(KERN_ERR "%s: Outbound Queue initialization failed - no free memory.\n",
2355 	memset(status, 0, 4);
2357 	msg[0]= EIGHT_WORD_MSG_SIZE| TRL_OFFSET_6;
2358 	msg[1]= I2O_CMD_OUTBOUND_INIT<<24 | HOST_TID<<12 | ADAPTER_TID;
2359 	msg[2]= core_context;
2360 	msg[3]= 0x0106;				/* Transaction context */
2361 	msg[4]= 4096;				/* Host page frame size */
2362 	/* Frame size is in words. 256 bytes a frame for now */
2363 	msg[5]= MSG_FRAME_SIZE<<16|0x80;	/* Outbound msg frame size in words and Initcode */
2364 	msg[6]= 0xD0000004;			/* Simple SG LE, EOB */
2365 	msg[7]= status_phys;
2367 	i2o_post_message(c,m);
/* Spin until the IOP writes a reply code (REJECTED or COMPLETED). */
2371 	while(status[0] < I2O_CMD_REJECTED)
2373 		if((jiffies-time)>=30*HZ)
2376 				printk(KERN_ERR "%s: Ignored queue initialize request.\n",
2379 				printk(KERN_ERR "%s: Outbound queue initialize timeout.\n",
2381 			pci_free_consistent(c->pdev, 4, status, status_phys);
2388 	if(status[0] != I2O_CMD_COMPLETED)
2390 		printk(KERN_ERR "%s: IOP outbound initialise failed.\n", c->name);
2391 		pci_free_consistent(c->pdev, 4, status, status_phys);
2394 	pci_free_consistent(c->pdev, 4, status, status_phys);
2399 * i2o_post_outbound_messages - fill message queue
2402 * Allocate a message frame and load the messages into the IOP. The
2403 * function returns zero on success or a negative errno code on
/*
 * Allocate the host-side pool of outbound reply frames, map it for
 * device-to-host DMA, and hand every frame's bus address to the IOP
 * via the reply FIFO.
 * NOTE(review): excerpt elides interior lines (returns); comments
 * only added.
 */
2407 int i2o_post_outbound_messages(struct i2o_controller *c)
2411 	/* Alloc space for IOP's outbound queue message frames */
2413 	c->page_frame = kmalloc(MSG_POOL_SIZE, GFP_KERNEL);
2414 	if(c->page_frame==NULL) {
2415 		printk(KERN_ERR "%s: Outbound Q initialize failed; out of memory.\n",
2420 	c->page_frame_map = pci_map_single(c->pdev, c->page_frame, MSG_POOL_SIZE, PCI_DMA_FROMDEVICE);
2422 	if(c->page_frame_map == 0)
2424 		kfree(c->page_frame);
2425 		printk(KERN_ERR "%s: Unable to map outbound queue.\n", c->name);
2429 	m = c->page_frame_map;
/* Give the IOP one bus address per frame, MSG_FRAME_SIZE words apart. */
2433 	for(i=0; i< NMBR_MSG_FRAMES; i++) {
2434 		I2O_REPLY_WRITE32(c,m);
2436 		m += (MSG_FRAME_SIZE << 2);
2443 * Get the IOP's Logical Configuration Table
/*
 * Fetch the Logical Configuration Table with I2O_CMD_LCT_NOTIFY in
 * immediate mode (change indicator 0 = "report now"). If the IOP
 * reports a bigger table than the buffer sized from
 * expected_lct_size, reallocate and retry; finally parse it.
 * On -ETIMEDOUT the buffer ownership passes to i2o_post_wait_mem's
 * completion path and must not be freed here.
 * NOTE(review): excerpt elides interior lines; the only code change is
 * the repaired error-message string flagged below.
 */
2445 int i2o_lct_get(struct i2o_controller *c)
2448 	int ret, size = c->status_block->expected_lct_size;
2451 		if (c->lct == NULL) {
2452 			c->lct = pci_alloc_consistent(c->pdev, size, &c->lct_phys);
2453 			if(c->lct == NULL) {
2454 				printk(KERN_CRIT "%s: Lct Get failed. Out of memory.\n",
2459 		memset(c->lct, 0, size);
2461 		msg[0] = EIGHT_WORD_MSG_SIZE|SGL_OFFSET_6;
2462 		msg[1] = I2O_CMD_LCT_NOTIFY<<24 | HOST_TID<<12 | ADAPTER_TID;
2463 		/* msg[2] filled in i2o_post_wait */
2465 		msg[4] = 0xFFFFFFFF;	/* All devices */
2466 		msg[5] = 0x00000000;	/* Report now */
2467 		msg[6] = 0xD0000000|size;
2468 		msg[7] = c->lct_phys;
2470 		ret=i2o_post_wait_mem(c, msg, sizeof(msg), 120, c->lct, NULL, c->lct_phys, 0, size, 0);
2472 		if(ret == -ETIMEDOUT)
/* FIX: format string was "(status=%#x.\n" — missing closing paren. */
2480 			printk(KERN_ERR "%s: LCT Get failed (status=%#x).\n",
/* table_size is in 32-bit words; grow the buffer if it didn't fit. */
2485 		if (c->lct->table_size << 2 > size) {
2486 			int new_size = c->lct->table_size << 2;
2487 			pci_free_consistent(c->pdev, size, c->lct, c->lct_phys);
2491 	} while (c->lct == NULL);
2493 	if ((ret=i2o_parse_lct(c)) < 0)
2500 * Like above, but used for async notification. The main
2501  *	difference is that we keep track of the CurrentChangeIndicator
2502 * so that we only get updates when it actually changes.
/*
 * Post an asynchronous LCT_NOTIFY request against the dynamic LCT
 * buffer (c->dlct): the IOP replies only when its change indicator
 * advances past the one we pass in msg[5], so we are notified of
 * actual configuration changes. Fire-and-forget via i2o_post_this.
 * NOTE(review): excerpt elides interior lines; comments only added.
 */
2505 int i2o_lct_notify(struct i2o_controller *c)
2509 	msg[0] = EIGHT_WORD_MSG_SIZE|SGL_OFFSET_6;
2510 	msg[1] = I2O_CMD_LCT_NOTIFY<<24 | HOST_TID<<12 | ADAPTER_TID;
2511 	msg[2] = core_context;
/* Recognizable transaction-context marker for the async reply path. */
2512 	msg[3] = 0xDEADBEEF;
2513 	msg[4] = 0xFFFFFFFF;	/* All devices */
2514 	msg[5] = c->dlct->change_ind+1;	/* Next change */
2515 	msg[6] = 0xD0000000|8192;
2516 	msg[7] = c->dlct_phys;
2518 	return i2o_post_this(c, msg, sizeof(msg));
2522 * Bring a controller online into OPERATIONAL state.
/*
 * Bring a controller from HOLD to OPERATIONAL: push the system table,
 * enable the IOP, fetch/parse the LCT, then query the executive
 * battery-status scalar (group 0x0000, field 4).
 * NOTE(review): excerpt elides interior lines (error returns);
 * comments only added.
 */
2525 int i2o_online_controller(struct i2o_controller *iop)
2529 	if (i2o_systab_send(iop) < 0)
2532 	/* In READY state */
2534 	dprintk(KERN_INFO "%s: Attempting to enable...\n", iop->name);
2535 	if (i2o_enable_controller(iop) < 0)
2538 	/* In OPERATIONAL state */
2540 	dprintk(KERN_INFO "%s: Attempting to get/parse lct...\n", iop->name);
2541 	if (i2o_lct_get(iop) < 0)
2544 	/* Check battery status */
2547 	if(i2o_query_scalar(iop, ADAPTER_TID, 0x0000, 4, &v, 4)>=0)
2557 * Build system table
2559 * The system table contains information about all the IOPs in the
2560 * system (duh) and is used by the Executives on the IOPs to establish
2561 * peer2peer connections. We're not supporting peer2peer at the moment,
2562 * but this will be needed down the road for things like lan2lan forwarding.
/*
 * (Re)build the global sys_tbl from the live controller chain: header
 * plus one i2o_sys_tbl_entry per responsive IOP. Controllers that no
 * longer answer a status query are deleted and dropped from the entry
 * count. The finished table is later pushed to every IOP by
 * i2o_systab_send() so they can establish peer connections.
 * NOTE(review): excerpt elides interior lines; the only code change is
 * the repaired split string literal flagged below.
 */
2564 static int i2o_build_sys_table(void)
2566 	struct i2o_controller *iop = NULL;
2567 	struct i2o_controller *niop = NULL;
2570 	sys_tbl_len = sizeof(struct i2o_sys_tbl) +	// Header + IOPs
2571 				(i2o_num_controllers) *
2572 					sizeof(struct i2o_sys_tbl_entry);
2577 	sys_tbl = kmalloc(sys_tbl_len, GFP_KERNEL);
2579 		printk(KERN_CRIT "SysTab Set failed. Out of memory.\n");
2582 	memset((void*)sys_tbl, 0, sys_tbl_len);
2584 	sys_tbl->num_entries = i2o_num_controllers;
2585 	sys_tbl->version = I2OVERSION; /* TODO: Version 2.0 */
2586 	sys_tbl->change_ind = sys_tbl_ind++;
2588 	for(iop = i2o_controller_chain; iop; iop = niop)
2593 		 * Get updated IOP state so we have the latest information
2595 		 * We should delete the controller at this point if it
2596 		 * doesn't respond since if it's not on the system table
2597 		 * it is technically not part of the I2O subsystem...
2599 		if(i2o_status_get(iop)) {
/* FIX: the two literal halves concatenated to "...whileattempting..." —
 * a trailing space was missing at the split point. */
2600 			printk(KERN_ERR "%s: Deleting b/c could not get status while "
2601 				"attempting to build system table\n", iop->name);
2602 			i2o_delete_controller(iop);
2603 			sys_tbl->num_entries--;
2604 			continue; // try the next one
2607 		sys_tbl->iops[count].org_id = iop->status_block->org_id;
/* IOP IDs are unit + 2; IDs 0/1 are reserved (host uses 0, see systab_send). */
2608 		sys_tbl->iops[count].iop_id = iop->unit + 2;
2609 		sys_tbl->iops[count].seg_num = 0;
2610 		sys_tbl->iops[count].i2o_version =
2611 				iop->status_block->i2o_version;
2612 		sys_tbl->iops[count].iop_state =
2613 				iop->status_block->iop_state;
2614 		sys_tbl->iops[count].msg_type =
2615 				iop->status_block->msg_type;
2616 		sys_tbl->iops[count].frame_size =
2617 				iop->status_block->inbound_frame_size;
2618 		sys_tbl->iops[count].last_changed = sys_tbl_ind - 1; // ??
2619 		sys_tbl->iops[count].iop_capabilities =
2620 				iop->status_block->iop_capabilities;
2621 		sys_tbl->iops[count].inbound_low = (u32)iop->post_port;
2622 		sys_tbl->iops[count].inbound_high = 0;	// FIXME: 64-bit support
2630 	table = (u32*)sys_tbl;
2631 	for(count = 0; count < (sys_tbl_len >>2); count++)
2632 		printk(KERN_INFO "sys_tbl[%d] = %0#10x\n", count, table[count]);
2641 * Run time support routines
2645 * Generic "post and forget" helpers. This is less efficient - we do
2646  *	a memcpy for example that isn't strictly needed, but for most uses
2647 * this is simply not worth optimising
/*
 * Fire-and-forget message post: grab an inbound frame (spinning up to
 * 1 second), copy the caller's message into it with memcpy_toio, and
 * ring the post doorbell. No reply handling here.
 * NOTE(review): excerpt elides interior lines; comments only added.
 */
2650 int i2o_post_this(struct i2o_controller *c, u32 *data, int len)
2654 	unsigned long t=jiffies;
2659 		m = I2O_POST_READ32(c);
2661 	while(m==0xFFFFFFFF && (jiffies-t)<HZ);
2665 		printk(KERN_ERR "%s: Timeout waiting for message frame!\n",
2669 	msg = (u32 *)(c->msg_virt + m);
/* Frame lives in mapped controller memory, hence the _toio copy. */
2670 	memcpy_toio(msg, data, len);
2671 	i2o_post_message(c,m);
2676 * i2o_post_wait_mem - I2O query/reply with DMA buffers
2678 * @msg: message to send
2679 * @len: length of message
2680 * @timeout: time in seconds to wait
2681 * @mem1: attached memory buffer 1
2682 * @mem2: attached memory buffer 2
2683 * @phys1: physical address of buffer 1
2684 * @phys2: physical address of buffer 2
2685 * @size1: size of buffer 1
2686 * @size2: size of buffer 2
2688 * This core API allows an OSM to post a message and then be told whether
2689 * or not the system received a successful reply.
2691 * If the message times out then the value '-ETIMEDOUT' is returned. This
2692 * is a special case. In this situation the message may (should) complete
2693 * at an indefinite time in the future. When it completes it will use the
2694 * memory buffers attached to the request. If -ETIMEDOUT is returned then
2695 * the memory buffers must not be freed. Instead the event completion will
2696 * free them for you. In all other cases the buffers are your problem.
2698 * Pass NULL for unneeded buffers.
/*
 * Post a message and sleep for its reply, with up to two DMA buffers
 * attached. On timeout the queue entry is marked dead rather than
 * removed (wq = NULL), and -ETIMEDOUT tells the caller that buffer
 * ownership has passed to the late-completion path in
 * i2o_post_wait_complete() — the caller must NOT free mem1/mem2 then.
 * NOTE(review): excerpt elides interior lines (status/complete
 * declarations, branch structure); comments only added.
 */
2701 int i2o_post_wait_mem(struct i2o_controller *c, u32 *msg, int len, int timeout, void *mem1, void *mem2, dma_addr_t phys1, dma_addr_t phys2, int size1, int size2)
2703 	DECLARE_WAIT_QUEUE_HEAD(wq_i2o_post);
2704 	DECLARE_WAITQUEUE(wait, current);
2707 	unsigned long flags = 0;
2708 	struct i2o_post_wait_data *wait_data =
2709 		kmalloc(sizeof(struct i2o_post_wait_data), GFP_KERNEL);
2715 	 *	Create a new notification object
2717 	wait_data->status = &status;
2718 	wait_data->complete = &complete;
2719 	wait_data->mem[0] = mem1;
2720 	wait_data->mem[1] = mem2;
2721 	wait_data->phys[0] = phys1;
2722 	wait_data->phys[1] = phys2;
2723 	wait_data->size[0] = size1;
2724 	wait_data->size[1] = size2;
2727 	 *	Queue the event with its unique id
2729 	spin_lock_irqsave(&post_wait_lock, flags);
2731 	wait_data->next = post_wait_queue;
2732 	post_wait_queue = wait_data;
/* 15-bit rolling id; echoed back in bits 16..30 of the reply context. */
2733 	wait_data->id = (++post_wait_id) & 0x7fff;
2734 	wait_data->wq = &wq_i2o_post;
2736 	spin_unlock_irqrestore(&post_wait_lock, flags);
2739 	 *	Fill in the message id
2742 	msg[2] = 0x80000000|(u32)core_context|((u32)wait_data->id<<16);
2745 	 *	Post the message to the controller. At some point later it
2746 	 *	will return. If we time out before it returns then
2747 	 *	complete will be zero. From the point post_this returns
2748 	 *	the wait_data may have been deleted.
/* Register on the wait queue BEFORE posting to avoid a missed wakeup. */
2751 	add_wait_queue(&wq_i2o_post, &wait);
2752 	set_current_state(TASK_INTERRUPTIBLE);
2753 	if ((status = i2o_post_this(c, msg, len))==0) {
2754 		schedule_timeout(HZ * timeout);
2758 	remove_wait_queue(&wq_i2o_post, &wait);
2761 	remove_wait_queue(&wq_i2o_post, &wait);
2763 	if(signal_pending(current))
2766 	spin_lock_irqsave(&post_wait_lock, flags);
2767 	barrier();	/* Be sure we see complete as it is locked */
2771 	 *	Mark the entry dead. We cannot remove it. This is important.
2772 	 *	When it does terminate (which it must do if the controller hasn't
2773 	 *	died..) then it will otherwise scribble on stuff.
2774 	 * 	!complete lets us safely check if the entry is still
2775 	 * 	allocated and thus we can write into it
2777 	wait_data->wq = NULL;
2778 	status = -ETIMEDOUT;
2782 	/* Debugging check - remove me soon */
2783 	if(status == -ETIMEDOUT)
2785 		printk("TIMEDOUT BUG!\n");
2789 	/* And the wait_data is not leaked either! */
2790 	spin_unlock_irqrestore(&post_wait_lock, flags);
2795 * i2o_post_wait - I2O query/reply
2797 * @msg: message to send
2798 * @len: length of message
2799 * @timeout: time in seconds to wait
2801 * This core API allows an OSM to post a message and then be told whether
2802 * or not the system received a successful reply.
/*
 * Convenience wrapper around i2o_post_wait_mem() for messages with no
 * attached DMA buffers.
 */
2805 int i2o_post_wait(struct i2o_controller *c, u32 *msg, int len, int timeout)
2807 	return i2o_post_wait_mem(c, msg, len, timeout, NULL, NULL, 0, 0, 0, 0);
2811 * i2o_post_wait is completed and we want to wake up the
2812  *	sleeping process. Called by core's reply handler.
/*
 * Reply-side completion for i2o_post_wait_mem(): look up the queued
 * entry by the 15-bit id carried in the reply context. A live entry
 * (wq still set) gets its status stored and its sleeper woken; a dead
 * entry (caller already timed out) has its attached DMA buffers freed
 * here instead.
 * NOTE(review): excerpt elides interior lines (q assignment, wakeup,
 * unlink). The lock is taken with spin_lock_irqsave but the visible
 * exits use plain spin_unlock — cannot confirm from this excerpt
 * whether the irqrestore happens on an elided line; verify against the
 * full source.
 */
2815 static void i2o_post_wait_complete(struct i2o_controller *c, u32 context, int status)
2817 	struct i2o_post_wait_data **p1, *q;
2818 	unsigned long flags;
2821 	 *	We need to search through the post_wait
2822 	 *	queue to see if the given message is still
2823 	 *	outstanding. If not, it means that the IOP
2824 	 *	took longer to respond to the message than we
2825 	 *	had allowed and timer has already expired.
2826 	 *	Not much we can do about that except log
2827 	 *	it for debug purposes, increase timeout, and recompile
2829 	 *	Lock needed to keep anyone from moving queue pointers
2830 	 *	around while we're looking through them.
2833 	spin_lock_irqsave(&post_wait_lock, flags);
2835 	for(p1 = &post_wait_queue; *p1!=NULL; p1 = &((*p1)->next))
2838 		if(q->id == ((context >> 16) & 0x7fff)) {
2851 				/* Live entry - wakeup and set status */
2852 				*q->status = status;
2859 				 *	Free resources. Caller is dead
2863 					pci_free_consistent(c->pdev, q->size[0], q->mem[0], q->phys[0]);
2865 					pci_free_consistent(c->pdev, q->size[1], q->mem[1], q->phys[1]);
2867 				printk(KERN_WARNING "i2o_post_wait event completed after timeout.\n");
2870 			spin_unlock(&post_wait_lock);
2874 	spin_unlock(&post_wait_lock);
2876 	printk(KERN_DEBUG "i2o_post_wait: Bogus reply!\n");
2879 /* Issue UTIL_PARAMS_GET or UTIL_PARAMS_SET
2881 * This function can be used for all UtilParamsGet/Set operations.
2882 * The OperationList is given in oplist-buffer,
2883 * and results are returned in reslist-buffer.
2884 * Note that the minimum sized reslist is 8 bytes and contains
2885 * ResultCount, ErrorInfoSize, BlockStatus and BlockSize.
/*
 * Issue UTIL_PARAMS_GET or UTIL_PARAMS_SET: copy the caller's
 * operation list into DMA memory, post-and-wait, copy the result list
 * back, then walk the result blocks. Returns the byte length of the
 * result list, or -BlockStatus when the single requested block failed.
 * NOTE(review): excerpt elides interior lines; comments only added.
 */
2888 int i2o_issue_params(int cmd, struct i2o_controller *iop, int tid,
2889                 void *oplist, int oplen, void *reslist, int reslen)
2892 	u32 *res32 = (u32*)reslist;
2893 	u32 *restmp = (u32*)reslist;
2897 	u32 *opmem, *resmem;
2898 	dma_addr_t opmem_phys, resmem_phys;
2900 	/* Get DMAable memory */
2901 	opmem = pci_alloc_consistent(iop->pdev, oplen, &opmem_phys);
2904 	memcpy(opmem, oplist, oplen);
2906 	resmem = pci_alloc_consistent(iop->pdev, reslen, &resmem_phys);
2909 		pci_free_consistent(iop->pdev, oplen, opmem, opmem_phys);
2913 	msg[0] = NINE_WORD_MSG_SIZE | SGL_OFFSET_5;
2914 	msg[1] = cmd << 24 | HOST_TID << 12 | tid;
2917 	msg[5] = 0x54000000 | oplen;	/* OperationList */
2918 	msg[6] = opmem_phys;
2919 	msg[7] = 0xD0000000 | reslen;	/* ResultList */
2920 	msg[8] = resmem_phys;
2922 	wait_status = i2o_post_wait_mem(iop, msg, sizeof(msg), 10, opmem, resmem, opmem_phys, resmem_phys, oplen, reslen);
/* On -ETIMEDOUT the DMA buffers belong to the completion path now. */
2925 	 *	This only looks like a memory leak - don't "fix" it.
2927 	if(wait_status == -ETIMEDOUT)
2930 	memcpy(reslist, resmem, reslen);
2931 	pci_free_consistent(iop->pdev, reslen, resmem, resmem_phys);
2932 	pci_free_consistent(iop->pdev, oplen, opmem, opmem_phys);
2935 	if(wait_status != 0)
2938 	 * Calculate number of bytes of Result LIST
2939 	 * We need to loop through each Result BLOCK and grab the length
/* res32[0] low 16 bits = ResultCount; restmp steps block to block. */
2943 	for(i = 0; i < (res32[0]&0X0000FFFF); i++)
2945 		if(restmp[0]&0x00FF0000)	/* BlockStatus != SUCCESS */
2947 			printk(KERN_WARNING "%s - Error:\n  ErrorInfoSize = 0x%02x, "
2948 					"BlockStatus = 0x%02x, BlockSize = 0x%04x\n",
2949 					(cmd == I2O_CMD_UTIL_PARAMS_SET) ? "PARAMS_SET"
2951 					res32[1]>>24, (res32[1]>>16)&0xFF, res32[1]&0xFFFF);
2954 			 *	If this is the only request, then we return an error
2956 			if((res32[0]&0x0000FFFF) == 1)
2958 				return -((res32[1] >> 16) & 0xFF); /* -BlockStatus */
2961 		len += restmp[0] & 0x0000FFFF;	/* Length of res BLOCK */
2962 		restmp += restmp[0] & 0x0000FFFF; /* Skip to next BLOCK */
2964 	return (len << 2);  /* bytes used by result list */
/*
 * i2o_query_scalar - read one field (@field >= 0) or a whole group
 * (@field == -1) of scalar parameter group @group from TID @tid.
 * The 8-byte result-list header is stripped before copying @buflen
 * bytes into @buf.
 */
2968 * Query one scalar group value or a whole scalar group.
2970 int i2o_query_scalar(struct i2o_controller *iop, int tid,
2971 int group, int field, void *buf, int buflen)
2973 u16 opblk[] = { 1, 0, I2O_PARAMS_FIELD_GET, group, 1, field };
/* NOTE(review): VLA sized by caller-supplied buflen - kernel stack usage
 * scales with the request; verify callers keep buflen small. */
2974 u8 resblk[8+buflen]; /* 8 bytes for header */
2977 if (field == -1) /* whole group */
2980 size = i2o_issue_params(I2O_CMD_UTIL_PARAMS_GET, iop, tid,
2981 opblk, sizeof(opblk), resblk, sizeof(resblk));
2983 memcpy(buf, resblk+8, buflen); /* cut off header */
/*
 * i2o_set_scalar - write one field (@field >= 0) or a whole group
 * (@field == -1) of scalar parameter group @group on TID @tid.
 * Builds a PARAMS_SET operation block in a kmalloc'd buffer (the
 * operation header plus @buflen bytes of values) and issues it via
 * i2o_issue_params().
 */
2991 * Set a scalar group value or a whole group.
2993 int i2o_set_scalar(struct i2o_controller *iop, int tid,
2994 int group, int field, void *buf, int buflen)
2997 u8 resblk[8+buflen]; /* 8 bytes for header */
3000 opblk = kmalloc(buflen+64, GFP_KERNEL);
3003 printk(KERN_ERR "i2o: no memory for operation buffer.\n");
3007 opblk[0] = 1; /* operation count */
3008 opblk[1] = 0; /* pad */
3009 opblk[2] = I2O_PARAMS_FIELD_SET;
3012 if(field == -1) { /* whole group */
3014 memcpy(opblk+5, buf, buflen);
3016 else /* single field */
/* Single-field form carries one extra header word, hence offset 6 */
3020 memcpy(opblk+6, buf, buflen);
3023 size = i2o_issue_params(I2O_CMD_UTIL_PARAMS_SET, iop, tid,
3024 opblk, 12+buflen, resblk, sizeof(resblk));
/*
 * i2o_query_table - read rows/fields from table parameter group @group
 * on TID @tid.  @oper selects TABLE_GET (all rows) or LIST_GET
 * (specific rows); @fieldcount/-1 and @ibuf select the fields and rows
 * as described below.  Results land in caller-supplied @resblk.
 */
3033 * if oper == I2O_PARAMS_TABLE_GET, get from all rows
3034 * if fieldcount == -1 return all fields
3035 * ibuf and ibuflen are unused (use NULL, 0)
3036 * else return specific fields
3037 * ibuf contains fieldindexes
3039 * if oper == I2O_PARAMS_LIST_GET, get from specific rows
3040 * if fieldcount == -1 return all fields
3041 * ibuf contains rowcount, keyvalues
3042 * else return specific fields
3043 * fieldcount is # of fieldindexes
3044 * ibuf contains fieldindexes, rowcount, keyvalues
3046 * You could also use directly function i2o_issue_params().
3048 int i2o_query_table(int oper, struct i2o_controller *iop, int tid, int group,
3049 int fieldcount, void *ibuf, int ibuflen,
3050 void *resblk, int reslen)
3055 opblk = kmalloc(10 + ibuflen, GFP_KERNEL)
3058 printk(KERN_ERR "i2o: no memory for query buffer.\n");
3062 opblk[0] = 1; /* operation count */
3063 opblk[1] = 0; /* pad */
3066 opblk[4] = fieldcount;
3067 memcpy(opblk+5, ibuf, ibuflen); /* other params */
3069 size = i2o_issue_params(I2O_CMD_UTIL_PARAMS_GET,iop, tid,
3070 opblk, 10+ibuflen, resblk, reslen);
/*
 * i2o_clear_table - delete all rows of table parameter group @group on
 * TID @tid.  Thin wrapper around i2o_issue_params(); returns its status.
 */
3079 * Clear table group, i.e. delete all rows.
3081 int i2o_clear_table(struct i2o_controller *iop, int tid, int group)
3083 u16 opblk[] = { 1, 0, I2O_PARAMS_TABLE_CLEAR, group };
3084 u8 resblk[32]; /* min 8 bytes for result header */
3086 return i2o_issue_params(I2O_CMD_UTIL_PARAMS_SET, iop, tid,
3087 opblk, sizeof(opblk), resblk, sizeof(resblk));
/*
 * i2o_row_add_table - add row(s) to table parameter group @group on
 * TID @tid.  The row data in @buf is wrapped in a ROW_ADD operation
 * block (kmalloc'd) and issued via i2o_issue_params().
 */
3091 * Add a new row into a table group.
3093 * if fieldcount==-1 then we add whole rows
3094 * buf contains rowcount, keyvalues
3095 * else just specific fields are given, rest use defaults
3096 * buf contains fieldindexes, rowcount, keyvalues
3098 int i2o_row_add_table(struct i2o_controller *iop, int tid,
3099 int group, int fieldcount, void *buf, int buflen)
3102 u8 resblk[32]; /* min 8 bytes for header */
3105 opblk = kmalloc(buflen+64, GFP_KERNEL);
3108 printk(KERN_ERR "i2o: no memory for operation buffer.\n");
3112 opblk[0] = 1; /* operation count */
3113 opblk[1] = 0; /* pad */
3114 opblk[2] = I2O_PARAMS_ROW_ADD;
3116 opblk[4] = fieldcount;
3117 memcpy(opblk+5, buf, buflen);
3119 size = i2o_issue_params(I2O_CMD_UTIL_PARAMS_SET, iop, tid,
3120 opblk, 10+buflen, resblk, sizeof(resblk));
/*
 * i2o_report_fail_status - decode and log the transport failure code
 * and the preserved frame details of a failed reply (MSG_FAIL set).
 * @req_status: transport failure code (0x81..0x8F, or 0xFF = unknown)
 * @msg: the failed reply message frame
 */
3130 * Used for error reporting/debugging purposes.
3131 * Following fail status are common to all classes.
3132 * The preserved message must be handled in the reply handler.
3134 void i2o_report_fail_status(u8 req_status, u32* msg)
3136 static char *FAIL_STATUS[] = {
3137 "0x80", /* not used */
3138 "SERVICE_SUSPENDED", /* 0x81 */
3139 "SERVICE_TERMINATED", /* 0x82 */
3147 "INVALID_MSG_FLAGS",
3150 "INVALID_TARGET_ID",
3151 "INVALID_INITIATOR_ID",
3152 "INVALID_INITIATOR_CONTEX", /* 0x8F */
3153 "UNKNOWN_FAILURE" /* 0xFF */
/* Table is indexed by the low nibble of the failure code */
3156 if (req_status == I2O_FSC_TRANSPORT_UNKNOWN_FAILURE)
3157 printk("TRANSPORT_UNKNOWN_FAILURE (%0#2x)\n.", req_status);
3159 printk("TRANSPORT_%s.\n", FAIL_STATUS[req_status & 0x0F]);
3161 /* Dump some details */
3163 printk(KERN_ERR " InitiatorId = %d, TargetId = %d\n",
3164 (msg[1] >> 12) & 0xFFF, msg[1] & 0xFFF);
3165 printk(KERN_ERR " LowestVersion = 0x%02X, HighestVersion = 0x%02X\n",
3166 (msg[4] >> 8) & 0xFF, msg[4] & 0xFF);
3167 printk(KERN_ERR " FailingHostUnit = 0x%04X, FailingIOP = 0x%03X\n",
3168 msg[5] >> 16, msg[5] & 0xFFF);
/* Severity flags live in bits 16-19 of msg[4] */
3170 printk(KERN_ERR " Severity: 0x%02X ", (msg[4] >> 16) & 0xFF);
3171 if (msg[4] & (1<<16))
3172 printk("(FormatError), "
3173 "this msg can never be delivered/processed.\n");
3174 if (msg[4] & (1<<17))
3175 printk("(PathError), "
3176 "this msg can no longer be delivered/processed.\n");
3177 if (msg[4] & (1<<18))
3178 printk("(PathState), "
3179 "the system state does not allow delivery.\n");
3180 if (msg[4] & (1<<19))
3181 printk("(Congestion), resources temporarily not available;"
3182 "do not retry immediately.\n");
/*
 * i2o_report_common_status - print the symbolic name for a common
 * (class-independent) reply RequestStatus, or the raw hex value when
 * it falls outside the known table.
 */
3186 * Used for error reporting/debugging purposes.
3187 * Following reply status are common to all classes.
3189 void i2o_report_common_status(u8 req_status)
3191 static char *REPLY_STATUS[] = {
3194 "ABORT_NO_DATA_TRANSFER",
3195 "ABORT_PARTIAL_TRANSFER",
3197 "ERROR_NO_DATA_TRANSFER",
3198 "ERROR_PARTIAL_TRANSFER",
3199 "PROCESS_ABORT_DIRTY",
3200 "PROCESS_ABORT_NO_DATA_TRANSFER",
3201 "PROCESS_ABORT_PARTIAL_TRANSFER",
3202 "TRANSACTION_ERROR",
/* Out-of-range codes are printed numerically rather than indexed */
3206 if (req_status >= ARRAY_SIZE(REPLY_STATUS))
3207 printk("RequestStatus = %0#2x", req_status);
3209 printk("%s", REPLY_STATUS[req_status]);
/*
 * i2o_report_common_dsc - print the symbolic name for a common
 * DetailedStatus code (executive/utility/DDM classes and transaction
 * error replies), or the raw hex value when out of range.
 */
3213 * Used for error reporting/debugging purposes.
3214 * Following detailed status are valid for executive class,
3215 * utility class, DDM class and for transaction error replies.
3217 static void i2o_report_common_dsc(u16 detailed_status)
3219 static char *COMMON_DSC[] = {
3224 "REPLY_BUFFER_FULL",
3226 "INSUFFICIENT_RESOURCE_SOFT",
3227 "INSUFFICIENT_RESOURCE_HARD",
3229 "CHAIN_BUFFER_TOO_LARGE",
3230 "UNSUPPORTED_FUNCTION",
3233 "INAPPROPRIATE_FUNCTION",
3234 "INVALID_INITIATOR_ADDRESS",
3235 "INVALID_MESSAGE_FLAGS",
3237 "INVALID_PARAMETER",
3239 "INVALID_TARGET_ADDRESS",
3240 "MESSAGE_TOO_LARGE",
3241 "MESSAGE_TOO_SMALL",
3242 "MISSING_PARAMETER",
3246 "UNSUPPORTED_VERSION",
3248 "DEVICE_NOT_AVAILABLE"
/* Codes past the last known constant are printed numerically */
3251 if (detailed_status > I2O_DSC_DEVICE_NOT_AVAILABLE)
3252 printk(" / DetailedStatus = %0#4x.\n", detailed_status);
3254 printk(" / %s.\n", COMMON_DSC[detailed_status]);
/*
 * i2o_report_lan_dsc - print the symbolic name for a LAN-class
 * DetailedStatus code, or the raw hex value when out of range.
 */
3258 * Used for error reporting/debugging purposes
3260 static void i2o_report_lan_dsc(u16 detailed_status)
3262 static char *LAN_DSC[] = { // Lan detailed status code strings
3265 "DESTINATION_NOT_FOUND",
3271 "BAD_PACKET_DETECTED",
3274 "IOP_INTERNAL_ERROR",
3276 "INVALID_TRANSACTION_CONTEXT",
3277 "DEST_ADDRESS_DETECTED",
3278 "DEST_ADDRESS_OMITTED",
3279 "PARTIAL_PACKET_RETURNED",
3280 "TEMP_SUSPENDED_STATE", // last Lan detailed status code
3281 "INVALID_REQUEST" // general detailed status code
/* Codes past INVALID_REQUEST are printed numerically */
3284 if (detailed_status > I2O_DSC_INVALID_REQUEST)
3285 printk(" / %0#4x.\n", detailed_status);
3287 printk(" / %s.\n", LAN_DSC[detailed_status]);
/*
 * i2o_report_util_cmd - print the symbolic name of a utility-class
 * command code; unknown codes fall through to a raw hex print.
 */
3291 * Used for error reporting/debugging purposes
3293 static void i2o_report_util_cmd(u8 cmd)
3296 case I2O_CMD_UTIL_NOP:
3297 printk("UTIL_NOP, ");
3299 case I2O_CMD_UTIL_ABORT:
3300 printk("UTIL_ABORT, ");
3302 case I2O_CMD_UTIL_CLAIM:
3303 printk("UTIL_CLAIM, ");
3305 case I2O_CMD_UTIL_RELEASE:
3306 printk("UTIL_CLAIM_RELEASE, ");
3308 case I2O_CMD_UTIL_CONFIG_DIALOG:
3309 printk("UTIL_CONFIG_DIALOG, ");
3311 case I2O_CMD_UTIL_DEVICE_RESERVE:
3312 printk("UTIL_DEVICE_RESERVE, ");
3314 case I2O_CMD_UTIL_DEVICE_RELEASE:
3315 printk("UTIL_DEVICE_RELEASE, ");
3317 case I2O_CMD_UTIL_EVT_ACK:
3318 printk("UTIL_EVENT_ACKNOWLEDGE, ");
3320 case I2O_CMD_UTIL_EVT_REGISTER:
3321 printk("UTIL_EVENT_REGISTER, ");
3323 case I2O_CMD_UTIL_LOCK:
3324 printk("UTIL_LOCK, ");
3326 case I2O_CMD_UTIL_LOCK_RELEASE:
3327 printk("UTIL_LOCK_RELEASE, ");
3329 case I2O_CMD_UTIL_PARAMS_GET:
3330 printk("UTIL_PARAMS_GET, ");
3332 case I2O_CMD_UTIL_PARAMS_SET:
3333 printk("UTIL_PARAMS_SET, ");
3335 case I2O_CMD_UTIL_REPLY_FAULT_NOTIFY:
3336 printk("UTIL_REPLY_FAULT_NOTIFY, ");
/* default: unrecognised utility command, print raw code */
3339 printk("Cmd = %0#2x, ",cmd);
/*
 * i2o_report_exec_cmd - print the symbolic name of an executive-class
 * command code; unknown codes fall through to a raw hex print.
 */
3344 * Used for error reporting/debugging purposes
3346 static void i2o_report_exec_cmd(u8 cmd)
3349 case I2O_CMD_ADAPTER_ASSIGN:
3350 printk("EXEC_ADAPTER_ASSIGN, ");
3352 case I2O_CMD_ADAPTER_READ:
3353 printk("EXEC_ADAPTER_READ, ");
3355 case I2O_CMD_ADAPTER_RELEASE:
3356 printk("EXEC_ADAPTER_RELEASE, ");
3358 case I2O_CMD_BIOS_INFO_SET:
3359 printk("EXEC_BIOS_INFO_SET, ");
3361 case I2O_CMD_BOOT_DEVICE_SET:
3362 printk("EXEC_BOOT_DEVICE_SET, ");
3364 case I2O_CMD_CONFIG_VALIDATE:
3365 printk("EXEC_CONFIG_VALIDATE, ");
3367 case I2O_CMD_CONN_SETUP:
3368 printk("EXEC_CONN_SETUP, ");
3370 case I2O_CMD_DDM_DESTROY:
3371 printk("EXEC_DDM_DESTROY, ");
3373 case I2O_CMD_DDM_ENABLE:
3374 printk("EXEC_DDM_ENABLE, ");
3376 case I2O_CMD_DDM_QUIESCE:
3377 printk("EXEC_DDM_QUIESCE, ");
3379 case I2O_CMD_DDM_RESET:
3380 printk("EXEC_DDM_RESET, ");
3382 case I2O_CMD_DDM_SUSPEND:
3383 printk("EXEC_DDM_SUSPEND, ");
3385 case I2O_CMD_DEVICE_ASSIGN:
3386 printk("EXEC_DEVICE_ASSIGN, ");
3388 case I2O_CMD_DEVICE_RELEASE:
3389 printk("EXEC_DEVICE_RELEASE, ");
3391 case I2O_CMD_HRT_GET:
3392 printk("EXEC_HRT_GET, ");
3394 case I2O_CMD_ADAPTER_CLEAR:
3395 printk("EXEC_IOP_CLEAR, ");
3397 case I2O_CMD_ADAPTER_CONNECT:
3398 printk("EXEC_IOP_CONNECT, ");
3400 case I2O_CMD_ADAPTER_RESET:
3401 printk("EXEC_IOP_RESET, ");
3403 case I2O_CMD_LCT_NOTIFY:
3404 printk("EXEC_LCT_NOTIFY, ");
3406 case I2O_CMD_OUTBOUND_INIT:
3407 printk("EXEC_OUTBOUND_INIT, ");
3409 case I2O_CMD_PATH_ENABLE:
3410 printk("EXEC_PATH_ENABLE, ");
3412 case I2O_CMD_PATH_QUIESCE:
3413 printk("EXEC_PATH_QUIESCE, ");
3415 case I2O_CMD_PATH_RESET:
3416 printk("EXEC_PATH_RESET, ");
3418 case I2O_CMD_STATIC_MF_CREATE:
3419 printk("EXEC_STATIC_MF_CREATE, ");
3421 case I2O_CMD_STATIC_MF_RELEASE:
3422 printk("EXEC_STATIC_MF_RELEASE, ");
3424 case I2O_CMD_STATUS_GET:
3425 printk("EXEC_STATUS_GET, ");
3427 case I2O_CMD_SW_DOWNLOAD:
3428 printk("EXEC_SW_DOWNLOAD, ");
3430 case I2O_CMD_SW_UPLOAD:
3431 printk("EXEC_SW_UPLOAD, ");
3433 case I2O_CMD_SW_REMOVE:
3434 printk("EXEC_SW_REMOVE, ");
3436 case I2O_CMD_SYS_ENABLE:
3437 printk("EXEC_SYS_ENABLE, ");
3439 case I2O_CMD_SYS_MODIFY:
3440 printk("EXEC_SYS_MODIFY, ");
3442 case I2O_CMD_SYS_QUIESCE:
3443 printk("EXEC_SYS_QUIESCE, ");
3445 case I2O_CMD_SYS_TAB_SET:
3446 printk("EXEC_SYS_TAB_SET, ");
/* default: unrecognised executive command, print raw code */
3449 printk("Cmd = %#02x, ",cmd);
/*
 * i2o_report_lan_cmd - print the symbolic name of a LAN-class command
 * code; unknown codes fall through to a raw hex print.
 */
3454 * Used for error reporting/debugging purposes
3456 static void i2o_report_lan_cmd(u8 cmd)
3459 case LAN_PACKET_SEND:
3460 printk("LAN_PACKET_SEND, ");
3463 printk("LAN_SDU_SEND, ");
3465 case LAN_RECEIVE_POST:
3466 printk("LAN_RECEIVE_POST, ");
3469 printk("LAN_RESET, ");
3472 printk("LAN_SUSPEND, ");
/* default: unrecognised LAN command, print raw code */
3475 printk("Cmd = %0#2x, ",cmd);
/*
 * i2o_report_status - decode and log a reply message: command name,
 * RequestStatus and DetailedStatus.  Dispatches to the per-class
 * helpers above based on the command-code range and the owning
 * handler's class.
 * @severity: printk level/prefix string
 * @str: caller identification string
 * @msg: the reply message frame
 */
3480 * Used for error reporting/debugging purposes.
3481 * Report Cmd name, Request status, Detailed Status.
3483 void i2o_report_status(const char *severity, const char *str, u32 *msg)
3485 u8 cmd = (msg[1]>>24)&0xFF;
3486 u8 req_status = (msg[4]>>24)&0xFF;
3487 u16 detailed_status = msg[4]&0xFFFF;
/* msg[2] carries the initiator context; low bits index i2o_handlers */
3488 struct i2o_handler *h = i2o_handlers[msg[2] & (MAX_I2O_MODULES-1)];
3490 if (cmd == I2O_CMD_UTIL_EVT_REGISTER)
3491 return; // No status in this reply
3493 printk("%s%s: ", severity, str);
3495 if (cmd < 0x1F) // Utility cmd
3496 i2o_report_util_cmd(cmd);
3498 else if (cmd >= 0xA0 && cmd <= 0xEF) // Executive cmd
3499 i2o_report_exec_cmd(cmd);
3501 else if (h->class == I2O_CLASS_LAN && cmd >= 0x30 && cmd <= 0x3F)
3502 i2o_report_lan_cmd(cmd); // LAN cmd
3504 printk("Cmd = %0#2x, ", cmd); // Other cmds
/* MSG_FAIL means the transport failed; decode the preserved frame */
3506 if (msg[0] & MSG_FAIL) {
3507 i2o_report_fail_status(req_status, msg);
3511 i2o_report_common_status(req_status);
3513 if (cmd < 0x1F || (cmd >= 0xA0 && cmd <= 0xEF))
3514 i2o_report_common_dsc(detailed_status);
3515 else if (h->class == I2O_CLASS_LAN && cmd >= 0x30 && cmd <= 0x3F)
3516 i2o_report_lan_dsc(detailed_status);
3518 printk(" / DetailedStatus = %0#4x.\n", detailed_status);
/*
 * i2o_dump_message - dump every 32-bit word of an I2O message frame to
 * syslog.  The message size (in words) lives in the top half of msg[0].
 */
3521 /* Used to dump a message to syslog during debugging */
3522 void i2o_dump_message(u32 *msg)
3526 printk(KERN_INFO "Dumping I2O message size %d @ %p\n",
3527 msg[0]>>16&0xffff, msg);
3528 for(i = 0; i < ((msg[0]>>16)&0xffff); i++)
3529 printk(KERN_INFO " msg[%d] = %0#10x\n", i, msg[i]);
/*
 * i2o_reboot_event - reboot notifier callback.  On restart/halt/
 * power-off, call every registered OSM's reboot_notify hook and then
 * quiesce each controller so pending transactions reach permanent
 * store before the machine goes down.
 */
3534 * I2O reboot/shutdown notification.
3536 * - Call each OSM's reboot notifier (if one exists)
3537 * - Quiesce each IOP in the system
3539 * Each IOP has to be quiesced before we can ensure that the system
3540 * can be properly shutdown as a transaction that has already been
3541 * acknowledged still needs to be placed in permanent store on the IOP.
3542 * The SysQuiesce causes the IOP to force all HDMs to complete their
3543 * transactions before returning, so only at that point is it safe
3546 static int i2o_reboot_event(struct notifier_block *n, unsigned long code, void
3550 struct i2o_controller *c = NULL;
/* Only act on real shutdown transitions */
3552 if(code != SYS_RESTART && code != SYS_HALT && code != SYS_POWER_OFF)
3555 printk(KERN_INFO "Shutting down I2O system.\n");
3557 " This could take a few minutes if there are many devices attached\n");
/* Give every OSM a chance to flush its own state first */
3559 for(i = 0; i < MAX_I2O_MODULES; i++)
3561 if(i2o_handlers[i] && i2o_handlers[i]->reboot_notify)
3562 i2o_handlers[i]->reboot_notify();
3565 for(c = i2o_controller_chain; c; c = c->next)
3567 if(i2o_quiesce_controller(c))
3569 printk(KERN_WARNING "i2o: Could not quiesce %s.\n"
3570 "Verify setup on next system power up.\n",
3575 printk(KERN_INFO "I2O system down.\n");
3583 * i2o_pci_dispose - Free bus specific resources
3584 * @c: I2O controller
3586 * Disable interrupts and then free interrupt, I/O and mtrr resources
3587 * used by this controller. Called by the I2O core on unload.
3590 static void i2o_pci_dispose(struct i2o_controller *c)
/* Mask all IOP interrupts before tearing the IRQ down */
3592 I2O_IRQ_WRITE32(c,0xFFFFFFFF);
/* NOTE(review): assumes the IRQ was requested with @c as dev_id - see
 * the request_irq() call in i2o_pci_install(); verify they match. */
3594 free_irq(c->irq, c);
3595 iounmap(c->base_virt);
3597 iounmap(c->msg_virt);
/* mtrr_reg* <= 0 means no MTRR region was registered */
3600 if(c->mtrr_reg0 > 0)
3601 mtrr_del(c->mtrr_reg0, 0, 0);
3602 if(c->mtrr_reg1 > 0)
3603 mtrr_del(c->mtrr_reg1, 0, 0);
3608 * i2o_pci_interrupt - Bus specific interrupt handler
3609 * @irq: interrupt line
3612 * Handle an interrupt from a PCI based I2O controller. This turns out
3613 * to be rather simple. We keep the controller pointer in the cookie.
3616 static irqreturn_t i2o_pci_interrupt(int irq, void *dev_id, struct pt_regs *r)
/* dev_id is the i2o_controller registered in i2o_pci_install() */
3618 struct i2o_controller *c = dev_id;
3624 * i2o_pci_install - Install a PCI i2o controller
3625 * @dev: PCI device of the I2O controller
3627 * Install a PCI (or in theory AGP) i2o controller. Devices are
3628 * initialized, configured and registered with the i2o core subsystem. Be
3629 * very careful with ordering. There may be pending interrupts.
3631 * To Do: Add support for polled controllers
3634 int __init i2o_pci_install(struct pci_dev *dev)
3636 struct i2o_controller *c=kmalloc(sizeof(struct i2o_controller),
3640 unsigned long bar0_phys = 0;
3641 unsigned long bar1_phys = 0;
3642 unsigned long bar0_size = 0;
3643 unsigned long bar1_size = 0;
3649 printk(KERN_ERR "i2o: Insufficient memory to add controller.\n");
3652 memset(c, 0, sizeof(*c));
3660 #if BITS_PER_LONG == 64
3661 c->context_list_lock = SPIN_LOCK_UNLOCKED;
3665 * Cards that fall apart if you hit them with large I/O
/* Symbios FC920: limit I/O sizes (quirk flag set on dropped lines) */
3669 if(dev->vendor == PCI_VENDOR_ID_NCR && dev->device == 0x0630)
3672 printk(KERN_INFO "I2O: Symbios FC920 workarounds activated.\n");
3675 if(dev->subsystem_vendor == PCI_VENDOR_ID_PROMISE)
3678 printk(KERN_INFO "I2O: Promise workarounds activated.\n");
3682 * Cards that go bananas if you quiesce them before you reset
3686 if(dev->vendor == PCI_VENDOR_ID_DPT) {
3688 if(dev->device == 0xA511)
/* Walk the BARs; only memory BARs are usable for the IOP registers */
3694 /* Skip I/O spaces */
3695 if(!(pci_resource_flags(dev, i) & IORESOURCE_IO))
3699 bar0_phys = pci_resource_start(dev, i);
3700 bar0_size = pci_resource_len(dev, i);
3706 bar1_phys = pci_resource_start(dev, i);
3707 bar1_size = pci_resource_len(dev, i);
3715 printk(KERN_ERR "i2o: I2O controller has no memory regions defined.\n");
3721 /* Map the I2O controller */
3723 printk(KERN_INFO "i2o: PCI I2O controller at %08lX size=%ld\n", bar0_phys, bar0_size);
3725 printk(KERN_INFO "i2o: PCI I2O controller\n BAR0 at 0x%08lX size=%ld\n BAR1 at 0x%08lX size=%ld\n", bar0_phys, bar0_size, bar1_phys, bar1_size);
3727 bar0_virt = ioremap(bar0_phys, bar0_size);
3730 printk(KERN_ERR "i2o: Unable to map controller.\n");
3737 bar1_virt = ioremap(bar1_phys, bar1_size);
3740 printk(KERN_ERR "i2o: Unable to map controller.\n");
/* No second BAR: message unit shares BAR0 */
3746 bar1_virt = bar0_virt;
3747 bar1_phys = bar0_phys;
3748 bar1_size = bar0_size;
/* Standard I2O register offsets within BAR0 */
3751 c->irq_mask = bar0_virt+0x34;
3752 c->post_port = bar0_virt+0x40;
3753 c->reply_port = bar0_virt+0x44;
3755 c->base_phys = bar0_phys;
3756 c->base_virt = bar0_virt;
3757 c->msg_phys = bar1_phys;
3758 c->msg_virt = bar1_virt;
3761 * Enable Write Combining MTRR for IOP's memory region
3764 c->mtrr_reg0 = mtrr_add(c->base_phys, bar0_size, MTRR_TYPE_WRCOMB, 1);
3766 * If it is an INTEL i960 I/O processor then set the first 64K to
3767 * Uncacheable since the region contains the Messaging unit which
3768 * shouldn't be cached.
3771 if(dev->vendor == PCI_VENDOR_ID_INTEL || dev->vendor == PCI_VENDOR_ID_DPT)
3773 printk(KERN_INFO "I2O: MTRR workaround for Intel i960 processor\n");
3774 c->mtrr_reg1 = mtrr_add(c->base_phys, 65536, MTRR_TYPE_UNCACHABLE, 1);
3777 printk(KERN_INFO "i2o_pci: Error in setting MTRR_TYPE_UNCACHABLE\n");
/* NOTE(review): mtrr_del is called with (msg_phys, bar1_size) but the
 * region registered as mtrr_reg0 above was (base_phys, bar0_size) -
 * the base/size arguments look inconsistent; verify against mtrr_del's
 * contract (base/size are only used when reg is -1). */
3778 mtrr_del(c->mtrr_reg0, c->msg_phys, bar1_size);
3783 c->mtrr_reg1 = mtrr_add(c->msg_phys, bar1_size, MTRR_TYPE_WRCOMB, 1);
/* Mask interrupts until the handler is installed */
3787 I2O_IRQ_WRITE32(c,0xFFFFFFFF);
3789 i = i2o_install_controller(c);
3793 printk(KERN_ERR "i2o: Unable to install controller.\n");
3804 i=request_irq(dev->irq, i2o_pci_interrupt, SA_SHIRQ,
3808 printk(KERN_ERR "%s: unable to allocate interrupt %d.\n",
3811 i2o_delete_controller(c);
3819 printk(KERN_INFO "%s: Installed at IRQ%d\n", c->name, dev->irq);
/* Unmask interrupts now that everything is wired up */
3820 I2O_IRQ_WRITE32(c,0x0);
3826 * i2o_pci_scan - Scan the pci bus for controllers
3828 * Scan the PCI devices on the system looking for any device which is a
3829 * memory of the Intelligent, I2O class. We attempt to set up each such device
3830 * and register it with the core.
3832 * Returns the number of controllers registered
3834 * Note; Do not change this to a hot plug interface. I2O 1.5 itself
3835 * does not support hot plugging.
3838 int __init i2o_pci_scan(void)
3840 struct pci_dev *dev = NULL;
3843 printk(KERN_INFO "i2o: Checking for PCI I2O controllers...\n");
3845 while ((dev = pci_find_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL)
/* Accept I2O-class devices plus the DPT 0xA511 which misreports class */
3847 if((dev->class>>8)!=PCI_CLASS_INTELLIGENT_I2O &&
3848 (dev->vendor!=PCI_VENDOR_ID_DPT || dev->device!=0xA511))
/* Programming interface > 1 means a newer-than-1.5 I2O spec */
3851 if((dev->class>>8)==PCI_CLASS_INTELLIGENT_I2O &&
3852 (dev->class&0xFF)>1)
3854 printk(KERN_INFO "i2o: I2O Controller found but does not support I2O 1.5 (skipping).\n");
3857 if (pci_enable_device(dev))
3859 printk(KERN_INFO "i2o: I2O controller on bus %d at %d.\n",
3860 dev->bus->number, dev->devfn);
/* Require 32-bit DMA addressing */
3861 if(pci_set_dma_mask(dev, 0xffffffff))
3863 printk(KERN_WARNING "I2O controller on bus %d at %d : No suitable DMA available\n", dev->bus->number, dev->devfn);
3866 pci_set_master(dev);
3867 if(i2o_pci_install(dev)==0)
3871 printk(KERN_INFO "i2o: %d I2O controller%s found and installed.\n", count,
3873 return count?count:-ENODEV;
/*
 * i2o_core_init - module init: register the core message handler,
 * spawn the event-handling kernel thread, and hook the reboot
 * notifier once controllers are present.
 */
3876 static int i2o_core_init(void)
3878 printk(KERN_INFO "I2O Core - (C) Copyright 1999 Red Hat Software\n");
3879 if (i2o_install_handler(&i2o_core_handler) < 0)
3881 printk(KERN_ERR "i2o_core: Unable to install core handler.\nI2O stack not loaded!");
/* Remember our initiator context for matching core replies */
3885 core_context = i2o_core_handler.context;
3888 * Initialize event handling thread
3891 init_MUTEX_LOCKED(&evt_sem);
3892 evt_pid = kernel_thread(i2o_core_evt, &evt_reply, CLONE_SIGHAND);
3895 printk(KERN_ERR "I2O: Could not create event handler kernel thread\n");
3896 i2o_remove_handler(&i2o_core_handler);
3900 printk(KERN_INFO "I2O: Event thread created as pid %d\n", evt_pid);
3903 if(i2o_num_controllers)
3906 register_reboot_notifier(&i2o_reboot_notifier);
/*
 * i2o_core_exit - module exit: unhook the reboot notifier, kill the
 * event thread (waiting for it to signal completion), and remove the
 * core message handler.
 */
3911 static void i2o_core_exit(void)
3915 unregister_reboot_notifier(&i2o_reboot_notifier);
3917 if(i2o_num_controllers)
3921 * If this is shutdown time, the thread has already been killed
3924 printk("Terminating i2o threads...");
3925 stat = kill_proc(evt_pid, SIGKILL, 1);
3927 printk("waiting...\n");
/* evt_dead is completed by the event thread as it exits */
3928 wait_for_completion(&evt_dead);
3932 i2o_remove_handler(&i2o_core_handler);
3935 module_init(i2o_core_init);
3936 module_exit(i2o_core_exit);
3938 MODULE_PARM(verbose, "i");
3939 MODULE_PARM_DESC(verbose, "Verbose diagnostics");
3941 MODULE_AUTHOR("Red Hat Software");
3942 MODULE_DESCRIPTION("I2O Core");
3943 MODULE_LICENSE("GPL");
3945 EXPORT_SYMBOL(i2o_controller_chain);
3946 EXPORT_SYMBOL(i2o_num_controllers);
3947 EXPORT_SYMBOL(i2o_find_controller);
3948 EXPORT_SYMBOL(i2o_unlock_controller);
3949 EXPORT_SYMBOL(i2o_status_get);
3950 EXPORT_SYMBOL(i2o_install_handler);
3951 EXPORT_SYMBOL(i2o_remove_handler);
3952 EXPORT_SYMBOL(i2o_install_controller);
3953 EXPORT_SYMBOL(i2o_delete_controller);
3954 EXPORT_SYMBOL(i2o_run_queue);
3955 EXPORT_SYMBOL(i2o_claim_device);
3956 EXPORT_SYMBOL(i2o_release_device);
3957 EXPORT_SYMBOL(i2o_device_notify_on);
3958 EXPORT_SYMBOL(i2o_device_notify_off);
3959 EXPORT_SYMBOL(i2o_post_this);
3960 EXPORT_SYMBOL(i2o_post_wait);
3961 EXPORT_SYMBOL(i2o_post_wait_mem);
3962 EXPORT_SYMBOL(i2o_query_scalar);
3963 EXPORT_SYMBOL(i2o_set_scalar);
3964 EXPORT_SYMBOL(i2o_query_table);
3965 EXPORT_SYMBOL(i2o_clear_table);
3966 EXPORT_SYMBOL(i2o_row_add_table);
3967 EXPORT_SYMBOL(i2o_issue_params);
3968 EXPORT_SYMBOL(i2o_event_register);
3969 EXPORT_SYMBOL(i2o_event_ack);
3970 EXPORT_SYMBOL(i2o_report_status);
3971 EXPORT_SYMBOL(i2o_dump_message);
3972 EXPORT_SYMBOL(i2o_get_class_name);
3973 EXPORT_SYMBOL(i2o_context_list_add);
3974 EXPORT_SYMBOL(i2o_context_list_get);
3975 EXPORT_SYMBOL(i2o_context_list_remove);