2 * Core I2O structure management
4 * (C) Copyright 1999-2002 Red Hat Software
6 * Written by Alan Cox, Building Number Three Ltd
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version
11 * 2 of the License, or (at your option) any later version.
13 * A lot of the I2O message side code from this is taken from the
14 * Red Creek RCPCI45 adapter driver by Red Creek Communications
18 * Juha Sievänen <Juha.Sievanen@cs.Helsinki.FI>
19 * Auvo Häkkinen <Auvo.Hakkinen@cs.Helsinki.FI>
20 * Deepak Saxena <deepak@plexity.net>
21 * Boji T Kannanthanam <boji.t.kannanthanam@intel.com>
22 * Alan Cox <alan@redhat.com>:
23 * Ported to Linux 2.5.
24 * Markus Lidel <Markus.Lidel@shadowconnect.com>:
25 * Minor fixes for 2.6.
29 #include <linux/config.h>
30 #include <linux/module.h>
31 #include <linux/kernel.h>
32 #include <linux/pci.h>
34 #include <linux/i2o.h>
36 #include <linux/errno.h>
37 #include <linux/init.h>
38 #include <linux/slab.h>
39 #include <linux/spinlock.h>
40 #include <linux/smp_lock.h>
42 #include <linux/bitops.h>
43 #include <linux/wait.h>
44 #include <linux/delay.h>
45 #include <linux/timer.h>
46 #include <linux/interrupt.h>
47 #include <linux/sched.h>
48 #include <asm/semaphore.h>
49 #include <linux/completion.h>
50 #include <linux/workqueue.h>
53 #include <linux/reboot.h>
/* dprintk(): debug logging helper.
 * NOTE(review): two conflicting definitions appear back to back here; the
 * #ifdef DRIVERDEBUG / #else / #endif lines that select between them seem
 * to be missing from this excerpt -- confirm against the full file. */
63 #define dprintk(s, args...) printk(s, ## args)
65 #define dprintk(s, args...)
/* Registered OSM message handlers; the array index doubles as the
 * handler's initiator-context value. */
69 static struct i2o_handler *i2o_handlers[MAX_I2O_MODULES];
/* All installed IOPs: fast lookup by unit number plus a singly linked
 * chain for iteration. */
72 static struct i2o_controller *i2o_controllers[MAX_I2O_CONTROLLERS];
73 struct i2o_controller *i2o_controller_chain;
74 int i2o_num_controllers;
76 /* Initiator Context for Core message */
77 static int core_context;
79 /* Initialization && shutdown functions */
80 void i2o_sys_init(void);
81 static void i2o_sys_shutdown(void);
82 static int i2o_reset_controller(struct i2o_controller *);
83 static int i2o_reboot_event(struct notifier_block *, unsigned long , void *);
84 static int i2o_online_controller(struct i2o_controller *);
85 static int i2o_init_outbound_q(struct i2o_controller *);
86 static int i2o_post_outbound_messages(struct i2o_controller *);
/* Reply handler for messages the core itself initiated. */
89 static void i2o_core_reply(struct i2o_handler *, struct i2o_controller *,
90 struct i2o_message *);
92 /* Various helper functions */
93 static int i2o_lct_get(struct i2o_controller *);
94 static int i2o_lct_notify(struct i2o_controller *);
95 static int i2o_hrt_get(struct i2o_controller *);
97 static int i2o_build_sys_table(void);
98 static int i2o_systab_send(struct i2o_controller *c);
100 /* I2O core event handler */
101 static int i2o_core_evt(void *);
103 static int evt_running;
105 /* Dynamic LCT update handler */
106 static int i2o_dyn_lct(void *);
108 void i2o_report_controller_unit(struct i2o_controller *, struct i2o_device *);
110 static void i2o_pci_dispose(struct i2o_controller *c);
113 * I2O System Table. Contains information about
114 * all the IOPs in the system. Used to inform IOPs
115 * about each other's existence.
117 * sys_tbl_ver is the CurrentChangeIndicator that is
118 * used by IOPs to track changes.
120 static struct i2o_sys_tbl *sys_tbl;
121 static int sys_tbl_ind;
122 static int sys_tbl_len;
125 * This spin lock is used to keep a device from being
126 * added and deleted concurrently across CPUs or interrupts.
127 * This can occur when a user creates a device and immediately
128 * deletes it before the new_dev_notify() handler is called.
130 static spinlock_t i2o_dev_lock = SPIN_LOCK_UNLOCKED;
133 * Structures and definitions for synchronous message posting.
134 * See i2o_post_wait() for description.
136 struct i2o_post_wait_data
138 int *status; /* Pointer to status block on caller stack */
139 int *complete; /* Pointer to completion flag on caller stack */
140 u32 id; /* Unique identifier */
141 wait_queue_head_t *wq; /* Wake up for caller (NULL for dead) */
142 struct i2o_post_wait_data *next; /* Chain */
143 void *mem[2]; /* Memory blocks to recover on failure path */
144 dma_addr_t phys[2]; /* Physical address of blocks to recover */
145 u32 size[2]; /* Size of blocks to recover */
/* Singly linked list of outstanding post_wait transactions, protected
 * by post_wait_lock. */
148 static struct i2o_post_wait_data *post_wait_queue;
149 static u32 post_wait_id; // Unique ID for each post_wait
150 static spinlock_t post_wait_lock = SPIN_LOCK_UNLOCKED;
151 static void i2o_post_wait_complete(struct i2o_controller *, u32, int);
153 /* OSM descriptor handler */
154 static struct i2o_handler i2o_core_handler =
156 (void *)i2o_core_reply,
166 * Used when queueing a reply to be handled later
171 struct i2o_controller *iop;
172 u32 msg[MSG_FRAME_SIZE];
/* Scratch reply plus a fixed-size ring of queued event replies consumed
 * by the i2o_core_evt thread. */
174 static struct reply_info evt_reply;
175 static struct reply_info events[I2O_EVT_Q_LEN];
178 static int evt_q_len;
/* Ring-buffer index increment with wraparound at y. */
179 #define MODINC(x,y) ((x) = ((x) + 1) % (y))
182 * I2O configuration spinlock. This isnt a big deal for contention
183 * so we have one only
186 static DECLARE_MUTEX(i2o_configuration_lock);
189 * Event spinlock. Used to keep event queue sane and from
190 * handling multiple events simultaneously.
192 static spinlock_t i2o_evt_lock = SPIN_LOCK_UNLOCKED;
195 * Semaphore used to synchronize event handling thread with
199 static DECLARE_MUTEX(evt_sem);
200 static DECLARE_COMPLETION(evt_dead);
201 static DECLARE_WAIT_QUEUE_HEAD(evt_wait);
203 static struct notifier_block i2o_reboot_notifier =
216 #if BITS_PER_LONG == 64
218 * i2o_context_list_add - append an ptr to the context list and return a
219 * matching context id.
220 * @ptr: pointer to add to the context list
221 * @c: controller to which the context list belong
222 * returns context id, which could be used in the transaction context
225 * Because the context field in I2O is only 32-bit large, on 64-bit the
226 * pointer is to large to fit in the context field. The i2o_context_list
227 * functiones map pointers to context fields.
/*
 * Map a 64-bit kernel pointer to a 32-bit I2O transaction context id
 * (the I2O context field is only 32 bits wide).  Walks the controller's
 * context list, choosing an id larger than any id currently in use.
 * NOTE(review): the declarations of 'flags'/'context', several closing
 * braces, the kmalloc NULL check and the return statements appear to be
 * missing from this excerpt -- compare with the full file.
 */
229 u32 i2o_context_list_add(void *ptr, struct i2o_controller *c) {
231 struct i2o_context_list_element **entry = &c->context_list;
232 struct i2o_context_list_element *element;
235 spin_lock_irqsave(&c->context_list_lock, flags);
/* Skip used entries while tracking the highest id seen so far. */
236 while(*entry && ((*entry)->flags & I2O_CONTEXT_LIST_USED)) {
237 if((*entry)->context >= context)
238 context = (*entry)->context + 1;
239 entry = &((*entry)->next);
/* A context of 0 means the 32-bit id space wrapped: fatal. */
243 if(unlikely(!context)) {
244 spin_unlock_irqrestore(&c->context_list_lock, flags);
245 printk(KERN_EMERG "i2o_core: context list overflow\n");
/* NOTE(review): GFP_KERNEL allocation while context_list_lock is held
 * with interrupts off can sleep in atomic context -- verify. */
249 element = kmalloc(sizeof(struct i2o_context_list_element), GFP_KERNEL);
251 printk(KERN_EMERG "i2o_core: could not allocate memory for context list element\n");
254 element->context = context;
255 element->next = NULL;
261 element->flags = I2O_CONTEXT_LIST_USED;
263 spin_unlock_irqrestore(&c->context_list_lock, flags);
264 dprintk(KERN_DEBUG "i2o_core: add context to list %p -> %d\n", ptr, context);
269 * i2o_context_list_remove - remove a ptr from the context list and return
270 * the matching context id.
271 * @ptr: pointer to be removed from the context list
272 * @c: controller to which the context list belong
273 * returns context id, which could be used in the transaction context
/*
 * i2o_context_list_remove - drop the mapping for 'ptr' and return the
 * 32-bit context id it was registered under.  The entry is only marked
 * I2O_CONTEXT_LIST_DELETED here; actual reclamation happens elsewhere.
 * NOTE(review): declarations of 'flags'/'context', some braces and the
 * return statements appear missing from this excerpt.
 */
276 u32 i2o_context_list_remove(void *ptr, struct i2o_controller *c) {
277 struct i2o_context_list_element **entry = &c->context_list;
278 struct i2o_context_list_element *element;
282 spin_lock_irqsave(&c->context_list_lock, flags);
/* Linear search for the entry carrying this pointer. */
283 while(*entry && ((*entry)->ptr != ptr))
284 entry = &((*entry)->next);
286 if(unlikely(!*entry)) {
287 spin_unlock_irqrestore(&c->context_list_lock, flags);
288 printk(KERN_WARNING "i2o_core: could not remove nonexistent ptr %p\n", ptr);
294 context = element->context;
296 element->flags |= I2O_CONTEXT_LIST_DELETED;
298 spin_unlock_irqrestore(&c->context_list_lock, flags);
/* Fixed debug-message typo: "markt" -> "marked". */
299 dprintk(KERN_DEBUG "i2o_core: marked as deleted in context list %p -> %d\n", ptr, context);
304 * i2o_context_list_get - get a ptr from the context list and remove it
306 * @context: context id to which the pointer belong
307 * @c: controller to which the context list belong
308 * returns pointer to the matching context id
/*
 * i2o_context_list_get - translate a 32-bit context id back to the
 * kernel pointer it was registered with.  Beyond a minimum pool size
 * the entry is unlinked; otherwise it is recycled by clearing its
 * USED flag.  Returns NULL when the id is unknown (error path below).
 * NOTE(review): declarations of 'flags'/'count'/'ptr', some braces and
 * the return statements appear missing from this excerpt.
 */
310 void *i2o_context_list_get(u32 context, struct i2o_controller *c) {
311 struct i2o_context_list_element **entry = &c->context_list;
312 struct i2o_context_list_element *element;
317 spin_lock_irqsave(&c->context_list_lock, flags);
318 while(*entry && ((*entry)->context != context)) {
319 entry = &((*entry)->next);
323 if(unlikely(!*entry)) {
324 spin_unlock_irqrestore(&c->context_list_lock, flags);
325 printk(KERN_WARNING "i2o_core: context id %d not found\n", context);
/* Keep a minimum pool of entries; only unlink beyond that. */
331 if(count >= I2O_CONTEXT_LIST_MIN_LENGTH) {
332 *entry = (*entry)->next;
/* BUGFIX: clear the USED bit with bitwise NOT.  The original used
 * logical NOT (&= !I2O_CONTEXT_LIST_USED), which evaluates to &= 0 and
 * wiped every flag, including I2O_CONTEXT_LIST_DELETED. */
336 element->flags &= ~I2O_CONTEXT_LIST_USED;
339 spin_unlock_irqrestore(&c->context_list_lock, flags);
340 dprintk(KERN_DEBUG "i2o_core: get ptr from context list %d -> %p\n", context, ptr);
346 * I2O Core reply handler
/*
 * Reply handler for messages posted by the core itself: handles FAILED
 * frames, post_wait completions, event-registration replies (queued for
 * the i2oevtd thread) and LCT notifications.  Runs from the reply path,
 * hence the non-irqsave spin_lock on the event queue.
 * NOTE(review): interior lines (msg setup, braces, returns) are missing
 * from this excerpt.
 */
348 static void i2o_core_reply(struct i2o_handler *h, struct i2o_controller *c,
349 struct i2o_message *m)
353 u32 context = msg[2];
355 if (msg[0] & MSG_FAIL) // Fail bit is set
357 u32 *preserved_msg = (u32*)(c->msg_virt + msg[7]);
359 i2o_report_status(KERN_INFO, "i2o_core", msg);
360 i2o_dump_message(preserved_msg);
362 /* If the failed request needs special treatment,
363 * it should be done here. */
365 /* Release the preserved msg by resubmitting it as a NOP */
367 preserved_msg[0] = cpu_to_le32(THREE_WORD_MSG_SIZE | SGL_OFFSET_0);
368 preserved_msg[1] = cpu_to_le32(I2O_CMD_UTIL_NOP << 24 | HOST_TID << 12 | 0);
369 preserved_msg[2] = 0;
370 i2o_post_message(c, msg[7]);
372 /* If reply to i2o_post_wait failed, return causes a timeout */
378 i2o_report_status(KERN_INFO, "i2o_core", msg);
/* High bit of the initiator context marks a post_wait transaction. */
381 if(msg[2]&0x80000000) // Post wait message
384 status = (msg[4] & 0xFFFF);
386 status = I2O_POST_WAIT_OK;
388 i2o_post_wait_complete(c, context, status);
392 if(m->function == I2O_CMD_UTIL_EVT_REGISTER)
/* Copy (message size in words) << 2 bytes into the event ring.
 * NOTE(review): no visible bound against sizeof(events[].msg) --
 * verify msg[0]>>16 cannot exceed MSG_FRAME_SIZE words. */
394 memcpy(events[evt_in].msg, msg, (msg[0]>>16)<<2);
395 events[evt_in].iop = c;
397 spin_lock(&i2o_evt_lock);
398 MODINC(evt_in, I2O_EVT_Q_LEN);
/* Queue full: overwrite the oldest entry by advancing evt_out too. */
399 if(evt_q_len == I2O_EVT_Q_LEN)
400 MODINC(evt_out, I2O_EVT_Q_LEN);
403 spin_unlock(&i2o_evt_lock);
406 wake_up_interruptible(&evt_wait);
410 if(m->function == I2O_CMD_LCT_NOTIFY)
417 * If this happens, we want to dump the message to the syslog so
418 * it can be sent back to the card manufacturer by the end user
419 * to aid in debugging.
422 printk(KERN_WARNING "%s: Unsolicited message reply sent to core!"
423 "Message dumped to syslog\n",
425 i2o_dump_message(msg);
431 * i2o_install_handler - install a message handler
432 * @h: Handler structure
434 * Install an I2O handler - these handle the asynchronous messaging
435 * from the card once it has initialised. If the table of handlers is
436 * full then -ENOSPC is returned. On a success 0 is returned and the
437 * context field is set by the function. The structure is part of the
438 * system from this time onwards. It must not be freed until it has
/*
 * Register an OSM message handler in the first free i2o_handlers slot,
 * under the configuration lock.  The slot index becomes the handler's
 * context (per the kernel-doc above).  Returns 0 on success, -ENOSPC
 * when the table is full.
 * NOTE(review): slot assignment and return statements are missing from
 * this excerpt.
 */
442 int i2o_install_handler(struct i2o_handler *h)
445 down(&i2o_configuration_lock);
446 for(i=0;i<MAX_I2O_MODULES;i++)
448 if(i2o_handlers[i]==NULL)
452 up(&i2o_configuration_lock);
456 up(&i2o_configuration_lock);
461 * i2o_remove_handler - remove an i2o message handler
464 * Remove a message handler previously installed with i2o_install_handler.
465 * After this function returns the handler object can be freed or re-used
/*
 * Unregister a handler by clearing its slot (h->context is the index
 * assigned by i2o_install_handler); the caller may then free h.
 */
468 int i2o_remove_handler(struct i2o_handler *h)
470 i2o_handlers[h->context]=NULL;
476 * Each I2O controller has a chain of devices on it.
477 * Each device has a pointer to its LCT entry to be used
482 * i2o_install_device - attach a device to a controller
486 * Add a new device to an i2o controller. This can be called from
487 * non interrupt contexts only. It adds the device and marks it as
488 * unclaimed. The device memory becomes part of the kernel and must
489 * be uninstalled before being freed or reused. Zero is returned
/*
 * Attach device d to controller c under the configuration lock and
 * clear its manager (notification) slots.  Process context only.
 * NOTE(review): the list-linking statements are missing from this
 * excerpt.
 */
493 int i2o_install_device(struct i2o_controller *c, struct i2o_device *d)
497 down(&i2o_configuration_lock);
502 if (c->devices != NULL)
507 for(i = 0; i < I2O_MAX_MANAGERS; i++)
508 d->managers[i] = NULL;
510 up(&i2o_configuration_lock);
514 /* we need this version to call out of i2o_delete_controller */
/*
 * Unhook a device from its controller's list.  Lock-free variant used
 * from i2o_delete_controller (caller already holds the configuration
 * lock).  Notifies the owning driver and every registered manager.
 * NOTE(review): the owner checks, list unlink and returns are missing
 * from this excerpt; the trailing printk is the invalid-device path.
 */
516 int __i2o_delete_device(struct i2o_device *d)
518 struct i2o_device **p;
521 p=&(d->controller->devices);
524 * Hey we have a driver!
525 * Check to see if the driver wants us to notify it of
526 * device deletion. If it doesn't we assume that it
527 * is unsafe to delete a device with an owner and
532 if(d->owner->dev_del_notify)
534 dprintk(KERN_INFO "Device has owner, notifying\n");
535 d->owner->dev_del_notify(d->controller, d);
539 "Driver \"%s\" did not release device!\n", d->owner->name);
548 * Tell any other users who are talking to this device
549 * that it's going away. We assume that everything works.
551 for(i=0; i < I2O_MAX_MANAGERS; i++)
553 if(d->managers[i] && d->managers[i]->dev_del_notify)
554 d->managers[i]->dev_del_notify(d->controller, d);
570 printk(KERN_ERR "i2o_delete_device: passed invalid device.\n");
575 * i2o_delete_device - remove an i2o device
576 * @d: device to remove
578 * This function unhooks a device from a controller. The device
579 * will not be unhooked if it has an owner who does not wish to free
580 * it, or if the owner lacks a dev_del_notify function. In that case
581 * -EBUSY is returned. On success 0 is returned. Other errors cause
582 * negative errno values to be returned
/*
 * Locked wrapper: take the configuration lock and delegate to
 * __i2o_delete_device (see the kernel-doc above for the -EBUSY
 * owner-refusal contract).
 */
585 int i2o_delete_device(struct i2o_device *d)
589 down(&i2o_configuration_lock);
595 ret = __i2o_delete_device(d);
597 up(&i2o_configuration_lock);
603 * i2o_install_controller - attach a controller
606 * Add a new controller to the i2o layer. This can be called from
607 * non interrupt contexts only. It adds the controller and marks it as
608 * unused with no devices. If the tables are full or memory allocations
609 * fail then a negative errno code is returned. On success zero is
610 * returned and the controller is bound to the system. The structure
611 * must not be freed or reused until being uninstalled.
/*
 * Bind a controller into the first free i2o_controllers slot: allocate
 * its 8K dynamic-LCT DMA buffer, link it onto the controller chain,
 * name it "i2o/iop<N>" and initialise its LCT semaphore (locked).  The
 * trailing printk/up is the table-full error path.
 * NOTE(review): allocation failure handling and returns are missing
 * from this excerpt.
 */
614 int i2o_install_controller(struct i2o_controller *c)
617 down(&i2o_configuration_lock);
618 for(i=0;i<MAX_I2O_CONTROLLERS;i++)
620 if(i2o_controllers[i]==NULL)
622 c->dlct = (i2o_lct*)pci_alloc_consistent(c->pdev, 8192, &c->dlct_phys);
625 up(&i2o_configuration_lock);
628 i2o_controllers[i]=c;
/* Push onto the head of the global controller chain. */
630 c->next=i2o_controller_chain;
631 i2o_controller_chain=c;
633 c->page_frame = NULL;
637 c->status_block = NULL;
638 sprintf(c->name, "i2o/iop%d", i);
639 i2o_num_controllers++;
640 init_MUTEX_LOCKED(&c->lct_sem);
641 up(&i2o_configuration_lock);
645 printk(KERN_ERR "No free i2o controller slots.\n");
646 up(&i2o_configuration_lock);
651 * i2o_delete_controller - delete a controller
654 * Remove an i2o controller from the system. If the controller or its
655 * devices are busy then -EBUSY is returned. On a failure a negative
656 * errno code is returned. On success zero is returned.
/*
 * Tear down a controller: deregister events, refuse if it still has
 * users (-EBUSY), delete its devices, stop the per-IOP LCT thread,
 * unchain it, reset the IOP and free all DMA/bounce buffers.
 * NOTE(review): many interior lines (error returns, loop bodies, chain
 * unlink) are missing from this excerpt.
 */
659 int i2o_delete_controller(struct i2o_controller *c)
661 struct i2o_controller **p;
666 dprintk(KERN_INFO "Deleting controller %s\n", c->name);
669 * Clear event registration as this can cause weird behavior
671 if(c->status_block->iop_state == ADAPTER_STATE_OPERATIONAL)
672 i2o_event_register(c, core_context, 0, 0, 0);
674 down(&i2o_configuration_lock);
675 if((users=atomic_read(&c->users)))
677 dprintk(KERN_INFO "I2O: %d users for controller %s\n", users,
679 up(&i2o_configuration_lock);
684 if(__i2o_delete_device(c->devices)<0)
686 /* Shouldn't happen */
687 I2O_IRQ_WRITE32(c, 0xFFFFFFFF);
689 up(&i2o_configuration_lock);
695 * If this is shutdown time, the thread's already been killed
698 stat = kill_proc(c->lct_pid, SIGKILL, 1);
/* Poll up to ~10s (10ms steps, presumably) for the LCT thread to exit.
 * TODO(review): confirm the missing schedule_timeout interval. */
700 int count = 10 * 100;
701 while(c->lct_running && --count) {
702 current->state = TASK_INTERRUPTIBLE;
708 "%s: LCT thread still running!\n",
713 p=&i2o_controller_chain;
719 /* Ask the IOP to switch to RESET state */
720 i2o_reset_controller(c);
726 up(&i2o_configuration_lock);
/* Release inbound frame pool and all coherent DMA allocations. */
730 pci_unmap_single(c->pdev, c->page_frame_map, MSG_POOL_SIZE, PCI_DMA_FROMDEVICE);
731 kfree(c->page_frame);
734 pci_free_consistent(c->pdev, c->hrt_len, c->hrt, c->hrt_phys);
736 pci_free_consistent(c->pdev, c->lct->table_size << 2, c->lct, c->lct_phys);
738 pci_free_consistent(c->pdev, sizeof(i2o_status_block), c->status_block, c->status_block_phys);
740 pci_free_consistent(c->pdev, 8192, c->dlct, c->dlct_phys);
742 i2o_controllers[c->unit]=NULL;
/* Save the name for the post-free debug print below. */
743 memcpy(name, c->name, strlen(c->name)+1);
745 dprintk(KERN_INFO "%s: Deleted from controller chain.\n", name);
747 i2o_num_controllers--;
752 up(&i2o_configuration_lock);
753 printk(KERN_ERR "i2o_delete_controller: bad pointer!\n");
758 * i2o_unlock_controller - unlock a controller
759 * @c: controller to unlock
761 * Take a lock on an i2o controller. This prevents it being deleted.
762 * i2o controllers are not refcounted so a deletion of an in use device
763 * will fail, not take affect on the last dereference.
/* Drop one reference taken by i2o_find_controller (see kernel-doc
 * above; "unlock" here is a refcount decrement, not a mutex). */
766 void i2o_unlock_controller(struct i2o_controller *c)
768 atomic_dec(&c->users);
772 * i2o_find_controller - return a locked controller
773 * @n: controller number
775 * Returns a pointer to the controller object. The controller is locked
776 * on return. NULL is returned if the controller is not found.
/*
 * Look up controller n, bumping its user count under the configuration
 * lock so it cannot be deleted while in use; pair with
 * i2o_unlock_controller.  NULL for out-of-range or empty slots.
 * NOTE(review): the NULL-slot check and returns are missing from this
 * excerpt.
 */
779 struct i2o_controller *i2o_find_controller(int n)
781 struct i2o_controller *c;
783 if(n<0 || n>=MAX_I2O_CONTROLLERS)
786 down(&i2o_configuration_lock);
787 c=i2o_controllers[n];
789 atomic_inc(&c->users);
790 up(&i2o_configuration_lock);
795 * i2o_issue_claim - claim or release a device
797 * @c: controller to claim for
799 * @type: type of claim
801 * Issue I2O UTIL_CLAIM and UTIL_RELEASE messages. The message to be sent
802 * is set by cmd. The tid is the task id of the object to claim and the
803 * type is the claim type (see the i2o standard)
805 * Zero is returned on success.
/*
 * Build a five-word UTIL_CLAIM/UTIL_RELEASE frame for tid and post it
 * synchronously with a 60 second timeout (see kernel-doc above).
 * NOTE(review): the msg[] declaration and words 2..4 are missing from
 * this excerpt.
 */
808 static int i2o_issue_claim(u32 cmd, struct i2o_controller *c, int tid, u32 type)
812 msg[0] = FIVE_WORD_MSG_SIZE | SGL_OFFSET_0;
813 msg[1] = cmd << 24 | HOST_TID<<12 | tid;
817 return i2o_post_wait(c, msg, sizeof(msg), 60);
821 * i2o_claim_device - claim a device for use by an OSM
822 * @d: device to claim
823 * @h: handler for this device
825 * Do the leg work to assign a device to a given OSM on Linux. The
826 * kernel updates the internal handler data for the device and then
827 * performs an I2O claim for the device, attempting to claim the
828 * device as primary. If the attempt fails a negative errno code
829 * is returned. On success zero is returned.
/*
 * Claim device d for handler h as primary user: record ownership under
 * the configuration lock, then issue UTIL_CLAIM (see kernel-doc above).
 * The first printk is the already-owned error path.
 * NOTE(review): ownership assignment and returns are missing from this
 * excerpt.
 */
832 int i2o_claim_device(struct i2o_device *d, struct i2o_handler *h)
836 down(&i2o_configuration_lock);
838 printk(KERN_INFO "Device claim called, but dev already owned by %s!",
845 if(i2o_issue_claim(I2O_CMD_UTIL_CLAIM ,d->controller,d->lct_data.tid,
852 up(&i2o_configuration_lock);
857 * i2o_release_device - release a device that the OSM is using
858 * @d: device to claim
859 * @h: handler for this device
861 * Drop a claim by an OSM on a given I2O device. The handler is cleared
862 * and 0 is returned on success.
864 * AC - some devices seem to want to refuse an unclaim until they have
865 * finished internal processing. It makes sense since you don't want a
866 * new device to go reconfiguring the entire system until you are done.
867 * Thus we are prepared to wait briefly.
/*
 * Release a claim held by handler h on device d.  Some IOPs refuse an
 * unclaim while busy (see kernel-doc above), so retry UTIL_RELEASE up
 * to 10 times, sleeping ~1s between attempts.
 * NOTE(review): owner clearing and returns are missing from this
 * excerpt.
 */
870 int i2o_release_device(struct i2o_device *d, struct i2o_handler *h)
875 down(&i2o_configuration_lock);
877 printk(KERN_INFO "Claim release called, but not owned by %s!\n",
879 up(&i2o_configuration_lock);
883 for(tries=0;tries<10;tries++)
888 * If the controller takes a nonblocking approach to
889 * releases we have to sleep/poll for a few times.
892 if((err=i2o_issue_claim(I2O_CMD_UTIL_RELEASE, d->controller, d->lct_data.tid, I2O_CLAIM_PRIMARY)) )
895 current->state = TASK_UNINTERRUPTIBLE;
896 schedule_timeout(HZ);
904 up(&i2o_configuration_lock);
909 * i2o_device_notify_on - Enable deletion notifiers
910 * @d: device for notification
911 * @h: handler to install
913 * Called by OSMs to let the core know that they want to be
914 * notified if the given device is deleted from the system.
/*
 * Register handler h in a free managers[] slot of d so it will receive
 * dev_del_notify callbacks (see kernel-doc above).  Fails when all
 * I2O_MAX_MANAGERS slots are taken.
 * NOTE(review): slot assignment and returns are missing from this
 * excerpt.
 */
917 int i2o_device_notify_on(struct i2o_device *d, struct i2o_handler *h)
921 if(d->num_managers == I2O_MAX_MANAGERS)
924 for(i = 0; i < I2O_MAX_MANAGERS; i++)
939 * i2o_device_notify_off - Remove deletion notifiers
940 * @d: device for notification
941 * @h: handler to remove
943 * Called by OSMs to let the core know that they no longer
944 * are interested in the fate of the given device.
/*
 * Remove handler h from d's managers[] notification slots (see
 * kernel-doc above).
 * NOTE(review): num_managers decrement and returns are missing from
 * this excerpt.
 */
946 int i2o_device_notify_off(struct i2o_device *d, struct i2o_handler *h)
950 for(i=0; i < I2O_MAX_MANAGERS; i++)
952 if(d->managers[i] == h)
954 d->managers[i] = NULL;
964 * i2o_event_register - register interest in an event
965 * @c: Controller to register interest with
967 * @init_context: initiator context to use with this notifier
968 * @tr_context: transaction context to use with this notifier
969 * @evt_mask: mask of events
971 * Create and posts an event registration message to the task. No reply
972 * is waited for, or expected. Errors in posting will be reported.
975 int i2o_event_register(struct i2o_controller *c, u32 tid,
976 u32 init_context, u32 tr_context, u32 evt_mask)
978 u32 msg[5]; // Not performance critical, so we just
979 // i2o_post_this it instead of building it
982 msg[0] = FIVE_WORD_MSG_SIZE|SGL_OFFSET_0;
983 msg[1] = I2O_CMD_UTIL_EVT_REGISTER<<24 | HOST_TID<<12 | tid;
984 msg[2] = init_context;
988 return i2o_post_this(c, msg, sizeof(msg));
992 * i2o_event_ack - acknowledge an event
994 * @msg: pointer to the UTIL_EVENT_REGISTER reply we received
996 * We just take a pointer to the original UTIL_EVENT_REGISTER reply
997 * message and change the function code since that's what spec
998 * describes an EventAck message looking like.
/*
 * Acknowledge an event: reuse the UTIL_EVT_REGISTER reply in place by
 * rewriting its function code to UTIL_EVT_ACK, then post it
 * synchronously (2 second timeout).  See kernel-doc above.
 */
1001 int i2o_event_ack(struct i2o_controller *c, u32 *msg)
1003 struct i2o_message *m = (struct i2o_message *)msg;
1005 m->function = I2O_CMD_UTIL_EVT_ACK;
1007 return i2o_post_wait(c, msg, m->size * 4, 2);
1011 * Core event handler. Runs as a separate thread and is woken
1012 * up whenever there is an Executive class event.
/*
 * i2oevtd kernel thread: sleeps on evt_sem, dequeues one reply_info at
 * a time from the events[] ring and dispatches on the executive event
 * code in msg[4].  Exits via complete_and_exit when interrupted
 * (shutdown).
 * NOTE(review): loop braces, switch header and several case bodies are
 * missing from this excerpt.
 */
1014 static int i2o_core_evt(void *reply_data)
1016 struct reply_info *reply = (struct reply_info *) reply_data;
1017 u32 *msg = reply->msg;
1018 struct i2o_controller *c = NULL;
1019 unsigned long flags;
1021 daemonize("i2oevtd");
1022 allow_signal(SIGKILL);
1028 if(down_interruptible(&evt_sem))
1030 dprintk(KERN_INFO "I2O event thread dead\n");
1031 printk("exiting...");
1033 complete_and_exit(&evt_dead, 0);
1037 * Copy the data out of the queue so that we don't have to lock
1038 * around the whole function and just around the qlen update
1040 spin_lock_irqsave(&i2o_evt_lock, flags);
1041 memcpy(reply, &events[evt_out], sizeof(struct reply_info));
1042 MODINC(evt_out, I2O_EVT_Q_LEN);
1044 spin_unlock_irqrestore(&i2o_evt_lock, flags);
1047 dprintk(KERN_INFO "I2O IRTOS EVENT: iop%d, event %#10x\n", c->unit, msg[4]);
1050 * We do not attempt to delete/quiesce/etc. the controller if
1051 * some sort of error indication occurs. We may want to do
1052 * so in the future, but for now we just let the user deal with
1053 * it. One reason for this is that what to do with an error
1054 * or when to send what error is not really agreed on, so
1055 * we get errors that may not be fatal but just look like they
1056 * are...so let the user deal with it.
1060 case I2O_EVT_IND_EXEC_RESOURCE_LIMITS:
1061 printk(KERN_ERR "%s: Out of resources\n", c->name);
1064 case I2O_EVT_IND_EXEC_POWER_FAIL:
1065 printk(KERN_ERR "%s: Power failure\n", c->name);
1068 case I2O_EVT_IND_EXEC_HW_FAIL:
/* Table of failure-code strings; msg[5] indexes it when in range. */
1076 "Code Execution Exception",
1077 "Watchdog Timer Expired"
1081 printk(KERN_ERR "%s: Hardware Failure: %s\n",
1082 c->name, fail[msg[5]]);
1084 printk(KERN_ERR "%s: Unknown Hardware Failure\n", c->name);
1090 * New device created
1091 * - Create a new i2o_device entry
1092 * - Inform all interested drivers about this device's existence
1094 case I2O_EVT_IND_EXEC_NEW_LCT_ENTRY:
1096 struct i2o_device *d = (struct i2o_device *)
1097 kmalloc(sizeof(struct i2o_device), GFP_KERNEL);
1101 printk(KERN_EMERG "i2oevtd: out of memory\n");
/* The new LCT entry rides in the event payload at msg[5]. */
1104 memcpy(&d->lct_data, &msg[5], sizeof(i2o_lct_entry));
1110 i2o_report_controller_unit(c, d);
1111 i2o_install_device(c,d);
/* Notify every handler whose class mask matches the new device. */
1113 for(i = 0; i < MAX_I2O_MODULES; i++)
1115 if(i2o_handlers[i] &&
1116 i2o_handlers[i]->new_dev_notify &&
1117 (i2o_handlers[i]->class&d->lct_data.class_id))
1119 spin_lock(&i2o_dev_lock);
1120 i2o_handlers[i]->new_dev_notify(c,d);
1121 spin_unlock(&i2o_dev_lock);
1129 * LCT entry for a device has been modified, so update it
1132 case I2O_EVT_IND_EXEC_MODIFIED_LCT:
1134 struct i2o_device *d;
1135 i2o_lct_entry *new_lct = (i2o_lct_entry *)&msg[5];
1137 for(d = c->devices; d; d = d->next)
1139 if(d->lct_data.tid == new_lct->tid)
1141 memcpy(&d->lct_data, new_lct, sizeof(i2o_lct_entry));
1148 case I2O_EVT_IND_CONFIGURATION_FLAG:
1149 printk(KERN_WARNING "%s requires user configuration\n", c->name);
1152 case I2O_EVT_IND_GENERAL_WARNING:
1153 printk(KERN_WARNING "%s: Warning notification received!"
1154 "Check configuration for errors!\n", c->name);
1157 case I2O_EVT_IND_EVT_MASK_MODIFIED:
1158 /* Well I guess that was us hey .. */
1162 printk(KERN_WARNING "%s: No handler for event (0x%08x)\n", c->name, msg[4]);
1171 * Dynamic LCT update. This compares the LCT with the currently
1172 * installed devices to check for device deletions..this needed b/c there
1173 * is no DELETED_LCT_ENTRY EventIndicator for the Executive class so
1174 * we can't just have the event handler do this...annoying
1176 * This is a hole in the spec that will hopefully be fixed someday.
/*
 * Per-IOP "iop%d_lctd" kernel thread.  Woken via c->lct_sem whenever a
 * dynamic LCT arrives; diffs the new table against the installed device
 * list to detect deletions (the spec has no DELETED_LCT_ENTRY event,
 * per the comment above), then copies the dynamic LCT over the public
 * one, reallocating if it grew.
 * NOTE(review): loop braces, the entries scaling and returns are
 * missing from this excerpt.
 */
1178 static int i2o_dyn_lct(void *foo)
1180 struct i2o_controller *c = (struct i2o_controller *)foo;
1181 struct i2o_device *d = NULL;
1182 struct i2o_device *d1 = NULL;
1188 daemonize("iop%d_lctd", c->unit);
1189 allow_signal(SIGKILL);
1195 down_interruptible(&c->lct_sem);
1196 if(signal_pending(current))
1198 dprintk(KERN_ERR "%s: LCT thread dead\n", c->name);
1203 entries = c->dlct->table_size;
1207 dprintk(KERN_INFO "%s: Dynamic LCT Update\n",c->name);
1208 dprintk(KERN_INFO "%s: Dynamic LCT contains %d entries\n", c->name, entries);
1212 printk(KERN_INFO "%s: Empty LCT???\n", c->name);
1217 * Loop through all the devices on the IOP looking for their
1218 * LCT data in the LCT. We assume that TIDs are not repeated.
1219 * as that is the only way to really tell. It's been confirmed
1220 * by the IRTOS vendor(s?) that TIDs are not reused until they
1221 * wrap around (4096), and I doubt a system will be up long enough
1222 * to create/delete that many devices.
1224 for(d = c->devices; d; )
1229 for(i = 0; i < entries; i++)
1231 if(d->lct_data.tid == c->dlct->lct_entry[i].tid)
/* TID no longer present in the new LCT: the device was deleted. */
1239 dprintk(KERN_INFO "i2o_core: Deleted device!\n");
1240 spin_lock(&i2o_dev_lock);
1241 i2o_delete_device(d);
1242 spin_unlock(&i2o_dev_lock);
1248 * Tell LCT to renotify us next time there is a change
1253 * Copy new LCT into public LCT
1255 * Possible race if someone is reading LCT while we are copying
1256 * over it. If this happens, we'll fix it then. but I doubt that
1257 * the LCT will get updated often enough or will get read by
1258 * a user often enough to worry.
1260 if(c->lct->table_size < c->dlct->table_size)
1264 c->lct = pci_alloc_consistent(c->pdev, c->dlct->table_size<<2, &phys);
1267 printk(KERN_ERR "%s: No memory for LCT!\n", c->name);
1271 pci_free_consistent(tmp, c->lct->table_size << 2, c->lct, c->lct_phys);
1274 memcpy(c->lct, c->dlct, c->dlct->table_size<<2);
1281 * i2o_run_queue - process pending events on a controller
1282 * @c: controller to process
1284 * This is called by the bus specific driver layer when an interrupt
1285 * or poll of this card interface is desired.
/*
 * Drain the controller's outbound (reply) queue: read message frame
 * offsets from the reply register until 0xFFFFFFFF (empty), map each to
 * a kernel virtual frame, and dispatch to the handler selected by the
 * low bits of the initiator context.  Called from the IRQ/poll path.
 * NOTE(review): loop braces, the handler dispatch call and the frame
 * flush are missing from this excerpt.
 */
1288 void i2o_run_queue(struct i2o_controller *c)
1290 struct i2o_message *m;
1295 * Old 960 steppings had a bug in the I2O unit that caused
1296 * the queue to appear empty when it wasn't.
1298 if((mv=I2O_REPLY_READ32(c))==0xFFFFFFFF)
1299 mv=I2O_REPLY_READ32(c);
1301 while(mv!=0xFFFFFFFF)
1303 struct i2o_handler *i;
1304 /* Map the message from the page frame map to kernel virtual */
1305 /* m=(struct i2o_message *)(mv - (unsigned long)c->page_frame_map + (unsigned long)c->page_frame); */
1306 m=(struct i2o_message *)bus_to_virt(mv);
1310 * Ensure this message is seen coherently but cachably by
1314 pci_dma_sync_single_for_cpu(c->pdev, c->page_frame_map, MSG_FRAME_SIZE, PCI_DMA_FROMDEVICE);
/* Handler index = low bits of the initiator context we stamped in. */
1320 i=i2o_handlers[m->initiator_context&(MAX_I2O_MODULES-1)];
1325 printk(KERN_WARNING "I2O: Spurious reply to handler %d\n",
1326 m->initiator_context&(MAX_I2O_MODULES-1));
1328 i2o_flush_reply(c,mv);
1331 /* That 960 bug again... */
1332 if((mv=I2O_REPLY_READ32(c))==0xFFFFFFFF)
1333 mv=I2O_REPLY_READ32(c);
1339 * i2o_get_class_name - do i2o class name lookup
1340 * @class: class number
1342 * Return a descriptive string for an i2o class
/*
 * Map an I2O class code to a static descriptive string via a
 * switch-selected index into i2o_class_name[] (see kernel-doc above).
 * NOTE(review): several table entries, the idx assignments and the
 * default case are missing from this excerpt.
 */
1345 const char *i2o_get_class_name(int class)
1348 static char *i2o_class_name[] = {
1350 "Device Driver Module",
1355 "Fibre Channel Port",
1356 "Fibre Channel Device",
1360 "Floppy Controller",
1362 "Secondary Bus Port",
1363 "Peer Transport Agent",
1370 case I2O_CLASS_EXECUTIVE:
1374 case I2O_CLASS_RANDOM_BLOCK_STORAGE:
1376 case I2O_CLASS_SEQUENTIAL_STORAGE:
1382 case I2O_CLASS_FIBRE_CHANNEL_PORT:
1384 case I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL:
1386 case I2O_CLASS_SCSI_PERIPHERAL:
1388 case I2O_CLASS_ATE_PORT:
1390 case I2O_CLASS_ATE_PERIPHERAL:
1392 case I2O_CLASS_FLOPPY_CONTROLLER:
1394 case I2O_CLASS_FLOPPY_DEVICE:
1396 case I2O_CLASS_BUS_ADAPTER_PORT:
1398 case I2O_CLASS_PEER_TRANSPORT_AGENT:
1400 case I2O_CLASS_PEER_TRANSPORT:
1404 return i2o_class_name[idx];
1409 * i2o_wait_message - obtain an i2o message from the IOP
1413 * This function waits up to 5 seconds for a message slot to be
1414 * available. If no message is available it prints an error message
1415 * that is expected to be what the message will be used for (eg
1416 * "get_status"). 0xFFFFFFFF is returned on a failure.
1418 * On a success the message is returned. This is the physical page
1419 * frame offset address from the read port. (See the i2o spec)
/*
 * Spin on the inbound post register for up to 5 seconds waiting for a
 * free message frame; 'why' labels the timeout diagnostic (see
 * kernel-doc above; 0xFFFFFFFF means no frame available).
 * NOTE(review): the 'time' initialisation and returns are missing from
 * this excerpt.
 */
1422 u32 i2o_wait_message(struct i2o_controller *c, char *why)
1426 while((m=I2O_POST_READ32(c))==0xFFFFFFFF)
1428 if((jiffies-time)>=5*HZ)
1430 dprintk(KERN_ERR "%s: Timeout waiting for message frame to send %s.\n",
1441 * i2o_report_controller_unit - print information about a tid
1445 * Dump an information block associated with a given unit (TID). The
1446 * tables are read and a block of text is output to printk that is
1447 * formatted intended for the user.
/*
 * Dump a human-readable description of one TID: vendor/device/
 * description/revision scalars from parameter group 0xF100, then class,
 * subclass and single-letter device flags (see kernel-doc above).
 * NOTE(review): buffer declarations and some printk terminators are
 * missing from this excerpt.
 */
1450 void i2o_report_controller_unit(struct i2o_controller *c, struct i2o_device *d)
1455 int unit = d->lct_data.tid;
1460 printk(KERN_INFO "Target ID %d.\n", unit);
1461 if((ret=i2o_query_scalar(c, unit, 0xF100, 3, buf, 16))>=0)
1464 printk(KERN_INFO " Vendor: %s\n", buf);
1466 if((ret=i2o_query_scalar(c, unit, 0xF100, 4, buf, 16))>=0)
1469 printk(KERN_INFO " Device: %s\n", buf);
1471 if(i2o_query_scalar(c, unit, 0xF100, 5, buf, 16)>=0)
1474 printk(KERN_INFO " Description: %s\n", buf);
1476 if((ret=i2o_query_scalar(c, unit, 0xF100, 6, buf, 8))>=0)
1479 printk(KERN_INFO " Rev: %s\n", buf);
1482 printk(KERN_INFO " Class: ");
1483 sprintf(str, "%-21s", i2o_get_class_name(d->lct_data.class_id));
1484 printk("%s\n", str);
1486 printk(KERN_INFO " Subclass: 0x%04X\n", d->lct_data.sub_class);
1487 printk(KERN_INFO " Flags: ");
1489 if(d->lct_data.device_flags&(1<<0))
1490 printk("C"); // ConfigDialog requested
1491 if(d->lct_data.device_flags&(1<<1))
1492 printk("U"); // Multi-user capable
1493 if(!(d->lct_data.device_flags&(1<<4)))
1494 printk("P"); // Peer service enabled!
1495 if(!(d->lct_data.device_flags&(1<<5)))
1496 printk("M"); // Mgmt service enabled!
1503 * Parse the hardware resource table. Right now we print it out
1504 * and don't do a lot with it. We should collate these and then
1505 * interact with the Linux resource allocation block.
1507 * Lets prove we can read it first eh ?
1509 * This is full of endianisms!
/*
 * Walk the controller's Hardware Resource Table (already fetched into
 * c->hrt) and print one line per entry: adapter id, TID, state flags,
 * then a bus-type-specific decode of the bus info bytes.  Debug/
 * informational only — nothing is stored.  Byte-order and packing
 * are per the I2O spec's little-endian layout.
 */
1512 static int i2o_parse_hrt(struct i2o_controller *c)
1515 u32 *rows=(u32*)c->hrt;
1525 printk(KERN_ERR "%s: HRT table for controller is too new a version.\n",
/* Entry count is a little-endian u16 assembled byte-by-byte. */
1530 count=p[0]|(p[1]<<8);
1533 printk(KERN_INFO "%s: HRT has %d entries of %d bytes each.\n",
1534 c->name, count, length<<2);
1538 for(i=0;i<count;i++)
1540 printk(KERN_INFO "Adapter %08X: ", rows[0]);
1545 printk("TID %04X:[", state&0xFFF);
1548 printk("H"); /* Hidden */
1551 printk("P"); /* Present */
1553 printk("C"); /* Controlled */
1556 printk("*"); /* Hard */
/* Bus-type decode.  NOTE(review): *(u32 *)(d+4) reads a u32 from a
 * byte pointer — presumably the HRT rows are 32-bit aligned; confirm
 * on architectures that fault on misaligned loads. */
1563 /* Adapter private bus - easy */
1564 printk("Local bus %d: I/O at 0x%04X Mem 0x%08X",
1565 p[2], d[1]<<8|d[0], *(u32 *)(d+4));
1569 printk("ISA %d: CSN %d I/O at 0x%04X Mem 0x%08X",
1570 p[2], d[2], d[1]<<8|d[0], *(u32 *)(d+4));
1573 case 2: /* EISA bus */
1574 printk("EISA %d: Slot %d I/O at 0x%04X Mem 0x%08X",
1575 p[2], d[3], d[1]<<8|d[0], *(u32 *)(d+4));
1578 case 3: /* MCA bus */
1579 printk("MCA %d: Slot %d I/O at 0x%04X Mem 0x%08X",
1580 p[2], d[3], d[1]<<8|d[0], *(u32 *)(d+4));
1583 case 4: /* PCI bus */
1584 printk("PCI %d: Bus %d Device %d Function %d",
1585 p[2], d[2], d[1], d[0]);
1588 case 0x80: /* Other */
1590 printk("Unsupported bus type.");
1601 * The logical configuration table tells us what we can talk to
1602 * on the board. Most of the stuff isn't interesting to us.
/*
 * Walk the Logical Configuration Table (c->lct) and register an
 * i2o_device for every entry: each entry's data is copied into a
 * freshly kmalloc'd struct i2o_device, reported via
 * i2o_report_controller_unit(), and installed with
 * i2o_install_device().  (Excerpt: loop header and error returns
 * elided.)
 */
1605 static int i2o_parse_lct(struct i2o_controller *c)
1610 struct i2o_device *d;
1611 i2o_lct *lct = c->lct;
1614 printk(KERN_ERR "%s: LCT is empty???\n", c->name);
1618 max = lct->table_size;
1622 printk(KERN_INFO "%s: LCT has %d entries.\n", c->name, max);
/* Bit 0 of iop_flags = IOP requests a configuration dialog. */
1624 if(lct->iop_flags&(1<<0))
1625 printk(KERN_WARNING "%s: Configuration dialog desired.\n", c->name);
1629 d = (struct i2o_device *)kmalloc(sizeof(struct i2o_device), GFP_KERNEL);
1632 printk(KERN_CRIT "i2o_core: Out of memory for I2O device data.\n");
1639 memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));
1642 tid = d->lct_data.tid;
1644 i2o_report_controller_unit(c, d);
1646 i2o_install_device(c, d);
1653 * i2o_quiesce_controller - quiesce controller
1656 * Quiesce an IOP. Causes IOP to make external operation quiescent
1657 * (i2o 'READY' state). Internal operation of the IOP continues normally.
/*
 * Move an IOP from READY/OPERATIONAL into the quiescent (READY)
 * state with ExecSysQuiesce.  Skipped when the IOP is in any other
 * state, since the controller would discard the request anyway.
 * Refreshes the cached status block afterwards.
 */
1660 int i2o_quiesce_controller(struct i2o_controller *c)
1667 /* SysQuiesce discarded if IOP not in READY or OPERATIONAL state */
1669 if ((c->status_block->iop_state != ADAPTER_STATE_READY) &&
1670 (c->status_block->iop_state != ADAPTER_STATE_OPERATIONAL))
1675 msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
1676 msg[1] = I2O_CMD_SYS_QUIESCE<<24|HOST_TID<<12|ADAPTER_TID;
1679 /* Long timeout needed for quiesce if lots of devices */
1681 if ((ret = i2o_post_wait(c, msg, sizeof(msg), 240)))
1682 printk(KERN_INFO "%s: Unable to quiesce (status=%#x).\n",
1685 dprintk(KERN_INFO "%s: Quiesced.\n", c->name);
1687 i2o_status_get(c); // Entered READY state
1692 * i2o_enable_controller - move controller from ready to operational
1695 * Enable IOP. This allows the IOP to resume external operations and
1696 * reverses the effect of a quiesce. In the event of an error a negative
1697 * errno code is returned.
/*
 * Move an IOP from READY to OPERATIONAL with ExecSysEnable,
 * reversing a previous quiesce.  Only legal from READY.
 * Refreshes the cached status block on completion.
 */
1700 int i2o_enable_controller(struct i2o_controller *c)
1707 /* Enable only allowed on READY state */
1708 if(c->status_block->iop_state != ADAPTER_STATE_READY)
1711 msg[0]=FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
1712 msg[1]=I2O_CMD_SYS_ENABLE<<24|HOST_TID<<12|ADAPTER_TID;
1714 /* How long of a timeout do we need? */
1716 if ((ret = i2o_post_wait(c, msg, sizeof(msg), 240)))
1717 printk(KERN_ERR "%s: Could not enable (status=%#x).\n",
1720 dprintk(KERN_INFO "%s: Enabled.\n", c->name);
1722 i2o_status_get(c); // entered OPERATIONAL state
1728 * i2o_clear_controller - clear a controller
1731 * Clear an IOP to HOLD state, ie. terminate external operations, clear all
1732 * input queues and prepare for a system restart. IOP's internal operation
1733 * continues normally and the outbound queue is alive.
1734 * The IOP is not expected to rebuild its LCT.
/*
 * Clear IOP @c to HOLD state with ExecAdapterClear.  All OTHER IOPs
 * on the chain are quiesced first (peer traffic must stop while one
 * IOP is cleared) and re-enabled afterwards.  Errors from the
 * quiesce/enable of peers are deliberately ignored — best effort.
 */
1737 int i2o_clear_controller(struct i2o_controller *c)
1739 struct i2o_controller *iop;
1743 /* Quiesce all IOPs first */
1745 for (iop = i2o_controller_chain; iop; iop = iop->next)
1746 i2o_quiesce_controller(iop);
1748 msg[0]=FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
1749 msg[1]=I2O_CMD_ADAPTER_CLEAR<<24|HOST_TID<<12|ADAPTER_TID;
1752 if ((ret=i2o_post_wait(c, msg, sizeof(msg), 30)))
1753 printk(KERN_INFO "%s: Unable to clear (status=%#x).\n",
1756 dprintk(KERN_INFO "%s: Cleared.\n",c->name);
1760 /* Enable other IOPs */
1762 for (iop = i2o_controller_chain; iop; iop = iop->next)
1764 i2o_enable_controller(iop);
1771 * i2o_reset_controller - reset an IOP
1772 * @c: controller to reset
1774 * Reset the IOP into INIT state and wait until IOP gets into RESET state.
1775 * Terminate all external operations, clear IOP's inbound and outbound
1776 * queues, terminate all DDMs, and reload the IOP's operating environment
1777 * and all local DDMs. The IOP rebuilds its LCT.
/*
 * Hard-reset IOP @c with ExecIopReset.  A 4-byte DMA-coherent status
 * word is allocated for the IOP to write its reply into; on timeout
 * that buffer is intentionally LEAKED because the IOP may still DMA
 * into it later (freeing it would invite memory corruption).  Peer
 * IOPs are quiesced before and re-enabled after the reset.  If the
 * reset is rejected or did not reach RESET state, falls back to
 * i2o_clear_controller().
 */
1780 static int i2o_reset_controller(struct i2o_controller *c)
1782 struct i2o_controller *iop;
1785 dma_addr_t status_phys;
1789 /* Quiesce all IOPs first */
1791 for (iop = i2o_controller_chain; iop; iop = iop->next)
1794 i2o_quiesce_controller(iop);
1797 m=i2o_wait_message(c, "AdapterReset");
1800 msg=(u32 *)(c->msg_virt+m);
1802 status = pci_alloc_consistent(c->pdev, 4, &status_phys);
1803 if(status == NULL) {
1804 printk(KERN_ERR "IOP reset failed - no free memory.\n");
1807 memset(status, 0, 4);
1809 msg[0]=EIGHT_WORD_MSG_SIZE|SGL_OFFSET_0;
1810 msg[1]=I2O_CMD_ADAPTER_RESET<<24|HOST_TID<<12|ADAPTER_TID;
1811 msg[2]=core_context;
1816 msg[7]=0; /* 64bit host FIXME */
1818 i2o_post_message(c,m);
1820 /* Wait for a reply */
1824 if((jiffies-time)>=20*HZ)
1826 printk(KERN_ERR "IOP reset timeout.\n");
1827 /* The controller still may respond and overwrite
1828 * status_phys, LEAK it to prevent memory corruption.
1836 if (*status==I2O_CMD_IN_PROGRESS)
1839 * Once the reset is sent, the IOP goes into the INIT state
1840 * which is indeterminate. We need to wait until the IOP
1841 * has rebooted before we can let the system talk to
1842 * it. We read the inbound Free_List until a message is
1843 * available. If we can't read one in the given amount of
1844 * time, we assume the IOP could not reboot properly.
1847 dprintk(KERN_INFO "%s: Reset in progress, waiting for reboot...\n",
1851 m = I2O_POST_READ32(c);
1852 while(m == 0XFFFFFFFF)
1854 if((jiffies-time) >= 30*HZ)
1856 printk(KERN_ERR "%s: Timeout waiting for IOP reset.\n",
1858 /* The controller still may respond and
1859 * overwrite status_phys, LEAK it to prevent
1860 * memory corruption.
1866 m = I2O_POST_READ32(c);
/* Got a frame back — return it to the free list, we only wanted
 * proof the IOP rebooted. */
1868 i2o_flush_reply(c,m);
1871 /* If IopReset was rejected or didn't perform reset, try IopClear */
1874 if (status[0] == I2O_CMD_REJECTED ||
1875 c->status_block->iop_state != ADAPTER_STATE_RESET)
1877 printk(KERN_WARNING "%s: Reset rejected, trying to clear\n",c->name);
1878 i2o_clear_controller(c);
1881 dprintk(KERN_INFO "%s: Reset completed.\n", c->name);
1883 /* Enable other IOPs */
1885 for (iop = i2o_controller_chain; iop; iop = iop->next)
1887 i2o_enable_controller(iop);
/* Normal path: the IOP replied, so the status word is ours to free. */
1889 pci_free_consistent(c->pdev, 4, status, status_phys);
1895 * i2o_status_get - get the status block for the IOP
1898 * Issue a status query on the controller. This updates the
1899 * attached status_block. If the controller fails to reply or an
1900 * error occurs then a negative errno code is returned. On success
1901 * zero is returned and the status_blok is updated.
/*
 * Fetch the IOP's 88-byte status block into the DMA-coherent
 * c->status_block (allocated lazily on first call).  Completion is
 * detected by polling the last byte of the block for 0xFF — the IOP
 * DMAs the block and that byte is the sync marker — with a 5 second
 * timeout.  The decoded iop_state is then printed.
 */
1904 int i2o_status_get(struct i2o_controller *c)
1911 if (c->status_block == NULL)
1913 c->status_block = (i2o_status_block *)
1914 pci_alloc_consistent(c->pdev, sizeof(i2o_status_block), &c->status_block_phys);
1915 if (c->status_block == NULL)
1917 printk(KERN_CRIT "%s: Get Status Block failed; Out of memory.\n",
1923 status_block = (u8*)c->status_block;
1924 memset(c->status_block,0,sizeof(i2o_status_block));
1926 m=i2o_wait_message(c, "StatusGet");
1929 msg=(u32 *)(c->msg_virt+m);
1931 msg[0]=NINE_WORD_MSG_SIZE|SGL_OFFSET_0;
1932 msg[1]=I2O_CMD_STATUS_GET<<24|HOST_TID<<12|ADAPTER_TID;
1933 msg[2]=core_context;
1937 msg[6]=c->status_block_phys;
1938 msg[7]=0; /* 64bit host FIXME */
1939 msg[8]=sizeof(i2o_status_block); /* always 88 bytes */
1941 i2o_post_message(c,m);
1943 /* Wait for a reply */
/* Byte 87 (the last byte) flips to 0xFF when the IOP has written
 * the whole block. */
1946 while(status_block[87]!=0xFF)
1948 if((jiffies-time)>=5*HZ)
1950 printk(KERN_ERR "%s: Get status timeout.\n",c->name);
1958 printk(KERN_INFO "%s: State = ", c->name);
1959 switch (c->status_block->iop_state) {
1973 printk("OPERATIONAL\n");
1979 printk("FAULTED\n");
1982 printk("%x (unknown !!)\n",c->status_block->iop_state);
1990 * Get the Hardware Resource Table for the device.
1991 * The HRT contains information about possible hidden devices
1992 * but is mostly useless to us
/*
 * Fetch the Hardware Resource Table.  The true HRT size is unknown
 * up front, so we fetch into a header-sized buffer first; if the
 * returned header reports more entries than fit, the buffer is
 * reallocated at the reported size and the fetch retried (bounded by
 * 'loops').  On -ETIMEDOUT the buffer is left in limbo deliberately —
 * the IOP may still DMA into it.
 */
1994 int i2o_hrt_get(struct i2o_controller *c)
1997 int ret, size = sizeof(i2o_hrt);
1998 int loops = 3; /* we only try 3 times to get the HRT, this should be
1999 more than enough. Worst case should be 2 times.*/
2001 /* First read just the header to figure out the real size */
2004 /* first we allocate the memory for the HRT */
2005 if (c->hrt == NULL) {
2006 c->hrt=pci_alloc_consistent(c->pdev, size, &c->hrt_phys);
2007 if (c->hrt == NULL) {
2008 printk(KERN_CRIT "%s: Hrt Get failed; Out of memory.\n", c->name);
2014 msg[0]= SIX_WORD_MSG_SIZE| SGL_OFFSET_4;
2015 msg[1]= I2O_CMD_HRT_GET<<24 | HOST_TID<<12 | ADAPTER_TID;
2017 msg[4]= (0xD0000000 | c->hrt_len); /* Simple transaction */
2018 msg[5]= c->hrt_phys; /* Dump it here */
2020 ret = i2o_post_wait_mem(c, msg, sizeof(msg), 20, c->hrt, NULL, c->hrt_phys, 0, c->hrt_len, 0);
2022 if(ret == -ETIMEDOUT)
2024 /* The HRT block we used is in limbo somewhere. When the iop wakes up
2025 we will recover it */
2033 printk(KERN_ERR "%s: Unable to get HRT (status=%#x)\n",
/* entry_len is in 32-bit words, hence the << 2 to get bytes. */
2038 if (c->hrt->num_entries * c->hrt->entry_len << 2 > c->hrt_len) {
2039 size = c->hrt->num_entries * c->hrt->entry_len << 2;
2040 pci_free_consistent(c->pdev, c->hrt_len, c->hrt, c->hrt_phys);
2045 } while (c->hrt == NULL && loops > 0);
2049 printk(KERN_ERR "%s: Unable to get HRT after three tries, giving up\n", c->name);
2053 i2o_parse_hrt(c); // just for debugging
2059 * Send the I2O System Table to the specified IOP
2061 * The system table contains information about all the IOPs in the
2062 * system. It is built and then sent to each IOP so that IOPs can
2063 * establish connections between each other.
/*
 * Send the shared system table (sys_tbl) to IOP @iop with
 * ExecSysTabSet.  If the IOP wants more private PCI memory or I/O
 * space than it currently has, the extra window is allocated from
 * the parent PCI resource first.  Three SGL elements follow the
 * message: the system table itself, the private-memory declaration
 * and the private-I/O declaration (the latter two come from privbuf).
 * The table is pci_map_single'd per-IOP since the same buffer is sent
 * to every controller.
 */
2066 static int i2o_systab_send(struct i2o_controller *iop)
2069 dma_addr_t sys_tbl_phys;
2071 struct resource *root;
2072 u32 *privbuf = kmalloc(16, GFP_KERNEL);
2077 if(iop->status_block->current_mem_size < iop->status_block->desired_mem_size)
2079 struct resource *res = &iop->mem_resource;
2080 res->name = iop->pdev->bus->name;
2081 res->flags = IORESOURCE_MEM;
2084 printk("%s: requires private memory resources.\n", iop->name);
2085 root = pci_find_parent_resource(iop->pdev, res);
2087 printk("Can't find parent resource!\n");
2088 if(root && allocate_resource(root, res,
2089 iop->status_block->desired_mem_size,
2090 iop->status_block->desired_mem_size,
2091 iop->status_block->desired_mem_size,
2092 1<<20, /* Unspecified, so use 1Mb and play safe */
2097 iop->status_block->current_mem_size = 1 + res->end - res->start;
2098 iop->status_block->current_mem_base = res->start;
2099 printk(KERN_INFO "%s: allocated %ld bytes of PCI memory at 0x%08lX.\n",
2100 iop->name, 1+res->end-res->start, res->start);
2103 if(iop->status_block->current_io_size < iop->status_block->desired_io_size)
2105 struct resource *res = &iop->io_resource;
2106 res->name = iop->pdev->bus->name;
2107 res->flags = IORESOURCE_IO;
/* NOTE(review): this message says "memory" but we are in the I/O
 * branch — looks like a copy/paste slip; confirm before changing
 * the user-visible string. */
2110 printk("%s: requires private memory resources.\n", iop->name);
2111 root = pci_find_parent_resource(iop->pdev, res);
2113 printk("Can't find parent resource!\n");
2114 if(root && allocate_resource(root, res,
2115 iop->status_block->desired_io_size,
2116 iop->status_block->desired_io_size,
2117 iop->status_block->desired_io_size,
2118 1<<20, /* Unspecified, so use 1Mb and play safe */
2123 iop->status_block->current_io_size = 1 + res->end - res->start;
/* NOTE(review): stores the I/O window base into current_mem_base;
 * the parallel memory branch above sets current_mem_base too, so
 * this looks like it should be current_io_base — verify against
 * the I2O status-block layout before fixing. */
2124 iop->status_block->current_mem_base = res->start;
2125 printk(KERN_INFO "%s: allocated %ld bytes of PCI I/O at 0x%08lX.\n",
2126 iop->name, 1+res->end-res->start, res->start);
/* Private memory/I/O declarations passed as the 2nd and 3rd SGL. */
2131 privbuf[0] = iop->status_block->current_mem_base;
2132 privbuf[1] = iop->status_block->current_mem_size;
2133 privbuf[2] = iop->status_block->current_io_base;
2134 privbuf[3] = iop->status_block->current_io_size;
2137 msg[0] = I2O_MESSAGE_SIZE(12) | SGL_OFFSET_6;
2138 msg[1] = I2O_CMD_SYS_TAB_SET<<24 | HOST_TID<<12 | ADAPTER_TID;
2140 msg[4] = (0<<16) | ((iop->unit+2) ); /* Host 0 IOP ID (unit + 2) */
2141 msg[5] = 0; /* Segment 0 */
2144 * Provide three SGL-elements:
2145 * System table (SysTab), Private memory space declaration and
2146 * Private i/o space declaration
2148 * Nasty one here. We can't use pci_alloc_consistent to send the
2149 * same table to everyone. We have to go remap it for them all
2152 sys_tbl_phys = pci_map_single(iop->pdev, sys_tbl, sys_tbl_len, PCI_DMA_TODEVICE);
2153 msg[6] = 0x54000000 | sys_tbl_phys;
2155 msg[7] = sys_tbl_phys;
2156 msg[8] = 0x54000000 | privbuf[1];
2157 msg[9] = privbuf[0];
2158 msg[10] = 0xD4000000 | privbuf[3];
2159 msg[11] = privbuf[2];
2161 ret=i2o_post_wait(iop, msg, sizeof(msg), 120);
2163 pci_unmap_single(iop->pdev, sys_tbl_phys, sys_tbl_len, PCI_DMA_TODEVICE);
2167 printk(KERN_ERR "%s: SysTab setup timed out.\n", iop->name);
2171 printk(KERN_ERR "%s: Unable to set SysTab (status=%#x).\n",
2176 dprintk(KERN_INFO "%s: SysTab set.\n", iop->name);
2178 i2o_status_get(iop); // Entered READY state
2186 * Initialize I2O subsystem.
/*
 * Bring the whole I2O subsystem up: activate every IOP on the chain
 * (deleting any that fail), build and distribute the system table,
 * then bring each IOP online.  If an IOP fails to come online it is
 * deleted and the system table rebuilt from scratch, since the table
 * must list exactly the live IOPs.  Finally each IOP gets a kernel
 * thread for dynamic LCT updates and is registered for all events.
 */
2188 void __init i2o_sys_init(void)
2190 struct i2o_controller *iop, *niop = NULL;
2192 printk(KERN_INFO "Activating I2O controllers...\n");
2193 printk(KERN_INFO "This may take a few minutes if there are many devices\n");
2195 /* In INIT state, Activate IOPs */
/* niop caches ->next because i2o_delete_controller() unlinks iop. */
2196 for (iop = i2o_controller_chain; iop; iop = niop) {
2197 dprintk(KERN_INFO "Calling i2o_activate_controller for %s...\n",
2200 if (i2o_activate_controller(iop) < 0)
2201 i2o_delete_controller(iop);
2204 /* Active IOPs in HOLD state */
2207 if (i2o_controller_chain == NULL)
2211 * If build_sys_table fails, we kill everything and bail
2212 * as we can't init the IOPs w/o a system table
2214 dprintk(KERN_INFO "i2o_core: Calling i2o_build_sys_table...\n");
2215 if (i2o_build_sys_table() < 0) {
2220 /* If IOP don't get online, we need to rebuild the System table */
2221 for (iop = i2o_controller_chain; iop; iop = niop) {
2223 dprintk(KERN_INFO "Calling i2o_online_controller for %s...\n", iop->name);
2224 if (i2o_online_controller(iop) < 0) {
2225 i2o_delete_controller(iop);
2226 goto rebuild_sys_tab;
2230 /* Active IOPs now in OPERATIONAL state */
2233 * Register for status updates from all IOPs
2235 for(iop = i2o_controller_chain; iop; iop=iop->next) {
2237 /* Create a kernel thread to deal with dynamic LCT updates */
2238 iop->lct_pid = kernel_thread(i2o_dyn_lct, iop, CLONE_SIGHAND);
2240 /* Update change ind on DLCT */
2241 iop->dlct->change_ind = iop->lct->change_ind;
2243 /* Start dynamic LCT updates */
2244 i2o_lct_notify(iop);
2246 /* Register for all events from IRTOS */
2247 i2o_event_register(iop, core_context, 0, 0, 0xFFFFFFFF);
2252 * i2o_sys_shutdown - shutdown I2O system
2254 * Bring down each i2o controller and then return. Each controller
2255 * is taken through an orderly shutdown
/*
 * Orderly shutdown: delete every controller on the chain (which also
 * resets each IOP).  niop must cache ->next because deletion unlinks
 * the current node.
 */
2258 static void i2o_sys_shutdown(void)
2260 struct i2o_controller *iop, *niop;
2262 /* Delete all IOPs from the controller chain */
2263 /* that will reset all IOPs too */
2265 for (iop = i2o_controller_chain; iop; iop = niop) {
2267 i2o_delete_controller(iop);
2272 * i2o_activate_controller - bring controller up to HOLD
2275 * This function brings an I2O controller into HOLD state. The adapter
2276 * is reset if necessary and then the queues and resource table
2277 * are read. -1 is returned on a failure, 0 on success.
/*
 * Bring IOP @iop up to HOLD state: get its status (resetting it if
 * it will not answer), reject FAULTED or too-new-spec adapters,
 * reset any IOP that is already running (READY/OPERATIONAL/HOLD/
 * FAILED) so we start from a known state, then set up the outbound
 * queue, prime it with message frames, and fetch the HRT.
 */
2281 int i2o_activate_controller(struct i2o_controller *iop)
2283 /* In INIT state, Wait Inbound Q to initialize (in i2o_status_get) */
2284 /* In READY state, Get status */
2286 if (i2o_status_get(iop) < 0) {
2287 printk(KERN_INFO "Unable to obtain status of %s, "
2288 "attempting a reset.\n", iop->name);
2289 if (i2o_reset_controller(iop) < 0)
2293 if(iop->status_block->iop_state == ADAPTER_STATE_FAULTED) {
2294 printk(KERN_CRIT "%s: hardware fault\n", iop->name);
2298 if (iop->status_block->i2o_version > I2OVER15) {
2299 printk(KERN_ERR "%s: Not running vrs. 1.5. of the I2O Specification.\n",
2304 if (iop->status_block->iop_state == ADAPTER_STATE_READY ||
2305 iop->status_block->iop_state == ADAPTER_STATE_OPERATIONAL ||
2306 iop->status_block->iop_state == ADAPTER_STATE_HOLD ||
2307 iop->status_block->iop_state == ADAPTER_STATE_FAILED)
2309 dprintk(KERN_INFO "%s: Already running, trying to reset...\n",
2311 if (i2o_reset_controller(iop) < 0)
2315 if (i2o_init_outbound_q(iop) < 0)
2318 if (i2o_post_outbound_messages(iop))
2323 if (i2o_hrt_get(iop) < 0)
2331 * i2o_init_outbound_queue - setup the outbound queue
2334 * Clear and (re)initialize IOP's outbound queue. Returns 0 on
2335 * success or a negative errno code on a failure.
/*
 * Issue ExecOutboundInit to (re)initialize the IOP's outbound queue.
 * A 4-byte coherent status word is attached via a simple SGL; the
 * IOP writes its progress there, and we poll it (status codes below
 * I2O_CMD_REJECTED mean "still working") with a 30 second timeout.
 * On timeout the status word is freed only if the IOP never touched
 * it; see the two distinct timeout messages.
 */
2338 int i2o_init_outbound_q(struct i2o_controller *c)
2341 dma_addr_t status_phys;
2346 dprintk(KERN_INFO "%s: Initializing Outbound Queue...\n", c->name);
2347 m=i2o_wait_message(c, "OutboundInit");
2350 msg=(u32 *)(c->msg_virt+m);
2352 status = pci_alloc_consistent(c->pdev, 4, &status_phys);
2354 printk(KERN_ERR "%s: Outbound Queue initialization failed - no free memory.\n",
2358 memset(status, 0, 4);
2360 msg[0]= EIGHT_WORD_MSG_SIZE| TRL_OFFSET_6;
2361 msg[1]= I2O_CMD_OUTBOUND_INIT<<24 | HOST_TID<<12 | ADAPTER_TID;
2362 msg[2]= core_context;
2363 msg[3]= 0x0106; /* Transaction context */
2364 msg[4]= 4096; /* Host page frame size */
2365 /* Frame size is in words. 256 bytes a frame for now */
2366 msg[5]= MSG_FRAME_SIZE<<16|0x80; /* Outbound msg frame size in words and Initcode */
2367 msg[6]= 0xD0000004; /* Simple SG LE, EOB */
2368 msg[7]= status_phys;
2370 i2o_post_message(c,m);
/* Poll the IOP-written status byte until it reaches a terminal code. */
2374 while(status[0] < I2O_CMD_REJECTED)
2376 if((jiffies-time)>=30*HZ)
2379 printk(KERN_ERR "%s: Ignored queue initialize request.\n",
2382 printk(KERN_ERR "%s: Outbound queue initialize timeout.\n",
2384 pci_free_consistent(c->pdev, 4, status, status_phys);
2391 if(status[0] != I2O_CMD_COMPLETED)
2393 printk(KERN_ERR "%s: IOP outbound initialise failed.\n", c->name);
2394 pci_free_consistent(c->pdev, 4, status, status_phys);
2397 pci_free_consistent(c->pdev, 4, status, status_phys);
2402 * i2o_post_outbound_messages - fill message queue
2405 * Allocate a message frame and load the messages into the IOP. The
2406 * function returns zero on success or a negative errno code on
/*
 * Allocate the host-side pool of outbound (reply) message frames and
 * hand every frame's bus address to the IOP via the reply FIFO.  The
 * pool is one kmalloc'd region mapped FROMDEVICE (the IOP DMAs
 * replies into it); frames are MSG_FRAME_SIZE words apart, hence the
 * << 2 when stepping the bus address.
 */
2410 int i2o_post_outbound_messages(struct i2o_controller *c)
2414 /* Alloc space for IOP's outbound queue message frames */
2416 c->page_frame = kmalloc(MSG_POOL_SIZE, GFP_KERNEL);
2417 if(c->page_frame==NULL) {
2418 printk(KERN_ERR "%s: Outbound Q initialize failed; out of memory.\n",
2423 c->page_frame_map = pci_map_single(c->pdev, c->page_frame, MSG_POOL_SIZE, PCI_DMA_FROMDEVICE);
2425 if(c->page_frame_map == 0)
2427 kfree(c->page_frame);
2428 printk(KERN_ERR "%s: Unable to map outbound queue.\n", c->name);
2432 m = c->page_frame_map;
2436 for(i=0; i< NMBR_MSG_FRAMES; i++) {
2437 I2O_REPLY_WRITE32(c,m);
2439 m += (MSG_FRAME_SIZE << 2);
2446 * Get the IOP's Logical Configuration Table
/*
 * Fetch the Logical Configuration Table with ExecLctNotify
 * ("report now" — change indicator 0).  The buffer starts at the
 * size advertised in the status block; if the returned table reports
 * a larger size the buffer is reallocated and the fetch repeated.
 * On -ETIMEDOUT the buffer is abandoned to the IOP (same DMA-in-limbo
 * rule as the HRT).  Finishes by parsing/installing devices.
 */
2448 int i2o_lct_get(struct i2o_controller *c)
2451 int ret, size = c->status_block->expected_lct_size;
2454 if (c->lct == NULL) {
2455 c->lct = pci_alloc_consistent(c->pdev, size, &c->lct_phys);
2456 if(c->lct == NULL) {
2457 printk(KERN_CRIT "%s: Lct Get failed. Out of memory.\n",
2462 memset(c->lct, 0, size);
2464 msg[0] = EIGHT_WORD_MSG_SIZE|SGL_OFFSET_6;
2465 msg[1] = I2O_CMD_LCT_NOTIFY<<24 | HOST_TID<<12 | ADAPTER_TID;
2466 /* msg[2] filled in i2o_post_wait */
2468 msg[4] = 0xFFFFFFFF; /* All devices */
2469 msg[5] = 0x00000000; /* Report now */
2470 msg[6] = 0xD0000000|size;
2471 msg[7] = c->lct_phys;
2473 ret=i2o_post_wait_mem(c, msg, sizeof(msg), 120, c->lct, NULL, c->lct_phys, 0, size, 0);
2475 if(ret == -ETIMEDOUT)
2483 printk(KERN_ERR "%s: LCT Get failed (status=%#x.\n",
/* table_size is in 32-bit words. */
2488 if (c->lct->table_size << 2 > size) {
2489 int new_size = c->lct->table_size << 2;
2490 pci_free_consistent(c->pdev, size, c->lct, c->lct_phys);
2494 } while (c->lct == NULL);
2496 if ((ret=i2o_parse_lct(c)) < 0)
2503 * Like above, but used for async notification. The main
2504 * difference is that we keep track of the CurrentChangeIndicator
2505 * so that we only get updates when it actually changes.
/*
 * Post (fire-and-forget) an ExecLctNotify asking the IOP to reply
 * into c->dlct only when the LCT's change indicator advances past
 * the one we last saw — this drives the dynamic-LCT update thread.
 * 0xDEADBEEF is just a recognizable transaction context marker.
 */
2508 int i2o_lct_notify(struct i2o_controller *c)
2512 msg[0] = EIGHT_WORD_MSG_SIZE|SGL_OFFSET_6;
2513 msg[1] = I2O_CMD_LCT_NOTIFY<<24 | HOST_TID<<12 | ADAPTER_TID;
2514 msg[2] = core_context;
2515 msg[3] = 0xDEADBEEF;
2516 msg[4] = 0xFFFFFFFF; /* All devices */
2517 msg[5] = c->dlct->change_ind+1; /* Next change */
2518 msg[6] = 0xD0000000|8192;
2519 msg[7] = c->dlct_phys;
2521 return i2o_post_this(c, msg, sizeof(msg));
2525 * Bring a controller online into OPERATIONAL state.
/*
 * Take an IOP from HOLD to OPERATIONAL: push the system table, issue
 * SysEnable, then fetch and parse the LCT.  Finally queries scalar
 * group 0x0000 field 4 (battery status) for information.
 */
2528 int i2o_online_controller(struct i2o_controller *iop)
2532 if (i2o_systab_send(iop) < 0)
2535 /* In READY state */
2537 dprintk(KERN_INFO "%s: Attempting to enable...\n", iop->name);
2538 if (i2o_enable_controller(iop) < 0)
2541 /* In OPERATIONAL state */
2543 dprintk(KERN_INFO "%s: Attempting to get/parse lct...\n", iop->name);
2544 if (i2o_lct_get(iop) < 0)
2547 /* Check battery status */
2550 if(i2o_query_scalar(iop, ADAPTER_TID, 0x0000, 4, &v, 4)>=0)
2560 * Build system table
2562 * The system table contains information about all the IOPs in the
2563 * system (duh) and is used by the Executives on the IOPs to establish
2564 * peer2peer connections. We're not supporting peer2peer at the moment,
2565 * but this will be needed down the road for things like lan2lan forwarding.
/*
 * (Re)build the global system table from the live controller chain:
 * header plus one i2o_sys_tbl_entry per controller, each filled from
 * that IOP's freshly-fetched status block.  Controllers that do not
 * answer a status query are deleted on the spot and num_entries
 * decremented.  The finished table is dumped word-by-word for debug.
 */
2567 static int i2o_build_sys_table(void)
2569 struct i2o_controller *iop = NULL;
2570 struct i2o_controller *niop = NULL;
2573 sys_tbl_len = sizeof(struct i2o_sys_tbl) + // Header + IOPs
2574 (i2o_num_controllers) *
2575 sizeof(struct i2o_sys_tbl_entry);
2580 sys_tbl = kmalloc(sys_tbl_len, GFP_KERNEL);
2582 printk(KERN_CRIT "SysTab Set failed. Out of memory.\n");
2585 memset((void*)sys_tbl, 0, sys_tbl_len);
2587 sys_tbl->num_entries = i2o_num_controllers;
2588 sys_tbl->version = I2OVERSION; /* TODO: Version 2.0 */
2589 sys_tbl->change_ind = sys_tbl_ind++;
2591 for(iop = i2o_controller_chain; iop; iop = niop)
2596 * Get updated IOP state so we have the latest information
2598 * We should delete the controller at this point if it
2599 * doesn't respond since if it's not on the system table
2600 * it is technically not part of the I2O subsystem...
2602 if(i2o_status_get(iop)) {
/* NOTE(review): the concatenated literal prints "...status
 * whileattempting..." — a space is missing at the join; fix the
 * string when this function can be edited as a whole. */
2603 printk(KERN_ERR "%s: Deleting b/c could not get status while"
2604 "attempting to build system table\n", iop->name);
2605 i2o_delete_controller(iop);
2606 sys_tbl->num_entries--;
2607 continue; // try the next one
2610 sys_tbl->iops[count].org_id = iop->status_block->org_id;
/* IOP IDs are unit + 2; IDs 0/1 are reserved (host is 0). */
2611 sys_tbl->iops[count].iop_id = iop->unit + 2;
2612 sys_tbl->iops[count].seg_num = 0;
2613 sys_tbl->iops[count].i2o_version =
2614 iop->status_block->i2o_version;
2615 sys_tbl->iops[count].iop_state =
2616 iop->status_block->iop_state;
2617 sys_tbl->iops[count].msg_type =
2618 iop->status_block->msg_type;
2619 sys_tbl->iops[count].frame_size =
2620 iop->status_block->inbound_frame_size;
2621 sys_tbl->iops[count].last_changed = sys_tbl_ind - 1; // ??
2622 sys_tbl->iops[count].iop_capabilities =
2623 iop->status_block->iop_capabilities;
2624 sys_tbl->iops[count].inbound_low = (u32)iop->post_port;
2625 sys_tbl->iops[count].inbound_high = 0; // FIXME: 64-bit support
2633 table = (u32*)sys_tbl;
2634 for(count = 0; count < (sys_tbl_len >>2); count++)
2635 printk(KERN_INFO "sys_tbl[%d] = %0#10x\n", count, table[count]);
2644 * Run time support routines
2648 * Generic "post and forget" helpers. This is less efficient - we do
2649 * a memcpy for example that isn't strictly needed, but for most uses
2650 * this is simply not worth optimising
/*
 * Post-and-forget: grab a free inbound frame (polling up to 1 second),
 * memcpy the caller's pre-built message into it and ring the post
 * doorbell.  @len is in bytes.  Returns after posting; no reply is
 * awaited here.
 */
2653 int i2o_post_this(struct i2o_controller *c, u32 *data, int len)
2657 unsigned long t=jiffies;
2662 m = I2O_POST_READ32(c);
2664 while(m==0xFFFFFFFF && (jiffies-t)<HZ);
2668 printk(KERN_ERR "%s: Timeout waiting for message frame!\n",
2672 msg = (u32 *)(c->msg_virt + m);
/* Frame lives in controller memory, hence memcpy_toio. */
2673 memcpy_toio(msg, data, len);
2674 i2o_post_message(c,m);
2679 * i2o_post_wait_mem - I2O query/reply with DMA buffers
2681 * @msg: message to send
2682 * @len: length of message
2683 * @timeout: time in seconds to wait
2684 * @mem1: attached memory buffer 1
2685 * @mem2: attached memory buffer 2
2686 * @phys1: physical address of buffer 1
2687 * @phys2: physical address of buffer 2
2688 * @size1: size of buffer 1
2689 * @size2: size of buffer 2
2691 * This core API allows an OSM to post a message and then be told whether
2692 * or not the system received a successful reply.
2694 * If the message times out then the value '-ETIMEDOUT' is returned. This
2695 * is a special case. In this situation the message may (should) complete
2696 * at an indefinite time in the future. When it completes it will use the
2697 * memory buffers attached to the request. If -ETIMEDOUT is returned then
2698 * the memory buffers must not be freed. Instead the event completion will
2699 * free them for you. In all other cases the buffers are your problem.
2701 * Pass NULL for unneeded buffers.
/*
 * Post @msg and sleep (interruptibly, up to @timeout seconds) until
 * the reply handler completes the matching i2o_post_wait_data entry.
 * The entry carries the attached DMA buffers (mem1/mem2 with their
 * bus addresses and sizes) so that, on timeout, a late-arriving reply
 * can still free them safely — hence the -ETIMEDOUT contract: the
 * caller must NOT free the buffers in that one case.
 * The 15-bit id placed in msg[2] (alongside core_context) is what
 * i2o_post_wait_complete() matches replies against.
 */
2704 int i2o_post_wait_mem(struct i2o_controller *c, u32 *msg, int len, int timeout, void *mem1, void *mem2, dma_addr_t phys1, dma_addr_t phys2, int size1, int size2)
2706 DECLARE_WAIT_QUEUE_HEAD(wq_i2o_post);
2707 DECLARE_WAITQUEUE(wait, current);
2710 unsigned long flags = 0;
2711 struct i2o_post_wait_data *wait_data =
2712 kmalloc(sizeof(struct i2o_post_wait_data), GFP_KERNEL);
2718 * Create a new notification object
2720 wait_data->status = &status;
2721 wait_data->complete = &complete;
2722 wait_data->mem[0] = mem1;
2723 wait_data->mem[1] = mem2;
2724 wait_data->phys[0] = phys1;
2725 wait_data->phys[1] = phys2;
2726 wait_data->size[0] = size1;
2727 wait_data->size[1] = size2;
2730 * Queue the event with its unique id
2732 spin_lock_irqsave(&post_wait_lock, flags);
2734 wait_data->next = post_wait_queue;
2735 post_wait_queue = wait_data;
/* Ids wrap at 15 bits; the top bit of msg[2] marks "reply expected". */
2736 wait_data->id = (++post_wait_id) & 0x7fff;
2737 wait_data->wq = &wq_i2o_post;
2739 spin_unlock_irqrestore(&post_wait_lock, flags);
2742 * Fill in the message id
2745 msg[2] = 0x80000000|(u32)core_context|((u32)wait_data->id<<16);
2748 * Post the message to the controller. At some point later it
2749 * will return. If we time out before it returns then
2750 * complete will be zero. From the point post_this returns
2751 * the wait_data may have been deleted.
2754 add_wait_queue(&wq_i2o_post, &wait);
2755 set_current_state(TASK_INTERRUPTIBLE);
2756 if ((status = i2o_post_this(c, msg, len))==0) {
2757 schedule_timeout(HZ * timeout);
2761 remove_wait_queue(&wq_i2o_post, &wait);
2764 remove_wait_queue(&wq_i2o_post, &wait);
2766 if(signal_pending(current))
2769 spin_lock_irqsave(&post_wait_lock, flags);
2770 barrier(); /* Be sure we see complete as it is locked */
2774 * Mark the entry dead. We cannot remove it. This is important.
2775 * When it does terminate (which it must do if the controller hasn't
2776 * died..) then it will otherwise scribble on stuff.
2777 * !complete lets us safely check if the entry is still
2778 * allocated and thus we can write into it
2780 wait_data->wq = NULL;
2781 status = -ETIMEDOUT;
2785 /* Debugging check - remove me soon */
2786 if(status == -ETIMEDOUT)
2788 printk("TIMEDOUT BUG!\n");
2792 /* And the wait_data is not leaked either! */
2793 spin_unlock_irqrestore(&post_wait_lock, flags);
2798 * i2o_post_wait - I2O query/reply
2800 * @msg: message to send
2801 * @len: length of message
2802 * @timeout: time in seconds to wait
2804 * This core API allows an OSM to post a message and then be told whether
2805 * or not the system received a successful reply.
/*
 * Convenience wrapper around i2o_post_wait_mem() for messages with no
 * attached DMA buffers.  Same -ETIMEDOUT semantics apply.
 */
2808 int i2o_post_wait(struct i2o_controller *c, u32 *msg, int len, int timeout)
2810 return i2o_post_wait_mem(c, msg, len, timeout, NULL, NULL, 0, 0, 0, 0);
2814 * i2o_post_wait is completed and we want to wake up the
2815 * sleeping process. Called by core's reply handler.
/*
 * Reply-handler side of i2o_post_wait_mem(): look up the pending
 * entry whose 15-bit id matches bits 16..30 of @context.  If the
 * waiter is still alive (wq != NULL) store @status and wake it; if
 * the waiter already timed out, free the attached DMA buffers on its
 * behalf and log the late completion.  An unmatched context means a
 * bogus/duplicate reply and is only logged.
 */
2818 static void i2o_post_wait_complete(struct i2o_controller *c, u32 context, int status)
2820 struct i2o_post_wait_data **p1, *q;
2821 unsigned long flags;
2824 * We need to search through the post_wait
2825 * queue to see if the given message is still
2826 * outstanding. If not, it means that the IOP
2827 * took longer to respond to the message than we
2828 * had allowed and timer has already expired.
2829 * Not much we can do about that except log
2830 * it for debug purposes, increase timeout, and recompile
2832 * Lock needed to keep anyone from moving queue pointers
2833 * around while we're looking through them.
2836 spin_lock_irqsave(&post_wait_lock, flags);
2838 for(p1 = &post_wait_queue; *p1!=NULL; p1 = &((*p1)->next))
2841 if(q->id == ((context >> 16) & 0x7fff)) {
2854 /* Live entry - wakeup and set status */
2855 *q->status = status;
2862 * Free resources. Caller is dead
2866 pci_free_consistent(c->pdev, q->size[0], q->mem[0], q->phys[0]);
2868 pci_free_consistent(c->pdev, q->size[1], q->mem[1], q->phys[1]);
2870 printk(KERN_WARNING "i2o_post_wait event completed after timeout.\n");
/* NOTE(review): lock was taken with spin_lock_irqsave but released
 * here with plain spin_unlock — flags are never restored on these
 * paths; confirm whether interrupts are intentionally left as-is
 * (this runs from the reply handler) before changing. */
2873 spin_unlock(&post_wait_lock);
2877 spin_unlock(&post_wait_lock);
2879 printk(KERN_DEBUG "i2o_post_wait: Bogus reply!\n");
2882 /* Issue UTIL_PARAMS_GET or UTIL_PARAMS_SET
2884 * This function can be used for all UtilParamsGet/Set operations.
2885 * The OperationList is given in oplist-buffer,
2886 * and results are returned in reslist-buffer.
2887 * Note that the minimum sized reslist is 8 bytes and contains
2888 * ResultCount, ErrorInfoSize, BlockStatus and BlockSize.
/*
 * Run a UtilParamsGet/Set (@cmd selects which) against @tid.  The
 * caller's operation list and result buffer are bounce-copied through
 * DMA-coherent memory.  On success, walks every result block to
 * accumulate the total byte length returned (each block's first u16
 * is its length in words; bits 16-23 carry the BlockStatus).  On
 * -ETIMEDOUT the bounce buffers are intentionally not freed — the
 * IOP may still DMA into them (see i2o_post_wait_mem()).
 * Returns bytes used by the result list, or a negative errno /
 * -BlockStatus.
 */
2891 int i2o_issue_params(int cmd, struct i2o_controller *iop, int tid,
2892 void *oplist, int oplen, void *reslist, int reslen)
2895 u32 *res32 = (u32*)reslist;
2896 u32 *restmp = (u32*)reslist;
2900 u32 *opmem, *resmem;
2901 dma_addr_t opmem_phys, resmem_phys;
2903 /* Get DMAable memory */
2904 opmem = pci_alloc_consistent(iop->pdev, oplen, &opmem_phys);
2907 memcpy(opmem, oplist, oplen);
2909 resmem = pci_alloc_consistent(iop->pdev, reslen, &resmem_phys);
2912 pci_free_consistent(iop->pdev, oplen, opmem, opmem_phys);
2916 msg[0] = NINE_WORD_MSG_SIZE | SGL_OFFSET_5;
2917 msg[1] = cmd << 24 | HOST_TID << 12 | tid;
2920 msg[5] = 0x54000000 | oplen; /* OperationList */
2921 msg[6] = opmem_phys;
2922 msg[7] = 0xD0000000 | reslen; /* ResultList */
2923 msg[8] = resmem_phys;
2925 wait_status = i2o_post_wait_mem(iop, msg, sizeof(msg), 10, opmem, resmem, opmem_phys, resmem_phys, oplen, reslen);
2928 * This only looks like a memory leak - don't "fix" it.
2930 if(wait_status == -ETIMEDOUT)
2933 memcpy(reslist, resmem, reslen);
2934 pci_free_consistent(iop->pdev, reslen, resmem, resmem_phys);
2935 pci_free_consistent(iop->pdev, oplen, opmem, opmem_phys);
2938 if(wait_status != 0)
2941 * Calculate number of bytes of Result LIST
2942 * We need to loop through each Result BLOCK and grab the length
/* res32[0] low 16 bits = ResultCount. */
2946 for(i = 0; i < (res32[0]&0X0000FFFF); i++)
2948 if(restmp[0]&0x00FF0000) /* BlockStatus != SUCCESS */
2950 printk(KERN_WARNING "%s - Error:\n ErrorInfoSize = 0x%02x, "
2951 "BlockStatus = 0x%02x, BlockSize = 0x%04x\n",
2952 (cmd == I2O_CMD_UTIL_PARAMS_SET) ? "PARAMS_SET"
2954 res32[1]>>24, (res32[1]>>16)&0xFF, res32[1]&0xFFFF);
2957 * If this is the only request, then we return an error
2959 if((res32[0]&0x0000FFFF) == 1)
2961 return -((res32[1] >> 16) & 0xFF); /* -BlockStatus */
2964 len += restmp[0] & 0x0000FFFF; /* Length of res BLOCK */
2965 restmp += restmp[0] & 0x0000FFFF; /* Skip to next BLOCK */
2967 return (len << 2); /* bytes used by result list */
2971 * Query one scalar group value or a whole scalar group.
/*
 * i2o_query_scalar - read one field (field >= 0) or the whole scalar
 * group (field == -1) of parameter group @group from device @tid.
 * On success the 8-byte result header is stripped and @buflen bytes
 * are copied into @buf.
 * NOTE(review): resblk is a C99 VLA sized from the caller-supplied
 * buflen, so it lives on the kernel stack - confirm callers bound it.
 */
2973 int i2o_query_scalar(struct i2o_controller *iop, int tid,
2974 int group, int field, void *buf, int buflen)
2976 u16 opblk[] = { 1, 0, I2O_PARAMS_FIELD_GET, group, 1, field };
2977 u8 resblk[8+buflen]; /* 8 bytes for header */
2980 if (field == -1) /* whole group */
2983 size = i2o_issue_params(I2O_CMD_UTIL_PARAMS_GET, iop, tid,
2984 opblk, sizeof(opblk), resblk, sizeof(resblk));
2986 memcpy(buf, resblk+8, buflen); /* cut off header */
2994 * Set a scalar group value or a whole group.
/*
 * i2o_set_scalar - write one field (field >= 0) or the whole scalar
 * group (field == -1) of parameter group @group on device @tid.
 * Builds a FIELD_SET operation block in a kmalloc'd buffer (the value
 * payload lands at opblk+5 for whole-group, opblk+6 for single-field),
 * issues UTIL_PARAMS_SET, and returns the i2o_issue_params() status.
 */
2996 int i2o_set_scalar(struct i2o_controller *iop, int tid,
2997 int group, int field, void *buf, int buflen)
3000 u8 resblk[8+buflen]; /* 8 bytes for header */
3003 opblk = kmalloc(buflen+64, GFP_KERNEL);
3006 printk(KERN_ERR "i2o: no memory for operation buffer.\n");
3010 opblk[0] = 1; /* operation count */
3011 opblk[1] = 0; /* pad */
3012 opblk[2] = I2O_PARAMS_FIELD_SET;
3015 if(field == -1) { /* whole group */
3017 memcpy(opblk+5, buf, buflen);
3019 else /* single field */
3023 memcpy(opblk+6, buf, buflen);
3026 size = i2o_issue_params(I2O_CMD_UTIL_PARAMS_SET, iop, tid,
3027 opblk, 12+buflen, resblk, sizeof(resblk));
3036 * if oper == I2O_PARAMS_TABLE_GET, get from all rows
3037 * if fieldcount == -1 return all fields
3038 * ibuf and ibuflen are unused (use NULL, 0)
3039 * else return specific fields
3040 * ibuf contains fieldindexes
3042 * if oper == I2O_PARAMS_LIST_GET, get from specific rows
3043 * if fieldcount == -1 return all fields
3044 * ibuf contains rowcount, keyvalues
3045 * else return specific fields
3046 * fieldcount is # of fieldindexes
3047 * ibuf contains fieldindexes, rowcount, keyvalues
3049 * You could also call i2o_issue_params() directly.
/*
 * i2o_query_table - read table-group parameters via UTIL_PARAMS_GET.
 * Packs @oper/@group/@fieldcount plus the caller's @ibuf payload into a
 * kmalloc'd operation block, issues the request, and returns the
 * i2o_issue_params() status with the raw result left in @resblk.
 */
3051 int i2o_query_table(int oper, struct i2o_controller *iop, int tid, int group,
3052 int fieldcount, void *ibuf, int ibuflen,
3053 void *resblk, int reslen)
3058 opblk = kmalloc(10 + ibuflen, GFP_KERNEL);
3061 printk(KERN_ERR "i2o: no memory for query buffer.\n");
3065 opblk[0] = 1; /* operation count */
3066 opblk[1] = 0; /* pad */
3069 opblk[4] = fieldcount;
3070 memcpy(opblk+5, ibuf, ibuflen); /* other params */
3072 size = i2o_issue_params(I2O_CMD_UTIL_PARAMS_GET,iop, tid,
3073 opblk, 10+ibuflen, resblk, reslen);
3082 * Clear table group, i.e. delete all rows.
/*
 * i2o_clear_table - issue TABLE_CLEAR for @group on device @tid.
 * The result is discarded; only the i2o_issue_params() status is
 * returned to the caller.
 */
3084 int i2o_clear_table(struct i2o_controller *iop, int tid, int group)
3086 u16 opblk[] = { 1, 0, I2O_PARAMS_TABLE_CLEAR, group };
3087 u8 resblk[32]; /* min 8 bytes for result header */
3089 return i2o_issue_params(I2O_CMD_UTIL_PARAMS_SET, iop, tid,
3090 opblk, sizeof(opblk), resblk, sizeof(resblk));
3094 * Add a new row into a table group.
3096 * if fieldcount==-1 then we add whole rows
3097 * buf contains rowcount, keyvalues
3098 * else just specific fields are given, rest use defaults
3099 * buf contains fieldindexes, rowcount, keyvalues
/*
 * i2o_row_add_table - issue ROW_ADD for table @group on device @tid.
 * Same operation-block layout as i2o_query_table(): header words then
 * the caller payload at opblk+5. Returns the i2o_issue_params() status.
 */
3101 int i2o_row_add_table(struct i2o_controller *iop, int tid,
3102 int group, int fieldcount, void *buf, int buflen)
3105 u8 resblk[32]; /* min 8 bytes for header */
3108 opblk = kmalloc(buflen+64, GFP_KERNEL);
3111 printk(KERN_ERR "i2o: no memory for operation buffer.\n");
3115 opblk[0] = 1; /* operation count */
3116 opblk[1] = 0; /* pad */
3117 opblk[2] = I2O_PARAMS_ROW_ADD;
3119 opblk[4] = fieldcount;
3120 memcpy(opblk+5, buf, buflen);
3122 size = i2o_issue_params(I2O_CMD_UTIL_PARAMS_SET, iop, tid,
3123 opblk, 10+buflen, resblk, sizeof(resblk));
3133 * Used for error reporting/debugging purposes.
3134 * Following fail status are common to all classes.
3135 * The preserved message must be handled in the reply handler.
/*
 * i2o_report_fail_status - decode and printk a transport-failure reply.
 * @req_status: FailureCode byte from the failed message frame
 * @msg: the failed reply frame (fields decoded from msg[1], msg[4], msg[5])
 * Prints the symbolic failure name, then initiator/target IDs, version
 * range, failing unit/IOP, severity, and the individual severity bits.
 */
3137 void i2o_report_fail_status(u8 req_status, u32* msg)
3139 static char *FAIL_STATUS[] = {
3140 "0x80", /* not used */
3141 "SERVICE_SUSPENDED", /* 0x81 */
3142 "SERVICE_TERMINATED", /* 0x82 */
3150 "INVALID_MSG_FLAGS",
3153 "INVALID_TARGET_ID",
3154 "INVALID_INITIATOR_ID",
3155 "INVALID_INITIATOR_CONTEX", /* 0x8F */
3156 "UNKNOWN_FAILURE" /* 0xFF */
/* 0xFF has no table slot; handle it before indexing by the low nibble */
3159 if (req_status == I2O_FSC_TRANSPORT_UNKNOWN_FAILURE)
3160 printk("TRANSPORT_UNKNOWN_FAILURE (%0#2x)\n.", req_status);
3162 printk("TRANSPORT_%s.\n", FAIL_STATUS[req_status & 0x0F]);
3164 /* Dump some details */
3166 printk(KERN_ERR " InitiatorId = %d, TargetId = %d\n",
3167 (msg[1] >> 12) & 0xFFF, msg[1] & 0xFFF);
3168 printk(KERN_ERR " LowestVersion = 0x%02X, HighestVersion = 0x%02X\n",
3169 (msg[4] >> 8) & 0xFF, msg[4] & 0xFF);
3170 printk(KERN_ERR " FailingHostUnit = 0x%04X, FailingIOP = 0x%03X\n",
3171 msg[5] >> 16, msg[5] & 0xFFF);
/* severity flag bits live in msg[4] bits 16..19 */
3173 printk(KERN_ERR " Severity: 0x%02X ", (msg[4] >> 16) & 0xFF);
3174 if (msg[4] & (1<<16))
3175 printk("(FormatError), "
3176 "this msg can never be delivered/processed.\n");
3177 if (msg[4] & (1<<17))
3178 printk("(PathError), "
3179 "this msg can no longer be delivered/processed.\n");
3180 if (msg[4] & (1<<18))
3181 printk("(PathState), "
3182 "the system state does not allow delivery.\n");
3183 if (msg[4] & (1<<19))
3184 printk("(Congestion), resources temporarily not available;"
3185 "do not retry immediately.\n");
3189 * Used for error reporting/debugging purposes.
3190 * Following reply status are common to all classes.
/*
 * i2o_report_common_status - printk the symbolic name of a reply
 * status code, or the raw hex value if it is out of table range.
 * No trailing newline: the caller appends the detailed status.
 */
3192 void i2o_report_common_status(u8 req_status)
3194 static char *REPLY_STATUS[] = {
3197 "ABORT_NO_DATA_TRANSFER",
3198 "ABORT_PARTIAL_TRANSFER",
3200 "ERROR_NO_DATA_TRANSFER",
3201 "ERROR_PARTIAL_TRANSFER",
3202 "PROCESS_ABORT_DIRTY",
3203 "PROCESS_ABORT_NO_DATA_TRANSFER",
3204 "PROCESS_ABORT_PARTIAL_TRANSFER",
3205 "TRANSACTION_ERROR",
/* bounds check before indexing the name table */
3209 if (req_status >= ARRAY_SIZE(REPLY_STATUS))
3210 printk("RequestStatus = %0#2x", req_status);
3212 printk("%s", REPLY_STATUS[req_status]);
3216 * Used for error reporting/debugging purposes.
3217 * Following detailed status are valid for executive class,
3218 * utility class, DDM class and for transaction error replies.
/*
 * i2o_report_common_dsc - printk the symbolic name of a common
 * detailed status code, or the raw hex value when out of range.
 */
3220 static void i2o_report_common_dsc(u16 detailed_status)
3222 static char *COMMON_DSC[] = {
3227 "REPLY_BUFFER_FULL",
3229 "INSUFFICIENT_RESOURCE_SOFT",
3230 "INSUFFICIENT_RESOURCE_HARD",
3232 "CHAIN_BUFFER_TOO_LARGE",
3233 "UNSUPPORTED_FUNCTION",
3236 "INAPPROPRIATE_FUNCTION",
3237 "INVALID_INITIATOR_ADDRESS",
3238 "INVALID_MESSAGE_FLAGS",
3240 "INVALID_PARAMETER",
3242 "INVALID_TARGET_ADDRESS",
3243 "MESSAGE_TOO_LARGE",
3244 "MESSAGE_TOO_SMALL",
3245 "MISSING_PARAMETER",
3249 "UNSUPPORTED_VERSION",
3251 "DEVICE_NOT_AVAILABLE"
/* codes past the last known constant are printed numerically */
3254 if (detailed_status > I2O_DSC_DEVICE_NOT_AVAILABLE)
3255 printk(" / DetailedStatus = %0#4x.\n", detailed_status);
3257 printk(" / %s.\n", COMMON_DSC[detailed_status]);
3261 * Used for error reporting/debugging purposes
/*
 * i2o_report_lan_dsc - printk the symbolic name of a LAN-class
 * detailed status code, or the raw hex value when out of range.
 */
3263 static void i2o_report_lan_dsc(u16 detailed_status)
3265 static char *LAN_DSC[] = { // Lan detailed status code strings
3268 "DESTINATION_NOT_FOUND",
3274 "BAD_PACKET_DETECTED",
3277 "IOP_INTERNAL_ERROR",
3279 "INVALID_TRANSACTION_CONTEXT",
3280 "DEST_ADDRESS_DETECTED",
3281 "DEST_ADDRESS_OMITTED",
3282 "PARTIAL_PACKET_RETURNED",
3283 "TEMP_SUSPENDED_STATE", // last Lan detailed status code
3284 "INVALID_REQUEST" // general detailed status code
3287 if (detailed_status > I2O_DSC_INVALID_REQUEST)
3288 printk(" / %0#4x.\n", detailed_status);
3290 printk(" / %s.\n", LAN_DSC[detailed_status]);
3294 * Used for error reporting/debugging purposes
/*
 * i2o_report_util_cmd - printk the symbolic name of a utility-class
 * command code; unknown codes print as raw hex. Output has a trailing
 * ", " so the caller can append status information on the same line.
 */
3296 static void i2o_report_util_cmd(u8 cmd)
3299 case I2O_CMD_UTIL_NOP:
3300 printk("UTIL_NOP, ");
3302 case I2O_CMD_UTIL_ABORT:
3303 printk("UTIL_ABORT, ");
3305 case I2O_CMD_UTIL_CLAIM:
3306 printk("UTIL_CLAIM, ");
3308 case I2O_CMD_UTIL_RELEASE:
3309 printk("UTIL_CLAIM_RELEASE, ");
3311 case I2O_CMD_UTIL_CONFIG_DIALOG:
3312 printk("UTIL_CONFIG_DIALOG, ");
3314 case I2O_CMD_UTIL_DEVICE_RESERVE:
3315 printk("UTIL_DEVICE_RESERVE, ");
3317 case I2O_CMD_UTIL_DEVICE_RELEASE:
3318 printk("UTIL_DEVICE_RELEASE, ");
3320 case I2O_CMD_UTIL_EVT_ACK:
3321 printk("UTIL_EVENT_ACKNOWLEDGE, ");
3323 case I2O_CMD_UTIL_EVT_REGISTER:
3324 printk("UTIL_EVENT_REGISTER, ");
3326 case I2O_CMD_UTIL_LOCK:
3327 printk("UTIL_LOCK, ");
3329 case I2O_CMD_UTIL_LOCK_RELEASE:
3330 printk("UTIL_LOCK_RELEASE, ");
3332 case I2O_CMD_UTIL_PARAMS_GET:
3333 printk("UTIL_PARAMS_GET, ");
3335 case I2O_CMD_UTIL_PARAMS_SET:
3336 printk("UTIL_PARAMS_SET, ");
3338 case I2O_CMD_UTIL_REPLY_FAULT_NOTIFY:
3339 printk("UTIL_REPLY_FAULT_NOTIFY, ");
3342 printk("Cmd = %0#2x, ",cmd);
3347 * Used for error reporting/debugging purposes
/*
 * i2o_report_exec_cmd - printk the symbolic name of an executive-class
 * command code; unknown codes print as raw hex. Output has a trailing
 * ", " so the caller can append status information on the same line.
 */
3349 static void i2o_report_exec_cmd(u8 cmd)
3352 case I2O_CMD_ADAPTER_ASSIGN:
3353 printk("EXEC_ADAPTER_ASSIGN, ");
3355 case I2O_CMD_ADAPTER_READ:
3356 printk("EXEC_ADAPTER_READ, ");
3358 case I2O_CMD_ADAPTER_RELEASE:
3359 printk("EXEC_ADAPTER_RELEASE, ");
3361 case I2O_CMD_BIOS_INFO_SET:
3362 printk("EXEC_BIOS_INFO_SET, ");
3364 case I2O_CMD_BOOT_DEVICE_SET:
3365 printk("EXEC_BOOT_DEVICE_SET, ");
3367 case I2O_CMD_CONFIG_VALIDATE:
3368 printk("EXEC_CONFIG_VALIDATE, ");
3370 case I2O_CMD_CONN_SETUP:
3371 printk("EXEC_CONN_SETUP, ");
3373 case I2O_CMD_DDM_DESTROY:
3374 printk("EXEC_DDM_DESTROY, ");
3376 case I2O_CMD_DDM_ENABLE:
3377 printk("EXEC_DDM_ENABLE, ");
3379 case I2O_CMD_DDM_QUIESCE:
3380 printk("EXEC_DDM_QUIESCE, ");
3382 case I2O_CMD_DDM_RESET:
3383 printk("EXEC_DDM_RESET, ");
3385 case I2O_CMD_DDM_SUSPEND:
3386 printk("EXEC_DDM_SUSPEND, ");
3388 case I2O_CMD_DEVICE_ASSIGN:
3389 printk("EXEC_DEVICE_ASSIGN, ");
3391 case I2O_CMD_DEVICE_RELEASE:
3392 printk("EXEC_DEVICE_RELEASE, ");
3394 case I2O_CMD_HRT_GET:
3395 printk("EXEC_HRT_GET, ");
3397 case I2O_CMD_ADAPTER_CLEAR:
3398 printk("EXEC_IOP_CLEAR, ");
3400 case I2O_CMD_ADAPTER_CONNECT:
3401 printk("EXEC_IOP_CONNECT, ");
3403 case I2O_CMD_ADAPTER_RESET:
3404 printk("EXEC_IOP_RESET, ");
3406 case I2O_CMD_LCT_NOTIFY:
3407 printk("EXEC_LCT_NOTIFY, ");
3409 case I2O_CMD_OUTBOUND_INIT:
3410 printk("EXEC_OUTBOUND_INIT, ");
3412 case I2O_CMD_PATH_ENABLE:
3413 printk("EXEC_PATH_ENABLE, ");
3415 case I2O_CMD_PATH_QUIESCE:
3416 printk("EXEC_PATH_QUIESCE, ");
3418 case I2O_CMD_PATH_RESET:
3419 printk("EXEC_PATH_RESET, ");
3421 case I2O_CMD_STATIC_MF_CREATE:
3422 printk("EXEC_STATIC_MF_CREATE, ");
3424 case I2O_CMD_STATIC_MF_RELEASE:
3425 printk("EXEC_STATIC_MF_RELEASE, ");
3427 case I2O_CMD_STATUS_GET:
3428 printk("EXEC_STATUS_GET, ");
3430 case I2O_CMD_SW_DOWNLOAD:
3431 printk("EXEC_SW_DOWNLOAD, ");
3433 case I2O_CMD_SW_UPLOAD:
3434 printk("EXEC_SW_UPLOAD, ");
3436 case I2O_CMD_SW_REMOVE:
3437 printk("EXEC_SW_REMOVE, ");
3439 case I2O_CMD_SYS_ENABLE:
3440 printk("EXEC_SYS_ENABLE, ");
3442 case I2O_CMD_SYS_MODIFY:
3443 printk("EXEC_SYS_MODIFY, ");
3445 case I2O_CMD_SYS_QUIESCE:
3446 printk("EXEC_SYS_QUIESCE, ");
3448 case I2O_CMD_SYS_TAB_SET:
3449 printk("EXEC_SYS_TAB_SET, ");
3452 printk("Cmd = %#02x, ",cmd);
3457 * Used for error reporting/debugging purposes
/*
 * i2o_report_lan_cmd - printk the symbolic name of a LAN-class command
 * code; unknown codes print as raw hex. Trailing ", " is deliberate:
 * the caller continues the same log line with status details.
 */
3459 static void i2o_report_lan_cmd(u8 cmd)
3462 case LAN_PACKET_SEND:
3463 printk("LAN_PACKET_SEND, ");
3466 printk("LAN_SDU_SEND, ");
3468 case LAN_RECEIVE_POST:
3469 printk("LAN_RECEIVE_POST, ");
3472 printk("LAN_RESET, ");
3475 printk("LAN_SUSPEND, ");
3478 printk("Cmd = %0#2x, ",cmd);
3483 * Used for error reporting/debugging purposes.
3484 * Report Cmd name, Request status, Detailed Status.
/*
 * i2o_report_status - log a one-line decoded summary of a reply frame.
 * @severity: printk level/prefix string supplied by the caller
 * @str: caller identification prepended to the decoded output
 * @msg: the reply message frame to decode
 * Dispatches to the per-class name printers by command-code range, then
 * prints the request status and (unless it is a MSG_FAIL frame, which
 * gets the fail-status dump instead) the detailed status code.
 */
3486 void i2o_report_status(const char *severity, const char *str, u32 *msg)
3488 u8 cmd = (msg[1]>>24)&0xFF;
3489 u8 req_status = (msg[4]>>24)&0xFF;
3490 u16 detailed_status = msg[4]&0xFFFF;
/* handler looked up by initiator context; used only for its class below */
3491 struct i2o_handler *h = i2o_handlers[msg[2] & (MAX_I2O_MODULES-1)];
3493 if (cmd == I2O_CMD_UTIL_EVT_REGISTER)
3494 return; // No status in this reply
3496 printk("%s%s: ", severity, str);
3498 if (cmd < 0x1F) // Utility cmd
3499 i2o_report_util_cmd(cmd);
3501 else if (cmd >= 0xA0 && cmd <= 0xEF) // Executive cmd
3502 i2o_report_exec_cmd(cmd);
3504 else if (h->class == I2O_CLASS_LAN && cmd >= 0x30 && cmd <= 0x3F)
3505 i2o_report_lan_cmd(cmd); // LAN cmd
3507 printk("Cmd = %0#2x, ", cmd); // Other cmds
3509 if (msg[0] & MSG_FAIL) {
3510 i2o_report_fail_status(req_status, msg);
3514 i2o_report_common_status(req_status);
3516 if (cmd < 0x1F || (cmd >= 0xA0 && cmd <= 0xEF))
3517 i2o_report_common_dsc(detailed_status);
3518 else if (h->class == I2O_CLASS_LAN && cmd >= 0x30 && cmd <= 0x3F)
3519 i2o_report_lan_dsc(detailed_status);
3521 printk(" / DetailedStatus = %0#4x.\n", detailed_status);
3524 /* Used to dump a message to syslog during debugging */
/*
 * i2o_dump_message - printk every 32-bit word of an I2O message frame.
 * The word count is taken from the size field in msg[0] bits 16..31.
 */
3525 void i2o_dump_message(u32 *msg)
3529 printk(KERN_INFO "Dumping I2O message size %d @ %p\n",
3530 msg[0]>>16&0xffff, msg);
3531 for(i = 0; i < ((msg[0]>>16)&0xffff); i++)
3532 printk(KERN_INFO " msg[%d] = %0#10x\n", i, msg[i]);
3537 * I2O reboot/shutdown notification.
3539 * - Call each OSM's reboot notifier (if one exists)
3540 * - Quiesce each IOP in the system
3542 * Each IOP has to be quiesced before we can ensure that the system
3543 * can be properly shutdown as a transaction that has already been
3544 * acknowledged still needs to be placed in permanent store on the IOP.
3545 * The SysQuiesce causes the IOP to force all HDMs to complete their
3546 * transactions before returning, so only at that point is it safe
/*
 * i2o_reboot_event - reboot-notifier callback registered by the core.
 * Ignores codes other than restart/halt/power-off; otherwise invokes
 * every registered handler's reboot_notify() hook and then quiesces
 * each controller on the chain, warning if a quiesce fails.
 */
3549 static int i2o_reboot_event(struct notifier_block *n, unsigned long code, void
3553 struct i2o_controller *c = NULL;
3555 if(code != SYS_RESTART && code != SYS_HALT && code != SYS_POWER_OFF)
3558 printk(KERN_INFO "Shutting down I2O system.\n");
3560 " This could take a few minutes if there are many devices attached\n");
3562 for(i = 0; i < MAX_I2O_MODULES; i++)
3564 if(i2o_handlers[i] && i2o_handlers[i]->reboot_notify)
3565 i2o_handlers[i]->reboot_notify();
3568 for(c = i2o_controller_chain; c; c = c->next)
3570 if(i2o_quiesce_controller(c))
3572 printk(KERN_WARNING "i2o: Could not quiesce %s.\n"
3573 "Verify setup on next system power up.\n",
3578 printk(KERN_INFO "I2O system down.\n");
3586 * i2o_pci_dispose - Free bus specific resources
3587 * @c: I2O controller
3589 * Disable interrupts and then free interrupt, I/O and mtrr resources
3590 * used by this controller. Called by the I2O core on unload.
3593 static void i2o_pci_dispose(struct i2o_controller *c)
/* mask all IOP interrupts before tearing down the IRQ */
3595 I2O_IRQ_WRITE32(c,0xFFFFFFFF);
3597 free_irq(c->irq, c);
3598 iounmap(c->base_virt);
3600 iounmap(c->msg_virt);
/* MTRR registers were allocated in i2o_pci_install(); >0 means valid */
3603 if(c->mtrr_reg0 > 0)
3604 mtrr_del(c->mtrr_reg0, 0, 0);
3605 if(c->mtrr_reg1 > 0)
3606 mtrr_del(c->mtrr_reg1, 0, 0);
3611 * i2o_pci_interrupt - Bus specific interrupt handler
3612 * @irq: interrupt line
3615 * Handle an interrupt from a PCI based I2O controller. This turns out
3616 * to be rather simple. We keep the controller pointer in the cookie.
/* NOTE(review): the handler body is elided in this extract -
 * presumably it drains the reply queue via i2o_run_queue(); verify
 * against the full source before relying on this description. */
3619 static irqreturn_t i2o_pci_interrupt(int irq, void *dev_id, struct pt_regs *r)
3621 struct i2o_controller *c = dev_id;
3627 * i2o_pci_install - Install a PCI i2o controller
3628 * @dev: PCI device of the I2O controller
3630 * Install a PCI (or in theory AGP) i2o controller. Devices are
3631 * initialized, configured and registered with the i2o core subsystem. Be
3632 * very careful with ordering. There may be pending interrupts.
3634 * To Do: Add support for polled controllers
3637 int __init i2o_pci_install(struct pci_dev *dev)
3639 struct i2o_controller *c=kmalloc(sizeof(struct i2o_controller),
3643 unsigned long bar0_phys = 0;
3644 unsigned long bar1_phys = 0;
3645 unsigned long bar0_size = 0;
3646 unsigned long bar1_size = 0;
3652 printk(KERN_ERR "i2o: Insufficient memory to add controller.\n");
3655 memset(c, 0, sizeof(*c));
3663 #if BITS_PER_LONG == 64
3664 c->context_list_lock = SPIN_LOCK_UNLOCKED;
3668 * Cards that fall apart if you hit them with large I/O
3672 if(dev->vendor == PCI_VENDOR_ID_NCR && dev->device == 0x0630)
3675 printk(KERN_INFO "I2O: Symbios FC920 workarounds activated.\n");
3678 if(dev->subsystem_vendor == PCI_VENDOR_ID_PROMISE)
3681 printk(KERN_INFO "I2O: Promise workarounds activated.\n");
3685 * Cards that go bananas if you quiesce them before you reset
3689 if(dev->vendor == PCI_VENDOR_ID_DPT) {
3691 if(dev->device == 0xA511)
3697 /* Skip I/O spaces */
3698 if(!(pci_resource_flags(dev, i) & IORESOURCE_IO))
/* first memory BAR is the controller registers (BAR0) */
3702 bar0_phys = pci_resource_start(dev, i);
3703 bar0_size = pci_resource_len(dev, i);
/* optional second memory BAR carries the message unit (BAR1) */
3709 bar1_phys = pci_resource_start(dev, i);
3710 bar1_size = pci_resource_len(dev, i);
3718 printk(KERN_ERR "i2o: I2O controller has no memory regions defined.\n");
3724 /* Map the I2O controller */
3726 printk(KERN_INFO "i2o: PCI I2O controller at %08lX size=%ld\n", bar0_phys, bar0_size);
3728 printk(KERN_INFO "i2o: PCI I2O controller\n BAR0 at 0x%08lX size=%ld\n BAR1 at 0x%08lX size=%ld\n", bar0_phys, bar0_size, bar1_phys, bar1_size);
3730 bar0_virt = ioremap(bar0_phys, bar0_size);
3733 printk(KERN_ERR "i2o: Unable to map controller.\n");
3740 bar1_virt = ioremap(bar1_phys, bar1_size);
3743 printk(KERN_ERR "i2o: Unable to map controller.\n");
/* no separate message BAR: alias BAR1 fields onto BAR0 */
3749 bar1_virt = bar0_virt;
3750 bar1_phys = bar0_phys;
3751 bar1_size = bar0_size;
/* standard I2O register offsets: mask 0x34, inbound 0x40, outbound 0x44 */
3754 c->irq_mask = bar0_virt+0x34;
3755 c->post_port = bar0_virt+0x40;
3756 c->reply_port = bar0_virt+0x44;
3758 c->base_phys = bar0_phys;
3759 c->base_virt = bar0_virt;
3760 c->msg_phys = bar1_phys;
3761 c->msg_virt = bar1_virt;
3764 * Enable Write Combining MTRR for IOP's memory region
3767 c->mtrr_reg0 = mtrr_add(c->base_phys, bar0_size, MTRR_TYPE_WRCOMB, 1);
3769 * If it is an INTEL i960 I/O processor then set the first 64K to
3770 * Uncacheable since the region contains the Messaging unit which
3771 * shouldn't be cached.
3774 if(dev->vendor == PCI_VENDOR_ID_INTEL || dev->vendor == PCI_VENDOR_ID_DPT)
3776 printk(KERN_INFO "I2O: MTRR workaround for Intel i960 processor\n");
3777 c->mtrr_reg1 = mtrr_add(c->base_phys, 65536, MTRR_TYPE_UNCACHABLE, 1);
3780 printk(KERN_INFO "i2o_pci: Error in setting MTRR_TYPE_UNCACHABLE\n");
3781 mtrr_del(c->mtrr_reg0, c->msg_phys, bar1_size);
3786 c->mtrr_reg1 = mtrr_add(c->msg_phys, bar1_size, MTRR_TYPE_WRCOMB, 1);
/* keep interrupts masked until the controller is fully installed */
3790 I2O_IRQ_WRITE32(c,0xFFFFFFFF);
3792 i = i2o_install_controller(c);
3796 printk(KERN_ERR "i2o: Unable to install controller.\n");
3807 i=request_irq(dev->irq, i2o_pci_interrupt, SA_SHIRQ,
3811 printk(KERN_ERR "%s: unable to allocate interrupt %d.\n",
3814 i2o_delete_controller(c);
3822 printk(KERN_INFO "%s: Installed at IRQ%d\n", c->name, dev->irq);
/* unmask interrupts now that the handler is in place */
3823 I2O_IRQ_WRITE32(c,0x0);
3829 * i2o_pci_scan - Scan the pci bus for controllers
3831 * Scan the PCI devices on the system looking for any device which is a
3832 * member of the Intelligent, I2O class. We attempt to set up each such device
3833 * and register it with the core.
3835 * Returns the number of controllers registered
3837 * Note; Do not change this to a hot plug interface. I2O 1.5 itself
3838 * does not support hot plugging.
3841 int __init i2o_pci_scan(void)
3843 struct pci_dev *dev = NULL;
3846 printk(KERN_INFO "i2o: Checking for PCI I2O controllers...\n");
3848 while ((dev = pci_find_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL)
/* accept I2O-class devices plus the DPT 0xA511 which misreports class */
3850 if((dev->class>>8)!=PCI_CLASS_INTELLIGENT_I2O &&
3851 (dev->vendor!=PCI_VENDOR_ID_DPT || dev->device!=0xA511))
/* programming interface > 1 means a spec newer than we speak */
3854 if((dev->class>>8)==PCI_CLASS_INTELLIGENT_I2O &&
3855 (dev->class&0xFF)>1)
3857 printk(KERN_INFO "i2o: I2O Controller found but does not support I2O 1.5 (skipping).\n");
3860 if (pci_enable_device(dev))
3862 printk(KERN_INFO "i2o: I2O controller on bus %d at %d.\n",
3863 dev->bus->number, dev->devfn);
/* the controller DMAs replies; require a 32-bit-capable mask */
3864 if(pci_set_dma_mask(dev, 0xffffffff))
3866 printk(KERN_WARNING "I2O controller on bus %d at %d : No suitable DMA available\n", dev->bus->number, dev->devfn);
3869 pci_set_master(dev);
3870 if(i2o_pci_install(dev)==0)
3874 printk(KERN_INFO "i2o: %d I2O controller%s found and installed.\n", count,
3876 return count?count:-ENODEV;
/*
 * i2o_core_init - module init: install the core message handler, start
 * the event-handling kernel thread, and register the reboot notifier.
 * Fails (unwinding the handler) if the event thread cannot be created.
 */
3879 static int i2o_core_init(void)
3881 printk(KERN_INFO "I2O Core - (C) Copyright 1999 Red Hat Software\n");
3882 if (i2o_install_handler(&i2o_core_handler) < 0)
3884 printk(KERN_ERR "i2o_core: Unable to install core handler.\nI2O stack not loaded!");
/* context identifies core-originated messages in replies */
3888 core_context = i2o_core_handler.context;
3891 * Initialize event handling thread
3894 init_MUTEX_LOCKED(&evt_sem);
3895 evt_pid = kernel_thread(i2o_core_evt, &evt_reply, CLONE_SIGHAND);
3898 printk(KERN_ERR "I2O: Could not create event handler kernel thread\n");
3899 i2o_remove_handler(&i2o_core_handler);
3903 printk(KERN_INFO "I2O: Event thread created as pid %d\n", evt_pid);
3906 if(i2o_num_controllers)
3909 register_reboot_notifier(&i2o_reboot_notifier);
/*
 * i2o_core_exit - module exit: unregister the reboot notifier, kill the
 * event thread (waiting for it to signal completion), and remove the
 * core message handler.
 */
3914 static void i2o_core_exit(void)
3918 unregister_reboot_notifier(&i2o_reboot_notifier);
3920 if(i2o_num_controllers)
3924 * If this is shutdown time, the thread has already been killed
3927 printk("Terminating i2o threads...");
3928 stat = kill_proc(evt_pid, SIGKILL, 1);
3930 printk("waiting...\n");
/* evt thread signals evt_dead just before exiting */
3931 wait_for_completion(&evt_dead);
3935 i2o_remove_handler(&i2o_core_handler);
3938 module_init(i2o_core_init);
3939 module_exit(i2o_core_exit);
/* module parameter: verbosity of diagnostic output */
3941 MODULE_PARM(verbose, "i");
3942 MODULE_PARM_DESC(verbose, "Verbose diagnostics");
3944 MODULE_AUTHOR("Red Hat Software");
3945 MODULE_DESCRIPTION("I2O Core");
3946 MODULE_LICENSE("GPL");
/* public API consumed by the OSMs (i2o_block, i2o_scsi, i2o_lan, ...) */
3948 EXPORT_SYMBOL(i2o_controller_chain);
3949 EXPORT_SYMBOL(i2o_num_controllers);
3950 EXPORT_SYMBOL(i2o_find_controller);
3951 EXPORT_SYMBOL(i2o_unlock_controller);
3952 EXPORT_SYMBOL(i2o_status_get);
3953 EXPORT_SYMBOL(i2o_install_handler);
3954 EXPORT_SYMBOL(i2o_remove_handler);
3955 EXPORT_SYMBOL(i2o_install_controller);
3956 EXPORT_SYMBOL(i2o_delete_controller);
3957 EXPORT_SYMBOL(i2o_run_queue);
3958 EXPORT_SYMBOL(i2o_claim_device);
3959 EXPORT_SYMBOL(i2o_release_device);
3960 EXPORT_SYMBOL(i2o_device_notify_on);
3961 EXPORT_SYMBOL(i2o_device_notify_off);
3962 EXPORT_SYMBOL(i2o_post_this);
3963 EXPORT_SYMBOL(i2o_post_wait);
3964 EXPORT_SYMBOL(i2o_post_wait_mem);
3965 EXPORT_SYMBOL(i2o_query_scalar);
3966 EXPORT_SYMBOL(i2o_set_scalar);
3967 EXPORT_SYMBOL(i2o_query_table);
3968 EXPORT_SYMBOL(i2o_clear_table);
3969 EXPORT_SYMBOL(i2o_row_add_table);
3970 EXPORT_SYMBOL(i2o_issue_params);
3971 EXPORT_SYMBOL(i2o_event_register);
3972 EXPORT_SYMBOL(i2o_event_ack);
3973 EXPORT_SYMBOL(i2o_report_status);
3974 EXPORT_SYMBOL(i2o_dump_message);
3975 EXPORT_SYMBOL(i2o_get_class_name);
3976 EXPORT_SYMBOL(i2o_context_list_add);
3977 EXPORT_SYMBOL(i2o_context_list_get);
3978 EXPORT_SYMBOL(i2o_context_list_remove);