2 * Core I2O structure management
4 * (C) Copyright 1999-2002 Red Hat Software
6 * Written by Alan Cox, Building Number Three Ltd
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version
11 * 2 of the License, or (at your option) any later version.
13 * A lot of the I2O message side code from this is taken from the
14 * Red Creek RCPCI45 adapter driver by Red Creek Communications
18 * Juha Sievänen <Juha.Sievanen@cs.Helsinki.FI>
19 * Auvo Häkkinen <Auvo.Hakkinen@cs.Helsinki.FI>
20 * Deepak Saxena <deepak@plexity.net>
21 * Boji T Kannanthanam <boji.t.kannanthanam@intel.com>
22 * Alan Cox <alan@redhat.com>:
23 * Ported to Linux 2.5.
24 * Markus Lidel <Markus.Lidel@shadowconnect.com>:
25 * Minor fixes for 2.6.
29 #include <linux/config.h>
30 #include <linux/module.h>
31 #include <linux/kernel.h>
32 #include <linux/pci.h>
34 #include <linux/i2o.h>
36 #include <linux/errno.h>
37 #include <linux/init.h>
38 #include <linux/slab.h>
39 #include <linux/spinlock.h>
40 #include <linux/smp_lock.h>
42 #include <linux/bitops.h>
43 #include <linux/wait.h>
44 #include <linux/delay.h>
45 #include <linux/timer.h>
46 #include <linux/interrupt.h>
47 #include <linux/sched.h>
48 #include <asm/semaphore.h>
49 #include <linux/completion.h>
50 #include <linux/workqueue.h>
53 #include <linux/reboot.h>
/*
 * Module-scope state for the I2O core: debug macro, OSM handler table,
 * controller registry, system table, synchronous post-wait bookkeeping
 * and the event-thread machinery.
 * NOTE(review): this listing is a fragmentary excerpt (original source
 * line numbers are embedded and non-contiguous); the preprocessor
 * conditional that selects between the two dprintk definitions below
 * (presumably #ifdef DRIVERDEBUG) is not visible here.
 */
63 #define dprintk(s, args...) printk(s, ## args)
65 #define dprintk(s, args...)
/* Registered message handlers (OSMs); slot index doubles as the
 * handler's initiator context (see i2o_install_handler). */
69 static struct i2o_handler *i2o_handlers[MAX_I2O_MODULES];
/* Controller registry: table indexed by unit number plus a singly
 * linked chain for iteration (see i2o_install_controller). */
72 static struct i2o_controller *i2o_controllers[MAX_I2O_CONTROLLERS];
73 struct i2o_controller *i2o_controller_chain;
74 int i2o_num_controllers;
76 /* Initiator Context for Core message */
77 static int core_context;
79 /* Initialization && shutdown functions */
80 void i2o_sys_init(void);
81 static void i2o_sys_shutdown(void);
82 static int i2o_reset_controller(struct i2o_controller *);
83 static int i2o_reboot_event(struct notifier_block *, unsigned long , void *);
84 static int i2o_online_controller(struct i2o_controller *);
85 static int i2o_init_outbound_q(struct i2o_controller *);
86 static int i2o_post_outbound_messages(struct i2o_controller *);
89 static void i2o_core_reply(struct i2o_handler *, struct i2o_controller *,
90 struct i2o_message *);
92 /* Various helper functions */
93 static int i2o_lct_get(struct i2o_controller *);
94 static int i2o_lct_notify(struct i2o_controller *);
95 static int i2o_hrt_get(struct i2o_controller *);
97 static int i2o_build_sys_table(void);
98 static int i2o_systab_send(struct i2o_controller *c);
100 /* I2O core event handler */
101 static int i2o_core_evt(void *);
103 static int evt_running;
105 /* Dynamic LCT update handler */
106 static int i2o_dyn_lct(void *);
108 void i2o_report_controller_unit(struct i2o_controller *, struct i2o_device *);
110 static void i2o_pci_dispose(struct i2o_controller *c);
113 * I2O System Table. Contains information about
114 * all the IOPs in the system. Used to inform IOPs
115 * about each other's existence.
117 * sys_tbl_ver is the CurrentChangeIndicator that is
118 * used by IOPs to track changes.
120 static struct i2o_sys_tbl *sys_tbl;
121 static int sys_tbl_ind;
122 static int sys_tbl_len;
125 * This spin lock is used to keep a device from being
126 * added and deleted concurrently across CPUs or interrupts.
127 * This can occur when a user creates a device and immediately
128 * deletes it before the new_dev_notify() handler is called.
130 static spinlock_t i2o_dev_lock = SPIN_LOCK_UNLOCKED;
133 * Structures and definitions for synchronous message posting.
134 * See i2o_post_wait() for description.
136 struct i2o_post_wait_data
138 int *status; /* Pointer to status block on caller stack */
139 int *complete; /* Pointer to completion flag on caller stack */
140 u32 id; /* Unique identifier */
141 wait_queue_head_t *wq; /* Wake up for caller (NULL for dead) */
142 struct i2o_post_wait_data *next; /* Chain */
143 void *mem[2]; /* Memory blocks to recover on failure path */
144 dma_addr_t phys[2]; /* Physical address of blocks to recover */
145 u32 size[2]; /* Size of blocks to recover */
148 static struct i2o_post_wait_data *post_wait_queue;
149 static u32 post_wait_id; // Unique ID for each post_wait
150 static spinlock_t post_wait_lock = SPIN_LOCK_UNLOCKED;
151 static void i2o_post_wait_complete(struct i2o_controller *, u32, int);
153 /* OSM descriptor handler */
154 static struct i2o_handler i2o_core_handler =
156 (void *)i2o_core_reply,
166 * Used when queueing a reply to be handled later
171 struct i2o_controller *iop;
172 u32 msg[MSG_FRAME_SIZE];
174 static struct reply_info evt_reply;
175 static struct reply_info events[I2O_EVT_Q_LEN];
178 static int evt_q_len;
179 #define MODINC(x,y) ((x) = ((x) + 1) % (y))
182 * I2O configuration lock. This isn't a big deal for contention
183 * so we have one only
/* NOTE(review): despite the historical "spinlock" wording above, this
 * is declared as a semaphore (DECLARE_MUTEX) — it is held across
 * sleeping operations such as i2o_post_wait. */
186 static DECLARE_MUTEX(i2o_configuration_lock);
189 * Event spinlock. Used to keep event queue sane and from
190 * handling multiple events simultaneously.
192 static spinlock_t i2o_evt_lock = SPIN_LOCK_UNLOCKED;
195 * Semaphore used to synchronize event handling thread with
199 static DECLARE_MUTEX(evt_sem);
200 static DECLARE_COMPLETION(evt_dead);
201 static DECLARE_WAIT_QUEUE_HEAD(evt_wait);
203 static struct notifier_block i2o_reboot_notifier =
217 * I2O Core reply handler
/*
 * Dispatches replies addressed to the core's own initiator context:
 * failed-message frames, post_wait completions, event notifications
 * and LCT-notify replies.
 * NOTE(review): fragmentary excerpt — the declaration of the local
 * 'msg' pointer (original lines 221-223) is not visible here; it
 * presumably aliases the reply frame 'm'. TODO confirm against the
 * full source.
 */
219 static void i2o_core_reply(struct i2o_handler *h, struct i2o_controller *c,
220 struct i2o_message *m)
224 u32 context = msg[2];
226 if (msg[0] & MSG_FAIL) // Fail bit is set
/* msg[7] holds the preserved MFA; map it through the IOP's window. */
228 u32 *preserved_msg = (u32*)(c->mem_offset + msg[7]);
230 i2o_report_status(KERN_INFO, "i2o_core", msg);
231 i2o_dump_message(preserved_msg);
233 /* If the failed request needs special treatment,
234 * it should be done here. */
236 /* Release the preserved msg by resubmitting it as a NOP */
238 preserved_msg[0] = cpu_to_le32(THREE_WORD_MSG_SIZE | SGL_OFFSET_0);
239 preserved_msg[1] = cpu_to_le32(I2O_CMD_UTIL_NOP << 24 | HOST_TID << 12 | 0);
240 preserved_msg[2] = 0;
241 i2o_post_message(c, msg[7]);
243 /* If reply to i2o_post_wait failed, return causes a timeout */
249 i2o_report_status(KERN_INFO, "i2o_core", msg);
/* High bit of the transaction context marks a post_wait request. */
252 if(msg[2]&0x80000000) // Post wait message
255 status = (msg[4] & 0xFFFF);
257 status = I2O_POST_WAIT_OK;
259 i2o_post_wait_complete(c, context, status);
263 if(m->function == I2O_CMD_UTIL_EVT_REGISTER)
/* Queue the event for the i2oevtd thread; when the ring is full the
 * oldest entry is dropped by advancing evt_out. */
265 memcpy(events[evt_in].msg, msg, (msg[0]>>16)<<2);
266 events[evt_in].iop = c;
268 spin_lock(&i2o_evt_lock);
269 MODINC(evt_in, I2O_EVT_Q_LEN);
270 if(evt_q_len == I2O_EVT_Q_LEN)
271 MODINC(evt_out, I2O_EVT_Q_LEN);
274 spin_unlock(&i2o_evt_lock);
277 wake_up_interruptible(&evt_wait);
281 if(m->function == I2O_CMD_LCT_NOTIFY)
288 * If this happens, we want to dump the message to the syslog so
289 * it can be sent back to the card manufacturer by the end user
290 * to aid in debugging.
293 printk(KERN_WARNING "%s: Unsolicited message reply sent to core!"
294 "Message dumped to syslog\n",
296 i2o_dump_message(msg);
302 * i2o_install_handler - install a message handler
303 * @h: Handler structure
305 * Install an I2O handler - these handle the asynchronous messaging
306 * from the card once it has initialised. If the table of handlers is
307 * full then -ENOSPC is returned. On a success 0 is returned and the
308 * context field is set by the function. The structure is part of the
309 * system from this time onwards. It must not be freed until it has
313 int i2o_install_handler(struct i2o_handler *h)
316 down(&i2o_configuration_lock);
/* First free slot wins; its index becomes the handler's context. */
317 for(i=0;i<MAX_I2O_MODULES;i++)
319 if(i2o_handlers[i]==NULL)
323 up(&i2o_configuration_lock);
/* NOTE(review): fragmentary excerpt — the slot assignment and the
 * -ENOSPC return path (original lines 318-328) are not fully visible. */
327 up(&i2o_configuration_lock);
332 * i2o_remove_handler - remove an i2o message handler
335 * Remove a message handler previously installed with i2o_install_handler.
336 * After this function returns the handler object can be freed or re-used
339 int i2o_remove_handler(struct i2o_handler *h)
/* h->context is the table slot assigned by i2o_install_handler. */
341 i2o_handlers[h->context]=NULL;
347 * Each I2O controller has a chain of devices on it.
348 * Each device has a pointer to its LCT entry to be used
353 * i2o_install_device - attach a device to a controller
357 * Add a new device to an i2o controller. This can be called from
358 * non interrupt contexts only. It adds the device and marks it as
359 * unclaimed. The device memory becomes part of the kernel and must
360 * be uninstalled before being freed or reused. Zero is returned
364 int i2o_install_device(struct i2o_controller *c, struct i2o_device *d)
368 down(&i2o_configuration_lock);
/* NOTE(review): fragmentary excerpt — the actual linkage of d into
 * c->devices (original lines 369-377) is not visible here. */
373 if (c->devices != NULL)
/* A freshly installed device starts with no interested managers. */
378 for(i = 0; i < I2O_MAX_MANAGERS; i++)
379 d->managers[i] = NULL;
381 up(&i2o_configuration_lock);
385 /* we need this version to call out of i2o_delete_controller */
/*
 * Lock-free variant of i2o_delete_device(): caller must already hold
 * i2o_configuration_lock. Notifies the owning OSM and any registered
 * managers via dev_del_notify() before the device is unlinked.
 */
387 int __i2o_delete_device(struct i2o_device *d)
389 struct i2o_device **p;
392 p=&(d->controller->devices);
395 * Hey we have a driver!
396 * Check to see if the driver wants us to notify it of
397 * device deletion. If it doesn't we assume that it
398 * is unsafe to delete a device with an owner and
403 if(d->owner->dev_del_notify)
405 dprintk(KERN_INFO "Device has owner, notifying\n");
406 d->owner->dev_del_notify(d->controller, d);
410 "Driver \"%s\" did not release device!\n", d->owner->name);
419 * Tell any other users who are talking to this device
420 * that it's going away. We assume that everything works.
422 for(i=0; i < I2O_MAX_MANAGERS; i++)
424 if(d->managers[i] && d->managers[i]->dev_del_notify)
425 d->managers[i]->dev_del_notify(d->controller, d);
/* NOTE(review): fragmentary excerpt — the list unlink, kfree and
 * success return (original lines 426-440) are not visible here. */
441 printk(KERN_ERR "i2o_delete_device: passed invalid device.\n");
446 * i2o_delete_device - remove an i2o device
447 * @d: device to remove
449 * This function unhooks a device from a controller. The device
450 * will not be unhooked if it has an owner who does not wish to free
451 * it, or if the owner lacks a dev_del_notify function. In that case
452 * -EBUSY is returned. On success 0 is returned. Other errors cause
453 * negative errno values to be returned
456 int i2o_delete_device(struct i2o_device *d)
460 down(&i2o_configuration_lock);
/* Locked wrapper: all real work happens in __i2o_delete_device(). */
466 ret = __i2o_delete_device(d);
468 up(&i2o_configuration_lock);
474 * i2o_install_controller - attach a controller
477 * Add a new controller to the i2o layer. This can be called from
478 * non interrupt contexts only. It adds the controller and marks it as
479 * unused with no devices. If the tables are full or memory allocations
480 * fail then a negative errno code is returned. On success zero is
481 * returned and the controller is bound to the system. The structure
482 * must not be freed or reused until being uninstalled.
485 int i2o_install_controller(struct i2o_controller *c)
488 down(&i2o_configuration_lock);
/* Claim the first free registry slot; its index becomes the unit. */
489 for(i=0;i<MAX_I2O_CONTROLLERS;i++)
491 if(i2o_controllers[i]==NULL)
/* 8K DMA-coherent buffer for the dynamic LCT used by i2o_dyn_lct(). */
493 c->dlct = (i2o_lct*)pci_alloc_consistent(c->pdev, 8192, &c->dlct_phys);
496 up(&i2o_configuration_lock);
499 i2o_controllers[i]=c;
/* Thread the controller onto the global chain as well. */
501 c->next=i2o_controller_chain;
502 i2o_controller_chain=c;
504 c->page_frame = NULL;
508 c->status_block = NULL;
509 sprintf(c->name, "i2o/iop%d", i);
510 i2o_num_controllers++;
/* lct_sem starts locked: i2o_dyn_lct() blocks on it until an update
 * is requested. */
511 init_MUTEX_LOCKED(&c->lct_sem);
512 up(&i2o_configuration_lock);
516 printk(KERN_ERR "No free i2o controller slots.\n");
517 up(&i2o_configuration_lock);
522 * i2o_delete_controller - delete a controller
525 * Remove an i2o controller from the system. If the controller or its
526 * devices are busy then -EBUSY is returned. On a failure a negative
527 * errno code is returned. On success zero is returned.
530 int i2o_delete_controller(struct i2o_controller *c)
532 struct i2o_controller **p;
537 dprintk(KERN_INFO "Deleting controller %s\n", c->name);
540 * Clear event registration as this can cause weird behavior
/* Passing an all-zero mask unregisters the core's event interest. */
542 if(c->status_block->iop_state == ADAPTER_STATE_OPERATIONAL)
543 i2o_event_register(c, core_context, 0, 0, 0);
545 down(&i2o_configuration_lock);
/* Refuse deletion while any user holds a reference taken via
 * i2o_find_controller(). */
546 if((users=atomic_read(&c->users)))
548 dprintk(KERN_INFO "I2O: %d users for controller %s\n", users,
550 up(&i2o_configuration_lock);
555 if(__i2o_delete_device(c->devices)<0)
557 /* Shouldn't happen */
558 I2O_IRQ_WRITE32(c, 0xFFFFFFFF);
560 up(&i2o_configuration_lock);
566 * If this is shutdown time, the thread's already been killed
/* Kill the per-controller LCT thread and poll up to ~10s for exit. */
569 stat = kill_proc(c->lct_pid, SIGKILL, 1);
571 int count = 10 * 100;
572 while(c->lct_running && --count) {
573 current->state = TASK_INTERRUPTIBLE;
579 "%s: LCT thread still running!\n",
584 p=&i2o_controller_chain;
590 /* Ask the IOP to switch to RESET state */
591 i2o_reset_controller(c);
597 up(&i2o_configuration_lock);
/* Release all DMA buffers owned by this controller. */
601 pci_unmap_single(c->pdev, c->page_frame_map, MSG_POOL_SIZE, PCI_DMA_FROMDEVICE);
602 kfree(c->page_frame);
605 pci_free_consistent(c->pdev, c->hrt_len, c->hrt, c->hrt_phys);
607 pci_free_consistent(c->pdev, c->lct->table_size << 2, c->lct, c->lct_phys);
609 pci_free_consistent(c->pdev, sizeof(i2o_status_block), c->status_block, c->status_block_phys);
611 pci_free_consistent(c->pdev, 8192, c->dlct, c->dlct_phys);
613 i2o_controllers[c->unit]=NULL;
/* Keep a copy of the name for the log line after c is gone. */
614 memcpy(name, c->name, strlen(c->name)+1);
616 dprintk(KERN_INFO "%s: Deleted from controller chain.\n", name);
618 i2o_num_controllers--;
623 up(&i2o_configuration_lock);
624 printk(KERN_ERR "i2o_delete_controller: bad pointer!\n");
629 * i2o_unlock_controller - unlock a controller
630 * @c: controller to unlock
632 * Release a lock previously taken on an i2o controller (see
633 * i2o_find_controller). i2o controllers are not refcounted, so a
634 * deletion of an in-use controller will fail rather than take effect
634 * on the last dereference.
637 void i2o_unlock_controller(struct i2o_controller *c)
639 atomic_dec(&c->users);
643 * i2o_find_controller - return a locked controller
644 * @n: controller number
646 * Returns a pointer to the controller object. The controller is locked
647 * on return. NULL is returned if the controller is not found.
650 struct i2o_controller *i2o_find_controller(int n)
652 struct i2o_controller *c;
/* Bounds-check the unit number before indexing the registry. */
654 if(n<0 || n>=MAX_I2O_CONTROLLERS)
657 down(&i2o_configuration_lock);
658 c=i2o_controllers[n];
/* Bump the user count so i2o_delete_controller() refuses deletion
 * until i2o_unlock_controller() is called. */
660 atomic_inc(&c->users);
661 up(&i2o_configuration_lock);
666 * i2o_issue_claim - claim or release a device
668 * @c: controller to claim for
670 * @type: type of claim
672 * Issue I2O UTIL_CLAIM and UTIL_RELEASE messages. The message to be sent
673 * is set by cmd. The tid is the task id of the object to claim and the
674 * type is the claim type (see the i2o standard)
676 * Zero is returned on success.
679 static int i2o_issue_claim(u32 cmd, struct i2o_controller *c, int tid, u32 type)
683 msg[0] = FIVE_WORD_MSG_SIZE | SGL_OFFSET_0;
684 msg[1] = cmd << 24 | HOST_TID<<12 | tid;
/* Synchronous post with a generous 60 second timeout. */
688 return i2o_post_wait(c, msg, sizeof(msg), 60);
692 * i2o_claim_device - claim a device for use by an OSM
693 * @d: device to claim
694 * @h: handler for this device
696 * Do the leg work to assign a device to a given OSM on Linux. The
697 * kernel updates the internal handler data for the device and then
698 * performs an I2O claim for the device, attempting to claim the
699 * device as primary. If the attempt fails a negative errno code
700 * is returned. On success zero is returned.
703 int i2o_claim_device(struct i2o_device *d, struct i2o_handler *h)
705 down(&i2o_configuration_lock);
/* Only one primary owner is permitted per device. */
707 printk(KERN_INFO "Device claim called, but dev already owned by %s!",
709 up(&i2o_configuration_lock);
714 if(i2o_issue_claim(I2O_CMD_UTIL_CLAIM ,d->controller,d->lct_data.tid,
720 up(&i2o_configuration_lock);
725 * i2o_release_device - release a device that the OSM is using
726 * @d: device to claim
727 * @h: handler for this device
729 * Drop a claim by an OSM on a given I2O device. The handler is cleared
730 * and 0 is returned on success.
732 * AC - some devices seem to want to refuse an unclaim until they have
733 * finished internal processing. It makes sense since you don't want a
734 * new device to go reconfiguring the entire system until you are done.
735 * Thus we are prepared to wait briefly.
738 int i2o_release_device(struct i2o_device *d, struct i2o_handler *h)
743 down(&i2o_configuration_lock);
/* Only the current owner may release the claim. */
745 printk(KERN_INFO "Claim release called, but not owned by %s!\n",
747 up(&i2o_configuration_lock);
/* Retry the UTIL_RELEASE up to 10 times, sleeping 1s (HZ jiffies)
 * between attempts to let the IOP finish internal processing. */
751 for(tries=0;tries<10;tries++)
756 * If the controller takes a nonblocking approach to
757 * releases we have to sleep/poll for a few times.
760 if((err=i2o_issue_claim(I2O_CMD_UTIL_RELEASE, d->controller, d->lct_data.tid, I2O_CLAIM_PRIMARY)) )
763 current->state = TASK_UNINTERRUPTIBLE;
764 schedule_timeout(HZ);
772 up(&i2o_configuration_lock);
777 * i2o_device_notify_on - Enable deletion notifiers
778 * @d: device for notification
779 * @h: handler to install
781 * Called by OSMs to let the core know that they want to be
782 * notified if the given device is deleted from the system.
785 int i2o_device_notify_on(struct i2o_device *d, struct i2o_handler *h)
/* Fail early if the managers table is already full. */
789 if(d->num_managers == I2O_MAX_MANAGERS)
/* Install h into the first free managers[] slot. */
792 for(i = 0; i < I2O_MAX_MANAGERS; i++)
807 * i2o_device_notify_off - Remove deletion notifiers
808 * @d: device for notification
809 * @h: handler to remove
811 * Called by OSMs to let the core know that they no longer
812 * are interested in the fate of the given device.
814 int i2o_device_notify_off(struct i2o_device *d, struct i2o_handler *h)
/* Find and clear the slot previously taken by h. */
818 for(i=0; i < I2O_MAX_MANAGERS; i++)
820 if(d->managers[i] == h)
822 d->managers[i] = NULL;
832 * i2o_event_register - register interest in an event
833 * @c: Controller to register interest with
835 * @init_context: initiator context to use with this notifier
836 * @tr_context: transaction context to use with this notifier
837 * @evt_mask: mask of events
839 * Create and posts an event registration message to the task. No reply
840 * is waited for, or expected. Errors in posting will be reported.
843 int i2o_event_register(struct i2o_controller *c, u32 tid,
844 u32 init_context, u32 tr_context, u32 evt_mask)
846 u32 msg[5]; // Not performance critical, so we just
847 // i2o_post_this it instead of building it
850 msg[0] = FIVE_WORD_MSG_SIZE|SGL_OFFSET_0;
851 msg[1] = I2O_CMD_UTIL_EVT_REGISTER<<24 | HOST_TID<<12 | tid;
852 msg[2] = init_context;
/* Fire-and-forget: replies arrive asynchronously via the reply path. */
856 return i2o_post_this(c, msg, sizeof(msg));
860 * i2o_event_ack - acknowledge an event
862 * @msg: pointer to the UTIL_EVENT_REGISTER reply we received
864 * We just take a pointer to the original UTIL_EVENT_REGISTER reply
865 * message and change the function code since that's what spec
866 * describes an EventAck message looking like.
869 int i2o_event_ack(struct i2o_controller *c, u32 *msg)
871 struct i2o_message *m = (struct i2o_message *)msg;
/* Rewrite the function code in place; size and contexts stay valid. */
873 m->function = I2O_CMD_UTIL_EVT_ACK;
/* Synchronous post; m->size is in 32-bit words, hence * 4 bytes. */
875 return i2o_post_wait(c, msg, m->size * 4, 2);
879 * Core event handler. Runs as a separate thread and is woken
880 * up whenever there is an Executive class event.
/*
 * NOTE(review): fragmentary excerpt — the enclosing while loop, the
 * switch statement header on msg[4], and several break/return lines
 * are not visible here.
 */
882 static int i2o_core_evt(void *reply_data)
884 struct reply_info *reply = (struct reply_info *) reply_data;
885 u32 *msg = reply->msg;
886 struct i2o_controller *c = NULL;
889 daemonize("i2oevtd");
890 allow_signal(SIGKILL);
/* Sleep until i2o_core_reply() queues an event; a signal means the
 * thread should die. */
896 if(down_interruptible(&evt_sem))
898 dprintk(KERN_INFO "I2O event thread dead\n");
899 printk("exiting...");
901 complete_and_exit(&evt_dead, 0);
905 * Copy the data out of the queue so that we don't have to lock
906 * around the whole function and just around the qlen update
908 spin_lock_irqsave(&i2o_evt_lock, flags);
909 memcpy(reply, &events[evt_out], sizeof(struct reply_info));
910 MODINC(evt_out, I2O_EVT_Q_LEN);
912 spin_unlock_irqrestore(&i2o_evt_lock, flags);
915 dprintk(KERN_INFO "I2O IRTOS EVENT: iop%d, event %#10x\n", c->unit, msg[4]);
918 * We do not attempt to delete/quiesce/etc. the controller if
919 * some sort of error indication occurs. We may want to do
920 * so in the future, but for now we just let the user deal with
921 * it. One reason for this is that what to do with an error
922 * or when to send what error is not really agreed on, so
923 * we get errors that may not be fatal but just look like they
924 * are...so let the user deal with it.
928 case I2O_EVT_IND_EXEC_RESOURCE_LIMITS:
929 printk(KERN_ERR "%s: Out of resources\n", c->name);
932 case I2O_EVT_IND_EXEC_POWER_FAIL:
933 printk(KERN_ERR "%s: Power failure\n", c->name);
936 case I2O_EVT_IND_EXEC_HW_FAIL:
944 "Code Execution Exception",
945 "Watchdog Timer Expired"
/* msg[5] indexes the failure-name table; the else branch covers
 * out-of-range codes. */
949 printk(KERN_ERR "%s: Hardware Failure: %s\n",
950 c->name, fail[msg[5]]);
952 printk(KERN_ERR "%s: Unknown Hardware Failure\n", c->name);
959 * - Create a new i2o_device entry
960 * - Inform all interested drivers about this device's existence
962 case I2O_EVT_IND_EXEC_NEW_LCT_ENTRY:
964 struct i2o_device *d = (struct i2o_device *)
965 kmalloc(sizeof(struct i2o_device), GFP_KERNEL);
969 printk(KERN_EMERG "i2oevtd: out of memory\n");
/* The new LCT entry is carried in the event payload at msg[5]. */
972 memcpy(&d->lct_data, &msg[5], sizeof(i2o_lct_entry));
978 i2o_report_controller_unit(c, d);
979 i2o_install_device(c,d);
/* Notify every handler whose class mask matches the new device. */
981 for(i = 0; i < MAX_I2O_MODULES; i++)
983 if(i2o_handlers[i] &&
984 i2o_handlers[i]->new_dev_notify &&
985 (i2o_handlers[i]->class&d->lct_data.class_id))
987 spin_lock(&i2o_dev_lock);
988 i2o_handlers[i]->new_dev_notify(c,d);
989 spin_unlock(&i2o_dev_lock);
997 * LCT entry for a device has been modified, so update it
1000 case I2O_EVT_IND_EXEC_MODIFIED_LCT:
1002 struct i2o_device *d;
1003 i2o_lct_entry *new_lct = (i2o_lct_entry *)&msg[5];
/* Match on TID and refresh the cached LCT entry in place. */
1005 for(d = c->devices; d; d = d->next)
1007 if(d->lct_data.tid == new_lct->tid)
1009 memcpy(&d->lct_data, new_lct, sizeof(i2o_lct_entry));
1016 case I2O_EVT_IND_CONFIGURATION_FLAG:
1017 printk(KERN_WARNING "%s requires user configuration\n", c->name);
1020 case I2O_EVT_IND_GENERAL_WARNING:
1021 printk(KERN_WARNING "%s: Warning notification received!"
1022 "Check configuration for errors!\n", c->name);
1025 case I2O_EVT_IND_EVT_MASK_MODIFIED:
1026 /* Well I guess that was us hey .. */
1030 printk(KERN_WARNING "%s: No handler for event (0x%08x)\n", c->name, msg[4]);
1039 * Dynamic LCT update. This compares the LCT with the currently
1040 * installed devices to check for device deletions..this needed b/c there
1041 * is no DELETED_LCT_ENTRY EventIndicator for the Executive class so
1042 * we can't just have the event handler do this...annoying
1044 * This is a hole in the spec that will hopefully be fixed someday.
/*
 * Per-controller kernel thread ("iop%d_lctd"). Sleeps on c->lct_sem
 * until woken, then diffs the dynamic LCT (c->dlct) against the
 * installed device list and removes devices that have disappeared.
 */
1046 static int i2o_dyn_lct(void *foo)
1048 struct i2o_controller *c = (struct i2o_controller *)foo;
1049 struct i2o_device *d = NULL;
1050 struct i2o_device *d1 = NULL;
1056 daemonize("iop%d_lctd", c->unit);
1057 allow_signal(SIGKILL);
/* Block until i2o_core releases lct_sem; a pending signal means die. */
1063 down_interruptible(&c->lct_sem);
1064 if(signal_pending(current))
1066 dprintk(KERN_ERR "%s: LCT thread dead\n", c->name);
1071 entries = c->dlct->table_size;
1075 dprintk(KERN_INFO "%s: Dynamic LCT Update\n",c->name);
1076 dprintk(KERN_INFO "%s: Dynamic LCT contains %d entries\n", c->name, entries);
1080 printk(KERN_INFO "%s: Empty LCT???\n", c->name);
1085 * Loop through all the devices on the IOP looking for their
1086 * LCT data in the LCT. We assume that TIDs are not repeated.
1087 * as that is the only way to really tell. It's been confirmed
1088 * by the IRTOS vendor(s?) that TIDs are not reused until they
1089 * wrap around (4096), and I doubt a system will be up long enough
1090 * to create/delete that many devices.
1092 for(d = c->devices; d; )
1097 for(i = 0; i < entries; i++)
1099 if(d->lct_data.tid == c->dlct->lct_entry[i].tid)
/* TID no longer present in the new LCT: device was deleted. */
1107 dprintk(KERN_INFO "i2o_core: Deleted device!\n");
1108 spin_lock(&i2o_dev_lock);
1109 i2o_delete_device(d);
1110 spin_unlock(&i2o_dev_lock);
1116 * Tell LCT to renotify us next time there is a change
1121 * Copy new LCT into public LCT
1123 * Possible race if someone is reading LCT while we are copying
1124 * over it. If this happens, we'll fix it then. but I doubt that
1125 * the LCT will get updated often enough or will get read by
1126 * a user often enough to worry.
/* Grow the public LCT buffer if the new table is larger. */
1128 if(c->lct->table_size < c->dlct->table_size)
1132 c->lct = pci_alloc_consistent(c->pdev, c->dlct->table_size<<2, &phys);
1135 printk(KERN_ERR "%s: No memory for LCT!\n", c->name);
/* NOTE(review): excerpt — 'tmp' is assigned in lines not visible here;
 * verify against full source that the free below targets the OLD
 * buffer/size, not the freshly allocated c->lct. */
1139 pci_free_consistent(tmp, c->lct->table_size << 2, c->lct, c->lct_phys);
1142 memcpy(c->lct, c->dlct, c->dlct->table_size<<2);
1149 * i2o_run_queue - process pending events on a controller
1150 * @c: controller to process
1152 * This is called by the bus specific driver layer when an interrupt
1153 * or poll of this card interface is desired.
1156 void i2o_run_queue(struct i2o_controller *c)
1158 struct i2o_message *m;
1163 * Old 960 steppings had a bug in the I2O unit that caused
1164 * the queue to appear empty when it wasn't.
/* 0xFFFFFFFF from the reply register means "queue empty"; re-read
 * once to work around the i960 erratum described above. */
1166 if((mv=I2O_REPLY_READ32(c))==0xFFFFFFFF)
1167 mv=I2O_REPLY_READ32(c);
1169 while(mv!=0xFFFFFFFF)
1171 struct i2o_handler *i;
1172 /* Map the message from the page frame map to kernel virtual */
1173 /* m=(struct i2o_message *)(mv - (unsigned long)c->page_frame_map + (unsigned long)c->page_frame); */
1174 m=(struct i2o_message *)bus_to_virt(mv);
1178 * Ensure this message is seen coherently but cachably by
1182 pci_dma_sync_single_for_cpu(c->pdev, c->page_frame_map, MSG_FRAME_SIZE, PCI_DMA_FROMDEVICE);
/* Low bits of the initiator context select the destination handler. */
1188 i=i2o_handlers[m->initiator_context&(MAX_I2O_MODULES-1)];
1193 printk(KERN_WARNING "I2O: Spurious reply to handler %d\n",
1194 m->initiator_context&(MAX_I2O_MODULES-1));
1196 i2o_flush_reply(c,mv);
1199 /* That 960 bug again... */
1200 if((mv=I2O_REPLY_READ32(c))==0xFFFFFFFF)
1201 mv=I2O_REPLY_READ32(c);
1207 * i2o_get_class_name - do i2o class name lookup
1208 * @class: class number
1210 * Return a descriptive string for an i2o class
/* NOTE(review): excerpt — several name-table entries, the switch header
 * and the idx assignments per case are not visible here. */
1213 const char *i2o_get_class_name(int class)
/* Static lookup table; the switch below maps class codes to indices. */
1216 static char *i2o_class_name[] = {
1218 "Device Driver Module",
1223 "Fibre Channel Port",
1224 "Fibre Channel Device",
1228 "Floppy Controller",
1230 "Secondary Bus Port",
1231 "Peer Transport Agent",
1238 case I2O_CLASS_EXECUTIVE:
1242 case I2O_CLASS_RANDOM_BLOCK_STORAGE:
1244 case I2O_CLASS_SEQUENTIAL_STORAGE:
1250 case I2O_CLASS_FIBRE_CHANNEL_PORT:
1252 case I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL:
1254 case I2O_CLASS_SCSI_PERIPHERAL:
1256 case I2O_CLASS_ATE_PORT:
1258 case I2O_CLASS_ATE_PERIPHERAL:
1260 case I2O_CLASS_FLOPPY_CONTROLLER:
1262 case I2O_CLASS_FLOPPY_DEVICE:
1264 case I2O_CLASS_BUS_ADAPTER_PORT:
1266 case I2O_CLASS_PEER_TRANSPORT_AGENT:
1268 case I2O_CLASS_PEER_TRANSPORT:
1272 return i2o_class_name[idx];
1277 * i2o_wait_message - obtain an i2o message from the IOP
1281 * This function waits up to 5 seconds for a message slot to be
1282 * available. If no message is available it prints an error message
1283 * that is expected to be what the message will be used for (eg
1284 * "get_status"). 0xFFFFFFFF is returned on a failure.
1286 * On a success the message is returned. This is the physical page
1287 * frame offset address from the read port. (See the i2o spec)
1290 u32 i2o_wait_message(struct i2o_controller *c, char *why)
/* Busy-poll the inbound free list; 0xFFFFFFFF means no frame yet. */
1294 while((m=I2O_POST_READ32(c))==0xFFFFFFFF)
/* NOTE(review): raw jiffies subtraction — modern kernels would use
 * time_after() to be safe across jiffies wrap. */
1296 if((jiffies-time)>=5*HZ)
1298 dprintk(KERN_ERR "%s: Timeout waiting for message frame to send %s.\n",
1309 * i2o_report_controller_unit - print information about a tid
1313 * Dump an information block associated with a given unit (TID). The
1314 * tables are read and a block of text is output to printk that is
1315 * formatted intended for the user.
1318 void i2o_report_controller_unit(struct i2o_controller *c, struct i2o_device *d)
1323 int unit = d->lct_data.tid;
1328 printk(KERN_INFO "Target ID %d.\n", unit);
/* Group 0xF100 (Device Identity) scalars: 3=vendor, 4=device,
 * 5=description, 6=revision. */
1329 if((ret=i2o_query_scalar(c, unit, 0xF100, 3, buf, 16))>=0)
1332 printk(KERN_INFO " Vendor: %s\n", buf);
1334 if((ret=i2o_query_scalar(c, unit, 0xF100, 4, buf, 16))>=0)
1337 printk(KERN_INFO " Device: %s\n", buf);
1339 if(i2o_query_scalar(c, unit, 0xF100, 5, buf, 16)>=0)
1342 printk(KERN_INFO " Description: %s\n", buf);
1344 if((ret=i2o_query_scalar(c, unit, 0xF100, 6, buf, 8))>=0)
1347 printk(KERN_INFO " Rev: %s\n", buf);
1350 printk(KERN_INFO " Class: ");
1351 sprintf(str, "%-21s", i2o_get_class_name(d->lct_data.class_id));
1352 printk("%s\n", str);
1354 printk(KERN_INFO " Subclass: 0x%04X\n", d->lct_data.sub_class);
1355 printk(KERN_INFO " Flags: ");
/* Decode the LCT device-flag bits into one-letter codes. */
1357 if(d->lct_data.device_flags&(1<<0))
1358 printk("C"); // ConfigDialog requested
1359 if(d->lct_data.device_flags&(1<<1))
1360 printk("U"); // Multi-user capable
1361 if(!(d->lct_data.device_flags&(1<<4)))
1362 printk("P"); // Peer service enabled!
1363 if(!(d->lct_data.device_flags&(1<<5)))
1364 printk("M"); // Mgmt service enabled!
1371 * Parse the hardware resource table. Right now we print it out
1372 * and don't do a lot with it. We should collate these and then
1373 * interact with the Linux resource allocation block.
1375 * Lets prove we can read it first eh ?
1377 * This is full of endianisms!
/* NOTE(review): excerpt — the declarations of p, d, state, length and
 * the switch header on the bus type are not visible here. */
1380 static int i2o_parse_hrt(struct i2o_controller *c)
1383 u32 *rows=(u32*)c->hrt;
1393 printk(KERN_ERR "%s: HRT table for controller is too new a version.\n",
/* Entry count is a little-endian 16-bit field read byte-wise. */
1398 count=p[0]|(p[1]<<8);
1401 printk(KERN_INFO "%s: HRT has %d entries of %d bytes each.\n",
1402 c->name, count, length<<2);
1406 for(i=0;i<count;i++)
1408 printk(KERN_INFO "Adapter %08X: ", rows[0]);
/* Low 12 bits of the state word carry the TID; flag bits follow. */
1413 printk("TID %04X:[", state&0xFFF);
1416 printk("H"); /* Hidden */
1419 printk("P"); /* Present */
1421 printk("C"); /* Controlled */
1424 printk("*"); /* Hard */
/* Per-bus-type decode of the bus info bytes. */
1431 /* Adapter private bus - easy */
1432 printk("Local bus %d: I/O at 0x%04X Mem 0x%08X",
1433 p[2], d[1]<<8|d[0], *(u32 *)(d+4));
1437 printk("ISA %d: CSN %d I/O at 0x%04X Mem 0x%08X",
1438 p[2], d[2], d[1]<<8|d[0], *(u32 *)(d+4));
1441 case 2: /* EISA bus */
1442 printk("EISA %d: Slot %d I/O at 0x%04X Mem 0x%08X",
1443 p[2], d[3], d[1]<<8|d[0], *(u32 *)(d+4));
1446 case 3: /* MCA bus */
1447 printk("MCA %d: Slot %d I/O at 0x%04X Mem 0x%08X",
1448 p[2], d[3], d[1]<<8|d[0], *(u32 *)(d+4));
1451 case 4: /* PCI bus */
1452 printk("PCI %d: Bus %d Device %d Function %d",
1453 p[2], d[2], d[1], d[0]);
1456 case 0x80: /* Other */
1458 printk("Unsupported bus type.");
1469 * The logical configuration table tells us what we can talk to
1470 * on the board. Most of the stuff isn't interesting to us.
/* Walk c->lct and create/install an i2o_device for each entry. */
1473 static int i2o_parse_lct(struct i2o_controller *c)
1478 struct i2o_device *d;
1479 i2o_lct *lct = c->lct;
1482 printk(KERN_ERR "%s: LCT is empty???\n", c->name);
1486 max = lct->table_size;
/* NOTE(review): excerpt — the conversion of table_size into an entry
 * count (original lines 1487-1489) is not visible here. */
1490 printk(KERN_INFO "%s: LCT has %d entries.\n", c->name, max);
1492 if(lct->iop_flags&(1<<0))
1493 printk(KERN_WARNING "%s: Configuration dialog desired.\n", c->name);
1497 d = (struct i2o_device *)kmalloc(sizeof(struct i2o_device), GFP_KERNEL);
1500 printk(KERN_CRIT "i2o_core: Out of memory for I2O device data.\n");
/* Cache the LCT entry in the device so OSMs can inspect it. */
1507 memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));
1510 tid = d->lct_data.tid;
1512 i2o_report_controller_unit(c, d);
1514 i2o_install_device(c, d);
1521 * i2o_quiesce_controller - quiesce controller
1524 * Quiesce an IOP. Causes IOP to make external operation quiescent
1525 * (i2o 'READY' state). Internal operation of the IOP continues normally.
1528 int i2o_quiesce_controller(struct i2o_controller *c)
1535 /* SysQuiesce discarded if IOP not in READY or OPERATIONAL state */
1537 if ((c->status_block->iop_state != ADAPTER_STATE_READY) &&
1538 (c->status_block->iop_state != ADAPTER_STATE_OPERATIONAL))
/* Build an ExecSysQuiesce request addressed to the adapter itself. */
1543 msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
1544 msg[1] = I2O_CMD_SYS_QUIESCE<<24|HOST_TID<<12|ADAPTER_TID;
1547 /* Long timeout needed for quiesce if lots of devices */
1549 if ((ret = i2o_post_wait(c, msg, sizeof(msg), 240)))
1550 printk(KERN_INFO "%s: Unable to quiesce (status=%#x).\n",
1553 dprintk(KERN_INFO "%s: Quiesced.\n", c->name);
/* Refresh the cached status block after the state change. */
1555 i2o_status_get(c); // Entered READY state
1560 * i2o_enable_controller - move controller from ready to operational
1563 * Enable IOP. This allows the IOP to resume external operations and
1564 * reverses the effect of a quiesce. In the event of an error a negative
1565 * errno code is returned.
1568 int i2o_enable_controller(struct i2o_controller *c)
1575 /* Enable only allowed on READY state */
1576 if(c->status_block->iop_state != ADAPTER_STATE_READY)
/* Build an ExecSysEnable request addressed to the adapter itself. */
1579 msg[0]=FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
1580 msg[1]=I2O_CMD_SYS_ENABLE<<24|HOST_TID<<12|ADAPTER_TID;
1582 /* How long of a timeout do we need? */
1584 if ((ret = i2o_post_wait(c, msg, sizeof(msg), 240)))
1585 printk(KERN_ERR "%s: Could not enable (status=%#x).\n",
1588 dprintk(KERN_INFO "%s: Enabled.\n", c->name);
/* Refresh the cached status block after the state change. */
1590 i2o_status_get(c); // entered OPERATIONAL state
1596 * i2o_clear_controller - clear a controller
1599 * Clear an IOP to HOLD state, ie. terminate external operations, clear all
1600 * input queues and prepare for a system restart. IOP's internal operation
1601 * continues normally and the outbound queue is alive.
1602 * The IOP is not expected to rebuild its LCT.
/*
 * Clear one IOP down to HOLD state with ExecAdapterClear.  Every IOP
 * on the global chain is quiesced first and re-enabled afterwards,
 * because clearing one IOP also disrupts peer (inter-IOP) traffic.
 * Note: errors from the quiesce/enable of the other IOPs are ignored.
 */
1605 int i2o_clear_controller(struct i2o_controller *c)
1607 struct i2o_controller *iop;
1611 /* Quiesce all IOPs first */
1613 for (iop = i2o_controller_chain; iop; iop = iop->next)
1614 i2o_quiesce_controller(iop);
1616 msg[0]=FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
1617 msg[1]=I2O_CMD_ADAPTER_CLEAR<<24|HOST_TID<<12|ADAPTER_TID;
1620 if ((ret=i2o_post_wait(c, msg, sizeof(msg), 30)))
1621 printk(KERN_INFO "%s: Unable to clear (status=%#x).\n",
1624 dprintk(KERN_INFO "%s: Cleared.\n",c->name);
1628 /* Enable other IOPs */
1630 for (iop = i2o_controller_chain; iop; iop = iop->next)
1632 i2o_enable_controller(iop);
1639 * i2o_reset_controller - reset an IOP
1640 * @c: controller to reset
1642 * Reset the IOP into INIT state and wait until IOP gets into RESET state.
1643 * Terminate all external operations, clear IOP's inbound and outbound
1644 * queues, terminate all DDMs, and reload the IOP's operating environment
1645 * and all local DDMs. The IOP rebuilds its LCT.
/*
 * Hard-reset one IOP with ExecAdapterReset and wait for it to come
 * back up.  A 4-byte DMA-coherent status buffer is polled for the
 * reply; if the IOP never answers within the timeout the buffer is
 * deliberately leaked (see the LEAK comments) because the hardware
 * may still DMA into it at any later time.  If the reset is rejected
 * or did not actually reach RESET state, falls back to
 * i2o_clear_controller().  All other IOPs are quiesced around the
 * operation and re-enabled afterwards.
 */
1648 static int i2o_reset_controller(struct i2o_controller *c)
1650 struct i2o_controller *iop;
1653 dma_addr_t status_phys;
1657 /* Quiesce all IOPs first */
1659 for (iop = i2o_controller_chain; iop; iop = iop->next)
1662 i2o_quiesce_controller(iop);
1665 m=i2o_wait_message(c, "AdapterReset");
1668 msg=(u32 *)(c->mem_offset+m);
/* Reply status lands in this 4-byte DMA buffer, not in a reply frame */
1670 status = pci_alloc_consistent(c->pdev, 4, &status_phys);
1671 if(status == NULL) {
1672 printk(KERN_ERR "IOP reset failed - no free memory.\n");
1675 memset(status, 0, 4);
1677 msg[0]=EIGHT_WORD_MSG_SIZE|SGL_OFFSET_0;
1678 msg[1]=I2O_CMD_ADAPTER_RESET<<24|HOST_TID<<12|ADAPTER_TID;
1679 msg[2]=core_context;
1684 msg[7]=0; /* 64bit host FIXME */
1686 i2o_post_message(c,m);
1688 /* Wait for a reply */
/* NOTE(review): raw jiffies arithmetic — time_after() is the wrap-safe idiom; confirm before changing */
1692 if((jiffies-time)>=20*HZ)
1694 printk(KERN_ERR "IOP reset timeout.\n");
1695 /* The controller still may respond and overwrite
1696 * status_phys, LEAK it to prevent memory corruption.
1704 if (*status==I2O_CMD_IN_PROGRESS)
1707 * Once the reset is sent, the IOP goes into the INIT state
1708 * which is indeterminate. We need to wait until the IOP
1709 * has rebooted before we can let the system talk to
1710 * it. We read the inbound Free_List until a message is
1711 * available. If we can't read one in the given amount of
1712 * time, we assume the IOP could not reboot properly.
1715 dprintk(KERN_INFO "%s: Reset in progress, waiting for reboot...\n",
1719 m = I2O_POST_READ32(c);
1720 while(m == 0XFFFFFFFF)
1722 if((jiffies-time) >= 30*HZ)
1724 printk(KERN_ERR "%s: Timeout waiting for IOP reset.\n",
1726 /* The controller still may respond and
1727 * overwrite status_phys, LEAK it to prevent
1728 * memory corruption.
1734 m = I2O_POST_READ32(c);
/* Return the probe message frame we borrowed from the free list */
1736 i2o_flush_reply(c,m);
1739 /* If IopReset was rejected or didn't perform reset, try IopClear */
1742 if (status[0] == I2O_CMD_REJECTED ||
1743 c->status_block->iop_state != ADAPTER_STATE_RESET)
1745 printk(KERN_WARNING "%s: Reset rejected, trying to clear\n",c->name);
1746 i2o_clear_controller(c);
1749 dprintk(KERN_INFO "%s: Reset completed.\n", c->name);
1751 /* Enable other IOPs */
1753 for (iop = i2o_controller_chain; iop; iop = iop->next)
1755 i2o_enable_controller(iop);
/* Normal path: the reply arrived, so the buffer is safe to free */
1757 pci_free_consistent(c->pdev, 4, status, status_phys);
1763 * i2o_status_get - get the status block for the IOP
1766 * Issue a status query on the controller. This updates the
1767 * attached status_block. If the controller fails to reply or an
1768 * error occurs then a negative errno code is returned. On success
1769 * zero is returned and the status_blok is updated.
/*
 * Fetch the IOP's status block via ExecStatusGet into a DMA-coherent
 * buffer, allocated lazily on first call and kept in c->status_block.
 * Completion is detected by polling the last byte of the 88-byte
 * block for 0xFF (the IOP writes the block back-to-front), with a
 * 5 second timeout.  Also logs the resulting IOP state.
 */
1772 int i2o_status_get(struct i2o_controller *c)
1779 if (c->status_block == NULL)
1781 c->status_block = (i2o_status_block *)
1782 pci_alloc_consistent(c->pdev, sizeof(i2o_status_block), &c->status_block_phys);
1783 if (c->status_block == NULL)
1785 printk(KERN_CRIT "%s: Get Status Block failed; Out of memory.\n",
1791 status_block = (u8*)c->status_block;
/* Zero the block so the 0xFF completion poll below cannot see stale data */
1792 memset(c->status_block,0,sizeof(i2o_status_block));
1794 m=i2o_wait_message(c, "StatusGet");
1797 msg=(u32 *)(c->mem_offset+m);
1799 msg[0]=NINE_WORD_MSG_SIZE|SGL_OFFSET_0;
1800 msg[1]=I2O_CMD_STATUS_GET<<24|HOST_TID<<12|ADAPTER_TID;
1801 msg[2]=core_context;
1805 msg[6]=c->status_block_phys;
1806 msg[7]=0; /* 64bit host FIXME */
1807 msg[8]=sizeof(i2o_status_block); /* always 88 bytes */
1809 i2o_post_message(c,m);
1811 /* Wait for a reply */
/* Byte 87 (SyncByte) becomes 0xFF once the IOP has written the block */
1814 while(status_block[87]!=0xFF)
1816 if((jiffies-time)>=5*HZ)
1818 printk(KERN_ERR "%s: Get status timeout.\n",c->name);
1826 printk(KERN_INFO "%s: State = ", c->name);
1827 switch (c->status_block->iop_state) {
1841 printk("OPERATIONAL\n");
1847 printk("FAULTED\n");
1850 printk("%x (unknown !!)\n",c->status_block->iop_state);
1858 * Get the Hardware Resource Table for the device.
1859 * The HRT contains information about possible hidden devices
1860 * but is mostly useless to us
/*
 * Read the IOP's Hardware Resource Table (ExecHrtGet).  The first
 * request uses a header-sized buffer; if the IOP reports a larger
 * table the DMA buffer is freed, reallocated at the reported size and
 * the request retried (bounded by 'loops').  On -ETIMEDOUT the buffer
 * ownership passed to i2o_post_wait_mem() and it must NOT be freed
 * here — the reply handler will reclaim it.
 */
1865 int i2o_hrt_get(struct i2o_controller *c)
1866 int ret, size = sizeof(i2o_hrt);
1867 int loops = 3; /* we only try 3 times to get the HRT, this should be
1868 more then enough. Worst case should be 2 times.*/
1869 /* First read just the header to figure out the real size */
1872 /* first we allocate the memory for the HRT */
1873 if (c->hrt == NULL) {
1874 c->hrt=pci_alloc_consistent(c->pdev, size, &c->hrt_phys);
1875 if (c->hrt == NULL) {
1876 printk(KERN_CRIT "%s: Hrt Get failed; Out of memory.\n", c->name);
1882 msg[0]= SIX_WORD_MSG_SIZE| SGL_OFFSET_4;
1883 msg[1]= I2O_CMD_HRT_GET<<24 | HOST_TID<<12 | ADAPTER_TID;
1885 msg[4]= (0xD0000000 | c->hrt_len); /* Simple transaction */
1886 msg[5]= c->hrt_phys; /* Dump it here */
1888 ret = i2o_post_wait_mem(c, msg, sizeof(msg), 20, c->hrt, NULL, c->hrt_phys, 0, c->hrt_len, 0);
1890 if(ret == -ETIMEDOUT)
1892 /* The HRT block we used is in limbo somewhere. When the iop wakes up
1893 we will recover it */
1901 printk(KERN_ERR "%s: Unable to get HRT (status=%#x)\n",
/* entry_len is in 32-bit words, hence the << 2 to get bytes */
1906 if (c->hrt->num_entries * c->hrt->entry_len << 2 > c->hrt_len) {
1907 size = c->hrt->num_entries * c->hrt->entry_len << 2;
1908 pci_free_consistent(c->pdev, c->hrt_len, c->hrt, c->hrt_phys);
1913 } while (c->hrt == NULL && loops > 0);
1917 printk(KERN_ERR "%s: Unable to get HRT after three tries, giving up\n", c->name);
1921 i2o_parse_hrt(c); // just for debugging
1927 * Send the I2O System Table to the specified IOP
1929 * The system table contains information about all the IOPs in the
1930 * system. It is built and then sent to each IOP so that IOPs can
1931 * establish connections between each other.
/*
 * Push the global system table (sys_tbl) to one IOP with
 * ExecSysTabSet.  Before sending, allocate any private PCI memory /
 * I/O apertures the IOP asked for (desired_* > current_* in its
 * status block) from the parent bus resource.  The message carries
 * three SGL elements: the system table itself plus the private
 * memory and private I/O space declarations (staged in privbuf).
 * The shared table is streamed with pci_map_single per IOP since one
 * coherent buffer cannot be handed to every controller at once.
 */
1934 static int i2o_systab_send(struct i2o_controller *iop)
1937 dma_addr_t sys_tbl_phys;
1939 struct resource *root;
/* NOTE(review): no NULL check for this kmalloc is visible here — confirm one exists in the elided lines */
1940 u32 *privbuf = kmalloc(16, GFP_KERNEL);
1945 if(iop->status_block->current_mem_size < iop->status_block->desired_mem_size)
1947 struct resource *res = &iop->mem_resource;
1948 res->name = iop->pdev->bus->name;
1949 res->flags = IORESOURCE_MEM;
1952 printk("%s: requires private memory resources.\n", iop->name);
1953 root = pci_find_parent_resource(iop->pdev, res);
1955 printk("Can't find parent resource!\n");
1956 if(root && allocate_resource(root, res,
1957 iop->status_block->desired_mem_size,
1958 iop->status_block->desired_mem_size,
1959 iop->status_block->desired_mem_size,
1960 1<<20, /* Unspecified, so use 1Mb and play safe */
1965 iop->status_block->current_mem_size = 1 + res->end - res->start;
1966 iop->status_block->current_mem_base = res->start;
1967 printk(KERN_INFO "%s: allocated %ld bytes of PCI memory at 0x%08lX.\n",
1968 iop->name, 1+res->end-res->start, res->start);
/* Same dance for the private I/O aperture */
1971 if(iop->status_block->current_io_size < iop->status_block->desired_io_size)
1973 struct resource *res = &iop->io_resource;
1974 res->name = iop->pdev->bus->name;
1975 res->flags = IORESOURCE_IO;
/* NOTE(review): message text says "memory" but this is the I/O branch — looks copy-pasted */
1978 printk("%s: requires private memory resources.\n", iop->name);
1979 root = pci_find_parent_resource(iop->pdev, res);
1981 printk("Can't find parent resource!\n");
1982 if(root && allocate_resource(root, res,
1983 iop->status_block->desired_io_size,
1984 iop->status_block->desired_io_size,
1985 iop->status_block->desired_io_size,
1986 1<<20, /* Unspecified, so use 1Mb and play safe */
1991 iop->status_block->current_io_size = 1 + res->end - res->start;
/* NOTE(review): writes current_mem_base in the I/O branch — presumably a
 * copy-paste bug and current_io_base was intended; confirm before fixing */
1992 iop->status_block->current_mem_base = res->start;
1993 printk(KERN_INFO "%s: allocated %ld bytes of PCI I/O at 0x%08lX.\n",
1994 iop->name, 1+res->end-res->start, res->start);
/* Stage the private space declarations for the SGL below */
1999 privbuf[0] = iop->status_block->current_mem_base;
2000 privbuf[1] = iop->status_block->current_mem_size;
2001 privbuf[2] = iop->status_block->current_io_base;
2002 privbuf[3] = iop->status_block->current_io_size;
2005 msg[0] = I2O_MESSAGE_SIZE(12) | SGL_OFFSET_6;
2006 msg[1] = I2O_CMD_SYS_TAB_SET<<24 | HOST_TID<<12 | ADAPTER_TID;
2008 msg[4] = (0<<16) | ((iop->unit+2) ); /* Host 0 IOP ID (unit + 2) */
2009 msg[5] = 0; /* Segment 0 */
2012 * Provide three SGL-elements:
2013 * System table (SysTab), Private memory space declaration and
2014 * Private i/o space declaration
2016 * Nasty one here. We can't use pci_alloc_consistent to send the
2017 * same table to everyone. We have to go remap it for them all
2020 sys_tbl_phys = pci_map_single(iop->pdev, sys_tbl, sys_tbl_len, PCI_DMA_TODEVICE);
2021 msg[6] = 0x54000000 | sys_tbl_phys;
2023 msg[7] = sys_tbl_phys;
2024 msg[8] = 0x54000000 | privbuf[1];
2025 msg[9] = privbuf[0];
2026 msg[10] = 0xD4000000 | privbuf[3];
2027 msg[11] = privbuf[2];
2029 ret=i2o_post_wait(iop, msg, sizeof(msg), 120);
2031 pci_unmap_single(iop->pdev, sys_tbl_phys, sys_tbl_len, PCI_DMA_TODEVICE);
2035 printk(KERN_ERR "%s: SysTab setup timed out.\n", iop->name);
2039 printk(KERN_ERR "%s: Unable to set SysTab (status=%#x).\n",
2044 dprintk(KERN_INFO "%s: SysTab set.\n", iop->name);
2046 i2o_status_get(iop); // Entered READY state
2054 * Initialize I2O subsystem.
/*
 * Bring the whole I2O subsystem up: activate every discovered IOP to
 * HOLD, build and distribute the system table, bring each IOP online
 * to OPERATIONAL, then start per-IOP dynamic-LCT kernel threads and
 * register the core for IRTOS events.  IOPs that fail any stage are
 * deleted from the chain (and the system table is rebuilt if an IOP
 * fails to come online, since its entry must be removed).
 */
2056 void __init i2o_sys_init(void)
2058 struct i2o_controller *iop, *niop = NULL;
2060 printk(KERN_INFO "Activating I2O controllers...\n");
2061 printk(KERN_INFO "This may take a few minutes if there are many devices\n");
2063 /* In INIT state, Activate IOPs */
/* niop caches the next pointer because i2o_delete_controller() unlinks iop */
2064 for (iop = i2o_controller_chain; iop; iop = niop) {
2065 dprintk(KERN_INFO "Calling i2o_activate_controller for %s...\n",
2068 if (i2o_activate_controller(iop) < 0)
2069 i2o_delete_controller(iop);
2072 /* Active IOPs in HOLD state */
2075 if (i2o_controller_chain == NULL)
2079 * If build_sys_table fails, we kill everything and bail
2080 * as we can't init the IOPs w/o a system table
2082 dprintk(KERN_INFO "i2o_core: Calling i2o_build_sys_table...\n");
2083 if (i2o_build_sys_table() < 0) {
2088 /* If IOP don't get online, we need to rebuild the System table */
2089 for (iop = i2o_controller_chain; iop; iop = niop) {
2091 dprintk(KERN_INFO "Calling i2o_online_controller for %s...\n", iop->name);
2092 if (i2o_online_controller(iop) < 0) {
2093 i2o_delete_controller(iop);
2094 goto rebuild_sys_tab;
2098 /* Active IOPs now in OPERATIONAL state */
2101 * Register for status updates from all IOPs
2103 for(iop = i2o_controller_chain; iop; iop=iop->next) {
2105 /* Create a kernel thread to deal with dynamic LCT updates */
2106 iop->lct_pid = kernel_thread(i2o_dyn_lct, iop, CLONE_SIGHAND);
2108 /* Update change ind on DLCT */
2109 iop->dlct->change_ind = iop->lct->change_ind;
2111 /* Start dynamic LCT updates */
2112 i2o_lct_notify(iop);
2114 /* Register for all events from IRTOS */
2115 i2o_event_register(iop, core_context, 0, 0, 0xFFFFFFFF);
2120 * i2o_sys_shutdown - shutdown I2O system
2122 * Bring down each i2o controller and then return. Each controller
2123 * is taken through an orderly shutdown
/*
 * Tear the I2O subsystem down by deleting every controller on the
 * chain; i2o_delete_controller() performs the per-IOP reset/cleanup.
 */
2126 static void i2o_sys_shutdown(void)
2128 struct i2o_controller *iop, *niop;
2130 /* Delete all IOPs from the controller chain */
2131 /* that will reset all IOPs too */
/* niop preserves the next pointer across the unlink done by delete */
2133 for (iop = i2o_controller_chain; iop; iop = niop) {
2135 i2o_delete_controller(iop);
2140 * i2o_activate_controller - bring controller up to HOLD
2143 * This function brings an I2O controller into HOLD state. The adapter
2144 * is reset if necessary and then the queues and resource table
2145 * are read. -1 is returned on a failure, 0 on success.
/*
 * Bring one IOP from INIT up to HOLD: read its status (resetting it
 * if it does not answer), reject FAULTED or >1.5-spec adapters, reset
 * any adapter already past INIT, then initialize and fill the
 * outbound queue and read the HRT.  Returns 0 on success, negative
 * on any failure.
 */
2149 int i2o_activate_controller(struct i2o_controller *iop)
2151 /* In INIT state, Wait Inbound Q to initialize (in i2o_status_get) */
2152 /* In READY state, Get status */
2154 if (i2o_status_get(iop) < 0) {
2155 printk(KERN_INFO "Unable to obtain status of %s, "
2156 "attempting a reset.\n", iop->name);
2157 if (i2o_reset_controller(iop) < 0)
2161 if(iop->status_block->iop_state == ADAPTER_STATE_FAULTED) {
2162 printk(KERN_CRIT "%s: hardware fault\n", iop->name);
2166 if (iop->status_block->i2o_version > I2OVER15) {
2167 printk(KERN_ERR "%s: Not running vrs. 1.5. of the I2O Specification.\n",
/* An IOP already past INIT (left over from a previous boot, say) must be reset to a known state */
2172 if (iop->status_block->iop_state == ADAPTER_STATE_READY ||
2173 iop->status_block->iop_state == ADAPTER_STATE_OPERATIONAL ||
2174 iop->status_block->iop_state == ADAPTER_STATE_HOLD ||
2175 iop->status_block->iop_state == ADAPTER_STATE_FAILED)
2177 dprintk(KERN_INFO "%s: Already running, trying to reset...\n",
2179 if (i2o_reset_controller(iop) < 0)
2183 if (i2o_init_outbound_q(iop) < 0)
2186 if (i2o_post_outbound_messages(iop))
2191 if (i2o_hrt_get(iop) < 0)
2199 * i2o_init_outbound_queue - setup the outbound queue
2202 * Clear and (re)initialize IOP's outbound queue. Returns 0 on
2203 * success or a negative errno code on a failure.
/*
 * (Re)initialize the IOP's outbound queue with ExecOutboundInit.
 * The reply status is DMAed into a private 4-byte buffer which is
 * polled (30 s timeout); on timeout the buffer is leaked on purpose,
 * as the IOP may still write to it later.  Returns 0 on success or a
 * negative errno.
 */
2206 int i2o_init_outbound_q(struct i2o_controller *c)
2209 dma_addr_t status_phys;
2214 dprintk(KERN_INFO "%s: Initializing Outbound Queue...\n", c->name);
2215 m=i2o_wait_message(c, "OutboundInit");
2218 msg=(u32 *)(c->mem_offset+m);
2220 status = pci_alloc_consistent(c->pdev, 4, &status_phys);
2222 printk(KERN_ERR "%s: Outbound Queue initialization failed - no free memory.\n",
2226 memset(status, 0, 4);
2228 msg[0]= EIGHT_WORD_MSG_SIZE| TRL_OFFSET_6;
2229 msg[1]= I2O_CMD_OUTBOUND_INIT<<24 | HOST_TID<<12 | ADAPTER_TID;
2230 msg[2]= core_context;
2231 msg[3]= 0x0106; /* Transaction context */
2232 msg[4]= 4096; /* Host page frame size */
2233 /* Frame size is in words. 256 bytes a frame for now */
2234 msg[5]= MSG_FRAME_SIZE<<16|0x80; /* Outbound msg frame size in words and Initcode */
2235 msg[6]= 0xD0000004; /* Simple SG LE, EOB */
2236 msg[7]= status_phys;
2238 i2o_post_message(c,m);
/* Status codes below I2O_CMD_REJECTED mean "still in progress"; keep polling */
2242 while(status[0] < I2O_CMD_REJECTED)
2244 if((jiffies-time)>=30*HZ)
2247 printk(KERN_ERR "%s: Ignored queue initialize request.\n",
2250 printk(KERN_ERR "%s: Outbound queue initialize timeout.\n",
2252 pci_free_consistent(c->pdev, 4, status, status_phys);
2259 if(status[0] != I2O_CMD_COMPLETED)
2261 printk(KERN_ERR "%s: IOP outbound initialise failed.\n", c->name);
2262 pci_free_consistent(c->pdev, 4, status, status_phys);
2265 pci_free_consistent(c->pdev, 4, status, status_phys);
2270 * i2o_post_outbound_messages - fill message queue
2273 * Allocate a message frame and load the messages into the IOP. The
2274 * function returns zero on success or a negative errno code on
/*
 * Allocate the host-side pool of outbound message frames, map it for
 * device-to-host DMA, and hand each frame's bus address to the IOP
 * via the reply FIFO (I2O_REPLY_WRITE32).  Returns 0 on success or a
 * negative errno on allocation/mapping failure.
 */
2278 int i2o_post_outbound_messages(struct i2o_controller *c)
2282 /* Alloc space for IOP's outbound queue message frames */
2284 c->page_frame = kmalloc(MSG_POOL_SIZE, GFP_KERNEL);
2285 if(c->page_frame==NULL) {
2286 printk(KERN_ERR "%s: Outbound Q initialize failed; out of memory.\n",
2291 c->page_frame_map = pci_map_single(c->pdev, c->page_frame, MSG_POOL_SIZE, PCI_DMA_FROMDEVICE);
2293 if(c->page_frame_map == 0)
2295 kfree(c->page_frame);
2296 printk(KERN_ERR "%s: Unable to map outbound queue.\n", c->name);
2300 m = c->page_frame_map;
/* Feed every frame's bus address to the IOP's free list */
2304 for(i=0; i< NMBR_MSG_FRAMES; i++) {
2305 I2O_REPLY_WRITE32(c,m);
2307 m += (MSG_FRAME_SIZE << 2); /* MSG_FRAME_SIZE is in 32-bit words */
2314 * Get the IOP's Logical Configuration Table
/*
 * Fetch the IOP's Logical Configuration Table with ExecLctNotify in
 * immediate mode (change indicator 0 = report now).  Starts with the
 * size the status block advertised; if the returned table is bigger
 * the buffer is reallocated and the request repeated.  On -ETIMEDOUT
 * the buffer belongs to i2o_post_wait_mem() and must not be freed.
 * Finishes by parsing the table into i2o_device entries.
 */
2316 int i2o_lct_get(struct i2o_controller *c)
2319 int ret, size = c->status_block->expected_lct_size;
2322 if (c->lct == NULL) {
2323 c->lct = pci_alloc_consistent(c->pdev, size, &c->lct_phys);
2324 if(c->lct == NULL) {
2325 printk(KERN_CRIT "%s: Lct Get failed. Out of memory.\n",
2330 memset(c->lct, 0, size);
2332 msg[0] = EIGHT_WORD_MSG_SIZE|SGL_OFFSET_6;
2333 msg[1] = I2O_CMD_LCT_NOTIFY<<24 | HOST_TID<<12 | ADAPTER_TID;
2334 /* msg[2] filled in i2o_post_wait */
2336 msg[4] = 0xFFFFFFFF; /* All devices */
2337 msg[5] = 0x00000000; /* Report now */
2338 msg[6] = 0xD0000000|size;
2339 msg[7] = c->lct_phys;
2341 ret=i2o_post_wait_mem(c, msg, sizeof(msg), 120, c->lct, NULL, c->lct_phys, 0, size, 0);
2343 if(ret == -ETIMEDOUT)
2351 printk(KERN_ERR "%s: LCT Get failed (status=%#x.\n",
/* table_size is in 32-bit words; grow and retry if our buffer was too small */
2356 if (c->lct->table_size << 2 > size) {
2357 int new_size = c->lct->table_size << 2;
2358 pci_free_consistent(c->pdev, size, c->lct, c->lct_phys);
2362 } while (c->lct == NULL);
2364 if ((ret=i2o_parse_lct(c)) < 0)
2371 * Like above, but used for async notification. The main
2372 * difference is that we keep track of the CurrentChangeIndicator
2373 * so that we only get updates when it actually changes.
/*
 * Post (fire-and-forget) an ExecLctNotify asking the IOP to reply
 * only when its LCT change indicator advances past the one cached in
 * the dynamic LCT (dlct).  The reply is delivered asynchronously to
 * core_context; returns the i2o_post_this() result.
 */
2376 int i2o_lct_notify(struct i2o_controller *c)
2380 msg[0] = EIGHT_WORD_MSG_SIZE|SGL_OFFSET_6;
2381 msg[1] = I2O_CMD_LCT_NOTIFY<<24 | HOST_TID<<12 | ADAPTER_TID;
2382 msg[2] = core_context;
2383 msg[3] = 0xDEADBEEF; /* Transaction context marker for this async request */
2384 msg[4] = 0xFFFFFFFF; /* All devices */
2385 msg[5] = c->dlct->change_ind+1; /* Next change */
2386 msg[6] = 0xD0000000|8192; /* dlct buffer is 8 KiB */
2387 msg[7] = c->dlct_phys;
2389 return i2o_post_this(c, msg, sizeof(msg));
2393 * Bring a controller online into OPERATIONAL state.
/*
 * Bring one IOP from HOLD to OPERATIONAL: send it the system table,
 * enable it, fetch and parse its LCT, and finally query the battery
 * backup scalar (group 0x0000, field 4) for informational purposes.
 * Returns 0 on success, negative on failure.
 */
2396 int i2o_online_controller(struct i2o_controller *iop)
2400 if (i2o_systab_send(iop) < 0)
2403 /* In READY state */
2405 dprintk(KERN_INFO "%s: Attempting to enable...\n", iop->name);
2406 if (i2o_enable_controller(iop) < 0)
2409 /* In OPERATIONAL state */
2411 dprintk(KERN_INFO "%s: Attempting to get/parse lct...\n", iop->name);
2412 if (i2o_lct_get(iop) < 0)
2415 /* Check battery status */
2418 if(i2o_query_scalar(iop, ADAPTER_TID, 0x0000, 4, &v, 4)>=0)
2428 * Build system table
2430 * The system table contains information about all the IOPs in the
2431 * system (duh) and is used by the Executives on the IOPs to establish
2432 * peer2peer connections. We're not supporting peer2peer at the moment,
2433 * but this will be needed down the road for things like lan2lan forwarding.
/*
 * (Re)build the global system table (sys_tbl/sys_tbl_len) describing
 * every IOP on the chain: header plus one i2o_sys_tbl_entry per
 * controller, populated from each IOP's freshly re-read status block.
 * IOPs that fail the status refresh are deleted and dropped from the
 * entry count.  The change indicator is bumped on every rebuild.
 */
2435 static int i2o_build_sys_table(void)
2437 struct i2o_controller *iop = NULL;
2438 struct i2o_controller *niop = NULL;
2441 sys_tbl_len = sizeof(struct i2o_sys_tbl) + // Header + IOPs
2442 (i2o_num_controllers) *
2443 sizeof(struct i2o_sys_tbl_entry);
2448 sys_tbl = kmalloc(sys_tbl_len, GFP_KERNEL);
2450 printk(KERN_CRIT "SysTab Set failed. Out of memory.\n");
2453 memset((void*)sys_tbl, 0, sys_tbl_len);
2455 sys_tbl->num_entries = i2o_num_controllers;
2456 sys_tbl->version = I2OVERSION; /* TODO: Version 2.0 */
2457 sys_tbl->change_ind = sys_tbl_ind++;
/* niop preserves the next pointer in case delete unlinks iop below */
2459 for(iop = i2o_controller_chain; iop; iop = niop)
2464 * Get updated IOP state so we have the latest information
2466 * We should delete the controller at this point if it
2467 * doesn't respond since if it's not on the system table
2468 * it is technically not part of the I2O subsystem...
2470 if(i2o_status_get(iop)) {
2471 printk(KERN_ERR "%s: Deleting b/c could not get status while"
2472 "attempting to build system table\n", iop->name);
2473 i2o_delete_controller(iop);
2474 sys_tbl->num_entries--;
2475 continue; // try the next one
2478 sys_tbl->iops[count].org_id = iop->status_block->org_id;
2479 sys_tbl->iops[count].iop_id = iop->unit + 2;
2480 sys_tbl->iops[count].seg_num = 0;
2481 sys_tbl->iops[count].i2o_version =
2482 iop->status_block->i2o_version;
2483 sys_tbl->iops[count].iop_state =
2484 iop->status_block->iop_state;
2485 sys_tbl->iops[count].msg_type =
2486 iop->status_block->msg_type;
2487 sys_tbl->iops[count].frame_size =
2488 iop->status_block->inbound_frame_size;
2489 sys_tbl->iops[count].last_changed = sys_tbl_ind - 1; // ??
2490 sys_tbl->iops[count].iop_capabilities =
2491 iop->status_block->iop_capabilities;
2492 sys_tbl->iops[count].inbound_low = iop->post_port;
2493 sys_tbl->iops[count].inbound_high = 0; // FIXME: 64-bit support
/* Debug dump of the finished table, one 32-bit word per line */
2501 table = (u32*)sys_tbl;
2502 for(count = 0; count < (sys_tbl_len >>2); count++)
2503 printk(KERN_INFO "sys_tbl[%d] = %0#10x\n", count, table[count]);
2512 * Run time support routines
2516 * Generic "post and forget" helpers. This is less efficient - we do
2517 * a memcpy for example that isn't strictly needed, but for most uses
2518 * this is simply not worth optimising
/*
 * Copy a pre-built message into a free inbound frame and post it,
 * without waiting for any reply.  Busy-polls the inbound free list
 * for up to 1 second (0xFFFFFFFF = no frame available) before giving
 * up with an error.
 */
2521 int i2o_post_this(struct i2o_controller *c, u32 *data, int len)
2525 unsigned long t=jiffies;
2530 m = I2O_POST_READ32(c);
2532 while(m==0xFFFFFFFF && (jiffies-t)<HZ);
2536 printk(KERN_ERR "%s: Timeout waiting for message frame!\n",
2540 msg = (u32 *)(c->mem_offset + m);
/* Frame lives in controller MMIO space, hence memcpy_toio */
2541 memcpy_toio(msg, data, len);
2542 i2o_post_message(c,m);
2547 * i2o_post_wait_mem - I2O query/reply with DMA buffers
2549 * @msg: message to send
2550 * @len: length of message
2551 * @timeout: time in seconds to wait
2552 * @mem1: attached memory buffer 1
2553 * @mem2: attached memory buffer 2
2554 * @phys1: physical address of buffer 1
2555 * @phys2: physical address of buffer 2
2556 * @size1: size of buffer 1
2557 * @size2: size of buffer 2
2559 * This core API allows an OSM to post a message and then be told whether
2560 * or not the system received a successful reply.
2562 * If the message times out then the value '-ETIMEDOUT' is returned. This
2563 * is a special case. In this situation the message may (should) complete
2564 * at an indefinite time in the future. When it completes it will use the
2565 * memory buffers attached to the request. If -ETIMEDOUT is returned then
2566 * the memory buffers must not be freed. Instead the event completion will
2567 * free them for you. In all other cases the buffers are your problem.
2569 * Pass NULL for unneeded buffers.
/*
 * Post a message and sleep (interruptibly) until the reply handler
 * wakes us or the timeout expires.  A wait_data record carrying the
 * attached DMA buffers is queued under post_wait_lock; its 15-bit id
 * goes into msg[2] so i2o_post_wait_complete() can find it.  On
 * timeout the record is marked dead (wq = NULL) and left queued —
 * the late reply will free the buffers — and -ETIMEDOUT is returned;
 * callers must then NOT free mem1/mem2 themselves.
 */
2572 int i2o_post_wait_mem(struct i2o_controller *c, u32 *msg, int len, int timeout, void *mem1, void *mem2, dma_addr_t phys1, dma_addr_t phys2, int size1, int size2)
2574 DECLARE_WAIT_QUEUE_HEAD(wq_i2o_post);
2575 DECLARE_WAITQUEUE(wait, current);
2578 unsigned long flags = 0;
2579 struct i2o_post_wait_data *wait_data =
2580 kmalloc(sizeof(struct i2o_post_wait_data), GFP_KERNEL);
2586 * Create a new notification object
2588 wait_data->status = &status;
2589 wait_data->complete = &complete;
2590 wait_data->mem[0] = mem1;
2591 wait_data->mem[1] = mem2;
2592 wait_data->phys[0] = phys1;
2593 wait_data->phys[1] = phys2;
2594 wait_data->size[0] = size1;
2595 wait_data->size[1] = size2;
2598 * Queue the event with its unique id
2600 spin_lock_irqsave(&post_wait_lock, flags);
2602 wait_data->next = post_wait_queue;
2603 post_wait_queue = wait_data;
2604 wait_data->id = (++post_wait_id) & 0x7fff;
2605 wait_data->wq = &wq_i2o_post;
2607 spin_unlock_irqrestore(&post_wait_lock, flags);
2610 * Fill in the message id
/* High bit marks a post_wait transaction; id rides in bits 16..30 */
2613 msg[2] = 0x80000000|(u32)core_context|((u32)wait_data->id<<16);
2616 * Post the message to the controller. At some point later it
2617 * will return. If we time out before it returns then
2618 * complete will be zero. From the point post_this returns
2619 * the wait_data may have been deleted.
2622 add_wait_queue(&wq_i2o_post, &wait);
2623 set_current_state(TASK_INTERRUPTIBLE);
2624 if ((status = i2o_post_this(c, msg, len))==0) {
2625 schedule_timeout(HZ * timeout);
2629 remove_wait_queue(&wq_i2o_post, &wait);
2632 remove_wait_queue(&wq_i2o_post, &wait);
2634 if(signal_pending(current))
2637 spin_lock_irqsave(&post_wait_lock, flags);
2638 barrier(); /* Be sure we see complete as it is locked */
2642 * Mark the entry dead. We cannot remove it. This is important.
2643 * When it does terminate (which it must do if the controller hasn't
2644 * died..) then it will otherwise scribble on stuff.
2645 * !complete lets us safely check if the entry is still
2646 * allocated and thus we can write into it
2648 wait_data->wq = NULL;
2649 status = -ETIMEDOUT;
2653 /* Debugging check - remove me soon */
2654 if(status == -ETIMEDOUT)
2656 printk("TIMEDOUT BUG!\n");
2660 /* And the wait_data is not leaked either! */
2661 spin_unlock_irqrestore(&post_wait_lock, flags);
2666 * i2o_post_wait - I2O query/reply
2668 * @msg: message to send
2669 * @len: length of message
2670 * @timeout: time in seconds to wait
2672 * This core API allows an OSM to post a message and then be told whether
2673 * or not the system received a successful reply.
/* Convenience wrapper: post-and-wait with no attached DMA buffers. */
2676 int i2o_post_wait(struct i2o_controller *c, u32 *msg, int len, int timeout)
2678 return i2o_post_wait_mem(c, msg, len, timeout, NULL, NULL, 0, 0, 0, 0);
2682 * i2o_post_wait is completed and we want to wake up the
2683 * sleeping process. Called by core's reply handler.
/*
 * Reply-side half of i2o_post_wait_mem(): look up the wait_data whose
 * id matches bits 16..30 of the reply context.  If the waiter is
 * still sleeping (wq != NULL) store the status and wake it; if the
 * waiter already timed out, the entry is "dead" — free its attached
 * DMA buffers here instead, since the caller was told not to.
 * Unmatched contexts are logged as bogus replies.
 */
2686 static void i2o_post_wait_complete(struct i2o_controller *c, u32 context, int status)
2688 struct i2o_post_wait_data **p1, *q;
2689 unsigned long flags;
2692 * We need to search through the post_wait
2693 * queue to see if the given message is still
2694 * outstanding. If not, it means that the IOP
2695 * took longer to respond to the message than we
2696 * had allowed and timer has already expired.
2697 * Not much we can do about that except log
2698 * it for debug purposes, increase timeout, and recompile
2700 * Lock needed to keep anyone from moving queue pointers
2701 * around while we're looking through them.
2704 spin_lock_irqsave(&post_wait_lock, flags);
2706 for(p1 = &post_wait_queue; *p1!=NULL; p1 = &((*p1)->next))
2709 if(q->id == ((context >> 16) & 0x7fff)) {
2722 /* Live entry - wakeup and set status */
2723 *q->status = status;
2730 * Free resources. Caller is dead
2734 pci_free_consistent(c->pdev, q->size[0], q->mem[0], q->phys[0]);
2736 pci_free_consistent(c->pdev, q->size[1], q->mem[1], q->phys[1]);
2738 printk(KERN_WARNING "i2o_post_wait event completed after timeout.\n");
/* NOTE(review): unlocked with spin_unlock, not spin_unlock_irqrestore — confirm the irq state is restored in elided lines */
2741 spin_unlock(&post_wait_lock);
2745 spin_unlock(&post_wait_lock);
2747 printk(KERN_DEBUG "i2o_post_wait: Bogus reply!\n");
2750 /* Issue UTIL_PARAMS_GET or UTIL_PARAMS_SET
2752 * This function can be used for all UtilParamsGet/Set operations.
2753 * The OperationList is given in oplist-buffer,
2754 * and results are returned in reslist-buffer.
2755 * Note that the minimum sized reslist is 8 bytes and contains
2756 * ResultCount, ErrorInfoSize, BlockStatus and BlockSize.
/*
 * Issue a UtilParamsGet/UtilParamsSet to a TID.  The caller's
 * operation list (oplist) and result buffer (reslist) are bounced
 * through DMA-coherent copies; on -ETIMEDOUT those copies belong to
 * i2o_post_wait_mem() and are deliberately not freed here.  On
 * success, walks each result block, logs any BlockStatus errors, and
 * returns the total result-list length in bytes (or a negative
 * -BlockStatus / errno on failure).
 */
2759 int i2o_issue_params(int cmd, struct i2o_controller *iop, int tid,
2760 void *oplist, int oplen, void *reslist, int reslen)
2763 u32 *res32 = (u32*)reslist;
2764 u32 *restmp = (u32*)reslist;
2768 u32 *opmem, *resmem;
2769 dma_addr_t opmem_phys, resmem_phys;
2771 /* Get DMAable memory */
2772 opmem = pci_alloc_consistent(iop->pdev, oplen, &opmem_phys);
2775 memcpy(opmem, oplist, oplen);
2777 resmem = pci_alloc_consistent(iop->pdev, reslen, &resmem_phys);
2780 pci_free_consistent(iop->pdev, oplen, opmem, opmem_phys);
2784 msg[0] = NINE_WORD_MSG_SIZE | SGL_OFFSET_5;
2785 msg[1] = cmd << 24 | HOST_TID << 12 | tid;
2788 msg[5] = 0x54000000 | oplen; /* OperationList */
2789 msg[6] = opmem_phys;
2790 msg[7] = 0xD0000000 | reslen; /* ResultList */
2791 msg[8] = resmem_phys;
2793 wait_status = i2o_post_wait_mem(iop, msg, sizeof(msg), 10, opmem, resmem, opmem_phys, resmem_phys, oplen, reslen);
2796 * This only looks like a memory leak - don't "fix" it.
2798 if(wait_status == -ETIMEDOUT)
2801 memcpy(reslist, resmem, reslen);
2802 pci_free_consistent(iop->pdev, reslen, resmem, resmem_phys);
2803 pci_free_consistent(iop->pdev, oplen, opmem, opmem_phys);
2806 if(wait_status != 0)
2809 * Calculate number of bytes of Result LIST
2810 * We need to loop through each Result BLOCK and grab the length
/* res32[0] low 16 bits = ResultCount */
2814 for(i = 0; i < (res32[0]&0X0000FFFF); i++)
2816 if(restmp[0]&0x00FF0000) /* BlockStatus != SUCCESS */
2818 printk(KERN_WARNING "%s - Error:\n ErrorInfoSize = 0x%02x, "
2819 "BlockStatus = 0x%02x, BlockSize = 0x%04x\n",
2820 (cmd == I2O_CMD_UTIL_PARAMS_SET) ? "PARAMS_SET"
2822 res32[1]>>24, (res32[1]>>16)&0xFF, res32[1]&0xFFFF);
2825 * If this is the only request, then we return an error
2827 if((res32[0]&0x0000FFFF) == 1)
2829 return -((res32[1] >> 16) & 0xFF); /* -BlockStatus */
2832 len += restmp[0] & 0x0000FFFF; /* Length of res BLOCK */
2833 restmp += restmp[0] & 0x0000FFFF; /* Skip to next BLOCK */
2835 return (len << 2); /* bytes used by result list */
2839 * Query one scalar group value or a whole scalar group.
/*
 * Read one scalar field (or, with field == -1, a whole scalar group)
 * from a TID via UtilParamsGet.  The 8-byte result header is stripped
 * before copying buflen bytes into the caller's buffer.  Note the
 * variable-length array resblk lives on the kernel stack — buflen
 * must stay small.
 */
2841 int i2o_query_scalar(struct i2o_controller *iop, int tid,
2842 int group, int field, void *buf, int buflen)
2844 u16 opblk[] = { 1, 0, I2O_PARAMS_FIELD_GET, group, 1, field };
2845 u8 resblk[8+buflen]; /* 8 bytes for header */
2848 if (field == -1) /* whole group */
2851 size = i2o_issue_params(I2O_CMD_UTIL_PARAMS_GET, iop, tid,
2852 opblk, sizeof(opblk), resblk, sizeof(resblk));
2854 memcpy(buf, resblk+8, buflen); /* cut off header */
2862 * Set a scalar group value or a whole group.
/*
 * Write one scalar field (or, with field == -1, a whole group) to a
 * TID via UtilParamsSet.  The operation block is heap-allocated; the
 * caller's value bytes are appended after the 5- or 6-word header
 * depending on whole-group vs single-field mode.
 */
2864 int i2o_set_scalar(struct i2o_controller *iop, int tid,
2865 int group, int field, void *buf, int buflen)
2868 u8 resblk[8+buflen]; /* 8 bytes for header */
2871 opblk = kmalloc(buflen+64, GFP_KERNEL);
2874 printk(KERN_ERR "i2o: no memory for operation buffer.\n");
2878 opblk[0] = 1; /* operation count */
2879 opblk[1] = 0; /* pad */
2880 opblk[2] = I2O_PARAMS_FIELD_SET;
2883 if(field == -1) { /* whole group */
2885 memcpy(opblk+5, buf, buflen);
2887 else /* single field */
2891 memcpy(opblk+6, buf, buflen);
2894 size = i2o_issue_params(I2O_CMD_UTIL_PARAMS_SET, iop, tid,
2895 opblk, 12+buflen, resblk, sizeof(resblk));
2904 * if oper == I2O_PARAMS_TABLE_GET, get from all rows
2905 * if fieldcount == -1 return all fields
2906 * ibuf and ibuflen are unused (use NULL, 0)
2907 * else return specific fields
2908 * ibuf contains fieldindexes
2910 * if oper == I2O_PARAMS_LIST_GET, get from specific rows
2911 * if fieldcount == -1 return all fields
2912 * ibuf contains rowcount, keyvalues
2913 * else return specific fields
2914 * fieldcount is # of fieldindexes
2915 * ibuf contains fieldindexes, rowcount, keyvalues
2917 * You could also use directly function i2o_issue_params().
/*
 * Read rows from a table group via UtilParamsGet.  'oper' selects
 * TABLE_GET (all rows) or LIST_GET (specific rows); ibuf supplies the
 * field indexes and/or row keys as described in the header comment
 * above.  Returns the i2o_issue_params() result.
 */
2919 int i2o_query_table(int oper, struct i2o_controller *iop, int tid, int group,
2920 int fieldcount, void *ibuf, int ibuflen,
2921 void *resblk, int reslen)
2926 opblk = kmalloc(10 + ibuflen, GFP_KERNEL);
2929 printk(KERN_ERR "i2o: no memory for query buffer.\n");
2933 opblk[0] = 1; /* operation count */
2934 opblk[1] = 0; /* pad */
2937 opblk[4] = fieldcount;
2938 memcpy(opblk+5, ibuf, ibuflen); /* other params */
2940 size = i2o_issue_params(I2O_CMD_UTIL_PARAMS_GET,iop, tid,
2941 opblk, 10+ibuflen, resblk, reslen);
2950 * Clear table group, i.e. delete all rows.
/*
 * Delete every row of a table group with a UtilParamsSet
 * TABLE_CLEAR operation; returns the i2o_issue_params() result.
 */
2952 int i2o_clear_table(struct i2o_controller *iop, int tid, int group)
2954 u16 opblk[] = { 1, 0, I2O_PARAMS_TABLE_CLEAR, group };
2955 u8 resblk[32]; /* min 8 bytes for result header */
2957 return i2o_issue_params(I2O_CMD_UTIL_PARAMS_SET, iop, tid,
2958 opblk, sizeof(opblk), resblk, sizeof(resblk));
2962 * Add a new row into a table group.
2964 * if fieldcount==-1 then we add whole rows
2965 * buf contains rowcount, keyvalues
2966 * else just specific fields are given, rest use defaults
2967 * buf contains fieldindexes, rowcount, keyvalues
/*
 * Insert row(s) into a table group with a UtilParamsSet ROW_ADD
 * operation.  'buf' layout depends on fieldcount as documented in
 * the comment block above; the result header is discarded.
 */
2969 int i2o_row_add_table(struct i2o_controller *iop, int tid,
2970 int group, int fieldcount, void *buf, int buflen)
2973 u8 resblk[32]; /* min 8 bytes for header */
2976 opblk = kmalloc(buflen+64, GFP_KERNEL);
2979 printk(KERN_ERR "i2o: no memory for operation buffer.\n");
2983 opblk[0] = 1; /* operation count */
2984 opblk[1] = 0; /* pad */
2985 opblk[2] = I2O_PARAMS_ROW_ADD;
2987 opblk[4] = fieldcount;
2988 memcpy(opblk+5, buf, buflen);
2990 size = i2o_issue_params(I2O_CMD_UTIL_PARAMS_SET, iop, tid,
2991 opblk, 10+buflen, resblk, sizeof(resblk));
3001 * Used for error reporting/debugging purposes.
3002 * Following fail status are common to all classes.
3003 * The preserved message must be handled in the reply handler.
3005 void i2o_report_fail_status(u8 req_status, u32* msg)
3007 static char *FAIL_STATUS[] = {
3008 "0x80", /* not used */
3009 "SERVICE_SUSPENDED", /* 0x81 */
3010 "SERVICE_TERMINATED", /* 0x82 */
3018 "INVALID_MSG_FLAGS",
3021 "INVALID_TARGET_ID",
3022 "INVALID_INITIATOR_ID",
3023 "INVALID_INITIATOR_CONTEX", /* 0x8F */
3024 "UNKNOWN_FAILURE" /* 0xFF */
3027 if (req_status == I2O_FSC_TRANSPORT_UNKNOWN_FAILURE)
3028 printk("TRANSPORT_UNKNOWN_FAILURE (%0#2x)\n.", req_status);
3030 printk("TRANSPORT_%s.\n", FAIL_STATUS[req_status & 0x0F]);
3032 /* Dump some details */
3034 printk(KERN_ERR " InitiatorId = %d, TargetId = %d\n",
3035 (msg[1] >> 12) & 0xFFF, msg[1] & 0xFFF);
3036 printk(KERN_ERR " LowestVersion = 0x%02X, HighestVersion = 0x%02X\n",
3037 (msg[4] >> 8) & 0xFF, msg[4] & 0xFF);
3038 printk(KERN_ERR " FailingHostUnit = 0x%04X, FailingIOP = 0x%03X\n",
3039 msg[5] >> 16, msg[5] & 0xFFF);
3041 printk(KERN_ERR " Severity: 0x%02X ", (msg[4] >> 16) & 0xFF);
3042 if (msg[4] & (1<<16))
3043 printk("(FormatError), "
3044 "this msg can never be delivered/processed.\n");
3045 if (msg[4] & (1<<17))
3046 printk("(PathError), "
3047 "this msg can no longer be delivered/processed.\n");
3048 if (msg[4] & (1<<18))
3049 printk("(PathState), "
3050 "the system state does not allow delivery.\n");
3051 if (msg[4] & (1<<19))
3052 printk("(Congestion), resources temporarily not available;"
3053 "do not retry immediately.\n");
3057 * Used for error reporting/debugging purposes.
3058 * Following reply status are common to all classes.
3060 void i2o_report_common_status(u8 req_status)
3062 static char *REPLY_STATUS[] = {
3065 "ABORT_NO_DATA_TRANSFER",
3066 "ABORT_PARTIAL_TRANSFER",
3068 "ERROR_NO_DATA_TRANSFER",
3069 "ERROR_PARTIAL_TRANSFER",
3070 "PROCESS_ABORT_DIRTY",
3071 "PROCESS_ABORT_NO_DATA_TRANSFER",
3072 "PROCESS_ABORT_PARTIAL_TRANSFER",
3073 "TRANSACTION_ERROR",
3077 if (req_status >= ARRAY_SIZE(REPLY_STATUS))
3078 printk("RequestStatus = %0#2x", req_status);
3080 printk("%s", REPLY_STATUS[req_status]);
3084 * Used for error reporting/debugging purposes.
3085 * Following detailed status are valid for executive class,
3086 * utility class, DDM class and for transaction error replies.
3088 static void i2o_report_common_dsc(u16 detailed_status)
3090 static char *COMMON_DSC[] = {
3095 "REPLY_BUFFER_FULL",
3097 "INSUFFICIENT_RESOURCE_SOFT",
3098 "INSUFFICIENT_RESOURCE_HARD",
3100 "CHAIN_BUFFER_TOO_LARGE",
3101 "UNSUPPORTED_FUNCTION",
3104 "INAPPROPRIATE_FUNCTION",
3105 "INVALID_INITIATOR_ADDRESS",
3106 "INVALID_MESSAGE_FLAGS",
3108 "INVALID_PARAMETER",
3110 "INVALID_TARGET_ADDRESS",
3111 "MESSAGE_TOO_LARGE",
3112 "MESSAGE_TOO_SMALL",
3113 "MISSING_PARAMETER",
3117 "UNSUPPORTED_VERSION",
3119 "DEVICE_NOT_AVAILABLE"
3122 if (detailed_status > I2O_DSC_DEVICE_NOT_AVAILABLE)
3123 printk(" / DetailedStatus = %0#4x.\n", detailed_status);
3125 printk(" / %s.\n", COMMON_DSC[detailed_status]);
3129 * Used for error reporting/debugging purposes
3131 static void i2o_report_lan_dsc(u16 detailed_status)
3133 static char *LAN_DSC[] = { // Lan detailed status code strings
3136 "DESTINATION_NOT_FOUND",
3142 "BAD_PACKET_DETECTED",
3145 "IOP_INTERNAL_ERROR",
3147 "INVALID_TRANSACTION_CONTEXT",
3148 "DEST_ADDRESS_DETECTED",
3149 "DEST_ADDRESS_OMITTED",
3150 "PARTIAL_PACKET_RETURNED",
3151 "TEMP_SUSPENDED_STATE", // last Lan detailed status code
3152 "INVALID_REQUEST" // general detailed status code
3155 if (detailed_status > I2O_DSC_INVALID_REQUEST)
3156 printk(" / %0#4x.\n", detailed_status);
3158 printk(" / %s.\n", LAN_DSC[detailed_status]);
3162 * Used for error reporting/debugging purposes
3164 static void i2o_report_util_cmd(u8 cmd)
3167 case I2O_CMD_UTIL_NOP:
3168 printk("UTIL_NOP, ");
3170 case I2O_CMD_UTIL_ABORT:
3171 printk("UTIL_ABORT, ");
3173 case I2O_CMD_UTIL_CLAIM:
3174 printk("UTIL_CLAIM, ");
3176 case I2O_CMD_UTIL_RELEASE:
3177 printk("UTIL_CLAIM_RELEASE, ");
3179 case I2O_CMD_UTIL_CONFIG_DIALOG:
3180 printk("UTIL_CONFIG_DIALOG, ");
3182 case I2O_CMD_UTIL_DEVICE_RESERVE:
3183 printk("UTIL_DEVICE_RESERVE, ");
3185 case I2O_CMD_UTIL_DEVICE_RELEASE:
3186 printk("UTIL_DEVICE_RELEASE, ");
3188 case I2O_CMD_UTIL_EVT_ACK:
3189 printk("UTIL_EVENT_ACKNOWLEDGE, ");
3191 case I2O_CMD_UTIL_EVT_REGISTER:
3192 printk("UTIL_EVENT_REGISTER, ");
3194 case I2O_CMD_UTIL_LOCK:
3195 printk("UTIL_LOCK, ");
3197 case I2O_CMD_UTIL_LOCK_RELEASE:
3198 printk("UTIL_LOCK_RELEASE, ");
3200 case I2O_CMD_UTIL_PARAMS_GET:
3201 printk("UTIL_PARAMS_GET, ");
3203 case I2O_CMD_UTIL_PARAMS_SET:
3204 printk("UTIL_PARAMS_SET, ");
3206 case I2O_CMD_UTIL_REPLY_FAULT_NOTIFY:
3207 printk("UTIL_REPLY_FAULT_NOTIFY, ");
3210 printk("Cmd = %0#2x, ",cmd);
3215 * Used for error reporting/debugging purposes
3217 static void i2o_report_exec_cmd(u8 cmd)
3220 case I2O_CMD_ADAPTER_ASSIGN:
3221 printk("EXEC_ADAPTER_ASSIGN, ");
3223 case I2O_CMD_ADAPTER_READ:
3224 printk("EXEC_ADAPTER_READ, ");
3226 case I2O_CMD_ADAPTER_RELEASE:
3227 printk("EXEC_ADAPTER_RELEASE, ");
3229 case I2O_CMD_BIOS_INFO_SET:
3230 printk("EXEC_BIOS_INFO_SET, ");
3232 case I2O_CMD_BOOT_DEVICE_SET:
3233 printk("EXEC_BOOT_DEVICE_SET, ");
3235 case I2O_CMD_CONFIG_VALIDATE:
3236 printk("EXEC_CONFIG_VALIDATE, ");
3238 case I2O_CMD_CONN_SETUP:
3239 printk("EXEC_CONN_SETUP, ");
3241 case I2O_CMD_DDM_DESTROY:
3242 printk("EXEC_DDM_DESTROY, ");
3244 case I2O_CMD_DDM_ENABLE:
3245 printk("EXEC_DDM_ENABLE, ");
3247 case I2O_CMD_DDM_QUIESCE:
3248 printk("EXEC_DDM_QUIESCE, ");
3250 case I2O_CMD_DDM_RESET:
3251 printk("EXEC_DDM_RESET, ");
3253 case I2O_CMD_DDM_SUSPEND:
3254 printk("EXEC_DDM_SUSPEND, ");
3256 case I2O_CMD_DEVICE_ASSIGN:
3257 printk("EXEC_DEVICE_ASSIGN, ");
3259 case I2O_CMD_DEVICE_RELEASE:
3260 printk("EXEC_DEVICE_RELEASE, ");
3262 case I2O_CMD_HRT_GET:
3263 printk("EXEC_HRT_GET, ");
3265 case I2O_CMD_ADAPTER_CLEAR:
3266 printk("EXEC_IOP_CLEAR, ");
3268 case I2O_CMD_ADAPTER_CONNECT:
3269 printk("EXEC_IOP_CONNECT, ");
3271 case I2O_CMD_ADAPTER_RESET:
3272 printk("EXEC_IOP_RESET, ");
3274 case I2O_CMD_LCT_NOTIFY:
3275 printk("EXEC_LCT_NOTIFY, ");
3277 case I2O_CMD_OUTBOUND_INIT:
3278 printk("EXEC_OUTBOUND_INIT, ");
3280 case I2O_CMD_PATH_ENABLE:
3281 printk("EXEC_PATH_ENABLE, ");
3283 case I2O_CMD_PATH_QUIESCE:
3284 printk("EXEC_PATH_QUIESCE, ");
3286 case I2O_CMD_PATH_RESET:
3287 printk("EXEC_PATH_RESET, ");
3289 case I2O_CMD_STATIC_MF_CREATE:
3290 printk("EXEC_STATIC_MF_CREATE, ");
3292 case I2O_CMD_STATIC_MF_RELEASE:
3293 printk("EXEC_STATIC_MF_RELEASE, ");
3295 case I2O_CMD_STATUS_GET:
3296 printk("EXEC_STATUS_GET, ");
3298 case I2O_CMD_SW_DOWNLOAD:
3299 printk("EXEC_SW_DOWNLOAD, ");
3301 case I2O_CMD_SW_UPLOAD:
3302 printk("EXEC_SW_UPLOAD, ");
3304 case I2O_CMD_SW_REMOVE:
3305 printk("EXEC_SW_REMOVE, ");
3307 case I2O_CMD_SYS_ENABLE:
3308 printk("EXEC_SYS_ENABLE, ");
3310 case I2O_CMD_SYS_MODIFY:
3311 printk("EXEC_SYS_MODIFY, ");
3313 case I2O_CMD_SYS_QUIESCE:
3314 printk("EXEC_SYS_QUIESCE, ");
3316 case I2O_CMD_SYS_TAB_SET:
3317 printk("EXEC_SYS_TAB_SET, ");
3320 printk("Cmd = %#02x, ",cmd);
3325 * Used for error reporting/debugging purposes
3327 static void i2o_report_lan_cmd(u8 cmd)
3330 case LAN_PACKET_SEND:
3331 printk("LAN_PACKET_SEND, ");
3334 printk("LAN_SDU_SEND, ");
3336 case LAN_RECEIVE_POST:
3337 printk("LAN_RECEIVE_POST, ");
3340 printk("LAN_RESET, ");
3343 printk("LAN_SUSPEND, ");
3346 printk("Cmd = %0#2x, ",cmd);
3351 * Used for error reporting/debugging purposes.
3352 * Report Cmd name, Request status, Detailed Status.
3354 void i2o_report_status(const char *severity, const char *str, u32 *msg)
3356 u8 cmd = (msg[1]>>24)&0xFF;
3357 u8 req_status = (msg[4]>>24)&0xFF;
3358 u16 detailed_status = msg[4]&0xFFFF;
3359 struct i2o_handler *h = i2o_handlers[msg[2] & (MAX_I2O_MODULES-1)];
3361 if (cmd == I2O_CMD_UTIL_EVT_REGISTER)
3362 return; // No status in this reply
3364 printk("%s%s: ", severity, str);
3366 if (cmd < 0x1F) // Utility cmd
3367 i2o_report_util_cmd(cmd);
3369 else if (cmd >= 0xA0 && cmd <= 0xEF) // Executive cmd
3370 i2o_report_exec_cmd(cmd);
3372 else if (h->class == I2O_CLASS_LAN && cmd >= 0x30 && cmd <= 0x3F)
3373 i2o_report_lan_cmd(cmd); // LAN cmd
3375 printk("Cmd = %0#2x, ", cmd); // Other cmds
3377 if (msg[0] & MSG_FAIL) {
3378 i2o_report_fail_status(req_status, msg);
3382 i2o_report_common_status(req_status);
3384 if (cmd < 0x1F || (cmd >= 0xA0 && cmd <= 0xEF))
3385 i2o_report_common_dsc(detailed_status);
3386 else if (h->class == I2O_CLASS_LAN && cmd >= 0x30 && cmd <= 0x3F)
3387 i2o_report_lan_dsc(detailed_status);
3389 printk(" / DetailedStatus = %0#4x.\n", detailed_status);
3392 /* Used to dump a message to syslog during debugging */
3393 void i2o_dump_message(u32 *msg)
3397 printk(KERN_INFO "Dumping I2O message size %d @ %p\n",
3398 msg[0]>>16&0xffff, msg);
3399 for(i = 0; i < ((msg[0]>>16)&0xffff); i++)
3400 printk(KERN_INFO " msg[%d] = %0#10x\n", i, msg[i]);
3405 * I2O reboot/shutdown notification.
3407 * - Call each OSM's reboot notifier (if one exists)
3408 * - Quiesce each IOP in the system
3410 * Each IOP has to be quiesced before we can ensure that the system
3411 * can be properly shutdown as a transaction that has already been
3412 * acknowledged still needs to be placed in permanent store on the IOP.
3413 * The SysQuiesce causes the IOP to force all HDMs to complete their
3414 * transactions before returning, so only at that point is it safe
3417 static int i2o_reboot_event(struct notifier_block *n, unsigned long code, void
3421 struct i2o_controller *c = NULL;
3423 if(code != SYS_RESTART && code != SYS_HALT && code != SYS_POWER_OFF)
3426 printk(KERN_INFO "Shutting down I2O system.\n");
3428 " This could take a few minutes if there are many devices attached\n");
3430 for(i = 0; i < MAX_I2O_MODULES; i++)
3432 if(i2o_handlers[i] && i2o_handlers[i]->reboot_notify)
3433 i2o_handlers[i]->reboot_notify();
3436 for(c = i2o_controller_chain; c; c = c->next)
3438 if(i2o_quiesce_controller(c))
3440 printk(KERN_WARNING "i2o: Could not quiesce %s.\n"
3441 "Verify setup on next system power up.\n",
3446 printk(KERN_INFO "I2O system down.\n");
3454 * i2o_pci_dispose - Free bus specific resources
3455 * @c: I2O controller
3457 * Disable interrupts and then free interrupt, I/O and mtrr resources
3458 * used by this controller. Called by the I2O core on unload.
3461 static void i2o_pci_dispose(struct i2o_controller *c)
3463 I2O_IRQ_WRITE32(c,0xFFFFFFFF);
3465 free_irq(c->irq, c);
3466 iounmap(((u8 *)c->post_port)-0x40);
3469 if(c->mtrr_reg0 > 0)
3470 mtrr_del(c->mtrr_reg0, 0, 0);
3471 if(c->mtrr_reg1 > 0)
3472 mtrr_del(c->mtrr_reg1, 0, 0);
3477 * i2o_pci_interrupt - Bus specific interrupt handler
3478 * @irq: interrupt line
3481 * Handle an interrupt from a PCI based I2O controller. This turns out
3482 * to be rather simple. We keep the controller pointer in the cookie.
3485 static irqreturn_t i2o_pci_interrupt(int irq, void *dev_id, struct pt_regs *r)
3487 struct i2o_controller *c = dev_id;
/**
 *	i2o_pci_install - Install a PCI i2o controller
 *	@dev: PCI device of the I2O controller
 *
 *	Install a PCI (or in theory AGP) i2o controller. Devices are
 *	initialized, configured and registered with the i2o core subsystem. Be
 *	very careful with ordering. There may be pending interrupts.
 *
 *	To Do: Add support for polled controllers
 */
int __init i2o_pci_install(struct pci_dev *dev)
	/* NOTE(review): this excerpt is missing many lines of the function
	 * (braces, declarations, loop headers, error-path returns); the
	 * comments below annotate only the code that is visible. */
	/* Allocate and zero the per-controller state */
	struct i2o_controller *c=kmalloc(sizeof(struct i2o_controller),
	printk(KERN_ERR "i2o: Insufficient memory to add controller.\n");
	memset(c, 0, sizeof(*c));
	/* Scan the BARs for the first memory region — that is the IOP's
	 * messaging unit */
	/* Skip I/O spaces */
	if(!(pci_resource_flags(dev, i) & IORESOURCE_IO))
	memptr = pci_resource_start(dev, i);
	printk(KERN_ERR "i2o: I2O controller has no memory regions defined.\n");
	size = dev->resource[i].end-dev->resource[i].start+1;
	/* Map the I2O controller */
	printk(KERN_INFO "i2o: PCI I2O controller at 0x%08X size=%d\n", memptr, size);
	mem = (unsigned long)ioremap(memptr, size);
	printk(KERN_ERR "i2o: Unable to map controller.\n");
	/* Messaging unit register offsets within the mapped BAR:
	 * 0x34 interrupt mask, 0x40 inbound post, 0x44 outbound reply */
	c->irq_mask = mem+0x34;
	c->post_port = mem+0x40;
	c->reply_port = mem+0x44;
	c->mem_phys = memptr;
	c->mem_offset = mem;
	/*
	 * Cards that fall apart if you hit them with large I/O
	 */
	if(dev->vendor == PCI_VENDOR_ID_NCR && dev->device == 0x0630)
	printk(KERN_INFO "I2O: Symbios FC920 workarounds activated.\n");
	if(dev->subsystem_vendor == PCI_VENDOR_ID_PROMISE)
	printk(KERN_INFO "I2O: Promise workarounds activated.\n");
	/*
	 * Cards that go bananas if you quiesce them before you reset
	 */
	if(dev->vendor == PCI_VENDOR_ID_DPT)
	/*
	 * Enable Write Combining MTRR for IOP's memory region
	 */
	mtrr_add(c->mem_phys, size, MTRR_TYPE_WRCOMB, 1);
	/*
	 * If it is an INTEL i960 I/O processor then set the first 64K to
	 * Uncacheable since the region contains the Messaging unit which
	 * shouldn't be cached.
	 */
	if(dev->vendor == PCI_VENDOR_ID_INTEL || dev->vendor == PCI_VENDOR_ID_DPT)
	printk(KERN_INFO "I2O: MTRR workaround for Intel i960 processor\n");
	c->mtrr_reg1 = mtrr_add(c->mem_phys, 65536, MTRR_TYPE_UNCACHABLE, 1);
	/* Roll back the write-combining MTRR if the uncacheable one failed */
	printk(KERN_INFO "i2o_pci: Error in setting MTRR_TYPE_UNCACHABLE\n");
	mtrr_del(c->mtrr_reg0, c->mem_phys, size);
	/* Mask interrupts before registering — there may be pending ones */
	I2O_IRQ_WRITE32(c,0xFFFFFFFF);
	i = i2o_install_controller(c);
	printk(KERN_ERR "i2o: Unable to install controller.\n");
	iounmap((void *)mem);
	/* Shared IRQ: the controller pointer is the dev_id cookie */
	i=request_irq(dev->irq, i2o_pci_interrupt, SA_SHIRQ,
	printk(KERN_ERR "%s: unable to allocate interrupt %d.\n",
	i2o_delete_controller(c);
	iounmap((void *)mem);
	printk(KERN_INFO "%s: Installed at IRQ%d\n", c->name, dev->irq);
	/* Unmask interrupts now that the handler is in place */
	I2O_IRQ_WRITE32(c,0x0);
3646 * i2o_pci_scan - Scan the pci bus for controllers
3648 * Scan the PCI devices on the system looking for any device which is a
3649 * memory of the Intelligent, I2O class. We attempt to set up each such device
3650 * and register it with the core.
3652 * Returns the number of controllers registered
3654 * Note; Do not change this to a hot plug interface. I2O 1.5 itself
3655 * does not support hot plugging.
3658 int __init i2o_pci_scan(void)
3660 struct pci_dev *dev = NULL;
3663 printk(KERN_INFO "i2o: Checking for PCI I2O controllers...\n");
3665 while ((dev = pci_find_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL)
3667 if((dev->class>>8)!=PCI_CLASS_INTELLIGENT_I2O)
3670 if((dev->class&0xFF)>1)
3672 printk(KERN_INFO "i2o: I2O Controller found but does not support I2O 1.5 (skipping).\n");
3675 if (pci_enable_device(dev))
3677 printk(KERN_INFO "i2o: I2O controller on bus %d at %d.\n",
3678 dev->bus->number, dev->devfn);
3679 if(pci_set_dma_mask(dev, 0xffffffff))
3681 printk(KERN_WARNING "I2O controller on bus %d at %d : No suitable DMA available\n", dev->bus->number, dev->devfn);
3684 pci_set_master(dev);
3685 if(i2o_pci_install(dev)==0)
3689 printk(KERN_INFO "i2o: %d I2O controller%s found and installed.\n", count,
3691 return count?count:-ENODEV;
/* Module entry point: register the core's own message handler, start the
 * event handling kernel thread and hook reboot notification.
 * NOTE(review): several lines (braces, returns, the i2o_sys_init() call
 * guarded by i2o_num_controllers) are missing from this excerpt; the
 * comments annotate only the visible code. */
static int i2o_core_init(void)
	printk(KERN_INFO "I2O Core - (C) Copyright 1999 Red Hat Software\n");
	/* Without the core handler no replies can be routed — bail out */
	if (i2o_install_handler(&i2o_core_handler) < 0)
	printk(KERN_ERR "i2o_core: Unable to install core handler.\nI2O stack not loaded!");
	/* Remember the initiator context the core was assigned; replies
	 * carry it back so they can be matched to this handler */
	core_context = i2o_core_handler.context;
	/*
	 * Initialize event handling thread
	 */
	init_MUTEX_LOCKED(&evt_sem);
	evt_pid = kernel_thread(i2o_core_evt, &evt_reply, CLONE_SIGHAND);
	/* Thread creation failed: undo the handler registration */
	printk(KERN_ERR "I2O: Could not create event handler kernel thread\n");
	i2o_remove_handler(&i2o_core_handler);
	printk(KERN_INFO "I2O: Event thread created as pid %d\n", evt_pid);
	/* Bring up the I2O subsystem only if controllers were found */
	if(i2o_num_controllers)
	register_reboot_notifier(&i2o_reboot_notifier);
/* Module exit: unhook reboot notification, shut the subsystem down, kill
 * the event thread and remove the core handler.
 * NOTE(review): braces, the i2o_sys_shutdown() call and the evt_running
 * conditional are missing from this excerpt; comments annotate only the
 * visible code. */
static void i2o_core_exit(void)
	unregister_reboot_notifier(&i2o_reboot_notifier);
	if(i2o_num_controllers)
	/*
	 * If this is shutdown time, the thread has already been killed
	 */
	printk("Terminating i2o threads...");
	/* Ask the event thread to die; it signals evt_dead on exit */
	stat = kill_proc(evt_pid, SIGKILL, 1);
	printk("waiting...\n");
	wait_for_completion(&evt_dead);
	i2o_remove_handler(&i2o_core_handler);
/* Module entry/exit hooks */
module_init(i2o_core_init);
module_exit(i2o_core_exit);

/* Module parameter: enable verbose diagnostics (old-style MODULE_PARM) */
MODULE_PARM(verbose, "i");
MODULE_PARM_DESC(verbose, "Verbose diagnostics");

MODULE_AUTHOR("Red Hat Software");
MODULE_DESCRIPTION("I2O Core");
MODULE_LICENSE("GPL");

/* Public API exported to the OSMs (block, LAN, SCSI, config modules) */
EXPORT_SYMBOL(i2o_controller_chain);
EXPORT_SYMBOL(i2o_num_controllers);
EXPORT_SYMBOL(i2o_find_controller);
EXPORT_SYMBOL(i2o_unlock_controller);
EXPORT_SYMBOL(i2o_status_get);
EXPORT_SYMBOL(i2o_install_handler);
EXPORT_SYMBOL(i2o_remove_handler);
EXPORT_SYMBOL(i2o_install_controller);
EXPORT_SYMBOL(i2o_delete_controller);
EXPORT_SYMBOL(i2o_run_queue);
EXPORT_SYMBOL(i2o_claim_device);
EXPORT_SYMBOL(i2o_release_device);
EXPORT_SYMBOL(i2o_device_notify_on);
EXPORT_SYMBOL(i2o_device_notify_off);
EXPORT_SYMBOL(i2o_post_this);
EXPORT_SYMBOL(i2o_post_wait);
EXPORT_SYMBOL(i2o_post_wait_mem);
EXPORT_SYMBOL(i2o_query_scalar);
EXPORT_SYMBOL(i2o_set_scalar);
EXPORT_SYMBOL(i2o_query_table);
EXPORT_SYMBOL(i2o_clear_table);
EXPORT_SYMBOL(i2o_row_add_table);
EXPORT_SYMBOL(i2o_issue_params);
EXPORT_SYMBOL(i2o_event_register);
EXPORT_SYMBOL(i2o_event_ack);
EXPORT_SYMBOL(i2o_report_status);
EXPORT_SYMBOL(i2o_dump_message);
EXPORT_SYMBOL(i2o_get_class_name);