2 * I2O Configuration Interface Driver
4 * (C) Copyright 1999-2002 Red Hat
6 * Written by Alan Cox, Building Number Three Ltd
9 * Deepak Saxena (04/20/1999):
10 * Added basic ioctl() support
11 * Deepak Saxena (06/07/1999):
12 * Added software download ioctl (still testing)
13 * Auvo Häkkinen (09/10/1999):
14 * Changes to i2o_cfg_reply(), ioctl_parms()
15 * Added ioctl_validate()
16 * Taneli Vähäkangas (09/30/1999):
18 * Taneli Vähäkangas (10/04/1999):
19 * Changed ioctl_swdl(), implemented ioctl_swul() and ioctl_swdel()
20 * Deepak Saxena (11/18/1999):
21 * Added event management support
22 * Alan Cox <alan@redhat.com>:
23 * 2.4 rewrite ported to 2.5
24 * Markus Lidel <Markus.Lidel@shadowconnect.com>:
25 * Added pass-thru support for Adaptec's raidutils
27 * This program is free software; you can redistribute it and/or
28 * modify it under the terms of the GNU General Public License
29 * as published by the Free Software Foundation; either version
30 * 2 of the License, or (at your option) any later version.
33 #include <linux/module.h>
34 #include <linux/kernel.h>
35 #include <linux/pci.h>
36 #include <linux/i2o.h>
37 #include <linux/errno.h>
38 #include <linux/init.h>
39 #include <linux/slab.h>
40 #include <linux/miscdevice.h>
42 #include <linux/spinlock.h>
43 #include <linux/smp_lock.h>
45 #include <asm/uaccess.h>
/* Driver-global state.
 * i2o_cfg_context caches the handler context assigned by
 * i2o_install_handler(); -1 means "handler not (yet) registered".
 * page_buf is a 4 KiB scratch buffer allocated in i2o_config_init().
 * i2o_config_lock guards the per-open-file event queues and the
 * open_files list shared with the IRQ-context reply handler. */
48 static int i2o_cfg_context = -1;
49 static void *page_buf;
50 static spinlock_t i2o_config_lock = SPIN_LOCK_UNLOCKED;
51 struct wait_queue *i2o_wait_queue;
/* Circular increment for the fixed-size per-file event queues. */
53 #define MODINC(x,y) ((x) = ((x) + 1) % (y))
55 struct sg_simple_element {
/* NOTE(review): the members of sg_simple_element (flag_count, addr_bus —
 * see their use in ioctl_passthru) and the opening line of
 * struct i2o_cfg_info are elided from this excerpt.  The fields below
 * belong to the per-open-file i2o_cfg_info record used by the event
 * delivery machinery. */
63 struct fasync_struct *fasync;
64 struct i2o_evt_info event_q[I2O_EVT_Q_LEN];
65 u16 q_in; // Queue head index
66 u16 q_out; // Queue tail index
67 u16 q_len; // Queue length
68 u16 q_lost; // Number of lost events
69 u32 q_id; // Event queue ID...used as tx_context
70 struct i2o_cfg_info *next;
/* Head of the singly linked list of per-open-file records, plus the id
 * counter handed out on each open() (stored in file->private_data and
 * used as the event queue id / transaction context). */
72 static struct i2o_cfg_info *open_files = NULL;
73 static int i2o_cfg_info_id = 0;
/* Forward declarations for the ioctl worker routines dispatched from
 * cfg_ioctl() below, and the fasync helper used by cfg_release(). */
75 static int ioctl_getiops(unsigned long);
76 static int ioctl_gethrt(unsigned long);
77 static int ioctl_getlct(unsigned long);
78 static int ioctl_parms(unsigned long, unsigned int);
79 static int ioctl_html(unsigned long);
80 static int ioctl_swdl(unsigned long);
81 static int ioctl_swul(unsigned long);
82 static int ioctl_swdel(unsigned long);
83 static int ioctl_validate(unsigned long);
84 static int ioctl_evt_reg(unsigned long, struct file *);
85 static int ioctl_evt_get(unsigned long, struct file *);
86 static int ioctl_passthru(unsigned long);
87 static int cfg_fasync(int, struct file*, int);
90 * This is the callback for any message we have posted. The message itself
91 * will be returned to the message pool when we return from the IRQ
93 * This runs in irq context so be short and sweet.
95 static void i2o_cfg_reply(struct i2o_handler *h, struct i2o_controller *c, struct i2o_message *m)
/* NOTE(review): the declaration of the local `msg` pointer (aliasing the
 * reply frame `m`) and several braces/returns are elided from this excerpt. */
99 if (msg[0] & MSG_FAIL) {
100 u32 *preserved_msg = (u32*)(c->mem_offset + msg[7]);
102 printk(KERN_ERR "i2o_config: IOP failed to process the msg.\n");
104 /* Release the preserved msg frame by resubmitting it as a NOP */
106 preserved_msg[0] = THREE_WORD_MSG_SIZE | SGL_OFFSET_0;
107 preserved_msg[1] = I2O_CMD_UTIL_NOP << 24 | HOST_TID << 12 | 0;
108 preserved_msg[2] = 0;
109 i2o_post_message(c, msg[7]);
112 if (msg[4] >> 24) // ReqStatus != SUCCESS
113 i2o_report_status(KERN_INFO,"i2o_config", msg);
/* Event notification path: locate the i2o_cfg_info whose queue id matches
 * the transaction context (msg[3]) and append the event to its queue. */
115 if(m->function == I2O_CMD_UTIL_EVT_REGISTER)
117 struct i2o_cfg_info *inf;
119 for(inf = open_files; inf; inf = inf->next)
120 if(inf->q_id == msg[3])
124 // If this is the case, it means that we're getting
125 // events for a file descriptor that's been close()'d
126 // w/o the user unregistering for events first.
127 // The code currently assumes that the user will
128 // take care of unregistering for events before closing
132 // Should we track event registration and deregister
133 // for events when a file is close()'d so this doesn't
134 // happen? That would get rid of the search through
135 // the linked list since file->private_data could point
136 // directly to the i2o_config_info data structure...but
137 // it would mean having all sorts of tables to track
138 // what each file is registered for...I think the
139 // current method is simpler. - DS
/* FIXME(review): the queue slot is filled in BEFORE i2o_config_lock is
 * taken below, so a concurrent I2OEVTGET on another CPU can observe a
 * half-written entry.  The lock should cover this copy as well. */
144 inf->event_q[inf->q_in].id.iop = c->unit;
145 inf->event_q[inf->q_in].id.tid = m->target_tid;
146 inf->event_q[inf->q_in].id.evt_mask = msg[4];
149 // Data size = msg size - reply header
151 inf->event_q[inf->q_in].data_size = (m->size - 5) * 4;
152 if(inf->event_q[inf->q_in].data_size)
153 memcpy(inf->event_q[inf->q_in].evt_data,
154 (unsigned char *)(msg + 5),
155 inf->event_q[inf->q_in].data_size);
157 spin_lock(&i2o_config_lock);
158 MODINC(inf->q_in, I2O_EVT_Q_LEN);
/* Queue full: drop the oldest entry by advancing q_out as well
 * (q_lost accounting appears in the elided lines). */
159 if(inf->q_len == I2O_EVT_Q_LEN)
161 MODINC(inf->q_out, I2O_EVT_Q_LEN);
166 // Keep I2OEVTGET on another CPU from touching this
169 spin_unlock(&i2o_config_lock);
172 // printk(KERN_INFO "File %p w/id %d has %d events\n",
173 // inf->fp, inf->q_id, inf->q_len);
/* Wake any SIGIO listener registered via cfg_fasync(). */
175 kill_fasync(&inf->fasync, SIGIO, POLL_IN);
182 * Each of these describes an i2o message handler. They are
183 * multiplexed by the i2o_core code
/* NOTE(review): the initializer members between the struct header and the
 * class mask (reply callback, name, context, ...) are elided here. */
186 struct i2o_handler cfg_handler=
194 0xffffffff // All classes
/* write(2) stub: not implemented; logs and (in elided lines) returns an
 * error to the caller. */
197 static ssize_t cfg_write(struct file *file, const char *buf, size_t count, loff_t *ppos)
199 printk(KERN_INFO "i2o_config write not yet supported\n");
/* read(2) stub: body elided from this excerpt; all real work goes through
 * the ioctl interface below. */
205 static ssize_t cfg_read(struct file *file, char *buf, size_t count, loff_t *ptr)
/* Main ioctl dispatcher: routes each I2O configuration ioctl to its
 * worker routine.  NOTE(review): the switch statement, its case labels,
 * the default branch and the final return are elided from this excerpt;
 * only the per-command dispatch calls remain. */
213 static int cfg_ioctl(struct inode *inode, struct file *fp, unsigned int cmd,
221 ret = ioctl_getiops(arg);
225 ret = ioctl_gethrt(arg);
229 ret = ioctl_getlct(arg);
233 ret = ioctl_parms(arg, I2OPARMSET);
237 ret = ioctl_parms(arg, I2OPARMGET);
241 ret = ioctl_swdl(arg);
245 ret = ioctl_swul(arg);
249 ret = ioctl_swdel(arg);
253 ret = ioctl_validate(arg);
257 ret = ioctl_html(arg);
261 ret = ioctl_evt_reg(arg, fp);
265 ret = ioctl_evt_get(arg, fp);
269 ret = ioctl_passthru(arg);
/* I2OGETIOPS: report which controller slots (0..MAX_I2O_CONTROLLERS-1)
 * are populated by writing a presence byte-map to user space. */
279 int ioctl_getiops(unsigned long arg)
281 u8 *user_iop_table = (u8*)arg;
282 struct i2o_controller *c = NULL;
284 u8 foo[MAX_I2O_CONTROLLERS];
/* Validate the whole destination range once, so the unchecked
 * __copy_to_user at the end is safe. */
286 if(!access_ok(VERIFY_WRITE, user_iop_table, MAX_I2O_CONTROLLERS))
289 for(i = 0; i < MAX_I2O_CONTROLLERS; i++)
291 c = i2o_find_controller(i);
/* Controllers that cannot do 32-bit DMA are reported as absent. */
295 if(pci_set_dma_mask(c->pdev, 0xffffffff))
297 printk(KERN_WARNING "i2o_config : No suitable DMA available on controller %d\n", i);
298 i2o_unlock_controller(c);
302 i2o_unlock_controller(c);
310 __copy_to_user(user_iop_table, foo, MAX_I2O_CONTROLLERS);
/* I2OHRTGET: copy the controller's cached Hardware Resource Table to the
 * user buffer and store its byte length through kcmd.reslen. */
314 int ioctl_gethrt(unsigned long arg)
316 struct i2o_controller *c;
317 struct i2o_cmd_hrtlct *cmd = (struct i2o_cmd_hrtlct*)arg;
318 struct i2o_cmd_hrtlct kcmd;
324 if(copy_from_user(&kcmd, cmd, sizeof(struct i2o_cmd_hrtlct)))
327 if(get_user(reslen, kcmd.reslen) < 0)
330 if(kcmd.resbuf == NULL)
333 c = i2o_find_controller(kcmd.iop);
337 hrt = (i2o_hrt *)c->hrt;
339 i2o_unlock_controller(c);
/* HRT length in bytes: 8-byte header plus entry_len (in u32s) per entry. */
341 len = 8 + ((hrt->entry_len * hrt->num_entries) << 2);
343 /* We did a get user...so assuming mem is ok...is this bad? */
344 put_user(len, kcmd.reslen);
/* NOTE(review): a len-vs-reslen guard appears to be elided before this
 * copy; as shown, a too-small user buffer is not checked. */
347 if(copy_to_user(kcmd.resbuf, (void*)hrt, len))
/* I2OLCTGET: copy the controller's cached Logical Configuration Table to
 * the user buffer and report its byte length through kcmd.reslen. */
353 int ioctl_getlct(unsigned long arg)
355 struct i2o_controller *c;
356 struct i2o_cmd_hrtlct *cmd = (struct i2o_cmd_hrtlct*)arg;
357 struct i2o_cmd_hrtlct kcmd;
363 if(copy_from_user(&kcmd, cmd, sizeof(struct i2o_cmd_hrtlct)))
366 if(get_user(reslen, kcmd.reslen) < 0)
369 if(kcmd.resbuf == NULL)
372 c = i2o_find_controller(kcmd.iop);
376 lct = (i2o_lct *)c->lct;
377 i2o_unlock_controller(c);
/* table_size is in u32 units; convert to bytes. */
379 len = (unsigned int)lct->table_size << 2;
380 put_user(len, kcmd.reslen);
/* NOTE(review): the branch preceding this `else if` (presumably the
 * len > reslen check) is elided from this excerpt. */
383 else if(copy_to_user(kcmd.resbuf, (void*)lct, len))
/* I2OPARMSET / I2OPARMGET: forward a UtilParamsSet/Get operation list to
 * the target device and copy the result block back to user space. */
389 static int ioctl_parms(unsigned long arg, unsigned int type)
392 struct i2o_controller *c;
393 struct i2o_cmd_psetget *cmd = (struct i2o_cmd_psetget*)arg;
394 struct i2o_cmd_psetget kcmd;
400 u32 i2o_cmd = (type == I2OPARMGET ?
401 I2O_CMD_UTIL_PARAMS_GET :
402 I2O_CMD_UTIL_PARAMS_SET);
404 if(copy_from_user(&kcmd, cmd, sizeof(struct i2o_cmd_psetget)))
407 if(get_user(reslen, kcmd.reslen))
410 c = i2o_find_controller(kcmd.iop);
/* NOTE(review): kcmd.oplen comes straight from user space and is used
 * unchecked here as both a kmalloc size and a copy length — verify a
 * bound exists in the elided lines. */
414 ops = (u8*)kmalloc(kcmd.oplen, GFP_KERNEL);
417 i2o_unlock_controller(c);
421 if(copy_from_user(ops, kcmd.opbuf, kcmd.oplen))
423 i2o_unlock_controller(c);
429 * It's possible to have a _very_ large table
430 * and that the user asks for all of it at once...
432 res = (u8*)kmalloc(65536, GFP_KERNEL);
435 i2o_unlock_controller(c);
440 len = i2o_issue_params(i2o_cmd, c, kcmd.tid,
441 ops, kcmd.oplen, res, 65536);
442 i2o_unlock_controller(c);
450 put_user(len, kcmd.reslen);
453 else if(copy_to_user(kcmd.resbuf, res, len))
/* I2OHTML: run a Config Dialog (HTML page) request against a device,
 * optionally posting query data, and return the resulting page to user
 * space.  NOTE(review): several error-return statements and braces are
 * elided from this excerpt. */
461 int ioctl_html(unsigned long arg)
463 struct i2o_html *cmd = (struct i2o_html*)arg;
464 struct i2o_html kcmd;
465 struct i2o_controller *c;
468 dma_addr_t query_phys, res_phys;
473 u32 msg[MSG_FRAME_SIZE];
475 if(copy_from_user(&kcmd, cmd, sizeof(struct i2o_html)))
477 printk(KERN_INFO "i2o_config: can't copy html cmd\n");
481 if(get_user(reslen, kcmd.reslen) < 0)
483 printk(KERN_INFO "i2o_config: can't copy html reslen\n");
489 printk(KERN_INFO "i2o_config: NULL html buffer\n");
493 c = i2o_find_controller(kcmd.iop);
497 if(kcmd.qlen) /* Check for post data */
/* DMA-coherent bounce buffers: one for the posted query (if any), one
 * 64 KiB buffer for the returned page. */
499 query = pci_alloc_consistent(c->pdev, kcmd.qlen, &query_phys);
502 i2o_unlock_controller(c);
505 if(copy_from_user(query, kcmd.qbuf, kcmd.qlen))
507 i2o_unlock_controller(c);
508 printk(KERN_INFO "i2o_config: could not get query\n");
509 pci_free_consistent(c->pdev, kcmd.qlen, query, query_phys);
514 res = pci_alloc_consistent(c->pdev, 65536, &res_phys);
517 i2o_unlock_controller(c);
518 pci_free_consistent(c->pdev, kcmd.qlen, query, query_phys);
/* Build the ConfigDialog request; the SGL layout differs depending on
 * whether post data is present. */
522 msg[1] = (I2O_CMD_UTIL_CONFIG_DIALOG << 24)|HOST_TID<<12|kcmd.tid;
523 msg[2] = i2o_cfg_context;
526 msg[5] = 0xD0000000|65536;
528 if(!kcmd.qlen) /* Check for post data */
529 msg[0] = SEVEN_WORD_MSG_SIZE|SGL_OFFSET_5;
532 msg[0] = NINE_WORD_MSG_SIZE|SGL_OFFSET_5;
533 msg[5] = 0x50000000|65536;
534 msg[7] = 0xD4000000|(kcmd.qlen);
538 Wait for a considerable time till the Controller
539 does its job before timing out. The controller might
540 take more time to process this request if there are
541 many devices connected to it.
543 token = i2o_post_wait_mem(c, msg, 9*4, 400, query, res, query_phys, res_phys, kcmd.qlen, 65536);
546 printk(KERN_DEBUG "token = %#10x\n", token);
547 i2o_unlock_controller(c);
/* On timeout the buffers are NOT freed here — presumably
 * i2o_post_wait_mem retains ownership and releases them later; verify
 * against its implementation. */
549 if(token != -ETIMEDOUT)
551 pci_free_consistent(c->pdev, 65536, res, res_phys);
553 pci_free_consistent(c->pdev, kcmd.qlen, query, query_phys);
557 i2o_unlock_controller(c);
/* Success: page is a C string; report its length and copy it out. */
559 len = strnlen(res, 65536);
560 put_user(len, kcmd.reslen);
563 if(copy_to_user(kcmd.resbuf, res, len))
566 pci_free_consistent(c->pdev, 65536, res, res_phys);
568 pci_free_consistent(c->pdev, kcmd.qlen, query, query_phys);
/* I2OSWDL: download one fragment (default 8 KiB) of a software image to
 * the IOP; the final fragment may be shorter. */
573 int ioctl_swdl(unsigned long arg)
575 struct i2o_sw_xfer kxfer;
576 struct i2o_sw_xfer *pxfer = (struct i2o_sw_xfer *)arg;
577 unsigned char maxfrag = 0, curfrag = 1;
578 unsigned char *buffer;
580 unsigned int status = 0, swlen = 0, fragsize = 8192;
581 struct i2o_controller *c;
582 dma_addr_t buffer_phys;
584 if(copy_from_user(&kxfer, pxfer, sizeof(struct i2o_sw_xfer)))
587 if(get_user(swlen, kxfer.swlen) < 0)
590 if(get_user(maxfrag, kxfer.maxfrag) < 0)
593 if(get_user(curfrag, kxfer.curfrag) < 0)
/* Last fragment: the bytes remaining after (maxfrag-1) full chunks. */
596 if(curfrag==maxfrag) fragsize = swlen-(maxfrag-1)*8192;
598 if(!kxfer.buf || !access_ok(VERIFY_READ, kxfer.buf, fragsize))
601 c = i2o_find_controller(kxfer.iop);
605 buffer=pci_alloc_consistent(c->pdev, fragsize, &buffer_phys);
608 i2o_unlock_controller(c);
/* Range already validated by access_ok above. */
611 __copy_from_user(buffer, kxfer.buf, fragsize);
613 msg[0]= NINE_WORD_MSG_SIZE | SGL_OFFSET_7;
614 msg[1]= I2O_CMD_SW_DOWNLOAD<<24 | HOST_TID<<12 | ADAPTER_TID;
615 msg[2]= (u32)cfg_handler.context;
617 msg[4]= (((u32)kxfer.flags)<<24) | (((u32)kxfer.sw_type)<<16) |
618 (((u32)maxfrag)<<8) | (((u32)curfrag));
621 msg[7]= (0xD0000000 | fragsize);
624 // printk("i2o_config: swdl frag %d/%d (size %d)\n", curfrag, maxfrag, fragsize);
625 status = i2o_post_wait_mem(c, msg, sizeof(msg), 60, buffer, NULL, buffer_phys, 0, fragsize, 0);
627 i2o_unlock_controller(c);
/* On timeout, the DMA buffer stays with i2o_post_wait_mem (presumably
 * freed later) — only free it here for other outcomes. */
628 if(status != -ETIMEDOUT)
629 pci_free_consistent(c->pdev, fragsize, buffer, buffer_phys);
631 if (status != I2O_POST_WAIT_OK)
633 // it fails if you try and send frags out of order
634 // and for some yet unknown reasons too
635 printk(KERN_INFO "i2o_config: swdl failed, DetailedStatus = %d\n", status);
/* I2OSWUL: upload one fragment (default 8 KiB) of a stored software image
 * from the IOP into the user's buffer; mirror of ioctl_swdl(). */
642 int ioctl_swul(unsigned long arg)
644 struct i2o_sw_xfer kxfer;
645 struct i2o_sw_xfer *pxfer = (struct i2o_sw_xfer *)arg;
646 unsigned char maxfrag = 0, curfrag = 1;
647 unsigned char *buffer;
649 unsigned int status = 0, swlen = 0, fragsize = 8192;
650 struct i2o_controller *c;
651 dma_addr_t buffer_phys;
653 if(copy_from_user(&kxfer, pxfer, sizeof(struct i2o_sw_xfer)))
656 if(get_user(swlen, kxfer.swlen) < 0)
659 if(get_user(maxfrag, kxfer.maxfrag) < 0)
662 if(get_user(curfrag, kxfer.curfrag) < 0)
/* Last fragment: the bytes remaining after (maxfrag-1) full chunks. */
665 if(curfrag==maxfrag) fragsize = swlen-(maxfrag-1)*8192;
667 if(!kxfer.buf || !access_ok(VERIFY_WRITE, kxfer.buf, fragsize))
670 c = i2o_find_controller(kxfer.iop);
674 buffer=pci_alloc_consistent(c->pdev, fragsize, &buffer_phys);
677 i2o_unlock_controller(c);
681 msg[0]= NINE_WORD_MSG_SIZE | SGL_OFFSET_7;
682 msg[1]= I2O_CMD_SW_UPLOAD<<24 | HOST_TID<<12 | ADAPTER_TID;
683 msg[2]= (u32)cfg_handler.context;
685 msg[4]= (u32)kxfer.flags<<24|(u32)kxfer.sw_type<<16|(u32)maxfrag<<8|(u32)curfrag;
688 msg[7]= (0xD0000000 | fragsize);
691 // printk("i2o_config: swul frag %d/%d (size %d)\n", curfrag, maxfrag, fragsize);
692 status = i2o_post_wait_mem(c, msg, sizeof(msg), 60, buffer, NULL, buffer_phys, 0, fragsize, 0);
693 i2o_unlock_controller(c);
/* Failure: free the bounce buffer unless i2o_post_wait_mem kept it
 * (timeout case — presumably released later; verify). */
695 if (status != I2O_POST_WAIT_OK)
697 if(status != -ETIMEDOUT)
698 pci_free_consistent(c->pdev, fragsize, buffer, buffer_phys);
699 printk(KERN_INFO "i2o_config: swul failed, DetailedStatus = %d\n", status);
/* Success: hand the uploaded fragment to user space (range validated by
 * access_ok above) and release the bounce buffer. */
703 __copy_to_user(kxfer.buf, buffer, fragsize);
704 pci_free_consistent(c->pdev, fragsize, buffer, buffer_phys);
/* I2OSWDEL: ask the IOP to remove a stored software module, identified by
 * sw_type/flags and sw_id. */
709 int ioctl_swdel(unsigned long arg)
711 struct i2o_controller *c;
712 struct i2o_sw_xfer kxfer, *pxfer = (struct i2o_sw_xfer *)arg;
717 if (copy_from_user(&kxfer, pxfer, sizeof(struct i2o_sw_xfer)))
720 if (get_user(swlen, kxfer.swlen) < 0)
723 c = i2o_find_controller(kxfer.iop);
727 msg[0] = SEVEN_WORD_MSG_SIZE | SGL_OFFSET_0;
728 msg[1] = I2O_CMD_SW_REMOVE<<24 | HOST_TID<<12 | ADAPTER_TID;
729 msg[2] = (u32)i2o_cfg_context;
731 msg[4] = (u32)kxfer.flags<<24 | (u32)kxfer.sw_type<<16;
733 msg[6] = kxfer.sw_id;
735 token = i2o_post_wait(c, msg, sizeof(msg), 10);
736 i2o_unlock_controller(c);
738 if (token != I2O_POST_WAIT_OK)
740 printk(KERN_INFO "i2o_config: swdel failed, DetailedStatus = %d\n", token);
/* I2OVALIDATE: ask IOP `iop` to validate its current configuration.
 * NOTE(review): the declarations of `iop`, `token` and `msg`, plus the
 * error-return paths, are elided from this excerpt. */
747 int ioctl_validate(unsigned long arg)
752 struct i2o_controller *c;
754 c=i2o_find_controller(iop);
758 msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
759 msg[1] = I2O_CMD_CONFIG_VALIDATE<<24 | HOST_TID<<12 | iop;
760 msg[2] = (u32)i2o_cfg_context;
763 token = i2o_post_wait(c, msg, sizeof(msg), 10);
764 i2o_unlock_controller(c);
766 if (token != I2O_POST_WAIT_OK)
768 printk(KERN_INFO "Can't validate configuration, ErrorStatus = %d\n",
/* I2OEVTREG: register the calling file (identified by its private_data
 * id, carried as the tx_context in msg[3]) for events matching
 * kdesc.evt_mask from device kdesc.tid on IOP kdesc.iop. */
776 static int ioctl_evt_reg(unsigned long arg, struct file *fp)
779 struct i2o_evt_id *pdesc = (struct i2o_evt_id *)arg;
780 struct i2o_evt_id kdesc;
781 struct i2o_controller *iop;
782 struct i2o_device *d;
784 if (copy_from_user(&kdesc, pdesc, sizeof(struct i2o_evt_id)))
788 iop = i2o_find_controller(kdesc.iop);
791 i2o_unlock_controller(iop);
/* Verify the requested TID actually exists on this IOP. */
794 for(d = iop->devices; d; d = d->next)
795 if(d->lct_data.tid == kdesc.tid)
801 msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
802 msg[1] = I2O_CMD_UTIL_EVT_REGISTER<<24 | HOST_TID<<12 | kdesc.tid;
803 msg[2] = (u32)i2o_cfg_context;
804 msg[3] = (u32)fp->private_data;
805 msg[4] = kdesc.evt_mask;
/* Fire-and-forget post; replies are matched up in i2o_cfg_reply(). */
807 i2o_post_this(iop, msg, 20);
/* I2OEVTGET: pop the oldest queued event for this file and copy it out
 * together with the pending/lost counters. */
812 static int ioctl_evt_get(unsigned long arg, struct file *fp)
814 u32 id = (u32)fp->private_data;
815 struct i2o_cfg_info *p = NULL;
816 struct i2o_evt_get *uget = (struct i2o_evt_get*)arg;
817 struct i2o_evt_get kget;
/* Find this file's record by id (match/empty-queue checks are elided). */
820 for(p = open_files; p; p = p->next)
/* FIXME(review): the dequeue (memcpy + MODINC) happens BEFORE the lock is
 * taken, racing with i2o_cfg_reply() filling the queue from IRQ context;
 * the lock should cover the copy and the index update as well. */
830 memcpy(&kget.info, &p->event_q[p->q_out], sizeof(struct i2o_evt_info));
831 MODINC(p->q_out, I2O_EVT_Q_LEN);
832 spin_lock_irqsave(&i2o_config_lock, flags);
834 kget.pending = p->q_len;
835 kget.lost = p->q_lost;
836 spin_unlock_irqrestore(&i2o_config_lock, flags);
838 if(copy_to_user(uget, &kget, sizeof(struct i2o_evt_get)))
/* I2OPASSTHRU: forward a raw user-built I2O message frame (used by
 * Adaptec's raidutils) to a controller, bounce-buffering any simple SG
 * elements into kernel memory on the way in and back out on completion.
 * FIXME(review): `cmd` is a user-space pointer, yet cmd->msg (below) and
 * cmd->iop (further down) are dereferenced directly without
 * copy_from_user/get_user — this needs a proper uaccess copy. */
843 static int ioctl_passthru(unsigned long arg)
845 struct i2o_cmd_passthru *cmd = (struct i2o_cmd_passthru *) arg;
846 struct i2o_controller *c;
847 u32 msg[MSG_FRAME_SIZE];
848 u32 *user_msg = (u32*)cmd->msg;
850 u32 *user_reply = NULL;
854 ulong sg_list[SG_TABLESIZE];
861 c = i2o_find_controller(cmd->iop);
865 memset(&msg, 0, MSG_FRAME_SIZE*4);
/* msg[0] low bits carry the frame size in u32s; the reply area follows
 * the message in the user's buffer. */
866 if(get_user(size, &user_msg[0]))
870 user_reply = &user_msg[size];
871 if(size > MSG_FRAME_SIZE)
873 size *= 4; // Convert to bytes
875 /* Copy in the user's I2O command */
876 if(copy_from_user((void*)msg, (void*)user_msg, size))
878 if(get_user(reply_size, &user_reply[0]) < 0)
/* Reply frame size lives in the upper 16 bits of the first reply word. */
881 reply_size = reply_size>>16;
882 reply = kmalloc(REPLY_FRAME_SIZE*4, GFP_KERNEL);
884 printk(KERN_WARNING"%s: Could not allocate reply buffer\n",c->name);
887 memset(reply, 0, REPLY_FRAME_SIZE*4);
/* SGL offset (in u32s) is encoded in bits 4..7 of the message header;
 * overwrite the context so the reply routes back to this driver. */
888 sg_offset = (msg[0]>>4)&0x0f;
889 msg[2] = (u32)i2o_cfg_context;
892 memset(sg_list,0, sizeof(sg_list[0])*SG_TABLESIZE);
894 struct sg_simple_element *sg;
896 if(sg_offset * 4 >= size) {
901 sg = (struct sg_simple_element*) (msg+sg_offset);
902 sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element);
903 if (sg_count > SG_TABLESIZE) {
904 printk(KERN_DEBUG"%s:IOCTL SG List too large (%u)\n", c->name,sg_count);
/* Replace each simple SG element's user address with a kernel bounce
 * buffer; only simple elements are supported. */
909 for(i = 0; i < sg_count; i++) {
912 if (!(sg[i].flag_count & 0x10000000 /*I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT*/)) {
913 printk(KERN_DEBUG"%s:Bad SG element %d - not simple (%x)\n",c->name,i, sg[i].flag_count);
917 sg_size = sg[i].flag_count & 0xffffff;
918 /* Allocate memory for the transfer */
919 p = (ulong)kmalloc(sg_size, GFP_KERNEL);
921 printk(KERN_DEBUG"%s: Could not allocate SG buffer - size = %d buffer number %d of %d\n", c->name,sg_size,i,sg_count);
925 sg_list[sg_index++] = p; // sglist indexed with input frame, not our internal frame.
926 /* Copy in the user's SG buffer if necessary */
927 if(sg[i].flag_count & 0x04000000 /*I2O_SGL_FLAGS_DIR*/) {
929 if (copy_from_user((void*)p,(void*)sg[i].addr_bus, sg_size)) {
930 printk(KERN_DEBUG"%s: Could not copy SG buf %d FROM user\n",c->name,i);
936 sg[i].addr_bus = (u32)virt_to_bus((void*)p);
940 rcode = i2o_post_wait(c, msg, size, 60);
945 /* Copy back the Scatter Gather buffers back to user space */
948 struct sg_simple_element* sg;
951 // re-acquire the original message to handle correctly the sg copy operation
952 memset(&msg, 0, MSG_FRAME_SIZE*4);
953 // get user msg size in u32s
954 if (get_user(size, &user_msg[0])) {
/* NOTE(review): the u32s-to-bytes conversion of `size` before this copy
 * appears to be elided — verify against the full source. */
960 /* Copy in the user's I2O command */
961 if (copy_from_user ((void*)msg, (void*)user_msg, size)) {
965 sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element);
/* Elements WITHOUT the DIR flag are device-to-host: copy them back out. */
968 sg = (struct sg_simple_element*)(msg + sg_offset);
969 for (j = 0; j < sg_count; j++) {
970 /* Copy out the SG list to user's buffer if necessary */
971 if (!(sg[j].flag_count & 0x4000000 /*I2O_SGL_FLAGS_DIR*/)) {
972 sg_size = sg[j].flag_count & 0xffffff;
974 if (copy_to_user((void*)sg[j].addr_bus,(void*)sg_list[j], sg_size)) {
975 printk(KERN_WARNING"%s: Could not copy %lx TO user %x\n",c->name, sg_list[j], sg[j].addr_bus);
983 /* Copy back the reply to user space */
985 // we wrote our own values for context - now restore the user supplied ones
986 if(copy_from_user(reply+2, user_msg+2, sizeof(u32)*2)) {
987 printk(KERN_WARNING"%s: Could not copy message context FROM user\n",c->name);
990 if(copy_to_user(user_reply, reply, reply_size)) {
991 printk(KERN_WARNING"%s: Could not copy reply TO user\n",c->name);
998 i2o_unlock_controller(c);
/* open(2): allocate a per-file i2o_cfg_info, assign it the next id
 * (stored in file->private_data and reused as the event queue id /
 * tx_context), and push it onto open_files under the config lock.
 * NOTE(review): the allocation-failure check and remaining field
 * initialization are elided from this excerpt. */
1002 static int cfg_open(struct inode *inode, struct file *file)
1004 struct i2o_cfg_info *tmp =
1005 (struct i2o_cfg_info *)kmalloc(sizeof(struct i2o_cfg_info), GFP_KERNEL);
1006 unsigned long flags;
/* FIXME(review): i2o_cfg_info_id++ happens outside the lock taken below,
 * so two concurrent opens could be handed the same id. */
1011 file->private_data = (void*)(i2o_cfg_info_id++);
1014 tmp->q_id = (u32)file->private_data;
1019 tmp->next = open_files;
1021 spin_lock_irqsave(&i2o_config_lock, flags);
1023 spin_unlock_irqrestore(&i2o_config_lock, flags);
/* release(2): unlink this file's i2o_cfg_info from open_files (handling
 * both head and interior positions), tear down its fasync registration,
 * and free it.  NOTE(review): the id match, list-walk advance and kfree
 * are elided from this excerpt. */
1028 static int cfg_release(struct inode *inode, struct file *file)
1030 u32 id = (u32)file->private_data;
1031 struct i2o_cfg_info *p1, *p2;
1032 unsigned long flags;
1037 spin_lock_irqsave(&i2o_config_lock, flags);
1038 for(p1 = open_files; p1; )
/* Drop any SIGIO registration before freeing the record. */
1044 cfg_fasync(-1, file, 0);
1046 p2->next = p1->next;
1048 open_files = p1->next;
1056 spin_unlock_irqrestore(&i2o_config_lock, flags);
/* fasync hook: locate this file's i2o_cfg_info by id and delegate to
 * fasync_helper() for SIGIO delivery bookkeeping.  NOTE(review): the id
 * match inside the loop and the not-found return are elided. */
1062 static int cfg_fasync(int fd, struct file *fp, int on)
1064 u32 id = (u32)fp->private_data;
1065 struct i2o_cfg_info *p;
1067 for(p = open_files; p; p = p->next)
1074 return fasync_helper(fd, fp, on, &p->fasync);
/* Character-device file operations.  NOTE(review): the read/write/ioctl/
 * open members between .llseek and .release are elided here. */
1077 static struct file_operations config_fops =
1079 .owner = THIS_MODULE,
1080 .llseek = no_llseek,
1085 .release = cfg_release,
1086 .fasync = cfg_fasync,
/* Registered as a misc character device; minor/name/fops initializers are
 * elided from this excerpt. */
1089 static struct miscdevice i2o_miscdev = {
/* Module init: allocate the scratch page, register the misc device, then
 * install the I2O message handler, unwinding registrations on failure.
 * NOTE(review): page_buf does not appear to be freed on the failure paths
 * visible here — verify against the elided lines. */
1095 static int __init i2o_config_init(void)
1097 printk(KERN_INFO "I2O configuration manager v 0.04.\n");
1098 printk(KERN_INFO " (C) Copyright 1999 Red Hat Software\n");
1100 if((page_buf = kmalloc(4096, GFP_KERNEL))==NULL)
1102 printk(KERN_ERR "i2o_config: no memory for page buffer.\n");
1105 if(misc_register(&i2o_miscdev) < 0)
1107 printk(KERN_ERR "i2o_config: can't register device.\n");
1112 * Install our handler
1114 if(i2o_install_handler(&cfg_handler)<0)
1117 printk(KERN_ERR "i2o_config: handler register failed.\n");
1118 misc_deregister(&i2o_miscdev);
1122 * The low 16bits of the transaction context must match this
1123 * for everything we post. Otherwise someone else gets our mail
1125 i2o_cfg_context = cfg_handler.context;
/* Module exit: deregister the misc device and, if the handler was ever
 * installed (context != -1), remove it.  NOTE(review): the kfree of
 * page_buf is elided from this excerpt. */
1129 static void i2o_config_exit(void)
1131 misc_deregister(&i2o_miscdev);
1135 if(i2o_cfg_context != -1)
1136 i2o_remove_handler(&cfg_handler);
/* Standard module metadata and entry points. */
1139 MODULE_AUTHOR("Red Hat Software");
1140 MODULE_DESCRIPTION("I2O Configuration");
1141 MODULE_LICENSE("GPL");
1143 module_init(i2o_config_init);
1144 module_exit(i2o_config_exit);