2 * I2O Configuration Interface Driver
4 * (C) Copyright 1999-2002 Red Hat
6 * Written by Alan Cox, Building Number Three Ltd
9 * Deepak Saxena (04/20/1999):
10 * Added basic ioctl() support
11 * Deepak Saxena (06/07/1999):
12 * Added software download ioctl (still testing)
13 * Auvo Häkkinen (09/10/1999):
14 * Changes to i2o_cfg_reply(), ioctl_parms()
15 * Added ioctl_validate()
16 * Taneli Vähäkangas (09/30/1999):
18 * Taneli Vähäkangas (10/04/1999):
19 * Changed ioctl_swdl(), implemented ioctl_swul() and ioctl_swdel()
20 * Deepak Saxena (11/18/1999):
21 * Added event management support
22 * Alan Cox <alan@redhat.com>:
23 * 2.4 rewrite ported to 2.5
24 * Markus Lidel <Markus.Lidel@shadowconnect.com>:
25 * Added pass-thru support for Adaptec's raidutils
27 * This program is free software; you can redistribute it and/or
28 * modify it under the terms of the GNU General Public License
29 * as published by the Free Software Foundation; either version
30 * 2 of the License, or (at your option) any later version.
33 #include <linux/module.h>
34 #include <linux/kernel.h>
35 #include <linux/pci.h>
36 #include <linux/i2o.h>
37 #include <linux/errno.h>
38 #include <linux/init.h>
39 #include <linux/slab.h>
40 #include <linux/miscdevice.h>
42 #include <linux/spinlock.h>
43 #include <linux/smp_lock.h>
45 #include <asm/uaccess.h>
/* Module-wide state for the i2o_config character device. */
48 static int i2o_cfg_context = -1;	/* handler context from i2o_core; -1 until registered */
49 static void *page_buf;	/* scratch page allocated in i2o_config_init() */
50 static spinlock_t i2o_config_lock = SPIN_LOCK_UNLOCKED;	/* guards open_files list and per-file event queues */
51 struct wait_queue *i2o_wait_queue;
/* Circular-increment helper for the fixed-size event queue indices. */
53 #define MODINC(x,y) ((x) = ((x) + 1) % (y))
55 struct sg_simple_element {
/* NOTE(review): this excerpt appears to omit the members of
 * sg_simple_element and the opening of the per-open-file bookkeeping
 * struct (i2o_cfg_info); the fields below belong to i2o_cfg_info —
 * confirm against the full source. */
63 struct fasync_struct *fasync;	/* for SIGIO delivery when events arrive */
64 struct i2o_evt_info event_q[I2O_EVT_Q_LEN];	/* fixed-size circular event queue */
65 u16 q_in; // Queue head index
66 u16 q_out; // Queue tail index
67 u16 q_len; // Queue length
68 u16 q_lost; // Number of lost events
69 u32 q_id; // Event queue ID...used as tx_context
70 struct i2o_cfg_info *next;	/* singly-linked list of all open files */
/* Head of the open-file list and the next queue id to hand out. */
72 static struct i2o_cfg_info *open_files = NULL;
73 static int i2o_cfg_info_id = 0;
/* Forward declarations for the per-ioctl handlers dispatched by cfg_ioctl(). */
75 static int ioctl_getiops(unsigned long);
76 static int ioctl_gethrt(unsigned long);
77 static int ioctl_getlct(unsigned long);
78 static int ioctl_parms(unsigned long, unsigned int);
79 static int ioctl_html(unsigned long);
80 static int ioctl_swdl(unsigned long);
81 static int ioctl_swul(unsigned long);
82 static int ioctl_swdel(unsigned long);
83 static int ioctl_validate(unsigned long);
84 static int ioctl_evt_reg(unsigned long, struct file *);
85 static int ioctl_evt_get(unsigned long, struct file *);
86 static int ioctl_passthru(unsigned long);
87 static int cfg_fasync(int, struct file*, int);
90 * This is the callback for any message we have posted. The message itself
91 * will be returned to the message pool when we return from the IRQ
93 * This runs in irq context so be short and sweet.
95 static void i2o_cfg_reply(struct i2o_handler *h, struct i2o_controller *c, struct i2o_message *m)
/* NOTE(review): this excerpt omits some lines (e.g. the declaration that
 * maps 'msg' onto the reply frame and several braces); the comments below
 * cover only the visible logic. */
/* A failed message preserves the original frame; resubmit it as a NOP so
 * the IOP can reclaim it. */
99 if (msg[0] & MSG_FAIL) {
100 u32 *preserved_msg = (u32*)(c->msg_virt + msg[7]);
102 printk(KERN_ERR "i2o_config: IOP failed to process the msg.\n");
104 /* Release the preserved msg frame by resubmitting it as a NOP */
106 preserved_msg[0] = THREE_WORD_MSG_SIZE | SGL_OFFSET_0;
107 preserved_msg[1] = I2O_CMD_UTIL_NOP << 24 | HOST_TID << 12 | 0;
108 preserved_msg[2] = 0;
109 i2o_post_message(c, msg[7]);
112 if (msg[4] >> 24) // ReqStatus != SUCCESS
113 i2o_report_status(KERN_INFO,"i2o_config", msg);
/* Event notification: find the open file whose queue id matches the
 * transaction context, queue the event, and signal the owner. */
115 if(m->function == I2O_CMD_UTIL_EVT_REGISTER)
117 struct i2o_cfg_info *inf;
119 for(inf = open_files; inf; inf = inf->next)
120 if(inf->q_id == msg[3])
124 // If this is the case, it means that we're getting
125 // events for a file descriptor that's been close()'d
126 // w/o the user unregistering for events first.
127 // The code currently assumes that the user will
128 // take care of unregistering for events before closing
132 // Should we track event registration and deregister
133 // for events when a file is close()'d so this doesn't
134 // happen? That would get rid of the search through
135 // the linked list since file->private_data could point
136 // directly to the i2o_config_info data structure...but
137 // it would mean having all sorts of tables to track
138 // what each file is registered for...I think the
139 // current method is simpler. - DS
/* Record which IOP/TID produced the event and the event mask word. */
144 inf->event_q[inf->q_in].id.iop = c->unit;
145 inf->event_q[inf->q_in].id.tid = m->target_tid;
146 inf->event_q[inf->q_in].id.evt_mask = msg[4];
149 // Data size = msg size - reply header
151 inf->event_q[inf->q_in].data_size = (m->size - 5) * 4;
152 if(inf->event_q[inf->q_in].data_size)
153 memcpy(inf->event_q[inf->q_in].evt_data,
154 (unsigned char *)(msg + 5),
155 inf->event_q[inf->q_in].data_size);
/* Advance the circular queue; if it is full, the oldest event is
 * overwritten (q_out is pushed forward, i.e. the event is lost). */
157 spin_lock(&i2o_config_lock);
158 MODINC(inf->q_in, I2O_EVT_Q_LEN);
159 if(inf->q_len == I2O_EVT_Q_LEN)
161 MODINC(inf->q_out, I2O_EVT_Q_LEN);
166 // Keep I2OEVTGET on another CPU from touching this
169 spin_unlock(&i2o_config_lock);
172 // printk(KERN_INFO "File %p w/id %d has %d events\n",
173 // inf->fp, inf->q_id, inf->q_len);
/* Wake any process that asked for async notification (SIGIO). */
175 kill_fasync(&inf->fasync, SIGIO, POLL_IN);
182 * Each of these describes an i2o message handler. They are
183 * multiplexed by the i2o_core code
/* Registered with i2o_install_handler(); i2o_cfg_reply() is invoked for
 * every reply whose context matches ours. */
186 struct i2o_handler cfg_handler=
194 0xffffffff // All classes
/* read()/write() on /dev/i2octl are not supported; all functionality goes
 * through ioctl(). */
197 static ssize_t cfg_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
199 printk(KERN_INFO "i2o_config write not yet supported\n");
205 static ssize_t cfg_read(struct file *file, char __user *buf, size_t count, loff_t *ptr)
/* Top-level ioctl dispatcher: routes each I2O* command to its handler.
 * NOTE(review): the 'case' labels and the switch statement itself are
 * missing from this excerpt — the assignments below are the case bodies. */
213 static int cfg_ioctl(struct inode *inode, struct file *fp, unsigned int cmd,
221 ret = ioctl_getiops(arg);
225 ret = ioctl_gethrt(arg);
229 ret = ioctl_getlct(arg);
233 ret = ioctl_parms(arg, I2OPARMSET);
237 ret = ioctl_parms(arg, I2OPARMGET);
241 ret = ioctl_swdl(arg);
245 ret = ioctl_swul(arg);
249 ret = ioctl_swdel(arg);
253 ret = ioctl_validate(arg);
257 ret = ioctl_html(arg);
261 ret = ioctl_evt_reg(arg, fp);
265 ret = ioctl_evt_get(arg, fp);
269 ret = ioctl_passthru(arg);
/* I2OGETIOPS: report which of the MAX_I2O_CONTROLLERS slots hold an active
 * controller, as a byte array copied to the user buffer at 'arg'. */
279 int ioctl_getiops(unsigned long arg)
281 u8 __user *user_iop_table = (void __user *)arg;
282 struct i2o_controller *c = NULL;
284 u8 foo[MAX_I2O_CONTROLLERS];
/* Validate the whole destination once so __copy_to_user is safe below. */
286 if(!access_ok(VERIFY_WRITE, user_iop_table, MAX_I2O_CONTROLLERS))
289 for(i = 0; i < MAX_I2O_CONTROLLERS; i++)
291 c = i2o_find_controller(i);
/* Controllers whose PCI device cannot do 32-bit DMA are reported as absent. */
295 if(pci_set_dma_mask(c->pdev, 0xffffffff))
297 printk(KERN_WARNING "i2o_config : No suitable DMA available on controller %d\n", i);
298 i2o_unlock_controller(c);
302 i2o_unlock_controller(c);
310 __copy_to_user(user_iop_table, foo, MAX_I2O_CONTROLLERS);
/* I2OHRTGET: copy the controller's Hardware Resource Table to user space.
 * The actual length is always written back through kcmd.reslen. */
314 int ioctl_gethrt(unsigned long arg)
316 struct i2o_controller *c;
317 struct i2o_cmd_hrtlct __user *cmd = (void __user *)arg;
318 struct i2o_cmd_hrtlct kcmd;
324 if(copy_from_user(&kcmd, cmd, sizeof(struct i2o_cmd_hrtlct)))
327 if(get_user(reslen, kcmd.reslen) < 0)
330 if(kcmd.resbuf == NULL)
333 c = i2o_find_controller(kcmd.iop);
337 hrt = (i2o_hrt *)c->hrt;
339 i2o_unlock_controller(c);
/* HRT size = 8-byte header + num_entries entries of entry_len 32-bit words. */
341 len = 8 + ((hrt->entry_len * hrt->num_entries) << 2);
343 /* We did a get user...so assuming mem is ok...is this bad? */
344 put_user(len, kcmd.reslen);
347 if(copy_to_user(kcmd.resbuf, (void*)hrt, len))
/* I2OLCTGET: copy the controller's Logical Configuration Table to user
 * space; the real size is written back through kcmd.reslen. */
353 int ioctl_getlct(unsigned long arg)
355 struct i2o_controller *c;
356 struct i2o_cmd_hrtlct __user *cmd = (void __user *)arg;
357 struct i2o_cmd_hrtlct kcmd;
363 if(copy_from_user(&kcmd, cmd, sizeof(struct i2o_cmd_hrtlct)))
366 if(get_user(reslen, kcmd.reslen) < 0)
369 if(kcmd.resbuf == NULL)
372 c = i2o_find_controller(kcmd.iop);
376 lct = (i2o_lct *)c->lct;
377 i2o_unlock_controller(c);
/* table_size is in 32-bit words; convert to bytes. */
379 len = (unsigned int)lct->table_size << 2;
380 put_user(len, kcmd.reslen);
383 else if(copy_to_user(kcmd.resbuf, (void*)lct, len))
/* I2OPARMGET/I2OPARMSET: run a UtilParamsGet/Set against a target device.
 * The operation list is copied in from the user, the (up to 64KB) result
 * is copied back, and the true length is written through kcmd.reslen. */
389 static int ioctl_parms(unsigned long arg, unsigned int type)
392 struct i2o_controller *c;
393 struct i2o_cmd_psetget __user *cmd = (void __user *)arg;
394 struct i2o_cmd_psetget kcmd;
400 u32 i2o_cmd = (type == I2OPARMGET ?
401 I2O_CMD_UTIL_PARAMS_GET :
402 I2O_CMD_UTIL_PARAMS_SET);
404 if(copy_from_user(&kcmd, cmd, sizeof(struct i2o_cmd_psetget)))
407 if(get_user(reslen, kcmd.reslen))
410 c = i2o_find_controller(kcmd.iop);
/* Kernel-side copy of the user's operation block. */
414 ops = (u8*)kmalloc(kcmd.oplen, GFP_KERNEL);
417 i2o_unlock_controller(c);
421 if(copy_from_user(ops, kcmd.opbuf, kcmd.oplen))
423 i2o_unlock_controller(c);
429 * It's possible to have a _very_ large table
430 * and that the user asks for all of it at once...
432 res = (u8*)kmalloc(65536, GFP_KERNEL);
435 i2o_unlock_controller(c);
440 len = i2o_issue_params(i2o_cmd, c, kcmd.tid,
441 ops, kcmd.oplen, res, 65536);
442 i2o_unlock_controller(c);
450 put_user(len, kcmd.reslen);
453 else if(copy_to_user(kcmd.resbuf, res, len))
/* I2OHTML: fetch an HTML configuration-dialog page from a device, with
 * optional POST data.  Uses DMA-coherent buffers for both the query and
 * the (64KB) result, and i2o_post_wait_mem so the buffers outlive a
 * timed-out request. */
461 int ioctl_html(unsigned long arg)
463 struct i2o_html __user *cmd = (void __user *)arg;
464 struct i2o_html kcmd;
465 struct i2o_controller *c;
468 dma_addr_t query_phys, res_phys;
473 u32 msg[MSG_FRAME_SIZE];
475 if(copy_from_user(&kcmd, cmd, sizeof(struct i2o_html)))
477 printk(KERN_INFO "i2o_config: can't copy html cmd\n");
481 if(get_user(reslen, kcmd.reslen) < 0)
483 printk(KERN_INFO "i2o_config: can't copy html reslen\n");
489 printk(KERN_INFO "i2o_config: NULL html buffer\n");
493 c = i2o_find_controller(kcmd.iop);
497 if(kcmd.qlen) /* Check for post data */
499 query = pci_alloc_consistent(c->pdev, kcmd.qlen, &query_phys);
502 i2o_unlock_controller(c);
505 if(copy_from_user(query, kcmd.qbuf, kcmd.qlen))
507 i2o_unlock_controller(c);
508 printk(KERN_INFO "i2o_config: could not get query\n");
509 pci_free_consistent(c->pdev, kcmd.qlen, query, query_phys);
514 res = pci_alloc_consistent(c->pdev, 65536, &res_phys);
517 i2o_unlock_controller(c);
518 pci_free_consistent(c->pdev, kcmd.qlen, query, query_phys);
/* Build the ConfigDialog request; SGL entry for the result buffer. */
522 msg[1] = (I2O_CMD_UTIL_CONFIG_DIALOG << 24)|HOST_TID<<12|kcmd.tid;
523 msg[2] = i2o_cfg_context;
526 msg[5] = 0xD0000000|65536;
528 if(!kcmd.qlen) /* Check for post data */
529 msg[0] = SEVEN_WORD_MSG_SIZE|SGL_OFFSET_5;
/* With POST data the SGL grows to two elements: result then query. */
532 msg[0] = NINE_WORD_MSG_SIZE|SGL_OFFSET_5;
533 msg[5] = 0x50000000|65536;
534 msg[7] = 0xD4000000|(kcmd.qlen);
538 Wait for a considerable time till the Controller
539 does its job before timing out. The controller might
540 take more time to process this request if there are
541 many devices connected to it.
543 token = i2o_post_wait_mem(c, msg, 9*4, 400, query, res, query_phys, res_phys, kcmd.qlen, 65536);
546 printk(KERN_DEBUG "token = %#10x\n", token);
547 i2o_unlock_controller(c);
/* On timeout i2o_post_wait_mem keeps ownership of the buffers, so only
 * free them here for other failures. */
549 if(token != -ETIMEDOUT)
551 pci_free_consistent(c->pdev, 65536, res, res_phys);
553 pci_free_consistent(c->pdev, kcmd.qlen, query, query_phys);
557 i2o_unlock_controller(c);
/* The page is NUL-terminated text; report and copy only its real length. */
559 len = strnlen(res, 65536);
560 put_user(len, kcmd.reslen);
563 if(copy_to_user(kcmd.resbuf, res, len))
566 pci_free_consistent(c->pdev, 65536, res, res_phys);
568 pci_free_consistent(c->pdev, kcmd.qlen, query, query_phys);
/* I2OSWDL: download one 8KB fragment of a firmware/software image to the
 * IOP.  Fragments must be sent in order; the last fragment may be short. */
573 int ioctl_swdl(unsigned long arg)
575 struct i2o_sw_xfer kxfer;
576 struct i2o_sw_xfer __user *pxfer = (void __user *)arg;
577 unsigned char maxfrag = 0, curfrag = 1;
578 unsigned char *buffer;
580 unsigned int status = 0, swlen = 0, fragsize = 8192;
581 struct i2o_controller *c;
582 dma_addr_t buffer_phys;
584 if(copy_from_user(&kxfer, pxfer, sizeof(struct i2o_sw_xfer)))
587 if(get_user(swlen, kxfer.swlen) < 0)
590 if(get_user(maxfrag, kxfer.maxfrag) < 0)
593 if(get_user(curfrag, kxfer.curfrag) < 0)
/* Final fragment carries whatever is left of the image. */
596 if(curfrag==maxfrag) fragsize = swlen-(maxfrag-1)*8192;
598 if(!kxfer.buf || !access_ok(VERIFY_READ, kxfer.buf, fragsize))
601 c = i2o_find_controller(kxfer.iop);
605 buffer=pci_alloc_consistent(c->pdev, fragsize, &buffer_phys);
608 i2o_unlock_controller(c);
611 __copy_from_user(buffer, kxfer.buf, fragsize);
613 msg[0]= NINE_WORD_MSG_SIZE | SGL_OFFSET_7;
614 msg[1]= I2O_CMD_SW_DOWNLOAD<<24 | HOST_TID<<12 | ADAPTER_TID;
615 msg[2]= (u32)cfg_handler.context;
/* flags | sw_type | maxfrag | curfrag packed into one word. */
617 msg[4]= (((u32)kxfer.flags)<<24) | (((u32)kxfer.sw_type)<<16) |
618 (((u32)maxfrag)<<8) | (((u32)curfrag));
621 msg[7]= (0xD0000000 | fragsize);
624 // printk("i2o_config: swdl frag %d/%d (size %d)\n", curfrag, maxfrag, fragsize);
625 status = i2o_post_wait_mem(c, msg, sizeof(msg), 60, buffer, NULL, buffer_phys, 0, fragsize, 0);
627 i2o_unlock_controller(c);
/* On timeout the post_wait_mem core still owns the buffer; don't free. */
628 if(status != -ETIMEDOUT)
629 pci_free_consistent(c->pdev, fragsize, buffer, buffer_phys);
631 if (status != I2O_POST_WAIT_OK)
633 // it fails if you try and send frags out of order
634 // and for some yet unknown reasons too
635 printk(KERN_INFO "i2o_config: swdl failed, DetailedStatus = %d\n", status);
/* I2OSWUL: upload one 8KB fragment of a software image from the IOP to
 * the user's buffer.  Mirror image of ioctl_swdl(). */
642 int ioctl_swul(unsigned long arg)
644 struct i2o_sw_xfer kxfer;
645 struct i2o_sw_xfer __user *pxfer = (void __user *)arg;
646 unsigned char maxfrag = 0, curfrag = 1;
647 unsigned char *buffer;
649 unsigned int status = 0, swlen = 0, fragsize = 8192;
650 struct i2o_controller *c;
651 dma_addr_t buffer_phys;
653 if(copy_from_user(&kxfer, pxfer, sizeof(struct i2o_sw_xfer)))
656 if(get_user(swlen, kxfer.swlen) < 0)
659 if(get_user(maxfrag, kxfer.maxfrag) < 0)
662 if(get_user(curfrag, kxfer.curfrag) < 0)
/* Final fragment carries the image remainder. */
665 if(curfrag==maxfrag) fragsize = swlen-(maxfrag-1)*8192;
667 if(!kxfer.buf || !access_ok(VERIFY_WRITE, kxfer.buf, fragsize))
670 c = i2o_find_controller(kxfer.iop);
674 buffer=pci_alloc_consistent(c->pdev, fragsize, &buffer_phys);
677 i2o_unlock_controller(c);
681 msg[0]= NINE_WORD_MSG_SIZE | SGL_OFFSET_7;
682 msg[1]= I2O_CMD_SW_UPLOAD<<24 | HOST_TID<<12 | ADAPTER_TID;
683 msg[2]= (u32)cfg_handler.context;
685 msg[4]= (u32)kxfer.flags<<24|(u32)kxfer.sw_type<<16|(u32)maxfrag<<8|(u32)curfrag;
688 msg[7]= (0xD0000000 | fragsize);
691 // printk("i2o_config: swul frag %d/%d (size %d)\n", curfrag, maxfrag, fragsize);
692 status = i2o_post_wait_mem(c, msg, sizeof(msg), 60, buffer, NULL, buffer_phys, 0, fragsize, 0);
693 i2o_unlock_controller(c);
695 if (status != I2O_POST_WAIT_OK)
/* Timed-out requests leave buffer ownership with the post_wait core. */
697 if(status != -ETIMEDOUT)
698 pci_free_consistent(c->pdev, fragsize, buffer, buffer_phys);
699 printk(KERN_INFO "i2o_config: swul failed, DetailedStatus = %d\n", status);
/* Success: hand the uploaded fragment back to user space. */
703 __copy_to_user(kxfer.buf, buffer, fragsize);
704 pci_free_consistent(c->pdev, fragsize, buffer, buffer_phys);
/* I2OSWDEL: ask the IOP to remove a stored software image identified by
 * flags/sw_type/sw_id. */
709 int ioctl_swdel(unsigned long arg)
711 struct i2o_controller *c;
712 struct i2o_sw_xfer kxfer;
713 struct i2o_sw_xfer __user *pxfer = (void __user *)arg;
718 if (copy_from_user(&kxfer, pxfer, sizeof(struct i2o_sw_xfer)))
721 if (get_user(swlen, kxfer.swlen) < 0)
724 c = i2o_find_controller(kxfer.iop);
728 msg[0] = SEVEN_WORD_MSG_SIZE | SGL_OFFSET_0;
729 msg[1] = I2O_CMD_SW_REMOVE<<24 | HOST_TID<<12 | ADAPTER_TID;
730 msg[2] = (u32)i2o_cfg_context;
732 msg[4] = (u32)kxfer.flags<<24 | (u32)kxfer.sw_type<<16;
734 msg[6] = kxfer.sw_id;
736 token = i2o_post_wait(c, msg, sizeof(msg), 10);
737 i2o_unlock_controller(c);
739 if (token != I2O_POST_WAIT_OK)
741 printk(KERN_INFO "i2o_config: swdel failed, DetailedStatus = %d\n", token);
/* I2OVALIDATE: send a ConfigValidate request to the given IOP.
 * NOTE(review): the line deriving 'iop' from 'arg' is missing from this
 * excerpt — confirm against the full source. */
748 int ioctl_validate(unsigned long arg)
753 struct i2o_controller *c;
755 c=i2o_find_controller(iop);
759 msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
760 msg[1] = I2O_CMD_CONFIG_VALIDATE<<24 | HOST_TID<<12 | iop;
761 msg[2] = (u32)i2o_cfg_context;
764 token = i2o_post_wait(c, msg, sizeof(msg), 10);
765 i2o_unlock_controller(c);
767 if (token != I2O_POST_WAIT_OK)
769 printk(KERN_INFO "Can't validate configuration, ErrorStatus = %d\n",
/* I2OEVTREG: register this open file for events from a device.  The file's
 * queue id (stored in private_data) is used as the transaction context so
 * i2o_cfg_reply() can route events back to the right queue. */
777 static int ioctl_evt_reg(unsigned long arg, struct file *fp)
780 struct i2o_evt_id __user *pdesc = (void __user *)arg;
781 struct i2o_evt_id kdesc;
782 struct i2o_controller *iop;
783 struct i2o_device *d;
785 if (copy_from_user(&kdesc, pdesc, sizeof(struct i2o_evt_id)))
789 iop = i2o_find_controller(kdesc.iop);
792 i2o_unlock_controller(iop);
/* Verify the target TID actually exists on this controller. */
795 for(d = iop->devices; d; d = d->next)
796 if(d->lct_data.tid == kdesc.tid)
802 msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
803 msg[1] = I2O_CMD_UTIL_EVT_REGISTER<<24 | HOST_TID<<12 | kdesc.tid;
804 msg[2] = (u32)i2o_cfg_context;
805 msg[3] = (u32)fp->private_data;
806 msg[4] = kdesc.evt_mask;
/* Fire and forget: replies arrive asynchronously as events. */
808 i2o_post_this(iop, msg, 20);
/* I2OEVTGET: pop the oldest queued event for this open file and copy it,
 * plus pending/lost counters, to user space. */
813 static int ioctl_evt_get(unsigned long arg, struct file *fp)
815 u32 id = (u32)fp->private_data;
816 struct i2o_cfg_info *p = NULL;
817 struct i2o_evt_get __user *uget = (void __user *)arg;
818 struct i2o_evt_get kget;
/* Locate this file's bookkeeping entry by queue id. */
821 for(p = open_files; p; p = p->next)
831 memcpy(&kget.info, &p->event_q[p->q_out], sizeof(struct i2o_evt_info));
832 MODINC(p->q_out, I2O_EVT_Q_LEN);
833 spin_lock_irqsave(&i2o_config_lock, flags);
835 kget.pending = p->q_len;
836 kget.lost = p->q_lost;
837 spin_unlock_irqrestore(&i2o_config_lock, flags);
839 if(copy_to_user(uget, &kget, sizeof(struct i2o_evt_get)))
/* I2OPASSTHRU: forward a raw user-built I2O message to an IOP (used by
 * Adaptec's raidutils).  Simple SG elements in the message are remapped
 * to kernel bounce buffers, the message is posted synchronously, and SG
 * data plus the reply frame are copied back to user space. */
844 static int ioctl_passthru(unsigned long arg)
846 struct i2o_cmd_passthru __user *cmd = (void __user *) arg;
847 struct i2o_controller *c;
848 u32 msg[MSG_FRAME_SIZE];
849 u32 __user *user_msg;
851 u32 __user *user_reply = NULL;
855 void *sg_list[SG_TABLESIZE];
863 if (get_user(iop, &cmd->iop) || get_user(user_msg, &cmd->msg))
866 c = i2o_find_controller(iop);
870 memset(&msg, 0, MSG_FRAME_SIZE*4);
/* First word of the user message carries its size (in 32-bit words). */
871 if(get_user(size, &user_msg[0]))
/* The user's reply buffer follows the message in their memory layout. */
875 user_reply = &user_msg[size];
876 if(size > MSG_FRAME_SIZE)
878 size *= 4; // Convert to bytes
880 /* Copy in the user's I2O command */
881 if(copy_from_user(msg, user_msg, size))
883 if(get_user(reply_size, &user_reply[0]) < 0)
886 reply_size = reply_size>>16;
887 reply = kmalloc(REPLY_FRAME_SIZE*4, GFP_KERNEL);
889 printk(KERN_WARNING"%s: Could not allocate reply buffer\n",c->name);
892 memset(reply, 0, REPLY_FRAME_SIZE*4);
/* SGL offset (in words) is encoded in bits 4..7 of the first word. */
893 sg_offset = (msg[0]>>4)&0x0f;
894 msg[2] = (u32)i2o_cfg_context;
897 memset(sg_list,0, sizeof(sg_list[0])*SG_TABLESIZE);
899 struct sg_simple_element *sg;
901 if(sg_offset * 4 >= size) {
906 sg = (struct sg_simple_element*) (msg+sg_offset);
907 sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element);
908 if (sg_count > SG_TABLESIZE) {
909 printk(KERN_DEBUG"%s:IOCTL SG List too large (%u)\n", c->name,sg_count);
/* Replace each user SG address with a kernel bounce buffer. */
914 for(i = 0; i < sg_count; i++) {
917 if (!(sg[i].flag_count & 0x10000000 /*I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT*/)) {
918 printk(KERN_DEBUG"%s:Bad SG element %d - not simple (%x)\n",c->name,i, sg[i].flag_count);
922 sg_size = sg[i].flag_count & 0xffffff;
923 /* Allocate memory for the transfer */
924 p = kmalloc(sg_size, GFP_KERNEL);
926 printk(KERN_DEBUG"%s: Could not allocate SG buffer - size = %d buffer number %d of %d\n", c->name,sg_size,i,sg_count);
930 sg_list[sg_index++] = p; // sglist indexed with input frame, not our internal frame.
931 /* Copy in the user's SG buffer if necessary */
932 if(sg[i].flag_count & 0x04000000 /*I2O_SGL_FLAGS_DIR*/) {
934 if (copy_from_user(p,(void __user *)sg[i].addr_bus, sg_size)) {
935 printk(KERN_DEBUG"%s: Could not copy SG buf %d FROM user\n",c->name,i);
941 sg[i].addr_bus = (u32)virt_to_bus(p);
/* Post the (rewritten) message and wait up to 60s for completion. */
945 rcode = i2o_post_wait(c, msg, size, 60);
950 /* Copy back the Scatter Gather buffers back to user space */
953 struct sg_simple_element* sg;
956 // re-acquire the original message to handle correctly the sg copy operation
957 memset(&msg, 0, MSG_FRAME_SIZE*4);
958 // get user msg size in u32s
959 if (get_user(size, &user_msg[0])) {
965 /* Copy in the user's I2O command */
966 if (copy_from_user (msg, user_msg, size)) {
970 sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element);
973 sg = (struct sg_simple_element*)(msg + sg_offset);
974 for (j = 0; j < sg_count; j++) {
975 /* Copy out the SG list to user's buffer if necessary */
976 if (!(sg[j].flag_count & 0x4000000 /*I2O_SGL_FLAGS_DIR*/)) {
977 sg_size = sg[j].flag_count & 0xffffff;
979 if (copy_to_user((void __user *)sg[j].addr_bus,sg_list[j], sg_size)) {
980 printk(KERN_WARNING"%s: Could not copy %p TO user %x\n",c->name, sg_list[j], sg[j].addr_bus);
988 /* Copy back the reply to user space */
990 // we wrote our own values for context - now restore the user supplied ones
991 if(copy_from_user(reply+2, user_msg+2, sizeof(u32)*2)) {
992 printk(KERN_WARNING"%s: Could not copy message context FROM user\n",c->name);
995 if(copy_to_user(user_reply, reply, reply_size)) {
996 printk(KERN_WARNING"%s: Could not copy reply TO user\n",c->name);
1003 i2o_unlock_controller(c);
/* open(): allocate a per-file i2o_cfg_info, give it a unique queue id
 * (stored in file->private_data), and link it onto open_files. */
1007 static int cfg_open(struct inode *inode, struct file *file)
1009 struct i2o_cfg_info *tmp =
1010 (struct i2o_cfg_info *)kmalloc(sizeof(struct i2o_cfg_info), GFP_KERNEL);
1011 unsigned long flags;
/* private_data holds the queue id itself, not a pointer. */
1016 file->private_data = (void*)(i2o_cfg_info_id++);
1019 tmp->q_id = (u32)file->private_data;
1024 tmp->next = open_files;
1026 spin_lock_irqsave(&i2o_config_lock, flags);
1028 spin_unlock_irqrestore(&i2o_config_lock, flags);
/* release(): unlink and free this file's i2o_cfg_info entry, tearing down
 * any fasync registration first. */
1033 static int cfg_release(struct inode *inode, struct file *file)
1035 u32 id = (u32)file->private_data;
1036 struct i2o_cfg_info *p1, *p2;
1037 unsigned long flags;
1042 spin_lock_irqsave(&i2o_config_lock, flags);
1043 for(p1 = open_files; p1; )
1049 cfg_fasync(-1, file, 0);
/* p2 tracks the predecessor so the list node can be unlinked. */
1051 p2->next = p1->next;
1053 open_files = p1->next;
1061 spin_unlock_irqrestore(&i2o_config_lock, flags);
/* fasync(): attach/detach SIGIO notification for this file's event queue. */
1067 static int cfg_fasync(int fd, struct file *fp, int on)
1069 u32 id = (u32)fp->private_data;
1070 struct i2o_cfg_info *p;
1072 for(p = open_files; p; p = p->next)
1079 return fasync_helper(fd, fp, on, &p->fasync);
/* File operations and the misc device node for /dev/i2octl. */
1082 static struct file_operations config_fops =
1084 .owner = THIS_MODULE,
1085 .llseek = no_llseek,
1090 .release = cfg_release,
1091 .fasync = cfg_fasync,
1094 static struct miscdevice i2o_miscdev = {
/* Module init: allocate the scratch page, register the misc device, then
 * install our reply handler with i2o_core and record its context. */
1100 static int __init i2o_config_init(void)
1102 printk(KERN_INFO "I2O configuration manager v 0.04.\n");
1103 printk(KERN_INFO " (C) Copyright 1999 Red Hat Software\n");
1105 if((page_buf = kmalloc(4096, GFP_KERNEL))==NULL)
1107 printk(KERN_ERR "i2o_config: no memory for page buffer.\n");
1110 if(misc_register(&i2o_miscdev) < 0)
1112 printk(KERN_ERR "i2o_config: can't register device.\n");
1117 * Install our handler
1119 if(i2o_install_handler(&cfg_handler)<0)
/* Undo the misc_register done above on handler failure. */
1122 printk(KERN_ERR "i2o_config: handler register failed.\n");
1123 misc_deregister(&i2o_miscdev);
1127 * The low 16bits of the transaction context must match this
1128 * for everything we post. Otherwise someone else gets our mail
1130 i2o_cfg_context = cfg_handler.context;
/* Module exit: unregister the device node and, if init got far enough to
 * install the handler (context != -1), remove it from i2o_core. */
1134 static void i2o_config_exit(void)
1136 misc_deregister(&i2o_miscdev);
1140 if(i2o_cfg_context != -1)
1141 i2o_remove_handler(&cfg_handler);
/* Standard module metadata and entry/exit hookup. */
1144 MODULE_AUTHOR("Red Hat Software");
1145 MODULE_DESCRIPTION("I2O Configuration");
1146 MODULE_LICENSE("GPL");
1148 module_init(i2o_config_init);
1149 module_exit(i2o_config_exit);