2 * I2O Configuration Interface Driver
4 * (C) Copyright 1999-2002 Red Hat
6 * Written by Alan Cox, Building Number Three Ltd
8 * Modified 04/20/1999 by Deepak Saxena
9 * - Added basic ioctl() support
10 * Modified 06/07/1999 by Deepak Saxena
11 * - Added software download ioctl (still testing)
12 * Modified 09/10/1999 by Auvo Häkkinen
13 * - Changes to i2o_cfg_reply(), ioctl_parms()
14 * - Added ioct_validate()
15 * Modified 09/30/1999 by Taneli Vähäkangas
16 * - Fixed ioctl_swdl()
17 * Modified 10/04/1999 by Taneli Vähäkangas
18 * - Changed ioctl_swdl(), implemented ioctl_swul() and ioctl_swdel()
19 * Modified 11/18/1999 by Deepak Saxena
20 * - Added event management support
22 * 2.4 rewrite ported to 2.5 - Alan Cox <alan@redhat.com>
24 * This program is free software; you can redistribute it and/or
25 * modify it under the terms of the GNU General Public License
26 * as published by the Free Software Foundation; either version
27 * 2 of the License, or (at your option) any later version.
30 #include <linux/module.h>
31 #include <linux/kernel.h>
32 #include <linux/pci.h>
33 #include <linux/i2o.h>
34 #include <linux/errno.h>
35 #include <linux/init.h>
36 #include <linux/slab.h>
37 #include <linux/miscdevice.h>
39 #include <linux/spinlock.h>
40 #include <linux/smp_lock.h>
42 #include <asm/uaccess.h>
/* Driver-global state.
 * NOTE(review): this extract has interior lines elided, so neighbouring
 * declarations may be missing from view. */
45 static int i2o_cfg_context = -1;	/* message-handler context from i2o_core; -1 = handler not installed */
46 static void *page_buf;	/* scratch page allocated in i2o_config_init() */
47 static spinlock_t i2o_config_lock = SPIN_LOCK_UNLOCKED;	/* guards open_files list and per-file event queue indices */
48 struct wait_queue *i2o_wait_queue;
/* Advance ring index x modulo queue size y.  x is evaluated twice, so do
 * not pass an expression with side effects. */
50 #define MODINC(x,y) ((x) = ((x) + 1) % (y))
/* Per-open-file bookkeeping fields (struct i2o_cfg_info; the struct header
 * line is not visible in this extract — TODO confirm against full source). */
55 struct fasync_struct *fasync;	// SIGIO delivery target for kill_fasync()
56 struct i2o_evt_info event_q[I2O_EVT_Q_LEN];	// circular buffer of pending events
57 u16 q_in; // Queue head index
58 u16 q_out; // Queue tail index
59 u16 q_len; // Queue length
60 u16 q_lost; // Number of lost events
61 u32 q_id; // Event queue ID...used as tx_context
62 struct i2o_cfg_info *next;	// singly-linked list, headed by open_files
/* Head of the list of all currently open config files, and the counter
 * used to hand out a unique queue id per open (see cfg_open()). */
64 static struct i2o_cfg_info *open_files = NULL;
65 static int i2o_cfg_info_id = 0;
/* Forward declarations: one helper per I2O configuration ioctl, dispatched
 * from cfg_ioctl(). */
67 static int ioctl_getiops(unsigned long);
68 static int ioctl_gethrt(unsigned long);
69 static int ioctl_getlct(unsigned long);
70 static int ioctl_parms(unsigned long, unsigned int);
71 static int ioctl_html(unsigned long);
72 static int ioctl_swdl(unsigned long);
73 static int ioctl_swul(unsigned long);
74 static int ioctl_swdel(unsigned long);
75 static int ioctl_validate(unsigned long);
76 static int ioctl_evt_reg(unsigned long, struct file *);
77 static int ioctl_evt_get(unsigned long, struct file *);
78 static int cfg_fasync(int, struct file*, int);
81 * This is the callback for any message we have posted. The message itself
82 * will be returned to the message pool when we return from the IRQ
84 * This runs in irq context so be short and sweet.
/* NOTE(review): lines are elided in this extract (opening brace, msg
 * declaration, several closes) — do not edit logic without the full file. */
86 static void i2o_cfg_reply(struct i2o_handler *h, struct i2o_controller *c, struct i2o_message *m)
90 if (msg[0] & MSG_FAIL) {
91 u32 *preserved_msg = (u32*)(c->mem_offset + msg[7]);
93 printk(KERN_ERR "i2o_config: IOP failed to process the msg.\n");
95 /* Release the preserved msg frame by resubmitting it as a NOP */
97 preserved_msg[0] = THREE_WORD_MSG_SIZE | SGL_OFFSET_0;
98 preserved_msg[1] = I2O_CMD_UTIL_NOP << 24 | HOST_TID << 12 | 0;
100 i2o_post_message(c, msg[7]);
103 if (msg[4] >> 24) // ReqStatus != SUCCESS
104 i2o_report_status(KERN_INFO,"i2o_config", msg);
/* Event replies: find the open file whose queue id matches the
 * transaction context and enqueue the event for it. */
106 if(m->function == I2O_CMD_UTIL_EVT_REGISTER)
108 struct i2o_cfg_info *inf;
110 for(inf = open_files; inf; inf = inf->next)
111 if(inf->q_id == msg[3])
115 // If this is the case, it means that we're getting
116 // events for a file descriptor that's been close()'d
117 // w/o the user unregistering for events first.
118 // The code currently assumes that the user will
119 // take care of unregistering for events before closing
123 // Should we track event registration and deregister
124 // for events when a file is close()'d so this doesn't
125 // happen? That would get rid of the search through
126 // the linked list since file->private_data could point
127 // directly to the i2o_config_info data structure...but
128 // it would mean having all sorts of tables to track
129 // what each file is registered for...I think the
130 // current method is simpler. - DS
135 inf->event_q[inf->q_in].id.iop = c->unit;
136 inf->event_q[inf->q_in].id.tid = m->target_tid;
137 inf->event_q[inf->q_in].id.evt_mask = msg[4];
140 // Data size = msg size - reply header
142 inf->event_q[inf->q_in].data_size = (m->size - 5) * 4;
143 if(inf->event_q[inf->q_in].data_size)
144 memcpy(inf->event_q[inf->q_in].evt_data,
145 (unsigned char *)(msg + 5),
146 inf->event_q[inf->q_in].data_size);
/* Bump the head index under the lock; if the queue is full the
 * oldest event is overwritten by advancing the tail as well. */
148 spin_lock(&i2o_config_lock);
149 MODINC(inf->q_in, I2O_EVT_Q_LEN);
150 if(inf->q_len == I2O_EVT_Q_LEN)
152 MODINC(inf->q_out, I2O_EVT_Q_LEN);
157 // Keep I2OEVTGET on another CPU from touching this
160 spin_unlock(&i2o_config_lock);
163 // printk(KERN_INFO "File %p w/id %d has %d events\n",
164 // inf->fp, inf->q_id, inf->q_len);
/* Wake any process that asked for SIGIO on this fd. */
166 kill_fasync(&inf->fasync, SIGIO, POLL_IN);
173 * Each of these describes an i2o message handler. They are
174 * multiplexed by the i2o_core code
/* NOTE(review): initializer fields are elided here; presumably the .reply
 * member points at i2o_cfg_reply — confirm against the full source. */
177 struct i2o_handler cfg_handler=
185 0xffffffff // All classes
/* write(2) on the config device is not implemented; logs and (in the
 * elided lines) presumably returns an error — confirm in full source. */
188 static ssize_t cfg_write(struct file *file, const char *buf, size_t count, loff_t *ppos)
190 printk(KERN_INFO "i2o_config write not yet supported\n");
/* read(2) handler; body is not visible in this extract. */
196 static ssize_t cfg_read(struct file *file, char *buf, size_t count, loff_t *ptr)
/* ioctl dispatcher: routes each I2O* command to its helper.  The switch
 * statement and case labels are elided from this extract. */
204 static int cfg_ioctl(struct inode *inode, struct file *fp, unsigned int cmd,
212 ret = ioctl_getiops(arg);
216 ret = ioctl_gethrt(arg);
220 ret = ioctl_getlct(arg);
224 ret = ioctl_parms(arg, I2OPARMSET);
228 ret = ioctl_parms(arg, I2OPARMGET);
232 ret = ioctl_swdl(arg);
236 ret = ioctl_swul(arg);
240 ret = ioctl_swdel(arg);
244 ret = ioctl_validate(arg);
248 ret = ioctl_html(arg);
252 ret = ioctl_evt_reg(arg, fp);
256 ret = ioctl_evt_get(arg, fp);
/* I2OGETIOPS: build a MAX_I2O_CONTROLLERS-byte presence table (in `foo`)
 * and copy it to user space.  Controllers whose PCI device cannot do
 * 32-bit DMA are reported but apparently skipped. */
266 int ioctl_getiops(unsigned long arg)
268 u8 *user_iop_table = (u8*)arg;
269 struct i2o_controller *c = NULL;
271 u8 foo[MAX_I2O_CONTROLLERS];
273 if(!access_ok(VERIFY_WRITE, user_iop_table, MAX_I2O_CONTROLLERS))
276 for(i = 0; i < MAX_I2O_CONTROLLERS; i++)
278 c = i2o_find_controller(i);
282 if(pci_set_dma_mask(c->pdev, 0xffffffff))
284 printk(KERN_WARNING "i2o_config : No suitable DMA available on controller %d\n", i);
285 i2o_unlock_controller(c);
289 i2o_unlock_controller(c);
/* access_ok was checked above, so the unchecked __copy_to_user is safe. */
297 __copy_to_user(user_iop_table, foo, MAX_I2O_CONTROLLERS);
/* I2OHRTGET: copy the controller's Hardware Resource Table to the user
 * buffer described by struct i2o_cmd_hrtlct, writing the byte length back
 * through kcmd.reslen. */
301 int ioctl_gethrt(unsigned long arg)
303 struct i2o_controller *c;
304 struct i2o_cmd_hrtlct *cmd = (struct i2o_cmd_hrtlct*)arg;
305 struct i2o_cmd_hrtlct kcmd;
311 if(copy_from_user(&kcmd, cmd, sizeof(struct i2o_cmd_hrtlct)))
314 if(get_user(reslen, kcmd.reslen) < 0)
317 if(kcmd.resbuf == NULL)
320 c = i2o_find_controller(kcmd.iop);
324 hrt = (i2o_hrt *)c->hrt;
326 i2o_unlock_controller(c);
/* HRT size: 8-byte header plus entry_len (in 32-bit words) per entry. */
328 len = 8 + ((hrt->entry_len * hrt->num_entries) << 2);
330 /* We did a get user...so assuming mem is ok...is this bad? */
331 put_user(len, kcmd.reslen);
334 if(copy_to_user(kcmd.resbuf, (void*)hrt, len))
/* I2OLCTGET: copy the controller's Logical Configuration Table to the
 * user buffer, writing the byte length back through kcmd.reslen. */
340 int ioctl_getlct(unsigned long arg)
342 struct i2o_controller *c;
343 struct i2o_cmd_hrtlct *cmd = (struct i2o_cmd_hrtlct*)arg;
344 struct i2o_cmd_hrtlct kcmd;
350 if(copy_from_user(&kcmd, cmd, sizeof(struct i2o_cmd_hrtlct)))
353 if(get_user(reslen, kcmd.reslen) < 0)
356 if(kcmd.resbuf == NULL)
359 c = i2o_find_controller(kcmd.iop);
363 lct = (i2o_lct *)c->lct;
364 i2o_unlock_controller(c);
/* table_size is in 32-bit words; convert to bytes. */
366 len = (unsigned int)lct->table_size << 2;
367 put_user(len, kcmd.reslen);
370 else if(copy_to_user(kcmd.resbuf, (void*)lct, len))
/* I2OPARMGET / I2OPARMSET: issue a UtilParamsGet/Set to a device (tid) on
 * a controller.  The operation list is copied in from user space and the
 * (up to 64 KiB) result table copied back out. */
376 static int ioctl_parms(unsigned long arg, unsigned int type)
379 struct i2o_controller *c;
380 struct i2o_cmd_psetget *cmd = (struct i2o_cmd_psetget*)arg;
381 struct i2o_cmd_psetget kcmd;
387 u32 i2o_cmd = (type == I2OPARMGET ?
388 I2O_CMD_UTIL_PARAMS_GET :
389 I2O_CMD_UTIL_PARAMS_SET);
391 if(copy_from_user(&kcmd, cmd, sizeof(struct i2o_cmd_psetget)))
394 if(get_user(reslen, kcmd.reslen))
397 c = i2o_find_controller(kcmd.iop);
401 ops = (u8*)kmalloc(kcmd.oplen, GFP_KERNEL);
404 i2o_unlock_controller(c);
408 if(copy_from_user(ops, kcmd.opbuf, kcmd.oplen))
410 i2o_unlock_controller(c);
416 * It's possible to have a _very_ large table
417 * and that the user asks for all of it at once...
419 res = (u8*)kmalloc(65536, GFP_KERNEL);
422 i2o_unlock_controller(c);
427 len = i2o_issue_params(i2o_cmd, c, kcmd.tid,
428 ops, kcmd.oplen, res, 65536);
429 i2o_unlock_controller(c);
437 put_user(len, kcmd.reslen);
440 else if(copy_to_user(kcmd.resbuf, res, len))
/* I2OHTML: fetch an HTML configuration dialog page from a device via
 * UtilConfigDialog.  Optional POST data (kcmd.qlen bytes) is sent in a
 * second SG element; the reply page (up to 64 KiB) is copied to user
 * space.  DMA buffers come from pci_alloc_consistent. */
448 int ioctl_html(unsigned long arg)
450 struct i2o_html *cmd = (struct i2o_html*)arg;
451 struct i2o_html kcmd;
452 struct i2o_controller *c;
455 dma_addr_t query_phys, res_phys;
460 u32 msg[MSG_FRAME_SIZE];
462 if(copy_from_user(&kcmd, cmd, sizeof(struct i2o_html)))
464 printk(KERN_INFO "i2o_config: can't copy html cmd\n");
468 if(get_user(reslen, kcmd.reslen) < 0)
470 printk(KERN_INFO "i2o_config: can't copy html reslen\n");
476 printk(KERN_INFO "i2o_config: NULL html buffer\n");
480 c = i2o_find_controller(kcmd.iop);
484 if(kcmd.qlen) /* Check for post data */
486 query = pci_alloc_consistent(c->pdev, kcmd.qlen, &query_phys);
489 i2o_unlock_controller(c);
492 if(copy_from_user(query, kcmd.qbuf, kcmd.qlen))
494 i2o_unlock_controller(c);
495 printk(KERN_INFO "i2o_config: could not get query\n");
496 pci_free_consistent(c->pdev, kcmd.qlen, query, query_phys);
501 res = pci_alloc_consistent(c->pdev, 65536, &res_phys);
504 i2o_unlock_controller(c);
505 pci_free_consistent(c->pdev, kcmd.qlen, query, query_phys);
/* Build the UtilConfigDialog message; SG list starts at word 5. */
509 msg[1] = (I2O_CMD_UTIL_CONFIG_DIALOG << 24)|HOST_TID<<12|kcmd.tid;
510 msg[2] = i2o_cfg_context;
513 msg[5] = 0xD0000000|65536;
515 if(!kcmd.qlen) /* Check for post data */
516 msg[0] = SEVEN_WORD_MSG_SIZE|SGL_OFFSET_5;
/* With POST data: result SG element first, then the query element. */
519 msg[0] = NINE_WORD_MSG_SIZE|SGL_OFFSET_5;
520 msg[5] = 0x50000000|65536;
521 msg[7] = 0xD4000000|(kcmd.qlen);
525 Wait for a considerable time till the Controller
526 does its job before timing out. The controller might
527 take more time to process this request if there are
528 many devices connected to it.
530 token = i2o_post_wait_mem(c, msg, 9*4, 400, query, res, query_phys, res_phys, kcmd.qlen, 65536);
533 printk(KERN_DEBUG "token = %#10x\n", token);
534 i2o_unlock_controller(c);
/* On timeout the buffers stay owned by i2o_post_wait_mem; only free
 * them here for non-timeout failures. */
536 if(token != -ETIMEDOUT)
538 pci_free_consistent(c->pdev, 65536, res, res_phys);
540 pci_free_consistent(c->pdev, kcmd.qlen, query, query_phys);
544 i2o_unlock_controller(c);
/* Reply is a NUL-terminated HTML page (bounded at 64 KiB). */
546 len = strnlen(res, 65536);
547 put_user(len, kcmd.reslen);
550 if(copy_to_user(kcmd.resbuf, res, len))
553 pci_free_consistent(c->pdev, 65536, res, res_phys);
555 pci_free_consistent(c->pdev, kcmd.qlen, query, query_phys);
/* I2OSWDL: download one 8 KiB fragment of a software image to the IOP via
 * ExecSwDownload.  The final fragment may be shorter (computed from swlen).
 * Fragments must be sent in order; the firmware rejects out-of-order sends. */
560 int ioctl_swdl(unsigned long arg)
562 struct i2o_sw_xfer kxfer;
563 struct i2o_sw_xfer *pxfer = (struct i2o_sw_xfer *)arg;
564 unsigned char maxfrag = 0, curfrag = 1;
565 unsigned char *buffer;
567 unsigned int status = 0, swlen = 0, fragsize = 8192;
568 struct i2o_controller *c;
569 dma_addr_t buffer_phys;
571 if(copy_from_user(&kxfer, pxfer, sizeof(struct i2o_sw_xfer)))
574 if(get_user(swlen, kxfer.swlen) < 0)
577 if(get_user(maxfrag, kxfer.maxfrag) < 0)
580 if(get_user(curfrag, kxfer.curfrag) < 0)
/* Last fragment carries the remainder of the image. */
583 if(curfrag==maxfrag) fragsize = swlen-(maxfrag-1)*8192;
585 if(!kxfer.buf || !access_ok(VERIFY_READ, kxfer.buf, fragsize))
588 c = i2o_find_controller(kxfer.iop);
592 buffer=pci_alloc_consistent(c->pdev, fragsize, &buffer_phys);
595 i2o_unlock_controller(c);
/* access_ok verified above, so the unchecked copy is safe. */
598 __copy_from_user(buffer, kxfer.buf, fragsize);
600 msg[0]= NINE_WORD_MSG_SIZE | SGL_OFFSET_7;
601 msg[1]= I2O_CMD_SW_DOWNLOAD<<24 | HOST_TID<<12 | ADAPTER_TID;
602 msg[2]= (u32)cfg_handler.context;
604 msg[4]= (((u32)kxfer.flags)<<24) | (((u32)kxfer.sw_type)<<16) |
605 (((u32)maxfrag)<<8) | (((u32)curfrag));
608 msg[7]= (0xD0000000 | fragsize);
611 // printk("i2o_config: swdl frag %d/%d (size %d)\n", curfrag, maxfrag, fragsize);
612 status = i2o_post_wait_mem(c, msg, sizeof(msg), 60, buffer, NULL, buffer_phys, 0, fragsize, 0);
614 i2o_unlock_controller(c);
/* On timeout i2o_post_wait_mem keeps ownership of the DMA buffer. */
615 if(status != -ETIMEDOUT)
616 pci_free_consistent(c->pdev, fragsize, buffer, buffer_phys);
618 if (status != I2O_POST_WAIT_OK)
620 // it fails if you try and send frags out of order
621 // and for some yet unknown reasons too
622 printk(KERN_INFO "i2o_config: swdl failed, DetailedStatus = %d\n", status);
/* I2OSWUL: upload one 8 KiB fragment of a software image from the IOP via
 * ExecSwUpload and copy it to the user buffer.  Mirrors ioctl_swdl() but
 * in the device-to-host direction. */
629 int ioctl_swul(unsigned long arg)
631 struct i2o_sw_xfer kxfer;
632 struct i2o_sw_xfer *pxfer = (struct i2o_sw_xfer *)arg;
633 unsigned char maxfrag = 0, curfrag = 1;
634 unsigned char *buffer;
636 unsigned int status = 0, swlen = 0, fragsize = 8192;
637 struct i2o_controller *c;
638 dma_addr_t buffer_phys;
640 if(copy_from_user(&kxfer, pxfer, sizeof(struct i2o_sw_xfer)))
643 if(get_user(swlen, kxfer.swlen) < 0)
646 if(get_user(maxfrag, kxfer.maxfrag) < 0)
649 if(get_user(curfrag, kxfer.curfrag) < 0)
/* Last fragment carries the remainder of the image. */
652 if(curfrag==maxfrag) fragsize = swlen-(maxfrag-1)*8192;
654 if(!kxfer.buf || !access_ok(VERIFY_WRITE, kxfer.buf, fragsize))
657 c = i2o_find_controller(kxfer.iop);
661 buffer=pci_alloc_consistent(c->pdev, fragsize, &buffer_phys);
664 i2o_unlock_controller(c);
668 msg[0]= NINE_WORD_MSG_SIZE | SGL_OFFSET_7;
669 msg[1]= I2O_CMD_SW_UPLOAD<<24 | HOST_TID<<12 | ADAPTER_TID;
670 msg[2]= (u32)cfg_handler.context;
672 msg[4]= (u32)kxfer.flags<<24|(u32)kxfer.sw_type<<16|(u32)maxfrag<<8|(u32)curfrag;
675 msg[7]= (0xD0000000 | fragsize);
678 // printk("i2o_config: swul frag %d/%d (size %d)\n", curfrag, maxfrag, fragsize);
679 status = i2o_post_wait_mem(c, msg, sizeof(msg), 60, buffer, NULL, buffer_phys, 0, fragsize, 0);
680 i2o_unlock_controller(c);
682 if (status != I2O_POST_WAIT_OK)
/* On timeout i2o_post_wait_mem keeps ownership of the DMA buffer. */
684 if(status != -ETIMEDOUT)
685 pci_free_consistent(c->pdev, fragsize, buffer, buffer_phys);
686 printk(KERN_INFO "i2o_config: swul failed, DetailedStatus = %d\n", status);
/* access_ok(VERIFY_WRITE) was checked above, so unchecked copy is safe. */
690 __copy_to_user(kxfer.buf, buffer, fragsize);
691 pci_free_consistent(c->pdev, fragsize, buffer, buffer_phys);
/* I2OSWDEL: remove a stored software image from the IOP via ExecSwRemove.
 * No SG list is needed; the image is identified by flags/type/id. */
696 int ioctl_swdel(unsigned long arg)
698 struct i2o_controller *c;
699 struct i2o_sw_xfer kxfer, *pxfer = (struct i2o_sw_xfer *)arg;
704 if (copy_from_user(&kxfer, pxfer, sizeof(struct i2o_sw_xfer)))
707 if (get_user(swlen, kxfer.swlen) < 0)
710 c = i2o_find_controller(kxfer.iop);
714 msg[0] = SEVEN_WORD_MSG_SIZE | SGL_OFFSET_0;
715 msg[1] = I2O_CMD_SW_REMOVE<<24 | HOST_TID<<12 | ADAPTER_TID;
716 msg[2] = (u32)i2o_cfg_context;
718 msg[4] = (u32)kxfer.flags<<24 | (u32)kxfer.sw_type<<16;
720 msg[6] = kxfer.sw_id;
722 token = i2o_post_wait(c, msg, sizeof(msg), 10);
723 i2o_unlock_controller(c);
725 if (token != I2O_POST_WAIT_OK)
727 printk(KERN_INFO "i2o_config: swdel failed, DetailedStatus = %d\n", token);
/* I2OVALIDATE: ask the IOP to validate its current configuration via
 * ExecConfigValidate.  `iop` is presumably taken from arg in an elided
 * line — confirm against the full source. */
734 int ioctl_validate(unsigned long arg)
739 struct i2o_controller *c;
741 c=i2o_find_controller(iop);
745 msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
746 msg[1] = I2O_CMD_CONFIG_VALIDATE<<24 | HOST_TID<<12 | iop;
747 msg[2] = (u32)i2o_cfg_context;
750 token = i2o_post_wait(c, msg, sizeof(msg), 10);
751 i2o_unlock_controller(c);
753 if (token != I2O_POST_WAIT_OK)
755 printk(KERN_INFO "Can't validate configuration, ErrorStatus = %d\n",
/* I2OEVTREG: register this open file for events from a device.  The file's
 * queue id (stored in fp->private_data) is used as the transaction context
 * so i2o_cfg_reply() can route the events back to the right queue. */
763 static int ioctl_evt_reg(unsigned long arg, struct file *fp)
766 struct i2o_evt_id *pdesc = (struct i2o_evt_id *)arg;
767 struct i2o_evt_id kdesc;
768 struct i2o_controller *iop;
769 struct i2o_device *d;
771 if (copy_from_user(&kdesc, pdesc, sizeof(struct i2o_evt_id)))
775 iop = i2o_find_controller(kdesc.iop);
778 i2o_unlock_controller(iop);
/* Verify the target tid actually exists on this controller. */
781 for(d = iop->devices; d; d = d->next)
782 if(d->lct_data.tid == kdesc.tid)
788 msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
789 msg[1] = I2O_CMD_UTIL_EVT_REGISTER<<24 | HOST_TID<<12 | kdesc.tid;
790 msg[2] = (u32)i2o_cfg_context;
791 msg[3] = (u32)fp->private_data;
792 msg[4] = kdesc.evt_mask;
/* Fire-and-forget: replies arrive asynchronously in i2o_cfg_reply(). */
794 i2o_post_this(iop, msg, 20);
/* I2OEVTGET: pop the oldest queued event for this open file and copy it,
 * along with pending/lost counts, to user space.
 * NOTE(review): the copy out of event_q and the MODINC appear to happen
 * before the spinlock is taken — potential race with i2o_cfg_reply();
 * verify ordering against the full source before changing. */
799 static int ioctl_evt_get(unsigned long arg, struct file *fp)
801 u32 id = (u32)fp->private_data;
802 struct i2o_cfg_info *p = NULL;
803 struct i2o_evt_get *uget = (struct i2o_evt_get*)arg;
804 struct i2o_evt_get kget;
807 for(p = open_files; p; p = p->next)
817 memcpy(&kget.info, &p->event_q[p->q_out], sizeof(struct i2o_evt_info));
818 MODINC(p->q_out, I2O_EVT_Q_LEN);
819 spin_lock_irqsave(&i2o_config_lock, flags);
821 kget.pending = p->q_len;
822 kget.lost = p->q_lost;
823 spin_unlock_irqrestore(&i2o_config_lock, flags);
825 if(copy_to_user(uget, &kget, sizeof(struct i2o_evt_get)))
/* open(2): allocate a per-file i2o_cfg_info record, give it a unique queue
 * id (stashed in file->private_data), and link it onto open_files under
 * the config lock. */
830 static int cfg_open(struct inode *inode, struct file *file)
832 struct i2o_cfg_info *tmp =
833 (struct i2o_cfg_info *)kmalloc(sizeof(struct i2o_cfg_info), GFP_KERNEL);
839 file->private_data = (void*)(i2o_cfg_info_id++);
842 tmp->q_id = (u32)file->private_data;
847 tmp->next = open_files;
849 spin_lock_irqsave(&i2o_config_lock, flags);
851 spin_unlock_irqrestore(&i2o_config_lock, flags);
/* release(2): find this file's i2o_cfg_info by queue id, tear down its
 * fasync registration, and unlink/free it under the config lock. */
856 static int cfg_release(struct inode *inode, struct file *file)
858 u32 id = (u32)file->private_data;
859 struct i2o_cfg_info *p1, *p2;
865 spin_lock_irqsave(&i2o_config_lock, flags);
866 for(p1 = open_files; p1; )
872 cfg_fasync(-1, file, 0);
876 open_files = p1->next;
884 spin_unlock_irqrestore(&i2o_config_lock, flags);
/* fasync handler: locate this file's i2o_cfg_info and (de)register the
 * caller for SIGIO via the standard fasync_helper(). */
890 static int cfg_fasync(int fd, struct file *fp, int on)
892 u32 id = (u32)fp->private_data;
893 struct i2o_cfg_info *p;
895 for(p = open_files; p; p = p->next)
902 return fasync_helper(fd, fp, on, &p->fasync);
/* Character-device entry points and the misc device node registered in
 * i2o_config_init().  Several initializer fields (read/write/ioctl/open,
 * miscdev minor and name) are elided from this extract. */
905 static struct file_operations config_fops =
907 .owner = THIS_MODULE,
913 .release = cfg_release,
914 .fasync = cfg_fasync,
917 static struct miscdevice i2o_miscdev = {
/* Module init: allocate the scratch page, register the misc device, and
 * install the i2o message handler whose context tags all posted messages. */
923 static int __init i2o_config_init(void)
925 printk(KERN_INFO "I2O configuration manager v 0.04.\n");
926 printk(KERN_INFO " (C) Copyright 1999 Red Hat Software\n");
928 if((page_buf = kmalloc(4096, GFP_KERNEL))==NULL)
930 printk(KERN_ERR "i2o_config: no memory for page buffer.\n");
933 if(misc_register(&i2o_miscdev) < 0)
935 printk(KERN_ERR "i2o_config: can't register device.\n");
940 * Install our handler
942 if(i2o_install_handler(&cfg_handler)<0)
945 printk(KERN_ERR "i2o_config: handler register failed.\n");
946 misc_deregister(&i2o_miscdev);
950 * The low 16bits of the transaction context must match this
951 * for everything we post. Otherwise someone else gets our mail
953 i2o_cfg_context = cfg_handler.context;
/* Module exit: unregister the misc device and, if init succeeded far
 * enough to set i2o_cfg_context, remove the message handler. */
957 static void i2o_config_exit(void)
959 misc_deregister(&i2o_miscdev);
963 if(i2o_cfg_context != -1)
964 i2o_remove_handler(&cfg_handler);
/* Standard module metadata and entry points. */
967 MODULE_AUTHOR("Red Hat Software");
968 MODULE_DESCRIPTION("I2O Configuration");
969 MODULE_LICENSE("GPL");
971 module_init(i2o_config_init);
972 module_exit(i2o_config_exit);