4 * Linux device interface for the IPMI message handler.
6 * Author: MontaVista Software, Inc.
7 * Corey Minyard <minyard@mvista.com>
10 * Copyright 2002 MontaVista Software Inc.
12 * This program is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by the
14 * Free Software Foundation; either version 2 of the License, or (at your
15 * option) any later version.
18 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
19 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
20 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
23 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
24 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
25 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
26 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
27 * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 * You should have received a copy of the GNU General Public License along
30 * with this program; if not, write to the Free Software Foundation, Inc.,
31 * 675 Mass Ave, Cambridge, MA 02139, USA.
34 #include <linux/config.h>
35 #include <linux/module.h>
36 #include <linux/moduleparam.h>
37 #include <linux/errno.h>
38 #include <asm/system.h>
39 #include <linux/sched.h>
40 #include <linux/poll.h>
41 #include <linux/spinlock.h>
42 #include <linux/slab.h>
43 #include <linux/devfs_fs_kernel.h>
44 #include <linux/ipmi.h>
45 #include <asm/semaphore.h>
46 #include <linux/init.h>
/* Version string reported at module load time. */
48 #define IPMI_DEVINTF_VERSION "v32"
/*
 * Per-open-file state for the /dev/ipmiN character device.  One instance
 * is allocated in ipmi_open() and stored in file->private_data.
 * NOTE(review): this excerpt elides some members (e.g. the ipmi user
 * handle and the default_retries field referenced elsewhere in the file).
 */
50 struct ipmi_file_private
/* Protects recv_msgs; taken with irqsave because the receive handler
   may run in interrupt context. */
53 spinlock_t recv_msg_lock;
/* Queue of received struct ipmi_recv_msg, oldest first. */
54 struct list_head recv_msgs;
/* SIGIO delivery list for fasync() users. */
56 struct fasync_struct *fasync_queue;
/* Readers sleeping in poll()/select() wait here. */
57 wait_queue_head_t wait;
/* Serializes IPMICTL_RECEIVE_MSG* readers so dequeued messages cannot
   be interleaved or reordered on error putback. */
58 struct semaphore recv_sem;
/* Default retry interval (ms) applied to sends; 0 selects the
   low-level driver default. */
60 unsigned int default_retry_time_ms;
/*
 * Receive callback registered with the IPMI core (via ipmi_hndlrs).
 * Queues the incoming message on the per-file list and notifies any
 * waiters.  Uses irqsave locking since it may be called from interrupt
 * context.  NOTE(review): declarations of "flags"/"was_empty" and the
 * surrounding braces are elided in this excerpt; the wakeup is
 * presumably conditional on was_empty — confirm against the full file.
 */
63 static void file_receive_handler(struct ipmi_recv_msg *msg,
66 struct ipmi_file_private *priv = handler_data;
70 spin_lock_irqsave(&(priv->recv_msg_lock), flags);
/* Remember whether the queue was empty so waiters need waking. */
72 was_empty = list_empty(&(priv->recv_msgs));
73 list_add_tail(&(msg->link), &(priv->recv_msgs));
/* Wake poll()/read() sleepers and deliver SIGIO to async users. */
76 wake_up_interruptible(&priv->wait);
77 kill_fasync(&priv->fasync_queue, SIGIO, POLL_IN);
80 spin_unlock_irqrestore(&(priv->recv_msg_lock), flags);
/*
 * poll() entry point: reports POLLIN|POLLRDNORM when at least one
 * received message is queued.  NOTE(review): the "flags" declaration
 * and the final "return mask;" are elided in this excerpt.
 */
83 static unsigned int ipmi_poll(struct file *file, poll_table *wait)
85 struct ipmi_file_private *priv = file->private_data;
86 unsigned int mask = 0;
/* Register on the wait queue so the receive handler's wakeup reaches
   this poller. */
89 poll_wait(file, &priv->wait, wait);
91 spin_lock_irqsave(&priv->recv_msg_lock, flags);
93 if (! list_empty(&(priv->recv_msgs)))
94 mask |= (POLLIN | POLLRDNORM);
96 spin_unlock_irqrestore(&priv->recv_msg_lock, flags);
/*
 * fasync() entry point: registers/unregisters this file for SIGIO
 * notification via the standard fasync_helper().  Also called from
 * ipmi_release() with on==0 to tear down the entry.
 */
101 static int ipmi_fasync(int fd, struct file *file, int on)
103 struct ipmi_file_private *priv = file->private_data;
106 result = fasync_helper(fd, file, on, &priv->fasync_queue);
/* Handler table passed to ipmi_create_user(); routes received messages
   into file_receive_handler() with the ipmi_file_private as data. */
111 static struct ipmi_user_hndl ipmi_hndlrs =
113 .ipmi_recv_hndl = file_receive_handler,
/*
 * open() entry point for /dev/ipmiN.  The minor number selects which
 * IPMI interface this file talks to.  Allocates the per-file state,
 * creates an IPMI user on the interface, and initializes locks, the
 * receive queue and the default timing parameters.
 * NOTE(review): error handling for kmalloc/ipmi_create_user failures
 * and the final "return 0;" are elided in this excerpt.
 */
116 static int ipmi_open(struct inode *inode, struct file *file)
118 int if_num = iminor(inode);
120 struct ipmi_file_private *priv;
123 priv = kmalloc(sizeof(*priv), GFP_KERNEL);
/* Register with the message handler; priv is the handler_data that
   comes back in file_receive_handler(). */
129 rv = ipmi_create_user(if_num,
138 file->private_data = priv;
140 spin_lock_init(&(priv->recv_msg_lock));
141 INIT_LIST_HEAD(&(priv->recv_msgs));
142 init_waitqueue_head(&priv->wait);
143 priv->fasync_queue = NULL;
/* Binary semaphore: serializes IPMICTL_RECEIVE_MSG* readers. */
144 sema_init(&(priv->recv_sem), 1);
146 /* Use the low-level defaults. */
/* -1 retries / 0 ms tells ipmi_request_settime() to use the driver's
   built-in defaults. */
147 priv->default_retries = -1;
148 priv->default_retry_time_ms = 0;
/*
 * release() entry point: destroys the IPMI user, tears down the SIGIO
 * registration and frees the per-file state.
 * NOTE(review): the FIXME below marks a real leak — any messages still
 * queued on priv->recv_msgs are never freed before priv is released.
 * The fix (walking the list and calling ipmi_free_recv_msg() on each
 * entry) cannot be written safely here because the kfree/return lines
 * of this function are elided from this excerpt.
 */
153 static int ipmi_release(struct inode *inode, struct file *file)
155 struct ipmi_file_private *priv = file->private_data;
158 rv = ipmi_destroy_user(priv->user);
/* Drop this file from the fasync notification list. */
162 ipmi_fasync (-1, file, 0);
164 /* FIXME - free the messages in the list. */
/*
 * Common worker for IPMICTL_SEND_COMMAND and IPMICTL_SEND_COMMAND_SETTIME:
 * copies the destination address and message payload in from user space,
 * validates them, and hands the request to the IPMI core with the given
 * retry policy.  Uses a single kmalloc'd bounce buffer for the payload
 * and the goto-"out" cleanup pattern for error exits.
 * NOTE(review): several error-path lines (EFAULT/ENOMEM returns, the
 * "out:" label and kfree(msgdata)) are elided in this excerpt.
 */
170 static int handle_send_req(ipmi_user_t user,
171 struct ipmi_req *req,
173 unsigned int retry_time_ms)
176 struct ipmi_addr addr;
177 unsigned char *msgdata;
/* Reject over-long address blobs before copying into the fixed-size
   stack buffer. */
179 if (req->addr_len > sizeof(struct ipmi_addr))
182 if (copy_from_user(&addr, req->addr, req->addr_len))
/* Kernel-side bounce buffer for the message payload. */
185 msgdata = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
189 /* From here out we cannot return, we must jump to "out" for
190 error exits to free msgdata. */
192 rv = ipmi_validate_addr(&addr, req->addr_len);
196 if (req->msg.data != NULL) {
/* Bound the payload to the bounce buffer size. */
197 if (req->msg.data_len > IPMI_MAX_MSG_LENGTH) {
202 if (copy_from_user(msgdata,
/* NULL data pointer means an empty message. */
210 req->msg.data_len = 0;
/* Point the request at the kernel copy before submitting. */
212 req->msg.data = msgdata;
214 rv = ipmi_request_settime(user,
/*
 * Main ioctl dispatcher for the IPMI device.  Copies request/response
 * structures across the user-space boundary and forwards operations to
 * the IPMI message handler.
 * NOTE(review): this excerpt elides many lines — local declarations
 * (rv, flags, addr_len, the "int cmd"/"unsigned long data" parameters),
 * most error assignments/breaks, and closing braces.  Comments below
 * describe only what the visible lines establish.
 */
227 static int ipmi_ioctl(struct inode *inode,
233 struct ipmi_file_private *priv = file->private_data;
234 void __user *arg = (void __user *)data;
/* Send a message using this file's default retry policy. */
238 case IPMICTL_SEND_COMMAND:
242 if (copy_from_user(&req, arg, sizeof(req))) {
247 rv = handle_send_req(priv->user,
249 priv->default_retries,
250 priv->default_retry_time_ms);
/* Send a message with caller-supplied retries/retry interval. */
254 case IPMICTL_SEND_COMMAND_SETTIME:
256 struct ipmi_req_settime req;
258 if (copy_from_user(&req, arg, sizeof(req))) {
263 rv = handle_send_req(priv->user,
/* Dequeue the oldest received message.  The _TRUNC variant truncates
   the payload to the user's buffer instead of failing when it is too
   small. */
270 case IPMICTL_RECEIVE_MSG:
271 case IPMICTL_RECEIVE_MSG_TRUNC:
273 struct ipmi_recv rsp;
275 struct list_head *entry;
276 struct ipmi_recv_msg *msg;
281 if (copy_from_user(&rsp, arg, sizeof(rsp))) {
286 /* We claim a semaphore because we don't want two
287 users getting something from the queue at a time.
288 Since we have to release the spinlock before we can
289 copy the data to the user, it's possible another
290 user will grab something from the queue, too. Then
291 the messages might get out of order if something
292 fails and the message gets put back onto the
293 queue. This semaphore prevents that problem. */
294 down(&(priv->recv_sem));
296 /* Grab the message off the list. */
297 spin_lock_irqsave(&(priv->recv_msg_lock), flags);
298 if (list_empty(&(priv->recv_msgs))) {
299 spin_unlock_irqrestore(&(priv->recv_msg_lock), flags);
303 entry = priv->recv_msgs.next;
304 msg = list_entry(entry, struct ipmi_recv_msg, link);
/* Spinlock dropped here; recv_sem still guards ordering while we
   copy out to user space (which may sleep/fault). */
306 spin_unlock_irqrestore(&(priv->recv_msg_lock), flags);
308 addr_len = ipmi_addr_length(msg->addr.addr_type);
/* User's address buffer too small: put the message back. */
309 if (rsp.addr_len < addr_len)
312 goto recv_putback_on_err;
315 if (copy_to_user(rsp.addr, &(msg->addr), addr_len)) {
317 goto recv_putback_on_err;
319 rsp.addr_len = addr_len;
321 rsp.recv_type = msg->recv_type;
322 rsp.msgid = msg->msgid;
323 rsp.msg.netfn = msg->msg.netfn;
324 rsp.msg.cmd = msg->msg.cmd;
326 if (msg->msg.data_len > 0) {
327 if (rsp.msg.data_len < msg->msg.data_len) {
/* TRUNC: clip the payload to fit; plain RECEIVE: error out and
   requeue the message. */
329 if (cmd == IPMICTL_RECEIVE_MSG_TRUNC) {
330 msg->msg.data_len = rsp.msg.data_len;
332 goto recv_putback_on_err;
336 if (copy_to_user(rsp.msg.data,
341 goto recv_putback_on_err;
343 rsp.msg.data_len = msg->msg.data_len;
345 rsp.msg.data_len = 0;
348 if (copy_to_user(arg, &rsp, sizeof(rsp))) {
350 goto recv_putback_on_err;
/* Success: release the reader lock and free the consumed message. */
353 up(&(priv->recv_sem));
354 ipmi_free_recv_msg(msg);
358 /* If we got an error, put the message back onto
359 the head of the queue. */
360 spin_lock_irqsave(&(priv->recv_msg_lock), flags);
361 list_add(entry, &(priv->recv_msgs));
362 spin_unlock_irqrestore(&(priv->recv_msg_lock), flags);
363 up(&(priv->recv_sem));
367 up(&(priv->recv_sem));
/* (Un)register interest in a netfn/cmd pair so matching incoming
   commands are routed to this user. */
371 case IPMICTL_REGISTER_FOR_CMD:
373 struct ipmi_cmdspec val;
375 if (copy_from_user(&val, arg, sizeof(val))) {
380 rv = ipmi_register_for_cmd(priv->user, val.netfn, val.cmd);
384 case IPMICTL_UNREGISTER_FOR_CMD:
386 struct ipmi_cmdspec val;
388 if (copy_from_user(&val, arg, sizeof(val))) {
393 rv = ipmi_unregister_for_cmd(priv->user, val.netfn, val.cmd);
/* Enable/disable delivery of asynchronous events to this user. */
397 case IPMICTL_SET_GETS_EVENTS_CMD:
401 if (copy_from_user(&val, arg, sizeof(val))) {
406 rv = ipmi_set_gets_events(priv->user, val);
/* Get/set the slave address and LUN used as the source of sends. */
410 case IPMICTL_SET_MY_ADDRESS_CMD:
414 if (copy_from_user(&val, arg, sizeof(val))) {
419 ipmi_set_my_address(priv->user, val);
424 case IPMICTL_GET_MY_ADDRESS_CMD:
428 val = ipmi_get_my_address(priv->user);
430 if (copy_to_user(arg, &val, sizeof(val))) {
438 case IPMICTL_SET_MY_LUN_CMD:
442 if (copy_from_user(&val, arg, sizeof(val))) {
447 ipmi_set_my_LUN(priv->user, val);
452 case IPMICTL_GET_MY_LUN_CMD:
456 val = ipmi_get_my_LUN(priv->user);
458 if (copy_to_user(arg, &val, sizeof(val))) {
/* Get/set the per-file default retry policy used by SEND_COMMAND. */
465 case IPMICTL_SET_TIMING_PARMS_CMD:
467 struct ipmi_timing_parms parms;
469 if (copy_from_user(&parms, arg, sizeof(parms))) {
474 priv->default_retries = parms.retries;
475 priv->default_retry_time_ms = parms.retry_time_ms;
480 case IPMICTL_GET_TIMING_PARMS_CMD:
482 struct ipmi_timing_parms parms;
484 parms.retries = priv->default_retries;
485 parms.retry_time_ms = priv->default_retry_time_ms;
487 if (copy_to_user(arg, &parms, sizeof(parms))) {
/* VFS operations table for the IPMI character device.
   NOTE(review): the .ioctl/.poll/.open entries are elided in this
   excerpt but are registered by the lines that remain visible. */
501 static struct file_operations ipmi_fops = {
502 .owner = THIS_MODULE,
505 .release = ipmi_release,
506 .fasync = ipmi_fasync,
/* Name used for chrdev registration and the devfs directory. */
510 #define DEVICE_NAME "ipmidev"
/* Major number; 0 (the default) asks the kernel to allocate one
   dynamically at register_chrdev() time. */
512 static int ipmi_major = 0;
513 module_param(ipmi_major, int, 0);
514 MODULE_PARM_DESC(ipmi_major, "Sets the major number of the IPMI device. By"
515 " default, or if you set it to zero, it will choose the next"
516 " available device. Setting it to -1 will disable the"
517 " interface. Other values will set the major device number"
/* SMI-watcher callback: a new IPMI interface appeared; create its
   devfs node ipmidev/<if_num> with owner read/write permission. */
520 static void ipmi_new_smi(int if_num)
522 devfs_mk_cdev(MKDEV(ipmi_major, if_num),
523 S_IFCHR | S_IRUSR | S_IWUSR,
524 "ipmidev/%d", if_num);
/* SMI-watcher callback: an interface went away; remove its devfs node. */
527 static void ipmi_smi_gone(int if_num)
529 devfs_remove("ipmidev/%d", if_num);
/* Registered with the IPMI core so device nodes track interface
   hotplug (creation/removal). */
532 static struct ipmi_smi_watcher smi_watcher =
534 .owner = THIS_MODULE,
535 .new_smi = ipmi_new_smi,
536 .smi_gone = ipmi_smi_gone,
/*
 * Module init: register the character device (dynamic major if
 * ipmi_major == 0), create the devfs directory, then register the SMI
 * watcher so per-interface nodes are created as interfaces appear.
 * On watcher-registration failure the chrdev is unregistered again.
 * NOTE(review): the rv checks, the dynamic-major assignment and the
 * final return are elided in this excerpt.
 */
539 static __init int init_ipmi_devintf(void)
546 printk(KERN_INFO "ipmi device interface version "
547 IPMI_DEVINTF_VERSION "\n");
549 rv = register_chrdev(ipmi_major, DEVICE_NAME, &ipmi_fops);
551 printk(KERN_ERR "ipmi: can't get major %d\n", ipmi_major);
/* Dynamic registration: record the major the kernel picked
   (assignment elided in this excerpt). */
555 if (ipmi_major == 0) {
559 devfs_mk_dir(DEVICE_NAME);
561 rv = ipmi_smi_watcher_register(&smi_watcher);
/* Roll back the chrdev registration on watcher failure. */
563 unregister_chrdev(ipmi_major, DEVICE_NAME);
564 printk(KERN_WARNING "ipmi: can't register smi watcher\n");
570 module_init(init_ipmi_devintf);
/* Module exit: tear down in reverse order of init — stop watching for
   interfaces, remove the devfs directory, drop the chrdev major. */
572 static __exit void cleanup_ipmi(void)
574 ipmi_smi_watcher_unregister(&smi_watcher);
575 devfs_remove(DEVICE_NAME);
576 unregister_chrdev(ipmi_major, DEVICE_NAME);
578 module_exit(cleanup_ipmi);
580 MODULE_LICENSE("GPL");