/*********************************************************************
 *
 * sir_kthread.c:	dedicated thread to process scheduled
 *			sir device setup requests
 *
 * Copyright (c) 2002 Martin Diehl
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of
 * the License, or (at your option) any later version.
 *
 ********************************************************************/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/version.h>
#include <linux/init.h>
#include <linux/smp_lock.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/suspend.h>

#include <net/irda/irda.h>

#include "sir-dev.h"
/**************************************************************************
 *
 * kIrDAd kernel thread and config state machine
 *
 */
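/* For reference, a sketch of the request object this queue operates on,
 * as implied by the accessors below (the authoritative definition lives
 * in sir-dev.h, included above):
 *
 *	struct irda_request {
 *		struct list_head lh_request;	(queue linkage)
 *		unsigned long pending;		(bit 0 set while queued/armed)
 *		void (*func)(void *);		(handler, runs in kIrDAd context)
 *		void *data;			(handler argument)
 *		struct timer_list timer;	(for delayed requeueing)
 *	};
 */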
struct irda_request_queue {
	struct list_head request_list;
	spinlock_t lock;		/* protects request_list */
	struct task_struct *thread;	/* set to NULL to ask kIrDAd to exit */
	struct completion exit;
	wait_queue_head_t kick, done;
	atomic_t num_pending;
};
static int irda_queue_request(struct irda_request *rq)
{
	unsigned long flags;
	int ret = 0;

	if (!test_and_set_bit(0, &rq->pending)) {
		spin_lock_irqsave(&irda_rq_queue.lock, flags);
		list_add_tail(&rq->lh_request, &irda_rq_queue.request_list);
		wake_up(&irda_rq_queue.kick);
		atomic_inc(&irda_rq_queue.num_pending);
		spin_unlock_irqrestore(&irda_rq_queue.lock, flags);
		ret = 1;
	}
	return ret;		/* 1 if queued, 0 if already pending */
}
static void irda_request_timer(unsigned long data)
{
	struct irda_request *rq = (struct irda_request *)data;
	unsigned long flags;

	spin_lock_irqsave(&irda_rq_queue.lock, flags);
	list_add_tail(&rq->lh_request, &irda_rq_queue.request_list);
	wake_up(&irda_rq_queue.kick);
	spin_unlock_irqrestore(&irda_rq_queue.lock, flags);
}
static int irda_queue_delayed_request(struct irda_request *rq, unsigned long delay)
{
	int ret = 0;
	struct timer_list *timer = &rq->timer;

	if (!test_and_set_bit(0, &rq->pending)) {
		timer->expires = jiffies + delay;
		timer->function = irda_request_timer;
		timer->data = (unsigned long)rq;
		atomic_inc(&irda_rq_queue.num_pending);
		add_timer(timer);
		ret = 1;
	}
	return ret;		/* 1 if armed, 0 if already pending */
}
static void run_irda_queue(void)
{
	unsigned long flags;
	struct list_head *entry, *tmp;
	struct irda_request *rq;

	spin_lock_irqsave(&irda_rq_queue.lock, flags);
	list_for_each_safe(entry, tmp, &irda_rq_queue.request_list) {
		rq = list_entry(entry, struct irda_request, lh_request);
		list_del_init(entry);

		/* drop the lock while the handler runs - it may sleep
		 * and may even requeue the request */
		spin_unlock_irqrestore(&irda_rq_queue.lock, flags);

		clear_bit(0, &rq->pending);
		rq->func(rq->data);

		if (atomic_dec_and_test(&irda_rq_queue.num_pending))
			wake_up(&irda_rq_queue.done);

		spin_lock_irqsave(&irda_rq_queue.lock, flags);
	}
	spin_unlock_irqrestore(&irda_rq_queue.lock, flags);
}
static int irda_thread(void *startup)
{
	DECLARE_WAITQUEUE(wait, current);

	daemonize("kIrDAd");

	irda_rq_queue.thread = current;

	complete((struct completion *)startup);

	while (irda_rq_queue.thread != NULL) {

		/* We use TASK_INTERRUPTIBLE, rather than
		 * TASK_UNINTERRUPTIBLE. Andrew Morton made this
		 * change; he told me that it is safe, because "signal
		 * blocking is now handled in daemonize()", and added
		 * that the problem is that "uninterruptible sleep
		 * contributes to load average", making users worry.
		 */
		set_task_state(current, TASK_INTERRUPTIBLE);
		add_wait_queue(&irda_rq_queue.kick, &wait);
		if (list_empty(&irda_rq_queue.request_list))
			schedule();
		else
			__set_task_state(current, TASK_RUNNING);
		remove_wait_queue(&irda_rq_queue.kick, &wait);

		/* make swsusp happy with our thread */
		if (current->flags & PF_FREEZE)
			refrigerator(PF_FREEZE);

		run_irda_queue();
	}

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,35)
	reparent_to_init();
#endif

	complete_and_exit(&irda_rq_queue.exit, 0);
	/* never reached */
	return 0;
}
static void flush_irda_queue(void)
{
	if (atomic_read(&irda_rq_queue.num_pending)) {

		DECLARE_WAITQUEUE(wait, current);

		/* run whatever is still queued, then wait until all
		 * pending handlers have completed */
		if (!list_empty(&irda_rq_queue.request_list))
			run_irda_queue();

		set_task_state(current, TASK_UNINTERRUPTIBLE);
		add_wait_queue(&irda_rq_queue.done, &wait);
		if (atomic_read(&irda_rq_queue.num_pending))
			schedule();

		__set_task_state(current, TASK_RUNNING);
		remove_wait_queue(&irda_rq_queue.done, &wait);
	}
}
/* substate handler of the config-fsm to handle the cases where we want
 * to wait for transmit completion before changing the port configuration.
 * Returns 0 when tx is complete, a positive delay (msec) to sleep before
 * re-entering, or a negative errno.
 */
static int irda_tx_complete_fsm(struct sir_dev *dev)
{
	struct sir_fsm *fsm = &dev->fsm;
	unsigned next_state, delay;
	unsigned bytes_left;

	do {
		next_state = fsm->substate;	/* default: stay in current substate */
		delay = 0;

		switch(fsm->substate) {

		case SIRDEV_STATE_WAIT_XMIT:
			if (dev->drv->chars_in_buffer)
				bytes_left = dev->drv->chars_in_buffer(dev);
			else
				bytes_left = 0;
			if (!bytes_left) {
				next_state = SIRDEV_STATE_WAIT_UNTIL_SENT;
				break;
			}

			if (dev->speed > 115200)
				delay = (bytes_left*8*10000) / (dev->speed/100);
			else if (dev->speed > 0)
				delay = (bytes_left*10*10000) / (dev->speed/100);
			else
				delay = 0;
			/* expected delay (usec) until remaining bytes are sent */
			if (delay < 100) {
				udelay(delay);
				delay = 0;
				break;
			}
			/* sleep some longer delay (msec) */
			delay = (delay+999) / 1000;
			break;

		case SIRDEV_STATE_WAIT_UNTIL_SENT:
			/* block until underlying hardware buffers are empty */
			if (dev->drv->wait_until_sent)
				dev->drv->wait_until_sent(dev);
			next_state = SIRDEV_STATE_TX_DONE;
			break;

		case SIRDEV_STATE_TX_DONE:
			return 0;

		default:
			ERROR("%s - undefined state\n", __FUNCTION__);
			return -EINVAL;
		}
		fsm->substate = next_state;
	} while (delay == 0);
	return delay;
}
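/* Worked example for the delay estimate above (numbers illustrative, not
 * from the original): at 9600 baud a SIR byte occupies 10 bit times, so
 * 32 bytes left in the buffer give
 *
 *	delay = 32*10*10000 / (9600/100) = 33333 usec
 *
 * which is >= 100, so the fsm sleeps (33333+999)/1000 = 34 msec rather
 * than busy-waiting in udelay().
 */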
/*
 * Function irda_config_fsm
 *
 * State machine to handle the configuration of the device (and attached
 * dongle, if any). This handler is scheduled for execution in kIrDAd
 * context, so we can sleep. However, kIrDAd is shared by all sir_dev
 * devices, so we had better not sleep there too long - for longer delays
 * we instead start a timer to reschedule ourselves later.
 * On entry, fsm->sem is always locked and the netdev xmit queue stopped.
 * Both must be unlocked/restarted on completion - but only on final exit.
 */
static void irda_config_fsm(void *data)
{
	struct sir_dev *dev = data;
	struct sir_fsm *fsm = &dev->fsm;
	int next_state;
	int ret = -1;
	unsigned delay;

	IRDA_DEBUG(2, "%s(), <%ld>\n", __FUNCTION__, jiffies);

	do {
		IRDA_DEBUG(3, "%s - state=0x%04x / substate=0x%04x\n",
			   __FUNCTION__, fsm->state, fsm->substate);

		next_state = fsm->state;
		delay = 0;

		switch(fsm->state) {
		case SIRDEV_STATE_DONGLE_OPEN:
			if (dev->dongle_drv != NULL) {
				ret = sirdev_put_dongle(dev);
				if (ret) {
					fsm->result = -EINVAL;
					next_state = SIRDEV_STATE_ERROR;
					break;
				}
			}

			/* Initialize dongle */
			ret = sirdev_get_dongle(dev, fsm->param);
			if (ret) {
				fsm->result = ret;
				next_state = SIRDEV_STATE_ERROR;
				break;
			}

			/* Dongles are powered through the modem control lines which
			 * were just set during open. Before resetting, let's wait for
			 * the power to stabilize. This is what some dongle drivers did
			 * in open before, while others didn't - should be safe anyway.
			 */
			delay = 50;
			fsm->substate = SIRDEV_STATE_DONGLE_RESET;
			next_state = SIRDEV_STATE_DONGLE_RESET;

			/* proceed at default speed (9600) after the reset */
			fsm->param = 9600;
			break;
		case SIRDEV_STATE_DONGLE_CLOSE:
			/* shouldn't we just treat this as success? */
			if (dev->dongle_drv == NULL) {
				fsm->result = -EINVAL;
				next_state = SIRDEV_STATE_ERROR;
				break;
			}

			ret = sirdev_put_dongle(dev);
			if (ret) {
				fsm->result = ret;
				next_state = SIRDEV_STATE_ERROR;
				break;
			}
			next_state = SIRDEV_STATE_DONE;
			break;
		case SIRDEV_STATE_SET_DTR_RTS:
			ret = sirdev_set_dtr_rts(dev,
					(fsm->param & 0x02) ? TRUE : FALSE,
					(fsm->param & 0x01) ? TRUE : FALSE);
			next_state = SIRDEV_STATE_DONE;
			break;

		case SIRDEV_STATE_SET_SPEED:
			fsm->substate = SIRDEV_STATE_WAIT_XMIT;
			next_state = SIRDEV_STATE_DONGLE_CHECK;
			break;
		case SIRDEV_STATE_DONGLE_CHECK:
			ret = irda_tx_complete_fsm(dev);
			if (ret < 0) {
				fsm->result = ret;
				next_state = SIRDEV_STATE_ERROR;
				break;
			}
			if ((delay = ret) != 0)
				break;

			if (dev->dongle_drv) {
				fsm->substate = SIRDEV_STATE_DONGLE_RESET;
				next_state = SIRDEV_STATE_DONGLE_RESET;
			}
			else {
				dev->speed = fsm->param;
				next_state = SIRDEV_STATE_PORT_SPEED;
			}
			break;
		case SIRDEV_STATE_DONGLE_RESET:
			if (dev->dongle_drv->reset) {
				ret = dev->dongle_drv->reset(dev);
				if (ret < 0) {
					fsm->result = ret;
					next_state = SIRDEV_STATE_ERROR;
					break;
				}
			}
			else
				ret = 0;
			if ((delay = ret) == 0) {
				/* set serial port according to dongle default speed */
				if (dev->drv->set_speed)
					dev->drv->set_speed(dev, dev->speed);
				fsm->substate = SIRDEV_STATE_DONGLE_SPEED;
				next_state = SIRDEV_STATE_DONGLE_SPEED;
			}
			break;
		case SIRDEV_STATE_DONGLE_SPEED:
			if (dev->dongle_drv->set_speed) {
				ret = dev->dongle_drv->set_speed(dev, fsm->param);
				if (ret < 0) {
					fsm->result = ret;
					next_state = SIRDEV_STATE_ERROR;
					break;
				}
			}
			else
				ret = 0;
			if ((delay = ret) == 0)
				next_state = SIRDEV_STATE_PORT_SPEED;
			break;
		case SIRDEV_STATE_PORT_SPEED:
			/* Finally we are ready to change the serial port speed */
			if (dev->drv->set_speed)
				dev->drv->set_speed(dev, dev->speed);
			dev->new_speed = 0;
			next_state = SIRDEV_STATE_DONE;
			break;

		case SIRDEV_STATE_DONE:
			/* Signal network layer so it can send more frames */
			netif_wake_queue(dev->netdev);
			next_state = SIRDEV_STATE_COMPLETE;
			break;
		default:
			ERROR("%s - undefined state\n", __FUNCTION__);
			fsm->result = -EINVAL;
			/* fall thru */

		case SIRDEV_STATE_ERROR:
			ERROR("%s - error: %d\n", __FUNCTION__, fsm->result);

#if 0	/* don't enable this before we have netdev->tx_timeout to recover */
			netif_stop_queue(dev->netdev);
#else
			netif_wake_queue(dev->netdev);
#endif
			/* fall thru */
		case SIRDEV_STATE_COMPLETE:
			/* config change finished, so we are not busy any longer */
			sirdev_enable_rx(dev);
			up(&fsm->sem);
			return;
		}
		fsm->state = next_state;
	} while (!delay);

	/* re-enter the fsm in kIrDAd context after the requested delay (msec) */
	irda_queue_delayed_request(&fsm->rq, msecs_to_jiffies(delay));
}
/* schedule some device configuration task for execution by kIrDAd
 * on behalf of the above state machine.
 * can be called from process or interrupt/tasklet context.
 */
int sirdev_schedule_request(struct sir_dev *dev, int initial_state, unsigned param)
{
	struct sir_fsm *fsm = &dev->fsm;
	int xmit_was_down;

	IRDA_DEBUG(2, "%s - state=0x%04x / param=%u\n", __FUNCTION__, initial_state, param);

	if (down_trylock(&fsm->sem)) {
		if (in_interrupt() || in_atomic() || irqs_disabled()) {
			IRDA_DEBUG(1, "%s(), state machine busy!\n", __FUNCTION__);
			return -EWOULDBLOCK;
		} else
			down(&fsm->sem);
	}

	if (fsm->state == SIRDEV_STATE_DEAD) {
		/* race with sirdev_close should never happen */
		ERROR("%s(), instance is stale!\n", __FUNCTION__);
		up(&fsm->sem);
		return -ESTALE;		/* or better EPIPE? */
	}

	xmit_was_down = netif_queue_stopped(dev->netdev);
	netif_stop_queue(dev->netdev);
	atomic_set(&dev->enable_rx, 0);

	fsm->state = initial_state;
	fsm->param = param;
	fsm->result = 0;

	INIT_LIST_HEAD(&fsm->rq.lh_request);
	fsm->rq.pending = 0;
	fsm->rq.func = irda_config_fsm;
	fsm->rq.data = dev;

	if (!irda_queue_request(&fsm->rq)) {	/* returns 0 on error! */
		atomic_set(&dev->enable_rx, 1);
		if (!xmit_was_down)
			netif_wake_queue(dev->netdev);
		up(&fsm->sem);
		return -EAGAIN;
	}
	return 0;
}
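/* Usage sketch (illustrative, not part of this file): a sir driver that
 * wants an asynchronous speed change would request it like
 *
 *	err = sirdev_schedule_request(dev, SIRDEV_STATE_SET_SPEED, 115200);
 *
 * and kIrDAd then walks irda_config_fsm() through DONGLE_CHECK and
 * PORT_SPEED on its behalf.
 */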
int __init irda_thread_create(void)
{
	struct completion startup;
	int pid;

	spin_lock_init(&irda_rq_queue.lock);
	irda_rq_queue.thread = NULL;
	INIT_LIST_HEAD(&irda_rq_queue.request_list);
	init_waitqueue_head(&irda_rq_queue.kick);
	init_waitqueue_head(&irda_rq_queue.done);
	atomic_set(&irda_rq_queue.num_pending, 0);

	init_completion(&startup);
	pid = kernel_thread(irda_thread, &startup, CLONE_FS|CLONE_FILES);
	if (pid <= 0)
		return -EAGAIN;
	else
		wait_for_completion(&startup);

	return 0;
}
void __exit irda_thread_join(void)
{
	if (irda_rq_queue.thread) {
		flush_irda_queue();
		init_completion(&irda_rq_queue.exit);
		irda_rq_queue.thread = NULL;	/* tell kIrDAd to exit */
		wake_up(&irda_rq_queue.kick);
		wait_for_completion(&irda_rq_queue.exit);
	}
}
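/* Usage sketch (hypothetical function names, for illustration only): the
 * owning module brackets its lifetime with the worker thread like
 *
 *	static int __init sir_core_init(void)
 *	{
 *		return irda_thread_create();
 *	}
 *
 *	static void __exit sir_core_exit(void)
 *	{
 *		irda_thread_join();
 *	}
 */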