/* patch-2.6.7-vs1.9.1.12 — [linux-2.6.git] drivers/net/irda/sir_kthread.c */
1 /*********************************************************************
2  *
3  *      sir_kthread.c:          dedicated thread to process scheduled
4  *                              sir device setup requests
5  *
6  *      Copyright (c) 2002 Martin Diehl
7  *
8  *      This program is free software; you can redistribute it and/or 
9  *      modify it under the terms of the GNU General Public License as 
10  *      published by the Free Software Foundation; either version 2 of 
11  *      the License, or (at your option) any later version.
12  *
13  ********************************************************************/    
14
15 #include <linux/module.h>
16 #include <linux/kernel.h>
17 #include <linux/version.h>
18 #include <linux/init.h>
19 #include <linux/smp_lock.h>
20 #include <linux/completion.h>
21 #include <linux/delay.h>
22 #include <linux/suspend.h>
23
24 #include <net/irda/irda.h>
25
26 #include "sir-dev.h"
27
28 /**************************************************************************
29  *
30  * kIrDAd kernel thread and config state machine
31  *
32  */
33
/* Global work queue feeding the single kIrDAd kernel thread.
 * Requests (struct irda_request) are appended to request_list under
 * 'lock' and consumed by the kIrDAd thread in run_irda_queue().
 */
struct irda_request_queue {
        struct list_head request_list;  /* pending requests; protected by lock */
        spinlock_t lock;                /* guards request_list */
        task_t *thread;                 /* kIrDAd task; set to NULL to request exit */
        struct completion exit;         /* completed by kIrDAd on termination */
        wait_queue_head_t kick, done;   /* kick: new work queued; done: num_pending hit 0 */
        atomic_t num_pending;           /* requests queued or timer-armed, not yet executed */
};
42
43 static struct irda_request_queue irda_rq_queue;
44
45 static int irda_queue_request(struct irda_request *rq)
46 {
47         int ret = 0;
48         unsigned long flags;
49
50         if (!test_and_set_bit(0, &rq->pending)) {
51                 spin_lock_irqsave(&irda_rq_queue.lock, flags);
52                 list_add_tail(&rq->lh_request, &irda_rq_queue.request_list);
53                 wake_up(&irda_rq_queue.kick);
54                 atomic_inc(&irda_rq_queue.num_pending);
55                 spin_unlock_irqrestore(&irda_rq_queue.lock, flags);
56                 ret = 1;
57         }
58         return ret;
59 }
60
61 static void irda_request_timer(unsigned long data)
62 {
63         struct irda_request *rq = (struct irda_request *)data;
64         unsigned long flags;
65         
66         spin_lock_irqsave(&irda_rq_queue.lock, flags);
67         list_add_tail(&rq->lh_request, &irda_rq_queue.request_list);
68         wake_up(&irda_rq_queue.kick);
69         spin_unlock_irqrestore(&irda_rq_queue.lock, flags);
70 }
71
72 static int irda_queue_delayed_request(struct irda_request *rq, unsigned long delay)
73 {
74         int ret = 0;
75         struct timer_list *timer = &rq->timer;
76
77         if (!test_and_set_bit(0, &rq->pending)) {
78                 timer->expires = jiffies + delay;
79                 timer->function = irda_request_timer;
80                 timer->data = (unsigned long)rq;
81                 atomic_inc(&irda_rq_queue.num_pending);
82                 add_timer(timer);
83                 ret = 1;
84         }
85         return ret;
86 }
87
/* Execute every request currently on the queue.  The spinlock is
 * dropped around each rq->func() call because handlers may sleep;
 * rq->pending is cleared before the call so a request may requeue
 * itself from within its own handler.  When the last pending request
 * finishes, waiters on 'done' (flush_irda_queue) are woken.
 *
 * NOTE(review): 'tmp' is cached while the lock is dropped, so this
 * relies on no other context removing entries meanwhile; kIrDAd is
 * the normal consumer, but flush_irda_queue() may also call this -
 * presumably only at module unload; verify.
 */
static void run_irda_queue(void)
{
	unsigned long flags;
	struct list_head *entry, *tmp;
	struct irda_request *rq;

	spin_lock_irqsave(&irda_rq_queue.lock, flags);
	list_for_each_safe(entry, tmp, &irda_rq_queue.request_list) {
		rq = list_entry(entry, struct irda_request, lh_request);
		list_del_init(entry);
		/* handler may sleep - run it unlocked */
		spin_unlock_irqrestore(&irda_rq_queue.lock, flags);

		/* clear before calling: handler may requeue this request */
		clear_bit(0, &rq->pending);
		rq->func(rq->data);

		/* last pending request done - wake flush_irda_queue() */
		if (atomic_dec_and_test(&irda_rq_queue.num_pending))
			wake_up(&irda_rq_queue.done);

		spin_lock_irqsave(&irda_rq_queue.lock, flags);
	}
	spin_unlock_irqrestore(&irda_rq_queue.lock, flags);
}
110
/* kIrDAd: the dedicated kernel thread which executes queued sir device
 * configuration requests (irda_config_fsm runs in this context).
 * 'startup' is a struct completion * signalled once initialization is
 * done so irda_thread_create() can return.  The thread loops until
 * irda_thread_join() clears irda_rq_queue.thread, then completes
 * irda_rq_queue.exit and terminates.
 */
static int irda_thread(void *startup)
{
	DECLARE_WAITQUEUE(wait, current);

	daemonize("kIrDAd");

	irda_rq_queue.thread = current;

	/* tell irda_thread_create() we are up and running */
	complete((struct completion *)startup);

	while (irda_rq_queue.thread != NULL) {

		/* We use TASK_INTERRUPTIBLE, rather than
		 * TASK_UNINTERRUPTIBLE.  Andrew Morton made this
		 * change ; he told me that it is safe, because "signal
		 * blocking is now handled in daemonize()", he added
		 * that the problem is that "uninterruptible sleep
		 * contributes to load average", making user worry.
		 * Jean II */
		/* set state BEFORE checking the list so a wake_up between
		 * the check and schedule() is not lost */
		set_task_state(current, TASK_INTERRUPTIBLE);
		add_wait_queue(&irda_rq_queue.kick, &wait);
		if (list_empty(&irda_rq_queue.request_list))
			schedule();
		else
			__set_task_state(current, TASK_RUNNING);
		remove_wait_queue(&irda_rq_queue.kick, &wait);

		/* make swsusp happy with our thread */
		if (current->flags & PF_FREEZE)
			refrigerator(PF_FREEZE);

		run_irda_queue();
	}

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,35)
	reparent_to_init();
#endif
	/* signal irda_thread_join() and terminate */
	complete_and_exit(&irda_rq_queue.exit, 0);
	/* never reached */
	return 0;
}
152
153
/* Wait until every pending request has been executed.  Runs any
 * requests still sitting on the queue directly in the caller's
 * context, then sleeps until num_pending drops to zero (delayed
 * requests may still be waiting on their timer).  Called from
 * irda_thread_join() at module unload.
 */
static void flush_irda_queue(void)
{
	if (atomic_read(&irda_rq_queue.num_pending)) {

		DECLARE_WAITQUEUE(wait, current);

		/* drain whatever is already queued ourselves */
		if (!list_empty(&irda_rq_queue.request_list))
			run_irda_queue();

		/* prepare-to-wait: set state before re-checking the count so
		 * a wake_up from run_irda_queue() cannot be lost */
		set_task_state(current, TASK_UNINTERRUPTIBLE);
		add_wait_queue(&irda_rq_queue.done, &wait);
		if (atomic_read(&irda_rq_queue.num_pending))
			schedule();
		else
			__set_task_state(current, TASK_RUNNING);
		remove_wait_queue(&irda_rq_queue.done, &wait);
	}
}
172
/* substate handler of the config-fsm to handle the cases where we want
 * to wait for transmit completion before changing the port configuration
 *
 * Returns 0 once the transmitter is idle (substate reached TX_DONE),
 * a positive delay in msec the caller should sleep before re-entering,
 * or -EINVAL on an undefined substate.
 */

static int irda_tx_complete_fsm(struct sir_dev *dev)
{
	struct sir_fsm *fsm = &dev->fsm;
	unsigned next_state, delay;
	unsigned bytes_left;

	do {
		next_state = fsm->substate;	/* default: stay in current substate */
		delay = 0;

		switch(fsm->substate) {

		case SIRDEV_STATE_WAIT_XMIT:
			/* ask the driver how much tx data is still buffered */
			if (dev->drv->chars_in_buffer)
				bytes_left = dev->drv->chars_in_buffer(dev);
			else
				bytes_left = 0;
			if (!bytes_left) {
				next_state = SIRDEV_STATE_WAIT_UNTIL_SENT;
				break;
			}

			/* estimate time to drain: 8 bits/byte above 115200,
			 * 10 bits/byte (start/stop framing) at SIR speeds -
			 * NOTE(review): presumably; confirm framing assumption */
			if (dev->speed > 115200)
				delay = (bytes_left*8*10000) / (dev->speed/100);
			else if (dev->speed > 0)
				delay = (bytes_left*10*10000) / (dev->speed/100);
			else
				delay = 0;
			/* expected delay (usec) until remaining bytes are sent */
			if (delay < 100) {
				/* short enough to busy-wait, then poll again */
				udelay(delay);
				delay = 0;
				break;
			}
			/* sleep some longer delay (msec) */
			delay = (delay+999) / 1000;
			break;

		case SIRDEV_STATE_WAIT_UNTIL_SENT:
			/* block until underlaying hardware buffer are empty */
			if (dev->drv->wait_until_sent)
				dev->drv->wait_until_sent(dev);
			next_state = SIRDEV_STATE_TX_DONE;
			break;

		case SIRDEV_STATE_TX_DONE:
			return 0;

		default:
			ERROR("%s - undefined state\n", __FUNCTION__);
			return -EINVAL;
		}
		fsm->substate = next_state;
	} while (delay == 0);
	return delay;
}
233
234 /*
235  * Function irda_config_fsm
236  *
237  * State machine to handle the configuration of the device (and attached dongle, if any).
238  * This handler is scheduled for execution in kIrDAd context, so we can sleep.
239  * however, kIrDAd is shared by all sir_dev devices so we better don't sleep there too
240  * long. Instead, for longer delays we start a timer to reschedule us later.
241  * On entry, fsm->sem is always locked and the netdev xmit queue stopped.
242  * Both must be unlocked/restarted on completion - but only on final exit.
243  */
244
245 static void irda_config_fsm(void *data)
246 {
247         struct sir_dev *dev = data;
248         struct sir_fsm *fsm = &dev->fsm;
249         int next_state;
250         int ret = -1;
251         unsigned delay;
252
253         IRDA_DEBUG(2, "%s(), <%ld>\n", __FUNCTION__, jiffies); 
254
255         do {
256                 IRDA_DEBUG(3, "%s - state=0x%04x / substate=0x%04x\n",
257                         __FUNCTION__, fsm->state, fsm->substate);
258
259                 next_state = fsm->state;
260                 delay = 0;
261
262                 switch(fsm->state) {
263
264                 case SIRDEV_STATE_DONGLE_OPEN:
265                         if (dev->dongle_drv != NULL) {
266                                 ret = sirdev_put_dongle(dev);
267                                 if (ret) {
268                                         fsm->result = -EINVAL;
269                                         next_state = SIRDEV_STATE_ERROR;
270                                         break;
271                                 }
272                         }
273
274                         /* Initialize dongle */
275                         ret = sirdev_get_dongle(dev, fsm->param);
276                         if (ret) {
277                                 fsm->result = ret;
278                                 next_state = SIRDEV_STATE_ERROR;
279                                 break;
280                         }
281
282                         /* Dongles are powered through the modem control lines which
283                          * were just set during open. Before resetting, let's wait for
284                          * the power to stabilize. This is what some dongle drivers did
285                          * in open before, while others didn't - should be safe anyway.
286                          */
287
288                         delay = 50;
289                         fsm->substate = SIRDEV_STATE_DONGLE_RESET;
290                         next_state = SIRDEV_STATE_DONGLE_RESET;
291
292                         fsm->param = 9600;
293
294                         break;
295
296                 case SIRDEV_STATE_DONGLE_CLOSE:
297                         /* shouldn't we just treat this as success=? */
298                         if (dev->dongle_drv == NULL) {
299                                 fsm->result = -EINVAL;
300                                 next_state = SIRDEV_STATE_ERROR;
301                                 break;
302                         }
303
304                         ret = sirdev_put_dongle(dev);
305                         if (ret) {
306                                 fsm->result = ret;
307                                 next_state = SIRDEV_STATE_ERROR;
308                                 break;
309                         }
310                         next_state = SIRDEV_STATE_DONE;
311                         break;
312
313                 case SIRDEV_STATE_SET_DTR_RTS:
314                         ret = sirdev_set_dtr_rts(dev,
315                                 (fsm->param&0x02) ? TRUE : FALSE,
316                                 (fsm->param&0x01) ? TRUE : FALSE);
317                         next_state = SIRDEV_STATE_DONE;
318                         break;
319
320                 case SIRDEV_STATE_SET_SPEED:
321                         fsm->substate = SIRDEV_STATE_WAIT_XMIT;
322                         next_state = SIRDEV_STATE_DONGLE_CHECK;
323                         break;
324
325                 case SIRDEV_STATE_DONGLE_CHECK:
326                         ret = irda_tx_complete_fsm(dev);
327                         if (ret < 0) {
328                                 fsm->result = ret;
329                                 next_state = SIRDEV_STATE_ERROR;
330                                 break;
331                         }
332                         if ((delay=ret) != 0)
333                                 break;
334
335                         if (dev->dongle_drv) {
336                                 fsm->substate = SIRDEV_STATE_DONGLE_RESET;
337                                 next_state = SIRDEV_STATE_DONGLE_RESET;
338                         }
339                         else {
340                                 dev->speed = fsm->param;
341                                 next_state = SIRDEV_STATE_PORT_SPEED;
342                         }
343                         break;
344
345                 case SIRDEV_STATE_DONGLE_RESET:
346                         if (dev->dongle_drv->reset) {
347                                 ret = dev->dongle_drv->reset(dev);      
348                                 if (ret < 0) {
349                                         fsm->result = ret;
350                                         next_state = SIRDEV_STATE_ERROR;
351                                         break;
352                                 }
353                         }
354                         else
355                                 ret = 0;
356                         if ((delay=ret) == 0) {
357                                 /* set serial port according to dongle default speed */
358                                 if (dev->drv->set_speed)
359                                         dev->drv->set_speed(dev, dev->speed);
360                                 fsm->substate = SIRDEV_STATE_DONGLE_SPEED;
361                                 next_state = SIRDEV_STATE_DONGLE_SPEED;
362                         }
363                         break;
364
365                 case SIRDEV_STATE_DONGLE_SPEED:                         
366                         if (dev->dongle_drv->reset) {
367                                 ret = dev->dongle_drv->set_speed(dev, fsm->param);
368                                 if (ret < 0) {
369                                         fsm->result = ret;
370                                         next_state = SIRDEV_STATE_ERROR;
371                                         break;
372                                 }
373                         }
374                         else
375                                 ret = 0;
376                         if ((delay=ret) == 0)
377                                 next_state = SIRDEV_STATE_PORT_SPEED;
378                         break;
379
380                 case SIRDEV_STATE_PORT_SPEED:
381                         /* Finally we are ready to change the serial port speed */
382                         if (dev->drv->set_speed)
383                                 dev->drv->set_speed(dev, dev->speed);
384                         dev->new_speed = 0;
385                         next_state = SIRDEV_STATE_DONE;
386                         break;
387
388                 case SIRDEV_STATE_DONE:
389                         /* Signal network layer so it can send more frames */
390                         netif_wake_queue(dev->netdev);
391                         next_state = SIRDEV_STATE_COMPLETE;
392                         break;
393
394                 default:
395                         ERROR("%s - undefined state\n", __FUNCTION__);
396                         fsm->result = -EINVAL;
397                         /* fall thru */
398
399                 case SIRDEV_STATE_ERROR:
400                         ERROR("%s - error: %d\n", __FUNCTION__, fsm->result);
401
402 #if 0   /* don't enable this before we have netdev->tx_timeout to recover */
403                         netif_stop_queue(dev->netdev);
404 #else
405                         netif_wake_queue(dev->netdev);
406 #endif
407                         /* fall thru */
408
409                 case SIRDEV_STATE_COMPLETE:
410                         /* config change finished, so we are not busy any longer */
411                         sirdev_enable_rx(dev);
412                         up(&fsm->sem);
413                         return;
414                 }
415                 fsm->state = next_state;
416         } while(!delay);
417
418         irda_queue_delayed_request(&fsm->rq, msecs_to_jiffies(delay));
419 }
420
421 /* schedule some device configuration task for execution by kIrDAd
422  * on behalf of the above state machine.
423  * can be called from process or interrupt/tasklet context.
424  */
425
426 int sirdev_schedule_request(struct sir_dev *dev, int initial_state, unsigned param)
427 {
428         struct sir_fsm *fsm = &dev->fsm;
429         int xmit_was_down;
430
431         IRDA_DEBUG(2, "%s - state=0x%04x / param=%u\n", __FUNCTION__, initial_state, param);
432
433         if (down_trylock(&fsm->sem)) {
434                 if (in_interrupt()  ||  in_atomic()  ||  irqs_disabled()) {
435                         IRDA_DEBUG(1, "%s(), state machine busy!\n", __FUNCTION__);
436                         return -EWOULDBLOCK;
437                 } else
438                         down(&fsm->sem);
439         }
440
441         if (fsm->state == SIRDEV_STATE_DEAD) {
442                 /* race with sirdev_close should never happen */
443                 ERROR("%s(), instance staled!\n", __FUNCTION__);
444                 up(&fsm->sem);
445                 return -ESTALE;         /* or better EPIPE? */
446         }
447
448         xmit_was_down = netif_queue_stopped(dev->netdev);
449         netif_stop_queue(dev->netdev);
450         atomic_set(&dev->enable_rx, 0);
451
452         fsm->state = initial_state;
453         fsm->param = param;
454         fsm->result = 0;
455
456         INIT_LIST_HEAD(&fsm->rq.lh_request);
457         fsm->rq.pending = 0;
458         fsm->rq.func = irda_config_fsm;
459         fsm->rq.data = dev;
460
461         if (!irda_queue_request(&fsm->rq)) {    /* returns 0 on error! */
462                 atomic_set(&dev->enable_rx, 1);
463                 if (!xmit_was_down)
464                         netif_wake_queue(dev->netdev);          
465                 up(&fsm->sem);
466                 return -EAGAIN;
467         }
468         return 0;
469 }
470
471 int __init irda_thread_create(void)
472 {
473         struct completion startup;
474         int pid;
475
476         spin_lock_init(&irda_rq_queue.lock);
477         irda_rq_queue.thread = NULL;
478         INIT_LIST_HEAD(&irda_rq_queue.request_list);
479         init_waitqueue_head(&irda_rq_queue.kick);
480         init_waitqueue_head(&irda_rq_queue.done);
481         atomic_set(&irda_rq_queue.num_pending, 0);
482
483         init_completion(&startup);
484         pid = kernel_thread(irda_thread, &startup, CLONE_FS|CLONE_FILES);
485         if (pid <= 0)
486                 return -EAGAIN;
487         else
488                 wait_for_completion(&startup);
489
490         return 0;
491 }
492
493 void __exit irda_thread_join(void) 
494 {
495         if (irda_rq_queue.thread) {
496                 flush_irda_queue();
497                 init_completion(&irda_rq_queue.exit);
498                 irda_rq_queue.thread = NULL;
499                 wake_up(&irda_rq_queue.kick);           
500                 wait_for_completion(&irda_rq_queue.exit);
501         }
502 }
503