/*********************************************************************
 *
 *	sir_dev.c:	irda sir network device
 *
 *	Copyright (c) 2002 Martin Diehl
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License as
 *	published by the Free Software Foundation; either version 2 of
 *	the License, or (at your option) any later version.
 *
 ********************************************************************/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/smp_lock.h>

#include <net/irda/irda.h>
#include <net/irda/wrapper.h>
#include <net/irda/irda_device.h>

#include "sir-dev.h"
/***************************************************************************/

void sirdev_enable_rx(struct sir_dev *dev)
{
	if (unlikely(atomic_read(&dev->enable_rx)))
		return;

	/* flush rx-buffer - should also help in case of problems with echo cancellation */
	dev->rx_buff.data = dev->rx_buff.head;
	dev->rx_buff.len = 0;
	dev->rx_buff.in_frame = FALSE;
	dev->rx_buff.state = OUTSIDE_FRAME;
	atomic_set(&dev->enable_rx, 1);
}
static int sirdev_is_receiving(struct sir_dev *dev)
{
	if (!atomic_read(&dev->enable_rx))
		return 0;

	return (dev->rx_buff.state != OUTSIDE_FRAME);
}
int sirdev_set_dongle(struct sir_dev *dev, IRDA_DONGLE type)
{
	int err;

	IRDA_DEBUG(3, "%s : requesting dongle %d.\n", __FUNCTION__, type);

	err = sirdev_schedule_dongle_open(dev, type);
	if (unlikely(err))
		return err;
	down(&dev->fsm.sem);		/* block until config change completed */
	err = dev->fsm.result;
	up(&dev->fsm.sem);
	return err;
}
/* used by dongle drivers for dongle programming */

int sirdev_raw_write(struct sir_dev *dev, const char *buf, int len)
{
	unsigned long flags;
	int ret;

	if (unlikely(len > dev->tx_buff.truesize))
		return -ENOSPC;

	spin_lock_irqsave(&dev->tx_lock, flags);	/* serialize with other tx operations */
	while (dev->tx_buff.len > 0) {			/* wait until tx idle */
		spin_unlock_irqrestore(&dev->tx_lock, flags);
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(MSECS_TO_JIFFIES(10));
		spin_lock_irqsave(&dev->tx_lock, flags);
	}

	dev->tx_buff.data = dev->tx_buff.head;
	memcpy(dev->tx_buff.data, buf, len);
	dev->tx_buff.len = len;

	ret = dev->drv->do_write(dev, dev->tx_buff.data, dev->tx_buff.len);
	if (ret > 0) {
		IRDA_DEBUG(3, "%s(), raw-tx started\n", __FUNCTION__);

		dev->tx_buff.data += ret;
		dev->tx_buff.len -= ret;
		dev->raw_tx = 1;	/* write completion must not touch the queue */
		ret = len;		/* all data is going to be sent */
	}
	spin_unlock_irqrestore(&dev->tx_lock, flags);
	return ret;
}
/* seems some dongle drivers may need this */

int sirdev_raw_read(struct sir_dev *dev, char *buf, int len)
{
	int count;

	if (atomic_read(&dev->enable_rx))
		return -EIO;		/* fail if we expect irda-frames */

	count = (len < dev->rx_buff.len) ? len : dev->rx_buff.len;
	if (count > 0) {
		memcpy(buf, dev->rx_buff.data, count);
		dev->rx_buff.data += count;
		dev->rx_buff.len -= count;
	}

	/* remaining stuff gets flushed when re-enabling normal rx */

	return count;
}
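/* A minimal usage sketch (hypothetical dongle driver, not from this file;
 * the cmd/resp buffers and the delay are illustrative assumptions): during
 * dongle programming rx is disabled, so the raw helpers above move bytes
 * past the IrLAP wrapping entirely, e.g.
 *
 *	sirdev_set_dtr_rts(dev, TRUE, TRUE);		// power the dongle
 *	sirdev_raw_write(dev, cmd, sizeof(cmd));	// command bytes out
 *	set_current_state(TASK_UNINTERRUPTIBLE);	// give it time to answer
 *	schedule_timeout(MSECS_TO_JIFFIES(20));
 *	n = sirdev_raw_read(dev, resp, sizeof(resp));	// raw response bytes
 *
 * Real dongle drivers run such sequences from the irda config-fsm, which
 * also takes care of restarting the queue afterwards.
 */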
int sirdev_set_dtr_rts(struct sir_dev *dev, int dtr, int rts)
{
	int ret = -ENXIO;

	if (dev->drv->set_dtr_rts != 0)
		ret = dev->drv->set_dtr_rts(dev, dtr, rts);
	return ret;
}
/**********************************************************************/

/* called from client driver - likely with bh-context - to indicate
 * it made some progress with transmission. Hence we send the next
 * chunk, if any, or complete the skb otherwise
 */
void sirdev_write_complete(struct sir_dev *dev)
{
	unsigned long flags;
	struct sk_buff *skb;
	int actual = 0;
	int err;

	spin_lock_irqsave(&dev->tx_lock, flags);

	IRDA_DEBUG(3, "%s() - dev->tx_buff.len = %d\n",
		   __FUNCTION__, dev->tx_buff.len);

	if (likely(dev->tx_buff.len > 0)) {
		/* Write data left in transmit buffer */
		actual = dev->drv->do_write(dev, dev->tx_buff.data, dev->tx_buff.len);

		if (likely(actual > 0)) {
			dev->tx_buff.data += actual;
			dev->tx_buff.len -= actual;
		}
		else if (unlikely(actual < 0)) {
			/* could be dropped later when we have tx_timeout to recover */
			ERROR("%s: drv->do_write failed (%d)\n", __FUNCTION__, actual);
			if ((skb = dev->tx_skb) != NULL) {
				dev->tx_skb = NULL;
				dev_kfree_skb_any(skb);
				dev->stats.tx_errors++;
				dev->stats.tx_dropped++;
			}
			dev->tx_buff.len = 0;
		}
		if (dev->tx_buff.len > 0) {	/* more data to send later */
			spin_unlock_irqrestore(&dev->tx_lock, flags);
			return;
		}
	}

	if (unlikely(dev->raw_tx != 0)) {
		/* in raw mode we are just done now after the buffer was sent
		 * completely. Since this was requested by some dongle driver
		 * running under the control of the irda-thread we must take
		 * care here not to re-enable the queue. The queue will be
		 * restarted when the irda-thread has completed the request.
		 */

		IRDA_DEBUG(3, "%s(), raw-tx done\n", __FUNCTION__);
		dev->raw_tx = 0;
		goto done;	/* no post-frame handling in raw mode */
	}

	/* we have finished now sending this skb.
	 * update statistics and free the skb.
	 * finally we check and trigger a pending speed change, if any.
	 * if not we switch to rx mode and wake the queue for further
	 * packets.
	 * note the scheduled speed request blocks until the lower
	 * client driver and the corresponding hardware has really
	 * finished sending all data (xmit fifo drained f.e.)
	 * before the speed change gets finally done and the queue
	 * re-activated.
	 */

	IRDA_DEBUG(5, "%s(), finished with frame!\n", __FUNCTION__);

	if ((skb = dev->tx_skb) != NULL) {
		dev->tx_skb = NULL;
		dev->stats.tx_packets++;
		dev->stats.tx_bytes += skb->len;
		dev_kfree_skb_any(skb);
	}

	if (unlikely(dev->new_speed > 0)) {
		IRDA_DEBUG(5, "%s(), Changing speed!\n", __FUNCTION__);
		err = sirdev_schedule_speed(dev, dev->new_speed);
		if (unlikely(err)) {
			/* should never happen
			 * forget the speed change and hope the stack recovers
			 */
			ERROR("%s - schedule speed change failed: %d\n", __FUNCTION__, err);
			netif_wake_queue(dev->netdev);
		}
		/* else: success
		 *	speed change in progress now
		 *	on completion dev->new_speed gets cleared,
		 *	rx-reenabled and the queue restarted
		 */
	}
	else {
		sirdev_enable_rx(dev);
		netif_wake_queue(dev->netdev);
	}

done:
	spin_unlock_irqrestore(&dev->tx_lock, flags);
}
/* called from client driver - likely with bh-context - to give us
 * some more received bytes. We put them into the rx-buffer,
 * normally unwrapping and building LAP-skb's (unless rx disabled)
 */

int sirdev_receive(struct sir_dev *dev, const unsigned char *cp, size_t count)
{
	if (!dev || !dev->netdev) {
		WARNING("%s(), not ready yet!\n", __FUNCTION__);
		return -1;
	}

	if (!dev->irlap) {
		WARNING("%s - too early: %p / %zd!\n",
			__FUNCTION__, cp, count);
		return -1;
	}

	if (cp == NULL) {
		/* error already at lower level receive
		 * just update stats and set media busy
		 */
		irda_device_set_media_busy(dev->netdev, TRUE);
		dev->stats.rx_dropped++;
		IRDA_DEBUG(0, "%s; rx-drop: %zd\n", __FUNCTION__, count);
		return 0;
	}

	/* Read the characters into the buffer */
	if (likely(atomic_read(&dev->enable_rx))) {
		while (count--)
			/* Unwrap and destuff one byte */
			async_unwrap_char(dev->netdev, &dev->stats,
					  &dev->rx_buff, *cp++);
	} else {
		while (count--) {
			/* rx not enabled: save the raw bytes and never
			 * trigger any netif_rx. The received bytes are flushed
			 * later when we re-enable rx but might be read meanwhile
			 * by the dongle driver.
			 */
			dev->rx_buff.data[dev->rx_buff.len++] = *cp++;

			/* What should we do when the buffer is full? */
			if (unlikely(dev->rx_buff.len == dev->rx_buff.truesize))
				dev->rx_buff.len = 0;
		}
	}

	return 0;
}
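/* Sketch of the client-driver side of the two callbacks above (hypothetical
 * UART-style driver, names assumed): from its rx interrupt it would forward
 * raw bytes with sirdev_receive(dev, buf, n); once its transmit fifo drains
 * it reports progress with sirdev_write_complete(dev). Both are expected
 * from irq/bh context, which is why they only rely on the irqsave tx_lock
 * for serialization and never sleep.
 */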
/**********************************************************************/

/* callbacks from network layer */

static struct net_device_stats *sirdev_get_stats(struct net_device *ndev)
{
	struct sir_dev *dev = ndev->priv;

	return (dev) ? &dev->stats : NULL;
}
static int sirdev_hard_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct sir_dev *dev = ndev->priv;
	unsigned long flags;
	int actual = 0;
	int err;
	s32 speed;

	ASSERT(dev != NULL, return 0;);

	netif_stop_queue(ndev);

	IRDA_DEBUG(3, "%s(), skb->len = %d\n", __FUNCTION__, skb->len);

	speed = irda_get_next_speed(skb);
	if ((speed != dev->speed) && (speed != -1)) {
		if (!skb->len) {
			err = sirdev_schedule_speed(dev, speed);
			if (unlikely(err == -EWOULDBLOCK)) {
				/* Failed to initiate the speed change, likely the fsm
				 * is still busy (pretty unlikely, but...)
				 * We refuse to accept the skb and return with the queue
				 * stopped so the network layer will retry after the
				 * fsm completes and wakes the queue.
				 */
				return 1;
			}
			else if (unlikely(err)) {
				/* other fatal error - forget the speed change and
				 * hope the stack will recover somehow
				 */
				netif_start_queue(ndev);
			}
			/* else: success
			 *	speed change in progress now
			 *	on completion the queue gets restarted
			 */

			dev_kfree_skb_any(skb);
			return 0;
		} else
			dev->new_speed = speed;
	}

	/* Init tx buffer */
	dev->tx_buff.data = dev->tx_buff.head;

	/* Check problems */
	if (spin_is_locked(&dev->tx_lock)) {
		IRDA_DEBUG(3, "%s(), write not completed\n", __FUNCTION__);
	}

	/* serialize with write completion */
	spin_lock_irqsave(&dev->tx_lock, flags);

	/* Copy skb to tx_buff while wrapping, stuffing and making CRC */
	dev->tx_buff.len = async_wrap_skb(skb, dev->tx_buff.data, dev->tx_buff.truesize);

	/* transmission will start now - disable receive.
	 * if we are just in the middle of an incoming frame,
	 * treat it as collision. probably it's a good idea to
	 * reset the rx_buf OUTSIDE_FRAME in this case too?
	 */
	atomic_set(&dev->enable_rx, 0);
	if (unlikely(sirdev_is_receiving(dev)))
		dev->stats.collisions++;

	actual = dev->drv->do_write(dev, dev->tx_buff.data, dev->tx_buff.len);

	if (likely(actual > 0)) {
		dev->tx_skb = skb;
		ndev->trans_start = jiffies;
		dev->tx_buff.data += actual;
		dev->tx_buff.len -= actual;
	}
	else if (unlikely(actual < 0)) {
		/* could be dropped later when we have tx_timeout to recover */
		ERROR("%s: drv->do_write failed (%d)\n", __FUNCTION__, actual);
		dev_kfree_skb_any(skb);
		dev->stats.tx_errors++;
		dev->stats.tx_dropped++;
		netif_wake_queue(ndev);
	}
	spin_unlock_irqrestore(&dev->tx_lock, flags);

	return 0;
}
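/* tx lifecycle, for orientation (summary of the code above): hard_xmit
 * stops the queue, wraps the skb into tx_buff and issues the first
 * do_write; the client driver then calls sirdev_write_complete() until
 * tx_buff drains, at which point the skb is freed and either a pending
 * speed change is scheduled or rx and the queue are re-enabled.
 */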
/* called from network layer with rtnl hold */

static int sirdev_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
{
	struct if_irda_req *irq = (struct if_irda_req *) rq;
	struct sir_dev *dev = ndev->priv;
	int ret = 0;

	ASSERT(dev != NULL, return -1;);

	IRDA_DEBUG(3, "%s(), %s, (cmd=0x%X)\n", __FUNCTION__, ndev->name, cmd);

	switch (cmd) {
	case SIOCSBANDWIDTH: /* Set bandwidth */
		if (!capable(CAP_NET_ADMIN))
			ret = -EPERM;
		else
			ret = sirdev_schedule_speed(dev, irq->ifr_baudrate);
		/* cannot sleep here for completion - we are called from network layer with rtnl hold */
		break;

	case SIOCSDONGLE: /* Set dongle */
		if (!capable(CAP_NET_ADMIN))
			ret = -EPERM;
		else
			ret = sirdev_schedule_dongle_open(dev, irq->ifr_dongle);
		/* cannot sleep here for completion - we are called from network layer with rtnl hold */
		break;

	case SIOCSMEDIABUSY: /* Set media busy */
		if (!capable(CAP_NET_ADMIN))
			ret = -EPERM;
		else
			irda_device_set_media_busy(dev->netdev, TRUE);
		break;

	case SIOCGRECEIVING: /* Check if we are receiving right now */
		irq->ifr_receiving = sirdev_is_receiving(dev);
		break;

	case SIOCSDTRRTS: /* Set DTR/RTS */
		if (!capable(CAP_NET_ADMIN))
			ret = -EPERM;
		else
			ret = sirdev_schedule_dtr_rts(dev, irq->ifr_dtr, irq->ifr_rts);
		/* cannot sleep here for completion - we are called from network layer with rtnl hold */
		break;

	case SIOCSMODE: /* Set mode */
		if (!capable(CAP_NET_ADMIN))
			ret = -EPERM;
		else
			ret = sirdev_schedule_mode(dev, irq->ifr_mode);
		/* cannot sleep here for completion - we are called from network layer with rtnl hold */
		break;

	default:
		ret = -EOPNOTSUPP;
	}

	return ret;
}

/* ----------------------------------------------------------------------------- */
#define SIRBUF_ALLOCSIZE 4269	/* worst case size of a wrapped IrLAP frame */

static int sirdev_alloc_buffers(struct sir_dev *dev)
{
	dev->tx_buff.truesize = SIRBUF_ALLOCSIZE;
	dev->rx_buff.truesize = IRDA_SKB_MAX_MTU;

	/* Bootstrap ZeroCopy Rx */
	dev->rx_buff.skb = __dev_alloc_skb(dev->rx_buff.truesize, GFP_KERNEL);
	if (dev->rx_buff.skb == NULL)
		return -ENOMEM;
	skb_reserve(dev->rx_buff.skb, 1);
	dev->rx_buff.head = dev->rx_buff.skb->data;

	dev->tx_buff.head = kmalloc(dev->tx_buff.truesize, GFP_KERNEL);
	if (dev->tx_buff.head == NULL) {
		kfree_skb(dev->rx_buff.skb);
		dev->rx_buff.skb = NULL;
		dev->rx_buff.head = NULL;
		return -ENOMEM;
	}

	dev->tx_buff.data = dev->tx_buff.head;
	dev->rx_buff.data = dev->rx_buff.head;
	dev->tx_buff.len = 0;
	dev->rx_buff.len = 0;

	dev->rx_buff.in_frame = FALSE;
	dev->rx_buff.state = OUTSIDE_FRAME;
	return 0;
}
static void sirdev_free_buffers(struct sir_dev *dev)
{
	if (dev->rx_buff.skb)
		kfree_skb(dev->rx_buff.skb);
	if (dev->tx_buff.head)
		kfree(dev->tx_buff.head);
	dev->rx_buff.head = dev->tx_buff.head = NULL;
	dev->rx_buff.skb = NULL;
}
static int sirdev_open(struct net_device *ndev)
{
	struct sir_dev *dev = ndev->priv;
	const struct sir_driver *drv = dev->drv;

	if (!drv)
		return -ENODEV;

	/* increase the reference count of the driver module before doing serious stuff */
	if (!try_module_get(drv->owner))
		return -ESTALE;

	IRDA_DEBUG(2, "%s()\n", __FUNCTION__);

	if (sirdev_alloc_buffers(dev))
		goto errout_dec;
	if (!dev->drv->start_dev || dev->drv->start_dev(dev))
		goto errout_free;

	sirdev_enable_rx(dev);
	dev->raw_tx = 0;

	netif_start_queue(ndev);
	dev->irlap = irlap_open(ndev, &dev->qos, dev->hwname);
	if (!dev->irlap)
		goto errout_stop;

	netif_wake_queue(ndev);

	IRDA_DEBUG(2, "%s - done, speed = %d\n", __FUNCTION__, dev->speed);
	return 0;

errout_stop:
	atomic_set(&dev->enable_rx, 0);
	if (dev->drv->stop_dev)
		dev->drv->stop_dev(dev);
errout_free:
	sirdev_free_buffers(dev);
errout_dec:
	module_put(drv->owner);
	return -EAGAIN;
}
static int sirdev_close(struct net_device *ndev)
{
	struct sir_dev *dev = ndev->priv;
	const struct sir_driver *drv;

//	IRDA_DEBUG(0, "%s\n", __FUNCTION__);

	netif_stop_queue(ndev);

	down(&dev->fsm.sem);		/* block on pending config completion */
	atomic_set(&dev->enable_rx, 0);

	if (unlikely(!dev->irlap))
		goto out;
	irlap_close(dev->irlap);
	dev->irlap = NULL;

	drv = dev->drv;
	if (unlikely(!drv || !dev->priv))
		goto out;
	if (drv->stop_dev)
		drv->stop_dev(dev);

	sirdev_free_buffers(dev);
	module_put(drv->owner);

out:
	dev->speed = 0;
	up(&dev->fsm.sem);
	return 0;
}
/* ----------------------------------------------------------------------------- */

struct sir_dev * sirdev_get_instance(const struct sir_driver *drv, const char *name)
{
	struct net_device *ndev;
	struct sir_dev *dev;

	IRDA_DEBUG(0, "%s - %s\n", __FUNCTION__, name);

	/* instead of adding tests to protect against drv->do_write==NULL
	 * at several places we refuse to create a sir_dev instance for
	 * drivers which don't implement do_write.
	 */
	if (!drv || !drv->do_write)
		return NULL;

	/*
	 * Allocate new instance of the device
	 */
	ndev = alloc_irdadev(sizeof(*dev));
	if (ndev == NULL) {
		ERROR("%s - Can't allocate memory for IrDA control block!\n", __FUNCTION__);
		goto out;
	}
	dev = ndev->priv;

	irda_init_max_qos_capabilies(&dev->qos);
	dev->qos.baud_rate.bits = IR_9600|IR_19200|IR_38400|IR_57600|IR_115200;
	dev->qos.min_turn_time.bits = drv->qos_mtt_bits;
	irda_qos_bits_to_value(&dev->qos);

	strncpy(dev->hwname, name, sizeof(dev->hwname)-1);

	atomic_set(&dev->enable_rx, 0);
	dev->tx_skb = NULL;

	spin_lock_init(&dev->tx_lock);
	init_MUTEX(&dev->fsm.sem);

	INIT_LIST_HEAD(&dev->fsm.rq.lh_request);
	dev->fsm.rq.pending = 0;
	init_timer(&dev->fsm.rq.timer);

	dev->drv = drv;
	dev->netdev = ndev;

	SET_MODULE_OWNER(ndev);

	/* Override the network functions we need to use */
	ndev->hard_start_xmit = sirdev_hard_xmit;
	ndev->open = sirdev_open;
	ndev->stop = sirdev_close;
	ndev->get_stats = sirdev_get_stats;
	ndev->do_ioctl = sirdev_ioctl;

	if (register_netdev(ndev)) {
		ERROR("%s(), register_netdev() failed!\n", __FUNCTION__);
		goto out_freenetdev;
	}

	return dev;

out_freenetdev:
	free_netdev(ndev);
out:
	return NULL;
}
int sirdev_put_instance(struct sir_dev *dev)
{
	int err = 0;

	IRDA_DEBUG(0, "%s\n", __FUNCTION__);

	atomic_set(&dev->enable_rx, 0);

	netif_carrier_off(dev->netdev);
	netif_device_detach(dev->netdev);

	if (dev->dongle_drv)
		err = sirdev_schedule_dongle_close(dev);
	if (err)
		ERROR("%s - error %d\n", __FUNCTION__, err);

	sirdev_close(dev->netdev);

	down(&dev->fsm.sem);
	dev->fsm.state = SIRDEV_STATE_DEAD;	/* mark stale */
	dev->dongle_drv = NULL;
	dev->priv = NULL;
	up(&dev->fsm.sem);

	/* Remove netdevice */
	unregister_netdev(dev->netdev);
	free_netdev(dev->netdev);

	return 0;
}
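/* A minimal registration sketch (hypothetical client driver, illustrative
 * only - every my_* name is an assumption; the sir_driver fields shown are
 * the ones this file actually dereferences):
 *
 *	static struct sir_driver my_sir_driver = {
 *		.owner		= THIS_MODULE,
 *		.do_write	= my_do_write,		// mandatory, see above
 *		.start_dev	= my_start_dev,
 *		.stop_dev	= my_stop_dev,
 *		.set_dtr_rts	= my_set_dtr_rts,
 *		.qos_mtt_bits	= 0x07,			// min-turn-time bits
 *	};
 *
 *	dev = sirdev_get_instance(&my_sir_driver, "my-hw");	// on probe
 *	...
 *	sirdev_put_instance(dev);				// on remove
 */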