/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/kmod.h>

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/major.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <net/sock.h>

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#ifndef CONFIG_BT_HCI_CORE_DEBUG
#undef  BT_DBG
#define BT_DBG(D...)
#endif

static void hci_cmd_task(unsigned long arg);
static void hci_rx_task(unsigned long arg);
static void hci_tx_task(unsigned long arg);
static void hci_notify(struct hci_dev *hdev, int event);

static DEFINE_RWLOCK(hci_task_lock);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI protocols */
#define HCI_MAX_PROTO   2
struct hci_proto *hci_proto[HCI_MAX_PROTO];

/* HCI notifiers list */
static struct notifier_block *hci_notifier;

/* ---- HCI notifications ---- */

int hci_register_notifier(struct notifier_block *nb)
{
        return notifier_chain_register(&hci_notifier, nb);
}

int hci_unregister_notifier(struct notifier_block *nb)
{
        return notifier_chain_unregister(&hci_notifier, nb);
}

void hci_notify(struct hci_dev *hdev, int event)
{
        notifier_call_chain(&hci_notifier, event, hdev);
}

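/* Illustrative sketch (not part of the original file): a component that
 * wants HCI_DEV_UP/HCI_DEV_DOWN style events could hook this chain with a
 * standard notifier callback, e.g.:
 *
 *      static int my_hci_event(struct notifier_block *nb,
 *                              unsigned long event, void *ptr)
 *      {
 *              struct hci_dev *hdev = ptr;
 *              ...
 *              return NOTIFY_DONE;
 *      }
 *      static struct notifier_block my_nb = { .notifier_call = my_hci_event };
 *      hci_register_notifier(&my_nb);
 */
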
/* ---- HCI requests ---- */

void hci_req_complete(struct hci_dev *hdev, int result)
{
        BT_DBG("%s result 0x%2.2x", hdev->name, result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

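/* Request lifecycle (summary added for clarity): __hci_request() below sets
 * req_status to HCI_REQ_PEND, issues the request and sleeps on req_wait_q.
 * The event handling code calls hci_req_complete() to flip the status to
 * HCI_REQ_DONE and wake the waiter; hci_req_cancel() does the same with
 * HCI_REQ_CANCELED, e.g. when the device is being closed. */
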
/* Execute request and wait for completion. */
static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
                                unsigned long opt, __u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hdev->req_status = HCI_REQ_PEND;

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        req(hdev, opt);
        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_err(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
                                unsigned long opt, __u32 timeout)
{
        int ret;

        /* Serialize all requests */
        hci_req_lock(hdev);
        ret = __hci_request(hdev, req, opt, timeout);
        hci_req_unlock(hdev);

        return ret;
}

static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset device */
        hci_send_cmd(hdev, OGF_HOST_CTL, OCF_RESET, 0, NULL);
}

static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
        struct sk_buff *skb;
        __u16 param;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Driver initialization */

        /* Special commands */
        while ((skb = skb_dequeue(&hdev->driver_init))) {
                skb->pkt_type = HCI_COMMAND_PKT;
                skb->dev = (void *) hdev;
                skb_queue_tail(&hdev->cmd_q, skb);
                hci_sched_cmd(hdev);
        }
        skb_queue_purge(&hdev->driver_init);

        /* Mandatory initialization */

        /* Reset */
        if (test_bit(HCI_QUIRK_RESET_ON_INIT, &hdev->quirks))
                hci_send_cmd(hdev, OGF_HOST_CTL, OCF_RESET, 0, NULL);

        /* Read Local Supported Features */
        hci_send_cmd(hdev, OGF_INFO_PARAM, OCF_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_send_cmd(hdev, OGF_INFO_PARAM, OCF_READ_BUFFER_SIZE, 0, NULL);

#if 0
        /* Host buffer size */
        {
                struct hci_cp_host_buffer_size cp;
                cp.acl_mtu = __cpu_to_le16(HCI_MAX_ACL_SIZE);
                cp.sco_mtu = HCI_MAX_SCO_SIZE;
                cp.acl_max_pkt = __cpu_to_le16(0xffff);
                cp.sco_max_pkt = __cpu_to_le16(0xffff);
                hci_send_cmd(hdev, OGF_HOST_CTL, OCF_HOST_BUFFER_SIZE, sizeof(cp), &cp);
        }
#endif

        /* Read BD Address */
        hci_send_cmd(hdev, OGF_INFO_PARAM, OCF_READ_BD_ADDR, 0, NULL);

        /* Read Voice Setting */
        hci_send_cmd(hdev, OGF_HOST_CTL, OCF_READ_VOICE_SETTING, 0, NULL);

        /* Optional initialization */

        /* Clear Event Filters */
        {
                struct hci_cp_set_event_flt cp;
                cp.flt_type  = HCI_FLT_CLEAR_ALL;
                hci_send_cmd(hdev, OGF_HOST_CTL, OCF_SET_EVENT_FLT, sizeof(cp), &cp);
        }

        /* Page timeout ~20 secs */
        param = __cpu_to_le16(0x8000);
        hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_PG_TIMEOUT, 2, &param);

        /* Connection accept timeout ~20 secs */
        param = __cpu_to_le16(0x7d00);
        hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_CA_TIMEOUT, 2, &param);
}
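
/* Note on the timeout values above (added for clarity): HCI timing
 * parameters are expressed in baseband slots of 0.625 ms, so
 * 0x8000 slots = 32768 * 0.625 ms, about 20.5 s, for the page timeout and
 * 0x7d00 slots = 32000 * 0.625 ms = 20 s for the connection accept timeout,
 * matching the "~20 secs" comments. */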

static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
{
        __u8 scan = opt;

        BT_DBG("%s %x", hdev->name, scan);

        /* Inquiry and Page scans */
        hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
{
        __u8 auth = opt;

        BT_DBG("%s %x", hdev->name, auth);

        /* Authentication */
        hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
        __u8 encrypt = opt;

        BT_DBG("%s %x", hdev->name, encrypt);

        /* Encryption */
        hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
        struct hci_dev *hdev = NULL;
        struct list_head *p;

        BT_DBG("%d", index);

        if (index < 0)
                return NULL;

        read_lock(&hci_dev_list_lock);
        list_for_each(p, &hci_dev_list) {
                struct hci_dev *d = list_entry(p, struct hci_dev, list);
                if (d->id == index) {
                        hdev = hci_dev_hold(d);
                        break;
                }
        }
        read_unlock(&hci_dev_list_lock);
        return hdev;
}
EXPORT_SYMBOL(hci_dev_get);

/* ---- Inquiry support ---- */
static void inquiry_cache_flush(struct hci_dev *hdev)
{
        struct inquiry_cache *cache = &hdev->inq_cache;
        struct inquiry_entry *next  = cache->list, *e;

        BT_DBG("cache %p", cache);

        cache->list = NULL;
        while ((e = next)) {
                next = e->next;
                kfree(e);
        }
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct inquiry_cache *cache = &hdev->inq_cache;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %s", cache, batostr(bdaddr));

        for (e = cache->list; e; e = e->next)
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        break;
        return e;
}

void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
{
        struct inquiry_cache *cache = &hdev->inq_cache;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

        if (!(e = hci_inquiry_cache_lookup(hdev, &data->bdaddr))) {
                /* Entry not in the cache. Add new one. */
                if (!(e = kmalloc(sizeof(struct inquiry_entry), GFP_ATOMIC)))
                        return;
                memset(e, 0, sizeof(struct inquiry_entry));
                e->next     = cache->list;
                cache->list = e;
        }

        memcpy(&e->data, data, sizeof(*data));
        e->timestamp = jiffies;
        cache->timestamp = jiffies;
}
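
/* Cache layout (summary added for clarity): the inquiry cache is a simple
 * singly linked list keyed by bdaddr. Updates for a known address overwrite
 * its inquiry_data in place; unknown addresses are pushed at the list head.
 * Per-entry and per-cache jiffies timestamps let hci_inquiry() below decide
 * when the cache is stale and must be flushed before a new inquiry. */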

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
        struct inquiry_cache *cache = &hdev->inq_cache;
        struct inquiry_info *info = (struct inquiry_info *) buf;
        struct inquiry_entry *e;
        int copied = 0;

        for (e = cache->list; e && copied < num; e = e->next, copied++) {
                struct inquiry_data *data = &e->data;
                bacpy(&info->bdaddr, &data->bdaddr);
                info->pscan_rep_mode    = data->pscan_rep_mode;
                info->pscan_period_mode = data->pscan_period_mode;
                info->pscan_mode        = data->pscan_mode;
                memcpy(info->dev_class, data->dev_class, 3);
                info->clock_offset      = data->clock_offset;
                info++;
        }

        BT_DBG("cache %p, copied %d", cache, copied);
        return copied;
}

static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
        struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
        struct hci_cp_inquiry cp;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_INQUIRY, &hdev->flags))
                return;

        /* Start Inquiry */
        memcpy(&cp.lap, &ir->lap, 3);
        cp.length  = ir->length;
        cp.num_rsp = ir->num_rsp;
        hci_send_cmd(hdev, OGF_LINK_CTL, OCF_INQUIRY, sizeof(cp), &cp);
}

int hci_inquiry(void __user *arg)
{
        __u8 __user *ptr = arg;
        struct hci_inquiry_req ir;
        struct hci_dev *hdev;
        int err = 0, do_inquiry = 0, max_rsp;
        long timeo;
        __u8 *buf;

        if (copy_from_user(&ir, ptr, sizeof(ir)))
                return -EFAULT;

        if (!(hdev = hci_dev_get(ir.dev_id)))
                return -ENODEV;

        hci_dev_lock_bh(hdev);
        if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
                                        inquiry_cache_empty(hdev) ||
                                        ir.flags & IREQ_CACHE_FLUSH) {
                inquiry_cache_flush(hdev);
                do_inquiry = 1;
        }
        hci_dev_unlock_bh(hdev);

        timeo = ir.length * 2 * HZ;
        if (do_inquiry && (err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo)) < 0)
                goto done;

        /* For an unlimited number of responses, use a buffer with 255 entries. */
        max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

        /* inquiry_cache_dump() can't sleep, so dump into a temporary buffer
         * and then copy it to user space. */
        if (!(buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL))) {
                err = -ENOMEM;
                goto done;
        }

        hci_dev_lock_bh(hdev);
        ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
        hci_dev_unlock_bh(hdev);

        BT_DBG("num_rsp %d", ir.num_rsp);

        if (!copy_to_user(ptr, &ir, sizeof(ir))) {
                ptr += sizeof(ir);
                if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
                                        ir.num_rsp))
                        err = -EFAULT;
        } else
                err = -EFAULT;

        kfree(buf);

done:
        hci_dev_put(hdev);
        return err;
}
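
/* Timing note (added for clarity, assuming standard HCI semantics):
 * ir.length is the inquiry length in 1.28 s units, so waiting
 * ir.length * 2 * HZ jiffies gives the controller roughly 2 s per unit,
 * a comfortable margin over the actual inquiry duration before the
 * request is timed out. */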

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        if (!(hdev = hci_dev_get(dev)))
                return -ENODEV;

        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_lock(hdev);

        if (test_bit(HCI_UP, &hdev->flags)) {
                ret = -EALREADY;
                goto done;
        }

        if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
                set_bit(HCI_RAW, &hdev->flags);

        if (hdev->open(hdev)) {
                ret = -EIO;
                goto done;
        }

        if (!test_bit(HCI_RAW, &hdev->flags)) {
                atomic_set(&hdev->cmd_cnt, 1);
                set_bit(HCI_INIT, &hdev->flags);

                //__hci_request(hdev, hci_reset_req, 0, HZ);
                ret = __hci_request(hdev, hci_init_req, 0, HCI_INIT_TIMEOUT);

                clear_bit(HCI_INIT, &hdev->flags);
        }

        if (!ret) {
                hci_dev_hold(hdev);
                set_bit(HCI_UP, &hdev->flags);
                hci_notify(hdev, HCI_DEV_UP);
        } else {
                /* Init failed, cleanup */
                tasklet_kill(&hdev->rx_task);
                tasklet_kill(&hdev->tx_task);
                tasklet_kill(&hdev->cmd_task);

                skb_queue_purge(&hdev->cmd_q);
                skb_queue_purge(&hdev->rx_q);

                if (hdev->flush)
                        hdev->flush(hdev);

                if (hdev->sent_cmd) {
                        kfree_skb(hdev->sent_cmd);
                        hdev->sent_cmd = NULL;
                }

                hdev->close(hdev);
                hdev->flags = 0;
        }

done:
        hci_req_unlock(hdev);
        hci_dev_put(hdev);
        return ret;
}

static int hci_dev_do_close(struct hci_dev *hdev)
{
        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_cancel(hdev, ENODEV);
        hci_req_lock(hdev);

        if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
                hci_req_unlock(hdev);
                return 0;
        }

        /* Kill RX and TX tasks */
        tasklet_kill(&hdev->rx_task);
        tasklet_kill(&hdev->tx_task);

        hci_dev_lock_bh(hdev);
        inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock_bh(hdev);

        hci_notify(hdev, HCI_DEV_DOWN);

        if (hdev->flush)
                hdev->flush(hdev);

        /* Reset device */
        skb_queue_purge(&hdev->cmd_q);
        atomic_set(&hdev->cmd_cnt, 1);
        if (!test_bit(HCI_RAW, &hdev->flags)) {
                set_bit(HCI_INIT, &hdev->flags);
                __hci_request(hdev, hci_reset_req, 0, HZ/4);
                clear_bit(HCI_INIT, &hdev->flags);
        }

        /* Kill cmd task */
        tasklet_kill(&hdev->cmd_task);

        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);
        skb_queue_purge(&hdev->raw_q);

        /* Drop last sent command */
        if (hdev->sent_cmd) {
                kfree_skb(hdev->sent_cmd);
                hdev->sent_cmd = NULL;
        }

        /* After this point our queues are empty
         * and no tasks are scheduled. */
        hdev->close(hdev);

        /* Clear flags */
        hdev->flags = 0;

        hci_req_unlock(hdev);

        hci_dev_put(hdev);
        return 0;
}

int hci_dev_close(__u16 dev)
{
        struct hci_dev *hdev;
        int err;

        if (!(hdev = hci_dev_get(dev)))
                return -ENODEV;
        err = hci_dev_do_close(hdev);
        hci_dev_put(hdev);
        return err;
}

int hci_dev_reset(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        if (!(hdev = hci_dev_get(dev)))
                return -ENODEV;

        hci_req_lock(hdev);
        tasklet_disable(&hdev->tx_task);

        if (!test_bit(HCI_UP, &hdev->flags))
                goto done;

        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);

        hci_dev_lock_bh(hdev);
        inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock_bh(hdev);

        if (hdev->flush)
                hdev->flush(hdev);

        atomic_set(&hdev->cmd_cnt, 1);
        hdev->acl_cnt = 0; hdev->sco_cnt = 0;

        if (!test_bit(HCI_RAW, &hdev->flags))
                ret = __hci_request(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
        tasklet_enable(&hdev->tx_task);
        hci_req_unlock(hdev);
        hci_dev_put(hdev);
        return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        if (!(hdev = hci_dev_get(dev)))
                return -ENODEV;

        memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

        hci_dev_put(hdev);

        return ret;
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_req dr;
        int err = 0;

        if (copy_from_user(&dr, arg, sizeof(dr)))
                return -EFAULT;

        if (!(hdev = hci_dev_get(dr.dev_id)))
                return -ENODEV;

        switch (cmd) {
        case HCISETAUTH:
                err = hci_request(hdev, hci_auth_req, dr.dev_opt, HCI_INIT_TIMEOUT);
                break;

        case HCISETENCRYPT:
                if (!lmp_encrypt_capable(hdev)) {
                        err = -EOPNOTSUPP;
                        break;
                }

                if (!test_bit(HCI_AUTH, &hdev->flags)) {
                        /* Auth must be enabled first */
                        err = hci_request(hdev, hci_auth_req,
                                        dr.dev_opt, HCI_INIT_TIMEOUT);
                        if (err)
                                break;
                }

                err = hci_request(hdev, hci_encrypt_req,
                                        dr.dev_opt, HCI_INIT_TIMEOUT);
                break;

        case HCISETSCAN:
                err = hci_request(hdev, hci_scan_req, dr.dev_opt, HCI_INIT_TIMEOUT);
                break;

        case HCISETPTYPE:
                hdev->pkt_type = (__u16) dr.dev_opt;
                break;

        case HCISETLINKPOL:
                hdev->link_policy = (__u16) dr.dev_opt;
                break;

        case HCISETLINKMODE:
                hdev->link_mode = ((__u16) dr.dev_opt) & (HCI_LM_MASTER | HCI_LM_ACCEPT);
                break;

        case HCISETACLMTU:
                hdev->acl_mtu  = *((__u16 *)&dr.dev_opt + 1);
                hdev->acl_pkts = *((__u16 *)&dr.dev_opt + 0);
                break;

        case HCISETSCOMTU:
                hdev->sco_mtu  = *((__u16 *)&dr.dev_opt + 1);
                hdev->sco_pkts = *((__u16 *)&dr.dev_opt + 0);
                break;

        default:
                err = -EINVAL;
                break;
        }
        hci_dev_put(hdev);
        return err;
}
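
/* Note (added for clarity): HCISETACLMTU/HCISETSCOMTU pack two values into
 * the 32-bit dev_opt. The code above reads them back as two adjacent
 * __u16s: the halfword at offset 1 carries the MTU, the halfword at
 * offset 0 the packet count. */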

int hci_get_dev_list(void __user *arg)
{
        struct hci_dev_list_req *dl;
        struct hci_dev_req *dr;
        struct list_head *p;
        int n = 0, size, err;
        __u16 dev_num;

        if (get_user(dev_num, (__u16 __user *) arg))
                return -EFAULT;

        if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
                return -EINVAL;

        size = sizeof(*dl) + dev_num * sizeof(*dr);

        if (!(dl = kmalloc(size, GFP_KERNEL)))
                return -ENOMEM;

        dr = dl->dev_req;

        read_lock_bh(&hci_dev_list_lock);
        list_for_each(p, &hci_dev_list) {
                struct hci_dev *hdev;
                hdev = list_entry(p, struct hci_dev, list);
                (dr + n)->dev_id  = hdev->id;
                (dr + n)->dev_opt = hdev->flags;
                if (++n >= dev_num)
                        break;
        }
        read_unlock_bh(&hci_dev_list_lock);

        dl->dev_num = n;
        size = sizeof(*dl) + n * sizeof(*dr);

        err = copy_to_user(arg, dl, size);
        kfree(dl);

        return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_info di;
        int err = 0;

        if (copy_from_user(&di, arg, sizeof(di)))
                return -EFAULT;

        if (!(hdev = hci_dev_get(di.dev_id)))
                return -ENODEV;

        strcpy(di.name, hdev->name);
        di.bdaddr   = hdev->bdaddr;
        di.type     = hdev->type;
        di.flags    = hdev->flags;
        di.pkt_type = hdev->pkt_type;
        di.acl_mtu  = hdev->acl_mtu;
        di.acl_pkts = hdev->acl_pkts;
        di.sco_mtu  = hdev->sco_mtu;
        di.sco_pkts = hdev->sco_pkts;
        di.link_policy = hdev->link_policy;
        di.link_mode   = hdev->link_mode;

        memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
        memcpy(&di.features, &hdev->features, sizeof(di.features));

        if (copy_to_user(arg, &di, sizeof(di)))
                err = -EFAULT;

        hci_dev_put(hdev);

        return err;
}

/* ---- Interface to HCI drivers ---- */

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
        struct hci_dev *hdev;

        hdev = kmalloc(sizeof(struct hci_dev), GFP_KERNEL);
        if (!hdev)
                return NULL;

        memset(hdev, 0, sizeof(struct hci_dev));

        skb_queue_head_init(&hdev->driver_init);

        return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
        skb_queue_purge(&hdev->driver_init);

        /* will free via class release */
        class_device_put(&hdev->class_dev);
}
EXPORT_SYMBOL(hci_free_dev);

/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
        struct list_head *head = &hci_dev_list, *p;
        int id = 0;

        BT_DBG("%p name %s type %d owner %p", hdev, hdev->name, hdev->type, hdev->owner);

        if (!hdev->open || !hdev->close || !hdev->destruct)
                return -EINVAL;

        write_lock_bh(&hci_dev_list_lock);

        /* Find first available device id */
        list_for_each(p, &hci_dev_list) {
                if (list_entry(p, struct hci_dev, list)->id != id)
                        break;
                head = p; id++;
        }

        sprintf(hdev->name, "hci%d", id);
        hdev->id = id;
        list_add(&hdev->list, head);

        atomic_set(&hdev->refcnt, 1);
        spin_lock_init(&hdev->lock);

        hdev->flags = 0;
        hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
        hdev->link_mode = (HCI_LM_ACCEPT);

        tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
        tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
        tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);

        skb_queue_head_init(&hdev->rx_q);
        skb_queue_head_init(&hdev->cmd_q);
        skb_queue_head_init(&hdev->raw_q);

        init_waitqueue_head(&hdev->req_wait_q);
        init_MUTEX(&hdev->req_lock);

        inquiry_cache_init(hdev);

        hci_conn_hash_init(hdev);

        memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

        atomic_set(&hdev->promisc, 0);

        write_unlock_bh(&hci_dev_list_lock);

        hci_register_sysfs(hdev);

        hci_notify(hdev, HCI_DEV_REG);

        return id;
}
EXPORT_SYMBOL(hci_register_dev);
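
/* Illustrative driver-side sketch (not part of the original file): a
 * transport driver typically allocates and registers a device like this,
 * where my_open/my_close/my_send/my_destruct are hypothetical driver
 * callbacks:
 *
 *      struct hci_dev *hdev = hci_alloc_dev();
 *      if (!hdev)
 *              return -ENOMEM;
 *      hdev->type     = HCI_USB;       // or whatever bus the driver sits on
 *      hdev->open     = my_open;
 *      hdev->close    = my_close;
 *      hdev->send     = my_send;
 *      hdev->destruct = my_destruct;
 *      if (hci_register_dev(hdev) < 0)
 *              hci_free_dev(hdev);
 */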

/* Unregister HCI device */
int hci_unregister_dev(struct hci_dev *hdev)
{
        BT_DBG("%p name %s type %d", hdev, hdev->name, hdev->type);

        hci_unregister_sysfs(hdev);

        write_lock_bh(&hci_dev_list_lock);
        list_del(&hdev->list);
        write_unlock_bh(&hci_dev_list_lock);

        hci_dev_do_close(hdev);

        hci_notify(hdev, HCI_DEV_UNREG);

        __hci_dev_put(hdev);
        return 0;
}
EXPORT_SYMBOL(hci_unregister_dev);

/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
        hci_notify(hdev, HCI_DEV_SUSPEND);
        return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
        hci_notify(hdev, HCI_DEV_RESUME);
        return 0;
}
EXPORT_SYMBOL(hci_resume_dev);

/* ---- Interface to upper protocols ---- */

/* Register/Unregister protocols.
 * hci_task_lock is used to ensure that no tasks are running. */
int hci_register_proto(struct hci_proto *hp)
{
        int err = 0;

        BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

        if (hp->id >= HCI_MAX_PROTO)
                return -EINVAL;

        write_lock_bh(&hci_task_lock);

        if (!hci_proto[hp->id])
                hci_proto[hp->id] = hp;
        else
                err = -EEXIST;

        write_unlock_bh(&hci_task_lock);

        return err;
}
EXPORT_SYMBOL(hci_register_proto);

int hci_unregister_proto(struct hci_proto *hp)
{
        int err = 0;

        BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

        if (hp->id >= HCI_MAX_PROTO)
                return -EINVAL;

        write_lock_bh(&hci_task_lock);

        if (hci_proto[hp->id])
                hci_proto[hp->id] = NULL;
        else
                err = -ENOENT;

        write_unlock_bh(&hci_task_lock);

        return err;
}
EXPORT_SYMBOL(hci_unregister_proto);

int hci_register_cb(struct hci_cb *cb)
{
        BT_DBG("%p name %s", cb, cb->name);

        write_lock_bh(&hci_cb_list_lock);
        list_add(&cb->list, &hci_cb_list);
        write_unlock_bh(&hci_cb_list_lock);

        return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
        BT_DBG("%p name %s", cb, cb->name);

        write_lock_bh(&hci_cb_list_lock);
        list_del(&cb->list);
        write_unlock_bh(&hci_cb_list_lock);

        return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);

static int hci_send_frame(struct sk_buff *skb)
{
        struct hci_dev *hdev = (struct hci_dev *) skb->dev;

        if (!hdev) {
                kfree_skb(skb);
                return -ENODEV;
        }

        BT_DBG("%s type %d len %d", hdev->name, skb->pkt_type, skb->len);

        if (atomic_read(&hdev->promisc)) {
                /* Time stamp */
                do_gettimeofday(&skb->stamp);

                hci_send_to_sock(hdev, skb);
        }

        /* Get rid of skb owner, prior to sending to the driver. */
        skb_orphan(skb);

        return hdev->send(skb);
}

/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 ogf, __u16 ocf, __u32 plen, void *param)
{
        int len = HCI_COMMAND_HDR_SIZE + plen;
        struct hci_command_hdr *hdr;
        struct sk_buff *skb;

        BT_DBG("%s ogf 0x%x ocf 0x%x plen %d", hdev->name, ogf, ocf, plen);

        skb = bt_skb_alloc(len, GFP_ATOMIC);
        if (!skb) {
                BT_ERR("%s Can't allocate memory for HCI command", hdev->name);
                return -ENOMEM;
        }

        hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
        hdr->opcode = __cpu_to_le16(hci_opcode_pack(ogf, ocf));
        hdr->plen   = plen;

        if (plen)
                memcpy(skb_put(skb, plen), param, plen);

        BT_DBG("skb len %d", skb->len);

        skb->pkt_type = HCI_COMMAND_PKT;
        skb->dev = (void *) hdev;
        skb_queue_tail(&hdev->cmd_q, skb);
        hci_sched_cmd(hdev);

        return 0;
}
EXPORT_SYMBOL(hci_send_cmd);

/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 ogf, __u16 ocf)
{
        struct hci_command_hdr *hdr;

        if (!hdev->sent_cmd)
                return NULL;

        hdr = (void *) hdev->sent_cmd->data;

        if (hdr->opcode != __cpu_to_le16(hci_opcode_pack(ogf, ocf)))
                return NULL;

        BT_DBG("%s ogf 0x%x ocf 0x%x", hdev->name, ogf, ocf);

        return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}
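
/* Usage note (added for clarity): event handlers call this when processing
 * a Command Complete/Command Status event to recover the parameters of the
 * command that is being acknowledged; hci_cmd_task() below keeps a clone of
 * the last transmitted command in hdev->sent_cmd for exactly this purpose. */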

/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
        struct hci_acl_hdr *hdr;
        int len = skb->len;

        hdr = (struct hci_acl_hdr *) skb_push(skb, HCI_ACL_HDR_SIZE);
        hdr->handle = __cpu_to_le16(hci_handle_pack(handle, flags));
        hdr->dlen   = __cpu_to_le16(len);

        skb->h.raw = (void *) hdr;
}

int hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
{
        struct hci_dev *hdev = conn->hdev;
        struct sk_buff *list;

        BT_DBG("%s conn %p flags 0x%x", hdev->name, conn, flags);

        skb->dev = (void *) hdev;
        skb->pkt_type = HCI_ACLDATA_PKT;
        hci_add_acl_hdr(skb, conn->handle, flags | ACL_START);

        if (!(list = skb_shinfo(skb)->frag_list)) {
                /* Non fragmented */
                BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

                skb_queue_tail(&conn->data_q, skb);
        } else {
                /* Fragmented */
                BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

                skb_shinfo(skb)->frag_list = NULL;

                /* Queue all fragments atomically */
                spin_lock_bh(&conn->data_q.lock);

                __skb_queue_tail(&conn->data_q, skb);
                do {
                        skb = list; list = list->next;

                        skb->dev = (void *) hdev;
                        skb->pkt_type = HCI_ACLDATA_PKT;
                        hci_add_acl_hdr(skb, conn->handle, flags | ACL_CONT);

                        BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

                        __skb_queue_tail(&conn->data_q, skb);
                } while (list);

                spin_unlock_bh(&conn->data_q.lock);
        }

        hci_sched_tx(hdev);
        return 0;
}
EXPORT_SYMBOL(hci_send_acl);
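
/* Fragmentation note (added for clarity): when an upper-layer payload
 * exceeds the controller's ACL MTU, the caller hands down one skb whose
 * frag_list carries the remaining fragments. The first fragment is tagged
 * ACL_START and each continuation ACL_CONT, and the whole run is queued
 * under the data_q lock so the TX task can never interleave another packet
 * into the middle of a fragmented ACL frame. */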

/* Send SCO data */
int hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
        struct hci_dev *hdev = conn->hdev;
        struct hci_sco_hdr hdr;

        BT_DBG("%s len %d", hdev->name, skb->len);

        if (skb->len > hdev->sco_mtu) {
                kfree_skb(skb);
                return -EINVAL;
        }

        hdr.handle = __cpu_to_le16(conn->handle);
        hdr.dlen   = skb->len;

        skb->h.raw = skb_push(skb, HCI_SCO_HDR_SIZE);
        memcpy(skb->h.raw, &hdr, HCI_SCO_HDR_SIZE);

        skb->dev = (void *) hdev;
        skb->pkt_type = HCI_SCODATA_PKT;
        skb_queue_tail(&conn->data_q, skb);
        hci_sched_tx(hdev);
        return 0;
}
EXPORT_SYMBOL(hci_send_sco);

/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
        struct hci_conn_hash *h = &hdev->conn_hash;
        struct hci_conn  *conn = NULL;
        int num = 0, min = ~0;
        struct list_head *p;

        /* We don't have to lock device here. Connections are always
         * added and removed with TX task disabled. */
        list_for_each(p, &h->list) {
                struct hci_conn *c;
                c = list_entry(p, struct hci_conn, list);

                if (c->type != type || c->state != BT_CONNECTED
                                || skb_queue_empty(&c->data_q))
                        continue;
                num++;

                if (c->sent < min) {
                        min  = c->sent;
                        conn = c;
                }
        }

        if (conn) {
                int cnt = (type == ACL_LINK ? hdev->acl_cnt : hdev->sco_cnt);
                int q = cnt / num;
                *quote = q ? q : 1;
        } else
                *quote = 0;

        BT_DBG("conn %p quote %d", conn, *quote);
        return conn;
}
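
/* Scheduling note (added for clarity): among connections of the requested
 * type with queued data, the one with the fewest unacknowledged packets
 * (lowest c->sent) is picked, and its quote is the controller's remaining
 * buffer credits divided evenly by the number of ready connections, with a
 * minimum of 1. Repeated calls from the schedulers below therefore share
 * the controller buffers roughly fairly across busy connections. */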

static inline void hci_acl_tx_to(struct hci_dev *hdev)
{
        struct hci_conn_hash *h = &hdev->conn_hash;
        struct list_head *p;
        struct hci_conn  *c;

        BT_ERR("%s ACL tx timeout", hdev->name);

        /* Kill stalled connections */
        list_for_each(p, &h->list) {
                c = list_entry(p, struct hci_conn, list);
                if (c->type == ACL_LINK && c->sent) {
                        BT_ERR("%s killing stalled ACL connection %s",
                                hdev->name, batostr(&c->dst));
                        hci_acl_disconn(c, 0x13);
                }
        }
}

static inline void hci_sched_acl(struct hci_dev *hdev)
{
        struct hci_conn *conn;
        struct sk_buff *skb;
        int quote;

        BT_DBG("%s", hdev->name);

        if (!test_bit(HCI_RAW, &hdev->flags)) {
                /* ACL tx timeout must be longer than maximum
                 * link supervision timeout (40.9 seconds) */
                if (!hdev->acl_cnt && (jiffies - hdev->acl_last_tx) > (HZ * 45))
                        hci_acl_tx_to(hdev);
        }

        while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
                while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
                        BT_DBG("skb %p len %d", skb, skb->len);
                        hci_send_frame(skb);
                        hdev->acl_last_tx = jiffies;

                        hdev->acl_cnt--;
                        conn->sent++;
                }
        }
}

/* Schedule SCO */
static inline void hci_sched_sco(struct hci_dev *hdev)
{
        struct hci_conn *conn;
        struct sk_buff *skb;
        int quote;

        BT_DBG("%s", hdev->name);

        while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
                while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
                        BT_DBG("skb %p len %d", skb, skb->len);
                        hci_send_frame(skb);

                        conn->sent++;
                        if (conn->sent == ~0)
                                conn->sent = 0;
                }
        }
}

static void hci_tx_task(unsigned long arg)
{
        struct hci_dev *hdev = (struct hci_dev *) arg;
        struct sk_buff *skb;

        read_lock(&hci_task_lock);

        BT_DBG("%s acl %d sco %d", hdev->name, hdev->acl_cnt, hdev->sco_cnt);

        /* Schedule queues and send stuff to HCI driver */

        hci_sched_acl(hdev);

        hci_sched_sco(hdev);

        /* Send next queued raw (unknown type) packet */
        while ((skb = skb_dequeue(&hdev->raw_q)))
                hci_send_frame(skb);

        read_unlock(&hci_task_lock);
}

/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
        struct hci_acl_hdr *hdr = (void *) skb->data;
        struct hci_conn *conn;
        __u16 handle, flags;

        skb_pull(skb, HCI_ACL_HDR_SIZE);

        handle = __le16_to_cpu(hdr->handle);
        flags  = hci_flags(handle);
        handle = hci_handle(handle);

        BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);

        hdev->stat.acl_rx++;

        hci_dev_lock(hdev);
        conn = hci_conn_hash_lookup_handle(hdev, handle);
        hci_dev_unlock(hdev);

        if (conn) {
                register struct hci_proto *hp;

                /* Send to upper protocol */
                if ((hp = hci_proto[HCI_PROTO_L2CAP]) && hp->recv_acldata) {
                        hp->recv_acldata(conn, skb, flags);
                        return;
                }
        } else {
                BT_ERR("%s ACL packet for unknown connection handle %d",
                        hdev->name, handle);
        }

        kfree_skb(skb);
}

/* SCO data packet */
static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
        struct hci_sco_hdr *hdr = (void *) skb->data;
        struct hci_conn *conn;
        __u16 handle;

        skb_pull(skb, HCI_SCO_HDR_SIZE);

        handle = __le16_to_cpu(hdr->handle);

        BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);

        hdev->stat.sco_rx++;

        hci_dev_lock(hdev);
        conn = hci_conn_hash_lookup_handle(hdev, handle);
        hci_dev_unlock(hdev);

        if (conn) {
                register struct hci_proto *hp;

                /* Send to upper protocol */
                if ((hp = hci_proto[HCI_PROTO_SCO]) && hp->recv_scodata) {
                        hp->recv_scodata(conn, skb);
                        return;
                }
        } else {
                BT_ERR("%s SCO packet for unknown connection handle %d",
                        hdev->name, handle);
        }

        kfree_skb(skb);
}

void hci_rx_task(unsigned long arg)
{
        struct hci_dev *hdev = (struct hci_dev *) arg;
        struct sk_buff *skb;

        BT_DBG("%s", hdev->name);

        read_lock(&hci_task_lock);

        while ((skb = skb_dequeue(&hdev->rx_q))) {
                if (atomic_read(&hdev->promisc)) {
                        /* Send copy to the sockets */
                        hci_send_to_sock(hdev, skb);
                }

                if (test_bit(HCI_RAW, &hdev->flags)) {
                        kfree_skb(skb);
                        continue;
                }

                if (test_bit(HCI_INIT, &hdev->flags)) {
                        /* Don't process data packets in this state. */
                        switch (skb->pkt_type) {
                        case HCI_ACLDATA_PKT:
                        case HCI_SCODATA_PKT:
                                kfree_skb(skb);
                                continue;
                        }
                }

                /* Process frame */
                switch (skb->pkt_type) {
                case HCI_EVENT_PKT:
                        hci_event_packet(hdev, skb);
                        break;

                case HCI_ACLDATA_PKT:
                        BT_DBG("%s ACL data packet", hdev->name);
                        hci_acldata_packet(hdev, skb);
                        break;

                case HCI_SCODATA_PKT:
                        BT_DBG("%s SCO data packet", hdev->name);
                        hci_scodata_packet(hdev, skb);
                        break;

                default:
                        kfree_skb(skb);
                        break;
                }
        }

        read_unlock(&hci_task_lock);
}

static void hci_cmd_task(unsigned long arg)
{
        struct hci_dev *hdev = (struct hci_dev *) arg;
        struct sk_buff *skb;

        BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

        if (!atomic_read(&hdev->cmd_cnt) && (jiffies - hdev->cmd_last_tx) > HZ) {
                BT_ERR("%s command tx timeout", hdev->name);
                atomic_set(&hdev->cmd_cnt, 1);
        }

        /* Send queued commands */
        if (atomic_read(&hdev->cmd_cnt) && (skb = skb_dequeue(&hdev->cmd_q))) {
                if (hdev->sent_cmd)
                        kfree_skb(hdev->sent_cmd);

                if ((hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC))) {
                        atomic_dec(&hdev->cmd_cnt);
                        hci_send_frame(skb);
                        hdev->cmd_last_tx = jiffies;
                } else {
                        skb_queue_head(&hdev->cmd_q, skb);
                        hci_sched_cmd(hdev);
                }
        }
}
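
/* Flow-control note (added for clarity): cmd_cnt mirrors the controller's
 * command buffer credits. It starts at 1, is decremented for each command
 * handed to the driver, and is replenished from the Num_HCI_Command_Packets
 * field carried by Command Complete/Command Status events (handled in the
 * event code). If no credit arrives within a second of the last send, the
 * timeout above forces one credit so the queue cannot stall forever. */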