upgrade to linux 2.6.10-1.12_FC2
[linux-2.6.git] / net/bluetooth/hci_core.c
1 /* 
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4
5    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES 
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, 
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS 
22    SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI core. */
26
27 #include <linux/config.h>
28 #include <linux/module.h>
29 #include <linux/kmod.h>
30
31 #include <linux/types.h>
32 #include <linux/errno.h>
33 #include <linux/kernel.h>
34 #include <linux/major.h>
35 #include <linux/sched.h>
36 #include <linux/slab.h>
37 #include <linux/poll.h>
38 #include <linux/fcntl.h>
39 #include <linux/init.h>
40 #include <linux/skbuff.h>
41 #include <linux/interrupt.h>
42 #include <linux/notifier.h>
43 #include <net/sock.h>
44
45 #include <asm/system.h>
46 #include <asm/uaccess.h>
47 #include <asm/unaligned.h>
48
49 #include <net/bluetooth/bluetooth.h>
50 #include <net/bluetooth/hci_core.h>
51
52 #ifndef CONFIG_BT_HCI_CORE_DEBUG
53 #undef  BT_DBG
54 #define BT_DBG(D...)
55 #endif
56
57 static void hci_cmd_task(unsigned long arg);
58 static void hci_rx_task(unsigned long arg);
59 static void hci_tx_task(unsigned long arg);
60 static void hci_notify(struct hci_dev *hdev, int event);
61
62 rwlock_t hci_task_lock = RW_LOCK_UNLOCKED;
63
64 /* HCI device list */
65 LIST_HEAD(hci_dev_list);
66 rwlock_t hci_dev_list_lock = RW_LOCK_UNLOCKED;
67
68 /* HCI callback list */
69 LIST_HEAD(hci_cb_list);
70 rwlock_t hci_cb_list_lock = RW_LOCK_UNLOCKED;
71
72 /* HCI protocols */
73 #define HCI_MAX_PROTO   2
74 struct hci_proto *hci_proto[HCI_MAX_PROTO];
75
76 /* HCI notifiers list */
77 static struct notifier_block *hci_notifier;
78
79 /* ---- HCI notifications ---- */
80
81 int hci_register_notifier(struct notifier_block *nb)
82 {
83         return notifier_chain_register(&hci_notifier, nb);
84 }
85
86 int hci_unregister_notifier(struct notifier_block *nb)
87 {
88         return notifier_chain_unregister(&hci_notifier, nb);
89 }
90
91 void hci_notify(struct hci_dev *hdev, int event)
92 {
93         notifier_call_chain(&hci_notifier, event, hdev);
94 }
95
96 /* ---- HCI requests ---- */
97
98 void hci_req_complete(struct hci_dev *hdev, int result)
99 {
100         BT_DBG("%s result 0x%2.2x", hdev->name, result);
101
102         if (hdev->req_status == HCI_REQ_PEND) {
103                 hdev->req_result = result;
104                 hdev->req_status = HCI_REQ_DONE;
105                 wake_up_interruptible(&hdev->req_wait_q);
106         }
107 }
108
109 void hci_req_cancel(struct hci_dev *hdev, int err)
110 {
111         BT_DBG("%s err 0x%2.2x", hdev->name, err);
112
113         if (hdev->req_status == HCI_REQ_PEND) {
114                 hdev->req_result = err;
115                 hdev->req_status = HCI_REQ_CANCELED;
116                 wake_up_interruptible(&hdev->req_wait_q);
117         }
118 }
119
120 /* Execute request and wait for completion. */
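/* The request callback queues one or more HCI commands; the waiter is
 * woken from hci_req_complete() or hci_req_cancel() once the controller
 * answers or the request is aborted. Callers serialize through
 * hdev->req_lock, see hci_request() below. */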
121 static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt), 
122                                 unsigned long opt, __u32 timeout)
123 {
124         DECLARE_WAITQUEUE(wait, current);
125         int err = 0;
126
127         BT_DBG("%s start", hdev->name);
128
129         hdev->req_status = HCI_REQ_PEND;
130
131         add_wait_queue(&hdev->req_wait_q, &wait);
132         set_current_state(TASK_INTERRUPTIBLE);
133
134         req(hdev, opt);
135         schedule_timeout(timeout);
136
137         remove_wait_queue(&hdev->req_wait_q, &wait);
138
139         if (signal_pending(current))
140                 return -EINTR;
141
142         switch (hdev->req_status) {
143         case HCI_REQ_DONE:
144                 err = -bt_err(hdev->req_result);
145                 break;
146
147         case HCI_REQ_CANCELED:
148                 err = -hdev->req_result;
149                 break;
150
151         default:
152                 err = -ETIMEDOUT;
153                 break;
154         }
155
156         hdev->req_status = hdev->req_result = 0;
157
158         BT_DBG("%s end: err %d", hdev->name, err);
159
160         return err;
161 }
162
163 static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
164                                 unsigned long opt, __u32 timeout)
165 {
166         int ret;
167
168         /* Serialize all requests */
169         hci_req_lock(hdev);
170         ret = __hci_request(hdev, req, opt, timeout);
171         hci_req_unlock(hdev);
172
173         return ret;
174 }
175
176 static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
177 {
178         BT_DBG("%s %ld", hdev->name, opt);
179
180         /* Reset device */
181         hci_send_cmd(hdev, OGF_HOST_CTL, OCF_RESET, 0, NULL);
182 }
183
184 static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
185 {
186         __u16 param;
187
188         BT_DBG("%s %ld", hdev->name, opt);
189
190         /* Mandatory initialization */
191
192         /* Reset */
193         if (test_bit(HCI_QUIRK_RESET_ON_INIT, &hdev->quirks))
194                         hci_send_cmd(hdev, OGF_HOST_CTL, OCF_RESET, 0, NULL);
195
196         /* Read Local Supported Features */
197         hci_send_cmd(hdev, OGF_INFO_PARAM, OCF_READ_LOCAL_FEATURES, 0, NULL);
198
199         /* Read Buffer Size (ACL mtu, max pkt, etc.) */
200         hci_send_cmd(hdev, OGF_INFO_PARAM, OCF_READ_BUFFER_SIZE, 0, NULL);
201
202 #if 0
203         /* Host buffer size */
204         {
205                 struct hci_cp_host_buffer_size cp;
206                 cp.acl_mtu = __cpu_to_le16(HCI_MAX_ACL_SIZE);
207                 cp.sco_mtu = HCI_MAX_SCO_SIZE;
208                 cp.acl_max_pkt = __cpu_to_le16(0xffff);
209                 cp.sco_max_pkt = __cpu_to_le16(0xffff);
210                 hci_send_cmd(hdev, OGF_HOST_CTL, OCF_HOST_BUFFER_SIZE, sizeof(cp), &cp);
211         }
212 #endif
213
214         /* Read BD Address */
215         hci_send_cmd(hdev, OGF_INFO_PARAM, OCF_READ_BD_ADDR, 0, NULL);
216
217         /* Read Voice Setting */
218         hci_send_cmd(hdev, OGF_HOST_CTL, OCF_READ_VOICE_SETTING, 0, NULL);
219
220         /* Optional initialization */
221
222         /* Clear Event Filters */
223         {
224                 struct hci_cp_set_event_flt cp;
225                 cp.flt_type  = HCI_FLT_CLEAR_ALL;
226                 hci_send_cmd(hdev, OGF_HOST_CTL, OCF_SET_EVENT_FLT, sizeof(cp), &cp);
227         }
228
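        /* These HCI timeout parameters are expressed in baseband slots of
         * 0.625 ms: 0x8000 slots = 20.48 s and 0x7d00 slots = 20.0 s. */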
229         /* Page timeout ~20 secs */
230         param = __cpu_to_le16(0x8000);
231         hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_PG_TIMEOUT, 2, &param);
232
233         /* Connection accept timeout ~20 secs */
234         param = __cpu_to_le16(0x7d00);
235         hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_CA_TIMEOUT, 2, &param);
236 }
237
238 static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
239 {
240         __u8 scan = opt;
241
242         BT_DBG("%s %x", hdev->name, scan);
243
244         /* Inquiry and Page scans */
245         hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_SCAN_ENABLE, 1, &scan);
246 }
247
248 static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
249 {
250         __u8 auth = opt;
251
252         BT_DBG("%s %x", hdev->name, auth);
253
254         /* Authentication */
255         hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_AUTH_ENABLE, 1, &auth);
256 }
257
258 static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
259 {
260         __u8 encrypt = opt;
261
262         BT_DBG("%s %x", hdev->name, encrypt);
263
264         /* Encryption */
265         hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_ENCRYPT_MODE, 1, &encrypt);
266 }
267
268 /* Get HCI device by index. 
269  * Device is held on return. */
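/* The reference taken here must be released with hci_dev_put(). */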
270 struct hci_dev *hci_dev_get(int index)
271 {
272         struct hci_dev *hdev = NULL;
273         struct list_head *p;
274
275         BT_DBG("%d", index);
276
277         if (index < 0)
278                 return NULL;
279
280         read_lock(&hci_dev_list_lock);
281         list_for_each(p, &hci_dev_list) {
282                 struct hci_dev *d = list_entry(p, struct hci_dev, list);
283                 if (d->id == index) {
284                         hdev = hci_dev_hold(d);
285                         break;
286                 }
287         }
288         read_unlock(&hci_dev_list_lock);
289         return hdev;
290 }
291 EXPORT_SYMBOL(hci_dev_get);
292
293 /* ---- Inquiry support ---- */
294 static void inquiry_cache_flush(struct hci_dev *hdev)
295 {
296         struct inquiry_cache *cache = &hdev->inq_cache;
297         struct inquiry_entry *next  = cache->list, *e;
298
299         BT_DBG("cache %p", cache);
300
301         cache->list = NULL;
302         while ((e = next)) {
303                 next = e->next;
304                 kfree(e);
305         }
306 }
307
308 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
309 {
310         struct inquiry_cache *cache = &hdev->inq_cache;
311         struct inquiry_entry *e;
312
313         BT_DBG("cache %p, %s", cache, batostr(bdaddr));
314
315         for (e = cache->list; e; e = e->next)
316                 if (!bacmp(&e->data.bdaddr, bdaddr))
317                         break;
318         return e;
319 }
320
321 void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
322 {
323         struct inquiry_cache *cache = &hdev->inq_cache;
324         struct inquiry_entry *e;
325
326         BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));
327
328         if (!(e = hci_inquiry_cache_lookup(hdev, &data->bdaddr))) {
329                 /* Entry not in the cache. Add new one. */
330                 if (!(e = kmalloc(sizeof(struct inquiry_entry), GFP_ATOMIC)))
331                         return;
332                 memset(e, 0, sizeof(struct inquiry_entry));
333                 e->next     = cache->list;
334                 cache->list = e;
335         }
336
337         memcpy(&e->data, data, sizeof(*data));
338         e->timestamp = jiffies;
339         cache->timestamp = jiffies;
340 }
341
342 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
343 {
344         struct inquiry_cache *cache = &hdev->inq_cache;
345         struct inquiry_info *info = (struct inquiry_info *) buf;
346         struct inquiry_entry *e;
347         int copied = 0;
348
349         for (e = cache->list; e && copied < num; e = e->next, copied++) {
350                 struct inquiry_data *data = &e->data;
351                 bacpy(&info->bdaddr, &data->bdaddr);
352                 info->pscan_rep_mode    = data->pscan_rep_mode;
353                 info->pscan_period_mode = data->pscan_period_mode;
354                 info->pscan_mode        = data->pscan_mode;
355                 memcpy(info->dev_class, data->dev_class, 3);
356                 info->clock_offset      = data->clock_offset;
357                 info++;
358         }
359
360         BT_DBG("cache %p, copied %d", cache, copied);
361         return copied;
362 }
363
364 static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
365 {
366         struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
367         struct hci_cp_inquiry cp;
368
369         BT_DBG("%s", hdev->name);
370
371         if (test_bit(HCI_INQUIRY, &hdev->flags))
372                 return;
373
374         /* Start Inquiry */
375         memcpy(&cp.lap, &ir->lap, 3);
376         cp.length  = ir->length;
377         cp.num_rsp = ir->num_rsp;
378         hci_send_cmd(hdev, OGF_LINK_CTL, OCF_INQUIRY, sizeof(cp), &cp);
379 }
380
381 int hci_inquiry(void __user *arg)
382 {
383         __u8 __user *ptr = arg;
384         struct hci_inquiry_req ir;
385         struct hci_dev *hdev;
386         int err = 0, do_inquiry = 0, max_rsp;
387         long timeo;
388         __u8 *buf;
389
390         if (copy_from_user(&ir, ptr, sizeof(ir)))
391                 return -EFAULT;
392
393         if (!(hdev = hci_dev_get(ir.dev_id)))
394                 return -ENODEV;
395
396         hci_dev_lock_bh(hdev);
397         if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX || 
398                                         inquiry_cache_empty(hdev) ||
399                                         ir.flags & IREQ_CACHE_FLUSH) {
400                 inquiry_cache_flush(hdev);
401                 do_inquiry = 1;
402         }
403         hci_dev_unlock_bh(hdev);
404
405         timeo = ir.length * 2 * HZ;
406         if (do_inquiry && (err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo)) < 0)
407                 goto done;
408
409         /* For an unlimited number of responses, use a buffer with 255 entries */
410         max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
411
412         /* cache_dump can't sleep, so allocate a temporary buffer and then
413          * copy it to user space.
414          */
415         if (!(buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL))) {
416                 err = -ENOMEM;
417                 goto done;
418         }
419
420         hci_dev_lock_bh(hdev);
421         ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
422         hci_dev_unlock_bh(hdev);
423
424         BT_DBG("num_rsp %d", ir.num_rsp);
425
426         if (!copy_to_user(ptr, &ir, sizeof(ir))) {
427                 ptr += sizeof(ir);
428                 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
429                                         ir.num_rsp))
430                         err = -EFAULT;
431         } else 
432                 err = -EFAULT;
433
434         kfree(buf);
435
436 done:
437         hci_dev_put(hdev);
438         return err;
439 }
440
441 /* ---- HCI ioctl helpers ---- */
442
443 int hci_dev_open(__u16 dev)
444 {
445         struct hci_dev *hdev;
446         int ret = 0;
447
448         if (!(hdev = hci_dev_get(dev)))
449                 return -ENODEV;
450
451         BT_DBG("%s %p", hdev->name, hdev);
452
453         hci_req_lock(hdev);
454
455         if (test_bit(HCI_UP, &hdev->flags)) {
456                 ret = -EALREADY;
457                 goto done;
458         }
459
460         if (hdev->open(hdev)) {
461                 ret = -EIO;
462                 goto done;
463         }
464
465         if (!test_bit(HCI_RAW, &hdev->flags)) {
466                 atomic_set(&hdev->cmd_cnt, 1);
467                 set_bit(HCI_INIT, &hdev->flags);
468
469                 //__hci_request(hdev, hci_reset_req, 0, HZ);
470                 ret = __hci_request(hdev, hci_init_req, 0, HCI_INIT_TIMEOUT);
471
472                 clear_bit(HCI_INIT, &hdev->flags);
473         }
474
475         if (!ret) {
476                 hci_dev_hold(hdev);
477                 set_bit(HCI_UP, &hdev->flags);
478                 hci_notify(hdev, HCI_DEV_UP);
479         } else {        
480                 /* Init failed, cleanup */
481                 tasklet_kill(&hdev->rx_task);
482                 tasklet_kill(&hdev->tx_task);
483                 tasklet_kill(&hdev->cmd_task);
484
485                 skb_queue_purge(&hdev->cmd_q);
486                 skb_queue_purge(&hdev->rx_q);
487
488                 if (hdev->flush)
489                         hdev->flush(hdev);
490
491                 if (hdev->sent_cmd) {
492                         kfree_skb(hdev->sent_cmd);
493                         hdev->sent_cmd = NULL;
494                 }
495
496                 hdev->close(hdev);
497                 hdev->flags = 0;
498         }
499
500 done:
501         hci_req_unlock(hdev);
502         hci_dev_put(hdev);
503         return ret;
504 }
505
506 static int hci_dev_do_close(struct hci_dev *hdev)
507 {
508         BT_DBG("%s %p", hdev->name, hdev);
509
510         hci_req_cancel(hdev, ENODEV);
511         hci_req_lock(hdev);
512
513         if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
514                 hci_req_unlock(hdev);
515                 return 0;
516         }
517
518         /* Kill RX and TX tasks */
519         tasklet_kill(&hdev->rx_task);
520         tasklet_kill(&hdev->tx_task);
521
522         hci_dev_lock_bh(hdev);
523         inquiry_cache_flush(hdev);
524         hci_conn_hash_flush(hdev);
525         hci_dev_unlock_bh(hdev);
526
527         hci_notify(hdev, HCI_DEV_DOWN);
528
529         if (hdev->flush)
530                 hdev->flush(hdev);
531
532         /* Reset device */
533         skb_queue_purge(&hdev->cmd_q);
534         atomic_set(&hdev->cmd_cnt, 1);
535         set_bit(HCI_INIT, &hdev->flags);
536         __hci_request(hdev, hci_reset_req, 0, HZ/4);
537         clear_bit(HCI_INIT, &hdev->flags);
538
539         /* Kill cmd task */
540         tasklet_kill(&hdev->cmd_task);
541
542         /* Drop queues */
543         skb_queue_purge(&hdev->rx_q);
544         skb_queue_purge(&hdev->cmd_q);
545         skb_queue_purge(&hdev->raw_q);
546
547         /* Drop last sent command */
548         if (hdev->sent_cmd) {
549                 kfree_skb(hdev->sent_cmd);
550                 hdev->sent_cmd = NULL;
551         }
552
553         /* After this point our queues are empty
554          * and no tasks are scheduled. */
555         hdev->close(hdev);
556
557         /* Clear flags */
558         hdev->flags = 0;
559
560         hci_req_unlock(hdev);
561
562         hci_dev_put(hdev);
563         return 0;
564 }
565
566 int hci_dev_close(__u16 dev)
567 {
568         struct hci_dev *hdev;
569         int err;
570
571         if (!(hdev = hci_dev_get(dev)))
572                 return -ENODEV;
573         err = hci_dev_do_close(hdev);
574         hci_dev_put(hdev);
575         return err;
576 }
577
578 int hci_dev_reset(__u16 dev)
579 {
580         struct hci_dev *hdev;
581         int ret = 0;
582
583         if (!(hdev = hci_dev_get(dev)))
584                 return -ENODEV;
585
586         hci_req_lock(hdev);
587         tasklet_disable(&hdev->tx_task);
588
589         if (!test_bit(HCI_UP, &hdev->flags))
590                 goto done;
591
592         /* Drop queues */
593         skb_queue_purge(&hdev->rx_q);
594         skb_queue_purge(&hdev->cmd_q);
595
596         hci_dev_lock_bh(hdev);
597         inquiry_cache_flush(hdev);
598         hci_conn_hash_flush(hdev);
599         hci_dev_unlock_bh(hdev);
600
601         if (hdev->flush)
602                 hdev->flush(hdev);
603
604         atomic_set(&hdev->cmd_cnt, 1); 
605         hdev->acl_cnt = 0; hdev->sco_cnt = 0;
606
607         ret = __hci_request(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
608
609 done:
610         tasklet_enable(&hdev->tx_task);
611         hci_req_unlock(hdev);
612         hci_dev_put(hdev);
613         return ret;
614 }
615
616 int hci_dev_reset_stat(__u16 dev)
617 {
618         struct hci_dev *hdev;
619         int ret = 0;
620
621         if (!(hdev = hci_dev_get(dev)))
622                 return -ENODEV;
623
624         memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
625
626         hci_dev_put(hdev);
627
628         return ret;
629 }
630
631 int hci_dev_cmd(unsigned int cmd, void __user *arg)
632 {
633         struct hci_dev *hdev;
634         struct hci_dev_req dr;
635         int err = 0;
636
637         if (copy_from_user(&dr, arg, sizeof(dr)))
638                 return -EFAULT;
639
640         if (!(hdev = hci_dev_get(dr.dev_id)))
641                 return -ENODEV;
642
643         switch (cmd) {
644         case HCISETAUTH:
645                 err = hci_request(hdev, hci_auth_req, dr.dev_opt, HCI_INIT_TIMEOUT);
646                 break;
647
648         case HCISETENCRYPT:
649                 if (!lmp_encrypt_capable(hdev)) {
650                         err = -EOPNOTSUPP;
651                         break;
652                 }
653
654                 if (!test_bit(HCI_AUTH, &hdev->flags)) {
655                         /* Auth must be enabled first */
656                         err = hci_request(hdev, hci_auth_req,
657                                         dr.dev_opt, HCI_INIT_TIMEOUT);
658                         if (err)
659                                 break;
660                 }
661
662                 err = hci_request(hdev, hci_encrypt_req,
663                                         dr.dev_opt, HCI_INIT_TIMEOUT);
664                 break;
665
666         case HCISETSCAN:
667                 err = hci_request(hdev, hci_scan_req, dr.dev_opt, HCI_INIT_TIMEOUT);
668                 break;
669
670         case HCISETPTYPE:
671                 hdev->pkt_type = (__u16) dr.dev_opt;
672                 break;
673
674         case HCISETLINKPOL:
675                 hdev->link_policy = (__u16) dr.dev_opt;
676                 break;
677
678         case HCISETLINKMODE:
679                 hdev->link_mode = ((__u16) dr.dev_opt) & (HCI_LM_MASTER | HCI_LM_ACCEPT);
680                 break;
681
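        /* For the MTU ioctls below, dev_opt packs two 16-bit values:
         * the first halfword is the packet count, the second the MTU. */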
682         case HCISETACLMTU:
683                 hdev->acl_mtu  = *((__u16 *)&dr.dev_opt + 1);
684                 hdev->acl_pkts = *((__u16 *)&dr.dev_opt + 0);
685                 break;
686
687         case HCISETSCOMTU:
688                 hdev->sco_mtu  = *((__u16 *)&dr.dev_opt + 1);
689                 hdev->sco_pkts = *((__u16 *)&dr.dev_opt + 0);
690                 break;
691
692         default:
693                 err = -EINVAL;
694                 break;
695         }
696         hci_dev_put(hdev);
697         return err;
698 }
699
700 int hci_get_dev_list(void __user *arg)
701 {
702         struct hci_dev_list_req *dl;
703         struct hci_dev_req *dr;
704         struct list_head *p;
705         int n = 0, size, err;
706         __u16 dev_num;
707
708         if (get_user(dev_num, (__u16 __user *) arg))
709                 return -EFAULT;
710
711         if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
712                 return -EINVAL;
713
714         size = sizeof(*dl) + dev_num * sizeof(*dr);
715
716         if (!(dl = kmalloc(size, GFP_KERNEL)))
717                 return -ENOMEM;
718
719         dr = dl->dev_req;
720
721         read_lock_bh(&hci_dev_list_lock);
722         list_for_each(p, &hci_dev_list) {
723                 struct hci_dev *hdev;
724                 hdev = list_entry(p, struct hci_dev, list);
725                 (dr + n)->dev_id  = hdev->id;
726                 (dr + n)->dev_opt = hdev->flags;
727                 if (++n >= dev_num)
728                         break;
729         }
730         read_unlock_bh(&hci_dev_list_lock);
731
732         dl->dev_num = n;
733         size = sizeof(*dl) + n * sizeof(*dr);
734
735         err = copy_to_user(arg, dl, size);
736         kfree(dl);
737
738         return err ? -EFAULT : 0;
739 }
740
741 int hci_get_dev_info(void __user *arg)
742 {
743         struct hci_dev *hdev;
744         struct hci_dev_info di;
745         int err = 0;
746
747         if (copy_from_user(&di, arg, sizeof(di)))
748                 return -EFAULT;
749
750         if (!(hdev = hci_dev_get(di.dev_id)))
751                 return -ENODEV;
752
753         strcpy(di.name, hdev->name);
754         di.bdaddr   = hdev->bdaddr;
755         di.type     = hdev->type;
756         di.flags    = hdev->flags;
757         di.pkt_type = hdev->pkt_type;
758         di.acl_mtu  = hdev->acl_mtu;
759         di.acl_pkts = hdev->acl_pkts;
760         di.sco_mtu  = hdev->sco_mtu;
761         di.sco_pkts = hdev->sco_pkts;
762         di.link_policy = hdev->link_policy;
763         di.link_mode   = hdev->link_mode;
764
765         memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
766         memcpy(&di.features, &hdev->features, sizeof(di.features));
767
768         if (copy_to_user(arg, &di, sizeof(di)))
769                 err = -EFAULT;
770
771         hci_dev_put(hdev);
772
773         return err;
774 }
775
776 /* ---- Interface to HCI drivers ---- */
777
778 /* Alloc HCI device */
779 struct hci_dev *hci_alloc_dev(void)
780 {
781         struct hci_dev *hdev;
782
783         hdev = kmalloc(sizeof(struct hci_dev), GFP_KERNEL);
784         if (!hdev)
785                 return NULL;
786
787         memset(hdev, 0, sizeof(struct hci_dev));
788
789         return hdev;
790 }
791 EXPORT_SYMBOL(hci_alloc_dev);
792
793 /* Free HCI device */
794 void hci_free_dev(struct hci_dev *hdev)
795 {
796         /* will free via class release */
797         class_device_put(&hdev->class_dev);
798 }
799 EXPORT_SYMBOL(hci_free_dev);
800
801 /* Register HCI device */
802 int hci_register_dev(struct hci_dev *hdev)
803 {
804         struct list_head *head = &hci_dev_list, *p;
805         int id = 0;
806
807         BT_DBG("%p name %s type %d owner %p", hdev, hdev->name, hdev->type, hdev->owner);
808
809         if (!hdev->open || !hdev->close || !hdev->destruct)
810                 return -EINVAL;
811
812         write_lock_bh(&hci_dev_list_lock);
813
814         /* Find first available device id */
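        /* The list is kept sorted by id; stop at the first gap in the
         * sequence and remember 'head' as the node to insert after. */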
815         list_for_each(p, &hci_dev_list) {
816                 if (list_entry(p, struct hci_dev, list)->id != id)
817                         break;
818                 head = p; id++;
819         }
820         
821         sprintf(hdev->name, "hci%d", id);
822         hdev->id = id;
823         list_add(&hdev->list, head);
824
825         atomic_set(&hdev->refcnt, 1);
826         spin_lock_init(&hdev->lock);
827
828         hdev->flags = 0;
829         hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
830         hdev->link_mode = (HCI_LM_ACCEPT);
831
832         tasklet_init(&hdev->cmd_task, hci_cmd_task,(unsigned long) hdev);
833         tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
834         tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);
835
836         skb_queue_head_init(&hdev->rx_q);
837         skb_queue_head_init(&hdev->cmd_q);
838         skb_queue_head_init(&hdev->raw_q);
839
840         init_waitqueue_head(&hdev->req_wait_q);
841         init_MUTEX(&hdev->req_lock);
842
843         inquiry_cache_init(hdev);
844
845         hci_conn_hash_init(hdev);
846
847         memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
848
849         atomic_set(&hdev->promisc, 0);
850
851         write_unlock_bh(&hci_dev_list_lock);
852
853         hci_register_sysfs(hdev);
854
855         hci_notify(hdev, HCI_DEV_REG);
856
857         return id;
858 }
859 EXPORT_SYMBOL(hci_register_dev);
860
861 /* Unregister HCI device */
862 int hci_unregister_dev(struct hci_dev *hdev)
863 {
864         BT_DBG("%p name %s type %d", hdev, hdev->name, hdev->type);
865
866         hci_unregister_sysfs(hdev);
867
868         write_lock_bh(&hci_dev_list_lock);
869         list_del(&hdev->list);
870         write_unlock_bh(&hci_dev_list_lock);
871
872         hci_dev_do_close(hdev);
873
874         hci_notify(hdev, HCI_DEV_UNREG);
875
876         __hci_dev_put(hdev);
877         return 0;
878 }
879 EXPORT_SYMBOL(hci_unregister_dev);
880
881 /* Suspend HCI device */
882 int hci_suspend_dev(struct hci_dev *hdev)
883 {
884         hci_notify(hdev, HCI_DEV_SUSPEND);
885         return 0;
886 }
887 EXPORT_SYMBOL(hci_suspend_dev);
888
889 /* Resume HCI device */
890 int hci_resume_dev(struct hci_dev *hdev)
891 {
892         hci_notify(hdev, HCI_DEV_RESUME);
893         return 0;
894 }
895 EXPORT_SYMBOL(hci_resume_dev);
896
897 /* ---- Interface to upper protocols ---- */
898
899 /* Register/Unregister protocols.
900  * hci_task_lock is used to ensure that no tasks are running. */
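/* The RX/TX tasklets take hci_task_lock for reading, so holding the
 * write lock below guarantees neither task is running while the
 * protocol table is modified. */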
901 int hci_register_proto(struct hci_proto *hp)
902 {
903         int err = 0;
904
905         BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
906
907         if (hp->id >= HCI_MAX_PROTO)
908                 return -EINVAL;
909
910         write_lock_bh(&hci_task_lock);
911
912         if (!hci_proto[hp->id])
913                 hci_proto[hp->id] = hp;
914         else
915                 err = -EEXIST;
916
917         write_unlock_bh(&hci_task_lock);
918
919         return err;
920 }
921 EXPORT_SYMBOL(hci_register_proto);
922
923 int hci_unregister_proto(struct hci_proto *hp)
924 {
925         int err = 0;
926
927         BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
928
929         if (hp->id >= HCI_MAX_PROTO)
930                 return -EINVAL;
931
932         write_lock_bh(&hci_task_lock);
933
934         if (hci_proto[hp->id])
935                 hci_proto[hp->id] = NULL;
936         else
937                 err = -ENOENT;
938
939         write_unlock_bh(&hci_task_lock);
940
941         return err;
942 }
943 EXPORT_SYMBOL(hci_unregister_proto);
944
945 int hci_register_cb(struct hci_cb *cb)
946 {
947         BT_DBG("%p name %s", cb, cb->name);
948
949         write_lock_bh(&hci_cb_list_lock);
950         list_add(&cb->list, &hci_cb_list);
951         write_unlock_bh(&hci_cb_list_lock);
952
953         return 0;
954 }
955 EXPORT_SYMBOL(hci_register_cb);
956
957 int hci_unregister_cb(struct hci_cb *cb)
958 {
959         BT_DBG("%p name %s", cb, cb->name);
960
961         write_lock_bh(&hci_cb_list_lock);
962         list_del(&cb->list);
963         write_unlock_bh(&hci_cb_list_lock);
964
965         return 0;
966 }
967 EXPORT_SYMBOL(hci_unregister_cb);
968
969 static int hci_send_frame(struct sk_buff *skb)
970 {
971         struct hci_dev *hdev = (struct hci_dev *) skb->dev;
972
973         if (!hdev) {
974                 kfree_skb(skb);
975                 return -ENODEV;
976         }
977
978         BT_DBG("%s type %d len %d", hdev->name, skb->pkt_type, skb->len);
979
980         if (atomic_read(&hdev->promisc)) {
981                 /* Time stamp */
982                 do_gettimeofday(&skb->stamp);
983
984                 hci_send_to_sock(hdev, skb);
985         }
986
987         /* Get rid of the skb owner prior to sending it to the driver. */
988         skb_orphan(skb);
989
990         return hdev->send(skb);
991 }
992
993 /* Send HCI command */
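/* Builds a command packet, queues it on cmd_q and kicks the command
 * tasklet; actual transmission is paced by the controller's command
 * flow control in hci_cmd_task(). */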
994 int hci_send_cmd(struct hci_dev *hdev, __u16 ogf, __u16 ocf, __u32 plen, void *param)
995 {
996         int len = HCI_COMMAND_HDR_SIZE + plen;
997         struct hci_command_hdr *hdr;
998         struct sk_buff *skb;
999
1000         BT_DBG("%s ogf 0x%x ocf 0x%x plen %d", hdev->name, ogf, ocf, plen);
1001
1002         skb = bt_skb_alloc(len, GFP_ATOMIC);
1003         if (!skb) {
1004                 BT_ERR("%s Can't allocate memory for HCI command", hdev->name);
1005                 return -ENOMEM;
1006         }
1007
1008         hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
1009         hdr->opcode = __cpu_to_le16(hci_opcode_pack(ogf, ocf));
1010         hdr->plen   = plen;
1011
1012         if (plen)
1013                 memcpy(skb_put(skb, plen), param, plen);
1014
1015         BT_DBG("skb len %d", skb->len);
1016
1017         skb->pkt_type = HCI_COMMAND_PKT;
1018         skb->dev = (void *) hdev;
1019         skb_queue_tail(&hdev->cmd_q, skb);
1020         hci_sched_cmd(hdev);
1021
1022         return 0;
1023 }
1024 EXPORT_SYMBOL(hci_send_cmd);
1025
1026 /* Get data from the previously sent command */
1027 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 ogf, __u16 ocf)
1028 {
1029         struct hci_command_hdr *hdr;
1030
1031         if (!hdev->sent_cmd)
1032                 return NULL;
1033
1034         hdr = (void *) hdev->sent_cmd->data;
1035
1036         if (hdr->opcode != __cpu_to_le16(hci_opcode_pack(ogf, ocf)))
1037                 return NULL;
1038
1039         BT_DBG("%s ogf 0x%x ocf 0x%x", hdev->name, ogf, ocf);
1040
1041         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
1042 }
1043
1044 /* Send ACL data */
1045 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
1046 {
1047         struct hci_acl_hdr *hdr;
1048         int len = skb->len;
1049
1050         hdr = (struct hci_acl_hdr *) skb_push(skb, HCI_ACL_HDR_SIZE);
1051         hdr->handle = __cpu_to_le16(hci_handle_pack(handle, flags));
1052         hdr->dlen   = __cpu_to_le16(len);
1053
1054         skb->h.raw = (void *) hdr;
1055 }
1056
1057 int hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
1058 {
1059         struct hci_dev *hdev = conn->hdev;
1060         struct sk_buff *list;
1061
1062         BT_DBG("%s conn %p flags 0x%x", hdev->name, conn, flags);
1063
1064         skb->dev = (void *) hdev;
1065         skb->pkt_type = HCI_ACLDATA_PKT;
1066         hci_add_acl_hdr(skb, conn->handle, flags | ACL_START);
1067
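        /* Upper layers may hand over a fragmented packet via the skb
         * frag_list: the head was tagged ACL_START above, and every
         * continuation queued below is tagged ACL_CONT. */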
1068         if (!(list = skb_shinfo(skb)->frag_list)) {
1069                 /* Non fragmented */
1070                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
1071
1072                 skb_queue_tail(&conn->data_q, skb);
1073         } else {
1074                 /* Fragmented */
1075                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
1076
1077                 skb_shinfo(skb)->frag_list = NULL;
1078
1079                 /* Queue all fragments atomically */
1080                 spin_lock_bh(&conn->data_q.lock);
1081
1082                 __skb_queue_tail(&conn->data_q, skb);
1083                 do {
1084                         skb = list; list = list->next;
1085                         
1086                         skb->dev = (void *) hdev;
1087                         skb->pkt_type = HCI_ACLDATA_PKT;
1088                         hci_add_acl_hdr(skb, conn->handle, flags | ACL_CONT);
1089
1090                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
1091
1092                         __skb_queue_tail(&conn->data_q, skb);
1093                 } while (list);
1094
1095                 spin_unlock_bh(&conn->data_q.lock);
1096         }
1097
1098         hci_sched_tx(hdev);
1099         return 0;
1100 }
1101 EXPORT_SYMBOL(hci_send_acl);
1102
1103 /* Send SCO data */
1104 int hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
1105 {
1106         struct hci_dev *hdev = conn->hdev;
1107         struct hci_sco_hdr hdr;
1108
1109         BT_DBG("%s len %d", hdev->name, skb->len);
1110
1111         if (skb->len > hdev->sco_mtu) {
1112                 kfree_skb(skb);
1113                 return -EINVAL;
1114         }
1115
1116         hdr.handle = __cpu_to_le16(conn->handle);
1117         hdr.dlen   = skb->len;
1118
1119         skb->h.raw = skb_push(skb, HCI_SCO_HDR_SIZE);
1120         memcpy(skb->h.raw, &hdr, HCI_SCO_HDR_SIZE);
1121
1122         skb->dev = (void *) hdev;
1123         skb->pkt_type = HCI_SCODATA_PKT;
1124         skb_queue_tail(&conn->data_q, skb);
1125         hci_sched_tx(hdev);
1126         return 0;
1127 }
1128 EXPORT_SYMBOL(hci_send_sco);
1129
1130 /* ---- HCI TX task (outgoing data) ---- */
1131
1132 /* HCI Connection scheduler */
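/* Pick the connection of the requested type with the fewest packets in
 * flight and give it an even share (at least one) of the free controller
 * buffers, so a busy link cannot starve the others. */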
1133 static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
1134 {
1135         struct hci_conn_hash *h = &hdev->conn_hash;
1136         struct hci_conn  *conn = NULL;
1137         int num = 0, min = ~0;
1138         struct list_head *p;
1139
1140         /* We don't have to lock the device here. Connections are always
1141          * added and removed with the TX task disabled. */
1142         list_for_each(p, &h->list) {
1143                 struct hci_conn *c;
1144                 c = list_entry(p, struct hci_conn, list);
1145
1146                 if (c->type != type || c->state != BT_CONNECTED
1147                                 || skb_queue_empty(&c->data_q))
1148                         continue;
1149                 num++;
1150
1151                 if (c->sent < min) {
1152                         min  = c->sent;
1153                         conn = c;
1154                 }
1155         }
1156
1157         if (conn) {
1158                 int cnt = (type == ACL_LINK ? hdev->acl_cnt : hdev->sco_cnt);
1159                 int q = cnt / num;
1160                 *quote = q ? q : 1;
1161         } else
1162                 *quote = 0;
1163
1164         BT_DBG("conn %p quote %d", conn, *quote);
1165         return conn;
1166 }
1167
1168 static inline void hci_acl_tx_to(struct hci_dev *hdev)
1169 {
1170         struct hci_conn_hash *h = &hdev->conn_hash;
1171         struct list_head *p;
1172         struct hci_conn  *c;
1173
1174         BT_ERR("%s ACL tx timeout", hdev->name);
1175
1176         /* Kill stalled connections */
1177         list_for_each(p, &h->list) {
1178                 c = list_entry(p, struct hci_conn, list);
1179                 if (c->type == ACL_LINK && c->sent) {
1180                         BT_ERR("%s killing stalled ACL connection %s",
1181                                 hdev->name, batostr(&c->dst));
1182                         hci_acl_disconn(c, 0x13);
1183                 }
1184         }
1185 }
1186
1187 static inline void hci_sched_acl(struct hci_dev *hdev)
1188 {
1189         struct hci_conn *conn;
1190         struct sk_buff *skb;
1191         int quote;
1192
1193         BT_DBG("%s", hdev->name);
1194
1195         /* ACL tx timeout must be longer than maximum
1196          * link supervision timeout (40.9 seconds) */
1197         if (!hdev->acl_cnt && (jiffies - hdev->acl_last_tx) > (HZ * 45))
1198                 hci_acl_tx_to(hdev);
1199
1200         while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
1201                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
1202                         BT_DBG("skb %p len %d", skb, skb->len);
1203                         hci_send_frame(skb);
1204                         hdev->acl_last_tx = jiffies;
1205
1206                         hdev->acl_cnt--;
1207                         conn->sent++;
1208                 }
1209         }
1210 }
1211
1212 /* Schedule SCO */
1213 static inline void hci_sched_sco(struct hci_dev *hdev)
1214 {
1215         struct hci_conn *conn;
1216         struct sk_buff *skb;
1217         int quote;
1218
1219         BT_DBG("%s", hdev->name);
1220
1221         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
1222                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
1223                         BT_DBG("skb %p len %d", skb, skb->len);
1224                         hci_send_frame(skb);
1225
1226                         conn->sent++;
1227                         if (conn->sent == ~0)
1228                                 conn->sent = 0;
1229                 }
1230         }
1231 }
1232
1233 static void hci_tx_task(unsigned long arg)
1234 {
1235         struct hci_dev *hdev = (struct hci_dev *) arg;
1236         struct sk_buff *skb;
1237
1238         read_lock(&hci_task_lock);
1239
1240         BT_DBG("%s acl %d sco %d", hdev->name, hdev->acl_cnt, hdev->sco_cnt);
1241
1242         /* Schedule queues and send stuff to HCI driver */
1243
1244         hci_sched_acl(hdev);
1245
1246         hci_sched_sco(hdev);
1247
1248         /* Send next queued raw (unknown type) packet */
1249         while ((skb = skb_dequeue(&hdev->raw_q)))
1250                 hci_send_frame(skb);
1251
1252         read_unlock(&hci_task_lock);
1253 }
1254
1255 /* ----- HCI RX task (incoming data processing) ----- */
1256
1257 /* ACL data packet */
1258 static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1259 {
1260         struct hci_acl_hdr *hdr = (void *) skb->data;
1261         struct hci_conn *conn;
1262         __u16 handle, flags;
1263
1264         skb_pull(skb, HCI_ACL_HDR_SIZE);
1265
1266         handle = __le16_to_cpu(hdr->handle);
1267         flags  = hci_flags(handle);
1268         handle = hci_handle(handle);
1269
1270         BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);
1271
1272         hdev->stat.acl_rx++;
1273
1274         hci_dev_lock(hdev);
1275         conn = hci_conn_hash_lookup_handle(hdev, handle);
1276         hci_dev_unlock(hdev);
1277         
1278         if (conn) {
1279                 register struct hci_proto *hp;
1280
1281                 /* Send to upper protocol */
1282                 if ((hp = hci_proto[HCI_PROTO_L2CAP]) && hp->recv_acldata) {
1283                         hp->recv_acldata(conn, skb, flags);
1284                         return;
1285                 }
1286         } else {
1287                 BT_ERR("%s ACL packet for unknown connection handle %d", 
1288                         hdev->name, handle);
1289         }
1290
1291         kfree_skb(skb);
1292 }
1293
1294 /* SCO data packet */
1295 static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1296 {
1297         struct hci_sco_hdr *hdr = (void *) skb->data;
1298         struct hci_conn *conn;
1299         __u16 handle;
1300
1301         skb_pull(skb, HCI_SCO_HDR_SIZE);
1302
1303         handle = __le16_to_cpu(hdr->handle);
1304
1305         BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
1306
1307         hdev->stat.sco_rx++;
1308
1309         hci_dev_lock(hdev);
1310         conn = hci_conn_hash_lookup_handle(hdev, handle);
1311         hci_dev_unlock(hdev);
1312
1313         if (conn) {
1314                 register struct hci_proto *hp;
1315
1316                 /* Send to upper protocol */
1317                 if ((hp = hci_proto[HCI_PROTO_SCO]) && hp->recv_scodata) {
1318                         hp->recv_scodata(conn, skb);
1319                         return;
1320                 }
1321         } else {
1322                 BT_ERR("%s SCO packet for unknown connection handle %d", 
1323                         hdev->name, handle);
1324         }
1325
1326         kfree_skb(skb);
1327 }
1328
1329 void hci_rx_task(unsigned long arg)
1330 {
1331         struct hci_dev *hdev = (struct hci_dev *) arg;
1332         struct sk_buff *skb;
1333
1334         BT_DBG("%s", hdev->name);
1335
1336         read_lock(&hci_task_lock);
1337
1338         while ((skb = skb_dequeue(&hdev->rx_q))) {
1339                 if (atomic_read(&hdev->promisc)) {
1340                         /* Send copy to the sockets */
1341                         hci_send_to_sock(hdev, skb);
1342                 }
1343
1344                 if (test_bit(HCI_RAW, &hdev->flags)) {
1345                         kfree_skb(skb);
1346                         continue;
1347                 }
1348
1349                 if (test_bit(HCI_INIT, &hdev->flags)) {
1350                         /* Don't process data packets in this state. */
1351                         switch (skb->pkt_type) {
1352                         case HCI_ACLDATA_PKT:
1353                         case HCI_SCODATA_PKT:
1354                                 kfree_skb(skb);
1355                                 continue;
1356                         }
1357                 }
1358
1359                 /* Process frame */
1360                 switch (skb->pkt_type) {
1361                 case HCI_EVENT_PKT:
1362                         hci_event_packet(hdev, skb);
1363                         break;
1364
1365                 case HCI_ACLDATA_PKT:
1366                         BT_DBG("%s ACL data packet", hdev->name);
1367                         hci_acldata_packet(hdev, skb);
1368                         break;
1369
1370                 case HCI_SCODATA_PKT:
1371                         BT_DBG("%s SCO data packet", hdev->name);
1372                         hci_scodata_packet(hdev, skb);
1373                         break;
1374
1375                 default:
1376                         kfree_skb(skb);
1377                         break;
1378                 }
1379         }
1380
1381         read_unlock(&hci_task_lock);
1382 }
1383
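/* Command scheduler: cmd_cnt holds how many commands the controller is
 * currently willing to accept. It is decremented here for every command
 * sent and replenished by the event handler on command completion; if no
 * credit comes back within a second, assume a lost event and force one
 * credit so the queue cannot stall forever. */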
1384 static void hci_cmd_task(unsigned long arg)
1385 {
1386         struct hci_dev *hdev = (struct hci_dev *) arg;
1387         struct sk_buff *skb;
1388
1389         BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));
1390
1391         if (!atomic_read(&hdev->cmd_cnt) && (jiffies - hdev->cmd_last_tx) > HZ) {
1392                 BT_ERR("%s command tx timeout", hdev->name);
1393                 atomic_set(&hdev->cmd_cnt, 1);
1394         }
1395
1396         /* Send queued commands */
1397         if (atomic_read(&hdev->cmd_cnt) && (skb = skb_dequeue(&hdev->cmd_q))) {
1398                 if (hdev->sent_cmd)
1399                         kfree_skb(hdev->sent_cmd);
1400
1401                 if ((hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC))) {
1402                         atomic_dec(&hdev->cmd_cnt);
1403                         hci_send_frame(skb);
1404                         hdev->cmd_last_tx = jiffies;
1405                 } else {
1406                         skb_queue_head(&hdev->cmd_q, skb);
1407                         hci_sched_cmd(hdev);
1408                 }
1409         }
1410 }