2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI core. */
27 #include <linux/config.h>
28 #include <linux/module.h>
29 #include <linux/kmod.h>
31 #include <linux/types.h>
32 #include <linux/errno.h>
33 #include <linux/kernel.h>
34 #include <linux/major.h>
35 #include <linux/sched.h>
36 #include <linux/slab.h>
37 #include <linux/poll.h>
38 #include <linux/fcntl.h>
39 #include <linux/init.h>
40 #include <linux/skbuff.h>
41 #include <linux/interrupt.h>
42 #include <linux/notifier.h>
45 #include <asm/system.h>
46 #include <asm/uaccess.h>
47 #include <asm/unaligned.h>
49 #include <net/bluetooth/bluetooth.h>
50 #include <net/bluetooth/hci_core.h>
52 #ifndef CONFIG_BT_HCI_CORE_DEBUG
/* Forward declarations for the three per-device tasklets and the notifier helper. */
57 static void hci_cmd_task(unsigned long arg);
58 static void hci_rx_task(unsigned long arg);
59 static void hci_tx_task(unsigned long arg);
60 static void hci_notify(struct hci_dev *hdev, int event);
/* Serializes RX/TX/cmd tasks against protocol (un)registration (taken for write there). */
62 rwlock_t hci_task_lock = RW_LOCK_UNLOCKED;
/* Global list of registered HCI devices, guarded by hci_dev_list_lock. */
65 LIST_HEAD(hci_dev_list);
66 rwlock_t hci_dev_list_lock = RW_LOCK_UNLOCKED;
/* Upper-protocol slots, indexed by hp->id (L2CAP and SCO are used below). */
69 #define HCI_MAX_PROTO 2
70 struct hci_proto *hci_proto[HCI_MAX_PROTO];
72 /* HCI notifiers list */
73 static struct notifier_block *hci_notifier;
75 /* ---- HCI notifications ---- */
/* Register @nb on the HCI notifier chain; it will see HCI_DEV_* events. */
77 int hci_register_notifier(struct notifier_block *nb)
79 	return notifier_chain_register(&hci_notifier, nb);
/* Remove @nb from the HCI notifier chain. */
82 int hci_unregister_notifier(struct notifier_block *nb)
84 	return notifier_chain_unregister(&hci_notifier, nb);
/* Broadcast @event (HCI_DEV_UP/DOWN/REG/...) for @hdev to all registered notifiers. */
87 void hci_notify(struct hci_dev *hdev, int event)
89 	notifier_call_chain(&hci_notifier, event, hdev);
92 /* ---- HCI requests ---- */
/* Complete a pending synchronous request: record @result and wake the
 * waiter sleeping in __hci_request(). No-op if no request is pending. */
94 void hci_req_complete(struct hci_dev *hdev, int result)
96 	BT_DBG("%s result 0x%2.2x", hdev->name, result);
98 	if (hdev->req_status == HCI_REQ_PEND) {
99 		hdev->req_result = result;
100 		hdev->req_status = HCI_REQ_DONE;
101 		wake_up_interruptible(&hdev->req_wait_q);
/* Cancel a pending synchronous request with error @err (positive errno);
 * wakes the waiter, which will negate the value in __hci_request(). */
105 void hci_req_cancel(struct hci_dev *hdev, int err)
107 	BT_DBG("%s err 0x%2.2x", hdev->name, err);
109 	if (hdev->req_status == HCI_REQ_PEND) {
110 		hdev->req_result = err;
111 		hdev->req_status = HCI_REQ_CANCELED;
112 		wake_up_interruptible(&hdev->req_wait_q);
116 /* Execute request and wait for completion. */
/* Runs @req (which queues HCI commands) and sleeps interruptibly until
 * hci_req_complete()/hci_req_cancel() fires or @timeout (jiffies) expires.
 * Returns 0 on success, negative errno on failure/cancel/signal.
 * Caller must hold the request lock (see hci_request()). */
117 static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
118 			unsigned long opt, __u32 timeout)
120 	DECLARE_WAITQUEUE(wait, current);
123 	BT_DBG("%s start", hdev->name);
125 	hdev->req_status = HCI_REQ_PEND;
/* Queue ourselves before running the request to avoid missing the wakeup. */
127 	add_wait_queue(&hdev->req_wait_q, &wait);
128 	set_current_state(TASK_INTERRUPTIBLE);
131 	schedule_timeout(timeout);
133 	remove_wait_queue(&hdev->req_wait_q, &wait);
135 	if (signal_pending(current))
138 	switch (hdev->req_status) {
/* HCI status code mapped to a negative errno. */
140 		err = -bt_err(hdev->req_result);
143 	case HCI_REQ_CANCELED:
/* req_result holds a positive errno here (see hci_req_cancel()). */
144 		err = -hdev->req_result;
152 	hdev->req_status = hdev->req_result = 0;
154 	BT_DBG("%s end: err %d", hdev->name, err);
/* Locked wrapper around __hci_request(): serializes synchronous requests
 * per device via the req_lock semaphore. */
159 static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
160 			unsigned long opt, __u32 timeout)
164 	/* Serialize all requests */
166 	ret = __hci_request(hdev, req, opt, timeout);
167 	hci_req_unlock(hdev);
/* Request callback: queue an HCI_Reset command. @opt is unused. */
172 static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
174 	BT_DBG("%s %ld", hdev->name, opt);
177 	hci_send_cmd(hdev, OGF_HOST_CTL, OCF_RESET, 0, NULL);
180 static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
184 BT_DBG("%s %ld", hdev->name, opt);
186 /* Mandatory initialization */
189 if (test_bit(HCI_QUIRK_RESET_ON_INIT, &hdev->quirks))
190 hci_send_cmd(hdev, OGF_HOST_CTL, OCF_RESET, 0, NULL);
192 /* Read Local Supported Features */
193 hci_send_cmd(hdev, OGF_INFO_PARAM, OCF_READ_LOCAL_FEATURES, 0, NULL);
195 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
196 hci_send_cmd(hdev, OGF_INFO_PARAM, OCF_READ_BUFFER_SIZE, 0, NULL);
199 /* Host buffer size */
201 struct hci_cp_host_buffer_size cp;
202 cp.acl_mtu = __cpu_to_le16(HCI_MAX_ACL_SIZE);
203 cp.sco_mtu = HCI_MAX_SCO_SIZE;
204 cp.acl_max_pkt = __cpu_to_le16(0xffff);
205 cp.sco_max_pkt = __cpu_to_le16(0xffff);
206 hci_send_cmd(hdev, OGF_HOST_CTL, OCF_HOST_BUFFER_SIZE, sizeof(cp), &cp);
210 /* Read BD Address */
211 hci_send_cmd(hdev, OGF_INFO_PARAM, OCF_READ_BD_ADDR, 0, NULL);
213 /* Read Voice Setting */
214 hci_send_cmd(hdev, OGF_HOST_CTL, OCF_READ_VOICE_SETTING, 0, NULL);
216 /* Optional initialization */
218 /* Clear Event Filters */
220 struct hci_cp_set_event_flt cp;
221 cp.flt_type = HCI_FLT_CLEAR_ALL;
222 hci_send_cmd(hdev, OGF_HOST_CTL, OCF_SET_EVENT_FLT, sizeof(cp), &cp);
225 /* Page timeout ~20 secs */
226 param = __cpu_to_le16(0x8000);
227 hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_PG_TIMEOUT, 2, ¶m);
229 /* Connection accept timeout ~20 secs */
230 param = __cpu_to_le16(0x7d00);
231 hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_CA_TIMEOUT, 2, ¶m);
/* Request callback: set inquiry/page scan mode; @opt carries the scan bits. */
234 static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
238 	BT_DBG("%s %x", hdev->name, scan);
240 	/* Inquiry and Page scans */
241 	hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_SCAN_ENABLE, 1, &scan);
/* Request callback: enable/disable authentication; @opt carries the mode byte. */
244 static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
248 	BT_DBG("%s %x", hdev->name, auth);
251 	hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_AUTH_ENABLE, 1, &auth);
/* Request callback: enable/disable encryption; @opt carries the mode byte. */
254 static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
258 	BT_DBG("%s %x", hdev->name, encrypt);
261 	hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_ENCRYPT_MODE, 1, &encrypt);
264 /* Get HCI device by index.
265  * Device is held on return. */
/* Returns the hci_dev with ->id == @index (reference taken via
 * hci_dev_hold()) or NULL; caller must drop the reference when done. */
266 struct hci_dev *hci_dev_get(int index)
268 	struct hci_dev *hdev = NULL;
276 	read_lock(&hci_dev_list_lock);
277 	list_for_each(p, &hci_dev_list) {
278 		struct hci_dev *d = list_entry(p, struct hci_dev, list);
279 		if (d->id == index) {
280 			hdev = hci_dev_hold(d);
284 	read_unlock(&hci_dev_list_lock);
287 EXPORT_SYMBOL(hci_dev_get);
289 /* ---- Inquiry support ---- */
290 static void inquiry_cache_flush(struct hci_dev *hdev)
292 struct inquiry_cache *cache = &hdev->inq_cache;
293 struct inquiry_entry *next = cache->list, *e;
295 BT_DBG("cache %p", cache);
/* Linear search of the inquiry cache for @bdaddr; returns the entry or NULL. */
304 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
306 	struct inquiry_cache *cache = &hdev->inq_cache;
307 	struct inquiry_entry *e;
309 	BT_DBG("cache %p, %s", cache, batostr(bdaddr));
311 	for (e = cache->list; e; e = e->next)
312 		if (!bacmp(&e->info.bdaddr, bdaddr))
/* Insert or refresh an inquiry result: new entries are prepended (GFP_ATOMIC,
 * silently dropped on allocation failure); timestamps are always refreshed. */
317 void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_info *info)
319 	struct inquiry_cache *cache = &hdev->inq_cache;
320 	struct inquiry_entry *e;
322 	BT_DBG("cache %p, %s", cache, batostr(&info->bdaddr));
324 	if (!(e = hci_inquiry_cache_lookup(hdev, &info->bdaddr))) {
325 		/* Entry not in the cache. Add new one. */
326 		if (!(e = kmalloc(sizeof(struct inquiry_entry), GFP_ATOMIC)))
328 		memset(e, 0, sizeof(struct inquiry_entry));
329 		e->next = cache->list;
333 	memcpy(&e->info, info, sizeof(*info));
334 	e->timestamp = jiffies;
335 	cache->timestamp = jiffies;
/* Copy up to @num cached inquiry_info records into @buf; returns the count
 * copied. Does not sleep — callers pre-allocate @buf (see hci_inquiry()). */
338 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
340 	struct inquiry_cache *cache = &hdev->inq_cache;
341 	struct inquiry_info *info = (struct inquiry_info *) buf;
342 	struct inquiry_entry *e;
345 	for (e = cache->list; e && copied < num; e = e->next, copied++)
346 		memcpy(info++, &e->info, sizeof(*info));
348 	BT_DBG("cache %p, copied %d", cache, copied);
/* Request callback: start an HCI Inquiry; @opt points at the user's
 * hci_inquiry_req (lap/length/num_rsp). Skips if inquiry already running. */
352 static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
354 	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
355 	struct hci_cp_inquiry cp;
357 	BT_DBG("%s", hdev->name);
359 	if (test_bit(HCI_INQUIRY, &hdev->flags))
363 	memcpy(&cp.lap, &ir->lap, 3);
364 	cp.length = ir->length;
365 	cp.num_rsp = ir->num_rsp;
366 	hci_send_cmd(hdev, OGF_LINK_CTL, OCF_INQUIRY, sizeof(cp), &cp);
/* ioctl(HCIINQUIRY) backend: optionally (re)runs an inquiry, then copies
 * cached results to userspace. Returns 0 or negative errno. */
369 int hci_inquiry(void __user *arg)
371 	__u8 __user *ptr = arg;
372 	struct hci_inquiry_req ir;
373 	struct hci_dev *hdev;
374 	int err = 0, do_inquiry = 0, max_rsp;
378 	if (copy_from_user(&ir, ptr, sizeof(ir)))
381 	if (!(hdev = hci_dev_get(ir.dev_id)))
/* Flush and re-inquire when the cache is stale, empty, or flush is forced. */
384 	hci_dev_lock_bh(hdev);
385 	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
386 	    inquiry_cache_empty(hdev) ||
387 	    ir.flags & IREQ_CACHE_FLUSH) {
388 		inquiry_cache_flush(hdev);
391 	hci_dev_unlock_bh(hdev);
/* ir.length is in 1.28s inquiry units; 2*HZ per unit is a safe upper bound. */
393 	timeo = ir.length * 2 * HZ;
394 	if (do_inquiry && (err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo)) < 0)
397 	/* for unlimited number of responses we will use buffer with 255 entries */
398 	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
400 	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
401 	 * copy it to the user space.
403 	if (!(buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL))) {
408 	hci_dev_lock_bh(hdev);
409 	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
410 	hci_dev_unlock_bh(hdev);
412 	BT_DBG("num_rsp %d", ir.num_rsp);
414 	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
416 		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
429 /* ---- HCI ioctl helpers ---- */
431 int hci_dev_open(__u16 dev)
433 struct hci_dev *hdev;
436 if (!(hdev = hci_dev_get(dev)))
439 BT_DBG("%s %p", hdev->name, hdev);
443 if (test_bit(HCI_UP, &hdev->flags)) {
448 if (hdev->open(hdev)) {
453 if (!test_bit(HCI_RAW, &hdev->flags)) {
454 atomic_set(&hdev->cmd_cnt, 1);
455 set_bit(HCI_INIT, &hdev->flags);
457 //__hci_request(hdev, hci_reset_req, 0, HZ);
458 ret = __hci_request(hdev, hci_init_req, 0, HCI_INIT_TIMEOUT);
460 clear_bit(HCI_INIT, &hdev->flags);
465 set_bit(HCI_UP, &hdev->flags);
466 hci_notify(hdev, HCI_DEV_UP);
468 /* Init failed, cleanup */
469 tasklet_kill(&hdev->rx_task);
470 tasklet_kill(&hdev->tx_task);
471 tasklet_kill(&hdev->cmd_task);
473 skb_queue_purge(&hdev->cmd_q);
474 skb_queue_purge(&hdev->rx_q);
479 if (hdev->sent_cmd) {
480 kfree_skb(hdev->sent_cmd);
481 hdev->sent_cmd = NULL;
489 hci_req_unlock(hdev);
/* Common shutdown path: cancel pending requests, kill tasklets, flush
 * caches/connections, reset the controller, and purge all queues.
 * Also used by hci_unregister_dev(). */
494 static int hci_dev_do_close(struct hci_dev *hdev)
496 	BT_DBG("%s %p", hdev->name, hdev);
498 	hci_req_cancel(hdev, ENODEV);
501 	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
502 		hci_req_unlock(hdev);
506 	/* Kill RX and TX tasks */
507 	tasklet_kill(&hdev->rx_task);
508 	tasklet_kill(&hdev->tx_task);
510 	hci_dev_lock_bh(hdev);
511 	inquiry_cache_flush(hdev);
512 	hci_conn_hash_flush(hdev);
513 	hci_dev_unlock_bh(hdev);
515 	hci_notify(hdev, HCI_DEV_DOWN);
/* Best-effort controller reset with a short (HZ/4) timeout. */
521 	skb_queue_purge(&hdev->cmd_q);
522 	atomic_set(&hdev->cmd_cnt, 1);
523 	set_bit(HCI_INIT, &hdev->flags);
524 	__hci_request(hdev, hci_reset_req, 0, HZ/4);
525 	clear_bit(HCI_INIT, &hdev->flags);
528 	tasklet_kill(&hdev->cmd_task);
531 	skb_queue_purge(&hdev->rx_q);
532 	skb_queue_purge(&hdev->cmd_q);
533 	skb_queue_purge(&hdev->raw_q);
535 	/* Drop last sent command */
536 	if (hdev->sent_cmd) {
537 		kfree_skb(hdev->sent_cmd);
538 		hdev->sent_cmd = NULL;
541 	/* After this point our queues are empty
542 	 * and no tasks are scheduled. */
548 	hci_req_unlock(hdev);
/* ioctl(HCIDEVDOWN) backend: look up the device and run the common close path. */
554 int hci_dev_close(__u16 dev)
556 	struct hci_dev *hdev;
559 	if (!(hdev = hci_dev_get(dev)))
561 	err = hci_dev_do_close(hdev);
/* ioctl(HCIDEVRESET) backend: flush queues/caches/connections and issue an
 * HCI_Reset while TX is disabled. Returns 0 or negative errno. */
566 int hci_dev_reset(__u16 dev)
568 	struct hci_dev *hdev;
571 	if (!(hdev = hci_dev_get(dev)))
575 	tasklet_disable(&hdev->tx_task);
577 	if (!test_bit(HCI_UP, &hdev->flags))
581 	skb_queue_purge(&hdev->rx_q);
582 	skb_queue_purge(&hdev->cmd_q);
584 	hci_dev_lock_bh(hdev);
585 	inquiry_cache_flush(hdev);
586 	hci_conn_hash_flush(hdev);
587 	hci_dev_unlock_bh(hdev);
/* Reset flow-control counters; real values return with the next init events. */
592 	atomic_set(&hdev->cmd_cnt, 1);
593 	hdev->acl_cnt = 0; hdev->sco_cnt = 0;
595 	ret = __hci_request(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
598 	tasklet_enable(&hdev->tx_task);
599 	hci_req_unlock(hdev);
/* ioctl(HCIDEVRESTAT) backend: zero the per-device byte/packet statistics. */
604 int hci_dev_reset_stat(__u16 dev)
606 	struct hci_dev *hdev;
609 	if (!(hdev = hci_dev_get(dev)))
612 	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
/* Dispatcher for the per-device set-option ioctls (HCISETAUTH, HCISETENCRYPT,
 * HCISETSCAN, HCISETPTYPE, ...). dr.dev_opt carries the new value. */
619 int hci_dev_cmd(unsigned int cmd, void __user *arg)
621 	struct hci_dev *hdev;
622 	struct hci_dev_req dr;
625 	if (copy_from_user(&dr, arg, sizeof(dr)))
628 	if (!(hdev = hci_dev_get(dr.dev_id)))
633 		err = hci_request(hdev, hci_auth_req, dr.dev_opt, HCI_INIT_TIMEOUT);
637 		if (!lmp_encrypt_capable(hdev)) {
642 		if (!test_bit(HCI_AUTH, &hdev->flags)) {
643 			/* Auth must be enabled first */
644 			err = hci_request(hdev, hci_auth_req,
645 					dr.dev_opt, HCI_INIT_TIMEOUT);
650 		err = hci_request(hdev, hci_encrypt_req,
651 				dr.dev_opt, HCI_INIT_TIMEOUT);
655 		err = hci_request(hdev, hci_scan_req, dr.dev_opt, HCI_INIT_TIMEOUT);
659 		hdev->pkt_type = (__u16) dr.dev_opt;
663 		hdev->link_policy = (__u16) dr.dev_opt;
667 		hdev->link_mode = ((__u16) dr.dev_opt) & (HCI_LM_MASTER | HCI_LM_ACCEPT);
/* dev_opt packs mtu in the high 16 bits and packet count in the low 16. */
671 		hdev->acl_mtu = *((__u16 *)&dr.dev_opt + 1);
672 		hdev->acl_pkts = *((__u16 *)&dr.dev_opt + 0);
676 		hdev->sco_mtu = *((__u16 *)&dr.dev_opt + 1);
677 		hdev->sco_pkts = *((__u16 *)&dr.dev_opt + 0);
/* ioctl(HCIGETDEVLIST) backend: return (id, flags) pairs for up to the
 * caller-requested number of registered devices. */
688 int hci_get_dev_list(void __user *arg)
690 	struct hci_dev_list_req *dl;
691 	struct hci_dev_req *dr;
693 	int n = 0, size, err;
696 	if (get_user(dev_num, (__u16 __user *) arg))
/* Cap the request so the kernel buffer stays within two pages. */
699 	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
702 	size = sizeof(*dl) + dev_num * sizeof(*dr);
704 	if (!(dl = kmalloc(size, GFP_KERNEL)))
709 	read_lock_bh(&hci_dev_list_lock);
710 	list_for_each(p, &hci_dev_list) {
711 		struct hci_dev *hdev;
712 		hdev = list_entry(p, struct hci_dev, list);
713 		(dr + n)->dev_id  = hdev->id;
714 		(dr + n)->dev_opt = hdev->flags;
718 	read_unlock_bh(&hci_dev_list_lock);
/* Only copy out the entries actually filled in. */
721 	size = sizeof(*dl) + n * sizeof(*dr);
723 	err = copy_to_user(arg, dl, size);
726 	return err ? -EFAULT : 0;
/* ioctl(HCIGETDEVINFO) backend: snapshot one device's identity, limits,
 * stats, and features into a hci_dev_info for userspace. */
729 int hci_get_dev_info(void __user *arg)
731 	struct hci_dev *hdev;
732 	struct hci_dev_info di;
735 	if (copy_from_user(&di, arg, sizeof(di)))
738 	if (!(hdev = hci_dev_get(di.dev_id)))
741 	strcpy(di.name,    hdev->name);
742 	di.bdaddr   = hdev->bdaddr;
743 	di.type     = hdev->type;
744 	di.flags    = hdev->flags;
745 	di.pkt_type = hdev->pkt_type;
746 	di.acl_mtu  = hdev->acl_mtu;
747 	di.acl_pkts = hdev->acl_pkts;
748 	di.sco_mtu  = hdev->sco_mtu;
749 	di.sco_pkts = hdev->sco_pkts;
750 	di.link_policy = hdev->link_policy;
751 	di.link_mode   = hdev->link_mode;
753 	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
754 	memcpy(&di.features, &hdev->features, sizeof(di.features));
756 	if (copy_to_user(arg, &di, sizeof(di)))
764 /* ---- Interface to HCI drivers ---- */
766 /* Alloc HCI device */
/* Allocate and zero a hci_dev for a driver; returns NULL on OOM.
 * Pair with hci_free_dev(). */
767 struct hci_dev *hci_alloc_dev(void)
769 	struct hci_dev *hdev;
771 	hdev = kmalloc(sizeof(struct hci_dev), GFP_KERNEL);
775 	memset(hdev, 0, sizeof(struct hci_dev));
779 EXPORT_SYMBOL(hci_alloc_dev);
781 /* Free HCI device */
/* Drop the class-device reference; the release callback frees the struct. */
782 void hci_free_dev(struct hci_dev *hdev)
784 	/* will free via class release */
785 	class_device_put(&hdev->class_dev);
787 EXPORT_SYMBOL(hci_free_dev);
789 /* Register HCI device */
/* Driver entry point: assign an id/name, initialize locks, tasklets, queues
 * and caches, add to the global list, and announce HCI_DEV_REG.
 * The driver must have set ->open, ->close and ->destruct. */
790 int hci_register_dev(struct hci_dev *hdev)
792 	struct list_head *head = &hci_dev_list, *p;
795 	BT_DBG("%p name %s type %d owner %p", hdev, hdev->name, hdev->type, hdev->owner);
797 	if (!hdev->open || !hdev->close || !hdev->destruct)
800 	write_lock_bh(&hci_dev_list_lock);
802 	/* Find first available device id */
803 	list_for_each(p, &hci_dev_list) {
804 		if (list_entry(p, struct hci_dev, list)->id != id)
809 	sprintf(hdev->name, "hci%d", id);
811 	list_add(&hdev->list, head);
813 	atomic_set(&hdev->refcnt, 1);
814 	spin_lock_init(&hdev->lock);
/* Default to the basic packet types every controller supports. */
817 	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
818 	hdev->link_mode = (HCI_LM_ACCEPT);
820 	tasklet_init(&hdev->cmd_task, hci_cmd_task,(unsigned long) hdev);
821 	tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
822 	tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);
824 	skb_queue_head_init(&hdev->rx_q);
825 	skb_queue_head_init(&hdev->cmd_q);
826 	skb_queue_head_init(&hdev->raw_q);
828 	init_waitqueue_head(&hdev->req_wait_q);
829 	init_MUTEX(&hdev->req_lock);
831 	inquiry_cache_init(hdev);
833 	hci_conn_hash_init(hdev);
835 	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
837 	atomic_set(&hdev->promisc, 0);
839 	write_unlock_bh(&hci_dev_list_lock);
841 	hci_register_sysfs(hdev);
843 	hci_notify(hdev, HCI_DEV_REG);
847 EXPORT_SYMBOL(hci_register_dev);
849 /* Unregister HCI device */
/* Driver exit point: remove from sysfs and the global list, force-close the
 * device, and announce HCI_DEV_UNREG. */
850 int hci_unregister_dev(struct hci_dev *hdev)
852 	BT_DBG("%p name %s type %d", hdev, hdev->name, hdev->type);
854 	hci_unregister_sysfs(hdev);
856 	write_lock_bh(&hci_dev_list_lock);
857 	list_del(&hdev->list);
858 	write_unlock_bh(&hci_dev_list_lock);
860 	hci_dev_do_close(hdev);
862 	hci_notify(hdev, HCI_DEV_UNREG);
867 EXPORT_SYMBOL(hci_unregister_dev);
869 /* Suspend HCI device */
/* Notify listeners that @hdev is suspending (no state change here). */
870 int hci_suspend_dev(struct hci_dev *hdev)
872 	hci_notify(hdev, HCI_DEV_SUSPEND);
875 EXPORT_SYMBOL(hci_suspend_dev);
877 /* Resume HCI device */
/* Notify listeners that @hdev is resuming (no state change here). */
878 int hci_resume_dev(struct hci_dev *hdev)
880 	hci_notify(hdev, HCI_DEV_RESUME);
883 EXPORT_SYMBOL(hci_resume_dev);
885 /* ---- Interface to upper protocols ---- */
887 /* Register/Unregister protocols.
888  * hci_task_lock is used to ensure that no tasks are running. */
/* Install @hp in its hci_proto slot; fails if the id is out of range or
 * the slot is already occupied. Write-locks out the RX/TX tasklets. */
889 int hci_register_proto(struct hci_proto *hp)
893 	BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
895 	if (hp->id >= HCI_MAX_PROTO)
898 	write_lock_bh(&hci_task_lock);
900 	if (!hci_proto[hp->id])
901 		hci_proto[hp->id] = hp;
905 	write_unlock_bh(&hci_task_lock);
909 EXPORT_SYMBOL(hci_register_proto);
/* Clear @hp's hci_proto slot under the task lock so no tasklet can still
 * be dispatching into it. */
911 int hci_unregister_proto(struct hci_proto *hp)
915 	BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
917 	if (hp->id >= HCI_MAX_PROTO)
920 	write_lock_bh(&hci_task_lock);
922 	if (hci_proto[hp->id])
923 		hci_proto[hp->id] = NULL;
927 	write_unlock_bh(&hci_task_lock);
931 EXPORT_SYMBOL(hci_unregister_proto);
/* Hand one skb to the driver's ->send(); skb->dev carries the hci_dev.
 * In promiscuous mode a timestamped copy also goes to monitoring sockets. */
933 static int hci_send_frame(struct sk_buff *skb)
935 	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
942 	BT_DBG("%s type %d len %d", hdev->name, skb->pkt_type, skb->len);
944 	if (atomic_read(&hdev->promisc)) {
946 		do_gettimeofday(&skb->stamp);
948 		hci_send_to_sock(hdev, skb);
951 	/* Get rid of skb owner, prior to sending to the driver. */
954 	return hdev->send(skb);
957 /* Send HCI command */
/* Build an HCI command packet (header + @plen bytes of @param) and queue it
 * on cmd_q; the cmd tasklet transmits it when a credit is available.
 * Returns 0 or -ENOMEM. GFP_ATOMIC safe. */
958 int hci_send_cmd(struct hci_dev *hdev, __u16 ogf, __u16 ocf, __u32 plen, void *param)
960 	int len = HCI_COMMAND_HDR_SIZE + plen;
961 	struct hci_command_hdr *hdr;
964 	BT_DBG("%s ogf 0x%x ocf 0x%x plen %d", hdev->name, ogf, ocf, plen);
966 	skb = bt_skb_alloc(len, GFP_ATOMIC);
968 		BT_ERR("%s Can't allocate memory for HCI command", hdev->name);
972 	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
/* Opcode is OGF/OCF packed and stored little-endian per the HCI spec. */
973 	hdr->opcode = __cpu_to_le16(hci_opcode_pack(ogf, ocf));
977 		memcpy(skb_put(skb, plen), param, plen);
979 	BT_DBG("skb len %d", skb->len);
981 	skb->pkt_type = HCI_COMMAND_PKT;
982 	skb->dev = (void *) hdev;
983 	skb_queue_tail(&hdev->cmd_q, skb);
988 EXPORT_SYMBOL(hci_send_cmd);
990 /* Get data from the previously sent command */
/* If the last transmitted command matches @ogf/@ocf, return a pointer to its
 * parameter bytes (inside hdev->sent_cmd); otherwise NULL-path applies. */
991 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 ogf, __u16 ocf)
993 	struct hci_command_hdr *hdr;
998 	hdr = (void *) hdev->sent_cmd->data;
/* Compare in wire (little-endian) byte order. */
1000 	if (hdr->opcode != __cpu_to_le16(hci_opcode_pack(ogf, ocf)))
1003 	BT_DBG("%s ogf 0x%x ocf 0x%x", hdev->name, ogf, ocf);
1005 	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
/* Prepend an ACL data header (handle+flags packed, little-endian length)
 * to @skb and point skb->h.raw at it. */
1009 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
1011 	struct hci_acl_hdr *hdr;
1014 	hdr = (struct hci_acl_hdr *) skb_push(skb, HCI_ACL_HDR_SIZE);
1015 	hdr->handle = __cpu_to_le16(hci_handle_pack(handle, flags));
1016 	hdr->dlen   = __cpu_to_le16(len);
1018 	skb->h.raw = (void *) hdr;
/* Queue ACL data for @conn. The head fragment is tagged ACL_START and any
 * frag_list members are re-tagged ACL_CONT; all fragments are queued on
 * conn->data_q atomically so the scheduler never sees a partial PDU. */
1021 int hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
1023 	struct hci_dev *hdev = conn->hdev;
1024 	struct sk_buff *list;
1026 	BT_DBG("%s conn %p flags 0x%x", hdev->name, conn, flags);
1028 	skb->dev = (void *) hdev;
1029 	skb->pkt_type = HCI_ACLDATA_PKT;
1030 	hci_add_acl_hdr(skb, conn->handle, flags | ACL_START);
1032 	if (!(list = skb_shinfo(skb)->frag_list)) {
1033 		/* Non fragmented */
1034 		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
1036 		skb_queue_tail(&conn->data_q, skb);
1039 		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
/* Detach the fragment chain; each fragment becomes its own queued skb. */
1041 		skb_shinfo(skb)->frag_list = NULL;
1043 		/* Queue all fragments atomically */
1044 		spin_lock_bh(&conn->data_q.lock);
1046 		__skb_queue_tail(&conn->data_q, skb);
1048 			skb = list; list = list->next;
1050 			skb->dev = (void *) hdev;
1051 			skb->pkt_type = HCI_ACLDATA_PKT;
1052 			hci_add_acl_hdr(skb, conn->handle, flags | ACL_CONT);
1054 			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
1056 			__skb_queue_tail(&conn->data_q, skb);
1059 		spin_unlock_bh(&conn->data_q.lock);
1065 EXPORT_SYMBOL(hci_send_acl);
/* Queue one SCO data packet for @conn; rejects frames larger than the
 * controller's SCO MTU. Header dlen is a single byte (no endian swap). */
1068 int hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
1070 	struct hci_dev *hdev = conn->hdev;
1071 	struct hci_sco_hdr hdr;
1073 	BT_DBG("%s len %d", hdev->name, skb->len);
1075 	if (skb->len > hdev->sco_mtu) {
1080 	hdr.handle = __cpu_to_le16(conn->handle);
1081 	hdr.dlen   = skb->len;
1083 	skb->h.raw = skb_push(skb, HCI_SCO_HDR_SIZE);
1084 	memcpy(skb->h.raw, &hdr, HCI_SCO_HDR_SIZE);
1086 	skb->dev = (void *) hdev;
1087 	skb->pkt_type = HCI_SCODATA_PKT;
1088 	skb_queue_tail(&conn->data_q, skb);
1092 EXPORT_SYMBOL(hci_send_sco);
1094 /* ---- HCI TX task (outgoing data) ---- */
1096 /* HCI Connection scheduler */
1097 static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
1099 struct hci_conn_hash *h = &hdev->conn_hash;
1100 struct hci_conn *conn = NULL;
1101 int num = 0, min = ~0;
1102 struct list_head *p;
1104 /* We don't have to lock device here. Connections are always
1105 * added and removed with TX task disabled. */
1106 list_for_each(p, &h->list) {
1108 c = list_entry(p, struct hci_conn, list);
1110 if (c->type != type || c->state != BT_CONNECTED
1111 || skb_queue_empty(&c->data_q))
1115 if (c->sent < min) {
1122 int cnt = (type == ACL_LINK ? hdev->acl_cnt : hdev->sco_cnt);
1128 BT_DBG("conn %p quote %d", conn, *quote);
/* ACL TX watchdog: the controller stopped returning buffer credits, so
 * disconnect every ACL link with un-acked packets (reason 0x13 = remote
 * user terminated) to recover flow control. */
1132 static inline void hci_acl_tx_to(struct hci_dev *hdev)
1134 	struct hci_conn_hash *h = &hdev->conn_hash;
1135 	struct list_head *p;
1138 	BT_ERR("%s ACL tx timeout", hdev->name);
1140 	/* Kill stalled connections */
1141 	list_for_each(p, &h->list) {
1142 		c = list_entry(p, struct hci_conn, list);
1143 		if (c->type == ACL_LINK && c->sent) {
1144 			BT_ERR("%s killing stalled ACL connection %s",
1145 				hdev->name, batostr(&c->dst));
1146 			hci_acl_disconn(c, 0x13);
1151 static inline void hci_sched_acl(struct hci_dev *hdev)
1153 struct hci_conn *conn;
1154 struct sk_buff *skb;
1157 BT_DBG("%s", hdev->name);
1159 /* ACL tx timeout must be longer than maximum
1160 * link supervision timeout (40.9 seconds) */
1161 if (!hdev->acl_cnt && (jiffies - hdev->acl_last_tx) > (HZ * 45))
1162 hci_acl_tx_to(hdev);
1164 while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, "e))) {
1165 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
1166 BT_DBG("skb %p len %d", skb, skb->len);
1167 hci_send_frame(skb);
1168 hdev->acl_last_tx = jiffies;
1177 static inline void hci_sched_sco(struct hci_dev *hdev)
1179 struct hci_conn *conn;
1180 struct sk_buff *skb;
1183 BT_DBG("%s", hdev->name);
1185 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, "e))) {
1186 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
1187 BT_DBG("skb %p len %d", skb, skb->len);
1188 hci_send_frame(skb);
1191 if (conn->sent == ~0)
/* TX tasklet: run the ACL and SCO schedulers, then flush any raw-mode
 * packets. Holds hci_task_lock for read against proto (un)registration. */
1197 static void hci_tx_task(unsigned long arg)
1199 	struct hci_dev *hdev = (struct hci_dev *) arg;
1200 	struct sk_buff *skb;
1202 	read_lock(&hci_task_lock);
1204 	BT_DBG("%s acl %d sco %d", hdev->name, hdev->acl_cnt, hdev->sco_cnt);
1206 	/* Schedule queues and send stuff to HCI driver */
1208 	hci_sched_acl(hdev);
1210 	hci_sched_sco(hdev);
1212 	/* Send next queued raw (unknown type) packet */
1213 	while ((skb = skb_dequeue(&hdev->raw_q)))
1214 		hci_send_frame(skb);
1216 	read_unlock(&hci_task_lock);
1219 /* ----- HCI RX task (incoming data proccessing) ----- */
1221 /* ACL data packet */
1222 static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1224 struct hci_acl_hdr *hdr = (void *) skb->data;
1225 struct hci_conn *conn;
1226 __u16 handle, flags;
1228 skb_pull(skb, HCI_ACL_HDR_SIZE);
1230 handle = __le16_to_cpu(hdr->handle);
1231 flags = hci_flags(handle);
1232 handle = hci_handle(handle);
1234 BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);
1236 hdev->stat.acl_rx++;
1239 conn = hci_conn_hash_lookup_handle(hdev, handle);
1240 hci_dev_unlock(hdev);
1243 register struct hci_proto *hp;
1245 /* Send to upper protocol */
1246 if ((hp = hci_proto[HCI_PROTO_L2CAP]) && hp->recv_acldata) {
1247 hp->recv_acldata(conn, skb, flags);
1251 BT_ERR("%s ACL packet for unknown connection handle %d",
1252 hdev->name, handle);
1258 /* SCO data packet */
/* Strip the SCO header, resolve the connection handle, and hand the payload
 * to the SCO protocol's recv_scodata(); logs unknown handles. */
1259 static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1261 	struct hci_sco_hdr *hdr = (void *) skb->data;
1262 	struct hci_conn *conn;
1265 	skb_pull(skb, HCI_SCO_HDR_SIZE);
1267 	handle = __le16_to_cpu(hdr->handle);
1269 	BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
1271 	hdev->stat.sco_rx++;
1274 	conn = hci_conn_hash_lookup_handle(hdev, handle);
1275 	hci_dev_unlock(hdev);
1278 		register struct hci_proto *hp;
1280 		/* Send to upper protocol */
1281 		if ((hp = hci_proto[HCI_PROTO_SCO]) && hp->recv_scodata) {
1282 			hp->recv_scodata(conn, skb);
1286 		BT_ERR("%s SCO packet for unknown connection handle %d",
1287 			hdev->name, handle);
/* RX tasklet: drain rx_q, mirror frames to promiscuous sockets, and dispatch
 * by packet type (events always; data only once init has finished and the
 * device is not in raw mode). */
1293 void hci_rx_task(unsigned long arg)
1295 	struct hci_dev *hdev = (struct hci_dev *) arg;
1296 	struct sk_buff *skb;
1298 	BT_DBG("%s", hdev->name);
1300 	read_lock(&hci_task_lock);
1302 	while ((skb = skb_dequeue(&hdev->rx_q))) {
1303 		if (atomic_read(&hdev->promisc)) {
1304 			/* Send copy to the sockets */
1305 			hci_send_to_sock(hdev, skb);
1308 		if (test_bit(HCI_RAW, &hdev->flags)) {
1313 		if (test_bit(HCI_INIT, &hdev->flags)) {
1314 			/* Don't process data packets in this states. */
1315 			switch (skb->pkt_type) {
1316 			case HCI_ACLDATA_PKT:
1317 			case HCI_SCODATA_PKT:
1324 		switch (skb->pkt_type) {
1326 			hci_event_packet(hdev, skb);
1329 		case HCI_ACLDATA_PKT:
1330 			BT_DBG("%s ACL data packet", hdev->name);
1331 			hci_acldata_packet(hdev, skb);
1334 		case HCI_SCODATA_PKT:
1335 			BT_DBG("%s SCO data packet", hdev->name);
1336 			hci_scodata_packet(hdev, skb);
1345 	read_unlock(&hci_task_lock);
1348 static void hci_cmd_task(unsigned long arg)
1350 struct hci_dev *hdev = (struct hci_dev *) arg;
1351 struct sk_buff *skb;
1353 BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));
1355 if (!atomic_read(&hdev->cmd_cnt) && (jiffies - hdev->cmd_last_tx) > HZ) {
1356 BT_ERR("%s command tx timeout", hdev->name);
1357 atomic_set(&hdev->cmd_cnt, 1);
1360 /* Send queued commands */
1361 if (atomic_read(&hdev->cmd_cnt) && (skb = skb_dequeue(&hdev->cmd_q))) {
1363 kfree_skb(hdev->sent_cmd);
1365 if ((hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC))) {
1366 atomic_dec(&hdev->cmd_cnt);
1367 hci_send_frame(skb);
1368 hdev->cmd_last_tx = jiffies;
1370 skb_queue_head(&hdev->cmd_q, skb);
1371 hci_sched_cmd(hdev);