/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
/* Bluetooth HCI core. */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/kmod.h>

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/major.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <net/sock.h>

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#ifndef CONFIG_BT_HCI_CORE_DEBUG
#undef  BT_DBG
#define BT_DBG(D...)
#endif
static void hci_cmd_task(unsigned long arg);
static void hci_rx_task(unsigned long arg);
static void hci_tx_task(unsigned long arg);
static void hci_notify(struct hci_dev *hdev, int event);

rwlock_t hci_task_lock = RW_LOCK_UNLOCKED;

/* HCI device list */
LIST_HEAD(hci_dev_list);
rwlock_t hci_dev_list_lock = RW_LOCK_UNLOCKED;

/* HCI callback list */
LIST_HEAD(hci_cb_list);
rwlock_t hci_cb_list_lock = RW_LOCK_UNLOCKED;

/* HCI protocols */
#define HCI_MAX_PROTO	2
struct hci_proto *hci_proto[HCI_MAX_PROTO];

/* HCI notifiers list */
static struct notifier_block *hci_notifier;
/* ---- HCI notifications ---- */

int hci_register_notifier(struct notifier_block *nb)
{
	return notifier_chain_register(&hci_notifier, nb);
}

int hci_unregister_notifier(struct notifier_block *nb)
{
	return notifier_chain_unregister(&hci_notifier, nb);
}

void hci_notify(struct hci_dev *hdev, int event)
{
	notifier_call_chain(&hci_notifier, event, hdev);
}
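
/*
 * Illustrative sketch (not part of the original file): a module can watch
 * device state changes through this chain. The handler and block names
 * below ("example_notify", "example_nb") are hypothetical.
 *
 *	static int example_notify(struct notifier_block *nb,
 *					unsigned long event, void *ptr)
 *	{
 *		struct hci_dev *hdev = ptr;
 *
 *		if (event == HCI_DEV_REG)
 *			BT_INFO("hci device %s registered", hdev->name);
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block example_nb = {
 *		.notifier_call = example_notify,
 *	};
 *
 *	hci_register_notifier(&example_nb);
 */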
/* ---- HCI requests ---- */

void hci_req_complete(struct hci_dev *hdev, int result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
/* Execute request and wait for completion. */
static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
				unsigned long opt, __u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hdev->req_status = HCI_REQ_PEND;

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	req(hdev, opt);
	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_err(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
				unsigned long opt, __u32 timeout)
{
	int ret;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_request(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}
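
/*
 * Note (added for clarity): a request function queues one or more HCI
 * commands; the waiter in __hci_request() then sleeps until the event
 * handler calls hci_req_complete() or hci_req_cancel(), or until "timeout"
 * jiffies pass. A typical call is the reset issued from
 * hci_dev_do_close() below:
 *
 *	__hci_request(hdev, hci_reset_req, 0, HZ / 4);
 */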
static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset device */
	hci_send_cmd(hdev, OGF_HOST_CTL, OCF_RESET, 0, NULL);
}
static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
	__u16 param;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Mandatory initialization */

	/* Reset */
	if (test_bit(HCI_QUIRK_RESET_ON_INIT, &hdev->quirks))
		hci_send_cmd(hdev, OGF_HOST_CTL, OCF_RESET, 0, NULL);

	/* Read Local Supported Features */
	hci_send_cmd(hdev, OGF_INFO_PARAM, OCF_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_send_cmd(hdev, OGF_INFO_PARAM, OCF_READ_BUFFER_SIZE, 0, NULL);

#if 0
	/* Host buffer size */
	{
		struct hci_cp_host_buffer_size cp;
		cp.acl_mtu = __cpu_to_le16(HCI_MAX_ACL_SIZE);
		cp.sco_mtu = HCI_MAX_SCO_SIZE;
		cp.acl_max_pkt = __cpu_to_le16(0xffff);
		cp.sco_max_pkt = __cpu_to_le16(0xffff);
		hci_send_cmd(hdev, OGF_HOST_CTL, OCF_HOST_BUFFER_SIZE, sizeof(cp), &cp);
	}
#endif

	/* Read BD Address */
	hci_send_cmd(hdev, OGF_INFO_PARAM, OCF_READ_BD_ADDR, 0, NULL);

	/* Read Voice Setting */
	hci_send_cmd(hdev, OGF_HOST_CTL, OCF_READ_VOICE_SETTING, 0, NULL);

	/* Optional initialization */

	/* Clear Event Filters */
	{
		struct hci_cp_set_event_flt cp;
		cp.flt_type = HCI_FLT_CLEAR_ALL;
		hci_send_cmd(hdev, OGF_HOST_CTL, OCF_SET_EVENT_FLT, sizeof(cp), &cp);
	}

	/* Page timeout ~20 secs */
	param = __cpu_to_le16(0x8000);
	hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_PG_TIMEOUT, 2, &param);

	/* Connection accept timeout ~20 secs */
	param = __cpu_to_le16(0x7d00);
	hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_CA_TIMEOUT, 2, &param);
}
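
/*
 * Note (added for clarity): the replies to the commands queued above are
 * parsed by the event handler (hci_event.c), which stores the results in
 * hdev (features, acl_mtu, bdaddr, voice setting, ...) and calls
 * hci_req_complete() to finish the pending init request.
 */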
static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", hdev->name, scan);

	/* Inquiry and Page scans */
	hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_SCAN_ENABLE, 1, &scan);
}
static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", hdev->name, auth);

	/* Authentication */
	hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_AUTH_ENABLE, 1, &auth);
}
static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", hdev->name, encrypt);

	/* Encryption */
	hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_ENCRYPT_MODE, 1, &encrypt);
}
/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL;
	struct list_head *p;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each(p, &hci_dev_list) {
		struct hci_dev *d = list_entry(p, struct hci_dev, list);
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}
EXPORT_SYMBOL(hci_dev_get);
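
/*
 * Usage sketch (added for clarity): every successful hci_dev_get() must be
 * balanced with hci_dev_put() once the caller is done with the device:
 *
 *	struct hci_dev *hdev = hci_dev_get(0);
 *	if (hdev) {
 *		... use hdev ...
 *		hci_dev_put(hdev);
 *	}
 */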
/* ---- Inquiry support ---- */

static void inquiry_cache_flush(struct hci_dev *hdev)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *next = cache->list, *e;

	BT_DBG("cache %p", cache);

	cache->list = NULL;
	while ((e = next)) {
		next = e->next;
		kfree(e);
	}
}
struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %s", cache, batostr(bdaddr));

	for (e = cache->list; e; e = e->next)
		if (!bacmp(&e->data.bdaddr, bdaddr))
			break;
	return e;
}
void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

	if (!(e = hci_inquiry_cache_lookup(hdev, &data->bdaddr))) {
		/* Entry not in the cache. Add new one. */
		if (!(e = kmalloc(sizeof(struct inquiry_entry), GFP_ATOMIC)))
			return;
		memset(e, 0, sizeof(struct inquiry_entry));
		e->next = cache->list;
		cache->list = e;
	}

	memcpy(&e->data, data, sizeof(*data));
	e->timestamp = jiffies;
	cache->timestamp = jiffies;
}
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	for (e = cache->list; e && copied < num; e = e->next, copied++) {
		struct inquiry_data *data = &e->data;
		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset = data->clock_offset;
		info++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}
static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_send_cmd(hdev, OGF_LINK_CTL, OCF_INQUIRY, sizeof(cp), &cp);
}
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	if (!(hdev = hci_dev_get(ir.dev_id)))
		return -ENODEV;

	hci_dev_lock_bh(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
			inquiry_cache_empty(hdev) ||
			ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock_bh(hdev);

	timeo = ir.length * 2 * HZ;
	if (do_inquiry && (err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo)) < 0)
		goto done;

	/* for unlimited number of responses we will use buffer with 255 entries */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	if (!(buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL))) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock_bh(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock_bh(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
					ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
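
/*
 * Userspace reaches this through the HCIINQUIRY ioctl on an HCI socket.
 * A rough sketch (hypothetical userspace code, not part of this file):
 *
 *	struct {
 *		struct hci_inquiry_req ir;
 *		struct inquiry_info info[8];
 *	} req = { .ir = { .dev_id = 0, .length = 8, .num_rsp = 8 } };
 *
 *	ioctl(hci_socket, HCIINQUIRY, (unsigned long) &req);
 */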
/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	if (!(hdev = hci_dev_get(dev)))
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);

		//__hci_request(hdev, hci_reset_req, 0, HZ);
		ret = __hci_request(hdev, hci_init_req, 0, HCI_INIT_TIMEOUT);

		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
	} else {
		/* Init failed, cleanup */
		tasklet_kill(&hdev->rx_task);
		tasklet_kill(&hdev->tx_task);
		tasklet_kill(&hdev->cmd_task);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		hci_req_unlock(hdev);
		return 0;
	}

	/* Kill RX and TX tasks */
	tasklet_kill(&hdev->rx_task);
	tasklet_kill(&hdev->tx_task);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);
	__hci_request(hdev, hci_reset_req, 0, HZ/4);
	clear_bit(HCI_INIT, &hdev->flags);

	/* Kill cmd task */
	tasklet_kill(&hdev->cmd_task);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags = 0;

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}
int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	if (!(hdev = hci_dev_get(dev)))
		return -ENODEV;
	err = hci_dev_do_close(hdev);
	hci_dev_put(hdev);
	return err;
}
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	if (!(hdev = hci_dev_get(dev)))
		return -ENODEV;

	hci_req_lock(hdev);
	tasklet_disable(&hdev->tx_task);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0;

	ret = __hci_request(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	tasklet_enable(&hdev->tx_task);
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	if (!(hdev = hci_dev_get(dev)))
		return -ENODEV;

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	hci_dev_put(hdev);

	return ret;
}
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	if (!(hdev = hci_dev_get(dr.dev_id)))
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt, HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req,
					dr.dev_opt, HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req,
					dr.dev_opt, HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt, HCI_INIT_TIMEOUT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETLINKPOL:
		hdev->link_policy = (__u16) dr.dev_opt;
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) & (HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *)&dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *)&dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *)&dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *)&dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}
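
/*
 * Sketch (hypothetical userspace code, added for clarity): these requests
 * arrive via ioctls on an HCI socket, e.g. enabling page and inquiry scan
 * on hci0:
 *
 *	struct hci_dev_req dr = {
 *		.dev_id  = 0,
 *		.dev_opt = SCAN_PAGE | SCAN_INQUIRY,
 *	};
 *
 *	ioctl(hci_socket, HCISETSCAN, (unsigned long) &dr);
 */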
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	struct list_head *p;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	if (!(dl = kmalloc(size, GFP_KERNEL)))
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock_bh(&hci_dev_list_lock);
	list_for_each(p, &hci_dev_list) {
		struct hci_dev *hdev;
		hdev = list_entry(p, struct hci_dev, list);
		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;
		if (++n >= dev_num)
			break;
	}
	read_unlock_bh(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	if (!(hdev = hci_dev_get(di.dev_id)))
		return -ENODEV;

	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	di.type     = hdev->type;
	di.flags    = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu  = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu  = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
/* ---- Interface to HCI drivers ---- */

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kmalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	memset(hdev, 0, sizeof(struct hci_dev));

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	/* will free via class release */
	class_device_put(&hdev->class_dev);
}
EXPORT_SYMBOL(hci_free_dev);
/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	struct list_head *head = &hci_dev_list, *p;
	int id = 0;

	BT_DBG("%p name %s type %d owner %p", hdev, hdev->name, hdev->type, hdev->owner);

	if (!hdev->open || !hdev->close || !hdev->destruct)
		return -EINVAL;

	write_lock_bh(&hci_dev_list_lock);

	/* Find first available device id */
	list_for_each(p, &hci_dev_list) {
		if (list_entry(p, struct hci_dev, list)->id != id)
			break;
		head = p; id++;
	}

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;
	list_add(&hdev->list, head);

	atomic_set(&hdev->refcnt, 1);
	spin_lock_init(&hdev->lock);

	hdev->flags = 0;
	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);

	tasklet_init(&hdev->cmd_task, hci_cmd_task,(unsigned long) hdev);
	tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
	tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);
	init_MUTEX(&hdev->req_lock);

	inquiry_cache_init(hdev);

	hci_conn_hash_init(hdev);

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	atomic_set(&hdev->promisc, 0);

	write_unlock_bh(&hci_dev_list_lock);

	hci_register_sysfs(hdev);

	hci_notify(hdev, HCI_DEV_REG);

	return id;
}
EXPORT_SYMBOL(hci_register_dev);
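
/*
 * Registration sketch (hypothetical transport driver; the "foo_*" names
 * are illustrative only): a driver allocates the device, fills in its
 * callbacks, and hands it to the core.
 *
 *	struct hci_dev *hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *
 *	hdev->type     = HCI_USB;
 *	hdev->open     = foo_open;
 *	hdev->close    = foo_close;
 *	hdev->flush    = foo_flush;
 *	hdev->send     = foo_send;
 *	hdev->destruct = foo_destruct;
 *	hdev->owner    = THIS_MODULE;
 *
 *	if (hci_register_dev(hdev) < 0) {
 *		hci_free_dev(hdev);
 *		return -EBUSY;
 *	}
 */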
/* Unregister HCI device */
int hci_unregister_dev(struct hci_dev *hdev)
{
	BT_DBG("%p name %s type %d", hdev, hdev->name, hdev->type);

	hci_unregister_sysfs(hdev);

	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	hci_notify(hdev, HCI_DEV_UNREG);

	hci_dev_put(hdev);
	return 0;
}
EXPORT_SYMBOL(hci_unregister_dev);
/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
/* ---- Interface to upper protocols ---- */

/* Register/Unregister protocols.
 * hci_task_lock is used to ensure that no tasks are running. */
int hci_register_proto(struct hci_proto *hp)
{
	int err = 0;

	BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

	if (hp->id >= HCI_MAX_PROTO)
		return -EINVAL;

	write_lock_bh(&hci_task_lock);

	if (!hci_proto[hp->id])
		hci_proto[hp->id] = hp;
	else
		err = -EEXIST;

	write_unlock_bh(&hci_task_lock);

	return err;
}
EXPORT_SYMBOL(hci_register_proto);
int hci_unregister_proto(struct hci_proto *hp)
{
	int err = 0;

	BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

	if (hp->id >= HCI_MAX_PROTO)
		return -EINVAL;

	write_lock_bh(&hci_task_lock);

	if (hci_proto[hp->id])
		hci_proto[hp->id] = NULL;
	else
		err = -ENOENT;

	write_unlock_bh(&hci_task_lock);

	return err;
}
EXPORT_SYMBOL(hci_unregister_proto);
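
/*
 * Sketch (illustrative, mirroring what the L2CAP module does): an upper
 * protocol registers a struct hci_proto whose hooks the RX tasklet calls.
 * "example_proto" and its handlers are hypothetical names.
 *
 *	static struct hci_proto example_proto = {
 *		.name         = "Example",
 *		.id           = HCI_PROTO_L2CAP,
 *		.connect_ind  = example_connect_ind,
 *		.recv_acldata = example_recv_acldata,
 *	};
 *
 *	hci_register_proto(&example_proto);
 */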
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, skb->pkt_type, skb->len);

	if (atomic_read(&hdev->promisc)) {
		/* Time stamp */
		do_gettimeofday(&skb->stamp);

		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}
/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 ogf, __u16 ocf, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s ogf 0x%x ocf 0x%x plen %d", hdev->name, ogf, ocf, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s Can't allocate memory for HCI command", hdev->name);
		return -ENOMEM;
	}

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = __cpu_to_le16(hci_opcode_pack(ogf, ocf));
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	skb->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;
	skb_queue_tail(&hdev->cmd_q, skb);
	hci_sched_cmd(hdev);

	return 0;
}
EXPORT_SYMBOL(hci_send_cmd);
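
/*
 * Example (added for clarity): this is the same call hci_scan_req() above
 * uses to enable page and inquiry scan; the command is queued on cmd_q and
 * sent by hci_cmd_task() as command credits allow.
 *
 *	__u8 scan = SCAN_PAGE | SCAN_INQUIRY;
 *	hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_SCAN_ENABLE, 1, &scan);
 */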
/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 ogf, __u16 ocf)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != __cpu_to_le16(hci_opcode_pack(ogf, ocf)))
		return NULL;

	BT_DBG("%s ogf 0x%x ocf 0x%x", hdev->name, ogf, ocf);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}
/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	hdr = (struct hci_acl_hdr *) skb_push(skb, HCI_ACL_HDR_SIZE);
	hdr->handle = __cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = __cpu_to_le16(len);

	skb->h.raw = (void *) hdr;
}
int hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	BT_DBG("%s conn %p flags 0x%x", hdev->name, conn, flags);

	skb->dev = (void *) hdev;
	skb->pkt_type = HCI_ACLDATA_PKT;
	hci_add_acl_hdr(skb, conn->handle, flags | ACL_START);

	if (!(list = skb_shinfo(skb)->frag_list)) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(&conn->data_q, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock_bh(&conn->data_q.lock);

		__skb_queue_tail(&conn->data_q, skb);
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			skb->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags | ACL_CONT);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(&conn->data_q, skb);
		} while (list);

		spin_unlock_bh(&conn->data_q.lock);
	}

	hci_sched_tx(hdev);
	return 0;
}
EXPORT_SYMBOL(hci_send_acl);
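
/*
 * Note (added for clarity): a caller that wants a large PDU split into ACL
 * fragments chains the continuation buffers on skb_shinfo(skb)->frag_list
 * before calling hci_send_acl(); the head buffer goes out flagged
 * ACL_START and each continuation ACL_CONT, as above.
 */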
/* Send SCO data */
int hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	if (skb->len > hdev->sco_mtu) {
		kfree_skb(skb);
		return -EINVAL;
	}

	hdr.handle = __cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	skb->h.raw = skb_push(skb, HCI_SCO_HDR_SIZE);
	memcpy(skb->h.raw, &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	skb->pkt_type = HCI_SCODATA_PKT;
	skb_queue_tail(&conn->data_q, skb);
	hci_sched_tx(hdev);
	return 0;
}
EXPORT_SYMBOL(hci_send_sco);
/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL;
	int num = 0, min = ~0;
	struct list_head *p;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */
	list_for_each(p, &h->list) {
		struct hci_conn *c;
		c = list_entry(p, struct hci_conn, list);

		if (c->type != type || c->state != BT_CONNECTED
				|| skb_queue_empty(&c->data_q))
			continue;
		num++;

		if (c->sent < min) {
			min  = c->sent;
			conn = c;
		}
	}

	if (conn) {
		int cnt = (type == ACL_LINK ? hdev->acl_cnt : hdev->sco_cnt);
		int q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
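
/*
 * Note (added for clarity): the scheduler picks the connection of the
 * requested type with the fewest packets in flight and grants it an equal
 * share of the controller's free buffer credits (cnt / num, at least 1),
 * so a busy link cannot starve quiet ones.
 */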
static inline void hci_acl_tx_to(struct hci_dev *hdev)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct list_head *p;
	struct hci_conn *c;

	BT_ERR("%s ACL tx timeout", hdev->name);

	/* Kill stalled connections */
	list_for_each(p, &h->list) {
		c = list_entry(p, struct hci_conn, list);
		if (c->type == ACL_LINK && c->sent) {
			BT_ERR("%s killing stalled ACL connection %s",
				hdev->name, batostr(&c->dst));
			hci_acl_disconn(c, 0x13);
		}
	}
}
static inline void hci_sched_acl(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	/* ACL tx timeout must be longer than maximum
	 * link supervision timeout (40.9 seconds) */
	if (!hdev->acl_cnt && (jiffies - hdev->acl_last_tx) > (HZ * 45))
		hci_acl_tx_to(hdev);

	while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			conn->sent++;
		}
	}
}
/* Schedule SCO */
static inline void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
static void hci_tx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	read_lock(&hci_task_lock);

	BT_DBG("%s acl %d sco %d", hdev->name, hdev->acl_cnt, hdev->sco_cnt);

	/* Schedule queues and send stuff to HCI driver */

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);

	read_unlock(&hci_task_lock);
}
/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		/* Send to upper protocol */
		if ((hp = hci_proto[HCI_PROTO_L2CAP]) && hp->recv_acldata) {
			hp->recv_acldata(conn, skb, flags);
			return;
		}
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}
/* SCO data packet */
static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		/* Send to upper protocol */
		if ((hp = hci_proto[HCI_PROTO_SCO]) && hp->recv_scodata) {
			hp->recv_scodata(conn, skb);
			return;
		}
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}
void hci_rx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	read_lock(&hci_task_lock);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (skb->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (skb->pkt_type) {
		case HCI_EVENT_PKT:
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}

	read_unlock(&hci_task_lock);
}
static void hci_cmd_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	if (!atomic_read(&hdev->cmd_cnt) && (jiffies - hdev->cmd_last_tx) > HZ) {
		BT_ERR("%s command tx timeout", hdev->name);
		atomic_set(&hdev->cmd_cnt, 1);
	}

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt) && (skb = skb_dequeue(&hdev->cmd_q))) {
		if (hdev->sent_cmd)
			kfree_skb(hdev->sent_cmd);

		if ((hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC))) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			hdev->cmd_last_tx = jiffies;
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			hci_sched_cmd(hdev);
		}
	}
}