/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

/* $Id: hci_core.c,v 1.6 2002/04/17 17:37:16 maxk Exp $ */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/kmod.h>

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/major.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <net/sock.h>

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#ifndef CONFIG_BT_HCI_CORE_DEBUG
#undef  BT_DBG
#define BT_DBG( A... )
#endif
static void hci_cmd_task(unsigned long arg);
static void hci_rx_task(unsigned long arg);
static void hci_tx_task(unsigned long arg);
static void hci_notify(struct hci_dev *hdev, int event);

rwlock_t hci_task_lock = RW_LOCK_UNLOCKED;

/* HCI device list */
LIST_HEAD(hci_dev_list);
rwlock_t hci_dev_list_lock = RW_LOCK_UNLOCKED;

/* HCI protocols */
#define HCI_MAX_PROTO	2
struct hci_proto *hci_proto[HCI_MAX_PROTO];

/* HCI notifiers list */
static struct notifier_block *hci_notifier;
/* ---- HCI notifications ---- */

int hci_register_notifier(struct notifier_block *nb)
{
	return notifier_chain_register(&hci_notifier, nb);
}

int hci_unregister_notifier(struct notifier_block *nb)
{
	return notifier_chain_unregister(&hci_notifier, nb);
}

void hci_notify(struct hci_dev *hdev, int event)
{
	notifier_call_chain(&hci_notifier, event, hdev);
}
/* ---- HCI requests ---- */

void hci_req_complete(struct hci_dev *hdev, int result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
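/* The synchronous request machinery works as follows: the caller marks the
 * request pending, runs the request function (which only queues HCI commands)
 * and sleeps on req_wait_q. The command replies are processed in the event
 * path, which calls hci_req_complete() to store the result and wake the
 * caller; hci_req_cancel() aborts a pending request the same way. */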
/* Execute request and wait for completion. */
static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
				unsigned long opt, __u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hdev->req_status = HCI_REQ_PEND;

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	req(hdev, opt);
	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_err(hdev->req_result);
		break;
	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;
	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);
	return err;
}
static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
				unsigned long opt, __u32 timeout)
{
	int ret;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_request(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}
static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset device */
	hci_send_cmd(hdev, OGF_HOST_CTL, OCF_RESET, 0, NULL);
}
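/* Bring the device to a known state. The commands below are queued
 * back-to-back; their replies are handled by the HCI event code, which
 * fills in hdev fields such as features, acl_mtu/acl_pkts and bdaddr
 * from the corresponding Command Complete events. */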
static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
	__u16 param;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Mandatory initialization */

	/* Reset */
	if (test_bit(HCI_QUIRK_RESET_ON_INIT, &hdev->quirks))
		hci_send_cmd(hdev, OGF_HOST_CTL, OCF_RESET, 0, NULL);

	/* Read Local Supported Features */
	hci_send_cmd(hdev, OGF_INFO_PARAM, OCF_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_send_cmd(hdev, OGF_INFO_PARAM, OCF_READ_BUFFER_SIZE, 0, NULL);

#if 0
	/* Host buffer size */
	{
		struct hci_cp_host_buffer_size cp;
		cp.acl_mtu = __cpu_to_le16(HCI_MAX_ACL_SIZE);
		cp.sco_mtu = HCI_MAX_SCO_SIZE;
		cp.acl_max_pkt = __cpu_to_le16(0xffff);
		cp.sco_max_pkt = __cpu_to_le16(0xffff);
		hci_send_cmd(hdev, OGF_HOST_CTL, OCF_HOST_BUFFER_SIZE, sizeof(cp), &cp);
	}
#endif

	/* Read BD Address */
	hci_send_cmd(hdev, OGF_INFO_PARAM, OCF_READ_BD_ADDR, 0, NULL);

	/* Read Voice Setting */
	hci_send_cmd(hdev, OGF_HOST_CTL, OCF_READ_VOICE_SETTING, 0, NULL);

	/* Optional initialization */

	/* Clear Event Filters */
	{
		struct hci_cp_set_event_flt cp;
		cp.flt_type = HCI_FLT_CLEAR_ALL;
		hci_send_cmd(hdev, OGF_HOST_CTL, OCF_SET_EVENT_FLT, sizeof(cp), &cp);
	}

	/* Page timeout ~20 secs */
	param = __cpu_to_le16(0x8000);
	hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_PG_TIMEOUT, 2, &param);

	/* Connection accept timeout ~20 secs */
	param = __cpu_to_le16(0x7d00);
	hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_CA_TIMEOUT, 2, &param);
}
static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", hdev->name, scan);

	/* Inquiry and Page scans */
	hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_SCAN_ENABLE, 1, &scan);
}
static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", hdev->name, auth);

	/* Authentication */
	hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_AUTH_ENABLE, 1, &auth);
}
static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", hdev->name, encrypt);

	/* Encryption */
	hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_ENCRYPT_MODE, 1, &encrypt);
}
/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL;
	struct list_head *p;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each(p, &hci_dev_list) {
		struct hci_dev *d = list_entry(p, struct hci_dev, list);
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}
/* ---- Inquiry support ---- */
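/* The inquiry cache is a simple singly linked list of inquiry_entry
 * records hanging off hdev->inq_cache. Callers are expected to hold the
 * device lock (hci_dev_lock*); entries are allocated with GFP_ATOMIC
 * since updates come from the event processing path. */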
void inquiry_cache_flush(struct hci_dev *hdev)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *next = cache->list, *e;

	BT_DBG("cache %p", cache);

	cache->list = NULL;
	while ((e = next)) {
		next = e->next;
		kfree(e);
	}
}
struct inquiry_entry *inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %s", cache, batostr(bdaddr));

	for (e = cache->list; e; e = e->next)
		if (!bacmp(&e->info.bdaddr, bdaddr))
			break;
	return e;
}
void inquiry_cache_update(struct hci_dev *hdev, struct inquiry_info *info)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %s", cache, batostr(&info->bdaddr));

	if (!(e = inquiry_cache_lookup(hdev, &info->bdaddr))) {
		/* Entry not in the cache. Add new one. */
		if (!(e = kmalloc(sizeof(struct inquiry_entry), GFP_ATOMIC)))
			return;
		memset(e, 0, sizeof(struct inquiry_entry));
		e->next = cache->list;
		cache->list = e;
	}

	memcpy(&e->info, info, sizeof(*info));
	e->timestamp = jiffies;
	cache->timestamp = jiffies;
}
int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	for (e = cache->list; e && copied < num; e = e->next, copied++)
		memcpy(info++, &e->info, sizeof(*info));

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}
static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_send_cmd(hdev, OGF_LINK_CTL, OCF_INQUIRY, sizeof(cp), &cp);
}
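/* The inquiry ioctl (HCIINQUIRY): flush the cache if it is stale (or if
 * the caller asks for it), run a live inquiry via the synchronous request
 * machinery, then dump the cache into a temporary kernel buffer and copy
 * it to user space. */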
int hci_inquiry(unsigned long arg)
{
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf, *ptr;

	ptr = (void *) arg;
	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	if (!(hdev = hci_dev_get(ir.dev_id)))
		return -ENODEV;

	hci_dev_lock_bh(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
				inquiry_cache_empty(hdev) ||
				ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock_bh(hdev);

	timeo = ir.length * 2 * HZ;
	if (do_inquiry && (err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo)) < 0)
		goto done;

	/* for unlimited number of responses we will use buffer with 255 entries */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	if (!(buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL))) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock_bh(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock_bh(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
					ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
/* ---- HCI ioctl helpers ---- */
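/* These helpers back the HCI device ioctls (HCIDEVUP, HCIDEVDOWN,
 * HCIDEVRESET and friends) issued on HCI sockets; the dispatch lives in
 * the HCI socket layer. */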
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	if (!(hdev = hci_dev_get(dev)))
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);

		//__hci_request(hdev, hci_reset_req, 0, HZ);
		ret = __hci_request(hdev, hci_init_req, 0, HCI_INIT_TIMEOUT);

		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
	} else {
		/* Init failed, cleanup */
		tasklet_kill(&hdev->rx_task);
		tasklet_kill(&hdev->tx_task);
		tasklet_kill(&hdev->cmd_task);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		hci_req_unlock(hdev);
		return 0;
	}

	/* Kill RX and TX tasks */
	tasklet_kill(&hdev->rx_task);
	tasklet_kill(&hdev->tx_task);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);
	__hci_request(hdev, hci_reset_req, 0, HZ/4);
	clear_bit(HCI_INIT, &hdev->flags);

	/* Kill cmd task */
	tasklet_kill(&hdev->cmd_task);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags = 0;

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}
int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	if (!(hdev = hci_dev_get(dev)))
		return -ENODEV;
	err = hci_dev_do_close(hdev);
	hci_dev_put(hdev);
	return err;
}
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	if (!(hdev = hci_dev_get(dev)))
		return -ENODEV;

	hci_req_lock(hdev);
	tasklet_disable(&hdev->tx_task);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0;

	ret = __hci_request(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	tasklet_enable(&hdev->tx_task);
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	if (!(hdev = hci_dev_get(dev)))
		return -ENODEV;

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	hci_dev_put(hdev);
	return ret;
}
int hci_dev_cmd(unsigned int cmd, unsigned long arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, (void *) arg, sizeof(dr)))
		return -EFAULT;

	if (!(hdev = hci_dev_get(dr.dev_id)))
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt, HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req,
					dr.dev_opt, HCI_INIT_TIMEOUT);
			if (err < 0)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req,
					dr.dev_opt, HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt, HCI_INIT_TIMEOUT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETLINKPOL:
		hdev->link_policy = (__u16) dr.dev_opt;
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) & (HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *)&dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *)&dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *)&dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *)&dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}
	hci_dev_put(hdev);
	return err;
}
int hci_get_dev_list(unsigned long arg)
{
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	struct list_head *p;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	if (!(dl = kmalloc(size, GFP_KERNEL)))
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock_bh(&hci_dev_list_lock);
	list_for_each(p, &hci_dev_list) {
		struct hci_dev *hdev;
		hdev = list_entry(p, struct hci_dev, list);
		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;
		if (++n >= dev_num)
			break;
	}
	read_unlock_bh(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user((void *) arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
int hci_get_dev_info(unsigned long arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, (void *) arg, sizeof(di)))
		return -EFAULT;

	if (!(hdev = hci_dev_get(di.dev_id)))
		return -ENODEV;

	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	di.type     = hdev->type;
	di.flags    = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu  = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu  = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user((void *) arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);
	return err;
}
/* ---- Interface to HCI drivers ---- */

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kmalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	memset(hdev, 0, sizeof(struct hci_dev));

	return hdev;
}
/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	/* will free via class release */
	class_device_put(&hdev->class_dev);
}
/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	struct list_head *head = &hci_dev_list, *p;
	int id = 0;

	BT_DBG("%p name %s type %d owner %p", hdev, hdev->name, hdev->type, hdev->owner);

	if (!hdev->open || !hdev->close || !hdev->destruct)
		return -EINVAL;

	write_lock_bh(&hci_dev_list_lock);

	/* Find first available device id */
	list_for_each(p, &hci_dev_list) {
		if (list_entry(p, struct hci_dev, list)->id != id)
			break;
		head = p; id++;
	}

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;
	list_add(&hdev->list, head);

	atomic_set(&hdev->refcnt, 1);
	spin_lock_init(&hdev->lock);

	hdev->flags = 0;
	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);

	tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
	tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
	tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);
	init_MUTEX(&hdev->req_lock);

	inquiry_cache_init(hdev);

	hci_conn_hash_init(hdev);

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	atomic_set(&hdev->promisc, 0);

	write_unlock_bh(&hci_dev_list_lock);

	hci_register_sysfs(hdev);

	hci_notify(hdev, HCI_DEV_REG);

	return id;
}
/* Unregister HCI device */
int hci_unregister_dev(struct hci_dev *hdev)
{
	BT_DBG("%p name %s type %d", hdev, hdev->name, hdev->type);

	hci_unregister_sysfs(hdev);

	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	hci_notify(hdev, HCI_DEV_UNREG);

	hci_dev_put(hdev);
	return 0;
}
/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
/* ---- Interface to upper protocols ---- */

/* Register/Unregister protocols.
 * hci_task_lock is used to ensure that no tasks are running. */
int hci_register_proto(struct hci_proto *hp)
{
	int err = 0;

	BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

	if (hp->id >= HCI_MAX_PROTO)
		return -EINVAL;

	write_lock_bh(&hci_task_lock);

	if (!hci_proto[hp->id])
		hci_proto[hp->id] = hp;
	else
		err = -EEXIST;

	write_unlock_bh(&hci_task_lock);

	return err;
}
int hci_unregister_proto(struct hci_proto *hp)
{
	int err = 0;

	BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

	if (hp->id >= HCI_MAX_PROTO)
		return -EINVAL;

	write_lock_bh(&hci_task_lock);

	if (hci_proto[hp->id])
		hci_proto[hp->id] = NULL;
	else
		err = -ENOENT;

	write_unlock_bh(&hci_task_lock);

	return err;
}
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, skb->pkt_type, skb->len);

	if (atomic_read(&hdev->promisc)) {
		/* Time stamp */
		do_gettimeofday(&skb->stamp);

		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}
/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 ogf, __u16 ocf, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s ogf 0x%x ocf 0x%x plen %d", hdev->name, ogf, ocf, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s Can't allocate memory for HCI command", hdev->name);
		return -ENOMEM;
	}

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = __cpu_to_le16(hci_opcode_pack(ogf, ocf));
	hdr->plen   = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	skb->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;
	skb_queue_tail(&hdev->cmd_q, skb);
	hci_sched_cmd(hdev);

	return 0;
}
/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 ogf, __u16 ocf)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != __cpu_to_le16(hci_opcode_pack(ogf, ocf)))
		return NULL;

	BT_DBG("%s ogf 0x%x ocf 0x%x", hdev->name, ogf, ocf);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}
/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	hdr = (struct hci_acl_hdr *) skb_push(skb, HCI_ACL_HDR_SIZE);
	hdr->handle = __cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen   = __cpu_to_le16(len);

	skb->h.raw = (void *) hdr;
}
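/* An outgoing packet larger than the ACL MTU arrives here already split
 * up, with the first fragment in the skb itself and the rest on its
 * frag_list. The first fragment is tagged ACL_START and each continuation
 * ACL_CONT, then all fragments are queued atomically so the TX scheduler
 * cannot interleave another packet in the middle. */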
int hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	BT_DBG("%s conn %p flags 0x%x", hdev->name, conn, flags);

	skb->dev = (void *) hdev;
	skb->pkt_type = HCI_ACLDATA_PKT;
	hci_add_acl_hdr(skb, conn->handle, flags | ACL_START);

	if (!(list = skb_shinfo(skb)->frag_list)) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(&conn->data_q, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock_bh(&conn->data_q.lock);

		__skb_queue_tail(&conn->data_q, skb);
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			skb->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags | ACL_CONT);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(&conn->data_q, skb);
		} while (list);

		spin_unlock_bh(&conn->data_q.lock);
	}

	hci_sched_tx(hdev);
	return 0;
}
/* Send SCO data */
int hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	if (skb->len > hdev->sco_mtu) {
		kfree_skb(skb);
		return -EINVAL;
	}

	hdr.handle = __cpu_to_le16(conn->handle);
	hdr.dlen   = skb->len;

	skb->h.raw = skb_push(skb, HCI_SCO_HDR_SIZE);
	memcpy(skb->h.raw, &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	skb->pkt_type = HCI_SCODATA_PKT;
	skb_queue_tail(&conn->data_q, skb);
	hci_sched_tx(hdev);
	return 0;
}
/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
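/* Pick the connection of the given type that has queued data and the
 * fewest packets outstanding in the controller, and grant it a quote of
 * frames: the available controller buffer slots divided evenly among all
 * ready connections (at least one). This keeps one busy link from
 * starving the others. */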
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL;
	int num = 0, min = ~0;
	struct list_head *p;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */
	list_for_each(p, &h->list) {
		struct hci_conn *c;
		c = list_entry(p, struct hci_conn, list);

		if (c->type != type || c->state != BT_CONNECTED
				|| skb_queue_empty(&c->data_q))
			continue;
		num++;

		if (c->sent < min) {
			min  = c->sent;
			conn = c;
		}
	}

	if (conn) {
		int cnt = (type == ACL_LINK ? hdev->acl_cnt : hdev->sco_cnt);
		int q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
static inline void hci_acl_tx_to(struct hci_dev *hdev)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct list_head *p;
	struct hci_conn *c;

	BT_ERR("%s ACL tx timeout", hdev->name);

	/* Kill stalled connections */
	list_for_each(p, &h->list) {
		c = list_entry(p, struct hci_conn, list);
		if (c->type == ACL_LINK && c->sent) {
			BT_ERR("%s killing stalled ACL connection %s",
				hdev->name, batostr(&c->dst));
			hci_acl_disconn(c, 0x13);
		}
	}
}
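/* ACL scheduling: acl_cnt mirrors the free ACL buffer slots in the
 * controller. It is decremented for every frame handed to the driver and
 * replenished when the controller returns a Number of Completed Packets
 * event (handled in the HCI event code). */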
static inline void hci_sched_acl(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	/* ACL tx timeout must be longer than maximum
	 * link supervision timeout (40.9 seconds) */
	if (!hdev->acl_cnt && (jiffies - hdev->acl_last_tx) > (HZ * 45))
		hci_acl_tx_to(hdev);

	while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			conn->sent++;
		}
	}
}
/* Schedule SCO */
static inline void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
static void hci_tx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	read_lock(&hci_task_lock);

	BT_DBG("%s acl %d sco %d", hdev->name, hdev->acl_cnt, hdev->sco_cnt);

	/* Schedule queues and send stuff to HCI driver */

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);

	read_unlock(&hci_task_lock);
}
/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags  = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		/* Send to upper protocol */
		if ((hp = hci_proto[HCI_PROTO_L2CAP]) && hp->recv_acldata) {
			hp->recv_acldata(conn, skb, flags);
			return;
		}
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}
/* SCO data packet */
static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		/* Send to upper protocol */
		if ((hp = hci_proto[HCI_PROTO_SCO]) && hp->recv_scodata) {
			hp->recv_scodata(conn, skb);
			return;
		}
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}
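/* The RX tasklet drains hdev->rx_q: each frame is optionally copied to
 * promiscuous sockets, dropped while the device is in raw or init state
 * (data packets only for the latter), and otherwise dispatched by packet
 * type to the event handler or to the ACL/SCO paths above. */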
void hci_rx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	read_lock(&hci_task_lock);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (skb->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (skb->pkt_type) {
		case HCI_EVENT_PKT:
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}

	read_unlock(&hci_task_lock);
}
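/* Command flow control: cmd_cnt holds the number of commands the
 * controller will currently accept (decremented on send, restored by the
 * event code on Command Complete/Status). The sent command is kept in
 * hdev->sent_cmd so its parameters can be retrieved when the reply
 * arrives; if cloning it fails, the command is requeued and the task
 * rescheduled. A stuck controller is unwedged after one second. */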
static void hci_cmd_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	if (!atomic_read(&hdev->cmd_cnt) && (jiffies - hdev->cmd_last_tx) > HZ) {
		BT_ERR("%s command tx timeout", hdev->name);
		atomic_set(&hdev->cmd_cnt, 1);
	}

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt) && (skb = skb_dequeue(&hdev->cmd_q))) {
		if (hdev->sent_cmd)
			kfree_skb(hdev->sent_cmd);

		if ((hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC))) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			hdev->cmd_last_tx = jiffies;
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			hci_sched_cmd(hdev);
		}
	}
}