static void hci_tx_task(unsigned long arg);
static void hci_notify(struct hci_dev *hdev, int event);
-rwlock_t hci_task_lock = RW_LOCK_UNLOCKED;
+static DEFINE_RWLOCK(hci_task_lock);
/* HCI device list */
LIST_HEAD(hci_dev_list);
-rwlock_t hci_dev_list_lock = RW_LOCK_UNLOCKED;
+DEFINE_RWLOCK(hci_dev_list_lock);
+
+/* HCI callback list */
+LIST_HEAD(hci_cb_list);
+DEFINE_RWLOCK(hci_cb_list_lock);
/* HCI protocols */
#define HCI_MAX_PROTO 2
}
}
-void hci_req_cancel(struct hci_dev *hdev, int err)
+static void hci_req_cancel(struct hci_dev *hdev, int err)
{
BT_DBG("%s err 0x%2.2x", hdev->name, err);
static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
+ struct sk_buff *skb;
__u16 param;
BT_DBG("%s %ld", hdev->name, opt);
+ /* Driver initialization */
+
+ /* Special commands */
+ while ((skb = skb_dequeue(&hdev->driver_init))) {
+ skb->pkt_type = HCI_COMMAND_PKT;
+ skb->dev = (void *) hdev;
+ skb_queue_tail(&hdev->cmd_q, skb);
+ hci_sched_cmd(hdev);
+ }
+ skb_queue_purge(&hdev->driver_init);
+
/* Mandatory initialization */
/* Reset */
BT_DBG("cache %p, %s", cache, batostr(bdaddr));
for (e = cache->list; e; e = e->next)
- if (!bacmp(&e->info.bdaddr, bdaddr))
+ if (!bacmp(&e->data.bdaddr, bdaddr))
break;
return e;
}
-void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_info *info)
+void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
{
struct inquiry_cache *cache = &hdev->inq_cache;
struct inquiry_entry *e;
- BT_DBG("cache %p, %s", cache, batostr(&info->bdaddr));
+ BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));
- if (!(e = hci_inquiry_cache_lookup(hdev, &info->bdaddr))) {
+ if (!(e = hci_inquiry_cache_lookup(hdev, &data->bdaddr))) {
/* Entry not in the cache. Add new one. */
if (!(e = kmalloc(sizeof(struct inquiry_entry), GFP_ATOMIC)))
return;
cache->list = e;
}
- memcpy(&e->info, info, sizeof(*info));
+ memcpy(&e->data, data, sizeof(*data));
e->timestamp = jiffies;
cache->timestamp = jiffies;
}
struct inquiry_entry *e;
int copied = 0;
- for (e = cache->list; e && copied < num; e = e->next, copied++)
- memcpy(info++, &e->info, sizeof(*info));
+ for (e = cache->list; e && copied < num; e = e->next, copied++) {
+ struct inquiry_data *data = &e->data;
+ bacpy(&info->bdaddr, &data->bdaddr);
+ info->pscan_rep_mode = data->pscan_rep_mode;
+ info->pscan_period_mode = data->pscan_period_mode;
+ info->pscan_mode = data->pscan_mode;
+ memcpy(info->dev_class, data->dev_class, 3);
+ info->clock_offset = data->clock_offset;
+ info++;
+ }
BT_DBG("cache %p, copied %d", cache, copied);
return copied;
goto done;
}
+ if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
+ set_bit(HCI_RAW, &hdev->flags);
+
if (hdev->open(hdev)) {
ret = -EIO;
goto done;
/* Reset device */
skb_queue_purge(&hdev->cmd_q);
atomic_set(&hdev->cmd_cnt, 1);
- set_bit(HCI_INIT, &hdev->flags);
- __hci_request(hdev, hci_reset_req, 0, HZ/4);
- clear_bit(HCI_INIT, &hdev->flags);
+ if (!test_bit(HCI_RAW, &hdev->flags)) {
+ set_bit(HCI_INIT, &hdev->flags);
+ __hci_request(hdev, hci_reset_req, 0, HZ/4);
+ clear_bit(HCI_INIT, &hdev->flags);
+ }
/* Kill cmd task */
tasklet_kill(&hdev->cmd_task);
atomic_set(&hdev->cmd_cnt, 1);
hdev->acl_cnt = 0; hdev->sco_cnt = 0;
- ret = __hci_request(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
+ if (!test_bit(HCI_RAW, &hdev->flags))
+ ret = __hci_request(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
done:
tasklet_enable(&hdev->tx_task);
memset(hdev, 0, sizeof(struct hci_dev));
+ skb_queue_head_init(&hdev->driver_init);
+
return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);
/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
+ skb_queue_purge(&hdev->driver_init);
+
/* will free via class release */
class_device_put(&hdev->class_dev);
}
}
EXPORT_SYMBOL(hci_unregister_proto);
+/* Register an upper-layer HCI callback block.
+ *
+ * Adds @cb to the global hci_cb_list, taking hci_cb_list_lock as a
+ * BH-disabling write lock so the list can be walked safely from
+ * softirq context. Always returns 0.
+ *
+ * NOTE(review): @cb is assumed to stay allocated until a matching
+ * hci_unregister_cb() call — ownership remains with the caller.
+ */
+int hci_register_cb(struct hci_cb *cb)
+{
+	BT_DBG("%p name %s", cb, cb->name);
+
+	write_lock_bh(&hci_cb_list_lock);
+	list_add(&cb->list, &hci_cb_list);
+	write_unlock_bh(&hci_cb_list_lock);
+
+	return 0;
+}
+EXPORT_SYMBOL(hci_register_cb);
+
+/* Unregister a previously registered HCI callback block.
+ *
+ * Removes @cb from the global hci_cb_list under the BH-safe write
+ * lock. Always returns 0; no check is made that @cb was actually
+ * on the list (list_del on an unlinked entry would corrupt it —
+ * callers must only pass blocks registered via hci_register_cb()).
+ */
+int hci_unregister_cb(struct hci_cb *cb)
+{
+	BT_DBG("%p name %s", cb, cb->name);
+
+	write_lock_bh(&hci_cb_list_lock);
+	list_del(&cb->list);
+	write_unlock_bh(&hci_cb_list_lock);
+
+	return 0;
+}
+EXPORT_SYMBOL(hci_unregister_cb);
+
static int hci_send_frame(struct sk_buff *skb)
{
struct hci_dev *hdev = (struct hci_dev *) skb->dev;
BT_DBG("%s", hdev->name);
- /* ACL tx timeout must be longer than maximum
- * link supervision timeout (40.9 seconds) */
- if (!hdev->acl_cnt && (jiffies - hdev->acl_last_tx) > (HZ * 45))
- hci_acl_tx_to(hdev);
+ if (!test_bit(HCI_RAW, &hdev->flags)) {
+ /* ACL tx timeout must be longer than maximum
+ * link supervision timeout (40.9 seconds) */
+ if (!hdev->acl_cnt && (jiffies - hdev->acl_last_tx) > (HZ * 45))
+ hci_acl_tx_to(hdev);
+ }
while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, "e))) {
while (quote-- && (skb = skb_dequeue(&conn->data_q))) {