patch-2_6_7-vs1_9_1_12
[linux-2.6.git] / net / bluetooth / hci_core.c
1 /* 
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4
5    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES 
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, 
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS 
22    SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI core. */
26
27 #include <linux/config.h>
28 #include <linux/module.h>
29 #include <linux/kmod.h>
30
31 #include <linux/types.h>
32 #include <linux/errno.h>
33 #include <linux/kernel.h>
34 #include <linux/major.h>
35 #include <linux/sched.h>
36 #include <linux/slab.h>
37 #include <linux/poll.h>
38 #include <linux/fcntl.h>
39 #include <linux/init.h>
40 #include <linux/skbuff.h>
41 #include <linux/interrupt.h>
42 #include <linux/notifier.h>
43 #include <net/sock.h>
44
45 #include <asm/system.h>
46 #include <asm/uaccess.h>
47 #include <asm/unaligned.h>
48
49 #include <net/bluetooth/bluetooth.h>
50 #include <net/bluetooth/hci_core.h>
51
52 #ifndef CONFIG_BT_HCI_CORE_DEBUG
53 #undef  BT_DBG
54 #define BT_DBG(D...)
55 #endif
56
57 static void hci_cmd_task(unsigned long arg);
58 static void hci_rx_task(unsigned long arg);
59 static void hci_tx_task(unsigned long arg);
60 static void hci_notify(struct hci_dev *hdev, int event);
61
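/* hci_task_lock is taken for reading by the RX and TX tasklets and for
 * writing by hci_register_proto()/hci_unregister_proto(), so the protocol
 * table below can never change underneath a running task. */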
62 rwlock_t hci_task_lock = RW_LOCK_UNLOCKED;
63
64 /* HCI device list */
65 LIST_HEAD(hci_dev_list);
66 rwlock_t hci_dev_list_lock = RW_LOCK_UNLOCKED;
67
68 /* HCI protocols */
69 #define HCI_MAX_PROTO   2
70 struct hci_proto *hci_proto[HCI_MAX_PROTO];
71
72 /* HCI notifiers list */
73 static struct notifier_block *hci_notifier;
74
75 /* ---- HCI notifications ---- */
76
77 int hci_register_notifier(struct notifier_block *nb)
78 {
79         return notifier_chain_register(&hci_notifier, nb);
80 }
81
82 int hci_unregister_notifier(struct notifier_block *nb)
83 {
84         return notifier_chain_unregister(&hci_notifier, nb);
85 }
86
87 void hci_notify(struct hci_dev *hdev, int event)
88 {
89         notifier_call_chain(&hci_notifier, event, hdev);
90 }
91
92 /* ---- HCI requests ---- */
93
94 void hci_req_complete(struct hci_dev *hdev, int result)
95 {
96         BT_DBG("%s result 0x%2.2x", hdev->name, result);
97
98         if (hdev->req_status == HCI_REQ_PEND) {
99                 hdev->req_result = result;
100                 hdev->req_status = HCI_REQ_DONE;
101                 wake_up_interruptible(&hdev->req_wait_q);
102         }
103 }
104
105 void hci_req_cancel(struct hci_dev *hdev, int err)
106 {
107         BT_DBG("%s err 0x%2.2x", hdev->name, err);
108
109         if (hdev->req_status == HCI_REQ_PEND) {
110                 hdev->req_result = err;
111                 hdev->req_status = HCI_REQ_CANCELED;
112                 wake_up_interruptible(&hdev->req_wait_q);
113         }
114 }
115
116 /* Execute request and wait for completion. */
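/* The req() callback queues one or more HCI commands; the calling context
 * then sleeps on req_wait_q until hci_req_complete() or hci_req_cancel()
 * wakes it up, or until the timeout expires. Callers must hold
 * hdev->req_lock, which is what the hci_request() wrapper below does. */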
117 static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt), 
118                                 unsigned long opt, __u32 timeout)
119 {
120         DECLARE_WAITQUEUE(wait, current);
121         int err = 0;
122
123         BT_DBG("%s start", hdev->name);
124
125         hdev->req_status = HCI_REQ_PEND;
126
127         add_wait_queue(&hdev->req_wait_q, &wait);
128         set_current_state(TASK_INTERRUPTIBLE);
129
130         req(hdev, opt);
131         schedule_timeout(timeout);
132
133         remove_wait_queue(&hdev->req_wait_q, &wait);
134
135         if (signal_pending(current))
136                 return -EINTR;
137
138         switch (hdev->req_status) {
139         case HCI_REQ_DONE:
140                 err = -bt_err(hdev->req_result);
141                 break;
142
143         case HCI_REQ_CANCELED:
144                 err = -hdev->req_result;
145                 break;
146
147         default:
148                 err = -ETIMEDOUT;
149                 break;
150         }
151
152         hdev->req_status = hdev->req_result = 0;
153
154         BT_DBG("%s end: err %d", hdev->name, err);
155
156         return err;
157 }
158
159 static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
160                                 unsigned long opt, __u32 timeout)
161 {
162         int ret;
163
164         /* Serialize all requests */
165         hci_req_lock(hdev);
166         ret = __hci_request(hdev, req, opt, timeout);
167         hci_req_unlock(hdev);
168
169         return ret;
170 }
171
172 static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
173 {
174         BT_DBG("%s %ld", hdev->name, opt);
175
176         /* Reset device */
177         hci_send_cmd(hdev, OGF_HOST_CTL, OCF_RESET, 0, NULL);
178 }
179
180 static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
181 {
182         __u16 param;
183
184         BT_DBG("%s %ld", hdev->name, opt);
185
186         /* Mandatory initialization */
187
188         /* Reset */
189         if (test_bit(HCI_QUIRK_RESET_ON_INIT, &hdev->quirks))
190                 hci_send_cmd(hdev, OGF_HOST_CTL, OCF_RESET, 0, NULL);
191
192         /* Read Local Supported Features */
193         hci_send_cmd(hdev, OGF_INFO_PARAM, OCF_READ_LOCAL_FEATURES, 0, NULL);
194
195         /* Read Buffer Size (ACL mtu, max pkt, etc.) */
196         hci_send_cmd(hdev, OGF_INFO_PARAM, OCF_READ_BUFFER_SIZE, 0, NULL);
197
198 #if 0
199         /* Host buffer size */
200         {
201                 struct hci_cp_host_buffer_size cp;
202                 cp.acl_mtu = __cpu_to_le16(HCI_MAX_ACL_SIZE);
203                 cp.sco_mtu = HCI_MAX_SCO_SIZE;
204                 cp.acl_max_pkt = __cpu_to_le16(0xffff);
205                 cp.sco_max_pkt = __cpu_to_le16(0xffff);
206                 hci_send_cmd(hdev, OGF_HOST_CTL, OCF_HOST_BUFFER_SIZE, sizeof(cp), &cp);
207         }
208 #endif
209
210         /* Read BD Address */
211         hci_send_cmd(hdev, OGF_INFO_PARAM, OCF_READ_BD_ADDR, 0, NULL);
212
213         /* Read Voice Setting */
214         hci_send_cmd(hdev, OGF_HOST_CTL, OCF_READ_VOICE_SETTING, 0, NULL);
215
216         /* Optional initialization */
217
218         /* Clear Event Filters */
219         {
220                 struct hci_cp_set_event_flt cp;
221                 cp.flt_type  = HCI_FLT_CLEAR_ALL;
222                 hci_send_cmd(hdev, OGF_HOST_CTL, OCF_SET_EVENT_FLT, sizeof(cp), &cp);
223         }
224
225         /* Page timeout ~20 secs */
226         param = __cpu_to_le16(0x8000);
227         hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_PG_TIMEOUT, 2, &param);
228
229         /* Connection accept timeout ~20 secs */
230         param = __cpu_to_le16(0x7d00);
231         hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_CA_TIMEOUT, 2, &param);
232 }
233
234 static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
235 {
236         __u8 scan = opt;
237
238         BT_DBG("%s %x", hdev->name, scan);
239
240         /* Inquiry and Page scans */
241         hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_SCAN_ENABLE, 1, &scan);
242 }
243
244 static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
245 {
246         __u8 auth = opt;
247
248         BT_DBG("%s %x", hdev->name, auth);
249
250         /* Authentication */
251         hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_AUTH_ENABLE, 1, &auth);
252 }
253
254 static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
255 {
256         __u8 encrypt = opt;
257
258         BT_DBG("%s %x", hdev->name, encrypt);
259
260         /* Encryption */
261         hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_ENCRYPT_MODE, 1, &encrypt);
262 }
263
264 /* Get HCI device by index. 
265  * Device is held on return. */
266 struct hci_dev *hci_dev_get(int index)
267 {
268         struct hci_dev *hdev = NULL;
269         struct list_head *p;
270
271         BT_DBG("%d", index);
272
273         if (index < 0)
274                 return NULL;
275
276         read_lock(&hci_dev_list_lock);
277         list_for_each(p, &hci_dev_list) {
278                 struct hci_dev *d = list_entry(p, struct hci_dev, list);
279                 if (d->id == index) {
280                         hdev = hci_dev_hold(d);
281                         break;
282                 }
283         }
284         read_unlock(&hci_dev_list_lock);
285         return hdev;
286 }
287 EXPORT_SYMBOL(hci_dev_get);
288
289 /* ---- Inquiry support ---- */
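/* The inquiry cache is a simple singly linked list hanging off
 * hdev->inq_cache. Callers serialize access with hci_dev_lock() or
 * hci_dev_lock_bh(); entries are allocated with GFP_ATOMIC because updates
 * normally arrive from inquiry result events processed in the RX task. */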
290 void inquiry_cache_flush(struct hci_dev *hdev)
291 {
292         struct inquiry_cache *cache = &hdev->inq_cache;
293         struct inquiry_entry *next  = cache->list, *e;
294
295         BT_DBG("cache %p", cache);
296
297         cache->list = NULL;
298         while ((e = next)) {
299                 next = e->next;
300                 kfree(e);
301         }
302 }
303
304 struct inquiry_entry *inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
305 {
306         struct inquiry_cache *cache = &hdev->inq_cache;
307         struct inquiry_entry *e;
308
309         BT_DBG("cache %p, %s", cache, batostr(bdaddr));
310
311         for (e = cache->list; e; e = e->next)
312                 if (!bacmp(&e->info.bdaddr, bdaddr))
313                         break;
314         return e;
315 }
316
317 void inquiry_cache_update(struct hci_dev *hdev, struct inquiry_info *info)
318 {
319         struct inquiry_cache *cache = &hdev->inq_cache;
320         struct inquiry_entry *e;
321
322         BT_DBG("cache %p, %s", cache, batostr(&info->bdaddr));
323
324         if (!(e = inquiry_cache_lookup(hdev, &info->bdaddr))) {
325                 /* Entry not in the cache. Add new one. */
326                 if (!(e = kmalloc(sizeof(struct inquiry_entry), GFP_ATOMIC)))
327                         return;
328                 memset(e, 0, sizeof(struct inquiry_entry));
329                 e->next     = cache->list;
330                 cache->list = e;
331         }
332
333         memcpy(&e->info, info, sizeof(*info));
334         e->timestamp = jiffies;
335         cache->timestamp = jiffies;
336 }
337
338 int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
339 {
340         struct inquiry_cache *cache = &hdev->inq_cache;
341         struct inquiry_info *info = (struct inquiry_info *) buf;
342         struct inquiry_entry *e;
343         int copied = 0;
344
345         for (e = cache->list; e && copied < num; e = e->next, copied++)
346                 memcpy(info++, &e->info, sizeof(*info));
347
348         BT_DBG("cache %p, copied %d", cache, copied);
349         return copied;
350 }
351
352 static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
353 {
354         struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
355         struct hci_cp_inquiry cp;
356
357         BT_DBG("%s", hdev->name);
358
359         if (test_bit(HCI_INQUIRY, &hdev->flags))
360                 return;
361
362         /* Start Inquiry */
363         memcpy(&cp.lap, &ir->lap, 3);
364         cp.length  = ir->length;
365         cp.num_rsp = ir->num_rsp;
366         hci_send_cmd(hdev, OGF_LINK_CTL, OCF_INQUIRY, sizeof(cp), &cp);
367 }
368
369 int hci_inquiry(void __user *arg)
370 {
371         __u8 __user *ptr = arg;
372         struct hci_inquiry_req ir;
373         struct hci_dev *hdev;
374         int err = 0, do_inquiry = 0, max_rsp;
375         long timeo;
376         __u8 *buf;
377
378         if (copy_from_user(&ir, ptr, sizeof(ir)))
379                 return -EFAULT;
380
381         if (!(hdev = hci_dev_get(ir.dev_id)))
382                 return -ENODEV;
383
384         hci_dev_lock_bh(hdev);
385         if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX || 
386                                         inquiry_cache_empty(hdev) ||
387                                         ir.flags & IREQ_CACHE_FLUSH) {
388                 inquiry_cache_flush(hdev);
389                 do_inquiry = 1;
390         }
391         hci_dev_unlock_bh(hdev);
392
393         timeo = ir.length * 2 * HZ;
394         if (do_inquiry && (err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo)) < 0)
395                 goto done;
396
397         /* For an unlimited number of responses we use a buffer with 255 entries */
398         max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
399
400         /* inquiry_cache_dump() can't sleep, so we dump into a temporary buffer
401          * and then copy it to user space.
402          */
403         if (!(buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL))) {
404                 err = -ENOMEM;
405                 goto done;
406         }
407
408         hci_dev_lock_bh(hdev);
409         ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
410         hci_dev_unlock_bh(hdev);
411
412         BT_DBG("num_rsp %d", ir.num_rsp);
413
414         if (!copy_to_user(ptr, &ir, sizeof(ir))) {
415                 ptr += sizeof(ir);
416                 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
417                                         ir.num_rsp))
418                         err = -EFAULT;
419         } else 
420                 err = -EFAULT;
421
422         kfree(buf);
423
424 done:
425         hci_dev_put(hdev);
426         return err;
427 }
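/* Roughly how user space drives this path (an illustrative sketch, not a
 * definitive interface description): the HCIINQUIRY ioctl on a raw HCI
 * socket is dispatched by hci_sock.c and ends up in hci_inquiry() above.
 * The buffer starts with the request and leaves room for the responses.
 *
 *	struct {
 *		struct hci_inquiry_req ir;
 *		struct inquiry_info    info[8];
 *	} buf;
 *
 *	int sk = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *	memset(&buf, 0, sizeof(buf));
 *	buf.ir.dev_id  = 0;				// hci0
 *	buf.ir.length  = 8;				// units of 1.28 seconds
 *	buf.ir.num_rsp = 8;				// at most 8 responses
 *	buf.ir.flags   = IREQ_CACHE_FLUSH;
 *	memcpy(buf.ir.lap, "\x33\x8b\x9e", 3);		// general inquiry access code
 *	ioctl(sk, HCIINQUIRY, &buf);
 */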
428
429 /* ---- HCI ioctl helpers ---- */
430
431 int hci_dev_open(__u16 dev)
432 {
433         struct hci_dev *hdev;
434         int ret = 0;
435
436         if (!(hdev = hci_dev_get(dev)))
437                 return -ENODEV;
438
439         BT_DBG("%s %p", hdev->name, hdev);
440
441         hci_req_lock(hdev);
442
443         if (test_bit(HCI_UP, &hdev->flags)) {
444                 ret = -EALREADY;
445                 goto done;
446         }
447
448         if (hdev->open(hdev)) {
449                 ret = -EIO;
450                 goto done;
451         }
452
453         if (!test_bit(HCI_RAW, &hdev->flags)) {
454                 atomic_set(&hdev->cmd_cnt, 1);
455                 set_bit(HCI_INIT, &hdev->flags);
456
457                 //__hci_request(hdev, hci_reset_req, 0, HZ);
458                 ret = __hci_request(hdev, hci_init_req, 0, HCI_INIT_TIMEOUT);
459
460                 clear_bit(HCI_INIT, &hdev->flags);
461         }
462
463         if (!ret) {
464                 hci_dev_hold(hdev);
465                 set_bit(HCI_UP, &hdev->flags);
466                 hci_notify(hdev, HCI_DEV_UP);
467         } else {        
468                 /* Init failed, cleanup */
469                 tasklet_kill(&hdev->rx_task);
470                 tasklet_kill(&hdev->tx_task);
471                 tasklet_kill(&hdev->cmd_task);
472
473                 skb_queue_purge(&hdev->cmd_q);
474                 skb_queue_purge(&hdev->rx_q);
475
476                 if (hdev->flush)
477                         hdev->flush(hdev);
478
479                 if (hdev->sent_cmd) {
480                         kfree_skb(hdev->sent_cmd);
481                         hdev->sent_cmd = NULL;
482                 }
483
484                 hdev->close(hdev);
485                 hdev->flags = 0;
486         }
487
488 done:
489         hci_req_unlock(hdev);
490         hci_dev_put(hdev);
491         return ret;
492 }
493
494 static int hci_dev_do_close(struct hci_dev *hdev)
495 {
496         BT_DBG("%s %p", hdev->name, hdev);
497
498         hci_req_cancel(hdev, ENODEV);
499         hci_req_lock(hdev);
500
501         if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
502                 hci_req_unlock(hdev);
503                 return 0;
504         }
505
506         /* Kill RX and TX tasks */
507         tasklet_kill(&hdev->rx_task);
508         tasklet_kill(&hdev->tx_task);
509
510         hci_dev_lock_bh(hdev);
511         inquiry_cache_flush(hdev);
512         hci_conn_hash_flush(hdev);
513         hci_dev_unlock_bh(hdev);
514
515         hci_notify(hdev, HCI_DEV_DOWN);
516
517         if (hdev->flush)
518                 hdev->flush(hdev);
519
520         /* Reset device */
521         skb_queue_purge(&hdev->cmd_q);
522         atomic_set(&hdev->cmd_cnt, 1);
523         set_bit(HCI_INIT, &hdev->flags);
524         __hci_request(hdev, hci_reset_req, 0, HZ/4);
525         clear_bit(HCI_INIT, &hdev->flags);
526
527         /* Kill cmd task */
528         tasklet_kill(&hdev->cmd_task);
529
530         /* Drop queues */
531         skb_queue_purge(&hdev->rx_q);
532         skb_queue_purge(&hdev->cmd_q);
533         skb_queue_purge(&hdev->raw_q);
534
535         /* Drop last sent command */
536         if (hdev->sent_cmd) {
537                 kfree_skb(hdev->sent_cmd);
538                 hdev->sent_cmd = NULL;
539         }
540
541         /* After this point our queues are empty
542          * and no tasks are scheduled. */
543         hdev->close(hdev);
544
545         /* Clear flags */
546         hdev->flags = 0;
547
548         hci_req_unlock(hdev);
549
550         hci_dev_put(hdev);
551         return 0;
552 }
553
554 int hci_dev_close(__u16 dev)
555 {
556         struct hci_dev *hdev;
557         int err;
558
559         if (!(hdev = hci_dev_get(dev)))
560                 return -ENODEV;
561         err = hci_dev_do_close(hdev);
562         hci_dev_put(hdev);
563         return err;
564 }
565
566 int hci_dev_reset(__u16 dev)
567 {
568         struct hci_dev *hdev;
569         int ret = 0;
570
571         if (!(hdev = hci_dev_get(dev)))
572                 return -ENODEV;
573
574         hci_req_lock(hdev);
575         tasklet_disable(&hdev->tx_task);
576
577         if (!test_bit(HCI_UP, &hdev->flags))
578                 goto done;
579
580         /* Drop queues */
581         skb_queue_purge(&hdev->rx_q);
582         skb_queue_purge(&hdev->cmd_q);
583
584         hci_dev_lock_bh(hdev);
585         inquiry_cache_flush(hdev);
586         hci_conn_hash_flush(hdev);
587         hci_dev_unlock_bh(hdev);
588
589         if (hdev->flush)
590                 hdev->flush(hdev);
591
592         atomic_set(&hdev->cmd_cnt, 1); 
593         hdev->acl_cnt = 0; hdev->sco_cnt = 0;
594
595         ret = __hci_request(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
596
597 done:
598         tasklet_enable(&hdev->tx_task);
599         hci_req_unlock(hdev);
600         hci_dev_put(hdev);
601         return ret;
602 }
603
604 int hci_dev_reset_stat(__u16 dev)
605 {
606         struct hci_dev *hdev;
607         int ret = 0;
608
609         if (!(hdev = hci_dev_get(dev)))
610                 return -ENODEV;
611
612         memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
613
614         hci_dev_put(hdev);
615
616         return ret;
617 }
618
619 int hci_dev_cmd(unsigned int cmd, void __user *arg)
620 {
621         struct hci_dev *hdev;
622         struct hci_dev_req dr;
623         int err = 0;
624
625         if (copy_from_user(&dr, arg, sizeof(dr)))
626                 return -EFAULT;
627
628         if (!(hdev = hci_dev_get(dr.dev_id)))
629                 return -ENODEV;
630
631         switch (cmd) {
632         case HCISETAUTH:
633                 err = hci_request(hdev, hci_auth_req, dr.dev_opt, HCI_INIT_TIMEOUT);
634                 break;
635
636         case HCISETENCRYPT:
637                 if (!lmp_encrypt_capable(hdev)) {
638                         err = -EOPNOTSUPP;
639                         break;
640                 }
641
642                 if (!test_bit(HCI_AUTH, &hdev->flags)) {
643                         /* Auth must be enabled first */
644                         err = hci_request(hdev, hci_auth_req,
645                                         dr.dev_opt, HCI_INIT_TIMEOUT);
646                         if (err)
647                                 break;
648                 }
649
650                 err = hci_request(hdev, hci_encrypt_req,
651                                         dr.dev_opt, HCI_INIT_TIMEOUT);
652                 break;
653
654         case HCISETSCAN:
655                 err = hci_request(hdev, hci_scan_req, dr.dev_opt, HCI_INIT_TIMEOUT);
656                 break;
657
658         case HCISETPTYPE:
659                 hdev->pkt_type = (__u16) dr.dev_opt;
660                 break;
661
662         case HCISETLINKPOL:
663                 hdev->link_policy = (__u16) dr.dev_opt;
664                 break;
665
666         case HCISETLINKMODE:
667                 hdev->link_mode = ((__u16) dr.dev_opt) & (HCI_LM_MASTER | HCI_LM_ACCEPT);
668                 break;
669
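        /* For the two MTU ioctls dev_opt carries a pair of 16-bit values:
         * the first __u16 is taken as the packet count and the second as
         * the MTU (the split is done in host byte order). */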
670         case HCISETACLMTU:
671                 hdev->acl_mtu  = *((__u16 *)&dr.dev_opt + 1);
672                 hdev->acl_pkts = *((__u16 *)&dr.dev_opt + 0);
673                 break;
674
675         case HCISETSCOMTU:
676                 hdev->sco_mtu  = *((__u16 *)&dr.dev_opt + 1);
677                 hdev->sco_pkts = *((__u16 *)&dr.dev_opt + 0);
678                 break;
679
680         default:
681                 err = -EINVAL;
682                 break;
683         }
684         hci_dev_put(hdev);
685         return err;
686 }
687
688 int hci_get_dev_list(void __user *arg)
689 {
690         struct hci_dev_list_req *dl;
691         struct hci_dev_req *dr;
692         struct list_head *p;
693         int n = 0, size, err;
694         __u16 dev_num;
695
696         if (get_user(dev_num, (__u16 __user *) arg))
697                 return -EFAULT;
698
699         if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
700                 return -EINVAL;
701
702         size = sizeof(*dl) + dev_num * sizeof(*dr);
703
704         if (!(dl = kmalloc(size, GFP_KERNEL)))
705                 return -ENOMEM;
706
707         dr = dl->dev_req;
708
709         read_lock_bh(&hci_dev_list_lock);
710         list_for_each(p, &hci_dev_list) {
711                 struct hci_dev *hdev;
712                 hdev = list_entry(p, struct hci_dev, list);
713                 (dr + n)->dev_id  = hdev->id;
714                 (dr + n)->dev_opt = hdev->flags;
715                 if (++n >= dev_num)
716                         break;
717         }
718         read_unlock_bh(&hci_dev_list_lock);
719
720         dl->dev_num = n;
721         size = sizeof(*dl) + n * sizeof(*dr);
722
723         err = copy_to_user(arg, dl, size);
724         kfree(dl);
725
726         return err ? -EFAULT : 0;
727 }
728
729 int hci_get_dev_info(void __user *arg)
730 {
731         struct hci_dev *hdev;
732         struct hci_dev_info di;
733         int err = 0;
734
735         if (copy_from_user(&di, arg, sizeof(di)))
736                 return -EFAULT;
737
738         if (!(hdev = hci_dev_get(di.dev_id)))
739                 return -ENODEV;
740
741         strcpy(di.name, hdev->name);
742         di.bdaddr   = hdev->bdaddr;
743         di.type     = hdev->type;
744         di.flags    = hdev->flags;
745         di.pkt_type = hdev->pkt_type;
746         di.acl_mtu  = hdev->acl_mtu;
747         di.acl_pkts = hdev->acl_pkts;
748         di.sco_mtu  = hdev->sco_mtu;
749         di.sco_pkts = hdev->sco_pkts;
750         di.link_policy = hdev->link_policy;
751         di.link_mode   = hdev->link_mode;
752
753         memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
754         memcpy(&di.features, &hdev->features, sizeof(di.features));
755
756         if (copy_to_user(arg, &di, sizeof(di)))
757                 err = -EFAULT;
758
759         hci_dev_put(hdev);
760
761         return err;
762 }
763
764 /* ---- Interface to HCI drivers ---- */
765
766 /* Alloc HCI device */
767 struct hci_dev *hci_alloc_dev(void)
768 {
769         struct hci_dev *hdev;
770
771         hdev = kmalloc(sizeof(struct hci_dev), GFP_KERNEL);
772         if (!hdev)
773                 return NULL;
774
775         memset(hdev, 0, sizeof(struct hci_dev));
776
777         return hdev;
778 }
779 EXPORT_SYMBOL(hci_alloc_dev);
780
781 /* Free HCI device */
782 void hci_free_dev(struct hci_dev *hdev)
783 {
784         /* freed via the class_dev release callback */
785         class_device_put(&hdev->class_dev);
786 }
787 EXPORT_SYMBOL(hci_free_dev);
788
789 /* Register HCI device */
790 int hci_register_dev(struct hci_dev *hdev)
791 {
792         struct list_head *head = &hci_dev_list, *p;
793         int id = 0;
794
795         BT_DBG("%p name %s type %d owner %p", hdev, hdev->name, hdev->type, hdev->owner);
796
797         if (!hdev->open || !hdev->close || !hdev->destruct)
798                 return -EINVAL;
799
800         write_lock_bh(&hci_dev_list_lock);
801
802         /* Find first available device id */
803         list_for_each(p, &hci_dev_list) {
804                 if (list_entry(p, struct hci_dev, list)->id != id)
805                         break;
806                 head = p; id++;
807         }
808         
809         sprintf(hdev->name, "hci%d", id);
810         hdev->id = id;
811         list_add(&hdev->list, head);
812
813         atomic_set(&hdev->refcnt, 1);
814         spin_lock_init(&hdev->lock);
815
816         hdev->flags = 0;
817         hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
818         hdev->link_mode = (HCI_LM_ACCEPT);
819
820         tasklet_init(&hdev->cmd_task, hci_cmd_task,(unsigned long) hdev);
821         tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
822         tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);
823
824         skb_queue_head_init(&hdev->rx_q);
825         skb_queue_head_init(&hdev->cmd_q);
826         skb_queue_head_init(&hdev->raw_q);
827
828         init_waitqueue_head(&hdev->req_wait_q);
829         init_MUTEX(&hdev->req_lock);
830
831         inquiry_cache_init(hdev);
832
833         hci_conn_hash_init(hdev);
834
835         memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
836
837         atomic_set(&hdev->promisc, 0);
838
839         write_unlock_bh(&hci_dev_list_lock);
840
841         hci_register_sysfs(hdev);
842
843         hci_notify(hdev, HCI_DEV_REG);
844
845         return id;
846 }
847 EXPORT_SYMBOL(hci_register_dev);
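/* Illustrative driver-side usage (a sketch with hypothetical my_* callbacks,
 * not taken from a real driver): a transport driver allocates an hci_dev,
 * fills in its callbacks and registers it. open, close and destruct are
 * mandatory here; send is needed too, since hci_send_frame() calls it
 * unconditionally.
 *
 *	struct hci_dev *hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *
 *	hdev->type     = HCI_USB;
 *	hdev->owner    = THIS_MODULE;
 *	hdev->open     = my_open;
 *	hdev->close    = my_close;
 *	hdev->flush    = my_flush;	// optional
 *	hdev->send     = my_send;
 *	hdev->destruct = my_destruct;
 *
 *	if (hci_register_dev(hdev) < 0)
 *		hci_free_dev(hdev);
 */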
848
849 /* Unregister HCI device */
850 int hci_unregister_dev(struct hci_dev *hdev)
851 {
852         BT_DBG("%p name %s type %d", hdev, hdev->name, hdev->type);
853
854         hci_unregister_sysfs(hdev);
855
856         write_lock_bh(&hci_dev_list_lock);
857         list_del(&hdev->list);
858         write_unlock_bh(&hci_dev_list_lock);
859
860         hci_dev_do_close(hdev);
861
862         hci_notify(hdev, HCI_DEV_UNREG);
863
864         __hci_dev_put(hdev);
865         return 0;
866 }
867 EXPORT_SYMBOL(hci_unregister_dev);
868
869 /* Suspend HCI device */
870 int hci_suspend_dev(struct hci_dev *hdev)
871 {
872         hci_notify(hdev, HCI_DEV_SUSPEND);
873         return 0;
874 }
875 EXPORT_SYMBOL(hci_suspend_dev);
876
877 /* Resume HCI device */
878 int hci_resume_dev(struct hci_dev *hdev)
879 {
880         hci_notify(hdev, HCI_DEV_RESUME);
881         return 0;
882 }
883 EXPORT_SYMBOL(hci_resume_dev);
884
885 /* ---- Interface to upper protocols ---- */
886
887 /* Register/Unregister protocols.
888  * hci_task_lock is used to ensure that no tasks are running. */
889 int hci_register_proto(struct hci_proto *hp)
890 {
891         int err = 0;
892
893         BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
894
895         if (hp->id >= HCI_MAX_PROTO)
896                 return -EINVAL;
897
898         write_lock_bh(&hci_task_lock);
899
900         if (!hci_proto[hp->id])
901                 hci_proto[hp->id] = hp;
902         else
903                 err = -EEXIST;
904
905         write_unlock_bh(&hci_task_lock);
906
907         return err;
908 }
909 EXPORT_SYMBOL(hci_register_proto);
910
911 int hci_unregister_proto(struct hci_proto *hp)
912 {
913         int err = 0;
914
915         BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
916
917         if (hp->id >= HCI_MAX_PROTO)
918                 return -EINVAL;
919
920         write_lock_bh(&hci_task_lock);
921
922         if (hci_proto[hp->id])
923                 hci_proto[hp->id] = NULL;
924         else
925                 err = -ENOENT;
926
927         write_unlock_bh(&hci_task_lock);
928
929         return err;
930 }
931 EXPORT_SYMBOL(hci_unregister_proto);
932
933 static int hci_send_frame(struct sk_buff *skb)
934 {
935         struct hci_dev *hdev = (struct hci_dev *) skb->dev;
936
937         if (!hdev) {
938                 kfree_skb(skb);
939                 return -ENODEV;
940         }
941
942         BT_DBG("%s type %d len %d", hdev->name, skb->pkt_type, skb->len);
943
944         if (atomic_read(&hdev->promisc)) {
945                 /* Time stamp */
946                 do_gettimeofday(&skb->stamp);
947
948                 hci_send_to_sock(hdev, skb);
949         }
950
951         /* Get rid of the skb owner prior to sending it to the driver. */
952         skb_orphan(skb);
953
954         return hdev->send(skb);
955 }
956
957 /* Send HCI command */
958 int hci_send_cmd(struct hci_dev *hdev, __u16 ogf, __u16 ocf, __u32 plen, void *param)
959 {
960         int len = HCI_COMMAND_HDR_SIZE + plen;
961         struct hci_command_hdr *hdr;
962         struct sk_buff *skb;
963
964         BT_DBG("%s ogf 0x%x ocf 0x%x plen %d", hdev->name, ogf, ocf, plen);
965
966         skb = bt_skb_alloc(len, GFP_ATOMIC);
967         if (!skb) {
968                 BT_ERR("%s Can't allocate memory for HCI command", hdev->name);
969                 return -ENOMEM;
970         }
971
972         hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
973         hdr->opcode = __cpu_to_le16(hci_opcode_pack(ogf, ocf));
974         hdr->plen   = plen;
975
976         if (plen)
977                 memcpy(skb_put(skb, plen), param, plen);
978
979         BT_DBG("skb len %d", skb->len);
980
981         skb->pkt_type = HCI_COMMAND_PKT;
982         skb->dev = (void *) hdev;
983         skb_queue_tail(&hdev->cmd_q, skb);
984         hci_sched_cmd(hdev);
985
986         return 0;
987 }
988 EXPORT_SYMBOL(hci_send_cmd);
989
990 /* Get data from the previously sent command */
991 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 ogf, __u16 ocf)
992 {
993         struct hci_command_hdr *hdr;
994
995         if (!hdev->sent_cmd)
996                 return NULL;
997
998         hdr = (void *) hdev->sent_cmd->data;
999
1000         if (hdr->opcode != __cpu_to_le16(hci_opcode_pack(ogf, ocf)))
1001                 return NULL;
1002
1003         BT_DBG("%s ogf 0x%x ocf 0x%x", hdev->name, ogf, ocf);
1004
1005         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
1006 }
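/* This is typically used by the event handlers in hci_event.c to recover the
 * parameters of the command that a Command Complete event refers to, e.g.
 * hci_sent_cmd_data(hdev, OGF_HOST_CTL, OCF_WRITE_SCAN_ENABLE). */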
1007
1008 /* Send ACL data */
1009 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
1010 {
1011         struct hci_acl_hdr *hdr;
1012         int len = skb->len;
1013
1014         hdr = (struct hci_acl_hdr *) skb_push(skb, HCI_ACL_HDR_SIZE);
1015         hdr->handle = __cpu_to_le16(hci_handle_pack(handle, flags));
1016         hdr->dlen   = __cpu_to_le16(len);
1017
1018         skb->h.raw = (void *) hdr;
1019 }
1020
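/* The caller (normally L2CAP) may hand over a pre-fragmented buffer whose
 * continuation fragments hang off skb_shinfo(skb)->frag_list. The first
 * fragment is tagged ACL_START and the rest ACL_CONT, and they are queued
 * under the data_q lock so that fragments of one buffer stay contiguous in
 * the connection's queue. */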
1021 int hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
1022 {
1023         struct hci_dev *hdev = conn->hdev;
1024         struct sk_buff *list;
1025
1026         BT_DBG("%s conn %p flags 0x%x", hdev->name, conn, flags);
1027
1028         skb->dev = (void *) hdev;
1029         skb->pkt_type = HCI_ACLDATA_PKT;
1030         hci_add_acl_hdr(skb, conn->handle, flags | ACL_START);
1031
1032         if (!(list = skb_shinfo(skb)->frag_list)) {
1033                 /* Non fragmented */
1034                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
1035
1036                 skb_queue_tail(&conn->data_q, skb);
1037         } else {
1038                 /* Fragmented */
1039                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
1040
1041                 skb_shinfo(skb)->frag_list = NULL;
1042
1043                 /* Queue all fragments atomically */
1044                 spin_lock_bh(&conn->data_q.lock);
1045
1046                 __skb_queue_tail(&conn->data_q, skb);
1047                 do {
1048                         skb = list; list = list->next;
1049                         
1050                         skb->dev = (void *) hdev;
1051                         skb->pkt_type = HCI_ACLDATA_PKT;
1052                         hci_add_acl_hdr(skb, conn->handle, flags | ACL_CONT);
1053
1054                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
1055
1056                         __skb_queue_tail(&conn->data_q, skb);
1057                 } while (list);
1058
1059                 spin_unlock_bh(&conn->data_q.lock);
1060         }
1061
1062         hci_sched_tx(hdev);
1063         return 0;
1064 }
1065 EXPORT_SYMBOL(hci_send_acl);
1066
1067 /* Send SCO data */
1068 int hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
1069 {
1070         struct hci_dev *hdev = conn->hdev;
1071         struct hci_sco_hdr hdr;
1072
1073         BT_DBG("%s len %d", hdev->name, skb->len);
1074
1075         if (skb->len > hdev->sco_mtu) {
1076                 kfree_skb(skb);
1077                 return -EINVAL;
1078         }
1079
1080         hdr.handle = __cpu_to_le16(conn->handle);
1081         hdr.dlen   = skb->len;
1082
1083         skb->h.raw = skb_push(skb, HCI_SCO_HDR_SIZE);
1084         memcpy(skb->h.raw, &hdr, HCI_SCO_HDR_SIZE);
1085
1086         skb->dev = (void *) hdev;
1087         skb->pkt_type = HCI_SCODATA_PKT;
1088         skb_queue_tail(&conn->data_q, skb);
1089         hci_sched_tx(hdev);
1090         return 0;
1091 }
1092 EXPORT_SYMBOL(hci_send_sco);
1093
1094 /* ---- HCI TX task (outgoing data) ---- */
1095
1096 /* HCI Connection scheduler */
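/* Pick the connection of the given type with the fewest packets outstanding
 * (a simple fairness rule) and give it a quota: the number of free
 * controller buffers divided by the number of connections that have data
 * queued, but always at least one packet. */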
1097 static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
1098 {
1099         struct hci_conn_hash *h = &hdev->conn_hash;
1100         struct hci_conn  *conn = NULL;
1101         int num = 0, min = ~0;
1102         struct list_head *p;
1103
1104         /* We don't have to lock the device here. Connections are always
1105          * added and removed with the TX task disabled. */
1106         list_for_each(p, &h->list) {
1107                 struct hci_conn *c;
1108                 c = list_entry(p, struct hci_conn, list);
1109
1110                 if (c->type != type || c->state != BT_CONNECTED
1111                                 || skb_queue_empty(&c->data_q))
1112                         continue;
1113                 num++;
1114
1115                 if (c->sent < min) {
1116                         min  = c->sent;
1117                         conn = c;
1118                 }
1119         }
1120
1121         if (conn) {
1122                 int cnt = (type == ACL_LINK ? hdev->acl_cnt : hdev->sco_cnt);
1123                 int q = cnt / num;
1124                 *quote = q ? q : 1;
1125         } else
1126                 *quote = 0;
1127
1128         BT_DBG("conn %p quote %d", conn, *quote);
1129         return conn;
1130 }
1131
1132 static inline void hci_acl_tx_to(struct hci_dev *hdev)
1133 {
1134         struct hci_conn_hash *h = &hdev->conn_hash;
1135         struct list_head *p;
1136         struct hci_conn  *c;
1137
1138         BT_ERR("%s ACL tx timeout", hdev->name);
1139
1140         /* Kill stalled connections */
1141         list_for_each(p, &h->list) {
1142                 c = list_entry(p, struct hci_conn, list);
1143                 if (c->type == ACL_LINK && c->sent) {
1144                         BT_ERR("%s killing stalled ACL connection %s",
1145                                 hdev->name, batostr(&c->dst));
1146                         hci_acl_disconn(c, 0x13); /* remote user terminated connection */
1147                 }
1148         }
1149 }
1150
1151 static inline void hci_sched_acl(struct hci_dev *hdev)
1152 {
1153         struct hci_conn *conn;
1154         struct sk_buff *skb;
1155         int quote;
1156
1157         BT_DBG("%s", hdev->name);
1158
1159         /* ACL tx timeout must be longer than maximum
1160          * link supervision timeout (40.9 seconds) */
1161         if (!hdev->acl_cnt && (jiffies - hdev->acl_last_tx) > (HZ * 45))
1162                 hci_acl_tx_to(hdev);
1163
1164         while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
1165                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
1166                         BT_DBG("skb %p len %d", skb, skb->len);
1167                         hci_send_frame(skb);
1168                         hdev->acl_last_tx = jiffies;
1169
1170                         hdev->acl_cnt--;
1171                         conn->sent++;
1172                 }
1173         }
1174 }
1175
1176 /* Schedule SCO */
1177 static inline void hci_sched_sco(struct hci_dev *hdev)
1178 {
1179         struct hci_conn *conn;
1180         struct sk_buff *skb;
1181         int quote;
1182
1183         BT_DBG("%s", hdev->name);
1184
1185         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
1186                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
1187                         BT_DBG("skb %p len %d", skb, skb->len);
1188                         hci_send_frame(skb);
1189
1190                         conn->sent++;
1191                         if (conn->sent == ~0)
1192                                 conn->sent = 0;
1193                 }
1194         }
1195 }
1196
1197 static void hci_tx_task(unsigned long arg)
1198 {
1199         struct hci_dev *hdev = (struct hci_dev *) arg;
1200         struct sk_buff *skb;
1201
1202         read_lock(&hci_task_lock);
1203
1204         BT_DBG("%s acl %d sco %d", hdev->name, hdev->acl_cnt, hdev->sco_cnt);
1205
1206         /* Schedule queues and send stuff to HCI driver */
1207
1208         hci_sched_acl(hdev);
1209
1210         hci_sched_sco(hdev);
1211
1212         /* Send next queued raw (unknown type) packet */
1213         while ((skb = skb_dequeue(&hdev->raw_q)))
1214                 hci_send_frame(skb);
1215
1216         read_unlock(&hci_task_lock);
1217 }
1218
1219 /* ----- HCI RX task (incoming data processing) ----- */
1220
1221 /* ACL data packet */
1222 static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1223 {
1224         struct hci_acl_hdr *hdr = (void *) skb->data;
1225         struct hci_conn *conn;
1226         __u16 handle, flags;
1227
1228         skb_pull(skb, HCI_ACL_HDR_SIZE);
1229
1230         handle = __le16_to_cpu(hdr->handle);
1231         flags  = hci_flags(handle);
1232         handle = hci_handle(handle);
1233
1234         BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);
1235
1236         hdev->stat.acl_rx++;
1237
1238         hci_dev_lock(hdev);
1239         conn = hci_conn_hash_lookup_handle(hdev, handle);
1240         hci_dev_unlock(hdev);
1241         
1242         if (conn) {
1243                 register struct hci_proto *hp;
1244
1245                 /* Send to upper protocol */
1246                 if ((hp = hci_proto[HCI_PROTO_L2CAP]) && hp->recv_acldata) {
1247                         hp->recv_acldata(conn, skb, flags);
1248                         return;
1249                 }
1250         } else {
1251                 BT_ERR("%s ACL packet for unknown connection handle %d", 
1252                         hdev->name, handle);
1253         }
1254
1255         kfree_skb(skb);
1256 }
1257
1258 /* SCO data packet */
1259 static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1260 {
1261         struct hci_sco_hdr *hdr = (void *) skb->data;
1262         struct hci_conn *conn;
1263         __u16 handle;
1264
1265         skb_pull(skb, HCI_SCO_HDR_SIZE);
1266
1267         handle = __le16_to_cpu(hdr->handle);
1268
1269         BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
1270
1271         hdev->stat.sco_rx++;
1272
1273         hci_dev_lock(hdev);
1274         conn = hci_conn_hash_lookup_handle(hdev, handle);
1275         hci_dev_unlock(hdev);
1276
1277         if (conn) {
1278                 register struct hci_proto *hp;
1279
1280                 /* Send to upper protocol */
1281                 if ((hp = hci_proto[HCI_PROTO_SCO]) && hp->recv_scodata) {
1282                         hp->recv_scodata(conn, skb);
1283                         return;
1284                 }
1285         } else {
1286                 BT_ERR("%s SCO packet for unknown connection handle %d", 
1287                         hdev->name, handle);
1288         }
1289
1290         kfree_skb(skb);
1291 }
1292
1293 void hci_rx_task(unsigned long arg)
1294 {
1295         struct hci_dev *hdev = (struct hci_dev *) arg;
1296         struct sk_buff *skb;
1297
1298         BT_DBG("%s", hdev->name);
1299
1300         read_lock(&hci_task_lock);
1301
1302         while ((skb = skb_dequeue(&hdev->rx_q))) {
1303                 if (atomic_read(&hdev->promisc)) {
1304                         /* Send copy to the sockets */
1305                         hci_send_to_sock(hdev, skb);
1306                 }
1307
1308                 if (test_bit(HCI_RAW, &hdev->flags)) {
1309                         kfree_skb(skb);
1310                         continue;
1311                 }
1312
1313                 if (test_bit(HCI_INIT, &hdev->flags)) {
1314                         /* Don't process data packets in this state. */
1315                         switch (skb->pkt_type) {
1316                         case HCI_ACLDATA_PKT:
1317                         case HCI_SCODATA_PKT:
1318                                 kfree_skb(skb);
1319                                 continue;
1320                         }
1321                 }
1322
1323                 /* Process frame */
1324                 switch (skb->pkt_type) {
1325                 case HCI_EVENT_PKT:
1326                         hci_event_packet(hdev, skb);
1327                         break;
1328
1329                 case HCI_ACLDATA_PKT:
1330                         BT_DBG("%s ACL data packet", hdev->name);
1331                         hci_acldata_packet(hdev, skb);
1332                         break;
1333
1334                 case HCI_SCODATA_PKT:
1335                         BT_DBG("%s SCO data packet", hdev->name);
1336                         hci_scodata_packet(hdev, skb);
1337                         break;
1338
1339                 default:
1340                         kfree_skb(skb);
1341                         break;
1342                 }
1343         }
1344
1345         read_unlock(&hci_task_lock);
1346 }
1347
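/* hdev->cmd_cnt is the command flow-control credit: it is decremented here
 * for every command handed to the driver and replenished from the
 * Num_HCI_Command_Packets field of Command Complete/Command Status events
 * (handled in hci_event.c). If no credit reappears within a second the
 * command is assumed lost and the credit is forced back to one. */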
1348 static void hci_cmd_task(unsigned long arg)
1349 {
1350         struct hci_dev *hdev = (struct hci_dev *) arg;
1351         struct sk_buff *skb;
1352
1353         BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));
1354
1355         if (!atomic_read(&hdev->cmd_cnt) && (jiffies - hdev->cmd_last_tx) > HZ) {
1356                 BT_ERR("%s command tx timeout", hdev->name);
1357                 atomic_set(&hdev->cmd_cnt, 1);
1358         }
1359
1360         /* Send queued commands */
1361         if (atomic_read(&hdev->cmd_cnt) && (skb = skb_dequeue(&hdev->cmd_q))) {
1362                 if (hdev->sent_cmd)
1363                         kfree_skb(hdev->sent_cmd);
1364
1365                 if ((hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC))) {
1366                         atomic_dec(&hdev->cmd_cnt);
1367                         hci_send_frame(skb);
1368                         hdev->cmd_last_tx = jiffies;
1369                 } else {
1370                         skb_queue_head(&hdev->cmd_q, skb);
1371                         hci_sched_cmd(hdev);
1372                 }
1373         }
1374 }