/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/*
 * Bluetooth HCI Core.
 *
 * $Id: hci_core.c,v 1.6 2002/04/17 17:37:16 maxk Exp $
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/kmod.h>

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/major.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <net/sock.h>

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#ifndef CONFIG_BT_HCI_CORE_DEBUG
#undef  BT_DBG
#define BT_DBG(A...)
#endif

static void hci_cmd_task(unsigned long arg);
static void hci_rx_task(unsigned long arg);
static void hci_tx_task(unsigned long arg);
static void hci_notify(struct hci_dev *hdev, int event);

rwlock_t hci_task_lock = RW_LOCK_UNLOCKED;

/* HCI device list */
LIST_HEAD(hci_dev_list);
rwlock_t hci_dev_list_lock = RW_LOCK_UNLOCKED;

/* HCI protocols */
#define HCI_MAX_PROTO   2
struct hci_proto *hci_proto[HCI_MAX_PROTO];

/* HCI notifiers list */
static struct notifier_block *hci_notifier;

/* ---- HCI notifications ---- */

int hci_register_notifier(struct notifier_block *nb)
{
        return notifier_chain_register(&hci_notifier, nb);
}

int hci_unregister_notifier(struct notifier_block *nb)
{
        return notifier_chain_unregister(&hci_notifier, nb);
}

void hci_notify(struct hci_dev *hdev, int event)
{
        notifier_call_chain(&hci_notifier, event, hdev);
}
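
/*
 * Illustrative sketch (not part of the original file): a module that
 * wants device events can hook this notifier chain. The my_* names
 * below are hypothetical; the event codes (HCI_DEV_UP etc.) are the
 * ones passed to hci_notify() throughout this file.
 *
 *   static int my_hci_event(struct notifier_block *nb,
 *                           unsigned long event, void *ptr)
 *   {
 *           struct hci_dev *hdev = ptr;
 *           if (event == HCI_DEV_UP)
 *                   printk(KERN_INFO "%s is up\n", hdev->name);
 *           return NOTIFY_DONE;
 *   }
 *
 *   static struct notifier_block my_nb = { .notifier_call = my_hci_event };
 *
 *   hci_register_notifier(&my_nb);     // on module init
 *   hci_unregister_notifier(&my_nb);   // on module exit
 */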

/* ---- HCI requests ---- */

void hci_req_complete(struct hci_dev *hdev, int result)
{
        BT_DBG("%s result 0x%2.2x", hdev->name, result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

void hci_req_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

/* Execute request and wait for completion. */
static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
                                unsigned long opt, __u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hdev->req_status = HCI_REQ_PEND;

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        req(hdev, opt);
        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_err(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
                                unsigned long opt, __u32 timeout)
{
        int ret;

        /* Serialize all requests */
        hci_req_lock(hdev);
        ret = __hci_request(hdev, req, opt, timeout);
        hci_req_unlock(hdev);

        return ret;
}
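
/*
 * Illustrative note (not part of the original file): a "request" is a
 * callback that queues one or more HCI commands; hci_request() then
 * sleeps until hci_req_complete() is invoked by the event processing
 * code, or until the timeout expires. Enabling authentication through
 * hci_auth_req() below, for example, boils down to:
 *
 *   err = hci_request(hdev, hci_auth_req, 1, HCI_INIT_TIMEOUT);
 *   // 0 on success, -bt_err(status) on a controller error status,
 *   // -ETIMEDOUT if no completion arrived in time
 */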
175
176 static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
177 {
178         BT_DBG("%s %ld", hdev->name, opt);
179
180         /* Reset device */
181         hci_send_cmd(hdev, OGF_HOST_CTL, OCF_RESET, 0, NULL);
182 }
183
184 static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
185 {
186         __u16 param;
187
188         BT_DBG("%s %ld", hdev->name, opt);
189
190         /* Mandatory initialization */
191
192         /* Reset */
193         if (test_bit(HCI_QUIRK_RESET_ON_INIT, &hdev->quirks))
194                         hci_send_cmd(hdev, OGF_HOST_CTL, OCF_RESET, 0, NULL);
195
196         /* Read Local Supported Features */
197         hci_send_cmd(hdev, OGF_INFO_PARAM, OCF_READ_LOCAL_FEATURES, 0, NULL);
198
199         /* Read Buffer Size (ACL mtu, max pkt, etc.) */
200         hci_send_cmd(hdev, OGF_INFO_PARAM, OCF_READ_BUFFER_SIZE, 0, NULL);
201
202 #if 0
203         /* Host buffer size */
204         {
205                 struct hci_cp_host_buffer_size cp;
206                 cp.acl_mtu = __cpu_to_le16(HCI_MAX_ACL_SIZE);
207                 cp.sco_mtu = HCI_MAX_SCO_SIZE;
208                 cp.acl_max_pkt = __cpu_to_le16(0xffff);
209                 cp.sco_max_pkt = __cpu_to_le16(0xffff);
210                 hci_send_cmd(hdev, OGF_HOST_CTL, OCF_HOST_BUFFER_SIZE, sizeof(cp), &cp);
211         }
212 #endif
213
214         /* Read BD Address */
215         hci_send_cmd(hdev, OGF_INFO_PARAM, OCF_READ_BD_ADDR, 0, NULL);
216
217         /* Read Voice Setting */
218         hci_send_cmd(hdev, OGF_HOST_CTL, OCF_READ_VOICE_SETTING, 0, NULL);
219
220         /* Optional initialization */
221
222         /* Clear Event Filters */
223         {
224                 struct hci_cp_set_event_flt cp;
225                 cp.flt_type  = HCI_FLT_CLEAR_ALL;
226                 hci_send_cmd(hdev, OGF_HOST_CTL, OCF_SET_EVENT_FLT, sizeof(cp), &cp);
227         }
228
229         /* Page timeout ~20 secs */
230         param = __cpu_to_le16(0x8000);
231         hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_PG_TIMEOUT, 2, &param);
232
233         /* Connection accept timeout ~20 secs */
234         param = __cpu_to_le16(0x7d00);
235         hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_CA_TIMEOUT, 2, &param);
236 }
237
238 static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
239 {
240         __u8 scan = opt;
241
242         BT_DBG("%s %x", hdev->name, scan);
243
244         /* Inquiry and Page scans */
245         hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_SCAN_ENABLE, 1, &scan);
246 }
247
248 static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
249 {
250         __u8 auth = opt;
251
252         BT_DBG("%s %x", hdev->name, auth);
253
254         /* Authentication */
255         hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_AUTH_ENABLE, 1, &auth);
256 }
257
258 static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
259 {
260         __u8 encrypt = opt;
261
262         BT_DBG("%s %x", hdev->name, encrypt);
263
264         /* Authentication */
265         hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_ENCRYPT_MODE, 1, &encrypt);
266 }
267
268 /* Get HCI device by index. 
269  * Device is held on return. */
270 struct hci_dev *hci_dev_get(int index)
271 {
272         struct hci_dev *hdev = NULL;
273         struct list_head *p;
274
275         BT_DBG("%d", index);
276
277         if (index < 0)
278                 return NULL;
279
280         read_lock(&hci_dev_list_lock);
281         list_for_each(p, &hci_dev_list) {
282                 struct hci_dev *d = list_entry(p, struct hci_dev, list);
283                 if (d->id == index) {
284                         hdev = hci_dev_hold(d);
285                         break;
286                 }
287         }
288         read_unlock(&hci_dev_list_lock);
289         return hdev;
290 }
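
/*
 * Illustrative sketch (not part of the original file): hci_dev_get()
 * returns the device with a reference held, so every successful lookup
 * must be paired with hci_dev_put(), as the ioctl helpers below do:
 *
 *   struct hci_dev *hdev = hci_dev_get(0);   // look up hci0
 *   if (hdev) {
 *           ...
 *           hci_dev_put(hdev);
 *   }
 */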

/* ---- Inquiry support ---- */
void inquiry_cache_flush(struct hci_dev *hdev)
{
        struct inquiry_cache *cache = &hdev->inq_cache;
        struct inquiry_entry *next  = cache->list, *e;

        BT_DBG("cache %p", cache);

        cache->list = NULL;
        while ((e = next)) {
                next = e->next;
                kfree(e);
        }
}

struct inquiry_entry *inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct inquiry_cache *cache = &hdev->inq_cache;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %s", cache, batostr(bdaddr));

        for (e = cache->list; e; e = e->next)
                if (!bacmp(&e->info.bdaddr, bdaddr))
                        break;
        return e;
}

void inquiry_cache_update(struct hci_dev *hdev, struct inquiry_info *info)
{
        struct inquiry_cache *cache = &hdev->inq_cache;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %s", cache, batostr(&info->bdaddr));

        if (!(e = inquiry_cache_lookup(hdev, &info->bdaddr))) {
                /* Entry not in the cache. Add new one. */
                if (!(e = kmalloc(sizeof(struct inquiry_entry), GFP_ATOMIC)))
                        return;
                memset(e, 0, sizeof(struct inquiry_entry));
                e->next     = cache->list;
                cache->list = e;
        }

        memcpy(&e->info, info, sizeof(*info));
        e->timestamp = jiffies;
        cache->timestamp = jiffies;
}

int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
        struct inquiry_cache *cache = &hdev->inq_cache;
        struct inquiry_info *info = (struct inquiry_info *) buf;
        struct inquiry_entry *e;
        int copied = 0;

        for (e = cache->list; e && copied < num; e = e->next, copied++)
                memcpy(info++, &e->info, sizeof(*info));

        BT_DBG("cache %p, copied %d", cache, copied);
        return copied;
}
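
/*
 * Illustrative note (not part of the original file): the cache is a
 * singly linked list with new entries pushed at the head, so a dump
 * returns the most recently discovered devices first. Callers must
 * hold the device lock because the dump cannot sleep:
 *
 *   hci_dev_lock_bh(hdev);
 *   n = inquiry_cache_dump(hdev, max_rsp, buf);
 *   hci_dev_unlock_bh(hdev);
 */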

static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
        struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
        struct hci_cp_inquiry cp;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_INQUIRY, &hdev->flags))
                return;

        /* Start Inquiry */
        memcpy(&cp.lap, &ir->lap, 3);
        cp.length  = ir->length;
        cp.num_rsp = ir->num_rsp;
        hci_send_cmd(hdev, OGF_LINK_CTL, OCF_INQUIRY, sizeof(cp), &cp);
}

int hci_inquiry(unsigned long arg)
{
        struct hci_inquiry_req ir;
        struct hci_dev *hdev;
        int err = 0, do_inquiry = 0, max_rsp;
        long timeo;
        __u8 *buf, *ptr;

        ptr = (void *) arg;
        if (copy_from_user(&ir, ptr, sizeof(ir)))
                return -EFAULT;

        if (!(hdev = hci_dev_get(ir.dev_id)))
                return -ENODEV;

        hci_dev_lock_bh(hdev);
        if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
                                        inquiry_cache_empty(hdev) ||
                                        ir.flags & IREQ_CACHE_FLUSH) {
                inquiry_cache_flush(hdev);
                do_inquiry = 1;
        }
        hci_dev_unlock_bh(hdev);

        timeo = ir.length * 2 * HZ;
        if (do_inquiry && (err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo)) < 0)
                goto done;

        /* For an unlimited number of responses, use a buffer with 255 entries. */
        max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

        /* cache_dump can't sleep, so dump into a temporary buffer
         * and copy it to user space afterwards.
         */
        if (!(buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL))) {
                err = -ENOMEM;
                goto done;
        }

        hci_dev_lock_bh(hdev);
        ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
        hci_dev_unlock_bh(hdev);

        BT_DBG("num_rsp %d", ir.num_rsp);

        if (!copy_to_user(ptr, &ir, sizeof(ir))) {
                ptr += sizeof(ir);
                if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
                                        ir.num_rsp))
                        err = -EFAULT;
        } else
                err = -EFAULT;

        kfree(buf);

done:
        hci_dev_put(hdev);
        return err;
}
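
/*
 * Illustrative sketch (not part of the original file, assumes dd is an
 * open HCI socket): user space reaches hci_inquiry() through the
 * HCIINQUIRY ioctl with a buffer holding the request header followed
 * by room for the results:
 *
 *   struct {
 *           struct hci_inquiry_req ir;
 *           struct inquiry_info    info[255];
 *   } buf = { .ir = {
 *           .dev_id  = 0,
 *           .flags   = IREQ_CACHE_FLUSH,
 *           .lap     = { 0x33, 0x8b, 0x9e },  // general inquiry access code
 *           .length  = 8,                     // inquiry length, units of 1.28s
 *           .num_rsp = 0,                     // unlimited (capped at 255 above)
 *   } };
 *
 *   ioctl(dd, HCIINQUIRY, (unsigned long) &buf);
 *   // on success buf.ir.num_rsp holds the number of entries in buf.info
 */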

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        if (!(hdev = hci_dev_get(dev)))
                return -ENODEV;

        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_lock(hdev);

        if (test_bit(HCI_UP, &hdev->flags)) {
                ret = -EALREADY;
                goto done;
        }

        if (hdev->open(hdev)) {
                ret = -EIO;
                goto done;
        }

        if (!test_bit(HCI_RAW, &hdev->flags)) {
                atomic_set(&hdev->cmd_cnt, 1);
                set_bit(HCI_INIT, &hdev->flags);

                //__hci_request(hdev, hci_reset_req, 0, HZ);
                ret = __hci_request(hdev, hci_init_req, 0, HCI_INIT_TIMEOUT);

                clear_bit(HCI_INIT, &hdev->flags);
        }

        if (!ret) {
                hci_dev_hold(hdev);
                set_bit(HCI_UP, &hdev->flags);
                hci_notify(hdev, HCI_DEV_UP);
        } else {
                /* Init failed, cleanup */
                tasklet_kill(&hdev->rx_task);
                tasklet_kill(&hdev->tx_task);
                tasklet_kill(&hdev->cmd_task);

                skb_queue_purge(&hdev->cmd_q);
                skb_queue_purge(&hdev->rx_q);

                if (hdev->flush)
                        hdev->flush(hdev);

                if (hdev->sent_cmd) {
                        kfree_skb(hdev->sent_cmd);
                        hdev->sent_cmd = NULL;
                }

                hdev->close(hdev);
                hdev->flags = 0;
        }

done:
        hci_req_unlock(hdev);
        hci_dev_put(hdev);
        return ret;
}

static int hci_dev_do_close(struct hci_dev *hdev)
{
        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_cancel(hdev, ENODEV);
        hci_req_lock(hdev);

        if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
                hci_req_unlock(hdev);
                return 0;
        }

        /* Kill RX and TX tasks */
        tasklet_kill(&hdev->rx_task);
        tasklet_kill(&hdev->tx_task);

        hci_dev_lock_bh(hdev);
        inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock_bh(hdev);

        hci_notify(hdev, HCI_DEV_DOWN);

        if (hdev->flush)
                hdev->flush(hdev);

        /* Reset device */
        skb_queue_purge(&hdev->cmd_q);
        atomic_set(&hdev->cmd_cnt, 1);
        set_bit(HCI_INIT, &hdev->flags);
        __hci_request(hdev, hci_reset_req, 0, HZ/4);
        clear_bit(HCI_INIT, &hdev->flags);

        /* Kill cmd task */
        tasklet_kill(&hdev->cmd_task);

        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);
        skb_queue_purge(&hdev->raw_q);

        /* Drop last sent command */
        if (hdev->sent_cmd) {
                kfree_skb(hdev->sent_cmd);
                hdev->sent_cmd = NULL;
        }

        /* After this point our queues are empty
         * and no tasks are scheduled. */
        hdev->close(hdev);

        /* Clear flags */
        hdev->flags = 0;

        hci_req_unlock(hdev);

        hci_dev_put(hdev);
        return 0;
}

int hci_dev_close(__u16 dev)
{
        struct hci_dev *hdev;
        int err;

        if (!(hdev = hci_dev_get(dev)))
                return -ENODEV;
        err = hci_dev_do_close(hdev);
        hci_dev_put(hdev);
        return err;
}

int hci_dev_reset(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        if (!(hdev = hci_dev_get(dev)))
                return -ENODEV;

        hci_req_lock(hdev);
        tasklet_disable(&hdev->tx_task);

        if (!test_bit(HCI_UP, &hdev->flags))
                goto done;

        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);

        hci_dev_lock_bh(hdev);
        inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock_bh(hdev);

        if (hdev->flush)
                hdev->flush(hdev);

        atomic_set(&hdev->cmd_cnt, 1);
        hdev->acl_cnt = 0; hdev->sco_cnt = 0;

        ret = __hci_request(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
        tasklet_enable(&hdev->tx_task);
        hci_req_unlock(hdev);
        hci_dev_put(hdev);
        return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        if (!(hdev = hci_dev_get(dev)))
                return -ENODEV;

        memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

        hci_dev_put(hdev);

        return ret;
}

int hci_dev_cmd(unsigned int cmd, unsigned long arg)
{
        struct hci_dev *hdev;
        struct hci_dev_req dr;
        int err = 0;

        if (copy_from_user(&dr, (void *) arg, sizeof(dr)))
                return -EFAULT;

        if (!(hdev = hci_dev_get(dr.dev_id)))
                return -ENODEV;

        switch (cmd) {
        case HCISETAUTH:
                err = hci_request(hdev, hci_auth_req, dr.dev_opt, HCI_INIT_TIMEOUT);
                break;

        case HCISETENCRYPT:
                if (!lmp_encrypt_capable(hdev)) {
                        err = -EOPNOTSUPP;
                        break;
                }

                if (!test_bit(HCI_AUTH, &hdev->flags)) {
                        /* Auth must be enabled first */
                        err = hci_request(hdev, hci_auth_req,
                                        dr.dev_opt, HCI_INIT_TIMEOUT);
                        if (err)
                                break;
                }

                err = hci_request(hdev, hci_encrypt_req,
                                        dr.dev_opt, HCI_INIT_TIMEOUT);
                break;

        case HCISETSCAN:
                err = hci_request(hdev, hci_scan_req, dr.dev_opt, HCI_INIT_TIMEOUT);
                break;

        case HCISETPTYPE:
                hdev->pkt_type = (__u16) dr.dev_opt;
                break;

        case HCISETLINKPOL:
                hdev->link_policy = (__u16) dr.dev_opt;
                break;

        case HCISETLINKMODE:
                hdev->link_mode = ((__u16) dr.dev_opt) & (HCI_LM_MASTER | HCI_LM_ACCEPT);
                break;

        case HCISETACLMTU:
                hdev->acl_mtu  = *((__u16 *)&dr.dev_opt + 1);
                hdev->acl_pkts = *((__u16 *)&dr.dev_opt + 0);
                break;

        case HCISETSCOMTU:
                hdev->sco_mtu  = *((__u16 *)&dr.dev_opt + 1);
                hdev->sco_pkts = *((__u16 *)&dr.dev_opt + 0);
                break;

        default:
                err = -EINVAL;
                break;
        }
        hci_dev_put(hdev);
        return err;
}
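
/*
 * Illustrative sketch (not part of the original file): these ioctls
 * are what hciconfig-style tools use. Enabling page and inquiry scan,
 * for instance, lands in the HCISETSCAN case above:
 *
 *   struct hci_dev_req dr = {
 *           .dev_id  = 0,                        // hci0
 *           .dev_opt = SCAN_PAGE | SCAN_INQUIRY,
 *   };
 *   ioctl(ctl, HCISETSCAN, (unsigned long) &dr); // ctl: open HCI socket
 */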

int hci_get_dev_list(unsigned long arg)
{
        struct hci_dev_list_req *dl;
        struct hci_dev_req *dr;
        struct list_head *p;
        int n = 0, size, err;
        __u16 dev_num;

        if (get_user(dev_num, (__u16 *) arg))
                return -EFAULT;

        if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
                return -EINVAL;

        size = sizeof(*dl) + dev_num * sizeof(*dr);

        if (!(dl = kmalloc(size, GFP_KERNEL)))
                return -ENOMEM;

        dr = dl->dev_req;

        read_lock_bh(&hci_dev_list_lock);
        list_for_each(p, &hci_dev_list) {
                struct hci_dev *hdev;
                hdev = list_entry(p, struct hci_dev, list);
                (dr + n)->dev_id  = hdev->id;
                (dr + n)->dev_opt = hdev->flags;
                if (++n >= dev_num)
                        break;
        }
        read_unlock_bh(&hci_dev_list_lock);

        dl->dev_num = n;
        size = sizeof(*dl) + n * sizeof(*dr);

        err = copy_to_user((void *) arg, dl, size);
        kfree(dl);

        return err ? -EFAULT : 0;
}

int hci_get_dev_info(unsigned long arg)
{
        struct hci_dev *hdev;
        struct hci_dev_info di;
        int err = 0;

        if (copy_from_user(&di, (void *) arg, sizeof(di)))
                return -EFAULT;

        if (!(hdev = hci_dev_get(di.dev_id)))
                return -ENODEV;

        strcpy(di.name, hdev->name);
        di.bdaddr   = hdev->bdaddr;
        di.type     = hdev->type;
        di.flags    = hdev->flags;
        di.pkt_type = hdev->pkt_type;
        di.acl_mtu  = hdev->acl_mtu;
        di.acl_pkts = hdev->acl_pkts;
        di.sco_mtu  = hdev->sco_mtu;
        di.sco_pkts = hdev->sco_pkts;
        di.link_policy = hdev->link_policy;
        di.link_mode   = hdev->link_mode;

        memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
        memcpy(&di.features, &hdev->features, sizeof(di.features));

        if (copy_to_user((void *) arg, &di, sizeof(di)))
                err = -EFAULT;

        hci_dev_put(hdev);

        return err;
}

/* ---- Interface to HCI drivers ---- */

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
        struct hci_dev *hdev;

        hdev = kmalloc(sizeof(struct hci_dev), GFP_KERNEL);
        if (!hdev)
                return NULL;

        memset(hdev, 0, sizeof(struct hci_dev));

        return hdev;
}

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
        /* will free via class release */
        class_device_put(&hdev->class_dev);
}

/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
        struct list_head *head = &hci_dev_list, *p;
        int id = 0;

        BT_DBG("%p name %s type %d owner %p", hdev, hdev->name, hdev->type, hdev->owner);

        if (!hdev->open || !hdev->close || !hdev->destruct)
                return -EINVAL;

        write_lock_bh(&hci_dev_list_lock);

        /* Find first available device id */
        list_for_each(p, &hci_dev_list) {
                if (list_entry(p, struct hci_dev, list)->id != id)
                        break;
                head = p; id++;
        }

        sprintf(hdev->name, "hci%d", id);
        hdev->id = id;
        list_add(&hdev->list, head);

        atomic_set(&hdev->refcnt, 1);
        spin_lock_init(&hdev->lock);

        hdev->flags = 0;
        hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
        hdev->link_mode = (HCI_LM_ACCEPT);

        tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
        tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
        tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);

        skb_queue_head_init(&hdev->rx_q);
        skb_queue_head_init(&hdev->cmd_q);
        skb_queue_head_init(&hdev->raw_q);

        init_waitqueue_head(&hdev->req_wait_q);
        init_MUTEX(&hdev->req_lock);

        inquiry_cache_init(hdev);

        hci_conn_hash_init(hdev);

        memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

        atomic_set(&hdev->promisc, 0);

        write_unlock_bh(&hci_dev_list_lock);

        hci_register_sysfs(hdev);

        hci_notify(hdev, HCI_DEV_REG);

        return id;
}
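
/*
 * Illustrative sketch (not part of the original file): a transport
 * driver fills in the mandatory callbacks checked above before
 * registering. All my_* names are hypothetical:
 *
 *   struct hci_dev *hdev = hci_alloc_dev();
 *   if (!hdev)
 *           return -ENOMEM;
 *   hdev->type     = HCI_USB;      // bus type from hci.h
 *   hdev->open     = my_open;      // bring up the transport
 *   hdev->close    = my_close;     // shut it down
 *   hdev->flush    = my_flush;     // optional: drop pending frames
 *   hdev->send     = my_send;      // transmit one skb to the hardware
 *   hdev->destruct = my_destruct;  // called when the last ref is dropped
 *   if (hci_register_dev(hdev) < 0)
 *           hci_free_dev(hdev);
 */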

/* Unregister HCI device */
int hci_unregister_dev(struct hci_dev *hdev)
{
        BT_DBG("%p name %s type %d", hdev, hdev->name, hdev->type);

        hci_unregister_sysfs(hdev);

        write_lock_bh(&hci_dev_list_lock);
        list_del(&hdev->list);
        write_unlock_bh(&hci_dev_list_lock);

        hci_dev_do_close(hdev);

        hci_notify(hdev, HCI_DEV_UNREG);

        __hci_dev_put(hdev);
        return 0;
}

/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
        hci_notify(hdev, HCI_DEV_SUSPEND);
        return 0;
}

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
        hci_notify(hdev, HCI_DEV_RESUME);
        return 0;
}

/* ---- Interface to upper protocols ---- */

/* Register/Unregister protocols.
 * hci_task_lock is used to ensure that no tasks are running. */
int hci_register_proto(struct hci_proto *hp)
{
        int err = 0;

        BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

        if (hp->id >= HCI_MAX_PROTO)
                return -EINVAL;

        write_lock_bh(&hci_task_lock);

        if (!hci_proto[hp->id])
                hci_proto[hp->id] = hp;
        else
                err = -EEXIST;

        write_unlock_bh(&hci_task_lock);

        return err;
}

int hci_unregister_proto(struct hci_proto *hp)
{
        int err = 0;

        BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

        if (hp->id >= HCI_MAX_PROTO)
                return -EINVAL;

        write_lock_bh(&hci_task_lock);

        if (hci_proto[hp->id])
                hci_proto[hp->id] = NULL;
        else
                err = -ENOENT;

        write_unlock_bh(&hci_task_lock);

        return err;
}
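
/*
 * Illustrative sketch (not part of the original file): this is how
 * L2CAP and SCO attach themselves. A protocol supplies its slot id
 * and receive callbacks; my_recv_acldata is hypothetical:
 *
 *   static struct hci_proto my_proto = {
 *           .name         = "L2CAP",
 *           .id           = HCI_PROTO_L2CAP,
 *           .recv_acldata = my_recv_acldata,
 *   };
 *   hci_register_proto(&my_proto);
 */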

static int hci_send_frame(struct sk_buff *skb)
{
        struct hci_dev *hdev = (struct hci_dev *) skb->dev;

        if (!hdev) {
                kfree_skb(skb);
                return -ENODEV;
        }

        BT_DBG("%s type %d len %d", hdev->name, skb->pkt_type, skb->len);

        if (atomic_read(&hdev->promisc)) {
                /* Time stamp */
                do_gettimeofday(&skb->stamp);

                hci_send_to_sock(hdev, skb);
        }

        /* Get rid of skb owner prior to sending to the driver. */
        skb_orphan(skb);

        return hdev->send(skb);
}

/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 ogf, __u16 ocf, __u32 plen, void *param)
{
        int len = HCI_COMMAND_HDR_SIZE + plen;
        struct hci_command_hdr *hdr;
        struct sk_buff *skb;

        BT_DBG("%s ogf 0x%x ocf 0x%x plen %d", hdev->name, ogf, ocf, plen);

        skb = bt_skb_alloc(len, GFP_ATOMIC);
        if (!skb) {
                BT_ERR("%s Can't allocate memory for HCI command", hdev->name);
                return -ENOMEM;
        }

        hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
        hdr->opcode = __cpu_to_le16(hci_opcode_pack(ogf, ocf));
        hdr->plen   = plen;

        if (plen)
                memcpy(skb_put(skb, plen), param, plen);

        BT_DBG("skb len %d", skb->len);

        skb->pkt_type = HCI_COMMAND_PKT;
        skb->dev = (void *) hdev;
        skb_queue_tail(&hdev->cmd_q, skb);
        hci_sched_cmd(hdev);

        return 0;
}
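
/*
 * Illustrative example (mirrors hci_scan_req() above): a command is
 * identified by an opcode group field (OGF) and an opcode command
 * field (OCF), plus an optional parameter block:
 *
 *   __u8 scan = SCAN_PAGE | SCAN_INQUIRY;
 *   hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_SCAN_ENABLE, 1, &scan);
 *
 * The skb is only queued here; hci_cmd_task() below transmits it once
 * the controller has a free command slot (cmd_cnt > 0).
 */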

/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 ogf, __u16 ocf)
{
        struct hci_command_hdr *hdr;

        if (!hdev->sent_cmd)
                return NULL;

        hdr = (void *) hdev->sent_cmd->data;

        if (hdr->opcode != __cpu_to_le16(hci_opcode_pack(ogf, ocf)))
                return NULL;

        BT_DBG("%s ogf 0x%x ocf 0x%x", hdev->name, ogf, ocf);

        return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}

/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
        struct hci_acl_hdr *hdr;
        int len = skb->len;

        hdr = (struct hci_acl_hdr *) skb_push(skb, HCI_ACL_HDR_SIZE);
        hdr->handle = __cpu_to_le16(hci_handle_pack(handle, flags));
        hdr->dlen   = __cpu_to_le16(len);

        skb->h.raw = (void *) hdr;
}
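
/*
 * Worked example (not part of the original file): hci_handle_pack()
 * from hci.h keeps the 12-bit connection handle in the low bits and
 * puts the packet boundary/broadcast flags above it. For handle 0x002a
 * with flags ACL_START (0x02):
 *
 *   hci_handle_pack(0x002a, ACL_START)
 *           == (0x002a & 0x0fff) | (0x02 << 12)
 *           == 0x202a   // stored little endian by __cpu_to_le16()
 */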

int hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
{
        struct hci_dev *hdev = conn->hdev;
        struct sk_buff *list;

        BT_DBG("%s conn %p flags 0x%x", hdev->name, conn, flags);

        skb->dev = (void *) hdev;
        skb->pkt_type = HCI_ACLDATA_PKT;
        hci_add_acl_hdr(skb, conn->handle, flags | ACL_START);

        if (!(list = skb_shinfo(skb)->frag_list)) {
                /* Non fragmented */
                BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

                skb_queue_tail(&conn->data_q, skb);
        } else {
                /* Fragmented */
                BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

                skb_shinfo(skb)->frag_list = NULL;

                /* Queue all fragments atomically */
                spin_lock_bh(&conn->data_q.lock);

                __skb_queue_tail(&conn->data_q, skb);
                do {
                        skb = list; list = list->next;

                        skb->dev = (void *) hdev;
                        skb->pkt_type = HCI_ACLDATA_PKT;
                        hci_add_acl_hdr(skb, conn->handle, flags | ACL_CONT);

                        BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

                        __skb_queue_tail(&conn->data_q, skb);
                } while (list);

                spin_unlock_bh(&conn->data_q.lock);
        }

        hci_sched_tx(hdev);
        return 0;
}

/* Send SCO data */
int hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
        struct hci_dev *hdev = conn->hdev;
        struct hci_sco_hdr hdr;

        BT_DBG("%s len %d", hdev->name, skb->len);

        if (skb->len > hdev->sco_mtu) {
                kfree_skb(skb);
                return -EINVAL;
        }

        hdr.handle = __cpu_to_le16(conn->handle);
        hdr.dlen   = skb->len;

        skb->h.raw = skb_push(skb, HCI_SCO_HDR_SIZE);
        memcpy(skb->h.raw, &hdr, HCI_SCO_HDR_SIZE);

        skb->dev = (void *) hdev;
        skb->pkt_type = HCI_SCODATA_PKT;
        skb_queue_tail(&conn->data_q, skb);
        hci_sched_tx(hdev);
        return 0;
}

/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
        struct hci_conn_hash *h = &hdev->conn_hash;
        struct hci_conn  *conn = NULL;
        int num = 0, min = ~0;
        struct list_head *p;

        /* We don't have to lock device here. Connections are always
         * added and removed with TX task disabled. */
        list_for_each(p, &h->list) {
                struct hci_conn *c;
                c = list_entry(p, struct hci_conn, list);

                if (c->type != type || c->state != BT_CONNECTED
                                || skb_queue_empty(&c->data_q))
                        continue;
                num++;

                if (c->sent < min) {
                        min  = c->sent;
                        conn = c;
                }
        }

        if (conn) {
                int cnt = (type == ACL_LINK ? hdev->acl_cnt : hdev->sco_cnt);
                int q = cnt / num;
                *quote = q ? q : 1;
        } else
                *quote = 0;

        BT_DBG("conn %p quote %d", conn, *quote);
        return conn;
}
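
/*
 * Worked example (not part of the original file): the quote spreads
 * the free controller buffers over the busy connections. With
 * hdev->acl_cnt == 8 free ACL buffers and num == 3 connections that
 * have queued data, the least-recently-served connection may send
 * 8 / 3 == 2 packets this round (integer division, minimum 1).
 */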

static inline void hci_acl_tx_to(struct hci_dev *hdev)
{
        struct hci_conn_hash *h = &hdev->conn_hash;
        struct list_head *p;
        struct hci_conn  *c;

        BT_ERR("%s ACL tx timeout", hdev->name);

        /* Kill stalled connections */
        list_for_each(p, &h->list) {
                c = list_entry(p, struct hci_conn, list);
                if (c->type == ACL_LINK && c->sent) {
                        BT_ERR("%s killing stalled ACL connection %s",
                                hdev->name, batostr(&c->dst));
                        hci_acl_disconn(c, 0x13);
                }
        }
}

static inline void hci_sched_acl(struct hci_dev *hdev)
{
        struct hci_conn *conn;
        struct sk_buff *skb;
        int quote;

        BT_DBG("%s", hdev->name);

        /* ACL tx timeout must be longer than maximum
         * link supervision timeout (40.9 seconds) */
        if (!hdev->acl_cnt && (jiffies - hdev->acl_last_tx) > (HZ * 45))
                hci_acl_tx_to(hdev);

        while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
                while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
                        BT_DBG("skb %p len %d", skb, skb->len);
                        hci_send_frame(skb);
                        hdev->acl_last_tx = jiffies;

                        hdev->acl_cnt--;
                        conn->sent++;
                }
        }
}

/* Schedule SCO */
static inline void hci_sched_sco(struct hci_dev *hdev)
{
        struct hci_conn *conn;
        struct sk_buff *skb;
        int quote;

        BT_DBG("%s", hdev->name);

        while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
                while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
                        BT_DBG("skb %p len %d", skb, skb->len);
                        hci_send_frame(skb);

                        conn->sent++;
                        if (conn->sent == ~0)
                                conn->sent = 0;
                }
        }
}

static void hci_tx_task(unsigned long arg)
{
        struct hci_dev *hdev = (struct hci_dev *) arg;
        struct sk_buff *skb;

        read_lock(&hci_task_lock);

        BT_DBG("%s acl %d sco %d", hdev->name, hdev->acl_cnt, hdev->sco_cnt);

        /* Schedule queues and send stuff to HCI driver */

        hci_sched_acl(hdev);

        hci_sched_sco(hdev);

        /* Send next queued raw (unknown type) packet */
        while ((skb = skb_dequeue(&hdev->raw_q)))
                hci_send_frame(skb);

        read_unlock(&hci_task_lock);
}

/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
        struct hci_acl_hdr *hdr = (void *) skb->data;
        struct hci_conn *conn;
        __u16 handle, flags;

        skb_pull(skb, HCI_ACL_HDR_SIZE);

        handle = __le16_to_cpu(hdr->handle);
        flags  = hci_flags(handle);
        handle = hci_handle(handle);

        BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);

        hdev->stat.acl_rx++;

        hci_dev_lock(hdev);
        conn = hci_conn_hash_lookup_handle(hdev, handle);
        hci_dev_unlock(hdev);

        if (conn) {
                register struct hci_proto *hp;

                /* Send to upper protocol */
                if ((hp = hci_proto[HCI_PROTO_L2CAP]) && hp->recv_acldata) {
                        hp->recv_acldata(conn, skb, flags);
                        return;
                }
        } else {
                BT_ERR("%s ACL packet for unknown connection handle %d",
                        hdev->name, handle);
        }

        kfree_skb(skb);
}

/* SCO data packet */
static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
        struct hci_sco_hdr *hdr = (void *) skb->data;
        struct hci_conn *conn;
        __u16 handle;

        skb_pull(skb, HCI_SCO_HDR_SIZE);

        handle = __le16_to_cpu(hdr->handle);

        BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);

        hdev->stat.sco_rx++;

        hci_dev_lock(hdev);
        conn = hci_conn_hash_lookup_handle(hdev, handle);
        hci_dev_unlock(hdev);

        if (conn) {
                register struct hci_proto *hp;

                /* Send to upper protocol */
                if ((hp = hci_proto[HCI_PROTO_SCO]) && hp->recv_scodata) {
                        hp->recv_scodata(conn, skb);
                        return;
                }
        } else {
                BT_ERR("%s SCO packet for unknown connection handle %d",
                        hdev->name, handle);
        }

        kfree_skb(skb);
}

void hci_rx_task(unsigned long arg)
{
        struct hci_dev *hdev = (struct hci_dev *) arg;
        struct sk_buff *skb;

        BT_DBG("%s", hdev->name);

        read_lock(&hci_task_lock);

        while ((skb = skb_dequeue(&hdev->rx_q))) {
                if (atomic_read(&hdev->promisc)) {
                        /* Send copy to the sockets */
                        hci_send_to_sock(hdev, skb);
                }

                if (test_bit(HCI_RAW, &hdev->flags)) {
                        kfree_skb(skb);
                        continue;
                }

                if (test_bit(HCI_INIT, &hdev->flags)) {
                        /* Don't process data packets in this state. */
                        switch (skb->pkt_type) {
                        case HCI_ACLDATA_PKT:
                        case HCI_SCODATA_PKT:
                                kfree_skb(skb);
                                continue;
                        }
                }

                /* Process frame */
                switch (skb->pkt_type) {
                case HCI_EVENT_PKT:
                        hci_event_packet(hdev, skb);
                        break;

                case HCI_ACLDATA_PKT:
                        BT_DBG("%s ACL data packet", hdev->name);
                        hci_acldata_packet(hdev, skb);
                        break;

                case HCI_SCODATA_PKT:
                        BT_DBG("%s SCO data packet", hdev->name);
                        hci_scodata_packet(hdev, skb);
                        break;

                default:
                        kfree_skb(skb);
                        break;
                }
        }

        read_unlock(&hci_task_lock);
}

static void hci_cmd_task(unsigned long arg)
{
        struct hci_dev *hdev = (struct hci_dev *) arg;
        struct sk_buff *skb;

        BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

        if (!atomic_read(&hdev->cmd_cnt) && (jiffies - hdev->cmd_last_tx) > HZ) {
                BT_ERR("%s command tx timeout", hdev->name);
                atomic_set(&hdev->cmd_cnt, 1);
        }

        /* Send queued commands */
        if (atomic_read(&hdev->cmd_cnt) && (skb = skb_dequeue(&hdev->cmd_q))) {
                if (hdev->sent_cmd)
                        kfree_skb(hdev->sent_cmd);

                if ((hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC))) {
                        atomic_dec(&hdev->cmd_cnt);
                        hci_send_frame(skb);
                        hdev->cmd_last_tx = jiffies;
                } else {
                        skb_queue_head(&hdev->cmd_q, skb);
                        hci_sched_cmd(hdev);
                }
        }
}