2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth L2CAP core and sockets. */
27 #include <linux/config.h>
28 #include <linux/module.h>
30 #include <linux/types.h>
31 #include <linux/errno.h>
32 #include <linux/kernel.h>
33 #include <linux/major.h>
34 #include <linux/sched.h>
35 #include <linux/slab.h>
36 #include <linux/poll.h>
37 #include <linux/fcntl.h>
38 #include <linux/init.h>
39 #include <linux/interrupt.h>
40 #include <linux/socket.h>
41 #include <linux/skbuff.h>
42 #include <linux/proc_fs.h>
43 #include <linux/seq_file.h>
44 #include <linux/list.h>
47 #include <asm/system.h>
48 #include <asm/uaccess.h>
49 #include <asm/unaligned.h>
51 #include <net/bluetooth/bluetooth.h>
52 #include <net/bluetooth/hci_core.h>
53 #include <net/bluetooth/l2cap.h>
55 #ifndef CONFIG_BT_L2CAP_DEBUG
62 static struct proto_ops l2cap_sock_ops;
/* Global list of all L2CAP sockets, guarded by its own rwlock. */
64 struct bt_sock_list l2cap_sk_list = {
65 .lock = RW_LOCK_UNLOCKED
/* Forward declarations: connection/channel teardown helpers. */
68 static int l2cap_conn_del(struct hci_conn *conn, int err);
70 static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent);
71 static void l2cap_chan_del(struct sock *sk, int err);
/* Forward declarations: socket close/kill helpers. */
73 static void __l2cap_sock_close(struct sock *sk, int reason);
74 static void l2cap_sock_close(struct sock *sk);
75 static void l2cap_sock_kill(struct sock *sk);
/* Forward declarations: signalling command senders. */
77 static int l2cap_send_req(struct l2cap_conn *conn, u8 code, u16 len, void *data);
78 static int l2cap_send_rsp(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data);
80 /* ---- L2CAP timers ---- */
/*
 * sk->sk_timer expiry callback: force-close the socket with ETIMEDOUT.
 * NOTE(review): this view of the file is truncated (brace/blank lines and
 * some statements are missing); visible code lines are kept verbatim.
 */
81 static void l2cap_sock_timeout(unsigned long arg)
83 struct sock *sk = (struct sock *) arg;
85 BT_DBG("sock %p state %d", sk, sk->sk_state);
88 __l2cap_sock_close(sk, ETIMEDOUT);
/* (Re)arm the socket timer to fire "timeout" jiffies from now. */
95 static void l2cap_sock_set_timer(struct sock *sk, long timeout)
97 BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
98 sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
/* Cancel a pending socket timer, if any. */
101 static void l2cap_sock_clear_timer(struct sock *sk)
103 BT_DBG("sock %p state %d", sk, sk->sk_state);
104 sk_stop_timer(sk, &sk->sk_timer);
/* One-time setup: route sk_timer expiry to l2cap_sock_timeout(sk). */
107 static void l2cap_sock_init_timer(struct sock *sk)
109 init_timer(&sk->sk_timer);
110 sk->sk_timer.function = l2cap_sock_timeout;
111 sk->sk_timer.data = (unsigned long)sk;
114 /* ---- L2CAP connections ---- */
/*
 * Get or create the L2CAP layer state for an ACL link.  Returns the
 * existing hcon->l2cap_data when present; otherwise allocates a zeroed
 * l2cap_conn (GFP_ATOMIC -- presumably callable from softirq context)
 * and attaches it to the hci_conn.
 */
115 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
117 struct l2cap_conn *conn;
119 if ((conn = hcon->l2cap_data))
125 if (!(conn = kmalloc(sizeof(struct l2cap_conn), GFP_ATOMIC)))
127 memset(conn, 0, sizeof(struct l2cap_conn));
129 hcon->l2cap_data = conn;
/* Outgoing fragments on this link are bounded by the ACL MTU. */
132 conn->mtu = hcon->hdev->acl_mtu;
133 conn->src = &hcon->hdev->bdaddr;
134 conn->dst = &hcon->dst;
136 spin_lock_init(&conn->lock);
137 conn->chan_list.lock = RW_LOCK_UNLOCKED;
139 BT_DBG("hcon %p conn %p", hcon, conn);
/*
 * Tear down the L2CAP state of an ACL link: free any partially
 * reassembled frame, delete every channel with the given error, then
 * detach from the hci_conn.
 */
143 static int l2cap_conn_del(struct hci_conn *hcon, int err)
145 struct l2cap_conn *conn;
148 if (!(conn = hcon->l2cap_data))
151 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
154 kfree_skb(conn->rx_skb);
/* Drain the channel list; l2cap_chan_del() unlinks each head entry. */
157 while ((sk = conn->chan_list.head)) {
159 l2cap_chan_del(sk, err);
164 hcon->l2cap_data = NULL;
/* Add a channel to the connection, taking the list write lock. */
169 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
171 struct l2cap_chan_list *l = &conn->chan_list;
172 write_lock(&l->lock);
173 __l2cap_chan_add(conn, sk, parent);
174 write_unlock(&l->lock);
177 /* ---- Socket interface ---- */
/*
 * Exact lookup by (source PSM, source bdaddr).  Caller must hold
 * l2cap_sk_list.lock.
 */
178 static struct sock *__l2cap_get_sock_by_addr(u16 psm, bdaddr_t *src)
181 struct hlist_node *node;
182 sk_for_each(sk, node, &l2cap_sk_list.head)
183 if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
190 /* Find socket with psm and source bdaddr.
191 * Returns closest match.
/*
 * sk1 remembers a BDADDR_ANY wildcard match; an exact source-address
 * match breaks out of the loop and wins.
 */
193 static struct sock *__l2cap_get_sock_by_psm(int state, u16 psm, bdaddr_t *src)
195 struct sock *sk = NULL, *sk1 = NULL;
196 struct hlist_node *node;
198 sk_for_each(sk, node, &l2cap_sk_list.head) {
/* state == 0 matches any socket state. */
199 if (state && sk->sk_state != state)
202 if (l2cap_pi(sk)->psm == psm) {
204 if (!bacmp(&bt_sk(sk)->src, src))
208 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
/* node != NULL iff the loop broke early on an exact match. */
212 return node ? sk : sk1;
215 /* Find socket with given address (psm, src).
216 * Returns locked socket */
217 static inline struct sock *l2cap_get_sock_by_psm(int state, u16 psm, bdaddr_t *src)
220 read_lock(&l2cap_sk_list.lock);
221 s = __l2cap_get_sock_by_psm(state, psm, src);
/* Lock the socket before dropping the list lock so it cannot vanish. */
222 if (s) bh_lock_sock(s);
223 read_unlock(&l2cap_sk_list.lock);
/* sk_destruct hook: purge queued skbs and free protocol private data. */
227 static void l2cap_sock_destruct(struct sock *sk)
231 skb_queue_purge(&sk->sk_receive_queue);
232 skb_queue_purge(&sk->sk_write_queue);
235 kfree(sk->sk_protinfo);
/* Close all not-yet-accepted children, then mark the listener closed. */
238 static void l2cap_sock_cleanup_listen(struct sock *parent)
242 BT_DBG("parent %p", parent);
244 /* Close not yet accepted channels */
245 while ((sk = bt_accept_dequeue(parent, NULL)))
246 l2cap_sock_close(sk);
248 parent->sk_state = BT_CLOSED;
249 parent->sk_zapped = 1;
252 /* Kill socket (only if zapped and orphan)
253 * Must be called on unlocked socket.
255 static void l2cap_sock_kill(struct sock *sk)
/* Bail out unless the socket is both zapped and orphaned. */
257 if (!sk->sk_zapped || sk->sk_socket)
260 BT_DBG("sk %p state %d", sk, sk->sk_state);
262 /* Kill poor orphan */
263 bt_sock_unlink(&l2cap_sk_list, sk);
264 sock_set_flag(sk, SOCK_DEAD);
/*
 * State-dependent close.  A connected SOCK_SEQPACKET channel is shut
 * down gracefully by sending an L2CAP disconnect request, with a timer
 * armed so the BT_DISCONN state cannot hang forever; other visible
 * paths tear the channel down immediately via l2cap_chan_del().
 */
268 static void __l2cap_sock_close(struct sock *sk, int reason)
270 BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
272 switch (sk->sk_state) {
274 l2cap_sock_cleanup_listen(sk);
280 if (sk->sk_type == SOCK_SEQPACKET) {
281 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
282 struct l2cap_disconn_req req;
284 sk->sk_state = BT_DISCONN;
285 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
/* DCID/SCID go on the wire little-endian per the L2CAP spec. */
287 req.dcid = __cpu_to_le16(l2cap_pi(sk)->dcid);
288 req.scid = __cpu_to_le16(l2cap_pi(sk)->scid);
289 l2cap_send_req(conn, L2CAP_DISCONN_REQ, sizeof(req), &req);
291 l2cap_chan_del(sk, reason);
297 l2cap_chan_del(sk, reason);
306 /* Must be called on unlocked socket. */
307 static void l2cap_sock_close(struct sock *sk)
309 l2cap_sock_clear_timer(sk);
311 __l2cap_sock_close(sk, ECONNRESET);
/*
 * Initialize per-socket L2CAP state.  With a parent (incoming channel)
 * the child inherits type, MTUs and link mode; without one, defaults
 * apply.
 */
316 static void l2cap_sock_init(struct sock *sk, struct sock *parent)
318 struct l2cap_pinfo *pi = l2cap_pi(sk);
323 sk->sk_type = parent->sk_type;
324 pi->imtu = l2cap_pi(parent)->imtu;
325 pi->omtu = l2cap_pi(parent)->omtu;
326 pi->link_mode = l2cap_pi(parent)->link_mode;
328 pi->imtu = L2CAP_DEFAULT_MTU;
333 /* Default config options */
334 pi->conf_mtu = L2CAP_DEFAULT_MTU;
335 pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
/*
 * Allocate and initialize a new L2CAP socket: set destructor, default
 * connect timeout, timer, and link it into the global socket list.
 */
338 static struct sock *l2cap_sock_alloc(struct socket *sock, int proto, int prio)
342 sk = bt_sock_alloc(sock, proto, sizeof(struct l2cap_pinfo), prio);
346 sk_set_owner(sk, THIS_MODULE);
348 sk->sk_destruct = l2cap_sock_destruct;
349 sk->sk_sndtimeo = L2CAP_CONN_TIMEOUT;
351 sk->sk_protocol = proto;
352 sk->sk_state = BT_OPEN;
354 l2cap_sock_init_timer(sk);
356 bt_sock_link(&l2cap_sk_list, sk);
/*
 * socket(2) backend: only SEQPACKET, DGRAM and RAW types are supported,
 * and RAW additionally requires CAP_NET_RAW.
 */
360 static int l2cap_sock_create(struct socket *sock, int protocol)
364 BT_DBG("sock %p", sock);
366 sock->state = SS_UNCONNECTED;
368 if (sock->type != SOCK_SEQPACKET && sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
369 return -ESOCKTNOSUPPORT;
371 if (sock->type == SOCK_RAW && !capable(CAP_NET_RAW))
374 sock->ops = &l2cap_sock_ops;
376 sk = l2cap_sock_alloc(sock, protocol, GFP_KERNEL);
380 l2cap_sock_init(sk, NULL);
/*
 * bind(2) backend: record source bdaddr and PSM under the socket-list
 * write lock, rejecting a PSM already bound on the same source address.
 */
384 static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
386 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
387 struct sock *sk = sock->sk;
390 BT_DBG("sk %p, %s %d", sk, batostr(&la->l2_bdaddr), la->l2_psm);
392 if (!addr || addr->sa_family != AF_BLUETOOTH)
/* Only a fresh (BT_OPEN) socket may be bound. */
397 if (sk->sk_state != BT_OPEN) {
402 write_lock_bh(&l2cap_sk_list.lock);
/* PSM 0 skips the duplicate check (no PSM requested yet). */
404 if (la->l2_psm && __l2cap_get_sock_by_addr(la->l2_psm, &la->l2_bdaddr)) {
407 /* Save source address */
408 bacpy(&bt_sk(sk)->src, &la->l2_bdaddr);
409 l2cap_pi(sk)->psm = la->l2_psm;
410 l2cap_pi(sk)->sport = la->l2_psm;
411 sk->sk_state = BT_BOUND;
414 write_unlock_bh(&l2cap_sk_list.lock);
/*
 * Kick off an outgoing connection: route to an HCI device, create/reuse
 * the ACL link, attach the channel, and -- if the link is already up --
 * send the L2CAP connect request (SEQPACKET) or mark connected directly.
 */
421 static int l2cap_do_connect(struct sock *sk)
423 bdaddr_t *src = &bt_sk(sk)->src;
424 bdaddr_t *dst = &bt_sk(sk)->dst;
425 struct l2cap_conn *conn;
426 struct hci_conn *hcon;
427 struct hci_dev *hdev;
430 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst), l2cap_pi(sk)->psm);
432 if (!(hdev = hci_get_route(dst, src)))
433 return -EHOSTUNREACH;
435 hci_dev_lock_bh(hdev);
439 hcon = hci_connect(hdev, ACL_LINK, dst);
443 conn = l2cap_conn_add(hcon, 0);
451 /* Update source addr of the socket */
452 bacpy(src, conn->src);
454 l2cap_chan_add(conn, sk, NULL);
456 sk->sk_state = BT_CONNECT;
457 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
459 if (hcon->state == BT_CONNECTED) {
460 if (sk->sk_type == SOCK_SEQPACKET) {
461 struct l2cap_conn_req req;
462 req.scid = __cpu_to_le16(l2cap_pi(sk)->scid);
463 req.psm = l2cap_pi(sk)->psm;
464 l2cap_send_req(conn, L2CAP_CONN_REQ, sizeof(req), &req);
/* Non-SEQPACKET sockets need no L2CAP handshake. */
466 l2cap_sock_clear_timer(sk);
467 sk->sk_state = BT_CONNECTED;
472 hci_dev_unlock_bh(hdev);
/*
 * connect(2) backend: validate address and PSM, record the destination,
 * start the connection and optionally wait for BT_CONNECTED.
 */
477 static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
479 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
480 struct sock *sk = sock->sk;
487 if (addr->sa_family != AF_BLUETOOTH || alen < sizeof(struct sockaddr_l2)) {
/* Connection-oriented sockets must name a PSM. */
492 if (sk->sk_type == SOCK_SEQPACKET && !la->l2_psm) {
497 switch(sk->sk_state) {
501 /* Already connecting */
505 /* Already connected */
518 /* Set destination address and psm */
519 bacpy(&bt_sk(sk)->dst, &la->l2_bdaddr);
520 l2cap_pi(sk)->psm = la->l2_psm;
522 if ((err = l2cap_do_connect(sk)))
/* O_NONBLOCK yields a zero timeout here (immediate return). */
526 err = bt_sock_wait_state(sk, BT_CONNECTED,
527 sock_sndtimeo(sk, flags & O_NONBLOCK));
/*
 * listen(2) backend: SEQPACKET only.  If no PSM was bound, pick a free
 * one from the dynamic range 0x1001..0x10ff (odd values, stepping by 2).
 */
533 static int l2cap_sock_listen(struct socket *sock, int backlog)
535 struct sock *sk = sock->sk;
538 BT_DBG("sk %p backlog %d", sk, backlog);
542 if (sk->sk_state != BT_BOUND || sock->type != SOCK_SEQPACKET) {
547 if (!l2cap_pi(sk)->psm) {
548 bdaddr_t *src = &bt_sk(sk)->src;
553 write_lock_bh(&l2cap_sk_list.lock);
555 for (psm = 0x1001; psm < 0x1100; psm += 2)
556 if (!__l2cap_get_sock_by_addr(psm, src)) {
/* PSMs are stored in Bluetooth (little-endian) byte order. */
557 l2cap_pi(sk)->psm = htobs(psm);
558 l2cap_pi(sk)->sport = htobs(psm);
563 write_unlock_bh(&l2cap_sk_list.lock);
569 sk->sk_max_ack_backlog = backlog;
570 sk->sk_ack_backlog = 0;
571 sk->sk_state = BT_LISTEN;
/*
 * accept(2) backend: sleep (wake-one, interruptible) until a child
 * socket is ready on the accept queue, honoring the receive timeout,
 * listener state changes and pending signals.
 */
578 static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
580 DECLARE_WAITQUEUE(wait, current);
581 struct sock *sk = sock->sk, *nsk;
587 if (sk->sk_state != BT_LISTEN) {
592 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
594 BT_DBG("sk %p timeo %ld", sk, timeo);
596 /* Wait for an incoming connection. (wake-one). */
597 add_wait_queue_exclusive(sk->sk_sleep, &wait);
598 while (!(nsk = bt_accept_dequeue(sk, newsock))) {
599 set_current_state(TASK_INTERRUPTIBLE);
606 timeo = schedule_timeout(timeo);
/* Listener may have been closed while we slept. */
609 if (sk->sk_state != BT_LISTEN) {
614 if (signal_pending(current)) {
615 err = sock_intr_errno(timeo);
619 set_current_state(TASK_RUNNING);
620 remove_wait_queue(sk->sk_sleep, &wait);
625 newsock->state = SS_CONNECTED;
627 BT_DBG("new socket %p", nsk);
/*
 * getsockname/getpeername backend: peer selects dst, otherwise src;
 * the PSM is reported in both cases.
 */
634 static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
636 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
637 struct sock *sk = sock->sk;
639 BT_DBG("sock %p, sk %p", sock, sk);
641 addr->sa_family = AF_BLUETOOTH;
642 *len = sizeof(struct sockaddr_l2);
645 bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
647 bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
649 la->l2_psm = l2cap_pi(sk)->psm;
/*
 * Build and transmit one L2CAP frame from user iovec data.  The first
 * skb carries the L2CAP header (plus a 2-byte PSM for connectionless
 * DGRAM sockets); the remainder is chained as header-less continuation
 * fragments in frag_list, each bounded by the ACL MTU.
 */
653 static inline int l2cap_do_send(struct sock *sk, struct msghdr *msg, int len)
655 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
656 struct sk_buff *skb, **frag;
657 int err, hlen, count, sent=0;
658 struct l2cap_hdr *lh;
660 BT_DBG("sk %p len %d", sk, len);
662 /* First fragment (with L2CAP header) */
663 if (sk->sk_type == SOCK_DGRAM)
/* Connectionless frames carry the 2-byte PSM after the header. */
664 hlen = L2CAP_HDR_SIZE + 2;
666 hlen = L2CAP_HDR_SIZE;
668 count = min_t(unsigned int, (conn->mtu - hlen), len);
670 skb = bt_skb_send_alloc(sk, hlen + count,
671 msg->msg_flags & MSG_DONTWAIT, &err);
675 /* Create L2CAP header */
676 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
677 lh->cid = __cpu_to_le16(l2cap_pi(sk)->dcid);
/* Header length field covers payload plus the optional PSM bytes. */
678 lh->len = __cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
680 if (sk->sk_type == SOCK_DGRAM)
681 put_unaligned(l2cap_pi(sk)->psm, (u16 *) skb_put(skb, 2));
683 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count)) {
691 /* Continuation fragments (no L2CAP header) */
692 frag = &skb_shinfo(skb)->frag_list;
694 count = min_t(unsigned int, conn->mtu, len);
696 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
700 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count)) {
708 frag = &(*frag)->next;
711 if ((err = hci_send_acl(conn->hcon, skb, 0)) < 0)
/*
 * sendmsg backend: reject MSG_OOB and frames larger than the outgoing
 * MTU, then hand off to l2cap_do_send() when connected.
 */
721 static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
723 struct sock *sk = sock->sk;
726 BT_DBG("sock %p, sk %p", sock, sk);
729 return sock_error(sk);
731 if (msg->msg_flags & MSG_OOB)
734 /* Check outgoing MTU */
735 if (len > l2cap_pi(sk)->omtu)
740 if (sk->sk_state == BT_CONNECTED)
741 err = l2cap_do_send(sk, msg, len);
/*
 * setsockopt backend (visible options): L2CAP_OPTIONS copies imtu/omtu
 * from a user-supplied l2cap_options; the other visible path reads a
 * u32 into link_mode.
 */
749 static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, int optlen)
751 struct sock *sk = sock->sk;
752 struct l2cap_options opts;
/* Clamp the copy to our structure size; short user buffers are OK. */
762 len = min_t(unsigned int, sizeof(opts), optlen);
763 if (copy_from_user((char *)&opts, optval, len)) {
767 l2cap_pi(sk)->imtu = opts.imtu;
768 l2cap_pi(sk)->omtu = opts.omtu;
772 if (get_user(opt, (u32 __user *)optval)) {
777 l2cap_pi(sk)->link_mode = opt;
/*
 * getsockopt backend (visible options): L2CAP_OPTIONS returns the
 * current MTUs and flush timeout; link mode returns a u32; CONNINFO
 * exposes the HCI handle and requires BT_CONNECTED.
 */
789 static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
791 struct sock *sk = sock->sk;
792 struct l2cap_options opts;
793 struct l2cap_conninfo cinfo;
796 if (get_user(len, optlen))
803 opts.imtu = l2cap_pi(sk)->imtu;
804 opts.omtu = l2cap_pi(sk)->omtu;
805 opts.flush_to = l2cap_pi(sk)->flush_to;
/* Copy back no more than the caller asked for. */
807 len = min_t(unsigned int, len, sizeof(opts));
808 if (copy_to_user(optval, (char *)&opts, len))
814 if (put_user(l2cap_pi(sk)->link_mode, (u32 __user *)optval))
819 if (sk->sk_state != BT_CONNECTED) {
824 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
826 len = min_t(unsigned int, len, sizeof(cinfo));
827 if (copy_to_user(optval, (char *)&cinfo, len))
/*
 * shutdown(2) backend: first call closes the socket; with SO_LINGER set
 * it then waits (up to lingertime) for BT_CLOSED.
 */
841 static int l2cap_sock_shutdown(struct socket *sock, int how)
843 struct sock *sk = sock->sk;
846 BT_DBG("sock %p, sk %p", sock, sk);
/* Idempotent: only act on the first shutdown. */
852 if (!sk->sk_shutdown) {
853 sk->sk_shutdown = SHUTDOWN_MASK;
854 l2cap_sock_clear_timer(sk);
855 __l2cap_sock_close(sk, 0);
857 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
858 err = bt_sock_wait_state(sk, BT_CLOSED, sk->sk_lingertime);
/* release (close(2)) backend: full shutdown of the socket. */
864 static int l2cap_sock_release(struct socket *sock)
866 struct sock *sk = sock->sk;
869 BT_DBG("sock %p, sk %p", sock, sk);
874 err = l2cap_sock_shutdown(sock, 2);
881 /* ---- L2CAP channels ---- */
/* Linear scan of a connection's channel list by destination CID. */
882 static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
885 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
886 if (l2cap_pi(s)->dcid == cid)
/* Linear scan of a connection's channel list by source CID. */
892 static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
895 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
896 if (l2cap_pi(s)->scid == cid)
902 /* Find channel with given SCID.
903 * Returns locked socket */
904 static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
908 s = __l2cap_get_chan_by_scid(l, cid);
/* Lock the socket before releasing the list lock. */
909 if (s) bh_lock_sock(s);
910 read_unlock(&l->lock);
/* Pick the first source CID (< 0xffff) not already in use. */
914 static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
918 for (; cid < 0xffff; cid++) {
919 if(!__l2cap_get_chan_by_scid(l, cid))
/* Push sk onto the head of the doubly linked channel list. */
926 static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
931 l2cap_pi(l->head)->prev_c = sk;
933 l2cap_pi(sk)->next_c = l->head;
934 l2cap_pi(sk)->prev_c = NULL;
/* Unlink sk from the channel list under the list write lock. */
938 static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
940 struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;
942 write_lock(&l->lock);
947 l2cap_pi(next)->prev_c = prev;
949 l2cap_pi(prev)->next_c = next;
950 write_unlock(&l->lock);
/*
 * Attach a channel to a connection and assign its CIDs: dynamic SCID
 * for SEQPACKET, fixed CID 0x0002 (connectionless) for DGRAM, fixed
 * CID 0x0001 (signalling) for raw sockets.
 */
955 static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
957 struct l2cap_chan_list *l = &conn->chan_list;
959 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn, l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
961 l2cap_pi(sk)->conn = conn;
963 if (sk->sk_type == SOCK_SEQPACKET) {
964 /* Alloc CID for connection-oriented socket */
965 l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
966 } else if (sk->sk_type == SOCK_DGRAM) {
967 /* Connectionless socket */
968 l2cap_pi(sk)->scid = 0x0002;
969 l2cap_pi(sk)->dcid = 0x0002;
970 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
972 /* Raw socket can send/recv signalling messages only */
973 l2cap_pi(sk)->scid = 0x0001;
974 l2cap_pi(sk)->dcid = 0x0001;
975 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
978 __l2cap_chan_link(l, sk);
/* Incoming channel: queue on the listener's accept queue. */
981 bt_accept_enqueue(parent, sk);
985 * Must be called on the locked socket. */
/*
 * Detach a channel from its connection and mark it closed with the
 * given error.  Drops the hci_conn reference taken when the channel
 * was attached, and notifies either the listening parent or the
 * socket's own waiters.
 */
986 static void l2cap_chan_del(struct sock *sk, int err)
988 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
989 struct sock *parent = bt_sk(sk)->parent;
991 l2cap_sock_clear_timer(sk);
993 BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
996 /* Unlink from channel list */
997 l2cap_chan_unlink(&conn->chan_list, sk);
998 l2cap_pi(sk)->conn = NULL;
999 hci_conn_put(conn->hcon);
1002 sk->sk_state = BT_CLOSED;
1009 parent->sk_data_ready(parent, 0);
1011 sk->sk_state_change(sk);
/*
 * Called when the underlying ACL link comes up: non-SEQPACKET channels
 * become connected immediately; SEQPACKET channels in BT_CONNECT state
 * send their pending L2CAP connect request.
 */
1014 static void l2cap_conn_ready(struct l2cap_conn *conn)
1016 struct l2cap_chan_list *l = &conn->chan_list;
1019 BT_DBG("conn %p", conn);
1021 read_lock(&l->lock);
1023 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
1026 if (sk->sk_type != SOCK_SEQPACKET) {
1027 l2cap_sock_clear_timer(sk);
1028 sk->sk_state = BT_CONNECTED;
1029 sk->sk_state_change(sk);
1030 } else if (sk->sk_state == BT_CONNECT) {
1031 struct l2cap_conn_req req;
1032 req.scid = __cpu_to_le16(l2cap_pi(sk)->scid);
1033 req.psm = l2cap_pi(sk)->psm;
1034 l2cap_send_req(conn, L2CAP_CONN_REQ, sizeof(req), &req);
1040 read_unlock(&l->lock);
/*
 * Channel configuration finished: reset config state and wake whoever
 * is waiting -- the connecting caller (outgoing) or the listening
 * parent (incoming).
 */
1043 static void l2cap_chan_ready(struct sock *sk)
1045 struct sock *parent = bt_sk(sk)->parent;
1047 BT_DBG("sk %p, parent %p", sk, parent);
1049 l2cap_pi(sk)->conf_state = 0;
1050 l2cap_sock_clear_timer(sk);
1053 /* Outgoing channel.
1054 * Wake up socket sleeping on connect.
1056 sk->sk_state = BT_CONNECTED;
1057 sk->sk_state_change(sk);
1059 /* Incoming channel.
1060 * Wake up socket sleeping on accept.
1062 parent->sk_data_ready(parent, 0);
1066 /* Copy frame to all raw sockets on that connection */
1067 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1069 struct l2cap_chan_list *l = &conn->chan_list;
1070 struct sk_buff *nskb;
1073 BT_DBG("conn %p", conn);
1075 read_lock(&l->lock);
1076 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
1077 if (sk->sk_type != SOCK_RAW)
1080 /* Don't send frame to the socket it came from */
/* Each raw listener gets its own clone; GFP_ATOMIC in this context. */
1084 if (!(nskb = skb_clone(skb, GFP_ATOMIC)))
1087 if (sock_queue_rcv_skb(sk, nskb))
1090 read_unlock(&l->lock);
1093 /* ---- L2CAP signalling commands ---- */
/*
 * Allocate the next command identifier under conn->lock.  Identifiers
 * wrap within 1..199; 200..254 are reserved for userspace tools.
 */
1094 static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
1098 /* Get next available identificator.
1099 * 1 - 199 are used by kernel.
1100 * 200 - 254 are used by utilities like l2ping, etc
1103 spin_lock(&conn->lock);
1105 if (++conn->tx_ident > 199)
1108 id = conn->tx_ident;
1110 spin_unlock(&conn->lock);
/*
 * Build a signalling-channel (CID 0x0001) skb: L2CAP header + command
 * header + payload, fragmenting the payload into frag_list chunks
 * bounded by the connection MTU.
 */
1115 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
1116 u8 code, u8 ident, u16 dlen, void *data)
1118 struct sk_buff *skb, **frag;
1119 struct l2cap_cmd_hdr *cmd;
1120 struct l2cap_hdr *lh;
1123 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d", conn, code, ident, dlen);
1125 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
1126 count = min_t(unsigned int, conn->mtu, len);
1128 skb = bt_skb_alloc(count, GFP_ATOMIC);
1132 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1133 lh->len = __cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
/* All signalling traffic goes on the fixed signalling CID 0x0001. */
1134 lh->cid = __cpu_to_le16(0x0001);
1136 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
1139 cmd->len = __cpu_to_le16(dlen);
1142 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
1143 memcpy(skb_put(skb, count), data, count);
1149 /* Continuation fragments (no L2CAP header) */
1150 frag = &skb_shinfo(skb)->frag_list;
1152 count = min_t(unsigned int, conn->mtu, len);
1154 *frag = bt_skb_alloc(count, GFP_ATOMIC);
1158 memcpy(skb_put(*frag, count), data, count);
1163 frag = &(*frag)->next;
/* Send a signalling request using a freshly allocated identifier. */
1173 static int l2cap_send_req(struct l2cap_conn *conn, u8 code, u16 len, void *data)
1175 u8 ident = l2cap_get_ident(conn);
1176 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
1178 BT_DBG("code 0x%2.2x", code);
1182 return hci_send_acl(conn->hcon, skb, 0);
/* Send a signalling response echoing the requester's identifier. */
1185 static int l2cap_send_rsp(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
1187 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
1189 BT_DBG("code 0x%2.2x", code);
1193 return hci_send_acl(conn->hcon, skb, 0);
/*
 * Decode one TLV configuration option at *ptr into type/olen/val,
 * converting 1/2/4-byte values from little-endian; larger options are
 * returned as a pointer to the raw bytes.  Returns the consumed length.
 */
1196 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
1198 struct l2cap_conf_opt *opt = *ptr;
1201 len = L2CAP_CONF_OPT_SIZE + opt->len;
1209 *val = *((u8 *) opt->val);
1213 *val = __le16_to_cpu(*((u16 *)opt->val));
1217 *val = __le32_to_cpu(*((u32 *)opt->val));
/* Variable-length option: hand back a pointer, not a value. */
1221 *val = (unsigned long) opt->val;
1225 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
/*
 * Walk a peer's configuration request, recording the options we
 * understand (MTU, flush timeout) into the socket's pending config.
 */
1229 static inline void l2cap_parse_conf_req(struct sock *sk, void *data, int len)
1231 int type, hint, olen;
1235 BT_DBG("sk %p len %d", sk, len);
1237 while (len >= L2CAP_CONF_OPT_SIZE) {
1238 len -= l2cap_get_conf_opt(&ptr, &type, &olen, &val);
1244 case L2CAP_CONF_MTU:
1245 l2cap_pi(sk)->conf_mtu = val;
1248 case L2CAP_CONF_FLUSH_TO:
1249 l2cap_pi(sk)->flush_to = val;
1252 case L2CAP_CONF_QOS:
1259 /* FIXME: Reject unknown option */
/*
 * Append one TLV option at *ptr, encoding 1/2/4-byte values as
 * little-endian and memcpy'ing anything larger, then advance *ptr.
 */
1265 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
1267 struct l2cap_conf_opt *opt = *ptr;
1269 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
1276 *((u8 *) opt->val) = val;
1280 *((u16 *) opt->val) = __cpu_to_le16(val);
1284 *((u32 *) opt->val) = __cpu_to_le32(val);
1288 memcpy(opt->val, (void *) val, len);
1292 *ptr += L2CAP_CONF_OPT_SIZE + len;
/*
 * Build our configuration request: only a non-default incoming MTU is
 * advertised (flush timeout intentionally left out, see FIXME).
 */
1295 static int l2cap_build_conf_req(struct sock *sk, void *data)
1297 struct l2cap_pinfo *pi = l2cap_pi(sk);
1298 struct l2cap_conf_req *req = data;
1299 void *ptr = req->data;
1301 BT_DBG("sk %p", sk);
1303 if (pi->imtu != L2CAP_DEFAULT_MTU)
1304 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
1306 /* FIXME. Need actual value of the flush timeout */
1307 //if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
1308 // l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);
1310 req->dcid = __cpu_to_le16(pi->dcid);
1311 req->flags = __cpu_to_le16(0);
/*
 * Evaluate the peer's proposed options: an MTU below our outgoing MTU
 * is rejected (UNACCEPT, proposing ours back); otherwise we adopt it.
 */
1316 static inline int l2cap_conf_output(struct sock *sk, void **ptr)
1318 struct l2cap_pinfo *pi = l2cap_pi(sk);
1321 /* Configure output options and let the other side know
1322 * which ones we don't like. */
1323 if (pi->conf_mtu < pi->omtu) {
1324 l2cap_add_conf_opt(ptr, L2CAP_CONF_MTU, 2, pi->omtu);
1325 result = L2CAP_CONF_UNACCEPT;
1327 pi->omtu = pi->conf_mtu;
1330 BT_DBG("sk %p result %d", sk, result);
/*
 * Build a configuration response.  With result != NULL this is the
 * final (complete) response and l2cap_conf_output() fills in verdicts;
 * with NULL it is an empty response to an incomplete request.
 */
1334 static int l2cap_build_conf_rsp(struct sock *sk, void *data, int *result)
1336 struct l2cap_conf_rsp *rsp = data;
1337 void *ptr = rsp->data;
1340 BT_DBG("sk %p complete %d", sk, result ? 1 : 0);
1343 *result = l2cap_conf_output(sk, &ptr);
1347 rsp->scid = __cpu_to_le16(l2cap_pi(sk)->dcid);
1348 rsp->result = __cpu_to_le16(result ? *result : 0);
1349 rsp->flags = __cpu_to_le16(flags);
/*
 * Handle an incoming L2CAP_CONN_REQ.  Finds a listener for the PSM,
 * checks backlog and duplicate DCIDs, creates the child socket, and
 * answers with success, pending (when link-level security must finish
 * first), or an error result.
 */
1354 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1356 struct l2cap_chan_list *list = &conn->chan_list;
1357 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
1358 struct l2cap_conn_rsp rsp;
1359 struct sock *sk, *parent;
1360 int result = 0, status = 0;
1362 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
1365 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
1367 /* Check if we have socket listening on psm */
1368 parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
1370 result = L2CAP_CR_BAD_PSM;
1374 result = L2CAP_CR_NO_MEM;
1376 /* Check for backlog size */
1377 if (parent->sk_ack_backlog > parent->sk_max_ack_backlog) {
1378 BT_DBG("backlog full %d", parent->sk_ack_backlog);
1382 sk = l2cap_sock_alloc(NULL, BTPROTO_L2CAP, GFP_ATOMIC);
1386 write_lock(&list->lock);
1388 /* Check if we already have channel with that dcid */
1389 if (__l2cap_get_chan_by_dcid(list, scid)) {
1390 write_unlock(&list->lock);
1392 l2cap_sock_kill(sk);
/* Channel holds a reference on the ACL link until l2cap_chan_del(). */
1396 hci_conn_hold(conn->hcon);
1398 l2cap_sock_init(sk, parent);
1399 bacpy(&bt_sk(sk)->src, conn->src);
1400 bacpy(&bt_sk(sk)->dst, conn->dst);
1401 l2cap_pi(sk)->psm = psm;
1402 l2cap_pi(sk)->dcid = scid;
1404 __l2cap_chan_add(conn, sk, parent);
1405 dcid = l2cap_pi(sk)->scid;
1407 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
1409 /* Service level security */
/* Default to "pending" while authentication/encryption completes. */
1410 result = L2CAP_CR_PEND;
1411 status = L2CAP_CS_AUTHEN_PEND;
1412 sk->sk_state = BT_CONNECT2;
1413 l2cap_pi(sk)->ident = cmd->ident;
1415 if (l2cap_pi(sk)->link_mode & L2CAP_LM_ENCRYPT) {
1416 if (!hci_conn_encrypt(conn->hcon))
1418 } else if (l2cap_pi(sk)->link_mode & L2CAP_LM_AUTH) {
1419 if (!hci_conn_auth(conn->hcon))
/* No security required: accept immediately and go to BT_CONFIG. */
1423 sk->sk_state = BT_CONFIG;
1424 result = status = 0;
1427 write_unlock(&list->lock);
1430 bh_unlock_sock(parent);
1433 rsp.scid = __cpu_to_le16(scid);
1434 rsp.dcid = __cpu_to_le16(dcid);
1435 rsp.result = __cpu_to_le16(result);
1436 rsp.status = __cpu_to_le16(status);
1437 l2cap_send_rsp(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/*
 * Handle an L2CAP_CONN_RSP to our connect request: on success store
 * the peer's DCID, enter BT_CONFIG and send our configuration request;
 * the visible failure path deletes the channel with ECONNREFUSED.
 */
1441 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1443 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
1444 u16 scid, dcid, result, status;
1448 scid = __le16_to_cpu(rsp->scid);
1449 dcid = __le16_to_cpu(rsp->dcid);
1450 result = __le16_to_cpu(rsp->result);
1451 status = __le16_to_cpu(rsp->status);
1453 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
1455 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
1459 case L2CAP_CR_SUCCESS:
1460 sk->sk_state = BT_CONFIG;
1461 l2cap_pi(sk)->dcid = dcid;
1462 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
1464 l2cap_send_req(conn, L2CAP_CONF_REQ, l2cap_build_conf_req(sk, req), req);
1471 l2cap_chan_del(sk, ECONNREFUSED);
/*
 * Handle an L2CAP_CONF_REQ from the peer: parse its options, respond
 * (empty response while more fragments are pending, full response when
 * complete), and advance to BT_CONNECTED once both config directions
 * are done; otherwise send our own request if not yet sent.
 */
1479 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1481 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
1487 dcid = __le16_to_cpu(req->dcid);
1488 flags = __le16_to_cpu(req->flags);
1490 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
1492 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid)))
1495 l2cap_parse_conf_req(sk, req->data, cmd->len - sizeof(*req));
/* Continuation flag (bit 0) => more option fragments will follow. */
1497 if (flags & 0x0001) {
1498 /* Incomplete config. Send empty response. */
1499 l2cap_send_rsp(conn, cmd->ident, L2CAP_CONF_RSP, l2cap_build_conf_rsp(sk, rsp, NULL), rsp);
1503 /* Complete config. */
1504 l2cap_send_rsp(conn, cmd->ident, L2CAP_CONF_RSP, l2cap_build_conf_rsp(sk, rsp, &result), rsp);
1509 /* Output config done */
1510 l2cap_pi(sk)->conf_state |= L2CAP_CONF_OUTPUT_DONE;
1512 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
1513 sk->sk_state = BT_CONNECTED;
1514 l2cap_chan_ready(sk);
1515 } else if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
1517 l2cap_send_req(conn, L2CAP_CONF_REQ, l2cap_build_conf_req(sk, req), req);
/*
 * Handle an L2CAP_CONF_RSP to our config request.  UNACCEPT triggers a
 * bounded resend (see in-code rationale); persistent failure sends a
 * disconnect request and arms a 5 s timer.  On success, once both
 * config directions are done the channel becomes BT_CONNECTED.
 */
1525 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1527 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
1528 u16 scid, flags, result;
1532 scid = __le16_to_cpu(rsp->scid);
1533 flags = __le16_to_cpu(rsp->flags);
1534 result = __le16_to_cpu(rsp->result);
1536 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x", scid, flags, result);
1538 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
1542 case L2CAP_CONF_SUCCESS:
1545 case L2CAP_CONF_UNACCEPT:
1546 if (++l2cap_pi(sk)->conf_retry < L2CAP_CONF_MAX_RETRIES) {
1548 /* It does not make sense to adjust L2CAP parameters
1549 * that are currently defined in the spec. We simply
1550 * resend config request that we sent earlier. It is
1551 * stupid, but it helps qualification testing which
1552 * expects at least some response from us. */
1553 l2cap_send_req(conn, L2CAP_CONF_REQ,
1554 l2cap_build_conf_req(sk, req), req);
/* Give the peer 5 s to answer the disconnect before the timer fires. */
1559 sk->sk_state = BT_DISCONN;
1560 sk->sk_err = ECONNRESET;
1561 l2cap_sock_set_timer(sk, HZ * 5);
1563 struct l2cap_disconn_req req;
1564 req.dcid = __cpu_to_le16(l2cap_pi(sk)->dcid);
1565 req.scid = __cpu_to_le16(l2cap_pi(sk)->scid);
1566 l2cap_send_req(conn, L2CAP_DISCONN_REQ, sizeof(req), &req);
1574 /* Input config done */
1575 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
1577 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
1578 sk->sk_state = BT_CONNECTED;
1579 l2cap_chan_ready(sk);
/*
 * Handle an L2CAP_DISCONN_REQ: acknowledge with a disconnect response,
 * then tear the channel down (ECONNRESET) and kill the orphan socket.
 */
1587 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1589 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
1590 struct l2cap_disconn_rsp rsp;
1594 scid = __le16_to_cpu(req->scid);
1595 dcid = __le16_to_cpu(req->dcid);
1597 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
/* The peer's DCID is our SCID -- look the channel up by it. */
1599 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid)))
1602 rsp.dcid = __cpu_to_le16(l2cap_pi(sk)->scid);
1603 rsp.scid = __cpu_to_le16(l2cap_pi(sk)->dcid);
1604 l2cap_send_rsp(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
1606 sk->sk_shutdown = SHUTDOWN_MASK;
1608 l2cap_chan_del(sk, ECONNRESET);
1611 l2cap_sock_kill(sk);
/*
 * Handle an L2CAP_DISCONN_RSP to our disconnect request: the channel
 * is removed without error and the orphan socket is killed.
 */
1615 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1617 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
1621 scid = __le16_to_cpu(rsp->scid);
1622 dcid = __le16_to_cpu(rsp->dcid);
1624 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
1626 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
1628 l2cap_chan_del(sk, 0);
1631 l2cap_sock_kill(sk);
/* Handle an L2CAP_INFO_REQ: no info types supported; answer NOTSUPP. */
1635 static inline int l2cap_info_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1637 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
1638 struct l2cap_info_rsp rsp;
1641 type = __le16_to_cpu(req->type);
1643 BT_DBG("type 0x%4.4x", type);
1645 rsp.type = __cpu_to_le16(type);
1646 rsp.result = __cpu_to_le16(L2CAP_IR_NOTSUPP);
1647 l2cap_send_rsp(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp), &rsp);
/* Handle an L2CAP_INFO_RSP: decoded and logged only in this view. */
1651 static inline int l2cap_info_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1653 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
1656 type = __le16_to_cpu(rsp->type);
1657 result = __le16_to_cpu(rsp->result);
1659 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
/* Demultiplex the L2CAP signaling channel (CID 0x0001): iterate over the
 * signaling commands packed into one frame, dispatch each by opcode, and
 * reject anything malformed or unhandled with L2CAP_COMMAND_REJ.
 * NOTE(review): the switch header, per-case breaks, locals and the loop
 * tail are not visible in this listing. */
1664 static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
1666 u8 *data = skb->data;
1668 struct l2cap_cmd_hdr cmd;
/* Give raw (SOCK_RAW) listeners a copy of the signaling traffic first. */
1671 l2cap_raw_recv(conn, skb);
/* Walk command-by-command; each is a fixed header plus cmd.len payload. */
1673 while (len >= L2CAP_CMD_HDR_SIZE) {
1674 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
1675 data += L2CAP_CMD_HDR_SIZE;
1676 len -= L2CAP_CMD_HDR_SIZE;
1678 cmd.len = __le16_to_cpu(cmd.len);
1680 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd.len, cmd.ident);
/* A payload longer than what remains, or a zero ident, means a
 * corrupted frame — stop parsing. */
1682 if (cmd.len > len || !cmd.ident) {
1683 BT_DBG("corrupted command");
1688 case L2CAP_COMMAND_REJ:
1689 /* FIXME: We should process this */
1692 case L2CAP_CONN_REQ:
1693 err = l2cap_connect_req(conn, &cmd, data);
1696 case L2CAP_CONN_RSP:
1697 err = l2cap_connect_rsp(conn, &cmd, data);
1700 case L2CAP_CONF_REQ:
1701 err = l2cap_config_req(conn, &cmd, data);
1704 case L2CAP_CONF_RSP:
1705 err = l2cap_config_rsp(conn, &cmd, data);
1708 case L2CAP_DISCONN_REQ:
1709 err = l2cap_disconnect_req(conn, &cmd, data);
1712 case L2CAP_DISCONN_RSP:
1713 err = l2cap_disconnect_rsp(conn, &cmd, data);
/* Echo Request is answered inline by reflecting the payload back. */
1716 case L2CAP_ECHO_REQ:
1717 l2cap_send_rsp(conn, cmd.ident, L2CAP_ECHO_RSP, cmd.len, data);
1720 case L2CAP_ECHO_RSP:
1723 case L2CAP_INFO_REQ:
1724 err = l2cap_info_req(conn, &cmd, data);
1727 case L2CAP_INFO_RSP:
1728 err = l2cap_info_rsp(conn, &cmd, data);
1732 BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
/* Any handler error produces a Command Reject back to the peer. */
1738 struct l2cap_cmd_rej rej;
1739 BT_DBG("error %d", err);
1741 /* FIXME: Map err to a valid reason */
1742 rej.reason = __cpu_to_le16(0);
1743 l2cap_send_rsp(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Deliver a connection-oriented data frame to the socket owning CID `cid`.
 * Drops the frame if the channel is unknown, not connected, or the frame
 * exceeds the negotiated incoming MTU.
 * NOTE(review): goto targets/drop path lines are not visible in this
 * listing. */
1753 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
1757 sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
1759 BT_DBG("unknown cid 0x%4.4x", cid);
1763 BT_DBG("sk %p, len %d", sk, skb->len);
1765 if (sk->sk_state != BT_CONNECTED)
/* Enforce the incoming MTU we advertised during configuration. */
1768 if (l2cap_pi(sk)->imtu < skb->len)
1771 /* If socket recv buffers overflows we drop data here
1772  * which is *bad* because L2CAP has to be reliable.
1773  * But we don't have any other choice. L2CAP doesn't
1774  * provide flow control mechanism. */
1776 if (!sock_queue_rcv_skb(sk, skb))
/* sk is locked by the channel lookup above; release it on all paths. */
1783 if (sk) bh_unlock_sock(sk);
/* Deliver a connectionless (CID 0x0002) frame to a socket bound to `psm`
 * on the local address. BT_BOUND sockets may receive here too, since no
 * connection setup happens on the connectionless channel. */
1787 static inline int l2cap_conless_channel(struct l2cap_conn *conn, u16 psm, struct sk_buff *skb)
1791 sk = l2cap_get_sock_by_psm(0, psm, conn->src);
1795 BT_DBG("sk %p, len %d", sk, skb->len);
1797 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
1800 if (l2cap_pi(sk)->imtu < skb->len)
1803 if (!sock_queue_rcv_skb(sk, skb))
1810 if (sk) bh_unlock_sock(sk);
/* Route one complete, reassembled L2CAP frame by its channel ID:
 * signaling (0x0001), connectionless (0x0002, payload starts with a PSM),
 * or a connection-oriented data channel. */
1814 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
1816 struct l2cap_hdr *lh = (void *) skb->data;
/* Strip the basic L2CAP header; cid/len stay valid via lh. */
1819 skb_pull(skb, L2CAP_HDR_SIZE);
1820 cid = __le16_to_cpu(lh->cid);
1821 len = __le16_to_cpu(lh->len);
1823 BT_DBG("len %d, cid 0x%4.4x", len, cid);
1827 l2cap_sig_channel(conn, skb);
/* PSM is not guaranteed to be aligned after the header pull. */
1831 psm = get_unaligned((u16 *) skb->data);
1833 l2cap_conless_channel(conn, psm, skb);
1837 l2cap_data_channel(conn, cid, skb);
1842 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI callback: an incoming ACL connection is being indicated. Scan all
 * listening L2CAP sockets and merge their link_mode requirements into an
 * accept mask; sockets bound to this exact adapter address (lm1) take
 * precedence over BDADDR_ANY wildcards (lm2). */
1844 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1846 int exact = 0, lm1 = 0, lm2 = 0;
1847 register struct sock *sk;
1848 struct hlist_node *node;
/* L2CAP runs only over ACL links; ignore SCO indications. */
1850 if (type != ACL_LINK)
1853 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
1855 /* Find listening sockets and check their link_mode */
1856 read_lock(&l2cap_sk_list.lock);
1857 sk_for_each(sk, node, &l2cap_sk_list.head) {
1858 if (sk->sk_state != BT_LISTEN)
1861 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
1862 lm1 |= (HCI_LM_ACCEPT | l2cap_pi(sk)->link_mode);
1864 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
1865 lm2 |= (HCI_LM_ACCEPT | l2cap_pi(sk)->link_mode);
1867 read_unlock(&l2cap_sk_list.lock);
1869 return exact ? lm1 : lm2;
/* HCI callback: ACL connection setup finished. On success, create (or
 * attach to) the L2CAP connection object and kick pending channels; on
 * failure, tear everything down with the mapped error code. */
1872 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
1874 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
1876 if (hcon->type != ACL_LINK)
1880 struct l2cap_conn *conn;
1882 conn = l2cap_conn_add(hcon, status);
1884 l2cap_conn_ready(conn);
/* Failure path: bt_err() maps the HCI status to an errno value. */
1886 l2cap_conn_del(hcon, bt_err(status));
/* HCI callback: the underlying ACL link went down. Destroy the L2CAP
 * connection and all of its channels with the mapped errno. */
1891 static int l2cap_disconn_ind(struct hci_conn *hcon, u8 reason)
1893 BT_DBG("hcon %p reason %d", hcon, reason);
1895 if (hcon->type != ACL_LINK)
1898 l2cap_conn_del(hcon, bt_err(reason));
/* HCI callback: authentication completed for the link. Walk all channels
 * parked in BT_CONNECT2 (awaiting security) and send the deferred
 * Connection Response: success moves the channel to BT_CONFIG, failure
 * answers L2CAP_CR_SEC_BLOCK and schedules a quick disconnect.
 * Channels that also require encryption are skipped here — they are
 * resolved in l2cap_encrypt_cfm instead.
 * NOTE(review): the status check and per-socket lock lines are not
 * visible in this listing. */
1902 static int l2cap_auth_cfm(struct hci_conn *hcon, u8 status)
1904 struct l2cap_chan_list *l;
1905 struct l2cap_conn *conn;
1906 struct l2cap_conn_rsp rsp;
1910 if (!(conn = hcon->l2cap_data))
1912 l = &conn->chan_list;
1914 BT_DBG("conn %p", conn);
1916 read_lock(&l->lock);
1918 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
1921 if (sk->sk_state != BT_CONNECT2 ||
1922     (l2cap_pi(sk)->link_mode & L2CAP_LM_ENCRYPT)) {
1928 sk->sk_state = BT_CONFIG;
/* Security failure: block the connection and give the peer ~100ms
 * before the channel is torn down. */
1931 sk->sk_state = BT_DISCONN;
1932 l2cap_sock_set_timer(sk, HZ/10);
1933 result = L2CAP_CR_SEC_BLOCK;
1936 rsp.scid = __cpu_to_le16(l2cap_pi(sk)->dcid);
1937 rsp.dcid = __cpu_to_le16(l2cap_pi(sk)->scid);
1938 rsp.result = __cpu_to_le16(result);
1939 rsp.status = __cpu_to_le16(0);
/* The ident saved at connect-request time ties the response to it. */
1940 l2cap_send_rsp(conn, l2cap_pi(sk)->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
1945 read_unlock(&l->lock);
/* HCI callback: encryption state changed on the link. Mirrors
 * l2cap_auth_cfm, but applies to every channel still in BT_CONNECT2 —
 * this is where encryption-requiring channels (skipped by the auth
 * handler) finally get their deferred Connection Response.
 * NOTE(review): the status check and per-socket lock lines are not
 * visible in this listing. */
1949 static int l2cap_encrypt_cfm(struct hci_conn *hcon, u8 status)
1951 struct l2cap_chan_list *l;
1952 struct l2cap_conn *conn;
1953 struct l2cap_conn_rsp rsp;
1957 if (!(conn = hcon->l2cap_data))
1959 l = &conn->chan_list;
1961 BT_DBG("conn %p", conn);
1963 read_lock(&l->lock);
1965 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
1968 if (sk->sk_state != BT_CONNECT2) {
1974 sk->sk_state = BT_CONFIG;
1977 sk->sk_state = BT_DISCONN;
1978 l2cap_sock_set_timer(sk, HZ/10);
1979 result = L2CAP_CR_SEC_BLOCK;
1982 rsp.scid = __cpu_to_le16(l2cap_pi(sk)->dcid);
1983 rsp.dcid = __cpu_to_le16(l2cap_pi(sk)->scid);
1984 rsp.result = __cpu_to_le16(result);
1985 rsp.status = __cpu_to_le16(0);
1986 l2cap_send_rsp(conn, l2cap_pi(sk)->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
1991 read_unlock(&l->lock);
/* HCI callback: one ACL fragment arrived. Reassemble L2CAP frames from
 * ACL_START/continuation fragments into conn->rx_skb, tracking how many
 * bytes are still expected in conn->rx_len, and hand each complete frame
 * to l2cap_recv_frame().
 * NOTE(review): several branch/cleanup lines (drop paths, the short-start
 * handling, final kfree_skb/return) are not visible in this listing. */
1995 static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
1997 struct l2cap_conn *conn = hcon->l2cap_data;
/* Lazily create the connection object on first inbound data. */
1999 if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
2002 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
2004 if (flags & ACL_START) {
2005 struct l2cap_hdr *hdr;
/* A new start while reassembly is in progress means the previous
 * frame was truncated — discard the partial buffer. */
2009 BT_ERR("Unexpected start frame (len %d)", skb->len);
2010 kfree_skb(conn->rx_skb);
2011 conn->rx_skb = NULL;
2016 BT_ERR("Frame is too short (len %d)", skb->len);
2020 hdr = (struct l2cap_hdr *) skb->data;
/* Total frame size = payload length from the header + header itself. */
2021 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
2023 if (len == skb->len) {
2024 /* Complete frame received */
2025 l2cap_recv_frame(conn, skb);
2029 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
2031 if (skb->len > len) {
2032 BT_ERR("Frame is too long (len %d, expected len %d)",
2037 /* Allocate skb for the complete frame (with header) */
2038 if (!(conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC)))
2041 memcpy(skb_put(conn->rx_skb, skb->len), skb->data, skb->len);
2042 conn->rx_len = len - skb->len;
2044 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
/* Continuation with nothing pending is a protocol violation. */
2046 if (!conn->rx_len) {
2047 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
2051 if (skb->len > conn->rx_len) {
2052 BT_ERR("Fragment is too long (len %d, expected %d)",
2053 skb->len, conn->rx_len);
2054 kfree_skb(conn->rx_skb);
2055 conn->rx_skb = NULL;
2060 memcpy(skb_put(conn->rx_skb, skb->len), skb->data, skb->len);
2061 conn->rx_len -= skb->len;
2063 if (!conn->rx_len) {
2064 /* Complete frame received */
2065 l2cap_recv_frame(conn, conn->rx_skb);
2066 conn->rx_skb = NULL;
2075 /* ---- Proc fs support ---- */
2076 #ifdef CONFIG_PROC_FS
/* seq_file .start: take the socket-list lock (held until l2cap_seq_stop)
 * and walk to the *pos-th socket in l2cap_sk_list.
 * NOTE(review): the position-matching and return lines are not visible in
 * this listing. */
2077 static void *l2cap_seq_start(struct seq_file *seq, loff_t *pos)
2080 struct hlist_node *node;
2083 read_lock_bh(&l2cap_sk_list.lock);
2085 sk_for_each(sk, node, &l2cap_sk_list.head)
/* seq_file .next: advance to the following socket in the list. */
2093 static void *l2cap_seq_next(struct seq_file *seq, void *e, loff_t *pos)
/* seq_file .stop: release the lock taken in l2cap_seq_start. */
2099 static void l2cap_seq_stop(struct seq_file *seq, void *e)
2101 read_unlock_bh(&l2cap_sk_list.lock);
/* seq_file .show: print one socket as a single /proc line:
 * src dst state psm scid dcid imtu omtu link_mode. */
2104 static int l2cap_seq_show(struct seq_file *seq, void *e)
2106 struct sock *sk = e;
2107 struct l2cap_pinfo *pi = l2cap_pi(sk);
2109 seq_printf(seq, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d 0x%x\n",
2110 batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst),
2111 sk->sk_state, pi->psm, pi->scid, pi->dcid, pi->imtu,
2112 pi->omtu, pi->link_mode);
/* seq_file iterator callbacks for /proc/bluetooth/l2cap. */
2116 static struct seq_operations l2cap_seq_ops = {
2117 .start = l2cap_seq_start,
2118 .next = l2cap_seq_next,
2119 .stop = l2cap_seq_stop,
2120 .show = l2cap_seq_show
/* open(2) handler for the proc entry: bind the file to l2cap_seq_ops. */
2123 static int l2cap_seq_open(struct inode *inode, struct file *file)
2125 return seq_open(file, &l2cap_seq_ops);
/* file_operations for /proc/bluetooth/l2cap (standard seq_file wiring).
 * NOTE(review): the .read line is not visible in this listing. */
2128 static struct file_operations l2cap_seq_fops = {
2129 .owner = THIS_MODULE,
2130 .open = l2cap_seq_open,
2132 .llseek = seq_lseek,
2133 .release = seq_release,
/* Create the read-only /proc/bluetooth/l2cap entry under proc_bt. */
2138 static int __init l2cap_proc_init(void)
2138 struct proc_dir_entry *p = create_proc_entry("l2cap", S_IRUGO, proc_bt);
2141 p->owner = THIS_MODULE;
2142 p->proc_fops = &l2cap_seq_fops;
/* Remove the proc entry created by l2cap_proc_init. */
2148 static void __exit l2cap_proc_cleanup(void)
2148 remove_proc_entry("l2cap", proc_bt);
2151 #else /* CONFIG_PROC_FS */
/* No-op stubs used when the kernel is built without CONFIG_PROC_FS. */
2153 static int __init l2cap_proc_init(void)
2158 static void __exit l2cap_proc_cleanup(void)
2162 #endif /* CONFIG_PROC_FS */
/* proto_ops for PF_BLUETOOTH/BTPROTO_L2CAP sockets. Operations without a
 * meaningful L2CAP implementation fall back to bt_sock_* helpers or the
 * sock_no_* stubs. */
2164 static struct proto_ops l2cap_sock_ops = {
2165 .family = PF_BLUETOOTH,
2166 .owner = THIS_MODULE,
2167 .release = l2cap_sock_release,
2168 .bind = l2cap_sock_bind,
2169 .connect = l2cap_sock_connect,
2170 .listen = l2cap_sock_listen,
2171 .accept = l2cap_sock_accept,
2172 .getname = l2cap_sock_getname,
2173 .sendmsg = l2cap_sock_sendmsg,
2174 .recvmsg = bt_sock_recvmsg,
2175 .poll = bt_sock_poll,
2176 .mmap = sock_no_mmap,
2177 .socketpair = sock_no_socketpair,
2178 .ioctl = sock_no_ioctl,
2179 .shutdown = l2cap_sock_shutdown,
2180 .setsockopt = l2cap_sock_setsockopt,
2181 .getsockopt = l2cap_sock_getsockopt
/* Socket-family hook: how the core creates an L2CAP socket. */
2184 static struct net_proto_family l2cap_sock_family_ops = {
2185 .family = PF_BLUETOOTH,
2186 .owner = THIS_MODULE,
2187 .create = l2cap_sock_create,
/* Registration record binding L2CAP's event/data callbacks into the HCI
 * core. NOTE(review): the .name member line is not visible in this
 * listing. */
2190 static struct hci_proto l2cap_hci_proto = {
2192 .id = HCI_PROTO_L2CAP,
2193 .connect_ind = l2cap_connect_ind,
2194 .connect_cfm = l2cap_connect_cfm,
2195 .disconn_ind = l2cap_disconn_ind,
2196 .auth_cfm = l2cap_auth_cfm,
2197 .encrypt_cfm = l2cap_encrypt_cfm,
2198 .recv_acldata = l2cap_recv_acldata
/* Module init: register the socket family first, then the HCI protocol.
 * NOTE(review): the error-unwind path (unregistering the socket family
 * if protocol registration fails) and the proc init call are not visible
 * in this listing. */
2201 static int __init l2cap_init(void)
2205 if ((err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops))) {
2206 BT_ERR("L2CAP socket registration failed");
2210 if ((err = hci_register_proto(&l2cap_hci_proto))) {
2211 BT_ERR("L2CAP protocol registration failed");
2217 BT_INFO("L2CAP ver %s", VERSION);
2218 BT_INFO("L2CAP socket layer initialized");
/* Module exit: tear down in reverse order of l2cap_init — proc entry,
 * socket family, HCI protocol. */
2223 static void __exit l2cap_exit(void)
2225 l2cap_proc_cleanup();
2227 /* Unregister socket and protocol */
2228 if (bt_sock_unregister(BTPROTO_L2CAP))
2229 BT_ERR("L2CAP socket unregistration failed");
2231 if (hci_unregister_proto(&l2cap_hci_proto))
2232 BT_ERR("L2CAP protocol unregistration failed");
2235 void l2cap_load(void)
2237 /* Dummy function to trigger automatic L2CAP module loading by
2238  * other modules that use L2CAP sockets but do not use any other
2239  * symbols from it. */
2242 EXPORT_SYMBOL(l2cap_load);
/* Module metadata and entry points. The bt-proto-0 alias lets the socket
 * core demand-load this module for BTPROTO_L2CAP. */
2244 module_init(l2cap_init);
2245 module_exit(l2cap_exit);
2247 MODULE_AUTHOR("Maxim Krasnyansky <maxk@qualcomm.com>, Marcel Holtmann <marcel@holtmann.org>");
2248 MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
2249 MODULE_VERSION(VERSION);
2250 MODULE_LICENSE("GPL");
2251 MODULE_ALIAS("bt-proto-0");