2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth L2CAP core and sockets. */
27 #include <linux/config.h>
28 #include <linux/module.h>
30 #include <linux/types.h>
31 #include <linux/errno.h>
32 #include <linux/kernel.h>
33 #include <linux/major.h>
34 #include <linux/sched.h>
35 #include <linux/slab.h>
36 #include <linux/poll.h>
37 #include <linux/fcntl.h>
38 #include <linux/init.h>
39 #include <linux/interrupt.h>
40 #include <linux/socket.h>
41 #include <linux/skbuff.h>
42 #include <linux/proc_fs.h>
43 #include <linux/seq_file.h>
44 #include <linux/list.h>
47 #include <asm/system.h>
48 #include <asm/uaccess.h>
49 #include <asm/unaligned.h>
51 #include <net/bluetooth/bluetooth.h>
52 #include <net/bluetooth/hci_core.h>
53 #include <net/bluetooth/l2cap.h>
55 #ifndef CONFIG_BT_L2CAP_DEBUG
62 static struct proto_ops l2cap_sock_ops;
64 struct bt_sock_list l2cap_sk_list = {
65 .lock = RW_LOCK_UNLOCKED
68 static int l2cap_conn_del(struct hci_conn *conn, int err);
70 static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent);
71 static void l2cap_chan_del(struct sock *sk, int err);
73 static void __l2cap_sock_close(struct sock *sk, int reason);
74 static void l2cap_sock_close(struct sock *sk);
75 static void l2cap_sock_kill(struct sock *sk);
77 static int l2cap_send_req(struct l2cap_conn *conn, u8 code, u16 len, void *data);
78 static int l2cap_send_rsp(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data);
80 /* ---- L2CAP timers ---- */
/* Socket timer expiry callback: force-close the socket with ETIMEDOUT.
 * 'arg' carries the struct sock pointer installed by l2cap_sock_init_timer().
 * NOTE(review): this fragment has lines elided (no braces/locking visible);
 * confirm against the full source. */
81 static void l2cap_sock_timeout(unsigned long arg)
83 struct sock *sk = (struct sock *) arg;
85 BT_DBG("sock %p state %d", sk, sk->sk_state);
88 __l2cap_sock_close(sk, ETIMEDOUT);
/* (Re)arm the per-socket timer to fire 'timeout' jiffies from now. */
95 static void l2cap_sock_set_timer(struct sock *sk, long timeout)
97 BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
98 sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
/* Cancel a pending per-socket timer, if any. */
101 static void l2cap_sock_clear_timer(struct sock *sk)
103 BT_DBG("sock %p state %d", sk, sk->sk_state);
104 sk_stop_timer(sk, &sk->sk_timer);
/* Initialize sk->sk_timer to call l2cap_sock_timeout() with the socket
 * pointer as its argument (pre-timer_setup legacy API). */
107 static void l2cap_sock_init_timer(struct sock *sk)
109 init_timer(&sk->sk_timer);
110 sk->sk_timer.function = l2cap_sock_timeout;
111 sk->sk_timer.data = (unsigned long)sk;
114 /* ---- L2CAP connections ---- */
/* Create (or return the existing) L2CAP connection object for an ACL link.
 * Allocates atomically, zeroes the struct, and caches the HCI device's ACL
 * MTU plus source/destination bdaddr pointers.
 * NOTE(review): fragment — return statements and error paths are elided. */
115 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
117 struct l2cap_conn *conn;
119 if ((conn = hcon->l2cap_data))
125 if (!(conn = kmalloc(sizeof(struct l2cap_conn), GFP_ATOMIC)))
127 memset(conn, 0, sizeof(struct l2cap_conn));
129 hcon->l2cap_data = conn;
132 conn->mtu = hcon->hdev->acl_mtu;
133 conn->src = &hcon->hdev->bdaddr;
134 conn->dst = &hcon->dst;
136 spin_lock_init(&conn->lock);
137 conn->chan_list.lock = RW_LOCK_UNLOCKED;
139 BT_DBG("hcon %p conn %p", hcon, conn);
/* Tear down the L2CAP connection attached to an ACL link: free any pending
 * reassembly skb, delete every channel on the connection (propagating 'err'
 * to their owners), and detach from the hci_conn.
 * NOTE(review): fragment — locking and the kfree of 'conn' are elided. */
143 static int l2cap_conn_del(struct hci_conn *hcon, int err)
145 struct l2cap_conn *conn;
148 if (!(conn = hcon->l2cap_data))
151 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
154 kfree_skb(conn->rx_skb);
157 while ((sk = conn->chan_list.head)) {
159 l2cap_chan_del(sk, err);
164 hcon->l2cap_data = NULL;
/* Locked wrapper around __l2cap_chan_add(): adds 'sk' to the connection's
 * channel list under the list write lock. */
169 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
171 struct l2cap_chan_list *l = &conn->chan_list;
172 write_lock(&l->lock);
173 __l2cap_chan_add(conn, sk, parent);
174 write_unlock(&l->lock);
177 /* ---- Socket interface ---- */
/* Find a socket bound to exactly (psm, src). Caller must hold
 * l2cap_sk_list.lock. Used by bind/listen to detect PSM collisions.
 * NOTE(review): fragment — the return statement is elided. */
178 static struct sock *__l2cap_get_sock_by_addr(u16 psm, bdaddr_t *src)
181 struct hlist_node *node;
182 sk_for_each(sk, node, &l2cap_sk_list.head)
183 if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
190 /* Find socket with psm and source bdaddr.
191 * Returns closest match.
/* Find a socket for (state, psm, src): an exact source-address match wins;
 * a BDADDR_ANY binding is remembered in sk1 as the closest fallback.
 * Caller must hold l2cap_sk_list.lock. */
193 static struct sock *__l2cap_get_sock_by_psm(int state, u16 psm, bdaddr_t *src)
195 struct sock *sk = NULL, *sk1 = NULL;
196 struct hlist_node *node;
198 sk_for_each(sk, node, &l2cap_sk_list.head) {
199 if (state && sk->sk_state != state)
202 if (l2cap_pi(sk)->psm == psm) {
204 if (!bacmp(&bt_sk(sk)->src, src))
208 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
212 return node ? sk : sk1;
215 /* Find socket with given address (psm, src).
216 * Returns locked socket */
/* Locked lookup: as __l2cap_get_sock_by_psm() but takes the socket-list
 * read lock and returns the socket bh-locked (caller must bh_unlock_sock). */
217 static inline struct sock *l2cap_get_sock_by_psm(int state, u16 psm, bdaddr_t *src)
220 read_lock(&l2cap_sk_list.lock);
221 s = __l2cap_get_sock_by_psm(state, psm, src);
222 if (s) bh_lock_sock(s);
223 read_unlock(&l2cap_sk_list.lock);
/* sk->sk_destruct callback: drop any queued rx/tx skbs and free the
 * protocol-private data attached to the socket. */
227 static void l2cap_sock_destruct(struct sock *sk)
231 skb_queue_purge(&sk->sk_receive_queue);
232 skb_queue_purge(&sk->sk_write_queue);
235 kfree(sk->sk_protinfo);
/* Close every not-yet-accepted child of a listening socket, then mark the
 * parent closed and zapped so it can be killed. */
238 static void l2cap_sock_cleanup_listen(struct sock *parent)
242 BT_DBG("parent %p", parent);
244 /* Close not yet accepted channels */
245 while ((sk = bt_accept_dequeue(parent, NULL)))
246 l2cap_sock_close(sk);
248 parent->sk_state = BT_CLOSED;
249 parent->sk_zapped = 1;
252 /* Kill socket (only if zapped and orphan)
253 * Must be called on unlocked socket.
/* Kill socket (only if zapped and orphan): unlink from the global socket
 * list and mark dead. Bails out early if the socket is not zapped or still
 * has an owning struct socket. Must be called on an unlocked socket. */
255 static void l2cap_sock_kill(struct sock *sk)
257 if (!sk->sk_zapped || sk->sk_socket)
260 BT_DBG("sk %p state %d", sk, sk->sk_state);
262 /* Kill poor orphan */
263 bt_sock_unlink(&l2cap_sk_list, sk);
264 sock_set_flag(sk, SOCK_DEAD);
/* State-driven close: listening sockets clean up their accept queue;
 * connected SEQPACKET channels send an L2CAP disconnect request and wait in
 * BT_DISCONN under the send timer; everything else is deleted immediately
 * with 'reason' as the error. Caller holds the socket lock.
 * NOTE(review): fragment — the switch case labels and default path are
 * elided; verify state coverage against the full source. */
268 static void __l2cap_sock_close(struct sock *sk, int reason)
270 BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
272 switch (sk->sk_state) {
274 l2cap_sock_cleanup_listen(sk);
280 if (sk->sk_type == SOCK_SEQPACKET) {
281 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
282 struct l2cap_disconn_req req;
284 sk->sk_state = BT_DISCONN;
285 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
287 req.dcid = __cpu_to_le16(l2cap_pi(sk)->dcid);
288 req.scid = __cpu_to_le16(l2cap_pi(sk)->scid);
289 l2cap_send_req(conn, L2CAP_DISCONN_REQ, sizeof(req), &req);
291 l2cap_chan_del(sk, reason);
297 l2cap_chan_del(sk, reason);
306 /* Must be called on unlocked socket. */
/* Public close path: stop the socket timer and close with ECONNRESET.
 * Must be called on an unlocked socket. */
307 static void l2cap_sock_close(struct sock *sk)
309 l2cap_sock_clear_timer(sk);
311 __l2cap_sock_close(sk, ECONNRESET);
/* Initialize protocol-private fields. With a parent (incoming accept) the
 * child inherits type, MTUs and link mode; otherwise defaults are used.
 * Config defaults (conf_mtu, flush_to) are always reset.
 * NOTE(review): fragment — the if/else around the parent branch is elided. */
316 static void l2cap_sock_init(struct sock *sk, struct sock *parent)
318 struct l2cap_pinfo *pi = l2cap_pi(sk);
323 sk->sk_type = parent->sk_type;
324 pi->imtu = l2cap_pi(parent)->imtu;
325 pi->omtu = l2cap_pi(parent)->omtu;
326 pi->link_mode = l2cap_pi(parent)->link_mode;
328 pi->imtu = L2CAP_DEFAULT_MTU;
333 /* Default config options */
334 pi->conf_mtu = L2CAP_DEFAULT_MTU;
335 pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
/* Allocate a new L2CAP socket: bt_sock_alloc() with l2cap_pinfo private
 * area, set destructor/timeouts/state, init the timer, and link into the
 * global l2cap_sk_list. Returns the new sock (NULL check elided here). */
338 static struct sock *l2cap_sock_alloc(struct socket *sock, int proto, int prio)
342 sk = bt_sock_alloc(sock, proto, sizeof(struct l2cap_pinfo), prio);
346 sk_set_owner(sk, THIS_MODULE);
348 sk->sk_destruct = l2cap_sock_destruct;
349 sk->sk_sndtimeo = L2CAP_CONN_TIMEOUT;
351 sk->sk_protocol = proto;
352 sk->sk_state = BT_OPEN;
354 l2cap_sock_init_timer(sk);
356 bt_sock_link(&l2cap_sk_list, sk);
/* socket(2) backend: accept SEQPACKET/DGRAM/RAW only, require CAP_NET_RAW
 * for raw sockets, allocate and initialize the sock. */
360 static int l2cap_sock_create(struct socket *sock, int protocol)
364 BT_DBG("sock %p", sock);
366 sock->state = SS_UNCONNECTED;
368 if (sock->type != SOCK_SEQPACKET && sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
369 return -ESOCKTNOSUPPORT;
371 if (sock->type == SOCK_RAW && !capable(CAP_NET_RAW))
374 sock->ops = &l2cap_sock_ops;
376 sk = l2cap_sock_alloc(sock, protocol, GFP_KERNEL);
380 l2cap_sock_init(sk, NULL);
384 static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
386 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
387 struct sock *sk = sock->sk;
390 BT_DBG("sk %p, %s %d", sk, batostr(&la->l2_bdaddr), la->l2_psm);
392 if (!addr || addr->sa_family != AF_BLUETOOTH)
397 if (sk->sk_state != BT_OPEN) {
402 write_lock_bh(&l2cap_sk_list.lock);
404 if (la->l2_psm && __l2cap_get_sock_by_addr(la->l2_psm, &la->l2_bdaddr)) {
407 /* Save source address */
408 bacpy(&bt_sk(sk)->src, &la->l2_bdaddr);
409 l2cap_pi(sk)->psm = la->l2_psm;
410 l2cap_pi(sk)->sport = la->l2_psm;
411 sk->sk_state = BT_BOUND;
414 write_unlock_bh(&l2cap_sk_list.lock);
/* Establish (or join) the ACL link to the destination, attach the channel
 * to the l2cap_conn, arm the connect timer, and — if the ACL link is
 * already up — either send the L2CAP connect request (SEQPACKET) or mark
 * the socket connected immediately (raw/dgram).
 * NOTE(review): fragment — hcon/conn error handling and the hci_dev_put
 * path are elided. */
421 static int l2cap_do_connect(struct sock *sk)
423 bdaddr_t *src = &bt_sk(sk)->src;
424 bdaddr_t *dst = &bt_sk(sk)->dst;
425 struct l2cap_conn *conn;
426 struct hci_conn *hcon;
427 struct hci_dev *hdev;
430 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst), l2cap_pi(sk)->psm);
432 if (!(hdev = hci_get_route(dst, src)))
433 return -EHOSTUNREACH;
435 hci_dev_lock_bh(hdev);
439 hcon = hci_connect(hdev, ACL_LINK, dst);
443 conn = l2cap_conn_add(hcon, 0);
451 /* Update source addr of the socket */
452 bacpy(src, conn->src);
454 l2cap_chan_add(conn, sk, NULL);
456 sk->sk_state = BT_CONNECT;
457 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
459 if (hcon->state == BT_CONNECTED) {
460 if (sk->sk_type == SOCK_SEQPACKET) {
461 struct l2cap_conn_req req;
462 req.scid = __cpu_to_le16(l2cap_pi(sk)->scid);
463 req.psm = l2cap_pi(sk)->psm;
464 l2cap_send_req(conn, L2CAP_CONN_REQ, sizeof(req), &req);
466 l2cap_sock_clear_timer(sk);
467 sk->sk_state = BT_CONNECTED;
472 hci_dev_unlock_bh(hdev);
/* connect(2): validate address and (for SEQPACKET) a non-zero PSM, check
 * current state, record destination, kick off l2cap_do_connect(), then wait
 * for BT_CONNECTED subject to the socket send timeout / O_NONBLOCK. */
477 static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
479 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
480 struct sock *sk = sock->sk;
487 if (addr->sa_family != AF_BLUETOOTH || alen < sizeof(struct sockaddr_l2)) {
492 if (sk->sk_type == SOCK_SEQPACKET && !la->l2_psm) {
497 switch(sk->sk_state) {
501 /* Already connecting */
505 /* Already connected */
518 /* Set destination address and psm */
519 bacpy(&bt_sk(sk)->dst, &la->l2_bdaddr);
520 l2cap_pi(sk)->psm = la->l2_psm;
522 if ((err = l2cap_do_connect(sk)))
526 err = bt_sock_wait_state(sk, BT_CONNECTED,
527 sock_sndtimeo(sk, flags & O_NONBLOCK));
/* listen(2): only for bound SEQPACKET sockets. If no PSM was bound, pick a
 * free odd dynamic PSM in [0x1001, 0x1100) under the socket-list lock
 * (valid PSMs have an odd least-significant octet, hence the += 2), then
 * set the backlog and enter BT_LISTEN. */
533 static int l2cap_sock_listen(struct socket *sock, int backlog)
535 struct sock *sk = sock->sk;
538 BT_DBG("sk %p backlog %d", sk, backlog);
542 if (sk->sk_state != BT_BOUND || sock->type != SOCK_SEQPACKET) {
547 if (!l2cap_pi(sk)->psm) {
548 bdaddr_t *src = &bt_sk(sk)->src;
553 write_lock_bh(&l2cap_sk_list.lock);
555 for (psm = 0x1001; psm < 0x1100; psm += 2)
556 if (!__l2cap_get_sock_by_addr(psm, src)) {
557 l2cap_pi(sk)->psm = htobs(psm);
558 l2cap_pi(sk)->sport = htobs(psm);
563 write_unlock_bh(&l2cap_sk_list.lock);
569 sk->sk_max_ack_backlog = backlog;
570 sk->sk_ack_backlog = 0;
571 sk->sk_state = BT_LISTEN;
/* accept(2): wake-one wait on the listen socket's queue until a child is
 * ready, the timeout elapses, the socket leaves BT_LISTEN, or a signal
 * arrives; then hand the child to 'newsock' and mark it SS_CONNECTED. */
578 static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
580 DECLARE_WAITQUEUE(wait, current);
581 struct sock *sk = sock->sk, *nsk;
587 if (sk->sk_state != BT_LISTEN) {
592 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
594 BT_DBG("sk %p timeo %ld", sk, timeo);
596 /* Wait for an incoming connection. (wake-one). */
597 add_wait_queue_exclusive(sk->sk_sleep, &wait);
598 while (!(nsk = bt_accept_dequeue(sk, newsock))) {
599 set_current_state(TASK_INTERRUPTIBLE);
606 timeo = schedule_timeout(timeo);
609 if (sk->sk_state != BT_LISTEN) {
614 if (signal_pending(current)) {
615 err = sock_intr_errno(timeo);
619 set_current_state(TASK_RUNNING);
620 remove_wait_queue(sk->sk_sleep, &wait);
625 newsock->state = SS_CONNECTED;
627 BT_DBG("new socket %p", nsk);
/* getsockname/getpeername: fill a sockaddr_l2 with dst ('peer') or src
 * bdaddr plus the channel PSM. */
634 static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
636 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
637 struct sock *sk = sock->sk;
639 BT_DBG("sock %p, sk %p", sock, sk);
641 addr->sa_family = AF_BLUETOOTH;
642 *len = sizeof(struct sockaddr_l2);
645 bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
647 bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
649 la->l2_psm = l2cap_pi(sk)->psm;
/* Build and transmit one L2CAP PDU from user iovecs. The first skb carries
 * the L2CAP basic header (plus a 2-byte PSM for connectionless DGRAM); the
 * remainder of the payload is chained as frag_list skbs sized to the ACL
 * MTU, then the whole frame goes out via hci_send_acl().
 * NOTE(review): fragment — the fragment loop bounds, error unwinding and
 * 'sent' accounting are elided. */
653 static inline int l2cap_do_send(struct sock *sk, struct msghdr *msg, int len)
655 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
656 struct sk_buff *skb, **frag;
657 int err, hlen, count, sent=0;
658 struct l2cap_hdr *lh;
660 BT_DBG("sk %p len %d", sk, len);
662 /* First fragment (with L2CAP header) */
663 if (sk->sk_type == SOCK_DGRAM)
664 hlen = L2CAP_HDR_SIZE + 2;
666 hlen = L2CAP_HDR_SIZE;
668 count = min_t(unsigned int, (conn->mtu - hlen), len);
670 skb = bt_skb_send_alloc(sk, hlen + count,
671 msg->msg_flags & MSG_DONTWAIT, &err);
675 /* Create L2CAP header */
676 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
677 lh->cid = __cpu_to_le16(l2cap_pi(sk)->dcid);
678 lh->len = __cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
680 if (sk->sk_type == SOCK_DGRAM)
681 put_unaligned(l2cap_pi(sk)->psm, (u16 *) skb_put(skb, 2));
683 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count)) {
691 /* Continuation fragments (no L2CAP header) */
692 frag = &skb_shinfo(skb)->frag_list;
694 count = min_t(unsigned int, conn->mtu, len);
696 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
700 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count)) {
708 frag = &(*frag)->next;
711 if ((err = hci_send_acl(conn->hcon, skb, 0)) < 0)
/* sendmsg(2): reject MSG_OOB and frames larger than the negotiated outgoing
 * MTU; only a BT_CONNECTED socket may transmit, via l2cap_do_send(). */
721 static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
723 struct sock *sk = sock->sk;
726 BT_DBG("sock %p, sk %p", sock, sk);
729 return sock_error(sk);
731 if (msg->msg_flags & MSG_OOB)
734 /* Check outgoing MTU */
735 if (len > l2cap_pi(sk)->omtu)
740 if (sk->sk_state == BT_CONNECTED)
741 err = l2cap_do_send(sk, msg, len);
/* setsockopt(2): L2CAP_OPTIONS copies a struct l2cap_options from userspace
 * and applies imtu/omtu; L2CAP_LM reads a u32 link-mode bitmask.
 * NOTE(review): fragment — the switch cases and EFAULT paths are elided. */
749 static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, int optlen)
751 struct sock *sk = sock->sk;
752 struct l2cap_options opts;
762 len = min_t(unsigned int, sizeof(opts), optlen);
763 if (copy_from_user((char *)&opts, optval, len)) {
767 l2cap_pi(sk)->imtu = opts.imtu;
768 l2cap_pi(sk)->omtu = opts.omtu;
772 if (get_user(opt, (u32 __user *)optval)) {
777 l2cap_pi(sk)->link_mode = opt;
/* getsockopt(2): return l2cap_options (imtu/omtu/flush_to), the u32 link
 * mode, or — only while BT_CONNECTED — l2cap_conninfo with the ACL handle.
 * Output is truncated to the caller-supplied length. */
789 static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
791 struct sock *sk = sock->sk;
792 struct l2cap_options opts;
793 struct l2cap_conninfo cinfo;
796 if (get_user(len, optlen))
803 opts.imtu = l2cap_pi(sk)->imtu;
804 opts.omtu = l2cap_pi(sk)->omtu;
805 opts.flush_to = l2cap_pi(sk)->flush_to;
807 len = min_t(unsigned int, len, sizeof(opts));
808 if (copy_to_user(optval, (char *)&opts, len))
814 if (put_user(l2cap_pi(sk)->link_mode, (u32 __user *)optval))
819 if (sk->sk_state != BT_CONNECTED) {
824 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
826 len = min_t(unsigned int, len, sizeof(cinfo));
827 if (copy_to_user(optval, (char *)&cinfo, len))
/* shutdown(2): one-shot full shutdown — stop the timer, run the state-based
 * close, and if SO_LINGER is set wait for BT_CLOSED up to lingertime. */
841 static int l2cap_sock_shutdown(struct socket *sock, int how)
843 struct sock *sk = sock->sk;
846 BT_DBG("sock %p, sk %p", sock, sk);
852 if (!sk->sk_shutdown) {
853 sk->sk_shutdown = SHUTDOWN_MASK;
854 l2cap_sock_clear_timer(sk);
855 __l2cap_sock_close(sk, 0);
857 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
858 err = bt_sock_wait_state(sk, BT_CLOSED, sk->sk_lingertime);
/* release(2): shut the socket down fully; orphan/kill steps are elided in
 * this fragment. */
864 static int l2cap_sock_release(struct socket *sock)
866 struct sock *sk = sock->sk;
869 BT_DBG("sock %p, sk %p", sock, sk);
874 err = l2cap_sock_shutdown(sock, 2);
881 /* ---- L2CAP channels ---- */
/* Walk the connection's channel list for a channel whose destination CID
 * matches. Caller must hold the list lock. */
882 static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
885 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
886 if (l2cap_pi(s)->dcid == cid)
/* Walk the connection's channel list for a channel whose source CID
 * matches. Caller must hold the list lock. */
892 static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
895 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
896 if (l2cap_pi(s)->scid == cid)
902 /* Find channel with given SCID.
903 * Returns locked socket */
/* Locked SCID lookup: returns the channel socket bh-locked (caller must
 * bh_unlock_sock), or NULL. */
904 static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
908 s = __l2cap_get_chan_by_scid(l, cid);
909 if (s) bh_lock_sock(s);
910 read_unlock(&l->lock);
/* Allocate the first unused source CID on this connection by linear scan
 * up to 0xffff. Starting value is elided in this fragment (presumably the
 * first dynamic CID, 0x0040 — TODO confirm). */
914 static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
918 for (; cid < 0xffff; cid++) {
919 if(!__l2cap_get_chan_by_scid(l, cid))
/* Push 'sk' onto the head of the doubly linked channel list. Caller holds
 * the list write lock. */
926 static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
931 l2cap_pi(l->head)->prev_c = sk;
933 l2cap_pi(sk)->next_c = l->head;
934 l2cap_pi(sk)->prev_c = NULL;
/* Remove 'sk' from the channel list under the write lock, fixing up its
 * neighbours' next/prev pointers. Head-update branch elided here. */
938 static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
940 struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;
942 write_lock(&l->lock);
947 l2cap_pi(next)->prev_c = prev;
949 l2cap_pi(prev)->next_c = next;
950 write_unlock(&l->lock);
/* Attach a channel to a connection and assign CIDs by socket type:
 * SEQPACKET gets a freshly allocated dynamic SCID; DGRAM uses the fixed
 * connectionless CID 0x0002; raw sockets use the signalling CID 0x0001.
 * Finally link into the channel list and, for accepted children, enqueue
 * on the parent's accept queue. Caller holds the list write lock. */
955 static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
957 struct l2cap_chan_list *l = &conn->chan_list;
959 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn, l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
961 l2cap_pi(sk)->conn = conn;
963 if (sk->sk_type == SOCK_SEQPACKET) {
964 /* Alloc CID for connection-oriented socket */
965 l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
966 } else if (sk->sk_type == SOCK_DGRAM) {
967 /* Connectionless socket */
968 l2cap_pi(sk)->scid = 0x0002;
969 l2cap_pi(sk)->dcid = 0x0002;
970 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
972 /* Raw socket can send/recv signalling messages only */
973 l2cap_pi(sk)->scid = 0x0001;
974 l2cap_pi(sk)->dcid = 0x0001;
975 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
978 __l2cap_chan_link(l, sk);
981 bt_accept_enqueue(parent, sk);
985 * Must be called on the locked socket. */
/* Detach a channel from its connection: stop its timer, unlink it from the
 * connection's channel list, drop the hci_conn reference, mark the socket
 * closed and notify either the accept parent or the socket's own waiters.
 * Must be called on the locked socket. */
986 static void l2cap_chan_del(struct sock *sk, int err)
988 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
989 struct sock *parent = bt_sk(sk)->parent;
991 l2cap_sock_clear_timer(sk);
993 BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
996 /* Unlink from channel list */
997 l2cap_chan_unlink(&conn->chan_list, sk);
998 l2cap_pi(sk)->conn = NULL;
999 hci_conn_put(conn->hcon);
1002 sk->sk_state = BT_CLOSED;
1009 parent->sk_data_ready(parent, 0);
1011 sk->sk_state_change(sk);
/* ACL link came up: for each channel on the connection, raw/dgram sockets
 * become connected immediately, while SEQPACKET channels in BT_CONNECT
 * send their pending L2CAP connect request. */
1014 static void l2cap_conn_ready(struct l2cap_conn *conn)
1016 struct l2cap_chan_list *l = &conn->chan_list;
1019 BT_DBG("conn %p", conn);
1021 read_lock(&l->lock);
1023 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
1026 if (sk->sk_type != SOCK_SEQPACKET) {
1027 l2cap_sock_clear_timer(sk);
1028 sk->sk_state = BT_CONNECTED;
1029 sk->sk_state_change(sk);
1030 } else if (sk->sk_state == BT_CONNECT) {
1031 struct l2cap_conn_req req;
1032 req.scid = __cpu_to_le16(l2cap_pi(sk)->scid);
1033 req.psm = l2cap_pi(sk)->psm;
1034 l2cap_send_req(conn, L2CAP_CONN_REQ, sizeof(req), &req);
1040 read_unlock(&l->lock);
/* Configuration finished: clear config state and timer, then wake either
 * the connect() waiter (outgoing, no parent) or the accept() waiter on the
 * parent (incoming). The parent NULL check is elided in this fragment. */
1043 static void l2cap_chan_ready(struct sock *sk)
1045 struct sock *parent = bt_sk(sk)->parent;
1047 BT_DBG("sk %p, parent %p", sk, parent);
1049 l2cap_pi(sk)->conf_state = 0;
1050 l2cap_sock_clear_timer(sk);
1053 /* Outgoing channel.
1054 * Wake up socket sleeping on connect.
1056 sk->sk_state = BT_CONNECTED;
1057 sk->sk_state_change(sk);
1059 /* Incoming channel.
1060 * Wake up socket sleeping on accept.
1062 parent->sk_data_ready(parent, 0);
1066 /* Copy frame to all raw sockets on that connection */
/* Copy frame to all raw sockets on that connection: clone the skb for each
 * SOCK_RAW channel (skipping the originator) and queue it for receive. */
1067 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1069 struct l2cap_chan_list *l = &conn->chan_list;
1070 struct sk_buff *nskb;
1073 BT_DBG("conn %p", conn);
1075 read_lock(&l->lock);
1076 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
1077 if (sk->sk_type != SOCK_RAW)
1080 /* Don't send frame to the socket it came from */
1084 if (!(nskb = skb_clone(skb, GFP_ATOMIC)))
1087 if (sock_queue_rcv_skb(sk, nskb))
1090 read_unlock(&l->lock);
1093 /* ---- L2CAP signalling commands ---- */
/* Allocate the next signalling-command identifier under the connection
 * spinlock. Kernel uses 1-199; 200-254 are reserved for userspace tools
 * (l2ping etc.), so the counter wraps back inside the kernel range. */
1094 static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
1098 /* Get next available identificator.
1099 * 1 - 199 are used by kernel.
1100 * 200 - 254 are used by utilities like l2ping, etc
1103 spin_lock(&conn->lock);
1105 if (++conn->tx_ident > 199)
1108 id = conn->tx_ident;
1110 spin_unlock(&conn->lock);
/* Build a signalling PDU: L2CAP header on CID 0x0001, command header
 * (code/ident/len), then the payload — fragmented across frag_list skbs
 * when it exceeds the ACL MTU. GFP_ATOMIC throughout.
 * NOTE(review): fragment — allocation-failure unwinding and the final
 * return are elided. */
1115 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
1116 u8 code, u8 ident, u16 dlen, void *data)
1118 struct sk_buff *skb, **frag;
1119 struct l2cap_cmd_hdr *cmd;
1120 struct l2cap_hdr *lh;
1123 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d", conn, code, ident, dlen);
1125 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
1126 count = min_t(unsigned int, conn->mtu, len);
1128 skb = bt_skb_alloc(count, GFP_ATOMIC);
1132 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1133 lh->len = __cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
1134 lh->cid = __cpu_to_le16(0x0001);
1136 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
1139 cmd->len = __cpu_to_le16(dlen);
1142 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
1143 memcpy(skb_put(skb, count), data, count);
1149 /* Continuation fragments (no L2CAP header) */
1150 frag = &skb_shinfo(skb)->frag_list;
1152 count = min_t(unsigned int, conn->mtu, len);
1154 *frag = bt_skb_alloc(count, GFP_ATOMIC);
1158 memcpy(skb_put(*frag, count), data, count);
1163 frag = &(*frag)->next;
/* Send a signalling request: allocate a fresh ident, build the command PDU
 * and transmit it over the ACL link. */
1173 static int l2cap_send_req(struct l2cap_conn *conn, u8 code, u16 len, void *data)
1175 u8 ident = l2cap_get_ident(conn);
1176 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
1178 BT_DBG("code 0x%2.2x", code);
1182 return hci_send_acl(conn->hcon, skb, 0);
/* Send a signalling response: like l2cap_send_req() but echoes the
 * caller-supplied ident from the request being answered. */
1185 static int l2cap_send_rsp(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
1187 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
1189 BT_DBG("code 0x%2.2x", code);
1193 return hci_send_acl(conn->hcon, skb, 0);
/* Decode one TLV config option at *ptr, advancing past it: 1/2/4-byte
 * values are read (little-endian converted); any other length yields a
 * pointer to the raw value bytes. Returns the consumed length. */
1196 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
1198 struct l2cap_conf_opt *opt = *ptr;
1201 len = L2CAP_CONF_OPT_SIZE + opt->len;
1209 *val = *((u8 *) opt->val);
1213 *val = __le16_to_cpu(*((u16 *)opt->val));
1217 *val = __le32_to_cpu(*((u32 *)opt->val));
1221 *val = (unsigned long) opt->val;
1225 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
/* Parse the peer's configuration request options: record the proposed MTU
 * and flush timeout; QoS and unknown options are left unhandled (the FIXME
 * notes unknown options should be rejected per spec). */
1229 static inline void l2cap_parse_conf_req(struct sock *sk, void *data, int len)
1231 int type, hint, olen;
1235 BT_DBG("sk %p len %d", sk, len);
1237 while (len >= L2CAP_CONF_OPT_SIZE) {
1238 len -= l2cap_get_conf_opt(&ptr, &type, &olen, &val);
1244 case L2CAP_CONF_MTU:
1245 l2cap_pi(sk)->conf_mtu = val;
1248 case L2CAP_CONF_FLUSH_TO:
1249 l2cap_pi(sk)->flush_to = val;
1252 case L2CAP_CONF_QOS:
1259 /* FIXME: Reject unknown option */
/* Append one TLV config option at *ptr (1/2/4-byte values little-endian,
 * anything else memcpy'd from the pointer in 'val') and advance *ptr. */
1265 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
1267 struct l2cap_conf_opt *opt = *ptr;
1269 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
1276 *((u8 *) opt->val) = val;
1280 *((u16 *) opt->val) = __cpu_to_le16(val);
1284 *((u32 *) opt->val) = __cpu_to_le32(val);
1288 memcpy(opt->val, (void *) val, len);
1292 *ptr += L2CAP_CONF_OPT_SIZE + len;
/* Build our configuration request: include an MTU option only when our
 * incoming MTU differs from the default; flush-timeout is left commented
 * out pending a real value. Returns the request length (elided here). */
1295 static int l2cap_build_conf_req(struct sock *sk, void *data)
1297 struct l2cap_pinfo *pi = l2cap_pi(sk);
1298 struct l2cap_conf_req *req = data;
1299 void *ptr = req->data;
1301 BT_DBG("sk %p", sk);
1303 if (pi->imtu != L2CAP_DEFAULT_MTU)
1304 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
1306 /* FIXME. Need actual value of the flush timeout */
1307 //if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
1308 // l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);
1310 req->dcid = __cpu_to_le16(pi->dcid);
1311 req->flags = __cpu_to_le16(0);
/* Evaluate the peer's proposed options for our outgoing direction: if the
 * proposed MTU is below our current omtu, counter-propose our omtu and
 * mark the config unacceptable; otherwise adopt the peer's MTU. */
1316 static inline int l2cap_conf_output(struct sock *sk, void **ptr)
1318 struct l2cap_pinfo *pi = l2cap_pi(sk);
1321 /* Configure output options and let the other side know
1322 * which ones we don't like. */
1323 if (pi->conf_mtu < pi->omtu) {
1324 l2cap_add_conf_opt(ptr, L2CAP_CONF_MTU, 2, pi->omtu);
1325 result = L2CAP_CONF_UNACCEPT;
1327 pi->omtu = pi->conf_mtu;
1330 BT_DBG("sk %p result %d", sk, result);
/* Build a configuration response. When 'result' is non-NULL (complete
 * config) the output options are evaluated and the outcome stored there;
 * a NULL 'result' produces an empty success response for partial config. */
1334 static int l2cap_build_conf_rsp(struct sock *sk, void *data, int *result)
1336 struct l2cap_conf_rsp *rsp = data;
1337 void *ptr = rsp->data;
1340 BT_DBG("sk %p complete %d", sk, result ? 1 : 0);
1343 *result = l2cap_conf_output(sk, &ptr);
1347 rsp->scid = __cpu_to_le16(l2cap_pi(sk)->dcid);
1348 rsp->result = __cpu_to_le16(result ? *result : 0);
1349 rsp->flags = __cpu_to_le16(flags);
/* Handle an incoming L2CAP connect request. Look up a listening socket for
 * the PSM (bad PSM and backlog-full cases set the error result); allocate
 * a child socket, reject duplicate DCIDs, initialize the child from the
 * parent, attach it to the connection, and answer. If the link mode needs
 * encryption/authentication the response is CR_PEND/CS_AUTHEN_PEND with
 * the channel parked in BT_CONNECT2; otherwise success and BT_CONFIG.
 * NOTE(review): fragment — several branch targets/goto labels and the
 * final return are elided. */
1354 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1356 struct l2cap_chan_list *list = &conn->chan_list;
1357 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
1358 struct l2cap_conn_rsp rsp;
1359 struct sock *sk, *parent;
1360 int result = 0, status = 0;
1362 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
1365 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
1367 /* Check if we have socket listening on psm */
1368 parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
1370 result = L2CAP_CR_BAD_PSM;
1374 result = L2CAP_CR_NO_MEM;
1376 /* Check for backlog size */
1377 if (parent->sk_ack_backlog > parent->sk_max_ack_backlog) {
1378 BT_DBG("backlog full %d", parent->sk_ack_backlog);
1382 sk = l2cap_sock_alloc(NULL, BTPROTO_L2CAP, GFP_ATOMIC);
1386 write_lock(&list->lock);
1388 /* Check if we already have channel with that dcid */
1389 if (__l2cap_get_chan_by_dcid(list, scid)) {
1390 write_unlock(&list->lock);
1392 l2cap_sock_kill(sk);
1396 hci_conn_hold(conn->hcon);
1398 l2cap_sock_init(sk, parent);
1399 bacpy(&bt_sk(sk)->src, conn->src);
1400 bacpy(&bt_sk(sk)->dst, conn->dst);
1401 l2cap_pi(sk)->psm = psm;
1402 l2cap_pi(sk)->dcid = scid;
1404 __l2cap_chan_add(conn, sk, parent);
1405 dcid = l2cap_pi(sk)->scid;
1407 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
1409 /* Service level security */
1410 result = L2CAP_CR_PEND;
1411 status = L2CAP_CS_AUTHEN_PEND;
1412 sk->sk_state = BT_CONNECT2;
1413 l2cap_pi(sk)->ident = cmd->ident;
1415 if (l2cap_pi(sk)->link_mode & L2CAP_LM_ENCRYPT) {
1416 if (!hci_conn_encrypt(conn->hcon))
1418 } else if (l2cap_pi(sk)->link_mode & L2CAP_LM_AUTH) {
1419 if (!hci_conn_auth(conn->hcon))
1423 sk->sk_state = BT_CONFIG;
1424 result = status = 0;
1427 write_unlock(&list->lock);
1430 bh_unlock_sock(parent);
1433 rsp.scid = __cpu_to_le16(scid);
1434 rsp.dcid = __cpu_to_le16(dcid);
1435 rsp.result = __cpu_to_le16(result);
1436 rsp.status = __cpu_to_le16(status);
1437 l2cap_send_rsp(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* Handle a connect response: on success record the peer's DCID, enter
 * BT_CONFIG and immediately send our configuration request; on refusal the
 * channel is deleted with ECONNREFUSED. Pending-result and default cases
 * are elided in this fragment. */
1441 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1443 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
1444 u16 scid, dcid, result, status;
1448 scid = __le16_to_cpu(rsp->scid);
1449 dcid = __le16_to_cpu(rsp->dcid);
1450 result = __le16_to_cpu(rsp->result);
1451 status = __le16_to_cpu(rsp->status);
1453 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
1455 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
1459 case L2CAP_CR_SUCCESS:
1460 sk->sk_state = BT_CONFIG;
1461 l2cap_pi(sk)->dcid = dcid;
1462 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
1464 l2cap_send_req(conn, L2CAP_CONF_REQ, l2cap_build_conf_req(sk, req), req);
1471 l2cap_chan_del(sk, ECONNREFUSED);
/* Handle a configuration request from the peer: parse the options, answer
 * with an empty response while the continuation flag (bit 0) is set, or a
 * complete evaluated response otherwise. When both directions are done the
 * channel becomes connected; if we have not sent our own config request
 * yet, send it now. */
1479 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1481 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
1487 dcid = __le16_to_cpu(req->dcid);
1488 flags = __le16_to_cpu(req->flags);
1490 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
1492 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid)))
1495 l2cap_parse_conf_req(sk, req->data, cmd->len - sizeof(*req));
1497 if (flags & 0x0001) {
1498 /* Incomplete config. Send empty response. */
1499 l2cap_send_rsp(conn, cmd->ident, L2CAP_CONF_RSP, l2cap_build_conf_rsp(sk, rsp, NULL), rsp);
1503 /* Complete config. */
1504 l2cap_send_rsp(conn, cmd->ident, L2CAP_CONF_RSP, l2cap_build_conf_rsp(sk, rsp, &result), rsp);
1509 /* Output config done */
1510 l2cap_pi(sk)->conf_state |= L2CAP_CONF_OUTPUT_DONE;
1512 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
1513 sk->sk_state = BT_CONNECTED;
1514 l2cap_chan_ready(sk);
1515 } else if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
1517 l2cap_send_req(conn, L2CAP_CONF_REQ, l2cap_build_conf_req(sk, req), req);
/* Handle the peer's configuration response. On UNACCEPT, resend the same
 * config request up to L2CAP_CONF_MAX_RETRIES (the comment explains this
 * is for qualification testing); after that — or on other failures — mark
 * the channel disconnecting with ECONNRESET, arm a 5-second timer, and
 * send a disconnect request. On success, once our input direction is also
 * done the channel becomes connected. */
1525 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1527 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
1528 u16 scid, flags, result;
1531 scid = __le16_to_cpu(rsp->scid);
1532 flags = __le16_to_cpu(rsp->flags);
1533 result = __le16_to_cpu(rsp->result);
1535 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x", scid, flags, result);
1537 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
1541 case L2CAP_CONF_SUCCESS:
1544 case L2CAP_CONF_UNACCEPT:
1545 if (++l2cap_pi(sk)->conf_retry < L2CAP_CONF_MAX_RETRIES) {
1547 /* It does not make sense to adjust L2CAP parameters
1548 * that are currently defined in the spec. We simply
1549 * resend config request that we sent earlier. It is
1550 * stupid, but it helps qualification testing which
1551 * expects at least some response from us. */
1552 l2cap_send_req(conn, L2CAP_CONF_REQ,
1553 l2cap_build_conf_req(sk, req), req);
1558 sk->sk_state = BT_DISCONN;
1559 sk->sk_err = ECONNRESET;
1560 l2cap_sock_set_timer(sk, HZ * 5);
1562 struct l2cap_disconn_req req;
1563 req.dcid = __cpu_to_le16(l2cap_pi(sk)->dcid);
1564 req.scid = __cpu_to_le16(l2cap_pi(sk)->scid);
1565 l2cap_send_req(conn, L2CAP_DISCONN_REQ, sizeof(req), &req);
1573 /* Input config done */
1574 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
1576 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
1577 sk->sk_state = BT_CONNECTED;
1578 l2cap_chan_ready(sk);
/* Handle a disconnect request: find the channel by our CID (the request's
 * dcid), echo a disconnect response with our scid/dcid swapped into place,
 * then shut down and delete the channel and kill the socket. */
1586 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1588 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
1589 struct l2cap_disconn_rsp rsp;
1593 scid = __le16_to_cpu(req->scid);
1594 dcid = __le16_to_cpu(req->dcid);
1596 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
1598 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid)))
1601 rsp.dcid = __cpu_to_le16(l2cap_pi(sk)->scid);
1602 rsp.scid = __cpu_to_le16(l2cap_pi(sk)->dcid);
1603 l2cap_send_rsp(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
1605 sk->sk_shutdown = SHUTDOWN_MASK;
1607 l2cap_chan_del(sk, ECONNRESET);
1610 l2cap_sock_kill(sk);
/* Handle a disconnect response to our earlier request: find the channel by
 * SCID, delete it with no error (clean close), and kill the socket. */
1614 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1616 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
1620 scid = __le16_to_cpu(rsp->scid);
1621 dcid = __le16_to_cpu(rsp->dcid);
1623 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
1625 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
1628 l2cap_chan_del(sk, 0);
1631 l2cap_sock_kill(sk);
/* Handle an information request: this implementation supports no info
 * types, so every query is answered with L2CAP_IR_NOTSUPP. */
1635 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1637 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
1638 struct l2cap_info_rsp rsp;
1641 type = __le16_to_cpu(req->type);
1643 BT_DBG("type 0x%4.4x", type);
1645 rsp.type = __cpu_to_le16(type);
1646 rsp.result = __cpu_to_le16(L2CAP_IR_NOTSUPP);
1647 l2cap_send_rsp(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp), &rsp);
/* Handle an information response: decoded and logged only; nothing in this
 * implementation consumes the data. */
1652 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1654 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
1657 type = __le16_to_cpu(rsp->type);
1658 result = __le16_to_cpu(rsp->result);
1660 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
/* Demultiplex the signalling channel (CID 0x0001): first mirror the frame
 * to raw sockets, then iterate the packed command headers, validating each
 * (length within remaining data, non-zero ident) and dispatching by code.
 * Echo requests are answered inline; a handler error produces a command
 * reject (reason mapping still a FIXME). */
1665 static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
1667 u8 *data = skb->data;
1669 struct l2cap_cmd_hdr cmd;
1672 l2cap_raw_recv(conn, skb);
1674 while (len >= L2CAP_CMD_HDR_SIZE) {
1675 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
1676 data += L2CAP_CMD_HDR_SIZE;
1677 len -= L2CAP_CMD_HDR_SIZE;
1679 cmd.len = __le16_to_cpu(cmd.len);
1681 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd.len, cmd.ident);
1683 if (cmd.len > len || !cmd.ident) {
1684 BT_DBG("corrupted command");
1689 case L2CAP_COMMAND_REJ:
1690 /* FIXME: We should process this */
1693 case L2CAP_CONN_REQ:
1694 err = l2cap_connect_req(conn, &cmd, data);
1697 case L2CAP_CONN_RSP:
1698 err = l2cap_connect_rsp(conn, &cmd, data);
1701 case L2CAP_CONF_REQ:
1702 err = l2cap_config_req(conn, &cmd, data);
1705 case L2CAP_CONF_RSP:
1706 err = l2cap_config_rsp(conn, &cmd, data);
1709 case L2CAP_DISCONN_REQ:
1710 err = l2cap_disconnect_req(conn, &cmd, data);
1713 case L2CAP_DISCONN_RSP:
1714 err = l2cap_disconnect_rsp(conn, &cmd, data);
1717 case L2CAP_ECHO_REQ:
1718 l2cap_send_rsp(conn, cmd.ident, L2CAP_ECHO_RSP, cmd.len, data);
1721 case L2CAP_ECHO_RSP:
1724 case L2CAP_INFO_REQ:
1725 err = l2cap_information_req(conn, &cmd, data);
1728 case L2CAP_INFO_RSP:
1729 err = l2cap_information_rsp(conn, &cmd, data);
1733 BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
1739 struct l2cap_cmd_rej rej;
1740 BT_DBG("error %d", err);
1742 /* FIXME: Map err to a valid reason */
1743 rej.reason = __cpu_to_le16(0);
1744 l2cap_send_rsp(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Deliver a connection-oriented data frame to the channel identified by
 * CID.  The frame is dropped if no channel exists, the socket is not
 * BT_CONNECTED, or the payload exceeds the negotiated incoming MTU.
 * NOTE(review): the drop/`goto done` paths and kfree_skb calls are
 * elided in this extract. */
1754 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
1758 sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
1760 BT_DBG("unknown cid 0x%4.4x", cid);
1764 BT_DBG("sk %p, len %d", sk, skb->len);
1766 if (sk->sk_state != BT_CONNECTED)
/* Enforce the incoming MTU negotiated at configuration time. */
1769 if (l2cap_pi(sk)->imtu < skb->len)
1772 /* If socket recv buffers overflows we drop data here
1773 * which is *bad* because L2CAP has to be reliable.
1774 * But we don't have any other choice. L2CAP doesn't
1775 * provide flow control mechanism. */
1777 if (!sock_queue_rcv_skb(sk, skb))
1784 if (sk) bh_unlock_sock(sk);
/* Deliver a connectionless (CID 0x0002) frame to a socket bound to the
 * given PSM on our local address; accepted in BT_BOUND or BT_CONNECTED
 * state and subject to the same incoming-MTU check as data channels. */
1788 static inline int l2cap_conless_channel(struct l2cap_conn *conn, u16 psm, struct sk_buff *skb)
1792 sk = l2cap_get_sock_by_psm(0, psm, conn->src);
1796 BT_DBG("sk %p, len %d", sk, skb->len);
1798 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
1801 if (l2cap_pi(sk)->imtu < skb->len)
1804 if (!sock_queue_rcv_skb(sk, skb))
1811 if (sk) bh_unlock_sock(sk);
/* Demultiplex one complete, reassembled L2CAP frame by CID:
 * signalling channel, connectionless channel (first two payload bytes
 * are the PSM, read unaligned), or a connection-oriented data channel.
 * NOTE(review): the switch/case lines for the CID values are elided in
 * this extract. */
1815 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
1817 struct l2cap_hdr *lh = (void *) skb->data;
/* Strip the basic L2CAP header; cid/len stay valid via `lh`. */
1820 skb_pull(skb, L2CAP_HDR_SIZE);
1821 cid = __le16_to_cpu(lh->cid);
1822 len = __le16_to_cpu(lh->len);
1824 BT_DBG("len %d, cid 0x%4.4x", len, cid);
1828 l2cap_sig_channel(conn, skb);
/* PSM may sit at an odd offset in the skb — use unaligned access. */
1832 psm = get_unaligned((u16 *) skb->data);
1834 l2cap_conless_channel(conn, psm, skb);
1838 l2cap_data_channel(conn, cid, skb);
1843 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI callback: an incoming ACL connection is being indicated.  Scan all
 * listening L2CAP sockets and build an accept/link-mode mask.  A listener
 * bound exactly to this adapter's address (lm1) takes precedence over
 * wildcard BDADDR_ANY listeners (lm2).  Non-ACL links are not ours. */
1845 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1847 int exact = 0, lm1 = 0, lm2 = 0;
1848 register struct sock *sk;
1849 struct hlist_node *node;
1851 if (type != ACL_LINK)
1854 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
1856 /* Find listening sockets and check their link_mode */
1857 read_lock(&l2cap_sk_list.lock);
1858 sk_for_each(sk, node, &l2cap_sk_list.head) {
1859 if (sk->sk_state != BT_LISTEN)
1862 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
1863 lm1 |= (HCI_LM_ACCEPT | l2cap_pi(sk)->link_mode);
1865 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
1866 lm2 |= (HCI_LM_ACCEPT | l2cap_pi(sk)->link_mode);
1868 read_unlock(&l2cap_sk_list.lock);
/* Exact-address listeners win over wildcard ones. */
1870 return exact ? lm1 : lm2;
/* HCI callback: ACL connection attempt completed.  On success (status 0)
 * attach/create the L2CAP connection object and kick off any pending
 * channels; on failure tear everything down with the mapped errno. */
1873 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
1875 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
1877 if (hcon->type != ACL_LINK)
1881 struct l2cap_conn *conn;
1883 conn = l2cap_conn_add(hcon, status);
1885 l2cap_conn_ready(conn);
/* bt_err() maps the HCI status code to a kernel errno. */
1887 l2cap_conn_del(hcon, bt_err(status));
/* HCI callback: the underlying ACL link went down — destroy the L2CAP
 * connection and all its channels with the mapped errno. */
1892 static int l2cap_disconn_ind(struct hci_conn *hcon, u8 reason)
1894 BT_DBG("hcon %p reason %d", hcon, reason);
1896 if (hcon->type != ACL_LINK)
1899 l2cap_conn_del(hcon, bt_err(reason));
/* HCI callback: authentication finished on this link.  Walk the channel
 * list and complete any channel parked in BT_CONNECT2 waiting for
 * security: on success move it to BT_CONFIG, on failure answer the peer
 * with L2CAP_CR_SEC_BLOCK and schedule a quick disconnect.  Channels that
 * additionally require encryption are skipped here — presumably finished
 * by l2cap_encrypt_cfm() below (TODO confirm against full file).
 * NOTE(review): the status check and result assignment on the success
 * path are elided in this extract. */
1903 static int l2cap_auth_cfm(struct hci_conn *hcon, u8 status)
1905 struct l2cap_chan_list *l;
1906 struct l2cap_conn *conn;
1907 struct l2cap_conn_rsp rsp;
1911 if (!(conn = hcon->l2cap_data))
1913 l = &conn->chan_list;
1915 BT_DBG("conn %p", conn);
1917 read_lock(&l->lock);
1919 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
1922 if (sk->sk_state != BT_CONNECT2 ||
1923 (l2cap_pi(sk)->link_mode & L2CAP_LM_ENCRYPT)) {
1929 sk->sk_state = BT_CONFIG;
1932 sk->sk_state = BT_DISCONN;
/* Short timer (HZ/10) so the blocked channel is torn down promptly. */
1933 l2cap_sock_set_timer(sk, HZ/10);
1934 result = L2CAP_CR_SEC_BLOCK;
/* Response CIDs are swapped relative to our local view. */
1937 rsp.scid = __cpu_to_le16(l2cap_pi(sk)->dcid);
1938 rsp.dcid = __cpu_to_le16(l2cap_pi(sk)->scid);
1939 rsp.result = __cpu_to_le16(result);
1940 rsp.status = __cpu_to_le16(0);
1941 l2cap_send_rsp(conn, l2cap_pi(sk)->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
1946 read_unlock(&l->lock);
/* HCI callback: encryption change finished on this link.  Mirrors
 * l2cap_auth_cfm(): every channel in BT_CONNECT2 is completed — success
 * moves it to BT_CONFIG, failure answers with L2CAP_CR_SEC_BLOCK and a
 * short disconnect timer.
 * NOTE(review): the status check and result assignment on the success
 * path are elided in this extract. */
1950 static int l2cap_encrypt_cfm(struct hci_conn *hcon, u8 status)
1952 struct l2cap_chan_list *l;
1953 struct l2cap_conn *conn;
1954 struct l2cap_conn_rsp rsp;
1958 if (!(conn = hcon->l2cap_data))
1960 l = &conn->chan_list;
1962 BT_DBG("conn %p", conn);
1964 read_lock(&l->lock);
1966 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
1969 if (sk->sk_state != BT_CONNECT2) {
1975 sk->sk_state = BT_CONFIG;
1978 sk->sk_state = BT_DISCONN;
1979 l2cap_sock_set_timer(sk, HZ/10);
1980 result = L2CAP_CR_SEC_BLOCK;
/* Response CIDs are swapped relative to our local view. */
1983 rsp.scid = __cpu_to_le16(l2cap_pi(sk)->dcid);
1984 rsp.dcid = __cpu_to_le16(l2cap_pi(sk)->scid);
1985 rsp.result = __cpu_to_le16(result);
1986 rsp.status = __cpu_to_le16(0);
1987 l2cap_send_rsp(conn, l2cap_pi(sk)->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
1992 read_unlock(&l->lock);
/* HCI callback: one ACL data packet arrived.  L2CAP frames may span
 * several ACL packets, so this reassembles: an ACL_START packet either
 * holds a complete frame (dispatched immediately) or opens rx_skb sized
 * from the L2CAP header's length; continuation packets are appended until
 * rx_len reaches zero, then the whole frame is dispatched.
 * NOTE(review): several guard lines, `goto drop` paths and kfree_skb
 * calls are elided in this extract. */
1996 static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
1998 struct l2cap_conn *conn = hcon->l2cap_data;
/* Lazily create the connection object on first data if needed. */
2000 if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
2003 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
2005 if (flags & ACL_START) {
2006 struct l2cap_hdr *hdr;
/* A start frame while rx_skb is still pending means the previous
 * frame never completed — discard the stale partial frame. */
2010 BT_ERR("Unexpected start frame (len %d)", skb->len);
2011 kfree_skb(conn->rx_skb);
2012 conn->rx_skb = NULL;
2017 BT_ERR("Frame is too short (len %d)", skb->len);
2021 hdr = (struct l2cap_hdr *) skb->data;
/* Total frame size = header-declared payload length + header itself. */
2022 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
2024 if (len == skb->len) {
2025 /* Complete frame received */
2026 l2cap_recv_frame(conn, skb);
2030 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
2032 if (skb->len > len) {
2033 BT_ERR("Frame is too long (len %d, expected len %d)",
2038 /* Allocate skb for the complete frame (with header) */
2039 if (!(conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC)))
2042 memcpy(skb_put(conn->rx_skb, skb->len), skb->data, skb->len);
2043 conn->rx_len = len - skb->len;
2045 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
/* Continuation with no frame in progress — protocol violation. */
2047 if (!conn->rx_len) {
2048 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
2052 if (skb->len > conn->rx_len) {
2053 BT_ERR("Fragment is too long (len %d, expected %d)",
2054 skb->len, conn->rx_len);
2055 kfree_skb(conn->rx_skb);
2056 conn->rx_skb = NULL;
2061 memcpy(skb_put(conn->rx_skb, skb->len), skb->data, skb->len);
2062 conn->rx_len -= skb->len;
2064 if (!conn->rx_len) {
2065 /* Complete frame received */
2066 l2cap_recv_frame(conn, conn->rx_skb);
2067 conn->rx_skb = NULL;
2076 /* ---- Proc fs support ---- */
2077 #ifdef CONFIG_PROC_FS
/* seq_file .start: take the socket-list lock (held until l2cap_seq_stop)
 * and walk the list to the *pos'th socket.
 * NOTE(review): the position-matching/return lines are elided here. */
2078 static void *l2cap_seq_start(struct seq_file *seq, loff_t *pos)
2081 struct hlist_node *node;
2084 read_lock_bh(&l2cap_sk_list.lock);
2086 sk_for_each(sk, node, &l2cap_sk_list.head)
/* seq_file .next: advance to the next socket in l2cap_sk_list
 * (body elided in this extract). */
2094 static void *l2cap_seq_next(struct seq_file *seq, void *e, loff_t *pos)
/* seq_file .stop: release the list lock taken in l2cap_seq_start(). */
2100 static void l2cap_seq_stop(struct seq_file *seq, void *e)
2102 read_unlock_bh(&l2cap_sk_list.lock);
/* seq_file .show: print one socket as a /proc line:
 * src dst state psm scid dcid imtu omtu link_mode. */
2105 static int l2cap_seq_show(struct seq_file *seq, void *e)
2107 struct sock *sk = e;
2108 struct l2cap_pinfo *pi = l2cap_pi(sk);
2110 seq_printf(seq, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d 0x%x\n",
2111 batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst),
2112 sk->sk_state, pi->psm, pi->scid, pi->dcid, pi->imtu,
2113 pi->omtu, pi->link_mode);
/* seq_file iterator callbacks for /proc/.../l2cap. */
2117 static struct seq_operations l2cap_seq_ops = {
2118 .start = l2cap_seq_start,
2119 .next = l2cap_seq_next,
2120 .stop = l2cap_seq_stop,
2121 .show = l2cap_seq_show
/* Open handler for the /proc entry: bind the file to the seq iterator. */
2124 static int l2cap_seq_open(struct inode *inode, struct file *file)
2126 return seq_open(file, &l2cap_seq_ops);
/* File operations for the /proc entry.
 * NOTE(review): a `.read = seq_read,` line is expected between .open and
 * .llseek but is elided in this extract — confirm against the full file. */
2129 static struct file_operations l2cap_seq_fops = {
2130 .owner = THIS_MODULE,
2131 .open = l2cap_seq_open,
2133 .llseek = seq_lseek,
2134 .release = seq_release,
/* Create the read-only "l2cap" entry under the Bluetooth proc directory
 * and hook up the seq_file operations. */
2137 static int __init l2cap_proc_init(void)
2139 struct proc_dir_entry *p = create_proc_entry("l2cap", S_IRUGO, proc_bt);
2142 p->owner = THIS_MODULE;
2143 p->proc_fops = &l2cap_seq_fops;
/* Remove the /proc entry created by l2cap_proc_init(). */
2147 static void __exit l2cap_proc_cleanup(void)
2149 remove_proc_entry("l2cap", proc_bt);
2152 #else /* CONFIG_PROC_FS */
/* No-op stubs used when the kernel is built without CONFIG_PROC_FS. */
2154 static int __init l2cap_proc_init(void)
2159 static void __exit l2cap_proc_cleanup(void)
2163 #endif /* CONFIG_PROC_FS */
/* Socket-layer operations for PF_BLUETOOTH/BTPROTO_L2CAP sockets.
 * Generic Bluetooth helpers (bt_sock_*) cover recvmsg/poll; operations
 * without L2CAP semantics use the sock_no_* stubs. */
2165 static struct proto_ops l2cap_sock_ops = {
2166 .family = PF_BLUETOOTH,
2167 .owner = THIS_MODULE,
2168 .release = l2cap_sock_release,
2169 .bind = l2cap_sock_bind,
2170 .connect = l2cap_sock_connect,
2171 .listen = l2cap_sock_listen,
2172 .accept = l2cap_sock_accept,
2173 .getname = l2cap_sock_getname,
2174 .sendmsg = l2cap_sock_sendmsg,
2175 .recvmsg = bt_sock_recvmsg,
2176 .poll = bt_sock_poll,
2177 .mmap = sock_no_mmap,
2178 .socketpair = sock_no_socketpair,
2179 .ioctl = sock_no_ioctl,
2180 .shutdown = l2cap_sock_shutdown,
2181 .setsockopt = l2cap_sock_setsockopt,
2182 .getsockopt = l2cap_sock_getsockopt
/* Registered with the Bluetooth socket layer so socket(PF_BLUETOOTH,
 * ..., BTPROTO_L2CAP) is routed to l2cap_sock_create(). */
2185 static struct net_proto_family l2cap_sock_family_ops = {
2186 .family = PF_BLUETOOTH,
2187 .owner = THIS_MODULE,
2188 .create = l2cap_sock_create,
/* HCI protocol hooks: the HCI core calls these for connection events,
 * security confirmations, and inbound ACL data on this protocol. */
2191 static struct hci_proto l2cap_hci_proto = {
2193 .id = HCI_PROTO_L2CAP,
2194 .connect_ind = l2cap_connect_ind,
2195 .connect_cfm = l2cap_connect_cfm,
2196 .disconn_ind = l2cap_disconn_ind,
2197 .auth_cfm = l2cap_auth_cfm,
2198 .encrypt_cfm = l2cap_encrypt_cfm,
2199 .recv_acldata = l2cap_recv_acldata
/* Module init: register the L2CAP socket family with the Bluetooth
 * socket layer, then register the protocol with the HCI core.
 * NOTE(review): the error-unwind/return lines and the l2cap_proc_init()
 * call are elided in this extract. */
2202 static int __init l2cap_init(void)
2206 if ((err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops))) {
2207 BT_ERR("L2CAP socket registration failed");
2211 if ((err = hci_register_proto(&l2cap_hci_proto))) {
2212 BT_ERR("L2CAP protocol registration failed");
2218 BT_INFO("L2CAP ver %s", VERSION);
2219 BT_INFO("L2CAP socket layer initialized");
/* Module exit: tear down in reverse order of l2cap_init() — remove the
 * /proc entry, then unregister the socket family and the HCI protocol. */
2224 static void __exit l2cap_exit(void)
2226 l2cap_proc_cleanup();
2228 /* Unregister socket and protocol */
2229 if (bt_sock_unregister(BTPROTO_L2CAP))
2230 BT_ERR("L2CAP socket unregistration failed");
2232 if (hci_unregister_proto(&l2cap_hci_proto))
2233 BT_ERR("L2CAP protocol unregistration failed");
2236 void l2cap_load(void)
2238 /* Dummy function to trigger automatic L2CAP module loading by
2239 * other modules that use L2CAP sockets but do not use any other
2240 * symbols from it. */
/* Export l2cap_load so dependent modules (e.g. RFCOMM) pull this module
 * in automatically via symbol dependency. */
2243 EXPORT_SYMBOL(l2cap_load);
2245 module_init(l2cap_init);
2246 module_exit(l2cap_exit);
2248 MODULE_AUTHOR("Maxim Krasnyansky <maxk@qualcomm.com>, Marcel Holtmann <marcel@holtmann.org>");
2249 MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
2250 MODULE_VERSION(VERSION);
2251 MODULE_LICENSE("GPL");
/* "bt-proto-0" — presumably BTPROTO_L2CAP == 0, enabling request_module
 * autoloading by protocol number; confirm against bluetooth.h. */
2252 MODULE_ALIAS("bt-proto-0");