2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth L2CAP core and sockets. */
27 #include <linux/config.h>
28 #include <linux/module.h>
30 #include <linux/types.h>
31 #include <linux/errno.h>
32 #include <linux/kernel.h>
33 #include <linux/major.h>
34 #include <linux/sched.h>
35 #include <linux/slab.h>
36 #include <linux/poll.h>
37 #include <linux/fcntl.h>
38 #include <linux/init.h>
39 #include <linux/interrupt.h>
40 #include <linux/socket.h>
41 #include <linux/skbuff.h>
42 #include <linux/proc_fs.h>
43 #include <linux/seq_file.h>
44 #include <linux/list.h>
47 #include <asm/system.h>
48 #include <asm/uaccess.h>
49 #include <asm/unaligned.h>
51 #include <net/bluetooth/bluetooth.h>
52 #include <net/bluetooth/hci_core.h>
53 #include <net/bluetooth/l2cap.h>
55 #ifndef CONFIG_BT_L2CAP_DEBUG
62 static struct proto_ops l2cap_sock_ops;
64 struct bt_sock_list l2cap_sk_list = {
65 .lock = RW_LOCK_UNLOCKED
68 static int l2cap_conn_del(struct hci_conn *conn, int err);
70 static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent);
71 static void l2cap_chan_del(struct sock *sk, int err);
73 static void __l2cap_sock_close(struct sock *sk, int reason);
74 static void l2cap_sock_close(struct sock *sk);
75 static void l2cap_sock_kill(struct sock *sk);
77 static int l2cap_send_req(struct l2cap_conn *conn, u8 code, u16 len, void *data);
78 static int l2cap_send_rsp(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data);
80 /* ---- L2CAP timers ---- */
/* sk_timer callback: the socket's timer expired, so force-close it with
 * ETIMEDOUT.  NOTE(review): extract is missing lines (numbering gaps) —
 * locking and the kill/put steps around the close are not visible here. */
81 static void l2cap_sock_timeout(unsigned long arg)
83 struct sock *sk = (struct sock *) arg;
85 BT_DBG("sock %p state %d", sk, sk->sk_state);
88 __l2cap_sock_close(sk, ETIMEDOUT);
/* (Re)arm the socket timer to fire `timeout` jiffies from now. */
95 static void l2cap_sock_set_timer(struct sock *sk, long timeout)
97 BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
98 sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
/* Cancel a pending socket timer, if any. */
101 static void l2cap_sock_clear_timer(struct sock *sk)
103 BT_DBG("sock %p state %d", sk, sk->sk_state);
104 sk_stop_timer(sk, &sk->sk_timer);
/* One-time timer setup: route sk->sk_timer expiry to l2cap_sock_timeout,
 * passing the socket itself as the callback argument. */
107 static void l2cap_sock_init_timer(struct sock *sk)
109 init_timer(&sk->sk_timer);
110 sk->sk_timer.function = l2cap_sock_timeout;
111 sk->sk_timer.data = (unsigned long)sk;
114 /* ---- L2CAP connections ---- */
/* Create (or reuse) the per-ACL-link L2CAP connection object.
 * If hcon already has l2cap_data, that existing conn is reused.
 * Allocated GFP_ATOMIC because this can run from HCI event context.
 * NOTE(review): extract is missing lines — the return statements and the
 * use of `status` are not visible here. */
115 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
117 struct l2cap_conn *conn;
119 if ((conn = hcon->l2cap_data))
125 if (!(conn = kmalloc(sizeof(struct l2cap_conn), GFP_ATOMIC)))
127 memset(conn, 0, sizeof(struct l2cap_conn));
129 hcon->l2cap_data = conn;
/* MTU and addresses come straight from the underlying HCI device/link. */
132 conn->mtu = hcon->hdev->acl_mtu;
133 conn->src = &hcon->hdev->bdaddr;
134 conn->dst = &hcon->dst;
136 spin_lock_init(&conn->lock);
137 conn->chan_list.lock = RW_LOCK_UNLOCKED;
139 BT_DBG("hcon %p conn %p", hcon, conn);
/* Tear down the L2CAP connection attached to an HCI link: free any
 * partially reassembled frame, delete every channel on the connection
 * (propagating `err` to their owners), then detach from the hcon.
 * NOTE(review): lines missing from this extract — per-socket lock/kill
 * handling inside the loop and the final kfree/return are not visible. */
143 static int l2cap_conn_del(struct hci_conn *hcon, int err)
145 struct l2cap_conn *conn;
148 if (!(conn = hcon->l2cap_data))
151 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
154 kfree_skb(conn->rx_skb);
157 while ((sk = conn->chan_list.head)) {
159 l2cap_chan_del(sk, err);
164 hcon->l2cap_data = NULL;
/* Locked wrapper around __l2cap_chan_add(): links `sk` into the
 * connection's channel list under the list write lock. */
169 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
171 struct l2cap_chan_list *l = &conn->chan_list;
172 write_lock(&l->lock);
173 __l2cap_chan_add(conn, sk, parent);
174 write_unlock(&l->lock);
177 /* ---- Socket interface ---- */
/* Find a socket bound to exactly this (psm, source bdaddr) pair.
 * Caller must hold l2cap_sk_list.lock.  Used for bind-time conflict
 * detection, hence the match on `sport` (locally bound PSM). */
178 static struct sock *__l2cap_get_sock_by_addr(u16 psm, bdaddr_t *src)
181 struct hlist_node *node;
182 sk_for_each(sk, node, &l2cap_sk_list.head)
183 if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
190 /* Find socket with psm and source bdaddr.
191 * Returns closest match.
/* Find a socket listening on `psm` for `src`, preferring an exact source
 * address match; a BDADDR_ANY listener is remembered in sk1 as the
 * closest fallback.  state==0 matches any socket state.
 * Caller must hold l2cap_sk_list.lock. */
193 static struct sock *__l2cap_get_sock_by_psm(int state, u16 psm, bdaddr_t *src)
195 struct sock *sk = NULL, *sk1 = NULL;
196 struct hlist_node *node;
198 sk_for_each(sk, node, &l2cap_sk_list.head) {
199 if (state && sk->sk_state != state)
202 if (l2cap_pi(sk)->psm == psm) {
/* Exact source address match wins immediately. */
204 if (!bacmp(&bt_sk(sk)->src, src))
/* Wildcard listener: keep as fallback candidate. */
208 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
/* node != NULL means the loop broke early on an exact match. */
212 return node ? sk : sk1;
215 /* Find socket with given address (psm, src).
216 * Returns locked socket */
/* Locking wrapper: look up a socket by (state, psm, src) and return it
 * with its bh lock held (caller must bh_unlock_sock). */
217 static inline struct sock *l2cap_get_sock_by_psm(int state, u16 psm, bdaddr_t *src)
220 read_lock(&l2cap_sk_list.lock);
221 s = __l2cap_get_sock_by_psm(state, psm, src);
222 if (s) bh_lock_sock(s);
223 read_unlock(&l2cap_sk_list.lock);
/* sk_destruct callback: drop any queued skbs and free the protocol
 * private data when the socket is finally destroyed. */
227 static void l2cap_sock_destruct(struct sock *sk)
231 skb_queue_purge(&sk->sk_receive_queue);
232 skb_queue_purge(&sk->sk_write_queue);
235 kfree(sk->sk_protinfo);
/* Close every connection still sitting on a listening socket's accept
 * queue, then mark the parent closed and zapped. */
238 static void l2cap_sock_cleanup_listen(struct sock *parent)
242 BT_DBG("parent %p", parent);
244 /* Close not yet accepted channels */
245 while ((sk = bt_accept_dequeue(parent, NULL)))
246 l2cap_sock_close(sk);
248 parent->sk_state = BT_CLOSED;
249 parent->sk_zapped = 1;
252 /* Kill socket (only if zapped and orphan)
253 * Must be called on unlocked socket.
/* Kill socket (only if zapped and orphan: no attached struct socket).
 * Must be called on an unlocked socket.  NOTE(review): the final
 * sock_put that releases the reference is in a line missing from this
 * extract. */
255 static void l2cap_sock_kill(struct sock *sk)
257 if (!sk->sk_zapped || sk->sk_socket)
260 BT_DBG("sk %p state %d", sk, sk->sk_state);
262 /* Kill poor orphan */
263 bt_sock_unlink(&l2cap_sk_list, sk);
264 sock_set_flag(sk, SOCK_DEAD);
/* State-dependent close.  Listening sockets drain their accept queue;
 * connected SOCK_SEQPACKET channels send an L2CAP Disconnect Request and
 * wait (timer armed with sk_sndtimeo) for the response; everything else
 * is deleted from the channel list immediately with `reason`.
 * NOTE(review): the case labels and several closing braces fall in lines
 * missing from this extract. */
268 static void __l2cap_sock_close(struct sock *sk, int reason)
270 BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
272 switch (sk->sk_state) {
274 l2cap_sock_cleanup_listen(sk);
280 if (sk->sk_type == SOCK_SEQPACKET) {
281 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
282 struct l2cap_disconn_req req;
284 sk->sk_state = BT_DISCONN;
285 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
287 req.dcid = __cpu_to_le16(l2cap_pi(sk)->dcid);
288 req.scid = __cpu_to_le16(l2cap_pi(sk)->scid);
289 l2cap_send_req(conn, L2CAP_DISCONN_REQ, sizeof(req), &req);
291 l2cap_chan_del(sk, reason);
297 l2cap_chan_del(sk, reason);
306 /* Must be called on unlocked socket. */
/* Must be called on unlocked socket: stop the timer and close with
 * ECONNRESET.  NOTE(review): the lock/unlock and kill calls around the
 * close are in lines missing from this extract. */
307 static void l2cap_sock_close(struct sock *sk)
309 l2cap_sock_clear_timer(sk);
311 __l2cap_sock_close(sk, ECONNRESET);
316 static void l2cap_sock_init(struct sock *sk, struct sock *parent)
318 struct l2cap_pinfo *pi = l2cap_pi(sk);
323 sk->sk_type = parent->sk_type;
324 pi->imtu = l2cap_pi(parent)->imtu;
325 pi->omtu = l2cap_pi(parent)->omtu;
326 pi->link_mode = l2cap_pi(parent)->link_mode;
328 pi->imtu = L2CAP_DEFAULT_MTU;
333 /* Default config options */
334 pi->conf_mtu = L2CAP_DEFAULT_MTU;
335 pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
/* Allocate a struct sock with l2cap_pinfo private area, hook up the
 * destructor and timer, and link it into the global L2CAP socket list.
 * `prio` is the allocation gfp mask (GFP_KERNEL or GFP_ATOMIC).
 * NOTE(review): NULL-check and return are in lines missing from this
 * extract. */
338 static struct sock *l2cap_sock_alloc(struct socket *sock, int proto, int prio)
342 sk = bt_sock_alloc(sock, proto, sizeof(struct l2cap_pinfo), prio);
346 sk_set_owner(sk, THIS_MODULE);
348 sk->sk_destruct = l2cap_sock_destruct;
/* Default send timeout doubles as the connect/disconnect timer period. */
349 sk->sk_sndtimeo = L2CAP_CONN_TIMEOUT;
351 sk->sk_protocol = proto;
352 sk->sk_state = BT_OPEN;
354 l2cap_sock_init_timer(sk);
356 bt_sock_link(&l2cap_sk_list, sk);
/* socket(2) backend: only SEQPACKET, DGRAM and RAW types are supported,
 * and RAW additionally requires CAP_NET_RAW. */
360 static int l2cap_sock_create(struct socket *sock, int protocol)
364 BT_DBG("sock %p", sock);
366 sock->state = SS_UNCONNECTED;
368 if (sock->type != SOCK_SEQPACKET && sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
369 return -ESOCKTNOSUPPORT;
371 if (sock->type == SOCK_RAW && !capable(CAP_NET_RAW))
374 sock->ops = &l2cap_sock_ops;
376 sk = l2cap_sock_alloc(sock, protocol, GFP_KERNEL);
380 l2cap_sock_init(sk, NULL);
/* bind(2): validate the address family, require state BT_OPEN, reject a
 * PSM already bound to the same source address, then record the source
 * address and PSM and move to BT_BOUND.  The sk_list lock serializes
 * the conflict check against concurrent binds.
 * NOTE(review): lock_sock/release_sock and the EADDRINUSE error path
 * are in lines missing from this extract. */
384 static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
386 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
387 struct sock *sk = sock->sk;
390 BT_DBG("sk %p, %s %d", sk, batostr(&la->l2_bdaddr), la->l2_psm);
392 if (!addr || addr->sa_family != AF_BLUETOOTH)
397 if (sk->sk_state != BT_OPEN) {
402 write_lock_bh(&l2cap_sk_list.lock);
404 if (la->l2_psm && __l2cap_get_sock_by_addr(la->l2_psm, &la->l2_bdaddr)) {
407 /* Save source address */
408 bacpy(&bt_sk(sk)->src, &la->l2_bdaddr);
409 l2cap_pi(sk)->psm = la->l2_psm;
410 l2cap_pi(sk)->sport = la->l2_psm;
411 sk->sk_state = BT_BOUND;
414 write_unlock_bh(&l2cap_sk_list.lock);
/* Establish the lower-layer ACL link and start the L2CAP connect
 * procedure.  Routes via hci_get_route(), creates/attaches the
 * l2cap_conn, links the channel and arms the connect timer.  If the ACL
 * is already up, a SEQPACKET socket sends the Connection Request now
 * (otherwise l2cap_conn_ready() will, once the ACL completes); non-
 * SEQPACKET sockets go straight to BT_CONNECTED.
 * NOTE(review): error paths (hci_connect/l2cap_conn_add failures) and
 * hci_dev_put are in lines missing from this extract. */
421 static int l2cap_do_connect(struct sock *sk)
423 bdaddr_t *src = &bt_sk(sk)->src;
424 bdaddr_t *dst = &bt_sk(sk)->dst;
425 struct l2cap_conn *conn;
426 struct hci_conn *hcon;
427 struct hci_dev *hdev;
430 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst), l2cap_pi(sk)->psm);
432 if (!(hdev = hci_get_route(dst, src)))
433 return -EHOSTUNREACH;
435 hci_dev_lock_bh(hdev);
439 hcon = hci_connect(hdev, ACL_LINK, dst);
443 conn = l2cap_conn_add(hcon, 0);
451 /* Update source addr of the socket */
452 bacpy(src, conn->src);
454 l2cap_chan_add(conn, sk, NULL);
456 sk->sk_state = BT_CONNECT;
457 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
459 if (hcon->state == BT_CONNECTED) {
460 if (sk->sk_type == SOCK_SEQPACKET) {
461 struct l2cap_conn_req req;
462 req.scid = __cpu_to_le16(l2cap_pi(sk)->scid);
463 req.psm = l2cap_pi(sk)->psm;
464 l2cap_send_req(conn, L2CAP_CONN_REQ, sizeof(req), &req);
/* ACL already up and not SEQPACKET: connection is complete. */
466 l2cap_sock_clear_timer(sk);
467 sk->sk_state = BT_CONNECTED;
472 hci_dev_unlock_bh(hdev);
/* connect(2): validate the address (SEQPACKET requires a PSM), reject
 * sockets already connecting/connected, record the destination, kick
 * off l2cap_do_connect() and then block until BT_CONNECTED (honouring
 * O_NONBLOCK via sock_sndtimeo).
 * NOTE(review): lock/release and several error-path lines are missing
 * from this extract. */
477 static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
479 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
480 struct sock *sk = sock->sk;
487 if (addr->sa_family != AF_BLUETOOTH || alen < sizeof(struct sockaddr_l2)) {
/* Connection-oriented sockets must name a PSM to connect to. */
492 if (sk->sk_type == SOCK_SEQPACKET && !la->l2_psm) {
497 switch(sk->sk_state) {
501 /* Already connecting */
505 /* Already connected */
518 /* Set destination address and psm */
519 bacpy(&bt_sk(sk)->dst, &la->l2_bdaddr);
520 l2cap_pi(sk)->psm = la->l2_psm;
522 if ((err = l2cap_do_connect(sk)))
526 err = bt_sock_wait_state(sk, BT_CONNECTED,
527 sock_sndtimeo(sk, flags & O_NONBLOCK));
/* listen(2): only bound SEQPACKET sockets may listen.  If the socket
 * was bound without a PSM, auto-assign one from the dynamic range
 * 0x1001-0x10ff (odd values only, per the L2CAP PSM rules), skipping
 * PSMs already bound on the same source address.
 * NOTE(review): lock/release and the no-free-PSM error path are in
 * lines missing from this extract. */
533 static int l2cap_sock_listen(struct socket *sock, int backlog)
535 struct sock *sk = sock->sk;
538 BT_DBG("sk %p backlog %d", sk, backlog);
542 if (sk->sk_state != BT_BOUND || sock->type != SOCK_SEQPACKET) {
547 if (!l2cap_pi(sk)->psm) {
548 bdaddr_t *src = &bt_sk(sk)->src;
553 write_lock_bh(&l2cap_sk_list.lock);
555 for (psm = 0x1001; psm < 0x1100; psm += 2)
556 if (!__l2cap_get_sock_by_addr(psm, src)) {
557 l2cap_pi(sk)->psm = htobs(psm);
558 l2cap_pi(sk)->sport = htobs(psm);
563 write_unlock_bh(&l2cap_sk_list.lock);
569 sk->sk_max_ack_backlog = backlog;
570 sk->sk_ack_backlog = 0;
571 sk->sk_state = BT_LISTEN;
/* accept(2): sleep (wake-one, interruptible) on the listener's wait
 * queue until a connection appears on the accept queue, the timeout
 * runs out, the listener leaves BT_LISTEN, or a signal arrives.
 * NOTE(review): lock/release, -EAGAIN timeout check and the final
 * `done:` label/return are in lines missing from this extract. */
578 static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
580 DECLARE_WAITQUEUE(wait, current);
581 struct sock *sk = sock->sk, *nsk;
587 if (sk->sk_state != BT_LISTEN) {
592 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
594 BT_DBG("sk %p timeo %ld", sk, timeo);
596 /* Wait for an incoming connection. (wake-one). */
597 add_wait_queue_exclusive(sk->sk_sleep, &wait);
598 while (!(nsk = bt_accept_dequeue(sk, newsock))) {
599 set_current_state(TASK_INTERRUPTIBLE);
606 timeo = schedule_timeout(timeo);
/* Re-check listener state after sleeping: it may have been closed. */
609 if (sk->sk_state != BT_LISTEN) {
614 if (signal_pending(current)) {
615 err = sock_intr_errno(timeo);
619 set_current_state(TASK_RUNNING);
620 remove_wait_queue(sk->sk_sleep, &wait);
625 newsock->state = SS_CONNECTED;
627 BT_DBG("new socket %p", nsk);
/* getsockname/getpeername: fill a sockaddr_l2 with the peer address
 * (peer != 0) or the local address, plus the channel's PSM. */
634 static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
636 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
637 struct sock *sk = sock->sk;
639 BT_DBG("sock %p, sk %p", sock, sk);
641 addr->sa_family = AF_BLUETOOTH;
642 *len = sizeof(struct sockaddr_l2);
645 bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
647 bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
649 la->l2_psm = l2cap_pi(sk)->psm;
/* Build and transmit one L2CAP frame from user iovec data.  The first
 * skb carries the L2CAP basic header (plus a 2-byte PSM for
 * connectionless SOCK_DGRAM); data beyond the ACL MTU is chained as
 * frag_list continuation skbs, then the whole frame goes out via
 * hci_send_acl().  Returns bytes sent or a negative error.
 * NOTE(review): allocation-failure paths, loop braces and the fail/
 * free-skb exit are in lines missing from this extract. */
653 static inline int l2cap_do_send(struct sock *sk, struct msghdr *msg, int len)
655 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
656 struct sk_buff *skb, **frag;
657 int err, hlen, count, sent=0;
658 struct l2cap_hdr *lh;
660 BT_DBG("sk %p len %d", sk, len);
662 /* First fragment (with L2CAP header) */
663 if (sk->sk_type == SOCK_DGRAM)
664 hlen = L2CAP_HDR_SIZE + 2;
666 hlen = L2CAP_HDR_SIZE;
668 count = min_t(unsigned int, (conn->mtu - hlen), len);
670 skb = bt_skb_send_alloc(sk, hlen + count,
671 msg->msg_flags & MSG_DONTWAIT, &err);
675 /* Create L2CAP header */
676 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
677 lh->cid = __cpu_to_le16(l2cap_pi(sk)->dcid);
678 lh->len = __cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
/* Connectionless frames carry the destination PSM after the header. */
680 if (sk->sk_type == SOCK_DGRAM)
681 put_unaligned(l2cap_pi(sk)->psm, (u16 *) skb_put(skb, 2));
683 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count)) {
691 /* Continuation fragments (no L2CAP header) */
692 frag = &skb_shinfo(skb)->frag_list;
694 count = min_t(unsigned int, conn->mtu, len);
696 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
700 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count)) {
708 frag = &(*frag)->next;
711 if ((err = hci_send_acl(conn->hcon, skb, 0)) < 0)
/* sendmsg(2): reject pending socket errors and MSG_OOB, enforce the
 * negotiated outgoing MTU, then hand off to l2cap_do_send() if the
 * channel is connected.
 * NOTE(review): -EOPNOTSUPP/-EINVAL returns, lock/release and the
 * not-connected error path are in lines missing from this extract. */
721 static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
722 struct msghdr *msg, size_t len)
724 struct sock *sk = sock->sk;
727 BT_DBG("sock %p, sk %p", sock, sk);
730 return sock_error(sk);
732 if (msg->msg_flags & MSG_OOB)
735 /* Check outgoing MTU */
736 if (len > l2cap_pi(sk)->omtu)
741 if (sk->sk_state == BT_CONNECTED)
742 err = l2cap_do_send(sk, msg, len);
/* setsockopt(2): the visible options copy an l2cap_options struct
 * (imtu/omtu) and a u32 link mode from user space.
 * NOTE(review): the case labels (presumably L2CAP_OPTIONS / L2CAP_LM),
 * lock/release and -EFAULT returns fall in lines missing from this
 * extract. */
750 static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, int optlen)
752 struct sock *sk = sock->sk;
753 struct l2cap_options opts;
763 len = min_t(unsigned int, sizeof(opts), optlen);
764 if (copy_from_user((char *)&opts, optval, len)) {
768 l2cap_pi(sk)->imtu = opts.imtu;
769 l2cap_pi(sk)->omtu = opts.omtu;
773 if (get_user(opt, (u32 __user *)optval)) {
778 l2cap_pi(sk)->link_mode = opt;
/* getsockopt(2): returns the channel options (MTUs, flush timeout), the
 * link mode word, or connection info (HCI handle etc.) — the last only
 * on a connected socket.
 * NOTE(review): case labels, lock/release, -EFAULT/-ENOTCONN returns
 * and part of the cinfo fill are in lines missing from this extract. */
790 static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
792 struct sock *sk = sock->sk;
793 struct l2cap_options opts;
794 struct l2cap_conninfo cinfo;
797 if (get_user(len, optlen))
804 opts.imtu = l2cap_pi(sk)->imtu;
805 opts.omtu = l2cap_pi(sk)->omtu;
806 opts.flush_to = l2cap_pi(sk)->flush_to;
808 len = min_t(unsigned int, len, sizeof(opts));
809 if (copy_to_user(optval, (char *)&opts, len))
815 if (put_user(l2cap_pi(sk)->link_mode, (u32 __user *)optval))
/* Connection info is only meaningful once connected. */
820 if (sk->sk_state != BT_CONNECTED) {
825 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
827 len = min_t(unsigned int, len, sizeof(cinfo));
828 if (copy_to_user(optval, (char *)&cinfo, len))
/* shutdown(2): close the channel once (guarded by sk_shutdown) and, if
 * SO_LINGER is set, wait up to sk_lingertime for BT_CLOSED. */
842 static int l2cap_sock_shutdown(struct socket *sock, int how)
844 struct sock *sk = sock->sk;
847 BT_DBG("sock %p, sk %p", sock, sk);
853 if (!sk->sk_shutdown) {
854 sk->sk_shutdown = SHUTDOWN_MASK;
855 l2cap_sock_clear_timer(sk);
856 __l2cap_sock_close(sk, 0);
858 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
859 err = bt_sock_wait_state(sk, BT_CLOSED, sk->sk_lingertime);
/* release (close(2)): shut the socket down fully; the orphaned socket
 * is then killed (sock_orphan/l2cap_sock_kill are in lines missing
 * from this extract). */
865 static int l2cap_sock_release(struct socket *sock)
867 struct sock *sk = sock->sk;
870 BT_DBG("sock %p, sk %p", sock, sk);
875 err = l2cap_sock_shutdown(sock, 2);
882 /* ---- L2CAP channels ---- */
/* Walk the connection's channel list for the socket whose *destination*
 * CID equals `cid`.  Caller must hold the list lock. */
883 static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
886 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
887 if (l2cap_pi(s)->dcid == cid)
/* Walk the connection's channel list for the socket whose *source* CID
 * equals `cid`.  Caller must hold the list lock. */
893 static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
896 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
897 if (l2cap_pi(s)->scid == cid)
903 /* Find channel with given SCID.
904 * Returns locked socket */
/* Find channel with given SCID and return the socket with its bh lock
 * held (caller must bh_unlock_sock).  The list read lock is taken here
 * (the matching read_lock is in a line missing from this extract). */
905 static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
909 s = __l2cap_get_chan_by_scid(l, cid);
910 if (s) bh_lock_sock(s);
911 read_unlock(&l->lock);
/* Allocate the first free source CID on this connection by linear scan.
 * NOTE(review): the starting CID value and return are in lines missing
 * from this extract (dynamic CIDs start at 0x0040 per the L2CAP spec —
 * confirm against the full file). */
915 static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
919 for (; cid < 0xffff; cid++) {
920 if(!__l2cap_get_chan_by_scid(l, cid))
/* Push `sk` onto the front of the connection's doubly linked channel
 * list.  Caller must hold the list write lock. */
927 static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
932 l2cap_pi(l->head)->prev_c = sk;
934 l2cap_pi(sk)->next_c = l->head;
935 l2cap_pi(sk)->prev_c = NULL;
/* Remove `sk` from the connection's channel list, fixing up the
 * neighbours' links, under the list write lock.  (The head-update case
 * falls in lines missing from this extract.) */
939 static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
941 struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;
943 write_lock(&l->lock);
948 l2cap_pi(next)->prev_c = prev;
950 l2cap_pi(prev)->next_c = next;
951 write_unlock(&l->lock);
/* Attach a socket to a connection as an L2CAP channel.  CID assignment
 * depends on socket type: SEQPACKET gets a freshly allocated dynamic
 * source CID; DGRAM uses the fixed connectionless CID 0x0002; RAW uses
 * the signalling CID 0x0001.  With a `parent`, the new socket is queued
 * on that listener's accept queue.  Caller must hold the channel list
 * write lock. */
956 static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
958 struct l2cap_chan_list *l = &conn->chan_list;
960 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn, l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
962 l2cap_pi(sk)->conn = conn;
964 if (sk->sk_type == SOCK_SEQPACKET) {
965 /* Alloc CID for connection-oriented socket */
966 l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
967 } else if (sk->sk_type == SOCK_DGRAM) {
968 /* Connectionless socket */
969 l2cap_pi(sk)->scid = 0x0002;
970 l2cap_pi(sk)->dcid = 0x0002;
971 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
973 /* Raw socket can send/recv signalling messages only */
974 l2cap_pi(sk)->scid = 0x0001;
975 l2cap_pi(sk)->dcid = 0x0001;
976 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
979 __l2cap_chan_link(l, sk);
982 bt_accept_enqueue(parent, sk);
986 * Must be called on the locked socket. */
/* Detach a channel from its connection and mark it closed.
 * Must be called on the locked socket.  Drops the hcon reference taken
 * when the channel was attached, sets sk_err from `err` (assignment
 * falls in a line missing from this extract), wakes an accepting
 * parent if there is one, and signals the state change. */
987 static void l2cap_chan_del(struct sock *sk, int err)
989 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
990 struct sock *parent = bt_sk(sk)->parent;
992 l2cap_sock_clear_timer(sk);
994 BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
997 /* Unlink from channel list */
998 l2cap_chan_unlink(&conn->chan_list, sk);
999 l2cap_pi(sk)->conn = NULL;
1000 hci_conn_put(conn->hcon);
1003 sk->sk_state = BT_CLOSED;
1010 parent->sk_data_ready(parent, 0);
1012 sk->sk_state_change(sk);
/* ACL link just completed: walk all channels on the connection.
 * Non-SEQPACKET channels become connected immediately; SEQPACKET
 * channels that were waiting in BT_CONNECT now send their deferred
 * L2CAP Connection Request.  (Per-socket bh locking inside the loop
 * falls in lines missing from this extract.) */
1015 static void l2cap_conn_ready(struct l2cap_conn *conn)
1017 struct l2cap_chan_list *l = &conn->chan_list;
1020 BT_DBG("conn %p", conn);
1022 read_lock(&l->lock);
1024 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
1027 if (sk->sk_type != SOCK_SEQPACKET) {
1028 l2cap_sock_clear_timer(sk);
1029 sk->sk_state = BT_CONNECTED;
1030 sk->sk_state_change(sk);
1031 } else if (sk->sk_state == BT_CONNECT) {
1032 struct l2cap_conn_req req;
1033 req.scid = __cpu_to_le16(l2cap_pi(sk)->scid);
1034 req.psm = l2cap_pi(sk)->psm;
1035 l2cap_send_req(conn, L2CAP_CONN_REQ, sizeof(req), &req);
1041 read_unlock(&l->lock);
/* Configuration finished on both sides: clear the config state and
 * timer, then wake whichever side is waiting — the connector (no
 * parent: state change to BT_CONNECTED) or the acceptor (parent:
 * data_ready on the listening socket). */
1044 static void l2cap_chan_ready(struct sock *sk)
1046 struct sock *parent = bt_sk(sk)->parent;
1048 BT_DBG("sk %p, parent %p", sk, parent);
1050 l2cap_pi(sk)->conf_state = 0;
1051 l2cap_sock_clear_timer(sk);
1054 /* Outgoing channel.
1055 * Wake up socket sleeping on connect.
1057 sk->sk_state = BT_CONNECTED;
1058 sk->sk_state_change(sk);
1060 /* Incoming channel.
1061 * Wake up socket sleeping on accept.
1063 parent->sk_data_ready(parent, 0);
1067 /* Copy frame to all raw sockets on that connection */
/* Copy frame to all raw sockets on that connection: each SOCK_RAW
 * channel (except the frame's originator) gets a clone of `skb` queued
 * on its receive queue; the clone is dropped if queueing fails.
 * (The kfree_skb on queue failure falls in a line missing from this
 * extract.) */
1068 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1070 struct l2cap_chan_list *l = &conn->chan_list;
1071 struct sk_buff *nskb;
1074 BT_DBG("conn %p", conn);
1076 read_lock(&l->lock);
1077 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
1078 if (sk->sk_type != SOCK_RAW)
1081 /* Don't send frame to the socket it came from */
1085 if (!(nskb = skb_clone(skb, GFP_ATOMIC)))
1088 if (sock_queue_rcv_skb(sk, nskb))
1091 read_unlock(&l->lock);
1094 /* ---- L2CAP signalling commands ---- */
/* Allocate the next signalling-command identifier for this connection,
 * wrapping within the kernel-reserved 1-199 range (200-254 belong to
 * user-space tools).  Serialized with conn->lock.  (The wrap-to-1
 * assignment falls in a line missing from this extract.) */
1095 static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
1099 /* Get next available identificator.
1100 * 1 - 199 are used by kernel.
1101 * 200 - 254 are used by utilities like l2ping, etc
1104 spin_lock(&conn->lock);
1106 if (++conn->tx_ident > 199)
1109 id = conn->tx_ident;
1111 spin_unlock(&conn->lock);
/* Build a signalling-channel (CID 0x0001) command skb: L2CAP header +
 * command header + `dlen` bytes of payload, fragmenting into frag_list
 * continuation skbs when the total exceeds the ACL MTU.  Returns the
 * head skb, or NULL on allocation failure (the NULL returns and the
 * fail/free path fall in lines missing from this extract). */
1116 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
1117 u8 code, u8 ident, u16 dlen, void *data)
1119 struct sk_buff *skb, **frag;
1120 struct l2cap_cmd_hdr *cmd;
1121 struct l2cap_hdr *lh;
1124 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d", conn, code, ident, dlen);
1126 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
1127 count = min_t(unsigned int, conn->mtu, len);
1129 skb = bt_skb_alloc(count, GFP_ATOMIC);
1133 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1134 lh->len = __cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
1135 lh->cid = __cpu_to_le16(0x0001);
1137 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
1140 cmd->len = __cpu_to_le16(dlen);
1143 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
1144 memcpy(skb_put(skb, count), data, count);
1150 /* Continuation fragments (no L2CAP header) */
1151 frag = &skb_shinfo(skb)->frag_list;
1153 count = min_t(unsigned int, conn->mtu, len);
1155 *frag = bt_skb_alloc(count, GFP_ATOMIC);
1159 memcpy(skb_put(*frag, count), data, count);
1164 frag = &(*frag)->next;
/* Send a signalling request: allocate a fresh ident, build the command
 * skb and push it out over the ACL link.  (The NULL-skb error return
 * falls in a line missing from this extract.) */
1174 static int l2cap_send_req(struct l2cap_conn *conn, u8 code, u16 len, void *data)
1176 u8 ident = l2cap_get_ident(conn);
1177 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
1179 BT_DBG("code 0x%2.2x", code);
1183 return hci_send_acl(conn->hcon, skb, 0);
/* Send a signalling response: like l2cap_send_req() but echoes the
 * requester's `ident` instead of allocating a new one. */
1186 static int l2cap_send_rsp(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
1188 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
1190 BT_DBG("code 0x%2.2x", code);
1194 return hci_send_acl(conn->hcon, skb, 0);
/* Parse one type-length-value configuration option at *ptr, returning
 * its total encoded length and writing the decoded value into *val
 * (1/2/4-byte options by value, larger ones by pointer).  Little-endian
 * on the wire.  (The switch on opt->len and the *ptr advance fall in
 * lines missing from this extract.) */
1197 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
1199 struct l2cap_conf_opt *opt = *ptr;
1202 len = L2CAP_CONF_OPT_SIZE + opt->len;
1210 *val = *((u8 *) opt->val);
1214 *val = __le16_to_cpu(*((u16 *)opt->val));
1218 *val = __le32_to_cpu(*((u32 *)opt->val));
/* Larger payloads are returned as a pointer into the buffer. */
1222 *val = (unsigned long) opt->val;
1226 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
/* Walk the option list of an incoming Configure Request and record the
 * peer's proposed MTU and flush timeout in the channel's pinfo.  QoS
 * and unknown options are currently skipped (unknown non-hint options
 * should be rejected — see the FIXME). */
1230 static inline void l2cap_parse_conf_req(struct sock *sk, void *data, int len)
1232 int type, hint, olen;
1236 BT_DBG("sk %p len %d", sk, len);
1238 while (len >= L2CAP_CONF_OPT_SIZE) {
1239 len -= l2cap_get_conf_opt(&ptr, &type, &olen, &val);
1245 case L2CAP_CONF_MTU:
1246 l2cap_pi(sk)->conf_mtu = val;
1249 case L2CAP_CONF_FLUSH_TO:
1250 l2cap_pi(sk)->flush_to = val;
1253 case L2CAP_CONF_QOS:
1260 /* FIXME: Reject unknown option */
/* Append one type-length-value option at *ptr (little-endian encoding
 * for 1/2/4-byte values, memcpy for larger payloads passed by pointer)
 * and advance *ptr past it.  (The opt->type/opt->len assignments and
 * switch fall in lines missing from this extract.) */
1266 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
1268 struct l2cap_conf_opt *opt = *ptr;
1270 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
1277 *((u8 *) opt->val) = val;
1281 *((u16 *) opt->val) = __cpu_to_le16(val);
1285 *((u32 *) opt->val) = __cpu_to_le32(val);
1289 memcpy(opt->val, (void *) val, len);
1293 *ptr += L2CAP_CONF_OPT_SIZE + len;
/* Fill `data` with a Configure Request for this channel: an MTU option
 * only when our incoming MTU differs from the default (flush timeout is
 * left out pending a real value — see FIXME).  Returns the total length
 * (the return, computed from ptr - data, falls in a line missing from
 * this extract). */
1296 static int l2cap_build_conf_req(struct sock *sk, void *data)
1298 struct l2cap_pinfo *pi = l2cap_pi(sk);
1299 struct l2cap_conf_req *req = data;
1300 void *ptr = req->data;
1302 BT_DBG("sk %p", sk);
1304 if (pi->imtu != L2CAP_DEFAULT_MTU)
1305 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
1307 /* FIXME. Need actual value of the flush timeout */
1308 //if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
1309 //   l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);
1311 req->dcid = __cpu_to_le16(pi->dcid);
1312 req->flags = __cpu_to_le16(0);
/* Configure output options and let the other side know which ones we
 * don't like: if the peer proposed an MTU below our current outgoing
 * MTU, reply UNACCEPT with our value; otherwise adopt the peer's MTU.
 * Returns the L2CAP config result code. */
1317 static inline int l2cap_conf_output(struct sock *sk, void **ptr)
1319 struct l2cap_pinfo *pi = l2cap_pi(sk);
1322 /* Configure output options and let the other side know
1323 * which ones we don't like. */
1324 if (pi->conf_mtu < pi->omtu) {
1325 l2cap_add_conf_opt(ptr, L2CAP_CONF_MTU, 2, pi->omtu);
1326 result = L2CAP_CONF_UNACCEPT;
1328 pi->omtu = pi->conf_mtu;
1331 BT_DBG("sk %p result %d", sk, result);
/* Fill `data` with a Configure Response.  result == NULL means the
 * request was incomplete (continuation flag set) and an empty success
 * response is built; otherwise l2cap_conf_output() evaluates the
 * peer's options and its verdict is stored through *result.  Returns
 * the total length (return falls in a line missing from this extract). */
1335 static int l2cap_build_conf_rsp(struct sock *sk, void *data, int *result)
1337 struct l2cap_conf_rsp *rsp = data;
1338 void *ptr = rsp->data;
1341 BT_DBG("sk %p complete %d", sk, result ? 1 : 0);
1344 *result = l2cap_conf_output(sk, &ptr);
1348 rsp->scid = __cpu_to_le16(l2cap_pi(sk)->dcid);
1349 rsp->result = __cpu_to_le16(result ? *result : 0);
1350 rsp->flags = __cpu_to_le16(flags);
/* Handle an incoming L2CAP Connection Request.
 * Finds a listener for the PSM (else respond BAD_PSM), checks backlog,
 * allocates and initializes a child socket, guards against a duplicate
 * remote CID, links the channel, and answers either "pending" (when
 * link-level auth/encryption must complete first) or success.  Always
 * ends by sending a Connection Response carrying result/status.
 * NOTE(review): many goto labels, error responses and unlock paths fall
 * in lines missing from this extract. */
1355 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1357 struct l2cap_chan_list *list = &conn->chan_list;
1358 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
1359 struct l2cap_conn_rsp rsp;
1360 struct sock *sk, *parent;
1361 int result = 0, status = 0;
1363 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
1366 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
1368 /* Check if we have socket listening on psm */
1369 parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
1371 result = L2CAP_CR_BAD_PSM;
1375 result = L2CAP_CR_NO_MEM;
1377 /* Check for backlog size */
1378 if (parent->sk_ack_backlog > parent->sk_max_ack_backlog) {
1379 BT_DBG("backlog full %d", parent->sk_ack_backlog);
1383 sk = l2cap_sock_alloc(NULL, BTPROTO_L2CAP, GFP_ATOMIC);
1387 write_lock(&list->lock);
1389 /* Check if we already have channel with that dcid */
1390 if (__l2cap_get_chan_by_dcid(list, scid)) {
1391 write_unlock(&list->lock);
1393 l2cap_sock_kill(sk);
1397 hci_conn_hold(conn->hcon);
1399 l2cap_sock_init(sk, parent);
1400 bacpy(&bt_sk(sk)->src, conn->src);
1401 bacpy(&bt_sk(sk)->dst, conn->dst);
1402 l2cap_pi(sk)->psm = psm;
1403 l2cap_pi(sk)->dcid = scid;
1405 __l2cap_chan_add(conn, sk, parent);
1406 dcid = l2cap_pi(sk)->scid;
1408 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
1410 /* Service level security */
1411 result = L2CAP_CR_PEND;
1412 status = L2CAP_CS_AUTHEN_PEND;
1413 sk->sk_state = BT_CONNECT2;
1414 l2cap_pi(sk)->ident = cmd->ident;
1416 if (l2cap_pi(sk)->link_mode & L2CAP_LM_ENCRYPT) {
1417 if (!hci_conn_encrypt(conn->hcon))
1419 } else if (l2cap_pi(sk)->link_mode & L2CAP_LM_AUTH) {
1420 if (!hci_conn_auth(conn->hcon))
/* No security requirement: accept immediately and start config. */
1424 sk->sk_state = BT_CONFIG;
1425 result = status = 0;
1428 write_unlock(&list->lock);
1431 bh_unlock_sock(parent);
1434 rsp.scid = __cpu_to_le16(scid);
1435 rsp.dcid = __cpu_to_le16(dcid);
1436 rsp.result = __cpu_to_le16(result);
1437 rsp.status = __cpu_to_le16(status);
1438 l2cap_send_rsp(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* Handle a Connection Response to our earlier request.  On SUCCESS,
 * record the peer's CID and immediately send our Configure Request; on
 * refusal the channel is torn down with ECONNREFUSED.  (The PEND case,
 * bh_unlock and returns fall in lines missing from this extract.) */
1442 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1444 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
1445 u16 scid, dcid, result, status;
1449 scid = __le16_to_cpu(rsp->scid);
1450 dcid = __le16_to_cpu(rsp->dcid);
1451 result = __le16_to_cpu(rsp->result);
1452 status = __le16_to_cpu(rsp->status);
1454 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
1456 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
1460 case L2CAP_CR_SUCCESS:
1461 sk->sk_state = BT_CONFIG;
1462 l2cap_pi(sk)->dcid = dcid;
1463 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
1465 l2cap_send_req(conn, L2CAP_CONF_REQ, l2cap_build_conf_req(sk, req), req);
1472 l2cap_chan_del(sk, ECONNREFUSED);
/* Handle an incoming Configure Request for one of our channels.  Parse
 * the peer's options, answer (empty response while the continuation
 * flag is set, full verdict once complete), then mark our output side
 * configured.  If input config is also done the channel becomes ready;
 * if we never sent our own request yet, send it now.
 * NOTE(review): the req/rsp buffer declarations, the result check and
 * bh_unlock/return fall in lines missing from this extract. */
1480 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1482 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
1488 dcid = __le16_to_cpu(req->dcid);
1489 flags = __le16_to_cpu(req->flags);
1491 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
1493 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid)))
1496 l2cap_parse_conf_req(sk, req->data, cmd->len - sizeof(*req));
1498 if (flags & 0x0001) {
1499 /* Incomplete config. Send empty response. */
1500 l2cap_send_rsp(conn, cmd->ident, L2CAP_CONF_RSP, l2cap_build_conf_rsp(sk, rsp, NULL), rsp);
1504 /* Complete config. */
1505 l2cap_send_rsp(conn, cmd->ident, L2CAP_CONF_RSP, l2cap_build_conf_rsp(sk, rsp, &result), rsp);
1510 /* Output config done */
1511 l2cap_pi(sk)->conf_state |= L2CAP_CONF_OUTPUT_DONE;
1513 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
1514 sk->sk_state = BT_CONNECTED;
1515 l2cap_chan_ready(sk);
1516 } else if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
1518 l2cap_send_req(conn, L2CAP_CONF_REQ, l2cap_build_conf_req(sk, req), req);
/* Handle a Configure Response to our request.  UNACCEPT is retried up
 * to L2CAP_CONF_MAX_RETRIES by resending the same request (parameters
 * are not renegotiated — see the inline rationale); after that, or on
 * other failures, the channel is moved to BT_DISCONN with ECONNRESET
 * and a Disconnect Request is sent (5s timer armed).  On success the
 * input side is marked configured and, if output is done too, the
 * channel becomes ready.
 * NOTE(review): the req buffer declaration, default case label and
 * bh_unlock/return fall in lines missing from this extract. */
1526 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1528 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
1529 u16 scid, flags, result;
1533 scid = __le16_to_cpu(rsp->scid);
1534 flags = __le16_to_cpu(rsp->flags);
1535 result = __le16_to_cpu(rsp->result);
1537 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x", scid, flags, result);
1539 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
1543 case L2CAP_CONF_SUCCESS:
1546 case L2CAP_CONF_UNACCEPT:
1547 if (++l2cap_pi(sk)->conf_retry < L2CAP_CONF_MAX_RETRIES) {
1549 /* It does not make sense to adjust L2CAP parameters
1550 * that are currently defined in the spec. We simply
1551 * resend config request that we sent earlier. It is
1552 * stupid, but it helps qualification testing which
1553 * expects at least some response from us. */
1554 l2cap_send_req(conn, L2CAP_CONF_REQ,
1555 l2cap_build_conf_req(sk, req), req);
1560 sk->sk_state = BT_DISCONN;
1561 sk->sk_err = ECONNRESET;
1562 l2cap_sock_set_timer(sk, HZ * 5);
1564 struct l2cap_disconn_req req;
1565 req.dcid = __cpu_to_le16(l2cap_pi(sk)->dcid);
1566 req.scid = __cpu_to_le16(l2cap_pi(sk)->scid);
1567 l2cap_send_req(conn, L2CAP_DISCONN_REQ, sizeof(req), &req);
1575 /* Input config done */
1576 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
1578 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
1579 sk->sk_state = BT_CONNECTED;
1580 l2cap_chan_ready(sk);
/* Handle an incoming Disconnect Request: look up the channel by our CID
 * (the peer's dcid), acknowledge with a Disconnect Response echoing both
 * CIDs, then tear the channel down with ECONNRESET and kill the socket
 * if it is an orphan. */
1588 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1590 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
1591 struct l2cap_disconn_rsp rsp;
1595 scid = __le16_to_cpu(req->scid);
1596 dcid = __le16_to_cpu(req->dcid);
1598 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
1600 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid)))
1603 rsp.dcid = __cpu_to_le16(l2cap_pi(sk)->scid);
1604 rsp.scid = __cpu_to_le16(l2cap_pi(sk)->dcid);
1605 l2cap_send_rsp(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
1607 sk->sk_shutdown = SHUTDOWN_MASK;
1609 l2cap_chan_del(sk, ECONNRESET);
1612 l2cap_sock_kill(sk);
/* Handle a Disconnect Response to our earlier request: delete the
 * channel (no error) and kill the socket if it is an orphan. */
1616 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1618 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
1622 scid = __le16_to_cpu(rsp->scid);
1623 dcid = __le16_to_cpu(rsp->dcid);
1625 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
1627 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
1629 l2cap_chan_del(sk, 0);
1632 l2cap_sock_kill(sk);
/* Demultiplex the signalling channel (CID 0x0001).  The raw frame is
 * first copied to RAW sockets, then each embedded command (header +
 * payload) is dispatched to its handler.  Malformed commands (length
 * overrun or zero ident) abort the loop; a handler error triggers a
 * Command Reject back to the peer.
 * NOTE(review): the cmd.code copy into the debug line implies the full
 * header is read at line 1646; data/len advancement past each payload
 * and the final kfree_skb fall in lines missing from this extract. */
1636 static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
1638 u8 *data = skb->data;
1640 struct l2cap_cmd_hdr cmd;
1643 l2cap_raw_recv(conn, skb);
1645 while (len >= L2CAP_CMD_HDR_SIZE) {
1646 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
1647 data += L2CAP_CMD_HDR_SIZE;
1648 len -= L2CAP_CMD_HDR_SIZE;
1650 cmd.len = __le16_to_cpu(cmd.len);
1652 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd.len, cmd.ident);
1654 if (cmd.len > len || !cmd.ident) {
1655 BT_DBG("corrupted command");
1660 case L2CAP_CONN_REQ:
1661 err = l2cap_connect_req(conn, &cmd, data);
1664 case L2CAP_CONN_RSP:
1665 err = l2cap_connect_rsp(conn, &cmd, data);
1668 case L2CAP_CONF_REQ:
1669 err = l2cap_config_req(conn, &cmd, data);
1672 case L2CAP_CONF_RSP:
1673 err = l2cap_config_rsp(conn, &cmd, data);
1676 case L2CAP_DISCONN_REQ:
1677 err = l2cap_disconnect_req(conn, &cmd, data);
1680 case L2CAP_DISCONN_RSP:
1681 err = l2cap_disconnect_rsp(conn, &cmd, data);
1684 case L2CAP_COMMAND_REJ:
1685 /* FIXME: We should process this */
1688 case L2CAP_ECHO_REQ:
1689 l2cap_send_rsp(conn, cmd.ident, L2CAP_ECHO_RSP, cmd.len, data);
1692 case L2CAP_ECHO_RSP:
1693 case L2CAP_INFO_REQ:
1694 case L2CAP_INFO_RSP:
1698 BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
/* Handler failed: tell the peer via Command Reject. */
1704 struct l2cap_cmd_rej rej;
1705 BT_DBG("error %d", err);
1707 /* FIXME: Map err to a valid reason. */
1708 rej.reason = __cpu_to_le16(0);
1709 l2cap_send_rsp(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Deliver a connection-oriented data frame to the socket owning CID `cid`.
 * Frames for unknown channels, non-connected sockets, or frames larger
 * than the negotiated incoming MTU are dropped.
 * NOTE(review): braces, goto labels and the drop/done paths are elided in
 * this extract; code lines are kept verbatim. */
1719 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
1723 sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
1725 BT_DBG("unknown cid 0x%4.4x", cid);
1729 BT_DBG("sk %p, len %d", sk, skb->len);
/* Only fully connected channels may receive data. */
1731 if (sk->sk_state != BT_CONNECTED)
/* Enforce the incoming MTU negotiated during configuration. */
1734 if (l2cap_pi(sk)->imtu < skb->len)
1737 /* If socket recv buffers overflows we drop data here
1738 * which is *bad* because L2CAP has to be reliable.
1739 * But we don't have any other choice. L2CAP doesn't
1740 * provide flow control mechanism */
1742 if (!sock_queue_rcv_skb(sk, skb))
/* sk may be NULL when the CID lookup failed above. */
1749 if (sk) bh_unlock_sock(sk);
/* Deliver a connectionless (CID 0x0002) frame to a socket listening on the
 * given PSM and local address.  Drops frames with no matching socket, a
 * socket in the wrong state, or a payload exceeding the incoming MTU.
 * NOTE(review): braces and drop/done paths are elided in this extract;
 * code lines are kept verbatim. */
1753 static inline int l2cap_conless_channel(struct l2cap_conn *conn, u16 psm, struct sk_buff *skb)
/* Match by PSM and the local (source) address of this connection. */
1757 sk = l2cap_get_sock_by_psm(0, psm, conn->src);
1761 BT_DBG("sk %p, len %d", sk, skb->len);
/* Connectionless reception is allowed for bound as well as connected
 * sockets. */
1763 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
1766 if (l2cap_pi(sk)->imtu < skb->len)
1769 if (!sock_queue_rcv_skb(sk, skb))
1776 if (sk) bh_unlock_sock(sk);
/* Dispatch one complete, reassembled L2CAP frame by channel ID:
 * signaling channel, connectionless channel (PSM-addressed), or a
 * connection-oriented data channel.
 * NOTE(review): the switch on `cid` and its case labels are elided in
 * this extract; code lines are kept verbatim. */
1780 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
1782 struct l2cap_hdr *lh = (void *) skb->data;
/* Strip the basic L2CAP header; payload starts at skb->data afterwards. */
1785 skb_pull(skb, L2CAP_HDR_SIZE);
1786 cid = __le16_to_cpu(lh->cid);
1787 len = __le16_to_cpu(lh->len);
1789 BT_DBG("len %d, cid 0x%4.4x", len, cid);
1793 l2cap_sig_channel(conn, skb);
/* Connectionless frames carry the PSM in the first two payload bytes;
 * it may be unaligned, hence get_unaligned(). */
1797 psm = get_unaligned((u16 *) skb->data);
1799 l2cap_conless_channel(conn, psm, skb);
1803 l2cap_data_channel(conn, cid, skb);
1808 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI callback: an incoming ACL connection is being set up.  Scan all
 * listening L2CAP sockets and compute the accept/link-mode mask.  Sockets
 * bound to the exact local adapter address take precedence (lm1) over
 * wildcard BDADDR_ANY listeners (lm2).
 * NOTE(review): braces, `continue` and the `exact++` line are elided in
 * this extract; code lines are kept verbatim. */
1810 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1812 int exact = 0, lm1 = 0, lm2 = 0;
1813 register struct sock *sk;
1814 struct hlist_node *node;
/* L2CAP only runs over ACL links. */
1816 if (type != ACL_LINK)
1819 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
1821 /* Find listening sockets and check their link_mode */
1822 read_lock(&l2cap_sk_list.lock);
1823 sk_for_each(sk, node, &l2cap_sk_list.head) {
1824 if (sk->sk_state != BT_LISTEN)
/* Exact match on this adapter's own address. */
1827 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
1828 lm1 |= (HCI_LM_ACCEPT | l2cap_pi(sk)->link_mode);
1830 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
1831 lm2 |= (HCI_LM_ACCEPT | l2cap_pi(sk)->link_mode);
1833 read_unlock(&l2cap_sk_list.lock);
/* Prefer the exact-address mask when any exact listener was found. */
1835 return exact ? lm1 : lm2;
/* HCI callback: ACL connection establishment completed.  On success,
 * create (or look up) the L2CAP connection object and kick pending
 * channels; on failure, tear everything down with the mapped error.
 * NOTE(review): braces and the status test around the success path are
 * elided in this extract; code lines are kept verbatim. */
1838 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
1840 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
1842 if (hcon->type != ACL_LINK)
1846 struct l2cap_conn *conn;
1848 conn = l2cap_conn_add(hcon, status);
1850 l2cap_conn_ready(conn);
/* Failure: propagate the HCI status as an errno to all channels. */
1852 l2cap_conn_del(hcon, bt_err(status));
/* HCI callback: the underlying ACL link went down.  Destroy the L2CAP
 * connection and all its channels, mapping the HCI reason to an errno.
 * NOTE(review): braces/returns are elided in this extract. */
1857 static int l2cap_disconn_ind(struct hci_conn *hcon, u8 reason)
1859 BT_DBG("hcon %p reason %d", hcon, reason);
1861 if (hcon->type != ACL_LINK)
1864 l2cap_conn_del(hcon, bt_err(reason));
/* HCI callback: authentication completed for this link.  Walk all channels
 * of the connection; those parked in BT_CONNECT2 (connect response
 * deferred pending security) are now answered.  Channels that also
 * require encryption are skipped here — they are resolved later in
 * l2cap_encrypt_cfm().  On auth success the channel moves to BT_CONFIG;
 * on failure it is scheduled for teardown with a "security block" result.
 * NOTE(review): the status test choosing success vs failure and several
 * braces/result assignments are elided in this extract; code lines are
 * kept verbatim. */
1868 static int l2cap_auth_cfm(struct hci_conn *hcon, u8 status)
1870 struct l2cap_chan_list *l;
1871 struct l2cap_conn *conn;
1872 struct l2cap_conn_rsp rsp;
1876 if (!(conn = hcon->l2cap_data))
1878 l = &conn->chan_list;
1880 BT_DBG("conn %p", conn);
1882 read_lock(&l->lock);
/* Channels are chained per-connection via next_c. */
1884 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
/* Only deferred (BT_CONNECT2) channels without the ENCRYPT
 * requirement are handled on auth completion. */
1887 if (sk->sk_state != BT_CONNECT2 ||
1888 (l2cap_pi(sk)->link_mode & L2CAP_LM_ENCRYPT)) {
1894 sk->sk_state = BT_CONFIG;
/* Auth failed: short timer so the socket is reaped quickly. */
1897 sk->sk_state = BT_DISCONN;
1898 l2cap_sock_set_timer(sk, HZ/10);
1899 result = L2CAP_CR_SEC_BLOCK;
/* Send the deferred Connect Response using the saved ident. */
1902 rsp.scid = __cpu_to_le16(l2cap_pi(sk)->dcid);
1903 rsp.dcid = __cpu_to_le16(l2cap_pi(sk)->scid);
1904 rsp.result = __cpu_to_le16(result);
1905 rsp.status = __cpu_to_le16(0);
1906 l2cap_send_rsp(conn, l2cap_pi(sk)->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
1911 read_unlock(&l->lock);
/* HCI callback: encryption change completed for this link.  Mirrors
 * l2cap_auth_cfm() but resolves ALL channels still parked in BT_CONNECT2
 * (including those that required encryption): success -> BT_CONFIG,
 * failure -> BT_DISCONN with a "security block" result, then the deferred
 * Connect Response is sent.
 * NOTE(review): the status test and several braces/result assignments are
 * elided in this extract; code lines are kept verbatim. */
1915 static int l2cap_encrypt_cfm(struct hci_conn *hcon, u8 status)
1917 struct l2cap_chan_list *l;
1918 struct l2cap_conn *conn;
1919 struct l2cap_conn_rsp rsp;
1923 if (!(conn = hcon->l2cap_data))
1925 l = &conn->chan_list;
1927 BT_DBG("conn %p", conn);
1929 read_lock(&l->lock);
1931 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
1934 if (sk->sk_state != BT_CONNECT2) {
1940 sk->sk_state = BT_CONFIG;
/* Encryption failed: schedule quick teardown. */
1943 sk->sk_state = BT_DISCONN;
1944 l2cap_sock_set_timer(sk, HZ/10);
1945 result = L2CAP_CR_SEC_BLOCK;
1948 rsp.scid = __cpu_to_le16(l2cap_pi(sk)->dcid);
1949 rsp.dcid = __cpu_to_le16(l2cap_pi(sk)->scid);
1950 rsp.result = __cpu_to_le16(result);
1951 rsp.status = __cpu_to_le16(0);
1952 l2cap_send_rsp(conn, l2cap_pi(sk)->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
1957 read_unlock(&l->lock);
/* HCI callback: raw ACL data arrived.  L2CAP frames may be fragmented
 * across several ACL packets; this function reassembles them.  An
 * ACL_START packet carries the L2CAP header (giving the total length) —
 * if the whole frame fits it is dispatched immediately, otherwise a
 * reassembly buffer (conn->rx_skb / conn->rx_len) is set up and
 * continuation packets are appended until the frame is complete.
 * NOTE(review): several braces, goto drop/done paths and `else` lines are
 * elided in this extract; code lines are kept verbatim. */
1961 static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
1963 struct l2cap_conn *conn = hcon->l2cap_data;
/* Lazily create the connection object on first data if needed. */
1965 if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
1968 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
1970 if (flags & ACL_START) {
1971 struct l2cap_hdr *hdr;
/* A new start while reassembly is pending means the previous
 * frame was truncated — discard the partial buffer. */
1975 BT_ERR("Unexpected start frame (len %d)", skb->len);
1976 kfree_skb(conn->rx_skb);
1977 conn->rx_skb = NULL;
/* A start fragment must at least contain the L2CAP header. */
1982 BT_ERR("Frame is too short (len %d)", skb->len);
1986 hdr = (struct l2cap_hdr *) skb->data;
1987 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
1989 if (len == skb->len) {
1990 /* Complete frame received */
1991 l2cap_recv_frame(conn, skb);
1995 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
/* Payload longer than the header claims: corrupt — drop. */
1997 if (skb->len > len) {
1998 BT_ERR("Frame is too long (len %d, expected len %d)",
2003 /* Allocate skb for the complete frame (with header) */
2004 if (!(conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC)))
2007 memcpy(skb_put(conn->rx_skb, skb->len), skb->data, skb->len);
/* rx_len tracks how many bytes are still expected. */
2008 conn->rx_len = len - skb->len;
/* Continuation fragment path. */
2010 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
/* Continuation with no reassembly in progress: protocol error. */
2012 if (!conn->rx_len) {
2013 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
2017 if (skb->len > conn->rx_len) {
2018 BT_ERR("Fragment is too long (len %d, expected %d)",
2019 skb->len, conn->rx_len);
2020 kfree_skb(conn->rx_skb);
2021 conn->rx_skb = NULL;
2026 memcpy(skb_put(conn->rx_skb, skb->len), skb->data, skb->len);
2027 conn->rx_len -= skb->len;
2029 if (!conn->rx_len) {
2030 /* Complete frame received */
2031 l2cap_recv_frame(conn, conn->rx_skb);
2032 conn->rx_skb = NULL;
2041 /* ---- Proc fs support ---- */
/* /proc/bluetooth/l2cap support: a seq_file that lists every L2CAP socket
 * (addresses, state, PSM, CIDs, MTUs, link mode), one per line.  When
 * CONFIG_PROC_FS is disabled the init/cleanup hooks become no-op stubs.
 * NOTE(review): braces, position bookkeeping inside start/next and the
 * stub bodies are elided in this extract; code lines are kept verbatim. */
2042 #ifdef CONFIG_PROC_FS
/* Take the socket-list lock for the whole traversal (released in stop)
 * and seek to the *pos-th socket. */
2043 static void *l2cap_seq_start(struct seq_file *seq, loff_t *pos)
2046 struct hlist_node *node;
2049 read_lock_bh(&l2cap_sk_list.lock);
2051 sk_for_each(sk, node, &l2cap_sk_list.head)
/* Advance to the next socket in the list. */
2059 static void *l2cap_seq_next(struct seq_file *seq, void *e, loff_t *pos)
/* Drop the lock taken in l2cap_seq_start(). */
2065 static void l2cap_seq_stop(struct seq_file *seq, void *e)
2067 read_unlock_bh(&l2cap_sk_list.lock);
/* Emit one line describing socket `e`. */
2070 static int l2cap_seq_show(struct seq_file *seq, void *e)
2072 struct sock *sk = e;
2073 struct l2cap_pinfo *pi = l2cap_pi(sk);
2075 seq_printf(seq, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d 0x%x\n",
2076 batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst),
2077 sk->sk_state, pi->psm, pi->scid, pi->dcid, pi->imtu,
2078 pi->omtu, pi->link_mode);
2082 static struct seq_operations l2cap_seq_ops = {
2083 .start = l2cap_seq_start,
2084 .next = l2cap_seq_next,
2085 .stop = l2cap_seq_stop,
2086 .show = l2cap_seq_show
2089 static int l2cap_seq_open(struct inode *inode, struct file *file)
2091 return seq_open(file, &l2cap_seq_ops);
2094 static struct file_operations l2cap_seq_fops = {
2095 .owner = THIS_MODULE,
2096 .open = l2cap_seq_open,
2098 .llseek = seq_lseek,
2099 .release = seq_release,
/* Create the read-only /proc/bluetooth/l2cap entry. */
2102 static int __init l2cap_proc_init(void)
2104 struct proc_dir_entry *p = create_proc_entry("l2cap", S_IRUGO, proc_bt);
2107 p->owner = THIS_MODULE;
2108 p->proc_fops = &l2cap_seq_fops;
2112 static void __exit l2cap_proc_cleanup(void)
2114 remove_proc_entry("l2cap", proc_bt);
2117 #else /* CONFIG_PROC_FS */
/* No-op stubs when procfs support is compiled out. */
2119 static int __init l2cap_proc_init(void)
2124 static void __exit l2cap_proc_cleanup(void)
2128 #endif /* CONFIG_PROC_FS */
/* Socket operations for PF_BLUETOOTH/BTPROTO_L2CAP sockets.  L2CAP-specific
 * handlers are mixed with generic bt_sock_* helpers and sock_no_* stubs for
 * unsupported operations (mmap, socketpair, ioctl). */
2130 static struct proto_ops l2cap_sock_ops = {
2131 .family = PF_BLUETOOTH,
2132 .owner = THIS_MODULE,
2133 .release = l2cap_sock_release,
2134 .bind = l2cap_sock_bind,
2135 .connect = l2cap_sock_connect,
2136 .listen = l2cap_sock_listen,
2137 .accept = l2cap_sock_accept,
2138 .getname = l2cap_sock_getname,
2139 .sendmsg = l2cap_sock_sendmsg,
2140 .recvmsg = bt_sock_recvmsg,
2141 .poll = bt_sock_poll,
2142 .mmap = sock_no_mmap,
2143 .socketpair = sock_no_socketpair,
2144 .ioctl = sock_no_ioctl,
2145 .shutdown = l2cap_sock_shutdown,
2146 .setsockopt = l2cap_sock_setsockopt,
2147 .getsockopt = l2cap_sock_getsockopt
/* Registered with bt_sock_register() so socket(PF_BLUETOOTH, ...,
 * BTPROTO_L2CAP) creates L2CAP sockets via l2cap_sock_create(). */
2150 static struct net_proto_family l2cap_sock_family_ops = {
2151 .family = PF_BLUETOOTH,
2152 .owner = THIS_MODULE,
2153 .create = l2cap_sock_create,
/* HCI protocol hooks: the HCI core calls these for connection setup and
 * teardown events, security confirmations, and inbound ACL data. */
2156 static struct hci_proto l2cap_hci_proto = {
2158 .id = HCI_PROTO_L2CAP,
2159 .connect_ind = l2cap_connect_ind,
2160 .connect_cfm = l2cap_connect_cfm,
2161 .disconn_ind = l2cap_disconn_ind,
2162 .auth_cfm = l2cap_auth_cfm,
2163 .encrypt_cfm = l2cap_encrypt_cfm,
2164 .recv_acldata = l2cap_recv_acldata
/* Module init: register the socket family with the Bluetooth core, then
 * hook L2CAP into the HCI layer.  (The procfs setup and the error-path
 * unwind are elided in this extract.) */
2167 static int __init l2cap_init(void)
2171 if ((err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops))) {
2172 BT_ERR("L2CAP socket registration failed");
2176 if ((err = hci_register_proto(&l2cap_hci_proto))) {
2177 BT_ERR("L2CAP protocol registration failed");
2183 BT_INFO("L2CAP ver %s", VERSION);
2184 BT_INFO("L2CAP socket layer initialized");
/* Module exit: tear down in reverse of l2cap_init() — procfs entry first,
 * then the socket family and the HCI protocol hooks.  Failures are only
 * logged; there is nothing else to do at unload time. */
2189 static void __exit l2cap_exit(void)
2191 l2cap_proc_cleanup();
2193 /* Unregister socket and protocol */
2194 if (bt_sock_unregister(BTPROTO_L2CAP))
2195 BT_ERR("L2CAP socket unregistration failed");
2197 if (hci_unregister_proto(&l2cap_hci_proto))
2198 BT_ERR("L2CAP protocol unregistration failed");
2201 void l2cap_load(void)
2203 /* Dummy function to trigger automatic L2CAP module loading by
2204 * other modules that use L2CAP sockets but do not use any other
2205 * symbols from it. */
2208 EXPORT_SYMBOL(l2cap_load);
/* Module boilerplate.  The "bt-proto-0" alias matches BTPROTO_L2CAP (0)
 * so the socket layer can request this module on demand. */
2210 module_init(l2cap_init);
2211 module_exit(l2cap_exit);
2213 MODULE_AUTHOR("Maxim Krasnyansky <maxk@qualcomm.com>");
2214 MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
2215 MODULE_VERSION(VERSION);
2216 MODULE_LICENSE("GPL");
2217 MODULE_ALIAS("bt-proto-0");