2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth L2CAP core and sockets. */
27 #include <linux/config.h>
28 #include <linux/module.h>
30 #include <linux/types.h>
31 #include <linux/errno.h>
32 #include <linux/kernel.h>
33 #include <linux/major.h>
34 #include <linux/sched.h>
35 #include <linux/slab.h>
36 #include <linux/poll.h>
37 #include <linux/fcntl.h>
38 #include <linux/init.h>
39 #include <linux/interrupt.h>
40 #include <linux/socket.h>
41 #include <linux/skbuff.h>
42 #include <linux/proc_fs.h>
43 #include <linux/seq_file.h>
44 #include <linux/list.h>
47 #include <asm/system.h>
48 #include <asm/uaccess.h>
49 #include <asm/unaligned.h>
51 #include <net/bluetooth/bluetooth.h>
52 #include <net/bluetooth/hci_core.h>
53 #include <net/bluetooth/l2cap.h>
/*
 * Module-scope state and forward declarations.
 *
 * NOTE(review): this file is a line-numbered listing with many interior
 * lines elided (braces, some statements); it is not compilable as-is.
 * l2cap_sk_list holds every L2CAP socket, protected by its rwlock.
 */
55 #ifndef CONFIG_BT_L2CAP_DEBUG
62 static struct proto_ops l2cap_sock_ops;
64 struct bt_sock_list l2cap_sk_list = {
65 .lock = RW_LOCK_UNLOCKED
68 static int l2cap_conn_del(struct hci_conn *conn, int err);
70 static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent);
71 static void l2cap_chan_del(struct sock *sk, int err);
73 static void __l2cap_sock_close(struct sock *sk, int reason);
74 static void l2cap_sock_close(struct sock *sk);
75 static void l2cap_sock_kill(struct sock *sk);
77 static int l2cap_send_req(struct l2cap_conn *conn, u8 code, u16 len, void *data);
78 static int l2cap_send_rsp(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data);
80 /* ---- L2CAP timers ---- */
/*
 * Socket timer helpers. sk->sk_timer drives connection/disconnection
 * timeouts: on expiry the socket is closed with ETIMEDOUT.
 * NOTE(review): listing is elided here — the lock/unlock and sock_put
 * lines normally surrounding __l2cap_sock_close are not visible.
 */
81 static void l2cap_sock_timeout(unsigned long arg)
83 struct sock *sk = (struct sock *) arg;
85 BT_DBG("sock %p state %d", sk, sk->sk_state);
88 __l2cap_sock_close(sk, ETIMEDOUT);
/* Arm (or re-arm) sk_timer for `timeout` jiffies from now. */
95 static void l2cap_sock_set_timer(struct sock *sk, long timeout)
97 BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
99 if (!mod_timer(&sk->sk_timer, jiffies + timeout))
/* Cancel a pending sk_timer, if any. */
103 static void l2cap_sock_clear_timer(struct sock *sk)
105 BT_DBG("sock %p state %d", sk, sk->sk_state);
107 if (timer_pending(&sk->sk_timer) && del_timer(&sk->sk_timer))
/* One-time setup: point sk_timer at l2cap_sock_timeout with sk as arg. */
111 static void l2cap_sock_init_timer(struct sock *sk)
113 init_timer(&sk->sk_timer);
114 sk->sk_timer.function = l2cap_sock_timeout;
115 sk->sk_timer.data = (unsigned long)sk;
118 /* ---- L2CAP connections ---- */
/*
 * Find or create the l2cap_conn attached to an ACL link.
 * Returns the existing hcon->l2cap_data if present; otherwise allocates
 * a zeroed l2cap_conn (GFP_ATOMIC — may run in softirq context), caches
 * the ACL MTU and the local/remote bdaddrs, and initializes its locks.
 */
119 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
121 struct l2cap_conn *conn;
123 if ((conn = hcon->l2cap_data))
129 if (!(conn = kmalloc(sizeof(struct l2cap_conn), GFP_ATOMIC)))
131 memset(conn, 0, sizeof(struct l2cap_conn));
133 hcon->l2cap_data = conn;
136 conn->mtu = hcon->hdev->acl_mtu;
137 conn->src = &hcon->hdev->bdaddr;
138 conn->dst = &hcon->dst;
140 spin_lock_init(&conn->lock);
141 conn->chan_list.lock = RW_LOCK_UNLOCKED;
143 BT_DBG("hcon %p conn %p", hcon, conn);
/*
 * Tear down the l2cap_conn on an ACL link: free any partial reassembly
 * skb, delete every channel on the connection (propagating `err` to the
 * sockets), then detach from the hci_conn.
 */
147 static int l2cap_conn_del(struct hci_conn *hcon, int err)
149 struct l2cap_conn *conn;
152 if (!(conn = hcon->l2cap_data))
155 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
158 kfree_skb(conn->rx_skb);
161 while ((sk = conn->chan_list.head)) {
163 l2cap_chan_del(sk, err);
168 hcon->l2cap_data = NULL;
/* Locked wrapper around __l2cap_chan_add: takes the chan_list writer lock. */
173 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
175 struct l2cap_chan_list *l = &conn->chan_list;
176 write_lock(&l->lock);
177 __l2cap_chan_add(conn, sk, parent);
178 write_unlock(&l->lock);
181 /* ---- Socket interface ---- */
/*
 * Find a socket bound to exactly (sport == psm, src bdaddr).
 * Caller must hold l2cap_sk_list.lock.
 */
182 static struct sock *__l2cap_get_sock_by_addr(u16 psm, bdaddr_t *src)
185 struct hlist_node *node;
186 sk_for_each(sk, node, &l2cap_sk_list.head)
187 if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
194 /* Find socket with psm and source bdaddr.
195 * Returns closest match.
/*
 * Closest-match PSM lookup: an exact source-address match wins; a socket
 * bound to BDADDR_ANY (tracked in sk1) is the fallback. `state` of 0
 * matches any socket state.
 */
197 static struct sock *__l2cap_get_sock_by_psm(int state, u16 psm, bdaddr_t *src)
199 struct sock *sk = NULL, *sk1 = NULL;
200 struct hlist_node *node;
202 sk_for_each(sk, node, &l2cap_sk_list.head) {
203 if (state && sk->sk_state != state)
206 if (l2cap_pi(sk)->psm == psm) {
208 if (!bacmp(&bt_sk(sk)->src, src))
212 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
216 return node ? sk : sk1;
219 /* Find socket with given address (psm, src).
220 * Returns locked socket */
221 static inline struct sock *l2cap_get_sock_by_psm(int state, u16 psm, bdaddr_t *src)
224 read_lock(&l2cap_sk_list.lock);
225 s = __l2cap_get_sock_by_psm(state, psm, src);
226 if (s) bh_lock_sock(s);
227 read_unlock(&l2cap_sk_list.lock);
/*
 * sk_destruct hook: drop any queued rx/tx skbs and free the protocol
 * private data.
 */
231 static void l2cap_sock_destruct(struct sock *sk)
235 skb_queue_purge(&sk->sk_receive_queue);
236 skb_queue_purge(&sk->sk_write_queue);
239 kfree(sk->sk_protinfo);
/*
 * Close every not-yet-accepted child of a listening socket, then mark
 * the parent closed and zapped.
 */
242 static void l2cap_sock_cleanup_listen(struct sock *parent)
246 BT_DBG("parent %p", parent);
248 /* Close not yet accepted channels */
249 while ((sk = bt_accept_dequeue(parent, NULL)))
250 l2cap_sock_close(sk);
252 parent->sk_state = BT_CLOSED;
253 parent->sk_zapped = 1;
256 /* Kill socket (only if zapped and orphan)
257 * Must be called on unlocked socket.
259 static void l2cap_sock_kill(struct sock *sk)
261 if (!sk->sk_zapped || sk->sk_socket)
264 BT_DBG("sk %p state %d", sk, sk->sk_state)
266 /* Kill poor orphan */
267 bt_sock_unlink(&l2cap_sk_list, sk);
268 sock_set_flag(sk, SOCK_DEAD);
/*
 * State-dependent close. A connected SOCK_SEQPACKET channel performs an
 * orderly L2CAP disconnect: move to BT_DISCONN, arm the timer, and send
 * L2CAP_DISCONN_REQ; other states/types tear the channel down directly
 * via l2cap_chan_del(sk, reason).
 * NOTE(review): the case labels for this switch are elided from the
 * listing — exact state coverage cannot be confirmed here.
 */
272 static void __l2cap_sock_close(struct sock *sk, int reason)
274 BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
276 switch (sk->sk_state) {
278 l2cap_sock_cleanup_listen(sk);
284 if (sk->sk_type == SOCK_SEQPACKET) {
285 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
286 struct l2cap_disconn_req req;
288 sk->sk_state = BT_DISCONN;
289 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
291 req.dcid = __cpu_to_le16(l2cap_pi(sk)->dcid);
292 req.scid = __cpu_to_le16(l2cap_pi(sk)->scid);
293 l2cap_send_req(conn, L2CAP_DISCONN_REQ, sizeof(req), &req);
295 l2cap_chan_del(sk, reason);
301 l2cap_chan_del(sk, reason);
310 /* Must be called on unlocked socket. */
/* Cancel the timer and close with ECONNRESET as the reason. */
311 static void l2cap_sock_close(struct sock *sk)
313 l2cap_sock_clear_timer(sk);
315 __l2cap_sock_close(sk, ECONNRESET);
/*
 * Initialize per-socket L2CAP state. With a parent (incoming channel)
 * the child inherits type, imtu/omtu and link_mode; without one the
 * imtu defaults to L2CAP_DEFAULT_MTU. Default negotiable config
 * options (conf_mtu, flush_to) are set in both cases.
 */
320 static void l2cap_sock_init(struct sock *sk, struct sock *parent)
322 struct l2cap_pinfo *pi = l2cap_pi(sk);
327 sk->sk_type = parent->sk_type;
328 pi->imtu = l2cap_pi(parent)->imtu;
329 pi->omtu = l2cap_pi(parent)->omtu;
330 pi->link_mode = l2cap_pi(parent)->link_mode;
332 pi->imtu = L2CAP_DEFAULT_MTU;
337 /* Default config options */
338 pi->conf_mtu = L2CAP_DEFAULT_MTU;
339 pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
/*
 * Allocate a struct sock with l2cap_pinfo, hook up destructor and
 * timer, set the connect timeout, and link it into l2cap_sk_list.
 */
342 static struct sock *l2cap_sock_alloc(struct socket *sock, int proto, int prio)
346 sk = bt_sock_alloc(sock, proto, sizeof(struct l2cap_pinfo), prio);
350 sk_set_owner(sk, THIS_MODULE);
352 sk->sk_destruct = l2cap_sock_destruct;
353 sk->sk_sndtimeo = L2CAP_CONN_TIMEOUT;
355 sk->sk_protocol = proto;
356 sk->sk_state = BT_OPEN;
358 l2cap_sock_init_timer(sk);
360 bt_sock_link(&l2cap_sk_list, sk);
/*
 * socket(2) backend: only SEQPACKET, DGRAM and RAW are supported, and
 * RAW additionally requires CAP_NET_RAW.
 */
364 static int l2cap_sock_create(struct socket *sock, int protocol)
368 BT_DBG("sock %p", sock);
370 sock->state = SS_UNCONNECTED;
372 if (sock->type != SOCK_SEQPACKET && sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
373 return -ESOCKTNOSUPPORT;
375 if (sock->type == SOCK_RAW && !capable(CAP_NET_RAW))
378 sock->ops = &l2cap_sock_ops;
380 sk = l2cap_sock_alloc(sock, protocol, GFP_KERNEL);
384 l2cap_sock_init(sk, NULL);
/*
 * bind(2): only valid in BT_OPEN. Under the sk_list writer lock,
 * reject a PSM already bound to the same source address, then record
 * the source bdaddr and psm/sport and move to BT_BOUND.
 */
388 static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
390 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
391 struct sock *sk = sock->sk;
394 BT_DBG("sk %p, %s %d", sk, batostr(&la->l2_bdaddr), la->l2_psm);
396 if (!addr || addr->sa_family != AF_BLUETOOTH)
401 if (sk->sk_state != BT_OPEN) {
406 write_lock_bh(&l2cap_sk_list.lock);
408 if (la->l2_psm && __l2cap_get_sock_by_addr(la->l2_psm, &la->l2_bdaddr)) {
411 /* Save source address */
412 bacpy(&bt_sk(sk)->src, &la->l2_bdaddr);
413 l2cap_pi(sk)->psm = la->l2_psm;
414 l2cap_pi(sk)->sport = la->l2_psm;
415 sk->sk_state = BT_BOUND;
418 write_unlock_bh(&l2cap_sk_list.lock);
/*
 * Establish the lower-layer path for an outgoing channel: route to an
 * HCI device, create/look up the ACL link and its l2cap_conn, add the
 * channel, then either send L2CAP_CONN_REQ immediately (SEQPACKET on an
 * already-connected link) or mark non-SEQPACKET sockets connected.
 */
425 static int l2cap_do_connect(struct sock *sk)
427 bdaddr_t *src = &bt_sk(sk)->src;
428 bdaddr_t *dst = &bt_sk(sk)->dst;
429 struct l2cap_conn *conn;
430 struct hci_conn *hcon;
431 struct hci_dev *hdev;
434 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst), l2cap_pi(sk)->psm);
436 if (!(hdev = hci_get_route(dst, src)))
437 return -EHOSTUNREACH;
439 hci_dev_lock_bh(hdev);
443 hcon = hci_connect(hdev, ACL_LINK, dst);
447 conn = l2cap_conn_add(hcon, 0);
455 /* Update source addr of the socket */
456 bacpy(src, conn->src);
458 l2cap_chan_add(conn, sk, NULL);
460 sk->sk_state = BT_CONNECT;
461 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
463 if (hcon->state == BT_CONNECTED) {
464 if (sk->sk_type == SOCK_SEQPACKET) {
465 struct l2cap_conn_req req;
466 req.scid = __cpu_to_le16(l2cap_pi(sk)->scid);
467 req.psm = l2cap_pi(sk)->psm;
468 l2cap_send_req(conn, L2CAP_CONN_REQ, sizeof(req), &req);
470 l2cap_sock_clear_timer(sk);
471 sk->sk_state = BT_CONNECTED;
476 hci_dev_unlock_bh(hdev);
/*
 * connect(2): validate the address (SEQPACKET requires a PSM), check
 * the current state, record destination bdaddr + psm, kick off
 * l2cap_do_connect, then wait for BT_CONNECTED subject to the socket's
 * send timeout / O_NONBLOCK.
 */
481 static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
483 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
484 struct sock *sk = sock->sk;
491 if (addr->sa_family != AF_BLUETOOTH || alen < sizeof(struct sockaddr_l2)) {
496 if (sk->sk_type == SOCK_SEQPACKET && !la->l2_psm) {
501 switch(sk->sk_state) {
505 /* Already connecting */
509 /* Already connected */
522 /* Set destination address and psm */
523 bacpy(&bt_sk(sk)->dst, &la->l2_bdaddr);
524 l2cap_pi(sk)->psm = la->l2_psm;
526 if ((err = l2cap_do_connect(sk)))
530 err = bt_sock_wait_state(sk, BT_CONNECTED,
531 sock_sndtimeo(sk, flags & O_NONBLOCK));
/*
 * listen(2): only for bound SOCK_SEQPACKET sockets. If no PSM was
 * bound, auto-assign one from the dynamic range — odd values
 * 0x1001..0x10ff, stepping by 2 — skipping PSMs already in use for
 * this source address.
 */
537 static int l2cap_sock_listen(struct socket *sock, int backlog)
539 struct sock *sk = sock->sk;
542 BT_DBG("sk %p backlog %d", sk, backlog);
546 if (sk->sk_state != BT_BOUND || sock->type != SOCK_SEQPACKET) {
551 if (!l2cap_pi(sk)->psm) {
552 bdaddr_t *src = &bt_sk(sk)->src;
557 write_lock_bh(&l2cap_sk_list.lock);
559 for (psm = 0x1001; psm < 0x1100; psm += 2)
560 if (!__l2cap_get_sock_by_addr(psm, src)) {
561 l2cap_pi(sk)->psm = htobs(psm);
562 l2cap_pi(sk)->sport = htobs(psm);
567 write_unlock_bh(&l2cap_sk_list.lock);
573 sk->sk_max_ack_backlog = backlog;
574 sk->sk_ack_backlog = 0;
575 sk->sk_state = BT_LISTEN;
/*
 * accept(2): wake-one wait on the listening socket's accept queue,
 * honoring the receive timeout / O_NONBLOCK and pending signals.
 * Bails out if the socket leaves BT_LISTEN while waiting.
 */
582 static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
584 DECLARE_WAITQUEUE(wait, current);
585 struct sock *sk = sock->sk, *nsk;
591 if (sk->sk_state != BT_LISTEN) {
596 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
598 BT_DBG("sk %p timeo %ld", sk, timeo);
600 /* Wait for an incoming connection. (wake-one). */
601 add_wait_queue_exclusive(sk->sk_sleep, &wait);
602 while (!(nsk = bt_accept_dequeue(sk, newsock))) {
603 set_current_state(TASK_INTERRUPTIBLE);
610 timeo = schedule_timeout(timeo);
613 if (sk->sk_state != BT_LISTEN) {
618 if (signal_pending(current)) {
619 err = sock_intr_errno(timeo);
623 set_current_state(TASK_RUNNING);
624 remove_wait_queue(sk->sk_sleep, &wait);
629 newsock->state = SS_CONNECTED;
631 BT_DBG("new socket %p", nsk);
/*
 * getsockname/getpeername: fill a sockaddr_l2 with the peer address
 * (peer != 0) or the local address, plus the channel's PSM.
 */
638 static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
640 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
641 struct sock *sk = sock->sk;
643 BT_DBG("sock %p, sk %p", sock, sk);
645 addr->sa_family = AF_BLUETOOTH;
646 *len = sizeof(struct sockaddr_l2);
649 bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
651 bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
653 la->l2_psm = l2cap_pi(sk)->psm;
/*
 * Build and transmit one L2CAP SDU from user iovec data. The first
 * skb carries the L2CAP header (plus a 2-byte PSM prefix for
 * SOCK_DGRAM); payload beyond the ACL MTU is chained as headerless
 * continuation skbs on frag_list, then the whole chain is handed to
 * hci_send_acl in one call.
 */
657 static inline int l2cap_do_send(struct sock *sk, struct msghdr *msg, int len)
659 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
660 struct sk_buff *skb, **frag;
661 int err, hlen, count, sent=0;
662 struct l2cap_hdr *lh;
664 BT_DBG("sk %p len %d", sk, len);
666 /* First fragment (with L2CAP header) */
667 if (sk->sk_type == SOCK_DGRAM)
668 hlen = L2CAP_HDR_SIZE + 2;
670 hlen = L2CAP_HDR_SIZE;
672 count = min_t(unsigned int, (conn->mtu - hlen), len);
674 skb = bt_skb_send_alloc(sk, hlen + count,
675 msg->msg_flags & MSG_DONTWAIT, &err);
679 /* Create L2CAP header */
680 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
681 lh->cid = __cpu_to_le16(l2cap_pi(sk)->dcid);
682 lh->len = __cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
684 if (sk->sk_type == SOCK_DGRAM)
685 put_unaligned(l2cap_pi(sk)->psm, (u16 *) skb_put(skb, 2));
687 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count)) {
695 /* Continuation fragments (no L2CAP header) */
696 frag = &skb_shinfo(skb)->frag_list;
698 count = min_t(unsigned int, conn->mtu, len);
700 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
704 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count)) {
712 frag = &(*frag)->next;
715 if ((err = hci_send_acl(conn->hcon, skb, 0)) < 0)
/*
 * sendmsg(2): reject MSG_OOB and oversized (> omtu) payloads, report
 * any pending socket error, and only transmit while BT_CONNECTED.
 */
725 static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
726 struct msghdr *msg, size_t len)
728 struct sock *sk = sock->sk;
731 BT_DBG("sock %p, sk %p", sock, sk);
734 return sock_error(sk);
736 if (msg->msg_flags & MSG_OOB)
739 /* Check outgoing MTU */
740 if (len > l2cap_pi(sk)->omtu)
745 if (sk->sk_state == BT_CONNECTED)
746 err = l2cap_do_send(sk, msg, len);
/*
 * setsockopt: L2CAP_OPTIONS copies imtu/omtu from a user l2cap_options
 * struct; L2CAP_LM takes a u32 link_mode bitmask.
 * NOTE(review): the option switch labels are elided from this listing.
 */
754 static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, int optlen)
756 struct sock *sk = sock->sk;
757 struct l2cap_options opts;
767 len = min_t(unsigned int, sizeof(opts), optlen);
768 if (copy_from_user((char *)&opts, optval, len)) {
772 l2cap_pi(sk)->imtu = opts.imtu;
773 l2cap_pi(sk)->omtu = opts.omtu;
777 if (get_user(opt, (u32 __user *)optval)) {
782 l2cap_pi(sk)->link_mode = opt;
/*
 * getsockopt: returns l2cap_options (imtu/omtu/flush_to), the link_mode
 * u32, or l2cap_conninfo (HCI handle — connected sockets only), each
 * truncated to the user-supplied length.
 */
794 static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
796 struct sock *sk = sock->sk;
797 struct l2cap_options opts;
798 struct l2cap_conninfo cinfo;
801 if (get_user(len, optlen))
808 opts.imtu = l2cap_pi(sk)->imtu;
809 opts.omtu = l2cap_pi(sk)->omtu;
810 opts.flush_to = l2cap_pi(sk)->flush_to;
812 len = min_t(unsigned int, len, sizeof(opts));
813 if (copy_to_user(optval, (char *)&opts, len))
819 if (put_user(l2cap_pi(sk)->link_mode, (u32 __user *)optval))
824 if (sk->sk_state != BT_CONNECTED) {
829 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
831 len = min_t(unsigned int, len, sizeof(cinfo));
832 if (copy_to_user(optval, (char *)&cinfo, len))
/*
 * shutdown(2): idempotent — on first call, close the channel with
 * reason 0 and, if SO_LINGER is set, wait up to lingertime for
 * BT_CLOSED.
 */
846 static int l2cap_sock_shutdown(struct socket *sock, int how)
848 struct sock *sk = sock->sk;
851 BT_DBG("sock %p, sk %p", sock, sk);
857 if (!sk->sk_shutdown) {
858 sk->sk_shutdown = SHUTDOWN_MASK;
859 l2cap_sock_clear_timer(sk);
860 __l2cap_sock_close(sk, 0);
862 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
863 err = bt_sock_wait_state(sk, BT_CLOSED, sk->sk_lingertime);
/* close(2)/release: full shutdown, then orphan-kill paths (elided). */
869 static int l2cap_sock_release(struct socket *sock)
871 struct sock *sk = sock->sk;
874 BT_DBG("sock %p, sk %p", sock, sk);
879 err = l2cap_sock_shutdown(sock, 2);
886 /* ---- L2CAP channels ---- */
/*
 * Channel-list helpers. The per-connection chan_list is a doubly-linked
 * list threaded through l2cap_pinfo (next_c/prev_c). The __-prefixed
 * walkers require the caller to hold chan_list.lock.
 */
887 static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
890 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
891 if (l2cap_pi(s)->dcid == cid)
897 static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
900 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
901 if (l2cap_pi(s)->scid == cid)
907 /* Find channel with given SCID.
908 * Returns locked socket */
909 static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
913 s = __l2cap_get_chan_by_scid(l, cid);
914 if (s) bh_lock_sock(s);
915 read_unlock(&l->lock);
/* Pick the first free source CID (scans upward; start value elided). */
919 static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
923 for (; cid < 0xffff; cid++) {
924 if(!__l2cap_get_chan_by_scid(l, cid))
/* Push sk onto the head of the channel list (caller holds the lock). */
931 static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
936 l2cap_pi(l->head)->prev_c = sk;
938 l2cap_pi(sk)->next_c = l->head;
939 l2cap_pi(sk)->prev_c = NULL;
/* Unlink sk from the channel list under the writer lock. */
943 static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
945 struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;
947 write_lock(&l->lock);
952 l2cap_pi(next)->prev_c = prev;
954 l2cap_pi(prev)->next_c = next;
955 write_unlock(&l->lock);
/*
 * Attach a socket to a connection's channel list. CID assignment by
 * socket type: SEQPACKET gets a dynamically allocated source CID;
 * DGRAM uses the fixed connectionless CID 0x0002; anything else (raw)
 * uses the signalling CID 0x0001. Children of an accepting parent are
 * queued on its accept list.
 */
960 static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
962 struct l2cap_chan_list *l = &conn->chan_list;
964 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn, l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
966 l2cap_pi(sk)->conn = conn;
968 if (sk->sk_type == SOCK_SEQPACKET) {
969 /* Alloc CID for connection-oriented socket */
970 l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
971 } else if (sk->sk_type == SOCK_DGRAM) {
972 /* Connectionless socket */
973 l2cap_pi(sk)->scid = 0x0002;
974 l2cap_pi(sk)->dcid = 0x0002;
975 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
977 /* Raw socket can send/recv signalling messages only */
978 l2cap_pi(sk)->scid = 0x0001;
979 l2cap_pi(sk)->dcid = 0x0001;
980 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
983 __l2cap_chan_link(l, sk);
986 bt_accept_enqueue(parent, sk);
990 * Must be called on the locked socket. */
/*
 * Detach a channel: cancel its timer, unlink it from the connection,
 * drop the hci_conn reference, mark the socket BT_CLOSED and notify
 * either the accepting parent (data-ready) or the socket itself
 * (state-change).
 */
991 static void l2cap_chan_del(struct sock *sk, int err)
993 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
994 struct sock *parent = bt_sk(sk)->parent;
996 l2cap_sock_clear_timer(sk);
998 BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
1001 /* Unlink from channel list */
1002 l2cap_chan_unlink(&conn->chan_list, sk);
1003 l2cap_pi(sk)->conn = NULL;
1004 hci_conn_put(conn->hcon);
1007 sk->sk_state = BT_CLOSED;
1014 parent->sk_data_ready(parent, 0);
1016 sk->sk_state_change(sk);
/*
 * ACL link came up: for every channel on the connection, raw/dgram
 * sockets become BT_CONNECTED immediately, while SEQPACKET channels
 * still in BT_CONNECT now send their deferred L2CAP_CONN_REQ.
 */
1019 static void l2cap_conn_ready(struct l2cap_conn *conn)
1021 struct l2cap_chan_list *l = &conn->chan_list;
1024 BT_DBG("conn %p", conn);
1026 read_lock(&l->lock);
1028 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
1031 if (sk->sk_type != SOCK_SEQPACKET) {
1032 l2cap_sock_clear_timer(sk);
1033 sk->sk_state = BT_CONNECTED;
1034 sk->sk_state_change(sk);
1035 } else if (sk->sk_state == BT_CONNECT) {
1036 struct l2cap_conn_req req;
1037 req.scid = __cpu_to_le16(l2cap_pi(sk)->scid);
1038 req.psm = l2cap_pi(sk)->psm;
1039 l2cap_send_req(conn, L2CAP_CONN_REQ, sizeof(req), &req);
1045 read_unlock(&l->lock);
/*
 * Channel fully configured: reset config state, stop the timer, and
 * wake whichever side is waiting — connect(2) on an outgoing channel
 * (no parent) or accept(2) on an incoming one.
 */
1048 static void l2cap_chan_ready(struct sock *sk)
1050 struct sock *parent = bt_sk(sk)->parent;
1052 BT_DBG("sk %p, parent %p", sk, parent);
1054 l2cap_pi(sk)->conf_state = 0;
1055 l2cap_sock_clear_timer(sk);
1058 /* Outgoing channel.
1059 * Wake up socket sleeping on connect.
1061 sk->sk_state = BT_CONNECTED;
1062 sk->sk_state_change(sk);
1064 /* Incoming channel.
1065 * Wake up socket sleeping on accept.
1067 parent->sk_data_ready(parent, 0);
1071 /* Copy frame to all raw sockets on that connection */
/*
 * Clone an incoming frame to every SOCK_RAW socket on this connection
 * (except the one it originated from), dropping clones whose socket
 * receive queue is full.
 */
1072 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1074 struct l2cap_chan_list *l = &conn->chan_list;
1075 struct sk_buff *nskb;
1078 BT_DBG("conn %p", conn);
1080 read_lock(&l->lock);
1081 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
1082 if (sk->sk_type != SOCK_RAW)
1085 /* Don't send frame to the socket it came from */
1089 if (!(nskb = skb_clone(skb, GFP_ATOMIC)))
1092 if (sock_queue_rcv_skb(sk, nskb))
1095 read_unlock(&l->lock);
1098 /* ---- L2CAP signalling commands ---- */
/*
 * Allocate the next signalling command identifier under conn->lock.
 * Identifiers 1-199 belong to the kernel; 200-254 are reserved for
 * userspace tools (l2ping etc.), so the counter wraps within 1-199.
 */
1099 static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
1103 /* Get next available identificator.
1104 * 1 - 199 are used by kernel.
1105 * 200 - 254 are used by utilities like l2ping, etc
1108 spin_lock(&conn->lock);
1110 if (++conn->tx_ident > 199)
1113 id = conn->tx_ident;
1115 spin_unlock(&conn->lock);
/*
 * Build a signalling PDU on CID 0x0001: L2CAP header + command header
 * + payload, fragmenting payload beyond the ACL MTU into frag_list
 * continuation skbs (same chaining scheme as l2cap_do_send).
 */
1120 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
1121 u8 code, u8 ident, u16 dlen, void *data)
1123 struct sk_buff *skb, **frag;
1124 struct l2cap_cmd_hdr *cmd;
1125 struct l2cap_hdr *lh;
1128 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d", conn, code, ident, dlen);
1130 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
1131 count = min_t(unsigned int, conn->mtu, len);
1133 skb = bt_skb_alloc(count, GFP_ATOMIC);
1137 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1138 lh->len = __cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
1139 lh->cid = __cpu_to_le16(0x0001);
1141 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
1144 cmd->len = __cpu_to_le16(dlen);
1147 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
1148 memcpy(skb_put(skb, count), data, count);
1154 /* Continuation fragments (no L2CAP header) */
1155 frag = &skb_shinfo(skb)->frag_list;
1157 count = min_t(unsigned int, conn->mtu, len);
1159 *frag = bt_skb_alloc(count, GFP_ATOMIC);
1163 memcpy(skb_put(*frag, count), data, count);
1168 frag = &(*frag)->next;
/* Send a signalling request with a freshly allocated identifier. */
1178 static int l2cap_send_req(struct l2cap_conn *conn, u8 code, u16 len, void *data)
1180 u8 ident = l2cap_get_ident(conn);
1181 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
1183 BT_DBG("code 0x%2.2x", code);
1187 return hci_send_acl(conn->hcon, skb, 0);
/* Send a signalling response, echoing the request's identifier. */
1190 static int l2cap_send_rsp(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
1192 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
1194 BT_DBG("code 0x%2.2x", code);
1198 return hci_send_acl(conn->hcon, skb, 0);
/*
 * Decode one TLV configuration option at *ptr: 1/2/4-byte values are
 * read (little-endian) into *val; other lengths return a pointer to
 * the raw bytes. Returns the option's total size so the caller can
 * advance through the buffer.
 */
1201 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
1203 struct l2cap_conf_opt *opt = *ptr;
1206 len = L2CAP_CONF_OPT_SIZE + opt->len;
1214 *val = *((u8 *) opt->val);
1218 *val = __le16_to_cpu(*((u16 *)opt->val));
1222 *val = __le32_to_cpu(*((u32 *)opt->val));
1226 *val = (unsigned long) opt->val;
1230 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
/*
 * Walk a peer's configuration request, recording the options we track:
 * MTU into conf_mtu and flush timeout into flush_to. QoS and unknown
 * options are currently not rejected (see FIXME).
 */
1234 static inline void l2cap_parse_conf_req(struct sock *sk, void *data, int len)
1236 int type, hint, olen;
1240 BT_DBG("sk %p len %d", sk, len);
1242 while (len >= L2CAP_CONF_OPT_SIZE) {
1243 len -= l2cap_get_conf_opt(&ptr, &type, &olen, &val);
1249 case L2CAP_CONF_MTU:
1250 l2cap_pi(sk)->conf_mtu = val;
1253 case L2CAP_CONF_FLUSH_TO:
1254 l2cap_pi(sk)->flush_to = val;
1257 case L2CAP_CONF_QOS:
1264 /* FIXME: Reject unknown option */
/*
 * Append one TLV option at *ptr (1/2/4-byte values stored
 * little-endian, larger values memcpy'd) and advance the cursor.
 */
1270 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
1272 struct l2cap_conf_opt *opt = *ptr;
1274 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
1281 *((u8 *) opt->val) = val;
1285 *((u16 *) opt->val) = __cpu_to_le16(val);
1289 *((u32 *) opt->val) = __cpu_to_le32(val);
1293 memcpy(opt->val, (void *) val, len);
1297 *ptr += L2CAP_CONF_OPT_SIZE + len;
/*
 * Build our outgoing configuration request: only a non-default incoming
 * MTU is advertised. Returns are elided here — presumably the total
 * request length for the caller to pass to l2cap_send_req; confirm
 * against the full source.
 */
1300 static int l2cap_build_conf_req(struct sock *sk, void *data)
1302 struct l2cap_pinfo *pi = l2cap_pi(sk);
1303 struct l2cap_conf_req *req = data;
1304 void *ptr = req->data;
1306 BT_DBG("sk %p", sk);
1308 if (pi->imtu != L2CAP_DEFAULT_MTU)
1309 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
1311 /* FIXME. Need actual value of the flush timeout */
1312 //if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
1313 // l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);
1315 req->dcid = __cpu_to_le16(pi->dcid);
1316 req->flags = __cpu_to_le16(0);
/*
 * Apply the peer's requested options to our output path: if the peer's
 * MTU is smaller than our omtu, counter-propose our omtu and mark the
 * result UNACCEPT; otherwise adopt the peer's MTU.
 */
1321 static inline int l2cap_conf_output(struct sock *sk, void **ptr)
1323 struct l2cap_pinfo *pi = l2cap_pi(sk);
1326 /* Configure output options and let the other side know
1327 * which ones we don't like. */
1328 if (pi->conf_mtu < pi->omtu) {
1329 l2cap_add_conf_opt(ptr, L2CAP_CONF_MTU, 2, pi->omtu);
1330 result = L2CAP_CONF_UNACCEPT;
1332 pi->omtu = pi->conf_mtu;
1335 BT_DBG("sk %p result %d", sk, result);
/*
 * Build a configuration response. With a non-NULL `result` the config
 * is complete and l2cap_conf_output fills in accepted/rejected options;
 * with NULL it is an empty "continue" response.
 */
1339 static int l2cap_build_conf_rsp(struct sock *sk, void *data, int *result)
1341 struct l2cap_conf_rsp *rsp = data;
1342 void *ptr = rsp->data;
1345 BT_DBG("sk %p complete %d", sk, result ? 1 : 0);
1348 *result = l2cap_conf_output(sk, &ptr);
1352 rsp->scid = __cpu_to_le16(l2cap_pi(sk)->dcid);
1353 rsp->result = __cpu_to_le16(result ? *result : 0);
1354 rsp->flags = __cpu_to_le16(flags);
/*
 * Handle an incoming L2CAP_CONN_REQ. Finds a listener for the PSM
 * (else rejects with CR_BAD_PSM), enforces the accept backlog, creates
 * and initializes a child socket, guards against a duplicate remote
 * CID, then either answers pending (CR_PEND/CS_AUTHEN_PEND while
 * link-mode security — encrypt/auth — is requested) or accepts into
 * BT_CONFIG. Always sends an L2CAP_CONN_RSP.
 */
1359 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1361 struct l2cap_chan_list *list = &conn->chan_list;
1362 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
1363 struct l2cap_conn_rsp rsp;
1364 struct sock *sk, *parent;
1365 int result = 0, status = 0;
1367 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
1370 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
1372 /* Check if we have socket listening on psm */
1373 parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
1375 result = L2CAP_CR_BAD_PSM;
1379 result = L2CAP_CR_NO_MEM;
1381 /* Check for backlog size */
1382 if (parent->sk_ack_backlog > parent->sk_max_ack_backlog) {
1383 BT_DBG("backlog full %d", parent->sk_ack_backlog);
1387 sk = l2cap_sock_alloc(NULL, BTPROTO_L2CAP, GFP_ATOMIC);
1391 write_lock(&list->lock);
1393 /* Check if we already have channel with that dcid */
1394 if (__l2cap_get_chan_by_dcid(list, scid)) {
1395 write_unlock(&list->lock);
1397 l2cap_sock_kill(sk);
1401 hci_conn_hold(conn->hcon);
1403 l2cap_sock_init(sk, parent);
1404 bacpy(&bt_sk(sk)->src, conn->src);
1405 bacpy(&bt_sk(sk)->dst, conn->dst);
1406 l2cap_pi(sk)->psm = psm;
1407 l2cap_pi(sk)->dcid = scid;
1409 __l2cap_chan_add(conn, sk, parent);
1410 dcid = l2cap_pi(sk)->scid;
1412 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
1414 /* Service level security */
1415 result = L2CAP_CR_PEND;
1416 status = L2CAP_CS_AUTHEN_PEND;
1417 sk->sk_state = BT_CONNECT2;
1418 l2cap_pi(sk)->ident = cmd->ident;
1420 if (l2cap_pi(sk)->link_mode & L2CAP_LM_ENCRYPT) {
1421 if (!hci_conn_encrypt(conn->hcon))
1423 } else if (l2cap_pi(sk)->link_mode & L2CAP_LM_AUTH) {
1424 if (!hci_conn_auth(conn->hcon))
1428 sk->sk_state = BT_CONFIG;
1429 result = status = 0;
1432 write_unlock(&list->lock);
1435 bh_unlock_sock(parent);
1438 rsp.scid = __cpu_to_le16(scid);
1439 rsp.dcid = __cpu_to_le16(dcid);
1440 rsp.result = __cpu_to_le16(result);
1441 rsp.status = __cpu_to_le16(status);
1442 l2cap_send_rsp(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/*
 * Handle L2CAP_CONN_RSP for an outgoing channel. On CR_SUCCESS: record
 * the remote CID, enter BT_CONFIG and fire our configure request. Any
 * non-success/non-pending result tears the channel down with
 * ECONNREFUSED (intermediate cases elided from this listing).
 */
1446 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1448 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
1449 u16 scid, dcid, result, status;
1453 scid = __le16_to_cpu(rsp->scid);
1454 dcid = __le16_to_cpu(rsp->dcid);
1455 result = __le16_to_cpu(rsp->result);
1456 status = __le16_to_cpu(rsp->status);
1458 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
1460 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
1464 case L2CAP_CR_SUCCESS:
1465 sk->sk_state = BT_CONFIG;
1466 l2cap_pi(sk)->dcid = dcid;
1467 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
1469 l2cap_send_req(conn, L2CAP_CONF_REQ, l2cap_build_conf_req(sk, req), req);
1476 l2cap_chan_del(sk, ECONNREFUSED);
/*
 * Handle L2CAP_CONF_REQ. Parse the peer's options; if the C-flag
 * (0x0001) indicates more options follow, answer with an empty
 * "continue" response, otherwise send the full response and mark
 * OUTPUT_DONE. When INPUT_DONE is also set the channel becomes
 * connected; if we never sent our own request, send it now.
 */
1484 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1486 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
1492 dcid = __le16_to_cpu(req->dcid);
1493 flags = __le16_to_cpu(req->flags);
1495 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
1497 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid)))
1500 l2cap_parse_conf_req(sk, req->data, cmd->len - sizeof(*req));
1502 if (flags & 0x0001) {
1503 /* Incomplete config. Send empty response. */
1504 l2cap_send_rsp(conn, cmd->ident, L2CAP_CONF_RSP, l2cap_build_conf_rsp(sk, rsp, NULL), rsp);
1508 /* Complete config. */
1509 l2cap_send_rsp(conn, cmd->ident, L2CAP_CONF_RSP, l2cap_build_conf_rsp(sk, rsp, &result), rsp);
1514 /* Output config done */
1515 l2cap_pi(sk)->conf_state |= L2CAP_CONF_OUTPUT_DONE;
1517 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
1518 sk->sk_state = BT_CONNECTED;
1519 l2cap_chan_ready(sk);
1520 } else if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
1522 l2cap_send_req(conn, L2CAP_CONF_REQ, l2cap_build_conf_req(sk, req), req);
/*
 * Handle L2CAP_CONF_RSP. UNACCEPT retries the identical request up to
 * L2CAP_CONF_MAX_RETRIES (deliberately — see inline comment); after
 * that, or on other failures, the channel moves to BT_DISCONN with
 * ECONNRESET, a 5s timer, and an L2CAP_DISCONN_REQ. On success, mark
 * INPUT_DONE and complete the channel when OUTPUT_DONE is also set.
 */
1530 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1532 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
1533 u16 scid, flags, result;
1537 scid = __le16_to_cpu(rsp->scid);
1538 flags = __le16_to_cpu(rsp->flags);
1539 result = __le16_to_cpu(rsp->result);
1541 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x", scid, flags, result);
1543 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
1547 case L2CAP_CONF_SUCCESS:
1550 case L2CAP_CONF_UNACCEPT:
1551 if (++l2cap_pi(sk)->conf_retry < L2CAP_CONF_MAX_RETRIES) {
1553 /* It does not make sense to adjust L2CAP parameters
1554 * that are currently defined in the spec. We simply
1555 * resend config request that we sent earlier. It is
1556 * stupid, but it helps qualification testing which
1557 * expects at least some response from us. */
1558 l2cap_send_req(conn, L2CAP_CONF_REQ,
1559 l2cap_build_conf_req(sk, req), req);
1564 sk->sk_state = BT_DISCONN;
1565 sk->sk_err = ECONNRESET;
1566 l2cap_sock_set_timer(sk, HZ * 5);
1568 struct l2cap_disconn_req req;
1569 req.dcid = __cpu_to_le16(l2cap_pi(sk)->dcid);
1570 req.scid = __cpu_to_le16(l2cap_pi(sk)->scid);
1571 l2cap_send_req(conn, L2CAP_DISCONN_REQ, sizeof(req), &req);
1579 /* Input config done */
1580 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
1582 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
1583 sk->sk_state = BT_CONNECTED;
1584 l2cap_chan_ready(sk);
/*
 * Handle L2CAP_DISCONN_REQ from the peer: acknowledge with a
 * DISCONN_RSP (note dcid/scid swapped to our perspective), then tear
 * the channel down with ECONNRESET and kill the socket if orphaned.
 */
1592 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1594 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
1595 struct l2cap_disconn_rsp rsp;
1599 scid = __le16_to_cpu(req->scid);
1600 dcid = __le16_to_cpu(req->dcid);
1602 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
1604 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid)))
1607 rsp.dcid = __cpu_to_le16(l2cap_pi(sk)->scid);
1608 rsp.scid = __cpu_to_le16(l2cap_pi(sk)->dcid);
1609 l2cap_send_rsp(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
1611 sk->sk_shutdown = SHUTDOWN_MASK;
1613 l2cap_chan_del(sk, ECONNRESET);
1616 l2cap_sock_kill(sk);
/*
 * Handle L2CAP_DISCONN_RSP to our own disconnect request: delete the
 * channel with no error and kill the socket if orphaned.
 */
1620 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1622 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
1626 scid = __le16_to_cpu(rsp->scid);
1627 dcid = __le16_to_cpu(rsp->dcid);
1629 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
1631 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
1633 l2cap_chan_del(sk, 0);
1636 l2cap_sock_kill(sk);
/*
 * Demultiplex the signalling channel (CID 0x0001). The frame is first
 * mirrored to raw sockets, then each command header is copied out and
 * dispatched by code. Malformed commands (bad length or zero ident)
 * abort the loop; a handler error triggers an L2CAP_COMMAND_REJ with
 * a zero reason (see FIXME — err is not yet mapped to a spec reason).
 */
1640 static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
1642 u8 *data = skb->data;
1644 struct l2cap_cmd_hdr cmd;
1647 l2cap_raw_recv(conn, skb);
1649 while (len >= L2CAP_CMD_HDR_SIZE) {
1650 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
1651 data += L2CAP_CMD_HDR_SIZE;
1652 len -= L2CAP_CMD_HDR_SIZE;
1654 cmd.len = __le16_to_cpu(cmd.len);
1656 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd.len, cmd.ident);
1658 if (cmd.len > len || !cmd.ident) {
1659 BT_DBG("corrupted command");
1664 case L2CAP_CONN_REQ:
1665 err = l2cap_connect_req(conn, &cmd, data);
1668 case L2CAP_CONN_RSP:
1669 err = l2cap_connect_rsp(conn, &cmd, data);
1672 case L2CAP_CONF_REQ:
1673 err = l2cap_config_req(conn, &cmd, data);
1676 case L2CAP_CONF_RSP:
1677 err = l2cap_config_rsp(conn, &cmd, data);
1680 case L2CAP_DISCONN_REQ:
1681 err = l2cap_disconnect_req(conn, &cmd, data);
1684 case L2CAP_DISCONN_RSP:
1685 err = l2cap_disconnect_rsp(conn, &cmd, data);
1688 case L2CAP_COMMAND_REJ:
1689 /* FIXME: We should process this */
1692 case L2CAP_ECHO_REQ:
1693 l2cap_send_rsp(conn, cmd.ident, L2CAP_ECHO_RSP, cmd.len, data);
1696 case L2CAP_ECHO_RSP:
1697 case L2CAP_INFO_REQ:
1698 case L2CAP_INFO_RSP:
1702 BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
1708 struct l2cap_cmd_rej rej;
1709 BT_DBG("error %d", err);
1711 /* FIXME: Map err to a valid reason. */
1712 rej.reason = __cpu_to_le16(0);
1713 l2cap_send_rsp(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Deliver a connection-oriented data frame to the socket owning CID `cid`.
 * Frames for unknown CIDs, non-connected sockets, or frames exceeding the
 * negotiated incoming MTU are dropped (drop paths elided in this extract).
 * NOTE(review): the sock lock is taken by l2cap_get_chan_by_scid and
 * released at the bottom — confirm against the full file. */
1723 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
1727 sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
1729 BT_DBG("unknown cid 0x%4.4x", cid);
1733 BT_DBG("sk %p, len %d", sk, skb->len);
/* Only fully established channels may receive data. */
1735 if (sk->sk_state != BT_CONNECTED)
/* Enforce the negotiated incoming MTU. */
1738 if (l2cap_pi(sk)->imtu < skb->len)
1741 /* If socket recv buffers overflows we drop data here
1742 * which is *bad* because L2CAP has to be reliable.
1743 * But we don't have any other choice. L2CAP doesn't
1744 * provide flow control mechanism */
/* sock_queue_rcv_skb returns 0 on success; skb ownership transfers. */
1746 if (!sock_queue_rcv_skb(sk, skb))
1753 if (sk) bh_unlock_sock(sk);
/* Deliver a connectionless (CID 0x0002) frame to a socket bound to `psm`
 * on our local address.  Mirrors l2cap_data_channel but additionally
 * accepts BT_BOUND sockets since no channel setup happens for CL traffic. */
1757 static inline int l2cap_conless_channel(struct l2cap_conn *conn, u16 psm, struct sk_buff *skb)
1761 sk = l2cap_get_sock_by_psm(0, psm, conn->src);
1765 BT_DBG("sk %p, len %d", sk, skb->len);
1767 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
1770 if (l2cap_pi(sk)->imtu < skb->len)
1773 if (!sock_queue_rcv_skb(sk, skb))
1780 if (sk) bh_unlock_sock(sk);
/* Route one complete, reassembled L2CAP frame by destination CID:
 * 0x0001 -> signalling, 0x0002 -> connectionless (PSM-prefixed),
 * anything else -> connection-oriented data channel.
 * NOTE(review): `psm` is read with get_unaligned() but no __le16_to_cpu()
 * is visible here — on big-endian hosts this looks byte-swapped; confirm
 * against the full file / later upstream fixes. */
1784 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
1786 struct l2cap_hdr *lh = (void *) skb->data;
/* Strip the basic L2CAP header; payload starts at skb->data afterwards. */
1789 skb_pull(skb, L2CAP_HDR_SIZE);
1790 cid = __le16_to_cpu(lh->cid);
1791 len = __le16_to_cpu(lh->len);
1793 BT_DBG("len %d, cid 0x%4.4x", len, cid);
1797 l2cap_sig_channel(conn, skb);
/* Connectionless frames carry the PSM unaligned at the payload start. */
1801 psm = get_unaligned((u16 *) skb->data);
1803 l2cap_conless_channel(conn, psm, skb);
1807 l2cap_data_channel(conn, cid, skb);
1812 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI callback: should we accept an incoming ACL connection from `bdaddr`?
 * Scans listening L2CAP sockets and ORs together their link-mode bits:
 * lm1 accumulates exact local-address matches, lm2 wildcard (BDADDR_ANY)
 * binds; an exact match takes precedence.  Returns 0 (reject) for
 * non-ACL links or when nothing is listening. */
1814 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1816 int exact = 0, lm1 = 0, lm2 = 0;
1817 register struct sock *sk;
1818 struct hlist_node *node;
/* L2CAP only runs over ACL links. */
1820 if (type != ACL_LINK)
1823 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
1825 /* Find listening sockets and check their link_mode */
1826 read_lock(&l2cap_sk_list.lock);
1827 sk_for_each(sk, node, &l2cap_sk_list.head) {
1828 if (sk->sk_state != BT_LISTEN)
/* Socket bound to this adapter's own address: exact match. */
1831 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
1832 lm1 |= (HCI_LM_ACCEPT | l2cap_pi(sk)->link_mode);
1834 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
1835 lm2 |= (HCI_LM_ACCEPT | l2cap_pi(sk)->link_mode);
1837 read_unlock(&l2cap_sk_list.lock);
1839 return exact ? lm1 : lm2;
/* HCI callback: an outgoing/incoming ACL connection attempt completed.
 * On success, attach (or create) the per-link l2cap_conn and kick off any
 * pending channel requests; on failure, tear everything down with the
 * mapped errno. */
1842 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
1844 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
1846 if (hcon->type != ACL_LINK)
1850 struct l2cap_conn *conn;
1852 conn = l2cap_conn_add(hcon, status);
1854 l2cap_conn_ready(conn);
/* Non-zero HCI status: propagate as errno to all waiting channels. */
1856 l2cap_conn_del(hcon, bt_err(status));
/* HCI callback: the underlying ACL link went down.  Destroy the l2cap_conn
 * and error out all of its channels with the mapped disconnect reason. */
1861 static int l2cap_disconn_ind(struct hci_conn *hcon, u8 reason)
1863 BT_DBG("hcon %p reason %d", hcon, reason);
1865 if (hcon->type != ACL_LINK)
1868 l2cap_conn_del(hcon, bt_err(reason));
/* HCI callback: authentication on the link completed with `status`.
 * Walks every channel parked in BT_CONNECT2 (connect pending on security)
 * and either lets it proceed to BT_CONFIG or rejects it with
 * L2CAP_CR_SEC_BLOCK, answering the stored connect request ident.
 * Channels that additionally require encryption are skipped here — they
 * are resolved in l2cap_encrypt_cfm, which is a near-duplicate of this
 * function (candidate for a shared helper in the full file). */
1872 static int l2cap_auth_cfm(struct hci_conn *hcon, u8 status)
1874 struct l2cap_chan_list *l;
1875 struct l2cap_conn *conn;
1876 struct l2cap_conn_rsp rsp;
1880 if (!(conn = hcon->l2cap_data))
1882 l = &conn->chan_list;
1884 BT_DBG("conn %p", conn);
1886 read_lock(&l->lock);
/* Iterate the per-connection channel list (next_c links). */
1888 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
1891 if (sk->sk_state != BT_CONNECT2 ||
1892 (l2cap_pi(sk)->link_mode & L2CAP_LM_ENCRYPT)) {
/* Auth succeeded: move on to configuration. */
1898 sk->sk_state = BT_CONFIG;
/* Auth failed: schedule teardown shortly and report security block. */
1901 sk->sk_state = BT_DISCONN;
1902 l2cap_sock_set_timer(sk, HZ/10);
1903 result = L2CAP_CR_SEC_BLOCK;
/* Answer the pending Connect Request with the outcome. */
1906 rsp.scid = __cpu_to_le16(l2cap_pi(sk)->dcid);
1907 rsp.dcid = __cpu_to_le16(l2cap_pi(sk)->scid);
1908 rsp.result = __cpu_to_le16(result);
1909 rsp.status = __cpu_to_le16(0);
1910 l2cap_send_rsp(conn, l2cap_pi(sk)->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
1915 read_unlock(&l->lock);
/* HCI callback: link encryption change completed with `status`.
 * Same shape as l2cap_auth_cfm, but applies to *all* BT_CONNECT2 channels
 * (no encryption-mode filter): success advances to BT_CONFIG, failure
 * rejects with L2CAP_CR_SEC_BLOCK and schedules disconnect. */
1919 static int l2cap_encrypt_cfm(struct hci_conn *hcon, u8 status)
1921 struct l2cap_chan_list *l;
1922 struct l2cap_conn *conn;
1923 struct l2cap_conn_rsp rsp;
1927 if (!(conn = hcon->l2cap_data))
1929 l = &conn->chan_list;
1931 BT_DBG("conn %p", conn);
1933 read_lock(&l->lock);
1935 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
1938 if (sk->sk_state != BT_CONNECT2) {
1944 sk->sk_state = BT_CONFIG;
1947 sk->sk_state = BT_DISCONN;
1948 l2cap_sock_set_timer(sk, HZ/10);
1949 result = L2CAP_CR_SEC_BLOCK;
1952 rsp.scid = __cpu_to_le16(l2cap_pi(sk)->dcid);
1953 rsp.dcid = __cpu_to_le16(l2cap_pi(sk)->scid);
1954 rsp.result = __cpu_to_le16(result);
1955 rsp.status = __cpu_to_le16(0);
1956 l2cap_send_rsp(conn, l2cap_pi(sk)->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
1961 read_unlock(&l->lock);
/* HCI callback: one ACL packet arrived for this link.  L2CAP frames may be
 * fragmented across ACL packets: an ACL_START packet carries the L2CAP
 * header (giving the total length); continuations are accumulated into
 * conn->rx_skb until conn->rx_len reaches zero, then the complete frame is
 * handed to l2cap_recv_frame().
 * NOTE(review): several drop/goto/return paths and the `len` declaration
 * are elided in this extract — confirm error handling in the full file. */
1965 static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
1967 struct l2cap_conn *conn = hcon->l2cap_data;
/* Lazily create the l2cap_conn on first inbound data. */
1969 if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
1972 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
1974 if (flags & ACL_START) {
1975 struct l2cap_hdr *hdr;
/* A start frame while reassembly is in progress: discard the partial. */
1979 BT_ERR("Unexpected start frame (len %d)", skb->len);
1980 kfree_skb(conn->rx_skb);
1981 conn->rx_skb = NULL;
/* Start frame must at least contain the L2CAP basic header. */
1986 BT_ERR("Frame is too short (len %d)", skb->len);
1990 hdr = (struct l2cap_hdr *) skb->data;
1991 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
1993 if (len == skb->len) {
1994 /* Complete frame received */
1995 l2cap_recv_frame(conn, skb);
1999 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
2001 if (skb->len > len) {
2002 BT_ERR("Frame is too long (len %d, expected len %d)",
2007 /* Allocate skb for the complete frame (with header) */
2008 if (!(conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC)))
2011 memcpy(skb_put(conn->rx_skb, skb->len), skb->data, skb->len);
/* Remember how many bytes are still outstanding. */
2012 conn->rx_len = len - skb->len;
2014 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
/* Continuation with no reassembly in progress is a protocol error. */
2016 if (!conn->rx_len) {
2017 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
2021 if (skb->len > conn->rx_len) {
2022 BT_ERR("Fragment is too long (len %d, expected %d)",
2023 skb->len, conn->rx_len);
2024 kfree_skb(conn->rx_skb);
2025 conn->rx_skb = NULL;
2030 memcpy(skb_put(conn->rx_skb, skb->len), skb->data, skb->len);
2031 conn->rx_len -= skb->len;
2033 if (!conn->rx_len) {
2034 /* Complete frame received */
2035 l2cap_recv_frame(conn, conn->rx_skb);
/* Ownership of rx_skb passed to l2cap_recv_frame. */
2036 conn->rx_skb = NULL;
2045 /* ---- Proc fs support ---- */
2046 #ifdef CONFIG_PROC_FS
/* seq_file start: take the socket-list lock (held until ..._stop) and walk
 * to the *pos-th socket.  The counting/return lines are elided in this
 * extract. */
2047 static void *l2cap_seq_start(struct seq_file *seq, loff_t *pos)
2050 struct hlist_node *node;
2053 read_lock_bh(&l2cap_sk_list.lock);
2055 sk_for_each(sk, node, &l2cap_sk_list.head)
/* seq_file next: advance to the following socket in l2cap_sk_list
 * (body elided in this extract — presumably sk_next()-based). */
2063 static void *l2cap_seq_next(struct seq_file *seq, void *e, loff_t *pos)
/* seq_file stop: release the lock taken in l2cap_seq_start. */
2069 static void l2cap_seq_stop(struct seq_file *seq, void *e)
2071 read_unlock_bh(&l2cap_sk_list.lock);
/* seq_file show: print one /proc/bluetooth/l2cap line per socket:
 * src dst state psm scid dcid imtu omtu link_mode. */
2074 static int l2cap_seq_show(struct seq_file *seq, void *e)
2076 struct sock *sk = e;
2077 struct l2cap_pinfo *pi = l2cap_pi(sk);
2079 seq_printf(seq, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d 0x%x\n",
2080 batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst),
2081 sk->sk_state, pi->psm, pi->scid, pi->dcid, pi->imtu,
2082 pi->omtu, pi->link_mode);
/* seq_file iterator operations for /proc/bluetooth/l2cap. */
2086 static struct seq_operations l2cap_seq_ops = {
2087 .start = l2cap_seq_start,
2088 .next = l2cap_seq_next,
2089 .stop = l2cap_seq_stop,
2090 .show = l2cap_seq_show
/* open() handler for the proc entry: bind the seq_ops to this file. */
2093 static int l2cap_seq_open(struct inode *inode, struct file *file)
2095 return seq_open(file, &l2cap_seq_ops);
/* file_operations for /proc/bluetooth/l2cap (read entry elided in this
 * extract — presumably .read = seq_read). */
2098 static struct file_operations l2cap_seq_fops = {
2099 .owner = THIS_MODULE,
2100 .open = l2cap_seq_open,
2102 .llseek = seq_lseek,
2103 .release = seq_release,
/* Create the read-only /proc/bluetooth/l2cap entry (the NULL check on `p`
 * and the return lines are elided in this extract). */
2106 static int __init l2cap_proc_init(void)
2108 struct proc_dir_entry *p = create_proc_entry("l2cap", S_IRUGO, proc_bt);
2111 p->owner = THIS_MODULE;
2112 p->proc_fops = &l2cap_seq_fops;
/* Remove the proc entry created by l2cap_proc_init. */
2116 static void __exit l2cap_proc_cleanup(void)
2118 remove_proc_entry("l2cap", proc_bt);
2121 #else /* CONFIG_PROC_FS */
/* No-op stubs when procfs support is compiled out (bodies elided —
 * presumably just `return 0;` / empty). */
2123 static int __init l2cap_proc_init(void)
2128 static void __exit l2cap_proc_cleanup(void)
2132 #endif /* CONFIG_PROC_FS */
/* proto_ops for PF_BLUETOOTH/BTPROTO_L2CAP sockets.  recvmsg/poll reuse the
 * generic bt_sock helpers; mmap/socketpair/ioctl are unsupported. */
2134 static struct proto_ops l2cap_sock_ops = {
2135 .family = PF_BLUETOOTH,
2136 .owner = THIS_MODULE,
2137 .release = l2cap_sock_release,
2138 .bind = l2cap_sock_bind,
2139 .connect = l2cap_sock_connect,
2140 .listen = l2cap_sock_listen,
2141 .accept = l2cap_sock_accept,
2142 .getname = l2cap_sock_getname,
2143 .sendmsg = l2cap_sock_sendmsg,
2144 .recvmsg = bt_sock_recvmsg,
2145 .poll = bt_sock_poll,
2146 .mmap = sock_no_mmap,
2147 .socketpair = sock_no_socketpair,
2148 .ioctl = sock_no_ioctl,
2149 .shutdown = l2cap_sock_shutdown,
2150 .setsockopt = l2cap_sock_setsockopt,
2151 .getsockopt = l2cap_sock_getsockopt
/* Socket-family hook so socket(PF_BLUETOOTH, ..., BTPROTO_L2CAP) creates
 * an L2CAP socket. */
2154 static struct net_proto_family l2cap_sock_family_ops = {
2155 .family = PF_BLUETOOTH,
2156 .owner = THIS_MODULE,
2157 .create = l2cap_sock_create,
/* Registration record binding L2CAP into the HCI core: connection
 * accept/confirm, disconnect, security-event and ACL-data callbacks. */
2160 static struct hci_proto l2cap_hci_proto = {
2162 .id = HCI_PROTO_L2CAP,
2163 .connect_ind = l2cap_connect_ind,
2164 .connect_cfm = l2cap_connect_cfm,
2165 .disconn_ind = l2cap_disconn_ind,
2166 .auth_cfm = l2cap_auth_cfm,
2167 .encrypt_cfm = l2cap_encrypt_cfm,
2168 .recv_acldata = l2cap_recv_acldata
/* Module init: register the socket family, then the HCI protocol hooks,
 * then (presumably — elided here) the proc entry.  Error-unwind paths are
 * elided in this extract; confirm that a failed hci_register_proto also
 * unregisters the socket family in the full file. */
2171 static int __init l2cap_init(void)
2175 if ((err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops))) {
2176 BT_ERR("L2CAP socket registration failed");
2180 if ((err = hci_register_proto(&l2cap_hci_proto))) {
2181 BT_ERR("L2CAP protocol registration failed");
2187 BT_INFO("L2CAP ver %s", VERSION);
2188 BT_INFO("L2CAP socket layer initialized");
/* Module exit: tear down in reverse of l2cap_init — proc entry first,
 * then socket family, then the HCI protocol registration. */
2193 static void __exit l2cap_exit(void)
2195 l2cap_proc_cleanup();
2197 /* Unregister socket and protocol */
2198 if (bt_sock_unregister(BTPROTO_L2CAP))
2199 BT_ERR("L2CAP socket unregistration failed");
2201 if (hci_unregister_proto(&l2cap_hci_proto))
2202 BT_ERR("L2CAP protocol unregistration failed");
2205 void l2cap_load(void)
2207 /* Dummy function to trigger automatic L2CAP module loading by
2208 * other modules that use L2CAP sockets but do not use any other
2209 * symbols from it. */
/* Exported so dependent modules pull this module in via symbol resolution. */
2212 EXPORT_SYMBOL(l2cap_load);
/* Module entry/exit points and metadata.  "bt-proto-0" aliases the
 * HCI_PROTO_L2CAP protocol id (presumably 0) for request_module(). */
2214 module_init(l2cap_init);
2215 module_exit(l2cap_exit);
2217 MODULE_AUTHOR("Maxim Krasnyansky <maxk@qualcomm.com>");
2218 MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
2219 MODULE_VERSION(VERSION);
2220 MODULE_LICENSE("GPL");
2221 MODULE_ALIAS("bt-proto-0");