2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
26 * Bluetooth L2CAP core and sockets.
28 * $Id: l2cap.c,v 1.15 2002/09/09 01:14:52 maxk Exp $
32 #include <linux/config.h>
33 #include <linux/module.h>
35 #include <linux/types.h>
36 #include <linux/errno.h>
37 #include <linux/kernel.h>
38 #include <linux/major.h>
39 #include <linux/sched.h>
40 #include <linux/slab.h>
41 #include <linux/poll.h>
42 #include <linux/fcntl.h>
43 #include <linux/init.h>
44 #include <linux/interrupt.h>
45 #include <linux/socket.h>
46 #include <linux/skbuff.h>
47 #include <linux/proc_fs.h>
48 #include <linux/seq_file.h>
49 #include <linux/list.h>
52 #include <asm/system.h>
53 #include <asm/uaccess.h>
54 #include <asm/unaligned.h>
56 #include <net/bluetooth/bluetooth.h>
57 #include <net/bluetooth/hci_core.h>
58 #include <net/bluetooth/l2cap.h>
60 #ifndef CONFIG_BT_L2CAP_DEBUG
62 #define BT_DBG( A... )
/* NOTE(review): this extract has dropped lines (initializer braces etc.);
 * comments below describe only the statements that are visible. */

/* Socket operations table for L2CAP sockets; defined later in the file. */
65 static struct proto_ops l2cap_sock_ops;
/* Global list of all L2CAP sockets, protected by its rwlock. */
67 struct bt_sock_list l2cap_sk_list = {
68 .lock = RW_LOCK_UNLOCKED
/* Forward declarations for connection/channel/socket teardown helpers. */
71 static int l2cap_conn_del(struct hci_conn *conn, int err);
73 static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent);
74 static void l2cap_chan_del(struct sock *sk, int err);
76 static void __l2cap_sock_close(struct sock *sk, int reason);
77 static void l2cap_sock_close(struct sock *sk);
78 static void l2cap_sock_kill(struct sock *sk);
/* Forward declarations for signalling-channel request/response senders. */
80 static int l2cap_send_req(struct l2cap_conn *conn, u8 code, u16 len, void *data);
81 static int l2cap_send_rsp(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data);
83 /* ---- L2CAP timers ---- */
/* Timer callback: fires when sk_timer expires; closes the socket with
 * ETIMEDOUT. NOTE(review): extract is missing body lines (locking etc.). */
84 static void l2cap_sock_timeout(unsigned long arg)
86 struct sock *sk = (struct sock *) arg;
88 BT_DBG("sock %p state %d", sk, sk->sk_state);
91 __l2cap_sock_close(sk, ETIMEDOUT);
/* (Re)arm the socket timer for `timeout` jiffies from now. */
98 static void l2cap_sock_set_timer(struct sock *sk, long timeout)
100 BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
102 if (!mod_timer(&sk->sk_timer, jiffies + timeout))
/* Cancel a pending socket timer, if any. */
106 static void l2cap_sock_clear_timer(struct sock *sk)
108 BT_DBG("sock %p state %d", sk, sk->sk_state);
110 if (timer_pending(&sk->sk_timer) && del_timer(&sk->sk_timer))
/* One-time timer setup: route expiry to l2cap_sock_timeout with the
 * socket pointer as the callback argument. */
114 static void l2cap_sock_init_timer(struct sock *sk)
116 init_timer(&sk->sk_timer);
117 sk->sk_timer.function = l2cap_sock_timeout;
118 sk->sk_timer.data = (unsigned long)sk;
121 /* ---- L2CAP connections ---- */
/* Create (or return the existing) L2CAP connection object attached to an
 * HCI ACL connection. Allocated GFP_ATOMIC since this can run in softirq
 * context. NOTE(review): extract is missing the early-return/NULL-check
 * lines between the visible statements. */
122 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
124 struct l2cap_conn *conn;
126 if ((conn = hcon->l2cap_data))
132 if (!(conn = kmalloc(sizeof(struct l2cap_conn), GFP_ATOMIC)))
134 memset(conn, 0, sizeof(struct l2cap_conn));
136 hcon->l2cap_data = conn;
/* MTU and addresses are taken from the underlying HCI device/link. */
139 conn->mtu = hcon->hdev->acl_mtu;
140 conn->src = &hcon->hdev->bdaddr;
141 conn->dst = &hcon->dst;
143 spin_lock_init(&conn->lock);
144 conn->chan_list.lock = RW_LOCK_UNLOCKED;
146 BT_DBG("hcon %p conn %p", hcon, conn);
/* Tear down an L2CAP connection: free any partial reassembly skb, delete
 * every channel on it (propagating `err` to their owners), then detach
 * from the HCI connection. */
150 static int l2cap_conn_del(struct hci_conn *hcon, int err)
152 struct l2cap_conn *conn;
155 if (!(conn = hcon->l2cap_data))
158 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
161 kfree_skb(conn->rx_skb);
164 while ((sk = conn->chan_list.head)) {
166 l2cap_chan_del(sk, err);
171 hcon->l2cap_data = NULL;
/* Locked wrapper around __l2cap_chan_add: takes the channel-list write
 * lock for the duration of the insert. */
176 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
178 struct l2cap_chan_list *l = &conn->chan_list;
179 write_lock(&l->lock);
180 __l2cap_chan_add(conn, sk, parent);
181 write_unlock(&l->lock);
184 /* ---- Socket interface ---- */
/* Exact-match lookup: socket bound to (source PSM, source bdaddr).
 * Caller must hold l2cap_sk_list.lock. */
185 static struct sock *__l2cap_get_sock_by_addr(u16 psm, bdaddr_t *src)
188 struct hlist_node *node;
189 sk_for_each(sk, node, &l2cap_sk_list.head)
190 if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
197 /* Find socket with psm and source bdaddr.
198 * Returns closest match.
/* Closest-match lookup: prefers an exact source-address match; falls back
 * to a BDADDR_ANY wildcard socket (tracked in sk1). `state == 0` matches
 * any state. Caller must hold l2cap_sk_list.lock. */
200 static struct sock *__l2cap_get_sock_by_psm(int state, u16 psm, bdaddr_t *src)
202 struct sock *sk = NULL, *sk1 = NULL;
203 struct hlist_node *node;
205 sk_for_each(sk, node, &l2cap_sk_list.head) {
206 if (state && sk->sk_state != state)
209 if (l2cap_pi(sk)->psm == psm) {
211 if (!bacmp(&bt_sk(sk)->src, src))
215 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
/* node != NULL means the loop exited early on an exact match. */
219 return node ? sk : sk1;
222 /* Find socket with given address (psm, src).
223 * Returns locked socket */
224 static inline struct sock *l2cap_get_sock_by_psm(int state, u16 psm, bdaddr_t *src)
227 read_lock(&l2cap_sk_list.lock);
228 s = __l2cap_get_sock_by_psm(state, psm, src);
/* Lock the socket before dropping the list lock so it cannot go away. */
229 if (s) bh_lock_sock(s);
230 read_unlock(&l2cap_sk_list.lock);
/* Socket destructor: drop queued skbs and free the protocol-private data.
 * Runs when the last reference to the sock is released. */
234 static void l2cap_sock_destruct(struct sock *sk)
238 skb_queue_purge(&sk->sk_receive_queue);
239 skb_queue_purge(&sk->sk_write_queue);
242 kfree(sk->sk_protinfo);
/* Close every not-yet-accepted child of a listening socket, then mark
 * the parent closed and zapped. */
245 static void l2cap_sock_cleanup_listen(struct sock *parent)
249 BT_DBG("parent %p", parent);
251 /* Close not yet accepted channels */
252 while ((sk = bt_accept_dequeue(parent, NULL)))
253 l2cap_sock_close(sk);
255 parent->sk_state = BT_CLOSED;
256 parent->sk_zapped = 1;
259 /* Kill socket (only if zapped and orphan)
260 * Must be called on unlocked socket.
262 static void l2cap_sock_kill(struct sock *sk)
264 if (!sk->sk_zapped || sk->sk_socket)
267 BT_DBG("sk %p state %d", sk, sk->sk_state);
269 /* Kill poor orphan */
270 bt_sock_unlink(&l2cap_sk_list, sk);
271 sock_set_flag(sk, SOCK_DEAD);
/* State-dependent close. For a connected SEQPACKET channel, send an
 * L2CAP disconnect request and arm the timer to bound the handshake;
 * otherwise just delete the channel with `reason`.
 * NOTE(review): switch case labels are missing from this extract. */
275 static void __l2cap_sock_close(struct sock *sk, int reason)
277 BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
279 switch (sk->sk_state) {
281 l2cap_sock_cleanup_listen(sk);
287 if (sk->sk_type == SOCK_SEQPACKET) {
288 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
289 struct l2cap_disconn_req req;
291 sk->sk_state = BT_DISCONN;
292 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
294 req.dcid = __cpu_to_le16(l2cap_pi(sk)->dcid);
295 req.scid = __cpu_to_le16(l2cap_pi(sk)->scid);
296 l2cap_send_req(conn, L2CAP_DISCONN_REQ, sizeof(req), &req);
298 l2cap_chan_del(sk, reason);
304 l2cap_chan_del(sk, reason);
313 /* Must be called on unlocked socket. */
314 static void l2cap_sock_close(struct sock *sk)
316 l2cap_sock_clear_timer(sk);
318 __l2cap_sock_close(sk, ECONNRESET);
/* Initialize a fresh socket. With a parent (incoming connection) it
 * inherits type, MTUs and link mode; otherwise defaults are used.
 * NOTE(review): the if/else around the parent branch is missing here. */
323 static void l2cap_sock_init(struct sock *sk, struct sock *parent)
325 struct l2cap_pinfo *pi = l2cap_pi(sk);
330 sk->sk_type = parent->sk_type;
331 pi->imtu = l2cap_pi(parent)->imtu;
332 pi->omtu = l2cap_pi(parent)->omtu;
333 pi->link_mode = l2cap_pi(parent)->link_mode;
335 pi->imtu = L2CAP_DEFAULT_MTU;
340 /* Default config options */
341 pi->conf_mtu = L2CAP_DEFAULT_MTU;
342 pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
/* Allocate a new L2CAP socket, wire up destructor/timeout defaults,
 * and link it into the global socket list. */
345 static struct sock *l2cap_sock_alloc(struct socket *sock, int proto, int prio)
349 sk = bt_sock_alloc(sock, proto, sizeof(struct l2cap_pinfo), prio);
353 sk_set_owner(sk, THIS_MODULE);
355 sk->sk_destruct = l2cap_sock_destruct;
/* Connection setup timeout doubles as the send timeout default. */
356 sk->sk_sndtimeo = L2CAP_CONN_TIMEOUT;
358 sk->sk_protocol = proto;
359 sk->sk_state = BT_OPEN;
361 l2cap_sock_init_timer(sk);
363 bt_sock_link(&l2cap_sk_list, sk);
/* socket(2) backend: only SEQPACKET, DGRAM and RAW are supported, and
 * RAW requires CAP_NET_RAW. */
367 static int l2cap_sock_create(struct socket *sock, int protocol)
371 BT_DBG("sock %p", sock);
373 sock->state = SS_UNCONNECTED;
375 if (sock->type != SOCK_SEQPACKET && sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
376 return -ESOCKTNOSUPPORT;
378 if (sock->type == SOCK_RAW && !capable(CAP_NET_RAW))
381 sock->ops = &l2cap_sock_ops;
383 sk = l2cap_sock_alloc(sock, protocol, GFP_KERNEL);
387 l2cap_sock_init(sk, NULL);
/* bind(2): record source bdaddr and PSM. A nonzero PSM must not already
 * be bound on the same source address (checked under the list lock). */
391 static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
393 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
394 struct sock *sk = sock->sk;
397 BT_DBG("sk %p, %s %d", sk, batostr(&la->l2_bdaddr), la->l2_psm);
399 if (!addr || addr->sa_family != AF_BLUETOOTH)
404 if (sk->sk_state != BT_OPEN) {
409 write_lock_bh(&l2cap_sk_list.lock);
410 if (la->l2_psm && __l2cap_get_sock_by_addr(la->l2_psm, &la->l2_bdaddr)) {
413 /* Save source address */
414 bacpy(&bt_sk(sk)->src, &la->l2_bdaddr);
415 l2cap_pi(sk)->psm = la->l2_psm;
416 l2cap_pi(sk)->sport = la->l2_psm;
417 sk->sk_state = BT_BOUND;
419 write_unlock_bh(&l2cap_sk_list.lock);
/* Establish the outgoing path: route to an HCI device, create/reuse the
 * ACL link, attach an l2cap_conn and add this channel to it. If the ACL
 * is already up, proceed immediately (send CONN_REQ for SEQPACKET, or go
 * straight to BT_CONNECTED otherwise); else the timer guards the wait.
 * NOTE(review): error-path lines are missing from this extract. */
426 static int l2cap_do_connect(struct sock *sk)
428 bdaddr_t *src = &bt_sk(sk)->src;
429 bdaddr_t *dst = &bt_sk(sk)->dst;
430 struct l2cap_conn *conn;
431 struct hci_conn *hcon;
432 struct hci_dev *hdev;
435 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst), l2cap_pi(sk)->psm);
437 if (!(hdev = hci_get_route(dst, src)))
438 return -EHOSTUNREACH;
440 hci_dev_lock_bh(hdev);
444 hcon = hci_connect(hdev, ACL_LINK, dst);
448 conn = l2cap_conn_add(hcon, 0);
456 /* Update source addr of the socket */
457 bacpy(src, conn->src);
459 l2cap_chan_add(conn, sk, NULL);
461 sk->sk_state = BT_CONNECT;
462 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
464 if (hcon->state == BT_CONNECTED) {
465 if (sk->sk_type == SOCK_SEQPACKET) {
466 struct l2cap_conn_req req;
467 req.scid = __cpu_to_le16(l2cap_pi(sk)->scid);
468 req.psm = l2cap_pi(sk)->psm;
469 l2cap_send_req(conn, L2CAP_CONN_REQ, sizeof(req), &req);
471 l2cap_sock_clear_timer(sk);
472 sk->sk_state = BT_CONNECTED;
477 hci_dev_unlock_bh(hdev);
/* connect(2): validate the address (SEQPACKET needs a PSM), reject bad
 * states, store destination, kick off l2cap_do_connect, then optionally
 * block until BT_CONNECTED honoring O_NONBLOCK/sndtimeo. */
482 static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
484 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
485 struct sock *sk = sock->sk;
492 if (addr->sa_family != AF_BLUETOOTH || alen < sizeof(struct sockaddr_l2)) {
497 if (sk->sk_type == SOCK_SEQPACKET && !la->l2_psm) {
502 switch(sk->sk_state) {
506 /* Already connecting */
510 /* Already connected */
523 /* Set destination address and psm */
524 bacpy(&bt_sk(sk)->dst, &la->l2_bdaddr);
525 l2cap_pi(sk)->psm = la->l2_psm;
527 if ((err = l2cap_do_connect(sk)))
531 err = bt_sock_wait_state(sk, BT_CONNECTED,
532 sock_sndtimeo(sk, flags & O_NONBLOCK));
/* listen(2): only bound SEQPACKET sockets with a PSM may listen. */
538 static int l2cap_sock_listen(struct socket *sock, int backlog)
540 struct sock *sk = sock->sk;
543 BT_DBG("sk %p backlog %d", sk, backlog);
547 if (sk->sk_state != BT_BOUND || sock->type != SOCK_SEQPACKET) {
552 if (!l2cap_pi(sk)->psm) {
557 sk->sk_max_ack_backlog = backlog;
558 sk->sk_ack_backlog = 0;
559 sk->sk_state = BT_LISTEN;
/* accept(2): sleep (interruptibly, wake-one) until a child connection is
 * ready on the accept queue, honoring O_NONBLOCK via rcvtimeo. Bails out
 * if the socket leaves BT_LISTEN or a signal arrives.
 * NOTE(review): loop-exit/timeout checks are missing from this extract. */
566 static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
568 DECLARE_WAITQUEUE(wait, current);
569 struct sock *sk = sock->sk, *nsk;
575 if (sk->sk_state != BT_LISTEN) {
580 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
582 BT_DBG("sk %p timeo %ld", sk, timeo);
584 /* Wait for an incoming connection. (wake-one). */
585 add_wait_queue_exclusive(sk->sk_sleep, &wait);
586 while (!(nsk = bt_accept_dequeue(sk, newsock))) {
587 set_current_state(TASK_INTERRUPTIBLE);
594 timeo = schedule_timeout(timeo);
597 if (sk->sk_state != BT_LISTEN) {
602 if (signal_pending(current)) {
603 err = sock_intr_errno(timeo);
607 set_current_state(TASK_RUNNING);
608 remove_wait_queue(sk->sk_sleep, &wait);
613 newsock->state = SS_CONNECTED;
615 BT_DBG("new socket %p", nsk);
/* getname/getpeername: report peer dst or local src plus the PSM. */
622 static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
624 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
625 struct sock *sk = sock->sk;
627 BT_DBG("sock %p, sk %p", sock, sk);
629 addr->sa_family = AF_BLUETOOTH;
630 *len = sizeof(struct sockaddr_l2);
633 bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
635 bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
637 la->l2_psm = l2cap_pi(sk)->psm;
/* Build and transmit one L2CAP PDU from user iovec data, fragmenting to
 * the ACL MTU: first skb carries the L2CAP header (plus a 2-byte PSM for
 * connectionless DGRAM sockets), continuation data goes into frag_list
 * skbs that hci_send_acl splits into ACL fragments.
 * NOTE(review): error unwind (kfree_skb on copy failure) and the loop
 * around the continuation fragments are missing from this extract. */
641 static inline int l2cap_do_send(struct sock *sk, struct msghdr *msg, int len)
643 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
644 struct sk_buff *skb, **frag;
645 int err, hlen, count, sent=0;
646 struct l2cap_hdr *lh;
648 BT_DBG("sk %p len %d", sk, len);
650 /* First fragment (with L2CAP header) */
651 if (sk->sk_type == SOCK_DGRAM)
652 hlen = L2CAP_HDR_SIZE + 2;
654 hlen = L2CAP_HDR_SIZE;
656 count = min_t(unsigned int, (conn->mtu - hlen), len);
658 skb = bt_skb_send_alloc(sk, hlen + count,
659 msg->msg_flags & MSG_DONTWAIT, &err);
663 /* Create L2CAP header */
664 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
665 lh->cid = __cpu_to_le16(l2cap_pi(sk)->dcid);
666 lh->len = __cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
/* Connectionless frames carry the destination PSM after the header;
 * put_unaligned because the 2-byte slot may be misaligned. */
668 if (sk->sk_type == SOCK_DGRAM)
669 put_unaligned(l2cap_pi(sk)->psm, (u16 *) skb_put(skb, 2));
671 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count)) {
679 /* Continuation fragments (no L2CAP header) */
680 frag = &skb_shinfo(skb)->frag_list;
682 count = min_t(unsigned int, conn->mtu, len);
684 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
688 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count)) {
696 frag = &(*frag)->next;
699 if ((err = hci_send_acl(conn->hcon, skb, 0)) < 0)
/* sendmsg(2): reject MSG_OOB and frames larger than the outgoing MTU;
 * only a BT_CONNECTED socket may send. */
709 static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
710 struct msghdr *msg, size_t len)
712 struct sock *sk = sock->sk;
715 BT_DBG("sock %p, sk %p", sock, sk);
718 return sock_error(sk);
720 if (msg->msg_flags & MSG_OOB)
723 /* Check outgoing MTU */
724 if (len > l2cap_pi(sk)->omtu)
729 if (sk->sk_state == BT_CONNECTED)
730 err = l2cap_do_send(sk, msg, len);
/* setsockopt(2): L2CAP_OPTIONS copies struct l2cap_options from user
 * space and updates imtu/omtu; L2CAP_LM sets the link-mode bitmask.
 * NOTE(review): the switch/case labels are missing from this extract. */
738 static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char *optval, int optlen)
740 struct sock *sk = sock->sk;
741 struct l2cap_options opts;
751 len = min_t(unsigned int, sizeof(opts), optlen);
752 if (copy_from_user((char *)&opts, optval, len)) {
756 l2cap_pi(sk)->imtu = opts.imtu;
757 l2cap_pi(sk)->omtu = opts.omtu;
761 if (get_user(opt, (u32 *)optval)) {
766 l2cap_pi(sk)->link_mode = opt;
/* getsockopt(2): returns L2CAP_OPTIONS, L2CAP_LM, or (for connected
 * sockets only) L2CAP_CONNINFO with the underlying HCI handle. */
778 static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char *optval, int *optlen)
780 struct sock *sk = sock->sk;
781 struct l2cap_options opts;
782 struct l2cap_conninfo cinfo;
785 if (get_user(len, optlen))
792 opts.imtu = l2cap_pi(sk)->imtu;
793 opts.omtu = l2cap_pi(sk)->omtu;
794 opts.flush_to = l2cap_pi(sk)->flush_to;
796 len = min_t(unsigned int, len, sizeof(opts));
797 if (copy_to_user(optval, (char *)&opts, len))
803 if (put_user(l2cap_pi(sk)->link_mode, (u32 *)optval))
808 if (sk->sk_state != BT_CONNECTED) {
813 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
815 len = min_t(unsigned int, len, sizeof(cinfo));
816 if (copy_to_user(optval, (char *)&cinfo, len))
/* shutdown(2): close once (idempotent via sk_shutdown), then honor
 * SO_LINGER by waiting for BT_CLOSED up to lingertime. */
830 static int l2cap_sock_shutdown(struct socket *sock, int how)
832 struct sock *sk = sock->sk;
835 BT_DBG("sock %p, sk %p", sock, sk);
840 if (!sk->sk_shutdown) {
841 sk->sk_shutdown = SHUTDOWN_MASK;
842 l2cap_sock_clear_timer(sk);
843 __l2cap_sock_close(sk, 0);
845 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
846 err = bt_sock_wait_state(sk, BT_CLOSED, sk->sk_lingertime);
/* release(2): full shutdown of both directions. */
852 static int l2cap_sock_release(struct socket *sock)
854 struct sock *sk = sock->sk;
857 BT_DBG("sock %p, sk %p", sock, sk);
861 err = l2cap_sock_shutdown(sock, 2);
868 /* ---- L2CAP channels ---- */
/* Linear scan of a connection's channel list by destination CID.
 * Caller must hold the channel-list lock. */
869 static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
872 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
873 if (l2cap_pi(s)->dcid == cid)
/* Same scan keyed on the local (source) CID. */
879 static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
882 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
883 if (l2cap_pi(s)->scid == cid)
889 /* Find channel with given SCID.
890 * Returns locked socket */
891 static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
895 s = __l2cap_get_chan_by_scid(l, cid);
896 if (s) bh_lock_sock(s);
897 read_unlock(&l->lock);
/* Pick the first free source CID by probing the channel list.
 * NOTE(review): the starting CID value is not visible in this extract. */
901 static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
905 for (; cid < 0xffff; cid++) {
906 if(!__l2cap_get_chan_by_scid(l, cid))
/* Push a channel onto the head of the doubly linked channel list. */
913 static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
918 l2cap_pi(l->head)->prev_c = sk;
920 l2cap_pi(sk)->next_c = l->head;
921 l2cap_pi(sk)->prev_c = NULL;
/* Unlink a channel, fixing up neighbors/head under the write lock. */
925 static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
927 struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;
929 write_lock(&l->lock);
934 l2cap_pi(next)->prev_c = prev;
936 l2cap_pi(prev)->next_c = next;
937 write_unlock(&l->lock);
/* Attach a channel to a connection and assign CIDs by socket type:
 * SEQPACKET gets a dynamically allocated SCID, DGRAM uses the fixed
 * connectionless CID 0x0002, RAW uses the signalling CID 0x0001.
 * Caller holds the channel-list write lock. */
942 static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
944 struct l2cap_chan_list *l = &conn->chan_list;
946 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn, l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
948 l2cap_pi(sk)->conn = conn;
950 if (sk->sk_type == SOCK_SEQPACKET) {
951 /* Alloc CID for connection-oriented socket */
952 l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
953 } else if (sk->sk_type == SOCK_DGRAM) {
954 /* Connectionless socket */
955 l2cap_pi(sk)->scid = 0x0002;
956 l2cap_pi(sk)->dcid = 0x0002;
957 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
959 /* Raw socket can send/recv signalling messages only */
960 l2cap_pi(sk)->scid = 0x0001;
961 l2cap_pi(sk)->dcid = 0x0001;
962 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
965 __l2cap_chan_link(l, sk);
/* Incoming channels are queued on the parent's accept queue. */
968 bt_accept_enqueue(parent, sk);
972 * Must be called on the locked socket. */
/* Detach a channel from its connection, drop the HCI reference, mark the
 * socket closed, and wake whoever waits on it (parent for accept(),
 * otherwise the socket's own state-change waiters). */
973 static void l2cap_chan_del(struct sock *sk, int err)
975 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
976 struct sock *parent = bt_sk(sk)->parent;
978 l2cap_sock_clear_timer(sk);
980 BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
983 /* Unlink from channel list */
984 l2cap_chan_unlink(&conn->chan_list, sk);
985 l2cap_pi(sk)->conn = NULL;
986 hci_conn_put(conn->hcon);
989 sk->sk_state = BT_CLOSED;
996 parent->sk_data_ready(parent, 0);
998 sk->sk_state_change(sk);
/* Called when the underlying ACL link comes up: non-SEQPACKET channels
 * become connected immediately; SEQPACKET channels still in BT_CONNECT
 * now send their L2CAP connect request. */
1001 static void l2cap_conn_ready(struct l2cap_conn *conn)
1003 struct l2cap_chan_list *l = &conn->chan_list;
1006 BT_DBG("conn %p", conn);
1008 read_lock(&l->lock);
1010 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
1013 if (sk->sk_type != SOCK_SEQPACKET) {
1014 l2cap_sock_clear_timer(sk);
1015 sk->sk_state = BT_CONNECTED;
1016 sk->sk_state_change(sk);
1017 } else if (sk->sk_state == BT_CONNECT) {
1018 struct l2cap_conn_req req;
1019 req.scid = __cpu_to_le16(l2cap_pi(sk)->scid);
1020 req.psm = l2cap_pi(sk)->psm;
1021 l2cap_send_req(conn, L2CAP_CONN_REQ, sizeof(req), &req);
1027 read_unlock(&l->lock);
/* Channel fully configured: reset config state, stop the timer, and wake
 * either the accept()ing parent (incoming) or the connect()or (outgoing).
 * NOTE(review): the if (parent) branch structure is missing here. */
1030 static void l2cap_chan_ready(struct sock *sk)
1032 struct sock *parent = bt_sk(sk)->parent;
1034 BT_DBG("sk %p, parent %p", sk, parent);
1036 l2cap_pi(sk)->conf_state = 0;
1037 l2cap_sock_clear_timer(sk);
1040 /* Outgoing channel.
1041 * Wake up socket sleeping on connect.
1043 sk->sk_state = BT_CONNECTED;
1044 sk->sk_state_change(sk);
1046 /* Incoming channel.
1047 * Wake up socket sleeping on accept.
1049 parent->sk_data_ready(parent, 0);
1053 /* Copy frame to all raw sockets on that connection */
1054 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1056 struct l2cap_chan_list *l = &conn->chan_list;
1057 struct sk_buff *nskb;
1060 BT_DBG("conn %p", conn);
1062 read_lock(&l->lock);
1063 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
1064 if (sk->sk_type != SOCK_RAW)
1067 /* Don't send frame to the socket it came from */
/* Clone per recipient; drop (not fail) if the clone or queueing fails. */
1071 if (!(nskb = skb_clone(skb, GFP_ATOMIC)))
1074 if (sock_queue_rcv_skb(sk, nskb))
1077 read_unlock(&l->lock);
1080 /* ---- L2CAP signalling commands ---- */
/* Allocate the next command identifier on this connection. IDs wrap
 * within 1-199; 200-254 are reserved for user-space tools. Serialized
 * by conn->lock. */
1081 static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
1085 /* Get next available identificator.
1086 * 1 - 199 are used by kernel.
1087 * 200 - 254 are used by utilities like l2ping, etc
1090 spin_lock(&conn->lock);
1092 if (++conn->tx_ident > 199)
1095 id = conn->tx_ident;
1097 spin_unlock(&conn->lock);
/* Build a signalling PDU (CID 0x0001): L2CAP header + command header +
 * payload, fragmenting payload into frag_list skbs at the ACL MTU.
 * NOTE(review): cmd->code/ident assignments and the fragment loop bounds
 * are missing from this extract. */
1102 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
1103 u8 code, u8 ident, u16 dlen, void *data)
1105 struct sk_buff *skb, **frag;
1106 struct l2cap_cmd_hdr *cmd;
1107 struct l2cap_hdr *lh;
1110 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d", conn, code, ident, dlen);
1112 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
1113 count = min_t(unsigned int, conn->mtu, len);
1115 skb = bt_skb_alloc(count, GFP_ATOMIC);
1119 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1120 lh->len = __cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
1121 lh->cid = __cpu_to_le16(0x0001);
1123 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
1126 cmd->len = __cpu_to_le16(dlen);
1129 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
1130 memcpy(skb_put(skb, count), data, count);
1136 /* Continuation fragments (no L2CAP header) */
1137 frag = &skb_shinfo(skb)->frag_list;
1139 count = min_t(unsigned int, conn->mtu, len);
1141 *frag = bt_skb_alloc(count, GFP_ATOMIC);
1145 memcpy(skb_put(*frag, count), data, count);
1150 frag = &(*frag)->next;
/* Send a signalling request using a freshly allocated identifier. */
1160 static int l2cap_send_req(struct l2cap_conn *conn, u8 code, u16 len, void *data)
1162 u8 ident = l2cap_get_ident(conn);
1163 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
1165 BT_DBG("code 0x%2.2x", code);
1169 return hci_send_acl(conn->hcon, skb, 0);
/* Send a signalling response, echoing the requester's identifier. */
1172 static int l2cap_send_rsp(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
1174 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
1176 BT_DBG("code 0x%2.2x", code);
1180 return hci_send_acl(conn->hcon, skb, 0);
/* Decode one TLV configuration option at *ptr, returning its value in
 * host byte order (1/2/4-byte values converted; anything else passed as
 * a pointer). NOTE(review): the *ptr advance, *type/*olen assignments and
 * switch labels are missing from this extract. */
1183 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
1185 struct l2cap_conf_opt *opt = *ptr;
1188 len = L2CAP_CONF_OPT_SIZE + opt->len;
1196 *val = *((u8 *) opt->val);
1200 *val = __le16_to_cpu(*((u16 *)opt->val));
1204 *val = __le32_to_cpu(*((u32 *)opt->val));
1208 *val = (unsigned long) opt->val;
1212 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
/* Walk the peer's config-request options, recording the MTU and flush
 * timeout it proposes; QoS and unknown options are not acted on. */
1216 static inline void l2cap_parse_conf_req(struct sock *sk, void *data, int len)
1218 int type, hint, olen;
1222 BT_DBG("sk %p len %d", sk, len);
1224 while (len >= L2CAP_CONF_OPT_SIZE) {
1225 len -= l2cap_get_conf_opt(&ptr, &type, &olen, &val);
1231 case L2CAP_CONF_MTU:
1232 l2cap_pi(sk)->conf_mtu = val;
1235 case L2CAP_CONF_FLUSH_TO:
1236 l2cap_pi(sk)->flush_to = val;
1239 case L2CAP_CONF_QOS:
1246 /* FIXME: Reject unknown option */
/* Append one TLV option at *ptr (little-endian for 2/4-byte values,
 * memcpy for anything else) and advance *ptr past it. */
1252 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
1254 struct l2cap_conf_opt *opt = *ptr;
1256 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
1263 *((u8 *) opt->val) = val;
1267 *((u16 *) opt->val) = __cpu_to_le16(val);
1271 *((u32 *) opt->val) = __cpu_to_le32(val);
1275 memcpy(opt->val, (void *) val, len);
1279 *ptr += L2CAP_CONF_OPT_SIZE + len;
/* Build our config request: advertise a non-default incoming MTU.
 * Flush timeout is intentionally not sent (see FIXME). */
1282 static int l2cap_build_conf_req(struct sock *sk, void *data)
1284 struct l2cap_pinfo *pi = l2cap_pi(sk);
1285 struct l2cap_conf_req *req = data;
1286 void *ptr = req->data;
1288 BT_DBG("sk %p", sk);
1290 if (pi->imtu != L2CAP_DEFAULT_MTU)
1291 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
1293 /* FIXME. Need actual value of the flush timeout */
1294 //if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
1295 // l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);
1297 req->dcid = __cpu_to_le16(pi->dcid);
1298 req->flags = __cpu_to_le16(0);
/* Evaluate the peer's proposed options: if its MTU is below our outgoing
 * MTU we counter-propose ours and return UNACCEPT; otherwise adopt it. */
1303 static inline int l2cap_conf_output(struct sock *sk, void **ptr)
1305 struct l2cap_pinfo *pi = l2cap_pi(sk);
1308 /* Configure output options and let the other side know
1309 * which ones we don't like.
1311 if (pi->conf_mtu < pi->omtu) {
1312 l2cap_add_conf_opt(ptr, L2CAP_CONF_MTU, 2, pi->omtu);
1313 result = L2CAP_CONF_UNACCEPT;
1315 pi->omtu = pi->conf_mtu;
1318 BT_DBG("sk %p result %d", sk, result);
/* Build a config response. result == NULL means an incomplete request
 * (continuation expected), so an empty success response is sent. */
1322 static int l2cap_build_conf_rsp(struct sock *sk, void *data, int *result)
1324 struct l2cap_conf_rsp *rsp = data;
1325 void *ptr = rsp->data;
1328 BT_DBG("sk %p complete %d", sk, result ? 1 : 0);
1331 *result = l2cap_conf_output(sk, &ptr);
1335 rsp->scid = __cpu_to_le16(l2cap_pi(sk)->dcid);
1336 rsp->result = __cpu_to_le16(result ? *result : 0);
1337 rsp->flags = __cpu_to_le16(flags);
/* Handle an incoming L2CAP connect request: find the listener for the
 * PSM, check its backlog, allocate and initialize a child socket, add it
 * as a channel (guarding against a duplicate remote SCID), then either
 * defer with AUTHEN_PEND while link-level auth/encryption runs or answer
 * success immediately. Always replies with a conn_rsp carrying
 * result/status. NOTE(review): several goto labels/branches are missing
 * from this extract. */
1342 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1344 struct l2cap_chan_list *list = &conn->chan_list;
1345 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
1346 struct l2cap_conn_rsp rsp;
1347 struct sock *sk, *parent;
1348 int result = 0, status = 0;
1350 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
1353 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
1355 /* Check if we have socket listening on psm */
1356 parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
1358 result = L2CAP_CR_BAD_PSM;
1362 result = L2CAP_CR_NO_MEM;
1364 /* Check for backlog size */
1365 if (parent->sk_ack_backlog > parent->sk_max_ack_backlog) {
1366 BT_DBG("backlog full %d", parent->sk_ack_backlog);
1370 sk = l2cap_sock_alloc(NULL, BTPROTO_L2CAP, GFP_ATOMIC);
1374 write_lock(&list->lock);
1376 /* Check if we already have channel with that dcid */
1377 if (__l2cap_get_chan_by_dcid(list, scid)) {
1378 write_unlock(&list->lock);
1380 l2cap_sock_kill(sk);
1384 hci_conn_hold(conn->hcon);
1386 l2cap_sock_init(sk, parent);
1387 bacpy(&bt_sk(sk)->src, conn->src);
1388 bacpy(&bt_sk(sk)->dst, conn->dst);
1389 l2cap_pi(sk)->psm = psm;
1390 l2cap_pi(sk)->dcid = scid;
1392 __l2cap_chan_add(conn, sk, parent);
1393 dcid = l2cap_pi(sk)->scid;
1395 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
1397 /* Service level security */
1398 result = L2CAP_CR_PEND;
1399 status = L2CAP_CS_AUTHEN_PEND;
1400 sk->sk_state = BT_CONNECT2;
1401 l2cap_pi(sk)->ident = cmd->ident;
1403 if (l2cap_pi(sk)->link_mode & L2CAP_LM_ENCRYPT) {
1404 if (!hci_conn_encrypt(conn->hcon))
1406 } else if (l2cap_pi(sk)->link_mode & L2CAP_LM_AUTH) {
1407 if (!hci_conn_auth(conn->hcon))
/* No security required: proceed straight to configuration. */
1411 sk->sk_state = BT_CONFIG;
1412 result = status = 0;
1415 write_unlock(&list->lock);
1418 bh_unlock_sock(parent);
1421 rsp.scid = __cpu_to_le16(scid);
1422 rsp.dcid = __cpu_to_le16(dcid);
1423 rsp.result = __cpu_to_le16(result);
1424 rsp.status = __cpu_to_le16(status);
1425 l2cap_send_rsp(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* Handle the peer's response to our connect request: on success record
 * the remote DCID, move to BT_CONFIG and send our config request; on
 * refusal delete the channel with ECONNREFUSED.
 * NOTE(review): the req buffer declaration and default case are missing
 * from this extract. */
1429 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1431 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
1432 u16 scid, dcid, result, status;
1436 scid = __le16_to_cpu(rsp->scid);
1437 dcid = __le16_to_cpu(rsp->dcid);
1438 result = __le16_to_cpu(rsp->result);
1439 status = __le16_to_cpu(rsp->status);
1441 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
1443 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
1447 case L2CAP_CR_SUCCESS:
1448 sk->sk_state = BT_CONFIG;
1449 l2cap_pi(sk)->dcid = dcid;
1450 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
1452 l2cap_send_req(conn, L2CAP_CONF_REQ, l2cap_build_conf_req(sk, req), req);
1459 l2cap_chan_del(sk, ECONNREFUSED);
/* Handle the peer's config request: parse its options, respond (empty
 * response if the continuation flag is set), then mark OUTPUT_DONE.
 * The channel is connected once both config directions are complete;
 * if we have not yet sent our own request, do so now. */
1467 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1469 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
1475 dcid = __le16_to_cpu(req->dcid);
1476 flags = __le16_to_cpu(req->flags);
1478 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
1480 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid)))
1483 l2cap_parse_conf_req(sk, req->data, cmd->len - sizeof(*req));
1485 if (flags & 0x0001) {
1486 /* Incomplete config. Send empty response. */
1487 l2cap_send_rsp(conn, cmd->ident, L2CAP_CONF_RSP, l2cap_build_conf_rsp(sk, rsp, NULL), rsp);
1491 /* Complete config. */
1492 l2cap_send_rsp(conn, cmd->ident, L2CAP_CONF_RSP, l2cap_build_conf_rsp(sk, rsp, &result), rsp);
1497 /* Output config done */
1498 l2cap_pi(sk)->conf_state |= L2CAP_CONF_OUTPUT_DONE;
1500 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
1501 sk->sk_state = BT_CONNECTED;
1502 l2cap_chan_ready(sk);
1503 } else if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
1505 l2cap_send_req(conn, L2CAP_CONF_REQ, l2cap_build_conf_req(sk, req), req);
/* Handle the peer's config response: on UNACCEPT, retry the same request
 * up to L2CAP_CONF_MAX_RETRIES; after that (or other failure) tear the
 * channel down via a disconnect request with a 5 s guard timer. On
 * success mark INPUT_DONE and finish if output is also done. */
1513 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1515 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
1516 u16 scid, flags, result;
1520 scid = __le16_to_cpu(rsp->scid);
1521 flags = __le16_to_cpu(rsp->flags);
1522 result = __le16_to_cpu(rsp->result);
1524 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x", scid, flags, result);
1526 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
1530 case L2CAP_CONF_SUCCESS:
1533 case L2CAP_CONF_UNACCEPT:
1534 if (++l2cap_pi(sk)->conf_retry < L2CAP_CONF_MAX_RETRIES) {
1537 It does not make sense to adjust L2CAP parameters
1538 that are currently defined in the spec. We simply
1539 resend config request that we sent earlier. It is
1540 stupid :) but it helps qualification testing
1541 which expects at least some response from us.
1543 l2cap_send_req(conn, L2CAP_CONF_REQ,
1544 l2cap_build_conf_req(sk, req), req);
1549 sk->sk_state = BT_DISCONN;
1550 sk->sk_err = ECONNRESET;
1551 l2cap_sock_set_timer(sk, HZ * 5);
1553 struct l2cap_disconn_req req;
1554 req.dcid = __cpu_to_le16(l2cap_pi(sk)->dcid);
1555 req.scid = __cpu_to_le16(l2cap_pi(sk)->scid);
1556 l2cap_send_req(conn, L2CAP_DISCONN_REQ, sizeof(req), &req);
1564 /* Input config done */
1565 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
1567 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
1568 sk->sk_state = BT_CONNECTED;
1569 l2cap_chan_ready(sk);
/* Handle the peer's disconnect request: acknowledge with a disconnect
 * response, then delete and (if orphaned) kill the channel. */
1577 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1579 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
1580 struct l2cap_disconn_rsp rsp;
1584 scid = __le16_to_cpu(req->scid);
1585 dcid = __le16_to_cpu(req->dcid);
1587 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
1589 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid)))
/* CIDs are mirrored in the response: our SCID is the peer's DCID. */
1592 rsp.dcid = __cpu_to_le16(l2cap_pi(sk)->scid);
1593 rsp.scid = __cpu_to_le16(l2cap_pi(sk)->dcid);
1594 l2cap_send_rsp(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
1596 sk->sk_shutdown = SHUTDOWN_MASK;
1598 l2cap_chan_del(sk, ECONNRESET);
1601 l2cap_sock_kill(sk);
/* Handle the peer's disconnect response: our requested teardown is
 * complete, so delete the channel without an error. */
1605 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1607 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
1611 scid = __le16_to_cpu(rsp->scid);
1612 dcid = __le16_to_cpu(rsp->dcid);
1614 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
1616 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
1618 l2cap_chan_del(sk, 0);
1621 l2cap_sock_kill(sk);
/* Demultiplex the signalling channel (CID 0x0001): mirror the frame to
 * raw sockets, then walk the packed command headers and dispatch each to
 * its handler. Malformed (len > remaining, ident == 0) or failing
 * commands are answered with a command-reject.
 * NOTE(review): the skb_pull/advance between commands is missing from
 * this extract. */
1625 static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
1627 u8 *data = skb->data;
1629 struct l2cap_cmd_hdr cmd;
1632 l2cap_raw_recv(conn, skb);
1634 while (len >= L2CAP_CMD_HDR_SIZE) {
1635 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
1636 data += L2CAP_CMD_HDR_SIZE;
1637 len -= L2CAP_CMD_HDR_SIZE;
1639 cmd.len = __le16_to_cpu(cmd.len);
1641 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd.len, cmd.ident);
1643 if (cmd.len > len || !cmd.ident) {
1644 BT_DBG("corrupted command");
1649 case L2CAP_CONN_REQ:
1650 err = l2cap_connect_req(conn, &cmd, data);
1653 case L2CAP_CONN_RSP:
1654 err = l2cap_connect_rsp(conn, &cmd, data);
1657 case L2CAP_CONF_REQ:
1658 err = l2cap_config_req(conn, &cmd, data);
1661 case L2CAP_CONF_RSP:
1662 err = l2cap_config_rsp(conn, &cmd, data);
1665 case L2CAP_DISCONN_REQ:
1666 err = l2cap_disconnect_req(conn, &cmd, data);
1669 case L2CAP_DISCONN_RSP:
1670 err = l2cap_disconnect_rsp(conn, &cmd, data);
1673 case L2CAP_COMMAND_REJ:
1674 /* FIXME: We should process this */
1677 case L2CAP_ECHO_REQ:
/* Echo request: bounce the payload back as an echo response. */
1678 l2cap_send_rsp(conn, cmd.ident, L2CAP_ECHO_RSP, cmd.len, data);
1681 case L2CAP_ECHO_RSP:
1682 case L2CAP_INFO_REQ:
1683 case L2CAP_INFO_RSP:
1687 BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
1693 struct l2cap_cmd_rej rej;
1694 BT_DBG("error %d", err);
1696 /* FIXME: Map err to a valid reason. */
1697 rej.reason = __cpu_to_le16(0);
1698 l2cap_send_rsp(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Deliver a connection-oriented data frame to the channel bound to @cid.
 * The frame is dropped when the channel is unknown, the socket is not in
 * BT_CONNECTED state, or the payload exceeds the negotiated incoming MTU. */
1708 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
1712 sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
1714 BT_DBG("unknown cid 0x%4.4x", cid);
1718 BT_DBG("sk %p, len %d", sk, skb->len);
1720 if (sk->sk_state != BT_CONNECTED)
/* Enforce the incoming MTU negotiated for this channel. */
1723 if (l2cap_pi(sk)->imtu < skb->len)
/* If the socket receive buffer overflows we drop data here, which is
 * *bad* because L2CAP has to be reliable.  But we don't have any other
 * choice: L2CAP doesn't provide a flow control mechanism. */
1731 if (!sock_queue_rcv_skb(sk, skb))
/* Lookup above returned the socket bh-locked; release before exit. */
1738 if (sk) bh_unlock_sock(sk);
/* Deliver a connectionless packet to the socket bound to @psm on this
 * adapter's source address.  Dropped when no matching socket exists, the
 * socket is neither bound nor connected, or the payload exceeds its MTU. */
1742 static inline int l2cap_conless_channel(struct l2cap_conn *conn, u16 psm, struct sk_buff *skb)
1746 sk = l2cap_get_sock_by_psm(0, psm, conn->src);
1750 BT_DBG("sk %p, len %d", sk, skb->len);
1752 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
1755 if (l2cap_pi(sk)->imtu < skb->len)
1758 if (!sock_queue_rcv_skb(sk, skb))
/* Lookup above returned the socket bh-locked; release before exit. */
1765 if (sk) bh_unlock_sock(sk);
/* Demultiplex one complete, reassembled L2CAP frame by destination CID.
 * The branch conditions are elided from this listing; presumably the
 * well-known signaling and connectionless CIDs are tested before falling
 * through to the connection-oriented data path — confirm against the
 * full source. */
1769 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
1771 struct l2cap_hdr *lh = (void *) skb->data;
/* Strip the basic L2CAP header; cid/len are little-endian on the wire. */
1774 skb_pull(skb, L2CAP_HDR_SIZE);
1775 cid = __le16_to_cpu(lh->cid);
1776 len = __le16_to_cpu(lh->len);
1778 BT_DBG("len %d, cid 0x%4.4x", len, cid);
1782 l2cap_sig_channel(conn, skb);
/* The PSM sits unaligned at the start of a connectionless payload. */
1786 psm = get_unaligned((u16 *) skb->data);
1788 l2cap_conless_channel(conn, psm, skb);
1792 l2cap_data_channel(conn, cid, skb);
1797 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI callback: incoming ACL connection indication from @bdaddr.
 * Scans all listening L2CAP sockets and ORs together their accept/link
 * mode bits; a listener bound to this adapter's own address (exact
 * match) takes precedence over wildcard (BDADDR_ANY) listeners.
 * Returns the resulting link-mode mask (0 = do not accept). */
1799 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1801 int exact = 0, lm1 = 0, lm2 = 0;
1802 register struct sock *sk;
1803 struct hlist_node *node;
/* L2CAP only runs over ACL links. */
1805 if (type != ACL_LINK)
1808 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
1810 /* Find listening sockets and check their link_mode */
1811 read_lock(&l2cap_sk_list.lock);
1812 sk_for_each(sk, node, &l2cap_sk_list.head) {
1813 if (sk->sk_state != BT_LISTEN)
/* lm1 accumulates exact-address matches, lm2 wildcard matches. */
1816 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
1817 lm1 |= (HCI_LM_ACCEPT | l2cap_pi(sk)->link_mode);
1819 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
1820 lm2 |= (HCI_LM_ACCEPT | l2cap_pi(sk)->link_mode);
1822 read_unlock(&l2cap_sk_list.lock);
1824 return exact ? lm1 : lm2;
/* HCI callback: ACL connection attempt completed.  On success, attach an
 * L2CAP connection object to the link and kick off pending channels; on
 * failure, tear the connection down with the mapped errno. */
1827 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
1829 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
1831 if (hcon->type != ACL_LINK)
1835 struct l2cap_conn *conn;
1837 conn = l2cap_conn_add(hcon, status);
1839 l2cap_conn_ready(conn);
/* Failure path: bt_err() maps the HCI status to a kernel errno. */
1841 l2cap_conn_del(hcon, bt_err(status));
/* HCI callback: the ACL link was disconnected.  Destroy the L2CAP
 * connection and all of its channels with the mapped errno. */
1846 static int l2cap_disconn_ind(struct hci_conn *hcon, u8 reason)
1848 BT_DBG("hcon %p reason %d", hcon, reason);
1850 if (hcon->type != ACL_LINK)
1853 l2cap_conn_del(hcon, bt_err(reason));
/* HCI callback: authentication on the link finished with @status.
 * For every channel parked in BT_CONNECT2 (connect response deferred
 * pending security) that does NOT also require encryption, send the
 * pending L2CAP Connect Response now: success moves the channel to
 * BT_CONFIG; failure moves it to BT_DISCONN with a short kill timer and
 * a "security block" result code. */
1857 static int l2cap_auth_cfm(struct hci_conn *hcon, u8 status)
1859 struct l2cap_chan_list *l;
1860 struct l2cap_conn *conn;
1861 struct l2cap_conn_rsp rsp;
1865 if (!(conn = hcon->l2cap_data))
1867 l = &conn->chan_list;
1869 BT_DBG("conn %p", conn);
1871 read_lock(&l->lock);
1873 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
/* Channels that also need encryption wait for l2cap_encrypt_cfm(). */
1876 if (sk->sk_state != BT_CONNECT2 ||
1877 (l2cap_pi(sk)->link_mode & L2CAP_LM_ENCRYPT)) {
1883 sk->sk_state = BT_CONFIG;
1886 sk->sk_state = BT_DISCONN;
1887 l2cap_sock_set_timer(sk, HZ/10);
1888 result = L2CAP_CR_SEC_BLOCK;
/* Response CIDs are from the peer's viewpoint: our dcid is its scid. */
1891 rsp.scid = __cpu_to_le16(l2cap_pi(sk)->dcid);
1892 rsp.dcid = __cpu_to_le16(l2cap_pi(sk)->scid);
1893 rsp.result = __cpu_to_le16(result);
1894 rsp.status = __cpu_to_le16(0);
1895 l2cap_send_rsp(conn, l2cap_pi(sk)->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
1900 read_unlock(&l->lock);
/* HCI callback: link encryption change finished with @status.  Mirrors
 * l2cap_auth_cfm(): every channel still parked in BT_CONNECT2 gets its
 * deferred L2CAP Connect Response — success moves it to BT_CONFIG,
 * failure to BT_DISCONN with a short kill timer and a "security block"
 * result. */
1904 static int l2cap_encrypt_cfm(struct hci_conn *hcon, u8 status)
1906 struct l2cap_chan_list *l;
1907 struct l2cap_conn *conn;
1908 struct l2cap_conn_rsp rsp;
1912 if (!(conn = hcon->l2cap_data))
1914 l = &conn->chan_list;
1916 BT_DBG("conn %p", conn);
1918 read_lock(&l->lock);
1920 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
1923 if (sk->sk_state != BT_CONNECT2) {
1929 sk->sk_state = BT_CONFIG;
1932 sk->sk_state = BT_DISCONN;
1933 l2cap_sock_set_timer(sk, HZ/10);
1934 result = L2CAP_CR_SEC_BLOCK;
/* Response CIDs are from the peer's viewpoint: our dcid is its scid. */
1937 rsp.scid = __cpu_to_le16(l2cap_pi(sk)->dcid);
1938 rsp.dcid = __cpu_to_le16(l2cap_pi(sk)->scid);
1939 rsp.result = __cpu_to_le16(result);
1940 rsp.status = __cpu_to_le16(0);
1941 l2cap_send_rsp(conn, l2cap_pi(sk)->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
1946 read_unlock(&l->lock);
/* HCI callback: one ACL fragment arrived.  Reassembles fragments into a
 * complete L2CAP frame using conn->rx_skb/rx_len, then hands the frame
 * to l2cap_recv_frame().  ACL_START fragments carry the L2CAP header
 * (which gives the total expected length); continuation fragments are
 * appended until rx_len reaches zero.  Inconsistent lengths discard the
 * partial frame. */
1950 static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
1952 struct l2cap_conn *conn = hcon->l2cap_data;
/* Lazily create the L2CAP connection on first data if needed. */
1954 if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
1957 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
1959 if (flags & ACL_START) {
1960 struct l2cap_hdr *hdr;
/* A start fragment while reassembly is in progress means the previous
 * frame was never completed: drop the stale partial frame. */
1964 BT_ERR("Unexpected start frame (len %d)", skb->len);
1965 kfree_skb(conn->rx_skb);
1966 conn->rx_skb = NULL;
1971 BT_ERR("Frame is too short (len %d)", skb->len);
/* Total frame length = L2CAP payload length + basic header. */
1975 hdr = (struct l2cap_hdr *) skb->data;
1976 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
1978 if (len == skb->len) {
1979 /* Complete frame received */
1980 l2cap_recv_frame(conn, skb);
1984 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
1986 if (skb->len > len) {
1987 BT_ERR("Frame is too long (len %d, expected len %d)",
1992 /* Allocate skb for the complete frame (with header) */
1993 if (!(conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC)))
1996 memcpy(skb_put(conn->rx_skb, skb->len), skb->data, skb->len);
/* rx_len tracks how many bytes are still outstanding. */
1997 conn->rx_len = len - skb->len;
1999 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
/* Continuation fragment with no reassembly in progress: ignore. */
2001 if (!conn->rx_len) {
2002 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
2006 if (skb->len > conn->rx_len) {
2007 BT_ERR("Fragment is too long (len %d, expected %d)",
2008 skb->len, conn->rx_len);
2009 kfree_skb(conn->rx_skb);
2010 conn->rx_skb = NULL;
2015 memcpy(skb_put(conn->rx_skb, skb->len), skb->data, skb->len);
2016 conn->rx_len -= skb->len;
2018 if (!conn->rx_len) {
2019 /* Complete frame received */
2020 l2cap_recv_frame(conn, conn->rx_skb);
2021 conn->rx_skb = NULL;
2030 /* ---- Proc fs support ---- */
2031 #ifdef CONFIG_PROC_FS
/* seq_file start: take the socket-list read lock (released in stop) and
 * walk the global L2CAP socket list to the entry at *pos. */
2032 static void *l2cap_seq_start(struct seq_file *seq, loff_t *pos)
2035 struct hlist_node *node;
2038 read_lock_bh(&l2cap_sk_list.lock);
2040 sk_for_each(sk, node, &l2cap_sk_list.head)
/* seq_file next: advance to the following socket (body elided here). */
2048 static void *l2cap_seq_next(struct seq_file *seq, void *e, loff_t *pos)
/* seq_file stop: drop the read lock taken in l2cap_seq_start(). */
2054 static void l2cap_seq_stop(struct seq_file *seq, void *e)
2056 read_unlock_bh(&l2cap_sk_list.lock);
/* seq_file show: print one socket as a single /proc line —
 * src dst state psm scid dcid imtu omtu link_mode. */
2059 static int l2cap_seq_show(struct seq_file *seq, void *e)
2061 struct sock *sk = e;
2062 struct l2cap_pinfo *pi = l2cap_pi(sk);
2064 seq_printf(seq, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d 0x%x\n",
2065 batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst),
2066 sk->sk_state, pi->psm, pi->scid, pi->dcid, pi->imtu,
2067 pi->omtu, pi->link_mode);
/* Iterator callbacks for the /proc/bt/l2cap seq_file. */
2071 static struct seq_operations l2cap_seq_ops = {
2072 .start = l2cap_seq_start,
2073 .next = l2cap_seq_next,
2074 .stop = l2cap_seq_stop,
2075 .show = l2cap_seq_show
/* open() for the proc entry: bind the file to the seq_file iterator. */
2078 static int l2cap_seq_open(struct inode *inode, struct file *file)
2080 return seq_open(file, &l2cap_seq_ops);
/* File operations for /proc/bt/l2cap (read path via seq_file). */
2083 static struct file_operations l2cap_seq_fops = {
2084 .owner = THIS_MODULE,
2085 .open = l2cap_seq_open,
2087 .llseek = seq_lseek,
2088 .release = seq_release,
/* Create the read-only /proc/bt/l2cap entry and wire up its fops. */
2091 static int __init l2cap_proc_init(void)
2093 struct proc_dir_entry *p = create_proc_entry("l2cap", S_IRUGO, proc_bt);
2096 p->owner = THIS_MODULE;
2097 p->proc_fops = &l2cap_seq_fops;
/* Remove the /proc/bt/l2cap entry on module unload. */
2101 static void __exit l2cap_proc_cleanup(void)
2103 remove_proc_entry("l2cap", proc_bt);
2106 #else /* CONFIG_PROC_FS */
/* procfs disabled: provide empty stubs so callers need no #ifdefs. */
2108 static int __init l2cap_proc_init(void)
2113 static void __exit l2cap_proc_cleanup(void)
2117 #endif /* CONFIG_PROC_FS */
/* Socket-layer operations for PF_BLUETOOTH/BTPROTO_L2CAP sockets.
 * recvmsg/poll reuse the generic bt_sock helpers; mmap/socketpair/ioctl
 * are explicitly unsupported. */
2119 static struct proto_ops l2cap_sock_ops = {
2120 .family = PF_BLUETOOTH,
2121 .owner = THIS_MODULE,
2122 .release = l2cap_sock_release,
2123 .bind = l2cap_sock_bind,
2124 .connect = l2cap_sock_connect,
2125 .listen = l2cap_sock_listen,
2126 .accept = l2cap_sock_accept,
2127 .getname = l2cap_sock_getname,
2128 .sendmsg = l2cap_sock_sendmsg,
2129 .recvmsg = bt_sock_recvmsg,
2130 .poll = bt_sock_poll,
2131 .mmap = sock_no_mmap,
2132 .socketpair = sock_no_socketpair,
2133 .ioctl = sock_no_ioctl,
2134 .shutdown = l2cap_sock_shutdown,
2135 .setsockopt = l2cap_sock_setsockopt,
2136 .getsockopt = l2cap_sock_getsockopt
/* Protocol-family hook: creates L2CAP sockets for PF_BLUETOOTH. */
2139 static struct net_proto_family l2cap_sock_family_ops = {
2140 .family = PF_BLUETOOTH,
2141 .create = l2cap_sock_create,
2142 .owner = THIS_MODULE,
/* Registration record hooking L2CAP into the HCI core: connection,
 * disconnection, security, and ACL data callbacks defined above. */
2145 static struct hci_proto l2cap_hci_proto = {
2147 .id = HCI_PROTO_L2CAP,
2148 .connect_ind = l2cap_connect_ind,
2149 .connect_cfm = l2cap_connect_cfm,
2150 .disconn_ind = l2cap_disconn_ind,
2151 .auth_cfm = l2cap_auth_cfm,
2152 .encrypt_cfm = l2cap_encrypt_cfm,
2153 .recv_acldata = l2cap_recv_acldata
/* Module init: register the L2CAP socket family, then the HCI protocol
 * handler.  NOTE(review): the error-unwind and proc-init lines are
 * elided from this listing; the exit path below calls
 * l2cap_proc_cleanup(), so l2cap_proc_init() is presumably called here
 * too — confirm against the full source. */
2156 static int __init l2cap_init(void)
2160 if ((err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops))) {
2161 BT_ERR("L2CAP socket registration failed");
2165 if ((err = hci_register_proto(&l2cap_hci_proto))) {
2166 BT_ERR("L2CAP protocol registration failed");
2172 BT_INFO("L2CAP ver %s", VERSION);
2173 BT_INFO("L2CAP socket layer initialized");
/* Module exit: remove the proc entry, then unregister the socket family
 * and the HCI protocol handler, logging (but not failing on) errors. */
2178 static void __exit l2cap_exit(void)
2180 l2cap_proc_cleanup();
2182 /* Unregister socket and protocol */
2183 if (bt_sock_unregister(BTPROTO_L2CAP))
2184 BT_ERR("L2CAP socket unregistration failed");
2186 if (hci_unregister_proto(&l2cap_hci_proto))
2187 BT_ERR("L2CAP protocol unregistration failed");
2190 void l2cap_load(void)
2192 /* Dummy function to trigger automatic L2CAP module loading by
2193 other modules that use L2CAP sockets but do not use any other
/* Exported so dependent modules pull this one in via symbol resolution. */
2197 EXPORT_SYMBOL(l2cap_load);
/* Module entry/exit hooks and metadata. */
2199 module_init(l2cap_init);
2200 module_exit(l2cap_exit);
2202 MODULE_AUTHOR("Maxim Krasnyansky <maxk@qualcomm.com>");
2203 MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
2204 MODULE_VERSION(VERSION);
2205 MODULE_LICENSE("GPL");
/* Alias lets the kernel auto-load this module for HCI protocol id 0. */
2206 MODULE_ALIAS("bt-proto-0");