2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth L2CAP core and sockets. */
27 #include <linux/config.h>
28 #include <linux/module.h>
30 #include <linux/types.h>
31 #include <linux/errno.h>
32 #include <linux/kernel.h>
33 #include <linux/major.h>
34 #include <linux/sched.h>
35 #include <linux/slab.h>
36 #include <linux/poll.h>
37 #include <linux/fcntl.h>
38 #include <linux/init.h>
39 #include <linux/interrupt.h>
40 #include <linux/socket.h>
41 #include <linux/skbuff.h>
42 #include <linux/proc_fs.h>
43 #include <linux/seq_file.h>
44 #include <linux/list.h>
47 #include <asm/system.h>
48 #include <asm/uaccess.h>
49 #include <asm/unaligned.h>
51 #include <net/bluetooth/bluetooth.h>
52 #include <net/bluetooth/hci_core.h>
53 #include <net/bluetooth/l2cap.h>
55 #ifndef CONFIG_BT_L2CAP_DEBUG
62 static struct proto_ops l2cap_sock_ops;
64 static struct bt_sock_list l2cap_sk_list = {
65 .lock = RW_LOCK_UNLOCKED
68 static int l2cap_conn_del(struct hci_conn *conn, int err);
70 static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent);
71 static void l2cap_chan_del(struct sock *sk, int err);
73 static void __l2cap_sock_close(struct sock *sk, int reason);
74 static void l2cap_sock_close(struct sock *sk);
75 static void l2cap_sock_kill(struct sock *sk);
77 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
78 u8 code, u8 ident, u16 dlen, void *data);
80 /* ---- L2CAP timers ---- */
/* Guard-timer handler installed by l2cap_sock_init_timer(): fires when a
 * connect/disconnect does not complete in time and closes the socket with
 * ETIMEDOUT via __l2cap_sock_close().
 * NOTE(review): this extract omits lines (the socket locking around the
 * close and the final kill/put sequence are not visible here — confirm
 * against the full source). */
81 static void l2cap_sock_timeout(unsigned long arg)
83 struct sock *sk = (struct sock *) arg;
85 BT_DBG("sock %p state %d", sk, sk->sk_state);
88 __l2cap_sock_close(sk, ETIMEDOUT);
95 static void l2cap_sock_set_timer(struct sock *sk, long timeout)
97 BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
98 sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
101 static void l2cap_sock_clear_timer(struct sock *sk)
103 BT_DBG("sock %p state %d", sk, sk->sk_state);
104 sk_stop_timer(sk, &sk->sk_timer);
107 static void l2cap_sock_init_timer(struct sock *sk)
109 init_timer(&sk->sk_timer);
110 sk->sk_timer.function = l2cap_sock_timeout;
111 sk->sk_timer.data = (unsigned long)sk;
114 /* ---- L2CAP connections ---- */
/* Create the per-ACL-link L2CAP connection object, or reuse the existing
 * one stored in hcon->l2cap_data.  Runs in atomic context (GFP_ATOMIC).
 * NOTE(review): extract omits lines — the early return for an existing
 * conn, the NULL return on allocation failure and the trailing
 * 'return conn;' are not visible here. */
115 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
117 struct l2cap_conn *conn;
119 if ((conn = hcon->l2cap_data))
125 if (!(conn = kmalloc(sizeof(struct l2cap_conn), GFP_ATOMIC)))
127 memset(conn, 0, sizeof(struct l2cap_conn))
129 hcon->l2cap_data = conn;
/* Outgoing L2CAP frames are fragmented to the controller's ACL MTU. */
132 conn->mtu = hcon->hdev->acl_mtu;
133 conn->src = &hcon->hdev->bdaddr;
134 conn->dst = &hcon->dst;
136 spin_lock_init(&conn->lock);
137 rwlock_init(&conn->chan_list.lock);
139 BT_DBG("hcon %p conn %p", hcon, conn);
/* Tear down the L2CAP connection attached to @hcon: delete every channel
 * (propagating @err to its socket), free any partially reassembled frame,
 * then detach and free the conn object.
 * NOTE(review): extract omits lines (struct sock *sk declaration, the
 * socket kill/unlock inside the loop, kfree(conn) and return). */
143 static int l2cap_conn_del(struct hci_conn *hcon, int err)
145 struct l2cap_conn *conn;
148 if (!(conn = hcon->l2cap_data))
151 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
/* Drop any half-reassembled incoming frame. */
154 kfree_skb(conn->rx_skb);
/* Kill all channels still attached to this connection. */
157 while ((sk = conn->chan_list.head)) {
159 l2cap_chan_del(sk, err);
164 hcon->l2cap_data = NULL;
169 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
171 struct l2cap_chan_list *l = &conn->chan_list;
172 write_lock(&l->lock);
173 __l2cap_chan_add(conn, sk, parent);
174 write_unlock(&l->lock);
/* Allocate the next signalling-command identifier for @conn, under
 * conn->lock.  Identifier 0 is never used (it is reserved/invalid).
 * NOTE(review): extract omits lines — the wrap-around assignment after
 * the '> 128' test and the 'return' of the ident are not visible. */
177 static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
181 /* Get next available identificator.
182 * 1 - 128 are used by kernel.
183 * 129 - 199 are reserved.
184 * 200 - 254 are used by utilities like l2ping, etc.
187 spin_lock(&conn->lock);
/* Wrap back to 1 once the kernel range (1..128) is exhausted. */
189 if (++conn->tx_ident > 128)
194 spin_unlock(&conn->lock);
199 static inline int l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
201 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
203 BT_DBG("code 0x%2.2x", code);
208 return hci_send_acl(conn->hcon, skb, 0);
211 /* ---- Socket interface ---- */
212 static struct sock *__l2cap_get_sock_by_addr(u16 psm, bdaddr_t *src)
215 struct hlist_node *node;
216 sk_for_each(sk, node, &l2cap_sk_list.head)
217 if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
224 /* Find socket with psm and source bdaddr.
225 * Returns closest match.
227 static struct sock *__l2cap_get_sock_by_psm(int state, u16 psm, bdaddr_t *src)
229 struct sock *sk = NULL, *sk1 = NULL;
230 struct hlist_node *node;
232 sk_for_each(sk, node, &l2cap_sk_list.head) {
233 if (state && sk->sk_state != state)
236 if (l2cap_pi(sk)->psm == psm) {
238 if (!bacmp(&bt_sk(sk)->src, src))
242 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
246 return node ? sk : sk1;
249 /* Find socket with given address (psm, src).
250 * Returns locked socket */
251 static inline struct sock *l2cap_get_sock_by_psm(int state, u16 psm, bdaddr_t *src)
254 read_lock(&l2cap_sk_list.lock);
255 s = __l2cap_get_sock_by_psm(state, psm, src);
256 if (s) bh_lock_sock(s);
257 read_unlock(&l2cap_sk_list.lock);
/* sk->sk_destruct callback: release queued skbs and the protocol-private
 * area when the last reference to the socket is dropped. */
261 static void l2cap_sock_destruct(struct sock *sk)
265 skb_queue_purge(&sk->sk_receive_queue);
266 skb_queue_purge(&sk->sk_write_queue);
269 kfree(sk->sk_protinfo);
/* Close every not-yet-accepted child of a listening socket, then mark the
 * parent closed and zapped.
 * NOTE(review): extract omits the 'struct sock *sk;' declaration and the
 * kill of each dequeued child. */
272 static void l2cap_sock_cleanup_listen(struct sock *parent)
276 BT_DBG("parent %p", parent);
278 /* Close not yet accepted channels */
279 while ((sk = bt_accept_dequeue(parent, NULL)))
280 l2cap_sock_close(sk);
282 parent->sk_state = BT_CLOSED;
283 parent->sk_zapped = 1;
286 /* Kill socket (only if zapped and orphan)
287 * Must be called on unlocked socket.
/* Unlinks the socket from the global list, marks it dead and drops the
 * list's reference.  Bails out early if the socket is still attached to a
 * struct socket or not yet zapped.
 * NOTE(review): extract omits the final sock_put(sk). */
289 static void l2cap_sock_kill(struct sock *sk)
291 if (!sk->sk_zapped || sk->sk_socket)
294 BT_DBG("sk %p state %d", sk, sk->sk_state);
296 /* Kill poor orphan */
297 bt_sock_unlink(&l2cap_sk_list, sk);
298 sock_set_flag(sk, SOCK_DEAD);
/* State-dependent close, called with the socket locked:
 * - BT_LISTEN: close pending children;
 * - connected SEQPACKET: send L2CAP_DISCONN_REQ and enter BT_DISCONN,
 *   guarded by the send timeout;
 * - otherwise: delete the channel immediately with @reason.
 * NOTE(review): extract omits the case labels and closing braces of the
 * switch — the exact state ranges per branch cannot be confirmed here. */
302 static void __l2cap_sock_close(struct sock *sk, int reason)
304 BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
306 switch (sk->sk_state) {
308 l2cap_sock_cleanup_listen(sk);
314 if (sk->sk_type == SOCK_SEQPACKET) {
315 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
316 struct l2cap_disconn_req req;
318 sk->sk_state = BT_DISCONN;
319 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
321 req.dcid = __cpu_to_le16(l2cap_pi(sk)->dcid);
322 req.scid = __cpu_to_le16(l2cap_pi(sk)->scid);
323 l2cap_send_cmd(conn, l2cap_get_ident(conn),
324 L2CAP_DISCONN_REQ, sizeof(req), &req);
326 l2cap_chan_del(sk, reason);
332 l2cap_chan_del(sk, reason);
341 /* Must be called on unlocked socket. */
/* Public close: stop the guard timer, close with ECONNRESET, then kill.
 * NOTE(review): extract omits the lock_sock/release_sock pair and the
 * l2cap_sock_kill() call. */
342 static void l2cap_sock_close(struct sock *sk)
344 l2cap_sock_clear_timer(sk);
346 __l2cap_sock_close(sk, ECONNRESET);
/* Initialize protocol-private state of a new socket.  With a @parent
 * (incoming connection) the child inherits type, MTUs and link mode;
 * without one the defaults are used.
 * NOTE(review): extract omits the if/else around the parent branch. */
351 static void l2cap_sock_init(struct sock *sk, struct sock *parent)
353 struct l2cap_pinfo *pi = l2cap_pi(sk);
358 sk->sk_type = parent->sk_type;
359 pi->imtu = l2cap_pi(parent)->imtu;
360 pi->omtu = l2cap_pi(parent)->omtu;
361 pi->link_mode = l2cap_pi(parent)->link_mode;
363 pi->imtu = L2CAP_DEFAULT_MTU;
368 /* Default config options */
369 pi->conf_mtu = L2CAP_DEFAULT_MTU;
370 pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
/* Allocate a struct sock with room for l2cap_pinfo, set destructor,
 * default send timeout, state BT_OPEN, timer, and link it into
 * l2cap_sk_list.
 * NOTE(review): extract omits the NULL-check on the allocation and the
 * trailing 'return sk;'. */
373 static struct sock *l2cap_sock_alloc(struct socket *sock, int proto, int prio)
377 sk = bt_sock_alloc(sock, proto, sizeof(struct l2cap_pinfo), prio);
381 sk_set_owner(sk, THIS_MODULE);
383 sk->sk_destruct = l2cap_sock_destruct;
384 sk->sk_sndtimeo = L2CAP_CONN_TIMEOUT;
386 sk->sk_protocol = proto;
387 sk->sk_state = BT_OPEN;
389 l2cap_sock_init_timer(sk);
391 bt_sock_link(&l2cap_sk_list, sk);
/* socket(2) backend: validate the socket type (SEQPACKET, DGRAM or RAW;
 * RAW requires CAP_NET_RAW), install l2cap_sock_ops, allocate and
 * initialize the sock.
 * NOTE(review): extract omits the -EPERM and -ENOMEM returns and the
 * final 'return 0;'. */
395 static int l2cap_sock_create(struct socket *sock, int protocol)
399 BT_DBG("sock %p", sock);
401 sock->state = SS_UNCONNECTED;
403 if (sock->type != SOCK_SEQPACKET &&
404 sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
405 return -ESOCKTNOSUPPORT;
407 if (sock->type == SOCK_RAW && !capable(CAP_NET_RAW))
410 sock->ops = &l2cap_sock_ops;
412 sk = l2cap_sock_alloc(sock, protocol, GFP_KERNEL);
416 l2cap_sock_init(sk, NULL);
/* bind(2): record source bdaddr and PSM.  Rejects a PSM already bound to
 * the same source address (checked under the list write lock).
 * NOTE(review): extract omits lock_sock/release_sock, the -EADDRINUSE
 * branch body and the error returns. */
420 static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
422 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
423 struct sock *sk = sock->sk;
426 BT_DBG("sk %p, %s %d", sk, batostr(&la->l2_bdaddr), la->l2_psm);
428 if (!addr || addr->sa_family != AF_BLUETOOTH)
433 if (sk->sk_state != BT_OPEN) {
438 write_lock_bh(&l2cap_sk_list.lock);
/* Refuse to bind a PSM that is already taken on this source address. */
440 if (la->l2_psm && __l2cap_get_sock_by_addr(la->l2_psm, &la->l2_bdaddr)) {
443 /* Save source address */
444 bacpy(&bt_sk(sk)->src, &la->l2_bdaddr);
445 l2cap_pi(sk)->psm = la->l2_psm;
446 l2cap_pi(sk)->sport = la->l2_psm;
447 sk->sk_state = BT_BOUND;
450 write_unlock_bh(&l2cap_sk_list.lock);
/* Start an outgoing connection: route to an HCI device, create/reuse the
 * ACL link and L2CAP connection, attach the channel and — if the ACL is
 * already up — either send CONN_REQ (SEQPACKET) or go straight to
 * BT_CONNECTED (connectionless/raw).
 * NOTE(review): extract omits error labels ('done:'), several error
 * returns and hci_dev_put(); the exact cleanup ordering is not visible. */
457 static int l2cap_do_connect(struct sock *sk)
459 bdaddr_t *src = &bt_sk(sk)->src;
460 bdaddr_t *dst = &bt_sk(sk)->dst;
461 struct l2cap_conn *conn;
462 struct hci_conn *hcon;
463 struct hci_dev *hdev;
466 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst), l2cap_pi(sk)->psm);
468 if (!(hdev = hci_get_route(dst, src)))
469 return -EHOSTUNREACH;
471 hci_dev_lock_bh(hdev);
475 hcon = hci_connect(hdev, ACL_LINK, dst);
479 conn = l2cap_conn_add(hcon, 0);
487 /* Update source addr of the socket */
488 bacpy(src, conn->src);
490 l2cap_chan_add(conn, sk, NULL);
492 sk->sk_state = BT_CONNECT;
493 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
495 if (hcon->state == BT_CONNECTED) {
496 if (sk->sk_type == SOCK_SEQPACKET) {
497 struct l2cap_conn_req req;
498 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
499 req.scid = __cpu_to_le16(l2cap_pi(sk)->scid);
500 req.psm = l2cap_pi(sk)->psm;
501 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
502 L2CAP_CONN_REQ, sizeof(req), &req);
/* Connectionless/raw sockets are usable as soon as the ACL is up. */
504 l2cap_sock_clear_timer(sk);
505 sk->sk_state = BT_CONNECTED;
510 hci_dev_unlock_bh(hdev);
/* connect(2): validate the address (SEQPACKET requires a PSM), reject
 * already-connecting/connected states, store destination, kick off
 * l2cap_do_connect() and wait for BT_CONNECTED honoring O_NONBLOCK.
 * NOTE(review): extract omits lock_sock/release_sock, the 'done:' label
 * and several error returns in the switch. */
515 static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
517 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
518 struct sock *sk = sock->sk;
525 if (addr->sa_family != AF_BLUETOOTH || alen < sizeof(struct sockaddr_l2)) {
530 if (sk->sk_type == SOCK_SEQPACKET && !la->l2_psm) {
535 switch(sk->sk_state) {
539 /* Already connecting */
543 /* Already connected */
556 /* Set destination address and psm */
557 bacpy(&bt_sk(sk)->dst, &la->l2_bdaddr);
558 l2cap_pi(sk)->psm = la->l2_psm;
560 if ((err = l2cap_do_connect(sk)))
564 err = bt_sock_wait_state(sk, BT_CONNECTED,
565 sock_sndtimeo(sk, flags & O_NONBLOCK));
/* listen(2): only for bound SEQPACKET sockets.  If no PSM was bound,
 * auto-allocate an odd dynamic PSM in 0x1001..0x10ff (valid PSMs have an
 * odd least-significant octet) under the list write lock.
 * NOTE(review): extract omits lock_sock/release_sock, the u16 psm
 * declaration, the -EADDRINUSE path when no free PSM remains, and the
 * final return. */
571 static int l2cap_sock_listen(struct socket *sock, int backlog)
573 struct sock *sk = sock->sk;
576 BT_DBG("sk %p backlog %d", sk, backlog);
580 if (sk->sk_state != BT_BOUND || sock->type != SOCK_SEQPACKET) {
585 if (!l2cap_pi(sk)->psm) {
586 bdaddr_t *src = &bt_sk(sk)->src;
591 write_lock_bh(&l2cap_sk_list.lock);
593 for (psm = 0x1001; psm < 0x1100; psm += 2)
594 if (!__l2cap_get_sock_by_addr(psm, src)) {
595 l2cap_pi(sk)->psm = htobs(psm);
596 l2cap_pi(sk)->sport = htobs(psm);
601 write_unlock_bh(&l2cap_sk_list.lock);
607 sk->sk_max_ack_backlog = backlog;
608 sk->sk_ack_backlog = 0;
609 sk->sk_state = BT_LISTEN;
/* accept(2): sleep on an exclusive wait queue (wake-one) until a child
 * connection is available, the listen state is lost, the timeout expires
 * or a signal arrives; attach the child to @newsock.
 * NOTE(review): extract omits lock_sock, the -EAGAIN timeout branch, the
 * release_sock/lock_sock pair around schedule_timeout() and the 'done:'
 * label with the final return. */
616 static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
618 DECLARE_WAITQUEUE(wait, current);
619 struct sock *sk = sock->sk, *nsk;
625 if (sk->sk_state != BT_LISTEN) {
630 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
632 BT_DBG("sk %p timeo %ld", sk, timeo);
634 /* Wait for an incoming connection. (wake-one). */
635 add_wait_queue_exclusive(sk->sk_sleep, &wait);
636 while (!(nsk = bt_accept_dequeue(sk, newsock))) {
637 set_current_state(TASK_INTERRUPTIBLE);
644 timeo = schedule_timeout(timeo);
/* Re-check: the listener may have been closed while we slept. */
647 if (sk->sk_state != BT_LISTEN) {
652 if (signal_pending(current)) {
653 err = sock_intr_errno(timeo);
657 set_current_state(TASK_RUNNING);
658 remove_wait_queue(sk->sk_sleep, &wait);
663 newsock->state = SS_CONNECTED;
665 BT_DBG("new socket %p", nsk);
/* getsockname/getpeername: fill a sockaddr_l2 with the peer address when
 * @peer is set, otherwise the local address, plus the channel PSM.
 * NOTE(review): extract omits the if/else around the two bacpy() calls
 * and the final 'return 0;'. */
672 static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
674 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
675 struct sock *sk = sock->sk;
677 BT_DBG("sock %p, sk %p", sock, sk);
679 addr->sa_family = AF_BLUETOOTH;
680 *len = sizeof(struct sockaddr_l2);
683 bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
685 bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
687 la->l2_psm = l2cap_pi(sk)->psm;
/* Build and transmit one outgoing L2CAP frame from a user msghdr.  The
 * first skb carries the L2CAP basic header (plus a 2-byte PSM for
 * connectionless DGRAM sockets); the payload is fragmented to conn->mtu
 * with continuation skbs chained on frag_list.  Returns bytes sent or a
 * negative errno.
 * NOTE(review): extract omits the fragmentation loop bounds
 * (len -= count / sent += count), the 'fail:' kfree_skb error path and
 * the final return — confirm against the full source. */
691 static inline int l2cap_do_send(struct sock *sk, struct msghdr *msg, int len)
693 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
694 struct sk_buff *skb, **frag;
695 int err, hlen, count, sent=0;
696 struct l2cap_hdr *lh;
698 BT_DBG("sk %p len %d", sk, len);
700 /* First fragment (with L2CAP header) */
701 if (sk->sk_type == SOCK_DGRAM)
702 hlen = L2CAP_HDR_SIZE + 2;
704 hlen = L2CAP_HDR_SIZE;
706 count = min_t(unsigned int, (conn->mtu - hlen), len);
708 skb = bt_skb_send_alloc(sk, hlen + count,
709 msg->msg_flags & MSG_DONTWAIT, &err);
713 /* Create L2CAP header */
714 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
715 lh->cid = __cpu_to_le16(l2cap_pi(sk)->dcid);
716 lh->len = __cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
/* Connectionless channel: payload is prefixed with the destination PSM. */
718 if (sk->sk_type == SOCK_DGRAM)
719 put_unaligned(l2cap_pi(sk)->psm, (u16 *) skb_put(skb, 2));
721 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count)) {
729 /* Continuation fragments (no L2CAP header) */
730 frag = &skb_shinfo(skb)->frag_list;
732 count = min_t(unsigned int, conn->mtu, len);
734 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
738 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count)) {
746 frag = &(*frag)->next;
749 if ((err = hci_send_acl(conn->hcon, skb, 0)) < 0)
/* sendmsg(2): reject MSG_OOB and frames larger than the negotiated
 * outgoing MTU (except for raw sockets), then hand off to l2cap_do_send()
 * while connected.
 * NOTE(review): extract omits lock_sock/release_sock, the -EOPNOTSUPP /
 * -EINVAL / -ENOTCONN returns and the final 'return err;'. */
759 static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
761 struct sock *sk = sock->sk;
764 BT_DBG("sock %p, sk %p", sock, sk);
767 return sock_error(sk);
769 if (msg->msg_flags & MSG_OOB)
772 /* Check outgoing MTU */
773 if (sk->sk_type != SOCK_RAW && len > l2cap_pi(sk)->omtu)
778 if (sk->sk_state == BT_CONNECTED)
779 err = l2cap_do_send(sk, msg, len);
/* setsockopt(2) for SOL_L2CAP: L2CAP_OPTIONS copies imtu/omtu from a
 * struct l2cap_options; L2CAP_LM sets the link-mode bitmask.
 * NOTE(review): extract omits the switch on optname, the default
 * -ENOPROTOOPT case, lock_sock/release_sock and the -EFAULT bodies. */
787 static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, int optlen)
789 struct sock *sk = sock->sk;
790 struct l2cap_options opts;
800 len = min_t(unsigned int, sizeof(opts), optlen);
801 if (copy_from_user((char *) &opts, optval, len)) {
805 l2cap_pi(sk)->imtu = opts.imtu;
806 l2cap_pi(sk)->omtu = opts.omtu;
810 if (get_user(opt, (u32 __user *) optval)) {
815 l2cap_pi(sk)->link_mode = opt;
/* getsockopt(2) for SOL_L2CAP: returns L2CAP_OPTIONS (imtu/omtu/flush_to),
 * L2CAP_LM (link-mode bitmask) or L2CAP_CONNINFO (HCI handle + device
 * class, only while connected).
 * NOTE(review): extract omits the switch on optname, the -ENOTCONN body,
 * error returns and the function tail. */
827 static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
829 struct sock *sk = sock->sk;
830 struct l2cap_options opts;
831 struct l2cap_conninfo cinfo;
836 if (get_user(len, optlen))
843 opts.imtu = l2cap_pi(sk)->imtu;
844 opts.omtu = l2cap_pi(sk)->omtu;
845 opts.flush_to = l2cap_pi(sk)->flush_to;
848 len = min_t(unsigned int, len, sizeof(opts));
849 if (copy_to_user(optval, (char *) &opts, len))
855 if (put_user(l2cap_pi(sk)->link_mode, (u32 __user *) optval))
/* CONNINFO is only meaningful once the channel is connected. */
860 if (sk->sk_state != BT_CONNECTED) {
865 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
866 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
868 len = min_t(unsigned int, len, sizeof(cinfo));
869 if (copy_to_user(optval, (char *) &cinfo, len))
/* shutdown(2): on first call, mark both directions shut, stop the guard
 * timer and close the channel; if SO_LINGER is set, wait up to
 * sk_lingertime for BT_CLOSED.
 * NOTE(review): extract omits lock_sock/release_sock and the final
 * 'return err;'. */
883 static int l2cap_sock_shutdown(struct socket *sock, int how)
885 struct sock *sk = sock->sk;
888 BT_DBG("sock %p, sk %p", sock, sk);
894 if (!sk->sk_shutdown) {
895 sk->sk_shutdown = SHUTDOWN_MASK;
896 l2cap_sock_clear_timer(sk);
897 __l2cap_sock_close(sk, 0);
899 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
900 err = bt_sock_wait_state(sk, BT_CLOSED, sk->sk_lingertime);
906 static int l2cap_sock_release(struct socket *sock)
908 struct sock *sk = sock->sk;
911 BT_DBG("sock %p, sk %p", sock, sk);
916 err = l2cap_sock_shutdown(sock, 2);
923 /* ---- L2CAP channels ---- */
924 static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
927 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
928 if (l2cap_pi(s)->dcid == cid)
934 static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
937 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
938 if (l2cap_pi(s)->scid == cid)
944 /* Find channel with given SCID.
945 * Returns locked socket */
946 static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
950 s = __l2cap_get_chan_by_scid(l, cid);
951 if (s) bh_lock_sock(s);
952 read_unlock(&l->lock);
956 static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
959 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
960 if (l2cap_pi(s)->ident == ident)
966 static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
970 s = __l2cap_get_chan_by_ident(l, ident);
971 if (s) bh_lock_sock(s);
972 read_unlock(&l->lock);
/* Allocate a free source CID by scanning upward for one not already in
 * use on this connection.  Caller holds the chan_list lock.
 * NOTE(review): extract omits the starting value of 'cid' (dynamic CIDs
 * begin at 0x0040 in the canonical source), the return of the free cid
 * and the 0 fallback — confirm against the full source. */
976 static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
980 for (; cid < 0xffff; cid++) {
981 if(!__l2cap_get_chan_by_scid(l, cid))
/* Push @sk onto the front of the connection's doubly linked channel list.
 * Caller holds the list write lock.
 * NOTE(review): extract omits the guard around the head back-pointer
 * update and the 'l->head = sk;' assignment. */
988 static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
993 l2cap_pi(l->head)->prev_c = sk;
995 l2cap_pi(sk)->next_c = l->head;
996 l2cap_pi(sk)->prev_c = NULL;
/* Remove @sk from the connection's channel list under the write lock,
 * stitching its neighbours together.
 * NOTE(review): extract omits the head-update branch (when @sk was the
 * list head) and the NULL guards around next/prev. */
1000 static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
1002 struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;
1004 write_lock(&l->lock);
1009 l2cap_pi(next)->prev_c = prev;
1011 l2cap_pi(prev)->next_c = next;
1012 write_unlock(&l->lock);
/* Attach channel @sk to @conn and assign its CIDs by socket type:
 * SEQPACKET gets a freshly allocated dynamic CID, DGRAM uses the fixed
 * connectionless CID 0x0002, raw uses the signalling CID 0x0001.  Caller
 * holds the chan_list write lock.
 * NOTE(review): extract omits the final 'else' keyword before the raw
 * branch and the parent bookkeeping around bt_accept_enqueue. */
1017 static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
1019 struct l2cap_chan_list *l = &conn->chan_list;
1021 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn, l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
1023 l2cap_pi(sk)->conn = conn;
1025 if (sk->sk_type == SOCK_SEQPACKET) {
1026 /* Alloc CID for connection-oriented socket */
1027 l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
1028 } else if (sk->sk_type == SOCK_DGRAM) {
1029 /* Connectionless socket */
1030 l2cap_pi(sk)->scid = 0x0002;
1031 l2cap_pi(sk)->dcid = 0x0002;
1032 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
1034 /* Raw socket can send/recv signalling messages only */
1035 l2cap_pi(sk)->scid = 0x0001;
1036 l2cap_pi(sk)->dcid = 0x0001;
1037 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
1040 __l2cap_chan_link(l, sk);
1043 bt_accept_enqueue(parent, sk);
/* Delete channel.
1047 * Must be called on the locked socket. */
/* Detach @sk from its connection (dropping the hcon reference), mark it
 * BT_CLOSED/zapped with @err, unlink it from an accept queue if it was a
 * pending child, and wake the parent or the socket's own waiters.
 * NOTE(review): extract omits the 'if (conn)' guard, the sk_err/zapped
 * assignments and the parent NULL test — confirm ordering against the
 * full source. */
1048 static void l2cap_chan_del(struct sock *sk, int err)
1050 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1051 struct sock *parent = bt_sk(sk)->parent;
1053 l2cap_sock_clear_timer(sk);
1055 BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
1058 /* Unlink from channel list */
1059 l2cap_chan_unlink(&conn->chan_list, sk);
1060 l2cap_pi(sk)->conn = NULL;
1061 hci_conn_put(conn->hcon);
1064 sk->sk_state = BT_CLOSED;
1071 bt_accept_unlink(sk);
1072 parent->sk_data_ready(parent, 0);
1074 sk->sk_state_change(sk);
/* Called when the underlying ACL link reaches BT_CONNECTED: raw/DGRAM
 * channels become connected immediately; SEQPACKET channels in
 * BT_CONNECT state send their deferred L2CAP_CONN_REQ.
 * NOTE(review): extract omits the 'struct sock *sk' declaration and the
 * per-socket bh_lock/bh_unlock inside the loop. */
1077 static void l2cap_conn_ready(struct l2cap_conn *conn)
1079 struct l2cap_chan_list *l = &conn->chan_list;
1082 BT_DBG("conn %p", conn);
1084 read_lock(&l->lock);
1086 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
1089 if (sk->sk_type != SOCK_SEQPACKET) {
1090 l2cap_sock_clear_timer(sk);
1091 sk->sk_state = BT_CONNECTED;
1092 sk->sk_state_change(sk);
1093 } else if (sk->sk_state == BT_CONNECT) {
1094 struct l2cap_conn_req req;
1095 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
1096 req.scid = __cpu_to_le16(l2cap_pi(sk)->scid);
1097 req.psm = l2cap_pi(sk)->psm;
1098 l2cap_send_cmd(conn, l2cap_pi(sk)->ident, L2CAP_CONN_REQ, sizeof(req), &req);
1104 read_unlock(&l->lock);
1107 /* Notify sockets that we cannot guaranty reliability anymore */
/* Walk the channel list and raise @err on every socket that requested
 * L2CAP_LM_RELIABLE link mode.
 * NOTE(review): extract omits the 'struct sock *sk' declaration and the
 * sk->sk_err assignment inside the loop. */
1108 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1110 struct l2cap_chan_list *l = &conn->chan_list;
1113 BT_DBG("conn %p", conn);
1115 read_lock(&l->lock);
1116 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
1117 if (l2cap_pi(sk)->link_mode & L2CAP_LM_RELIABLE)
1120 read_unlock(&l->lock);
/* Configuration finished on both directions: reset conf state, stop the
 * guard timer and wake whoever is waiting — the connecting thread for an
 * outgoing channel, or the listener's accept queue for an incoming one.
 * NOTE(review): extract omits the 'if (!parent)'/'else' structure around
 * the two branches. */
1123 static void l2cap_chan_ready(struct sock *sk)
1125 struct sock *parent = bt_sk(sk)->parent;
1127 BT_DBG("sk %p, parent %p", sk, parent);
1129 l2cap_pi(sk)->conf_state = 0;
1130 l2cap_sock_clear_timer(sk);
1133 /* Outgoing channel.
1134 * Wake up socket sleeping on connect.
1136 sk->sk_state = BT_CONNECTED;
1137 sk->sk_state_change(sk);
1139 /* Incoming channel.
1140 * Wake up socket sleeping on accept.
1142 parent->sk_data_ready(parent, 0);
1146 /* Copy frame to all raw sockets on that connection */
/* Clone @skb to every SOCK_RAW channel on @conn (except the originator)
 * so sniffers see signalling traffic.  A failed clone or a full receive
 * queue silently skips that socket.
 * NOTE(review): extract omits the 'struct sock *sk' declaration, the
 * originator check body and the kfree_skb on queue failure. */
1147 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1149 struct l2cap_chan_list *l = &conn->chan_list;
1150 struct sk_buff *nskb;
1153 BT_DBG("conn %p", conn);
1155 read_lock(&l->lock);
1156 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
1157 if (sk->sk_type != SOCK_RAW)
1160 /* Don't send frame to the socket it came from */
1164 if (!(nskb = skb_clone(skb, GFP_ATOMIC)))
1167 if (sock_queue_rcv_skb(sk, nskb))
1170 read_unlock(&l->lock);
1173 /* ---- L2CAP signalling commands ---- */
/* Allocate an skb chain carrying one signalling command: L2CAP basic
 * header (CID 0x0001) + command header + @dlen bytes of @data, fragmented
 * to conn->mtu with continuations on frag_list.  Returns NULL on
 * allocation failure.
 * NOTE(review): extract omits the len/count declarations, cmd->code and
 * cmd->ident assignments, the loop bounds (len -= count, data += count),
 * the 'fail:' kfree_skb path and the final 'return skb;'. */
1174 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
1175 u8 code, u8 ident, u16 dlen, void *data)
1177 struct sk_buff *skb, **frag;
1178 struct l2cap_cmd_hdr *cmd;
1179 struct l2cap_hdr *lh;
1182 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d", conn, code, ident, dlen);
1184 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
1185 count = min_t(unsigned int, conn->mtu, len);
1187 skb = bt_skb_alloc(count, GFP_ATOMIC);
1191 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1192 lh->len = __cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
/* All signalling traffic flows on the fixed signalling channel. */
1193 lh->cid = __cpu_to_le16(0x0001);
1195 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
1198 cmd->len = __cpu_to_le16(dlen);
1201 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
1202 memcpy(skb_put(skb, count), data, count);
1208 /* Continuation fragments (no L2CAP header) */
1209 frag = &skb_shinfo(skb)->frag_list;
1211 count = min_t(unsigned int, conn->mtu, len);
1213 *frag = bt_skb_alloc(count, GFP_ATOMIC);
1217 memcpy(skb_put(*frag, count), data, count);
1222 frag = &(*frag)->next;
/* Decode one TLV configuration option at *ptr: returns its total length,
 * stores type and payload length, and extracts the value by size (1/2/4
 * bytes converted from little-endian; larger payloads returned as a
 * pointer).
 * NOTE(review): extract omits the switch on opt->len, the *type/*olen
 * stores, the *ptr advance and the 'return len;'. */
1232 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
1234 struct l2cap_conf_opt *opt = *ptr;
1237 len = L2CAP_CONF_OPT_SIZE + opt->len;
1245 *val = *((u8 *) opt->val);
1249 *val = __le16_to_cpu(*((u16 *)opt->val));
1253 *val = __le32_to_cpu(*((u32 *)opt->val));
/* Larger payloads are returned by reference, not copied. */
1257 *val = (unsigned long) opt->val;
1261 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
/* Walk the option list of an incoming CONF_REQ and record the peer's
 * requested MTU and flush timeout in the channel's pinfo; QoS and unknown
 * options are currently ignored (see FIXME).
 * NOTE(review): extract omits the 'void *ptr = data' / 'unsigned long
 * val' declarations, the hint-bit masking and the switch framing. */
1265 static inline void l2cap_parse_conf_req(struct sock *sk, void *data, int len)
1267 int type, hint, olen;
1271 BT_DBG("sk %p len %d", sk, len);
1273 while (len >= L2CAP_CONF_OPT_SIZE) {
1274 len -= l2cap_get_conf_opt(&ptr, &type, &olen, &val);
1280 case L2CAP_CONF_MTU:
1281 l2cap_pi(sk)->conf_mtu = val;
1284 case L2CAP_CONF_FLUSH_TO:
1285 l2cap_pi(sk)->flush_to = val;
1288 case L2CAP_CONF_QOS:
1295 /* FIXME: Reject unknown option */
/* Append one TLV configuration option at *ptr: 1/2/4-byte values are
 * stored little-endian, larger values are memcpy'd from the pointer in
 * @val; *ptr is advanced past the option.
 * NOTE(review): extract omits the opt->type/opt->len stores and the
 * switch framing around the by-size cases. */
1301 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
1303 struct l2cap_conf_opt *opt = *ptr;
1305 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
1312 *((u8 *) opt->val) = val;
1316 *((u16 *) opt->val) = __cpu_to_le16(val);
1320 *((u32 *) opt->val) = __cpu_to_le32(val);
1324 memcpy(opt->val, (void *) val, len);
1328 *ptr += L2CAP_CONF_OPT_SIZE + len;
/* Build our CONF_REQ into @data: request a non-default incoming MTU if
 * one was configured; flush timeout negotiation is not implemented yet
 * (see FIXME).  Returns the total request length.
 * NOTE(review): extract omits the final 'return ptr - data;'. */
1331 static int l2cap_build_conf_req(struct sock *sk, void *data)
1333 struct l2cap_pinfo *pi = l2cap_pi(sk);
1334 struct l2cap_conf_req *req = data;
1335 void *ptr = req->data;
1337 BT_DBG("sk %p", sk);
1339 if (pi->imtu != L2CAP_DEFAULT_MTU)
1340 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
1342 /* FIXME: Need actual value of the flush timeout */
1343 //if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
1344 // l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);
1346 req->dcid = __cpu_to_le16(pi->dcid);
1347 req->flags = __cpu_to_le16(0);
/* Evaluate the peer's requested options: if its MTU is smaller than our
 * current outgoing MTU we echo ours back and mark the config UNACCEPTed;
 * otherwise we adopt the peer's MTU.  Returns the L2CAP_CONF_* result.
 * NOTE(review): extract omits the 'int result = 0' initialization, the
 * 'else' before adopting conf_mtu, and the final 'return result;'. */
1352 static inline int l2cap_conf_output(struct sock *sk, void **ptr)
1354 struct l2cap_pinfo *pi = l2cap_pi(sk);
1357 /* Configure output options and let the other side know
1358 * which ones we don't like. */
1359 if (pi->conf_mtu < pi->omtu) {
1360 l2cap_add_conf_opt(ptr, L2CAP_CONF_MTU, 2, pi->omtu);
1361 result = L2CAP_CONF_UNACCEPT;
1363 pi->omtu = pi->conf_mtu;
1366 BT_DBG("sk %p result %d", sk, result);
/* Build a CONF_RSP into @data.  With @result non-NULL this is the final
 * (complete) response: output options are evaluated and the result code
 * filled in; with NULL it is an empty interim response.  Returns total
 * response length.
 * NOTE(review): extract omits the 'u16 flags = 0' declaration, the
 * 'if (result)' guard and the final 'return ptr - data;'. */
1370 static int l2cap_build_conf_rsp(struct sock *sk, void *data, int *result)
1372 struct l2cap_conf_rsp *rsp = data;
1373 void *ptr = rsp->data;
1376 BT_DBG("sk %p complete %d", sk, result ? 1 : 0);
1379 *result = l2cap_conf_output(sk, &ptr);
1383 rsp->scid = __cpu_to_le16(l2cap_pi(sk)->dcid);
1384 rsp->result = __cpu_to_le16(result ? *result : 0);
1385 rsp->flags = __cpu_to_le16(flags);
/* Handle an incoming L2CAP_CONN_REQ: find a listener for the PSM, check
 * backlog, allocate and initialize a child socket, attach it to the
 * connection, then respond — immediately (BT_CONFIG, result 0) or with
 * PEND/AUTHEN_PEND while link-level security (auth/encrypt) completes.
 * Refusals use L2CAP_CR_BAD_PSM / L2CAP_CR_NO_MEM.
 * NOTE(review): extract omits the 'u16 psm = req->psm' read, several
 * 'goto response/sendresp' jumps, the duplicate-dcid result assignment
 * and the final 'return 0;' — confirm control flow against the full
 * source. */
1390 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1392 struct l2cap_chan_list *list = &conn->chan_list;
1393 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
1394 struct l2cap_conn_rsp rsp;
1395 struct sock *sk, *parent;
1396 int result = 0, status = 0;
1398 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
1401 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
1403 /* Check if we have socket listening on psm */
1404 parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
1406 result = L2CAP_CR_BAD_PSM;
1410 result = L2CAP_CR_NO_MEM;
1412 /* Check for backlog size */
1413 if (parent->sk_ack_backlog > parent->sk_max_ack_backlog) {
1414 BT_DBG("backlog full %d", parent->sk_ack_backlog);
1418 sk = l2cap_sock_alloc(NULL, BTPROTO_L2CAP, GFP_ATOMIC);
1422 write_lock(&list->lock);
1424 /* Check if we already have channel with that dcid */
1425 if (__l2cap_get_chan_by_dcid(list, scid)) {
1426 write_unlock(&list->lock);
1428 l2cap_sock_kill(sk);
1432 hci_conn_hold(conn->hcon);
1434 l2cap_sock_init(sk, parent);
1435 bacpy(&bt_sk(sk)->src, conn->src);
1436 bacpy(&bt_sk(sk)->dst, conn->dst);
1437 l2cap_pi(sk)->psm = psm;
1438 l2cap_pi(sk)->dcid = scid;
1440 __l2cap_chan_add(conn, sk, parent);
1441 dcid = l2cap_pi(sk)->scid;
1443 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
1445 /* Service level security */
1446 result = L2CAP_CR_PEND;
1447 status = L2CAP_CS_AUTHEN_PEND;
1448 sk->sk_state = BT_CONNECT2;
1449 l2cap_pi(sk)->ident = cmd->ident;
1451 if ((l2cap_pi(sk)->link_mode & L2CAP_LM_ENCRYPT) ||
1452 (l2cap_pi(sk)->link_mode & L2CAP_LM_SECURE)) {
1453 if (!hci_conn_encrypt(conn->hcon))
1455 } else if (l2cap_pi(sk)->link_mode & L2CAP_LM_AUTH) {
1456 if (!hci_conn_auth(conn->hcon))
/* No security requested: accept immediately. */
1460 sk->sk_state = BT_CONFIG;
1461 result = status = 0;
1464 write_unlock(&list->lock);
1467 bh_unlock_sock(parent);
1470 rsp.scid = __cpu_to_le16(scid);
1471 rsp.dcid = __cpu_to_le16(dcid);
1472 rsp.result = __cpu_to_le16(result);
1473 rsp.status = __cpu_to_le16(status);
1474 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* Handle L2CAP_CONN_RSP: locate our channel by SCID (or by pending ident
 * when the peer echoed scid 0), then on SUCCESS move to BT_CONFIG and
 * send our CONF_REQ; on a final (non-PEND) failure delete the channel
 * with ECONNREFUSED.
 * NOTE(review): extract omits the 'char req[64]' buffer declaration, the
 * switch framing with the PEND case, bh_unlock_sock and the return —
 * confirm against the full source. */
1478 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1480 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
1481 u16 scid, dcid, result, status;
1485 scid = __le16_to_cpu(rsp->scid);
1486 dcid = __le16_to_cpu(rsp->dcid);
1487 result = __le16_to_cpu(rsp->result);
1488 status = __le16_to_cpu(rsp->status);
1490 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
1493 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
/* Fall back to lookup by the ident we used in our CONN_REQ. */
1496 if (!(sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident)))
1501 case L2CAP_CR_SUCCESS:
1502 sk->sk_state = BT_CONFIG;
1503 l2cap_pi(sk)->ident = 0;
1504 l2cap_pi(sk)->dcid = dcid;
1505 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
1507 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1508 l2cap_build_conf_req(sk, req), req);
1515 l2cap_chan_del(sk, ECONNREFUSED);
/* Handle L2CAP_CONF_REQ: parse the peer's options, answer with a CONF_RSP
 * (empty while the C-flag marks the request incomplete), and once output
 * config is done either finish (both directions configured → connected)
 * or send our own CONF_REQ if we have not yet.
 * NOTE(review): extract omits the rsp/req buffer declarations, the 'u16
 * dcid, flags' / result declarations, the result!=success early exit,
 * bh_unlock_sock and the returns. */
1523 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1525 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
1531 dcid = __le16_to_cpu(req->dcid);
1532 flags = __le16_to_cpu(req->flags);
1534 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
1536 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid)))
1539 l2cap_parse_conf_req(sk, req->data, cmd->len - sizeof(*req));
1541 if (flags & 0x0001) {
1542 /* Incomplete config. Send empty response. */
1543 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
1544 l2cap_build_conf_rsp(sk, rsp, NULL), rsp);
1548 /* Complete config. */
1549 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
1550 l2cap_build_conf_rsp(sk, rsp, &result), rsp);
1555 /* Output config done */
1556 l2cap_pi(sk)->conf_state |= L2CAP_CONF_OUTPUT_DONE;
1558 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
1559 sk->sk_state = BT_CONNECTED;
1560 l2cap_chan_ready(sk);
1561 } else if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
1563 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1564 l2cap_build_conf_req(sk, req), req);
/* Handle L2CAP_CONF_RSP to our CONF_REQ: on SUCCESS mark input config
 * done (and finish if output is done too); on UNACCEPT resend the same
 * request up to L2CAP_CONF_MAX_RETRIES, after which — or on any other
 * failure — disconnect the channel (DISCONN_REQ, ECONNRESET, 5s timer).
 * NOTE(review): extract omits the req buffer declaration, the switch
 * 'default:' label before the failure path, the C-flag check on @flags,
 * bh_unlock_sock and the returns. */
1572 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1574 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
1575 u16 scid, flags, result;
1578 scid = __le16_to_cpu(rsp->scid);
1579 flags = __le16_to_cpu(rsp->flags);
1580 result = __le16_to_cpu(rsp->result);
1582 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x", scid, flags, result);
1584 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
1588 case L2CAP_CONF_SUCCESS:
1591 case L2CAP_CONF_UNACCEPT:
1592 if (++l2cap_pi(sk)->conf_retry < L2CAP_CONF_MAX_RETRIES) {
1594 /* It does not make sense to adjust L2CAP parameters
1595 * that are currently defined in the spec. We simply
1596 * resend config request that we sent earlier. It is
1597 * stupid, but it helps qualification testing which
1598 * expects at least some response from us. */
1599 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1600 l2cap_build_conf_req(sk, req), req);
/* Retries exhausted or hard failure: tear the channel down. */
1605 sk->sk_state = BT_DISCONN;
1606 sk->sk_err = ECONNRESET;
1607 l2cap_sock_set_timer(sk, HZ * 5);
1609 struct l2cap_disconn_req req;
1610 req.dcid = __cpu_to_le16(l2cap_pi(sk)->dcid);
1611 req.scid = __cpu_to_le16(l2cap_pi(sk)->scid);
1612 l2cap_send_cmd(conn, l2cap_get_ident(conn),
1613 L2CAP_DISCONN_REQ, sizeof(req), &req);
1621 /* Input config done */
1622 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
1624 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
1625 sk->sk_state = BT_CONNECTED;
1626 l2cap_chan_ready(sk);
/* Handle L2CAP_DISCONN_REQ: find the channel addressed by the peer's
 * DCID, acknowledge with DISCONN_RSP, mark the socket shut down, delete
 * the channel with ECONNRESET and kill the orphan.
 * NOTE(review): extract omits the 'u16 dcid, scid' / 'struct sock *sk'
 * declarations, bh_unlock_sock before the kill and the returns. */
1634 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1636 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
1637 struct l2cap_disconn_rsp rsp;
1641 scid = __le16_to_cpu(req->scid);
1642 dcid = __le16_to_cpu(req->dcid);
1644 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
/* The peer's dcid is our scid. */
1646 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid)))
1649 rsp.dcid = __cpu_to_le16(l2cap_pi(sk)->scid);
1650 rsp.scid = __cpu_to_le16(l2cap_pi(sk)->dcid);
1651 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
1653 sk->sk_shutdown = SHUTDOWN_MASK;
1655 l2cap_chan_del(sk, ECONNRESET);
1658 l2cap_sock_kill(sk);
/* Handle L2CAP_DISCONN_RSP to our DISCONN_REQ: delete the channel with
 * no error and kill the orphaned socket.
 * NOTE(review): extract omits the 'u16 dcid, scid' / 'struct sock *sk'
 * declarations, bh_unlock_sock before the kill and the returns. */
1662 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1664 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
1668 scid = __le16_to_cpu(rsp->scid);
1669 dcid = __le16_to_cpu(rsp->dcid);
1671 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
1673 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
1676 l2cap_chan_del(sk, 0);
1679 l2cap_sock_kill(sk);
/* Handle an L2CAP Information Request.  No optional features are
 * implemented here, so every query is answered with "not supported". */
1683 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1685 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
1686 struct l2cap_info_rsp rsp;
1689 type = __le16_to_cpu(req->type);
1691 BT_DBG("type 0x%4.4x", type);
/* Echo the requested type back with result = not supported. */
1693 rsp.type = __cpu_to_le16(type);
1694 rsp.result = __cpu_to_le16(L2CAP_IR_NOTSUPP);
1695 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp), &rsp);
/* Handle an L2CAP Information Response: the fields are decoded and
 * logged, but (in the visible code) not otherwise acted upon. */
1700 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1702 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
1705 type = __le16_to_cpu(rsp->type);
1706 result = __le16_to_cpu(rsp->result);
1708 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
/* Parse and dispatch all signaling commands carried in one frame on the
 * L2CAP signaling channel.  A frame may hold several commands back to
 * back; each is copied into a local header, validated, and routed to
 * the matching handler.  A handler error is reported back to the peer
 * as a Command Reject.
 * NOTE(review): the while/switch braces and the declarations of len
 * and err are elided in this view. */
1713 static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
1715 u8 *data = skb->data;
1717 struct l2cap_cmd_hdr cmd;
/* Give raw L2CAP listeners a look at the signaling traffic first. */
1720 l2cap_raw_recv(conn, skb);
/* Walk the buffer one command at a time. */
1722 while (len >= L2CAP_CMD_HDR_SIZE) {
1723 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
1724 data += L2CAP_CMD_HDR_SIZE;
1725 len -= L2CAP_CMD_HDR_SIZE;
1727 cmd.len = __le16_to_cpu(cmd.len);
1729 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd.len, cmd.ident);
/* A command claiming more payload than remains, or carrying the
 * reserved ident 0, is malformed. */
1731 if (cmd.len > len || !cmd.ident) {
1732 BT_DBG("corrupted command");
/* Dispatch on the command code. */
1737 case L2CAP_COMMAND_REJ:
1738 /* FIXME: We should process this */
1741 case L2CAP_CONN_REQ:
1742 err = l2cap_connect_req(conn, &cmd, data);
1745 case L2CAP_CONN_RSP:
1746 err = l2cap_connect_rsp(conn, &cmd, data);
1749 case L2CAP_CONF_REQ:
1750 err = l2cap_config_req(conn, &cmd, data);
1753 case L2CAP_CONF_RSP:
1754 err = l2cap_config_rsp(conn, &cmd, data);
1757 case L2CAP_DISCONN_REQ:
1758 err = l2cap_disconnect_req(conn, &cmd, data);
1761 case L2CAP_DISCONN_RSP:
1762 err = l2cap_disconnect_rsp(conn, &cmd, data);
/* Echo requests are answered in place with the same payload. */
1765 case L2CAP_ECHO_REQ:
1766 l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd.len, data);
1769 case L2CAP_ECHO_RSP:
1772 case L2CAP_INFO_REQ:
1773 err = l2cap_information_req(conn, &cmd, data);
1776 case L2CAP_INFO_RSP:
1777 err = l2cap_information_rsp(conn, &cmd, data);
1781 BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
/* Handler failed: answer with a Command Reject for this ident. */
1787 struct l2cap_cmd_rej rej;
1788 BT_DBG("error %d", err);
1790 /* FIXME: Map err to a valid reason */
1791 rej.reason = __cpu_to_le16(0);
1792 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Deliver a connection-oriented data frame to the socket owning the
 * given channel id.  Frames for unknown channels, channels not in
 * BT_CONNECTED state, or frames larger than the incoming MTU are
 * dropped.
 * NOTE(review): braces, goto labels and the drop path are elided in
 * this view. */
1802 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
1806 sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
1808 BT_DBG("unknown cid 0x%4.4x", cid);
1812 BT_DBG("sk %p, len %d", sk, skb->len);
1814 if (sk->sk_state != BT_CONNECTED)
/* Enforce the negotiated incoming MTU. */
1817 if (l2cap_pi(sk)->imtu < skb->len)
1820 /* If socket recv buffers overflows we drop data here
1821 * which is *bad* because L2CAP has to be reliable.
1822 * But we don't have any other choice. L2CAP doesn't
1823 * provide flow control mechanism. */
1825 if (!sock_queue_rcv_skb(sk, skb))
/* The unlock implies the lookup returned the socket locked —
 * presumably by l2cap_get_chan_by_scid(); verify against its body. */
1832 if (sk) bh_unlock_sock(sk);
/* Deliver a connectionless L2CAP payload to a socket bound to the
 * given PSM on this adapter.  State and MTU checks mirror the
 * connection-oriented data path.
 * NOTE(review): braces and the drop/done paths are elided in this
 * view. */
1836 static inline int l2cap_conless_channel(struct l2cap_conn *conn, u16 psm, struct sk_buff *skb)
1840 sk = l2cap_get_sock_by_psm(0, psm, conn->src);
1844 BT_DBG("sk %p, len %d", sk, skb->len);
/* Connectionless data is accepted on bound as well as connected
 * sockets. */
1846 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
1849 if (l2cap_pi(sk)->imtu < skb->len)
1852 if (!sock_queue_rcv_skb(sk, skb))
/* The unlock implies l2cap_get_sock_by_psm() returned the socket
 * locked — verify against its body. */
1859 if (sk) bh_unlock_sock(sk);
/* Route one complete (reassembled) L2CAP frame by channel id to the
 * signaling, connectionless, or connection-oriented data handler.
 * NOTE(review): the dispatch construct and the cid values it tests are
 * elided in this view; only the three handler calls are visible. */
1863 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
1865 struct l2cap_hdr *lh = (void *) skb->data;
/* Strip the basic L2CAP header; its fields are little-endian. */
1868 skb_pull(skb, L2CAP_HDR_SIZE);
1869 cid = __le16_to_cpu(lh->cid);
1870 len = __le16_to_cpu(lh->len);
1872 BT_DBG("len %d, cid 0x%4.4x", len, cid);
1876 l2cap_sig_channel(conn, skb);
/* Connectionless frames carry the PSM right after the header; it may
 * be unaligned in the buffer. */
1880 psm = get_unaligned((u16 *) skb->data);
1882 l2cap_conless_channel(conn, psm, skb);
1886 l2cap_data_channel(conn, cid, skb);
1891 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI callback: an incoming ACL connection is being set up.  Scan all
 * listening L2CAP sockets and OR their link modes into an accept mask.
 * Sockets bound exactly to this adapter's address take precedence over
 * wildcard (BDADDR_ANY) listeners. */
1893 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1895 int exact = 0, lm1 = 0, lm2 = 0;
1896 register struct sock *sk;
1897 struct hlist_node *node;
/* Only ACL links carry L2CAP. */
1899 if (type != ACL_LINK)
1902 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
1904 /* Find listening sockets and check their link_mode */
1905 read_lock(&l2cap_sk_list.lock);
1906 sk_for_each(sk, node, &l2cap_sk_list.head) {
1907 if (sk->sk_state != BT_LISTEN)
/* lm1 accumulates exact-address matches, lm2 wildcard matches. */
1910 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
1911 lm1 |= (HCI_LM_ACCEPT | l2cap_pi(sk)->link_mode);
1913 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
1914 lm2 |= (HCI_LM_ACCEPT | l2cap_pi(sk)->link_mode);
1916 read_unlock(&l2cap_sk_list.lock);
/* Prefer the exact-address mask when any exact match was seen. */
1918 return exact ? lm1 : lm2;
/* HCI callback: an ACL connection attempt completed.  On success
 * attach an L2CAP connection object and kick pending channels; on
 * failure tear everything down with the HCI status mapped to an errno.
 * NOTE(review): the status test and surrounding braces are elided in
 * this view. */
1921 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
1923 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
1925 if (hcon->type != ACL_LINK)
1929 struct l2cap_conn *conn;
1931 conn = l2cap_conn_add(hcon, status);
1933 l2cap_conn_ready(conn);
/* Failure path: bt_err() converts the HCI status to an errno. */
1935 l2cap_conn_del(hcon, bt_err(status));
/* HCI callback: the underlying ACL link went down.  Tear down the
 * L2CAP connection, reporting the mapped errno to its channels. */
1940 static int l2cap_disconn_ind(struct hci_conn *hcon, u8 reason)
1942 BT_DBG("hcon %p reason %d", hcon, reason);
1944 if (hcon->type != ACL_LINK)
1947 l2cap_conn_del(hcon, bt_err(reason));
/* HCI callback: authentication completed for this link.  Walk the
 * connection's channels and, for those parked in BT_CONNECT2 awaiting
 * security, send the deferred Connect Response: on success the channel
 * moves to BT_CONFIG, on failure it answers "security block" and is
 * scheduled for teardown.  Channels that also require encryption
 * (LM_ENCRYPT/LM_SECURE) are skipped here — they are finished in
 * l2cap_encrypt_cfm().
 * NOTE(review): the status test and the success-path result value are
 * elided in this view. */
1951 static int l2cap_auth_cfm(struct hci_conn *hcon, u8 status)
1953 struct l2cap_chan_list *l;
1954 struct l2cap_conn *conn;
1955 struct l2cap_conn_rsp rsp;
/* No L2CAP state on this link — nothing to do. */
1959 if (!(conn = hcon->l2cap_data))
1961 l = &conn->chan_list;
1963 BT_DBG("conn %p", conn);
1965 read_lock(&l->lock);
1967 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
1970 if (sk->sk_state != BT_CONNECT2 ||
1971 (l2cap_pi(sk)->link_mode & L2CAP_LM_ENCRYPT) ||
1972 (l2cap_pi(sk)->link_mode & L2CAP_LM_SECURE)) {
1978 sk->sk_state = BT_CONFIG;
/* Failure: short timer so the channel gets cleaned up soon. */
1981 sk->sk_state = BT_DISCONN;
1982 l2cap_sock_set_timer(sk, HZ/10);
1983 result = L2CAP_CR_SEC_BLOCK;
/* Send the Connect Response that was deferred pending security;
 * note scid/dcid are swapped into the peer's perspective. */
1986 rsp.scid = __cpu_to_le16(l2cap_pi(sk)->dcid);
1987 rsp.dcid = __cpu_to_le16(l2cap_pi(sk)->scid);
1988 rsp.result = __cpu_to_le16(result);
1989 rsp.status = __cpu_to_le16(0);
1990 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
1991 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
1996 read_unlock(&l->lock);
/* HCI callback: link encryption change completed.  Like
 * l2cap_auth_cfm(), release channels parked in BT_CONNECT2 by sending
 * the deferred Connect Response; additionally, channels demanding
 * LM_SECURE trigger a link key change on the connection.
 * NOTE(review): the status test and the success-path result value are
 * elided in this view. */
2000 static int l2cap_encrypt_cfm(struct hci_conn *hcon, u8 status)
2002 struct l2cap_chan_list *l;
2003 struct l2cap_conn *conn;
2004 struct l2cap_conn_rsp rsp;
2008 if (!(conn = hcon->l2cap_data))
2010 l = &conn->chan_list;
2012 BT_DBG("conn %p", conn);
2014 read_lock(&l->lock);
2016 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
/* Only channels still waiting for security are of interest. */
2019 if (sk->sk_state != BT_CONNECT2) {
2025 sk->sk_state = BT_CONFIG;
/* Failure: block the connection and time the channel out. */
2028 sk->sk_state = BT_DISCONN;
2029 l2cap_sock_set_timer(sk, HZ/10);
2030 result = L2CAP_CR_SEC_BLOCK;
/* Deferred Connect Response, cid pair from the peer's view. */
2033 rsp.scid = __cpu_to_le16(l2cap_pi(sk)->dcid);
2034 rsp.dcid = __cpu_to_le16(l2cap_pi(sk)->scid);
2035 rsp.result = __cpu_to_le16(result);
2036 rsp.status = __cpu_to_le16(0);
2037 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
2038 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* High-security channels also rotate the link key. */
2040 if (l2cap_pi(sk)->link_mode & L2CAP_LM_SECURE)
2041 hci_conn_change_link_key(hcon);
2046 read_unlock(&l->lock);
/* HCI callback: one ACL data fragment arrived.  Reassembles L2CAP
 * frames that span multiple ACL packets using conn->rx_skb/rx_len as
 * state, and hands complete frames to l2cap_recv_frame().  Any
 * inconsistency (truncated, oversized, or unexpected fragments) marks
 * the connection unreliable with ECOMM.
 * NOTE(review): the ACL_START/continuation branch structure, drop
 * labels and several declarations (len) are elided in this view. */
2050 static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
2052 struct l2cap_conn *conn = hcon->l2cap_data;
/* Lazily create the L2CAP connection object on first data. */
2054 if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
2057 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
2059 if (flags & ACL_START) {
2060 struct l2cap_hdr *hdr;
/* A start fragment while reassembly is pending means the previous
 * frame was truncated: discard the partial skb and flag the loss. */
2064 BT_ERR("Unexpected start frame (len %d)", skb->len);
2065 kfree_skb(conn->rx_skb);
2066 conn->rx_skb = NULL;
2068 l2cap_conn_unreliable(conn, ECOMM);
2072 BT_ERR("Frame is too short (len %d)", skb->len);
2073 l2cap_conn_unreliable(conn, ECOMM);
/* Total frame length = payload length from the header + header. */
2077 hdr = (struct l2cap_hdr *) skb->data;
2078 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
2080 if (len == skb->len) {
2081 /* Complete frame received */
2082 l2cap_recv_frame(conn, skb);
2086 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
2088 if (skb->len > len) {
2089 BT_ERR("Frame is too long (len %d, expected len %d)",
2091 l2cap_conn_unreliable(conn, ECOMM);
2095 /* Allocate skb for the complete frame (with header) */
2096 if (!(conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC)))
/* Stash the first fragment; rx_len tracks the bytes still owed. */
2099 memcpy(skb_put(conn->rx_skb, skb->len), skb->data, skb->len);
2100 conn->rx_len = len - skb->len;
/* --- continuation fragment path --- */
2102 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
2104 if (!conn->rx_len) {
2105 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
2106 l2cap_conn_unreliable(conn, ECOMM);
2110 if (skb->len > conn->rx_len) {
2111 BT_ERR("Fragment is too long (len %d, expected %d)",
2112 skb->len, conn->rx_len);
2113 kfree_skb(conn->rx_skb);
2114 conn->rx_skb = NULL;
2116 l2cap_conn_unreliable(conn, ECOMM);
/* Append the fragment; when rx_len reaches zero the frame is done. */
2120 memcpy(skb_put(conn->rx_skb, skb->len), skb->data, skb->len);
2121 conn->rx_len -= skb->len;
2123 if (!conn->rx_len) {
2124 /* Complete frame received */
2125 l2cap_recv_frame(conn, conn->rx_skb);
2126 conn->rx_skb = NULL;
2135 /* ---- Proc fs support ---- */
2136 #ifdef CONFIG_PROC_FS
/* seq_file iterator over the global l2cap_sk_list.  The list lock is
 * taken (BH-safe) in start and released in stop, so show/next run
 * under it.
 * NOTE(review): function bodies are heavily elided — position
 * bookkeeping and return statements are not visible in this view. */
2137 static void *l2cap_seq_start(struct seq_file *seq, loff_t *pos)
2140 struct hlist_node *node;
2143 read_lock_bh(&l2cap_sk_list.lock);
2145 sk_for_each(sk, node, &l2cap_sk_list.head)
2153 static void *l2cap_seq_next(struct seq_file *seq, void *e, loff_t *pos)
/* stop releases the lock taken in start. */
2159 static void l2cap_seq_stop(struct seq_file *seq, void *e)
2161 read_unlock_bh(&l2cap_sk_list.lock);
/* One output line per socket: src/dst addresses, state, PSM, channel
 * ids, in/out MTUs and link mode. */
2164 static int l2cap_seq_show(struct seq_file *seq, void *e)
2166 struct sock *sk = e;
2167 struct l2cap_pinfo *pi = l2cap_pi(sk);
2169 seq_printf(seq, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d 0x%x\n",
2170 batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst),
2171 sk->sk_state, pi->psm, pi->scid, pi->dcid, pi->imtu,
2172 pi->omtu, pi->link_mode);
2176 static struct seq_operations l2cap_seq_ops = {
2177 .start = l2cap_seq_start,
2178 .next = l2cap_seq_next,
2179 .stop = l2cap_seq_stop,
2180 .show = l2cap_seq_show
2183 static int l2cap_seq_open(struct inode *inode, struct file *file)
2185 return seq_open(file, &l2cap_seq_ops);
2188 static struct file_operations l2cap_seq_fops = {
2189 .owner = THIS_MODULE,
2190 .open = l2cap_seq_open,
2192 .llseek = seq_lseek,
2193 .release = seq_release,
/* Create a read-only "l2cap" entry under the Bluetooth proc directory
 * (proc_bt), backed by the seq_file ops above. */
2196 static int __init l2cap_proc_init(void)
2198 struct proc_dir_entry *p = create_proc_entry("l2cap", S_IRUGO, proc_bt);
2201 p->owner = THIS_MODULE;
2202 p->proc_fops = &l2cap_seq_fops;
2206 static void __exit l2cap_proc_cleanup(void)
2208 remove_proc_entry("l2cap", proc_bt);
2211 #else /* CONFIG_PROC_FS */
/* No-op stubs when proc fs support is compiled out. */
2213 static int __init l2cap_proc_init(void)
2218 static void __exit l2cap_proc_cleanup(void)
2222 #endif /* CONFIG_PROC_FS */
/* Socket-layer operations for L2CAP sockets.  L2CAP-specific handlers
 * implement the connection-oriented calls; generic bt_sock_* and
 * sock_no_* helpers fill the slots L2CAP does not implement itself. */
2224 static struct proto_ops l2cap_sock_ops = {
2225 .family = PF_BLUETOOTH,
2226 .owner = THIS_MODULE,
2227 .release = l2cap_sock_release,
2228 .bind = l2cap_sock_bind,
2229 .connect = l2cap_sock_connect,
2230 .listen = l2cap_sock_listen,
2231 .accept = l2cap_sock_accept,
2232 .getname = l2cap_sock_getname,
2233 .sendmsg = l2cap_sock_sendmsg,
2234 .recvmsg = bt_sock_recvmsg,
2235 .poll = bt_sock_poll,
2236 .mmap = sock_no_mmap,
2237 .socketpair = sock_no_socketpair,
2238 .ioctl = sock_no_ioctl,
2239 .shutdown = l2cap_sock_shutdown,
2240 .setsockopt = l2cap_sock_setsockopt,
2241 .getsockopt = l2cap_sock_getsockopt
/* PF_BLUETOOTH socket-creation hook registered with the Bluetooth
 * socket core for the L2CAP protocol. */
2244 static struct net_proto_family l2cap_sock_family_ops = {
2245 .family = PF_BLUETOOTH,
2246 .owner = THIS_MODULE,
2247 .create = l2cap_sock_create,
/* Callbacks registered with the HCI core: incoming-connection policy,
 * connect/disconnect confirmations, security confirmations, and
 * inbound ACL data delivery. */
2250 static struct hci_proto l2cap_hci_proto = {
2252 .id = HCI_PROTO_L2CAP,
2253 .connect_ind = l2cap_connect_ind,
2254 .connect_cfm = l2cap_connect_cfm,
2255 .disconn_ind = l2cap_disconn_ind,
2256 .auth_cfm = l2cap_auth_cfm,
2257 .encrypt_cfm = l2cap_encrypt_cfm,
2258 .recv_acldata = l2cap_recv_acldata
/* Module init: register the L2CAP socket family with the Bluetooth
 * core, then register the HCI protocol hooks.
 * NOTE(review): the error-unwind paths and any proc-entry setup call
 * between these steps are elided in this view. */
2261 static int __init l2cap_init(void)
2265 if ((err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops))) {
2266 BT_ERR("L2CAP socket registration failed");
2270 if ((err = hci_register_proto(&l2cap_hci_proto))) {
2271 BT_ERR("L2CAP protocol registration failed");
2277 BT_INFO("L2CAP ver %s", VERSION);
2278 BT_INFO("L2CAP socket layer initialized");
/* Module exit: remove the proc entry, then undo both registrations
 * performed in l2cap_init(). */
2283 static void __exit l2cap_exit(void)
2285 l2cap_proc_cleanup();
2287 /* Unregister socket and protocol */
2288 if (bt_sock_unregister(BTPROTO_L2CAP))
2289 BT_ERR("L2CAP socket unregistration failed");
2291 if (hci_unregister_proto(&l2cap_hci_proto))
2292 BT_ERR("L2CAP protocol unregistration failed");
/* Exported no-op: referencing this symbol from another module pulls
 * the L2CAP module in via the normal symbol-resolution mechanism. */
2295 void l2cap_load(void)
2297 /* Dummy function to trigger automatic L2CAP module loading by
2298 * other modules that use L2CAP sockets but don't use any other
2299 * symbols from it. */
2302 EXPORT_SYMBOL(l2cap_load);
/* Module entry/exit hooks and metadata. */
2304 module_init(l2cap_init);
2305 module_exit(l2cap_exit);
2307 MODULE_AUTHOR("Maxim Krasnyansky <maxk@qualcomm.com>, Marcel Holtmann <marcel@holtmann.org>");
2308 MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
2309 MODULE_VERSION(VERSION);
2310 MODULE_LICENSE("GPL");
2311 MODULE_ALIAS("bt-proto-0");