2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth L2CAP core and sockets. */
27 #include <linux/config.h>
28 #include <linux/module.h>
30 #include <linux/types.h>
31 #include <linux/errno.h>
32 #include <linux/kernel.h>
33 #include <linux/major.h>
34 #include <linux/sched.h>
35 #include <linux/slab.h>
36 #include <linux/poll.h>
37 #include <linux/fcntl.h>
38 #include <linux/init.h>
39 #include <linux/interrupt.h>
40 #include <linux/socket.h>
41 #include <linux/skbuff.h>
42 #include <linux/proc_fs.h>
43 #include <linux/seq_file.h>
44 #include <linux/list.h>
47 #include <asm/system.h>
48 #include <asm/uaccess.h>
49 #include <asm/unaligned.h>
51 #include <net/bluetooth/bluetooth.h>
52 #include <net/bluetooth/hci_core.h>
53 #include <net/bluetooth/l2cap.h>
/* Debug output is compiled out unless CONFIG_BT_L2CAP_DEBUG is set.
 * NOTE(review): matching #define/#endif lines are not visible in this excerpt. */
55 #ifndef CONFIG_BT_L2CAP_DEBUG
/* Socket operations table; defined later in the file. */
62 static struct proto_ops l2cap_sock_ops;
/* Global list of all L2CAP sockets, protected by its rwlock. */
64 struct bt_sock_list l2cap_sk_list = {
65 .lock = RW_LOCK_UNLOCKED
/* Forward declarations for connection/channel/socket teardown helpers. */
68 static int l2cap_conn_del(struct hci_conn *conn, int err);
70 static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent);
71 static void l2cap_chan_del(struct sock *sk, int err);
73 static void __l2cap_sock_close(struct sock *sk, int reason);
74 static void l2cap_sock_close(struct sock *sk);
75 static void l2cap_sock_kill(struct sock *sk);
/* Builds an skb carrying a single L2CAP signalling command (code/ident/data). */
77 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
78 u8 code, u8 ident, u16 dlen, void *data);
80 /* ---- L2CAP timers ---- */
/* sk_timer callback: the socket timed out, force-close it with ETIMEDOUT.
 * NOTE(review): excerpt is sampled — locking and the kill/put lines around
 * the close call are elided here. */
81 static void l2cap_sock_timeout(unsigned long arg)
83 struct sock *sk = (struct sock *) arg;
85 BT_DBG("sock %p state %d", sk, sk->sk_state);
88 __l2cap_sock_close(sk, ETIMEDOUT);
/* (Re)arm the socket timer to fire `timeout` jiffies from now. */
95 static void l2cap_sock_set_timer(struct sock *sk, long timeout)
97 BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
98 sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
/* Cancel any pending socket timer. */
101 static void l2cap_sock_clear_timer(struct sock *sk)
103 BT_DBG("sock %p state %d", sk, sk->sk_state);
104 sk_stop_timer(sk, &sk->sk_timer);
/* One-time setup of sk_timer: l2cap_sock_timeout(sk) on expiry. */
107 static void l2cap_sock_init_timer(struct sock *sk)
109 init_timer(&sk->sk_timer);
110 sk->sk_timer.function = l2cap_sock_timeout;
111 sk->sk_timer.data = (unsigned long)sk;
114 /* ---- L2CAP connections ---- */
/* Attach (or reuse) the per-ACL-link L2CAP connection state on `hcon`.
 * Returns the existing conn if one is already hung off hcon->l2cap_data;
 * otherwise allocates and zeroes a new one (GFP_ATOMIC — may run in
 * softirq context) and seeds mtu/src/dst from the HCI device and link.
 * NOTE(review): excerpt is sampled — the early-return and return-value
 * lines are elided here. */
115 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
117 struct l2cap_conn *conn;
119 if ((conn = hcon->l2cap_data))
125 if (!(conn = kmalloc(sizeof(struct l2cap_conn), GFP_ATOMIC)))
127 memset(conn, 0, sizeof(struct l2cap_conn));
129 hcon->l2cap_data = conn;
132 conn->mtu = hcon->hdev->acl_mtu;
133 conn->src = &hcon->hdev->bdaddr;
134 conn->dst = &hcon->dst;
136 spin_lock_init(&conn->lock);
137 rwlock_init(&conn->chan_list.lock);
139 BT_DBG("hcon %p conn %p", hcon, conn);
/* Tear down the L2CAP connection on `hcon`: free any partial rx skb,
 * delete every remaining channel (propagating `err` to its socket),
 * then detach and free the conn. NOTE(review): per-channel locking and
 * the final kfree are elided in this excerpt. */
143 static int l2cap_conn_del(struct hci_conn *hcon, int err)
145 struct l2cap_conn *conn;
148 if (!(conn = hcon->l2cap_data))
151 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
154 kfree_skb(conn->rx_skb);
157 while ((sk = conn->chan_list.head)) {
159 l2cap_chan_del(sk, err);
164 hcon->l2cap_data = NULL;
/* Locked wrapper around __l2cap_chan_add(): takes the channel-list
 * write lock while linking `sk` into the connection. */
169 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
171 struct l2cap_chan_list *l = &conn->chan_list;
172 write_lock(&l->lock);
173 __l2cap_chan_add(conn, sk, parent);
174 write_unlock(&l->lock);
/* Allocate the next signalling-command identifier under conn->lock.
 * Kernel uses idents 1-128 and wraps within that range (129+ reserved
 * for other users). NOTE(review): the wrap-around assignment and the
 * return statement are elided in this excerpt. */
177 static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
181 /* Get next available identificator.
182 * 1 - 128 are used by kernel.
183 * 129 - 199 are reserved.
184 * 200 - 254 are used by utilities like l2ping, etc.
187 spin_lock(&conn->lock);
189 if (++conn->tx_ident > 128)
194 spin_unlock(&conn->lock);
/* Build a signalling command skb and queue it on the ACL link.
 * NOTE(review): the NULL-skb check between build and send is elided. */
199 static inline int l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
201 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
203 BT_DBG("code 0x%2.2x", code);
208 return hci_send_acl(conn->hcon, skb, 0);
211 /* ---- Socket interface ---- */
/* Exact-match lookup: socket bound to this (source PSM, source bdaddr).
 * Caller must hold l2cap_sk_list.lock. Returns NULL when not found
 * (return paths elided in this excerpt). */
212 static struct sock *__l2cap_get_sock_by_addr(u16 psm, bdaddr_t *src)
215 struct hlist_node *node;
216 sk_for_each(sk, node, &l2cap_sk_list.head)
217 if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
224 /* Find socket with psm and source bdaddr.
225 * Returns closest match.
/* Closest-match lookup: prefer a socket whose source address equals
 * `src` exactly; fall back to one bound to BDADDR_ANY (tracked in sk1).
 * `state` of 0 matches any socket state. Caller holds the list lock. */
227 static struct sock *__l2cap_get_sock_by_psm(int state, u16 psm, bdaddr_t *src)
229 struct sock *sk = NULL, *sk1 = NULL;
230 struct hlist_node *node;
232 sk_for_each(sk, node, &l2cap_sk_list.head) {
233 if (state && sk->sk_state != state)
236 if (l2cap_pi(sk)->psm == psm) {
238 if (!bacmp(&bt_sk(sk)->src, src))
242 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
246 return node ? sk : sk1;
249 /* Find socket with given address (psm, src).
250 * Returns locked socket */
/* Locked wrapper: looks up under the list read lock and bh-locks the
 * socket before releasing the list lock, so the caller gets a stable,
 * locked socket (or NULL). */
251 static inline struct sock *l2cap_get_sock_by_psm(int state, u16 psm, bdaddr_t *src)
254 read_lock(&l2cap_sk_list.lock);
255 s = __l2cap_get_sock_by_psm(state, psm, src);
256 if (s) bh_lock_sock(s);
257 read_unlock(&l2cap_sk_list.lock);
/* Socket destructor: drop any queued rx/tx skbs and free the
 * protocol-private area. */
261 static void l2cap_sock_destruct(struct sock *sk)
265 skb_queue_purge(&sk->sk_receive_queue);
266 skb_queue_purge(&sk->sk_write_queue);
269 kfree(sk->sk_protinfo);
/* Close every not-yet-accepted child of a listening socket, then mark
 * the parent closed and zapped. */
272 static void l2cap_sock_cleanup_listen(struct sock *parent)
276 BT_DBG("parent %p", parent);
278 /* Close not yet accepted channels */
279 while ((sk = bt_accept_dequeue(parent, NULL)))
280 l2cap_sock_close(sk);
282 parent->sk_state = BT_CLOSED;
283 parent->sk_zapped = 1;
286 /* Kill socket (only if zapped and orphan)
287 * Must be called on unlocked socket.
/* Final disposal: refuse unless the socket is zapped and has no
 * attached struct socket (true orphan); unlink from the global list
 * and mark dead. NOTE(review): the final sock_put is elided here. */
289 static void l2cap_sock_kill(struct sock *sk)
291 if (!sk->sk_zapped || sk->sk_socket)
294 BT_DBG("sk %p state %d", sk, sk->sk_state);
296 /* Kill poor orphan */
297 bt_sock_unlink(&l2cap_sk_list, sk);
298 sock_set_flag(sk, SOCK_DEAD);
/* State-dependent close. Listening sockets sweep their accept queue;
 * a connected SOCK_SEQPACKET channel sends L2CAP_DISCONN_REQ and waits
 * in BT_DISCONN under the send timeout; other states delete the
 * channel immediately with `reason`. Caller holds the socket lock.
 * NOTE(review): sampled excerpt — case labels and the default branch
 * are partially elided. */
302 static void __l2cap_sock_close(struct sock *sk, int reason)
304 BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
306 switch (sk->sk_state) {
308 l2cap_sock_cleanup_listen(sk);
314 if (sk->sk_type == SOCK_SEQPACKET) {
315 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
316 struct l2cap_disconn_req req;
318 sk->sk_state = BT_DISCONN;
319 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
321 req.dcid = __cpu_to_le16(l2cap_pi(sk)->dcid);
322 req.scid = __cpu_to_le16(l2cap_pi(sk)->scid);
323 l2cap_send_cmd(conn, l2cap_get_ident(conn),
324 L2CAP_DISCONN_REQ, sizeof(req), &req);
326 l2cap_chan_del(sk, reason);
332 l2cap_chan_del(sk, reason);
341 /* Must be called on unlocked socket. */
/* Public close path: stop the timer and close with ECONNRESET.
 * NOTE(review): the lock/unlock and kill calls around the close are
 * elided in this excerpt. */
342 static void l2cap_sock_close(struct sock *sk)
344 l2cap_sock_clear_timer(sk);
346 __l2cap_sock_close(sk, ECONNRESET);
/* Initialise per-socket L2CAP state. A child inherits type, MTUs and
 * link mode from its listening parent; otherwise imtu defaults to
 * L2CAP_DEFAULT_MTU. Config defaults are set in both cases. */
351 static void l2cap_sock_init(struct sock *sk, struct sock *parent)
353 struct l2cap_pinfo *pi = l2cap_pi(sk);
358 sk->sk_type = parent->sk_type;
359 pi->imtu = l2cap_pi(parent)->imtu;
360 pi->omtu = l2cap_pi(parent)->omtu;
361 pi->link_mode = l2cap_pi(parent)->link_mode;
363 pi->imtu = L2CAP_DEFAULT_MTU;
368 /* Default config options */
369 pi->conf_mtu = L2CAP_DEFAULT_MTU;
370 pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
/* Allocate a new L2CAP socket (with l2cap_pinfo private area), wire up
 * destructor, default send timeout, state and timer, and link it into
 * the global socket list. `prio` is the allocation gfp flags. */
373 static struct sock *l2cap_sock_alloc(struct socket *sock, int proto, int prio)
377 sk = bt_sock_alloc(sock, proto, sizeof(struct l2cap_pinfo), prio);
381 sk_set_owner(sk, THIS_MODULE);
383 sk->sk_destruct = l2cap_sock_destruct;
384 sk->sk_sndtimeo = L2CAP_CONN_TIMEOUT;
386 sk->sk_protocol = proto;
387 sk->sk_state = BT_OPEN;
389 l2cap_sock_init_timer(sk);
391 bt_sock_link(&l2cap_sk_list, sk);
/* socket(2) backend: only SEQPACKET/DGRAM/RAW types are supported, and
 * RAW requires CAP_NET_RAW. Allocates and initialises the sock.
 * NOTE(review): error-return lines after the capability and alloc
 * checks are elided in this excerpt. */
395 static int l2cap_sock_create(struct socket *sock, int protocol)
399 BT_DBG("sock %p", sock);
401 sock->state = SS_UNCONNECTED;
403 if (sock->type != SOCK_SEQPACKET &&
404 sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
405 return -ESOCKTNOSUPPORT;
407 if (sock->type == SOCK_RAW && !capable(CAP_NET_RAW))
410 sock->ops = &l2cap_sock_ops;
412 sk = l2cap_sock_alloc(sock, protocol, GFP_KERNEL);
416 l2cap_sock_init(sk, NULL);
/* bind(2): validate the address family and BT_OPEN state, reject a PSM
 * already bound to the same source address, then record source bdaddr,
 * psm and sport and move to BT_BOUND. List write lock guards the
 * duplicate check + insert. NOTE(review): error assignments between
 * the checks are elided in this excerpt. */
420 static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
422 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
423 struct sock *sk = sock->sk;
426 BT_DBG("sk %p, %s %d", sk, batostr(&la->l2_bdaddr), la->l2_psm);
428 if (!addr || addr->sa_family != AF_BLUETOOTH)
433 if (sk->sk_state != BT_OPEN) {
438 write_lock_bh(&l2cap_sk_list.lock);
440 if (la->l2_psm && __l2cap_get_sock_by_addr(la->l2_psm, &la->l2_bdaddr)) {
443 /* Save source address */
444 bacpy(&bt_sk(sk)->src, &la->l2_bdaddr);
445 l2cap_pi(sk)->psm = la->l2_psm;
446 l2cap_pi(sk)->sport = la->l2_psm;
447 sk->sk_state = BT_BOUND;
450 write_unlock_bh(&l2cap_sk_list.lock);
/* Kick off an outgoing connection: route to an HCI device, create the
 * ACL link, attach/reuse the L2CAP conn, add this channel and arm the
 * connect timer. If the ACL is already up, a SEQPACKET socket sends
 * L2CAP_CONN_REQ immediately; other types go straight to BT_CONNECTED.
 * NOTE(review): sampled excerpt — error paths (bad hcon/conn) and the
 * return value are elided. */
457 static int l2cap_do_connect(struct sock *sk)
459 bdaddr_t *src = &bt_sk(sk)->src;
460 bdaddr_t *dst = &bt_sk(sk)->dst;
461 struct l2cap_conn *conn;
462 struct hci_conn *hcon;
463 struct hci_dev *hdev;
466 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst), l2cap_pi(sk)->psm);
468 if (!(hdev = hci_get_route(dst, src)))
469 return -EHOSTUNREACH;
471 hci_dev_lock_bh(hdev);
475 hcon = hci_connect(hdev, ACL_LINK, dst);
479 conn = l2cap_conn_add(hcon, 0);
487 /* Update source addr of the socket */
488 bacpy(src, conn->src);
490 l2cap_chan_add(conn, sk, NULL);
492 sk->sk_state = BT_CONNECT;
493 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
495 if (hcon->state == BT_CONNECTED) {
496 if (sk->sk_type == SOCK_SEQPACKET) {
497 struct l2cap_conn_req req;
498 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
499 req.scid = __cpu_to_le16(l2cap_pi(sk)->scid);
500 req.psm = l2cap_pi(sk)->psm;
501 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
502 L2CAP_CONN_REQ, sizeof(req), &req);
504 l2cap_sock_clear_timer(sk);
505 sk->sk_state = BT_CONNECTED;
510 hci_dev_unlock_bh(hdev);
/* connect(2): validate family/length, require a PSM for SEQPACKET,
 * reject in-progress or already-connected states, record destination
 * address + psm, start the connection and (unless non-blocking) wait
 * for BT_CONNECTED. NOTE(review): sampled excerpt — lock/unlock and
 * several error branches are elided. */
515 static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
517 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
518 struct sock *sk = sock->sk;
525 if (addr->sa_family != AF_BLUETOOTH || alen < sizeof(struct sockaddr_l2)) {
530 if (sk->sk_type == SOCK_SEQPACKET && !la->l2_psm) {
535 switch(sk->sk_state) {
539 /* Already connecting */
543 /* Already connected */
556 /* Set destination address and psm */
557 bacpy(&bt_sk(sk)->dst, &la->l2_bdaddr);
558 l2cap_pi(sk)->psm = la->l2_psm;
560 if ((err = l2cap_do_connect(sk)))
564 err = bt_sock_wait_state(sk, BT_CONNECTED,
565 sock_sndtimeo(sk, flags & O_NONBLOCK));
/* listen(2): only bound SEQPACKET sockets may listen. If no PSM was
 * bound, auto-assign the first free dynamic PSM (odd values 0x1001..
 * 0x10ff, stored little-endian via htobs) under the list write lock,
 * then set the backlog and enter BT_LISTEN. NOTE(review): the "no free
 * psm" error branch is elided in this excerpt. */
571 static int l2cap_sock_listen(struct socket *sock, int backlog)
573 struct sock *sk = sock->sk;
576 BT_DBG("sk %p backlog %d", sk, backlog);
580 if (sk->sk_state != BT_BOUND || sock->type != SOCK_SEQPACKET) {
585 if (!l2cap_pi(sk)->psm) {
586 bdaddr_t *src = &bt_sk(sk)->src;
591 write_lock_bh(&l2cap_sk_list.lock);
593 for (psm = 0x1001; psm < 0x1100; psm += 2)
594 if (!__l2cap_get_sock_by_addr(psm, src)) {
595 l2cap_pi(sk)->psm = htobs(psm);
596 l2cap_pi(sk)->sport = htobs(psm);
601 write_unlock_bh(&l2cap_sk_list.lock);
607 sk->sk_max_ack_backlog = backlog;
608 sk->sk_ack_backlog = 0;
609 sk->sk_state = BT_LISTEN;
/* accept(2): wake-one wait on the accept queue. Loops dequeuing until
 * a child appears, honoring the receive timeout, a state change away
 * from BT_LISTEN, and pending signals. On success the new socket is
 * marked SS_CONNECTED. NOTE(review): sampled excerpt — the zero-timeout
 * break, lock release/reacquire around schedule_timeout, and error
 * returns are elided. */
616 static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
618 DECLARE_WAITQUEUE(wait, current);
619 struct sock *sk = sock->sk, *nsk;
625 if (sk->sk_state != BT_LISTEN) {
630 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
632 BT_DBG("sk %p timeo %ld", sk, timeo);
634 /* Wait for an incoming connection. (wake-one). */
635 add_wait_queue_exclusive(sk->sk_sleep, &wait);
636 while (!(nsk = bt_accept_dequeue(sk, newsock))) {
637 set_current_state(TASK_INTERRUPTIBLE);
644 timeo = schedule_timeout(timeo);
647 if (sk->sk_state != BT_LISTEN) {
652 if (signal_pending(current)) {
653 err = sock_intr_errno(timeo);
657 set_current_state(TASK_RUNNING);
658 remove_wait_queue(sk->sk_sleep, &wait);
663 newsock->state = SS_CONNECTED;
665 BT_DBG("new socket %p", nsk);
/* getsockname/getpeername: fill a sockaddr_l2 with the peer address
 * (when `peer`) or the local address, plus the channel's PSM. */
672 static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
674 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
675 struct sock *sk = sock->sk;
677 BT_DBG("sock %p, sk %p", sock, sk);
679 addr->sa_family = AF_BLUETOOTH;
680 *len = sizeof(struct sockaddr_l2);
683 bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
685 bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
687 la->l2_psm = l2cap_pi(sk)->psm;
/* Fragment `len` bytes from `msg` into an ACL-MTU-sized skb chain and
 * queue it on the link. The head skb carries the L2CAP basic header
 * (plus a 2-byte PSM for connectionless SOCK_DGRAM sockets);
 * continuation fragments hang off frag_list with no header.
 * NOTE(review): sampled excerpt — the loop bounds around the
 * continuation fragments, error cleanup (kfree_skb), sent-count
 * accounting and return are elided. */
691 static inline int l2cap_do_send(struct sock *sk, struct msghdr *msg, int len)
693 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
694 struct sk_buff *skb, **frag;
695 int err, hlen, count, sent=0;
696 struct l2cap_hdr *lh;
698 BT_DBG("sk %p len %d", sk, len);
700 /* First fragment (with L2CAP header) */
701 if (sk->sk_type == SOCK_DGRAM)
702 hlen = L2CAP_HDR_SIZE + 2;
704 hlen = L2CAP_HDR_SIZE;
706 count = min_t(unsigned int, (conn->mtu - hlen), len);
708 skb = bt_skb_send_alloc(sk, hlen + count,
709 msg->msg_flags & MSG_DONTWAIT, &err);
713 /* Create L2CAP header */
714 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
715 lh->cid = __cpu_to_le16(l2cap_pi(sk)->dcid);
716 lh->len = __cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
718 if (sk->sk_type == SOCK_DGRAM)
719 put_unaligned(l2cap_pi(sk)->psm, (u16 *) skb_put(skb, 2));
721 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count)) {
729 /* Continuation fragments (no L2CAP header) */
730 frag = &skb_shinfo(skb)->frag_list;
732 count = min_t(unsigned int, conn->mtu, len);
734 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
738 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count)) {
746 frag = &(*frag)->next;
749 if ((err = hci_send_acl(conn->hcon, skb, 0)) < 0)
/* sendmsg(2): reject pending socket errors and MSG_OOB, enforce the
 * outgoing MTU, then hand off to l2cap_do_send() once connected.
 * NOTE(review): the error constants for the MSG_OOB/MTU/not-connected
 * branches and lock handling are elided in this excerpt. */
759 static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
761 struct sock *sk = sock->sk;
764 BT_DBG("sock %p, sk %p", sock, sk);
767 return sock_error(sk);
769 if (msg->msg_flags & MSG_OOB)
772 /* Check outgoing MTU */
773 if (len > l2cap_pi(sk)->omtu)
778 if (sk->sk_state == BT_CONNECTED)
779 err = l2cap_do_send(sk, msg, len);
/* setsockopt(2): L2CAP_OPTIONS copies user-supplied imtu/omtu into the
 * socket; L2CAP_LM sets the link-mode bits. NOTE(review): the option
 * switch, level checks and error handling between these fragments are
 * elided in this excerpt. */
787 static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, int optlen)
789 struct sock *sk = sock->sk;
790 struct l2cap_options opts;
800 len = min_t(unsigned int, sizeof(opts), optlen);
801 if (copy_from_user((char *)&opts, optval, len)) {
805 l2cap_pi(sk)->imtu = opts.imtu;
806 l2cap_pi(sk)->omtu = opts.omtu;
810 if (get_user(opt, (u32 __user *)optval)) {
815 l2cap_pi(sk)->link_mode = opt;
/* getsockopt(2): returns L2CAP_OPTIONS (imtu/omtu/flush_to), the
 * link-mode word, or L2CAP_CONNINFO (requires BT_CONNECTED; includes
 * the underlying HCI handle). Copies are capped at min(len, struct
 * size). NOTE(review): the option switch and several copy/error lines
 * are elided in this excerpt. */
827 static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
829 struct sock *sk = sock->sk;
830 struct l2cap_options opts;
831 struct l2cap_conninfo cinfo;
834 if (get_user(len, optlen))
841 opts.imtu = l2cap_pi(sk)->imtu;
842 opts.omtu = l2cap_pi(sk)->omtu;
843 opts.flush_to = l2cap_pi(sk)->flush_to;
845 len = min_t(unsigned int, len, sizeof(opts));
846 if (copy_to_user(optval, (char *)&opts, len))
852 if (put_user(l2cap_pi(sk)->link_mode, (u32 __user *)optval))
857 if (sk->sk_state != BT_CONNECTED) {
862 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
864 len = min_t(unsigned int, len, sizeof(cinfo));
865 if (copy_to_user(optval, (char *)&cinfo, len))
/* shutdown(2): idempotent — on first call mark both directions shut,
 * stop the timer and close with reason 0; if SO_LINGER is set with a
 * nonzero linger time, wait for BT_CLOSED up to that long. */
879 static int l2cap_sock_shutdown(struct socket *sock, int how)
881 struct sock *sk = sock->sk;
884 BT_DBG("sock %p, sk %p", sock, sk);
890 if (!sk->sk_shutdown) {
891 sk->sk_shutdown = SHUTDOWN_MASK;
892 l2cap_sock_clear_timer(sk);
893 __l2cap_sock_close(sk, 0);
895 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
896 err = bt_sock_wait_state(sk, BT_CLOSED, sk->sk_lingertime);
/* close(2)/release: full shutdown of the socket.
 * NOTE(review): the orphan/kill sequence after shutdown is elided in
 * this excerpt. */
902 static int l2cap_sock_release(struct socket *sock)
904 struct sock *sk = sock->sk;
907 BT_DBG("sock %p, sk %p", sock, sk);
912 err = l2cap_sock_shutdown(sock, 2);
919 /* ---- L2CAP channels ---- */
/* Walk the connection's channel list for a channel whose destination
 * CID matches. Caller must hold the channel-list lock. */
920 static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
923 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
924 if (l2cap_pi(s)->dcid == cid)
/* Same walk, matching the channel's source CID. Caller holds the
 * channel-list lock. */
930 static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
933 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
934 if (l2cap_pi(s)->scid == cid)
940 /* Find channel with given SCID.
941 * Returns locked socket */
/* Locked SCID lookup: bh-locks the found socket while still holding
 * the list read lock, so the result cannot be torn down underneath the
 * caller. */
942 static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
946 s = __l2cap_get_chan_by_scid(l, cid);
947 if (s) bh_lock_sock(s);
948 read_unlock(&l->lock);
/* Walk the channel list for the channel whose pending signalling
 * ident matches. Caller holds the channel-list lock. */
952 static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
955 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
956 if (l2cap_pi(s)->ident == ident)
/* Locked ident lookup — same locking discipline as
 * l2cap_get_chan_by_scid(): bh-lock under the list read lock. */
962 static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
966 s = __l2cap_get_chan_by_ident(l, ident);
967 if (s) bh_lock_sock(s);
968 read_unlock(&l->lock);
/* Pick the first source CID not already used on this connection.
 * NOTE(review): the starting value of `cid` (presumably the first
 * dynamic CID, 0x0040) is declared on an elided line — confirm. */
972 static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
976 for (; cid < 0xffff; cid++) {
977 if(!__l2cap_get_chan_by_scid(l, cid))
/* Push `sk` onto the head of the doubly linked channel list. Caller
 * holds the list write lock. NOTE(review): the head-update line is
 * elided in this excerpt. */
984 static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
989 l2cap_pi(l->head)->prev_c = sk;
991 l2cap_pi(sk)->next_c = l->head;
992 l2cap_pi(sk)->prev_c = NULL;
/* Remove `sk` from the channel list under the write lock, patching
 * neighbours' prev/next pointers. NOTE(review): the head-replacement
 * branch is elided in this excerpt. */
996 static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
998 struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;
1000 write_lock(&l->lock);
1005 l2cap_pi(next)->prev_c = prev;
1007 l2cap_pi(prev)->next_c = next;
1008 write_unlock(&l->lock);
/* Attach a socket to a connection as an L2CAP channel. SEQPACKET gets
 * a freshly allocated dynamic SCID; SOCK_DGRAM uses the fixed
 * connectionless CID 0x0002; anything else (raw) uses the signalling
 * CID 0x0001. Links the channel into the list and, for an incoming
 * child, enqueues it on the parent's accept queue. Caller holds the
 * channel-list write lock. */
1013 static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
1015 struct l2cap_chan_list *l = &conn->chan_list;
1017 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn, l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
1019 l2cap_pi(sk)->conn = conn;
1021 if (sk->sk_type == SOCK_SEQPACKET) {
1022 /* Alloc CID for connection-oriented socket */
1023 l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
1024 } else if (sk->sk_type == SOCK_DGRAM) {
1025 /* Connectionless socket */
1026 l2cap_pi(sk)->scid = 0x0002;
1027 l2cap_pi(sk)->dcid = 0x0002;
1028 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
1030 /* Raw socket can send/recv signalling messages only */
1031 l2cap_pi(sk)->scid = 0x0001;
1032 l2cap_pi(sk)->dcid = 0x0001;
1033 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
1036 __l2cap_chan_link(l, sk);
1039 bt_accept_enqueue(parent, sk);
/* Detach a channel from its connection and mark the socket closed.
1043 * Must be called on the locked socket. */
/* Stops the timer, unlinks from the channel list, drops the hcon
 * reference, records `err` and notifies either the accept-queue parent
 * (for an unaccepted child) or the socket's own state-change waiters. */
1044 static void l2cap_chan_del(struct sock *sk, int err)
1046 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1047 struct sock *parent = bt_sk(sk)->parent;
1049 l2cap_sock_clear_timer(sk);
1051 BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
1054 /* Unlink from channel list */
1055 l2cap_chan_unlink(&conn->chan_list, sk);
1056 l2cap_pi(sk)->conn = NULL;
1057 hci_conn_put(conn->hcon);
1060 sk->sk_state = BT_CLOSED;
1067 bt_accept_unlink(sk);
1068 parent->sk_data_ready(parent, 0);
1070 sk->sk_state_change(sk);
/* ACL link came up: walk every channel on the connection. Non-
 * SEQPACKET sockets become connected immediately; SEQPACKET sockets
 * still in BT_CONNECT now send their pending L2CAP_CONN_REQ. Per-
 * socket bh locking inside the loop is elided in this excerpt. */
1073 static void l2cap_conn_ready(struct l2cap_conn *conn)
1075 struct l2cap_chan_list *l = &conn->chan_list;
1078 BT_DBG("conn %p", conn);
1080 read_lock(&l->lock);
1082 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
1085 if (sk->sk_type != SOCK_SEQPACKET) {
1086 l2cap_sock_clear_timer(sk);
1087 sk->sk_state = BT_CONNECTED;
1088 sk->sk_state_change(sk);
1089 } else if (sk->sk_state == BT_CONNECT) {
1090 struct l2cap_conn_req req;
1091 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
1092 req.scid = __cpu_to_le16(l2cap_pi(sk)->scid);
1093 req.psm = l2cap_pi(sk)->psm;
1094 l2cap_send_cmd(conn, l2cap_pi(sk)->ident, L2CAP_CONN_REQ, sizeof(req), &req);
1100 read_unlock(&l->lock);
1103 /* Notify sockets that we cannot guaranty reliability anymore */
/* Flag `err` on every channel that requested L2CAP_LM_RELIABLE.
 * NOTE(review): the sk_err assignment inside the loop is elided in
 * this excerpt. */
1104 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1106 struct l2cap_chan_list *l = &conn->chan_list;
1109 BT_DBG("conn %p", conn);
1111 read_lock(&l->lock);
1112 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
1113 if (l2cap_pi(sk)->link_mode & L2CAP_LM_RELIABLE)
1116 read_unlock(&l->lock);
/* Configuration finished on both sides: clear config state and the
 * timer, then wake whoever is waiting — the connecting socket itself
 * (outgoing) or the listening parent's accept queue (incoming). */
1119 static void l2cap_chan_ready(struct sock *sk)
1121 struct sock *parent = bt_sk(sk)->parent;
1123 BT_DBG("sk %p, parent %p", sk, parent);
1125 l2cap_pi(sk)->conf_state = 0;
1126 l2cap_sock_clear_timer(sk);
1129 /* Outgoing channel.
1130 * Wake up socket sleeping on connect.
1132 sk->sk_state = BT_CONNECTED;
1133 sk->sk_state_change(sk);
1135 /* Incoming channel.
1136 * Wake up socket sleeping on accept.
1138 parent->sk_data_ready(parent, 0);
1142 /* Copy frame to all raw sockets on that connection */
/* Clone the incoming signalling frame to every SOCK_RAW channel on the
 * connection (skipping the originator). A clone that cannot be queued
 * is presumably freed on the elided line after sock_queue_rcv_skb. */
1143 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1145 struct l2cap_chan_list *l = &conn->chan_list;
1146 struct sk_buff *nskb;
1149 BT_DBG("conn %p", conn);
1151 read_lock(&l->lock);
1152 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
1153 if (sk->sk_type != SOCK_RAW)
1156 /* Don't send frame to the socket it came from */
1160 if (!(nskb = skb_clone(skb, GFP_ATOMIC)))
1163 if (sock_queue_rcv_skb(sk, nskb))
1166 read_unlock(&l->lock);
1169 /* ---- L2CAP signalling commands ---- */
/* Build an skb for one signalling command on CID 0x0001: L2CAP header
 * + command header (code/ident/len) + dlen bytes of payload, fragmented
 * to the ACL MTU with continuation data in frag_list. NOTE(review):
 * sampled excerpt — alloc-failure cleanup, the continuation loop
 * bounds and the final return are elided. */
1170 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
1171 u8 code, u8 ident, u16 dlen, void *data)
1173 struct sk_buff *skb, **frag;
1174 struct l2cap_cmd_hdr *cmd;
1175 struct l2cap_hdr *lh;
1178 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d", conn, code, ident, dlen);
1180 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
1181 count = min_t(unsigned int, conn->mtu, len);
1183 skb = bt_skb_alloc(count, GFP_ATOMIC);
1187 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1188 lh->len = __cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
1189 lh->cid = __cpu_to_le16(0x0001);
1191 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
1194 cmd->len = __cpu_to_le16(dlen);
1197 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
1198 memcpy(skb_put(skb, count), data, count);
1204 /* Continuation fragments (no L2CAP header) */
1205 frag = &skb_shinfo(skb)->frag_list;
1207 count = min_t(unsigned int, conn->mtu, len);
1209 *frag = bt_skb_alloc(count, GFP_ATOMIC);
1213 memcpy(skb_put(*frag, count), data, count);
1218 frag = &(*frag)->next;
/* Decode one configuration option at *ptr into (*type, *olen, *val).
 * 1/2/4-byte values are read (16/32-bit ones little-endian converted);
 * any other length passes a pointer to the raw bytes. Presumably also
 * advances *ptr and returns the consumed length — those lines are
 * elided in this excerpt. */
1228 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
1230 struct l2cap_conf_opt *opt = *ptr;
1233 len = L2CAP_CONF_OPT_SIZE + opt->len;
1241 *val = *((u8 *) opt->val);
1245 *val = __le16_to_cpu(*((u16 *)opt->val));
1249 *val = __le32_to_cpu(*((u32 *)opt->val));
1253 *val = (unsigned long) opt->val;
1257 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
/* Walk the peer's configuration request, recording its proposed MTU
 * and flush timeout into the socket. QoS and unknown options are not
 * acted on (unknown-option rejection is an acknowledged FIXME). */
1261 static inline void l2cap_parse_conf_req(struct sock *sk, void *data, int len)
1263 int type, hint, olen;
1267 BT_DBG("sk %p len %d", sk, len);
1269 while (len >= L2CAP_CONF_OPT_SIZE) {
1270 len -= l2cap_get_conf_opt(&ptr, &type, &olen, &val);
1276 case L2CAP_CONF_MTU:
1277 l2cap_pi(sk)->conf_mtu = val;
1280 case L2CAP_CONF_FLUSH_TO:
1281 l2cap_pi(sk)->flush_to = val;
1284 case L2CAP_CONF_QOS:
1291 /* FIXME: Reject unknown option */
/* Append one configuration option (type/len/val) at *ptr, converting
 * 2- and 4-byte values to little-endian, copying larger values raw,
 * and advancing *ptr past the option. NOTE(review): the type/len
 * header stores and the switch framing are elided in this excerpt. */
1297 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
1299 struct l2cap_conf_opt *opt = *ptr;
1301 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
1308 *((u8 *) opt->val) = val;
1312 *((u16 *) opt->val) = __cpu_to_le16(val);
1316 *((u32 *) opt->val) = __cpu_to_le32(val);
1320 memcpy(opt->val, (void *) val, len);
1324 *ptr += L2CAP_CONF_OPT_SIZE + len;
/* Fill a configuration request for this channel: include an MTU option
 * only when imtu differs from the default (flush-timeout support is an
 * acknowledged FIXME), then set dcid and flags. Presumably returns the
 * encoded length — the return line is elided in this excerpt. */
1327 static int l2cap_build_conf_req(struct sock *sk, void *data)
1329 struct l2cap_pinfo *pi = l2cap_pi(sk);
1330 struct l2cap_conf_req *req = data;
1331 void *ptr = req->data;
1333 BT_DBG("sk %p", sk);
1335 if (pi->imtu != L2CAP_DEFAULT_MTU)
1336 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
1338 /* FIXME: Need actual value of the flush timeout */
1339 //if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
1340 // l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);
1342 req->dcid = __cpu_to_le16(pi->dcid);
1343 req->flags = __cpu_to_le16(0);
/* Evaluate the peer's proposed options against ours: if the peer's MTU
 * is below our outgoing MTU, echo our MTU back and mark the config
 * UNACCEPT; otherwise adopt the peer's MTU as omtu. Returns the
 * result code. */
1348 static inline int l2cap_conf_output(struct sock *sk, void **ptr)
1350 struct l2cap_pinfo *pi = l2cap_pi(sk);
1353 /* Configure output options and let the other side know
1354 * which ones we don't like. */
1355 if (pi->conf_mtu < pi->omtu) {
1356 l2cap_add_conf_opt(ptr, L2CAP_CONF_MTU, 2, pi->omtu);
1357 result = L2CAP_CONF_UNACCEPT;
1359 pi->omtu = pi->conf_mtu;
/* Build a configuration response. A NULL `result` means the peer's
 * request was incomplete (continuation flag) and we answer with an
 * empty/zero-result response; otherwise *result is computed via
 * l2cap_conf_output(). Presumably returns the encoded length — the
 * return line is elided in this excerpt. */
1366 static int l2cap_build_conf_rsp(struct sock *sk, void *data, int *result)
1368 struct l2cap_conf_rsp *rsp = data;
1369 void *ptr = rsp->data;
1372 BT_DBG("sk %p complete %d", sk, result ? 1 : 0);
1375 *result = l2cap_conf_output(sk, &ptr);
1379 rsp->scid = __cpu_to_le16(l2cap_pi(sk)->dcid);
1380 rsp->result = __cpu_to_le16(result ? *result : 0);
1381 rsp->flags = __cpu_to_le16(flags);
/* Handle an incoming L2CAP_CONN_REQ. Finds a listener for the PSM,
 * enforces the accept backlog, allocates a child socket, rejects a
 * duplicate remote SCID, links the child into the connection, and
 * answers with a CONN_RSP — either pending (security: encrypt/auth
 * requested by link_mode) or immediate success (BT_CONFIG).
 * NOTE(review): sampled excerpt — goto labels for the error/response
 * paths and some lock sites are elided. */
1386 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1388 struct l2cap_chan_list *list = &conn->chan_list;
1389 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
1390 struct l2cap_conn_rsp rsp;
1391 struct sock *sk, *parent;
1392 int result = 0, status = 0;
1394 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
1397 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
1399 /* Check if we have socket listening on psm */
1400 parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
1402 result = L2CAP_CR_BAD_PSM;
1406 result = L2CAP_CR_NO_MEM;
1408 /* Check for backlog size */
1409 if (parent->sk_ack_backlog > parent->sk_max_ack_backlog) {
1410 BT_DBG("backlog full %d", parent->sk_ack_backlog);
1414 sk = l2cap_sock_alloc(NULL, BTPROTO_L2CAP, GFP_ATOMIC);
1418 write_lock(&list->lock);
1420 /* Check if we already have channel with that dcid */
1421 if (__l2cap_get_chan_by_dcid(list, scid)) {
1422 write_unlock(&list->lock);
1424 l2cap_sock_kill(sk);
1428 hci_conn_hold(conn->hcon);
1430 l2cap_sock_init(sk, parent);
1431 bacpy(&bt_sk(sk)->src, conn->src);
1432 bacpy(&bt_sk(sk)->dst, conn->dst);
1433 l2cap_pi(sk)->psm = psm;
1434 l2cap_pi(sk)->dcid = scid;
1436 __l2cap_chan_add(conn, sk, parent);
1437 dcid = l2cap_pi(sk)->scid;
1439 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
1441 /* Service level security */
1442 result = L2CAP_CR_PEND;
1443 status = L2CAP_CS_AUTHEN_PEND;
1444 sk->sk_state = BT_CONNECT2;
1445 l2cap_pi(sk)->ident = cmd->ident;
1447 if ((l2cap_pi(sk)->link_mode & L2CAP_LM_ENCRYPT) ||
1448 (l2cap_pi(sk)->link_mode & L2CAP_LM_SECURE)) {
1449 if (!hci_conn_encrypt(conn->hcon))
1451 } else if (l2cap_pi(sk)->link_mode & L2CAP_LM_AUTH) {
1452 if (!hci_conn_auth(conn->hcon))
1456 sk->sk_state = BT_CONFIG;
1457 result = status = 0;
1460 write_unlock(&list->lock);
1463 bh_unlock_sock(parent);
1466 rsp.scid = __cpu_to_le16(scid);
1467 rsp.dcid = __cpu_to_le16(dcid);
1468 rsp.result = __cpu_to_le16(result);
1469 rsp.status = __cpu_to_le16(status);
1470 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* Handle L2CAP_CONN_RSP. Locate our channel by SCID (falling back to
 * the pending command ident when scid is not yet assigned); on success
 * record the peer's DCID, enter BT_CONFIG and send our CONF_REQ; any
 * non-pending failure deletes the channel with ECONNREFUSED.
 * NOTE(review): the PENDING case and the req buffer declaration are
 * elided in this excerpt. */
1474 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1476 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
1477 u16 scid, dcid, result, status;
1481 scid = __le16_to_cpu(rsp->scid);
1482 dcid = __le16_to_cpu(rsp->dcid);
1483 result = __le16_to_cpu(rsp->result);
1484 status = __le16_to_cpu(rsp->status);
1486 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
1489 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
1492 if (!(sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident)))
1497 case L2CAP_CR_SUCCESS:
1498 sk->sk_state = BT_CONFIG;
1499 l2cap_pi(sk)->ident = 0;
1500 l2cap_pi(sk)->dcid = dcid;
1501 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
1503 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1504 l2cap_build_conf_req(sk, req), req);
1511 l2cap_chan_del(sk, ECONNREFUSED);
/* Handle L2CAP_CONF_REQ. Parse the peer's options into the socket;
 * when the continuation flag (0x0001) is set answer with an empty
 * response, otherwise send the full response, mark output config done,
 * and either complete the channel (input already done) or send our own
 * CONF_REQ if we have not yet. NOTE(review): the rsp/req buffer
 * declarations and unlock/return lines are elided in this excerpt. */
1519 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1521 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
1527 dcid = __le16_to_cpu(req->dcid);
1528 flags = __le16_to_cpu(req->flags);
1530 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
1532 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid)))
1535 l2cap_parse_conf_req(sk, req->data, cmd->len - sizeof(*req));
1537 if (flags & 0x0001) {
1538 /* Incomplete config. Send empty response. */
1539 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
1540 l2cap_build_conf_rsp(sk, rsp, NULL), rsp);
1544 /* Complete config. */
1545 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
1546 l2cap_build_conf_rsp(sk, rsp, &result), rsp);
1551 /* Output config done */
1552 l2cap_pi(sk)->conf_state |= L2CAP_CONF_OUTPUT_DONE;
1554 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
1555 sk->sk_state = BT_CONNECTED;
1556 l2cap_chan_ready(sk);
1557 } else if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
1559 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1560 l2cap_build_conf_req(sk, req), req);
/* Handle L2CAP_CONF_RSP. SUCCESS falls through to completion; on
 * UNACCEPT we retry by resending the identical CONF_REQ up to
 * L2CAP_CONF_MAX_RETRIES (deliberately — it satisfies qualification
 * testing); once retries are exhausted (or on other failures) we go
 * BT_DISCONN with ECONNRESET, a 5 s timer, and a DISCONN_REQ. On the
 * success path, mark input config done and finish the channel if
 * output is done too. NOTE(review): sampled excerpt — req buffer
 * declaration, some case framing and unlock/return lines are elided. */
1568 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1570 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
1571 u16 scid, flags, result;
1574 scid = __le16_to_cpu(rsp->scid);
1575 flags = __le16_to_cpu(rsp->flags);
1576 result = __le16_to_cpu(rsp->result);
1578 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x", scid, flags, result);
1580 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
1584 case L2CAP_CONF_SUCCESS:
1587 case L2CAP_CONF_UNACCEPT:
1588 if (++l2cap_pi(sk)->conf_retry < L2CAP_CONF_MAX_RETRIES) {
1590 /* It does not make sense to adjust L2CAP parameters
1591 * that are currently defined in the spec. We simply
1592 * resend config request that we sent earlier. It is
1593 * stupid, but it helps qualification testing which
1594 * expects at least some response from us. */
1595 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1596 l2cap_build_conf_req(sk, req), req);
1601 sk->sk_state = BT_DISCONN;
1602 sk->sk_err = ECONNRESET;
1603 l2cap_sock_set_timer(sk, HZ * 5);
1605 struct l2cap_disconn_req req;
1606 req.dcid = __cpu_to_le16(l2cap_pi(sk)->dcid);
1607 req.scid = __cpu_to_le16(l2cap_pi(sk)->scid);
1608 l2cap_send_cmd(conn, l2cap_get_ident(conn),
1609 L2CAP_DISCONN_REQ, sizeof(req), &req);
1617 /* Input config done */
1618 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
1620 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
1621 sk->sk_state = BT_CONNECTED;
1622 l2cap_chan_ready(sk);
/* Handle L2CAP_DISCONN_REQ: find our channel by the request's DCID,
 * acknowledge with DISCONN_RSP (our scid/dcid swapped into the
 * response's dcid/scid), shut the socket both ways, delete the channel
 * with ECONNRESET and kill the socket. */
1630 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1632 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
1633 struct l2cap_disconn_rsp rsp;
1637 scid = __le16_to_cpu(req->scid);
1638 dcid = __le16_to_cpu(req->dcid);
1640 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
1642 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid)))
1645 rsp.dcid = __cpu_to_le16(l2cap_pi(sk)->scid);
1646 rsp.scid = __cpu_to_le16(l2cap_pi(sk)->dcid);
1647 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
1649 sk->sk_shutdown = SHUTDOWN_MASK;
1651 l2cap_chan_del(sk, ECONNRESET);
1654 l2cap_sock_kill(sk);
/* Handle L2CAP_DISCONN_RSP: the peer confirmed our disconnect — find
 * the channel by SCID, delete it with no error, and kill the socket. */
1658 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1660 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
1664 scid = __le16_to_cpu(rsp->scid);
1665 dcid = __le16_to_cpu(rsp->dcid);
1667 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
1669 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
1672 l2cap_chan_del(sk, 0);
1675 l2cap_sock_kill(sk);
/* Handle L2CAP_INFO_REQ: this stack supports no optional features, so
 * echo the requested type back with result NOTSUPP. */
1679 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1681 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
1682 struct l2cap_info_rsp rsp;
1685 type = __le16_to_cpu(req->type);
1687 BT_DBG("type 0x%4.4x", type);
1689 rsp.type = __cpu_to_le16(type);
1690 rsp.result = __cpu_to_le16(L2CAP_IR_NOTSUPP);
1691 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp), &rsp);
1696 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1698 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
1701 type = __le16_to_cpu(rsp->type);
1702 result = __le16_to_cpu(rsp->result);
1704 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
1709 static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
1711 u8 *data = skb->data;
1713 struct l2cap_cmd_hdr cmd;
1716 l2cap_raw_recv(conn, skb);
1718 while (len >= L2CAP_CMD_HDR_SIZE) {
1719 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
1720 data += L2CAP_CMD_HDR_SIZE;
1721 len -= L2CAP_CMD_HDR_SIZE;
1723 cmd.len = __le16_to_cpu(cmd.len);
1725 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd.len, cmd.ident);
1727 if (cmd.len > len || !cmd.ident) {
1728 BT_DBG("corrupted command");
1733 case L2CAP_COMMAND_REJ:
1734 /* FIXME: We should process this */
1737 case L2CAP_CONN_REQ:
1738 err = l2cap_connect_req(conn, &cmd, data);
1741 case L2CAP_CONN_RSP:
1742 err = l2cap_connect_rsp(conn, &cmd, data);
1745 case L2CAP_CONF_REQ:
1746 err = l2cap_config_req(conn, &cmd, data);
1749 case L2CAP_CONF_RSP:
1750 err = l2cap_config_rsp(conn, &cmd, data);
1753 case L2CAP_DISCONN_REQ:
1754 err = l2cap_disconnect_req(conn, &cmd, data);
1757 case L2CAP_DISCONN_RSP:
1758 err = l2cap_disconnect_rsp(conn, &cmd, data);
1761 case L2CAP_ECHO_REQ:
1762 l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd.len, data);
1765 case L2CAP_ECHO_RSP:
1768 case L2CAP_INFO_REQ:
1769 err = l2cap_information_req(conn, &cmd, data);
1772 case L2CAP_INFO_RSP:
1773 err = l2cap_information_rsp(conn, &cmd, data);
1777 BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
1783 struct l2cap_cmd_rej rej;
1784 BT_DBG("error %d", err);
1786 /* FIXME: Map err to a valid reason */
1787 rej.reason = __cpu_to_le16(0);
1788 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
1798 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
1802 sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
1804 BT_DBG("unknown cid 0x%4.4x", cid);
1808 BT_DBG("sk %p, len %d", sk, skb->len);
1810 if (sk->sk_state != BT_CONNECTED)
1813 if (l2cap_pi(sk)->imtu < skb->len)
1816 /* If socket recv buffers overflows we drop data here
1817 * which is *bad* because L2CAP has to be reliable.
1818 * But we don't have any other choice. L2CAP doesn't
1819 * provide flow control mechanism. */
1821 if (!sock_queue_rcv_skb(sk, skb))
1828 if (sk) bh_unlock_sock(sk);
1832 static inline int l2cap_conless_channel(struct l2cap_conn *conn, u16 psm, struct sk_buff *skb)
1836 sk = l2cap_get_sock_by_psm(0, psm, conn->src);
1840 BT_DBG("sk %p, len %d", sk, skb->len);
1842 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
1845 if (l2cap_pi(sk)->imtu < skb->len)
1848 if (!sock_queue_rcv_skb(sk, skb))
1855 if (sk) bh_unlock_sock(sk);
1859 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
1861 struct l2cap_hdr *lh = (void *) skb->data;
1864 skb_pull(skb, L2CAP_HDR_SIZE);
1865 cid = __le16_to_cpu(lh->cid);
1866 len = __le16_to_cpu(lh->len);
1868 BT_DBG("len %d, cid 0x%4.4x", len, cid);
1872 l2cap_sig_channel(conn, skb);
1876 psm = get_unaligned((u16 *) skb->data);
1878 l2cap_conless_channel(conn, psm, skb);
1882 l2cap_data_channel(conn, cid, skb);
1887 /* ---- L2CAP interface with lower layer (HCI) ---- */
1889 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1891 int exact = 0, lm1 = 0, lm2 = 0;
1892 register struct sock *sk;
1893 struct hlist_node *node;
1895 if (type != ACL_LINK)
1898 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
1900 /* Find listening sockets and check their link_mode */
1901 read_lock(&l2cap_sk_list.lock);
1902 sk_for_each(sk, node, &l2cap_sk_list.head) {
1903 if (sk->sk_state != BT_LISTEN)
1906 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
1907 lm1 |= (HCI_LM_ACCEPT | l2cap_pi(sk)->link_mode);
1909 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
1910 lm2 |= (HCI_LM_ACCEPT | l2cap_pi(sk)->link_mode);
1912 read_unlock(&l2cap_sk_list.lock);
1914 return exact ? lm1 : lm2;
1917 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
1919 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
1921 if (hcon->type != ACL_LINK)
1925 struct l2cap_conn *conn;
1927 conn = l2cap_conn_add(hcon, status);
1929 l2cap_conn_ready(conn);
1931 l2cap_conn_del(hcon, bt_err(status));
1936 static int l2cap_disconn_ind(struct hci_conn *hcon, u8 reason)
1938 BT_DBG("hcon %p reason %d", hcon, reason);
1940 if (hcon->type != ACL_LINK)
1943 l2cap_conn_del(hcon, bt_err(reason));
1947 static int l2cap_auth_cfm(struct hci_conn *hcon, u8 status)
1949 struct l2cap_chan_list *l;
1950 struct l2cap_conn *conn;
1951 struct l2cap_conn_rsp rsp;
1955 if (!(conn = hcon->l2cap_data))
1957 l = &conn->chan_list;
1959 BT_DBG("conn %p", conn);
1961 read_lock(&l->lock);
1963 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
1966 if (sk->sk_state != BT_CONNECT2 ||
1967 (l2cap_pi(sk)->link_mode & L2CAP_LM_ENCRYPT) ||
1968 (l2cap_pi(sk)->link_mode & L2CAP_LM_SECURE)) {
1974 sk->sk_state = BT_CONFIG;
1977 sk->sk_state = BT_DISCONN;
1978 l2cap_sock_set_timer(sk, HZ/10);
1979 result = L2CAP_CR_SEC_BLOCK;
1982 rsp.scid = __cpu_to_le16(l2cap_pi(sk)->dcid);
1983 rsp.dcid = __cpu_to_le16(l2cap_pi(sk)->scid);
1984 rsp.result = __cpu_to_le16(result);
1985 rsp.status = __cpu_to_le16(0);
1986 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
1987 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
1992 read_unlock(&l->lock);
1996 static int l2cap_encrypt_cfm(struct hci_conn *hcon, u8 status)
1998 struct l2cap_chan_list *l;
1999 struct l2cap_conn *conn;
2000 struct l2cap_conn_rsp rsp;
2004 if (!(conn = hcon->l2cap_data))
2006 l = &conn->chan_list;
2008 BT_DBG("conn %p", conn);
2010 read_lock(&l->lock);
2012 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
2015 if (sk->sk_state != BT_CONNECT2) {
2021 sk->sk_state = BT_CONFIG;
2024 sk->sk_state = BT_DISCONN;
2025 l2cap_sock_set_timer(sk, HZ/10);
2026 result = L2CAP_CR_SEC_BLOCK;
2029 rsp.scid = __cpu_to_le16(l2cap_pi(sk)->dcid);
2030 rsp.dcid = __cpu_to_le16(l2cap_pi(sk)->scid);
2031 rsp.result = __cpu_to_le16(result);
2032 rsp.status = __cpu_to_le16(0);
2033 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
2034 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2036 if (l2cap_pi(sk)->link_mode & L2CAP_LM_SECURE)
2037 hci_conn_change_link_key(hcon);
2042 read_unlock(&l->lock);
2046 static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
2048 struct l2cap_conn *conn = hcon->l2cap_data;
2050 if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
2053 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
2055 if (flags & ACL_START) {
2056 struct l2cap_hdr *hdr;
2060 BT_ERR("Unexpected start frame (len %d)", skb->len);
2061 kfree_skb(conn->rx_skb);
2062 conn->rx_skb = NULL;
2064 l2cap_conn_unreliable(conn, ECOMM);
2068 BT_ERR("Frame is too short (len %d)", skb->len);
2069 l2cap_conn_unreliable(conn, ECOMM);
2073 hdr = (struct l2cap_hdr *) skb->data;
2074 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
2076 if (len == skb->len) {
2077 /* Complete frame received */
2078 l2cap_recv_frame(conn, skb);
2082 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
2084 if (skb->len > len) {
2085 BT_ERR("Frame is too long (len %d, expected len %d)",
2087 l2cap_conn_unreliable(conn, ECOMM);
2091 /* Allocate skb for the complete frame (with header) */
2092 if (!(conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC)))
2095 memcpy(skb_put(conn->rx_skb, skb->len), skb->data, skb->len);
2096 conn->rx_len = len - skb->len;
2098 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
2100 if (!conn->rx_len) {
2101 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
2102 l2cap_conn_unreliable(conn, ECOMM);
2106 if (skb->len > conn->rx_len) {
2107 BT_ERR("Fragment is too long (len %d, expected %d)",
2108 skb->len, conn->rx_len);
2109 kfree_skb(conn->rx_skb);
2110 conn->rx_skb = NULL;
2112 l2cap_conn_unreliable(conn, ECOMM);
2116 memcpy(skb_put(conn->rx_skb, skb->len), skb->data, skb->len);
2117 conn->rx_len -= skb->len;
2119 if (!conn->rx_len) {
2120 /* Complete frame received */
2121 l2cap_recv_frame(conn, conn->rx_skb);
2122 conn->rx_skb = NULL;
2131 /* ---- Proc fs support ---- */
2132 #ifdef CONFIG_PROC_FS
2133 static void *l2cap_seq_start(struct seq_file *seq, loff_t *pos)
2136 struct hlist_node *node;
2139 read_lock_bh(&l2cap_sk_list.lock);
2141 sk_for_each(sk, node, &l2cap_sk_list.head)
2149 static void *l2cap_seq_next(struct seq_file *seq, void *e, loff_t *pos)
2155 static void l2cap_seq_stop(struct seq_file *seq, void *e)
2157 read_unlock_bh(&l2cap_sk_list.lock);
2160 static int l2cap_seq_show(struct seq_file *seq, void *e)
2162 struct sock *sk = e;
2163 struct l2cap_pinfo *pi = l2cap_pi(sk);
2165 seq_printf(seq, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d 0x%x\n",
2166 batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst),
2167 sk->sk_state, pi->psm, pi->scid, pi->dcid, pi->imtu,
2168 pi->omtu, pi->link_mode);
2172 static struct seq_operations l2cap_seq_ops = {
2173 .start = l2cap_seq_start,
2174 .next = l2cap_seq_next,
2175 .stop = l2cap_seq_stop,
2176 .show = l2cap_seq_show
2179 static int l2cap_seq_open(struct inode *inode, struct file *file)
2181 return seq_open(file, &l2cap_seq_ops);
2184 static struct file_operations l2cap_seq_fops = {
2185 .owner = THIS_MODULE,
2186 .open = l2cap_seq_open,
2188 .llseek = seq_lseek,
2189 .release = seq_release,
2192 static int __init l2cap_proc_init(void)
2194 struct proc_dir_entry *p = create_proc_entry("l2cap", S_IRUGO, proc_bt);
2197 p->owner = THIS_MODULE;
2198 p->proc_fops = &l2cap_seq_fops;
2202 static void __exit l2cap_proc_cleanup(void)
2204 remove_proc_entry("l2cap", proc_bt);
2207 #else /* CONFIG_PROC_FS */
2209 static int __init l2cap_proc_init(void)
2214 static void __exit l2cap_proc_cleanup(void)
2218 #endif /* CONFIG_PROC_FS */
2220 static struct proto_ops l2cap_sock_ops = {
2221 .family = PF_BLUETOOTH,
2222 .owner = THIS_MODULE,
2223 .release = l2cap_sock_release,
2224 .bind = l2cap_sock_bind,
2225 .connect = l2cap_sock_connect,
2226 .listen = l2cap_sock_listen,
2227 .accept = l2cap_sock_accept,
2228 .getname = l2cap_sock_getname,
2229 .sendmsg = l2cap_sock_sendmsg,
2230 .recvmsg = bt_sock_recvmsg,
2231 .poll = bt_sock_poll,
2232 .mmap = sock_no_mmap,
2233 .socketpair = sock_no_socketpair,
2234 .ioctl = sock_no_ioctl,
2235 .shutdown = l2cap_sock_shutdown,
2236 .setsockopt = l2cap_sock_setsockopt,
2237 .getsockopt = l2cap_sock_getsockopt
2240 static struct net_proto_family l2cap_sock_family_ops = {
2241 .family = PF_BLUETOOTH,
2242 .owner = THIS_MODULE,
2243 .create = l2cap_sock_create,
2246 static struct hci_proto l2cap_hci_proto = {
2248 .id = HCI_PROTO_L2CAP,
2249 .connect_ind = l2cap_connect_ind,
2250 .connect_cfm = l2cap_connect_cfm,
2251 .disconn_ind = l2cap_disconn_ind,
2252 .auth_cfm = l2cap_auth_cfm,
2253 .encrypt_cfm = l2cap_encrypt_cfm,
2254 .recv_acldata = l2cap_recv_acldata
2257 static int __init l2cap_init(void)
2261 if ((err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops))) {
2262 BT_ERR("L2CAP socket registration failed");
2266 if ((err = hci_register_proto(&l2cap_hci_proto))) {
2267 BT_ERR("L2CAP protocol registration failed");
2273 BT_INFO("L2CAP ver %s", VERSION);
2274 BT_INFO("L2CAP socket layer initialized");
2279 static void __exit l2cap_exit(void)
2281 l2cap_proc_cleanup();
2283 /* Unregister socket and protocol */
2284 if (bt_sock_unregister(BTPROTO_L2CAP))
2285 BT_ERR("L2CAP socket unregistration failed");
2287 if (hci_unregister_proto(&l2cap_hci_proto))
2288 BT_ERR("L2CAP protocol unregistration failed");
2291 void l2cap_load(void)
2293 /* Dummy function to trigger automatic L2CAP module loading by
2294 * other modules that use L2CAP sockets but don't use any other
2295 * symbols from it. */
2298 EXPORT_SYMBOL(l2cap_load);
2300 module_init(l2cap_init);
2301 module_exit(l2cap_exit);
2303 MODULE_AUTHOR("Maxim Krasnyansky <maxk@qualcomm.com>, Marcel Holtmann <marcel@holtmann.org>");
2304 MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
2305 MODULE_VERSION(VERSION);
2306 MODULE_LICENSE("GPL");
2307 MODULE_ALIAS("bt-proto-0");