1 /* connection.c: Rx connection routines
3 * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
12 #include <linux/sched.h>
13 #include <linux/slab.h>
14 #include <linux/module.h>
15 #include <rxrpc/rxrpc.h>
16 #include <rxrpc/transport.h>
17 #include <rxrpc/peer.h>
18 #include <rxrpc/connection.h>
19 #include <rxrpc/call.h>
20 #include <rxrpc/message.h>
21 #include <linux/udp.h>
24 #include <asm/uaccess.h>
/* debug-accounting count of live connection records (compiled out unless
 * accounting is enabled) */
27 __RXACCT_DECL(atomic_t rxrpc_connection_count);
/* global list of all connections (linked via conn->proc_link — presumably
 * for /proc enumeration; confirm against full source), guarded by
 * rxrpc_conns_sem */
29 LIST_HEAD(rxrpc_conns);
30 DECLARE_RWSEM(rxrpc_conns_sem);
/* how long a dead connection lingers in a peer's graveyard before final
 * destruction, in seconds (scaled by HZ when the timer is armed) */
31 unsigned long rxrpc_conn_timeout = 60 * 60;
/*
 * handle expiry of a connection's graveyard timer
 * - the timer is embedded in the connection record, so recover the owning
 *   connection from the timer pointer and hand it to the real destructor
 */
33 static void __rxrpc_conn_timeout(rxrpc_timer_t *timer)
35 struct rxrpc_connection *conn =
36 list_entry(timer, struct rxrpc_connection, timeout);
38 _debug("Rx CONN TIMEOUT [%p{u=%d}]", conn, atomic_read(&conn->usage));
40 rxrpc_conn_do_timeout(conn);
/* timer operations table: dispatches graveyard-timer expiry to
 * __rxrpc_conn_timeout() */
43 static const struct rxrpc_timer_ops rxrpc_conn_timer_ops = {
44 .timed_out = __rxrpc_conn_timeout,
47 /*****************************************************************************/
49 * create a new connection record
/*
 * allocate and initialise a blank connection record
 * - zero-fills the record, sets the usage count to 1, initialises the list
 *   links, channel waitqueue, lock and graveyard timer, stamps the access
 *   time and attaches the record to the peer's transport
 * - on success *_conn presumably receives the new record (assignment is in
 *   an elided line of this chunk — confirm against full source)
 * - NOTE(review): the NULL check after kmalloc() and the error-return path
 *   are in lines elided from this chunk
 */
51 static inline int __rxrpc_create_connection(struct rxrpc_peer *peer,
52 struct rxrpc_connection **_conn)
54 struct rxrpc_connection *conn;
58 /* allocate and initialise a connection record */
59 conn = kmalloc(sizeof(struct rxrpc_connection), GFP_KERNEL);
65 memset(conn, 0, sizeof(struct rxrpc_connection));
66 atomic_set(&conn->usage, 1);
68 INIT_LIST_HEAD(&conn->link);
69 INIT_LIST_HEAD(&conn->id_link);
70 init_waitqueue_head(&conn->chanwait);
71 spin_lock_init(&conn->lock);
72 rxrpc_timer_init(&conn->timeout, &rxrpc_conn_timer_ops);
/* record creation time and start with a conservative MTU estimate; refined
 * later from the route's device (see rxrpc_conn_receive_call_packet) */
74 do_gettimeofday(&conn->atime);
75 conn->mtu_size = 1024;
77 conn->trans = peer->trans;
79 __RXACCT(atomic_inc(&rxrpc_connection_count));
81 _leave(" = 0 (%p)", conn);
84 } /* end __rxrpc_create_connection() */
86 /*****************************************************************************/
88 * create a new connection record for outgoing connections
/*
 * create a connection record for an outgoing (client-initiated) connection
 * - looks up a peer record for the destination, allocates a candidate
 *   connection, fills in the addressing/epoch/service fields, then invents
 *   a unique connection ID under peer->conn_idlock (the ID list is kept
 *   sorted, so the walk can stop at the insertion point)
 * - under peer->conn_lock, a matching dead connection in the peer's
 *   graveyard is resurrected in preference to committing the candidate;
 *   otherwise the candidate goes on the active list and the global
 *   rxrpc_conns list
 * - NOTE(review): several lines are elided from this chunk (full parameter
 *   list, error-path gotos/labels, the candidate-discard path around lines
 *   182-186, and the rejoin after found_in_graveyard), so control flow
 *   between the visible fragments cannot be fully confirmed here
 */
90 int rxrpc_create_connection(struct rxrpc_transport *trans,
95 struct rxrpc_connection **_conn)
97 struct rxrpc_connection *candidate, *conn;
98 struct rxrpc_peer *peer;
103 _enter("%p{%hu},%u,%hu", trans, trans->port, ntohs(port), service_id);
105 /* get a peer record */
106 ret = rxrpc_peer_lookup(trans, addr, &peer);
108 _leave(" = %d", ret);
112 /* allocate and initialise a connection record */
113 ret = __rxrpc_create_connection(peer, &candidate);
115 rxrpc_put_peer(peer);
116 _leave(" = %d", ret);
120 /* fill in the specific bits */
121 candidate->addr.sin_family = AF_INET;
122 candidate->addr.sin_port = port;
123 candidate->addr.sin_addr.s_addr = addr;
125 candidate->in_epoch = rxrpc_epoch;
126 candidate->out_epoch = rxrpc_epoch;
/* we initiated this connection, so only the outbound direction carries the
 * client-initiated flag */
127 candidate->in_clientflag = 0;
128 candidate->out_clientflag = RXRPC_CLIENT_INITIATED;
129 candidate->service_id = htons(service_id);
131 /* invent a unique connection ID */
132 write_lock(&peer->conn_idlock);
/* IDs advance in steps of RXRPC_MAXCALLS so the low bits remain free to
 * encode the per-call channel number within the CID */
135 connid = htonl(peer->conn_idcounter & RXRPC_CIDMASK);
136 peer->conn_idcounter += RXRPC_MAXCALLS;
138 list_for_each(_p, &peer->conn_idlist) {
139 conn = list_entry(_p, struct rxrpc_connection, id_link);
140 if (connid == conn->conn_id)
142 if (connid > conn->conn_id)
146 _debug("selected candidate conn ID %x.%u",
147 ntohl(peer->addr.s_addr), ntohl(connid));
/* insert before _p to keep the ID list sorted */
149 candidate->conn_id = connid;
150 list_add_tail(&candidate->id_link, _p);
152 write_unlock(&peer->conn_idlock);
155 candidate->peer = peer;
157 write_lock(&peer->conn_lock);
159 /* search the peer's transport graveyard list */
160 spin_lock(&peer->conn_gylock);
161 list_for_each(_p, &peer->conn_graveyard) {
162 conn = list_entry(_p, struct rxrpc_connection, link);
/* match on port/security/service with in_clientflag==0, i.e. a connection
 * we originated */
163 if (conn->addr.sin_port == candidate->addr.sin_port &&
164 conn->security_ix == candidate->security_ix &&
165 conn->service_id == candidate->service_id &&
166 conn->in_clientflag == 0)
167 goto found_in_graveyard;
169 spin_unlock(&peer->conn_gylock);
171 /* pick the new candidate */
172 _debug("created connection: {%08x} [out]", htonl(candidate->conn_id));
173 atomic_inc(&peer->conn_count);
178 list_add_tail(&conn->link, &peer->conn_active);
179 write_unlock(&peer->conn_lock);
/* discard path: the unused candidate's ID reservation is released and the
 * accounting count dropped (free of the candidate itself is in elided
 * lines — confirm against full source) */
182 write_lock(&peer->conn_idlock);
183 list_del(&candidate->id_link);
184 write_unlock(&peer->conn_idlock);
186 __RXACCT(atomic_dec(&rxrpc_connection_count));
/* publish the chosen connection on the global list */
190 down_write(&rxrpc_conns_sem);
191 list_add_tail(&conn->proc_link, &rxrpc_conns);
192 up_write(&rxrpc_conns_sem);
196 _leave(" = 0 (%p)", conn);
200 /* handle resurrecting a connection from the graveyard */
202 _debug("resurrecting connection: {%08x} [out]", htonl(conn->conn_id));
/* take a fresh ref, cancel the pending destruction timer and pull the
 * record off the graveyard list */
203 rxrpc_get_connection(conn);
204 rxrpc_krxtimod_del_timer(&conn->timeout);
205 list_del_init(&conn->link);
206 spin_unlock(&peer->conn_gylock);
208 } /* end rxrpc_create_connection() */
210 /*****************************************************************************/
212 * lookup the connection for an incoming packet
213 * - create a new connection record for unrecorded incoming connections
/*
 * look up the connection for an incoming packet
 * - match key: source UDP port, epoch, masked connection ID, security
 *   index, service ID and the client-initiated flag from the packet header
 * - fast path: search the peer's active list under the read lock
 * - slow path: build a candidate inbound record, retake the list write
 *   lock, re-check the active list (the connection may have appeared
 *   concurrently), then search the graveyard for a resurrectable record
 *   before committing the candidate
 * - outbound connections are never created here: a non-client-initiated
 *   packet with no match yields -ENOENT
 * - NOTE(review): lines are elided from this chunk (loop-exit gotos on the
 *   first search, the x_clflag declaration, candidate discard/free paths,
 *   and the rejoin points after the found_* labels) — control flow between
 *   fragments cannot be fully confirmed here
 */
215 int rxrpc_connection_lookup(struct rxrpc_peer *peer,
216 struct rxrpc_message *msg,
217 struct rxrpc_connection **_conn)
219 struct rxrpc_connection *conn, *candidate = NULL;
220 struct list_head *_p;
222 u32 x_epoch, x_connid;
223 u16 x_port, x_secix, x_servid;
226 _enter("%p{{%hu}},%u,%hu",
229 ntohs(msg->pkt->h.uh->source),
230 ntohs(msg->hdr.serviceId));
/* extract the match key from the packet; port/IDs stay in network byte
 * order so they compare directly with the stored values */
232 x_port = msg->pkt->h.uh->source;
233 x_epoch = msg->hdr.epoch;
234 x_clflag = msg->hdr.flags & RXRPC_CLIENT_INITIATED;
/* mask off the channel bits to get the connection part of the CID */
235 x_connid = htonl(ntohl(msg->hdr.cid) & RXRPC_CIDMASK);
236 x_servid = msg->hdr.serviceId;
237 x_secix = msg->hdr.securityIndex;
239 /* [common case] search the transport's active list first */
240 read_lock(&peer->conn_lock);
241 list_for_each(_p, &peer->conn_active) {
242 conn = list_entry(_p, struct rxrpc_connection, link);
243 if (conn->addr.sin_port == x_port &&
244 conn->in_epoch == x_epoch &&
245 conn->conn_id == x_connid &&
246 conn->security_ix == x_secix &&
247 conn->service_id == x_servid &&
248 conn->in_clientflag == x_clflag)
251 read_unlock(&peer->conn_lock);
253 /* [uncommon case] not active
254 * - create a candidate for a new record if an inbound connection
255 * - only examine the graveyard for an outbound connection
258 ret = __rxrpc_create_connection(peer, &candidate);
260 _leave(" = %d", ret);
264 /* fill in the specifics */
265 candidate->addr.sin_family = AF_INET;
266 candidate->addr.sin_port = x_port;
267 candidate->addr.sin_addr.s_addr = msg->pkt->nh.iph->saddr;
268 candidate->in_epoch = x_epoch;
269 candidate->out_epoch = x_epoch;
/* the remote end initiated this connection, so the flags are the mirror
 * image of the outgoing case in rxrpc_create_connection() */
270 candidate->in_clientflag = RXRPC_CLIENT_INITIATED;
271 candidate->out_clientflag = 0;
272 candidate->conn_id = x_connid;
273 candidate->service_id = x_servid;
274 candidate->security_ix = x_secix;
277 /* search the active list again, just in case it appeared whilst we
279 write_lock(&peer->conn_lock);
280 list_for_each(_p, &peer->conn_active) {
281 conn = list_entry(_p, struct rxrpc_connection, link);
282 if (conn->addr.sin_port == x_port &&
283 conn->in_epoch == x_epoch &&
284 conn->conn_id == x_connid &&
285 conn->security_ix == x_secix &&
286 conn->service_id == x_servid &&
287 conn->in_clientflag == x_clflag)
288 goto found_active_second_chance;
291 /* search the transport's graveyard list */
292 spin_lock(&peer->conn_gylock);
293 list_for_each(_p, &peer->conn_graveyard) {
294 conn = list_entry(_p, struct rxrpc_connection, link);
295 if (conn->addr.sin_port == x_port &&
296 conn->in_epoch == x_epoch &&
297 conn->conn_id == x_connid &&
298 conn->security_ix == x_secix &&
299 conn->service_id == x_servid &&
300 conn->in_clientflag == x_clflag)
301 goto found_in_graveyard;
303 spin_unlock(&peer->conn_gylock);
305 /* outbound connections aren't created here */
307 write_unlock(&peer->conn_lock);
308 _leave(" = -ENOENT");
312 /* we can now add the new candidate to the list */
313 _debug("created connection: {%08x} [in]", htonl(candidate->conn_id));
/* the committed connection pins refs on the peer and the peer's count */
314 rxrpc_get_peer(peer);
317 atomic_inc(&peer->conn_count);
321 list_add_tail(&conn->link, &peer->conn_active);
324 write_unlock(&peer->conn_lock);
/* discard path for the unused candidate: release its ID-list entry and
 * drop the accounting count (free of the record itself is elided from
 * this chunk — confirm against full source) */
327 write_lock(&peer->conn_idlock);
328 list_del(&candidate->id_link);
329 write_unlock(&peer->conn_idlock);
331 __RXACCT(atomic_dec(&rxrpc_connection_count));
/* publish on the global connection list */
336 down_write(&rxrpc_conns_sem);
337 list_add_tail(&conn->proc_link, &rxrpc_conns);
338 up_write(&rxrpc_conns_sem);
343 _leave(" = 0 (%p)", conn);
346 /* handle the connection being found in the active list straight off */
348 rxrpc_get_connection(conn);
349 read_unlock(&peer->conn_lock);
352 /* handle resurrecting a connection from the graveyard */
354 _debug("resurrecting connection: {%08x} [in]", htonl(conn->conn_id));
355 rxrpc_get_peer(peer);
356 rxrpc_get_connection(conn);
/* cancel the pending destruction and pull the record off the graveyard */
357 rxrpc_krxtimod_del_timer(&conn->timeout);
358 list_del_init(&conn->link);
359 spin_unlock(&peer->conn_gylock);
362 /* handle finding the connection on the second time through the active
364 found_active_second_chance:
365 rxrpc_get_connection(conn);
368 } /* end rxrpc_connection_lookup() */
370 /*****************************************************************************/
372 * finish using a connection record
373 * - it will be transferred to the peer's connection graveyard when refcount
/*
 * release a reference on a connection record
 * - when the last ref is dropped the connection is moved from its current
 *   list to the peer's graveyard and the destruction timer is armed; final
 *   teardown happens in rxrpc_conn_do_timeout() unless it is resurrected
 *   first
 * - the dec-and-test is done under peer->conn_gylock so burial and
 *   resurrection cannot race
 */
376 void rxrpc_put_connection(struct rxrpc_connection *conn)
378 struct rxrpc_peer *peer;
383 _enter("%p{u=%d p=%hu}",
384 conn, atomic_read(&conn->usage), ntohs(conn->addr.sin_port));
387 spin_lock(&peer->conn_gylock);
/* sanity check: putting an already-dead connection is a bug (the BUG/
 * assert body is in an elided line of this chunk) */
390 if (atomic_read(&conn->usage) <= 0)
/* fast path: not the last ref — just drop the lock and return */
393 if (likely(!atomic_dec_and_test(&conn->usage))) {
394 spin_unlock(&peer->conn_gylock);
399 /* move to graveyard queue */
400 _debug("burying connection: {%08x}", htonl(conn->conn_id));
401 list_del(&conn->link);
402 list_add_tail(&conn->link, &peer->conn_graveyard);
/* delayed destruction: rxrpc_conn_timeout is in seconds */
404 rxrpc_krxtimod_add_timer(&conn->timeout, rxrpc_conn_timeout * HZ);
406 spin_unlock(&peer->conn_gylock);
408 rxrpc_put_peer(conn->peer);
411 } /* end rxrpc_put_connection() */
413 /*****************************************************************************/
415 * free a connection record
/*
 * destroy a connection record whose graveyard timeout has expired
 * - if the connection was resurrected in the meantime (usage > 0) the
 *   removal is abandoned and we just return
 * - otherwise the record is unlinked from the graveyard, the global list
 *   and the peer's ID list, and the waiter on an empty graveyard (see
 *   rxrpc_conn_clearall) is woken when the peer's conn count hits zero
 * - NOTE(review): the kfree/put of the record itself and the peer pointer
 *   setup are in lines elided from this chunk
 */
417 void rxrpc_conn_do_timeout(struct rxrpc_connection *conn)
419 struct rxrpc_peer *peer;
421 _enter("%p{u=%d p=%hu}",
422 conn, atomic_read(&conn->usage), ntohs(conn->addr.sin_port));
/* a negative usage count can never be valid */
426 if (atomic_read(&conn->usage) < 0)
429 /* remove from graveyard if still dead */
430 spin_lock(&peer->conn_gylock);
431 if (atomic_read(&conn->usage) == 0) {
432 list_del_init(&conn->link);
437 spin_unlock(&peer->conn_gylock);
441 return; /* resurrected */
444 _debug("--- Destroying Connection %p{%08x} ---",
445 conn, htonl(conn->conn_id));
/* withdraw from the global list */
447 down_write(&rxrpc_conns_sem);
448 list_del(&conn->proc_link);
449 up_write(&rxrpc_conns_sem);
/* release the connection's ID back to the peer */
451 write_lock(&peer->conn_idlock);
452 list_del(&conn->id_link);
453 write_unlock(&peer->conn_idlock);
455 __RXACCT(atomic_dec(&rxrpc_connection_count));
458 /* if the graveyard is now empty, wake up anyone waiting for that */
459 if (atomic_dec_and_test(&peer->conn_count))
460 wake_up(&peer->conn_gy_waitq);
462 _leave(" [destroyed]");
463 } /* end rxrpc_conn_do_timeout() */
465 /*****************************************************************************/
467 * clear all connection records from a peer endpoint
/*
 * clear all connection records from a peer endpoint (peer teardown)
 * - the active list is expected to be empty by this point
 * - every graveyard entry has its timer cancelled and is expired by hand
 *   via rxrpc_conn_do_timeout(), dropping the graveyard lock around each
 *   call since the destructor takes it itself
 * - finally sleeps uninterruptibly on peer->conn_gy_waitq until
 *   peer->conn_count reaches zero (woken by rxrpc_conn_do_timeout)
 */
469 void rxrpc_conn_clearall(struct rxrpc_peer *peer)
471 DECLARE_WAITQUEUE(myself, current);
473 struct rxrpc_connection *conn;
478 /* there shouldn't be any active conns remaining */
479 if (!list_empty(&peer->conn_active))
482 /* manually timeout all conns in the graveyard */
483 spin_lock(&peer->conn_gylock);
484 while (!list_empty(&peer->conn_graveyard)) {
485 conn = list_entry(peer->conn_graveyard.next,
486 struct rxrpc_connection, link);
/* cancel the pending timer so only we run the destructor; err presumably
 * distinguishes whether the timer was still pending (checked in an elided
 * line — confirm against full source) */
487 err = rxrpc_krxtimod_del_timer(&conn->timeout);
488 spin_unlock(&peer->conn_gylock);
491 rxrpc_conn_do_timeout(conn);
493 spin_lock(&peer->conn_gylock);
495 spin_unlock(&peer->conn_gylock);
497 /* wait for the conn graveyard to be completely cleared */
498 set_current_state(TASK_UNINTERRUPTIBLE);
499 add_wait_queue(&peer->conn_gy_waitq, &myself);
501 while (atomic_read(&peer->conn_count) != 0) {
/* re-set state each iteration: the wakeup sets us back to RUNNING */
503 set_current_state(TASK_UNINTERRUPTIBLE);
506 remove_wait_queue(&peer->conn_gy_waitq, &myself);
507 set_current_state(TASK_RUNNING);
510 } /* end rxrpc_conn_clearall() */
512 /*****************************************************************************/
514 * allocate and prepare a message for sending out through the transport
/*
 * allocate and prepare a message for sending on a connection
 * - builds the wire header from the connection's epoch/CID/flags/security/
 *   service fields, merging the call's channel index and call ID when a
 *   call is supplied
 * - DATA packets are assigned the call's next send sequence number
 * - iovec 0 always points at the embedded header; the caller's dcount
 *   iovecs are copied in after it, accumulating msg->dsize
 * - NOTE(review): the dcount bounds check (implied by the -EINVAL leave),
 *   the switch statement header and the ACK seq-number handling are in
 *   lines elided from this chunk
 */
517 int rxrpc_conn_newmsg(struct rxrpc_connection *conn,
518 struct rxrpc_call *call,
523 struct rxrpc_message **_msg)
525 struct rxrpc_message *msg;
528 _enter("%p{%d},%p,%u", conn, ntohs(conn->addr.sin_port), call, type);
531 _leave(" = -EINVAL");
535 msg = kmalloc(sizeof(struct rxrpc_message), alloc_flags);
537 _leave(" = -ENOMEM");
541 memset(msg, 0, sizeof(*msg));
542 atomic_set(&msg->usage, 1);
544 INIT_LIST_HEAD(&msg->link);
546 msg->state = RXRPC_MSG_PREPARED;
/* the low CID bits carry the per-call channel index (see the ID
 * allocation in rxrpc_create_connection) */
548 msg->hdr.epoch = conn->out_epoch;
549 msg->hdr.cid = conn->conn_id | (call ? call->chan_ix : 0);
550 msg->hdr.callNumber = call ? call->call_id : 0;
551 msg->hdr.type = type;
552 msg->hdr.flags = conn->out_clientflag;
553 msg->hdr.securityIndex = conn->security_ix;
554 msg->hdr.serviceId = conn->service_id;
556 /* generate sequence numbers for data packets */
559 case RXRPC_PACKET_TYPE_DATA:
560 msg->seq = ++call->snd_seq_count;
561 msg->hdr.seq = htonl(msg->seq);
563 case RXRPC_PACKET_TYPE_ACK:
564 /* ACK sequence numbers are complicated. The following
566 * - jumbo packet ACKs should have a seq number
567 * - normal ACKs should not
/* iovec 0 is the wire header itself; caller data follows */
574 msg->dcount = dcount + 1;
575 msg->dsize = sizeof(msg->hdr);
576 msg->data[0].iov_len = sizeof(msg->hdr);
577 msg->data[0].iov_base = &msg->hdr;
579 for (loop=0; loop < dcount; loop++) {
580 msg->dsize += diov[loop].iov_len;
581 msg->data[loop+1].iov_len = diov[loop].iov_len;
582 msg->data[loop+1].iov_base = diov[loop].iov_base;
585 __RXACCT(atomic_inc(&rxrpc_message_count));
587 _leave(" = 0 (%p) #%d", msg, atomic_read(&rxrpc_message_count));
589 } /* end rxrpc_conn_newmsg() */
591 /*****************************************************************************/
/*
 * release a message record (slow path once the usage count has dropped)
 * - drops the message's ref on its connection and kfree()s each data iovec
 *   whose ownership bit is set in msg->dfree (up to 8 iovecs)
 * - NOTE(review): the guard on msg->conn being non-NULL and the final
 *   kfree of the message itself are in lines elided from this chunk
 */
595 void __rxrpc_put_message(struct rxrpc_message *msg)
599 _enter("%p #%d", msg, atomic_read(&rxrpc_message_count));
603 rxrpc_put_connection(msg->conn);
605 for (loop = 0; loop < 8; loop++)
606 if (test_bit(loop, &msg->dfree))
607 kfree(msg->data[loop].iov_base);
609 __RXACCT(atomic_dec(&rxrpc_message_count));
613 } /* end __rxrpc_put_message() */
615 /*****************************************************************************/
617 * send a message out through the transport endpoint
/*
 * transmit a prepared message through the connection's transport socket
 * - stamps the next outbound serial number into the header under
 *   conn->lock, then sends all of msg->data non-blocking
 *   (MSG_CONFIRM | MSG_DONTWAIT) to the connection's peer address
 * - on success the send time is recorded in both conn->atime and
 *   msg->stamp; on failure the message is marked RXRPC_MSG_ERROR
 * - returns the sock_sendmsg() result (error-path branching is in lines
 *   elided from this chunk)
 */
619 int rxrpc_conn_sendmsg(struct rxrpc_connection *conn,
620 struct rxrpc_message *msg)
622 struct msghdr msghdr;
626 _enter("%p{%d}", conn, ntohs(conn->addr.sin_port));
628 /* fill in some fields in the header */
629 spin_lock(&conn->lock);
630 msg->hdr.serial = htonl(++conn->serial_counter);
632 spin_unlock(&conn->lock);
634 /* set up the message to be transmitted */
635 msghdr.msg_name = &conn->addr;
636 msghdr.msg_namelen = sizeof(conn->addr);
638 * the following is safe, since for compiler definitions of kvec and
639 * iovec are identical, yielding the same in-core layout and alignment
641 msghdr.msg_iov = (struct iovec *)msg->data;
642 msghdr.msg_iovlen = msg->dcount;
643 msghdr.msg_control = NULL;
644 msghdr.msg_controllen = 0;
645 msghdr.msg_flags = MSG_CONFIRM | MSG_DONTWAIT;
647 _net("Sending message type %d of %Zd bytes to %08x:%d",
650 htonl(conn->addr.sin_addr.s_addr),
651 htons(conn->addr.sin_port));
653 /* send the message */
656 ret = sock_sendmsg(conn->trans->socket, &msghdr, msg->dsize);
660 msg->state = RXRPC_MSG_ERROR;
663 msg->state = RXRPC_MSG_SENT;
/* record the successful transmission time on both conn and message */
666 spin_lock(&conn->lock);
667 do_gettimeofday(&conn->atime);
668 msg->stamp = conn->atime;
669 spin_unlock(&conn->lock);
672 _leave(" = %d", ret);
675 } /* end rxrpc_conn_sendmsg() */
677 /*****************************************************************************/
679 * deal with a subsequent call packet
/*
 * deal with a subsequent packet for a call on this connection
 * - resolves the call from the channel bits of the packet's CID; a
 *   missing or mismatched call triggers an immediate transport abort of
 *   the offending packet
 * - refreshes the connection's MTU estimate from the packet's route
 *   device, queues the message on the call's receive queue in sequence
 *   order, arms the call's receive-activity timer and queues the call for
 *   krxiod processing
 * - NOTE(review): elided lines include the cix declaration, the branch
 *   structure around the two rxrpc_get_call() sites (one appears to be a
 *   path where the caller already supplied the call), the mtu_size
 *   assignment LHS, and the seq-comparison inside the insertion loop —
 *   confirm against full source
 */
681 int rxrpc_conn_receive_call_packet(struct rxrpc_connection *conn,
682 struct rxrpc_call *call,
683 struct rxrpc_message *msg)
685 struct rxrpc_message *pmsg;
686 struct list_head *_p;
690 _enter("%p,%p,%p", conn, call, msg);
/* channel index lives in the low bits of the CID */
693 cix = ntohl(msg->hdr.cid) & RXRPC_CHANNELMASK;
695 spin_lock(&conn->lock);
696 call = conn->channels[cix];
698 if (!call || call->call_id != msg->hdr.callNumber) {
699 spin_unlock(&conn->lock);
700 rxrpc_trans_immediate_abort(conn->trans, msg, -ENOENT);
704 rxrpc_get_call(call);
705 spin_unlock(&conn->lock);
709 rxrpc_get_call(call);
712 _proto("Received packet %%%u [%u] on call %hu:%u:%u",
713 htonl(msg->hdr.serial),
715 htons(msg->hdr.serviceId),
716 htonl(conn->conn_id),
717 htonl(call->call_id));
719 call->pkt_rcv_count++;
/* refine the connection MTU from the inbound route's device */
721 if (msg->pkt->dst && msg->pkt->dst->dev)
723 msg->pkt->dst->dev->mtu -
724 msg->pkt->dst->dev->hard_header_len;
726 /* queue on the call in seq order */
727 rxrpc_get_message(msg);
730 spin_lock(&call->lock);
731 list_for_each(_p, &call->rcv_receiveq) {
732 pmsg = list_entry(_p, struct rxrpc_message, link);
/* insert before _p to keep the receive queue in sequence order */
736 list_add_tail(&msg->link, _p);
738 /* reset the activity timeout */
739 call->flags |= RXRPC_CALL_RCV_PKT;
740 mod_timer(&call->rcv_timeout,jiffies + rxrpc_call_rcv_timeout * HZ);
742 spin_unlock(&call->lock);
/* hand the call to the krxiod thread for processing */
744 rxrpc_krxiod_queue_call(call);
746 rxrpc_put_call(call);
748 _leave(" = %d", ret);
750 } /* end rxrpc_conn_receive_call_packet() */
752 /*****************************************************************************/
754 * handle an ICMP error being applied to a connection
/*
 * handle an ICMP error being applied to a connection
 * - snapshots all four channel slots under conn->lock, taking a ref on
 *   each live call so the array stays valid after the lock is dropped
 * - then dispatches the error to each call outside the lock and releases
 *   the refs (the NULL-slot skip in the second loop is in an elided line)
 */
756 void rxrpc_conn_handle_error(struct rxrpc_connection *conn,
757 int local, int errno)
759 struct rxrpc_call *calls[4];
762 _enter("%p{%d},%d", conn, ntohs(conn->addr.sin_port), errno);
764 /* get a ref to all my calls in one go */
765 memset(calls, 0, sizeof(calls));
766 spin_lock(&conn->lock);
768 for (loop = 3; loop >= 0; loop--) {
769 if (conn->channels[loop]) {
770 calls[loop] = conn->channels[loop];
771 rxrpc_get_call(calls[loop]);
775 spin_unlock(&conn->lock);
777 /* now kick them all */
778 for (loop = 3; loop >= 0; loop--) {
780 rxrpc_call_handle_error(calls[loop], local, errno);
781 rxrpc_put_call(calls[loop]);
786 } /* end rxrpc_conn_handle_error() */