static void xprt_bind_socket(struct rpc_xprt *, struct socket *);
static int __xprt_get_cong(struct rpc_xprt *, struct rpc_task *);
+static int xprt_clear_backlog(struct rpc_xprt *xprt);
+
#ifdef RPC_DEBUG_DATA
/*
* Print the buffer contents (first 128 bytes only--just enough for
struct rpc_rqst *rovr;
struct sk_buff *skb;
int err, repsize, copied;
- u32 xid;
+ u32 _xid, *xp;
read_lock(&sk->sk_callback_lock);
dprintk("RPC: udp_data_ready...\n");
}
/* Copy the XID from the skb... */
- if (skb_copy_bits(skb, sizeof(struct udphdr), &xid, sizeof(xid)) < 0)
+ xp = skb_header_pointer(skb, sizeof(struct udphdr),
+ sizeof(_xid), &_xid);
+ if (xp == NULL)
goto dropit;
/* Look up and lock the request corresponding to the given XID */
spin_lock(&xprt->sock_lock);
- rovr = xprt_lookup_rqst(xprt, xid);
+ rovr = xprt_lookup_rqst(xprt, *xp);
if (!rovr)
goto out_unlock;
task = rovr->rq_task;
xprt->tcp_flags &= ~XPRT_COPY_XID;
xprt->tcp_flags |= XPRT_COPY_DATA;
xprt->tcp_copied = 4;
- dprintk("RPC: reading reply for XID %08x\n", xprt->tcp_xid);
+ dprintk("RPC: reading reply for XID %08x\n",
+ ntohl(xprt->tcp_xid));
tcp_check_recm(xprt);
}
if (!req) {
xprt->tcp_flags &= ~XPRT_COPY_DATA;
dprintk("RPC: XID %08x request not found!\n",
- xprt->tcp_xid);
+ ntohl(xprt->tcp_xid));
spin_unlock(&xprt->sock_lock);
return;
}
tcp_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb,
unsigned int offset, size_t len)
{
- struct rpc_xprt *xprt = (struct rpc_xprt *)rd_desc->buf;
+ struct rpc_xprt *xprt = rd_desc->arg.data;
skb_reader_t desc = {
.skb = skb,
.offset = offset,
goto out;
/* We use rd_desc to pass struct xprt to tcp_data_recv */
- rd_desc.buf = (char *)xprt;
+ rd_desc.arg.data = xprt;
rd_desc.count = 65536;
tcp_read_sock(sk, &rd_desc, tcp_data_recv);
out:
dprintk("RPC: tcp_state_change client %p...\n", xprt);
dprintk("RPC: state %x conn %d dead %d zapped %d\n",
sk->sk_state, xprt_connected(xprt),
- sock_flag(sk, SOCK_DEAD), sk->sk_zapped);
+ sock_flag(sk, SOCK_DEAD),
+ sock_flag(sk, SOCK_ZAPPED));
switch (sk->sk_state) {
case TCP_ESTABLISHED:
/* Wait until we have enough socket memory */
if (xprt->stream) {
- /* from net/ipv4/tcp.c:tcp_write_space */
- if (tcp_wspace(sk) < tcp_min_write_space(sk))
+ /* from net/core/stream.c:sk_stream_write_space */
+ if (sk_stream_wspace(sk) < sk_stream_min_wspace(sk))
goto out;
} else {
/* from net/core/sock.c:sock_def_write_space */
goto out;
spin_lock_bh(&xprt->sock_lock);
- if (xprt->snd_task && xprt->snd_task->tk_rpcwait == &xprt->pending)
+ if (xprt->snd_task)
rpc_wake_up_task(xprt->snd_task);
spin_unlock_bh(&xprt->sock_lock);
out:
/*
* Reserve an RPC call slot.
*/
-void
-xprt_reserve(struct rpc_task *task)
-{
- struct rpc_xprt *xprt = task->tk_xprt;
-
- task->tk_status = -EIO;
- if (!xprt->shutdown) {
- spin_lock(&xprt->xprt_lock);
- do_xprt_reserve(task);
- spin_unlock(&xprt->xprt_lock);
- if (task->tk_rqstp)
- del_timer_sync(&xprt->timer);
- }
-}
-
static inline void
do_xprt_reserve(struct rpc_task *task)
{
rpc_sleep_on(&xprt->backlog, task, NULL, NULL);
}
+/*
+ * Reserve an RPC request slot for @task.
+ *
+ * Sets tk_status to -EIO up front so a shut-down transport fails the
+ * task; otherwise takes xprt_lock and calls do_xprt_reserve(), which
+ * either assigns a request slot or sleeps the task on xprt->backlog.
+ * If a slot was assigned (tk_rqstp non-NULL), the pending xprt->timer
+ * is cancelled synchronously — presumably the transport's inactivity
+ * timer; confirm against the timer setup elsewhere in this file.
+ *
+ * NOTE(review): moved below do_xprt_reserve() by this patch so the
+ * static inline helper is defined before its caller.
+ */
+void
+xprt_reserve(struct rpc_task *task)
+{
+	struct rpc_xprt *xprt = task->tk_xprt;
+
+	/* Default to failure; cleared by a successful reserve path. */
+	task->tk_status = -EIO;
+	if (!xprt->shutdown) {
+		spin_lock(&xprt->xprt_lock);
+		do_xprt_reserve(task);
+		spin_unlock(&xprt->xprt_lock);
+		/* Slot granted: stop the transport timer (sync, so it is
+		 * guaranteed not to be running after this returns). */
+		if (task->tk_rqstp)
+			del_timer_sync(&xprt->timer);
+	}
+}
+
/*
* Allocate a 'unique' XID
*/
req->rq_xprt = xprt;
req->rq_xid = xprt_alloc_xid(xprt);
dprintk("RPC: %4d reserved req %p xid %08x\n", task->tk_pid,
- req, req->rq_xid);
+ req, ntohl(req->rq_xid));
}
/*
/*
* Set default timeout parameters
*/
-void
+static void
xprt_default_timeout(struct rpc_timeout *to, int proto)
{
if (proto == IPPROTO_UDP)
if (xprt->stream) {
xprt->cwnd = RPC_MAXCWND(xprt);
xprt->nocong = 1;
- } else
+ xprt->max_payload = (1U << 31) - 1;
+ } else {
xprt->cwnd = RPC_INITCWND;
+ xprt->max_payload = (1U << 16) - (MAX_HEADER << 3);
+ }
spin_lock_init(&xprt->sock_lock);
spin_lock_init(&xprt->xprt_lock);
init_waitqueue_head(&xprt->cong_wait);
sk->sk_no_check = UDP_CSUM_NORCV;
xprt_set_connected(xprt);
} else {
- struct tcp_opt *tp = tcp_sk(sk);
- tp->nonagle = 1; /* disable Nagle's algorithm */
+ tcp_sk(sk)->nonagle = 1; /* disable Nagle's algorithm */
sk->sk_data_ready = tcp_data_ready;
sk->sk_state_change = tcp_state_change;
xprt_clear_connected(xprt);
/*
* Prepare for transport shutdown.
*/
-void
+static void
xprt_shutdown(struct rpc_xprt *xprt)
{
xprt->shutdown = 1;
/*
* Clear the xprt backlog queue
*/
-int
+static int
xprt_clear_backlog(struct rpc_xprt *xprt) {
rpc_wake_up_next(&xprt->backlog);
wake_up(&xprt->cong_wait);