git://git.onelab.eu
/
linux-2.6.git
/ blobdiff
commit
grep
author
committer
pickaxe
?
search:
re
summary
|
shortlog
|
log
|
commit
|
commitdiff
|
tree
raw
|
inline
| side by side
linux 2.6.16.38 w/ vs2.0.3-rc1
[linux-2.6.git]
/
net
/
ipv4
/
tcp_cong.c
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
index 7ff2e42..e688c68 100644 (file)
--- a/net/ipv4/tcp_cong.c
+++ b/net/ipv4/tcp_cong.c
@@ -6,6 +6,7 @@
* Copyright (C) 2005 Stephen Hemminger <shemminger@osdl.org>
*/
* Copyright (C) 2005 Stephen Hemminger <shemminger@osdl.org>
*/
+#include <linux/config.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/types.h>
@@ -37,7 +38,7 @@
int tcp_register_congestion_control(struct tcp_congestion_ops *ca)
int ret = 0;
/* all algorithms must implement ssthresh and cong_avoid ops */
int ret = 0;
/* all algorithms must implement ssthresh and cong_avoid ops */
- if (!ca->ssthresh || !ca->cong_avoid) {
+ if (!ca->ssthresh || !ca->cong_avoid || !ca->min_cwnd) {
printk(KERN_ERR "TCP %s does not implement required ops\n",
ca->name);
return -EINVAL;
printk(KERN_ERR "TCP %s does not implement required ops\n",
ca->name);
return -EINVAL;
@@ -189,7 +190,7 @@
void tcp_slow_start(struct tcp_sock *tp)
return;
/* We MAY increase by 2 if discovered delayed ack */
return;
/* We MAY increase by 2 if discovered delayed ack */
- if (sysctl_tcp_abc > 1 && tp->bytes_acked >= 2*tp->mss_cache) {
+ if (sysctl_tcp_abc > 1 && tp->bytes_acked > 2*tp->mss_cache) {
if (tp->snd_cwnd < tp->snd_cwnd_clamp)
tp->snd_cwnd++;
}
if (tp->snd_cwnd < tp->snd_cwnd_clamp)
tp->snd_cwnd++;
}
@@ -222,7 +223,7 @@
void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 rtt, u32 in_flight,
/* In dangerous area, increase slowly. */
else if (sysctl_tcp_abc) {
/* In dangerous area, increase slowly. */
else if (sysctl_tcp_abc) {
- /* RFC3465: Appropriate Byte Count
+ /* RFC3465: Apppriate Byte Count
* increase once for each full cwnd acked
*/
if (tp->bytes_acked >= tp->snd_cwnd*tp->mss_cache) {
* increase once for each full cwnd acked
*/
if (tp->bytes_acked >= tp->snd_cwnd*tp->mss_cache) {
@@ -250,8 +251,8 @@
u32 tcp_reno_ssthresh(struct sock *sk)
}
EXPORT_SYMBOL_GPL(tcp_reno_ssthresh);
}
EXPORT_SYMBOL_GPL(tcp_reno_ssthresh);
-/* Lower bound on congestion window with halving. */
-u32 tcp_reno_min_cwnd(const struct sock *sk)
+/* Lower bound on congestion window. */
+u32 tcp_reno_min_cwnd(struct sock *sk)
{
const struct tcp_sock *tp = tcp_sk(sk);
return tp->snd_ssthresh/2;
{
const struct tcp_sock *tp = tcp_sk(sk);
return tp->snd_ssthresh/2;