git://git.onelab.eu
/
linux-2.6.git
/ blobdiff
commit
grep
author
committer
pickaxe
?
search:
re
summary
|
shortlog
|
log
|
commit
|
commitdiff
|
tree
raw
|
inline
| side by side
Fedora kernel-2.6.17-1.2142_FC4 patched with stable patch-2.6.17.4-vs2.0.2-rc26.diff
[linux-2.6.git]
/
net
/
ipv4
/
ipvs
/
ip_vs_ctl.c
diff --git
a/net/ipv4/ipvs/ip_vs_ctl.c
b/net/ipv4/ipvs/ip_vs_ctl.c
index
5df55ca
..
f28ec68
100644
(file)
--- a/
net/ipv4/ipvs/ip_vs_ctl.c
+++ b/
net/ipv4/ipvs/ip_vs_ctl.c
@@
-23,18
+23,21
@@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
+#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/sysctl.h>
#include <linux/proc_fs.h>
#include <linux/fs.h>
#include <linux/sysctl.h>
#include <linux/proc_fs.h>
-#include <linux/timer.h>
+#include <linux/workqueue.h>
#include <linux/swap.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>
#include <linux/swap.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>
+#include <linux/mutex.h>
#include <net/ip.h>
#include <net/ip.h>
+#include <net/route.h>
#include <net/sock.h>
#include <asm/uaccess.h>
#include <net/sock.h>
#include <asm/uaccess.h>
@@
-42,27
+45,27
@@
#include <net/ip_vs.h>
/* semaphore for IPVS sockopts. And, [gs]etsockopt may sleep. */
#include <net/ip_vs.h>
/* semaphore for IPVS sockopts. And, [gs]etsockopt may sleep. */
-static DECLARE_MUTEX(__ip_vs_mutex);
+static DEFINE_MUTEX(__ip_vs_mutex);
/* lock for service table */
/* lock for service table */
-static rwlock_t __ip_vs_svc_lock = RW_LOCK_UNLOCKED;
+static DEFINE_RWLOCK(__ip_vs_svc_lock);
/* lock for table with the real services */
/* lock for table with the real services */
-static rwlock_t __ip_vs_rs_lock = RW_LOCK_UNLOCKED;
+static DEFINE_RWLOCK(__ip_vs_rs_lock);
/* lock for state and timeout tables */
/* lock for state and timeout tables */
-static rwlock_t __ip_vs_securetcp_lock = RW_LOCK_UNLOCKED;
+static DEFINE_RWLOCK(__ip_vs_securetcp_lock);
/* lock for drop entry handling */
/* lock for drop entry handling */
-static spinlock_t __ip_vs_dropentry_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(__ip_vs_dropentry_lock);
/* lock for drop packet handling */
/* lock for drop packet handling */
-static spinlock_t __ip_vs_droppacket_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(__ip_vs_droppacket_lock);
/* 1/rate drop and drop-entry variables */
int ip_vs_drop_rate = 0;
int ip_vs_drop_counter = 0;
/* 1/rate drop and drop-entry variables */
int ip_vs_drop_rate = 0;
int ip_vs_drop_counter = 0;
-atomic_t ip_vs_dropentry = ATOMIC_INIT(0);
+static atomic_t ip_vs_dropentry = ATOMIC_INIT(0);
/* number of virtual services */
static int ip_vs_num_services = 0;
/* number of virtual services */
static int ip_vs_num_services = 0;
@@
-75,6
+78,7
@@
static int sysctl_ip_vs_amemthresh = 1024;
static int sysctl_ip_vs_am_droprate = 10;
int sysctl_ip_vs_cache_bypass = 0;
int sysctl_ip_vs_expire_nodest_conn = 0;
static int sysctl_ip_vs_am_droprate = 10;
int sysctl_ip_vs_cache_bypass = 0;
int sysctl_ip_vs_expire_nodest_conn = 0;
+int sysctl_ip_vs_expire_quiescent_template = 0;
int sysctl_ip_vs_sync_threshold[2] = { 3, 50 };
int sysctl_ip_vs_nat_icmp_send = 0;
int sysctl_ip_vs_sync_threshold[2] = { 3, 50 };
int sysctl_ip_vs_nat_icmp_send = 0;
@@
-89,7
+93,8
@@
int ip_vs_get_debug_level(void)
#endif
/*
#endif
/*
- * update_defense_level is called from timer bh and from sysctl.
+ * update_defense_level is called from keventd and from sysctl,
+ * so it needs to protect itself from softirqs
*/
static void update_defense_level(void)
{
*/
static void update_defense_level(void)
{
@@
-109,6
+114,8
@@
static void update_defense_level(void)
nomem = (availmem < sysctl_ip_vs_amemthresh);
nomem = (availmem < sysctl_ip_vs_amemthresh);
+ local_bh_disable();
+
/* drop_entry */
spin_lock(&__ip_vs_dropentry_lock);
switch (sysctl_ip_vs_drop_entry) {
/* drop_entry */
spin_lock(&__ip_vs_dropentry_lock);
switch (sysctl_ip_vs_drop_entry) {
@@
-205,25
+212,27
@@
static void update_defense_level(void)
if (to_change >= 0)
ip_vs_protocol_timeout_change(sysctl_ip_vs_secure_tcp>1);
write_unlock(&__ip_vs_securetcp_lock);
if (to_change >= 0)
ip_vs_protocol_timeout_change(sysctl_ip_vs_secure_tcp>1);
write_unlock(&__ip_vs_securetcp_lock);
+
+ local_bh_enable();
}
/*
* Timer for checking the defense
*/
}
/*
* Timer for checking the defense
*/
-static struct timer_list defense_timer;
#define DEFENSE_TIMER_PERIOD 1*HZ
#define DEFENSE_TIMER_PERIOD 1*HZ
+static void defense_work_handler(void *data);
+static DECLARE_WORK(defense_work, defense_work_handler, NULL);
-static void defense_timer_handler(unsigned long data)
+static void defense_work_handler(void *data)
{
update_defense_level();
if (atomic_read(&ip_vs_dropentry))
ip_vs_random_dropentry();
{
update_defense_level();
if (atomic_read(&ip_vs_dropentry))
ip_vs_random_dropentry();
-	mod_timer(&defense_timer, jiffies + DEFENSE_TIMER_PERIOD);
+	schedule_delayed_work(&defense_work, DEFENSE_TIMER_PERIOD);
}
}
-
int
ip_vs_use_count_inc(void)
{
int
ip_vs_use_count_inc(void)
{
@@
-441,7
+450,7
@@
ip_vs_service_get(__u32 fwmark, __u16 protocol, __u32 vaddr, __u16 vport)
out:
read_unlock(&__ip_vs_svc_lock);
out:
read_unlock(&__ip_vs_svc_lock);
-	IP_VS_DBG(6, "lookup service: fwm %u %s %u.%u.%u.%u:%u %s\n",
+	IP_VS_DBG(9, "lookup service: fwm %u %s %u.%u.%u.%u:%u %s\n",
fwmark, ip_vs_proto_name(protocol),
NIPQUAD(vaddr), ntohs(vport),
svc?"hit":"not hit");
fwmark, ip_vs_proto_name(protocol),
NIPQUAD(vaddr), ntohs(vport),
svc?"hit":"not hit");
@@
-591,7
+600,7
@@
ip_vs_trash_get_dest(struct ip_vs_service *svc, __u32 daddr, __u16 dport)
*/
list_for_each_entry_safe(dest, nxt, &ip_vs_dest_trash, n_list) {
IP_VS_DBG(3, "Destination %u/%u.%u.%u.%u:%u still in trash, "
*/
list_for_each_entry_safe(dest, nxt, &ip_vs_dest_trash, n_list) {
IP_VS_DBG(3, "Destination %u/%u.%u.%u.%u:%u still in trash, "
-			  "refcnt=%d\n",
+			  "dest->refcnt=%d\n",
dest->vfwmark,
NIPQUAD(dest->addr), ntohs(dest->port),
atomic_read(&dest->refcnt));
dest->vfwmark,
NIPQUAD(dest->addr), ntohs(dest->port),
atomic_read(&dest->refcnt));
@@
-746,8
+755,8
@@
ip_vs_new_dest(struct ip_vs_service *svc, struct ip_vs_dest_user *udest,
atomic_set(&dest->refcnt, 0);
INIT_LIST_HEAD(&dest->d_list);
atomic_set(&dest->refcnt, 0);
INIT_LIST_HEAD(&dest->d_list);
-	dest->dst_lock = SPIN_LOCK_UNLOCKED;
-	dest->stats.lock = SPIN_LOCK_UNLOCKED;
+	spin_lock_init(&dest->dst_lock);
+	spin_lock_init(&dest->stats.lock);
__ip_vs_update_dest(svc, dest, udest);
ip_vs_new_estimator(&dest->stats);
__ip_vs_update_dest(svc, dest, udest);
ip_vs_new_estimator(&dest->stats);
@@
-798,7
+807,7
@@
ip_vs_add_dest(struct ip_vs_service *svc, struct ip_vs_dest_user *udest)
dest = ip_vs_trash_get_dest(svc, daddr, dport);
if (dest != NULL) {
IP_VS_DBG(3, "Get destination %u.%u.%u.%u:%u from trash, "
dest = ip_vs_trash_get_dest(svc, daddr, dport);
if (dest != NULL) {
IP_VS_DBG(3, "Get destination %u.%u.%u.%u:%u from trash, "
-			  "refcnt=%d, service %u/%u.%u.%u.%u:%u\n",
+			  "dest->refcnt=%d, service %u/%u.%u.%u.%u:%u\n",
NIPQUAD(daddr), ntohs(dport),
atomic_read(&dest->refcnt),
dest->vfwmark,
NIPQUAD(daddr), ntohs(dport),
atomic_read(&dest->refcnt),
dest->vfwmark,
@@
-943,7
+952,8
@@
static void __ip_vs_del_dest(struct ip_vs_dest *dest)
atomic_dec(&dest->svc->refcnt);
kfree(dest);
} else {
atomic_dec(&dest->svc->refcnt);
kfree(dest);
} else {
- IP_VS_DBG(3, "Moving dest %u.%u.%u.%u:%u into trash, refcnt=%d\n",
+ IP_VS_DBG(3, "Moving dest %u.%u.%u.%u:%u into trash, "
+ "dest->refcnt=%d\n",
NIPQUAD(dest->addr), ntohs(dest->port),
atomic_read(&dest->refcnt));
list_add(&dest->n_list, &ip_vs_dest_trash);
NIPQUAD(dest->addr), ntohs(dest->port),
atomic_read(&dest->refcnt));
list_add(&dest->n_list, &ip_vs_dest_trash);
@@
-1062,8
+1072,8
@@
ip_vs_add_service(struct ip_vs_service_user *u, struct ip_vs_service **svc_p)
svc->netmask = u->netmask;
INIT_LIST_HEAD(&svc->destinations);
svc->netmask = u->netmask;
INIT_LIST_HEAD(&svc->destinations);
-	svc->sched_lock = RW_LOCK_UNLOCKED;
-	svc->stats.lock = SPIN_LOCK_UNLOCKED;
+	rwlock_init(&svc->sched_lock);
+	spin_lock_init(&svc->stats.lock);
/* Bind the scheduler */
ret = ip_vs_bind_scheduler(svc, sched);
/* Bind the scheduler */
ret = ip_vs_bind_scheduler(svc, sched);
@@
-1347,21
+1357,19
@@
static int ip_vs_zero_all(void)
static int
proc_do_defense_mode(ctl_table *table, int write, struct file * filp,
static int
proc_do_defense_mode(ctl_table *table, int write, struct file * filp,
-		     void __user *buffer, size_t *lenp)
+		     void __user *buffer, size_t *lenp, loff_t *ppos)
{
int *valp = table->data;
int val = *valp;
int rc;
{
int *valp = table->data;
int val = *valp;
int rc;
-	rc = proc_dointvec(table, write, filp, buffer, lenp);
+	rc = proc_dointvec(table, write, filp, buffer, lenp, ppos);
if (write && (*valp != val)) {
if ((*valp < 0) || (*valp > 3)) {
/* Restore the correct value */
*valp = val;
} else {
if (write && (*valp != val)) {
if ((*valp < 0) || (*valp > 3)) {
/* Restore the correct value */
*valp = val;
} else {
- local_bh_disable();
update_defense_level();
update_defense_level();
- local_bh_enable();
}
}
return rc;
}
}
return rc;
@@
-1370,7
+1378,7
@@
proc_do_defense_mode(ctl_table *table, int write, struct file * filp,
static int
proc_do_sync_threshold(ctl_table *table, int write, struct file *filp,
static int
proc_do_sync_threshold(ctl_table *table, int write, struct file *filp,
-		       void __user *buffer, size_t *lenp)
+		       void __user *buffer, size_t *lenp, loff_t *ppos)
{
int *valp = table->data;
int val[2];
{
int *valp = table->data;
int val[2];
@@
-1379,7
+1387,7
@@
proc_do_sync_threshold(ctl_table *table, int write, struct file *filp,
/* backup the value first */
memcpy(val, valp, sizeof(val));
/* backup the value first */
memcpy(val, valp, sizeof(val));
-	rc = proc_dointvec(table, write, filp, buffer, lenp);
+	rc = proc_dointvec(table, write, filp, buffer, lenp, ppos);
if (write && (valp[0] < 0 || valp[1] < 0 || valp[0] >= valp[1])) {
/* Restore the correct value */
memcpy(valp, val, sizeof(val));
if (write && (valp[0] < 0 || valp[1] < 0 || valp[0] >= valp[1])) {
/* Restore the correct value */
memcpy(valp, val, sizeof(val));
@@
-1447,9
+1455,9
@@
static struct ctl_table vs_vars[] = {
{
.ctl_name = NET_IPV4_VS_TO_ES,
.procname = "timeout_established",
{
.ctl_name = NET_IPV4_VS_TO_ES,
.procname = "timeout_established",
- .data = &vs_timeout_table_dos.timeout[IP_VS_S_ESTABLISHED],
+ .data = &vs_timeout_table_dos.timeout[IP_VS_S_ESTABLISHED],
.maxlen = sizeof(int),
.maxlen = sizeof(int),
- .mode = 0644,
+ .mode = 0644,
.proc_handler = &proc_dointvec_jiffies,
},
{
.proc_handler = &proc_dointvec_jiffies,
},
{
@@
-1457,7
+1465,7
@@
static struct ctl_table vs_vars[] = {
.procname = "timeout_synsent",
.data = &vs_timeout_table_dos.timeout[IP_VS_S_SYN_SENT],
.maxlen = sizeof(int),
.procname = "timeout_synsent",
.data = &vs_timeout_table_dos.timeout[IP_VS_S_SYN_SENT],
.maxlen = sizeof(int),
- .mode = 0644,
+ .mode = 0644,
.proc_handler = &proc_dointvec_jiffies,
},
{
.proc_handler = &proc_dointvec_jiffies,
},
{
@@
-1465,7
+1473,7
@@
static struct ctl_table vs_vars[] = {
.procname = "timeout_synrecv",
.data = &vs_timeout_table_dos.timeout[IP_VS_S_SYN_RECV],
.maxlen = sizeof(int),
.procname = "timeout_synrecv",
.data = &vs_timeout_table_dos.timeout[IP_VS_S_SYN_RECV],
.maxlen = sizeof(int),
- .mode = 0644,
+ .mode = 0644,
.proc_handler = &proc_dointvec_jiffies,
},
{
.proc_handler = &proc_dointvec_jiffies,
},
{
@@
-1473,7
+1481,7
@@
static struct ctl_table vs_vars[] = {
.procname = "timeout_finwait",
.data = &vs_timeout_table_dos.timeout[IP_VS_S_FIN_WAIT],
.maxlen = sizeof(int),
.procname = "timeout_finwait",
.data = &vs_timeout_table_dos.timeout[IP_VS_S_FIN_WAIT],
.maxlen = sizeof(int),
- .mode = 0644,
+ .mode = 0644,
.proc_handler = &proc_dointvec_jiffies,
},
{
.proc_handler = &proc_dointvec_jiffies,
},
{
@@
-1489,7
+1497,7
@@
static struct ctl_table vs_vars[] = {
.procname = "timeout_close",
.data = &vs_timeout_table_dos.timeout[IP_VS_S_CLOSE],
.maxlen = sizeof(int),
.procname = "timeout_close",
.data = &vs_timeout_table_dos.timeout[IP_VS_S_CLOSE],
.maxlen = sizeof(int),
- .mode = 0644,
+ .mode = 0644,
.proc_handler = &proc_dointvec_jiffies,
},
{
.proc_handler = &proc_dointvec_jiffies,
},
{
@@
-1497,7
+1505,7
@@
static struct ctl_table vs_vars[] = {
.procname = "timeout_closewait",
.data = &vs_timeout_table_dos.timeout[IP_VS_S_CLOSE_WAIT],
.maxlen = sizeof(int),
.procname = "timeout_closewait",
.data = &vs_timeout_table_dos.timeout[IP_VS_S_CLOSE_WAIT],
.maxlen = sizeof(int),
- .mode = 0644,
+ .mode = 0644,
.proc_handler = &proc_dointvec_jiffies,
},
{
.proc_handler = &proc_dointvec_jiffies,
},
{
@@
-1505,7
+1513,7
@@
static struct ctl_table vs_vars[] = {
.procname = "timeout_lastack",
.data = &vs_timeout_table_dos.timeout[IP_VS_S_LAST_ACK],
.maxlen = sizeof(int),
.procname = "timeout_lastack",
.data = &vs_timeout_table_dos.timeout[IP_VS_S_LAST_ACK],
.maxlen = sizeof(int),
- .mode = 0644,
+ .mode = 0644,
.proc_handler = &proc_dointvec_jiffies,
},
{
.proc_handler = &proc_dointvec_jiffies,
},
{
@@
-1513,7
+1521,7
@@
static struct ctl_table vs_vars[] = {
.procname = "timeout_listen",
.data = &vs_timeout_table_dos.timeout[IP_VS_S_LISTEN],
.maxlen = sizeof(int),
.procname = "timeout_listen",
.data = &vs_timeout_table_dos.timeout[IP_VS_S_LISTEN],
.maxlen = sizeof(int),
- .mode = 0644,
+ .mode = 0644,
.proc_handler = &proc_dointvec_jiffies,
},
{
.proc_handler = &proc_dointvec_jiffies,
},
{
@@
-1521,7
+1529,7
@@
static struct ctl_table vs_vars[] = {
.procname = "timeout_synack",
.data = &vs_timeout_table_dos.timeout[IP_VS_S_SYNACK],
.maxlen = sizeof(int),
.procname = "timeout_synack",
.data = &vs_timeout_table_dos.timeout[IP_VS_S_SYNACK],
.maxlen = sizeof(int),
- .mode = 0644,
+ .mode = 0644,
.proc_handler = &proc_dointvec_jiffies,
},
{
.proc_handler = &proc_dointvec_jiffies,
},
{
@@
-1529,7
+1537,7
@@
static struct ctl_table vs_vars[] = {
.procname = "timeout_udp",
.data = &vs_timeout_table_dos.timeout[IP_VS_S_UDP],
.maxlen = sizeof(int),
.procname = "timeout_udp",
.data = &vs_timeout_table_dos.timeout[IP_VS_S_UDP],
.maxlen = sizeof(int),
- .mode = 0644,
+ .mode = 0644,
.proc_handler = &proc_dointvec_jiffies,
},
{
.proc_handler = &proc_dointvec_jiffies,
},
{
@@
-1557,6
+1565,14
@@
static struct ctl_table vs_vars[] = {
.mode = 0644,
.proc_handler = &proc_dointvec,
},
.mode = 0644,
.proc_handler = &proc_dointvec,
},
+ {
+ .ctl_name = NET_IPV4_VS_EXPIRE_QUIESCENT_TEMPLATE,
+ .procname = "expire_quiescent_template",
+ .data = &sysctl_ip_vs_expire_quiescent_template,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec,
+ },
{
.ctl_name = NET_IPV4_VS_SYNC_THRESHOLD,
.procname = "sync_threshold",
{
.ctl_name = NET_IPV4_VS_SYNC_THRESHOLD,
.procname = "sync_threshold",
@@
-1586,7
+1602,7
@@
static ctl_table vs_table[] = {
{ .ctl_name = 0 }
};
{ .ctl_name = 0 }
};
-static ctl_table ipv4_table[] = {
+static ctl_table ipvs_ipv4_table[] = {
{
.ctl_name = NET_IPV4,
.procname = "ipv4",
{
.ctl_name = NET_IPV4,
.procname = "ipv4",
@@
-1601,7
+1617,7
@@
static ctl_table vs_root_table[] = {
.ctl_name = CTL_NET,
.procname = "net",
.mode = 0555,
.ctl_name = CTL_NET,
.procname = "net",
.mode = 0555,
-	.child = ipv4_table,
+	.child = ipvs_ipv4_table,
},
{ .ctl_name = 0 }
};
},
{ .ctl_name = 0 }
};
@@
-1897,7
+1913,7
@@
static int ip_vs_set_timeout(struct ip_vs_timeout_user *u)
#define DAEMON_ARG_LEN (sizeof(struct ip_vs_daemon_user))
#define MAX_ARG_LEN SVCDEST_ARG_LEN
#define DAEMON_ARG_LEN (sizeof(struct ip_vs_daemon_user))
#define MAX_ARG_LEN SVCDEST_ARG_LEN
-static unsigned char set_arglen[SET_CMDID(IP_VS_SO_SET_MAX)+1] = {
+static const unsigned char set_arglen[SET_CMDID(IP_VS_SO_SET_MAX)+1] = {
[SET_CMDID(IP_VS_SO_SET_ADD)] = SERVICE_ARG_LEN,
[SET_CMDID(IP_VS_SO_SET_EDIT)] = SERVICE_ARG_LEN,
[SET_CMDID(IP_VS_SO_SET_DEL)] = SERVICE_ARG_LEN,
[SET_CMDID(IP_VS_SO_SET_ADD)] = SERVICE_ARG_LEN,
[SET_CMDID(IP_VS_SO_SET_EDIT)] = SERVICE_ARG_LEN,
[SET_CMDID(IP_VS_SO_SET_DEL)] = SERVICE_ARG_LEN,
@@
-1935,7
+1951,7
@@
do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
/* increase the module use count */
ip_vs_use_count_inc();
/* increase the module use count */
ip_vs_use_count_inc();
-	if (down_interruptible(&__ip_vs_mutex)) {
+	if (mutex_lock_interruptible(&__ip_vs_mutex)) {
ret = -ERESTARTSYS;
goto out_dec;
}
ret = -ERESTARTSYS;
goto out_dec;
}
@@
-2026,7
+2042,7
@@
do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
ip_vs_service_put(svc);
out_unlock:
ip_vs_service_put(svc);
out_unlock:
-	up(&__ip_vs_mutex);
+	mutex_unlock(&__ip_vs_mutex);
out_dec:
/* decrease the module use count */
ip_vs_use_count_dec();
out_dec:
/* decrease the module use count */
ip_vs_use_count_dec();
@@
-2050,7
+2066,7
@@
ip_vs_copy_service(struct ip_vs_service_entry *dst, struct ip_vs_service *src)
dst->addr = src->addr;
dst->port = src->port;
dst->fwmark = src->fwmark;
dst->addr = src->addr;
dst->port = src->port;
dst->fwmark = src->fwmark;
-	strcpy(dst->sched_name, src->scheduler->name);
+	strlcpy(dst->sched_name, src->scheduler->name, sizeof(dst->sched_name));
dst->flags = src->flags;
dst->timeout = src->timeout / HZ;
dst->netmask = src->netmask;
dst->flags = src->flags;
dst->timeout = src->timeout / HZ;
dst->netmask = src->netmask;
@@
-2071,6
+2087,7
@@
__ip_vs_get_service_entries(const struct ip_vs_get_services *get,
list_for_each_entry(svc, &ip_vs_svc_table[idx], s_list) {
if (count >= get->num_services)
goto out;
list_for_each_entry(svc, &ip_vs_svc_table[idx], s_list) {
if (count >= get->num_services)
goto out;
+ memset(&entry, 0, sizeof(entry));
ip_vs_copy_service(&entry, svc);
if (copy_to_user(&uptr->entrytable[count],
&entry, sizeof(entry))) {
ip_vs_copy_service(&entry, svc);
if (copy_to_user(&uptr->entrytable[count],
&entry, sizeof(entry))) {
@@
-2085,6
+2102,7
@@
__ip_vs_get_service_entries(const struct ip_vs_get_services *get,
list_for_each_entry(svc, &ip_vs_svc_fwm_table[idx], f_list) {
if (count >= get->num_services)
goto out;
list_for_each_entry(svc, &ip_vs_svc_fwm_table[idx], f_list) {
if (count >= get->num_services)
goto out;
+ memset(&entry, 0, sizeof(entry));
ip_vs_copy_service(&entry, svc);
if (copy_to_user(&uptr->entrytable[count],
&entry, sizeof(entry))) {
ip_vs_copy_service(&entry, svc);
if (copy_to_user(&uptr->entrytable[count],
&entry, sizeof(entry))) {
@@
-2166,7
+2184,7
@@
__ip_vs_get_timeouts(struct ip_vs_timeout_user *u)
#define GET_TIMEOUT_ARG_LEN (sizeof(struct ip_vs_timeout_user))
#define GET_DAEMON_ARG_LEN (sizeof(struct ip_vs_daemon_user) * 2)
#define GET_TIMEOUT_ARG_LEN (sizeof(struct ip_vs_timeout_user))
#define GET_DAEMON_ARG_LEN (sizeof(struct ip_vs_daemon_user) * 2)
-static unsigned char get_arglen[GET_CMDID(IP_VS_SO_GET_MAX)+1] = {
+static const unsigned char get_arglen[GET_CMDID(IP_VS_SO_GET_MAX)+1] = {
[GET_CMDID(IP_VS_SO_GET_VERSION)] = 64,
[GET_CMDID(IP_VS_SO_GET_INFO)] = GET_INFO_ARG_LEN,
[GET_CMDID(IP_VS_SO_GET_SERVICES)] = GET_SERVICES_ARG_LEN,
[GET_CMDID(IP_VS_SO_GET_VERSION)] = 64,
[GET_CMDID(IP_VS_SO_GET_INFO)] = GET_INFO_ARG_LEN,
[GET_CMDID(IP_VS_SO_GET_SERVICES)] = GET_SERVICES_ARG_LEN,
@@
-2194,7
+2212,7
@@
do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
if (copy_from_user(arg, user, get_arglen[GET_CMDID(cmd)]) != 0)
return -EFAULT;
if (copy_from_user(arg, user, get_arglen[GET_CMDID(cmd)]) != 0)
return -EFAULT;
-	if (down_interruptible(&__ip_vs_mutex))
+	if (mutex_lock_interruptible(&__ip_vs_mutex))
return -ERESTARTSYS;
switch (cmd) {
return -ERESTARTSYS;
switch (cmd) {
@@
-2295,12
+2313,12
@@
do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
memset(&d, 0, sizeof(d));
if (ip_vs_sync_state & IP_VS_STATE_MASTER) {
d[0].state = IP_VS_STATE_MASTER;
memset(&d, 0, sizeof(d));
if (ip_vs_sync_state & IP_VS_STATE_MASTER) {
d[0].state = IP_VS_STATE_MASTER;
-		strcpy(d[0].mcast_ifn, ip_vs_master_mcast_ifn);
+		strlcpy(d[0].mcast_ifn, ip_vs_master_mcast_ifn, sizeof(d[0].mcast_ifn));
d[0].syncid = ip_vs_master_syncid;
}
if (ip_vs_sync_state & IP_VS_STATE_BACKUP) {
d[1].state = IP_VS_STATE_BACKUP;
d[0].syncid = ip_vs_master_syncid;
}
if (ip_vs_sync_state & IP_VS_STATE_BACKUP) {
d[1].state = IP_VS_STATE_BACKUP;
-		strcpy(d[1].mcast_ifn, ip_vs_backup_mcast_ifn);
+		strlcpy(d[1].mcast_ifn, ip_vs_backup_mcast_ifn, sizeof(d[1].mcast_ifn));
d[1].syncid = ip_vs_backup_syncid;
}
if (copy_to_user(user, &d, sizeof(d)) != 0)
d[1].syncid = ip_vs_backup_syncid;
}
if (copy_to_user(user, &d, sizeof(d)) != 0)
@@
-2313,7
+2331,7
@@
do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
}
out:
}
out:
-	up(&__ip_vs_mutex);
+	mutex_unlock(&__ip_vs_mutex);
return ret;
}
return ret;
}
@@
-2357,14
+2375,11
@@
int ip_vs_control_init(void)
}
memset(&ip_vs_stats, 0, sizeof(ip_vs_stats));
}
memset(&ip_vs_stats, 0, sizeof(ip_vs_stats));
-	ip_vs_stats.lock = SPIN_LOCK_UNLOCKED;
+	spin_lock_init(&ip_vs_stats.lock);
ip_vs_new_estimator(&ip_vs_stats);
/* Hook the defense timer */
ip_vs_new_estimator(&ip_vs_stats);
/* Hook the defense timer */
- init_timer(&defense_timer);
- defense_timer.function = defense_timer_handler;
- defense_timer.expires = jiffies + DEFENSE_TIMER_PERIOD;
- add_timer(&defense_timer);
+ schedule_delayed_work(&defense_work, DEFENSE_TIMER_PERIOD);
LeaveFunction(2);
return 0;
LeaveFunction(2);
return 0;
@@
-2375,7
+2390,7
@@
void ip_vs_control_cleanup(void)
{
EnterFunction(2);
ip_vs_trash_cleanup();
{
EnterFunction(2);
ip_vs_trash_cleanup();
-	del_timer_sync(&defense_timer);
+	cancel_rearming_delayed_work(&defense_work);
ip_vs_kill_estimator(&ip_vs_stats);
unregister_sysctl_table(sysctl_header);
proc_net_remove("ip_vs_stats");
ip_vs_kill_estimator(&ip_vs_stats);
unregister_sysctl_table(sysctl_header);
proc_net_remove("ip_vs_stats");