diff --git a/net/ipv4/ipvs/ip_vs_ctl.c b/net/ipv4/ipvs/ip_vs_ctl.c
index 5df55ca9e..9b933381e 100644
--- a/net/ipv4/ipvs/ip_vs_ctl.c
+++ b/net/ipv4/ipvs/ip_vs_ctl.c
@@ -23,18 +23,21 @@
#include
#include
#include
+#include
#include
#include
#include
-#include
+#include
#include
#include
#include
#include
#include
+#include
#include
+#include
#include
#include
@@ -42,27 +45,27 @@
#include
/* semaphore for IPVS sockopts. And, [gs]etsockopt may sleep. */
-static DECLARE_MUTEX(__ip_vs_mutex);
+static DEFINE_MUTEX(__ip_vs_mutex);
/* lock for service table */
-static rwlock_t __ip_vs_svc_lock = RW_LOCK_UNLOCKED;
+static DEFINE_RWLOCK(__ip_vs_svc_lock);
/* lock for table with the real services */
-static rwlock_t __ip_vs_rs_lock = RW_LOCK_UNLOCKED;
+static DEFINE_RWLOCK(__ip_vs_rs_lock);
/* lock for state and timeout tables */
-static rwlock_t __ip_vs_securetcp_lock = RW_LOCK_UNLOCKED;
+static DEFINE_RWLOCK(__ip_vs_securetcp_lock);
/* lock for drop entry handling */
-static spinlock_t __ip_vs_dropentry_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(__ip_vs_dropentry_lock);
/* lock for drop packet handling */
-static spinlock_t __ip_vs_droppacket_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(__ip_vs_droppacket_lock);
/* 1/rate drop and drop-entry variables */
int ip_vs_drop_rate = 0;
int ip_vs_drop_counter = 0;
-atomic_t ip_vs_dropentry = ATOMIC_INIT(0);
+static atomic_t ip_vs_dropentry = ATOMIC_INIT(0);
/* number of virtual services */
static int ip_vs_num_services = 0;
@@ -75,6 +78,7 @@ static int sysctl_ip_vs_amemthresh = 1024;
static int sysctl_ip_vs_am_droprate = 10;
int sysctl_ip_vs_cache_bypass = 0;
int sysctl_ip_vs_expire_nodest_conn = 0;
+int sysctl_ip_vs_expire_quiescent_template = 0;
int sysctl_ip_vs_sync_threshold[2] = { 3, 50 };
int sysctl_ip_vs_nat_icmp_send = 0;
@@ -89,7 +93,8 @@ int ip_vs_get_debug_level(void)
#endif
/*
- * update_defense_level is called from timer bh and from sysctl. 
+ * update_defense_level is called from keventd and from sysctl, + * so it needs to protect itself from softirqs */ static void update_defense_level(void) { @@ -109,6 +114,8 @@ static void update_defense_level(void) nomem = (availmem < sysctl_ip_vs_amemthresh); + local_bh_disable(); + /* drop_entry */ spin_lock(&__ip_vs_dropentry_lock); switch (sysctl_ip_vs_drop_entry) { @@ -205,25 +212,27 @@ static void update_defense_level(void) if (to_change >= 0) ip_vs_protocol_timeout_change(sysctl_ip_vs_secure_tcp>1); write_unlock(&__ip_vs_securetcp_lock); + + local_bh_enable(); } /* * Timer for checking the defense */ -static struct timer_list defense_timer; #define DEFENSE_TIMER_PERIOD 1*HZ +static void defense_work_handler(struct work_struct *work); +static DECLARE_DELAYED_WORK(defense_work, defense_work_handler); -static void defense_timer_handler(unsigned long data) +static void defense_work_handler(struct work_struct *work) { update_defense_level(); if (atomic_read(&ip_vs_dropentry)) ip_vs_random_dropentry(); - mod_timer(&defense_timer, jiffies + DEFENSE_TIMER_PERIOD); + schedule_delayed_work(&defense_work, DEFENSE_TIMER_PERIOD); } - int ip_vs_use_count_inc(void) { @@ -274,7 +283,7 @@ static atomic_t ip_vs_nullsvc_counter = ATOMIC_INIT(0); * Returns hash value for virtual service */ static __inline__ unsigned -ip_vs_svc_hashkey(unsigned proto, __u32 addr, __u16 port) +ip_vs_svc_hashkey(unsigned proto, __be32 addr, __be16 port) { register unsigned porth = ntohs(port); @@ -356,7 +365,7 @@ static int ip_vs_svc_unhash(struct ip_vs_service *svc) * Get service by {proto,addr,port} in the service table. */ static __inline__ struct ip_vs_service * -__ip_vs_service_get(__u16 protocol, __u32 vaddr, __u16 vport) +__ip_vs_service_get(__u16 protocol, __be32 vaddr, __be16 vport) { unsigned hash; struct ip_vs_service *svc; @@ -401,7 +410,7 @@ static __inline__ struct ip_vs_service *__ip_vs_svc_fwm_get(__u32 fwmark) } struct ip_vs_service * -ip_vs_service_get(__u32 fwmark, __u16 protocol, __u32 vaddr, __u16 vport) +ip_vs_service_get(__u32 fwmark, __u16 protocol, __be32 vaddr, __be16 vport) { struct ip_vs_service *svc; @@ -441,7 +450,7 @@ ip_vs_service_get(__u32 fwmark, __u16 protocol, __u32 vaddr, __u16 vport) out: read_unlock(&__ip_vs_svc_lock); - IP_VS_DBG(6, "lookup service: fwm %u %s %u.%u.%u.%u:%u %s\n", + IP_VS_DBG(9, "lookup service: fwm %u %s %u.%u.%u.%u:%u %s\n", fwmark, ip_vs_proto_name(protocol), NIPQUAD(vaddr), ntohs(vport), svc?"hit":"not hit"); @@ -471,7 +480,7 @@ __ip_vs_unbind_svc(struct ip_vs_dest *dest) /* * Returns hash value for real service */ -static __inline__ unsigned ip_vs_rs_hashkey(__u32 addr, __u16 port) +static __inline__ unsigned ip_vs_rs_hashkey(__be32 addr, __be16 port) { register unsigned porth = ntohs(port); @@ -522,7 +531,7 @@ static int ip_vs_rs_unhash(struct ip_vs_dest *dest) * Lookup real service by in the real service table. */ struct ip_vs_dest * -ip_vs_lookup_real_service(__u16 protocol, __u32 daddr, __u16 dport) +ip_vs_lookup_real_service(__u16 protocol, __be32 daddr, __be16 dport) { unsigned hash; struct ip_vs_dest *dest; @@ -553,7 +562,7 @@ ip_vs_lookup_real_service(__u16 protocol, __u32 daddr, __u16 dport) * Lookup destination by {addr,port} in the given service */ static struct ip_vs_dest * -ip_vs_lookup_dest(struct ip_vs_service *svc, __u32 daddr, __u16 dport) +ip_vs_lookup_dest(struct ip_vs_service *svc, __be32 daddr, __be16 dport) { struct ip_vs_dest *dest; @@ -582,7 +591,7 @@ ip_vs_lookup_dest(struct ip_vs_service *svc, __u32 daddr, __u16 dport) * scheduling. 
*/ static struct ip_vs_dest * -ip_vs_trash_get_dest(struct ip_vs_service *svc, __u32 daddr, __u16 dport) +ip_vs_trash_get_dest(struct ip_vs_service *svc, __be32 daddr, __be16 dport) { struct ip_vs_dest *dest, *nxt; @@ -591,7 +600,7 @@ ip_vs_trash_get_dest(struct ip_vs_service *svc, __u32 daddr, __u16 dport) */ list_for_each_entry_safe(dest, nxt, &ip_vs_dest_trash, n_list) { IP_VS_DBG(3, "Destination %u/%u.%u.%u.%u:%u still in trash, " - "refcnt=%d\n", + "dest->refcnt=%d\n", dest->vfwmark, NIPQUAD(dest->addr), ntohs(dest->port), atomic_read(&dest->refcnt)); @@ -726,12 +735,11 @@ ip_vs_new_dest(struct ip_vs_service *svc, struct ip_vs_dest_user *udest, if (atype != RTN_LOCAL && atype != RTN_UNICAST) return -EINVAL; - dest = kmalloc(sizeof(struct ip_vs_dest), GFP_ATOMIC); + dest = kzalloc(sizeof(struct ip_vs_dest), GFP_ATOMIC); if (dest == NULL) { IP_VS_ERR("ip_vs_new_dest: kmalloc failed.\n"); return -ENOMEM; } - memset(dest, 0, sizeof(struct ip_vs_dest)); dest->protocol = svc->protocol; dest->vaddr = svc->addr; @@ -746,8 +754,8 @@ ip_vs_new_dest(struct ip_vs_service *svc, struct ip_vs_dest_user *udest, atomic_set(&dest->refcnt, 0); INIT_LIST_HEAD(&dest->d_list); - dest->dst_lock = SPIN_LOCK_UNLOCKED; - dest->stats.lock = SPIN_LOCK_UNLOCKED; + spin_lock_init(&dest->dst_lock); + spin_lock_init(&dest->stats.lock); __ip_vs_update_dest(svc, dest, udest); ip_vs_new_estimator(&dest->stats); @@ -765,8 +773,8 @@ static int ip_vs_add_dest(struct ip_vs_service *svc, struct ip_vs_dest_user *udest) { struct ip_vs_dest *dest; - __u32 daddr = udest->addr; - __u16 dport = udest->port; + __be32 daddr = udest->addr; + __be16 dport = udest->port; int ret; EnterFunction(2); @@ -798,7 +806,7 @@ ip_vs_add_dest(struct ip_vs_service *svc, struct ip_vs_dest_user *udest) dest = ip_vs_trash_get_dest(svc, daddr, dport); if (dest != NULL) { IP_VS_DBG(3, "Get destination %u.%u.%u.%u:%u from trash, " - "refcnt=%d, service %u/%u.%u.%u.%u:%u\n", + "dest->refcnt=%d, service %u/%u.%u.%u.%u:%u\n", NIPQUAD(daddr), ntohs(dport), atomic_read(&dest->refcnt), dest->vfwmark, @@ -871,8 +879,8 @@ static int ip_vs_edit_dest(struct ip_vs_service *svc, struct ip_vs_dest_user *udest) { struct ip_vs_dest *dest; - __u32 daddr = udest->addr; - __u16 dport = udest->port; + __be32 daddr = udest->addr; + __be16 dport = udest->port; EnterFunction(2); @@ -943,7 +951,8 @@ static void __ip_vs_del_dest(struct ip_vs_dest *dest) atomic_dec(&dest->svc->refcnt); kfree(dest); } else { - IP_VS_DBG(3, "Moving dest %u.%u.%u.%u:%u into trash, refcnt=%d\n", + IP_VS_DBG(3, "Moving dest %u.%u.%u.%u:%u into trash, " + "dest->refcnt=%d\n", NIPQUAD(dest->addr), ntohs(dest->port), atomic_read(&dest->refcnt)); list_add(&dest->n_list, &ip_vs_dest_trash); @@ -982,8 +991,8 @@ static int ip_vs_del_dest(struct ip_vs_service *svc,struct ip_vs_dest_user *udest) { struct ip_vs_dest *dest; - __u32 daddr = udest->addr; - __u16 dport = udest->port; + __be32 daddr = udest->addr; + __be16 dport = udest->port; EnterFunction(2); @@ -1040,14 +1049,12 @@ ip_vs_add_service(struct ip_vs_service_user *u, struct ip_vs_service **svc_p) goto out_mod_dec; } - svc = (struct ip_vs_service *) - kmalloc(sizeof(struct ip_vs_service), GFP_ATOMIC); + svc = kzalloc(sizeof(struct ip_vs_service), GFP_ATOMIC); if (svc == NULL) { IP_VS_DBG(1, "ip_vs_add_service: kmalloc failed.\n"); ret = -ENOMEM; goto out_err; } - memset(svc, 0, sizeof(struct ip_vs_service)); /* I'm the first user of the service */ atomic_set(&svc->usecnt, 1); @@ -1062,8 +1069,8 @@ ip_vs_add_service(struct ip_vs_service_user *u, 
struct ip_vs_service **svc_p) svc->netmask = u->netmask; INIT_LIST_HEAD(&svc->destinations); - svc->sched_lock = RW_LOCK_UNLOCKED; - svc->stats.lock = SPIN_LOCK_UNLOCKED; + rwlock_init(&svc->sched_lock); + spin_lock_init(&svc->stats.lock); /* Bind the scheduler */ ret = ip_vs_bind_scheduler(svc, sched); @@ -1347,21 +1354,19 @@ static int ip_vs_zero_all(void) static int proc_do_defense_mode(ctl_table *table, int write, struct file * filp, - void __user *buffer, size_t *lenp) + void __user *buffer, size_t *lenp, loff_t *ppos) { int *valp = table->data; int val = *valp; int rc; - rc = proc_dointvec(table, write, filp, buffer, lenp); + rc = proc_dointvec(table, write, filp, buffer, lenp, ppos); if (write && (*valp != val)) { if ((*valp < 0) || (*valp > 3)) { /* Restore the correct value */ *valp = val; } else { - local_bh_disable(); update_defense_level(); - local_bh_enable(); } } return rc; @@ -1370,7 +1375,7 @@ proc_do_defense_mode(ctl_table *table, int write, struct file * filp, static int proc_do_sync_threshold(ctl_table *table, int write, struct file *filp, - void __user *buffer, size_t *lenp) + void __user *buffer, size_t *lenp, loff_t *ppos) { int *valp = table->data; int val[2]; @@ -1379,7 +1384,7 @@ proc_do_sync_threshold(ctl_table *table, int write, struct file *filp, /* backup the value first */ memcpy(val, valp, sizeof(val)); - rc = proc_dointvec(table, write, filp, buffer, lenp); + rc = proc_dointvec(table, write, filp, buffer, lenp, ppos); if (write && (valp[0] < 0 || valp[1] < 0 || valp[0] >= valp[1])) { /* Restore the correct value */ memcpy(valp, val, sizeof(val)); @@ -1447,9 +1452,9 @@ static struct ctl_table vs_vars[] = { { .ctl_name = NET_IPV4_VS_TO_ES, .procname = "timeout_established", - .data = &vs_timeout_table_dos.timeout[IP_VS_S_ESTABLISHED], + .data = &vs_timeout_table_dos.timeout[IP_VS_S_ESTABLISHED], .maxlen = sizeof(int), - .mode = 0644, + .mode = 0644, .proc_handler = &proc_dointvec_jiffies, }, { @@ -1457,7 +1462,7 @@ static struct ctl_table vs_vars[] = { .procname = "timeout_synsent", .data = &vs_timeout_table_dos.timeout[IP_VS_S_SYN_SENT], .maxlen = sizeof(int), - .mode = 0644, + .mode = 0644, .proc_handler = &proc_dointvec_jiffies, }, { @@ -1465,7 +1470,7 @@ static struct ctl_table vs_vars[] = { .procname = "timeout_synrecv", .data = &vs_timeout_table_dos.timeout[IP_VS_S_SYN_RECV], .maxlen = sizeof(int), - .mode = 0644, + .mode = 0644, .proc_handler = &proc_dointvec_jiffies, }, { @@ -1473,7 +1478,7 @@ static struct ctl_table vs_vars[] = { .procname = "timeout_finwait", .data = &vs_timeout_table_dos.timeout[IP_VS_S_FIN_WAIT], .maxlen = sizeof(int), - .mode = 0644, + .mode = 0644, .proc_handler = &proc_dointvec_jiffies, }, { @@ -1489,7 +1494,7 @@ static struct ctl_table vs_vars[] = { .procname = "timeout_close", .data = &vs_timeout_table_dos.timeout[IP_VS_S_CLOSE], .maxlen = sizeof(int), - .mode = 0644, + .mode = 0644, .proc_handler = &proc_dointvec_jiffies, }, { @@ -1497,7 +1502,7 @@ static struct ctl_table vs_vars[] = { .procname = "timeout_closewait", .data = &vs_timeout_table_dos.timeout[IP_VS_S_CLOSE_WAIT], .maxlen = sizeof(int), - .mode = 0644, + .mode = 0644, .proc_handler = &proc_dointvec_jiffies, }, { @@ -1505,7 +1510,7 @@ static struct ctl_table vs_vars[] = { .procname = "timeout_lastack", .data = &vs_timeout_table_dos.timeout[IP_VS_S_LAST_ACK], .maxlen = sizeof(int), - .mode = 0644, + .mode = 0644, .proc_handler = &proc_dointvec_jiffies, }, { @@ -1513,7 +1518,7 @@ static struct ctl_table vs_vars[] = { .procname = "timeout_listen", .data = 
&vs_timeout_table_dos.timeout[IP_VS_S_LISTEN], .maxlen = sizeof(int), - .mode = 0644, + .mode = 0644, .proc_handler = &proc_dointvec_jiffies, }, { @@ -1521,7 +1526,7 @@ static struct ctl_table vs_vars[] = { .procname = "timeout_synack", .data = &vs_timeout_table_dos.timeout[IP_VS_S_SYNACK], .maxlen = sizeof(int), - .mode = 0644, + .mode = 0644, .proc_handler = &proc_dointvec_jiffies, }, { @@ -1529,7 +1534,7 @@ static struct ctl_table vs_vars[] = { .procname = "timeout_udp", .data = &vs_timeout_table_dos.timeout[IP_VS_S_UDP], .maxlen = sizeof(int), - .mode = 0644, + .mode = 0644, .proc_handler = &proc_dointvec_jiffies, }, { @@ -1557,6 +1562,14 @@ static struct ctl_table vs_vars[] = { .mode = 0644, .proc_handler = &proc_dointvec, }, + { + .ctl_name = NET_IPV4_VS_EXPIRE_QUIESCENT_TEMPLATE, + .procname = "expire_quiescent_template", + .data = &sysctl_ip_vs_expire_quiescent_template, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = &proc_dointvec, + }, { .ctl_name = NET_IPV4_VS_SYNC_THRESHOLD, .procname = "sync_threshold", @@ -1586,7 +1599,7 @@ static ctl_table vs_table[] = { { .ctl_name = 0 } }; -static ctl_table ipv4_table[] = { +static ctl_table ipvs_ipv4_table[] = { { .ctl_name = NET_IPV4, .procname = "ipv4", @@ -1601,7 +1614,7 @@ static ctl_table vs_root_table[] = { .ctl_name = CTL_NET, .procname = "net", .mode = 0555, - .child = ipv4_table, + .child = ipvs_ipv4_table, }, { .ctl_name = 0 } }; @@ -1781,7 +1794,7 @@ static int ip_vs_info_open(struct inode *inode, struct file *file) { struct seq_file *seq; int rc = -ENOMEM; - struct ip_vs_iter *s = kmalloc(sizeof(*s), GFP_KERNEL); + struct ip_vs_iter *s = kzalloc(sizeof(*s), GFP_KERNEL); if (!s) goto out; @@ -1792,7 +1805,6 @@ static int ip_vs_info_open(struct inode *inode, struct file *file) seq = file->private_data; seq->private = s; - memset(s, 0, sizeof(*s)); out: return rc; out_kfree: @@ -1897,7 +1909,7 @@ static int ip_vs_set_timeout(struct ip_vs_timeout_user *u) #define DAEMON_ARG_LEN (sizeof(struct ip_vs_daemon_user)) #define MAX_ARG_LEN SVCDEST_ARG_LEN -static unsigned char set_arglen[SET_CMDID(IP_VS_SO_SET_MAX)+1] = { +static const unsigned char set_arglen[SET_CMDID(IP_VS_SO_SET_MAX)+1] = { [SET_CMDID(IP_VS_SO_SET_ADD)] = SERVICE_ARG_LEN, [SET_CMDID(IP_VS_SO_SET_EDIT)] = SERVICE_ARG_LEN, [SET_CMDID(IP_VS_SO_SET_DEL)] = SERVICE_ARG_LEN, @@ -1935,7 +1947,7 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len) /* increase the module use count */ ip_vs_use_count_inc(); - if (down_interruptible(&__ip_vs_mutex)) { + if (mutex_lock_interruptible(&__ip_vs_mutex)) { ret = -ERESTARTSYS; goto out_dec; } @@ -2026,7 +2038,7 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len) ip_vs_service_put(svc); out_unlock: - up(&__ip_vs_mutex); + mutex_unlock(&__ip_vs_mutex); out_dec: /* decrease the module use count */ ip_vs_use_count_dec(); @@ -2050,7 +2062,7 @@ ip_vs_copy_service(struct ip_vs_service_entry *dst, struct ip_vs_service *src) dst->addr = src->addr; dst->port = src->port; dst->fwmark = src->fwmark; - strcpy(dst->sched_name, src->scheduler->name); + strlcpy(dst->sched_name, src->scheduler->name, sizeof(dst->sched_name)); dst->flags = src->flags; dst->timeout = src->timeout / HZ; dst->netmask = src->netmask; @@ -2071,6 +2083,7 @@ __ip_vs_get_service_entries(const struct ip_vs_get_services *get, list_for_each_entry(svc, &ip_vs_svc_table[idx], s_list) { if (count >= get->num_services) goto out; + memset(&entry, 0, sizeof(entry)); ip_vs_copy_service(&entry, svc); if 
(copy_to_user(&uptr->entrytable[count], &entry, sizeof(entry))) { @@ -2085,6 +2098,7 @@ __ip_vs_get_service_entries(const struct ip_vs_get_services *get, list_for_each_entry(svc, &ip_vs_svc_fwm_table[idx], f_list) { if (count >= get->num_services) goto out; + memset(&entry, 0, sizeof(entry)); ip_vs_copy_service(&entry, svc); if (copy_to_user(&uptr->entrytable[count], &entry, sizeof(entry))) { @@ -2166,7 +2180,7 @@ __ip_vs_get_timeouts(struct ip_vs_timeout_user *u) #define GET_TIMEOUT_ARG_LEN (sizeof(struct ip_vs_timeout_user)) #define GET_DAEMON_ARG_LEN (sizeof(struct ip_vs_daemon_user) * 2) -static unsigned char get_arglen[GET_CMDID(IP_VS_SO_GET_MAX)+1] = { +static const unsigned char get_arglen[GET_CMDID(IP_VS_SO_GET_MAX)+1] = { [GET_CMDID(IP_VS_SO_GET_VERSION)] = 64, [GET_CMDID(IP_VS_SO_GET_INFO)] = GET_INFO_ARG_LEN, [GET_CMDID(IP_VS_SO_GET_SERVICES)] = GET_SERVICES_ARG_LEN, @@ -2194,7 +2208,7 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len) if (copy_from_user(arg, user, get_arglen[GET_CMDID(cmd)]) != 0) return -EFAULT; - if (down_interruptible(&__ip_vs_mutex)) + if (mutex_lock_interruptible(&__ip_vs_mutex)) return -ERESTARTSYS; switch (cmd) { @@ -2295,12 +2309,12 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len) memset(&d, 0, sizeof(d)); if (ip_vs_sync_state & IP_VS_STATE_MASTER) { d[0].state = IP_VS_STATE_MASTER; - strcpy(d[0].mcast_ifn, ip_vs_master_mcast_ifn); + strlcpy(d[0].mcast_ifn, ip_vs_master_mcast_ifn, sizeof(d[0].mcast_ifn)); d[0].syncid = ip_vs_master_syncid; } if (ip_vs_sync_state & IP_VS_STATE_BACKUP) { d[1].state = IP_VS_STATE_BACKUP; - strcpy(d[1].mcast_ifn, ip_vs_backup_mcast_ifn); + strlcpy(d[1].mcast_ifn, ip_vs_backup_mcast_ifn, sizeof(d[1].mcast_ifn)); d[1].syncid = ip_vs_backup_syncid; } if (copy_to_user(user, &d, sizeof(d)) != 0) @@ -2313,7 +2327,7 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len) } out: - up(&__ip_vs_mutex); + mutex_unlock(&__ip_vs_mutex); return ret; } @@ -2357,14 +2371,11 @@ int ip_vs_control_init(void) } memset(&ip_vs_stats, 0, sizeof(ip_vs_stats)); - ip_vs_stats.lock = SPIN_LOCK_UNLOCKED; + spin_lock_init(&ip_vs_stats.lock); ip_vs_new_estimator(&ip_vs_stats); /* Hook the defense timer */ - init_timer(&defense_timer); - defense_timer.function = defense_timer_handler; - defense_timer.expires = jiffies + DEFENSE_TIMER_PERIOD; - add_timer(&defense_timer); + schedule_delayed_work(&defense_work, DEFENSE_TIMER_PERIOD); LeaveFunction(2); return 0; @@ -2375,7 +2386,7 @@ void ip_vs_control_cleanup(void) { EnterFunction(2); ip_vs_trash_cleanup(); - del_timer_sync(&defense_timer); + cancel_rearming_delayed_work(&defense_work); ip_vs_kill_estimator(&ip_vs_stats); unregister_sysctl_table(sysctl_header); proc_net_remove("ip_vs_stats");
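
Beyond the mechanical API conversions (DEFINE_MUTEX/DEFINE_RWLOCK/DEFINE_SPINLOCK and the runtime spin_lock_init()/rwlock_init() calls, kmalloc+memset folded into kzalloc, strcpy bounded as strlcpy, __be32/__be16 endianness annotations, and the extra loff_t *ppos argument in the proc handlers), the one behavioural change in this patch is retiring defense_timer in favour of a self-rearming delayed work item: update_defense_level() now runs from keventd in process context and brackets its own critical sections with local_bh_disable()/local_bh_enable(), so proc_do_defense_mode() no longer has to. A minimal, self-contained sketch of that delayed-work pattern follows, using only the 2.6.20-era workqueue calls visible in the patch (DECLARE_DELAYED_WORK, schedule_delayed_work, cancel_rearming_delayed_work); the module wrapper, the name check_work and the empty handler body are illustrative, not part of IPVS.

/* Sketch of the self-rearming delayed-work pattern adopted by this patch.
 * Only the workqueue calls mirror ip_vs_ctl.c; the rest is illustrative.
 */
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

#define CHECK_PERIOD (1 * HZ)	/* corresponds to DEFENSE_TIMER_PERIOD */

static void check_work_handler(struct work_struct *work);
static DECLARE_DELAYED_WORK(check_work, check_work_handler);

static void check_work_handler(struct work_struct *work)
{
	/* periodic work runs here in process context (keventd), so it may
	 * sleep; IPVS calls update_defense_level() at this point */

	/* re-arm: queue ourselves again, one period from now */
	schedule_delayed_work(&check_work, CHECK_PERIOD);
}

static int __init sketch_init(void)
{
	/* first arming, as ip_vs_control_init() now does */
	schedule_delayed_work(&check_work, CHECK_PERIOD);
	return 0;
}

static void __exit sketch_exit(void)
{
	/* stop a work item that re-queues itself; this was the
	 * contemporary API used in ip_vs_control_cleanup() */
	cancel_rearming_delayed_work(&check_work);
}

module_init(sketch_init);
module_exit(sketch_exit);
MODULE_LICENSE("GPL");

Compared with the old mod_timer() loop, the handler here runs with softirqs enabled, which is why update_defense_level() itself now takes local_bh_disable()/local_bh_enable() around its lock-protected sections instead of relying on the sysctl path to do so.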