1 /* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
2 * Patrick Schaaf <bof@bof.de>
3 * Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
10 /* Kernel module for IP set management */
12 #include <linux/version.h>
13 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
14 #include <linux/config.h>
16 #include <linux/module.h>
17 #include <linux/moduleparam.h>
18 #include <linux/kmod.h>
20 #include <linux/skbuff.h>
21 #include <linux/random.h>
22 #include <linux/jhash.h>
23 #include <linux/netfilter_ipv4/ip_tables.h>
24 #include <linux/errno.h>
25 #include <asm/uaccess.h>
26 #include <asm/bitops.h>
27 #include <asm/semaphore.h>
28 #include <linux/spinlock.h>
29 #include <linux/vmalloc.h>
31 #define ASSERT_READ_LOCK(x)
32 #define ASSERT_WRITE_LOCK(x)
33 #include <linux/netfilter_ipv4/ip_set.h>
/* Module-global state: the registry of set types, the array of live sets,
 * the bindings hash, and the locks that protect them (rwlock for kernel-path
 * readers, mutex for serializing userspace requests). */
35 static struct list_head set_type_list; /* all registered sets */
36 static struct ip_set **ip_set_list; /* all individual sets */
37 static DEFINE_RWLOCK(ip_set_lock); /* protects the lists and the hash */
38 static DECLARE_MUTEX(ip_set_app_mutex); /* serializes user access */
39 static ip_set_id_t ip_set_max = CONFIG_IP_NF_SET_MAX;
40 static ip_set_id_t ip_set_bindings_hash_size = CONFIG_IP_NF_SET_HASHSIZE;
41 static struct list_head *ip_set_hash; /* hash of bindings */
42 static unsigned int ip_set_hash_random; /* random seed */
45 * Sets are identified either by the index in ip_set_list or by id.
46 * The id never changes and is used to find a key in the hash.
47 * The index may change by swapping and used at all other places
48 * (set/SET netfilter modules, binding value, etc.)
50 * Userspace requests are serialized by ip_set_mutex and sets can
51 * be deleted only from userspace. Therefore ip_set_list locking
52 * must obey the following rules:
54 * - kernel requests: read and write locking mandatory
55 * - user requests: read locking optional, write locking mandatory
/* Take/drop one reference on the set at ip_set_list[index].
 * Callers must guarantee the slot is non-NULL and hold ip_set_lock
 * (or the app mutex) so the set cannot disappear underneath. */
59 __ip_set_get(ip_set_id_t index)
61 atomic_inc(&ip_set_list[index]->ref);
65 __ip_set_put(ip_set_id_t index)
67 atomic_dec(&ip_set_list[index]->ref);
/* Walk bucket `key` of the bindings hash looking for the entry that
 * matches (set id, ip). Caller must hold ip_set_lock. Returns the entry
 * or (in elided code) NULL when not found. */
74 static inline struct ip_set_hash *
75 __ip_set_find(u_int32_t key, ip_set_id_t id, ip_set_ip_t ip)
77 struct ip_set_hash *set_hash;
79 list_for_each_entry(set_hash, &ip_set_hash[key], list)
80 if (set_hash->id == id && set_hash->ip == ip)
/* Look up the binding for (id, ip) in the bindings hash; the bucket is
 * chosen by jhash_2words seeded with ip_set_hash_random. Returns the bound
 * set's index, or IP_SET_INVALID_ID when no binding exists.
 * Caller must hold ip_set_lock for reading (asserted below). */
87 ip_set_find_in_hash(ip_set_id_t id, ip_set_ip_t ip)
89 u_int32_t key = jhash_2words(id, ip, ip_set_hash_random)
90 % ip_set_bindings_hash_size;
91 struct ip_set_hash *set_hash;
93 ASSERT_READ_LOCK(&ip_set_lock);
94 IP_SET_ASSERT(ip_set_list[id]);
95 DP("set: %s, ip: %u.%u.%u.%u", ip_set_list[id]->name, HIPQUAD(ip));
97 set_hash = __ip_set_find(key, id, ip);
99 DP("set: %s, ip: %u.%u.%u.%u, binding: %s", ip_set_list[id]->name,
101 set_hash != NULL ? ip_set_list[set_hash->binding]->name : "");
103 return (set_hash != NULL ? set_hash->binding : IP_SET_INVALID_ID);
/* Unlink one bindings-hash entry and drop the reference it held on the
 * bound set. Caller must hold ip_set_lock for writing (asserted). */
107 __set_hash_del(struct ip_set_hash *set_hash)
109 ASSERT_WRITE_LOCK(&ip_set_lock);
110 IP_SET_ASSERT(ip_set_list[set_hash->binding]);
112 __ip_set_put(set_hash->binding);
113 list_del(&set_hash->list);
/* Remove the binding for (id, ip), if any. Takes ip_set_lock for writing
 * around the lookup + unlink so kernel-path readers never see a half-removed
 * entry. Deleting a non-existent binding is silently tolerated. */
118 ip_set_hash_del(ip_set_id_t id, ip_set_ip_t ip)
120 u_int32_t key = jhash_2words(id, ip, ip_set_hash_random)
121 % ip_set_bindings_hash_size;
122 struct ip_set_hash *set_hash;
124 IP_SET_ASSERT(ip_set_list[id]);
125 DP("set: %s, ip: %u.%u.%u.%u", ip_set_list[id]->name, HIPQUAD(ip));
126 write_lock_bh(&ip_set_lock);
127 set_hash = __ip_set_find(key, id, ip);
128 DP("set: %s, ip: %u.%u.%u.%u, binding: %s", ip_set_list[id]->name,
130 set_hash != NULL ? ip_set_list[set_hash->binding]->name : "");
132 if (set_hash != NULL)
133 __set_hash_del(set_hash);
134 write_unlock_bh(&ip_set_lock);
/* Add (or overwrite) the binding (id, ip) -> binding in the bindings hash.
 * On overwrite the old binding's reference is dropped first; the new
 * binding target is referenced before the lock is released.
 * NOTE(review): kmalloc(GFP_KERNEL) at line 153 happens while holding
 * write_lock_bh(&ip_set_lock) taken at line 150 — GFP_KERNEL may sleep,
 * which is not allowed under a spinning lock; verify against the full
 * source whether elided lines restructure this (e.g. GFP_ATOMIC).
 * NOTE(review): kernel list_add() is list_add(new, head); line 161 passes
 * (&ip_set_hash[key], &set_hash->list), which looks like the arguments are
 * swapped — confirm against <linux/list.h> and the unelided original. */
139 ip_set_hash_add(ip_set_id_t id, ip_set_ip_t ip, ip_set_id_t binding)
141 u_int32_t key = jhash_2words(id, ip, ip_set_hash_random)
142 % ip_set_bindings_hash_size;
143 struct ip_set_hash *set_hash;
146 IP_SET_ASSERT(ip_set_list[id]);
147 IP_SET_ASSERT(ip_set_list[binding]);
148 DP("set: %s, ip: %u.%u.%u.%u, binding: %s", ip_set_list[id]->name,
149 HIPQUAD(ip), ip_set_list[binding]->name);
150 write_lock_bh(&ip_set_lock);
151 set_hash = __ip_set_find(key, id, ip);
153 set_hash = kmalloc(sizeof(struct ip_set_hash), GFP_KERNEL);
158 INIT_LIST_HEAD(&set_hash->list);
161 list_add(&ip_set_hash[key], &set_hash->list);
163 IP_SET_ASSERT(ip_set_list[set_hash->binding]);
164 DP("overwrite binding: %s",
165 ip_set_list[set_hash->binding]->name);
166 __ip_set_put(set_hash->binding);
168 set_hash->binding = binding;
169 __ip_set_get(set_hash->binding);
171 write_unlock_bh(&ip_set_lock);
/* Iterate fn(entry, args...) over every entry of every bindings-hash
 * bucket. The RW variant uses list_for_each_entry_safe so fn may delete
 * the current entry, and asserts the write lock is held. */
175 #define FOREACH_HASH_DO(fn, args...) \
178 struct ip_set_hash *__set_hash; \
180 for (__key = 0; __key < ip_set_bindings_hash_size; __key++) { \
181 list_for_each_entry(__set_hash, &ip_set_hash[__key], list) \
182 fn(__set_hash , ## args); \
186 #define FOREACH_HASH_RW_DO(fn, args...) \
189 struct ip_set_hash *__set_hash, *__n; \
191 ASSERT_WRITE_LOCK(&ip_set_lock); \
192 for (__key = 0; __key < ip_set_bindings_hash_size; __key++) { \
193 list_for_each_entry_safe(__set_hash, __n, &ip_set_hash[__key], list)\
194 fn(__set_hash , ## args); \
198 /* Add, del and test set entries from kernel */
/* Resolve the next set to visit for `ip`: first a per-IP binding from the
 * hash, then the set's default binding. Evaluates to true (and updates
 * `index`) while there is another set in the binding chain. */
200 #define follow_bindings(index, set, ip) \
201 ((index = ip_set_find_in_hash((set)->id, ip)) != IP_SET_INVALID_ID \
202 || (index = (set)->binding) != IP_SET_INVALID_ID)
/* Kernel-path test: check skb's address against the set at `index`,
 * following per-IP and default bindings until a chain end. Per-set lock is
 * taken for reading; ip_set_lock is held across the whole walk. `i` steps
 * through the flags array (double-data set types consume two slots). */
205 ip_set_testip_kernel(ip_set_id_t index,
206 const struct sk_buff *skb,
207 const u_int32_t *flags)
214 IP_SET_ASSERT(flags[i]);
215 read_lock_bh(&ip_set_lock);
217 set = ip_set_list[index];
219 DP("set %s, index %u", set->name, index);
220 read_lock_bh(&set->lock);
221 res = set->type->testip_kernel(set, skb, &ip, flags, i++);
222 read_unlock_bh(&set->lock);
223 i += !!(set->type->features & IPSET_DATA_DOUBLE);
226 && follow_bindings(index, set, ip));
227 read_unlock_bh(&ip_set_lock);
/* Kernel-path add: insert the skb's address into the set at `index` and
 * every set reachable via bindings. -EEXIST does not stop the walk.
 * Write-locks each set's own lock; if the type reports -EAGAIN the
 * (partially elided) retry path at line 260 calls set->type->retry(). */
233 ip_set_addip_kernel(ip_set_id_t index,
234 const struct sk_buff *skb,
235 const u_int32_t *flags)
242 IP_SET_ASSERT(flags[i]);
244 read_lock_bh(&ip_set_lock);
246 set = ip_set_list[index];
248 DP("set %s, index %u", set->name, index);
249 write_lock_bh(&set->lock);
250 res = set->type->addip_kernel(set, skb, &ip, flags, i++);
251 write_unlock_bh(&set->lock);
252 i += !!(set->type->features & IPSET_DATA_DOUBLE);
253 } while ((res == 0 || res == -EEXIST)
255 && follow_bindings(index, set, ip));
256 read_unlock_bh(&ip_set_lock);
260 && (res = set->type->retry(set)) == 0)
/* Kernel-path delete: mirror of ip_set_addip_kernel — remove the skb's
 * address from the set at `index` and every bound set; -EEXIST is
 * tolerated so the binding chain is fully walked. */
265 ip_set_delip_kernel(ip_set_id_t index,
266 const struct sk_buff *skb,
267 const u_int32_t *flags)
274 IP_SET_ASSERT(flags[i]);
275 read_lock_bh(&ip_set_lock);
277 set = ip_set_list[index];
279 DP("set %s, index %u", set->name, index);
280 write_lock_bh(&set->lock);
281 res = set->type->delip_kernel(set, skb, &ip, flags, i++);
282 write_unlock_bh(&set->lock);
283 i += !!(set->type->features & IPSET_DATA_DOUBLE);
284 } while ((res == 0 || res == -EEXIST)
286 && follow_bindings(index, set, ip));
287 read_unlock_bh(&ip_set_lock);
290 /* Register and deregister settype */
/* Find a registered set type by name (bounded compare). Caller must hold
 * ip_set_lock; returns the type or (in elided code) NULL. */
292 static inline struct ip_set_type *
293 find_set_type(const char *name)
295 struct ip_set_type *set_type;
297 list_for_each_entry(set_type, &set_type_list, list)
298 if (!strncmp(set_type->typename, name, IP_SET_MAXNAMELEN - 1))
/* Register a set type module with the core. Rejects mismatched protocol
 * versions and duplicate names; pins this core module via try_module_get
 * so it cannot unload while a type is registered. */
304 ip_set_register_set_type(struct ip_set_type *set_type)
308 if (set_type->protocol_version != IP_SET_PROTOCOL_VERSION) {
309 ip_set_printk("'%s' uses wrong protocol version %u (want %u)",
311 set_type->protocol_version,
312 IP_SET_PROTOCOL_VERSION);
316 write_lock_bh(&ip_set_lock);
317 if (find_set_type(set_type->typename)) {
319 ip_set_printk("'%s' already registered!",
324 if (!try_module_get(THIS_MODULE)) {
328 list_add(&set_type->list, &set_type_list);
329 DP("'%s' registered.", set_type->typename);
331 write_unlock_bh(&ip_set_lock);
/* Deregister a set type: drop it from the registry and release the module
 * reference taken at registration. Warns (and, per elided code, presumably
 * bails out) if the type was never registered. */
336 ip_set_unregister_set_type(struct ip_set_type *set_type)
338 write_lock_bh(&ip_set_lock);
339 if (!find_set_type(set_type->typename)) {
340 ip_set_printk("'%s' not registered?",
344 list_del(&set_type->list);
345 module_put(THIS_MODULE);
346 DP("'%s' unregistered.", set_type->typename);
348 write_unlock_bh(&ip_set_lock);
357 * Find set by name, reference it once. The reference makes sure the
358 * thing pointed to, does not go away under our feet. Drop the reference
359 * later, using ip_set_put().
/* Linear scan over ip_set_list under the app mutex; the matching slot's
 * reference is taken in elided code before the index is returned. */
362 ip_set_get_byname(const char *name)
364 ip_set_id_t i, index = IP_SET_INVALID_ID;
366 down(&ip_set_app_mutex);
367 for (i = 0; i < ip_set_max; i++) {
368 if (ip_set_list[i] != NULL
369 && strcmp(ip_set_list[i]->name, name) == 0) {
375 up(&ip_set_app_mutex);
380 * Find set by index, reference it once. The reference makes sure the
381 * thing pointed to, does not go away under our feet. Drop the reference
382 * later, using ip_set_put().
/* NOTE(review): the early `return IP_SET_INVALID_ID` at lines 389-390
 * appears to execute after down(&ip_set_app_mutex) at line 387 without a
 * matching up() — that would leak the mutex. Verify against the unelided
 * source whether cleanup lines were dropped from this listing. */
385 ip_set_get_byindex(ip_set_id_t index)
387 down(&ip_set_app_mutex);
389 if (index >= ip_set_max)
390 return IP_SET_INVALID_ID;
392 if (ip_set_list[index])
395 index = IP_SET_INVALID_ID;
397 up(&ip_set_app_mutex);
402 * If the given set pointer points to a valid set, decrement
403 * reference count by 1. The caller shall not assume the index
404 * to be valid, after calling this function.
406 void ip_set_put(ip_set_id_t index)
408 down(&ip_set_app_mutex);
409 if (ip_set_list[index])
411 up(&ip_set_app_mutex);
414 /* Find a set by name or index */
/* Unlocked lookups used on paths where the caller already serializes
 * access (the app mutex): map a name or an index to a valid index, or
 * IP_SET_INVALID_ID. No reference is taken. */
416 ip_set_find_byname(const char *name)
418 ip_set_id_t i, index = IP_SET_INVALID_ID;
420 for (i = 0; i < ip_set_max; i++) {
421 if (ip_set_list[i] != NULL
422 && strcmp(ip_set_list[i]->name, name) == 0) {
431 ip_set_find_byindex(ip_set_id_t index)
433 if (index >= ip_set_max || ip_set_list[index] == NULL)
434 index = IP_SET_INVALID_ID;
440 * Add, del, test, bind and unbind
/* Userspace-path membership test: dispatch to the type's testip under the
 * set's read lock. `ip` receives the parsed address (used by callers for
 * binding lookups). */
444 __ip_set_testip(struct ip_set *set,
451 read_lock_bh(&set->lock);
452 res = set->type->testip(set, data, size, ip);
453 read_unlock_bh(&set->lock);
/* Userspace-path add: call the type's addip under the set's write lock,
 * retrying (via set->type->retry) while the type reports -EAGAIN, e.g. a
 * hash type needing a resize between attempts. */
459 __ip_set_addip(ip_set_id_t index,
463 struct ip_set *set = ip_set_list[index];
469 write_lock_bh(&set->lock);
470 res = set->type->addip(set, data, size, &ip);
471 write_unlock_bh(&set->lock);
472 } while (res == -EAGAIN
474 && (res = set->type->retry(set)) == 0);
/* Thin wrapper: strip the common ip_set_req_adt header before handing the
 * type-specific payload to __ip_set_addip. */
480 ip_set_addip(ip_set_id_t index,
485 return __ip_set_addip(index,
486 data + sizeof(struct ip_set_req_adt),
487 size - sizeof(struct ip_set_req_adt));
/* Userspace-path delete: strip the ip_set_req_adt header and call the
 * type's delip under the set's write lock. No -EAGAIN retry loop here —
 * deletion never needs a resize. */
491 ip_set_delip(ip_set_id_t index,
495 struct ip_set *set = ip_set_list[index];
500 write_lock_bh(&set->lock);
501 res = set->type->delip(set,
502 data + sizeof(struct ip_set_req_adt),
503 size - sizeof(struct ip_set_req_adt),
505 write_unlock_bh(&set->lock);
/* Userspace-path test wrapper: positive "found" results from the type are
 * folded to -EEXIST so the sockopt caller gets a conventional errno. */
511 ip_set_testip(ip_set_id_t index,
515 struct ip_set *set = ip_set_list[index];
520 res = __ip_set_testip(set,
521 data + sizeof(struct ip_set_req_adt),
522 size - sizeof(struct ip_set_req_adt),
525 return (res > 0 ? -EEXIST : res);
/* Bind an address of this set (or the set's default) to another set.
 * IPSET_TOKEN_DEFAULT in the request selects the default-binding branch:
 * the target name follows the request struct, the old default's reference
 * is dropped and the new one taken under ip_set_lock. Otherwise the
 * address is parsed via __ip_set_testip and a per-IP binding is added to
 * the hash. Request strings are forcibly NUL-terminated before use. */
529 ip_set_bindip(ip_set_id_t index,
533 struct ip_set *set = ip_set_list[index];
534 struct ip_set_req_bind *req_bind;
540 if (size < sizeof(struct ip_set_req_bind))
543 req_bind = (struct ip_set_req_bind *) data;
544 req_bind->binding[IP_SET_MAXNAMELEN - 1] = '\0';
546 if (strcmp(req_bind->binding, IPSET_TOKEN_DEFAULT) == 0) {
547 /* Default binding of a set */
550 if (size != sizeof(struct ip_set_req_bind) + IP_SET_MAXNAMELEN)
553 binding_name = (char *)(data + sizeof(struct ip_set_req_bind));
554 binding_name[IP_SET_MAXNAMELEN - 1] = '\0';
556 binding = ip_set_find_byname(binding_name);
557 if (binding == IP_SET_INVALID_ID)
560 write_lock_bh(&ip_set_lock);
561 /* Sets as binding values are referenced */
562 if (set->binding != IP_SET_INVALID_ID)
563 __ip_set_put(set->binding);
564 set->binding = binding;
565 __ip_set_get(set->binding);
566 write_unlock_bh(&ip_set_lock);
570 binding = ip_set_find_byname(req_bind->binding);
571 if (binding == IP_SET_INVALID_ID)
574 res = __ip_set_testip(set,
575 data + sizeof(struct ip_set_req_bind),
576 size - sizeof(struct ip_set_req_bind),
578 DP("set %s, ip: %u.%u.%u.%u, binding %s",
579 set->name, HIPQUAD(ip), ip_set_list[binding]->name);
582 res = ip_set_hash_add(set->id, ip, binding);
/* Apply fn(set, args...) to every non-NULL slot of ip_set_list. Locking
 * is the caller's responsibility. */
587 #define FOREACH_SET_DO(fn, args...) \
590 struct ip_set *__set; \
592 for (__i = 0; __i < ip_set_max; __i++) { \
593 __set = ip_set_list[__i]; \
595 fn(__set , ##args); \
/* FOREACH_HASH_RW_DO callback: delete a hash entry iff it belongs to set
 * `id`. Needs ip_set_lock held for writing (via __set_hash_del). */
600 __set_hash_del_byid(struct ip_set_hash *set_hash, ip_set_id_t id)
602 if (set_hash->id == id)
603 __set_hash_del(set_hash);
/* FOREACH_SET_DO callback: clear a set's default binding, dropping the
 * reference the binding held. */
607 __unbind_default(struct ip_set *set)
609 if (set->binding != IP_SET_INVALID_ID) {
610 /* Sets as binding values are referenced */
611 __ip_set_put(set->binding);
612 set->binding = IP_SET_INVALID_ID;
/* Remove bindings. With index == IP_SET_INVALID_ID the request targets all
 * sets: IPSET_TOKEN_DEFAULT clears every default binding, IPSET_TOKEN_ALL
 * flushes the whole bindings hash. For a single set, the same tokens clear
 * that set's default binding or all of its hash entries; otherwise the
 * address is parsed and its single per-IP binding removed. */
617 ip_set_unbindip(ip_set_id_t index,
622 struct ip_set_req_bind *req_bind;
627 if (size < sizeof(struct ip_set_req_bind))
630 req_bind = (struct ip_set_req_bind *) data;
631 req_bind->binding[IP_SET_MAXNAMELEN - 1] = '\0';
633 DP("%u %s", index, req_bind->binding);
634 if (index == IP_SET_INVALID_ID) {
636 if (strcmp(req_bind->binding, IPSET_TOKEN_DEFAULT) == 0) {
637 /* Default binding of sets */
638 write_lock_bh(&ip_set_lock);
639 FOREACH_SET_DO(__unbind_default);
640 write_unlock_bh(&ip_set_lock);
642 } else if (strcmp(req_bind->binding, IPSET_TOKEN_ALL) == 0) {
643 /* Flush all bindings of all sets*/
644 write_lock_bh(&ip_set_lock);
645 FOREACH_HASH_RW_DO(__set_hash_del);
646 write_unlock_bh(&ip_set_lock);
649 DP("unreachable reached!");
653 set = ip_set_list[index];
655 if (strcmp(req_bind->binding, IPSET_TOKEN_DEFAULT) == 0) {
656 /* Default binding of set */
657 ip_set_id_t binding = ip_set_find_byindex(set->binding);
659 if (binding == IP_SET_INVALID_ID)
662 write_lock_bh(&ip_set_lock);
663 /* Sets in hash values are referenced */
664 __ip_set_put(set->binding);
665 set->binding = IP_SET_INVALID_ID;
666 write_unlock_bh(&ip_set_lock);
669 } else if (strcmp(req_bind->binding, IPSET_TOKEN_ALL) == 0) {
670 /* Flush all bindings */
672 write_lock_bh(&ip_set_lock);
673 FOREACH_HASH_RW_DO(__set_hash_del_byid, set->id);
674 write_unlock_bh(&ip_set_lock);
678 res = __ip_set_testip(set,
679 data + sizeof(struct ip_set_req_bind),
680 size - sizeof(struct ip_set_req_bind),
683 DP("set %s, ip: %u.%u.%u.%u", set->name, HIPQUAD(ip));
685 res = ip_set_hash_del(set->id, ip);
/* Test whether a binding exists. IPSET_TOKEN_DEFAULT checks the set's
 * default binding against the named target; otherwise the address is
 * parsed and the per-IP binding in the hash is compared. Matching yields
 * -EEXIST (consistent with ip_set_testip's convention). */
691 ip_set_testbind(ip_set_id_t index,
695 struct ip_set *set = ip_set_list[index];
696 struct ip_set_req_bind *req_bind;
702 if (size < sizeof(struct ip_set_req_bind))
705 req_bind = (struct ip_set_req_bind *) data;
706 req_bind->binding[IP_SET_MAXNAMELEN - 1] = '\0';
708 if (strcmp(req_bind->binding, IPSET_TOKEN_DEFAULT) == 0) {
709 /* Default binding of set */
712 if (size != sizeof(struct ip_set_req_bind) + IP_SET_MAXNAMELEN)
715 binding_name = (char *)(data + sizeof(struct ip_set_req_bind));
716 binding_name[IP_SET_MAXNAMELEN - 1] = '\0';
718 binding = ip_set_find_byname(binding_name);
719 if (binding == IP_SET_INVALID_ID)
722 res = (set->binding == binding) ? -EEXIST : 0;
726 binding = ip_set_find_byname(req_bind->binding);
727 if (binding == IP_SET_INVALID_ID)
731 res = __ip_set_testip(set,
732 data + sizeof(struct ip_set_req_bind),
733 size - sizeof(struct ip_set_req_bind),
735 DP("set %s, ip: %u.%u.%u.%u, binding %s",
736 set->name, HIPQUAD(ip), ip_set_list[binding]->name);
739 res = (ip_set_find_in_hash(set->id, ip) == binding)
/* Locked type lookup for ip_set_create: takes ip_set_lock for reading and
 * — judging by the unlock at line 753 being conditional/elided — returns
 * with the lock STILL HELD on success so the caller can module_get the
 * type safely; the caller drops it (see lines 841/845). */
745 static struct ip_set_type *
746 find_set_type_rlock(const char *typename)
748 struct ip_set_type *type;
750 read_lock_bh(&ip_set_lock);
751 type = find_set_type(typename);
753 read_unlock_bh(&ip_set_lock);
/* Find a free slot for a new set named `name`: records the first empty
 * index in *id while rejecting duplicate names, then (second pass) checks
 * the chosen index is not already in use as an id by a swapped set —
 * indices double as ids and must stay unique. */
759 find_free_id(const char *name,
765 *id = IP_SET_INVALID_ID;
766 for (i = 0; i < ip_set_max; i++) {
767 if (ip_set_list[i] == NULL) {
768 if (*id == IP_SET_INVALID_ID)
770 } else if (strcmp(name, ip_set_list[i]->name) == 0)
774 if (*id == IP_SET_INVALID_ID)
775 /* No free slot remained */
777 /* Check that index is usable as id (swapping) */
779 for (i = 0; i < ip_set_max; i++) {
780 if (ip_set_list[i] != NULL
781 && ip_set_list[i]->id == *id) {
/* Create a new set of the given type. Flow: allocate and init the base
 * struct without locks; resolve the type (loading "ip_set_<type>" via
 * request_module if needed) and pin its module; build the type-private
 * part unlocked via type->create(); finally take ip_set_lock, claim a free
 * slot (which must equal `restore` when restoring a saved dump) and
 * publish the set. The tail (lines 884+) is the error-unwind path:
 * destroy the private part and drop the module reference. */
793 ip_set_create(const char *name,
794 const char *typename,
800 ip_set_id_t index = 0, id;
803 DP("setname: %s, typename: %s, id: %u", name, typename, restore);
805 * First, and without any locks, allocate and initialize
806 * a normal base set structure.
808 set = kmalloc(sizeof(struct ip_set), GFP_KERNEL);
811 set->lock = RW_LOCK_UNLOCKED;
812 strncpy(set->name, name, IP_SET_MAXNAMELEN);
813 set->binding = IP_SET_INVALID_ID;
814 atomic_set(&set->ref, 0);
817 * Next, take the &ip_set_lock, check that we know the type,
818 * and take a reference on the type, to make sure it
819 * stays available while constructing our new set.
821 * After referencing the type, we drop the &ip_set_lock,
822 * and let the new set construction run without locks.
824 set->type = find_set_type_rlock(typename);
825 if (set->type == NULL) {
826 /* Try loading the module */
827 char modulename[IP_SET_MAXNAMELEN + strlen("ip_set_") + 1];
828 strcpy(modulename, "ip_set_");
829 strcat(modulename, typename);
830 DP("try to load %s", modulename);
831 request_module(modulename);
832 set->type = find_set_type_rlock(typename);
834 if (set->type == NULL) {
835 ip_set_printk("no set type '%s', set '%s' not created",
840 if (!try_module_get(set->type->me)) {
841 read_unlock_bh(&ip_set_lock);
845 read_unlock_bh(&ip_set_lock);
848 * Without holding any locks, create private part.
850 res = set->type->create(set, data, size);
854 /* BTW, res==0 here. */
857 * Here, we have a valid, constructed set. &ip_set_lock again,
858 * find free id/index and check that it is not already in
861 write_lock_bh(&ip_set_lock);
862 if ((res = find_free_id(set->name, &index, &id)) != 0) {
867 /* Make sure restore gets the same index */
868 if (restore != IP_SET_INVALID_ID && index != restore) {
869 DP("Can't restore, sets are screwed up");
875 * Finally! Add our shiny new set to the list, and be done.
877 DP("create: '%s' created with index %u, id %u!", set->name, index, id);
879 ip_set_list[index] = set;
880 write_unlock_bh(&ip_set_lock);
884 write_unlock_bh(&ip_set_lock);
885 set->type->destroy(set);
887 module_put(set->type->me);
894 * Destroy a given existing set
/* Under ip_set_lock: drop all hash bindings owned by the set, release its
 * default-binding reference, and clear its slot. The type destructor and
 * module_put run after the lock is dropped — destroy() may sleep. */
897 ip_set_destroy_set(ip_set_id_t index)
899 struct ip_set *set = ip_set_list[index];
902 DP("set: %s", set->name);
903 write_lock_bh(&ip_set_lock);
904 FOREACH_HASH_RW_DO(__set_hash_del_byid, set->id);
905 if (set->binding != IP_SET_INVALID_ID)
906 __ip_set_put(set->binding);
907 ip_set_list[index] = NULL;
908 write_unlock_bh(&ip_set_lock);
910 /* Must call it without holding any lock */
911 set->type->destroy(set);
912 module_put(set->type->me);
917 * Destroy a set - or all sets
918 * Sets must not be referenced/used.
/* Refuses to destroy referenced sets; the app mutex (held by the sockopt
 * caller) keeps the ref counts stable across the check-then-destroy. With
 * IP_SET_INVALID_ID, all sets are checked first, then all destroyed. */
921 ip_set_destroy(ip_set_id_t index)
925 /* ref modification always protected by the mutex */
926 if (index != IP_SET_INVALID_ID) {
927 if (atomic_read(&ip_set_list[index]->ref))
929 ip_set_destroy_set(index);
931 for (i = 0; i < ip_set_max; i++) {
932 if (ip_set_list[i] != NULL
933 && (atomic_read(&ip_set_list[i]->ref)))
937 for (i = 0; i < ip_set_max; i++) {
938 if (ip_set_list[i] != NULL)
939 ip_set_destroy_set(i);
/* Empty one set's members via its type->flush under the set's write lock. */
946 ip_set_flush_set(struct ip_set *set)
948 DP("set: %s %u", set->name, set->id);
950 write_lock_bh(&set->lock);
951 set->type->flush(set);
952 write_unlock_bh(&set->lock);
956 * Flush data in a set - or in all sets
/* IP_SET_INVALID_ID flushes every set via FOREACH_SET_DO. */
959 ip_set_flush(ip_set_id_t index)
961 if (index != IP_SET_INVALID_ID) {
962 IP_SET_ASSERT(ip_set_list[index]);
963 ip_set_flush_set(ip_set_list[index]);
965 FOREACH_SET_DO(ip_set_flush_set);
/* Rename a set, rejecting a name already used by another set (the scan is
 * done under ip_set_lock so the check and the rename are atomic).
 * NOTE(review): strncpy at line 989 does not guarantee NUL-termination
 * when the new name is exactly IP_SET_MAXNAMELEN long; the sockopt caller
 * terminates the request string, so this is presumably safe — confirm. */
972 ip_set_rename(ip_set_id_t index, const char *name)
974 struct ip_set *set = ip_set_list[index];
978 DP("set: %s to %s", set->name, name);
979 write_lock_bh(&ip_set_lock);
980 for (i = 0; i < ip_set_max; i++) {
981 if (ip_set_list[i] != NULL
982 && strncmp(ip_set_list[i]->name,
984 IP_SET_MAXNAMELEN - 1) == 0) {
989 strncpy(set->name, name, IP_SET_MAXNAMELEN);
991 write_unlock_bh(&ip_set_lock);
996 * Swap two sets so that name/index points to the other.
997 * References are also swapped.
/* Swap names, reference counts and list slots of two sets under
 * ip_set_lock; the set ids travel with the structs so hash bindings keep
 * following the data. Feature-identical types only (artificial guard so
 * match-set semantics cannot change under a rule). */
1000 ip_set_swap(ip_set_id_t from_index, ip_set_id_t to_index)
1002 struct ip_set *from = ip_set_list[from_index];
1003 struct ip_set *to = ip_set_list[to_index];
1004 char from_name[IP_SET_MAXNAMELEN];
1007 DP("set: %s to %s", from->name, to->name);
1008 /* Features must not change. Artifical restriction. */
1009 if (from->type->features != to->type->features)
1012 /* No magic here: ref munging protected by the mutex */
1013 write_lock_bh(&ip_set_lock);
1014 strncpy(from_name, from->name, IP_SET_MAXNAMELEN);
1015 from_ref = atomic_read(&from->ref);
1017 strncpy(from->name, to->name, IP_SET_MAXNAMELEN);
1018 atomic_set(&from->ref, atomic_read(&to->ref));
1019 strncpy(to->name, from_name, IP_SET_MAXNAMELEN);
1020 atomic_set(&to->ref, from_ref);
1022 ip_set_list[from_index] = to;
1023 ip_set_list[to_index] = from;
1025 write_unlock_bh(&ip_set_lock);
/* FOREACH_HASH_DO callbacks used by LIST/SAVE: the two *_size helpers
 * accumulate the space needed for this set's binding records; the third
 * serializes each matching (ip, binding) pair into the output buffer,
 * advancing *used. */
1034 __set_hash_bindings_size_list(struct ip_set_hash *set_hash,
1035 ip_set_id_t id, size_t *size)
1037 if (set_hash->id == id)
1038 *size += sizeof(struct ip_set_hash_list);
1042 __set_hash_bindings_size_save(struct ip_set_hash *set_hash,
1043 ip_set_id_t id, size_t *size)
1045 if (set_hash->id == id)
1046 *size += sizeof(struct ip_set_hash_save);
1050 __set_hash_bindings(struct ip_set_hash *set_hash,
1051 ip_set_id_t id, void *data, int *used)
1053 if (set_hash->id == id) {
1054 struct ip_set_hash_list *hash_list =
1055 (struct ip_set_hash_list *)(data + *used);
1057 hash_list->ip = set_hash->ip;
1058 hash_list->binding = set_hash->binding;
1059 *used += sizeof(struct ip_set_hash_list);
/* Serialize one set for IP_SET_OP_LIST into data+*used, checking `len`
 * before each section: fixed ip_set_list header, type-specific header,
 * members, then binding records. The set's read lock covers header and
 * members; the not_enough_mem exit (lines 1121+) drops it and reports so
 * the caller can retry with a larger buffer. */
1063 static int ip_set_list_set(ip_set_id_t index,
1068 struct ip_set *set = ip_set_list[index];
1069 struct ip_set_list *set_list;
1071 /* Pointer to our header */
1072 set_list = (struct ip_set_list *) (data + *used);
1074 DP("set: %s, used: %d %p %p", set->name, *used, data, data + *used);
1076 /* Get and ensure header size */
1077 if (*used + sizeof(struct ip_set_list) > len)
1078 goto not_enough_mem;
1079 *used += sizeof(struct ip_set_list);
1081 read_lock_bh(&set->lock);
1082 /* Get and ensure set specific header size */
1083 set_list->header_size = set->type->header_size;
1084 if (*used + set_list->header_size > len)
1087 /* Fill in the header */
1088 set_list->index = index;
1089 set_list->binding = set->binding;
1090 set_list->ref = atomic_read(&set->ref);
1092 /* Fill in set spefific header data */
1093 set->type->list_header(set, data + *used);
1094 *used += set_list->header_size;
1096 /* Get and ensure set specific members size */
1097 set_list->members_size = set->type->list_members_size(set);
1098 if (*used + set_list->members_size > len)
1101 /* Fill in set spefific members data */
1102 set->type->list_members(set, data + *used);
1103 *used += set_list->members_size;
1104 read_unlock_bh(&set->lock);
1108 /* Get and ensure set specific bindings size */
1109 set_list->bindings_size = 0;
1110 FOREACH_HASH_DO(__set_hash_bindings_size_list,
1111 set->id, &set_list->bindings_size);
1112 if (*used + set_list->bindings_size > len)
1113 goto not_enough_mem;
1115 /* Fill in set spefific bindings data */
1116 FOREACH_HASH_DO(__set_hash_bindings, set->id, data, used);
1121 read_unlock_bh(&set->lock);
1123 DP("not enough mem, try again");
/* Serialize one set for IP_SET_OP_SAVE: like ip_set_list_set but emits an
 * ip_set_save header (no ref count, bindings handled separately by
 * ip_set_save_bindings). Every section is bounds-checked against `len`;
 * failure exits through the unlock at line 1180 and the retry report. */
1130 static int ip_set_save_set(ip_set_id_t index,
1136 struct ip_set_save *set_save;
1138 /* Pointer to our header */
1139 set_save = (struct ip_set_save *) (data + *used);
1141 /* Get and ensure header size */
1142 if (*used + sizeof(struct ip_set_save) > len)
1143 goto not_enough_mem;
1144 *used += sizeof(struct ip_set_save);
1146 set = ip_set_list[index];
1147 DP("set: %s, used: %u(%u) %p %p", set->name, *used, len,
1148 data, data + *used);
1150 read_lock_bh(&set->lock);
1151 /* Get and ensure set specific header size */
1152 set_save->header_size = set->type->header_size;
1153 if (*used + set_save->header_size > len)
1156 /* Fill in the header */
1157 set_save->index = index;
1158 set_save->binding = set->binding;
1160 /* Fill in set spefific header data */
1161 set->type->list_header(set, data + *used);
1162 *used += set_save->header_size;
1164 DP("set header filled: %s, used: %u(%u) %p %p", set->name, *used,
1165 set_save->header_size, data, data + *used);
1166 /* Get and ensure set specific members size */
1167 set_save->members_size = set->type->list_members_size(set);
1168 if (*used + set_save->members_size > len)
1171 /* Fill in set spefific members data */
1172 set->type->list_members(set, data + *used);
1173 *used += set_save->members_size;
1174 read_unlock_bh(&set->lock);
1175 DP("set members filled: %s, used: %u(%u) %p %p", set->name, *used,
1176 set_save->members_size, data, data + *used);
1180 read_unlock_bh(&set->lock);
1182 DP("not enough mem, try again");
/* FOREACH_HASH_DO callback for SAVE: append one ip_set_hash_save record
 * per binding of the requested set (or of every set when id is
 * IP_SET_INVALID_ID), bounds-checked against `len`; the elided branch at
 * lines 1199+ flags the failure through *res. */
1187 __set_hash_save_bindings(struct ip_set_hash *set_hash,
1195 && (id == IP_SET_INVALID_ID || set_hash->id == id)) {
1196 struct ip_set_hash_save *hash_save =
1197 (struct ip_set_hash_save *)(data + *used);
1198 /* Ensure bindings size */
1199 if (*used + sizeof(struct ip_set_hash_save) > len) {
1203 hash_save->id = set_hash->id;
1204 hash_save->ip = set_hash->ip;
1205 hash_save->binding = set_hash->binding;
1206 *used += sizeof(struct ip_set_hash_save);
/* Append the bindings section of a SAVE dump: first a marker record with
 * index == IP_SET_INVALID_ID and zero sizes (the restore parser uses it to
 * switch from sets to bindings), then one record per binding. The index
 * argument is translated to the stable set id, since bindings are keyed
 * by id in the hash. */
1210 static int ip_set_save_bindings(ip_set_id_t index,
1216 struct ip_set_save *set_save;
1218 DP("used %u, len %u", *used, len);
1219 /* Get and ensure header size */
1220 if (*used + sizeof(struct ip_set_save) > len)
1224 set_save = (struct ip_set_save *) (data + *used);
1225 set_save->index = IP_SET_INVALID_ID;
1226 set_save->header_size = 0;
1227 set_save->members_size = 0;
1228 *used += sizeof(struct ip_set_save);
1230 DP("marker added used %u, len %u", *used, len);
1231 /* Fill in bindings data */
1232 if (index != IP_SET_INVALID_ID)
1233 /* Sets are identified by id in hash */
1234 index = ip_set_list[index]->id;
1235 FOREACH_HASH_DO(__set_hash_save_bindings, index, data, used, len, &res);
/* Replay a SAVE dump: phase 1 walks ip_set_restore records, re-creating
 * each set (ip_set_create must yield the saved index) and re-adding its
 * members one reqsize-chunk at a time via __ip_set_addip; the
 * IP_SET_INVALID_ID marker record ends phase 1. Phase 2 walks
 * ip_set_hash_save records and re-establishes per-IP bindings (non-zero
 * ip) or default bindings (zero ip, per the elided branch around line
 * 1336). `line` tracks the restore-file position for error reporting.
 * All size fields come from the dump and are bounds-checked against `len`
 * before use. */
1243 static int ip_set_restore(void *data,
1247 int line = 0, used = 0, members_size;
1249 struct ip_set_hash_save *hash_save;
1250 struct ip_set_restore *set_restore;
1253 /* Loop to restore sets */
1257 DP("%u %u %u", used, sizeof(struct ip_set_restore), len);
1258 /* Get and ensure header size */
1259 if (used + sizeof(struct ip_set_restore) > len)
1261 set_restore = (struct ip_set_restore *) (data + used);
1262 used += sizeof(struct ip_set_restore);
1264 /* Ensure data size */
1266 + set_restore->header_size
1267 + set_restore->members_size > len)
1271 if (set_restore->index == IP_SET_INVALID_ID) {
1276 /* Try to create the set */
1277 DP("restore %s %s", set_restore->name, set_restore->typename);
1278 res = ip_set_create(set_restore->name,
1279 set_restore->typename,
1282 set_restore->header_size);
1286 used += set_restore->header_size;
1288 index = ip_set_find_byindex(set_restore->index);
1289 DP("index %u, restore_index %u", index, set_restore->index);
1290 if (index != set_restore->index)
1292 /* Try to restore members data */
1293 set = ip_set_list[index];
1295 DP("members_size %u reqsize %u",
1296 set_restore->members_size, set->type->reqsize);
1297 while (members_size + set->type->reqsize <=
1298 set_restore->members_size) {
1300 DP("members: %u, line %u", members_size, line);
1301 res = __ip_set_addip(index,
1302 data + used + members_size,
1303 set->type->reqsize);
1304 if (!(res == 0 || res == -EEXIST))
1306 members_size += set->type->reqsize;
1309 DP("members_size %u %u",
1310 set_restore->members_size, members_size);
1311 if (members_size != set_restore->members_size)
1313 used += set_restore->members_size;
1317 /* Loop to restore bindings */
1318 while (used < len) {
1321 DP("restore binding, line %u", line);
1322 /* Get and ensure size */
1323 if (used + sizeof(struct ip_set_hash_save) > len)
1325 hash_save = (struct ip_set_hash_save *) (data + used);
1326 used += sizeof(struct ip_set_hash_save);
1328 /* hash_save->id is used to store the index */
1329 index = ip_set_find_byindex(hash_save->id);
1330 DP("restore binding index %u, id %u, %u -> %u",
1331 index, hash_save->id, hash_save->ip, hash_save->binding);
1332 if (index != hash_save->id)
1335 set = ip_set_list[hash_save->id];
1336 /* Null valued IP means default binding */
1338 res = ip_set_hash_add(set->id,
1340 hash_save->binding);
1342 IP_SET_ASSERT(set->binding == IP_SET_INVALID_ID);
1343 write_lock_bh(&ip_set_lock);
1344 set->binding = hash_save->binding;
1345 __ip_set_get(set->binding);
1346 write_unlock_bh(&ip_set_lock);
1347 DP("default binding: %u", set->binding);
/* setsockopt(SO_IP_SET) entry point. Requires CAP_NET_ADMIN; copies the
 * whole request into a vmalloc'd buffer, takes the app mutex (the single
 * serialization point for all userspace set manipulation), validates the
 * protocol version for the early op range, then dispatches: management
 * ops (CREATE/DESTROY/FLUSH/RENAME/SWAP) are handled inline in the
 * switch; add/del/test/bind/unbind/testbind are routed through
 * adtfn_table indexed by (*op - IP_SET_OP_ADD_IP). Every name copied from
 * userspace is forcibly NUL-terminated before use. UNBIND with
 * IP_SET_INVALID_ID is the one adt op allowed without a valid index
 * (":all:" flushing). Exit path releases the mutex and frees the buffer. */
1359 ip_set_sockfn_set(struct sock *sk, int optval, void *user, unsigned int len)
1362 int res = 0; /* Assume OK */
1364 struct ip_set_req_adt *req_adt;
1365 ip_set_id_t index = IP_SET_INVALID_ID;
1366 int (*adtfn)(ip_set_id_t index,
1367 const void *data, size_t size);
1369 int (*fn)(ip_set_id_t index,
1370 const void *data, size_t size);
1372 { { ip_set_addip }, { ip_set_delip }, { ip_set_testip},
1373 { ip_set_bindip}, { ip_set_unbindip }, { ip_set_testbind },
1376 DP("optval=%d, user=%p, len=%d", optval, user, len);
1377 if (!capable(CAP_NET_ADMIN))
1379 if (optval != SO_IP_SET)
1381 if (len <= sizeof(unsigned)) {
1382 ip_set_printk("short userdata (want >%zu, got %u)",
1383 sizeof(unsigned), len);
1386 data = vmalloc(len);
1388 DP("out of mem for %u bytes", len);
1391 if (copy_from_user(data, user, len) != 0) {
1395 if (down_interruptible(&ip_set_app_mutex)) {
1400 op = (unsigned *)data;
1403 if (*op < IP_SET_OP_VERSION) {
1404 /* Check the version at the beginning of operations */
1405 struct ip_set_req_version *req_version =
1406 (struct ip_set_req_version *) data;
1407 if (req_version->version != IP_SET_PROTOCOL_VERSION) {
1414 case IP_SET_OP_CREATE:{
1415 struct ip_set_req_create *req_create
1416 = (struct ip_set_req_create *) data;
1418 if (len < sizeof(struct ip_set_req_create)) {
1419 ip_set_printk("short CREATE data (want >=%zu, got %u)",
1420 sizeof(struct ip_set_req_create), len);
1424 req_create->name[IP_SET_MAXNAMELEN - 1] = '\0';
1425 req_create->typename[IP_SET_MAXNAMELEN - 1] = '\0';
1426 res = ip_set_create(req_create->name,
1427 req_create->typename,
1429 data + sizeof(struct ip_set_req_create),
1430 len - sizeof(struct ip_set_req_create));
1433 case IP_SET_OP_DESTROY:{
1434 struct ip_set_req_std *req_destroy
1435 = (struct ip_set_req_std *) data;
1437 if (len != sizeof(struct ip_set_req_std)) {
1438 ip_set_printk("invalid DESTROY data (want %zu, got %u)",
1439 sizeof(struct ip_set_req_std), len);
1443 if (strcmp(req_destroy->name, IPSET_TOKEN_ALL) == 0) {
1444 /* Destroy all sets */
1445 index = IP_SET_INVALID_ID;
1447 req_destroy->name[IP_SET_MAXNAMELEN - 1] = '\0';
1448 index = ip_set_find_byname(req_destroy->name);
1450 if (index == IP_SET_INVALID_ID) {
1456 res = ip_set_destroy(index);
1459 case IP_SET_OP_FLUSH:{
1460 struct ip_set_req_std *req_flush =
1461 (struct ip_set_req_std *) data;
1463 if (len != sizeof(struct ip_set_req_std)) {
1464 ip_set_printk("invalid FLUSH data (want %zu, got %u)",
1465 sizeof(struct ip_set_req_std), len);
1469 if (strcmp(req_flush->name, IPSET_TOKEN_ALL) == 0) {
1470 /* Flush all sets */
1471 index = IP_SET_INVALID_ID;
1473 req_flush->name[IP_SET_MAXNAMELEN - 1] = '\0';
1474 index = ip_set_find_byname(req_flush->name);
1476 if (index == IP_SET_INVALID_ID) {
1481 res = ip_set_flush(index);
1484 case IP_SET_OP_RENAME:{
1485 struct ip_set_req_create *req_rename
1486 = (struct ip_set_req_create *) data;
1488 if (len != sizeof(struct ip_set_req_create)) {
1489 ip_set_printk("invalid RENAME data (want %zu, got %u)",
1490 sizeof(struct ip_set_req_create), len);
1495 req_rename->name[IP_SET_MAXNAMELEN - 1] = '\0';
1496 req_rename->typename[IP_SET_MAXNAMELEN - 1] = '\0';
1498 index = ip_set_find_byname(req_rename->name);
1499 if (index == IP_SET_INVALID_ID) {
1503 res = ip_set_rename(index, req_rename->typename);
1506 case IP_SET_OP_SWAP:{
1507 struct ip_set_req_create *req_swap
1508 = (struct ip_set_req_create *) data;
1509 ip_set_id_t to_index;
1511 if (len != sizeof(struct ip_set_req_create)) {
1512 ip_set_printk("invalid SWAP data (want %zu, got %u)",
1513 sizeof(struct ip_set_req_create), len);
1518 req_swap->name[IP_SET_MAXNAMELEN - 1] = '\0';
1519 req_swap->typename[IP_SET_MAXNAMELEN - 1] = '\0';
1521 index = ip_set_find_byname(req_swap->name);
1522 if (index == IP_SET_INVALID_ID) {
1526 to_index = ip_set_find_byname(req_swap->typename);
1527 if (to_index == IP_SET_INVALID_ID) {
1531 res = ip_set_swap(index, to_index);
1535 break; /* Set identified by id */
1538 /* There we may have add/del/test/bind/unbind/test_bind operations */
1539 if (*op < IP_SET_OP_ADD_IP || *op > IP_SET_OP_TEST_BIND_SET) {
1543 adtfn = adtfn_table[*op - IP_SET_OP_ADD_IP].fn;
1545 if (len < sizeof(struct ip_set_req_adt)) {
1546 ip_set_printk("short data in adt request (want >=%zu, got %u)",
1547 sizeof(struct ip_set_req_adt), len);
1551 req_adt = (struct ip_set_req_adt *) data;
1553 /* -U :all: :all:|:default: uses IP_SET_INVALID_ID */
1554 if (!(*op == IP_SET_OP_UNBIND_SET
1555 && req_adt->index == IP_SET_INVALID_ID)) {
1556 index = ip_set_find_byindex(req_adt->index);
1557 if (index == IP_SET_INVALID_ID) {
1562 res = adtfn(index, data, len);
1565 up(&ip_set_app_mutex);
1569 DP("final result %d", res);
/*
 * Getsockopt handler for SO_IP_SET: services all "read" style requests
 * from the ipset userspace tool — protocol version probe, name/index
 * lookups, LIST/SAVE size queries, LIST, SAVE and RESTORE.
 *
 * The request is copied from userspace into a vmalloc'ed buffer, the
 * leading unsigned opcode word selects the operation, and the
 * (possibly updated) buffer is copied back to the caller.  All
 * userspace operations are serialized by ip_set_app_mutex.
 */
1574 ip_set_sockfn_get(struct sock *sk, int optval, void *user, int *len)
1578 ip_set_id_t index = IP_SET_INVALID_ID;
1582 DP("optval=%d, user=%p, len=%d", optval, user, *len);
/* Only privileged (CAP_NET_ADMIN) callers may query set state. */
1583 if (!capable(CAP_NET_ADMIN))
1585 if (optval != SO_IP_SET)
/* Every request starts with an unsigned opcode word. */
1587 if (*len < sizeof(unsigned)) {
1588 ip_set_printk("short userdata (want >=%zu, got %d)",
1589 sizeof(unsigned), *len);
/* Copy the whole request into kernel space. */
1592 data = vmalloc(*len);
1594 DP("out of mem for %d bytes", *len);
1597 if (copy_from_user(data, user, *len) != 0) {
/* Serialize against all other userspace ipset operations. */
1601 if (down_interruptible(&ip_set_app_mutex)) {
1606 op = (unsigned *) data;
1609 if (*op < IP_SET_OP_VERSION) {
1610 /* Check the version at the beginning of operations */
1611 struct ip_set_req_version *req_version =
1612 (struct ip_set_req_version *) data;
1613 if (req_version->version != IP_SET_PROTOCOL_VERSION) {
/* Report the kernel-side protocol version to userspace. */
1620 case IP_SET_OP_VERSION: {
1621 struct ip_set_req_version *req_version =
1622 (struct ip_set_req_version *) data;
1624 if (*len != sizeof(struct ip_set_req_version)) {
1625 ip_set_printk("invalid VERSION (want %zu, got %d)",
1626 sizeof(struct ip_set_req_version),
1632 req_version->version = IP_SET_PROTOCOL_VERSION;
1633 res = copy_to_user(user, req_version,
1634 sizeof(struct ip_set_req_version));
/* Resolve a set name to its index (IP_SET_INVALID_ID if unknown). */
1637 case IP_SET_OP_GET_BYNAME: {
1638 struct ip_set_req_get_set *req_get
1639 = (struct ip_set_req_get_set *) data;
1641 if (*len != sizeof(struct ip_set_req_get_set)) {
1642 ip_set_printk("invalid GET_BYNAME (want %zu, got %d)",
1643 sizeof(struct ip_set_req_get_set), *len);
/* Force NUL-termination of the userspace-supplied name. */
1647 req_get->set.name[IP_SET_MAXNAMELEN - 1] = '\0';
1648 index = ip_set_find_byname(req_get->set.name);
1649 req_get->set.index = index;
/* Resolve a set index to its name ("" if the index is unknown). */
1652 case IP_SET_OP_GET_BYINDEX: {
1653 struct ip_set_req_get_set *req_get
1654 = (struct ip_set_req_get_set *) data;
1656 if (*len != sizeof(struct ip_set_req_get_set)) {
1657 ip_set_printk("invalid GET_BYINDEX (want %zu, got %d)",
1658 sizeof(struct ip_set_req_get_set), *len);
1662 req_get->set.name[IP_SET_MAXNAMELEN - 1] = '\0';
1663 index = ip_set_find_byindex(req_get->set.index);
/* NOTE(review): strncpy with the full buffer size may leave the
 * destination unterminated — presumably safe because set names are
 * NUL-terminated at creation time; confirm against ip_set_create(). */
1664 strncpy(req_get->set.name,
1665 index == IP_SET_INVALID_ID ? ""
1666 : ip_set_list[index]->name, IP_SET_MAXNAMELEN);
/* Look up index and type name of a set, for add/del/test requests. */
1669 case IP_SET_OP_ADT_GET: {
1670 struct ip_set_req_adt_get *req_get
1671 = (struct ip_set_req_adt_get *) data;
1673 if (*len != sizeof(struct ip_set_req_adt_get)) {
1674 ip_set_printk("invalid ADT_GET (want %zu, got %d)",
1675 sizeof(struct ip_set_req_adt_get), *len);
1679 req_get->set.name[IP_SET_MAXNAMELEN - 1] = '\0';
1680 index = ip_set_find_byname(req_get->set.name);
1681 if (index != IP_SET_INVALID_ID) {
1682 req_get->set.index = index;
1683 strncpy(req_get->typename,
1684 ip_set_list[index]->type->typename,
1685 IP_SET_MAXNAMELEN - 1);
/* Report ip_set_max and the number of currently existing sets. */
1692 case IP_SET_OP_MAX_SETS: {
1693 struct ip_set_req_max_sets *req_max_sets
1694 = (struct ip_set_req_max_sets *) data;
1697 if (*len != sizeof(struct ip_set_req_max_sets)) {
1698 ip_set_printk("invalid MAX_SETS (want %zu, got %d)",
1699 sizeof(struct ip_set_req_max_sets), *len);
/* IPSET_TOKEN_ALL selects "all sets" instead of a named one. */
1704 if (strcmp(req_max_sets->set.name, IPSET_TOKEN_ALL) == 0) {
1705 req_max_sets->set.index = IP_SET_INVALID_ID;
1707 req_max_sets->set.name[IP_SET_MAXNAMELEN - 1] = '\0';
1708 req_max_sets->set.index =
1709 ip_set_find_byname(req_max_sets->set.name);
1710 if (req_max_sets->set.index == IP_SET_INVALID_ID) {
1715 req_max_sets->max_sets = ip_set_max;
1716 req_max_sets->sets = 0;
/* Count the occupied slots of ip_set_list. */
1717 for (i = 0; i < ip_set_max; i++) {
1718 if (ip_set_list[i] != NULL)
1719 req_max_sets->sets++;
/*
 * Fill in the name list of the selected sets and compute the buffer
 * size a subsequent LIST (or SAVE) call will need, including the
 * per-set binding entries stored in the bindings hash.
 */
1723 case IP_SET_OP_LIST_SIZE:
1724 case IP_SET_OP_SAVE_SIZE: {
1725 struct ip_set_req_setnames *req_setnames
1726 = (struct ip_set_req_setnames *) data;
1727 struct ip_set_name_list *name_list;
1732 if (*len < sizeof(struct ip_set_req_setnames)) {
1733 ip_set_printk("short LIST_SIZE (want >=%zu, got %d)",
1734 sizeof(struct ip_set_req_setnames), *len);
1739 req_setnames->size = 0;
1740 used = sizeof(struct ip_set_req_setnames);
1741 for (i = 0; i < ip_set_max; i++) {
1742 if (ip_set_list[i] == NULL)
/* Append one ip_set_name_list entry per existing set. */
1744 name_list = (struct ip_set_name_list *)
1746 used += sizeof(struct ip_set_name_list);
/* Stop if the caller's buffer cannot hold another entry. */
1747 if (used > copylen) {
1751 set = ip_set_list[i];
1752 /* Fill in index, name, etc. */
1753 name_list->index = i;
1754 name_list->id = set->id;
1755 strncpy(name_list->name,
1757 IP_SET_MAXNAMELEN - 1);
1758 strncpy(name_list->typename,
1759 set->type->typename,
1760 IP_SET_MAXNAMELEN - 1);
1761 DP("filled %s of type %s, index %u\n",
1762 name_list->name, name_list->typename,
/* Only account sizes for the requested set (or all of them). */
1764 if (!(req_setnames->index == IP_SET_INVALID_ID
1765 || req_setnames->index == i))
1769 case IP_SET_OP_LIST_SIZE: {
1770 req_setnames->size += sizeof(struct ip_set_list)
1771 + set->type->header_size
1772 + set->type->list_members_size(set);
1773 /* Sets are identified by id in the hash */
1774 FOREACH_HASH_DO(__set_hash_bindings_size_list,
1775 set->id, &req_setnames->size);
1778 case IP_SET_OP_SAVE_SIZE: {
1779 req_setnames->size += sizeof(struct ip_set_save)
1780 + set->type->header_size
1781 + set->type->list_members_size(set);
1782 FOREACH_HASH_DO(__set_hash_bindings_size_save,
1783 set->id, &req_setnames->size);
1790 if (copylen != used) {
/* Dump headers, members and bindings of one set or of all sets. */
1796 case IP_SET_OP_LIST: {
1797 struct ip_set_req_list *req_list
1798 = (struct ip_set_req_list *) data;
1802 if (*len < sizeof(struct ip_set_req_list)) {
1803 ip_set_printk("short LIST (want >=%zu, got %d)",
1804 sizeof(struct ip_set_req_list), *len);
1808 index = req_list->index;
/* Validate the index before dereferencing ip_set_list[index]. */
1809 if (index != IP_SET_INVALID_ID
1810 && ip_set_find_byindex(index) != index) {
1815 if (index == IP_SET_INVALID_ID) {
/* List all sets; stop at the first error. */
1817 for (i = 0; i < ip_set_max && res == 0; i++) {
1818 if (ip_set_list[i] != NULL)
1819 res = ip_set_list_set(i, data, &used, *len);
1822 /* List an individual set */
1823 res = ip_set_list_set(index, data, &used, *len);
1827 else if (copylen != used) {
/* Like LIST, but in a format ip_set_restore() can read back. */
1833 case IP_SET_OP_SAVE: {
1834 struct ip_set_req_list *req_save
1835 = (struct ip_set_req_list *) data;
1839 if (*len < sizeof(struct ip_set_req_list)) {
1840 ip_set_printk("short SAVE (want >=%zu, got %d)",
1841 sizeof(struct ip_set_req_list), *len);
1845 index = req_save->index;
1846 if (index != IP_SET_INVALID_ID
1847 && ip_set_find_byindex(index) != index) {
1852 if (index == IP_SET_INVALID_ID) {
/* Save all sets; stop at the first error. */
1854 for (i = 0; i < ip_set_max && res == 0; i++) {
1855 if (ip_set_list[i] != NULL)
1856 res = ip_set_save_set(i, data, &used, *len);
1859 /* Save an individual set */
1860 res = ip_set_save_set(index, data, &used, *len);
/* Bindings are appended after the set data. */
1863 res = ip_set_save_bindings(index, data, &used, *len);
1867 else if (copylen != used) {
/* Replay a SAVE image: recreate sets and bindings from the buffer. */
1873 case IP_SET_OP_RESTORE: {
1874 struct ip_set_req_setnames *req_restore
1875 = (struct ip_set_req_setnames *) data;
/* The header's size field must match the actual payload length. */
1878 if (*len < sizeof(struct ip_set_req_setnames)
1879 || *len != req_restore->size) {
/* NOTE(review): %zu is used for req_restore->size — confirm that the
 * field really is size_t; if it is a fixed-width u32 the format
 * specifier is wrong and should be %u (with matching cast). */
1880 ip_set_printk("invalid RESTORE (want =%zu, got %d)",
1881 req_restore->size, *len);
1885 line = ip_set_restore(data + sizeof(struct ip_set_req_setnames),
1886 req_restore->size - sizeof(struct ip_set_req_setnames));
1887 DP("ip_set_restore: %u", line);
/* On failure, report the offending input line back via size. */
1890 req_restore->size = line;
1891 copylen = sizeof(struct ip_set_req_setnames);
1899 } /* end of switch(op) */
1902 DP("set %s, copylen %u", index != IP_SET_INVALID_ID
1903 && ip_set_list[index]
1904 ? ip_set_list[index]->name
1905 : ":all:", copylen);
/* Copy the updated request buffer back to the caller. */
1906 res = copy_to_user(user, data, copylen);
1909 up(&ip_set_app_mutex);
1913 DP("final result %d", res);
/*
 * Netfilter sockopt registration: both halves of the ipset protocol
 * (ip_set_sockfn_set for writes, ip_set_sockfn_get for reads) are
 * multiplexed over the single SO_IP_SET optname.
 */
1917 static struct nf_sockopt_ops so_set = {
1919 .set_optmin = SO_IP_SET,
1920 .set_optmax = SO_IP_SET + 1,
1921 .set = &ip_set_sockfn_set,
1922 .get_optmin = SO_IP_SET,
1923 .get_optmax = SO_IP_SET + 1,
1924 .get = &ip_set_sockfn_get,
/*
 * Module parameters: when set, they override the compile-time defaults
 * CONFIG_IP_NF_SET_MAX and CONFIG_IP_NF_SET_HASHSIZE (consumed once in
 * init() below).
 */
1928 static int max_sets, hash_size;
1929 module_param(max_sets, int, 0600);
1930 MODULE_PARM_DESC(max_sets, "maximal number of sets");
1931 module_param(hash_size, int, 0600);
1932 MODULE_PARM_DESC(hash_size, "hash size for bindings");
1933 MODULE_LICENSE("GPL");
1934 MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
1935 MODULE_DESCRIPTION("module implementing core IP set support");
/*
 * Module init: seed the bindings hash, allocate the global set array
 * (ip_set_list) and the bindings hash (ip_set_hash), initialize the
 * set type list, and register the SO_IP_SET sockopt handlers.
 */
1937 static int __init init(void)
/* Random seed for the jhash of binding lookups. */
1942 get_random_bytes(&ip_set_hash_random, 4);
/* Module parameters override the CONFIG_* defaults. */
1944 ip_set_max = max_sets;
1945 ip_set_list = vmalloc(sizeof(struct ip_set *) * ip_set_max);
1947 printk(KERN_ERR "Unable to create ip_set_list\n");
/* All slots start out empty (NULL). */
1950 memset(ip_set_list, 0, sizeof(struct ip_set *) * ip_set_max);
1952 ip_set_bindings_hash_size = hash_size;
1953 ip_set_hash = vmalloc(sizeof(struct list_head) * ip_set_bindings_hash_size);
1955 printk(KERN_ERR "Unable to create ip_set_hash\n");
/* Every hash bucket is an initially empty list head. */
1959 for (i = 0; i < ip_set_bindings_hash_size; i++)
1960 INIT_LIST_HEAD(&ip_set_hash[i]);
1962 INIT_LIST_HEAD(&set_type_list);
/* Expose the userspace interface last, once all state is ready. */
1964 res = nf_register_sockopt(&so_set);
1966 ip_set_printk("SO_SET registry failed: %d", res);
/*
 * Module exit: unregister the sockopt interface.  By the time the
 * module can be unloaded, no set or binding may exist any more.
 */
1974 static void __exit fini(void)
1976 /* There can't be any existing set or binding */
1977 nf_unregister_sockopt(&so_set);
1980 DP("these are the famous last words");
/* Set-type (un)registration API, used by the individual type modules. */
1983 EXPORT_SYMBOL(ip_set_register_set_type);
1984 EXPORT_SYMBOL(ip_set_unregister_set_type);
/* Set lookup/release API — presumably used by the set/SET netfilter
 * match and target modules (see the header comment); confirm callers. */
1986 EXPORT_SYMBOL(ip_set_get_byname);
1987 EXPORT_SYMBOL(ip_set_get_byindex);
1988 EXPORT_SYMBOL(ip_set_put);
/* Kernel-side add/del/test entry points for packet-path callers. */
1990 EXPORT_SYMBOL(ip_set_addip_kernel);
1991 EXPORT_SYMBOL(ip_set_delip_kernel);
1992 EXPORT_SYMBOL(ip_set_testip_kernel);