1 /* Rule-based Classification Engine (RBCE) module
3 * Copyright (C) Hubertus Franke, IBM Corp. 2003
4 * (C) Chandra Seetharaman, IBM Corp. 2003
5 * (C) Vivek Kashyap, IBM Corp. 2004
7 * Module for loading of classification policies and providing
8 * a user API for Class-based Kernel Resource Management (CKRM)
10 * Latest version, more details at http://ckrm.sf.net
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2 of the License, or
15 * (at your option) any later version.
22 * Created. First cut with much scope for cleanup !
24 * Made modifications to suit the new RBCE module.
25 * Made modifications to address sampling and delivery
27 * Integrated changes from original RBCE module
29 * Merged RBCE and CRBCE into common code base
31 * Incorporated listen call back and IPv4 match support
33 * Added Multi-Classtype Support
36 #include <linux/init.h>
37 #include <linux/module.h>
38 #include <linux/kernel.h>
40 #include <asm/uaccess.h>
42 #include <linux/mount.h>
43 #include <linux/proc_fs.h>
44 #include <linux/limits.h>
45 #include <linux/pid.h>
46 #include <linux/sysctl.h>
48 #include <linux/ckrm_rc.h>
49 #include <linux/ckrm_ce.h>
50 #include <linux/ckrm_net.h>
51 #include "bitvector.h"
/* NOTE(review): this listing is sparsely sampled (the embedded original
 * line numbers skip), so the declarations below are visibly incomplete;
 * only comments are added here. */
56 MODULE_DESCRIPTION(RBCE_MOD_DESCR);
57 MODULE_AUTHOR("Hubertus Franke, Chandra Seetharaman (IBM)");
58 MODULE_LICENSE("GPL");
60 static char modname[] = RBCE_MOD_NAME;
62 /* ==================== typedef, global variables etc., ==================== */
/* Common header embedded at the top of every named RBCE object (rules and
 * classes); 'link' chains the object into a global list.  The 'name' and
 * 'referenced' members used elsewhere are on lines missing from this sample. */
63 struct named_obj_hdr {
64 struct list_head link;
/* Non-atomic reference counting on the embedded header; per the locking
 * comment further below, callers are expected to hold global_rwlock. */
69 #define GET_REF(x) ((x)->obj.referenced)
70 #define INC_REF(x) (GET_REF(x)++)
71 #define DEC_REF(x) (--GET_REF(x))
73 struct named_obj_hdr obj;
/* Rule-term attribute identifiers (enum body; the enum's opening line is
 * missing from this sample).  Used as the 'op' of a rule term. */
79 RBCE_RULE_CMD_PATH = 1, // full qualified path
80 RBCE_RULE_CMD, // basename of the command
81 RBCE_RULE_ARGS, // arguments of the command
82 RBCE_RULE_REAL_UID, // task's real uid
83 RBCE_RULE_REAL_GID, // task's real gid
84 RBCE_RULE_EFFECTIVE_UID, // task's effective uid
85 RBCE_RULE_EFFECTIVE_GID, // task's effective gid
86 RBCE_RULE_APP_TAG, // task's application tag
87 RBCE_RULE_IPV4, // IP address of listen(), ipv4 format
88 RBCE_RULE_IPV6, // IP address of listen(), ipv6 format
89 RBCE_RULE_DEP_RULE, // dependent rule; must be the first term
90 RBCE_RULE_INVALID, // invalid, for filler
91 RBCE_RULE_INVALID2, // invalid, for filler
/* One term of a rule: an attribute op, a comparison operator, and a value
 * (string, numeric id, or a pointer to another rule for "depend" terms). */
101 struct rbce_rule_term {
103 rbce_operator_t operator;
105 char *string; // path, cmd, arg, tag, ipv4 and ipv6
106 long id; // uid, gid, euid, egid
107 struct rbce_rule *deprule;
/* A classification rule: named object + target class + term indices into
 * the global term vector (gl_terms), plus optimizer bookkeeping. */
112 struct named_obj_hdr obj;
113 struct rbce_class *target_class;
116 int *terms; // vector of indices into the global term vector
117 int index; // index of this rule into the global term vector
118 int termflag; // which term ids would require a recalculation
119 int do_opt; // do we have to consider this rule during optimize
120 char *strtab; // string table to store the strings of all terms
121 int order; // order of execution of this rule
122 int state; // RBCE_RULE_ENABLED/RBCE_RULE_DISABLED
126 #define RBCE_RULE_DISABLED 0
127 #define RBCE_RULE_ENABLED 1
130 // Data structures and macros used for optimization
/* Term-context indices: each term type belongs to one evaluation context,
 * selecting one of the NUM_TERM_MASK_VECTOR bit vectors in gl_mask_vecs. */
131 #define RBCE_TERM_CMD (0)
132 #define RBCE_TERM_UID (1)
133 #define RBCE_TERM_GID (2)
134 #define RBCE_TERM_TAG (3)
135 #define RBCE_TERM_IPV4 (4)
136 #define RBCE_TERM_IPV6 (5)
138 #define NUM_TERM_MASK_VECTOR (6)
140 // Rule flags. 1 bit for each type of rule term
141 #define RBCE_TERMFLAG_CMD (1 << RBCE_TERM_CMD)
142 #define RBCE_TERMFLAG_UID (1 << RBCE_TERM_UID)
143 #define RBCE_TERMFLAG_GID (1 << RBCE_TERM_GID)
144 #define RBCE_TERMFLAG_TAG (1 << RBCE_TERM_TAG)
145 #define RBCE_TERMFLAG_IPV4 (1 << RBCE_TERM_IPV4)
146 #define RBCE_TERMFLAG_IPV6 (1 << RBCE_TERM_IPV6)
147 #define RBCE_TERMFLAG_ALL (RBCE_TERMFLAG_CMD | RBCE_TERMFLAG_UID | \
148 RBCE_TERMFLAG_GID | RBCE_TERMFLAG_TAG | \
149 RBCE_TERMFLAG_IPV4 | RBCE_TERMFLAG_IPV6)
/* Maps a rule-term op to its evaluation-context index.  RBCE_RULE_DEP_RULE
 * maps to -1: a depend term inherits the contexts of the rule it names
 * (see optimize_policy). */
151 int termop_2_vecidx[RBCE_RULE_INVALID] = {
152 [RBCE_RULE_CMD_PATH] = RBCE_TERM_CMD,
153 [RBCE_RULE_CMD] = RBCE_TERM_CMD,
154 [RBCE_RULE_ARGS] = RBCE_TERM_CMD,
155 [RBCE_RULE_REAL_UID] = RBCE_TERM_UID,
156 [RBCE_RULE_REAL_GID] = RBCE_TERM_GID,
157 [RBCE_RULE_EFFECTIVE_UID] = RBCE_TERM_UID,
158 [RBCE_RULE_EFFECTIVE_GID] = RBCE_TERM_GID,
159 [RBCE_RULE_APP_TAG] = RBCE_TERM_TAG,
160 [RBCE_RULE_IPV4] = RBCE_TERM_IPV4,
161 [RBCE_RULE_IPV6] = RBCE_TERM_IPV6,
162 [RBCE_RULE_DEP_RULE] = -1
165 #define TERMOP_2_TERMFLAG(x) (1 << termop_2_vecidx[x])
166 #define TERM_2_TERMFLAG(x) (1 << x)
168 #define POLICY_INC_NUMTERMS (BITS_PER_LONG) // No. of terms added at a time
169 #define POLICY_ACTION_NEW_VERSION 0x01 // Force reallocation
170 #define POLICY_ACTION_REDO_ALL 0x02 // Recompute all rule flags
171 #define POLICY_ACTION_PACK_TERMS 0x04 // Time to pack the terms
173 struct ckrm_eng_callback ckrm_ecbs;
/* Global term-vector bookkeeping; per the lock comment below, all of this
 * is protected by global_rwlock. */
177 static int gl_bitmap_version, gl_action, gl_num_terms;
178 static int gl_allocated, gl_released;
179 struct rbce_rule_term *gl_terms;
180 bitvector_t *gl_mask_vecs[NUM_TERM_MASK_VECTOR];
183 static void optimize_policy(void);
185 #ifndef CKRM_MAX_CLASSTYPES
186 #define CKRM_MAX_CLASSTYPES 32
/* Per-classtype rule lists plus one global class list. */
189 struct list_head rules_list[CKRM_MAX_CLASSTYPES];
190 LIST_HEAD(class_list); // List of classes used
192 static int gl_num_rules;
193 static int gl_rules_version;
194 int rbce_enabled = 1;
195 static rwlock_t global_rwlock = RW_LOCK_UNLOCKED;
197 * One lock to protect them all !!!
198 * Additions, deletions to rules must
199 * happen with this lock being held in write mode.
200 * Access(read/write) to any of the data structures must happen
201 * with this lock held in read mode.
202 * Since, rule related changes do not happen very often it is ok to
203 * have single rwlock.
207 * data structure rbce_private_data holds the bit vector 'eval' which
208 * specifies if rules and terms of rules are evaluated against the task
209 * and if they were evaluated, bit vector 'true' holds the result of that
212 * This data structure is maintained in a task, and the bitvectors are
213 * updated only when needed.
215 * Each rule and each term of a rule has a corresponding bit in the vector.
/* Per-task classification cache hung off tsk->ce_data (see RBCE_DATA). */
218 struct rbce_private_data {
219 struct rbce_ext_private_data ext_data;
220 int evaluate; // whether to evaluate rules or not ?
221 int rules_version; // whether to evaluate rules or not ?
223 unsigned long bitmap_version;
226 char data[0]; // eval points to this variable size data array
229 #define RBCE_DATA(tsk) ((struct rbce_private_data*)((tsk)->ce_data))
230 #define RBCE_DATAP(tsk) ((tsk)->ce_data)
232 /* ======================= DEBUG Functions ========================= */
236 int rbcedebug = 0x00;
238 #define DBG_CLASSIFY_RES ( 0x01 )
239 #define DBG_CLASSIFY_DETAILS ( 0x02 )
240 #define DBG_OPTIMIZATION ( 0x04 )
241 #define DBG_SHOW_RESCTL ( 0x08 )
242 #define DBG_CLASS ( 0x10 )
243 #define DBG_RULE ( 0x20 )
244 #define DBG_POLICY ( 0x40 )
/* NOTE(review): bare if-statement macro — a following 'else' would bind to
 * it; convention would wrap this in do { } while (0). */
246 #define DPRINTK(x, y...) if (rbcedebug & (x)) printk(y)
247 // debugging selectively enabled through /proc/sys/debug/rbce
/* Dump all context mask vectors when DBG_OPTIMIZATION debugging is on. */
249 static void print_context_vectors(void)
253 if ((rbcedebug & DBG_OPTIMIZATION) == 0) {
256 for (i = 0; i < NUM_TERM_MASK_VECTOR; i++) {
258 bitvector_print(DBG_OPTIMIZATION, gl_mask_vecs[i]);
/* Debug-disabled stubs (the surrounding #if/#else lines are missing from
 * this sample). */
264 #define DPRINTK(x, y...)
265 #define print_context_vectors(x)
268 /* ======================= Helper Functions ========================= */
272 static struct ckrm_core_class *rbce_classify(struct task_struct *,
273 struct ckrm_net_struct *,
274 unsigned long, int classtype);
/* Linear search for a rule by name across every classtype's rule list.
 * Caller must hold global_rwlock (see locking comment above). */
276 static inline struct rbce_rule *find_rule_name(const char *name)
278 struct named_obj_hdr *pos;
281 for (i = 0; i < CKRM_MAX_CLASSTYPES; i++) {
282 list_for_each_entry(pos, &rules_list[i], link) {
283 if (!strcmp(pos->name, name)) {
284 return ((struct rbce_rule *)pos);
/* Linear search of class_list for a class by name; same locking contract. */
291 static inline struct rbce_class *find_class_name(const char *name)
293 struct named_obj_hdr *pos;
295 list_for_each_entry(pos, &class_list, link) {
296 if (!strcmp(pos->name, name))
297 return (struct rbce_class *)pos;
303 * Insert the given rule at the specified order
304 * order = -1 ==> insert at the tail.
306 * Caller must hold global_rwlock in write mode.
/* Keeps the per-classtype list sorted by rule->order; order values are
 * handed out from a static counter in steps of ORDER_COUNTER_INCR.
 * Takes a module reference per inserted rule (released in __delete_rule). */
308 static int insert_rule(struct rbce_rule *rule, int order)
310 #define ORDER_COUNTER_INCR 10
311 static int order_counter;
313 struct list_head *head = &rules_list[rule->classtype];
314 struct list_head *insert = head;
315 struct rbce_rule *tmp;
317 if (gl_num_rules == 0) {
323 rule->order = order_counter;
324 // FIXME: order_counter overflow/wraparound!!
325 order_counter += ORDER_COUNTER_INCR;
328 old_counter = order_counter;
329 if (order_counter < order) {
330 order_counter = order;
333 order_counter += ORDER_COUNTER_INCR;
/* Walk the sorted list: an exact order collision restores the counter
 * (error path lines are missing from this sample); otherwise insert
 * before the first rule with a larger order. */
334 list_for_each_entry(tmp, head, obj.link) {
335 if (rule->order == tmp->order) {
336 order_counter = old_counter;
339 if (rule->order < tmp->order) {
340 insert = &tmp->obj.link;
345 list_add_tail(&rule->obj.link, insert);
346 // protect the module from removed when any rule is
348 try_module_get(THIS_MODULE);
355 * Remove the rule and reinsert at the specified order.
357 * Caller must hold global_rwlock in write mode.
/* Drops the list membership and its module reference, then re-runs
 * insert_rule (which retakes the reference). */
359 static int reinsert_rule(struct rbce_rule *rule, int order)
361 list_del(&rule->obj.link);
364 module_put(THIS_MODULE);
365 return insert_rule(rule, order);
369 * Get a reference to the class, create one if it doesn't exist
371 * Caller need to hold global_rwlock in write mode.
/* Allocates an rbce_class wrapper for a core CKRM class object and links
 * it on class_list.  GFP_ATOMIC because it runs under global_rwlock. */
375 static struct rbce_class *create_rbce_class(const char *classname,
376 int classtype, void *classobj)
378 struct rbce_class *cls;
380 if (classtype >= CKRM_MAX_CLASSTYPES) {
382 "ckrm_classobj returned %d as classtype which cannot "
383 " be handled by RBCE\n", classtype);
387 cls = kmalloc(sizeof(struct rbce_class), GFP_ATOMIC);
391 cls->obj.name = kmalloc(strlen(classname) + 1, GFP_ATOMIC);
394 cls->classobj = classobj;
395 strcpy(cls->obj.name, classname);
396 list_add_tail(&cls->obj.link, &class_list);
397 cls->classtype = classtype;
/* Look up (or create) the rbce_class for a name; on the lookup hit the
 * classtype is returned through *classtype, otherwise the core is asked
 * via ckrm_classobj() and a new wrapper is created. */
405 static struct rbce_class *get_class(char *classname, int *classtype)
407 struct rbce_class *cls;
413 cls = find_class_name(classname);
417 *classtype = cls->classtype;
422 classobj = ckrm_classobj(classname, classtype);
427 return create_rbce_class(classname, *classtype, classobj);
431 * Drop a reference to the class, freeing it when the count hits zero
433 * Caller need to hold global_rwlock in write mode.
435 static void put_class(struct rbce_class *cls)
438 if (DEC_REF(cls) <= 0) {
439 list_del(&cls->obj.link);
440 kfree(cls->obj.name);
448 * Callback from core when a class is added
/* Under global_rwlock: attach the core class object to an existing
 * wrapper, or create a new one, then notify the extension layer. */
451 #ifdef RBCE_EXTENSION
452 static void rbce_class_addcb(const char *classname, void *clsobj, int classtype)
454 struct rbce_class *cls;
456 write_lock(&global_rwlock);
457 cls = find_class_name((char *)classname);
459 cls->classobj = clsobj;
461 cls = create_rbce_class(classname, classtype, clsobj);
464 notify_class_action(cls, 1);
465 write_unlock(&global_rwlock);
471 * Callback from core when a class is deleted.
/* Detaches every rule that targeted the deleted class (target_class set
 * to NULL, classtype to -1) and verifies the wrapper is really gone. */
474 rbce_class_deletecb(const char *classname, void *classobj, int classtype)
/* NOTE(review): 'cls' is declared static inside this callback — unusual
 * for a per-call local; verify against the full source. */
476 static struct rbce_class *cls;
477 struct named_obj_hdr *pos;
478 struct rbce_rule *rule;
480 write_lock(&global_rwlock);
481 cls = find_class_name(classname);
483 if (cls->classobj != classobj) {
484 printk(KERN_ERR "rbce: class %s changed identity\n",
487 notify_class_action(cls, 0);
488 cls->classobj = NULL;
489 list_for_each_entry(pos, &rules_list[cls->classtype], link) {
490 rule = (struct rbce_rule *)pos;
491 if (rule->target_class) {
493 (rule->target_class->obj.name, classname)) {
495 rule->target_class = NULL;
496 rule->classtype = -1;
501 if ((cls = find_class_name(classname)) != NULL) {
503 "rbce ERROR: class %s exists in rbce after "
504 "removal in core\n", classname);
507 write_unlock(&global_rwlock);
512 * Allocate an index in the global term vector
513 * On success, returns the index. On failure returns -errno.
514 * Caller must hold the global_rwlock in write mode as global data is
/* Grows gl_terms in POLICY_INC_NUMTERMS chunks (copy to a new buffer),
 * flagging POLICY_ACTION_NEW_VERSION so the mask vectors get rebuilt. */
517 static int alloc_term_index(void)
519 int size = gl_allocated;
521 if (gl_num_terms >= size) {
523 struct rbce_rule_term *oldv, *newv;
524 int newsize = size + POLICY_INC_NUMTERMS;
528 kmalloc(newsize * sizeof(struct rbce_rule_term),
533 memcpy(newv, oldv, size * sizeof(struct rbce_rule_term));
534 for (i = size; i < newsize; i++) {
538 gl_allocated = newsize;
541 gl_action |= POLICY_ACTION_NEW_VERSION;
542 DPRINTK(DBG_OPTIMIZATION,
543 "alloc_term_index: Expanding size from %d to %d\n",
546 return gl_num_terms++;
550 * Release an index in the global term vector
552 * Caller must hold the global_rwlock in write mode as the global data
/* Marks the slot free (op = -1) and requests a pack pass once enough
 * slots have been released.
 * NOTE(review): the bound check uses 'idx > gl_num_terms'; 'idx ==
 * gl_num_terms' would index one past the in-use range — confirm against
 * the full source whether >= was intended. */
555 static void release_term_index(int idx)
557 if ((idx < 0) || (idx > gl_num_terms))
560 gl_terms[idx].op = -1;
562 if ((gl_released > POLICY_INC_NUMTERMS) &&
564 (gl_num_terms - gl_released + POLICY_INC_NUMTERMS))) {
565 gl_action |= POLICY_ACTION_PACK_TERMS;
571 * Release the indices, string memory, and terms associated with the given
574 * Caller should be holding global_rwlock
/* Drops the reference taken on each depend-rule term, then frees every
 * term slot this rule owned. */
576 static void __release_rule(struct rbce_rule *rule)
578 int i, *terms = rule->terms;
580 // remove memory and references from other rules
581 for (i = rule->num_terms; --i >= 0;) {
582 struct rbce_rule_term *term = &gl_terms[terms[i]];
584 if (term->op == RBCE_RULE_DEP_RULE) {
585 DEC_REF(term->u.deprule);
587 release_term_index(terms[i]);
602 * delete the given rule and all memory associated with it.
604 * Caller is responsible for protecting the global data
/* Full teardown: terms, class reference, own term-vector slot, list
 * membership, module reference, and the name string. */
606 static inline int __delete_rule(struct rbce_rule *rule)
608 // make sure we are not referenced by other rules
612 __release_rule(rule);
613 put_class(rule->target_class);
614 release_term_index(rule->index);
615 list_del(&rule->obj.link);
618 module_put(THIS_MODULE);
619 kfree(rule->obj.name);
625 * Optimize the rule evaluation logic
627 * Caller must hold global_rwlock in write mode.
/* Three phases: (1) optionally pack the sparse term vector into a fresh,
 * compact array; (2) reallocate the per-context mask bit vectors when the
 * vector version changed; (3) recompute each rule's termflag and set the
 * rule/term bits in the matching context vectors. */
629 static void optimize_policy(void)
632 struct rbce_rule *rule;
633 struct rbce_rule_term *terms;
636 bitvector_t **mask_vecs;
641 * Due to dynamic rule addition/deletion of rules the term
642 * vector can get sparse. As a result the bitvectors grow as we don't
643 * reuse returned indices. If it becomes sparse enough we pack them
647 pack_terms = (gl_action & POLICY_ACTION_PACK_TERMS);
648 DPRINTK(DBG_OPTIMIZATION,
649 "----- Optimize Policy ----- act=%x pt=%d (a=%d n=%d r=%d)\n",
650 gl_action, pack_terms, gl_allocated, gl_num_terms, gl_released);
/* Phase 1: pack.  Copy live terms into a new array sized to the live
 * count rounded up to POLICY_INC_NUMTERMS, renumbering each rule's
 * index and term indices as we go. */
653 int nsz = ALIGN((gl_num_terms - gl_released),
654 POLICY_INC_NUMTERMS);
656 struct rbce_rule_term *newterms;
660 kmalloc(nsz * sizeof(struct rbce_rule_term), GFP_ATOMIC);
662 for (ii = 0; ii < CKRM_MAX_CLASSTYPES; ii++) {
663 // FIXME: check only for task class types
664 list_for_each_entry_reverse(rule,
667 rule->index = newidx++;
668 for (i = rule->num_terms; --i >= 0;) {
669 int idx = rule->terms[i];
670 newterms[newidx] = terms[idx];
671 rule->terms[i] = newidx++;
678 gl_num_terms = newidx;
681 gl_action &= ~POLICY_ACTION_PACK_TERMS;
682 gl_action |= POLICY_ACTION_NEW_VERSION;
686 num_terms = gl_num_terms;
687 bsize = gl_allocated / 8 + sizeof(bitvector_t);
688 mask_vecs = gl_mask_vecs;
/* Phase 2: one kmalloc carved into NUM_TERM_MASK_VECTOR vectors; slot 0
 * keeps the allocation's base address for the later kfree. */
691 if (gl_action & POLICY_ACTION_NEW_VERSION) {
692 /* allocate new mask vectors */
693 char *temp = kmalloc(NUM_TERM_MASK_VECTOR * bsize, GFP_ATOMIC);
695 DPRINTK(DBG_OPTIMIZATION,
696 "------ allocmasks act=%x ------- ver=%d\n", gl_action,
701 if (mask_vecs[0]) {// index 0 has the alloc returned address
704 for (i = 0; i < NUM_TERM_MASK_VECTOR; i++) {
705 mask_vecs[i] = (bitvector_t *) (temp + i * bsize);
706 bitvector_init(mask_vecs[i], gl_allocated);
708 gl_action &= ~POLICY_ACTION_NEW_VERSION;
709 gl_action |= POLICY_ACTION_REDO_ALL;
713 /* We do two things here at once
714 * 1) recompute the rulemask for each required rule
715 * we guarantee proper dependency order during creation time and
716 * by reversely running through this list.
717 * 2) recompute the mask for each term and rule, if required
720 redoall = gl_action & POLICY_ACTION_REDO_ALL;
721 gl_action &= ~POLICY_ACTION_REDO_ALL;
723 DPRINTK(DBG_OPTIMIZATION, "------- run act=%x -------- redoall=%d\n",
725 for (ii = 0; ii < CKRM_MAX_CLASSTYPES; ii++) {
726 // FIXME: check only for task class types
727 list_for_each_entry_reverse(rule, &rules_list[ii], obj.link) {
728 unsigned long termflag;
730 if (!redoall && !rule->do_opt)
733 for (i = rule->num_terms; --i >= 0;) {
734 int j, idx = rule->terms[i];
735 struct rbce_rule_term *term = &terms[idx];
736 int vecidx = termop_2_vecidx[term->op];
/* A depend term inherits the depended-on rule's contexts. */
739 termflag |= term->u.deprule->termflag;
740 /* mark this term belonging to all
741 contexts of deprule */
742 for (j = 0; j < NUM_TERM_MASK_VECTOR;
744 if (term->u.deprule->termflag
752 termflag |= TERM_2_TERMFLAG(vecidx);
753 /* mark this term belonging to
754 a particular context */
755 bitvector_set(idx, mask_vecs[vecidx]);
758 for (i = 0; i < NUM_TERM_MASK_VECTOR; i++) {
759 if (termflag & (1 << i)) {
760 bitvector_set(rule->index,
764 rule->termflag = termflag;
766 DPRINTK(DBG_OPTIMIZATION, "r-%s: %x %d\n",
767 rule->obj.name, rule->termflag, rule->index);
770 print_context_vectors();
774 /* ======================= Rule related Functions ========================= */
777 * Caller need to hold global_rwlock in write mode.
/* Digest a parsed term array into 'newrule': resolve depend rules, size
 * the string table, bind the target class, then allocate global term
 * slots and copy each term in.  Error-unwind lines are largely missing
 * from this sampled listing. */
780 fill_rule(struct rbce_rule *newrule, struct rbce_rule_term *terms, int nterms)
782 char *class, *strtab;
783 int i, j, order, state, real_nterms, index;
784 int strtablen, rc = 0, counter;
785 struct rbce_rule_term *term = NULL;
786 struct rbce_class *targetcls = NULL;
787 struct rbce_rule *deprule;
792 // Digest filled terms.
794 strtab = class = NULL;
/* Pass 1: validate terms, collect order/state/class pseudo-terms, and
 * total the string storage needed. */
799 for (i = 0; i < nterms; i++) {
800 if (terms[i].op != RBCE_RULE_INVALID) {
803 switch (terms[i].op) {
804 case RBCE_RULE_DEP_RULE:
805 // check if the depend rule is valid
807 deprule = find_rule_name(terms[i].u.string);
808 if (!deprule || deprule == newrule) {
812 // make sure _a_ depend rule
813 // appears in only one term.
814 for (j = 0; j < i; j++) {
817 && terms[j].u.deprule ==
823 terms[i].u.deprule = deprule;
826 // +depend is acceptable and -depend is not
827 if (terms[i].operator != TOKEN_OP_DEP_DEL) {
828 terms[i].operator = RBCE_EQUAL;
835 case RBCE_RULE_CMD_PATH:
838 case RBCE_RULE_APP_TAG:
841 // sum up the string length
842 strtablen += strlen(terms[i].u.string) + 1;
849 switch (terms[i].operator) {
851 order = terms[i].u.id;
858 state = terms[i].u.id != 0;
861 class = terms[i].u.string;
869 // Check if class was specified
872 if ((targetcls = get_class(class, &classtype)) == NULL) {
876 put_class(newrule->target_class);
878 newrule->target_class = targetcls;
879 newrule->classtype = classtype;
881 if (!newrule->target_class) {
887 newrule->state = state;
890 newrule->order = order;
892 newrule->terms = kmalloc(real_nterms * sizeof(int), GFP_ATOMIC);
893 if (!newrule->terms) {
897 newrule->num_terms = real_nterms;
898 if (strtablen && ((strtab = kmalloc(strtablen, GFP_ATOMIC)) == NULL)) {
/* First-time fill: reserve the rule's own slot in the term vector so
 * other rules can depend on it. */
903 if (newrule->index == -1) {
904 index = alloc_term_index();
909 newrule->index = index;
910 term = &gl_terms[newrule->index];
911 term->op = RBCE_RULE_DEP_RULE;
912 term->u.deprule = newrule;
914 newrule->strtab = strtab;
915 newrule->termflag = 0;
917 // Fill the term vector
/* Pass 2: allocate a global slot per real term and copy op/operator/value;
 * strings are copied into the rule's private string table. */
920 for (i = 0; i < nterms; i++) {
921 if (terms[i].op == RBCE_RULE_INVALID) {
925 newrule->terms[counter] = alloc_term_index();
926 if (newrule->terms[counter] < 0) {
927 for (j = 0; j < counter; j++) {
928 release_term_index(newrule->terms[j]);
933 term = &gl_terms[newrule->terms[counter]];
934 term->op = terms[i].op;
935 term->operator = terms[i].operator;
936 switch (terms[i].op) {
937 case RBCE_RULE_CMD_PATH:
940 case RBCE_RULE_APP_TAG:
943 term->u.string = &strtab[strtablen];
944 strcpy(term->u.string, terms[i].u.string);
945 strtablen = strlen(term->u.string) + 1;
948 case RBCE_RULE_REAL_UID:
949 case RBCE_RULE_REAL_GID:
950 case RBCE_RULE_EFFECTIVE_UID:
951 case RBCE_RULE_EFFECTIVE_GID:
952 term->u.id = terms[i].u.id;
955 case RBCE_RULE_DEP_RULE:
956 term->u.deprule = terms[i].u.deprule;
957 INC_REF(term->u.deprule);
/* Error unwind (labels missing from this sample): drop the class ref,
 * free the rule's slot, and free the term-index array. */
968 put_class(targetcls);
971 release_term_index(index);
973 kfree(newrule->terms);
/* Create a new rule or modify an existing one from a parsed definition
 * string.  For an existing rule the old and new terms are merged into a
 * scratch array, the old terms released, and fill_rule re-run. */
980 int change_rule(const char *rname, char *rdefn)
982 struct rbce_rule *rule = NULL, *deprule;
983 struct rbce_rule_term *new_terms = NULL, *term, *terms;
984 int nterms, new_term_mask = 0, oterms, tot_terms;
985 int i, j, k, rc, new_order = 0;
987 if ((nterms = rules_parse(rdefn, &new_terms, &new_term_mask)) <= 0) {
988 return !nterms ? -EINVAL : nterms;
991 write_lock(&global_rwlock);
992 rule = find_rule_name(rname);
/* Not found: allocate and initialize a brand-new rule, fill it and
 * insert it at its order. */
994 rule = kmalloc(sizeof(struct rbce_rule), GFP_ATOMIC);
996 rule->obj.name = kmalloc(strlen(rname) + 1, GFP_ATOMIC);
997 if (rule->obj.name) {
998 strcpy(rule->obj.name, rname);
1002 rule->state = RBCE_RULE_ENABLED;
1003 rule->target_class = NULL;
1004 rule->classtype = -1;
1007 INIT_LIST_HEAD(&rule->obj.link);
1008 rc = fill_rule(rule, new_terms, nterms);
1014 rule->order)) == 0) {
1019 __delete_rule(rule);
1030 write_unlock(&global_rwlock);
/* Existing rule: build a merged term array (old terms overridden by new
 * ones of the same op). */
1034 oterms = rule->num_terms;
1035 tot_terms = nterms + oterms;
1037 terms = kmalloc(tot_terms * sizeof(struct rbce_rule_term), GFP_ATOMIC);
1041 write_unlock(&global_rwlock);
1045 new_term_mask &= ~(1 << RBCE_RULE_DEP_RULE);
1046 //ignore the new deprule terms for the first iteration.
1047 // taken care of later.
1048 for (i = 0; i < oterms; i++) {
1049 term = &gl_terms[rule->terms[i]]; // old term
1051 if ((1 << term->op) & new_term_mask) {
1052 // newrule has this attr/value
1053 for (j = 0; j < nterms; j++) {
1054 if (term->op == new_terms[j].op) {
1055 terms[i].op = new_terms[j].op;
1056 terms[i].operator = new_terms[j].
1059 new_terms[j].u.string;
1060 new_terms[j].op = RBCE_RULE_INVALID2;
1065 terms[i].op = term->op;
1066 terms[i].operator = term->operator;
1067 terms[i].u.string = term->u.string;
1071 i = oterms; // for readability
/* Append remaining new terms, with special handling for depend terms:
 * "depend=" replaces all old depends, "+depend"/"-depend" edit them. */
1073 for (j = 0; j < nterms; j++) {
1074 // handled in the previous iteration
1075 if (new_terms[j].op == RBCE_RULE_INVALID2) {
1079 if (new_terms[j].op == RBCE_RULE_DEP_RULE) {
1080 if (new_terms[j].operator == TOKEN_OP_DEP) {
1081 // "depend=rule" deletes all depends in the
1082 // original rule so, delete all depend rule
1083 // terms in the original rule
1084 for (k = 0; k < oterms; k++) {
1085 if (terms[k].op == RBCE_RULE_DEP_RULE) {
1086 terms[k].op = RBCE_RULE_INVALID;
1089 // must copy the new deprule term
1091 // delete the depend rule term if was defined
1092 // in the original rule for both +depend
1094 deprule = find_rule_name(new_terms[j].u.string);
1096 for (k = 0; k < oterms; k++) {
1099 && terms[k].u.deprule ==
1107 if (new_terms[j].operator == TOKEN_OP_DEP_DEL) {
1108 // No need to copy the new deprule term
1113 if ((new_terms[j].op == RBCE_RULE_INVALID) &&
1114 (new_terms[j].operator == TOKEN_OP_ORDER)) {
1118 terms[i].op = new_terms[j].op;
1119 terms[i].operator = new_terms[j].operator;
1120 terms[i].u.string = new_terms[j].u.string;
1122 new_terms[j].op = RBCE_RULE_INVALID2;
1127 // convert old deprule pointers to name pointers.
1128 for (i = 0; i < oterms; i++) {
1129 if (terms[i].op != RBCE_RULE_DEP_RULE)
1131 terms[i].u.string = terms[i].u.deprule->obj.name;
/* Release the old terms and refill from the merged array; reinsert if
 * the order changed.  Any failure deletes the rule outright. */
1135 __release_rule(rule);
1138 rc = fill_rule(rule, terms, tot_terms);
1139 if (rc == 0 && new_order) {
1140 rc = reinsert_rule(rule, rule->order);
1142 if (rc != 0) { // rule creation/insertion failed
1143 __delete_rule(rule);
1148 write_unlock(&global_rwlock);
1155 * Delete the specified rule.
/* Under the write lock: look up by name, tear down via __delete_rule,
 * then repack the term vector if enough slots were freed. */
1158 int delete_rule(const char *rname)
1161 struct rbce_rule *rule;
1163 write_lock(&global_rwlock);
1165 if ((rule = find_rule_name(rname)) == NULL) {
1166 write_unlock(&global_rwlock);
1169 rc = __delete_rule(rule);
1170 if (rbce_enabled && (gl_action & POLICY_ACTION_PACK_TERMS)) {
1173 write_unlock(&global_rwlock);
1175 DPRINTK(DBG_RULE, "delete rule %s\n", rname);
1180 * copy the rule specified by rname and to the given result string.
/* Serializes each term back into "attr<op>value," text, then appends
 * order/state/class.  NOTE(review): no visible bound on 'result' — the
 * caller presumably guarantees the buffer is large enough; confirm. */
1183 void get_rule(const char *rname, char *result)
1186 struct rbce_rule *rule;
1187 struct rbce_rule_term *term;
1188 char *cp = result, oper, idtype[3], str[5];
1190 read_lock(&global_rwlock);
1192 rule = find_rule_name(rname);
1194 for (i = 0; i < rule->num_terms; i++) {
1195 term = gl_terms + rule->terms[i];
1197 case RBCE_RULE_REAL_UID:
1198 strcpy(idtype, "u");
1200 case RBCE_RULE_REAL_GID:
1201 strcpy(idtype, "g");
1203 case RBCE_RULE_EFFECTIVE_UID:
1204 strcpy(idtype, "eu");
1206 case RBCE_RULE_EFFECTIVE_GID:
1207 strcpy(idtype, "eg");
1209 if (term->operator == RBCE_LESS_THAN) {
1211 } else if (term->operator == RBCE_GREATER_THAN) {
1213 } else if (term->operator == RBCE_NOT) {
1219 sprintf(cp, "%sid%c%ld,", idtype, oper,
1222 case RBCE_RULE_CMD_PATH:
1223 strcpy(str, "path");
1228 case RBCE_RULE_ARGS:
1229 strcpy(str, "args");
1231 case RBCE_RULE_APP_TAG:
1234 case RBCE_RULE_IPV4:
1235 strcpy(str, "ipv4");
1237 case RBCE_RULE_IPV6:
1238 strcpy(str, "ipv6");
1241 sprintf(cp, "%s=%s,", str, term->u.string);
1243 case RBCE_RULE_DEP_RULE:
1245 sprintf(cp, "depend=%s,",
1246 term->u.deprule->obj.name);
1252 if (!rule->num_terms) {
1253 cp += sprintf(cp, "***** no terms defined ***** ");
1257 sprintf(cp, "order=%d,state=%d,", rule->order, rule->state);
1259 sprintf(cp, "class=%s",
1260 rule->target_class ? rule->target_class->obj.
1261 name : "***** REMOVED *****");
1264 sprintf(result, "***** Rule %s doesn't exist *****", rname);
1267 read_unlock(&global_rwlock);
1272 * Change the name of the given rule "from_rname" to "to_rname"
/* Reuses the existing name buffer when the new name fits, otherwise
 * allocates a replacement before copying. */
1275 int rename_rule(const char *from_rname, const char *to_rname)
1277 struct rbce_rule *rule;
1278 int nlen, rc = -EINVAL;
1280 if (!to_rname || !*to_rname) {
1283 write_lock(&global_rwlock);
1285 rule = find_rule_name(from_rname);
1287 if ((nlen = strlen(to_rname)) > strlen(rule->obj.name)) {
1288 char *name = kmalloc(nlen + 1, GFP_ATOMIC);
1292 kfree(rule->obj.name);
1293 rule->obj.name = name;
1295 strcpy(rule->obj.name, to_rname);
1298 write_unlock(&global_rwlock);
1303 * Return TRUE if the given rule exists, FALSE otherwise
1306 int rule_exists(const char *rname)
1308 struct rbce_rule *rule;
1310 read_lock(&global_rwlock);
1311 rule = find_rule_name(rname);
1312 read_unlock(&global_rwlock);
1313 return rule != NULL;
1316 /*====================== Magic file handling =======================*/
1320 static struct rbce_private_data *create_private_data(struct rbce_private_data *,
/* Stubbed-out reclassify hook — only logs; the pid argument is ignored. */
1323 int rbce_ckrm_reclassify(int pid)
1325 printk("ckrm_reclassify_pid ignored\n");
/* Force (re)classification of one task: ensure it has RBCE private data,
 * mark it for evaluation, and trigger reclassify. */
1329 int reclassify_pid(int pid)
1331 struct task_struct *tsk;
1333 // FIXME: Need to treat -pid as process group
1339 rbce_ckrm_reclassify(0); // just reclassify all tasks.
1341 // if pid is +ve take control of the task, start evaluating it
1342 if ((tsk = find_task_by_pid(pid)) == NULL) {
1346 if (unlikely(!RBCE_DATA(tsk))) {
1347 RBCE_DATAP(tsk) = create_private_data(NULL, 0);
1348 if (!RBCE_DATA(tsk)) {
1352 RBCE_DATA(tsk)->evaluate = 1;
1353 rbce_ckrm_reclassify(pid);
/* Attach an application tag to a task (creating private data on demand),
 * then reclassify so APP_TAG rules see the new tag. */
1357 int set_tasktag(int pid, char *tag)
1360 struct task_struct *tsk;
1361 struct rbce_private_data *pdata;
1367 if ((tsk = find_task_by_pid(pid)) == NULL) {
1371 tp = kmalloc(strlen(tag) + 1, GFP_ATOMIC);
1377 if (unlikely(!RBCE_DATA(tsk))) {
1378 RBCE_DATAP(tsk) = create_private_data(NULL, 0);
1379 if (!RBCE_DATA(tsk)) {
1384 pdata = RBCE_DATA(tsk);
1385 if (pdata->app_tag) {
1386 kfree(pdata->app_tag);
1388 pdata->app_tag = tp;
1389 strcpy(pdata->app_tag, tag);
1390 rbce_ckrm_reclassify(pid);
1395 /*====================== Classification Functions =======================*/
1398 * Match the given full path name with the command expression.
1399 * This function treats the following 2 characters as special if seen in
1400 * cmd_exp, all other characters are compared as is:
1401 * ? - compares to any one single character
1402 * * - compares to one or more single characters
1404 * If fullpath is 1, tsk_comm is compared in full. otherwise only the command
1405 * name (basename(tsk_comm)) is compared.
/* Hand-rolled glob matcher; large parts of its state machine are missing
 * from this sampled listing. */
1407 static int match_cmd(const char *tsk_comm, const char *cmd_exp, int fullpath)
1409 const char *c, *t, *last_ast, *cmd = tsk_comm;
1412 // get the command name if we don't have to match the fullpath
1413 if (!fullpath && ((c = strrchr(tsk_comm, '/')) != NULL)) {
1417 /* now faithfully assume the entire pathname is in cmd */
1419 /* we now have to effectively implement a regular expression
1421 * '?' any single character
1422 * '*' one or more '?'
1428 if (t == NULL || c == NULL) {
1448 // eat up all '*' in c
1449 while (*(c + 1) == '*')
1453 //t++; // Add this for matching '*' with "one"
1455 while (*t && (*t != *(c + 1)) && *t != '/')
1457 if (*t == *(c + 1)) {
1485 /*FALLTHRU*/ default:
1486 if (*t == *c && next_c != *t) {
1490 /* reset to last asterix and
1491 continue from there */
1501 /* check for trailing "*" */
1505 return (!*c && !*t);
/* In-place string reversal of the first n characters (helper for itoa). */
1508 static void reverse(char *str, int n)
1513 for (i = 0; i < j; i++, j--) {
/* Minimal unsigned decimal formatter; returns the digit count. */
1520 static int itoa(int n, char *str)
1525 str[i++] = n % 10 + '0';
1530 (void)reverse(str, sz);
/* Render an IPv4 address one byte at a time via itoa.
 * NOTE(review): separator/byte-order handling is on lines missing from
 * this sample. */
1534 static int v4toa(__u32 y, char *a)
1539 for (i = 0; i < 4; i++) {
1540 size += itoa(y & 0xff, &a[size]);
/* Compare the connection's IPv4 destination address (rendered as text)
 * against the rule string prefix. */
1547 int match_ipv4(struct ckrm_net_struct *ns, char **string)
1549 char *ptr = *string;
1553 size = v4toa(ns->ns_daddrv4, a4);
1556 return !strncmp(a4, ptr, size);
/* Compare the connection's destination port (as text) against the rule
 * string prefix. */
1559 int match_port(struct ckrm_net_struct *ns, char *ptr)
1562 int size = itoa(ns->ns_dport, a);
1564 return !strncmp(a, ptr, size);
1567 static int __evaluate_rule(struct task_struct *tsk, struct ckrm_net_struct *ns,
1568 struct rbce_rule *rule, bitvector_t * vec_eval,
1569 bitvector_t * vec_true, char **filename);
1571 * evaluate the given task against the given rule with the vec_eval and
1572 * vec_true in context. Return 1 if the task satisfies the given rule, 0
1575 * If the bit corresponding to the rule is set in the vec_eval, then the
1576 * corresponding bit in vec_true is the result. If it is not set, evaluate
1577 * the rule and set the bits in both the vectors accordingly.
1579 * On return, filename will have the pointer to the pathname of the task's
1580 * executable, if the rule had any command related terms.
1582 * Caller must hold the global_rwlock atleast in read mode.
1585 evaluate_rule(struct task_struct *tsk, struct ckrm_net_struct *ns,
1586 struct rbce_rule *rule, bitvector_t * vec_eval,
1587 bitvector_t * vec_true, char **filename)
1589 int tidx = rule->index;
1591 if (!bitvector_test(tidx, vec_eval)) {
1593 (tsk, ns, rule, vec_eval, vec_true, filename)) {
1594 bitvector_set(tidx, vec_true);
1596 bitvector_set(tidx, vec_eval);
1598 return bitvector_test(tidx, vec_true);
1602 * evaluate the given task against every term in the given rule with
1603 * vec_eval and vec_true in context.
1605 * If the bit corresponding to a rule term is set in the vec_eval, then the
1606 * corresponding bit in vec_true is the result for taht particular. If it is
1607 * not set, evaluate the rule term and set the bits in both the vectors
1610 * This fucntions returns true only if all terms in the rule evaluate true.
1612 * On return, filename will have the pointer to the pathname of the task's
1613 * executable, if the rule had any command related terms.
1615 * Caller must hold the global_rwlock atleast in read mode.
/*
 * __evaluate_rule - evaluate every term of @rule for @tsk (and optional
 * network struct @ns), consulting/updating the memoization bitvectors
 * @vec_eval (term was evaluated) and @vec_true (term evaluated true).
 * On first need of a command-related term, *filename is filled with the
 * task's executable pathname (kmalloc'd with GFP_ATOMIC by elided code).
 *
 * NOTE(review): many original lines are elided in this listing (gaps in
 * the embedded numbering): the switch statement head, several closing
 * braces, and parts of the IPv4 match are not visible.
 */
1618 __evaluate_rule(struct task_struct *tsk, struct ckrm_net_struct *ns,
1619 struct rbce_rule *rule, bitvector_t * vec_eval,
1620 bitvector_t * vec_true, char **filename)
/* walk the rule's terms from last to first */
1625 for (i = rule->num_terms; --i >= 0;) {
1626 int rc = 1, tidx = rule->terms[i];
/* only terms not yet cached in vec_eval are (re)evaluated */
1628 if (!bitvector_test(tidx, vec_eval)) {
1629 struct rbce_rule_term *term = &gl_terms[tidx];
/* --- command path / command name terms --- */
1633 case RBCE_RULE_CMD_PATH:
1636 if (!*filename) { /* get this once */
1639 GFP_ATOMIC)) == NULL)
1642 (tsk, *filename, NAME_MAX) < 0)) {
1647 rc = match_cmd(*filename, term->u.string,
1649 RBCE_RULE_CMD_PATH));
1651 rc = match_cmd(tsk->comm, term->u.string,
1653 RBCE_RULE_CMD_PATH));
/* --- real uid term: <, >, !=, default == against term->u.id --- */
1656 case RBCE_RULE_REAL_UID:
1657 if (term->operator == RBCE_LESS_THAN) {
1658 rc = (tsk->uid < term->u.id);
1659 } else if (term->operator == RBCE_GREATER_THAN){
1660 rc = (tsk->uid > term->u.id);
1661 } else if (term->operator == RBCE_NOT) {
1662 rc = (tsk->uid != term->u.id);
1664 rc = (tsk->uid == term->u.id);
/* --- real gid term, same operator handling --- */
1667 case RBCE_RULE_REAL_GID:
1668 if (term->operator == RBCE_LESS_THAN) {
1669 rc = (tsk->gid < term->u.id);
1670 } else if (term->operator == RBCE_GREATER_THAN){
1671 rc = (tsk->gid > term->u.id);
1672 } else if (term->operator == RBCE_NOT) {
1673 rc = (tsk->gid != term->u.id);
1675 rc = (tsk->gid == term->u.id);
/* --- effective uid term --- */
1678 case RBCE_RULE_EFFECTIVE_UID:
1679 if (term->operator == RBCE_LESS_THAN) {
1680 rc = (tsk->euid < term->u.id);
1681 } else if (term->operator == RBCE_GREATER_THAN){
1682 rc = (tsk->euid > term->u.id);
1683 } else if (term->operator == RBCE_NOT) {
1684 rc = (tsk->euid != term->u.id);
1686 rc = (tsk->euid == term->u.id);
/* --- effective gid term --- */
1689 case RBCE_RULE_EFFECTIVE_GID:
1690 if (term->operator == RBCE_LESS_THAN) {
1691 rc = (tsk->egid < term->u.id);
1692 } else if (term->operator == RBCE_GREATER_THAN){
1693 rc = (tsk->egid > term->u.id);
1694 } else if (term->operator == RBCE_NOT) {
1695 rc = (tsk->egid != term->u.id);
1697 rc = (tsk->egid == term->u.id);
/* --- application tag: string compare against task's app_tag, if any --- */
1700 case RBCE_RULE_APP_TAG:
1701 rc = (RBCE_DATA(tsk)
1703 app_tag) ? !strcmp(RBCE_DATA(tsk)->
1705 term->u.string) : 0;
/* --- dependent rule: recurse through the memoizing wrapper --- */
1707 case RBCE_RULE_DEP_RULE:
1708 rc = evaluate_rule(tsk, NULL, term->u.deprule,
/* --- IPv4 address/port match (EQUAL only, see comment below) --- */
1713 case RBCE_RULE_IPV4:
1714 // TBD: add NOT_EQUAL match. At present rbce
1715 // recognises EQUAL matches only.
1716 if (ns && term->operator == RBCE_EQUAL) {
1719 char *ptr = term->u.string;
1721 if (term->u.string[0] == '*')
1724 ma = match_ipv4(ns, &ptr);
1726 if (*ptr != '\\') { // error
1743 case RBCE_RULE_IPV6: // no support yet
1750 printk(KERN_ERR "Error evaluate term op=%d\n",
/* record the term's outcome in the memoization vectors */
1755 bitvector_clear(tidx, vec_true);
1757 bitvector_set(tidx, vec_true);
1759 bitvector_set(tidx, vec_eval);
1761 rc = bitvector_test(tidx, vec_true);
1770 //#define PDATA_DEBUG
/*
 * PDATA_DEBUG bookkeeping: a bounded registry of live rbce_private_data
 * pointers, guarded by pdata_lock, used by valid_pdata()/store_pdata()/
 * unstore_pdata() below to catch use of stale or corrupt pointers.
 */
1773 #define MAX_PDATA 10000
1774 void *pdata_arr[MAX_PDATA];
1775 int pdata_count, pdata_next;
1776 static spinlock_t pdata_lock = SPIN_LOCK_UNLOCKED;
/*
 * valid_pdata - debug-only check that @pdata is a pointer previously
 * registered via store_pdata(). Scans the registry under pdata_lock;
 * a miss is reported as corruption via printk.
 * NOTE(review): several lines (NULL check, return statements, braces)
 * are elided in this listing.
 */
1778 static inline int valid_pdata(struct rbce_private_data *pdata)
1785 spin_lock(&pdata_lock);
1786 for (i = 0; i < MAX_PDATA; i++) {
1787 if (pdata_arr[i] == pdata) {
1788 spin_unlock(&pdata_lock);
1792 spin_unlock(&pdata_lock);
1793 printk("INVALID/CORRUPT PDATA %p\n", pdata);
/*
 * store_pdata - debug-only: record @pdata in the first free slot of the
 * registry, starting the search at pdata_next (which wraps at MAX_PDATA).
 * Logs when the registry is full. All under pdata_lock.
 * NOTE(review): the loop increment, wrap-around, and counter updates are
 * elided in this listing.
 */
1797 static inline void store_pdata(struct rbce_private_data *pdata)
1802 spin_lock(&pdata_lock);
1804 while (i < MAX_PDATA) {
1805 if (pdata_arr[pdata_next] == NULL) {
1806 printk("storing %p at %d, count %d\n", pdata,
1807 pdata_next, pdata_count);
1808 pdata_arr[pdata_next++] = pdata;
1809 if (pdata_next == MAX_PDATA) {
1818 spin_unlock(&pdata_lock);
/* i == MAX_PDATA means no free slot was found */
1820 if (i == MAX_PDATA) {
1821 printk("PDATA BUFFER FULL pdata_count %d pdata %p\n",
1822 pdata_count, pdata);
/*
 * unstore_pdata - debug-only: remove @pdata from the registry (slot set
 * back to NULL under pdata_lock); logs if the pointer was never stored.
 * NOTE(review): break statement, counter update and closing braces are
 * elided in this listing.
 */
1826 static inline void unstore_pdata(struct rbce_private_data *pdata)
1830 spin_lock(&pdata_lock);
1831 for (i = 0; i < MAX_PDATA; i++) {
1832 if (pdata_arr[i] == pdata) {
1833 printk("unstoring %p at %d, count %d\n", pdata,
1835 pdata_arr[i] = NULL;
1841 spin_unlock(&pdata_lock);
/* i == MAX_PDATA means the scan completed without a match */
1842 if (i == MAX_PDATA) {
1843 printk("pdata %p not found in the stored array\n",
1850 #else // PDATA_DEBUG
/* Non-debug build: the pdata registry collapses to no-ops. */
1852 #define valid_pdata(pdata) (1)
1853 #define store_pdata(pdata)
1854 #define unstore_pdata(pdata)
1856 #endif // PDATA_DEBUG
/*
 * Nonzero: keep the eval/true bitvectors persistently in each task's
 * private data (see create_private_data/rbce_classify) instead of
 * allocating fresh vectors per classification.
 */
1858 const int use_persistent_state = 1;
1861 * Allocate and initialize a rbce_private_data data structure.
1863 * Caller must hold global_rwlock at least in read mode.
/*
 * copy_ext_private_data - copy the extension-private portion of a task's
 * RBCE data from @src to @dst, or zero @dst's portion when there is no
 * source.
 * NOTE(review): the conditional selecting between the assignment and the
 * memset (presumably "if (src) ... else ...") is elided in this listing.
 */
1867 copy_ext_private_data(struct rbce_private_data *src,
1868 struct rbce_private_data *dst)
1871 dst->ext_data = src->ext_data;
1873 memset(&dst->ext_data, 0, sizeof(dst->ext_data));
/*
 * create_private_data - allocate and initialize a task's RBCE private
 * data, optionally inheriting state from @src.
 *
 * With use_persistent_state, the allocation is enlarged to embed the two
 * per-task bitvectors (eval/true) after the struct; they are either
 * copied from @src (when its bitmap_version still matches the global
 * gl_bitmap_version) or freshly initialized. Uses GFP_ATOMIC since this
 * can run from classification callbacks.
 * Returns the new pdata, or NULL on allocation failure (return path is
 * elided in this listing, as are several braces).
 */
1876 static struct rbce_private_data *create_private_data(struct rbce_private_data
1877 *src, int copy_sample)
1879 int vsize, psize, bsize;
1880 struct rbce_private_data *pdata;
1882 if (use_persistent_state) {
1883 vsize = gl_allocated;
/* room for one bitvector: vsize bits plus the bitvector_t header */
1884 bsize = vsize / 8 + sizeof(bitvector_t);
1885 psize = sizeof(struct rbce_private_data) + 2 * bsize;
1887 psize = sizeof(struct rbce_private_data);
1890 pdata = kmalloc(psize, GFP_ATOMIC);
1891 if (pdata != NULL) {
1892 if (use_persistent_state) {
1893 pdata->bitmap_version = gl_bitmap_version;
/* eval and true vectors live back-to-back in the trailing data[] */
1894 pdata->eval = (bitvector_t *) & pdata->data[0];
1895 pdata->true = (bitvector_t *) & pdata->data[bsize];
1896 if (src && (src->bitmap_version == gl_bitmap_version)) {
1897 memcpy(pdata->data, src->data, 2 * bsize);
1899 bitvector_init(pdata->eval, vsize);
1900 bitvector_init(pdata->true, vsize);
1903 copy_ext_private_data(src, pdata);
1904 //if (src) { // inherit evaluate and app_tag
1905 // pdata->evaluate = src->evaluate;
1906 // if(src->app_tag) {
1907 // int len = strlen(src->app_tag)+1;
1908 // printk("CREATE_PRIVATE: apptag %s len %d\n",
1909 // src->app_tag,len);
1910 // pdata->app_tag = kmalloc(len, GFP_ATOMIC);
1911 // if (pdata->app_tag) {
1912 // strcpy(pdata->app_tag, src->app_tag);
/* defaults: evaluation enabled, rules version inherited, no app tag */
1916 pdata->evaluate = 1;
1917 pdata->rules_version = src ? src->rules_version : 0;
1918 pdata->app_tag = NULL;
/*
 * free_private_data - release a task's RBCE private data after removing
 * it from the debug registry (both calls are no-ops unless PDATA_DEBUG).
 * NOTE(review): the actual kfree and closing brace are elided here.
 */
1925 static inline void free_private_data(struct rbce_private_data *pdata)
1927 if (valid_pdata(pdata)) {
1928 unstore_pdata(pdata);
/*
 * free_all_private_data - detach and free the RBCE private data of every
 * thread in the system. Walks the task list under tasklist_lock (read),
 * clearing each thread's pointer before freeing, so no thread is left
 * with a dangling reference. Called on module unload (see exit_rbce).
 */
1933 static void free_all_private_data(void)
1935 struct task_struct *proc, *thread;
1937 read_lock(&tasklist_lock);
1938 do_each_thread(proc, thread) {
1939 struct rbce_private_data *pdata;
1941 pdata = RBCE_DATA(thread);
1942 RBCE_DATAP(thread) = NULL;
1943 free_private_data(pdata);
1944 } while_each_thread(proc, thread);
1945 read_unlock(&tasklist_lock);
1950 * reclassify function, which is called by all the callback functions.
1952 * Takes that task to be reclassified and ruleflags that indicates the
1953 * attributes that caused this reclassification request.
1955 * On success, returns the core class pointer to which the given task should
/*
 * rbce_classify - central reclassification routine, called by all the
 * event callbacks.
 *
 * Given @tsk (and an optional @ns for socket events), @termflag naming
 * which term categories may have changed, and the target @classtype
 * (parameter line elided in this listing), walk the enabled rules for
 * that classtype and return the ckrm core class of the first rule that
 * matches (with a reference grabbed via ckrm_core_grab), or NULL.
 *
 * Fast path: returns without taking global_rwlock when the engine is
 * disabled, no rules exist, or the task opted out of evaluation.
 * With use_persistent_state the per-task eval/true bitvectors are reused
 * across calls (recreated when gl_bitmap_version changed); otherwise
 * temporary vectors are allocated per call and freed before return.
 *
 * NOTE(review): many lines are elided in this listing (the classtype
 * parameter, several braces/labels, the filename kfree, final return).
 */
1958 static struct ckrm_core_class *rbce_classify(struct task_struct *tsk,
1959 struct ckrm_net_struct *ns,
1960 unsigned long termflag,
1964 struct rbce_rule *rule;
1965 bitvector_t *vec_true = NULL, *vec_eval = NULL;
1966 struct rbce_class *tgt = NULL;
1967 struct ckrm_core_class *cls = NULL;
1968 char *filename = NULL;
1970 if (!valid_pdata(RBCE_DATA(tsk))) {
1973 if (classtype >= CKRM_MAX_CLASSTYPES) {
1974 // can't handle more than CKRM_MAX_CLASSTYPES
1977 // fast path to avoid locking in case CE is not enabled or if no rules
1978 // are defined or if the tasks states that no evaluation is needed.
1979 if (!rbce_enabled || !gl_num_rules ||
1980 (RBCE_DATA(tsk) && !RBCE_DATA(tsk)->evaluate)) {
1983 // FIXME: optimize_policy should be called from here if
1984 // gl_action is non-zero. Also, it has to be called with the
1985 // global_rwlock held in write mode.
1987 read_lock(&global_rwlock);
1989 vec_eval = vec_true = NULL;
1990 if (use_persistent_state) {
1991 struct rbce_private_data *pdata = RBCE_DATA(tsk);
/* stale bitmap version: rebuild pdata, carrying over user-visible state */
1995 && (gl_bitmap_version != pdata->bitmap_version))) {
1996 struct rbce_private_data *new_pdata =
1997 create_private_data(pdata, 1);
2001 new_pdata->rules_version =
2002 pdata->rules_version;
2003 new_pdata->evaluate = pdata->evaluate;
2004 new_pdata->app_tag = pdata->app_tag;
2005 free_private_data(pdata);
2007 pdata = RBCE_DATAP(tsk) = new_pdata;
2008 termflag = RBCE_TERMFLAG_ALL;
2009 // need to evaluate them all
2011 // we shouldn't free the pdata as it has more
2012 // details than the vectors. But, this
2013 // reclassification should go thru
2018 goto cls_determined;
2020 vec_eval = pdata->eval;
2021 vec_true = pdata->true;
/* non-persistent mode: temporary vectors, freed before returning */
2023 int bsize = gl_allocated;
2025 vec_eval = bitvector_alloc(bsize);
2026 vec_true = bitvector_alloc(bsize);
2028 if (vec_eval == NULL || vec_true == NULL) {
2029 goto cls_determined;
2031 termflag = RBCE_TERMFLAG_ALL;
2032 // need to evaluate all of them now
2036 * using bit ops invalidate all terms related to this termflag
2037 * context (only in per task vec)
2039 DPRINTK(DBG_CLASSIFY_DETAILS, "\nClassify: termflag=%lx\n", termflag);
2040 DPRINTK(DBG_CLASSIFY_DETAILS, " eval before: ");
2041 bitvector_print(DBG_CLASSIFY_DETAILS, vec_eval);
2042 DPRINTK(DBG_CLASSIFY_DETAILS, "\n true before: ");
2043 bitvector_print(DBG_CLASSIFY_DETAILS, vec_true);
2044 DPRINTK(DBG_CLASSIFY_DETAILS, "\n redo => ");
2046 if (termflag == RBCE_TERMFLAG_ALL) {
2047 DPRINTK(DBG_CLASSIFY_DETAILS, " redoall ");
2048 bitvector_zero(vec_eval);
/* invalidate only the term categories named by termflag's set bits */
2050 for (i = 0; i < NUM_TERM_MASK_VECTOR; i++) {
2051 if (test_bit(i, &termflag)) {
2052 bitvector_t *maskvec = gl_mask_vecs[i];
2054 DPRINTK(DBG_CLASSIFY_DETAILS, " mask(%d) ", i);
2055 bitvector_print(DBG_CLASSIFY_DETAILS, maskvec);
2056 bitvector_and_not(vec_eval, vec_eval, maskvec);
/* keep "true" bits only for terms still marked evaluated */
2060 bitvector_and(vec_true, vec_true, vec_eval);
2062 DPRINTK(DBG_CLASSIFY_DETAILS, "\n eval now: ");
2063 bitvector_print(DBG_CLASSIFY_DETAILS, vec_eval);
2064 DPRINTK(DBG_CLASSIFY_DETAILS, "\n");
2066 /* run through the rules in order and see what needs evaluation */
2067 list_for_each_entry(rule, &rules_list[classtype], obj.link) {
2068 if (rule->state == RBCE_RULE_ENABLED &&
2069 rule->target_class &&
2070 rule->target_class->classobj &&
2071 evaluate_rule(tsk, ns, rule, vec_eval, vec_true,
/* first matching enabled rule with a live class object wins */
2073 tgt = rule->target_class;
2074 cls = rule->target_class->classobj;
2080 DPRINTK(DBG_CLASSIFY_RES,
2081 "==> |%s|; pid %d; euid %d; egid %d; ruid %d; rgid %d;"
2082 "tag |%s| ===> class |%s|\n",
2083 filename ? filename : tsk->comm,
2089 RBCE_DATA(tsk) ? RBCE_DATA(tsk)->app_tag : "",
2090 tgt ? tgt->obj.name : "");
2091 DPRINTK(DBG_CLASSIFY_DETAILS, " eval after: ");
2092 bitvector_print(DBG_CLASSIFY_DETAILS, vec_eval);
2093 DPRINTK(DBG_CLASSIFY_DETAILS, "\n true after: ");
2094 bitvector_print(DBG_CLASSIFY_DETAILS, vec_true);
2095 DPRINTK(DBG_CLASSIFY_DETAILS, "\n");
2097 if (!use_persistent_state) {
2099 bitvector_free(vec_eval);
2102 bitvector_free(vec_true);
/* hand the caller a referenced class object */
2105 ckrm_core_grab(cls);
2106 read_unlock(&global_rwlock);
2110 if (RBCE_DATA(tsk)) {
2111 RBCE_DATA(tsk)->rules_version = gl_rules_version;
2116 /*****************************************************************************
2118 * Module specific utilization of core RBCE functionality
2120 * Includes support for the various classtypes
2121 * New classtypes will require extensions here
2123 *****************************************************************************/
2125 /* helper functions that are required in the extended version */
/*
 * rbce_tc_manual - handle a manual (user-driven) classification: ensure
 * the task has private data (inherited from its parent if absent) and
 * clear its evaluate flag so rule-based reclassification is bypassed
 * from now on. Runs under global_rwlock (read).
 * NOTE(review): the assignment target of the create_private_data() call
 * at 2133 is elided in this listing.
 */
2127 static inline void rbce_tc_manual(struct task_struct *tsk)
2129 read_lock(&global_rwlock);
2131 if (!RBCE_DATA(tsk)) {
2133 (void *)create_private_data(RBCE_DATA(tsk->parent), 0);
2135 if (RBCE_DATA(tsk)) {
/* pin the task to its manually chosen class */
2136 RBCE_DATA(tsk)->evaluate = 0;
2138 read_unlock(&global_rwlock);
2142 /*****************************************************************************
2143 * load any extensions
2144 *****************************************************************************/
2146 #ifdef RBCE_EXTENSION
2147 #include "rbcemod_ext.c"
2150 /*****************************************************************************
2151 * VARIOUS CLASSTYPES
2152 *****************************************************************************/
2154 // to enable type coercion of the function pointers
2156 /*============================================================================
2157 * TASKCLASS CLASSTYPE
2158 *============================================================================*/
2160 int tc_classtype = -1;
2163 * fork callback to be registered with core module.
/*
 * rbce_tc_forkcb - fork callback registered with the CKRM core.
 * Creates the child's private data from the parent's (under
 * global_rwlock read), then reclassifies the child only if the rules
 * version changed since the parent was classified; otherwise the child
 * keeps the inherited class (returns NULL).
 * NOTE(review): the assignment target at 2172 and closing braces are
 * elided in this listing.
 */
2165 inline static void *rbce_tc_forkcb(struct task_struct *tsk)
2167 int rule_version_changed = 1;
2168 struct ckrm_core_class *cls;
2169 read_lock(&global_rwlock);
2172 (void *)create_private_data(RBCE_DATA(tsk->parent), 0);
2173 read_unlock(&global_rwlock);
2175 if (RBCE_DATA(tsk->parent)) {
2176 rule_version_changed =
2177 (RBCE_DATA(tsk->parent)->rules_version != gl_rules_version);
2179 cls = rule_version_changed ?
2180 rbce_classify(tsk, NULL, RBCE_TERMFLAG_ALL, tc_classtype) : NULL;
2182 // note the fork notification to any user client will be sent through
2183 // the guaranteed fork-reclassification
2188 * exit callback to be registered with core module.
/*
 * rbce_tc_exitcb - exit callback registered with the CKRM core.
 * Sends the exit notification, detaches the task's private data, frees
 * its app_tag (if any), and releases the private data itself.
 * NOTE(review): a NULL check on pdata before the app_tag dereference is
 * not visible in this listing (lines are elided).
 */
2190 static void rbce_tc_exitcb(struct task_struct *tsk)
2192 struct rbce_private_data *pdata;
2194 send_exit_notification(tsk);
2196 pdata = RBCE_DATA(tsk);
2197 RBCE_DATAP(tsk) = NULL;
2199 if (pdata->app_tag) {
2200 kfree(pdata->app_tag);
2202 free_private_data(pdata);
2207 #define AENT(x) [ CKRM_EVENT_##x] = #x
2208 static const char *event_names[CKRM_NUM_EVENTS] = {
/*
 * rbce_tc_classify - taskclass classification entry point; dispatches on
 * the CKRM event type. The varargs carry the affected task_struct.
 * FORK/EXIT are handled via their callbacks; EXEC/UID/GID/RECLASSIFY
 * trigger rbce_classify() with the matching term flags; several events
 * are deliberately ignored. Returns the chosen core class (or NULL).
 * NOTE(review): the switch head, break statements, va_end and return are
 * elided in this listing.
 */
2225 void *rbce_tc_classify(enum ckrm_event event, ...)
2229 struct task_struct *tsk;
2231 va_start(args, event);
2232 tsk = va_arg(args, struct task_struct *);
2235 /* we only have to deal with events between
2236 * [ CKRM_LATCHABLE_EVENTS .. CKRM_NONLATCHABLE_EVENTS )
2239 // printk("tc_classify %p:%d:%s '%s'\n",tsk,tsk->pid,
2240 // tsk->comm,event_names[event]);
2244 case CKRM_EVENT_FORK:
2245 cls = rbce_tc_forkcb(tsk);
2248 case CKRM_EVENT_EXIT:
2249 rbce_tc_exitcb(tsk);
/* exec may change command, uid and gid related terms at once */
2252 case CKRM_EVENT_EXEC:
2253 cls = rbce_classify(tsk, NULL, RBCE_TERMFLAG_CMD |
2254 RBCE_TERMFLAG_UID | RBCE_TERMFLAG_GID,
2258 case CKRM_EVENT_UID:
2259 cls = rbce_classify(tsk, NULL, RBCE_TERMFLAG_UID, tc_classtype);
2262 case CKRM_EVENT_GID:
2263 cls = rbce_classify(tsk, NULL, RBCE_TERMFLAG_GID, tc_classtype);
2266 case CKRM_EVENT_LOGIN:
2267 case CKRM_EVENT_USERADD:
2268 case CKRM_EVENT_USERDEL:
2269 case CKRM_EVENT_LISTEN_START:
2270 case CKRM_EVENT_LISTEN_STOP:
2271 case CKRM_EVENT_APPTAG:
2272 /* no interest in these events */
2279 case CKRM_EVENT_RECLASSIFY:
2280 cls = rbce_classify(tsk, NULL, RBCE_TERMFLAG_ALL, tc_classtype);
2284 // printk("tc_classify %p:%d:%s '%s' ==> %p\n",tsk,tsk->pid,
2285 // tsk->comm,event_names[event],cls);
2290 #ifndef RBCE_EXTENSION
/*
 * rbce_tc_notify - notification callback (non-extension build): only
 * CKRM_EVENT_MANUAL is of interest; it pins the task to the manually
 * assigned class via rbce_tc_manual().
 * NOTE(review): the early-return statement after the event check is
 * elided in this listing.
 */
2291 static void rbce_tc_notify(int event, void *core, struct task_struct *tsk)
2293 printk("tc_manual %p:%d:%s '%s'\n", tsk, tsk->pid, tsk->comm,
2294 event_names[event]);
2295 if (event != CKRM_EVENT_MANUAL)
2297 rbce_tc_manual(tsk);
/*
 * Engine callbacks registered for the taskclass classtype. The notify
 * configuration differs by build: the plain build only cares about
 * manual reclassification, the RBCE_EXTENSION build registers for all
 * events plus class add callbacks (the #else line is elided here).
 */
2301 static struct ckrm_eng_callback rbce_taskclass_ecbs = {
2302 .c_interest = (unsigned long)(-1), // set whole bitmap
2303 .classify = (ce_classify_fct_t) rbce_tc_classify,
2304 .class_delete = rbce_class_deletecb,
2305 #ifndef RBCE_EXTENSION
2306 .n_interest = (1 << CKRM_EVENT_MANUAL),
2307 .notify = (ce_notify_fct_t) rbce_tc_notify,
2308 .always_callback = 0,
2310 .n_interest = (unsigned long)(-1), // set whole bitmap
2311 .notify = (ce_notify_fct_t) rbce_tc_ext_notify,
2312 .class_add = rbce_class_addcb,
2313 .always_callback = 1,
2317 /*============================================================================
2319 *============================================================================*/
2321 int sc_classtype = -1;
/*
 * rbce_sc_classify - socket/accept-queue classification entry point.
 * The varargs carry the ckrm_net_struct followed by the task_struct;
 * all terms are (re)evaluated for the socket classtype. Returns the
 * chosen core class (return statement elided in this listing).
 */
2323 void *rbce_sc_classify(enum ckrm_event event, ...)
2325 // no special consideration
2328 struct task_struct *tsk;
2329 struct ckrm_net_struct *ns;
2331 va_start(args, event);
2332 ns = va_arg(args, struct ckrm_net_struct *);
2333 tsk = va_arg(args, struct task_struct *);
2336 result = rbce_classify(tsk, ns, RBCE_TERMFLAG_ALL, sc_classtype);
2338 DPRINTK(DBG_CLASSIFY_RES,
2339 "==> %d.%d.%d.%d\\%d , %p:%d:%s '%s' => %p\n",
2340 NIPQUAD(ns->ns_daddrv4), ns->ns_dport,
2341 tsk, tsk ? tsk->pid : 0, tsk ? tsk->comm : "-",
2342 event_names[event], result);
/* Engine callbacks registered for the socket (accept-queue) classtype. */
2346 static struct ckrm_eng_callback rbce_acceptQclass_ecbs = {
2347 .c_interest = (unsigned long)(-1),
2348 .always_callback = 0, // enable during debugging only
2349 .classify = (ce_classify_fct_t) & rbce_sc_classify,
2350 .class_delete = rbce_class_deletecb,
2353 /*============================================================================
2354 * Module Initialization ...
2355 *============================================================================*/
2357 #define TASKCLASS_NAME "taskclass"
2358 #define SOCKCLASS_NAME "socket_class"
/*
 * Registration table mapping each classtype name to its callback set and
 * the variable receiving the classtype id (name field elided here); the
 * list is walked by register/unregister_classtype_engines() below and is
 * terminated by a sentinel entry (elided in this listing).
 */
2360 struct ce_regtable_struct {
2362 struct ckrm_eng_callback *cbs;
2366 struct ce_regtable_struct ce_regtable[] = {
2367 {TASKCLASS_NAME, &rbce_taskclass_ecbs, &tc_classtype},
2368 {SOCKCLASS_NAME, &rbce_acceptQclass_ecbs, &sc_classtype},
/*
 * register_classtype_engines - register this engine with the CKRM core
 * for every classtype in ce_regtable. A positive return from
 * ckrm_register_engine() is the classtype id, stored via clsvar;
 * failures other than -ENOENT abort (error path and loop advance are
 * elided in this listing).
 */
2372 static int register_classtype_engines(void)
2375 struct ce_regtable_struct *ceptr = ce_regtable;
2377 while (ceptr->name) {
2378 rc = ckrm_register_engine(ceptr->name, ceptr->cbs);
2379 printk("ce register with <%s> typeId=%d\n", ceptr->name, rc);
2380 if ((rc < 0) && (rc != -ENOENT))
2383 *ceptr->clsvar = rc;
/*
 * unregister_classtype_engines - undo register_classtype_engines():
 * unregister each classtype that was successfully registered (clsvar
 * >= 0) and reset its id to -1. Loop advance and closing braces are
 * elided in this listing.
 */
2389 static void unregister_classtype_engines(void)
2392 struct ce_regtable_struct *ceptr = ce_regtable;
2394 while (ceptr->name) {
2395 if (*ceptr->clsvar >= 0) {
2396 printk("ce unregister with <%s>\n", ceptr->name);
2397 rc = ckrm_unregister_engine(ceptr->name);
2398 printk("ce unregister with <%s> rc=%d\n", ceptr->name,
2400 *ceptr->clsvar = -1;
2406 // =========== /proc/sysctl/debug/rbce debug stuff =============
/*
 * Debug sysctl plumbing: exposes an RBCE debug knob under
 * /proc/sys/debug (entries partially elided: procname/data fields of
 * rbce_entry_table are not visible in this listing). start_debug()/
 * stop_debug() register and unregister the table; the second pair of
 * definitions below is the no-op variant for the non-debug build (the
 * surrounding #ifdef/#else lines are elided).
 */
2409 static struct ctl_table_header *rbce_sysctl_table_header;
2411 #define CTL_RBCE_DEBUG (201) // picked some number.. dont know algo to pick
2412 static struct ctl_table rbce_entry_table[] = {
2414 .ctl_name = CTL_RBCE_DEBUG,
2417 .maxlen = sizeof(int),
2419 .proc_handler = &proc_dointvec,
2424 static struct ctl_table rbce_root_table[] = {
2426 .ctl_name = CTL_DEBUG,
2427 .procname = "debug",
2431 .child = rbce_entry_table},
2435 static inline void start_debug(void)
2437 rbce_sysctl_table_header = register_sysctl_table(rbce_root_table, 1);
2439 static inline void stop_debug(void)
2441 if (rbce_sysctl_table_header)
2442 unregister_sysctl_table(rbce_sysctl_table_header);
2447 static inline void start_debug(void)
2450 static inline void stop_debug(void)
2456 extern int rbce_mkdir(struct inode *, struct dentry *, int);
2457 extern int rbce_rmdir(struct inode *, struct dentry *);
2458 extern int rbce_create_magic(void);
2459 extern int rbce_clear_magic(void);
2461 rbce_eng_callback_t rcfs_ecbs = {
2468 /* ======================= Module definition Functions ====================== */
/*
 * Body of the module init function (its signature line, presumably
 * "static int init_rbce(void)", is elided from this listing).
 * Initializes the per-classtype rule lists, then registers in order:
 * extension pre-init, classtype engines, the rcfs engine, and the rcfs
 * magic files; finally runs extension post-init. On failure, unwinds
 * the registrations done so far via the goto labels (labels themselves
 * elided) and logs the failing line.
 */
2474 printk("<1>\nInstalling \'%s\' module\n", modname);
2476 for (i = 0; i < CKRM_MAX_CLASSTYPES; i++) {
2477 INIT_LIST_HEAD(&rules_list[i]);
2480 rc = init_rbce_ext_pre();
2485 rc = register_classtype_engines();
2488 goto out_unreg_ckrm; // need to remove anyone opened
2490 /* register any other class type engine here */
2492 rc = rcfs_register_engine(&rcfs_ecbs);
2495 goto out_unreg_ckrm;
2498 rc = rbce_create_magic();
2501 goto out_unreg_rcfs;
2506 rc = init_rbce_ext_post();
2511 return 0; // SUCCESS
/* error unwind path (labels elided in this listing) */
2517 rcfs_unregister_engine(&rcfs_ecbs);
2519 unregister_classtype_engines();
2523 printk("<1>%s: error installing rc=%d line=%d\n", __FUNCTION__, rc,
/*
 * exit_rbce - module unload: warn about leftover classes/rules (which
 * would indicate a bug), unregister the rcfs and classtype engines, and
 * free all per-task private data. Several lines (magic-file cleanup,
 * extension teardown, braces) are elided in this listing.
 */
2528 void exit_rbce(void)
2532 printk("<1>Removing \'%s\' module\n", modname);
2537 // Print warnings if lists are not empty, which is a bug
2538 if (!list_empty(&class_list)) {
2539 printk("exit_rbce: Class list is not empty\n");
2542 for (i = 0; i < CKRM_MAX_CLASSTYPES; i++) {
2543 if (!list_empty(&rules_list[i])) {
2544 printk("exit_rbce: Rules list for classtype %d"
2545 " is not empty\n", i);
2552 rcfs_unregister_engine(&rcfs_ecbs);
2553 unregister_classtype_engines();
2554 free_all_private_data();
2557 EXPORT_SYMBOL(get_rule);
2558 EXPORT_SYMBOL(rule_exists);
2559 EXPORT_SYMBOL(change_rule);
2560 EXPORT_SYMBOL(delete_rule);
2561 EXPORT_SYMBOL(rename_rule);
2562 EXPORT_SYMBOL(reclassify_pid);
2563 EXPORT_SYMBOL(set_tasktag);
2565 module_init(init_rbce);
2566 module_exit(exit_rbce);