1 /* Rule-based Classification Engine (RBCE) and
2 * Consolidated RBCE module code (combined)
4 * Copyright (C) Hubertus Franke, IBM Corp. 2003
5 * (C) Chandra Seetharaman, IBM Corp. 2003
6 * (C) Vivek Kashyap, IBM Corp. 2004
8 * Module for loading of classification policies and providing
9 * a user API for Class-based Kernel Resource Management (CKRM)
11 * Latest version, more details at http://ckrm.sf.net
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2 of the License, or
16 * (at your option) any later version.
18 * This program is distributed in the hope that it would be useful, but
19 * WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
27 * Created. First cut with much scope for cleanup !
29 * Made modifications to suit the new RBCE module.
30 * Made modifications to address sampling and delivery
32 * Integrated changes from original RBCE module
34 * Merged RBCE and CRBCE into common code base
36 * Incorporated listen call back and IPv4 match support
38 * Added Multi-Classtype Support
41 #include <linux/init.h>
42 #include <linux/module.h>
43 #include <linux/kernel.h>
45 #include <asm/uaccess.h>
47 #include <linux/mount.h>
48 #include <linux/proc_fs.h>
49 #include <linux/limits.h>
50 #include <linux/pid.h>
51 #include <linux/sysctl.h>
53 #include <linux/ckrm_rc.h>
54 #include <linux/ckrm_ce.h>
55 #include <linux/ckrm_net.h>
56 #include "bitvector.h"
57 #include <linux/rbce.h>
59 #warning MEF I cannot believe that vserver changes force the following include statement: FIX THIS!
60 #include <linux/vs_cvirt.h>
65 MODULE_DESCRIPTION(RBCE_MOD_DESCR);
66 MODULE_AUTHOR("Hubertus Franke, Chandra Seetharaman (IBM)");
67 MODULE_LICENSE("GPL");
69 static char modname[] = RBCE_MOD_NAME;
71 /* ==================== typedef, global variables etc., ==================== */
/* Common header embedded at the start of named RBCE objects (rules, classes)
 * so they can share list linkage and name lookup.
 * NOTE(review): member lines (name, referenced) are elided in this listing. */
72 struct named_obj_hdr {
73 	struct list_head link;
/* Non-atomic reference counting on the embedded header; callers rely on the
 * single global_rwlock for serialization (see locking notes in this file). */
78 #define GET_REF(x) ((x)->obj.referenced)
79 #define INC_REF(x) (GET_REF(x)++)
80 #define DEC_REF(x) (--GET_REF(x))
/* Start of struct rbce_class / rbce_rule region (fields elided). */
82 	struct named_obj_hdr obj;
88 RBCE_RULE_CMD_PATH = 1, // full qualified path
89 RBCE_RULE_CMD, // basename of the command
90 RBCE_RULE_ARGS, // arguments of the command
91 RBCE_RULE_REAL_UID, // task's real uid
92 RBCE_RULE_REAL_GID, // task's real gid
93 RBCE_RULE_EFFECTIVE_UID, // task's effective uid
94 RBCE_RULE_EFFECTIVE_GID, // task's effective gid
95 RBCE_RULE_APP_TAG, // task's application tag
96 RBCE_RULE_IPV4, // IP address of listen(), ipv4 format
97 RBCE_RULE_IPV6, // IP address of listen(), ipv6 format
98 RBCE_RULE_XID, // VSERVER
99 RBCE_RULE_DEP_RULE, // dependent rule; must be the first term
100 RBCE_RULE_INVALID, // invalid, for filler
101 RBCE_RULE_INVALID2, // invalid, for filler
111 struct rbce_rule_term {
113 rbce_operator_t operator;
115 char *string; // path, cmd, arg, tag, ipv4 and ipv6
116 long id; // uid, gid, euid, egid
117 struct rbce_rule *deprule;
122 struct named_obj_hdr obj;
123 struct rbce_class *target_class;
126 int *terms; // vector of indices into the global term vector
127 int index; // index of this rule into the global term vector
128 int termflag; // which term ids would require a recalculation
129 int do_opt; // do we have to consider this rule during optimize
130 char *strtab; // string table to store the strings of all terms
131 int order; // order of execution of this rule
132 int state; // RBCE_RULE_ENABLED/RBCE_RULE_DISABLED
136 #define RBCE_RULE_DISABLED 0
137 #define RBCE_RULE_ENABLED 1
140 // Data structures and macros used for optimization
141 #define RBCE_TERM_CMD (0)
142 #define RBCE_TERM_UID (1)
143 #define RBCE_TERM_GID (2)
144 #define RBCE_TERM_TAG (3)
145 #define RBCE_TERM_IPV4 (4)
146 #define RBCE_TERM_IPV6 (5)
147 #define RBCE_TERM_XID (6)
149 #define NUM_TERM_MASK_VECTOR (7) // must be one more the last RBCE_TERM_...
151 // Rule flags. 1 bit for each type of rule term
152 #define RBCE_TERMFLAG_CMD (1 << RBCE_TERM_CMD)
153 #define RBCE_TERMFLAG_UID (1 << RBCE_TERM_UID)
154 #define RBCE_TERMFLAG_GID (1 << RBCE_TERM_GID)
155 #define RBCE_TERMFLAG_TAG (1 << RBCE_TERM_TAG)
156 #define RBCE_TERMFLAG_IPV4 (1 << RBCE_TERM_IPV4)
157 #define RBCE_TERMFLAG_IPV6 (1 << RBCE_TERM_IPV6)
158 #define RBCE_TERMFLAG_XID (1 << RBCE_TERM_XID)
159 #define RBCE_TERMFLAG_ALL (RBCE_TERMFLAG_CMD | RBCE_TERMFLAG_UID | \
160 RBCE_TERMFLAG_GID | RBCE_TERMFLAG_TAG | RBCE_TERMFLAG_XID | \
161 RBCE_TERMFLAG_IPV4 | RBCE_TERMFLAG_IPV6)
/* Maps each rule-term opcode (RBCE_RULE_*) to the optimization mask-vector
 * index (RBCE_TERM_*) it belongs to; DEP_RULE maps to -1 because a dependent
 * rule spans multiple contexts and is handled specially in optimize_policy(). */
163 int termop_2_vecidx[RBCE_RULE_INVALID] = {
164 	[RBCE_RULE_CMD_PATH] = RBCE_TERM_CMD,
165 	[RBCE_RULE_CMD] = RBCE_TERM_CMD,
166 	[RBCE_RULE_ARGS] = RBCE_TERM_CMD,
167 	[RBCE_RULE_REAL_UID] = RBCE_TERM_UID,
168 	[RBCE_RULE_REAL_GID] = RBCE_TERM_GID,
169 	[RBCE_RULE_EFFECTIVE_UID] = RBCE_TERM_UID,
170 	[RBCE_RULE_EFFECTIVE_GID] = RBCE_TERM_GID,
171 	[RBCE_RULE_XID] = RBCE_TERM_XID,
172 	[RBCE_RULE_APP_TAG] = RBCE_TERM_TAG,
173 	[RBCE_RULE_IPV4] = RBCE_TERM_IPV4,
174 	[RBCE_RULE_IPV6] = RBCE_TERM_IPV6,
175 	[RBCE_RULE_DEP_RULE] = -1
/* Convert a term opcode / mask-vector index into its single-bit term flag. */
178 #define TERMOP_2_TERMFLAG(x) (1 << termop_2_vecidx[x])
179 #define TERM_2_TERMFLAG(x) (1 << x)
181 #define POLICY_INC_NUMTERMS (BITS_PER_LONG) // No. of terms added at a time
182 #define POLICY_ACTION_NEW_VERSION 0x01 // Force reallocation
183 #define POLICY_ACTION_REDO_ALL 0x02 // Recompute all rule flags
184 #define POLICY_ACTION_PACK_TERMS 0x04 // Time to pack the terms
/* When set, per-task eval/true bitvectors persist across classification
 * events and are only selectively invalidated (see reset_evaluation()). */
186 const int use_persistent_state = 1;
/* Callback table registered with the CKRM core engine. */
188 struct ckrm_eng_callback ckrm_ecbs;
/* Global term-vector state: version stamp, pending action flags
 * (POLICY_ACTION_*), term counts, and allocation bookkeeping. */
192 static int gl_bitmap_version, gl_action, gl_num_terms;
193 static int gl_allocated, gl_released;
194 struct rbce_rule_term *gl_terms;
/* One bitvector per term context (cmd/uid/gid/tag/ipv4/ipv6/xid);
 * index 0 also holds the kmalloc'ed base address (see optimize_policy()). */
195 bitvector_t *gl_mask_vecs[NUM_TERM_MASK_VECTOR];
198 static void optimize_policy(void);
200 #ifndef CKRM_MAX_CLASSTYPES
201 #define CKRM_MAX_CLASSTYPES 32
/* Per-classtype rule lists plus the single list of known classes. */
204 struct list_head rules_list[CKRM_MAX_CLASSTYPES];
205 LIST_HEAD(class_list);	// List of classes used
207 static int gl_num_rules;
208 static int gl_rules_version;
209 int rbce_enabled = 1;
/* Single lock protecting all of the above (see comment below). */
210 static rwlock_t global_rwlock = RW_LOCK_UNLOCKED;
212 * One lock to protect them all !!!
213 * Additions, deletions to rules must
214 * happen with this lock being held in write mode.
215 * Access(read/write) to any of the data structures must happen
216 * with this lock held in read mode.
217 * Since, rule related changes do not happen very often it is ok to
218 * have single rwlock.
222 * data structure rbce_private_data holds the bit vector 'eval' which
223 * specifies if rules and terms of rules are evaluated against the task
224 * and if they were evaluated, bit vector 'true' holds the result of that
227 * This data structure is maintained in a task, and the bitvectors are
228 * updated only when needed.
230 * Each rule and each term of a rule has a corresponding bit in the vector.
/* Per-task RBCE state hung off task_struct->ce_data: cached rule/term
 * evaluation bitvectors ('eval' = evaluated?, 'true' = result) plus version
 * stamps used to detect stale caches. Fields are partially elided here. */
233 struct rbce_private_data {
234 	struct rbce_ext_private_data ext_data;
235 	int evaluate;		// whether to evaluate rules or not ?
/* NOTE(review): comment below looks copy-pasted; field tracks the rules
 * version this cache was computed against — confirm against full source. */
236 	int rules_version;	// whether to evaluate rules or not ?
238 	unsigned long bitmap_version;
241 	char data[0];		// eval points to this variable size data array
/* Accessors for the per-task pointer stored in tsk->ce_data. */
244 #define RBCE_DATA(tsk) ((struct rbce_private_data*)((tsk)->ce_data))
245 #define RBCE_DATAP(tsk) ((tsk)->ce_data)
247 /* ======================= DEBUG Functions ========================= */
251 int rbcedebug = 0x00;
253 #define DBG_CLASSIFY_RES ( 0x01 )
254 #define DBG_CLASSIFY_DETAILS ( 0x02 )
255 #define DBG_OPTIMIZATION ( 0x04 )
256 #define DBG_SHOW_RESCTL ( 0x08 )
257 #define DBG_CLASS ( 0x10 )
258 #define DBG_RULE ( 0x20 )
259 #define DBG_POLICY ( 0x40 )
261 #define DPRINTK(x, y...) if (rbcedebug & (x)) printk(KERN_DEBUG y)
262 // debugging selectively enabled through /proc/sys/debug/rbce
264 static void print_context_vectors(void)
268 if ((rbcedebug & DBG_OPTIMIZATION) == 0) {
271 for (i = 0; i < NUM_TERM_MASK_VECTOR; i++) {
272 printk(KERN_DEBUG "%d: ", i);
273 bitvector_print(DBG_OPTIMIZATION, gl_mask_vecs[i]);
274 printk(KERN_DEBUG "\n");
279 #define DPRINTK(x, y...)
280 #define print_context_vectors(x)
283 /* ====================== VSERVER support ========================== */
284 #define CONFIG_VSERVER
285 #ifdef CONFIG_VSERVER
286 #include <linux/vs_base.h>
288 typedef unsigned int xid_t;
289 #define vx_task_xid(t) (0)
292 /* ======================= Helper Functions ========================= */
296 static struct ckrm_core_class *rbce_classify(struct task_struct *,
297 struct ckrm_net_struct *,
298 unsigned long, int classtype);
/* Look up a rule by name, scanning every classtype's rule list.
 * Returns the rule on a name match (NULL-return path elided in this listing).
 * Caller must hold global_rwlock at least in read mode. */
300 static inline struct rbce_rule *find_rule_name(const char *name)
302 	struct named_obj_hdr *pos;
305 	for (i = 0; i < CKRM_MAX_CLASSTYPES; i++) {
306 		list_for_each_entry(pos, &rules_list[i], link) {
307 			if (!strcmp(pos->name, name)) {
308 				return ((struct rbce_rule *)pos);
/* Look up a class by name in the global class_list.
 * Returns the class on a match (NULL-return path elided in this listing).
 * Caller must hold global_rwlock at least in read mode. */
315 static inline struct rbce_class *find_class_name(const char *name)
317 	struct named_obj_hdr *pos;
319 	list_for_each_entry(pos, &class_list, link) {
320 		if (!strcmp(pos->name, name))
321 			return (struct rbce_class *)pos;
327  * Insert the given rule at the specified order
328  * order = -1 ==> insert at the tail.
330  * Caller must hold global_rwlock in write mode.
332 static int insert_rule(struct rbce_rule *rule, int order)
334 #define ORDER_COUNTER_INCR 10
	/* Monotonic counter handing out execution-order values, spaced
	 * ORDER_COUNTER_INCR apart so rules can later be inserted between. */
335 	static int order_counter;
337 	struct list_head *head = &rules_list[rule->classtype];
338 	struct list_head *insert = head;
339 	struct rbce_rule *tmp;
341 	if (gl_num_rules == 0) {
347 	rule->order = order_counter;
348 	// FIXME: order_counter overflow/wraparound!!
349 	order_counter += ORDER_COUNTER_INCR;
	/* Remember the counter so it can be rolled back on an order clash. */
352 	old_counter = order_counter;
353 	if (order_counter < order) {
354 	order_counter = order;
357 	order_counter += ORDER_COUNTER_INCR;
	/* Walk the (order-sorted) list to find the insertion point; an exact
	 * order collision rolls the counter back (error path elided here). */
358 	list_for_each_entry(tmp, head, obj.link) {
359 	if (rule->order == tmp->order) {
360 	order_counter = old_counter;
363 	if (rule->order < tmp->order) {
364 	insert = &tmp->obj.link;
369 	list_add_tail(&rule->obj.link, insert);
370 	// protect the module from being removed while any rule is
372 	try_module_get(THIS_MODULE);
379  * Remove the rule and reinsert at the specified order.
381  * Caller must hold global_rwlock in write mode.
383 static int reinsert_rule(struct rbce_rule *rule, int order)
385 	if (!list_empty(&rule->obj.link)) {
386 		list_del_init(&rule->obj.link);
	/* Drop the module reference taken by insert_rule() before re-adding. */
389 	module_put(THIS_MODULE);
391 	return insert_rule(rule, order);
395  * Get a reference to the class, create one if it doesn't exist
397  * Caller needs to hold global_rwlock in write mode.
/* Allocate and register a new rbce_class wrapper for the given core class
 * object, appending it to class_list. Returns the new class (NULL paths for
 * classtype overflow / allocation failure are partially elided here).
 * Caller must hold global_rwlock in write mode. */
401 static struct rbce_class *create_rbce_class(const char *classname,
402 					    int classtype, void *classobj)
404 	struct rbce_class *cls;
406 	if (classtype >= CKRM_MAX_CLASSTYPES) {
408 		       "ckrm_classobj returned %d as classtype which cannot "
409 		       " be handled by RBCE\n", classtype);
	/* GFP_ATOMIC: allocation happens under the global rwlock. */
413 	cls = kmalloc(sizeof(struct rbce_class), GFP_ATOMIC);
417 	cls->obj.name = kmalloc(strlen(classname) + 1, GFP_ATOMIC);
420 	cls->classobj = classobj;
421 	strcpy(cls->obj.name, classname);
422 	list_add_tail(&cls->obj.link, &class_list);
423 	cls->classtype = classtype;
/* Return the rbce_class for classname, creating one (via the core's
 * ckrm_classobj lookup) if it is not yet known. *classtype is set from the
 * existing class or by ckrm_classobj(). Reference-count bump is elided here.
 * Caller must hold global_rwlock in write mode. */
431 static struct rbce_class *get_class(const char *classname, int *classtype)
433 	struct rbce_class *cls;
439 	cls = find_class_name(classname);
443 	*classtype = cls->classtype;
	/* Not cached yet: ask the CKRM core for the class object. */
448 	classobj = ckrm_classobj(classname, classtype);
453 	return create_rbce_class(classname, *classtype, classobj);
457  * Drop a reference to the class; free it once the refcount reaches zero
459  * Caller needs to hold global_rwlock in write mode.
461 static void put_class(struct rbce_class *cls)
	/* Last reference gone: unlink from class_list and free the name
	 * (freeing of the class struct itself is elided in this listing). */
464 	if (DEC_REF(cls) <= 0) {
465 		list_del(&cls->obj.link);
466 		kfree(cls->obj.name);
474  * Callback from core when a class is added
477 #ifdef RBCE_EXTENSION
478 static void rbce_class_addcb(const char *classname, void *clsobj, int classtype)
480 	struct rbce_class *cls;
482 	write_lock(&global_rwlock);
	/* Create-or-find the class, then bind it to the core's class object
	 * and notify extension listeners of the addition (arg 1 == added). */
483 	cls = get_class(classname, &classtype);
485 		cls->classobj = clsobj;
486 	notify_class_action(cls, 1);
488 	write_unlock(&global_rwlock);
494  * Callback from core when a class is deleted.
497 rbce_class_deletecb(const char *classname, void *classobj, int classtype)
	/* NOTE(review): 'static' on a local pointer looks unintentional —
	 * confirm against full source before changing. */
499 	static struct rbce_class *cls;
500 	struct named_obj_hdr *pos;
501 	struct rbce_rule *rule;
503 	write_lock(&global_rwlock);
504 	cls = find_class_name(classname);
506 #ifdef RBCE_EXTENSION
	/* Sanity check: core's object must match what we cached at add time. */
509 		if (cls->classobj != classobj) {
510 			printk(KERN_ERR "rbce: class %s changed identity\n",
513 		notify_class_action(cls, 0);
514 		cls->classobj = NULL;
		/* Detach every rule of this classtype that targeted the class,
		 * leaving the rule in place but unclassified (classtype -1). */
515 		list_for_each_entry(pos, &rules_list[classtype], link) {
516 			rule = (struct rbce_rule *)pos;
517 			if (rule->target_class) {
519 				    (rule->target_class->obj.name, classname)) {
521 					rule->target_class = NULL;
522 					rule->classtype = -1;
	/* The class must be fully gone now; anything left is a refcount bug. */
526 	if ((cls = find_class_name(classname)) != NULL) {
528 		       "rbce ERROR: class %s exists in rbce after "
529 		       "removal in core\n", classname);
532 	write_unlock(&global_rwlock);
537  * Allocate an index in the global term vector
538  * On success, returns the index. On failure returns -errno.
539  * Caller must hold the global_rwlock in write mode as global data is
542 static int alloc_term_index(void)
544 	int size = gl_allocated;
	/* Grow the vector by POLICY_INC_NUMTERMS when full: allocate, copy,
	 * and mark the new tail slots (init of new slots elided here). */
546 	if (gl_num_terms >= size) {
548 		struct rbce_rule_term *oldv, *newv;
549 		int newsize = size + POLICY_INC_NUMTERMS;
553 		    kmalloc(newsize * sizeof(struct rbce_rule_term),
558 			memcpy(newv, oldv, size * sizeof(struct rbce_rule_term));
559 			for (i = size; i < newsize; i++) {
563 			gl_allocated = newsize;
		/* A resize invalidates the mask bitvectors; force realloc. */
566 			gl_action |= POLICY_ACTION_NEW_VERSION;
567 			DPRINTK(DBG_OPTIMIZATION,
568 				"alloc_term_index: Expanding size from %d to %d\n",
571 	return gl_num_terms++;
575  * Release an index in the global term vector
577  * Caller must hold the global_rwlock in write mode as the global data
580 static void release_term_index(int idx)
582 	if ((idx < 0) || (idx > gl_num_terms))
	/* Mark the slot free; indices are not reused until a pack pass. */
585 	gl_terms[idx].op = -1;
	/* Once enough slots are released, schedule a pack of the term vector
	 * (condition partially elided in this listing). */
587 	if ((gl_released > POLICY_INC_NUMTERMS) &&
589 	     (gl_num_terms - gl_released + POLICY_INC_NUMTERMS))) {
590 		gl_action |= POLICY_ACTION_PACK_TERMS;
596  * Release the indices, string memory, and terms associated with the given
599  * Caller should be holding global_rwlock
601 static void __release_rule(struct rbce_rule *rule)
603 	int i, *terms = rule->terms;
605 	// remove memory and references from other rules
606 	for (i = rule->num_terms; --i >= 0;) {
607 		struct rbce_rule_term *term = &gl_terms[terms[i]];
		/* A DEP_RULE term pins the rule it depends on; drop that ref. */
609 		if (term->op == RBCE_RULE_DEP_RULE) {
610 			DEC_REF(term->u.deprule);
612 		release_term_index(terms[i]);
627  * delete the given rule and all memory associated with it.
629  * Caller is responsible for protecting the global data
631 static inline int __delete_rule(struct rbce_rule *rule)
633 	// make sure we are not referenced by other rules
634 	if (list_empty(&rule->obj.link)) {
	/* Free terms/strings, drop class ref, release the rule's own term
	 * slot, unlink, and balance insert_rule()'s try_module_get(). */
640 	__release_rule(rule);
641 	put_class(rule->target_class);
642 	release_term_index(rule->index);
643 	list_del_init(&rule->obj.link);
646 	module_put(THIS_MODULE);
647 	kfree(rule->obj.name);
653  * Optimize the rule evaluation logic
655  * Caller must hold global_rwlock in write mode.
657 static void optimize_policy(void)
660 	struct rbce_rule *rule;
661 	struct rbce_rule_term *terms;
664 	bitvector_t **mask_vecs;
669 	 * Due to dynamic rule addition/deletion of rules the term
670 	 * vector can get sparse. As a result the bitvectors grow as we don't
671 	 * reuse returned indices. If it becomes sparse enough we pack them
675 	pack_terms = (gl_action & POLICY_ACTION_PACK_TERMS);
676 	DPRINTK(DBG_OPTIMIZATION,
677 		"----- Optimize Policy ----- act=%x pt=%d (a=%d n=%d r=%d)\n",
678 		gl_action, pack_terms, gl_allocated, gl_num_terms, gl_released);
	/* --- Phase 1 (optional): pack the sparse term vector. Allocate a
	 * compact vector and renumber every rule's term indices into it. --- */
681 		int nsz = ALIGN((gl_num_terms - gl_released),
682 				POLICY_INC_NUMTERMS);
684 		struct rbce_rule_term *newterms;
688 		    kmalloc(nsz * sizeof(struct rbce_rule_term), GFP_ATOMIC);
690 		for (ii = 0; ii < CKRM_MAX_CLASSTYPES; ii++) {
691 			// FIXME: check only for task class types
692 			list_for_each_entry_reverse(rule,
695 				rule->index = newidx++;
696 				for (i = rule->num_terms; --i >= 0;) {
697 					int idx = rule->terms[i];
698 					newterms[newidx] = terms[idx];
699 					rule->terms[i] = newidx++;
706 		gl_num_terms = newidx;
		/* Packing changes every index, so force new mask vectors. */
709 		gl_action &= ~POLICY_ACTION_PACK_TERMS;
710 		gl_action |= POLICY_ACTION_NEW_VERSION;
714 	num_terms = gl_num_terms;
	/* Bytes per bitvector, sized to the allocated (not used) term count. */
715 	bsize = gl_allocated / 8 + sizeof(bitvector_t);
716 	mask_vecs = gl_mask_vecs;
	/* --- Phase 2 (optional): reallocate the per-context mask vectors as
	 * one contiguous kmalloc block carved into NUM_TERM_MASK_VECTOR
	 * slices; a realloc forces a full recompute below. --- */
719 	if (gl_action & POLICY_ACTION_NEW_VERSION) {
720 		/* allocate new mask vectors */
721 		char *temp = kmalloc(NUM_TERM_MASK_VECTOR * bsize, GFP_ATOMIC);
723 		DPRINTK(DBG_OPTIMIZATION,
724 			"------ allocmasks act=%x ------- ver=%d\n", gl_action,
729 		if (mask_vecs[0]) {// index 0 has the alloc returned address
732 		for (i = 0; i < NUM_TERM_MASK_VECTOR; i++) {
733 			mask_vecs[i] = (bitvector_t *) (temp + i * bsize);
734 			bitvector_init(mask_vecs[i], gl_allocated);
736 		gl_action &= ~POLICY_ACTION_NEW_VERSION;
737 		gl_action |= POLICY_ACTION_REDO_ALL;
741 	/* We do two things here at once
742 	 * 1) recompute the rulemask for each required rule
743 	 *    we guarantee proper dependency order during creation time and
744 	 *    by reversely running through this list.
745 	 * 2) recompute the mask for each term and rule, if required
748 	redoall = gl_action & POLICY_ACTION_REDO_ALL;
749 	gl_action &= ~POLICY_ACTION_REDO_ALL;
751 	DPRINTK(DBG_OPTIMIZATION, "------- run act=%x -------- redoall=%d\n",
753 	for (ii = 0; ii < CKRM_MAX_CLASSTYPES; ii++) {
754 		// FIXME: check only for task class types
755 		list_for_each_entry_reverse(rule, &rules_list[ii], obj.link) {
756 			unsigned long termflag;
			/* Skip rules that need no recompute unless forced. */
758 			if (!redoall && !rule->do_opt)
761 			for (i = rule->num_terms; --i >= 0;) {
762 				int j, idx = rule->terms[i];
763 				struct rbce_rule_term *term = &terms[idx];
764 				int vecidx = termop_2_vecidx[term->op];
				/* Dependent rule: inherit its context flags. */
767 					termflag |= term->u.deprule->termflag;
768 					/* mark this term belonging to all
769 					   contexts of deprule */
770 					for (j = 0; j < NUM_TERM_MASK_VECTOR;
772 						if (term->u.deprule->termflag
780 					termflag |= TERM_2_TERMFLAG(vecidx);
781 					/* mark this term belonging to
782 					   a particular context */
783 					bitvector_set(idx, mask_vecs[vecidx]);
			/* Mark the rule's own index in every context vector
			 * any of its terms belongs to. */
786 			for (i = 0; i < NUM_TERM_MASK_VECTOR; i++) {
787 				if (termflag & (1 << i)) {
788 					bitvector_set(rule->index,
792 			rule->termflag = termflag;
794 			DPRINTK(DBG_OPTIMIZATION, "r-%s: %x %d\n",
795 				rule->obj.name, rule->termflag, rule->index);
798 	print_context_vectors();
802 /* ======================= Rule related Functions ========================= */
805  * Caller needs to hold global_rwlock in write mode.
/* Digest the parsed term array into 'newrule': validate terms, resolve the
 * target class and dependent rules, allocate per-rule term indices in the
 * global term vector, and copy term strings into a per-rule string table.
 * Returns 0 on success or -errno (error paths largely elided in this
 * listing). Caller must hold global_rwlock in write mode. */
808 fill_rule(struct rbce_rule *newrule, struct rbce_rule_term *terms, int nterms)
810 	char *class, *strtab;
811 	int i, j, order, state, real_nterms, index;
812 	int strtablen, rc = 0, counter;
813 	struct rbce_rule_term *term = NULL;
814 	struct rbce_class *targetcls = NULL;
815 	struct rbce_rule *deprule;
820 	// Digest filled terms.
822 	strtab = class = NULL;
	/* Pass 1: validate each real term and total up string-table bytes. */
827 	for (i = 0; i < nterms; i++) {
828 		if (terms[i].op != RBCE_RULE_INVALID) {
831 		switch (terms[i].op) {
832 		case RBCE_RULE_DEP_RULE:
833 			// check if the depend rule is valid
835 			deprule = find_rule_name(terms[i].u.string);
			/* A rule may not depend on itself or on nothing. */
836 			if (!deprule || deprule == newrule) {
840 			// make sure _a_ depend rule
841 			// appears in only one term.
842 			for (j = 0; j < i; j++) {
845 				    && terms[j].u.deprule ==
851 			terms[i].u.deprule = deprule;
854 			// +depend is acceptable and -depend is not
855 			if (terms[i].operator != TOKEN_OP_DEP_DEL) {
856 				terms[i].operator = RBCE_EQUAL;
863 		case RBCE_RULE_CMD_PATH:
866 		case RBCE_RULE_APP_TAG:
869 			// sum up the string length
870 			strtablen += strlen(terms[i].u.string) + 1;
		/* Pseudo-terms carried via operator: order / state / class. */
877 			switch (terms[i].operator) {
879 				order = terms[i].u.id;
886 				state = terms[i].u.id != 0;
889 				class = terms[i].u.string;
897 	// Check if class was specified
900 		if ((targetcls = get_class(class, &classtype)) == NULL) {
		/* Replacing the target: drop the old class reference. */
904 			put_class(newrule->target_class);
906 		newrule->target_class = targetcls;
907 		newrule->classtype = classtype;
909 	if (!newrule->target_class) {
915 	newrule->state = state;
918 	newrule->order = order;
	/* Pass 2 setup: index vector for the real terms + string table. */
920 	newrule->terms = kmalloc(real_nterms * sizeof(int), GFP_ATOMIC);
921 	if (!newrule->terms) {
925 	newrule->num_terms = real_nterms;
926 	if (strtablen && ((strtab = kmalloc(strtablen, GFP_ATOMIC)) == NULL)) {
	/* First fill: give the rule its own slot in the global term vector,
	 * typed as a DEP_RULE term pointing back at itself. */
931 	if (newrule->index == -1) {
932 		index = alloc_term_index();
937 		newrule->index = index;
938 		term = &gl_terms[newrule->index];
939 		term->op = RBCE_RULE_DEP_RULE;
940 		term->u.deprule = newrule;
942 	newrule->strtab = strtab;
943 	newrule->termflag = 0;
945 	// Fill the term vector
948 	for (i = 0; i < nterms; i++) {
949 		if (terms[i].op == RBCE_RULE_INVALID) {
953 		newrule->terms[counter] = alloc_term_index();
		/* Allocation failure: roll back every index taken so far. */
954 		if (newrule->terms[counter] < 0) {
955 			for (j = 0; j < counter; j++) {
956 				release_term_index(newrule->terms[j]);
961 		term = &gl_terms[newrule->terms[counter]];
962 		term->op = terms[i].op;
963 		term->operator = terms[i].operator;
964 		switch (terms[i].op) {
965 		case RBCE_RULE_CMD_PATH:
968 		case RBCE_RULE_APP_TAG:
			/* String terms: copy into the rule's string table.
			 * NOTE(review): strtablen is reused as a running
			 * offset here — confirm against full source. */
971 			term->u.string = &strtab[strtablen];
972 			strcpy(term->u.string, terms[i].u.string);
973 			strtablen = strlen(term->u.string) + 1;
976 		case RBCE_RULE_REAL_UID:
977 		case RBCE_RULE_REAL_GID:
978 		case RBCE_RULE_EFFECTIVE_UID:
979 		case RBCE_RULE_EFFECTIVE_GID:
981 			term->u.id = terms[i].u.id;
984 		case RBCE_RULE_DEP_RULE:
985 			term->u.deprule = terms[i].u.deprule;
986 			INC_REF(term->u.deprule);
	/* Error unwinding: release class ref, rule slot, and term vector. */
997 	put_class(targetcls);
1000 		release_term_index(index);
1002 	kfree(newrule->terms);
/* Create a new rule or modify an existing one from the textual definition
 * 'rdefn'. For an existing rule, the old and new term lists are merged
 * (new attr/values override old ones, depend-terms get special handling),
 * the rule is refilled, and reinserted if its order changed.
 * Returns 0 on success or -errno (some error paths elided in this listing). */
1009 int change_rule(const char *rname, char *rdefn)
1011 	struct rbce_rule *rule = NULL, *deprule;
1012 	struct rbce_rule_term *new_terms = NULL, *term, *terms;
1013 	int nterms, new_term_mask = 0, oterms, tot_terms;
1014 	int i, j, k, rc, new_order = 0;
	/* Parse the definition first, outside the lock. */
1016 	if ((nterms = rules_parse(rdefn, &new_terms, &new_term_mask)) <= 0) {
1017 		return !nterms ? -EINVAL : nterms;
1020 	write_lock(&global_rwlock);
1021 	rule = find_rule_name(rname);
	/* --- New rule: allocate, name, initialize, fill, and insert. --- */
1023 		rule = kmalloc(sizeof(struct rbce_rule), GFP_ATOMIC);
1025 		rule->obj.name = kmalloc(strlen(rname) + 1, GFP_ATOMIC);
1026 		if (rule->obj.name) {
1027 			strcpy(rule->obj.name, rname);
1031 			rule->state = RBCE_RULE_ENABLED;
1032 			rule->target_class = NULL;
1033 			rule->classtype = -1;
1036 			INIT_LIST_HEAD(&rule->obj.link);
1037 			rc = fill_rule(rule, new_terms, nterms);
1043 						rule->order)) == 0) {
			/* Insertion failed: tear the new rule down again. */
1048 					__delete_rule(rule);
1059 		write_unlock(&global_rwlock);
	/* --- Existing rule: build a merged term array (old + new). --- */
1063 	oterms = rule->num_terms;
1064 	tot_terms = nterms + oterms;
1066 	terms = kmalloc(tot_terms * sizeof(struct rbce_rule_term), GFP_ATOMIC);
1070 		write_unlock(&global_rwlock);
1074 	new_term_mask &= ~(1 << RBCE_RULE_DEP_RULE);
1075 	//ignore the new deprule terms for the first iteration.
1076 	// taken care of later.
	/* Pass 1 over old terms: keep each unless the new definition carries
	 * the same attribute, in which case take the new value and mark the
	 * new term consumed (INVALID2). */
1077 	for (i = 0; i < oterms; i++) {
1078 		term = &gl_terms[rule->terms[i]];	// old term
1080 		if ((1 << term->op) & new_term_mask) {
1081 			// newrule has this attr/value
1082 			for (j = 0; j < nterms; j++) {
1083 				if (term->op == new_terms[j].op) {
1084 					terms[i].op = new_terms[j].op;
1085 					terms[i].operator = new_terms[j].
1088 					    new_terms[j].u.string;
1089 					new_terms[j].op = RBCE_RULE_INVALID2;
1094 			terms[i].op = term->op;
1095 			terms[i].operator = term->operator;
1096 			terms[i].u.string = term->u.string;
1100 	i = oterms;		// for readability
	/* Pass 2: append the remaining (unconsumed) new terms, with special
	 * handling for depend terms and the order pseudo-term. */
1102 	for (j = 0; j < nterms; j++) {
1103 		// handled in the previous iteration
1104 		if (new_terms[j].op == RBCE_RULE_INVALID2) {
1108 		if (new_terms[j].op == RBCE_RULE_DEP_RULE) {
1109 			if (new_terms[j].operator == TOKEN_OP_DEP) {
1110 				// "depend=rule" deletes all depends in the
1111 				// original rule so, delete all depend rule
1112 				// terms in the original rule
1113 				for (k = 0; k < oterms; k++) {
1114 					if (terms[k].op == RBCE_RULE_DEP_RULE) {
1115 						terms[k].op = RBCE_RULE_INVALID;
1118 				// must copy the new deprule term
1120 				// delete the depend rule term if was defined
1121 				// in the original rule for both +depend
1123 				deprule = find_rule_name(new_terms[j].u.string);
1125 					for (k = 0; k < oterms; k++) {
1128 						    && terms[k].u.deprule ==
1136 				if (new_terms[j].operator == TOKEN_OP_DEP_DEL) {
1137 					// No need to copy the new deprule term
		/* A bare order term signals that the rule must be
		 * repositioned after refilling (reinsert below). */
1142 		if ((new_terms[j].op == RBCE_RULE_INVALID) &&
1143 		    (new_terms[j].operator == TOKEN_OP_ORDER)) {
1147 		terms[i].op = new_terms[j].op;
1148 		terms[i].operator = new_terms[j].operator;
1149 		terms[i].u.string = new_terms[j].u.string;
1151 		new_terms[j].op = RBCE_RULE_INVALID2;
1156 	// convert old deprule pointers to name pointers.
1157 	for (i = 0; i < oterms; i++) {
1158 		if (terms[i].op != RBCE_RULE_DEP_RULE)
1160 		terms[i].u.string = terms[i].u.deprule->obj.name;
	/* Release the old terms, then refill the rule from the merged set. */
1164 	__release_rule(rule);
1167 	rc = fill_rule(rule, terms, tot_terms);
1168 	if (rc == 0 && new_order) {
1169 		rc = reinsert_rule(rule, rule->order);
1171 	if (rc != 0) {		// rule creation/insertion failed
1172 		__delete_rule(rule);
1177 	write_unlock(&global_rwlock);
1184  * Delete the specified rule.
1187 int delete_rule(const char *rname)
1190 	struct rbce_rule *rule;
1192 	write_lock(&global_rwlock);
1194 	if ((rule = find_rule_name(rname)) == NULL) {
1195 		write_unlock(&global_rwlock);
1198 	rc = __delete_rule(rule);
	/* If the deletion tipped the term vector into sparseness, repack
	 * (and re-optimize) while we still hold the write lock. */
1199 	if (rbce_enabled && (gl_action & POLICY_ACTION_PACK_TERMS)) {
1202 	write_unlock(&global_rwlock);
1204 	DPRINTK(DBG_RULE, "delete rule %s\n", rname);
1209  * copy the rule specified by rname into the given result string.
1212 void get_rule(const char *rname, char *result)
1215 	struct rbce_rule *rule;
1216 	struct rbce_rule_term *term;
1217 	char *cp = result, oper, idtype[3], str[5];
1219 	read_lock(&global_rwlock);
1221 	rule = find_rule_name(rname);
	/* Serialize each term back into "attr<op>value," textual form. */
1223 		for (i = 0; i < rule->num_terms; i++) {
1224 			term = gl_terms + rule->terms[i];
1226 			case RBCE_RULE_REAL_UID:
1227 				strcpy(idtype, "u");
1229 			case RBCE_RULE_REAL_GID:
1230 				strcpy(idtype, "g");
1232 			case RBCE_RULE_EFFECTIVE_UID:
1233 				strcpy(idtype, "eu");
1235 			case RBCE_RULE_EFFECTIVE_GID:
1236 				strcpy(idtype, "eg");
1239 				strcpy(idtype, "x");
				/* Pick the operator character (<, >, !, =). */
1241 				if (term->operator == RBCE_LESS_THAN) {
1243 				} else if (term->operator == RBCE_GREATER_THAN) {
1245 				} else if (term->operator == RBCE_NOT) {
1251 				sprintf(cp, "%sid%c%ld,", idtype, oper,
1254 			case RBCE_RULE_CMD_PATH:
1255 				strcpy(str, "path");
1260 			case RBCE_RULE_ARGS:
1261 				strcpy(str, "args");
1263 			case RBCE_RULE_APP_TAG:
1266 			case RBCE_RULE_IPV4:
1267 				strcpy(str, "ipv4");
1269 			case RBCE_RULE_IPV6:
1270 				strcpy(str, "ipv6");
1273 				sprintf(cp, "%s=%s,", str, term->u.string);
1275 			case RBCE_RULE_DEP_RULE:
1277 				sprintf(cp, "depend=%s,",
1278 					term->u.deprule->obj.name);
1284 		if (!rule->num_terms) {
1285 			cp += sprintf(cp, "***** no terms defined ***** ");
		/* Trailing metadata: order, state, and target class. */
1289 			sprintf(cp, "order=%d,state=%d,", rule->order, rule->state);
1291 		sprintf(cp, "class=%s",
1292 			rule->target_class ? rule->target_class->obj.
1293 			name : "***** REMOVED *****");
1296 		sprintf(result, "***** Rule %s doesn't exist *****", rname);
1299 	read_unlock(&global_rwlock);
1304  * Change the name of the given rule "from_rname" to "to_rname"
1307 int rename_rule(const char *from_rname, const char *to_rname)
1309 	struct rbce_rule *rule;
1310 	int nlen, rc = -EINVAL;
1312 	if (!to_rname || !*to_rname) {
1315 	write_lock(&global_rwlock);
1317 	rule = find_rule_name(from_rname);
	/* Reuse the existing name buffer when the new name fits; otherwise
	 * allocate a bigger one and free the old. */
1319 		if ((nlen = strlen(to_rname)) > strlen(rule->obj.name)) {
1320 			char *name = kmalloc(nlen + 1, GFP_ATOMIC);
1324 			kfree(rule->obj.name);
1325 			rule->obj.name = name;
1327 		strcpy(rule->obj.name, to_rname);
1330 	write_unlock(&global_rwlock);
1335  * Return TRUE if the given rule exists, FALSE otherwise
1338 int rule_exists(const char *rname)
1340 	struct rbce_rule *rule;
1342 	read_lock(&global_rwlock);
1343 	rule = find_rule_name(rname);
1344 	read_unlock(&global_rwlock);
1345 	return rule != NULL;
1348 /*====================== Magic file handling =======================*/
1352 static struct rbce_private_data *create_private_data(struct rbce_private_data *,
/* Invalidate the cached evaluation results for one term context: clear the
 * bits of gl_mask_vecs[termflag] out of the task's 'eval' and 'true'
 * bitvectors so those terms are re-evaluated on the next classification. */
1356 void reset_evaluation(struct rbce_private_data *pdata,int termflag)
1358 	/* reset TAG ruleterm evaluation results to pick up
1359 	 * on next classification event
1361 	if (use_persistent_state && gl_mask_vecs[termflag]) {
1362 		bitvector_and_not( pdata->eval, pdata->eval,
1363 				   gl_mask_vecs[termflag] );
1364 		bitvector_and_not( pdata->true, pdata->true,
1365 				   gl_mask_vecs[termflag] );
/* Attach an application tag to the task with the given pid, creating the
 * task's RBCE private data on demand, then invalidate cached TAG-term
 * evaluations so the new tag takes effect on the next classification. */
1369 int set_tasktag(int pid, char *tag)
1373 	struct task_struct *tsk;
1374 	struct rbce_private_data *pdata;
	/* Copy the tag before taking tasklist_lock. */
1380 	len = strlen(tag) + 1;
1381 	tp = kmalloc(len, GFP_ATOMIC);
1385 	strncpy(tp,tag,len);
1387 	read_lock(&tasklist_lock);
1388 	if ((tsk = find_task_by_pid(pid)) == NULL) {
	/* Lazily create per-task private data on first tagging. */
1393 	if (unlikely(!RBCE_DATA(tsk))) {
1394 		RBCE_DATAP(tsk) = create_private_data(NULL, 0);
1395 		if (!RBCE_DATA(tsk)) {
1400 	pdata = RBCE_DATA(tsk);
	/* Replace any previous tag. */
1401 	if (pdata->app_tag) {
1402 		kfree(pdata->app_tag);
1404 	pdata->app_tag = tp;
1405 	reset_evaluation(pdata,RBCE_TERMFLAG_TAG);
1408 	read_unlock(&tasklist_lock);
1414 /*====================== Classification Functions =======================*/
1417  * Match the given full path name with the command expression.
1418  * This function treats the following 2 characters as special if seen in
1419  * cmd_exp, all other characters are compared as is:
1420  *      ? - compares to any one single character
1421  *      * - compares to one or more single characters
1423  * If fullpath is 1, tsk_comm is compared in full. otherwise only the command
1424  * name (basename(tsk_comm)) is compared.
1426 static int match_cmd(const char *tsk_comm, const char *cmd_exp, int fullpath)
1428 	const char *c, *t, *last_ast, *cmd = tsk_comm;
1431 	// get the command name if we don't have to match the fullpath
1432 	if (!fullpath && ((c = strrchr(tsk_comm, '/')) != NULL)) {
1436 	/* now faithfully assume the entire pathname is in cmd */
1438 	/* we now have to effectively implement a regular expression
1440 	 *      '?'  any single character
1441 	 *      '*'  one or more '?'
1447 		if (t == NULL || c == NULL) {
		/* '*' handling: collapse runs of '*' and scan forward in the
		 * target for the character that follows the '*'. */
1467 			// eat up all '*' in c
1468 			while (*(c + 1) == '*')
1472 			//t++;   // Add this for matching '*' with "one"
1474 			while (*t && (*t != *(c + 1)) && *t != '/')
1476 			if (*t == *(c + 1)) {
1504 			/*FALLTHRU*/ default:
1505 			if (*t == *c && next_c != *t) {
1509 				/* reset to last asterix and
1510 				   continue from there */
1520 	/* check for trailing "*" */
	/* Match iff both the pattern and the target are fully consumed. */
1524 	return (!*c && !*t);
/* Reverse the first n characters of str in place (swap body elided in this
 * listing); helper for itoa() below. */
1527 static void reverse(char *str, int n)
1532 	for (i = 0; i < j; i++, j--) {
/* Write n as a decimal string into str (least-significant digit first, then
 * reversed); returns the number of characters written — presumably without a
 * NUL terminator, since callers strncmp by the returned size. */
1539 static int itoa(int n, char *str)
1544 		str[i++] = n % 10 + '0';
1549 	(void)reverse(str, sz);
/* Render the IPv4 address y into dotted-decimal form in a, one byte at a
 * time via itoa(); returns the total length written. */
1553 static int v4toa(__u32 y, char *a)
1558 	for (i = 0; i < 4; i++) {
1559 		size += itoa(y & 0xff, &a[size]);
/* Compare the socket's destination IPv4 address (rendered as text) against
 * the address portion of the rule string; returns nonzero on a match. */
1566 int match_ipv4(struct ckrm_net_struct *ns, char **string)
1568 	char *ptr = *string;
1572 	size = v4toa(ns->ns_daddrv4, a4);
1575 	return !strncmp(a4, ptr, size);
/* Compare the socket's destination port (rendered as decimal text) against
 * the rule string; returns nonzero on a match. */
1578 int match_port(struct ckrm_net_struct *ns, char *ptr)
1581 	int size = itoa(ns->ns_dport, a);
1583 	return !strncmp(a, ptr, size);
1586 static int __evaluate_rule(struct task_struct *tsk, struct ckrm_net_struct *ns,
1587 struct rbce_rule *rule, bitvector_t * vec_eval,
1588 bitvector_t * vec_true, char **filename);
1590  * evaluate the given task against the given rule with the vec_eval and
1591  * vec_true in context. Return 1 if the task satisfies the given rule, 0
1594  * If the bit corresponding to the rule is set in the vec_eval, then the
1595  * corresponding bit in vec_true is the result. If it is not set, evaluate
1596  * the rule and set the bits in both the vectors accordingly.
1598  * On return, filename will have the pointer to the pathname of the task's
1599  * executable, if the rule had any command related terms.
1601  * Caller must hold the global_rwlock at least in read mode.
1604 evaluate_rule(struct task_struct *tsk, struct ckrm_net_struct *ns,
1605 	      struct rbce_rule *rule, bitvector_t * vec_eval,
1606 	      bitvector_t * vec_true, char **filename)
1608 	int tidx = rule->index;
	/* Cache miss: run the full per-term evaluation once, then record
	 * both the result and the fact that it was evaluated. */
1610 	if (!bitvector_test(tidx, vec_eval)) {
1612 		    (tsk, ns, rule, vec_eval, vec_true, filename)) {
1613 			bitvector_set(tidx, vec_true);
1615 		bitvector_set(tidx, vec_eval);
1617 	return bitvector_test(tidx, vec_true);
1621 * evaluate the given task against every term in the given rule with
1622 * vec_eval and vec_true in context.
1624 * If the bit corresponding to a rule term is set in the vec_eval, then the
1625 * corresponding bit in vec_true is the result for taht particular. If it is
1626 * not set, evaluate the rule term and set the bits in both the vectors
1629 * This fucntions returns true only if all terms in the rule evaluate true.
1631 * On return, filename will have the pointer to the pathname of the task's
1632 * executable, if the rule had any command related terms.
1634 * Caller must hold the global_rwlock at least in read mode.
/*
 * Walk every term of 'rule'; evaluate only terms whose bit is clear in
 * vec_eval (per-term memoization), recording each term's truth value in
 * vec_true.  Overall result is the AND of all term results.
 * NOTE(review): listing is fragmentary — switch header, breaks and some
 * error paths fall in elided lines.
 */
1637 __evaluate_rule(struct task_struct *tsk, struct ckrm_net_struct *ns,
1638 struct rbce_rule *rule, bitvector_t * vec_eval,
1639 bitvector_t * vec_true, char **filename)
/* iterate the rule's terms in reverse order */
1644 for (i = rule->num_terms; --i >= 0;) {
1645 int rc = 1, tidx = rule->terms[i];
/* skip terms already evaluated in an earlier classification pass */
1647 if (!bitvector_test(tidx, vec_eval)) {
1648 struct rbce_rule_term *term = &gl_terms[tidx];
/* command path/name terms need the task's executable pathname */
1652 case RBCE_RULE_CMD_PATH:
/* lazily fetch the pathname once and cache it in *filename */
1655 if (!*filename) { /* get this once */
1658 GFP_ATOMIC)) == NULL)
1661 (tsk, *filename, NAME_MAX) < 0)) {
1666 rc = match_cmd(*filename, term->u.string,
1668 RBCE_RULE_CMD_PATH));
/* fallback: match against the kernel's short command name */
1670 rc = match_cmd(tsk->comm, term->u.string,
1672 RBCE_RULE_CMD_PATH));
/* real uid term: <, >, != or (default) == against the operand id */
1675 case RBCE_RULE_REAL_UID:
1676 if (term->operator == RBCE_LESS_THAN) {
1677 rc = (tsk->uid < term->u.id);
1678 } else if (term->operator == RBCE_GREATER_THAN){
1679 rc = (tsk->uid > term->u.id);
1680 } else if (term->operator == RBCE_NOT) {
1681 rc = (tsk->uid != term->u.id);
1683 rc = (tsk->uid == term->u.id);
/* real gid term: same operator handling as real uid */
1686 case RBCE_RULE_REAL_GID:
1687 if (term->operator == RBCE_LESS_THAN) {
1688 rc = (tsk->gid < term->u.id);
1689 } else if (term->operator == RBCE_GREATER_THAN){
1690 rc = (tsk->gid > term->u.id);
1691 } else if (term->operator == RBCE_NOT) {
1692 rc = (tsk->gid != term->u.id);
1694 rc = (tsk->gid == term->u.id);
/* effective uid term */
1697 case RBCE_RULE_EFFECTIVE_UID:
1698 if (term->operator == RBCE_LESS_THAN) {
1699 rc = (tsk->euid < term->u.id);
1700 } else if (term->operator == RBCE_GREATER_THAN){
1701 rc = (tsk->euid > term->u.id);
1702 } else if (term->operator == RBCE_NOT) {
1703 rc = (tsk->euid != term->u.id);
1705 rc = (tsk->euid == term->u.id);
/* effective gid term */
1708 case RBCE_RULE_EFFECTIVE_GID:
1709 if (term->operator == RBCE_LESS_THAN) {
1710 rc = (tsk->egid < term->u.id);
1711 } else if (term->operator == RBCE_GREATER_THAN){
1712 rc = (tsk->egid > term->u.id);
1713 } else if (term->operator == RBCE_NOT) {
1714 rc = (tsk->egid != term->u.id);
1716 rc = (tsk->egid == term->u.id);
/* application tag: string equality; false if the task carries no tag */
1719 case RBCE_RULE_APP_TAG:
1720 rc = (RBCE_DATA(tsk)
1722 app_tag) ? !strcmp(RBCE_DATA(tsk)->
1724 term->u.string) : 0;
/* dependent rule: recurse through evaluate_rule() (memoized) */
1726 case RBCE_RULE_DEP_RULE:
1727 rc = evaluate_rule(tsk, NULL, term->u.deprule,
/* IPv4 address/port term: only meaningful when a net struct is given */
1732 case RBCE_RULE_IPV4:
1733 // TBD: add NOT_EQUAL match. At present rbce
1734 // recognises EQUAL matches only.
1735 if (ns && term->operator == RBCE_EQUAL) {
1738 char *ptr = term->u.string;
/* leading '*' wildcards the address part */
1740 if (term->u.string[0] == '*')
1743 ma = match_ipv4(ns, &ptr);
/* address must be followed by '\' separating the port */
1745 if (*ptr != '\\') { // error
1762 case RBCE_RULE_IPV6: // no support yet
/* vserver context id (xid) term */
1769 xid_t xid = vx_task_xid(tsk);
1771 if (term->operator == RBCE_LESS_THAN) {
1772 rc = (xid < term->u.id);
1773 } else if (term->operator == RBCE_GREATER_THAN) {
1774 rc = (xid > term->u.id);
1775 } else if (term->operator == RBCE_NOT) {
1776 rc = (xid != term->u.id);
1778 rc = (xid == term->u.id);
/* unknown term operator: log and (presumably) fail the term */
1785 printk(KERN_ERR "Error evaluate term op=%d\n",
/* record this term's outcome in vec_true ... */
1790 bitvector_clear(tidx, vec_true);
1792 bitvector_set(tidx, vec_true);
/* ... and mark it evaluated */
1794 bitvector_set(tidx, vec_eval);
/* memoized path: reuse the previously recorded truth bit */
1796 rc = bitvector_test(tidx, vec_true);
1805 //#define PDATA_DEBUG
/*
 * Debug-only bookkeeping of all live rbce_private_data pointers so that
 * stale/corrupt per-task pointers can be detected.  Compiled away (the
 * macros below) unless PDATA_DEBUG is defined.
 */
1808 #define MAX_PDATA 10000
1809 void *pdata_arr[MAX_PDATA];
1810 int pdata_count, pdata_next;
/* protects pdata_arr/pdata_count/pdata_next */
1811 static spinlock_t pdata_lock = SPIN_LOCK_UNLOCKED;
/* linear scan of the registry; non-zero iff 'pdata' was stored earlier */
1813 static inline int valid_pdata(struct rbce_private_data *pdata)
1820 spin_lock(&pdata_lock);
1821 for (i = 0; i < MAX_PDATA; i++) {
1822 if (pdata_arr[i] == pdata) {
1823 spin_unlock(&pdata_lock);
1827 spin_unlock(&pdata_lock);
1828 printk(KERN_WARNING "INVALID/CORRUPT PDATA %p\n", pdata);
/* record a newly allocated pdata in the first free registry slot */
1832 static inline void store_pdata(struct rbce_private_data *pdata)
1837 spin_lock(&pdata_lock);
1839 while (i < MAX_PDATA) {
1840 if (pdata_arr[pdata_next] == NULL) {
1841 printk(KERN_DEBUG "storing %p at %d, count %d\n", pdata,
1842 pdata_next, pdata_count);
1843 pdata_arr[pdata_next++] = pdata;
/* wrap the cursor at the end of the array */
1844 if (pdata_next == MAX_PDATA) {
1853 spin_unlock(&pdata_lock);
1855 if (i == MAX_PDATA) {
1856 printk(KERN_DEBUG "PDATA BUFFER FULL pdata_count %d pdata %p\n",
1857 pdata_count, pdata);
/* remove a pdata from the registry when it is freed */
1861 static inline void unstore_pdata(struct rbce_private_data *pdata)
1865 spin_lock(&pdata_lock);
1866 for (i = 0; i < MAX_PDATA; i++) {
1867 if (pdata_arr[i] == pdata) {
1868 printk(KERN_DEBUG "unstoring %p at %d, count %d\n", pdata,
1870 pdata_arr[i] = NULL;
1876 spin_unlock(&pdata_lock);
1877 if (i == MAX_PDATA) {
1878 printk(KERN_DEBUG "pdata %p not found in the stored array\n",
1885 #else // PDATA_DEBUG
/* production build: validation always succeeds, store/unstore are no-ops */
1887 #define valid_pdata(pdata) (1)
1888 #define store_pdata(pdata)
1889 #define unstore_pdata(pdata)
1891 #endif // PDATA_DEBUG
1894 * Allocate and initialize a rbce_private_data data structure.
1896 * Caller must hold global_rwlock at least in read mode.
/*
 * Copy (or zero, when there is no source) the extension-specific part of
 * a private-data record into 'dst'.
 */
1900 copy_ext_private_data(struct rbce_private_data *src,
1901 struct rbce_private_data *dst)
/* shallow copy from parent/source ... */
1904 dst->ext_data = src->ext_data;
/* ... or start from a zeroed extension block when src is absent */
1906 memset(&dst->ext_data, 0, sizeof(dst->ext_data));
/*
 * Allocate and initialize a per-task rbce_private_data record,
 * optionally copying state from 'src' (e.g. the parent at fork).
 * With use_persistent_state the eval/true bitvectors are embedded in
 * the same allocation, after the fixed-size struct.
 * Uses GFP_ATOMIC: callable from non-sleeping (callback) context.
 */
1909 static struct rbce_private_data *create_private_data(struct rbce_private_data
1910 *src, int copy_sample)
1912 int vsize, psize, bsize;
1913 struct rbce_private_data *pdata;
1915 if (use_persistent_state) {
/* size the two bitvectors from the current number of allocated terms */
1916 vsize = gl_allocated;
1917 bsize = vsize / 8 + sizeof(bitvector_t);
1918 psize = sizeof(struct rbce_private_data) + 2 * bsize;
1920 psize = sizeof(struct rbce_private_data);
1923 pdata = kmalloc(psize, GFP_ATOMIC);
1924 if (pdata != NULL) {
1925 if (use_persistent_state) {
/* stamp with current bitmap layout version for later invalidation */
1926 pdata->bitmap_version = gl_bitmap_version;
/* eval and true vectors live back-to-back in the trailing data[] area */
1927 pdata->eval = (bitvector_t *) & pdata->data[0];
1928 pdata->true = (bitvector_t *) & pdata->data[bsize];
/* inherit vectors only when the source used the same bitmap layout */
1929 if (src && (src->bitmap_version == gl_bitmap_version)) {
1930 memcpy(pdata->data, src->data, 2 * bsize);
1932 bitvector_init(pdata->eval, vsize);
1933 bitvector_init(pdata->true, vsize);
1936 copy_ext_private_data(src, pdata);
1937 //if (src) { // inherit evaluate and app_tag
1938 // pdata->evaluate = src->evaluate;
1939 // if(src->app_tag) {
1940 // int len = strlen(src->app_tag)+1;
1941 // printk(KERN_DEBUG "CREATE_PRIVATE: apptag %s len %d\n",
1942 // src->app_tag,len);
1943 // pdata->app_tag = kmalloc(len, GFP_ATOMIC);
1944 // if (pdata->app_tag) {
1945 // strcpy(pdata->app_tag, src->app_tag);
/* fresh records start eligible for evaluation, with no app tag */
1949 pdata->evaluate = 1;
1950 pdata->rules_version = src ? src->rules_version : 0;
1951 pdata->app_tag = NULL;
/* release a private-data record after (debug-mode) validation */
1958 static inline void free_private_data(struct rbce_private_data *pdata)
1960 if (valid_pdata(pdata)) {
1961 unstore_pdata(pdata);
/*
 * Detach and free the private data of every thread in the system.
 * Used at module unload; walks the tasklist under the read lock.
 */
1966 static void free_all_private_data(void)
1968 struct task_struct *proc, *thread;
1970 read_lock(&tasklist_lock);
1971 do_each_thread(proc, thread) {
1972 struct rbce_private_data *pdata;
/* clear the task's pointer before freeing to avoid dangling access */
1974 pdata = RBCE_DATA(thread);
1975 RBCE_DATAP(thread) = NULL;
1976 free_private_data(pdata);
1977 } while_each_thread(proc, thread);
1978 read_unlock(&tasklist_lock);
1983 * reclassify function, which is called by all the callback functions.
1985 * Takes the task to be reclassified and ruleflags that indicate the
1986 * attributes that caused this reclassification request.
1988 * On success, returns the core class pointer to which the given task should
/*
 * Core classification entry point used by all event callbacks.
 * Re-evaluates the (memoized) rule terms selected by 'termflag' and
 * returns the target core class of the first enabled rule that matches,
 * or NULL when nothing applies.  Grabs a reference on the returned class.
 * NOTE(review): fragmentary listing — several cleanup/exit lines elided.
 */
1991 static struct ckrm_core_class *rbce_classify(struct task_struct *tsk,
1992 struct ckrm_net_struct *ns,
1993 unsigned long termflag,
1997 struct rbce_rule *rule;
1998 bitvector_t *vec_true = NULL, *vec_eval = NULL;
1999 struct rbce_class *tgt = NULL;
2000 struct ckrm_core_class *cls = NULL;
2001 char *filename = NULL;
/* debug-mode sanity check of the task's private data pointer */
2003 if (!valid_pdata(RBCE_DATA(tsk))) {
2006 if (classtype >= CKRM_MAX_CLASSTYPES) {
2007 // can't handle more than CKRM_MAX_CLASSTYPES
2010 // fast path to avoid locking in case CE is not enabled or if no rules
2011 // are defined or if the tasks states that no evaluation is needed.
2012 if (!rbce_enabled || !gl_num_rules ||
2013 (RBCE_DATA(tsk) && !RBCE_DATA(tsk)->evaluate)) {
2016 // FIXME: optimize_policy should be called from here if
2017 // gl_action is non-zero. Also, it has to be called with the
2018 // global_rwlock held in write mode.
2020 read_lock(&global_rwlock);
/* step 1: obtain eval/true vectors, persistent or temporary */
2022 vec_eval = vec_true = NULL;
2023 if (use_persistent_state) {
2024 struct rbce_private_data *pdata = RBCE_DATA(tsk);
/* bitmap layout changed since pdata was built: rebuild it */
2028 && (gl_bitmap_version != pdata->bitmap_version))) {
2029 struct rbce_private_data *new_pdata =
2030 create_private_data(pdata, 1);
/* carry over versioning, evaluate flag and app tag from the old record */
2034 new_pdata->rules_version =
2035 pdata->rules_version;
2036 new_pdata->evaluate = pdata->evaluate;
2037 new_pdata->app_tag = pdata->app_tag;
2038 free_private_data(pdata);
2040 pdata = RBCE_DATAP(tsk) = new_pdata;
2041 termflag = RBCE_TERMFLAG_ALL;
2042 // need to evaluate them all
2044 // we shouldn't free the pdata as it has more
2045 // details than the vectors. But, this
2046 // reclassification should go thru
2051 goto cls_determined;
2053 vec_eval = pdata->eval;
2054 vec_true = pdata->true;
/* non-persistent mode: allocate scratch vectors for this call only */
2056 int bsize = gl_allocated;
2058 vec_eval = bitvector_alloc(bsize);
2059 vec_true = bitvector_alloc(bsize);
2061 if (vec_eval == NULL || vec_true == NULL) {
2062 goto cls_determined;
2064 termflag = RBCE_TERMFLAG_ALL;
2065 // need to evaluate all of them now
/* step 2:
 * using bit ops invalidate all terms related to this termflag
 * context (only in per task vec) */
2072 DPRINTK(DBG_CLASSIFY_DETAILS, "\nClassify: termflag=%lx\n", termflag);
2073 DPRINTK(DBG_CLASSIFY_DETAILS, " eval before: ");
2074 bitvector_print(DBG_CLASSIFY_DETAILS, vec_eval);
2075 DPRINTK(DBG_CLASSIFY_DETAILS, "\n true before: ");
2076 bitvector_print(DBG_CLASSIFY_DETAILS, vec_true);
2077 DPRINTK(DBG_CLASSIFY_DETAILS, "\n redo => ");
2079 if (termflag == RBCE_TERMFLAG_ALL) {
2080 DPRINTK(DBG_CLASSIFY_DETAILS, " redoall ");
/* full re-evaluation: forget every memoized term */
2081 bitvector_zero(vec_eval);
2083 for (i = 0; i < NUM_TERM_MASK_VECTOR; i++) {
2084 if (test_bit(i, &termflag)) {
/* selective: clear only terms affected by this event's attribute */
2085 bitvector_t *maskvec = gl_mask_vecs[i];
2087 DPRINTK(DBG_CLASSIFY_DETAILS, " mask(%d) ", i);
2088 bitvector_print(DBG_CLASSIFY_DETAILS, maskvec);
2089 bitvector_and_not(vec_eval, vec_eval, maskvec);
/* a term's truth bit is only meaningful while its eval bit is set */
2093 bitvector_and(vec_true, vec_true, vec_eval);
2095 DPRINTK(DBG_CLASSIFY_DETAILS, "\n eval now: ");
2096 bitvector_print(DBG_CLASSIFY_DETAILS, vec_eval);
2097 DPRINTK(DBG_CLASSIFY_DETAILS, "\n");
/* step 3: run through the rules in order and see what needs evaluation */
2100 list_for_each_entry(rule, &rules_list[classtype], obj.link) {
2101 if (rule->state == RBCE_RULE_ENABLED &&
2102 rule->target_class &&
2103 rule->target_class->classobj &&
2104 evaluate_rule(tsk, ns, rule, vec_eval, vec_true,
/* first matching enabled rule wins */
2106 tgt = rule->target_class;
2107 cls = rule->target_class->classobj;
2113 DPRINTK(DBG_CLASSIFY_RES,
2114 "==> |%s|; pid %d; euid %d; egid %d; ruid %d; rgid %d;"
2115 "tag |%s| ===> class |%s|\n",
2116 filename ? filename : tsk->comm,
2122 RBCE_DATA(tsk) ? RBCE_DATA(tsk)->app_tag : "",
2123 tgt ? tgt->obj.name : "");
2124 DPRINTK(DBG_CLASSIFY_DETAILS, " eval after: ");
2125 bitvector_print(DBG_CLASSIFY_DETAILS, vec_eval);
2126 DPRINTK(DBG_CLASSIFY_DETAILS, "\n true after: ");
2127 bitvector_print(DBG_CLASSIFY_DETAILS, vec_true);
2128 DPRINTK(DBG_CLASSIFY_DETAILS, "\n");
/* scratch vectors are only freed in non-persistent mode */
2130 if (!use_persistent_state) {
2132 bitvector_free(vec_eval);
2135 bitvector_free(vec_true);
/* take a reference on the class handed back to the core */
2138 ckrm_core_grab(cls);
2139 read_unlock(&global_rwlock);
/* remember which rule-set version this task was classified against */
2143 if (RBCE_DATA(tsk)) {
2144 RBCE_DATA(tsk)->rules_version = gl_rules_version;
2149 /*****************************************************************************
2151 * Module specific utilization of core RBCE functionality
2153 * Includes support for the various classtypes
2154 * New classtypes will require extensions here
2156 *****************************************************************************/
2158 /* helper functions that are required in the extended version */
/*
 * Handle a manual (user-driven) class assignment: create private data
 * if the task has none, then clear 'evaluate' so the engine stops
 * re-classifying this task automatically.
 */
2160 static inline void rbce_tc_manual(struct task_struct *tsk)
2162 read_lock(&global_rwlock);
2164 if (!RBCE_DATA(tsk)) {
/* inherit from the parent's record; no sample copy */
2166 (void *)create_private_data(RBCE_DATA(tsk->parent), 0);
2168 if (RBCE_DATA(tsk)) {
2169 RBCE_DATA(tsk)->evaluate = 0;
2171 read_unlock(&global_rwlock);
2175 /*****************************************************************************
2176 * load any extensions
2177 *****************************************************************************/
2179 #ifdef RBCE_EXTENSION
2180 #include "rbcemod_ext.c"
2183 /*****************************************************************************
2184 * VARIOUS CLASSTYPES
2185 *****************************************************************************/
2187 // to enable type coercion of the function pointers
2189 /*============================================================================
2190 * TASKCLASS CLASSTYPE
2191 *============================================================================*/
/* classtype id assigned by ckrm_register_engine(); -1 = not registered */
2193 int tc_classtype = -1;
2196 * fork callback to be registered with core module.
/*
 * Create the child's private data from the parent's, then classify the
 * child only if the rule set changed since the parent was classified
 * (otherwise the child inherits the parent's class via the core).
 */
2198 inline static void *rbce_tc_forkcb(struct task_struct *tsk)
2200 int rule_version_changed = 1;
2201 struct ckrm_core_class *cls;
2202 read_lock(&global_rwlock);
2205 (void *)create_private_data(RBCE_DATA(tsk->parent), 0);
2206 read_unlock(&global_rwlock);
2208 if (RBCE_DATA(tsk->parent)) {
2209 rule_version_changed =
2210 (RBCE_DATA(tsk->parent)->rules_version != gl_rules_version);
/* full re-evaluation when stale; NULL lets the core keep parent class */
2212 cls = rule_version_changed ?
2213 rbce_classify(tsk, NULL, RBCE_TERMFLAG_ALL, tc_classtype) : NULL;
2215 // note the fork notification to any user client will be sent through
2216 // the guaranteed fork-reclassification
2221 * exit callback to be registered with core module.
/*
 * Exit callback: notify any listener, then detach and free the task's
 * private data (including its app tag, which is allocated separately).
 */
2223 static void rbce_tc_exitcb(struct task_struct *tsk)
2225 struct rbce_private_data *pdata;
2227 send_exit_notification(tsk);
2229 pdata = RBCE_DATA(tsk);
2230 RBCE_DATAP(tsk) = NULL;
2232 if (pdata->app_tag) {
2233 kfree(pdata->app_tag);
2235 free_private_data(pdata);
/* printable names for CKRM events, indexed by event id (debug output) */
2240 #define AENT(x) [ CKRM_EVENT_##x] = #x
2241 static const char *event_names[CKRM_NUM_EVENTS] = {
/*
 * Taskclass classify callback: dispatch a CKRM event on a task to the
 * appropriate handler and return the new core class (or NULL when no
 * reclassification results).  Varargs carry the task pointer.
 */
2259 void *rbce_tc_classify(enum ckrm_event event, ...)
2263 struct task_struct *tsk;
2264 struct rbce_private_data *pdata;
2266 va_start(args, event);
2267 tsk = va_arg(args, struct task_struct *);
2270 /* we only have to deal with events between
2271 * [ CKRM_LATCHABLE_EVENTS .. CKRM_NONLATCHABLE_EVENTS )
2274 // printk(KERN_DEBUG "tc_classify %p:%d:%s '%s'\n",tsk,tsk->pid,
2275 // tsk->comm,event_names[event]);
2279 case CKRM_EVENT_FORK:
2280 cls = rbce_tc_forkcb(tsk);
2283 case CKRM_EVENT_EXIT:
2284 rbce_tc_exitcb(tsk);
/* exec may change command, uid and gid related terms at once */
2287 case CKRM_EVENT_EXEC:
2288 cls = rbce_classify(tsk, NULL, RBCE_TERMFLAG_CMD |
2289 RBCE_TERMFLAG_UID | RBCE_TERMFLAG_GID,
/* single-attribute changes re-evaluate only the affected terms */
2293 case CKRM_EVENT_UID:
2294 cls = rbce_classify(tsk, NULL, RBCE_TERMFLAG_UID, tc_classtype);
2297 case CKRM_EVENT_GID:
2298 cls = rbce_classify(tsk, NULL, RBCE_TERMFLAG_GID, tc_classtype);
2301 case CKRM_EVENT_XID:
2302 cls = rbce_classify(tsk, NULL, RBCE_TERMFLAG_XID, tc_classtype);
2305 case CKRM_EVENT_LOGIN:
2306 case CKRM_EVENT_USERADD:
2307 case CKRM_EVENT_USERDEL:
2308 case CKRM_EVENT_LISTEN_START:
2309 case CKRM_EVENT_LISTEN_STOP:
2310 case CKRM_EVENT_APPTAG:
2311 /* no interest in these events .. */
/* explicit reclassify: re-enable evaluation, then redo everything */
2318 case CKRM_EVENT_RECLASSIFY:
2319 if ((pdata = (RBCE_DATA(tsk)))) {
2320 pdata->evaluate = 1;
2322 cls = rbce_classify(tsk, NULL, RBCE_TERMFLAG_ALL, tc_classtype);
2326 // printk(KERN_DEBUG "tc_classify %p:%d:%s '%s' ==> %p\n",tsk,tsk->pid,
2327 // tsk->comm,event_names[event],cls);
2332 #ifndef RBCE_EXTENSION
/* notify callback (base RBCE only): handle manual class assignment */
2333 static void rbce_tc_notify(int event, void *core, struct task_struct *tsk)
2335 printk(KERN_DEBUG "tc_manual %p:%d:%s '%s'\n", tsk, tsk->pid, tsk->comm,
2336 event_names[event]);
2337 if (event != CKRM_EVENT_MANUAL)
2339 rbce_tc_manual(tsk);
/*
 * Taskclass engine callback table registered with the CKRM core.
 * The notify hooks differ between the base module and the extended
 * (CRBCE) build, selected by RBCE_EXTENSION.
 */
2343 static struct ckrm_eng_callback rbce_taskclass_ecbs = {
2344 .c_interest = (unsigned long)(-1), // set whole bitmap
2345 .classify = (ce_classify_fct_t) rbce_tc_classify,
2346 .class_delete = rbce_class_deletecb,
2347 #ifndef RBCE_EXTENSION
/* base build: only manual-assignment notifications */
2348 .n_interest = (1 << CKRM_EVENT_MANUAL),
2349 .notify = (ce_notify_fct_t) rbce_tc_notify,
2350 .always_callback = 0,
/* extended build: interested in every notification, always called */
2352 .n_interest = (unsigned long)(-1), // set whole bitmap
2353 .notify = (ce_notify_fct_t) rbce_tc_ext_notify,
2354 .class_add = rbce_class_addcb,
2355 .always_callback = 1,
2359 /*============================================================================
2361 *============================================================================*/
/* classtype id of the socket (accept-queue) class; -1 = not registered */
2363 int sc_classtype = -1;
/*
 * Socket-class classify callback: varargs carry the net struct and the
 * owning task; always re-evaluates all terms for the socket classtype.
 */
2365 void *rbce_sc_classify(enum ckrm_event event, ...)
2367 // no special consideratation
2370 struct task_struct *tsk;
2371 struct ckrm_net_struct *ns;
2373 va_start(args, event);
2374 ns = va_arg(args, struct ckrm_net_struct *);
2375 tsk = va_arg(args, struct task_struct *);
2378 result = rbce_classify(tsk, ns, RBCE_TERMFLAG_ALL, sc_classtype);
2380 DPRINTK(DBG_CLASSIFY_RES,
2381 "==> %d.%d.%d.%d\\%d , %p:%d:%s '%s' => %p\n",
2382 NIPQUAD(ns->ns_daddrv4), ns->ns_dport,
2383 tsk, tsk ? tsk->pid : 0, tsk ? tsk->comm : "-",
2384 event_names[event], result);
/* socket-class engine callback table registered with the CKRM core */
2388 static struct ckrm_eng_callback rbce_acceptQclass_ecbs = {
2389 .c_interest = (unsigned long)(-1),
2390 .always_callback = 0, // enable during debugging only
2391 .classify = (ce_classify_fct_t) & rbce_sc_classify,
2392 .class_delete = rbce_class_deletecb,
2395 /*============================================================================
2396 * Module Initialization ...
2397 *============================================================================*/
2399 #define TASKCLASS_NAME "taskclass"
2400 #define SOCKCLASS_NAME "socket_class"
/*
 * Table of classtype engines this module registers: classtype name,
 * callback table, and the variable receiving the assigned classtype id.
 */
2402 struct ce_regtable_struct {
2404 struct ckrm_eng_callback *cbs;
/* NULL-name sentinel terminates the table (see the while loops below) */
2408 struct ce_regtable_struct ce_regtable[] = {
2409 {TASKCLASS_NAME, &rbce_taskclass_ecbs, &tc_classtype},
2410 {SOCKCLASS_NAME, &rbce_acceptQclass_ecbs, &sc_classtype},
/*
 * Unregister every engine from ce_regtable that was registered
 * (clsvar >= 0), retrying while the core reports -EAGAIN.
 */
2414 static void unregister_classtype_engines(void)
2417 struct ce_regtable_struct *ceptr = ce_regtable;
2419 while (ceptr->name) {
2420 if (*ceptr->clsvar >= 0) {
2421 printk(KERN_DEBUG "ce unregister with <%s>\n",ceptr->name);
2422 while ((rc = ckrm_unregister_engine(ceptr->name)) == -EAGAIN)
2424 printk(KERN_DEBUG "ce unregister with <%s> rc=%d\n",ceptr->name,rc);
/* mark as unregistered */
2425 *ceptr->clsvar = -1;
/*
 * Register every engine in ce_regtable with the CKRM core.  A missing
 * classtype (-ENOENT) is tolerated; any other failure rolls back all
 * registrations done so far.
 */
2431 static int register_classtype_engines(void)
2434 struct ce_regtable_struct *ceptr = ce_regtable;
2436 while (ceptr->name) {
/* on success rc is the classtype id assigned by the core */
2437 rc = ckrm_register_engine(ceptr->name, ceptr->cbs);
2438 printk(KERN_DEBUG "ce register with <%s> typeId=%d\n",ceptr->name,rc);
2439 if ((rc < 0) && (rc != -ENOENT)) {
2440 unregister_classtype_engines();
2444 *ceptr->clsvar = rc;
2450 // =========== /proc/sysctl/debug/rbce debug stuff =============
/* handle returned by register_sysctl_table(); needed for unregister */
2453 static struct ctl_table_header *rbce_sysctl_table_header;
2455 #define CTL_RBCE_DEBUG (201) // picked some number.. dont know algo to pick
/* leaf entry: an integer debug knob handled by proc_dointvec */
2456 static struct ctl_table rbce_entry_table[] = {
2458 .ctl_name = CTL_RBCE_DEBUG,
2461 .maxlen = sizeof(int),
2463 .proc_handler = &proc_dointvec,
/* parent: hang the rbce entry under the standard "debug" sysctl dir */
2468 static struct ctl_table rbce_root_table[] = {
2470 .ctl_name = CTL_DEBUG,
2471 .procname = "debug",
2475 .child = rbce_entry_table},
2479 static inline void start_debug(void)
2481 rbce_sysctl_table_header = register_sysctl_table(rbce_root_table, 1);
2483 static inline void stop_debug(void)
2485 if (rbce_sysctl_table_header)
2486 unregister_sysctl_table(rbce_sysctl_table_header);
/* no-op stubs when the sysctl debug interface is compiled out */
2491 static inline void start_debug(void)
2494 static inline void stop_debug(void)
/* rcfs (CKRM filesystem) hooks implemented in the companion rcfs code */
2500 extern int rbce_mkdir(struct inode *, struct dentry *, int);
2501 extern int rbce_rmdir(struct inode *, struct dentry *);
2502 extern int rbce_create_magic(void);
2503 extern int rbce_clear_magic(void);
/* callback table handed to rcfs_register_engine() in init_rbce() */
2505 rbce_eng_callback_t rcfs_ecbs = {
2512 /* ======================= Module definition Functions ====================== */
/*
 * Module init (body; signature elided in this listing): initialize rule
 * lists, then register — extension pre-init, classtype engines, rcfs
 * engine, magic files, extension post-init — unwinding on any failure.
 */
2518 printk(KERN_DEBUG "<1>\nInstalling \'%s\' module\n", modname);
2520 for (i = 0; i < CKRM_MAX_CLASSTYPES; i++) {
2521 INIT_LIST_HEAD(&rules_list[i]);
2524 rc = init_rbce_ext_pre();
2529 rc = register_classtype_engines();
2532 goto out_unreg_ckrm; // need to remove anyone opened
2534 /* register any other class type engine here */
2536 rc = rcfs_register_engine(&rcfs_ecbs);
2539 goto out_unreg_ckrm;
2542 rc = rbce_create_magic();
2545 goto out_unreg_rcfs;
2550 rc = init_rbce_ext_post();
2555 return 0; // SUCCESS
/* error unwind: release in reverse order of acquisition */
2561 rcfs_unregister_engine(&rcfs_ecbs);
2563 unregister_classtype_engines();
2567 printk(KERN_DEBUG "<1>%s: error installing rc=%d line=%d\n", __FUNCTION__, rc,
/*
 * Module exit: warn about leaked classes/rules (a bug if non-empty),
 * then unregister everything and free all per-task private data.
 */
2572 void exit_rbce(void)
2576 printk(KERN_DEBUG "<1>Removing \'%s\' module\n", modname);
2581 // Print warnings if lists are not empty, which is a bug
2582 if (!list_empty(&class_list)) {
2583 printk(KERN_DEBUG "exit_rbce: Class list is not empty\n");
2586 for (i = 0; i < CKRM_MAX_CLASSTYPES; i++) {
2587 if (!list_empty(&rules_list[i])) {
2588 printk(KERN_DEBUG "exit_rbce: Rules list for classtype %d"
2589 " is not empty\n", i);
2596 rcfs_unregister_engine(&rcfs_ecbs);
2597 unregister_classtype_engines();
2598 free_all_private_data();
/* rule-management API exported for the CRBCE extension / rcfs code */
2601 EXPORT_SYMBOL(get_rule);
2602 EXPORT_SYMBOL(rule_exists);
2603 EXPORT_SYMBOL(change_rule);
2604 EXPORT_SYMBOL(delete_rule);
2605 EXPORT_SYMBOL(rename_rule);
2606 EXPORT_SYMBOL(set_tasktag);
2608 module_init(init_rbce);
2609 module_exit(exit_rbce);