diff --git a/net/core/filter.c b/net/core/filter.c
index 9c2a95080..0df843b66 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -13,6 +13,7 @@
  * 2 of the License, or (at your option) any later version.
  *
  * Andi Kleen - Fix a few bad bugs and races.
+ * Kris Katterjohn - Added many additional checks in sk_chk_filter()
  */
 
 #include <linux/module.h>
@@ -33,10 +34,11 @@
 #include <linux/timer.h>
 #include <asm/system.h>
 #include <asm/uaccess.h>
+#include <asm/unaligned.h>
 #include <linux/filter.h>
 
 /* No hurry in this branch */
-static u8 *load_pointer(struct sk_buff *skb, int k)
+static void *__load_pointer(struct sk_buff *skb, int k)
 {
 	u8 *ptr = NULL;
 
@@ -50,8 +52,20 @@ static u8 *load_pointer(struct sk_buff *skb, int k)
 	return NULL;
 }
 
+static inline void *load_pointer(struct sk_buff *skb, int k,
+				 unsigned int size, void *buffer)
+{
+	if (k >= 0)
+		return skb_header_pointer(skb, k, size, buffer);
+	else {
+		if (k >= SKF_AD_OFF)
+			return NULL;
+		return __load_pointer(skb, k);
+	}
+}
+
 /**
- *	sk_run_filter	- 	run a filter on a socket
+ *	sk_run_filter - run a filter on a socket
  *	@skb: buffer to run the filter on
 *	@filter: filter to apply
 *	@flen: length of filter
@@ -61,18 +75,14 @@ static u8 *load_pointer(struct sk_buff *skb, int k)
 *	filtering, filter is the array of filter instructions, and
 *	len is the number of filter blocks in the array.
 */
-
-int sk_run_filter(struct sk_buff *skb, struct sock_filter *filter, int flen)
+unsigned int sk_run_filter(struct sk_buff *skb, struct sock_filter *filter, int flen)
 {
-	unsigned char *data = skb->data;
-	/* len is UNSIGNED. Byte wide insns relies only on implicit
-	   type casts to prevent reading arbitrary memory locations.
-	 */
-	unsigned int len = skb->len-skb->data_len;
 	struct sock_filter *fentry;	/* We walk down these */
-	u32 A = 0;	   		/* Accumulator */
-	u32 X = 0;   			/* Index Register */
+	void *ptr;
+	u32 A = 0;			/* Accumulator */
+	u32 X = 0;			/* Index Register */
 	u32 mem[BPF_MEMWORDS];		/* Scratch Memory Store */
+	u32 tmp;
 	int k;
 	int pc;
 
@@ -107,8 +117,6 @@ int sk_run_filter(struct sk_buff *skb, struct sock_filter *filter, int flen)
 			A /= X;
 			continue;
 		case BPF_ALU|BPF_DIV|BPF_K:
-			if (fentry->k == 0)
-				return 0;
 			A /= fentry->k;
 			continue;
 		case BPF_ALU|BPF_AND|BPF_X:
@@ -167,84 +175,36 @@ int sk_run_filter(struct sk_buff *skb, struct sock_filter *filter, int flen)
 			continue;
 		case BPF_LD|BPF_W|BPF_ABS:
 			k = fentry->k;
-		load_w:
-			if (k >= 0 && (unsigned int)(k+sizeof(u32)) <= len) {
-				A = ntohl(*(u32*)&data[k]);
+load_w:
+			ptr = load_pointer(skb, k, 4, &tmp);
+			if (ptr != NULL) {
+				A = ntohl(get_unaligned((__be32 *)ptr));
 				continue;
 			}
-			if (k < 0) {
-				u8 *ptr;
-
-				if (k >= SKF_AD_OFF)
-					break;
-				ptr = load_pointer(skb, k);
-				if (ptr) {
-					A = ntohl(*(u32*)ptr);
-					continue;
-				}
-			} else {
-				u32 tmp;
-				if (!skb_copy_bits(skb, k, &tmp, 4)) {
-					A = ntohl(tmp);
-					continue;
-				}
-			}
-			return 0;
+			break;
 		case BPF_LD|BPF_H|BPF_ABS:
 			k = fentry->k;
-		load_h:
-			if (k >= 0 && (unsigned int)(k + sizeof(u16)) <= len) {
-				A = ntohs(*(u16*)&data[k]);
+load_h:
+			ptr = load_pointer(skb, k, 2, &tmp);
+			if (ptr != NULL) {
+				A = ntohs(get_unaligned((__be16 *)ptr));
 				continue;
 			}
-			if (k < 0) {
-				u8 *ptr;
-
-				if (k >= SKF_AD_OFF)
-					break;
-				ptr = load_pointer(skb, k);
-				if (ptr) {
-					A = ntohs(*(u16*)ptr);
-					continue;
-				}
-			} else {
-				u16 tmp;
-				if (!skb_copy_bits(skb, k, &tmp, 2)) {
-					A = ntohs(tmp);
-					continue;
-				}
-			}
-			return 0;
 		case BPF_LD|BPF_B|BPF_ABS:
 			k = fentry->k;
 load_b:
-			if (k >= 0 && (unsigned int)k < len) {
-				A = data[k];
+			ptr = load_pointer(skb, k, 1, &tmp);
+			if (ptr != NULL) {
+				A = *(u8 *)ptr;
 				continue;
 			}
-			if (k < 0) {
-				u8 *ptr;
-
-				if (k >= SKF_AD_OFF)
-					break;
-				ptr = load_pointer(skb, k);
-				if (ptr) {
-					A = *ptr;
-					continue;
-				}
-			} else {
-				u8 tmp;
-				if (!skb_copy_bits(skb, k, &tmp, 1)) {
-					A = tmp;
-					continue;
-				}
-			}
-			return 0;
+			break;
 		case BPF_LD|BPF_W|BPF_LEN:
-			A = len;
+			A = skb->len;
 			continue;
 		case BPF_LDX|BPF_W|BPF_LEN:
-			X = len;
+			X = skb->len;
 			continue;
 		case BPF_LD|BPF_W|BPF_IND:
 			k = X + fentry->k;
@@ -256,10 +216,12 @@ load_b:
 			k = X + fentry->k;
 			goto load_b;
 		case BPF_LDX|BPF_B|BPF_MSH:
-			if (fentry->k >= len)
-				return 0;
-			X = (data[fentry->k] & 0xf) << 2;
-			continue;
+			ptr = load_pointer(skb, fentry->k, 1, &tmp);
+			if (ptr != NULL) {
+				X = (*(u8 *)ptr & 0xf) << 2;
+				continue;
+			}
+			return 0;
 		case BPF_LD|BPF_IMM:
 			A = fentry->k;
 			continue;
@@ -279,9 +241,9 @@ load_b:
 			A = X;
 			continue;
 		case BPF_RET|BPF_K:
-			return ((unsigned int)fentry->k);
+			return fentry->k;
 		case BPF_RET|BPF_A:
-			return ((unsigned int)A);
+			return A;
 		case BPF_ST:
 			mem[fentry->k] = A;
 			continue;
@@ -289,7 +251,7 @@ load_b:
 			mem[fentry->k] = X;
 			continue;
 		default:
-			/* Invalid instruction counts as RET */
+			WARN_ON(1);
 			return 0;
 		}
 
@@ -299,7 +261,7 @@ load_b:
 		 */
 		switch (k-SKF_AD_OFF) {
 		case SKF_AD_PROTOCOL:
-			A = htons(skb->protocol);
+			A = ntohs(skb->protocol);
 			continue;
 		case SKF_AD_PKTTYPE:
 			A = skb->pkt_type;
@@ -322,60 +284,107 @@ load_b:
 *
 * Check the user's filter code. If we let some ugly
 * filter code slip through kaboom! The filter must contain
- * no references or jumps that are out of range, no illegal instructions
- * and no backward jumps. It must end with a RET instruction
+ * no references or jumps that are out of range, no illegal
+ * instructions, and must end with a RET instruction.
+ *
+ * All jumps are forward as they are not signed.
 *
- * Returns 0 if the rule set is legal or a negative errno code if not.
+ * Returns 0 if the rule set is legal or -EINVAL if not.
 */
 int sk_chk_filter(struct sock_filter *filter, int flen)
 {
 	struct sock_filter *ftest;
 	int pc;
 
-	if (((unsigned int)flen >= (~0U / sizeof(struct sock_filter))) || flen == 0)
+	if (flen == 0 || flen > BPF_MAXINSNS)
 		return -EINVAL;
 
 	/* check the filter code now */
 	for (pc = 0; pc < flen; pc++) {
-		/* all jumps are forward as they are not signed */
 		ftest = &filter[pc];
-		if (BPF_CLASS(ftest->code) == BPF_JMP) {
-			/* but they mustn't jump off the end */
-			if (BPF_OP(ftest->code) == BPF_JA) {
-				/*
-				 * Note, the large ftest->k might cause loops.
-				 * Compare this with conditional jumps below,
-				 * where offsets are limited. --ANK (981016)
-				 */
-				if (ftest->k >= (unsigned)(flen-pc-1))
-					return -EINVAL;
-			} else {
-				/* for conditionals both must be safe */
-				if (pc + ftest->jt +1 >= flen ||
-				    pc + ftest->jf +1 >= flen)
-					return -EINVAL;
-			}
-		}
 
-		/* check that memory operations use valid addresses. */
-		if (ftest->k >= BPF_MEMWORDS) {
-			/* but it might not be a memory operation... */
-			switch (ftest->code) {
-			case BPF_ST:
-			case BPF_STX:
-			case BPF_LD|BPF_MEM:
-			case BPF_LDX|BPF_MEM:
+		/* Only allow valid instructions */
+		switch (ftest->code) {
+		case BPF_ALU|BPF_ADD|BPF_K:
+		case BPF_ALU|BPF_ADD|BPF_X:
+		case BPF_ALU|BPF_SUB|BPF_K:
+		case BPF_ALU|BPF_SUB|BPF_X:
+		case BPF_ALU|BPF_MUL|BPF_K:
+		case BPF_ALU|BPF_MUL|BPF_X:
+		case BPF_ALU|BPF_DIV|BPF_X:
+		case BPF_ALU|BPF_AND|BPF_K:
+		case BPF_ALU|BPF_AND|BPF_X:
+		case BPF_ALU|BPF_OR|BPF_K:
+		case BPF_ALU|BPF_OR|BPF_X:
+		case BPF_ALU|BPF_LSH|BPF_K:
+		case BPF_ALU|BPF_LSH|BPF_X:
+		case BPF_ALU|BPF_RSH|BPF_K:
+		case BPF_ALU|BPF_RSH|BPF_X:
+		case BPF_ALU|BPF_NEG:
+		case BPF_LD|BPF_W|BPF_ABS:
+		case BPF_LD|BPF_H|BPF_ABS:
+		case BPF_LD|BPF_B|BPF_ABS:
+		case BPF_LD|BPF_W|BPF_LEN:
+		case BPF_LD|BPF_W|BPF_IND:
+		case BPF_LD|BPF_H|BPF_IND:
+		case BPF_LD|BPF_B|BPF_IND:
+		case BPF_LD|BPF_IMM:
+		case BPF_LDX|BPF_W|BPF_LEN:
+		case BPF_LDX|BPF_B|BPF_MSH:
+		case BPF_LDX|BPF_IMM:
+		case BPF_MISC|BPF_TAX:
+		case BPF_MISC|BPF_TXA:
+		case BPF_RET|BPF_K:
+		case BPF_RET|BPF_A:
+			break;
+
+		/* Some instructions need special checks */
+
+		case BPF_ALU|BPF_DIV|BPF_K:
+			/* check for division by zero */
+			if (ftest->k == 0)
 				return -EINVAL;
-			}
+			break;
+
+		case BPF_LD|BPF_MEM:
+		case BPF_LDX|BPF_MEM:
+		case BPF_ST:
+		case BPF_STX:
+			/* check for invalid memory addresses */
+			if (ftest->k >= BPF_MEMWORDS)
+				return -EINVAL;
+			break;
+
+		case BPF_JMP|BPF_JA:
+			/*
+			 * Note, the large ftest->k might cause loops.
+			 * Compare this with conditional jumps below,
+			 * where offsets are limited. --ANK (981016)
+			 */
+			if (ftest->k >= (unsigned)(flen-pc-1))
+				return -EINVAL;
+			break;
+
+		case BPF_JMP|BPF_JEQ|BPF_K:
+		case BPF_JMP|BPF_JEQ|BPF_X:
+		case BPF_JMP|BPF_JGE|BPF_K:
+		case BPF_JMP|BPF_JGE|BPF_X:
+		case BPF_JMP|BPF_JGT|BPF_K:
+		case BPF_JMP|BPF_JGT|BPF_X:
+		case BPF_JMP|BPF_JSET|BPF_K:
+		case BPF_JMP|BPF_JSET|BPF_X:
+			/* for conditionals both must be safe */
+			if (pc + ftest->jt + 1 >= flen ||
+			    pc + ftest->jf + 1 >= flen)
+				return -EINVAL;
+			break;
+
+		default:
+			return -EINVAL;
 		}
 	}
 
-	/*
-	 * The program must end with a return. We don't care where they
-	 * jumped within the script (its always forwards) but in the end
-	 * they _will_ hit this.
- */ - return (BPF_CLASS(filter[flen - 1].code) == BPF_RET) ? 0 : -EINVAL; + return (BPF_CLASS(filter[flen - 1].code) == BPF_RET) ? 0 : -EINVAL; } /** @@ -395,8 +404,8 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk) int err; /* Make sure new filter is there and in the right amounts. */ - if (fprog->filter == NULL || fprog->len > BPF_MAXINSNS) - return -EINVAL; + if (fprog->filter == NULL) + return -EINVAL; fp = sock_kmalloc(sk, fsize+sizeof(*fp), GFP_KERNEL); if (!fp) @@ -413,10 +422,10 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk) if (!err) { struct sk_filter *old_fp; - spin_lock_bh(&sk->sk_lock.slock); - old_fp = sk->sk_filter; - sk->sk_filter = fp; - spin_unlock_bh(&sk->sk_lock.slock); + rcu_read_lock_bh(); + old_fp = rcu_dereference(sk->sk_filter); + rcu_assign_pointer(sk->sk_filter, fp); + rcu_read_unlock_bh(); fp = old_fp; }