From: Patrick McHardy

> Kris Katterjohn wrote:
> > --- x/net/core/filter.c	2005-12-28 16:51:35.000000000 -0600
> > +++ y/net/core/filter.c	2005-12-28 16:53:32.000000000 -0600
> > @@ -250,7 +250,7 @@ load_b:
> >  			mem[fentry->k] = X;
> >  			continue;
> >  		default:
> > -			/* Invalid instruction counts as RET */
> > +			/* Should never be reached */
>
> This stuff has had a number of bad bugs before, if it can't be reached,
> please call BUG or something similar instead of silently ignoring the
> error.
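(Roughly, the BUG() variant Patrick is asking for would be the hunk below
against the same switch in sk_run_filter(). This is only an illustrative
sketch of the review comment, not a patch that was posted; the reply that
follows takes a different route and drops the default case altogether.)

 		default:
-			/* Should never be reached */
+			/* Should never be reached: sk_chk_filter() refuses
+			 * filters containing unknown opcodes, so getting
+			 * here would be a kernel bug.
+			 */
+			BUG();
 			return 0;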
I'll just remove the default label and return statement.

> >  			return 0;
> >  		}
> >
> > @@ -300,38 +300,87 @@ int sk_chk_filter(struct sock_filter *fi
> >  	for (pc = 0; pc < flen; pc++) {
> >  		/* all jumps are forward as they are not signed */
> >  		ftest = &filter[pc];
> > -		if (BPF_CLASS(ftest->code) == BPF_JMP) {
> > -			/* but they mustn't jump off the end */
> > -			if (BPF_OP(ftest->code) == BPF_JA) {
> > -				/*
> > -				 * Note, the large ftest->k might cause loops.
> > -				 * Compare this with conditional jumps below,
> > -				 * where offsets are limited. --ANK (981016)
> > -				 */
> > -				if (ftest->k >= (unsigned)(flen-pc-1))
> > -					return -EINVAL;
> > -			} else {
> > -				/* for conditionals both must be safe */
> > -				if (pc + ftest->jt +1 >= flen ||
> > -				    pc + ftest->jf +1 >= flen)
> > -					return -EINVAL;
> > -			}
> > -		}
> >
> > -		/* check for division by zero -Kris Katterjohn 2005-10-30 */
> > -		if (ftest->code == (BPF_ALU|BPF_DIV|BPF_K) && ftest->k == 0)
> > -			return -EINVAL;
> > +		/* Only allow valid instructions -Kris Katterjohn 2005-12-28 */
> > +		switch (ftest->code) {
> > +		case BPF_ALU|BPF_ADD|BPF_K:
> > +		case BPF_ALU|BPF_ADD|BPF_X:
> > +		case BPF_ALU|BPF_SUB|BPF_K:
> > +		case BPF_ALU|BPF_SUB|BPF_X:
> > +		case BPF_ALU|BPF_MUL|BPF_K:
> > +		case BPF_ALU|BPF_MUL|BPF_X:
> > +		case BPF_ALU|BPF_DIV|BPF_X:
> > +		case BPF_ALU|BPF_AND|BPF_K:
> > +		case BPF_ALU|BPF_AND|BPF_X:
> > +		case BPF_ALU|BPF_OR|BPF_K:
> > +		case BPF_ALU|BPF_OR|BPF_X:
> > +		case BPF_ALU|BPF_LSH|BPF_K:
> > +		case BPF_ALU|BPF_LSH|BPF_X:
> > +		case BPF_ALU|BPF_RSH|BPF_K:
> > +		case BPF_ALU|BPF_RSH|BPF_X:
> > +		case BPF_ALU|BPF_NEG:
> > +		case BPF_LD|BPF_W|BPF_ABS:
> > +		case BPF_LD|BPF_H|BPF_ABS:
> > +		case BPF_LD|BPF_B|BPF_ABS:
> > +		case BPF_LD|BPF_W|BPF_LEN:
> > +		case BPF_LD|BPF_W|BPF_IND:
> > +		case BPF_LD|BPF_H|BPF_IND:
> > +		case BPF_LD|BPF_B|BPF_IND:
> > +		case BPF_LD|BPF_IMM:
> > +		case BPF_LDX|BPF_W|BPF_LEN:
> > +		case BPF_LDX|BPF_B|BPF_MSH:
> > +		case BPF_LDX|BPF_IMM:
> > +		case BPF_MISC|BPF_TAX:
> > +		case BPF_MISC|BPF_TXA:
> > +		case BPF_RET|BPF_K:
> > +		case BPF_RET|BPF_A:
> > +			break;
>
> I think this could be done more readably using BPF_CLASS().

If it's done with BPF_CLASS, then it would either be a lot longer or not
check for only valid instructions. That is, you'd check for BPF_RET, but
not BPF_RET|BPF_K and BPF_RET|BPF_A, which are the actual instructions.

I thought about doing it with BPF_CLASS, but this way is shorter and
checks for everything.

> > +
> > +		/* Some instructions need special checks */
> > +
> > +		case BPF_ALU|BPF_DIV|BPF_K:
> > +			/* check for division by zero
> > +			 * -Kris Katterjohn 2005-10-30
>
> Please don't annotate every single comment with your name and date,
> especially not totally useless ones such as this. If you want some
> record of your changes inside the file, place it somewhere at the
> top, where it doesn't clutter up the code.

Okay. That was my first change to the kernel, and I didn't think I'd be
doing more to it to clutter it up.

> > +			 */
> > +			if (ftest->k == 0)
> > +				return -EINVAL;
>
> Why do you keep the runtime check then?

The runtime check is for BPF_ALU|BPF_DIV|BPF_X, not BPF_ALU|BPF_DIV|BPF_K.
The BPF_K divisor is a constant and can be checked when the filter is
loaded, but the BPF_X divisor changes with each packet and must be checked
at runtime.
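To make those two points concrete, here is a rough userspace sketch. It is
hypothetical illustration only, not part of the patch: the file name and
helper functions are invented, and only the BPF_* macros from
<linux/filter.h> are real. BPF_CLASS() keeps just the low bits of an
opcode, so a class-only check cannot tell the two real return instructions
apart from a junk opcode in the same class, while a full-opcode switch can;
and a BPF_K divisor is a constant embedded in the instruction, so a zero
can be rejected before any packet is ever seen.

/* bpf_check_demo.c - hypothetical illustration, not kernel code */
#include <stdio.h>
#include <linux/filter.h>	/* BPF_CLASS(), BPF_RET, BPF_K, BPF_A, BPF_STMT() */

/* Coarse check: accepts anything whose class is BPF_RET */
static int class_only_valid(unsigned short code)
{
	return BPF_CLASS(code) == BPF_RET;
}

/* Strict check, in the spirit of the patch: only the real instructions */
static int full_opcode_valid(unsigned short code)
{
	switch (code) {
	case BPF_RET|BPF_K:
	case BPF_RET|BPF_A:
		return 1;
	default:
		return 0;
	}
}

int main(void)
{
	/* Same class as a return, but not an instruction the interpreter knows */
	unsigned short bogus = BPF_RET | 0x18;
	/* Constant divisor of zero: visible at load time, no packet needed */
	struct sock_filter div_k = BPF_STMT(BPF_ALU|BPF_DIV|BPF_K, 0);

	printf("opcode 0x%x: class-only check %d, full-opcode check %d\n",
	       bogus, class_only_valid(bogus), full_opcode_valid(bogus));
	printf("BPF_DIV|BPF_K with k=%u can be rejected before any packet\n",
	       div_k.k);
	return 0;
}

The revised patch below folds in the review comments: the name-and-date
annotations move to the header comment and the unreachable default case in
sk_run_filter() is removed.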
--- x/net/core/filter.c	2005-12-28 16:51:35.000000000 -0600
+++ y/net/core/filter.c	2005-12-29 12:19:48.000000000 -0600
@@ -13,6 +13,7 @@
  * 2 of the License, or (at your option) any later version.
  *
  * Andi Kleen - Fix a few bad bugs and races.
+ * Kris Katterjohn 2005-12-28 - Added many additional checks in sk_chk_filter()
  */
 
 #include <linux/module.h>
@@ -249,9 +250,6 @@ load_b:
 		case BPF_STX:
 			mem[fentry->k] = X;
 			continue;
-		default:
-			/* Invalid instruction counts as RET */
-			return 0;
 		}
 
 		/*
@@ -300,38 +298,85 @@ int sk_chk_filter(struct sock_filter *fi
 	for (pc = 0; pc < flen; pc++) {
 		/* all jumps are forward as they are not signed */
 		ftest = &filter[pc];
-		if (BPF_CLASS(ftest->code) == BPF_JMP) {
-			/* but they mustn't jump off the end */
-			if (BPF_OP(ftest->code) == BPF_JA) {
-				/*
-				 * Note, the large ftest->k might cause loops.
-				 * Compare this with conditional jumps below,
-				 * where offsets are limited. --ANK (981016)
-				 */
-				if (ftest->k >= (unsigned)(flen-pc-1))
-					return -EINVAL;
-			} else {
-				/* for conditionals both must be safe */
-				if (pc + ftest->jt +1 >= flen ||
-				    pc + ftest->jf +1 >= flen)
-					return -EINVAL;
-			}
-		}
 
-		/* check for division by zero -Kris Katterjohn 2005-10-30 */
-		if (ftest->code == (BPF_ALU|BPF_DIV|BPF_K) && ftest->k == 0)
-			return -EINVAL;
+		/* Only allow valid instructions */
+		switch (ftest->code) {
+		case BPF_ALU|BPF_ADD|BPF_K:
+		case BPF_ALU|BPF_ADD|BPF_X:
+		case BPF_ALU|BPF_SUB|BPF_K:
+		case BPF_ALU|BPF_SUB|BPF_X:
+		case BPF_ALU|BPF_MUL|BPF_K:
+		case BPF_ALU|BPF_MUL|BPF_X:
+		case BPF_ALU|BPF_DIV|BPF_X:
+		case BPF_ALU|BPF_AND|BPF_K:
+		case BPF_ALU|BPF_AND|BPF_X:
+		case BPF_ALU|BPF_OR|BPF_K:
+		case BPF_ALU|BPF_OR|BPF_X:
+		case BPF_ALU|BPF_LSH|BPF_K:
+		case BPF_ALU|BPF_LSH|BPF_X:
+		case BPF_ALU|BPF_RSH|BPF_K:
+		case BPF_ALU|BPF_RSH|BPF_X:
+		case BPF_ALU|BPF_NEG:
+		case BPF_LD|BPF_W|BPF_ABS:
+		case BPF_LD|BPF_H|BPF_ABS:
+		case BPF_LD|BPF_B|BPF_ABS:
+		case BPF_LD|BPF_W|BPF_LEN:
+		case BPF_LD|BPF_W|BPF_IND:
+		case BPF_LD|BPF_H|BPF_IND:
+		case BPF_LD|BPF_B|BPF_IND:
+		case BPF_LD|BPF_IMM:
+		case BPF_LDX|BPF_W|BPF_LEN:
+		case BPF_LDX|BPF_B|BPF_MSH:
+		case BPF_LDX|BPF_IMM:
+		case BPF_MISC|BPF_TAX:
+		case BPF_MISC|BPF_TXA:
+		case BPF_RET|BPF_K:
+		case BPF_RET|BPF_A:
+			break;
+
+		/* Some instructions need special checks */
 
-		/* check that memory operations use valid addresses. */
-		if (ftest->k >= BPF_MEMWORDS) {
-			/* but it might not be a memory operation... */
-			switch (ftest->code) {
-			case BPF_ST:
-			case BPF_STX:
-			case BPF_LD|BPF_MEM:
-			case BPF_LDX|BPF_MEM:
+		case BPF_ALU|BPF_DIV|BPF_K:
+			/* check for division by zero */
+			if (ftest->k == 0)
 				return -EINVAL;
-			}
-		}
+			break;
+
+		case BPF_LD|BPF_MEM:
+		case BPF_LDX|BPF_MEM:
+		case BPF_ST:
+		case BPF_STX:
+			/* check for invalid memory addresses */
+			if (ftest->k >= BPF_MEMWORDS)
+				return -EINVAL;
+			break;
+
+		case BPF_JMP|BPF_JA:
+			/*
+			 * Note, the large ftest->k might cause loops.
+			 * Compare this with conditional jumps below,
+			 * where offsets are limited. --ANK (981016)
+			 */
+			if (ftest->k >= (unsigned)(flen-pc-1))
+				return -EINVAL;
+			break;
+
+		case BPF_JMP|BPF_JEQ|BPF_K:
+		case BPF_JMP|BPF_JEQ|BPF_X:
+		case BPF_JMP|BPF_JGE|BPF_K:
+		case BPF_JMP|BPF_JGE|BPF_X:
+		case BPF_JMP|BPF_JGT|BPF_K:
+		case BPF_JMP|BPF_JGT|BPF_X:
+		case BPF_JMP|BPF_JSET|BPF_K:
+		case BPF_JMP|BPF_JSET|BPF_X:
+			/* for conditionals both must be safe */
+			if (pc + ftest->jt + 1 >= flen ||
+			    pc + ftest->jf + 1 >= flen)
+				return -EINVAL;
+			break;
+
+		default:
+			return -EINVAL;
 		}
 	}
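For context on where sk_chk_filter() runs, here is a minimal userspace
sketch of the interface it protects. It is a hypothetical example, not from
this thread: the file name is invented, it assumes a Linux host, needs root
for the packet socket, and hard-codes SO_ATTACH_FILTER's common value only
as a fallback. Every instruction in it is one of the opcodes accepted by
the switch above; an unknown opcode, an out-of-range jump, a bad
scratch-memory index or a constant divide by zero would instead make the
setsockopt() call fail with EINVAL.

/* attach_filter_demo.c - hypothetical example of attaching a classic BPF filter */
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <sys/socket.h>
#include <arpa/inet.h>		/* htons() */
#include <netinet/in.h>		/* IPPROTO_UDP */
#include <linux/if_ether.h>	/* ETH_P_IP, ETH_P_ALL */
#include <linux/filter.h>	/* struct sock_filter/sock_fprog, BPF_STMT, BPF_JUMP */

#ifndef SO_ATTACH_FILTER
#define SO_ATTACH_FILTER 26	/* value from asm-generic/socket.h on most arches */
#endif

int main(void)
{
	/* "Accept UDP over IPv4, drop everything else", as seen on an
	 * Ethernet packet socket (the program tcpdump generates for "udp"). */
	struct sock_filter insns[] = {
		BPF_STMT(BPF_LD|BPF_H|BPF_ABS, 12),		   /* EtherType */
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, ETH_P_IP, 0, 3),   /* IPv4? */
		BPF_STMT(BPF_LD|BPF_B|BPF_ABS, 23),		   /* IP protocol */
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, IPPROTO_UDP, 0, 1),
		BPF_STMT(BPF_RET|BPF_K, 0xffff),		   /* accept */
		BPF_STMT(BPF_RET|BPF_K, 0),			   /* drop */
	};
	struct sock_fprog prog = {
		.len = sizeof(insns) / sizeof(insns[0]),
		.filter = insns,
	};
	int fd = socket(PF_PACKET, SOCK_RAW, htons(ETH_P_ALL));

	if (fd < 0) {
		perror("socket (needs root)");
		return 1;
	}

	/* sk_chk_filter() runs inside this setsockopt(); a filter that fails
	 * any of the checks in the patch is rejected with EINVAL here. */
	if (setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &prog, sizeof(prog)) < 0) {
		fprintf(stderr, "SO_ATTACH_FILTER: %s\n", strerror(errno));
		return 1;
	}
	printf("filter attached to packet socket\n");
	return 0;
}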