| author | Joerg Mayer <jmayer@loplof.de> | 2018-05-01 20:30:39 +0200 |
|---|---|---|
| committer | Joerg Mayer <jmayer@loplof.de> | 2018-05-01 21:19:02 +0200 |
| commit | 08a58700942f29139998acccb1c6ebb68beabbd9 (patch) | |
| tree | fbd6bf8b9b5b5a3d1b3bea4bbc0751de9599e28b /bpf_filter.c | |
| parent | 80b55a4b436713749ca68e38a9f4780e4fd921e8 (diff) | |
Move bpf/net/bpf_filter.c to toplevel directory - it's the sole file in bpf/...
Fix autotools .devel failure in out-of-tree (OOT) builds while at it
Diffstat (limited to 'bpf_filter.c')
-rw-r--r-- | bpf_filter.c | 539 |
1 file changed, 539 insertions, 0 deletions
diff --git a/bpf_filter.c b/bpf_filter.c
new file mode 100644
index 00000000..1484e08b
--- /dev/null
+++ b/bpf_filter.c
@@ -0,0 +1,539 @@
+/*-
+ * Copyright (c) 1990, 1991, 1992, 1993, 1994, 1995, 1996, 1997
+ *	The Regents of the University of California.  All rights reserved.
+ *
+ * This code is derived from the Stanford/CMU enet packet filter,
+ * (net/enet.c) distributed as part of 4.3BSD, and code contributed
+ * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence
+ * Berkeley Laboratory.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ *    must display the following acknowledgement:
+ *	This product includes software developed by the University of
+ *	California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ *      @(#)bpf.c	7.5 (Berkeley) 7/15/91
+ */
+
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <pcap/pcap-inttypes.h>
+#include "pcap-types.h"
+
+#ifndef _WIN32
+#include <sys/param.h>
+#include <sys/types.h>
+#include <sys/time.h>
+#endif /* _WIN32 */
+
+#include <pcap/bpf.h>
+
+#include <stdlib.h>
+
+#define int32 bpf_int32
+#define u_int32 bpf_u_int32
+
+#ifndef LBL_ALIGN
+/*
+ * XXX - IA-64?  If not, this probably won't work on Win64 IA-64
+ * systems, unless LBL_ALIGN is defined elsewhere for them.
+ * XXX - SuperH?  If not, this probably won't work on WinCE SuperH
+ * systems, unless LBL_ALIGN is defined elsewhere for them.
+ */
+#if defined(sparc) || defined(__sparc__) || defined(mips) || \
+    defined(ibm032) || defined(__alpha) || defined(__hpux) || \
+    defined(__arm__)
+#define LBL_ALIGN
+#endif
+#endif
+
+#ifndef LBL_ALIGN
+#ifndef _WIN32
+#include <netinet/in.h>
+#endif
+
+#define EXTRACT_SHORT(p)	((u_short)ntohs(*(u_short *)p))
+#define EXTRACT_LONG(p)		(ntohl(*(u_int32 *)p))
+#else
+#define EXTRACT_SHORT(p)\
+	((u_short)\
+		((u_short)*((u_char *)p+0)<<8|\
+		 (u_short)*((u_char *)p+1)<<0))
+#define EXTRACT_LONG(p)\
+		((u_int32)*((u_char *)p+0)<<24|\
+		 (u_int32)*((u_char *)p+1)<<16|\
+		 (u_int32)*((u_char *)p+2)<<8|\
+		 (u_int32)*((u_char *)p+3)<<0)
+#endif
+
+#ifdef __linux__
+#include <linux/types.h>
+#include <linux/if_packet.h>
+#include <linux/filter.h>
+#endif
+
+enum {
+	BPF_S_ANC_NONE,
+	BPF_S_ANC_VLAN_TAG,
+	BPF_S_ANC_VLAN_TAG_PRESENT,
+};
+
+/*
+ * Execute the filter program starting at pc on the packet p
+ * wirelen is the length of the original packet
+ * buflen is the amount of data present
+ * aux_data is auxiliary data, currently used only when interpreting
+ * filters intended for the Linux kernel in cases where the kernel
+ * rejects the filter; it contains VLAN tag information
+ * For the kernel, p is assumed to be a pointer to an mbuf if buflen is 0,
+ * in all other cases, p is a pointer to a buffer and buflen is its size.
+ *
+ * Thanks to Ani Sinha <ani@arista.com> for providing initial implementation
+ */
+u_int
+bpf_filter_with_aux_data(const struct bpf_insn *pc, const u_char *p,
+    u_int wirelen, u_int buflen, const struct bpf_aux_data *aux_data)
+{
+	register u_int32 A, X;
+	register bpf_u_int32 k;
+	u_int32 mem[BPF_MEMWORDS];
+
+	if (pc == 0)
+		/*
+		 * No filter means accept all.
+		 */
+		return (u_int)-1;
+	A = 0;
+	X = 0;
+	--pc;
+	for (;;) {
+		++pc;
+		switch (pc->code) {
+
+		default:
+			abort();
+		case BPF_RET|BPF_K:
+			return (u_int)pc->k;
+
+		case BPF_RET|BPF_A:
+			return (u_int)A;
+
+		case BPF_LD|BPF_W|BPF_ABS:
+			k = pc->k;
+			if (k > buflen || sizeof(int32_t) > buflen - k) {
+				return 0;
+			}
+			A = EXTRACT_LONG(&p[k]);
+			continue;
+
+		case BPF_LD|BPF_H|BPF_ABS:
+			k = pc->k;
+			if (k > buflen || sizeof(int16_t) > buflen - k) {
+				return 0;
+			}
+			A = EXTRACT_SHORT(&p[k]);
+			continue;
+
+		case BPF_LD|BPF_B|BPF_ABS:
+			switch (pc->k) {
+
+#if defined(SKF_AD_VLAN_TAG) && defined(SKF_AD_VLAN_TAG_PRESENT)
+			case SKF_AD_OFF + SKF_AD_VLAN_TAG:
+				if (!aux_data)
+					return 0;
+				A = aux_data->vlan_tag;
+				break;
+
+			case SKF_AD_OFF + SKF_AD_VLAN_TAG_PRESENT:
+				if (!aux_data)
+					return 0;
+				A = aux_data->vlan_tag_present;
+				break;
+#endif
+			default:
+				k = pc->k;
+				if (k >= buflen) {
+					return 0;
+				}
+				A = p[k];
+				break;
+			}
+			continue;
+
+		case BPF_LD|BPF_W|BPF_LEN:
+			A = wirelen;
+			continue;
+
+		case BPF_LDX|BPF_W|BPF_LEN:
+			X = wirelen;
+			continue;
+
+		case BPF_LD|BPF_W|BPF_IND:
+			k = X + pc->k;
+			if (pc->k > buflen || X > buflen - pc->k ||
+			    sizeof(int32_t) > buflen - k) {
+				return 0;
+			}
+			A = EXTRACT_LONG(&p[k]);
+			continue;
+
+		case BPF_LD|BPF_H|BPF_IND:
+			k = X + pc->k;
+			if (X > buflen || pc->k > buflen - X ||
+			    sizeof(int16_t) > buflen - k) {
+				return 0;
+			}
+			A = EXTRACT_SHORT(&p[k]);
+			continue;
+
+		case BPF_LD|BPF_B|BPF_IND:
+			k = X + pc->k;
+			if (pc->k >= buflen || X >= buflen - pc->k) {
+				return 0;
+			}
+			A = p[k];
+			continue;
+
+		case BPF_LDX|BPF_MSH|BPF_B:
+			k = pc->k;
+			if (k >= buflen) {
+				return 0;
+			}
+			X = (p[pc->k] & 0xf) << 2;
+			continue;
+
+		case BPF_LD|BPF_IMM:
+			A = pc->k;
+			continue;
+
+		case BPF_LDX|BPF_IMM:
+			X = pc->k;
+			continue;
+
+		case BPF_LD|BPF_MEM:
+			A = mem[pc->k];
+			continue;
+
+		case BPF_LDX|BPF_MEM:
+			X = mem[pc->k];
+			continue;
+
+		case BPF_ST:
+			mem[pc->k] = A;
+			continue;
+
+		case BPF_STX:
+			mem[pc->k] = X;
+			continue;
+
+		case BPF_JMP|BPF_JA:
+			/*
+			 * XXX - we currently implement "ip6 protochain"
+			 * with backward jumps, so sign-extend pc->k.
+			 */
+			pc += (bpf_int32)pc->k;
+			continue;
+
+		case BPF_JMP|BPF_JGT|BPF_K:
+			pc += (A > pc->k) ? pc->jt : pc->jf;
+			continue;
+
+		case BPF_JMP|BPF_JGE|BPF_K:
+			pc += (A >= pc->k) ? pc->jt : pc->jf;
+			continue;
+
+		case BPF_JMP|BPF_JEQ|BPF_K:
+			pc += (A == pc->k) ? pc->jt : pc->jf;
+			continue;
+
+		case BPF_JMP|BPF_JSET|BPF_K:
+			pc += (A & pc->k) ? pc->jt : pc->jf;
+			continue;
+
+		case BPF_JMP|BPF_JGT|BPF_X:
+			pc += (A > X) ? pc->jt : pc->jf;
+			continue;
+
+		case BPF_JMP|BPF_JGE|BPF_X:
+			pc += (A >= X) ? pc->jt : pc->jf;
+			continue;
+
+		case BPF_JMP|BPF_JEQ|BPF_X:
+			pc += (A == X) ? pc->jt : pc->jf;
+			continue;
+
+		case BPF_JMP|BPF_JSET|BPF_X:
+			pc += (A & X) ? pc->jt : pc->jf;
+			continue;
+
+		case BPF_ALU|BPF_ADD|BPF_X:
+			A += X;
+			continue;
+
+		case BPF_ALU|BPF_SUB|BPF_X:
+			A -= X;
+			continue;
+
+		case BPF_ALU|BPF_MUL|BPF_X:
+			A *= X;
+			continue;
+
+		case BPF_ALU|BPF_DIV|BPF_X:
+			if (X == 0)
+				return 0;
+			A /= X;
+			continue;
+
+		case BPF_ALU|BPF_MOD|BPF_X:
+			if (X == 0)
+				return 0;
+			A %= X;
+			continue;
+
+		case BPF_ALU|BPF_AND|BPF_X:
+			A &= X;
+			continue;
+
+		case BPF_ALU|BPF_OR|BPF_X:
+			A |= X;
+			continue;
+
+		case BPF_ALU|BPF_XOR|BPF_X:
+			A ^= X;
+			continue;
+
+		case BPF_ALU|BPF_LSH|BPF_X:
+			A <<= X;
+			continue;
+
+		case BPF_ALU|BPF_RSH|BPF_X:
+			A >>= X;
+			continue;
+
+		case BPF_ALU|BPF_ADD|BPF_K:
+			A += pc->k;
+			continue;
+
+		case BPF_ALU|BPF_SUB|BPF_K:
+			A -= pc->k;
+			continue;
+
+		case BPF_ALU|BPF_MUL|BPF_K:
+			A *= pc->k;
+			continue;
+
+		case BPF_ALU|BPF_DIV|BPF_K:
+			A /= pc->k;
+			continue;
+
+		case BPF_ALU|BPF_MOD|BPF_K:
+			A %= pc->k;
+			continue;
+
+		case BPF_ALU|BPF_AND|BPF_K:
+			A &= pc->k;
+			continue;
+
+		case BPF_ALU|BPF_OR|BPF_K:
+			A |= pc->k;
+			continue;
+
+		case BPF_ALU|BPF_XOR|BPF_K:
+			A ^= pc->k;
+			continue;
+
+		case BPF_ALU|BPF_LSH|BPF_K:
+			A <<= pc->k;
+			continue;
+
+		case BPF_ALU|BPF_RSH|BPF_K:
+			A >>= pc->k;
+			continue;
+
+		case BPF_ALU|BPF_NEG:
+			/*
+			 * Most BPF arithmetic is unsigned, but negation
+			 * can't be unsigned; throw some casts to
+			 * specify what we're trying to do.
+			 */
+			A = (u_int32)(-(int32)A);
+			continue;
+
+		case BPF_MISC|BPF_TAX:
+			X = A;
+			continue;
+
+		case BPF_MISC|BPF_TXA:
+			A = X;
+			continue;
+		}
+	}
+}
+
+u_int
+bpf_filter(const struct bpf_insn *pc, const u_char *p, u_int wirelen,
+    u_int buflen)
+{
+	return bpf_filter_with_aux_data(pc, p, wirelen, buflen, NULL);
+}
+
+
+/*
+ * Return true if the 'fcode' is a valid filter program.
+ * The constraints are that each jump be forward and to a valid
+ * code, that memory accesses are within valid ranges (to the
+ * extent that this can be checked statically; loads of packet
+ * data have to be, and are, also checked at run time), and that
+ * the code terminates with either an accept or reject.
+ *
+ * The kernel needs to be able to verify an application's filter code.
+ * Otherwise, a bogus program could easily crash the system.
+ */
+int
+bpf_validate(const struct bpf_insn *f, int len)
+{
+	u_int i, from;
+	const struct bpf_insn *p;
+
+	if (len < 1)
+		return 0;
+
+	for (i = 0; i < (u_int)len; ++i) {
+		p = &f[i];
+		switch (BPF_CLASS(p->code)) {
+		/*
+		 * Check that memory operations use valid addresses.
+		 */
+		case BPF_LD:
+		case BPF_LDX:
+			switch (BPF_MODE(p->code)) {
+			case BPF_IMM:
+				break;
+			case BPF_ABS:
+			case BPF_IND:
+			case BPF_MSH:
+				/*
+				 * There's no maximum packet data size
+				 * in userland.  The runtime packet length
+				 * check suffices.
+				 */
+				break;
+			case BPF_MEM:
+				if (p->k >= BPF_MEMWORDS)
+					return 0;
+				break;
+			case BPF_LEN:
+				break;
+			default:
+				return 0;
+			}
+			break;
+		case BPF_ST:
+		case BPF_STX:
+			if (p->k >= BPF_MEMWORDS)
+				return 0;
+			break;
+		case BPF_ALU:
+			switch (BPF_OP(p->code)) {
+			case BPF_ADD:
+			case BPF_SUB:
+			case BPF_MUL:
+			case BPF_OR:
+			case BPF_AND:
+			case BPF_XOR:
+			case BPF_LSH:
+			case BPF_RSH:
+			case BPF_NEG:
+				break;
+			case BPF_DIV:
+			case BPF_MOD:
+				/*
+				 * Check for constant division or modulus
+				 * by 0.
+				 */
+				if (BPF_SRC(p->code) == BPF_K && p->k == 0)
+					return 0;
+				break;
+			default:
+				return 0;
+			}
+			break;
+		case BPF_JMP:
+			/*
+			 * Check that jumps are within the code block,
+			 * and that unconditional branches don't go
+			 * backwards as a result of an overflow.
+			 * Unconditional branches have a 32-bit offset,
+			 * so they could overflow; we check to make
+			 * sure they don't.  Conditional branches have
+			 * an 8-bit offset, and the from address is <=
+			 * BPF_MAXINSNS, and we assume that BPF_MAXINSNS
+			 * is sufficiently small that adding 255 to it
+			 * won't overflow.
+			 *
+			 * We know that len is <= BPF_MAXINSNS, and we
+			 * assume that BPF_MAXINSNS is < the maximum size
+			 * of a u_int, so that i + 1 doesn't overflow.
+			 *
+			 * For userland, we don't know that the from
+			 * or len are <= BPF_MAXINSNS, but we know that
+			 * from <= len, and, except on a 64-bit system,
+			 * it's unlikely that len, if it truly reflects
+			 * the size of the program we've been handed,
+			 * will be anywhere near the maximum size of
+			 * a u_int.  We also don't check for backward
+			 * branches, as we currently support them in
+			 * userland for the protochain operation.
+			 */
+			from = i + 1;
+			switch (BPF_OP(p->code)) {
+			case BPF_JA:
+				if (from + p->k >= (u_int)len)
+					return 0;
+				break;
+			case BPF_JEQ:
+			case BPF_JGT:
+			case BPF_JGE:
+			case BPF_JSET:
+				if (from + p->jt >= (u_int)len || from + p->jf >= (u_int)len)
+					return 0;
+				break;
+			default:
+				return 0;
+			}
+			break;
+		case BPF_RET:
+			break;
+		case BPF_MISC:
+			break;
+		default:
+			return 0;
+		}
+	}
+	return BPF_CLASS(f[len - 1].code) == BPF_RET;
+}
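For context, the two entry points added here, bpf_validate() and bpf_filter(), are typically used back to back: validate a hand-built or compiled instruction array, then run it over a captured packet, treating a non-zero return value as "accept up to that many bytes". The sketch below is not part of this commit; it assumes a program built against libpcap (linked with -lpcap, declarations taken from <pcap/pcap.h>), and the four-instruction filter and dummy packet are invented for illustration.

```c
/*
 * Hypothetical caller-side sketch (not part of the commit above).
 * Assumes libpcap's <pcap/pcap.h>, which declares bpf_filter() and
 * bpf_validate() and pulls in the BPF_STMT()/BPF_JUMP() macros.
 */
#include <stdio.h>
#include <string.h>
#include <pcap/pcap.h>

int main(void)
{
	/* Roughly "ether proto 0x0800": accept IPv4-over-Ethernet frames. */
	struct bpf_insn prog[] = {
		BPF_STMT(BPF_LD|BPF_H|BPF_ABS, 12),		/* A = EtherType */
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, 0x0800, 0, 1),	/* IPv4? */
		BPF_STMT(BPF_RET|BPF_K, 65535),			/* accept */
		BPF_STMT(BPF_RET|BPF_K, 0),			/* reject */
	};
	int len = (int)(sizeof(prog) / sizeof(prog[0]));

	/* A 60-byte dummy Ethernet frame; only the EtherType field matters here. */
	u_char pkt[60];
	memset(pkt, 0, sizeof(pkt));
	pkt[12] = 0x08;
	pkt[13] = 0x00;

	if (!bpf_validate(prog, len)) {
		fprintf(stderr, "filter program rejected by bpf_validate()\n");
		return 1;
	}

	/* Non-zero return = number of bytes to keep, 0 = discard the packet. */
	u_int ret = bpf_filter(prog, pkt, sizeof(pkt), sizeof(pkt));
	printf("filter %s the packet\n", ret != 0 ? "accepted" : "rejected");
	return 0;
}
```

Note the division of labour visible in the file above: the interpreter bounds-checks every packet load at run time, so bpf_validate() only has to check what can be verified statically, namely jump targets and scratch-memory (BPF_MEMWORDS) indices, plus constant division or modulus by zero.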