diff --git a/pppd/.cvsignore b/pppd/.cvsignore index 7ee9db2..48b96a9 100644 --- a/pppd/.cvsignore +++ b/pppd/.cvsignore @@ -1,3 +1,9 @@ Makefile pppd pppd.0 +pppd.cat8 +y.tab.h +y.tab.c +scanner.c +grammar.c +lex.yy.c diff --git a/pppd/bpf_filter.c b/pppd/bpf_filter.c new file mode 100644 index 0000000..763d855 --- /dev/null +++ b/pppd/bpf_filter.c @@ -0,0 +1,314 @@ +/* $Id: bpf_filter.c,v 1.1 1996/04/04 04:22:23 paulus Exp $ */ +/* From: NetBSD: bpf_filter.c,v 1.11 1995/04/22 13:26:39 cgd Exp $ */ + +/* + * Copyright (c) 1990, 1991, 1992, 1993 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from the Stanford/CMU enet packet filter, + * (net/enet.c) distributed as part of 4.3BSD, and code contributed + * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence + * Berkeley Laboratory. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)bpf_filter.c 8.1 (Berkeley) 6/10/93 + */ + +#include +#include +#include +#include + +#if !defined(i386) && !defined(m68k) +#define BPF_ALIGN +#endif + +/* XXX we assume ints are 32 bits, shorts are 16 bits. 
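+ * EXTRACT_SHORT/EXTRACT_LONG below read a network-order halfword/word at
+ * an arbitrary byte offset; without BPF_ALIGN they reduce to ntohs/ntohl,
+ * otherwise the value is assembled byte by byte.
+ *
+ * Minimal usage sketch (illustrative, not part of this file): a compiled
+ * program is run over a captured PPP frame with
+ *
+ *	if (bpf_filter(prog.bf_insns, pkt, len, len))
+ *		... packet matched ...
+ *
+ * A null instruction pointer accepts every packet (see the pc == 0 check).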
*/ + +#ifndef BPF_ALIGN +#define EXTRACT_SHORT(p) ((unsigned short)ntohs(*(unsigned short *)p)) +#define EXTRACT_LONG(p) (ntohl(*(u_int32_t *)p)) +#else +#define EXTRACT_SHORT(p)\ + ((unsigned short)\ + ((unsigned short)*((u_char *)p+0)<<8|\ + (unsigned short)*((u_char *)p+1)<<0)) +#define EXTRACT_LONG(p)\ + ((u_int32_t)*((u_char *)p+0)<<24|\ + (u_int32_t)*((u_char *)p+1)<<16|\ + (u_int32_t)*((u_char *)p+2)<<8|\ + (u_int32_t)*((u_char *)p+3)<<0) +#endif + +#include + +/* + * Execute the filter program starting at pc on the packet p + * wirelen is the length of the original packet + * buflen is the amount of data present + */ +u_int +bpf_filter(pc, p, wirelen, buflen) + register struct bpf_insn *pc; + register u_char *p; + u_int wirelen; + register u_int buflen; +{ + register u_int32_t A, X; + register int k; + int mem[BPF_MEMWORDS]; + + if (pc == 0) + /* + * No filter means accept all. + */ + return (u_int)-1; +#ifdef lint + A = 0; + X = 0; +#endif + --pc; + while (1) { + ++pc; + switch (pc->code) { + + default: + return 0; /* gak! */ + + case BPF_RET|BPF_K: + return (u_int)pc->k; + + case BPF_RET|BPF_A: + return (u_int)A; + + case BPF_LD|BPF_W|BPF_ABS: + k = pc->k; + if (k + sizeof(int) > buflen) { + return 0; + } + A = EXTRACT_LONG(&p[k]); + continue; + + case BPF_LD|BPF_H|BPF_ABS: + k = pc->k; + if (k + sizeof(short int) > buflen) { + return 0; + } + A = EXTRACT_SHORT(&p[k]); + continue; + + case BPF_LD|BPF_B|BPF_ABS: + k = pc->k; + if (k >= buflen) { + return 0; + } + A = p[k]; + continue; + + case BPF_LD|BPF_W|BPF_LEN: + A = wirelen; + continue; + + case BPF_LDX|BPF_W|BPF_LEN: + X = wirelen; + continue; + + case BPF_LD|BPF_W|BPF_IND: + k = X + pc->k; + if (k + sizeof(int) > buflen) { + return 0; + } + A = EXTRACT_LONG(&p[k]); + continue; + + case BPF_LD|BPF_H|BPF_IND: + k = X + pc->k; + if (k + sizeof(short int) > buflen) { + return 0; + } + A = EXTRACT_SHORT(&p[k]); + continue; + + case BPF_LD|BPF_B|BPF_IND: + k = X + pc->k; + if (k >= buflen) { + return 0; + } + A = p[k]; + continue; + + case BPF_LDX|BPF_MSH|BPF_B: + k = pc->k; + if (k >= buflen) { + return 0; + } + X = (p[pc->k] & 0xf) << 2; + continue; + + case BPF_LD|BPF_IMM: + A = pc->k; + continue; + + case BPF_LDX|BPF_IMM: + X = pc->k; + continue; + + case BPF_LD|BPF_MEM: + A = mem[pc->k]; + continue; + + case BPF_LDX|BPF_MEM: + X = mem[pc->k]; + continue; + + case BPF_ST: + mem[pc->k] = A; + continue; + + case BPF_STX: + mem[pc->k] = X; + continue; + + case BPF_JMP|BPF_JA: + pc += pc->k; + continue; + + case BPF_JMP|BPF_JGT|BPF_K: + pc += (A > pc->k) ? pc->jt : pc->jf; + continue; + + case BPF_JMP|BPF_JGE|BPF_K: + pc += (A >= pc->k) ? pc->jt : pc->jf; + continue; + + case BPF_JMP|BPF_JEQ|BPF_K: + pc += (A == pc->k) ? pc->jt : pc->jf; + continue; + + case BPF_JMP|BPF_JSET|BPF_K: + pc += (A & pc->k) ? pc->jt : pc->jf; + continue; + + case BPF_JMP|BPF_JGT|BPF_X: + pc += (A > X) ? pc->jt : pc->jf; + continue; + + case BPF_JMP|BPF_JGE|BPF_X: + pc += (A >= X) ? pc->jt : pc->jf; + continue; + + case BPF_JMP|BPF_JEQ|BPF_X: + pc += (A == X) ? pc->jt : pc->jf; + continue; + + case BPF_JMP|BPF_JSET|BPF_X: + pc += (A & X) ? 
pc->jt : pc->jf; + continue; + + case BPF_ALU|BPF_ADD|BPF_X: + A += X; + continue; + + case BPF_ALU|BPF_SUB|BPF_X: + A -= X; + continue; + + case BPF_ALU|BPF_MUL|BPF_X: + A *= X; + continue; + + case BPF_ALU|BPF_DIV|BPF_X: + if (X == 0) + return 0; + A /= X; + continue; + + case BPF_ALU|BPF_AND|BPF_X: + A &= X; + continue; + + case BPF_ALU|BPF_OR|BPF_X: + A |= X; + continue; + + case BPF_ALU|BPF_LSH|BPF_X: + A <<= X; + continue; + + case BPF_ALU|BPF_RSH|BPF_X: + A >>= X; + continue; + + case BPF_ALU|BPF_ADD|BPF_K: + A += pc->k; + continue; + + case BPF_ALU|BPF_SUB|BPF_K: + A -= pc->k; + continue; + + case BPF_ALU|BPF_MUL|BPF_K: + A *= pc->k; + continue; + + case BPF_ALU|BPF_DIV|BPF_K: + A /= pc->k; + continue; + + case BPF_ALU|BPF_AND|BPF_K: + A &= pc->k; + continue; + + case BPF_ALU|BPF_OR|BPF_K: + A |= pc->k; + continue; + + case BPF_ALU|BPF_LSH|BPF_K: + A <<= pc->k; + continue; + + case BPF_ALU|BPF_RSH|BPF_K: + A >>= pc->k; + continue; + + case BPF_ALU|BPF_NEG: + A = -A; + continue; + + case BPF_MISC|BPF_TAX: + X = A; + continue; + + case BPF_MISC|BPF_TXA: + A = X; + continue; + } + } +} diff --git a/pppd/gencode.c b/pppd/gencode.c new file mode 100644 index 0000000..252dbfc --- /dev/null +++ b/pppd/gencode.c @@ -0,0 +1,1298 @@ +/* From NetBSD: gencode.c,v 1.2 1995/03/06 11:38:21 mycroft Exp */ + +/* + * Copyright (c) 1990, 1991, 1992, 1993, 1994 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that: (1) source code distributions + * retain the above copyright notice and this paragraph in its entirety, (2) + * distributions including binary code include the above copyright notice and + * this paragraph in its entirety in the documentation or other materials + * provided with the distribution, and (3) all advertising materials mentioning + * features or use of this software display the following acknowledgement: + * ``This product includes software developed by the University of California, + * Lawrence Berkeley Laboratory and its contributors.'' Neither the name of + * the University nor the names of its contributors may be used to endorse + * or promote products derived from this software without specific prior + * written permission. + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. + */ +#ifndef lint +static char rcsid[] = + "@(#) Header: gencode.c,v 1.55 94/06/20 19:07:53 leres Exp (LBL)"; +#endif + +#include +#include +#include +#include +#include + +#include +#include + +#include "bpf_compile.h" +#include "gencode.h" + +#include +#ifdef __STDC__ +#include +#include +#else +#include +#endif + +#ifndef __GNUC__ +#define inline +#endif + +#define JMP(c) ((c)|BPF_JMP|BPF_K) + +static jmp_buf top_ctx; +static char errbuf[PCAP_ERRBUF_SIZE]; + +/* VARARGS */ +volatile void +#ifdef __STDC__ +bpf_error(char *fmt, ...) 
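+/* (Formats the message into errbuf, then longjmps back to the
+   setjmp(top_ctx) in bpf_compile(), so callers never see a return.) */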
+#else +bpf_error(fmt, va_alist) + char *fmt; + va_dcl +#endif +{ + va_list ap; + +#ifdef __STDC__ + va_start(ap, fmt); +#else + va_start(ap); +#endif + vsprintf(errbuf, fmt, ap); + va_end(ap); + longjmp(top_ctx, 1); + /* NOTREACHED */ +} + +char * +bpf_geterr() +{ + return errbuf; +} + +static void init_linktype(); + +static int alloc_reg(void); +static void free_reg(int); + +static struct block *root; + +/* + * We divy out chunks of memory rather than call malloc each time so + * we don't have to worry about leaking memory. It's probably + * not a big deal if all this memory was wasted but it this ever + * goes into a library that would probably not be a good idea. + */ +#define NCHUNKS 16 +#define CHUNK0SIZE 1024 +struct chunk { + u_int n_left; + void *m; +}; + +static struct chunk chunks[NCHUNKS]; +static int cur_chunk = -1; + +static void *newchunk(u_int); +static void freechunks(void); +static inline struct block *new_block(int); +static inline struct slist *new_stmt(int); +static struct block *gen_retblk(int); +static inline void syntax(void); + +static void backpatch(struct block *, struct block *); +static void merge(struct block *, struct block *); +static struct block *gen_cmp(u_int, u_int, long); +static struct block *gen_mcmp(u_int, u_int, long, u_long); +#if 0 +static struct block *gen_bcmp(u_int, u_int, u_char *); +#endif +static struct block *gen_uncond(int); +static inline struct block *gen_true(void); +static inline struct block *gen_false(void); +static struct block *gen_linktype(int); +static struct block *gen_hostop(u_long, u_long, int, int, u_int, u_int); +static struct block *gen_host(u_long, u_long, int, int); +static struct block *gen_ipfrag(void); +static struct block *gen_portatom(int, long); +struct block *gen_portop(int, int, int); +static struct block *gen_port(int, int, int); +static int lookup_proto(char *, int); +static struct block *gen_proto(int, int, int); +static u_long net_mask(u_long *); +static u_long net_mask(u_long *); +static struct slist *xfer_to_x(struct arth *); +static struct slist *xfer_to_a(struct arth *); +static struct block *gen_len(int, int); + +static void * +newchunk(n) + u_int n; +{ + struct chunk *cp; + int size; + + /* XXX Round up to nearest long. */ + n = (n + sizeof(long) - 1) & ~(sizeof(long) - 1); + + cp = &chunks[cur_chunk]; + if (cur_chunk < 0 || n > cp->n_left) { + if (++cur_chunk >= NCHUNKS) + bpf_error("out of memory"); + cp = &chunks[cur_chunk]; + size = CHUNK0SIZE << cur_chunk; + cp->m = (void *)malloc(size); + if (cp->m == 0 || n > size) + bpf_error("out of memory"); + memset((char *)cp->m, 0, size); + cp->n_left = size; + } + cp->n_left -= n; + return (void *)((char *)cp->m + cp->n_left); +} + +static void +freechunks() +{ + int i; + + for (i = 0; i < NCHUNKS; ++i) + if (chunks[i].m) + free(chunks[i].m); + cur_chunk = -1; +} + +/* + * A strdup whose allocations are freed after code generation is over. 
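+ * (The chunks are released by freechunks() at the end of bpf_compile(),
+ * so strings handed out here need not be freed individually.)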
+ */ +char * +sdup(s) + char *s; +{ + int n = strlen(s) + 1; + char *cp = newchunk(n); + strcpy(cp, s); + return (cp); +} + +static inline struct block * +new_block(code) + int code; +{ + struct block *p; + + p = (struct block *)newchunk(sizeof(*p)); + p->s.code = code; + p->head = p; + + return p; +} + +static inline struct slist * +new_stmt(code) + int code; +{ + struct slist *p; + + p = (struct slist *)newchunk(sizeof(*p)); + p->s.code = code; + + return p; +} + +static struct block * +gen_retblk(v) + int v; +{ + struct block *b = new_block(BPF_RET|BPF_K); + + b->s.k = v; + return b; +} + +static inline void +syntax() +{ + bpf_error("syntax error in filter expression"); +} + +static int snaplen; + +int +bpf_compile(program, buf, optimize) + struct bpf_program *program; + char *buf; + int optimize; +{ + extern int n_errors; + int len; + + if (setjmp(top_ctx)) + return (-1); + + snaplen = PPP_HDRLEN; + + lex_init(buf ? buf : ""); + init_linktype(); + pcap_parse(); + + if (n_errors) + syntax(); + + if (root == NULL) + root = gen_retblk(snaplen); + + if (optimize) { + bpf_optimize(&root); + if (root == NULL || + (root->s.code == (BPF_RET|BPF_K) && root->s.k == 0)) + bpf_error("expression rejects all packets"); + } + program->bf_insns = icode_to_fcode(root, &len); + program->bf_len = len; + + freechunks(); + return (0); +} + +/* + * Backpatch the blocks in 'list' to 'target'. The 'sense' field indicates + * which of the jt and jf fields has been resolved and which is a pointer + * back to another unresolved block (or nil). At least one of the fields + * in each block is already resolved. + */ +static void +backpatch(list, target) + struct block *list, *target; +{ + struct block *next; + + while (list) { + if (!list->sense) { + next = JT(list); + JT(list) = target; + } else { + next = JF(list); + JF(list) = target; + } + list = next; + } +} + +/* + * Merge the lists in b0 and b1, using the 'sense' field to indicate + * which of jt and jf is the link. + */ +static void +merge(b0, b1) + struct block *b0, *b1; +{ + register struct block **p = &b0; + + /* Find end of list. */ + while (*p) + p = !((*p)->sense) ? &JT(*p) : &JF(*p); + + /* Concatenate the lists. 
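+ * (E.g., gen_and() below calls merge(b1, b0) so that b0's still-unresolved
+ * exits are appended to b1's list, to be resolved by a later backpatch().)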
*/ + *p = b1; +} + +void +finish_parse(p) + struct block *p; +{ + backpatch(p, gen_retblk(snaplen)); + p->sense = !p->sense; + backpatch(p, gen_retblk(0)); + root = p->head; +} + +void +gen_and(b0, b1) + struct block *b0, *b1; +{ + backpatch(b0, b1->head); + b0->sense = !b0->sense; + b1->sense = !b1->sense; + merge(b1, b0); + b1->sense = !b1->sense; + b1->head = b0->head; +} + +void +gen_or(b0, b1) + struct block *b0, *b1; +{ + b0->sense = !b0->sense; + backpatch(b0, b1->head); + b0->sense = !b0->sense; + merge(b1, b0); + b1->head = b0->head; +} + +void +gen_not(b) + struct block *b; +{ + b->sense = !b->sense; +} + +static struct block * +gen_cmp(offset, size, v) + unsigned int offset, size; + long v; +{ + struct slist *s; + struct block *b; + + s = new_stmt(BPF_LD|BPF_ABS|size); + s->s.k = offset; + + b = new_block(JMP(BPF_JEQ)); + b->stmts = s; + b->s.k = v; + + return b; +} + +static struct block * +gen_mcmp(offset, size, v, mask) + unsigned int offset, size; + long v; + unsigned long mask; +{ + struct block *b = gen_cmp(offset, size, v); + struct slist *s; + + if (mask != 0xffffffff) { + s = new_stmt(BPF_ALU|BPF_AND|BPF_K); + s->s.k = mask; + b->stmts->next = s; + } + return b; +} + +#if 0 +static struct block * +gen_bcmp(offset, size, v) + unsigned int offset, size; + unsigned char *v; +{ + struct block *b, *tmp; + + b = NULL; + while (size >= 4) { + unsigned char *p = &v[size - 4]; + long w = (p[0] << 24) | (p[1] << 16) | (p[2] << 8) | p[3]; + tmp = gen_cmp(offset + size - 4, BPF_W, w); + if (b != NULL) + gen_and(b, tmp); + b = tmp; + size -= 4; + } + while (size >= 2) { + unsigned char *p = &v[size - 2]; + long w = (p[0] << 8) | p[1]; + tmp = gen_cmp(offset + size - 2, BPF_H, w); + if (b != NULL) + gen_and(b, tmp); + b = tmp; + size -= 2; + } + if (size > 0) { + tmp = gen_cmp(offset, BPF_B, (long)v[0]); + if (b != NULL) + gen_and(b, tmp); + b = tmp; + } + return b; +} +#endif + +/* + * Various code constructs need to know the layout of the data link + * layer. These variables give the necessary offsets. off_linktype + * is set to -1 for no encapsulation, in which case, IP is assumed. 
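+ * (For the PPP framing used here, init_linktype() puts the protocol
+ * halfword at offset 2 and the network-layer header at offset 4, i.e.
+ * after the address/control bytes: 0xff 0x03 0x00 0x21 for an IP frame.)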
+ */ +static unsigned int off_linktype; +static unsigned int off_nl; + +static void +init_linktype() +{ + off_linktype = 2; + off_nl = 4; +} + +static struct block * +gen_uncond(rsense) + int rsense; +{ + struct block *b; + struct slist *s; + + s = new_stmt(BPF_LD|BPF_IMM); + s->s.k = !rsense; + b = new_block(JMP(BPF_JEQ)); + b->stmts = s; + + return b; +} + +static inline struct block * +gen_true() +{ + return gen_uncond(1); +} + +static inline struct block * +gen_false() +{ + return gen_uncond(0); +} + +static struct block * +gen_linktype(proto) + int proto; +{ + return gen_cmp(off_linktype, BPF_H, (long)proto); +} + +static struct block * +gen_hostop(addr, mask, dir, proto, src_off, dst_off) + unsigned long addr; + unsigned long mask; + int dir, proto; + unsigned int src_off, dst_off; +{ + struct block *b0, *b1; + unsigned int offset; + + switch (dir) { + + case Q_SRC: + offset = src_off; + break; + + case Q_DST: + offset = dst_off; + break; + + case Q_AND: + b0 = gen_hostop(addr, mask, Q_SRC, proto, src_off, dst_off); + b1 = gen_hostop(addr, mask, Q_DST, proto, src_off, dst_off); + gen_and(b0, b1); + return b1; + + case Q_OR: + case Q_DEFAULT: + b0 = gen_hostop(addr, mask, Q_SRC, proto, src_off, dst_off); + b1 = gen_hostop(addr, mask, Q_DST, proto, src_off, dst_off); + gen_or(b0, b1); + return b1; + + default: + abort(); + } + b0 = gen_linktype(proto); + b1 = gen_mcmp(offset, BPF_W, (long)addr, mask); + gen_and(b0, b1); + return b1; +} + +static struct block * +gen_host(addr, mask, proto, dir) + unsigned long addr; + unsigned long mask; + int proto; + int dir; +{ + struct block *b0; + + switch (proto) { + + case Q_DEFAULT: + b0 = gen_host(addr, mask, Q_IP, dir); + return b0; + + case Q_IP: + return gen_hostop(addr, mask, dir, PPP_IP, + off_nl + 12, off_nl + 16); + + case Q_TCP: + bpf_error("'tcp' modifier applied to host"); + + case Q_UDP: + bpf_error("'udp' modifier applied to host"); + + case Q_ICMP: + bpf_error("'icmp' modifier applied to host"); + + default: + abort(); + } + /* NOTREACHED */ +} + +struct block * +gen_proto_abbrev(proto) + int proto; +{ + struct block *b0, *b1; + + switch (proto) { + + case Q_TCP: + b0 = gen_linktype(PPP_IP); + b1 = gen_cmp(off_nl + 9, BPF_B, (long)IPPROTO_TCP); + gen_and(b0, b1); + break; + + case Q_UDP: + b0 = gen_linktype(PPP_IP); + b1 = gen_cmp(off_nl + 9, BPF_B, (long)IPPROTO_UDP); + gen_and(b0, b1); + break; + + case Q_ICMP: + b0 = gen_linktype(PPP_IP); + b1 = gen_cmp(off_nl + 9, BPF_B, (long)IPPROTO_ICMP); + gen_and(b0, b1); + break; + + case Q_IP: + b1 = gen_linktype(PPP_IP); + break; + + case Q_LINK: + bpf_error("link layer applied in wrong context"); + + default: + abort(); + } + return b1; +} + +static struct block * +gen_ipfrag() +{ + struct slist *s; + struct block *b; + + /* not ip frag */ + s = new_stmt(BPF_LD|BPF_H|BPF_ABS); + s->s.k = off_nl + 6; + b = new_block(JMP(BPF_JSET)); + b->s.k = 0x1fff; + b->stmts = s; + gen_not(b); + + return b; +} + +static struct block * +gen_portatom(off, v) + int off; + long v; +{ + struct slist *s; + struct block *b; + + s = new_stmt(BPF_LDX|BPF_MSH|BPF_B); + s->s.k = off_nl; + + s->next = new_stmt(BPF_LD|BPF_IND|BPF_H); + s->next->s.k = off_nl + off; + + b = new_block(JMP(BPF_JEQ)); + b->stmts = s; + b->s.k = v; + + return b; +} + +struct block * +gen_portop(port, proto, dir) + int port, proto, dir; +{ + struct block *b0, *b1, *tmp; + + /* ip proto 'proto' */ + tmp = gen_cmp(off_nl + 9, BPF_B, (long)proto); + b0 = gen_ipfrag(); + gen_and(tmp, b0); + + switch (dir) { + case Q_SRC: + b1 = 
gen_portatom(0, (long)port); + break; + + case Q_DST: + b1 = gen_portatom(2, (long)port); + break; + + case Q_OR: + case Q_DEFAULT: + tmp = gen_portatom(0, (long)port); + b1 = gen_portatom(2, (long)port); + gen_or(tmp, b1); + break; + + case Q_AND: + tmp = gen_portatom(0, (long)port); + b1 = gen_portatom(2, (long)port); + gen_and(tmp, b1); + break; + + default: + abort(); + } + gen_and(b0, b1); + + return b1; +} + +static struct block * +gen_port(port, ip_proto, dir) + int port; + int ip_proto; + int dir; +{ + struct block *b0, *b1, *tmp; + + /* PPP proto ip */ + b0 = gen_linktype(PPP_IP); + + switch (ip_proto) { + case IPPROTO_UDP: + case IPPROTO_TCP: + b1 = gen_portop(port, ip_proto, dir); + break; + + case PROTO_UNDEF: + tmp = gen_portop(port, IPPROTO_TCP, dir); + b1 = gen_portop(port, IPPROTO_UDP, dir); + gen_or(tmp, b1); + break; + + default: + abort(); + } + gen_and(b0, b1); + return b1; +} + +static int +lookup_proto(name, proto) + char *name; + int proto; +{ + int v; + + switch (proto) { + case Q_DEFAULT: + case Q_IP: + v = pcap_nametoproto(name); + if (v == PROTO_UNDEF) + bpf_error("unknown ip proto '%s'", name); + break; + + case Q_LINK: + /* XXX should look up h/w protocol type based on linktype */ + v = pcap_nametopppproto(name); + if (v == PROTO_UNDEF) + bpf_error("unknown PPP proto '%s'", name); + break; + + default: + v = PROTO_UNDEF; + break; + } + return v; +} + +static struct block * +gen_proto(v, proto, dir) + int v; + int proto; + int dir; +{ + struct block *b0, *b1; + + if (dir != Q_DEFAULT) + bpf_error("direction applied to 'proto'"); + + switch (proto) { + case Q_DEFAULT: + case Q_IP: + b0 = gen_linktype(PPP_IP); + b1 = gen_cmp(off_nl + 9, BPF_B, (long)v); + gen_and(b0, b1); + return b1; + + case Q_LINK: + return gen_linktype(v); + + case Q_UDP: + bpf_error("'udp proto' is bogus"); + /* NOTREACHED */ + + case Q_TCP: + bpf_error("'tcp proto' is bogus"); + /* NOTREACHED */ + + case Q_ICMP: + bpf_error("'icmp proto' is bogus"); + /* NOTREACHED */ + + default: + abort(); + /* NOTREACHED */ + } + /* NOTREACHED */ +} + +/* + * Left justify 'addr' and return its resulting network mask. 
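+ * (E.g., an addr of 0x0a01 from getnetbyname() is shifted left to
+ * 0x0a010000 and the returned mask is 0xffff0000 -- illustrative values.)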
+ */ +static unsigned long +net_mask(addr) + unsigned long *addr; +{ + register unsigned long m = 0xffffffff; + + if (*addr) + while ((*addr & 0xff000000) == 0) + *addr <<= 8, m <<= 8; + + return m; +} + +struct block * +gen_scode(name, q) + char *name; + struct qual q; +{ + int proto = q.proto; + int dir = q.dir; + unsigned long mask, addr, **alist; + struct block *b, *tmp; + int port, real_proto; + + switch (q.addr) { + + case Q_NET: + addr = pcap_nametonetaddr(name); + if (addr == 0) + bpf_error("unknown network '%s'", name); + mask = net_mask(&addr); + return gen_host(addr, mask, proto, dir); + + case Q_DEFAULT: + case Q_HOST: + if (proto == Q_LINK) { + bpf_error("link-level host name not supported"); + break; + } else { + alist = pcap_nametoaddr(name); + if (alist == NULL || *alist == NULL) + bpf_error("unknown host '%s'", name); + b = gen_host(**alist++, 0xffffffffL, proto, dir); + while (*alist) { + tmp = gen_host(**alist++, 0xffffffffL, + proto, dir); + gen_or(b, tmp); + b = tmp; + } + return b; + } + + case Q_PORT: + if (proto != Q_DEFAULT && proto != Q_UDP && proto != Q_TCP) + bpf_error("illegal qualifier of 'port'"); + if (pcap_nametoport(name, &port, &real_proto) == 0) + bpf_error("unknown port '%s'", name); + if (proto == Q_UDP) { + if (real_proto == IPPROTO_TCP) + bpf_error("port '%s' is tcp", name); + else + /* override PROTO_UNDEF */ + real_proto = IPPROTO_UDP; + } + if (proto == Q_TCP) { + if (real_proto == IPPROTO_UDP) + bpf_error("port '%s' is udp", name); + else + /* override PROTO_UNDEF */ + real_proto = IPPROTO_TCP; + } + return gen_port(port, real_proto, dir); + + case Q_PROTO: + real_proto = lookup_proto(name, proto); + if (real_proto >= 0) + return gen_proto(real_proto, proto, dir); + else + bpf_error("unknown protocol: %s", name); + + case Q_UNDEF: + syntax(); + /* NOTREACHED */ + } + abort(); + /* NOTREACHED */ +} + +struct block * +gen_ncode(v, q) + unsigned long v; + struct qual q; +{ + unsigned long mask; + int proto = q.proto; + int dir = q.dir; + + switch (q.addr) { + + case Q_DEFAULT: + case Q_HOST: + case Q_NET: + if (proto == Q_LINK) { + bpf_error("illegal link layer address"); + } else { + mask = net_mask(&v); + return gen_host(v, mask, proto, dir); + } + + case Q_PORT: + if (proto == Q_UDP) + proto = IPPROTO_UDP; + else if (proto == Q_TCP) + proto = IPPROTO_TCP; + else if (proto == Q_DEFAULT) + proto = PROTO_UNDEF; + else + bpf_error("illegal qualifier of 'port'"); + + return gen_port((int)v, proto, dir); + + case Q_PROTO: + return gen_proto((int)v, proto, dir); + + case Q_UNDEF: + syntax(); + /* NOTREACHED */ + + default: + abort(); + /* NOTREACHED */ + } + /* NOTREACHED */ +} + +void +sappend(s0, s1) + struct slist *s0, *s1; +{ + /* + * This is definitely not the best way to do this, but the + * lists will rarely get long. 
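+ * (A linear-time append; gen_load() and gen_relation() below chain only
+ * a handful of stmts per call.)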
+ */ + while (s0->next) + s0 = s0->next; + s0->next = s1; +} + +static struct slist * +xfer_to_x(a) + struct arth *a; +{ + struct slist *s; + + s = new_stmt(BPF_LDX|BPF_MEM); + s->s.k = a->regno; + return s; +} + +static struct slist * +xfer_to_a(a) + struct arth *a; +{ + struct slist *s; + + s = new_stmt(BPF_LD|BPF_MEM); + s->s.k = a->regno; + return s; +} + +struct arth * +gen_load(proto, index, size) + int proto; + struct arth *index; + int size; +{ + struct slist *s, *tmp; + struct block *b; + int regno = alloc_reg(); + + free_reg(index->regno); + switch (size) { + + default: + bpf_error("data size must be 1, 2, or 4"); + + case 1: + size = BPF_B; + break; + + case 2: + size = BPF_H; + break; + + case 4: + size = BPF_W; + break; + } + switch (proto) { + default: + bpf_error("unsupported index operation"); + + case Q_LINK: + s = xfer_to_x(index); + tmp = new_stmt(BPF_LD|BPF_IND|size); + sappend(s, tmp); + sappend(index->s, s); + break; + + case Q_IP: + /* XXX Note that we assume a fixed link link header here. */ + s = xfer_to_x(index); + tmp = new_stmt(BPF_LD|BPF_IND|size); + tmp->s.k = off_nl; + sappend(s, tmp); + sappend(index->s, s); + + b = gen_proto_abbrev(proto); + if (index->b) + gen_and(index->b, b); + index->b = b; + break; + + case Q_TCP: + case Q_UDP: + case Q_ICMP: + s = new_stmt(BPF_LDX|BPF_MSH|BPF_B); + s->s.k = off_nl; + sappend(s, xfer_to_a(index)); + sappend(s, new_stmt(BPF_ALU|BPF_ADD|BPF_X)); + sappend(s, new_stmt(BPF_MISC|BPF_TAX)); + sappend(s, tmp = new_stmt(BPF_LD|BPF_IND|size)); + tmp->s.k = off_nl; + sappend(index->s, s); + + gen_and(gen_proto_abbrev(proto), b = gen_ipfrag()); + if (index->b) + gen_and(index->b, b); + index->b = b; + break; + } + index->regno = regno; + s = new_stmt(BPF_ST); + s->s.k = regno; + sappend(index->s, s); + + return index; +} + +struct block * +gen_relation(code, a0, a1, reversed) + int code; + struct arth *a0, *a1; + int reversed; +{ + struct slist *s0, *s1, *s2; + struct block *b, *tmp; + + s0 = xfer_to_x(a1); + s1 = xfer_to_a(a0); + s2 = new_stmt(BPF_ALU|BPF_SUB|BPF_X); + b = new_block(JMP(code)); + if (reversed) + gen_not(b); + + sappend(s1, s2); + sappend(s0, s1); + sappend(a1->s, s0); + sappend(a0->s, a1->s); + + b->stmts = a0->s; + + free_reg(a0->regno); + free_reg(a1->regno); + + /* 'and' together protocol checks */ + if (a0->b) { + if (a1->b) { + gen_and(a0->b, tmp = a1->b); + } + else + tmp = a0->b; + } else + tmp = a1->b; + + if (tmp) + gen_and(tmp, b); + + return b; +} + +struct arth * +gen_loadlen() +{ + int regno = alloc_reg(); + struct arth *a = (struct arth *)newchunk(sizeof(*a)); + struct slist *s; + + s = new_stmt(BPF_LD|BPF_LEN); + s->next = new_stmt(BPF_ST); + s->next->s.k = regno; + a->s = s; + a->regno = regno; + + return a; +} + +struct arth * +gen_loadi(val) + int val; +{ + struct arth *a; + struct slist *s; + int reg; + + a = (struct arth *)newchunk(sizeof(*a)); + + reg = alloc_reg(); + + s = new_stmt(BPF_LD|BPF_IMM); + s->s.k = val; + s->next = new_stmt(BPF_ST); + s->next->s.k = reg; + a->s = s; + a->regno = reg; + + return a; +} + +struct arth * +gen_neg(a) + struct arth *a; +{ + struct slist *s; + + s = xfer_to_a(a); + sappend(a->s, s); + s = new_stmt(BPF_ALU|BPF_NEG); + s->s.k = 0; + sappend(a->s, s); + s = new_stmt(BPF_ST); + s->s.k = a->regno; + sappend(a->s, s); + + return a; +} + +struct arth * +gen_arth(code, a0, a1) + int code; + struct arth *a0, *a1; +{ + struct slist *s0, *s1, *s2; + + s0 = xfer_to_x(a1); + s1 = xfer_to_a(a0); + s2 = new_stmt(BPF_ALU|BPF_X|code); + + sappend(s1, s2); + sappend(s0, 
s1); + sappend(a1->s, s0); + sappend(a0->s, a1->s); + + free_reg(a1->regno); + + s0 = new_stmt(BPF_ST); + a0->regno = s0->s.k = alloc_reg(); + sappend(a0->s, s0); + + return a0; +} + +/* + * Here we handle simple allocation of the scratch registers. + * If too many registers are alloc'd, the allocator punts. + */ +static int regused[BPF_MEMWORDS]; +static int curreg; + +/* + * Return the next free register. + */ +static int +alloc_reg() +{ + int n = BPF_MEMWORDS; + + while (--n >= 0) { + if (regused[curreg]) + curreg = (curreg + 1) % BPF_MEMWORDS; + else { + regused[curreg] = 1; + return curreg; + } + } + bpf_error("too many registers needed to evaluate expression"); + /* NOTREACHED */ +} + +/* + * Return a register to the table so it can + * be used later. + */ +static void +free_reg(n) + int n; +{ + regused[n] = 0; +} + +static struct block * +gen_len(jmp, n) + int jmp, n; +{ + struct slist *s; + struct block *b; + + s = new_stmt(BPF_LD|BPF_LEN); + s->next = new_stmt(BPF_ALU|BPF_SUB|BPF_K); + s->next->s.k = n; + b = new_block(JMP(jmp)); + b->stmts = s; + + return b; +} + +struct block * +gen_greater(n) + int n; +{ + return gen_len(BPF_JGE, n); +} + +struct block * +gen_less(n) + int n; +{ + struct block *b; + + b = gen_len(BPF_JGT, n); + gen_not(b); + + return b; +} + +struct block * +gen_byteop(op, idx, val) + int op, idx, val; +{ + struct block *b; + struct slist *s; + + switch (op) { + default: + abort(); + + case '=': + return gen_cmp((unsigned int)idx, BPF_B, (long)val); + + case '<': + b = gen_cmp((unsigned int)idx, BPF_B, (long)val); + b->s.code = JMP(BPF_JGE); + gen_not(b); + return b; + + case '>': + b = gen_cmp((unsigned int)idx, BPF_B, (long)val); + b->s.code = JMP(BPF_JGT); + return b; + + case '|': + s = new_stmt(BPF_ALU|BPF_OR|BPF_K); + break; + + case '&': + s = new_stmt(BPF_ALU|BPF_AND|BPF_K); + break; + } + s->s.k = val; + b = new_block(JMP(BPF_JEQ)); + b->stmts = s; + gen_not(b); + + return b; +} + +struct block * +gen_broadcast(proto) + int proto; +{ + bpf_error("broadcast not supported"); +} + +struct block * +gen_multicast(proto) + int proto; +{ + register struct block *b0, *b1; + + switch (proto) { + case Q_DEFAULT: + case Q_IP: + b0 = gen_linktype(PPP_IP); + b1 = gen_cmp(off_nl + 16, BPF_B, (long)224); + b1->s.code = JMP(BPF_JGE); + gen_and(b0, b1); + return b1; + } + bpf_error("only IP multicast filters supported"); +} + +/* + * generate command for inbound/outbound. It's here so we can + * make it link-type specific. 'dir' = 0 implies "inbound", + * = 1 implies "outbound". + */ +struct block * +gen_inbound(dir) + int dir; +{ + register struct block *b0; + + b0 = gen_relation(BPF_JEQ, + gen_load(Q_LINK, gen_loadi(0), 1), + gen_loadi(0), + dir); + return (b0); +} diff --git a/pppd/gencode.h b/pppd/gencode.h new file mode 100644 index 0000000..72d4cc3 --- /dev/null +++ b/pppd/gencode.h @@ -0,0 +1,165 @@ +/* From NetBSD: gencode.h,v 1.2 1995/03/06 11:38:24 mycroft Exp */ + +/* + * Copyright (c) 1990, 1991, 1992, 1993, 1994 + * The Regents of the University of California. All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that: (1) source code distributions + * retain the above copyright notice and this paragraph in its entirety, (2) + * distributions including binary code include the above copyright notice and + * this paragraph in its entirety in the documentation or other materials + * provided with the distribution, and (3) all advertising materials mentioning + * features or use of this software display the following acknowledgement: + * ``This product includes software developed by the University of California, + * Lawrence Berkeley Laboratory and its contributors.'' Neither the name of + * the University nor the names of its contributors may be used to endorse + * or promote products derived from this software without specific prior + * written permission. + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. + * + * @(#) Header: gencode.h,v 1.20 94/06/12 14:29:30 leres Exp (LBL) + */ + +/* $Id: gencode.h,v 1.1 1996/04/04 04:22:26 paulus Exp $ */ + +/* Address qualifers. */ + +#define Q_HOST 1 +#define Q_NET 2 +#define Q_PORT 3 +#define Q_PROTO 4 + +/* Protocol qualifiers. */ + +#define Q_LINK 1 +#define Q_IP 2 +#define Q_TCP 3 +#define Q_UDP 4 +#define Q_ICMP 5 + +/* Directional qualifers. */ + +#define Q_SRC 1 +#define Q_DST 2 +#define Q_OR 3 +#define Q_AND 4 + +#define Q_DEFAULT 0 +#define Q_UNDEF 255 + +struct stmt { + int code; + long k; +}; + +struct slist { + struct stmt s; + struct slist *next; +}; + +/* + * A bit vector to represent definition sets. We assume TOT_REGISTERS + * is smaller than 8*sizeof(atomset). + */ +typedef unsigned long atomset; +#define ATOMMASK(n) (1 << (n)) +#define ATOMELEM(d, n) (d & ATOMMASK(n)) + +/* + * An unbounded set. + */ +typedef unsigned long *uset; + +/* + * Total number of atomic entities, including accumulator (A) and index (X). + * We treat all these guys similarly during flow analysis. 
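+ * (With the usual BPF_MEMWORDS of 16, atoms 0-15 are the scratch words
+ * M[0..15], atom 16 is A and atom 17 is X; see A_ATOM and X_ATOM in the
+ * optimizer.)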
+ */ +#define N_ATOMS (BPF_MEMWORDS+2) + +struct edge { + int id; + int code; + uset edom; + struct block *succ; + struct block *pred; + struct edge *next; /* link list of incoming edges for a node */ +}; + +struct block { + int id; + struct slist *stmts; /* side effect stmts */ + struct stmt s; /* branch stmt */ + int mark; + int level; + int offset; + int sense; + struct edge et; + struct edge ef; + struct block *head; + struct block *link; /* link field used by optimizer */ + uset dom; + uset closure; + struct edge *in_edges; + atomset def, kill; + atomset in_use; + atomset out_use; + long oval; + long val[N_ATOMS]; +}; + +struct arth { + struct block *b; /* protocol checks */ + struct slist *s; /* stmt list */ + int regno; /* virtual register number of result */ +}; + +struct qual { + unsigned char addr; + unsigned char proto; + unsigned char dir; + unsigned char pad; +}; + +#ifndef __GNUC__ +#define volatile +#endif + +struct arth *gen_loadi __P((int)); +struct arth *gen_load __P((int, struct arth *, int)); +struct arth *gen_loadlen __P((void)); +struct arth *gen_neg __P((struct arth *)); +struct arth *gen_arth __P((int, struct arth *, struct arth *)); + +void gen_and __P((struct block *, struct block *)); +void gen_or __P((struct block *, struct block *)); +void gen_not __P((struct block *)); + +struct block *gen_scode __P((char *, struct qual)); +struct block *gen_ecode __P((unsigned char *, struct qual)); +struct block *gen_ncode __P((unsigned long, struct qual)); +struct block *gen_proto_abbrev __P((int)); +struct block *gen_relation __P((int, struct arth *, struct arth *, int)); +struct block *gen_less __P((int)); +struct block *gen_greater __P((int)); +struct block *gen_byteop __P((int, int, int)); +struct block *gen_broadcast __P((int)); +struct block *gen_multicast __P((int)); +struct block *gen_inbound __P((int)); + +void bpf_optimize __P((struct block **)); +volatile void bpf_error __P((char *, ...)); + +void finish_parse __P((struct block *)); +char *sdup __P((char *)); + +struct bpf_insn *icode_to_fcode __P((struct block *, int *)); +int pcap_parse __P((void)); +void lex_init __P((char *)); +void sappend __P((struct slist *, struct slist *)); + +/* XXX */ +#define JT(b) ((b)->et.succ) +#define JF(b) ((b)->ef.succ) diff --git a/pppd/grammar.y b/pppd/grammar.y new file mode 100644 index 0000000..4b4dd46 --- /dev/null +++ b/pppd/grammar.y @@ -0,0 +1,233 @@ +%{ +/* From NetBSD: grammar.y,v 1.2 1995/03/06 11:38:27 mycroft Exp */ + +/* + * Copyright (c) 1988, 1989, 1990, 1991, 1992, 1993, 1994 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that: (1) source code distributions + * retain the above copyright notice and this paragraph in its entirety, (2) + * distributions including binary code include the above copyright notice and + * this paragraph in its entirety in the documentation or other materials + * provided with the distribution, and (3) all advertising materials mentioning + * features or use of this software display the following acknowledgement: + * ``This product includes software developed by the University of California, + * Lawrence Berkeley Laboratory and its contributors.'' Neither the name of + * the University nor the names of its contributors may be used to endorse + * or promote products derived from this software without specific prior + * written permission. 
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. + * + */ +#ifndef lint +static char rcsid[] = + "@(#) Header: grammar.y,v 1.39 94/06/14 20:09:25 leres Exp (LBL)"; +#endif + +#include +#include +#include +#include + +#include +#include "bpf_compile.h" +#include "gencode.h" + +#define QSET(q, p, d, a) (q).proto = (p),\ + (q).dir = (d),\ + (q).addr = (a) + +int n_errors = 0; + +static struct qual qerr = { Q_UNDEF, Q_UNDEF, Q_UNDEF, Q_UNDEF }; + +static void +yyerror(char *msg) +{ + ++n_errors; + bpf_error(msg); + /* NOTREACHED */ +} + +#ifndef YYBISON +int +pcap_parse() +{ + extern int yyparse(); + return (yyparse()); +} +#endif + +%} + +%union { + int i; + unsigned long h; + char *s; + struct stmt *stmt; + struct arth *a; + struct { + struct qual q; + struct block *b; + } blk; + struct block *rblk; +} + +%type expr id nid pid term rterm qid +%type head +%type pqual dqual aqual +%type arth narth +%type byteop pname pnum relop irelop +%type and or paren not null prog +%type other + +%token DST SRC HOST +%token NET PORT LESS GREATER PROTO BYTE +%token IP TCP UDP ICMP +%token TK_BROADCAST TK_MULTICAST +%token NUM INBOUND OUTBOUND +%token LINK +%token GEQ LEQ NEQ +%token ID HID +%token LSH RSH +%token LEN + +%type ID +%type HID +%type NUM + +%left OR AND +%nonassoc '!' +%left '|' +%left '&' +%left LSH RSH +%left '+' '-' +%left '*' '/' +%nonassoc UMINUS +%% +prog: null expr +{ + finish_parse($2.b); +} + | null + ; +null: /* null */ { $$.q = qerr; } + ; +expr: term + | expr and term { gen_and($1.b, $3.b); $$ = $3; } + | expr and id { gen_and($1.b, $3.b); $$ = $3; } + | expr or term { gen_or($1.b, $3.b); $$ = $3; } + | expr or id { gen_or($1.b, $3.b); $$ = $3; } + ; +and: AND { $$ = $0; } + ; +or: OR { $$ = $0; } + ; +id: nid + | pnum { $$.b = gen_ncode((unsigned long)$1, + $$.q = $0.q); } + | paren pid ')' { $$ = $2; } + ; +nid: ID { $$.b = gen_scode($1, $$.q = $0.q); } + | HID { $$.b = gen_ncode(__pcap_atoin((char *)$1), + $$.q); } + | not id { gen_not($2.b); $$ = $2; } + ; +not: '!' 
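+		/* $0 refers to the value-stack slot just below this rule's
+		   first symbol, i.e. the qualifier context to the left --
+		   a standard (if obscure) yacc idiom, used here to thread
+		   struct qual through the and/or/not/paren rules. */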
{ $$ = $0; } + ; +paren: '(' { $$ = $0; } + ; +pid: nid + | qid and id { gen_and($1.b, $3.b); $$ = $3; } + | qid or id { gen_or($1.b, $3.b); $$ = $3; } + ; +qid: pnum { $$.b = gen_ncode((unsigned long)$1, + $$.q = $0.q); } + | pid + ; +term: rterm + | not term { gen_not($2.b); $$ = $2; } + ; +head: pqual dqual aqual { QSET($$.q, $1, $2, $3); } + | pqual dqual { QSET($$.q, $1, $2, Q_DEFAULT); } + | pqual aqual { QSET($$.q, $1, Q_DEFAULT, $2); } + | pqual PROTO { QSET($$.q, $1, Q_DEFAULT, Q_PROTO); } + ; +rterm: head id { $$ = $2; } + | paren expr ')' { $$.b = $2.b; $$.q = $1.q; } + | pname { $$.b = gen_proto_abbrev($1); $$.q = qerr; } + | arth relop arth { $$.b = gen_relation($2, $1, $3, 0); + $$.q = qerr; } + | arth irelop arth { $$.b = gen_relation($2, $1, $3, 1); + $$.q = qerr; } + | other { $$.b = $1; $$.q = qerr; } + ; +/* protocol level qualifiers */ +pqual: pname + | { $$ = Q_DEFAULT; } + ; +/* 'direction' qualifiers */ +dqual: SRC { $$ = Q_SRC; } + | DST { $$ = Q_DST; } + | SRC OR DST { $$ = Q_OR; } + | DST OR SRC { $$ = Q_OR; } + | SRC AND DST { $$ = Q_AND; } + | DST AND SRC { $$ = Q_AND; } + ; +/* address type qualifiers */ +aqual: HOST { $$ = Q_HOST; } + | NET { $$ = Q_NET; } + | PORT { $$ = Q_PORT; } + ; +pname: LINK { $$ = Q_LINK; } + | IP { $$ = Q_IP; } + | TCP { $$ = Q_TCP; } + | UDP { $$ = Q_UDP; } + | ICMP { $$ = Q_ICMP; } + ; +other: pqual TK_BROADCAST { $$ = gen_broadcast($1); } + | pqual TK_MULTICAST { $$ = gen_multicast($1); } + | LESS NUM { $$ = gen_less($2); } + | GREATER NUM { $$ = gen_greater($2); } + | BYTE NUM byteop NUM { $$ = gen_byteop($3, $2, $4); } + | INBOUND { $$ = gen_inbound(0); } + | OUTBOUND { $$ = gen_inbound(1); } + ; +relop: '>' { $$ = BPF_JGT; } + | GEQ { $$ = BPF_JGE; } + | '=' { $$ = BPF_JEQ; } + ; +irelop: LEQ { $$ = BPF_JGT; } + | '<' { $$ = BPF_JGE; } + | NEQ { $$ = BPF_JEQ; } + ; +arth: pnum { $$ = gen_loadi($1); } + | narth + ; +narth: pname '[' arth ']' { $$ = gen_load($1, $3, 1); } + | pname '[' arth ':' NUM ']' { $$ = gen_load($1, $3, $5); } + | arth '+' arth { $$ = gen_arth(BPF_ADD, $1, $3); } + | arth '-' arth { $$ = gen_arth(BPF_SUB, $1, $3); } + | arth '*' arth { $$ = gen_arth(BPF_MUL, $1, $3); } + | arth '/' arth { $$ = gen_arth(BPF_DIV, $1, $3); } + | arth '&' arth { $$ = gen_arth(BPF_AND, $1, $3); } + | arth '|' arth { $$ = gen_arth(BPF_OR, $1, $3); } + | arth LSH arth { $$ = gen_arth(BPF_LSH, $1, $3); } + | arth RSH arth { $$ = gen_arth(BPF_RSH, $1, $3); } + | '-' arth %prec UMINUS { $$ = gen_neg($2); } + | paren narth ')' { $$ = $2; } + | LEN { $$ = gen_loadlen(); } + ; +byteop: '&' { $$ = '&'; } + | '|' { $$ = '|'; } + | '<' { $$ = '<'; } + | '>' { $$ = '>'; } + | '=' { $$ = '='; } + ; +pnum: NUM + | paren pnum ')' { $$ = $2; } + ; +%% diff --git a/pppd/nametoaddr.c b/pppd/nametoaddr.c new file mode 100644 index 0000000..c8ac85e --- /dev/null +++ b/pppd/nametoaddr.c @@ -0,0 +1,209 @@ +/* From NetBSD: nametoaddr.c,v 1.3 1995/04/29 05:42:23 cgd Exp */ + +/* + * Copyright (c) 1990, 1991, 1992, 1993, 1994 + * The Regents of the University of California. All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that: (1) source code distributions + * retain the above copyright notice and this paragraph in its entirety, (2) + * distributions including binary code include the above copyright notice and + * this paragraph in its entirety in the documentation or other materials + * provided with the distribution, and (3) all advertising materials mentioning + * features or use of this software display the following acknowledgement: + * ``This product includes software developed by the University of California, + * Lawrence Berkeley Laboratory and its contributors.'' Neither the name of + * the University nor the names of its contributors may be used to endorse + * or promote products derived from this software without specific prior + * written permission. + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. + * + * Name to id translation routines used by the scanner. + * These functions are not time critical. + */ + +#ifndef lint +static char rcsid[] = + "@(#) Header: nametoaddr.c,v 1.21 94/06/20 19:07:54 leres Exp (LBL)"; +#endif + +#include +#ifdef __NetBSD__ +#include +#include +#endif +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "bpf_compile.h" +#include "gencode.h" + +#ifndef __GNUC__ +#define inline +#endif + +#ifndef NTOHL +#define NTOHL(x) (x) = ntohl(x) +#define NTOHS(x) (x) = ntohs(x) +#endif + +/* + * Convert host name to internet address. + * Return 0 upon failure. + */ +u_long ** +pcap_nametoaddr(const char *name) +{ +#ifndef h_addr + static u_long *hlist[2]; +#endif + u_long **p; + struct hostent *hp; + + if ((hp = gethostbyname(name)) != NULL) { +#ifndef h_addr + hlist[0] = (u_long *)hp->h_addr; + NTOHL(hp->h_addr); + return hlist; +#else + for (p = (u_long **)hp->h_addr_list; *p; ++p) + NTOHL(**p); + return (u_long **)hp->h_addr_list; +#endif + } + else + return 0; +} + +/* + * Convert net name to internet address. + * Return 0 upon failure. + */ +u_long +pcap_nametonetaddr(const char *name) +{ + struct netent *np; + + if ((np = getnetbyname(name)) != NULL) + return np->n_net; + else + return 0; +} + +/* + * Convert a port name to its port and protocol numbers. + * We assume only TCP or UDP. + * Return 0 upon failure. + */ +int +pcap_nametoport(const char *name, int *port, int *proto) +{ + struct servent *sp; + char *other; + + sp = getservbyname(name, (char *)0); + if (sp != NULL) { + NTOHS(sp->s_port); + *port = sp->s_port; + *proto = pcap_nametoproto(sp->s_proto); + /* + * We need to check /etc/services for ambiguous entries. + * If we find the ambiguous entry, and it has the + * same port number, change the proto to PROTO_UNDEF + * so both TCP and UDP will be checked. + */ + if (*proto == IPPROTO_TCP) + other = "udp"; + else + other = "tcp"; + + sp = getservbyname(name, other); + if (sp != 0) { + NTOHS(sp->s_port); + if (*port != sp->s_port) + /* Can't handle ambiguous names that refer + to different port numbers. 
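+				   E.g., if a (hypothetical) service 'foo'
+				   were listed as 100/tcp and 200/udp, the
+				   first port found is kept; the proto is
+				   still widened to PROTO_UNDEF below.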
*/ +#ifdef notdef + warning("ambiguous port %s in /etc/services", + name); +#else + ; +#endif + *proto = PROTO_UNDEF; + } + return 1; + } +#if defined(ultrix) || defined(__osf__) + /* Special hack in case NFS isn't in /etc/services */ + if (strcmp(name, "nfs") == 0) { + *port = 2049; + *proto = PROTO_UNDEF; + return 1; + } +#endif + return 0; +} + +int +pcap_nametoproto(const char *str) +{ + struct protoent *p; + + p = getprotobyname(str); + if (p != 0) + return p->p_proto; + else + return PROTO_UNDEF; +} + +u_long +__pcap_atoin(const char *s) +{ + u_long addr = 0; + u_int n; + + while (1) { + n = 0; + while (*s && *s != '.') + n = n * 10 + *s++ - '0'; + addr <<= 8; + addr |= n & 0xff; + if (*s == '\0') + return addr; + ++s; + } + /* NOTREACHED */ +} + +struct pppproto { + char *s; + u_short p; +}; + +/* Static data base of PPP protocol types. */ +struct pppproto pppproto_db[] = { + { "ip", PPP_IP }, + { (char *)0, 0 } +}; + +int +pcap_nametopppproto(const char *s) +{ + struct pppproto *p = pppproto_db; + + while (p->s != 0) { + if (strcmp(p->s, s) == 0) + return p->p; + p += 1; + } + return PROTO_UNDEF; +} diff --git a/pppd/optimize.c b/pppd/optimize.c new file mode 100644 index 0000000..cb11949 --- /dev/null +++ b/pppd/optimize.c @@ -0,0 +1,1929 @@ +/* From NetBSD: optimize.c,v 1.3 1995/04/29 05:42:28 cgd Exp */ + +/* + * Copyright (c) 1988, 1989, 1990, 1991, 1993, 1994 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that: (1) source code distributions + * retain the above copyright notice and this paragraph in its entirety, (2) + * distributions including binary code include the above copyright notice and + * this paragraph in its entirety in the documentation or other materials + * provided with the distribution, and (3) all advertising materials mentioning + * features or use of this software display the following acknowledgement: + * ``This product includes software developed by the University of California, + * Lawrence Berkeley Laboratory and its contributors.'' Neither the name of + * the University nor the names of its contributors may be used to endorse + * or promote products derived from this software without specific prior + * written permission. + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. + * + * Optimization module for tcpdump intermediate representation. + */ +#ifndef lint +static char rcsid[] = + "@(#) Header: optimize.c,v 1.45 94/06/20 19:07:55 leres Exp (LBL)"; +#endif + +#include +#include + +#include +#include + +#include +#ifdef __osf__ +#include +#include +#endif +#ifdef __NetBSD__ +#include +#endif +#include + +#include "gencode.h" + +#ifndef __GNUC__ +#define inline +#endif + +#define A_ATOM BPF_MEMWORDS +#define X_ATOM (BPF_MEMWORDS+1) + +#define NOP -1 + +/* + * This define is used to represent *both* the accumulator and + * x register in use-def computations. + * Currently, the use-def code assumes only one definition per instruction. + */ +#define AX_ATOM N_ATOMS + +/* + * A flag to indicate that further optimization is needed. + * Iterative passes are continued until a given pass yields no + * branch movement. + */ +static int done; + +/* + * A block is marked if only if its mark equals the current mark. 
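+ * (That is, after unMarkAll() every block reads as unmarked until Mark()
+ * is applied to it again.)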
+ * Rather than traverse the code array, marking each item, 'cur_mark' is + * incremented. This automatically makes each element unmarked. + */ +static int cur_mark; +#define isMarked(p) ((p)->mark == cur_mark) +#define unMarkAll() cur_mark += 1 +#define Mark(p) ((p)->mark = cur_mark) + +static void opt_init(struct block *); +static void opt_cleanup(void); + +static void make_marks(struct block *); +static void mark_code(struct block *); + +static void intern_blocks(struct block *); + +static int eq_slist(struct slist *, struct slist *); + +static void find_levels_r(struct block *); + +static void find_levels(struct block *); +static void find_dom(struct block *); +static void propedom(struct edge *); +static void find_edom(struct block *); +static void find_closure(struct block *); +static int atomuse(struct stmt *); +static int atomdef(struct stmt *); +static void compute_local_ud(struct block *); +static void find_ud(struct block *); +static void init_val(void); +static long F(int, long, long); +static inline void vstore(struct stmt *, long *, long, int); +static void opt_blk(struct block *, int); +static int use_conflict(struct block *, struct block *); +static void opt_j(struct edge *); +static void or_pullup(struct block *); +static void and_pullup(struct block *); +static void opt_blks(struct block *, int); +static inline void link_inedge(struct edge *, struct block *); +static void find_inedges(struct block *); +static void opt_root(struct block **); +static void opt_loop(struct block *, int); +static void fold_op(struct stmt *, long, long); +static inline struct slist *this_op(struct slist *); +static void opt_not(struct block *); +static void opt_peep(struct block *); +static void opt_stmt(struct stmt *, long[], int); +static void deadstmt(struct stmt *, struct stmt *[]); +static void opt_deadstores(struct block *); +static void opt_blk(struct block *, int); +static int use_conflict(struct block *, struct block *); +static void opt_j(struct edge *); +static struct block *fold_edge(struct block *, struct edge *); +static inline int eq_blk(struct block *, struct block *); +static int slength(struct slist *); +static int count_blocks(struct block *); +static void number_blks_r(struct block *); +static int count_stmts(struct block *); +static void convert_code_r(struct block *); + +static int n_blocks; +struct block **blocks; +static int n_edges; +struct edge **edges; + +/* + * A bit vector set representation of the dominators. + * We round up the set size to the next power of two. + */ +static int nodewords; +static int edgewords; +struct block **levels; +u_long *space; +#define BITS_PER_WORD (8*sizeof(u_long)) +/* + * True if a is in uset {p} + */ +#define SET_MEMBER(p, a) \ +((p)[(unsigned)(a) / BITS_PER_WORD] & (1 << ((unsigned)(a) % BITS_PER_WORD))) + +/* + * Add 'a' to uset p. + */ +#define SET_INSERT(p, a) \ +(p)[(unsigned)(a) / BITS_PER_WORD] |= (1 << ((unsigned)(a) % BITS_PER_WORD)) + +/* + * Delete 'a' from uset p. 
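+ * (With 32-bit longs, e.g., SET_INSERT(p, 37) sets bit 5 of word p[1].)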
+ */ +#define SET_DELETE(p, a) \ +(p)[(unsigned)(a) / BITS_PER_WORD] &= ~(1 << ((unsigned)(a) % BITS_PER_WORD)) + +/* + * a := a intersect b + */ +#define SET_INTERSECT(a, b, n)\ +{\ + register u_long *_x = a, *_y = b;\ + register int _n = n;\ + while (--_n >= 0) *_x++ &= *_y++;\ +} + +/* + * a := a - b + */ +#define SET_SUBTRACT(a, b, n)\ +{\ + register u_long *_x = a, *_y = b;\ + register int _n = n;\ + while (--_n >= 0) *_x++ &=~ *_y++;\ +} + +/* + * a := a union b + */ +#define SET_UNION(a, b, n)\ +{\ + register u_long *_x = a, *_y = b;\ + register int _n = n;\ + while (--_n >= 0) *_x++ |= *_y++;\ +} + +static uset all_dom_sets; +static uset all_closure_sets; +static uset all_edge_sets; + +#ifndef MAX +#define MAX(a,b) ((a)>(b)?(a):(b)) +#endif + +static void +find_levels_r(b) + struct block *b; +{ + int level; + + if (isMarked(b)) + return; + + Mark(b); + b->link = 0; + + if (JT(b)) { + find_levels_r(JT(b)); + find_levels_r(JF(b)); + level = MAX(JT(b)->level, JF(b)->level) + 1; + } else + level = 0; + b->level = level; + b->link = levels[level]; + levels[level] = b; +} + +/* + * Level graph. The levels go from 0 at the leaves to + * N_LEVELS at the root. The levels[] array points to the + * first node of the level list, whose elements are linked + * with the 'link' field of the struct block. + */ +static void +find_levels(root) + struct block *root; +{ + memset((char *)levels, 0, n_blocks * sizeof(*levels)); + unMarkAll(); + find_levels_r(root); +} + +/* + * Find dominator relationships. + * Assumes graph has been leveled. + */ +static void +find_dom(root) + struct block *root; +{ + int i; + struct block *b; + u_long *x; + + /* + * Initialize sets to contain all nodes. + */ + x = all_dom_sets; + i = n_blocks * nodewords; + while (--i >= 0) + *x++ = ~0; + /* Root starts off empty. */ + for (i = nodewords; --i >= 0;) + root->dom[i] = 0; + + /* root->level is the highest level no found. */ + for (i = root->level; i >= 0; --i) { + for (b = levels[i]; b; b = b->link) { + SET_INSERT(b->dom, b->id); + if (JT(b) == 0) + continue; + SET_INTERSECT(JT(b)->dom, b->dom, nodewords); + SET_INTERSECT(JF(b)->dom, b->dom, nodewords); + } + } +} + +static void +propedom(ep) + struct edge *ep; +{ + SET_INSERT(ep->edom, ep->id); + if (ep->succ) { + SET_INTERSECT(ep->succ->et.edom, ep->edom, edgewords); + SET_INTERSECT(ep->succ->ef.edom, ep->edom, edgewords); + } +} + +/* + * Compute edge dominators. + * Assumes graph has been leveled and predecessors established. + */ +static void +find_edom(root) + struct block *root; +{ + int i; + uset x; + struct block *b; + + x = all_edge_sets; + for (i = n_edges * edgewords; --i >= 0; ) + x[i] = ~0; + + /* root->level is the highest level no found. */ + memset(root->et.edom, 0, edgewords * sizeof(*(uset)0)); + memset(root->ef.edom, 0, edgewords * sizeof(*(uset)0)); + for (i = root->level; i >= 0; --i) { + for (b = levels[i]; b != 0; b = b->link) { + propedom(&b->et); + propedom(&b->ef); + } + } +} + +/* + * Find the backwards transitive closure of the flow graph. These sets + * are backwards in the sense that we find the set of nodes that reach + * a given node, not the set of nodes that can be reached by a node. + * + * Assumes graph has been leveled. + */ +static void +find_closure(root) + struct block *root; +{ + int i; + struct block *b; + + /* + * Initialize sets to contain no nodes. + */ + memset((char *)all_closure_sets, 0, + n_blocks * nodewords * sizeof(*all_closure_sets)); + + /* root->level is the highest level no found. 
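+ * Leaves are level 0 and the root has the highest level, so counting i
+ * down visits every node after all of its predecessors; b->closure is
+ * therefore complete by the time it is unioned into JT(b) and JF(b).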
*/ + for (i = root->level; i >= 0; --i) { + for (b = levels[i]; b; b = b->link) { + SET_INSERT(b->closure, b->id); + if (JT(b) == 0) + continue; + SET_UNION(JT(b)->closure, b->closure, nodewords); + SET_UNION(JF(b)->closure, b->closure, nodewords); + } + } +} + +/* + * Return the register number that is used by s. If A and X are both + * used, return AX_ATOM. If no register is used, return -1. + * + * The implementation should probably change to an array access. + */ +static int +atomuse(s) + struct stmt *s; +{ + register int c = s->code; + + if (c == NOP) + return -1; + + switch (BPF_CLASS(c)) { + + case BPF_RET: + return (BPF_RVAL(c) == BPF_A) ? A_ATOM : + (BPF_RVAL(c) == BPF_X) ? X_ATOM : -1; + + case BPF_LD: + case BPF_LDX: + return (BPF_MODE(c) == BPF_IND) ? X_ATOM : + (BPF_MODE(c) == BPF_MEM) ? s->k : -1; + + case BPF_ST: + return A_ATOM; + + case BPF_STX: + return X_ATOM; + + case BPF_JMP: + case BPF_ALU: + if (BPF_SRC(c) == BPF_X) + return AX_ATOM; + return A_ATOM; + + case BPF_MISC: + return BPF_MISCOP(c) == BPF_TXA ? X_ATOM : A_ATOM; + } + abort(); + /* NOTREACHED */ +} + +/* + * Return the register number that is defined by 's'. We assume that + * a single stmt cannot define more than one register. If no register + * is defined, return -1. + * + * The implementation should probably change to an array access. + */ +static int +atomdef(s) + struct stmt *s; +{ + if (s->code == NOP) + return -1; + + switch (BPF_CLASS(s->code)) { + + case BPF_LD: + case BPF_ALU: + return A_ATOM; + + case BPF_LDX: + return X_ATOM; + + case BPF_ST: + case BPF_STX: + return s->k; + + case BPF_MISC: + return BPF_MISCOP(s->code) == BPF_TAX ? X_ATOM : A_ATOM; + } + return -1; +} + +static void +compute_local_ud(b) + struct block *b; +{ + struct slist *s; + atomset def = 0, use = 0, kill = 0; + int atom; + + for (s = b->stmts; s; s = s->next) { + if (s->s.code == NOP) + continue; + atom = atomuse(&s->s); + if (atom >= 0) { + if (atom == AX_ATOM) { + if (!ATOMELEM(def, X_ATOM)) + use |= ATOMMASK(X_ATOM); + if (!ATOMELEM(def, A_ATOM)) + use |= ATOMMASK(A_ATOM); + } + else if (atom < N_ATOMS) { + if (!ATOMELEM(def, atom)) + use |= ATOMMASK(atom); + } + else + abort(); + } + atom = atomdef(&s->s); + if (atom >= 0) { + if (!ATOMELEM(use, atom)) + kill |= ATOMMASK(atom); + def |= ATOMMASK(atom); + } + } + if (!ATOMELEM(def, A_ATOM) && BPF_CLASS(b->s.code) == BPF_JMP) + use |= ATOMMASK(A_ATOM); + + b->def = def; + b->kill = kill; + b->in_use = use; +} + +/* + * Assume graph is already leveled. + */ +static void +find_ud(root) + struct block *root; +{ + int i, maxlevel; + struct block *p; + + /* + * root->level is the highest level no found; + * count down from there. + */ + maxlevel = root->level; + for (i = maxlevel; i >= 0; --i) + for (p = levels[i]; p; p = p->link) { + compute_local_ud(p); + p->out_use = 0; + } + + for (i = 1; i <= maxlevel; ++i) { + for (p = levels[i]; p; p = p->link) { + p->out_use |= JT(p)->in_use | JF(p)->in_use; + p->in_use |= p->out_use &~ p->kill; + } + } +} + +/* + * These data structures are used in a Cocke and Shwarz style + * value numbering scheme. Since the flowgraph is acyclic, + * exit values can be propagated from a node's predecessors + * provided it is uniquely defined. + */ +struct valnode { + int code; + long v0, v1; + long val; + struct valnode *next; +}; + +#define MODULUS 213 +static struct valnode *hashtbl[MODULUS]; +static int curval; +static int maxval; + +/* Integer constants mapped with the load immediate opcode. 
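+ * E.g., K(2) returns the value number shared by every load of the
+ * constant 2; F() marks that number as is_const in vmap[].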
+
+/* Integer constants mapped with the load immediate opcode. */
+#define K(i) F(BPF_LD|BPF_IMM|BPF_W, i, 0L)
+
+struct vmapinfo {
+	int is_const;
+	long const_val;
+};
+
+struct vmapinfo *vmap;
+struct valnode *vnode_base;
+struct valnode *next_vnode;
+
+static void
+init_val()
+{
+	curval = 0;
+	next_vnode = vnode_base;
+	memset((char *)vmap, 0, maxval * sizeof(*vmap));
+	memset((char *)hashtbl, 0, sizeof hashtbl);
+}
+
+/* Because we really don't have an IR, this stuff is a little messy. */
+static long
+F(code, v0, v1)
+	int code;
+	long v0, v1;
+{
+	u_int hash;
+	int val;
+	struct valnode *p;
+
+	hash = (u_int)code ^ (v0 << 4) ^ (v1 << 8);
+	hash %= MODULUS;
+
+	for (p = hashtbl[hash]; p; p = p->next)
+		if (p->code == code && p->v0 == v0 && p->v1 == v1)
+			return p->val;
+
+	val = ++curval;
+	if (BPF_MODE(code) == BPF_IMM &&
+	    (BPF_CLASS(code) == BPF_LD || BPF_CLASS(code) == BPF_LDX)) {
+		vmap[val].const_val = v0;
+		vmap[val].is_const = 1;
+	}
+	p = next_vnode++;
+	p->val = val;
+	p->code = code;
+	p->v0 = v0;
+	p->v1 = v1;
+	p->next = hashtbl[hash];
+	hashtbl[hash] = p;
+
+	return val;
+}
+
+static inline void
+vstore(s, valp, newval, alter)
+	struct stmt *s;
+	long *valp;
+	long newval;
+	int alter;
+{
+	if (alter && *valp == newval)
+		s->code = NOP;
+	else
+		*valp = newval;
+}
+
+static void
+fold_op(s, v0, v1)
+	struct stmt *s;
+	long v0, v1;
+{
+	long a, b;
+
+	a = vmap[v0].const_val;
+	b = vmap[v1].const_val;
+
+	switch (BPF_OP(s->code)) {
+	case BPF_ADD:
+		a += b;
+		break;
+
+	case BPF_SUB:
+		a -= b;
+		break;
+
+	case BPF_MUL:
+		a *= b;
+		break;
+
+	case BPF_DIV:
+		if (b == 0)
+			bpf_error("division by zero");
+		a /= b;
+		break;
+
+	case BPF_AND:
+		a &= b;
+		break;
+
+	case BPF_OR:
+		a |= b;
+		break;
+
+	case BPF_LSH:
+		a <<= b;
+		break;
+
+	case BPF_RSH:
+		a >>= b;
+		break;
+
+	case BPF_NEG:
+		a = -a;
+		break;
+
+	default:
+		abort();
+	}
+	s->k = a;
+	s->code = BPF_LD|BPF_IMM;
+	done = 0;
+}
+
+static inline struct slist *
+this_op(s)
+	struct slist *s;
+{
+	while (s != 0 && s->s.code == NOP)
+		s = s->next;
+	return s;
+}
+
+static void
+opt_not(b)
+	struct block *b;
+{
+	struct block *tmp = JT(b);
+
+	JT(b) = JF(b);
+	JF(b) = tmp;
+}
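opt_peep(), which follows, scans each block's statement list for adjacent
pairs it can rewrite, e.g. a store immediately reloaded into X. A freestanding
sketch of that first pattern, with made-up opcode values standing in for the
real BPF_ST/BPF_LDX/BPF_MISC encodings:

    #include <stdio.h>

    /* Illustrative stand-ins, not the real BPF opcode values. */
    enum { ST, LDX_MEM, TAX };

    struct ins { int code; int k; };

    int
    main()
    {
    	/* st M[3]; ldx M[3] -- the store already left the value in A */
    	struct ins prog[] = { { ST, 3 }, { LDX_MEM, 3 } };

    	if (prog[0].code == ST && prog[1].code == LDX_MEM &&
    	    prog[0].k == prog[1].k)
    		prog[1].code = TAX;	/* memory reload becomes an A->X copy */

    	printf("second insn is now %s\n",
    	    prog[1].code == TAX ? "tax" : "ldx");
    	return 0;
    }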
+
+static void
+opt_peep(b)
+	struct block *b;
+{
+	struct slist *s;
+	struct slist *next, *last;
+	int val;
+	long v;
+
+	s = b->stmts;
+	if (s == 0)
+		return;
+
+	last = s;
+	while (1) {
+		s = this_op(s);
+		if (s == 0)
+			break;
+		next = this_op(s->next);
+		if (next == 0)
+			break;
+		last = next;
+
+		/*
+		 * st  M[k]	-->	st  M[k]
+		 * ldx M[k]		tax
+		 */
+		if (s->s.code == BPF_ST &&
+		    next->s.code == (BPF_LDX|BPF_MEM) &&
+		    s->s.k == next->s.k) {
+			done = 0;
+			next->s.code = BPF_MISC|BPF_TAX;
+		}
+		/*
+		 * ld  #k	-->	ldx #k
+		 * tax			txa
+		 */
+		if (s->s.code == (BPF_LD|BPF_IMM) &&
+		    next->s.code == (BPF_MISC|BPF_TAX)) {
+			s->s.code = BPF_LDX|BPF_IMM;
+			next->s.code = BPF_MISC|BPF_TXA;
+			done = 0;
+		}
+		/*
+		 * This is an ugly special case, but it happens
+		 * when you say tcp[k] or udp[k] where k is a constant.
+		 */
+		if (s->s.code == (BPF_LD|BPF_IMM)) {
+			struct slist *add, *tax, *ild;
+
+			/*
+			 * Check that X isn't used on exit from this
+			 * block (which the optimizer might cause).
+			 * We know the code generator won't generate
+			 * any local dependencies.
+			 */
+			if (ATOMELEM(b->out_use, X_ATOM))
+				break;
+
+			if (next->s.code != (BPF_LDX|BPF_MSH|BPF_B))
+				add = next;
+			else
+				add = this_op(next->next);
+			if (add == 0 || add->s.code != (BPF_ALU|BPF_ADD|BPF_X))
+				break;
+
+			tax = this_op(add->next);
+			if (tax == 0 || tax->s.code != (BPF_MISC|BPF_TAX))
+				break;
+
+			ild = this_op(tax->next);
+			if (ild == 0 || BPF_CLASS(ild->s.code) != BPF_LD ||
+			    BPF_MODE(ild->s.code) != BPF_IND)
+				break;
+			/*
+			 * XXX We need to check that X is not
+			 * subsequently used. We know we can eliminate the
+			 * accumulator modifications since it is defined
+			 * by the last stmt of this sequence.
+			 *
+			 * We want to turn this sequence:
+			 *
+			 * (004) ldi     #0x2	{s}
+			 * (005) ldxms   [14]	{next}  -- optional
+			 * (006) addx		{add}
+			 * (007) tax		{tax}
+			 * (008) ild     [x+0]	{ild}
+			 *
+			 * into this sequence:
+			 *
+			 * (004) nop
+			 * (005) ldxms   [14]
+			 * (006) nop
+			 * (007) nop
+			 * (008) ild     [x+2]
+			 *
+			 */
+			ild->s.k += s->s.k;
+			s->s.code = NOP;
+			add->s.code = NOP;
+			tax->s.code = NOP;
+			done = 0;
+		}
+		s = next;
+	}
+	/*
+	 * If we have a subtract to do a comparison, and the X register
+	 * is a known constant, we can merge this value into the
+	 * comparison.
+	 */
+	if (last->s.code == (BPF_ALU|BPF_SUB|BPF_X) &&
+	    !ATOMELEM(b->out_use, A_ATOM)) {
+		val = b->val[X_ATOM];
+		if (vmap[val].is_const) {
+			b->s.k += vmap[val].const_val;
+			last->s.code = NOP;
+			done = 0;
+		} else if (b->s.k == 0) {
+			/*
+			 * sub x  ->	nop
+			 * j  #0	j  x
+			 */
+			last->s.code = NOP;
+			b->s.code = BPF_CLASS(b->s.code) | BPF_OP(b->s.code) |
+				BPF_X;
+			done = 0;
+		}
+	}
+	/*
+	 * Likewise, a constant subtract can be simplified.
+	 */
+	else if (last->s.code == (BPF_ALU|BPF_SUB|BPF_K) &&
+		 !ATOMELEM(b->out_use, A_ATOM)) {
+		b->s.k += last->s.k;
+		last->s.code = NOP;
+		done = 0;
+	}
+	/*
+	 * and #k	nop
+	 * jeq #0  ->	jset #k
+	 */
+	if (last->s.code == (BPF_ALU|BPF_AND|BPF_K) &&
+	    !ATOMELEM(b->out_use, A_ATOM) && b->s.k == 0) {
+		b->s.k = last->s.k;
+		b->s.code = BPF_JMP|BPF_K|BPF_JSET;
+		last->s.code = NOP;
+		done = 0;
+		opt_not(b);
+	}
+	/*
+	 * If the accumulator is a known constant, we can compute the
+	 * comparison result.
+	 */
+	val = b->val[A_ATOM];
+	if (vmap[val].is_const && BPF_SRC(b->s.code) == BPF_K) {
+		v = vmap[val].const_val;
+		switch (BPF_OP(b->s.code)) {
+
+		case BPF_JEQ:
+			v = v == b->s.k;
+			break;
+
+		case BPF_JGT:
+			v = v > b->s.k;
+			break;
+
+		case BPF_JGE:
+			v = v >= b->s.k;
+			break;
+
+		case BPF_JSET:
+			v &= b->s.k;
+			break;
+
+		default:
+			abort();
+		}
+		if (JF(b) != JT(b))
+			done = 0;
+		if (v)
+			JF(b) = JT(b);
+		else
+			JT(b) = JF(b);
+	}
+}
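The tail of opt_peep() decides a block's branch outright once the accumulator
is a known constant. The same decision table, extracted into a runnable
sketch (op codes 0-3 are illustrative stand-ins for
BPF_JEQ/BPF_JGT/BPF_JGE/BPF_JSET):

    #include <stdio.h>

    /* Decide a conditional at optimize time when the accumulator value v
     * is known; mirrors the switch at the end of opt_peep(). */
    static int
    branch_taken(int op, long v, long k)
    {
    	switch (op) {
    	case 0: return v == k;		/* jeq */
    	case 1: return v > k;		/* jgt */
    	case 2: return v >= k;		/* jge */
    	case 3: return (v & k) != 0;	/* jset */
    	}
    	return -1;
    }

    int
    main()
    {
    	/* "ld #6; jeq #6" always takes the true edge, so JF(b) can be
    	 * redirected to JT(b) and the test dropped. */
    	printf("%d\n", branch_taken(0, 6, 6));	/* 1 */
    	printf("%d\n", branch_taken(3, 6, 1));	/* 0: bit 0 not set in 6 */
    	return 0;
    }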
+
+/*
+ * Compute the symbolic value of expression of 's', and update
+ * anything it defines in the value table 'val'. If 'alter' is true,
+ * do various optimizations. This code would be cleaner if symbolic
+ * evaluation and code transformations weren't folded together.
+ */
+static void
+opt_stmt(s, val, alter)
+	struct stmt *s;
+	long val[];
+	int alter;
+{
+	int op;
+	long v;
+
+	switch (s->code) {
+
+	case BPF_LD|BPF_ABS|BPF_W:
+	case BPF_LD|BPF_ABS|BPF_H:
+	case BPF_LD|BPF_ABS|BPF_B:
+		v = F(s->code, s->k, 0L);
+		vstore(s, &val[A_ATOM], v, alter);
+		break;
+
+	case BPF_LD|BPF_IND|BPF_W:
+	case BPF_LD|BPF_IND|BPF_H:
+	case BPF_LD|BPF_IND|BPF_B:
+		v = val[X_ATOM];
+		if (alter && vmap[v].is_const) {
+			s->code = BPF_LD|BPF_ABS|BPF_SIZE(s->code);
+			s->k += vmap[v].const_val;
+			v = F(s->code, s->k, 0L);
+			done = 0;
+		}
+		else
+			v = F(s->code, s->k, v);
+		vstore(s, &val[A_ATOM], v, alter);
+		break;
+
+	case BPF_LD|BPF_LEN:
+		v = F(s->code, 0L, 0L);
+		vstore(s, &val[A_ATOM], v, alter);
+		break;
+
+	case BPF_LD|BPF_IMM:
+		v = K(s->k);
+		vstore(s, &val[A_ATOM], v, alter);
+		break;
+
+	case BPF_LDX|BPF_IMM:
+		v = K(s->k);
+		vstore(s, &val[X_ATOM], v, alter);
+		break;
+
+	case BPF_LDX|BPF_MSH|BPF_B:
+		v = F(s->code, s->k, 0L);
+		vstore(s, &val[X_ATOM], v, alter);
+		break;
+
+	case BPF_ALU|BPF_NEG:
+		if (alter && vmap[val[A_ATOM]].is_const) {
+			s->code = BPF_LD|BPF_IMM;
+			s->k = -vmap[val[A_ATOM]].const_val;
+			val[A_ATOM] = K(s->k);
+		}
+		else
+			val[A_ATOM] = F(s->code, val[A_ATOM], 0L);
+		break;
+
+	case BPF_ALU|BPF_ADD|BPF_K:
+	case BPF_ALU|BPF_SUB|BPF_K:
+	case BPF_ALU|BPF_MUL|BPF_K:
+	case BPF_ALU|BPF_DIV|BPF_K:
+	case BPF_ALU|BPF_AND|BPF_K:
+	case BPF_ALU|BPF_OR|BPF_K:
+	case BPF_ALU|BPF_LSH|BPF_K:
+	case BPF_ALU|BPF_RSH|BPF_K:
+		op = BPF_OP(s->code);
+		if (alter) {
+			if (s->k == 0) {
+				if (op == BPF_ADD || op == BPF_SUB ||
+				    op == BPF_LSH || op == BPF_RSH ||
+				    op == BPF_OR) {
+					s->code = NOP;
+					break;
+				}
+				if (op == BPF_MUL || op == BPF_AND) {
+					s->code = BPF_LD|BPF_IMM;
+					val[A_ATOM] = K(s->k);
+					break;
+				}
+			}
+			if (vmap[val[A_ATOM]].is_const) {
+				fold_op(s, val[A_ATOM], K(s->k));
+				val[A_ATOM] = K(s->k);
+				break;
+			}
+		}
+		val[A_ATOM] = F(s->code, val[A_ATOM], K(s->k));
+		break;
+
+	case BPF_ALU|BPF_ADD|BPF_X:
+	case BPF_ALU|BPF_SUB|BPF_X:
+	case BPF_ALU|BPF_MUL|BPF_X:
+	case BPF_ALU|BPF_DIV|BPF_X:
+	case BPF_ALU|BPF_AND|BPF_X:
+	case BPF_ALU|BPF_OR|BPF_X:
+	case BPF_ALU|BPF_LSH|BPF_X:
+	case BPF_ALU|BPF_RSH|BPF_X:
+		op = BPF_OP(s->code);
+		if (alter && vmap[val[X_ATOM]].is_const) {
+			if (vmap[val[A_ATOM]].is_const) {
+				fold_op(s, val[A_ATOM], val[X_ATOM]);
+				val[A_ATOM] = K(s->k);
+			}
+			else {
+				s->code = BPF_ALU|BPF_K|op;
+				s->k = vmap[val[X_ATOM]].const_val;
+				done = 0;
+				val[A_ATOM] =
+					F(s->code, val[A_ATOM], K(s->k));
+			}
+			break;
+		}
+		/*
+		 * Check if we're doing something to an accumulator
+		 * that is 0, and simplify. This may not seem like
+		 * much of a simplification but it could open up further
+		 * optimizations.
+		 * XXX We could also check for mul by 1, and -1, etc.
+		 */
+		if (alter && vmap[val[A_ATOM]].is_const
+		    && vmap[val[A_ATOM]].const_val == 0) {
+			if (op == BPF_ADD || op == BPF_OR ||
+			    op == BPF_LSH || op == BPF_RSH || op == BPF_SUB) {
+				s->code = BPF_MISC|BPF_TXA;
+				vstore(s, &val[A_ATOM], val[X_ATOM], alter);
+				break;
+			}
+			else if (op == BPF_MUL || op == BPF_DIV ||
+				 op == BPF_AND) {
+				s->code = BPF_LD|BPF_IMM;
+				s->k = 0;
+				vstore(s, &val[A_ATOM], K(s->k), alter);
+				break;
+			}
+			else if (op == BPF_NEG) {
+				s->code = NOP;
+				break;
+			}
+		}
+		val[A_ATOM] = F(s->code, val[A_ATOM], val[X_ATOM]);
+		break;
+
+	case BPF_MISC|BPF_TXA:
+		vstore(s, &val[A_ATOM], val[X_ATOM], alter);
+		break;
+
+	case BPF_LD|BPF_MEM:
+		v = val[s->k];
+		if (alter && vmap[v].is_const) {
+			s->code = BPF_LD|BPF_IMM;
+			s->k = vmap[v].const_val;
+			done = 0;
+		}
+		vstore(s, &val[A_ATOM], v, alter);
+		break;
+
+	case BPF_MISC|BPF_TAX:
+		vstore(s, &val[X_ATOM], val[A_ATOM], alter);
+		break;
+
+	case BPF_LDX|BPF_MEM:
+		v = val[s->k];
+		if (alter && vmap[v].is_const) {
+			s->code = BPF_LDX|BPF_IMM;
+			s->k = vmap[v].const_val;
+			done = 0;
+		}
+		vstore(s, &val[X_ATOM], v, alter);
+		break;
+
+	case BPF_ST:
+		vstore(s, &val[s->k], val[A_ATOM], alter);
+		break;
+
+	case BPF_STX:
+		vstore(s, &val[s->k], val[X_ATOM], alter);
+		break;
+	}
+}
+
+static void
+deadstmt(s, last)
+	register struct stmt *s;
+	register struct stmt *last[];
+{
+	register int atom;
+
+	atom = atomuse(s);
+	if (atom >= 0) {
+		if (atom == AX_ATOM) {
+			last[X_ATOM] = 0;
+			last[A_ATOM] = 0;
+		}
+		else
+			last[atom] = 0;
+	}
+	atom = atomdef(s);
+	if (atom >= 0) {
+		if (last[atom]) {
+			done = 0;
+			last[atom]->code = NOP;
+		}
+		last[atom] = s;
+	}
+}
+
+static void
+opt_deadstores(b)
+	register struct block *b;
+{
+	register struct slist *s;
+	register int atom;
+	struct stmt *last[N_ATOMS];
+
+	memset((char *)last, 0, sizeof last);
+
+	for (s = b->stmts; s != 0; s = s->next)
+		deadstmt(&s->s, last);
+	deadstmt(&b->s, last);
+
+	for (atom = 0; atom < N_ATOMS; ++atom)
+		if (last[atom] && !ATOMELEM(b->out_use, atom)) {
+			last[atom]->code = NOP;
+			done = 0;
+		}
+}
+
+static void
+opt_blk(b, do_stmts)
+	struct block *b;
+	int do_stmts;
+{
+	struct slist *s;
+	struct edge *p;
+	int i;
+	long aval;
+
+	/*
+	 * Initialize the atom values.
+	 * If we have no predecessors, everything is undefined.
+	 * Otherwise, we inherit our values from our predecessors.
+	 * If any register has an ambiguous value (i.e. control paths are
+	 * merging) give it the undefined value of 0.
+	 */
+	p = b->in_edges;
+	if (p == 0)
+		memset((char *)b->val, 0, sizeof(b->val));
+	else {
+		memcpy((char *)b->val, (char *)p->pred->val, sizeof(b->val));
+		while ((p = p->next) != NULL) {
+			for (i = 0; i < N_ATOMS; ++i)
+				if (b->val[i] != p->pred->val[i])
+					b->val[i] = 0;
+		}
+	}
+	aval = b->val[A_ATOM];
+	for (s = b->stmts; s; s = s->next)
+		opt_stmt(&s->s, b->val, do_stmts);
+
+	/*
+	 * This is a special case: if we don't use anything from this
+	 * block, and we load the accumulator with a value that is
+	 * already there, eliminate all the statements.
+	 */
+	if (do_stmts && b->out_use == 0 && aval != 0 &&
+	    b->val[A_ATOM] == aval)
+		b->stmts = 0;
+	else {
+		opt_peep(b);
+		opt_deadstores(b);
+	}
+	/*
+	 * Set up values for branch optimizer.
+	 */
+	if (BPF_SRC(b->s.code) == BPF_K)
+		b->oval = K(b->s.k);
+	else
+		b->oval = b->val[X_ATOM];
+	b->et.code = b->s.code;
+	b->ef.code = -b->s.code;
+}
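opt_blk()'s merge rule is worth seeing in isolation: a block inherits an
atom's value number only when every predecessor agrees on it, and 0 serves as
"undefined". A small sketch with two predecessors that agree on A and M[0]
but not on X (the atom layout is illustrative):

    #include <stdio.h>

    #define N_ATOMS 3

    int
    main()
    {
    	/* Exit value numbers of two predecessor blocks for A, X, M[0]. */
    	long pred0[N_ATOMS] = { 5, 7, 9 };
    	long pred1[N_ATOMS] = { 5, 8, 9 };
    	long val[N_ATOMS];
    	int i;

    	for (i = 0; i < N_ATOMS; i++)
    		val[i] = pred0[i];
    	for (i = 0; i < N_ATOMS; i++)
    		if (val[i] != pred1[i])
    			val[i] = 0;	/* control paths merge: ambiguous */

    	for (i = 0; i < N_ATOMS; i++)
    		printf("atom %d -> %ld\n", i, val[i]);	/* 5, 0, 9 */
    	return 0;
    }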
+
+/*
+ * Return true if any register that is used on exit from 'succ', has
+ * an exit value that is different from the corresponding exit value
+ * from 'b'.
+ */
+static int
+use_conflict(b, succ)
+	struct block *b, *succ;
+{
+	int atom;
+	atomset use = succ->out_use;
+
+	if (use == 0)
+		return 0;
+
+	for (atom = 0; atom < N_ATOMS; ++atom)
+		if (ATOMELEM(use, atom))
+			if (b->val[atom] != succ->val[atom])
+				return 1;
+	return 0;
+}
+
+static struct block *
+fold_edge(child, ep)
+	struct block *child;
+	struct edge *ep;
+{
+	int sense;
+	int aval0, aval1, oval0, oval1;
+	int code = ep->code;
+
+	if (code < 0) {
+		code = -code;
+		sense = 0;
+	} else
+		sense = 1;
+
+	if (child->s.code != code)
+		return 0;
+
+	aval0 = child->val[A_ATOM];
+	oval0 = child->oval;
+	aval1 = ep->pred->val[A_ATOM];
+	oval1 = ep->pred->oval;
+
+	if (aval0 != aval1)
+		return 0;
+
+	if (oval0 == oval1)
+		/*
+		 * The operands are identical, so the
+		 * result is true if a true branch was
+		 * taken to get here, otherwise false.
+		 */
+		return sense ? JT(child) : JF(child);
+
+	if (sense && code == (BPF_JMP|BPF_JEQ|BPF_K))
+		/*
+		 * At this point, we only know the comparison if we
+		 * came down the true branch, and it was an equality
+		 * comparison with a constant. We rely on the fact that
+		 * distinct constants have distinct value numbers.
+		 */
+		return JF(child);
+
+	return 0;
+}
+
+static void
+opt_j(ep)
+	struct edge *ep;
+{
+	register int i, k;
+	register struct block *target;
+
+	if (JT(ep->succ) == 0)
+		return;
+
+	if (JT(ep->succ) == JF(ep->succ)) {
+		/*
+		 * Common branch targets can be eliminated, provided
+		 * there is no data dependency.
+		 */
+		if (!use_conflict(ep->pred, ep->succ->et.succ)) {
+			done = 0;
+			ep->succ = JT(ep->succ);
+		}
+	}
+	/*
+	 * For each edge dominator that matches the successor of this
+	 * edge, promote the edge successor to its grandchild.
+	 *
+	 * XXX We violate the set abstraction here in favor of a reasonably
+	 * efficient loop.
+	 */
+ top:
+	for (i = 0; i < edgewords; ++i) {
+		register u_long x = ep->edom[i];
+
+		while (x != 0) {
+			k = ffs(x) - 1;
+			x &=~ (1 << k);
+			k += i * BITS_PER_WORD;
+
+			target = fold_edge(ep->succ, edges[k]);
+			/*
+			 * Check that there is no data dependency between
+			 * nodes that will be violated if we move the edge.
+			 */
+			if (target != 0 && !use_conflict(ep->pred, target)) {
+				done = 0;
+				ep->succ = target;
+				if (JT(target) != 0)
+					/*
+					 * Start over unless we hit a leaf.
+					 */
+					goto top;
+				return;
+			}
+		}
+	}
+}
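opt_j() enumerates the set bits of each edge-dominator word with ffs(),
stripping the lowest set bit on each pass. The loop in isolation (assuming
the POSIX/BSD ffs() from <strings.h>):

    #include <stdio.h>
    #include <strings.h>	/* ffs() */

    int
    main()
    {
    	unsigned int x = 0x122;	/* bits 1, 5, 8: edge ids in one set word */
    	int k;

    	while (x != 0) {
    		k = ffs(x) - 1;	/* index of lowest set bit */
    		x &= ~(1U << k);
    		printf("edge id %d\n", k);	/* prints 1, 5, 8 */
    	}
    	return 0;
    }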
+
+
+static void
+or_pullup(b)
+	struct block *b;
+{
+	int val, at_top;
+	struct block *pull;
+	struct block **diffp, **samep;
+	struct edge *ep;
+
+	ep = b->in_edges;
+	if (ep == 0)
+		return;
+
+	/*
+	 * Make sure each predecessor loads the same value.
+	 * XXX why?
+	 */
+	val = ep->pred->val[A_ATOM];
+	for (ep = ep->next; ep != 0; ep = ep->next)
+		if (val != ep->pred->val[A_ATOM])
+			return;
+
+	if (JT(b->in_edges->pred) == b)
+		diffp = &JT(b->in_edges->pred);
+	else
+		diffp = &JF(b->in_edges->pred);
+
+	at_top = 1;
+	while (1) {
+		if (*diffp == 0)
+			return;
+
+		if (JT(*diffp) != JT(b))
+			return;
+
+		if (!SET_MEMBER((*diffp)->dom, b->id))
+			return;
+
+		if ((*diffp)->val[A_ATOM] != val)
+			break;
+
+		diffp = &JF(*diffp);
+		at_top = 0;
+	}
+	samep = &JF(*diffp);
+	while (1) {
+		if (*samep == 0)
+			return;
+
+		if (JT(*samep) != JT(b))
+			return;
+
+		if (!SET_MEMBER((*samep)->dom, b->id))
+			return;
+
+		if ((*samep)->val[A_ATOM] == val)
+			break;
+
+		/* XXX Need to check that there are no data dependencies
+		   between dp0 and dp1. Currently, the code generator
+		   will not produce such dependencies. */
+		samep = &JF(*samep);
+	}
+#ifdef notdef
+	/* XXX This doesn't cover everything. */
+	for (i = 0; i < N_ATOMS; ++i)
+		if ((*samep)->val[i] != pred->val[i])
+			return;
+#endif
+	/* Pull up the node. */
+	pull = *samep;
+	*samep = JF(pull);
+	JF(pull) = *diffp;
+
+	/*
+	 * At the top of the chain, each predecessor needs to point at the
+	 * pulled up node. Inside the chain, there is only one predecessor
+	 * to worry about.
+	 */
+	if (at_top) {
+		for (ep = b->in_edges; ep != 0; ep = ep->next) {
+			if (JT(ep->pred) == b)
+				JT(ep->pred) = pull;
+			else
+				JF(ep->pred) = pull;
+		}
+	}
+	else
+		*diffp = pull;
+
+	done = 0;
+}
+
+static void
+and_pullup(b)
+	struct block *b;
+{
+	int val, at_top;
+	struct block *pull;
+	struct block **diffp, **samep;
+	struct edge *ep;
+
+	ep = b->in_edges;
+	if (ep == 0)
+		return;
+
+	/*
+	 * Make sure each predecessor loads the same value.
+	 */
+	val = ep->pred->val[A_ATOM];
+	for (ep = ep->next; ep != 0; ep = ep->next)
+		if (val != ep->pred->val[A_ATOM])
+			return;
+
+	if (JT(b->in_edges->pred) == b)
+		diffp = &JT(b->in_edges->pred);
+	else
+		diffp = &JF(b->in_edges->pred);
+
+	at_top = 1;
+	while (1) {
+		if (*diffp == 0)
+			return;
+
+		if (JF(*diffp) != JF(b))
+			return;
+
+		if (!SET_MEMBER((*diffp)->dom, b->id))
+			return;
+
+		if ((*diffp)->val[A_ATOM] != val)
+			break;
+
+		diffp = &JT(*diffp);
+		at_top = 0;
+	}
+	samep = &JT(*diffp);
+	while (1) {
+		if (*samep == 0)
+			return;
+
+		if (JF(*samep) != JF(b))
+			return;
+
+		if (!SET_MEMBER((*samep)->dom, b->id))
+			return;
+
+		if ((*samep)->val[A_ATOM] == val)
+			break;
+
+		/* XXX Need to check that there are no data dependencies
+		   between diffp and samep. Currently, the code generator
+		   will not produce such dependencies. */
+		samep = &JT(*samep);
+	}
+#ifdef notdef
+	/* XXX This doesn't cover everything. */
+	for (i = 0; i < N_ATOMS; ++i)
+		if ((*samep)->val[i] != pred->val[i])
+			return;
+#endif
+	/* Pull up the node. */
+	pull = *samep;
+	*samep = JT(pull);
+	JT(pull) = *diffp;
+
+	/*
+	 * At the top of the chain, each predecessor needs to point at the
+	 * pulled up node. Inside the chain, there is only one predecessor
+	 * to worry about.
+	 */
+	if (at_top) {
+		for (ep = b->in_edges; ep != 0; ep = ep->next) {
+			if (JT(ep->pred) == b)
+				JT(ep->pred) = pull;
+			else
+				JF(ep->pred) = pull;
+		}
+	}
+	else
+		*diffp = pull;
+
+	done = 0;
+}
+
+static void
+opt_blks(root, do_stmts)
+	struct block *root;
+	int do_stmts;
+{
+	int i, maxlevel;
+	struct block *p;
+
+	init_val();
+	maxlevel = root->level;
+	for (i = maxlevel; i >= 0; --i)
+		for (p = levels[i]; p; p = p->link)
+			opt_blk(p, do_stmts);
+
+	if (do_stmts)
+		/*
+		 * No point trying to move branches; it can't possibly
+		 * make a difference at this point.
+		 */
+		return;
+
+	for (i = 1; i <= maxlevel; ++i) {
+		for (p = levels[i]; p; p = p->link) {
+			opt_j(&p->et);
+			opt_j(&p->ef);
+		}
+	}
+	for (i = 1; i <= maxlevel; ++i) {
+		for (p = levels[i]; p; p = p->link) {
+			or_pullup(p);
+			and_pullup(p);
+		}
+	}
+}
+
+static inline void
+link_inedge(parent, child)
+	struct edge *parent;
+	struct block *child;
+{
+	parent->next = child->in_edges;
+	child->in_edges = parent;
+}
+
+static void
+find_inedges(root)
+	struct block *root;
+{
+	int i;
+	struct block *b;
+
+	for (i = 0; i < n_blocks; ++i)
+		blocks[i]->in_edges = 0;
+
+	/*
+	 * Traverse the graph, adding each edge to the predecessor
+	 * list of its successors. Skip the leaves (i.e. level 0).
+	 */
+	for (i = root->level; i > 0; --i) {
+		for (b = levels[i]; b != 0; b = b->link) {
+			link_inedge(&b->et, JT(b));
+			link_inedge(&b->ef, JF(b));
+		}
+	}
+}
+
+static void
+opt_root(b)
+	struct block **b;
+{
+	struct slist *tmp, *s;
+
+	s = (*b)->stmts;
+	(*b)->stmts = 0;
+	while (BPF_CLASS((*b)->s.code) == BPF_JMP && JT(*b) == JF(*b))
+		*b = JT(*b);
+
+	tmp = (*b)->stmts;
+	if (tmp != 0)
+		sappend(s, tmp);
+	(*b)->stmts = s;
+}
+
+static void
+opt_loop(root, do_stmts)
+	struct block *root;
+	int do_stmts;
+{
+
+#ifdef BDEBUG
+	if (dflag > 1)
+		opt_dump(root);
+#endif
+	do {
+		done = 1;
+		find_levels(root);
+		find_dom(root);
+		find_closure(root);
+		find_inedges(root);
+		find_ud(root);
+		find_edom(root);
+		opt_blks(root, do_stmts);
+#ifdef BDEBUG
+		if (dflag > 1)
+			opt_dump(root);
+#endif
+	} while (!done);
+}
+
+/*
+ * Optimize the filter code in its dag representation.
+ */
+void
+bpf_optimize(rootp)
+	struct block **rootp;
+{
+	struct block *root;
+
+	root = *rootp;
+
+	opt_init(root);
+	opt_loop(root, 0);
+	opt_loop(root, 1);
+	intern_blocks(root);
+	opt_root(rootp);
+	opt_cleanup();
+}
+
+static void
+make_marks(p)
+	struct block *p;
+{
+	if (!isMarked(p)) {
+		Mark(p);
+		if (BPF_CLASS(p->s.code) != BPF_RET) {
+			make_marks(JT(p));
+			make_marks(JF(p));
+		}
+	}
+}
+
+/*
+ * Mark code array such that isMarked(i) is true
+ * only for nodes that are alive.
+ */
+static void
+mark_code(p)
+	struct block *p;
+{
+	cur_mark += 1;
+	make_marks(p);
+}
+
+/*
+ * True iff the two stmt lists load the same value from the packet into
+ * the accumulator.
+ */
+static int
+eq_slist(x, y)
+	struct slist *x, *y;
+{
+	while (1) {
+		while (x && x->s.code == NOP)
+			x = x->next;
+		while (y && y->s.code == NOP)
+			y = y->next;
+		if (x == 0)
+			return y == 0;
+		if (y == 0)
+			return x == 0;
+		if (x->s.code != y->s.code || x->s.k != y->s.k)
+			return 0;
+		x = x->next;
+		y = y->next;
+	}
+}
+
+static inline int
+eq_blk(b0, b1)
+	struct block *b0, *b1;
+{
+	if (b0->s.code == b1->s.code &&
+	    b0->s.k == b1->s.k &&
+	    b0->et.succ == b1->et.succ &&
+	    b0->ef.succ == b1->ef.succ)
+		return eq_slist(b0->stmts, b1->stmts);
+	return 0;
+}
+
+static void
+intern_blocks(root)
+	struct block *root;
+{
+	struct block *p;
+	int i, j;
+	int done;
+ top:
+	done = 1;
+	for (i = 0; i < n_blocks; ++i)
+		blocks[i]->link = 0;
+
+	mark_code(root);
+
+	for (i = n_blocks - 1; --i >= 0; ) {
+		if (!isMarked(blocks[i]))
+			continue;
+		for (j = i + 1; j < n_blocks; ++j) {
+			if (!isMarked(blocks[j]))
+				continue;
+			if (eq_blk(blocks[i], blocks[j])) {
+				blocks[i]->link = blocks[j]->link ?
+					blocks[j]->link : blocks[j];
+				break;
+			}
+		}
+	}
+	for (i = 0; i < n_blocks; ++i) {
+		p = blocks[i];
+		if (JT(p) == 0)
+			continue;
+		if (JT(p)->link) {
+			done = 0;
+			JT(p) = JT(p)->link;
+		}
+		if (JF(p)->link) {
+			done = 0;
+			JF(p) = JF(p)->link;
+		}
+	}
+	if (!done)
+		goto top;
+}
+
+static void
+opt_cleanup()
+{
+	free((void *)vnode_base);
+	free((void *)vmap);
+	free((void *)edges);
+	free((void *)space);
+	free((void *)levels);
+	free((void *)blocks);
+}
+
+/*
+ * Return the number of stmts in 's'.
+ */
+static int
+slength(s)
+	struct slist *s;
+{
+	int n = 0;
+
+	for (; s; s = s->next)
+		if (s->s.code != NOP)
+			++n;
+	return n;
+}
+
+/*
+ * Return the number of nodes reachable by 'p'.
+ * All nodes should be initially unmarked.
+ */
+static int
+count_blocks(p)
+	struct block *p;
+{
+	if (p == 0 || isMarked(p))
+		return 0;
+	Mark(p);
+	return count_blocks(JT(p)) + count_blocks(JF(p)) + 1;
+}
+
+/*
+ * Do a depth first search on the flow graph, numbering the
+ * basic blocks, and entering them into the 'blocks' array.
+ */
+static void
+number_blks_r(p)
+	struct block *p;
+{
+	int n;
+
+	if (p == 0 || isMarked(p))
+		return;
+
+	Mark(p);
+	n = n_blocks++;
+	p->id = n;
+	blocks[n] = p;
+
+	number_blks_r(JT(p));
+	number_blks_r(JF(p));
+}
+
+/*
+ * Return the number of stmts in the flowgraph reachable by 'p'.
+ * The nodes should be unmarked before calling.
+ */
+static int
+count_stmts(p)
+	struct block *p;
+{
+	int n;
+
+	if (p == 0 || isMarked(p))
+		return 0;
+	Mark(p);
+	n = count_stmts(JT(p)) + count_stmts(JF(p));
+	return slength(p->stmts) + n + 1;
+}
+
+/*
+ * Allocate memory. All allocation is done before optimization
+ * is begun. A linear bound on the size of all data structures is computed
+ * from the total number of blocks and/or statements.
+ */
+static void
+opt_init(root)
+	struct block *root;
+{
+	u_long *p;
+	int i, n, max_stmts;
+
+	/*
+	 * First, count the blocks, so we can malloc an array to map
+	 * block number to block. Then, put the blocks into the array.
+	 */
+	unMarkAll();
+	n = count_blocks(root);
+	blocks = (struct block **)malloc(n * sizeof(*blocks));
+	unMarkAll();
+	n_blocks = 0;
+	number_blks_r(root);
+
+	n_edges = 2 * n_blocks;
+	edges = (struct edge **)malloc(n_edges * sizeof(*edges));
+
+	/*
+	 * The number of levels is bounded by the number of nodes.
+	 */
+	levels = (struct block **)malloc(n_blocks * sizeof(*levels));
+
+	edgewords = n_edges / (8 * sizeof(u_long)) + 1;
+	nodewords = n_blocks / (8 * sizeof(u_long)) + 1;
+
+	/* XXX */
+	space = (u_long *)malloc(2 * n_blocks * nodewords * sizeof(*space)
+				 + n_edges * edgewords * sizeof(*space));
+	p = space;
+	all_dom_sets = p;
+	for (i = 0; i < n; ++i) {
+		blocks[i]->dom = p;
+		p += nodewords;
+	}
+	all_closure_sets = p;
+	for (i = 0; i < n; ++i) {
+		blocks[i]->closure = p;
+		p += nodewords;
+	}
+	all_edge_sets = p;
+	for (i = 0; i < n; ++i) {
+		register struct block *b = blocks[i];
+
+		b->et.edom = p;
+		p += edgewords;
+		b->ef.edom = p;
+		p += edgewords;
+		b->et.id = i;
+		edges[i] = &b->et;
+		b->ef.id = n_blocks + i;
+		edges[n_blocks + i] = &b->ef;
+		b->et.pred = b;
+		b->ef.pred = b;
+	}
+	max_stmts = 0;
+	for (i = 0; i < n; ++i)
+		max_stmts += slength(blocks[i]->stmts) + 1;
+	/*
+	 * We allocate at most 3 value numbers per statement,
+	 * so this is an upper bound on the number of valnodes
+	 * we'll need.
+	 */
+	maxval = 3 * max_stmts;
+	vmap = (struct vmapinfo *)malloc(maxval * sizeof(*vmap));
+	vnode_base = (struct valnode *)malloc(maxval * sizeof(*vnode_base));
+}
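The sizing arithmetic in opt_init() is easy to check by hand: one bit per
node (or edge) rounded up to whole words, with one extra word so an exact
multiple of the word size still fits, and at most three value numbers per
statement. For example:

    #include <stdio.h>

    int
    main()
    {
    	int n_blocks = 37, max_stmts = 100;
    	int n_edges = 2 * n_blocks;
    	int bits = 8 * (int)sizeof(unsigned long);

    	/* One spare word, as in opt_init(). */
    	printf("nodewords=%d edgewords=%d maxval=%d\n",
    	    n_blocks / bits + 1, n_edges / bits + 1, 3 * max_stmts);
    	return 0;
    }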
+
+/*
+ * Some pointers used to convert the basic block form of the code,
+ * into the array form that BPF requires. 'fstart' will point to
+ * the malloc'd array while 'ftail' is used during the recursive traversal.
+ */
+static struct bpf_insn *fstart;
+static struct bpf_insn *ftail;
+
+#ifdef BDEBUG
+int bids[1000];
+#endif
+
+static void
+convert_code_r(p)
+	struct block *p;
+{
+	struct bpf_insn *dst;
+	struct slist *src;
+	int slen;
+	u_int off;
+
+	if (p == 0 || isMarked(p))
+		return;
+	Mark(p);
+
+	convert_code_r(JF(p));
+	convert_code_r(JT(p));
+
+	slen = slength(p->stmts);
+	dst = ftail -= slen + 1;
+
+	p->offset = dst - fstart;
+
+	for (src = p->stmts; src; src = src->next) {
+		if (src->s.code == NOP)
+			continue;
+		dst->code = (u_short)src->s.code;
+		dst->k = src->s.k;
+		++dst;
+	}
+#ifdef BDEBUG
+	bids[dst - fstart] = p->id + 1;
+#endif
+	dst->code = (u_short)p->s.code;
+	dst->k = p->s.k;
+	if (JT(p)) {
+		off = JT(p)->offset - (p->offset + slen) - 1;
+		if (off >= 256)
+			bpf_error("long jumps not supported");
+		dst->jt = off;
+		off = JF(p)->offset - (p->offset + slen) - 1;
+		if (off >= 256)
+			bpf_error("long jumps not supported");
+		dst->jf = off;
+	}
+}
+
+
+/*
+ * Convert flowgraph intermediate representation to the
+ * BPF array representation. Set *lenp to the number of instructions.
+ */
+struct bpf_insn *
+icode_to_fcode(root, lenp)
+	struct block *root;
+	int *lenp;
+{
+	int n;
+	struct bpf_insn *fp;
+
+	unMarkAll();
+	n = *lenp = count_stmts(root);
+
+	fp = (struct bpf_insn *)malloc(sizeof(*fp) * n);
+	memset((char *)fp, 0, sizeof(*fp) * n);
+	fstart = fp;
+	ftail = fp + n;
+
+	unMarkAll();
+	convert_code_r(root);
+
+	return fp;
+}
+
+#ifdef BDEBUG
+opt_dump(root)
+	struct block *root;
+{
+	struct bpf_program f;
+
+	memset(bids, 0, sizeof bids);
+	f.bf_insns = icode_to_fcode(root, &f.bf_len);
+	bpf_dump(&f, 1);
+	putchar('\n');
+	free((char *)f.bf_insns);
+}
+#endif
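A quick check of convert_code_r()'s displacement formula: BPF conditional
jumps are relative to the next instruction, so a branch sitting at
p->offset + slen that targets a block at offset 20 encodes
20 - (10 + 2) - 1 = 7, and anything that does not fit in the 8-bit jt/jf
fields trips the "long jumps not supported" error:

    #include <stdio.h>

    int
    main()
    {
    	unsigned int p_offset = 10, slen = 2, target = 20;
    	unsigned int off = target - (p_offset + slen) - 1;

    	/* jt/jf are u_char fields in struct bpf_insn, hence off >= 256
    	 * being fatal above. */
    	printf("displacement %u, %s\n", off,
    	    off >= 256 ? "long jump" : "fits");	/* "displacement 7, fits" */
    	return 0;
    }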
diff --git a/pppd/pcap-namedb.h b/pppd/pcap-namedb.h
new file mode 100644
index 0000000..c1624a2
--- /dev/null
+++ b/pppd/pcap-namedb.h
@@ -0,0 +1,58 @@
+/* From NetBSD: pcap-namedb.h,v 1.2 1995/03/06 11:38:48 mycroft Exp */
+
+/*
+ * Copyright (c) 1994
+ *	The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ *    must display the following acknowledgement:
+ *	This product includes software developed by the Computer Systems
+ *	Engineering Group at Lawrence Berkeley Laboratory.
+ * 4. Neither the name of the University nor of the Laboratory may be used
+ *    to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#) Header: pcap-namedb.h,v 1.2 94/06/14 20:03:34 leres Exp (LBL)
+ */
+
+#ifndef lib_pcap_namedb_h
+#define lib_pcap_namedb_h
+
+unsigned long **pcap_nametoaddr __P((const char *));
+unsigned long pcap_nametonetaddr __P((const char *));
+
+int pcap_nametoport __P((const char *, int *, int *));
+int pcap_nametoproto __P((const char *));
+int pcap_nametopppproto __P((const char *));
+/*
+ * If a protocol is unknown, PROTO_UNDEF is returned.
+ * Also, pcap_nametoport() returns the protocol along with the port number.
+ * If there are ambiguous entries in /etc/services (i.e. domain
+ * can be either tcp or udp) PROTO_UNDEF is returned.
+ */
+#define PROTO_UNDEF	-1
+
+/* XXX move these to pcap-int.h? */
+unsigned long __pcap_atoin __P((const char *));
+
+#endif
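A hypothetical caller of the interface above (a sketch: it assumes the
matching nametoaddr.c from this import is linked in, that __P() is provided
by the surrounding headers, and that "domain" is listed for both tcp and udp
in /etc/services, which is the ambiguous case the comment describes):

    #include <stdio.h>
    #include "pcap-namedb.h"

    int
    main()
    {
    	int port, proto;

    	if (!pcap_nametoport("domain", &port, &proto))
    		return 1;	/* unknown service */
    	printf("port %d proto %d%s\n", port, proto,
    	    proto == PROTO_UNDEF ? " (tcp or udp)" : "");
    	return 0;
    }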
diff --git a/pppd/pcap.h b/pppd/pcap.h
new file mode 100644
index 0000000..48f5d95
--- /dev/null
+++ b/pppd/pcap.h
@@ -0,0 +1,58 @@
+/* $NetBSD: pcap.h,v 1.2 1995/03/06 11:39:07 mycroft Exp $ */
+
+/*
+ * Copyright (c) 1993, 1994
+ *	The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ *    must display the following acknowledgement:
+ *	This product includes software developed by the Computer Systems
+ *	Engineering Group at Lawrence Berkeley Laboratory.
+ * 4. Neither the name of the University nor of the Laboratory may be used
+ *    to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#) Header: pcap.h,v 1.15 94/06/14 20:03:34 leres Exp (LBL)
+ */
+
+#ifndef lib_pcap_h
+#define lib_pcap_h
+
+#include <sys/types.h>
+#include <sys/time.h>
+
+#include <net/bpf.h>
+
+#define PCAP_VERSION_MAJOR 2
+#define PCAP_VERSION_MINOR 4
+
+#define PCAP_ERRBUF_SIZE 256
+
+char	*bpf_geterr __P((void));
+int	bpf_compile __P((struct bpf_program *, char *, int));
+
+unsigned int	bpf_filter __P((struct bpf_insn *, unsigned char *,
+		    unsigned int, unsigned int));
+char	*bpf_image __P((struct bpf_insn *, int));
+
+#endif
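A hypothetical driver for the two entry points this header exports (a sketch:
it assumes the scanner/grammar/gencode objects from this import are linked in,
that the third bpf_compile argument is an optimize flag, and that bpf_compile
follows the usual negative-on-error convention; the filter expression is
illustrative):

    #include <stdio.h>
    #include <string.h>
    #include "pcap.h"

    int
    main()
    {
    	struct bpf_program prog;
    	unsigned char pkt[128];

    	memset(pkt, 0, sizeof(pkt));
    	if (bpf_compile(&prog, "ip and not icmp", 1) < 0) {
    		fprintf(stderr, "filter: %s\n", bpf_geterr());
    		return 1;
    	}
    	/* Zero means "no match"; non-zero is how much of the packet
    	 * the filter accepts. */
    	printf("match: %u\n",
    	    bpf_filter(prog.bf_insns, pkt, sizeof(pkt), sizeof(pkt)));
    	return 0;
    }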
{ bpf_error("illegal char '%c'", *yytext); } +%% +void +lex_init(buf) + char *buf; +{ +#ifdef FLEX_SCANNER + if (yy_current_buffer) + yy_delete_buffer(yy_current_buffer); + yy_switch_to_buffer(yy_scan_string(buf)); +#else + in_buffer = buf; +#endif +} + +/* + * Also define a yywrap. Note that if we're using flex, it will + * define a macro to map this identifier to pcap_wrap. + */ +int +yywrap() +{ + return 1; +} + +/* Hex digit to integer. */ +static inline int +xdtoi(c) + register int c; +{ + if (isdigit(c)) + return c - '0'; + else if (islower(c)) + return c - 'a' + 10; + else + return c - 'A' + 10; +} + +/* + * Convert string to integer. Just like atoi(), but checks for + * preceding 0x or 0 and uses hex or octal instead of decimal. + */ +static int +stoi(s) + char *s; +{ + int base = 10; + int n = 0; + + if (*s == '0') { + if (s[1] == 'x' || s[1] == 'X') { + s += 2; + base = 16; + } + else { + base = 8; + s += 1; + } + } + while (*s) + n = n * base + xdtoi(*s++); + + return n; +} +