
net: filter: make BPF conversion more readable

Introduce BPF helper macros to define instructions
(similar to old BPF_STMT/BPF_JUMP macros)

Use them while converting classic BPF to internal BPF
and in the BPF testsuite later.

Signed-off-by: Alexei Starovoitov <ast@plumgrid.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Alexei Starovoitov 2014-05-08 14:10:51 -07:00 committed by David S. Miller
parent 05ab2dae65
commit 9739eef13c
2 changed files with 101 additions and 92 deletions
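
Before the diff itself, a small illustrative sketch (not part of the commit): with the helper macros introduced below, an internal BPF sequence can be written as a plain initializer array, much like the classic BPF_STMT/BPF_JUMP style the commit message refers to. The program and operand values are invented for illustration; BPF_REG_A and struct sock_filter_int are taken from the diff.

/* Illustration only: A = 1; A += 2; exit */
struct sock_filter_int sample[] = {
	BPF_ALU64_IMM(BPF_MOV, BPF_REG_A, 1),	/* A = 1 */
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_A, 2),	/* A += 2 */
	BPF_EXIT_INSN(),			/* end of program */
};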


@@ -79,6 +79,57 @@ enum {
/* BPF program can access up to 512 bytes of stack space. */
#define MAX_BPF_STACK 512
+/* bpf_add|sub|...: a += x, bpf_mov: a = x */
+#define BPF_ALU64_REG(op, a, x) \
+	((struct sock_filter_int) {BPF_ALU64|BPF_OP(op)|BPF_X, a, x, 0, 0})
+#define BPF_ALU32_REG(op, a, x) \
+	((struct sock_filter_int) {BPF_ALU|BPF_OP(op)|BPF_X, a, x, 0, 0})
+/* bpf_add|sub|...: a += imm, bpf_mov: a = imm */
+#define BPF_ALU64_IMM(op, a, imm) \
+	((struct sock_filter_int) {BPF_ALU64|BPF_OP(op)|BPF_K, a, 0, 0, imm})
+#define BPF_ALU32_IMM(op, a, imm) \
+	((struct sock_filter_int) {BPF_ALU|BPF_OP(op)|BPF_K, a, 0, 0, imm})
+/* R0 = *(uint *) (skb->data + off) */
+#define BPF_LD_ABS(size, off) \
+	((struct sock_filter_int) {BPF_LD|BPF_SIZE(size)|BPF_ABS, 0, 0, 0, off})
+/* R0 = *(uint *) (skb->data + x + off) */
+#define BPF_LD_IND(size, x, off) \
+	((struct sock_filter_int) {BPF_LD|BPF_SIZE(size)|BPF_IND, 0, x, 0, off})
+/* a = *(uint *) (x + off) */
+#define BPF_LDX_MEM(sz, a, x, off) \
+	((struct sock_filter_int) {BPF_LDX|BPF_SIZE(sz)|BPF_MEM, a, x, off, 0})
+/* if (a 'op' x) goto pc+off */
+#define BPF_JMP_REG(op, a, x, off) \
+	((struct sock_filter_int) {BPF_JMP|BPF_OP(op)|BPF_X, a, x, off, 0})
+/* if (a 'op' imm) goto pc+off */
+#define BPF_JMP_IMM(op, a, imm, off) \
+	((struct sock_filter_int) {BPF_JMP|BPF_OP(op)|BPF_K, a, 0, off, imm})
+#define BPF_EXIT_INSN() \
+	((struct sock_filter_int) {BPF_JMP|BPF_EXIT, 0, 0, 0, 0})
+static inline int size_to_bpf(int size)
+{
+	switch (size) {
+	case 1:
+		return BPF_B;
+	case 2:
+		return BPF_H;
+	case 4:
+		return BPF_W;
+	case 8:
+		return BPF_DW;
+	default:
+		return -EINVAL;
+	}
+}
/* Macro to invoke filter function. */
#define SK_RUN_FILTER(filter, ctx) (*filter->bpf_func)(ctx, filter->insnsi)
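
A reading aid rather than part of the diff: given the positional initializers above and the insn->code / a_reg / x_reg / off / imm assignments they replace in the second file below, a call such as BPF_JMP_IMM(BPF_JNE, BPF_REG_TMP, 0, 1) (used in the SKF_AD_IFINDEX/SKF_AD_HATYPE case) expands to roughly this designated-initializer form:

/* Sketch of the expansion; field names follow the assignments being removed. */
struct sock_filter_int jne = {
	.code  = BPF_JMP | BPF_JNE | BPF_K,	/* conditional jump on immediate */
	.a_reg = BPF_REG_TMP,			/* register being tested */
	.x_reg = 0,				/* unused in the K form */
	.off   = 1,				/* if (tmp != 0) goto pc+1 */
	.imm   = 0,				/* immediate operand */
};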


@@ -668,10 +668,9 @@ static bool convert_bpf_extensions(struct sock_filter *fp,
	case SKF_AD_OFF + SKF_AD_PROTOCOL:
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, protocol) != 2);
-		insn->code = BPF_LDX | BPF_MEM | BPF_H;
-		insn->a_reg = BPF_REG_A;
-		insn->x_reg = BPF_REG_CTX;
-		insn->off = offsetof(struct sk_buff, protocol);
+		/* A = *(u16 *) (ctx + offsetof(protocol)) */
+		*insn = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX,
+				    offsetof(struct sk_buff, protocol));
		insn++;
		/* A = ntohs(A) [emitting a nop or swap16] */
@@ -681,37 +680,27 @@ static bool convert_bpf_extensions(struct sock_filter *fp,
		break;
	case SKF_AD_OFF + SKF_AD_PKTTYPE:
-		insn->code = BPF_LDX | BPF_MEM | BPF_B;
-		insn->a_reg = BPF_REG_A;
-		insn->x_reg = BPF_REG_CTX;
-		insn->off = pkt_type_offset();
+		*insn = BPF_LDX_MEM(BPF_B, BPF_REG_A, BPF_REG_CTX,
+				    pkt_type_offset());
		if (insn->off < 0)
			return false;
		insn++;
-		insn->code = BPF_ALU | BPF_AND | BPF_K;
-		insn->a_reg = BPF_REG_A;
-		insn->imm = PKT_TYPE_MAX;
+		*insn = BPF_ALU32_IMM(BPF_AND, BPF_REG_A, PKT_TYPE_MAX);
		break;
	case SKF_AD_OFF + SKF_AD_IFINDEX:
	case SKF_AD_OFF + SKF_AD_HATYPE:
-		if (FIELD_SIZEOF(struct sk_buff, dev) == 8)
-			insn->code = BPF_LDX | BPF_MEM | BPF_DW;
-		else
-			insn->code = BPF_LDX | BPF_MEM | BPF_W;
-		insn->a_reg = BPF_REG_TMP;
-		insn->x_reg = BPF_REG_CTX;
-		insn->off = offsetof(struct sk_buff, dev);
+		*insn = BPF_LDX_MEM(size_to_bpf(FIELD_SIZEOF(struct sk_buff, dev)),
+				    BPF_REG_TMP, BPF_REG_CTX,
+				    offsetof(struct sk_buff, dev));
		insn++;
-		insn->code = BPF_JMP | BPF_JNE | BPF_K;
-		insn->a_reg = BPF_REG_TMP;
-		insn->imm = 0;
-		insn->off = 1;
+		/* if (tmp != 0) goto pc+1 */
+		*insn = BPF_JMP_IMM(BPF_JNE, BPF_REG_TMP, 0, 1);
		insn++;
-		insn->code = BPF_JMP | BPF_EXIT;
+		*insn = BPF_EXIT_INSN();
		insn++;
		BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4);
@@ -732,55 +721,45 @@ static bool convert_bpf_extensions(struct sock_filter *fp,
	case SKF_AD_OFF + SKF_AD_MARK:
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
-		insn->code = BPF_LDX | BPF_MEM | BPF_W;
-		insn->a_reg = BPF_REG_A;
-		insn->x_reg = BPF_REG_CTX;
-		insn->off = offsetof(struct sk_buff, mark);
+		*insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX,
+				    offsetof(struct sk_buff, mark));
		break;
	case SKF_AD_OFF + SKF_AD_RXHASH:
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);
-		insn->code = BPF_LDX | BPF_MEM | BPF_W;
-		insn->a_reg = BPF_REG_A;
-		insn->x_reg = BPF_REG_CTX;
-		insn->off = offsetof(struct sk_buff, hash);
+		*insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX,
+				    offsetof(struct sk_buff, hash));
		break;
	case SKF_AD_OFF + SKF_AD_QUEUE:
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, queue_mapping) != 2);
-		insn->code = BPF_LDX | BPF_MEM | BPF_H;
-		insn->a_reg = BPF_REG_A;
-		insn->x_reg = BPF_REG_CTX;
-		insn->off = offsetof(struct sk_buff, queue_mapping);
+		*insn = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX,
+				    offsetof(struct sk_buff, queue_mapping));
		break;
	case SKF_AD_OFF + SKF_AD_VLAN_TAG:
	case SKF_AD_OFF + SKF_AD_VLAN_TAG_PRESENT:
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
-		insn->code = BPF_LDX | BPF_MEM | BPF_H;
-		insn->a_reg = BPF_REG_A;
-		insn->x_reg = BPF_REG_CTX;
-		insn->off = offsetof(struct sk_buff, vlan_tci);
+		/* A = *(u16 *) (ctx + offsetof(vlan_tci)) */
+		*insn = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX,
+				    offsetof(struct sk_buff, vlan_tci));
		insn++;
		BUILD_BUG_ON(VLAN_TAG_PRESENT != 0x1000);
		if (fp->k == SKF_AD_OFF + SKF_AD_VLAN_TAG) {
-			insn->code = BPF_ALU | BPF_AND | BPF_K;
-			insn->a_reg = BPF_REG_A;
-			insn->imm = ~VLAN_TAG_PRESENT;
+			*insn = BPF_ALU32_IMM(BPF_AND, BPF_REG_A,
+					      ~VLAN_TAG_PRESENT);
		} else {
-			insn->code = BPF_ALU | BPF_RSH | BPF_K;
-			insn->a_reg = BPF_REG_A;
-			insn->imm = 12;
+			/* A >>= 12 */
+			*insn = BPF_ALU32_IMM(BPF_RSH, BPF_REG_A, 12);
			insn++;
-			insn->code = BPF_ALU | BPF_AND | BPF_K;
-			insn->a_reg = BPF_REG_A;
-			insn->imm = 1;
+			/* A &= 1 */
+			*insn = BPF_ALU32_IMM(BPF_AND, BPF_REG_A, 1);
		}
		break;
@@ -790,21 +769,15 @@ static bool convert_bpf_extensions(struct sock_filter *fp,
	case SKF_AD_OFF + SKF_AD_CPU:
	case SKF_AD_OFF + SKF_AD_RANDOM:
		/* arg1 = ctx */
-		insn->code = BPF_ALU64 | BPF_MOV | BPF_X;
-		insn->a_reg = BPF_REG_ARG1;
-		insn->x_reg = BPF_REG_CTX;
+		*insn = BPF_ALU64_REG(BPF_MOV, BPF_REG_ARG1, BPF_REG_CTX);
		insn++;
		/* arg2 = A */
-		insn->code = BPF_ALU64 | BPF_MOV | BPF_X;
-		insn->a_reg = BPF_REG_ARG2;
-		insn->x_reg = BPF_REG_A;
+		*insn = BPF_ALU64_REG(BPF_MOV, BPF_REG_ARG2, BPF_REG_A);
		insn++;
		/* arg3 = X */
-		insn->code = BPF_ALU64 | BPF_MOV | BPF_X;
-		insn->a_reg = BPF_REG_ARG3;
-		insn->x_reg = BPF_REG_X;
+		*insn = BPF_ALU64_REG(BPF_MOV, BPF_REG_ARG3, BPF_REG_X);
		insn++;
		/* Emit call(ctx, arg2=A, arg3=X) */
@@ -829,9 +802,8 @@ static bool convert_bpf_extensions(struct sock_filter *fp,
		break;
	case SKF_AD_OFF + SKF_AD_ALU_XOR_X:
-		insn->code = BPF_ALU | BPF_XOR | BPF_X;
-		insn->a_reg = BPF_REG_A;
-		insn->x_reg = BPF_REG_X;
+		/* A ^= X */
+		*insn = BPF_ALU32_REG(BPF_XOR, BPF_REG_A, BPF_REG_X);
		break;
	default:
@@ -897,9 +869,7 @@ do_pass:
	fp = prog;
	if (new_insn) {
-		new_insn->code = BPF_ALU64 | BPF_MOV | BPF_X;
-		new_insn->a_reg = BPF_REG_CTX;
-		new_insn->x_reg = BPF_REG_ARG1;
+		*new_insn = BPF_ALU64_REG(BPF_MOV, BPF_REG_CTX, BPF_REG_ARG1);
	}
	new_insn++;
@@ -1027,34 +997,28 @@ do_pass:
		/* ldxb 4 * ([14] & 0xf) is remaped into 6 insns. */
		case BPF_LDX | BPF_MSH | BPF_B:
-			insn->code = BPF_ALU64 | BPF_MOV | BPF_X;
-			insn->a_reg = BPF_REG_TMP;
-			insn->x_reg = BPF_REG_A;
+			/* tmp = A */
+			*insn = BPF_ALU64_REG(BPF_MOV, BPF_REG_TMP, BPF_REG_A);
			insn++;
-			insn->code = BPF_LD | BPF_ABS | BPF_B;
-			insn->a_reg = BPF_REG_A;
-			insn->imm = fp->k;
+			/* A = R0 = *(u8 *) (skb->data + K) */
+			*insn = BPF_LD_ABS(BPF_B, fp->k);
			insn++;
-			insn->code = BPF_ALU | BPF_AND | BPF_K;
-			insn->a_reg = BPF_REG_A;
-			insn->imm = 0xf;
+			/* A &= 0xf */
+			*insn = BPF_ALU32_IMM(BPF_AND, BPF_REG_A, 0xf);
			insn++;
-			insn->code = BPF_ALU | BPF_LSH | BPF_K;
-			insn->a_reg = BPF_REG_A;
-			insn->imm = 2;
+			/* A <<= 2 */
+			*insn = BPF_ALU32_IMM(BPF_LSH, BPF_REG_A, 2);
			insn++;
-			insn->code = BPF_ALU64 | BPF_MOV | BPF_X;
-			insn->a_reg = BPF_REG_X;
-			insn->x_reg = BPF_REG_A;
+			/* X = A */
+			*insn = BPF_ALU64_REG(BPF_MOV, BPF_REG_X, BPF_REG_A);
			insn++;
-			insn->code = BPF_ALU64 | BPF_MOV | BPF_X;
-			insn->a_reg = BPF_REG_A;
-			insn->x_reg = BPF_REG_TMP;
+			/* A = tmp */
+			*insn = BPF_ALU64_REG(BPF_MOV, BPF_REG_A, BPF_REG_TMP);
			break;
		/* RET_K, RET_A are remaped into 2 insns. */
@@ -1068,7 +1032,7 @@ do_pass:
			insn->imm = fp->k;
			insn++;
-			insn->code = BPF_JMP | BPF_EXIT;
+			*insn = BPF_EXIT_INSN();
			break;
		/* Store to stack. */
@@ -1102,16 +1066,12 @@ do_pass:
		/* X = A */
		case BPF_MISC | BPF_TAX:
-			insn->code = BPF_ALU64 | BPF_MOV | BPF_X;
-			insn->a_reg = BPF_REG_X;
-			insn->x_reg = BPF_REG_A;
+			*insn = BPF_ALU64_REG(BPF_MOV, BPF_REG_X, BPF_REG_A);
			break;
		/* A = X */
		case BPF_MISC | BPF_TXA:
-			insn->code = BPF_ALU64 | BPF_MOV | BPF_X;
-			insn->a_reg = BPF_REG_A;
-			insn->x_reg = BPF_REG_X;
+			*insn = BPF_ALU64_REG(BPF_MOV, BPF_REG_A, BPF_REG_X);
			break;
		/* A = skb->len or X = skb->len */
@@ -1126,10 +1086,8 @@ do_pass:
		/* access seccomp_data fields */
		case BPF_LDX | BPF_ABS | BPF_W:
-			insn->code = BPF_LDX | BPF_MEM | BPF_W;
-			insn->a_reg = BPF_REG_A;
-			insn->x_reg = BPF_REG_CTX;
-			insn->off = fp->k;
+			/* A = *(u32 *) (ctx + K) */
+			*insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX, fp->k);
			break;
		default:
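
As a closing illustration (not from the commit), the parallel the commit message draws with the classic helpers: a classic BPF_JUMP entry and the new-style BPF_JMP_IMM entry that expresses the same test. The value 0x0800 is an arbitrary example operand.

/* Classic BPF: if (A == 0x0800) skip the next instruction. */
struct sock_filter classic_jeq =
	BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x0800, 1, 0);

/* Internal BPF with the new helper: if (A == 0x0800) goto pc+1. */
struct sock_filter_int internal_jeq =
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_A, 0x0800, 1);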