aboutsummaryrefslogtreecommitdiff
path: root/optimize.c
diff options
context:
space:
mode:
authorGuy Harris <guy@alum.mit.edu>2018-10-22 01:58:04 -0700
committerGuy Harris <guy@alum.mit.edu>2018-10-22 01:58:04 -0700
commitda570b00101b264004db032b4e67b90600b0edaf (patch)
tree07d05ea78bcb0883c141a6608fa05c630eb4fbb9 /optimize.c
parent4d6e12cd183cda6eade8cac4244089cee0048c7e (diff)
Catch shifts > 31 bits generated by the optimizer.
We already caught them in the code generator - but only if they were in the range 32 to 2^31-1; we weren't catching them when shifts by the X register got optimized into shifts by a constant. So we need to: 1) catch it in the optimizer; 2) fix the check in the code generator. (In the longer run, we need to clear up signed vs. unsigned stuff in the code generator and optimizer.) Credit to OSS-Fuzz for finding the optimizer issue (which was using a shift constant that was "negative", thus pointing out the code generator issue).
Diffstat (limited to 'optimize.c')
-rw-r--r--optimize.c10
1 files changed, 10 insertions, 0 deletions
diff --git a/optimize.c b/optimize.c
index 4c2a84c1..6c6deb00 100644
--- a/optimize.c
+++ b/optimize.c
@@ -1201,6 +1201,16 @@ opt_stmt(compiler_state_t *cstate, opt_state_t *opt_state,
else {
s->code = BPF_ALU|BPF_K|op;
s->k = opt_state->vmap[val[X_ATOM]].const_val;
+ /*
+ * XXX - we need to make up our minds
+ * as to what integers are signed and
+ * what integers are unsigned in BPF
+ * programs and in our IR.
+ */
+ if ((op == BPF_LSH || op == BPF_RSH) &&
+ (s->k < 0 || s->k > 31))
+ opt_error(cstate, opt_state,
+ "shift by more than 31 bits");
opt_state->done = 0;
val[A_ATOM] =
F(opt_state, s->code, val[A_ATOM], K(s->k));