/*
 * dhcpcd: BPF arp and bootp filtering
 * Copyright (c) 2006-2019 Roy Marples <roy@marples.name>
 * All rights reserved
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/ioctl.h>
#include <sys/socket.h>
#include <sys/uio.h>

#include <arpa/inet.h>

#include <net/if.h>
#include <netinet/in.h>
#include <netinet/if_ether.h>
#include <netinet/ip.h>
#include <netinet/udp.h>

#ifdef __linux__
/* Special BPF snowflake. */
#include <linux/filter.h>
#define bpf_insn sock_filter
#else
#include <net/bpf.h>
#endif

#include <errno.h>
#include <fcntl.h>
#include <paths.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include "common.h"
#include "arp.h"
#include "bpf.h"
#include "dhcp.h"
#include "if.h"
#include "logerr.h"

#define ARP_ADDRS_MAX 3

/* BPF helper macros */
#ifdef __linux__
#define BPF_WHOLEPACKET 0x7fffffff /* work around buggy LPF filters */
#else
#define BPF_WHOLEPACKET ~0U
#endif

/* Macros to update the BPF structure */
#define BPF_SET_STMT(insn, c, v) { \
	(insn)->code = (c); \
	(insn)->jt = 0; \
	(insn)->jf = 0; \
	(insn)->k = (uint32_t)(v); \
};

#define BPF_SET_JUMP(insn, c, v, t, f) { \
	(insn)->code = (c); \
	(insn)->jt = (t); \
	(insn)->jf = (f); \
	(insn)->k = (uint32_t)(v); \
};
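
/*
 * Illustrative sketch only (not used by the filters below): the macros
 * above fill one bpf_insn at a time, for example to accept a frame
 * whose EtherType is ARP:
 *
 *	struct bpf_insn ins[4], *bp = ins;
 *
 *	BPF_SET_STMT(bp, BPF_LD + BPF_H + BPF_ABS,
 *	    offsetof(struct ether_header, ether_type));
 *	bp++;
 *	BPF_SET_JUMP(bp, BPF_JMP + BPF_JEQ + BPF_K, ETHERTYPE_ARP, 0, 1);
 *	bp++;
 *	BPF_SET_STMT(bp, BPF_RET + BPF_K, BPF_WHOLEPACKET);
 *	bp++;
 *	BPF_SET_STMT(bp, BPF_RET + BPF_K, 0);
 *	bp++;
 */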

size_t
bpf_frame_header_len(const struct interface *ifp)
{

	switch(ifp->family) {
	case ARPHRD_ETHER:
		return sizeof(struct ether_header);
	default:
		return 0;
	}
}

#ifndef __linux__
/* Linux is a special snowflake for opening, attaching and reading BPF.
 * See if-linux.c for the Linux specific BPF functions. */

const char *bpf_name = "Berkeley Packet Filter";

int
bpf_open(struct interface *ifp, int (*filter)(struct interface *, int))
{
	struct ipv4_state *state;
	int fd = -1;
	struct ifreq ifr;
	int ibuf_len = 0;
	size_t buf_len;
	struct bpf_version pv;
#ifdef BIOCIMMEDIATE
	unsigned int flags;
#endif
#ifndef O_CLOEXEC
	int fd_opts;
#endif

#ifdef _PATH_BPF
	fd = open(_PATH_BPF, O_RDWR | O_NONBLOCK
#ifdef O_CLOEXEC
	    | O_CLOEXEC
#endif
	    );
#else
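	/* No cloning BPF device, so probe numbered /dev/bpfN nodes
	 * until a free one is found. */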
	char device[32];
	int n = 0;

	do {
		snprintf(device, sizeof(device), "/dev/bpf%d", n++);
		fd = open(device, O_RDWR | O_NONBLOCK
#ifdef O_CLOEXEC
		    | O_CLOEXEC
#endif
		    );
	} while (fd == -1 && errno == EBUSY);
#endif

	if (fd == -1)
		return -1;

#ifndef O_CLOEXEC
	if ((fd_opts = fcntl(fd, F_GETFD)) == -1 ||
	    fcntl(fd, F_SETFD, fd_opts | FD_CLOEXEC) == -1) {
		close(fd);
		return -1;
	}
#endif

	memset(&pv, 0, sizeof(pv));
	if (ioctl(fd, BIOCVERSION, &pv) == -1)
		goto eexit;
	if (pv.bv_major != BPF_MAJOR_VERSION ||
	    pv.bv_minor < BPF_MINOR_VERSION) {
		logerrx("BPF version mismatch - recompile");
		goto eexit;
	}

	if (filter(ifp, fd) != 0)
		goto eexit;

	memset(&ifr, 0, sizeof(ifr));
	strlcpy(ifr.ifr_name, ifp->name, sizeof(ifr.ifr_name));
	if (ioctl(fd, BIOCSETIF, &ifr) == -1)
		goto eexit;

	/* Get the required BPF buffer length from the kernel. */
	if (ioctl(fd, BIOCGBLEN, &ibuf_len) == -1)
		goto eexit;
	buf_len = (size_t)ibuf_len;
	state = ipv4_getstate(ifp);
	if (state == NULL)
		goto eexit;
	if (state->buffer_size != buf_len) {
		void *nb;

		if ((nb = realloc(state->buffer, buf_len)) == NULL)
			goto eexit;
		state->buffer = nb;
		state->buffer_size = buf_len;
	}

#ifdef BIOCIMMEDIATE
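	/* Return packets to the reader as soon as they arrive rather
	 * than waiting for the store buffer to fill or a timeout. */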
	flags = 1;
	if (ioctl(fd, BIOCIMMEDIATE, &flags) == -1)
		goto eexit;
#endif

	return fd;

eexit:
	close(fd);
	return -1;
}

/* BPF requires that we read the entire buffer.
 * So we pass the buffer in the API so we can loop on >1 packet. */
ssize_t
bpf_read(struct interface *ifp, int fd, void *data, size_t len,
    unsigned int *flags)
{
	ssize_t fl = (ssize_t)bpf_frame_header_len(ifp);
	ssize_t bytes;
	struct ipv4_state *state = IPV4_STATE(ifp);

	struct bpf_hdr packet;
	const char *payload;

	*flags &= ~BPF_EOF;
	for (;;) {
		if (state->buffer_len == 0) {
			bytes = read(fd, state->buffer, state->buffer_size);
#if defined(__sun)
			/* After 2^31 bytes, the kernel offset overflows.
			 * To work around this bug, lseek 0. */
			if (bytes == -1 && errno == EINVAL) {
				lseek(fd, 0, SEEK_SET);
				continue;
			}
#endif
			if (bytes == -1 || bytes == 0)
				return bytes;
			state->buffer_len = (size_t)bytes;
			state->buffer_pos = 0;
		}
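		/* Each packet in the buffer is prefixed with a bpf_hdr
		 * describing its header and captured lengths. */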
		bytes = -1;
		memcpy(&packet, state->buffer + state->buffer_pos,
		    sizeof(packet));
		if (state->buffer_pos + packet.bh_caplen + packet.bh_hdrlen >
		    state->buffer_len)
			goto next; /* Packet beyond buffer, drop. */
		payload = state->buffer + state->buffer_pos +
		    packet.bh_hdrlen + fl;
		bytes = (ssize_t)packet.bh_caplen - fl;
		if ((size_t)bytes > len)
			bytes = (ssize_t)len;
		memcpy(data, payload, (size_t)bytes);
next:
		state->buffer_pos += BPF_WORDALIGN(packet.bh_hdrlen +
		    packet.bh_caplen);
		if (state->buffer_pos >= state->buffer_len) {
			state->buffer_len = state->buffer_pos = 0;
			*flags |= BPF_EOF;
		}
		if (bytes != -1)
			return bytes;
	}

	/* NOTREACHED */
}
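
/*
 * Illustrative sketch of how a caller might drain the descriptor once it
 * is readable; handle_packet() is a hypothetical handler:
 *
 *	uint8_t buf[1500];
 *	unsigned int bpf_flags = 0;
 *	ssize_t n;
 *
 *	while (!(bpf_flags & BPF_EOF)) {
 *		n = bpf_read(ifp, fd, buf, sizeof(buf), &bpf_flags);
 *		if (n == -1 || n == 0)
 *			break;
 *		handle_packet(buf, (size_t)n);
 *	}
 */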

int
bpf_attach(int fd, void *filter, unsigned int filter_len)
{
	struct bpf_program pf;

	/* Install the filter. */
	memset(&pf, 0, sizeof(pf));
	pf.bf_insns = filter;
	pf.bf_len = filter_len;
	return ioctl(fd, BIOCSETF, &pf);
}
#endif

#ifndef __sun
/* SunOS is special too - sending via BPF goes nowhere. */
ssize_t
bpf_send(const struct interface *ifp, int fd, uint16_t protocol,
    const void *data, size_t len)
{
	struct iovec iov[2];
	struct ether_header eh;

	switch(ifp->family) {
	case ARPHRD_ETHER:
		memset(&eh.ether_dhost, 0xff, sizeof(eh.ether_dhost));
		memcpy(&eh.ether_shost, ifp->hwaddr, sizeof(eh.ether_shost));
		eh.ether_type = htons(protocol);
		iov[0].iov_base = &eh;
		iov[0].iov_len = sizeof(eh);
		break;
	default:
		iov[0].iov_base = NULL;
		iov[0].iov_len = 0;
		break;
	}
	iov[1].iov_base = UNCONST(data);
	iov[1].iov_len = len;
	return writev(fd, iov, 2);
}
#endif

int
bpf_close(struct interface *ifp, int fd)
{
	struct ipv4_state *state = IPV4_STATE(ifp);

	/* Rewind the buffer on closing. */
	state->buffer_len = state->buffer_pos = 0;
	return close(fd);
}

/* Normally this is also needed by bootp.
 * Once bootp uses it again, the ARP guard here can be removed. */
#ifdef ARP
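/* Worst case instruction count for bpf_cmp_hwaddr() below: a load and a
 * conditional jump per 32-bit chunk of the hardware address, up to one
 * 16-bit and one 8-bit chunk for any remainder, plus the final
 * "return failure" statement. */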
#define BPF_CMP_HWADDR_LEN ((((HWADDR_LEN / 4) + 2) * 2) + 1)
static unsigned int
bpf_cmp_hwaddr(struct bpf_insn *bpf, size_t bpf_len, size_t off,
    bool equal, uint8_t *hwaddr, size_t hwaddr_len)
{
	struct bpf_insn *bp;
	size_t maclen, nlft, njmps;
	uint32_t mac32;
	uint16_t mac16;
	uint8_t jt, jf;

	/* Calc the number of jumps */
	if ((hwaddr_len / 4) >= 128) {
		errno = EINVAL;
		return 0;
	}
	njmps = (hwaddr_len / 4) * 2; /* 2 instructions per check */
	/* We jump after the 1st check. */
	if (njmps)
		njmps -= 2;
	nlft = hwaddr_len % 4;
	if (nlft) {
		njmps += (nlft / 2) * 2;
		nlft = nlft % 2;
		if (nlft)
			njmps += 2;
	}

	/* Skip to positive finish. */
	njmps++;
	if (equal) {
		jt = (uint8_t)njmps;
		jf = 0;
	} else {
		jt = 0;
		jf = (uint8_t)njmps;
	}

	bp = bpf;
	for (; hwaddr_len > 0;
	    hwaddr += maclen, hwaddr_len -= maclen, off += maclen)
	{
		if (bpf_len < 3) {
			errno = ENOBUFS;
			return 0;
		}
		bpf_len -= 3;

		if (hwaddr_len >= 4) {
			maclen = sizeof(mac32);
			memcpy(&mac32, hwaddr, maclen);
			BPF_SET_STMT(bp, BPF_LD + BPF_W + BPF_IND, off);
			bp++;
			BPF_SET_JUMP(bp, BPF_JMP + BPF_JEQ + BPF_K,
			    htonl(mac32), jt, jf);
		} else if (hwaddr_len >= 2) {
			maclen = sizeof(mac16);
			memcpy(&mac16, hwaddr, maclen);
			BPF_SET_STMT(bp, BPF_LD + BPF_H + BPF_IND, off);
			bp++;
			BPF_SET_JUMP(bp, BPF_JMP + BPF_JEQ + BPF_K,
			    htons(mac16), jt, jf);
		} else {
			maclen = sizeof(*hwaddr);
			BPF_SET_STMT(bp, BPF_LD + BPF_B + BPF_IND, off);
			bp++;
			BPF_SET_JUMP(bp, BPF_JMP + BPF_JEQ + BPF_K,
			    *hwaddr, jt, jf);
		}
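		/* Each comparison just emitted is two instructions long,
		 * so the pending jump offsets move two slots closer. */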
		if (jt)
			jt = (uint8_t)(jt - 2);
		if (jf)
			jf = (uint8_t)(jf - 2);
		bp++;
	}

	/* Last step is always return failure.
	 * Next step is a positive finish. */
	BPF_SET_STMT(bp, BPF_RET + BPF_K, 0);
	bp++;

	return (unsigned int)(bp - bpf);
}
#endif

#ifdef ARP

static const struct bpf_insn bpf_arp_ether[] = {
	/* Ensure packet is at least correct size. */
	BPF_STMT(BPF_LD + BPF_W + BPF_LEN, 0),
	BPF_JUMP(BPF_JMP + BPF_JGE + BPF_K, sizeof(struct ether_arp), 1, 0),
	BPF_STMT(BPF_RET + BPF_K, 0),

	/* Check this is an ARP packet. */
	BPF_STMT(BPF_LD + BPF_H + BPF_ABS,
	    offsetof(struct ether_header, ether_type)),
	BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, ETHERTYPE_ARP, 1, 0),
	BPF_STMT(BPF_RET + BPF_K, 0),

	/* Load frame header length into X */
	BPF_STMT(BPF_LDX + BPF_W + BPF_IMM, sizeof(struct ether_header)),

	/* Make sure the hardware family matches. */
	BPF_STMT(BPF_LD + BPF_H + BPF_IND, offsetof(struct arphdr, ar_hrd)),
	BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, ARPHRD_ETHER, 1, 0),
	BPF_STMT(BPF_RET + BPF_K, 0),

	/* Make sure the hardware length matches. */
	BPF_STMT(BPF_LD + BPF_B + BPF_IND, offsetof(struct arphdr, ar_hln)),
	BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K,
	    sizeof(((struct ether_arp *)0)->arp_sha), 1, 0),
	BPF_STMT(BPF_RET + BPF_K, 0),
};
#define BPF_ARP_ETHER_LEN __arraycount(bpf_arp_ether)

static const struct bpf_insn bpf_arp_filter[] = {
	/* Make sure this is for IP. */
	BPF_STMT(BPF_LD + BPF_H + BPF_IND, offsetof(struct arphdr, ar_pro)),
	BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, ETHERTYPE_IP, 1, 0),
	BPF_STMT(BPF_RET + BPF_K, 0),
	/* Make sure this is an ARP REQUEST. */
	BPF_STMT(BPF_LD + BPF_H + BPF_IND, offsetof(struct arphdr, ar_op)),
	BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, ARPOP_REQUEST, 2, 0),
	/* or ARP REPLY. */
	BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, ARPOP_REPLY, 1, 0),
	BPF_STMT(BPF_RET + BPF_K, 0),
	/* Make sure the protocol length matches. */
	BPF_STMT(BPF_LD + BPF_B + BPF_IND, offsetof(struct arphdr, ar_pln)),
	BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, sizeof(in_addr_t), 1, 0),
	BPF_STMT(BPF_RET + BPF_K, 0),
};
#define BPF_ARP_FILTER_LEN __arraycount(bpf_arp_filter)

#define BPF_ARP_ADDRS_LEN 1 + (ARP_ADDRS_MAX * 2) + 3 + \
    (ARP_ADDRS_MAX * 2) + 1
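/* BPF_ARP_ADDRS_LEN above: one load of the sender IP, a match/return pair
 * per address, the null sender check, its return and the target IP load,
 * then another match/return pair per address and the final reject. */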

#define BPF_ARP_LEN BPF_ARP_ETHER_LEN + BPF_ARP_FILTER_LEN + \
    BPF_CMP_HWADDR_LEN + BPF_ARP_ADDRS_LEN

int
bpf_arp(struct interface *ifp, int fd)
{
	struct bpf_insn bpf[BPF_ARP_LEN];
	struct bpf_insn *bp;
	struct iarp_state *state;
	uint16_t arp_len;

	if (fd == -1)
		return 0;

	bp = bpf;
	/* Check frame header. */
	switch(ifp->family) {
	case ARPHRD_ETHER:
		memcpy(bp, bpf_arp_ether, sizeof(bpf_arp_ether));
		bp += BPF_ARP_ETHER_LEN;
		arp_len = sizeof(struct ether_header)+sizeof(struct ether_arp);
		break;
	default:
		errno = EINVAL;
		return -1;
	}

	/* Copy in the main filter. */
	memcpy(bp, bpf_arp_filter, sizeof(bpf_arp_filter));
	bp += BPF_ARP_FILTER_LEN;

	/* Ensure it's not from us. */
	bp += bpf_cmp_hwaddr(bp, BPF_CMP_HWADDR_LEN, sizeof(struct arphdr),
	    false, ifp->hwaddr, ifp->hwlen);

	state = ARP_STATE(ifp);
	if (TAILQ_FIRST(&state->arp_states)) {
		struct arp_state *astate;
		size_t naddrs;

		/* Match sender protocol address */
		BPF_SET_STMT(bp, BPF_LD + BPF_W + BPF_IND,
		    sizeof(struct arphdr) + ifp->hwlen);
		bp++;
		naddrs = 0;
		TAILQ_FOREACH(astate, &state->arp_states, next) {
			if (++naddrs > ARP_ADDRS_MAX) {
				errno = ENOBUFS;
				logerr(__func__);
				break;
			}
			BPF_SET_JUMP(bp, BPF_JMP + BPF_JEQ + BPF_K,
			    htonl(astate->addr.s_addr), 0, 1);
			bp++;
			BPF_SET_STMT(bp, BPF_RET + BPF_K, arp_len);
			bp++;
		}

		/* If we didn't match sender, then we're only interested in
		 * ARP probes to us, so check the null host sender. */
		BPF_SET_JUMP(bp, BPF_JMP + BPF_JEQ + BPF_K, INADDR_ANY, 1, 0);
		bp++;
		BPF_SET_STMT(bp, BPF_RET + BPF_K, 0);
		bp++;

		/* Match target protocol address */
		BPF_SET_STMT(bp, BPF_LD + BPF_W + BPF_IND,
		    (sizeof(struct arphdr)
		    + (size_t)(ifp->hwlen * 2) + sizeof(in_addr_t)));
		bp++;
		naddrs = 0;
		TAILQ_FOREACH(astate, &state->arp_states, next) {
			if (++naddrs > ARP_ADDRS_MAX) {
				/* Already logged error above. */
				break;
			}
			BPF_SET_JUMP(bp, BPF_JMP + BPF_JEQ + BPF_K,
			    htonl(astate->addr.s_addr), 0, 1);
			bp++;
			BPF_SET_STMT(bp, BPF_RET + BPF_K, arp_len);
			bp++;
		}

		/* Return nothing, no protocol address match. */
		BPF_SET_STMT(bp, BPF_RET + BPF_K, 0);
		bp++;
	}

	return bpf_attach(fd, bpf, (unsigned int)(bp - bpf));
}
#endif

static const struct bpf_insn bpf_bootp_ether[] = {
	/* Make sure this is an IP packet. */
	BPF_STMT(BPF_LD + BPF_H + BPF_ABS,
	    offsetof(struct ether_header, ether_type)),
	BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, ETHERTYPE_IP, 1, 0),
	BPF_STMT(BPF_RET + BPF_K, 0),

	/* Load frame header length into X. */
	BPF_STMT(BPF_LDX + BPF_W + BPF_IMM, sizeof(struct ether_header)),
	/* Copy to M0. */
	BPF_STMT(BPF_STX, 0),
};
#define BPF_BOOTP_ETHER_LEN __arraycount(bpf_bootp_ether)

static const struct bpf_insn bpf_bootp_filter[] = {
	/* Make sure it's an optionless IPv4 packet. */
	BPF_STMT(BPF_LD + BPF_B + BPF_IND, 0),
	BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, 0x45, 1, 0),
	BPF_STMT(BPF_RET + BPF_K, 0),

	/* Make sure it's a UDP packet. */
	BPF_STMT(BPF_LD + BPF_B + BPF_IND, offsetof(struct ip, ip_p)),
	BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, IPPROTO_UDP, 1, 0),
	BPF_STMT(BPF_RET + BPF_K, 0),

	/* Make sure this isn't a fragment. */
	BPF_STMT(BPF_LD + BPF_H + BPF_IND, offsetof(struct ip, ip_off)),
	BPF_JUMP(BPF_JMP + BPF_JSET + BPF_K, 0x1fff, 0, 1),
	BPF_STMT(BPF_RET + BPF_K, 0),

	/* Store the IP header location (currently in X) in M1. */
	BPF_STMT(BPF_MISC + BPF_TXA, 0),
	BPF_STMT(BPF_ST, 1),

	/* Store IP length in M2. */
	BPF_STMT(BPF_LD + BPF_H + BPF_IND, offsetof(struct ip, ip_len)),
	BPF_STMT(BPF_ST, 2),

	/* Advance to the UDP header. */
	BPF_STMT(BPF_MISC + BPF_TXA, 0),
	BPF_STMT(BPF_ALU + BPF_ADD + BPF_K, sizeof(struct ip)),
	BPF_STMT(BPF_MISC + BPF_TAX, 0),

	/* Store X in M3. */
	BPF_STMT(BPF_STX, 3),

	/* Make sure it's from and to the right port. */
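	/* The 32-bit word at the start of the UDP header packs the source
	 * port in the upper 16 bits and the destination port in the lower
	 * 16 bits, so one comparison checks both. */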
	BPF_STMT(BPF_LD + BPF_W + BPF_IND, 0),
	BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, (BOOTPS << 16) + BOOTPC, 1, 0),
	BPF_STMT(BPF_RET + BPF_K, 0),

	/* Store UDP length in X. */
	BPF_STMT(BPF_LD + BPF_H + BPF_IND, offsetof(struct udphdr, uh_ulen)),
	BPF_STMT(BPF_MISC + BPF_TAX, 0),
	/* Copy IP length in M2 to A. */
	BPF_STMT(BPF_LD + BPF_MEM, 2),
	/* Ensure IP length - IP header size == UDP length. */
	BPF_STMT(BPF_ALU + BPF_SUB + BPF_K, sizeof(struct ip)),
	BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_X, 0, 1, 0),
	BPF_STMT(BPF_RET + BPF_K, 0),

	/* Advance to the BOOTP packet (UDP X is in M3). */
	BPF_STMT(BPF_LD + BPF_MEM, 3),
	BPF_STMT(BPF_ALU + BPF_ADD + BPF_K, sizeof(struct udphdr)),
	BPF_STMT(BPF_MISC + BPF_TAX, 0),

	/* Make sure it's BOOTREPLY. */
	BPF_STMT(BPF_LD + BPF_B + BPF_IND, offsetof(struct bootp, op)),
	BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, BOOTREPLY, 1, 0),
	BPF_STMT(BPF_RET + BPF_K, 0),
};

#define BPF_BOOTP_FILTER_LEN __arraycount(bpf_bootp_filter)
#define BPF_BOOTP_CHADDR_LEN ((BOOTP_CHADDR_LEN / 4) * 3)
#define BPF_BOOTP_XID_LEN 4 /* BOUND check is 4 instructions */

#define BPF_BOOTP_LEN BPF_BOOTP_ETHER_LEN + BPF_BOOTP_FILTER_LEN \
    + BPF_BOOTP_XID_LEN + BPF_BOOTP_CHADDR_LEN + 4

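/*
 * Illustrative only: on the BSD path a caller would typically pair
 * bpf_open() with this filter and drain the returned descriptor with
 * bpf_read(), e.g.
 *
 *	int fd = bpf_open(ifp, bpf_bootp);
 *	if (fd == -1)
 *		logerr(__func__);
 */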
int
bpf_bootp(struct interface *ifp, int fd)
{
#if 0
	const struct dhcp_state *state = D_CSTATE(ifp);
#endif
	struct bpf_insn bpf[BPF_BOOTP_LEN];
	struct bpf_insn *bp;

	if (fd == -1)
		return 0;

	bp = bpf;
	/* Check frame header. */
	switch(ifp->family) {
	case ARPHRD_ETHER:
		memcpy(bp, bpf_bootp_ether, sizeof(bpf_bootp_ether));
		bp += BPF_BOOTP_ETHER_LEN;
		break;
	default:
		errno = EINVAL;
		return -1;
	}

	/* Copy in the main filter. */
	memcpy(bp, bpf_bootp_filter, sizeof(bpf_bootp_filter));
	bp += BPF_BOOTP_FILTER_LEN;

	/* These checks won't work when the same IP exists on other
	 * interfaces. */
#if 0
	if (ifp->hwlen <= sizeof(((struct bootp *)0)->chaddr))
		bp += bpf_cmp_hwaddr(bp, BPF_BOOTP_CHADDR_LEN,
		    offsetof(struct bootp, chaddr),
		    true, ifp->hwaddr, ifp->hwlen);

	/* Make sure the BOOTP packet is for us. */
	if (state->state == DHS_BOUND) {
		/* If bound, we only expect FORCERENEW messages
		 * and they need to be unicast to us.
		 * Move back to the IP header in M0 and check dst. */
		BPF_SET_STMT(bp, BPF_LDX + BPF_W + BPF_MEM, 0);
		bp++;
		BPF_SET_STMT(bp, BPF_LD + BPF_W + BPF_IND,
		    offsetof(struct ip, ip_dst));
		bp++;
		BPF_SET_JUMP(bp, BPF_JMP + BPF_JEQ + BPF_K,
		    htonl(state->lease.addr.s_addr), 1, 0);
		bp++;
		BPF_SET_STMT(bp, BPF_RET + BPF_K, 0);
		bp++;
	} else {
		/* As we're not bound, we need to check xid to ensure
		 * it's a reply to our transaction. */
		BPF_SET_STMT(bp, BPF_LD + BPF_W + BPF_IND,
		    offsetof(struct bootp, xid));
		bp++;
		BPF_SET_JUMP(bp, BPF_JMP + BPF_JEQ + BPF_K,
		    state->xid, 1, 0);
		bp++;
		BPF_SET_STMT(bp, BPF_RET + BPF_K, 0);
		bp++;
	}
#endif

	/* All passed, return the packet
	 * (Frame length in M0, IP length in M2). */
	BPF_SET_STMT(bp, BPF_LD + BPF_MEM, 0);
	bp++;
	BPF_SET_STMT(bp, BPF_LDX + BPF_MEM, 2);
	bp++;
	BPF_SET_STMT(bp, BPF_ALU + BPF_ADD + BPF_X, 0);
	bp++;
	BPF_SET_STMT(bp, BPF_RET + BPF_A, 0);
	bp++;

	return bpf_attach(fd, bpf, (unsigned int)(bp - bpf));
}