/*	$NetBSD: bpf_stub.c,v 1.8 2018/06/25 03:22:14 msaitoh Exp $	*/

/*
 * Copyright (c) 2010 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: bpf_stub.c,v 1.8 2018/06/25 03:22:14 msaitoh Exp $");

#include <sys/param.h>
#include <sys/kmem.h>
#include <sys/mbuf.h>

#include <net/bpf.h>

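/*
 * Interfaces that attach to bpf before the real driver has been
 * loaded are recorded on the "lag list" below and replayed against
 * the real ops in bpf_ops_handover_enter().
 */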
struct laglist {
	struct ifnet *lag_ifp;
	u_int lag_dlt;
	u_int lag_hlen;
	struct bpf_if **lag_drvp;

	TAILQ_ENTRY(laglist) lag_entries;
};

static TAILQ_HEAD(, laglist) lagdrvs = TAILQ_HEAD_INITIALIZER(lagdrvs);

static void bpf_stub_attach(struct ifnet *, u_int, u_int, struct bpf_if **);
static void bpf_stub_detach(struct ifnet *);

static void bpf_stub_null(void);
static void bpf_stub_warn(void);

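/*
 * handover is true while the real driver is taking over in
 * bpf_ops_handover_enter()/_exit(); the mutex and condvar serialize
 * the stub attach/detach routines against that window.
 */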
static kmutex_t handovermtx;
static kcondvar_t handovercv;
static bool handover;

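/*
 * Stub ops: attach/detach are handled locally, harmless methods are
 * no-ops, and the mtap variants warn because they should never be
 * reached without an attached bpf_if.
 */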
struct bpf_ops bpf_ops_stub = {
	.bpf_attach = bpf_stub_attach,
	.bpf_detach = bpf_stub_detach,
	.bpf_change_type = (void *)bpf_stub_null,

	.bpf_mtap = (void *)bpf_stub_warn,
	.bpf_mtap2 = (void *)bpf_stub_warn,
	.bpf_mtap_af = (void *)bpf_stub_warn,
	.bpf_mtap_sl_in = (void *)bpf_stub_warn,
	.bpf_mtap_sl_out = (void *)bpf_stub_warn,

	.bpf_mtap_softint_init = (void *)bpf_stub_null,
	.bpf_mtap_softint = (void *)bpf_stub_warn,
};
struct bpf_ops *bpf_ops;

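/*
 * Stub attach: record the request so that it can be replayed once
 * the real driver takes over.  If a handover is in progress, wait
 * for it to finish and call the real attach routine instead.
 */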
static void
bpf_stub_attach(struct ifnet *ifp, u_int dlt, u_int hlen, struct bpf_if **drvp)
{
	struct laglist *lag;
	bool storeattach = true;

	lag = kmem_alloc(sizeof(*lag), KM_SLEEP);
	lag->lag_ifp = ifp;
	lag->lag_dlt = dlt;
	lag->lag_hlen = hlen;
	lag->lag_drvp = drvp;

	mutex_enter(&handovermtx);
	/*
	 * If handover is in progress, wait for it to finish and complete
	 * attach after that.  Otherwise record ourselves.
	 */
	while (handover) {
		storeattach = false;
		cv_wait(&handovercv, &handovermtx);
	}

	if (storeattach == false) {
		mutex_exit(&handovermtx);
		kmem_free(lag, sizeof(*lag));
		KASSERT(bpf_ops != &bpf_ops_stub); /* revisit when unloadable */
		bpf_ops->bpf_attach(ifp, dlt, hlen, drvp);
	} else {
		*drvp = NULL;
		TAILQ_INSERT_TAIL(&lagdrvs, lag, lag_entries);
		mutex_exit(&handovermtx);
	}
}

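/*
 * Stub detach: drop any recorded attach requests for the interface.
 * If we had to wait out a handover, the real driver now owns the
 * attachments, so call its detach routine instead.
 */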
static void
bpf_stub_detach(struct ifnet *ifp)
{
	TAILQ_HEAD(, laglist) rmlist;
	struct laglist *lag, *lag_next;
	bool didhand;

	TAILQ_INIT(&rmlist);

	didhand = false;
	mutex_enter(&handovermtx);
	while (handover) {
		didhand = true;
		cv_wait(&handovercv, &handovermtx);
	}

	if (didhand == false) {
		/* atomically remove all */
		for (lag = TAILQ_FIRST(&lagdrvs); lag; lag = lag_next) {
			lag_next = TAILQ_NEXT(lag, lag_entries);
			if (lag->lag_ifp == ifp) {
				TAILQ_REMOVE(&lagdrvs, lag, lag_entries);
				TAILQ_INSERT_HEAD(&rmlist, lag, lag_entries);
			}
		}
		mutex_exit(&handovermtx);
		while ((lag = TAILQ_FIRST(&rmlist)) != NULL) {
			TAILQ_REMOVE(&rmlist, lag, lag_entries);
			kmem_free(lag, sizeof(*lag));
		}
	} else {
		mutex_exit(&handovermtx);
		KASSERT(bpf_ops != &bpf_ops_stub); /* revisit when unloadable */
		bpf_ops->bpf_detach(ifp);
	}
}

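/* Placeholder for methods that are safe to ignore before handover. */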
static void
bpf_stub_null(void)
{

}

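/* Placeholder for methods that should not be reached before handover. */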
static void
bpf_stub_warn(void)
{

#ifdef DEBUG
	panic("bpf method called without attached bpf_if");
#endif
#ifdef DIAGNOSTIC
	printf("bpf method called without attached bpf_if\n");
#endif
}

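/*
 * Install the stub ops and initialize the handover synchronization
 * primitives.
 */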
void
bpf_setops(void)
{

	mutex_init(&handovermtx, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&handovercv, "bpfops");
	bpf_ops = &bpf_ops_stub;
}

/*
 * Party's over, prepare for handover.
 * It needs to happen *before* bpf_ops is set to make it atomic
 * to callers (see also stub implementations, which wait if
 * called during handover).  The likelihood of seeing a full
 * attach-detach cycle *during* handover is astronomically small,
 * but handle it anyway since it's relatively easy.
 */
void
bpf_ops_handover_enter(struct bpf_ops *newops)
{
	struct laglist *lag;

	mutex_enter(&handovermtx);
	handover = true;

	while ((lag = TAILQ_FIRST(&lagdrvs)) != NULL) {
		TAILQ_REMOVE(&lagdrvs, lag, lag_entries);
		mutex_exit(&handovermtx);
		newops->bpf_attach(lag->lag_ifp, lag->lag_dlt,
		    lag->lag_hlen, lag->lag_drvp);
		kmem_free(lag, sizeof(*lag));
		mutex_enter(&handovermtx);
	}
	mutex_exit(&handovermtx);
}

/* hangover done */
void
bpf_ops_handover_exit(void)
{

	mutex_enter(&handovermtx);
	handover = false;
	cv_broadcast(&handovercv);
	mutex_exit(&handovermtx);
}
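
/*
 * For reference, a loading bpf driver would be expected to use the
 * handover hooks roughly as sketched below (hypothetical names; the
 * real sequence lives in the bpf driver itself, not in this stub):
 *
 *	bpf_ops_handover_enter(&bpf_ops_real);
 *	atomic_swap_ptr(&bpf_ops, &bpf_ops_real);
 *	bpf_ops_handover_exit();
 */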