1 | /* $NetBSD: if_vmx.c,v 1.32 2019/07/16 10:12:10 knakahara Exp $ */ |
2 | /* $OpenBSD: if_vmx.c,v 1.16 2014/01/22 06:04:17 brad Exp $ */ |
3 | |
4 | /* |
5 | * Copyright (c) 2013 Tsubai Masanari |
6 | * Copyright (c) 2013 Bryan Venteicher <bryanv@FreeBSD.org> |
7 | * |
8 | * Permission to use, copy, modify, and distribute this software for any |
9 | * purpose with or without fee is hereby granted, provided that the above |
10 | * copyright notice and this permission notice appear in all copies. |
11 | * |
12 | * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES |
13 | * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF |
14 | * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR |
15 | * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES |
16 | * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN |
17 | * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF |
18 | * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. |
19 | */ |
20 | |
21 | #include <sys/cdefs.h> |
22 | __KERNEL_RCSID(0, "$NetBSD: if_vmx.c,v 1.32 2019/07/16 10:12:10 knakahara Exp $"); |
23 | |
24 | #include <sys/param.h> |
25 | #include <sys/cpu.h> |
26 | #include <sys/kernel.h> |
27 | #include <sys/kmem.h> |
28 | #include <sys/bus.h> |
29 | #include <sys/device.h> |
30 | #include <sys/mbuf.h> |
31 | #include <sys/sockio.h> |
32 | |
33 | #include <net/bpf.h> |
34 | #include <net/if.h> |
35 | #include <net/if_ether.h> |
36 | #include <net/if_media.h> |
37 | |
38 | #include <netinet/if_inarp.h> |
39 | #include <netinet/in_systm.h> /* for <netinet/ip.h> */ |
40 | #include <netinet/in.h> /* for <netinet/ip.h> */ |
41 | #include <netinet/ip.h> /* for struct ip */ |
42 | #include <netinet/ip6.h> /* for struct ip6_hdr */ |
43 | #include <netinet/tcp.h> /* for struct tcphdr */ |
44 | #include <netinet/udp.h> /* for struct udphdr */ |
45 | |
46 | #include <dev/pci/pcivar.h> |
47 | #include <dev/pci/pcireg.h> |
48 | #include <dev/pci/pcidevs.h> |
49 | |
50 | #include <arch/x86/pci/if_vmxreg.h> |
51 | |
52 | #define VMXNET3_DRIVER_VERSION 0x00010000 |
53 | |
54 | /* |
55 | * Max descriptors per Tx packet. We must limit the size of any |
56 | * TSO packet based on the number of segments. |
57 | */ |
58 | #define VMXNET3_TX_MAXSEGS 32 |
59 | #define VMXNET3_TX_MAXSIZE (VMXNET3_TX_MAXSEGS * MCLBYTES) |
60 | |
61 | /* |
62 | * Maximum supported Tx segment size. The length field in the |
63 | * Tx descriptor is 14 bits. |
64 | */ |
65 | #define VMXNET3_TX_MAXSEGSIZE (1 << 14) |
66 | |
67 | /* |
68 | * The maximum number of Rx segments we accept. |
69 | */ |
70 | #define VMXNET3_MAX_RX_SEGS 0 /* no segments */ |
71 | |
72 | /* |
73 | * Predetermined size of the multicast MAC filter table. If the |
74 | * number of multicast addresses exceeds this size, then the |
75 | * ALL_MULTI mode is used instead. |
76 | */ |
77 | #define VMXNET3_MULTICAST_MAX 32 |
78 | |
79 | /* |
80 | * Our Tx watchdog timeout. |
81 | */ |
82 | #define VMXNET3_WATCHDOG_TIMEOUT 5 |
83 | |
84 | /* |
85 | * IP protocols that we can perform Tx checksum offloading of. |
86 | */ |
87 | #define VMXNET3_CSUM_OFFLOAD \ |
88 | (M_CSUM_TCPv4 | M_CSUM_UDPv4) |
89 | #define VMXNET3_CSUM_OFFLOAD_IPV6 \ |
90 | (M_CSUM_TCPv6 | M_CSUM_UDPv6) |
91 | |
92 | #define VMXNET3_CSUM_ALL_OFFLOAD \ |
93 | (VMXNET3_CSUM_OFFLOAD | VMXNET3_CSUM_OFFLOAD_IPV6 | M_CSUM_TSOv4 | M_CSUM_TSOv6) |
94 | |
95 | #define VMXNET3_RXRINGS_PERQ 2 |
96 | |
97 | #define VMXNET3_CORE_LOCK(_sc) mutex_enter((_sc)->vmx_mtx) |
98 | #define VMXNET3_CORE_UNLOCK(_sc) mutex_exit((_sc)->vmx_mtx) |
99 | #define VMXNET3_CORE_LOCK_ASSERT(_sc) mutex_owned((_sc)->vmx_mtx) |
100 | |
101 | #define VMXNET3_RXQ_LOCK(_rxq) mutex_enter((_rxq)->vxrxq_mtx) |
102 | #define VMXNET3_RXQ_UNLOCK(_rxq) mutex_exit((_rxq)->vxrxq_mtx) |
103 | #define VMXNET3_RXQ_LOCK_ASSERT(_rxq) \ |
104 | mutex_owned((_rxq)->vxrxq_mtx) |
105 | |
106 | #define VMXNET3_TXQ_LOCK(_txq) mutex_enter((_txq)->vxtxq_mtx) |
107 | #define VMXNET3_TXQ_UNLOCK(_txq) mutex_exit((_txq)->vxtxq_mtx) |
108 | #define VMXNET3_TXQ_LOCK_ASSERT(_txq) \ |
109 | mutex_owned((_txq)->vxtxq_mtx) |
110 | |
111 | struct vmxnet3_dma_alloc { |
112 | bus_addr_t dma_paddr; |
113 | void *dma_vaddr; |
114 | bus_dmamap_t dma_map; |
115 | bus_size_t dma_size; |
116 | bus_dma_segment_t dma_segs[1]; |
117 | }; |
118 | |
119 | struct vmxnet3_txbuf { |
120 | bus_dmamap_t vtxb_dmamap; |
121 | struct mbuf *vtxb_m; |
122 | }; |
123 | |
124 | struct vmxnet3_txring { |
125 | struct vmxnet3_txbuf *vxtxr_txbuf; |
126 | struct vmxnet3_txdesc *vxtxr_txd; |
127 | u_int vxtxr_head; |
128 | u_int vxtxr_next; |
129 | u_int vxtxr_ndesc; |
130 | int vxtxr_gen; |
131 | struct vmxnet3_dma_alloc vxtxr_dma; |
132 | }; |
133 | |
134 | struct vmxnet3_rxbuf { |
135 | bus_dmamap_t vrxb_dmamap; |
136 | struct mbuf *vrxb_m; |
137 | }; |
138 | |
139 | struct vmxnet3_rxring { |
140 | struct vmxnet3_rxbuf *vxrxr_rxbuf; |
141 | struct vmxnet3_rxdesc *vxrxr_rxd; |
142 | u_int vxrxr_fill; |
143 | u_int vxrxr_ndesc; |
144 | int vxrxr_gen; |
145 | int vxrxr_rid; |
146 | struct vmxnet3_dma_alloc vxrxr_dma; |
147 | bus_dmamap_t vxrxr_spare_dmap; |
148 | }; |
149 | |
150 | struct vmxnet3_comp_ring { |
151 | union { |
152 | struct vmxnet3_txcompdesc *txcd; |
153 | struct vmxnet3_rxcompdesc *rxcd; |
154 | } vxcr_u; |
155 | u_int vxcr_next; |
156 | u_int vxcr_ndesc; |
157 | int vxcr_gen; |
158 | struct vmxnet3_dma_alloc vxcr_dma; |
159 | }; |
160 | |
161 | struct vmxnet3_txq_stats { |
162 | uint64_t vmtxs_opackets; /* if_opackets */ |
163 | uint64_t vmtxs_obytes; /* if_obytes */ |
164 | uint64_t vmtxs_omcasts; /* if_omcasts */ |
165 | uint64_t vmtxs_csum; |
166 | uint64_t vmtxs_tso; |
167 | uint64_t vmtxs_full; |
168 | uint64_t vmtxs_offload_failed; |
169 | }; |
170 | |
171 | struct vmxnet3_txqueue { |
172 | kmutex_t *vxtxq_mtx; |
173 | struct vmxnet3_softc *vxtxq_sc; |
174 | int vxtxq_id; |
175 | int vxtxq_intr_idx; |
176 | int vxtxq_watchdog; |
177 | struct vmxnet3_txring vxtxq_cmd_ring; |
178 | struct vmxnet3_comp_ring vxtxq_comp_ring; |
179 | struct vmxnet3_txq_stats vxtxq_stats; |
180 | struct vmxnet3_txq_shared *vxtxq_ts; |
181 | char vxtxq_name[16]; |
182 | }; |
183 | |
184 | struct vmxnet3_rxq_stats { |
185 | uint64_t vmrxs_ipackets; /* if_ipackets */ |
186 | uint64_t vmrxs_ibytes; /* if_ibytes */ |
187 | uint64_t vmrxs_iqdrops; /* if_iqdrops */ |
188 | uint64_t vmrxs_ierrors; /* if_ierrors */ |
189 | }; |
190 | |
191 | struct vmxnet3_rxqueue { |
192 | kmutex_t *vxrxq_mtx; |
193 | struct vmxnet3_softc *vxrxq_sc; |
194 | int vxrxq_id; |
195 | int vxrxq_intr_idx; |
196 | struct mbuf *vxrxq_mhead; |
197 | struct mbuf *vxrxq_mtail; |
198 | struct vmxnet3_rxring vxrxq_cmd_ring[VMXNET3_RXRINGS_PERQ]; |
199 | struct vmxnet3_comp_ring vxrxq_comp_ring; |
200 | struct vmxnet3_rxq_stats vxrxq_stats; |
201 | struct vmxnet3_rxq_shared *vxrxq_rs; |
202 | char vxrxq_name[16]; |
203 | }; |
204 | |
205 | struct vmxnet3_statistics { |
206 | uint32_t vmst_defragged; |
207 | uint32_t vmst_defrag_failed; |
208 | uint32_t vmst_mgetcl_failed; |
209 | uint32_t vmst_mbuf_load_failed; |
210 | }; |
211 | |
212 | struct vmxnet3_softc { |
213 | device_t vmx_dev; |
214 | struct ethercom vmx_ethercom; |
215 | struct ifmedia vmx_media; |
216 | struct vmxnet3_driver_shared *vmx_ds; |
217 | int vmx_flags; |
218 | #define VMXNET3_FLAG_NO_MSIX (1 << 0) |
219 | #define VMXNET3_FLAG_RSS (1 << 1) |
220 | #define VMXNET3_FLAG_ATTACHED (1 << 2) |
221 | |
222 | struct vmxnet3_txqueue *vmx_txq; |
223 | struct vmxnet3_rxqueue *vmx_rxq; |
224 | |
225 | struct pci_attach_args *vmx_pa; |
226 | |
227 | bus_space_tag_t vmx_iot0; |
228 | bus_space_tag_t vmx_iot1; |
229 | bus_space_handle_t vmx_ioh0; |
230 | bus_space_handle_t vmx_ioh1; |
231 | bus_size_t vmx_ios0; |
232 | bus_size_t vmx_ios1; |
233 | bus_dma_tag_t vmx_dmat; |
234 | |
235 | int vmx_link_active; |
236 | int vmx_ntxqueues; |
237 | int vmx_nrxqueues; |
238 | int vmx_ntxdescs; |
239 | int vmx_nrxdescs; |
240 | int vmx_max_rxsegs; |
241 | |
242 | struct vmxnet3_statistics vmx_stats; |
243 | |
244 | int vmx_intr_type; |
245 | int vmx_intr_mask_mode; |
246 | int vmx_event_intr_idx; |
247 | int vmx_nintrs; |
248 | pci_intr_handle_t *vmx_intrs; /* legacy use vmx_intrs[0] */ |
249 | void *vmx_ihs[VMXNET3_MAX_INTRS]; |
250 | |
251 | kmutex_t *vmx_mtx; |
252 | |
253 | uint8_t *vmx_mcast; |
254 | void *vmx_qs; |
255 | struct vmxnet3_rss_shared *vmx_rss; |
256 | callout_t vmx_tick; |
257 | struct vmxnet3_dma_alloc vmx_ds_dma; |
258 | struct vmxnet3_dma_alloc vmx_qs_dma; |
259 | struct vmxnet3_dma_alloc vmx_mcast_dma; |
260 | struct vmxnet3_dma_alloc vmx_rss_dma; |
261 | int vmx_max_ntxqueues; |
262 | int vmx_max_nrxqueues; |
263 | uint8_t vmx_lladdr[ETHER_ADDR_LEN]; |
264 | }; |
265 | |
266 | #define VMXNET3_STAT |
267 | |
268 | #ifdef VMXNET3_STAT |
269 | struct { |
270 | u_int txhead; |
271 | u_int txdone; |
272 | u_int maxtxlen; |
273 | u_int rxdone; |
274 | u_int rxfill; |
275 | u_int intr; |
276 | } vmxstat; |
277 | #endif |
278 | |
279 | typedef enum { |
280 | VMXNET3_BARRIER_RD, |
281 | VMXNET3_BARRIER_WR, |
282 | VMXNET3_BARRIER_RDWR, |
283 | } vmxnet3_barrier_t; |
284 | |
285 | #define JUMBO_LEN (MCLBYTES - ETHER_ALIGN) /* XXX */ |
286 | #define DMAADDR(map) ((map)->dm_segs[0].ds_addr) |
287 | |
288 | #define vtophys(va) 0 /* XXX ok? */ |
289 | |
290 | int vmxnet3_match(device_t, cfdata_t, void *); |
291 | void vmxnet3_attach(device_t, device_t, void *); |
292 | int vmxnet3_detach(device_t, int); |
293 | |
294 | int vmxnet3_alloc_pci_resources(struct vmxnet3_softc *); |
295 | void vmxnet3_free_pci_resources(struct vmxnet3_softc *); |
296 | int vmxnet3_check_version(struct vmxnet3_softc *); |
297 | void vmxnet3_check_multiqueue(struct vmxnet3_softc *); |
298 | |
299 | int vmxnet3_alloc_msix_interrupts(struct vmxnet3_softc *); |
300 | int vmxnet3_alloc_msi_interrupts(struct vmxnet3_softc *); |
301 | int vmxnet3_alloc_legacy_interrupts(struct vmxnet3_softc *); |
302 | int vmxnet3_alloc_interrupts(struct vmxnet3_softc *); |
303 | void vmxnet3_free_interrupts(struct vmxnet3_softc *); |
304 | |
305 | int vmxnet3_setup_msix_interrupts(struct vmxnet3_softc *); |
306 | int vmxnet3_setup_msi_interrupt(struct vmxnet3_softc *); |
307 | int vmxnet3_setup_legacy_interrupt(struct vmxnet3_softc *); |
308 | void vmxnet3_set_interrupt_idx(struct vmxnet3_softc *); |
309 | int vmxnet3_setup_interrupts(struct vmxnet3_softc *); |
310 | |
311 | int vmxnet3_init_rxq(struct vmxnet3_softc *, int); |
312 | int vmxnet3_init_txq(struct vmxnet3_softc *, int); |
313 | int vmxnet3_alloc_rxtx_queues(struct vmxnet3_softc *); |
314 | void vmxnet3_destroy_rxq(struct vmxnet3_rxqueue *); |
315 | void vmxnet3_destroy_txq(struct vmxnet3_txqueue *); |
316 | void vmxnet3_free_rxtx_queues(struct vmxnet3_softc *); |
317 | |
318 | int vmxnet3_alloc_shared_data(struct vmxnet3_softc *); |
319 | void vmxnet3_free_shared_data(struct vmxnet3_softc *); |
320 | int vmxnet3_alloc_txq_data(struct vmxnet3_softc *); |
321 | void vmxnet3_free_txq_data(struct vmxnet3_softc *); |
322 | int vmxnet3_alloc_rxq_data(struct vmxnet3_softc *); |
323 | void vmxnet3_free_rxq_data(struct vmxnet3_softc *); |
324 | int vmxnet3_alloc_queue_data(struct vmxnet3_softc *); |
325 | void vmxnet3_free_queue_data(struct vmxnet3_softc *); |
326 | int vmxnet3_alloc_mcast_table(struct vmxnet3_softc *); |
327 | void vmxnet3_free_mcast_table(struct vmxnet3_softc *); |
328 | void vmxnet3_init_shared_data(struct vmxnet3_softc *); |
329 | void vmxnet3_reinit_rss_shared_data(struct vmxnet3_softc *); |
330 | void vmxnet3_reinit_shared_data(struct vmxnet3_softc *); |
331 | int vmxnet3_alloc_data(struct vmxnet3_softc *); |
332 | void vmxnet3_free_data(struct vmxnet3_softc *); |
333 | int vmxnet3_setup_interface(struct vmxnet3_softc *); |
334 | |
335 | void vmxnet3_evintr(struct vmxnet3_softc *); |
336 | void vmxnet3_txq_eof(struct vmxnet3_txqueue *); |
337 | int vmxnet3_newbuf(struct vmxnet3_softc *, struct vmxnet3_rxring *); |
338 | void vmxnet3_rxq_eof_discard(struct vmxnet3_rxqueue *, |
339 | struct vmxnet3_rxring *, int); |
340 | void vmxnet3_rxq_discard_chain(struct vmxnet3_rxqueue *); |
341 | void vmxnet3_rx_csum(struct vmxnet3_rxcompdesc *, struct mbuf *); |
342 | void vmxnet3_rxq_input(struct vmxnet3_rxqueue *, |
343 | struct vmxnet3_rxcompdesc *, struct mbuf *); |
344 | void vmxnet3_rxq_eof(struct vmxnet3_rxqueue *); |
345 | int vmxnet3_legacy_intr(void *); |
346 | int vmxnet3_txq_intr(void *); |
347 | int vmxnet3_rxq_intr(void *); |
348 | int vmxnet3_event_intr(void *); |
349 | |
350 | void vmxnet3_txstop(struct vmxnet3_softc *, struct vmxnet3_txqueue *); |
351 | void vmxnet3_rxstop(struct vmxnet3_softc *, struct vmxnet3_rxqueue *); |
352 | void vmxnet3_stop_locked(struct vmxnet3_softc *); |
353 | void vmxnet3_stop_rendezvous(struct vmxnet3_softc *); |
354 | void vmxnet3_stop(struct ifnet *, int); |
355 | |
356 | void vmxnet3_txinit(struct vmxnet3_softc *, struct vmxnet3_txqueue *); |
357 | int vmxnet3_rxinit(struct vmxnet3_softc *, struct vmxnet3_rxqueue *); |
358 | int vmxnet3_reinit_queues(struct vmxnet3_softc *); |
359 | int vmxnet3_enable_device(struct vmxnet3_softc *); |
360 | void vmxnet3_reinit_rxfilters(struct vmxnet3_softc *); |
361 | int vmxnet3_reinit(struct vmxnet3_softc *); |
362 | |
363 | void vmxnet3_rx_csum(struct vmxnet3_rxcompdesc *, struct mbuf *); |
364 | int vmxnet3_init_locked(struct vmxnet3_softc *); |
365 | int vmxnet3_init(struct ifnet *); |
366 | |
367 | int vmxnet3_txq_offload_ctx(struct vmxnet3_txqueue *, struct mbuf *, int *, int *); |
368 | int vmxnet3_txq_load_mbuf(struct vmxnet3_txqueue *, struct mbuf **, bus_dmamap_t); |
369 | void vmxnet3_txq_unload_mbuf(struct vmxnet3_txqueue *, bus_dmamap_t); |
370 | int vmxnet3_txq_encap(struct vmxnet3_txqueue *, struct mbuf **); |
371 | void vmxnet3_start_locked(struct ifnet *); |
372 | void vmxnet3_start(struct ifnet *); |
373 | |
374 | void vmxnet3_set_rxfilter(struct vmxnet3_softc *); |
375 | int vmxnet3_ioctl(struct ifnet *, u_long, void *); |
376 | int vmxnet3_ifflags_cb(struct ethercom *); |
377 | |
378 | int vmxnet3_watchdog(struct vmxnet3_txqueue *); |
379 | void vmxnet3_refresh_host_stats(struct vmxnet3_softc *); |
380 | void vmxnet3_tick(void *); |
381 | void vmxnet3_link_status(struct vmxnet3_softc *); |
382 | void vmxnet3_media_status(struct ifnet *, struct ifmediareq *); |
383 | int vmxnet3_media_change(struct ifnet *); |
384 | void vmxnet3_set_lladdr(struct vmxnet3_softc *); |
385 | void vmxnet3_get_lladdr(struct vmxnet3_softc *); |
386 | |
387 | void vmxnet3_enable_all_intrs(struct vmxnet3_softc *); |
388 | void vmxnet3_disable_all_intrs(struct vmxnet3_softc *); |
389 | |
390 | int vmxnet3_dma_malloc(struct vmxnet3_softc *, bus_size_t, bus_size_t, |
391 | struct vmxnet3_dma_alloc *); |
392 | void vmxnet3_dma_free(struct vmxnet3_softc *, struct vmxnet3_dma_alloc *); |
393 | |
394 | CFATTACH_DECL3_NEW(vmx, sizeof(struct vmxnet3_softc), |
395 | vmxnet3_match, vmxnet3_attach, vmxnet3_detach, NULL, NULL, NULL, 0); |
396 | |
397 | /* round down to the nearest power of 2 */ |
398 | static int |
399 | vmxnet3_calc_queue_size(int n) |
400 | { |
401 | int v, q; |
402 | |
403 | v = n; |
404 | while (v != 0) { |
405 | if (powerof2(n) != 0) |
406 | break; |
407 | v /= 2; |
408 | q = rounddown2(n, v); |
409 | if (q != 0) { |
410 | n = q; |
411 | break; |
412 | } |
413 | } |
414 | if (n == 0) |
415 | n = 1; |
416 | |
417 | return n; |
418 | } |
419 | |
420 | static inline void |
421 | vmxnet3_write_bar0(struct vmxnet3_softc *sc, bus_size_t r, uint32_t v) |
422 | { |
423 | |
424 | bus_space_write_4(sc->vmx_iot0, sc->vmx_ioh0, r, v); |
425 | } |
426 | |
427 | static inline uint32_t |
428 | vmxnet3_read_bar1(struct vmxnet3_softc *sc, bus_size_t r) |
429 | { |
430 | |
431 | return (bus_space_read_4(sc->vmx_iot1, sc->vmx_ioh1, r)); |
432 | } |
433 | |
434 | static inline void |
435 | vmxnet3_write_bar1(struct vmxnet3_softc *sc, bus_size_t r, uint32_t v) |
436 | { |
437 | |
438 | bus_space_write_4(sc->vmx_iot1, sc->vmx_ioh1, r, v); |
439 | } |
440 | |
441 | static inline void |
442 | vmxnet3_write_cmd(struct vmxnet3_softc *sc, uint32_t cmd) |
443 | { |
444 | |
445 | vmxnet3_write_bar1(sc, VMXNET3_BAR1_CMD, cmd); |
446 | } |
447 | |
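/*
 * Commands are issued by writing the command code to the BAR1 command
 * register; reading the same register back then returns the result of
 * that command.
 */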
448 | static inline uint32_t |
449 | vmxnet3_read_cmd(struct vmxnet3_softc *sc, uint32_t cmd) |
450 | { |
451 | |
452 | vmxnet3_write_cmd(sc, cmd); |
453 | return (vmxnet3_read_bar1(sc, VMXNET3_BAR1_CMD)); |
454 | } |
455 | |
456 | static inline void |
457 | vmxnet3_enable_intr(struct vmxnet3_softc *sc, int irq) |
458 | { |
459 | vmxnet3_write_bar0(sc, VMXNET3_BAR0_IMASK(irq), 0); |
460 | } |
461 | |
462 | static inline void |
463 | vmxnet3_disable_intr(struct vmxnet3_softc *sc, int irq) |
464 | { |
465 | vmxnet3_write_bar0(sc, VMXNET3_BAR0_IMASK(irq), 1); |
466 | } |
467 | |
468 | static inline void |
469 | vmxnet3_rxr_increment_fill(struct vmxnet3_rxring *rxr) |
470 | { |
471 | |
472 | if (++rxr->vxrxr_fill == rxr->vxrxr_ndesc) { |
473 | rxr->vxrxr_fill = 0; |
474 | rxr->vxrxr_gen ^= 1; |
475 | } |
476 | } |
477 | |
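/*
 * Number of free Tx descriptors: the ring is circular, so the distance
 * from head (producer) back to next (consumer) is computed modulo the
 * ring size, keeping one slot unused to distinguish full from empty.
 */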
478 | static inline int |
479 | vmxnet3_txring_avail(struct vmxnet3_txring *txr) |
480 | { |
481 | int avail = txr->vxtxr_next - txr->vxtxr_head - 1; |
482 | return (avail < 0 ? txr->vxtxr_ndesc + avail : avail); |
483 | } |
484 | |
485 | /* |
486 | * Since this is a purely paravirtualized device, we do not have |
487 | * to worry about DMA coherency. But at times, we must make sure |
488 | * both the compiler and CPU do not reorder memory operations. |
489 | */ |
490 | static inline void |
491 | vmxnet3_barrier(struct vmxnet3_softc *sc, vmxnet3_barrier_t type) |
492 | { |
493 | |
494 | switch (type) { |
495 | case VMXNET3_BARRIER_RD: |
496 | membar_consumer(); |
497 | break; |
498 | case VMXNET3_BARRIER_WR: |
499 | membar_producer(); |
500 | break; |
501 | case VMXNET3_BARRIER_RDWR: |
502 | membar_sync(); |
503 | break; |
504 | default: |
505 | panic("%s: bad barrier type %d", __func__, type); |
506 | } |
507 | } |
508 | |
509 | int |
510 | vmxnet3_match(device_t parent, cfdata_t match, void *aux) |
511 | { |
512 | struct pci_attach_args *pa = (struct pci_attach_args *)aux; |
513 | |
514 | if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_VMWARE && |
515 | PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_VMWARE_VMXNET3) |
516 | return 1; |
517 | |
518 | return 0; |
519 | } |
520 | |
521 | void |
522 | vmxnet3_attach(device_t parent, device_t self, void *aux) |
523 | { |
524 | struct vmxnet3_softc *sc = device_private(self); |
525 | struct pci_attach_args *pa = aux; |
526 | pcireg_t preg; |
527 | int error; |
528 | |
529 | sc->vmx_dev = self; |
530 | sc->vmx_pa = pa; |
531 | if (pci_dma64_available(pa)) |
532 | sc->vmx_dmat = pa->pa_dmat64; |
533 | else |
534 | sc->vmx_dmat = pa->pa_dmat; |
535 | |
536 | pci_aprint_devinfo_fancy(pa, "Ethernet controller", "vmxnet3", 1); |
537 | |
538 | preg = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG); |
539 | preg |= PCI_COMMAND_MASTER_ENABLE; |
540 | pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg); |
541 | |
542 | sc->vmx_mtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET); |
543 | callout_init(&sc->vmx_tick, CALLOUT_MPSAFE); |
544 | |
545 | sc->vmx_max_ntxqueues = |
546 | vmxnet3_calc_queue_size(MIN(VMXNET3_MAX_TX_QUEUES, ncpu)); |
547 | sc->vmx_max_nrxqueues = |
548 | vmxnet3_calc_queue_size(MIN(VMXNET3_MAX_RX_QUEUES, ncpu)); |
549 | sc->vmx_ntxdescs = 512; |
550 | sc->vmx_nrxdescs = 256; |
551 | sc->vmx_max_rxsegs = VMXNET3_MAX_RX_SEGS; |
552 | |
553 | error = vmxnet3_alloc_pci_resources(sc); |
554 | if (error) |
555 | return; |
556 | |
557 | error = vmxnet3_check_version(sc); |
558 | if (error) |
559 | return; |
560 | |
561 | error = vmxnet3_alloc_rxtx_queues(sc); |
562 | if (error) |
563 | return; |
564 | |
565 | error = vmxnet3_alloc_interrupts(sc); |
566 | if (error) |
567 | return; |
568 | |
569 | vmxnet3_check_multiqueue(sc); |
570 | |
571 | error = vmxnet3_alloc_data(sc); |
572 | if (error) |
573 | return; |
574 | |
575 | error = vmxnet3_setup_interface(sc); |
576 | if (error) |
577 | return; |
578 | |
579 | error = vmxnet3_setup_interrupts(sc); |
580 | if (error) |
581 | return; |
582 | |
583 | sc->vmx_flags |= VMXNET3_FLAG_ATTACHED; |
584 | } |
585 | |
586 | int |
587 | vmxnet3_detach(device_t self, int flags) |
588 | { |
589 | struct vmxnet3_softc *sc; |
590 | struct ifnet *ifp; |
591 | |
592 | sc = device_private(self); |
593 | ifp = &sc->vmx_ethercom.ec_if; |
594 | |
595 | if (sc->vmx_flags & VMXNET3_FLAG_ATTACHED) { |
596 | VMXNET3_CORE_LOCK(sc); |
597 | vmxnet3_stop_locked(sc); |
598 | callout_halt(&sc->vmx_tick, sc->vmx_mtx); |
599 | VMXNET3_CORE_UNLOCK(sc); |
600 | |
601 | ifmedia_delete_instance(&sc->vmx_media, IFM_INST_ANY); |
602 | |
603 | ether_ifdetach(ifp); |
604 | if_detach(ifp); |
605 | } |
606 | |
607 | vmxnet3_free_interrupts(sc); |
608 | |
609 | vmxnet3_free_data(sc); |
610 | vmxnet3_free_pci_resources(sc); |
611 | vmxnet3_free_rxtx_queues(sc); |
612 | |
613 | if (sc->vmx_mtx) |
614 | mutex_obj_free(sc->vmx_mtx); |
615 | |
616 | return (0); |
617 | } |
618 | |
619 | int |
620 | vmxnet3_alloc_pci_resources(struct vmxnet3_softc *sc) |
621 | { |
622 | struct pci_attach_args *pa = sc->vmx_pa; |
623 | pcireg_t memtype; |
624 | |
625 | memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(0)); |
626 | if (pci_mapreg_map(pa, PCI_BAR(0), memtype, 0, &sc->vmx_iot0, &sc->vmx_ioh0, |
627 | NULL, &sc->vmx_ios0)) { |
628 | aprint_error_dev(sc->vmx_dev, "failed to map BAR0\n"); |
629 | return (ENXIO); |
630 | } |
631 | memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(1)); |
632 | if (pci_mapreg_map(pa, PCI_BAR(1), memtype, 0, &sc->vmx_iot1, &sc->vmx_ioh1, |
633 | NULL, &sc->vmx_ios1)) { |
634 | aprint_error_dev(sc->vmx_dev, "failed to map BAR1\n"); |
635 | return (ENXIO); |
636 | } |
637 | |
638 | if (!pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_MSIX, NULL, NULL)) { |
639 | sc->vmx_flags |= VMXNET3_FLAG_NO_MSIX; |
640 | return (0); |
641 | } |
642 | |
643 | return (0); |
644 | } |
645 | |
646 | void |
647 | vmxnet3_free_pci_resources(struct vmxnet3_softc *sc) |
648 | { |
649 | |
650 | if (sc->vmx_ios0) { |
651 | bus_space_unmap(sc->vmx_iot0, sc->vmx_ioh0, sc->vmx_ios0); |
652 | sc->vmx_ios0 = 0; |
653 | } |
654 | |
655 | if (sc->vmx_ios1) { |
656 | bus_space_unmap(sc->vmx_iot1, sc->vmx_ioh1, sc->vmx_ios1); |
657 | sc->vmx_ios1 = 0; |
658 | } |
659 | } |
660 | |
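/*
 * Version handshake: bit 0 of the VRRS/UVRS registers advertises
 * revision 1 of the device and UPT interfaces; writing 1 back selects
 * that revision.
 */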
661 | int |
662 | vmxnet3_check_version(struct vmxnet3_softc *sc) |
663 | { |
664 | u_int ver; |
665 | |
666 | ver = vmxnet3_read_bar1(sc, VMXNET3_BAR1_VRRS); |
667 | if ((ver & 0x1) == 0) { |
668 | aprint_error_dev(sc->vmx_dev, |
669 | "unsupported hardware version 0x%x\n", ver); |
670 | return (ENOTSUP); |
671 | } |
672 | vmxnet3_write_bar1(sc, VMXNET3_BAR1_VRRS, 1); |
673 | |
674 | ver = vmxnet3_read_bar1(sc, VMXNET3_BAR1_UVRS); |
675 | if ((ver & 0x1) == 0) { |
676 | aprint_error_dev(sc->vmx_dev, |
677 | "incompatible UPT version 0x%x\n", ver); |
678 | return (ENOTSUP); |
679 | } |
680 | vmxnet3_write_bar1(sc, VMXNET3_BAR1_UVRS, 1); |
681 | |
682 | return (0); |
683 | } |
684 | |
685 | void |
686 | vmxnet3_check_multiqueue(struct vmxnet3_softc *sc) |
687 | { |
688 | |
689 | if (sc->vmx_intr_type != VMXNET3_IT_MSIX) |
690 | goto out; |
691 | |
692 | /* Just use the maximum configured for now. */ |
693 | sc->vmx_nrxqueues = sc->vmx_max_nrxqueues; |
694 | sc->vmx_ntxqueues = sc->vmx_max_ntxqueues; |
695 | |
696 | if (sc->vmx_nrxqueues > 1) |
697 | sc->vmx_flags |= VMXNET3_FLAG_RSS; |
698 | |
699 | return; |
700 | |
701 | out: |
702 | sc->vmx_ntxqueues = 1; |
703 | sc->vmx_nrxqueues = 1; |
704 | } |
705 | |
706 | int |
707 | vmxnet3_alloc_msix_interrupts(struct vmxnet3_softc *sc) |
708 | { |
709 | int required; |
710 | struct pci_attach_args *pa = sc->vmx_pa; |
711 | |
712 | if (sc->vmx_flags & VMXNET3_FLAG_NO_MSIX) |
713 | return (1); |
714 | |
715 | /* Allocate an additional vector for the events interrupt. */ |
716 | required = sc->vmx_max_nrxqueues + sc->vmx_max_ntxqueues + 1; |
717 | |
718 | if (pci_msix_count(pa->pa_pc, pa->pa_tag) < required) |
719 | return (1); |
720 | |
721 | if (pci_msix_alloc_exact(pa, &sc->vmx_intrs, required) == 0) { |
722 | sc->vmx_nintrs = required; |
723 | return (0); |
724 | } |
725 | |
726 | return (1); |
727 | } |
728 | |
729 | int |
730 | vmxnet3_alloc_msi_interrupts(struct vmxnet3_softc *sc) |
731 | { |
732 | int nmsi, required; |
733 | struct pci_attach_args *pa = sc->vmx_pa; |
734 | |
735 | required = 1; |
736 | |
737 | nmsi = pci_msi_count(pa->pa_pc, pa->pa_tag); |
738 | if (nmsi < required) |
739 | return (1); |
740 | |
741 | if (pci_msi_alloc_exact(pa, &sc->vmx_intrs, required) == 0) { |
742 | sc->vmx_nintrs = required; |
743 | return (0); |
744 | } |
745 | |
746 | return (1); |
747 | } |
748 | |
749 | int |
750 | vmxnet3_alloc_legacy_interrupts(struct vmxnet3_softc *sc) |
751 | { |
752 | |
753 | if (pci_intx_alloc(sc->vmx_pa, &sc->vmx_intrs) == 0) { |
754 | sc->vmx_nintrs = 1; |
755 | return (0); |
756 | } |
757 | |
758 | return (1); |
759 | } |
760 | |
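/*
 * Ask the device which interrupt type it prefers, then try to allocate
 * vectors in decreasing order of preference: MSI-X, then MSI, then a
 * legacy INTx line.
 */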
761 | int |
762 | vmxnet3_alloc_interrupts(struct vmxnet3_softc *sc) |
763 | { |
764 | u_int config; |
765 | int error; |
766 | |
767 | config = vmxnet3_read_cmd(sc, VMXNET3_CMD_GET_INTRCFG); |
768 | |
769 | sc->vmx_intr_type = config & 0x03; |
770 | sc->vmx_intr_mask_mode = (config >> 2) & 0x03; |
771 | |
772 | switch (sc->vmx_intr_type) { |
773 | case VMXNET3_IT_AUTO: |
774 | sc->vmx_intr_type = VMXNET3_IT_MSIX; |
775 | /* FALLTHROUGH */ |
776 | case VMXNET3_IT_MSIX: |
777 | error = vmxnet3_alloc_msix_interrupts(sc); |
778 | if (error == 0) |
779 | break; |
780 | sc->vmx_intr_type = VMXNET3_IT_MSI; |
781 | /* FALLTHROUGH */ |
782 | case VMXNET3_IT_MSI: |
783 | error = vmxnet3_alloc_msi_interrupts(sc); |
784 | if (error == 0) |
785 | break; |
786 | sc->vmx_intr_type = VMXNET3_IT_LEGACY; |
787 | /* FALLTHROUGH */ |
788 | case VMXNET3_IT_LEGACY: |
789 | error = vmxnet3_alloc_legacy_interrupts(sc); |
790 | if (error == 0) |
791 | break; |
792 | /* FALLTHROUGH */ |
793 | default: |
794 | sc->vmx_intr_type = -1; |
795 | aprint_error_dev(sc->vmx_dev, "cannot allocate any interrupt resources\n"); |
796 | return (ENXIO); |
797 | } |
798 | |
799 | return (error); |
800 | } |
801 | |
802 | void |
803 | vmxnet3_free_interrupts(struct vmxnet3_softc *sc) |
804 | { |
805 | pci_chipset_tag_t pc = sc->vmx_pa->pa_pc; |
806 | int i; |
807 | |
808 | for (i = 0; i < sc->vmx_nintrs; i++) { |
809 | pci_intr_disestablish(pc, sc->vmx_ihs[i]); |
810 | } |
811 | pci_intr_release(pc, sc->vmx_intrs, sc->vmx_nintrs); |
812 | } |
813 | |
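/*
 * With MSI-X, each Tx queue and each Rx queue gets its own vector and
 * handler, plus one additional vector for link/event notifications,
 * matching the count allocated in vmxnet3_alloc_msix_interrupts().
 */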
814 | int |
815 | vmxnet3_setup_msix_interrupts(struct vmxnet3_softc *sc) |
816 | { |
817 | pci_chipset_tag_t pc = sc->vmx_pa->pa_pc; |
818 | struct vmxnet3_txqueue *txq; |
819 | struct vmxnet3_rxqueue *rxq; |
820 | pci_intr_handle_t *intr; |
821 | void **ihs; |
822 | int intr_idx, i; |
823 | const char *intrstr; |
824 | char intrbuf[PCI_INTRSTR_LEN]; |
825 | char xnamebuf[32]; |
826 | |
827 | intr = sc->vmx_intrs; |
828 | intr_idx = 0; |
829 | ihs = sc->vmx_ihs; |
830 | |
831 | for (i = 0; i < sc->vmx_ntxqueues; i++, intr++, ihs++, intr_idx++) { |
832 | snprintf(xnamebuf, 32, "%s: tx %d", device_xname(sc->vmx_dev), i); |
833 | |
834 | txq = &sc->vmx_txq[i]; |
835 | |
836 | intrstr = pci_intr_string(pc, *intr, intrbuf, sizeof(intrbuf)); |
837 | |
838 | pci_intr_setattr(pc, intr, PCI_INTR_MPSAFE, true); |
839 | *ihs = pci_intr_establish_xname(pc, *intr, IPL_NET, |
840 | vmxnet3_txq_intr, txq, xnamebuf); |
841 | if (*ihs == NULL) { |
842 | aprint_error_dev(sc->vmx_dev, |
843 | "unable to establish tx interrupt at %s\n", intrstr); |
844 | return (-1); |
845 | } |
846 | aprint_normal_dev(sc->vmx_dev, "tx interrupting at %s\n", intrstr); |
847 | |
848 | txq->vxtxq_intr_idx = intr_idx; |
849 | } |
850 | |
851 | for (i = 0; i < sc->vmx_nrxqueues; i++, intr++, ihs++, intr_idx++) { |
852 | snprintf(xnamebuf, 32, "%s: rx %d", device_xname(sc->vmx_dev), i); |
853 | |
854 | rxq = &sc->vmx_rxq[i]; |
855 | |
856 | intrstr = pci_intr_string(pc, *intr, intrbuf, sizeof(intrbuf)); |
857 | |
858 | pci_intr_setattr(pc, intr, PCI_INTR_MPSAFE, true); |
859 | *ihs = pci_intr_establish_xname(pc, *intr, IPL_NET, |
860 | vmxnet3_rxq_intr, rxq, xnamebuf); |
861 | if (*ihs == NULL) { |
862 | aprint_error_dev(sc->vmx_dev, |
863 | "unable to establish rx interrupt at %s\n", intrstr); |
864 | return (-1); |
865 | } |
866 | aprint_normal_dev(sc->vmx_dev, "rx interrupting at %s\n", intrstr); |
867 | |
868 | rxq->vxrxq_intr_idx = intr_idx; |
869 | } |
870 | |
871 | intrstr = pci_intr_string(pc, *intr, intrbuf, sizeof(intrbuf)); |
872 | |
873 | snprintf(xnamebuf, 32, "%s: link", device_xname(sc->vmx_dev)); |
874 | pci_intr_setattr(pc, intr, PCI_INTR_MPSAFE, true); |
875 | *ihs = pci_intr_establish_xname(pc, *intr, IPL_NET, |
876 | vmxnet3_event_intr, sc, xnamebuf); |
877 | if (*ihs == NULL) { |
878 | aprint_error_dev(sc->vmx_dev, |
879 | "unable to establish event interrupt at %s\n", intrstr); |
880 | return (-1); |
881 | } |
882 | aprint_normal_dev(sc->vmx_dev, "event interrupting at %s\n", intrstr); |
883 | |
884 | sc->vmx_event_intr_idx = intr_idx; |
885 | |
886 | return (0); |
887 | } |
888 | |
889 | int |
890 | vmxnet3_setup_msi_interrupt(struct vmxnet3_softc *sc) |
891 | { |
892 | pci_chipset_tag_t pc = sc->vmx_pa->pa_pc; |
893 | pci_intr_handle_t *intr; |
894 | void **ihs; |
895 | int i; |
896 | const char *intrstr; |
897 | char intrbuf[PCI_INTRSTR_LEN]; |
898 | char xnamebuf[32]; |
899 | |
900 | intr = &sc->vmx_intrs[0]; |
901 | ihs = sc->vmx_ihs; |
902 | |
903 | intrstr = pci_intr_string(pc, *intr, intrbuf, sizeof(intrbuf)); |
904 | |
905 | snprintf(xnamebuf, 32, "%s: msi", device_xname(sc->vmx_dev)); |
906 | pci_intr_setattr(pc, intr, PCI_INTR_MPSAFE, true); |
907 | *ihs = pci_intr_establish_xname(pc, *intr, IPL_NET, |
908 | vmxnet3_legacy_intr, sc, xnamebuf); |
909 | if (*ihs == NULL) { |
910 | aprint_error_dev(sc->vmx_dev, |
911 | "unable to establish interrupt at %s\n", intrstr); |
912 | return (-1); |
913 | } |
914 | aprint_normal_dev(sc->vmx_dev, "interrupting at %s\n", intrstr); |
915 | |
916 | for (i = 0; i < sc->vmx_ntxqueues; i++) |
917 | sc->vmx_txq[i].vxtxq_intr_idx = 0; |
918 | for (i = 0; i < sc->vmx_nrxqueues; i++) |
919 | sc->vmx_rxq[i].vxrxq_intr_idx = 0; |
920 | sc->vmx_event_intr_idx = 0; |
921 | |
922 | return (0); |
923 | } |
924 | |
925 | int |
926 | vmxnet3_setup_legacy_interrupt(struct vmxnet3_softc *sc) |
927 | { |
928 | pci_chipset_tag_t pc = sc->vmx_pa->pa_pc; |
929 | pci_intr_handle_t *intr; |
930 | void **ihs; |
931 | int i; |
932 | const char *intrstr; |
933 | char intrbuf[PCI_INTRSTR_LEN]; |
934 | char xnamebuf[32]; |
935 | |
936 | intr = &sc->vmx_intrs[0]; |
937 | ihs = sc->vmx_ihs; |
938 | |
939 | intrstr = pci_intr_string(pc, *intr, intrbuf, sizeof(intrbuf)); |
940 | |
941 | snprintf(xnamebuf, 32, "%s:legacy", device_xname(sc->vmx_dev)); |
942 | pci_intr_setattr(pc, intr, PCI_INTR_MPSAFE, true); |
943 | *ihs = pci_intr_establish_xname(pc, *intr, IPL_NET, |
944 | vmxnet3_legacy_intr, sc, xnamebuf); |
945 | if (*ihs == NULL) { |
946 | aprint_error_dev(sc->vmx_dev, |
947 | "unable to establish interrupt at %s\n", intrstr); |
948 | return (-1); |
949 | } |
950 | aprint_normal_dev(sc->vmx_dev, "interrupting at %s\n", intrstr); |
951 | |
952 | for (i = 0; i < sc->vmx_ntxqueues; i++) |
953 | sc->vmx_txq[i].vxtxq_intr_idx = 0; |
954 | for (i = 0; i < sc->vmx_nrxqueues; i++) |
955 | sc->vmx_rxq[i].vxrxq_intr_idx = 0; |
956 | sc->vmx_event_intr_idx = 0; |
957 | |
958 | return (0); |
959 | } |
960 | |
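/*
 * Propagate the chosen interrupt indices into the shared structures so
 * the device knows which vector to raise for events and for each queue.
 */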
961 | void |
962 | vmxnet3_set_interrupt_idx(struct vmxnet3_softc *sc) |
963 | { |
964 | struct vmxnet3_txqueue *txq; |
965 | struct vmxnet3_txq_shared *txs; |
966 | struct vmxnet3_rxqueue *rxq; |
967 | struct vmxnet3_rxq_shared *rxs; |
968 | int i; |
969 | |
970 | sc->vmx_ds->evintr = sc->vmx_event_intr_idx; |
971 | |
972 | for (i = 0; i < sc->vmx_ntxqueues; i++) { |
973 | txq = &sc->vmx_txq[i]; |
974 | txs = txq->vxtxq_ts; |
975 | txs->intr_idx = txq->vxtxq_intr_idx; |
976 | } |
977 | |
978 | for (i = 0; i < sc->vmx_nrxqueues; i++) { |
979 | rxq = &sc->vmx_rxq[i]; |
980 | rxs = rxq->vxrxq_rs; |
981 | rxs->intr_idx = rxq->vxrxq_intr_idx; |
982 | } |
983 | } |
984 | |
985 | int |
986 | vmxnet3_setup_interrupts(struct vmxnet3_softc *sc) |
987 | { |
988 | int error; |
989 | |
990 | switch (sc->vmx_intr_type) { |
991 | case VMXNET3_IT_MSIX: |
992 | error = vmxnet3_setup_msix_interrupts(sc); |
993 | break; |
994 | case VMXNET3_IT_MSI: |
995 | error = vmxnet3_setup_msi_interrupt(sc); |
996 | break; |
997 | case VMXNET3_IT_LEGACY: |
998 | error = vmxnet3_setup_legacy_interrupt(sc); |
999 | break; |
1000 | default: |
1001 | panic("%s: invalid interrupt type %d", __func__, |
1002 | sc->vmx_intr_type); |
1003 | } |
1004 | |
1005 | if (error == 0) |
1006 | vmxnet3_set_interrupt_idx(sc); |
1007 | |
1008 | return (error); |
1009 | } |
1010 | |
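/*
 * Each Rx queue uses VMXNET3_RXRINGS_PERQ command rings that feed a
 * single completion ring, so the completion ring is sized to hold the
 * descriptors of both command rings.
 */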
1011 | int |
1012 | vmxnet3_init_rxq(struct vmxnet3_softc *sc, int q) |
1013 | { |
1014 | struct vmxnet3_rxqueue *rxq; |
1015 | struct vmxnet3_rxring *rxr; |
1016 | int i; |
1017 | |
1018 | rxq = &sc->vmx_rxq[q]; |
1019 | |
1020 | snprintf(rxq->vxrxq_name, sizeof(rxq->vxrxq_name), "%s-rx%d", |
1021 | device_xname(sc->vmx_dev), q); |
1022 | rxq->vxrxq_mtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET /* XXX */); |
1023 | |
1024 | rxq->vxrxq_sc = sc; |
1025 | rxq->vxrxq_id = q; |
1026 | |
1027 | for (i = 0; i < VMXNET3_RXRINGS_PERQ; i++) { |
1028 | rxr = &rxq->vxrxq_cmd_ring[i]; |
1029 | rxr->vxrxr_rid = i; |
1030 | rxr->vxrxr_ndesc = sc->vmx_nrxdescs; |
1031 | rxr->vxrxr_rxbuf = kmem_zalloc(rxr->vxrxr_ndesc * |
1032 | sizeof(struct vmxnet3_rxbuf), KM_SLEEP); |
1033 | if (rxr->vxrxr_rxbuf == NULL) |
1034 | return (ENOMEM); |
1035 | |
1036 | rxq->vxrxq_comp_ring.vxcr_ndesc += sc->vmx_nrxdescs; |
1037 | } |
1038 | |
1039 | return (0); |
1040 | } |
1041 | |
1042 | int |
1043 | vmxnet3_init_txq(struct vmxnet3_softc *sc, int q) |
1044 | { |
1045 | struct vmxnet3_txqueue *txq; |
1046 | struct vmxnet3_txring *txr; |
1047 | |
1048 | txq = &sc->vmx_txq[q]; |
1049 | txr = &txq->vxtxq_cmd_ring; |
1050 | |
1051 | snprintf(txq->vxtxq_name, sizeof(txq->vxtxq_name), "%s-tx%d", |
1052 | device_xname(sc->vmx_dev), q); |
1053 | txq->vxtxq_mtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET /* XXX */); |
1054 | |
1055 | txq->vxtxq_sc = sc; |
1056 | txq->vxtxq_id = q; |
1057 | |
1058 | txr->vxtxr_ndesc = sc->vmx_ntxdescs; |
1059 | txr->vxtxr_txbuf = kmem_zalloc(txr->vxtxr_ndesc * |
1060 | sizeof(struct vmxnet3_txbuf), KM_SLEEP); |
1061 | if (txr->vxtxr_txbuf == NULL) |
1062 | return (ENOMEM); |
1063 | |
1064 | txq->vxtxq_comp_ring.vxcr_ndesc = sc->vmx_ntxdescs; |
1065 | |
1066 | return (0); |
1067 | } |
1068 | |
1069 | int |
1070 | vmxnet3_alloc_rxtx_queues(struct vmxnet3_softc *sc) |
1071 | { |
1072 | int i, error; |
1073 | |
1074 | KASSERT(!cpu_intr_p()); |
1075 | KASSERT(!cpu_softintr_p()); |
1076 | |
1077 | /* |
1078 | * Only attempt to create multiple queues if MSIX is available. |
1079 | * This check prevents us from allocating queue structures that |
1080 | * we will not use. |
1081 | * |
1082 | * FreeBSD: |
1083 | * MSIX is disabled by default because it's apparently broken for |
1084 | * devices passed through by at least ESXi 5.1. |
1085 | * The hw.pci.honor_msi_blacklist tunable must be set to zero for MSIX. |
1086 | */ |
1087 | if (sc->vmx_flags & VMXNET3_FLAG_NO_MSIX) { |
1088 | sc->vmx_max_nrxqueues = 1; |
1089 | sc->vmx_max_ntxqueues = 1; |
1090 | } |
1091 | |
1092 | sc->vmx_rxq = kmem_zalloc( |
1093 | sizeof(struct vmxnet3_rxqueue) * sc->vmx_max_nrxqueues, KM_SLEEP); |
1094 | sc->vmx_txq = kmem_zalloc( |
1095 | sizeof(struct vmxnet3_txqueue) * sc->vmx_max_ntxqueues, KM_SLEEP); |
1096 | if (sc->vmx_rxq == NULL || sc->vmx_txq == NULL) |
1097 | return (ENOMEM); |
1098 | |
1099 | for (i = 0; i < sc->vmx_max_nrxqueues; i++) { |
1100 | error = vmxnet3_init_rxq(sc, i); |
1101 | if (error) |
1102 | return (error); |
1103 | } |
1104 | |
1105 | for (i = 0; i < sc->vmx_max_ntxqueues; i++) { |
1106 | error = vmxnet3_init_txq(sc, i); |
1107 | if (error) |
1108 | return (error); |
1109 | } |
1110 | |
1111 | return (0); |
1112 | } |
1113 | |
1114 | void |
1115 | vmxnet3_destroy_rxq(struct vmxnet3_rxqueue *rxq) |
1116 | { |
1117 | struct vmxnet3_rxring *rxr; |
1118 | int i; |
1119 | |
1120 | rxq->vxrxq_sc = NULL; |
1121 | rxq->vxrxq_id = -1; |
1122 | |
1123 | for (i = 0; i < VMXNET3_RXRINGS_PERQ; i++) { |
1124 | rxr = &rxq->vxrxq_cmd_ring[i]; |
1125 | |
1126 | if (rxr->vxrxr_rxbuf != NULL) { |
1127 | kmem_free(rxr->vxrxr_rxbuf, |
1128 | rxr->vxrxr_ndesc * sizeof(struct vmxnet3_rxbuf)); |
1129 | rxr->vxrxr_rxbuf = NULL; |
1130 | } |
1131 | } |
1132 | |
1133 | if (rxq->vxrxq_mtx != NULL) |
1134 | mutex_obj_free(rxq->vxrxq_mtx); |
1135 | } |
1136 | |
1137 | void |
1138 | vmxnet3_destroy_txq(struct vmxnet3_txqueue *txq) |
1139 | { |
1140 | struct vmxnet3_txring *txr; |
1141 | |
1142 | txr = &txq->vxtxq_cmd_ring; |
1143 | |
1144 | txq->vxtxq_sc = NULL; |
1145 | txq->vxtxq_id = -1; |
1146 | |
1147 | if (txr->vxtxr_txbuf != NULL) { |
1148 | kmem_free(txr->vxtxr_txbuf, |
1149 | txr->vxtxr_ndesc * sizeof(struct vmxnet3_txbuf)); |
1150 | txr->vxtxr_txbuf = NULL; |
1151 | } |
1152 | |
1153 | if (txq->vxtxq_mtx != NULL) |
1154 | mutex_obj_free(txq->vxtxq_mtx); |
1155 | } |
1156 | |
1157 | void |
1158 | vmxnet3_free_rxtx_queues(struct vmxnet3_softc *sc) |
1159 | { |
1160 | int i; |
1161 | |
1162 | if (sc->vmx_rxq != NULL) { |
1163 | for (i = 0; i < sc->vmx_max_nrxqueues; i++) |
1164 | vmxnet3_destroy_rxq(&sc->vmx_rxq[i]); |
1165 | kmem_free(sc->vmx_rxq, |
1166 | sizeof(struct vmxnet3_rxqueue) * sc->vmx_max_nrxqueues); |
1167 | sc->vmx_rxq = NULL; |
1168 | } |
1169 | |
1170 | if (sc->vmx_txq != NULL) { |
1171 | for (i = 0; i < sc->vmx_max_ntxqueues; i++) |
1172 | vmxnet3_destroy_txq(&sc->vmx_txq[i]); |
1173 | kmem_free(sc->vmx_txq, |
1174 | sizeof(struct vmxnet3_txqueue) * sc->vmx_max_ntxqueues); |
1175 | sc->vmx_txq = NULL; |
1176 | } |
1177 | } |
1178 | |
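/*
 * Allocate the DMA memory shared with the device: the driver_shared
 * structure, one contiguous block holding the per-queue Tx/Rx shared
 * structures, and (when RSS is enabled) the RSS configuration.
 */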
1179 | int |
1180 | vmxnet3_alloc_shared_data(struct vmxnet3_softc *sc) |
1181 | { |
1182 | device_t dev; |
1183 | uint8_t *kva; |
1184 | size_t size; |
1185 | int i, error; |
1186 | |
1187 | dev = sc->vmx_dev; |
1188 | |
1189 | size = sizeof(struct vmxnet3_driver_shared); |
1190 | error = vmxnet3_dma_malloc(sc, size, 1, &sc->vmx_ds_dma); |
1191 | if (error) { |
1192 | device_printf(dev, "cannot alloc shared memory\n"); |
1193 | return (error); |
1194 | } |
1195 | sc->vmx_ds = (struct vmxnet3_driver_shared *) sc->vmx_ds_dma.dma_vaddr; |
1196 | |
1197 | size = sc->vmx_ntxqueues * sizeof(struct vmxnet3_txq_shared) + |
1198 | sc->vmx_nrxqueues * sizeof(struct vmxnet3_rxq_shared); |
1199 | error = vmxnet3_dma_malloc(sc, size, 128, &sc->vmx_qs_dma); |
1200 | if (error) { |
1201 | device_printf(dev, "cannot alloc queue shared memory\n"); |
1202 | return (error); |
1203 | } |
1204 | sc->vmx_qs = (void *) sc->vmx_qs_dma.dma_vaddr; |
1205 | kva = sc->vmx_qs; |
1206 | |
1207 | for (i = 0; i < sc->vmx_ntxqueues; i++) { |
1208 | sc->vmx_txq[i].vxtxq_ts = (struct vmxnet3_txq_shared *) kva; |
1209 | kva += sizeof(struct vmxnet3_txq_shared); |
1210 | } |
1211 | for (i = 0; i < sc->vmx_nrxqueues; i++) { |
1212 | sc->vmx_rxq[i].vxrxq_rs = (struct vmxnet3_rxq_shared *) kva; |
1213 | kva += sizeof(struct vmxnet3_rxq_shared); |
1214 | } |
1215 | |
1216 | if (sc->vmx_flags & VMXNET3_FLAG_RSS) { |
1217 | size = sizeof(struct vmxnet3_rss_shared); |
1218 | error = vmxnet3_dma_malloc(sc, size, 128, &sc->vmx_rss_dma); |
1219 | if (error) { |
1220 | device_printf(dev, "cannot alloc rss shared memory\n"); |
1221 | return (error); |
1222 | } |
1223 | sc->vmx_rss = |
1224 | (struct vmxnet3_rss_shared *) sc->vmx_rss_dma.dma_vaddr; |
1225 | } |
1226 | |
1227 | return (0); |
1228 | } |
1229 | |
1230 | void |
1231 | vmxnet3_free_shared_data(struct vmxnet3_softc *sc) |
1232 | { |
1233 | |
1234 | if (sc->vmx_rss != NULL) { |
1235 | vmxnet3_dma_free(sc, &sc->vmx_rss_dma); |
1236 | sc->vmx_rss = NULL; |
1237 | } |
1238 | |
1239 | if (sc->vmx_qs != NULL) { |
1240 | vmxnet3_dma_free(sc, &sc->vmx_qs_dma); |
1241 | sc->vmx_qs = NULL; |
1242 | } |
1243 | |
1244 | if (sc->vmx_ds != NULL) { |
1245 | vmxnet3_dma_free(sc, &sc->vmx_ds_dma); |
1246 | sc->vmx_ds = NULL; |
1247 | } |
1248 | } |
1249 | |
1250 | int |
1251 | vmxnet3_alloc_txq_data(struct vmxnet3_softc *sc) |
1252 | { |
1253 | device_t dev; |
1254 | struct vmxnet3_txqueue *txq; |
1255 | struct vmxnet3_txring *txr; |
1256 | struct vmxnet3_comp_ring *txc; |
1257 | size_t descsz, compsz; |
1258 | int i, q, error; |
1259 | |
1260 | dev = sc->vmx_dev; |
1261 | |
1262 | for (q = 0; q < sc->vmx_ntxqueues; q++) { |
1263 | txq = &sc->vmx_txq[q]; |
1264 | txr = &txq->vxtxq_cmd_ring; |
1265 | txc = &txq->vxtxq_comp_ring; |
1266 | |
1267 | descsz = txr->vxtxr_ndesc * sizeof(struct vmxnet3_txdesc); |
1268 | compsz = txr->vxtxr_ndesc * sizeof(struct vmxnet3_txcompdesc); |
1269 | |
1270 | error = vmxnet3_dma_malloc(sc, descsz, 512, &txr->vxtxr_dma); |
1271 | if (error) { |
1272 | device_printf(dev, "cannot alloc Tx descriptors for " |
1273 | "queue %d error %d\n", q, error); |
1274 | return (error); |
1275 | } |
1276 | txr->vxtxr_txd = |
1277 | (struct vmxnet3_txdesc *) txr->vxtxr_dma.dma_vaddr; |
1278 | |
1279 | error = vmxnet3_dma_malloc(sc, compsz, 512, &txc->vxcr_dma); |
1280 | if (error) { |
1281 | device_printf(dev, "cannot alloc Tx comp descriptors " |
1282 | "for queue %d error %d\n", q, error); |
1283 | return (error); |
1284 | } |
1285 | txc->vxcr_u.txcd = |
1286 | (struct vmxnet3_txcompdesc *) txc->vxcr_dma.dma_vaddr; |
1287 | |
1288 | for (i = 0; i < txr->vxtxr_ndesc; i++) { |
1289 | error = bus_dmamap_create(sc->vmx_dmat, VMXNET3_TX_MAXSIZE, |
1290 | VMXNET3_TX_MAXSEGS, VMXNET3_TX_MAXSEGSIZE, 0, BUS_DMA_NOWAIT, |
1291 | &txr->vxtxr_txbuf[i].vtxb_dmamap); |
1292 | if (error) { |
1293 | device_printf(dev, "unable to create Tx buf " |
1294 | "dmamap for queue %d idx %d\n", q, i); |
1295 | return (error); |
1296 | } |
1297 | } |
1298 | } |
1299 | |
1300 | return (0); |
1301 | } |
1302 | |
1303 | void |
1304 | vmxnet3_free_txq_data(struct vmxnet3_softc *sc) |
1305 | { |
1306 | struct vmxnet3_txqueue *txq; |
1307 | struct vmxnet3_txring *txr; |
1308 | struct vmxnet3_comp_ring *txc; |
1309 | struct vmxnet3_txbuf *txb; |
1310 | int i, q; |
1311 | |
1312 | for (q = 0; q < sc->vmx_ntxqueues; q++) { |
1313 | txq = &sc->vmx_txq[q]; |
1314 | txr = &txq->vxtxq_cmd_ring; |
1315 | txc = &txq->vxtxq_comp_ring; |
1316 | |
1317 | for (i = 0; i < txr->vxtxr_ndesc; i++) { |
1318 | txb = &txr->vxtxr_txbuf[i]; |
1319 | if (txb->vtxb_dmamap != NULL) { |
1320 | bus_dmamap_destroy(sc->vmx_dmat, |
1321 | txb->vtxb_dmamap); |
1322 | txb->vtxb_dmamap = NULL; |
1323 | } |
1324 | } |
1325 | |
1326 | if (txc->vxcr_u.txcd != NULL) { |
1327 | vmxnet3_dma_free(sc, &txc->vxcr_dma); |
1328 | txc->vxcr_u.txcd = NULL; |
1329 | } |
1330 | |
1331 | if (txr->vxtxr_txd != NULL) { |
1332 | vmxnet3_dma_free(sc, &txr->vxtxr_dma); |
1333 | txr->vxtxr_txd = NULL; |
1334 | } |
1335 | } |
1336 | } |
1337 | |
1338 | int |
1339 | vmxnet3_alloc_rxq_data(struct vmxnet3_softc *sc) |
1340 | { |
1341 | device_t dev; |
1342 | struct vmxnet3_rxqueue *rxq; |
1343 | struct vmxnet3_rxring *rxr; |
1344 | struct vmxnet3_comp_ring *rxc; |
1345 | int descsz, compsz; |
1346 | int i, j, q, error; |
1347 | |
1348 | dev = sc->vmx_dev; |
1349 | |
1350 | for (q = 0; q < sc->vmx_nrxqueues; q++) { |
1351 | rxq = &sc->vmx_rxq[q]; |
1352 | rxc = &rxq->vxrxq_comp_ring; |
1353 | compsz = 0; |
1354 | |
1355 | for (i = 0; i < VMXNET3_RXRINGS_PERQ; i++) { |
1356 | rxr = &rxq->vxrxq_cmd_ring[i]; |
1357 | |
1358 | descsz = rxr->vxrxr_ndesc * |
1359 | sizeof(struct vmxnet3_rxdesc); |
1360 | compsz += rxr->vxrxr_ndesc * |
1361 | sizeof(struct vmxnet3_rxcompdesc); |
1362 | |
1363 | error = vmxnet3_dma_malloc(sc, descsz, 512, |
1364 | &rxr->vxrxr_dma); |
1365 | if (error) { |
1366 | device_printf(dev, "cannot allocate Rx " |
1367 | "descriptors for queue %d/%d error %d\n", |
1368 | i, q, error); |
1369 | return (error); |
1370 | } |
1371 | rxr->vxrxr_rxd = |
1372 | (struct vmxnet3_rxdesc *) rxr->vxrxr_dma.dma_vaddr; |
1373 | } |
1374 | |
1375 | error = vmxnet3_dma_malloc(sc, compsz, 512, &rxc->vxcr_dma); |
1376 | if (error) { |
1377 | device_printf(dev, "cannot alloc Rx comp descriptors " |
1378 | "for queue %d error %d\n", q, error); |
1379 | return (error); |
1380 | } |
1381 | rxc->vxcr_u.rxcd = |
1382 | (struct vmxnet3_rxcompdesc *) rxc->vxcr_dma.dma_vaddr; |
1383 | |
1384 | for (i = 0; i < VMXNET3_RXRINGS_PERQ; i++) { |
1385 | rxr = &rxq->vxrxq_cmd_ring[i]; |
1386 | |
1387 | error = bus_dmamap_create(sc->vmx_dmat, JUMBO_LEN, 1, |
1388 | JUMBO_LEN, 0, BUS_DMA_NOWAIT, |
1389 | &rxr->vxrxr_spare_dmap); |
1390 | if (error) { |
1391 | device_printf(dev, "unable to create spare " |
1392 | "dmamap for queue %d/%d error %d\n", |
1393 | q, i, error); |
1394 | return (error); |
1395 | } |
1396 | |
1397 | for (j = 0; j < rxr->vxrxr_ndesc; j++) { |
1398 | error = bus_dmamap_create(sc->vmx_dmat, JUMBO_LEN, 1, |
1399 | JUMBO_LEN, 0, BUS_DMA_NOWAIT, |
1400 | &rxr->vxrxr_rxbuf[j].vrxb_dmamap); |
1401 | if (error) { |
1402 | device_printf(dev, "unable to create " |
1403 | "dmamap for queue %d/%d slot %d " |
1404 | "error %d\n", |
1405 | q, i, j, error); |
1406 | return (error); |
1407 | } |
1408 | } |
1409 | } |
1410 | } |
1411 | |
1412 | return (0); |
1413 | } |
1414 | |
1415 | void |
1416 | vmxnet3_free_rxq_data(struct vmxnet3_softc *sc) |
1417 | { |
1418 | struct vmxnet3_rxqueue *rxq; |
1419 | struct vmxnet3_rxring *rxr; |
1420 | struct vmxnet3_comp_ring *rxc; |
1421 | struct vmxnet3_rxbuf *rxb; |
1422 | int i, j, q; |
1423 | |
1424 | for (q = 0; q < sc->vmx_nrxqueues; q++) { |
1425 | rxq = &sc->vmx_rxq[q]; |
1426 | rxc = &rxq->vxrxq_comp_ring; |
1427 | |
1428 | for (i = 0; i < VMXNET3_RXRINGS_PERQ; i++) { |
1429 | rxr = &rxq->vxrxq_cmd_ring[i]; |
1430 | |
1431 | if (rxr->vxrxr_spare_dmap != NULL) { |
1432 | bus_dmamap_destroy(sc->vmx_dmat, |
1433 | rxr->vxrxr_spare_dmap); |
1434 | rxr->vxrxr_spare_dmap = NULL; |
1435 | } |
1436 | |
1437 | for (j = 0; j < rxr->vxrxr_ndesc; j++) { |
1438 | rxb = &rxr->vxrxr_rxbuf[j]; |
1439 | if (rxb->vrxb_dmamap != NULL) { |
1440 | bus_dmamap_destroy(sc->vmx_dmat, |
1441 | rxb->vrxb_dmamap); |
1442 | rxb->vrxb_dmamap = NULL; |
1443 | } |
1444 | } |
1445 | } |
1446 | |
1447 | if (rxc->vxcr_u.rxcd != NULL) { |
1448 | vmxnet3_dma_free(sc, &rxc->vxcr_dma); |
1449 | rxc->vxcr_u.rxcd = NULL; |
1450 | } |
1451 | |
1452 | for (i = 0; i < VMXNET3_RXRINGS_PERQ; i++) { |
1453 | rxr = &rxq->vxrxq_cmd_ring[i]; |
1454 | |
1455 | if (rxr->vxrxr_rxd != NULL) { |
1456 | vmxnet3_dma_free(sc, &rxr->vxrxr_dma); |
1457 | rxr->vxrxr_rxd = NULL; |
1458 | } |
1459 | } |
1460 | } |
1461 | } |
1462 | |
1463 | int |
1464 | vmxnet3_alloc_queue_data(struct vmxnet3_softc *sc) |
1465 | { |
1466 | int error; |
1467 | |
1468 | error = vmxnet3_alloc_txq_data(sc); |
1469 | if (error) |
1470 | return (error); |
1471 | |
1472 | error = vmxnet3_alloc_rxq_data(sc); |
1473 | if (error) |
1474 | return (error); |
1475 | |
1476 | return (0); |
1477 | } |
1478 | |
1479 | void |
1480 | vmxnet3_free_queue_data(struct vmxnet3_softc *sc) |
1481 | { |
1482 | |
1483 | if (sc->vmx_rxq != NULL) |
1484 | vmxnet3_free_rxq_data(sc); |
1485 | |
1486 | if (sc->vmx_txq != NULL) |
1487 | vmxnet3_free_txq_data(sc); |
1488 | } |
1489 | |
1490 | int |
1491 | vmxnet3_alloc_mcast_table(struct vmxnet3_softc *sc) |
1492 | { |
1493 | int error; |
1494 | |
1495 | error = vmxnet3_dma_malloc(sc, VMXNET3_MULTICAST_MAX * ETHER_ADDR_LEN, |
1496 | 32, &sc->vmx_mcast_dma); |
1497 | if (error) |
1498 | device_printf(sc->vmx_dev, "unable to alloc multicast table\n"); |
1499 | else |
1500 | sc->vmx_mcast = sc->vmx_mcast_dma.dma_vaddr; |
1501 | |
1502 | return (error); |
1503 | } |
1504 | |
1505 | void |
1506 | vmxnet3_free_mcast_table(struct vmxnet3_softc *sc) |
1507 | { |
1508 | |
1509 | if (sc->vmx_mcast != NULL) { |
1510 | vmxnet3_dma_free(sc, &sc->vmx_mcast_dma); |
1511 | sc->vmx_mcast = NULL; |
1512 | } |
1513 | } |
1514 | |
1515 | void |
1516 | vmxnet3_init_shared_data(struct vmxnet3_softc *sc) |
1517 | { |
1518 | struct vmxnet3_driver_shared *ds; |
1519 | struct vmxnet3_txqueue *txq; |
1520 | struct vmxnet3_txq_shared *txs; |
1521 | struct vmxnet3_rxqueue *rxq; |
1522 | struct vmxnet3_rxq_shared *rxs; |
1523 | int i; |
1524 | |
1525 | ds = sc->vmx_ds; |
1526 | |
1527 | /* |
1528 | * Initialize fields of the shared data that remain the same across |
1529 | * reinits. Note the shared data is zeroed when allocated. |
1530 | */ |
1531 | |
1532 | ds->magic = VMXNET3_REV1_MAGIC; |
1533 | |
1534 | /* DriverInfo */ |
1535 | ds->version = VMXNET3_DRIVER_VERSION; |
1536 | ds->guest = VMXNET3_GOS_FREEBSD | |
1537 | #ifdef __LP64__ |
1538 | VMXNET3_GOS_64BIT; |
1539 | #else |
1540 | VMXNET3_GOS_32BIT; |
1541 | #endif |
1542 | ds->vmxnet3_revision = 1; |
1543 | ds->upt_version = 1; |
1544 | |
1545 | /* Misc. conf */ |
1546 | ds->driver_data = vtophys(sc); |
1547 | ds->driver_data_len = sizeof(struct vmxnet3_softc); |
1548 | ds->queue_shared = sc->vmx_qs_dma.dma_paddr; |
1549 | ds->queue_shared_len = sc->vmx_qs_dma.dma_size; |
1550 | ds->nrxsg_max = sc->vmx_max_rxsegs; |
1551 | |
1552 | /* RSS conf */ |
1553 | if (sc->vmx_flags & VMXNET3_FLAG_RSS) { |
1554 | ds->rss.version = 1; |
1555 | ds->rss.paddr = sc->vmx_rss_dma.dma_paddr; |
1556 | ds->rss.len = sc->vmx_rss_dma.dma_size; |
1557 | } |
1558 | |
1559 | /* Interrupt control. */ |
1560 | ds->automask = sc->vmx_intr_mask_mode == VMXNET3_IMM_AUTO; |
1561 | ds->nintr = sc->vmx_nintrs; |
1562 | ds->evintr = sc->vmx_event_intr_idx; |
1563 | ds->ictrl = VMXNET3_ICTRL_DISABLE_ALL; |
1564 | |
1565 | for (i = 0; i < sc->vmx_nintrs; i++) |
1566 | ds->modlevel[i] = UPT1_IMOD_ADAPTIVE; |
1567 | |
1568 | /* Receive filter. */ |
1569 | ds->mcast_table = sc->vmx_mcast_dma.dma_paddr; |
1570 | ds->mcast_tablelen = sc->vmx_mcast_dma.dma_size; |
1571 | |
1572 | /* Tx queues */ |
1573 | for (i = 0; i < sc->vmx_ntxqueues; i++) { |
1574 | txq = &sc->vmx_txq[i]; |
1575 | txs = txq->vxtxq_ts; |
1576 | |
1577 | txs->cmd_ring = txq->vxtxq_cmd_ring.vxtxr_dma.dma_paddr; |
1578 | txs->cmd_ring_len = txq->vxtxq_cmd_ring.vxtxr_ndesc; |
1579 | txs->comp_ring = txq->vxtxq_comp_ring.vxcr_dma.dma_paddr; |
1580 | txs->comp_ring_len = txq->vxtxq_comp_ring.vxcr_ndesc; |
1581 | txs->driver_data = vtophys(txq); |
1582 | txs->driver_data_len = sizeof(struct vmxnet3_txqueue); |
1583 | } |
1584 | |
1585 | /* Rx queues */ |
1586 | for (i = 0; i < sc->vmx_nrxqueues; i++) { |
1587 | rxq = &sc->vmx_rxq[i]; |
1588 | rxs = rxq->vxrxq_rs; |
1589 | |
1590 | rxs->cmd_ring[0] = rxq->vxrxq_cmd_ring[0].vxrxr_dma.dma_paddr; |
1591 | rxs->cmd_ring_len[0] = rxq->vxrxq_cmd_ring[0].vxrxr_ndesc; |
1592 | rxs->cmd_ring[1] = rxq->vxrxq_cmd_ring[1].vxrxr_dma.dma_paddr; |
1593 | rxs->cmd_ring_len[1] = rxq->vxrxq_cmd_ring[1].vxrxr_ndesc; |
1594 | rxs->comp_ring = rxq->vxrxq_comp_ring.vxcr_dma.dma_paddr; |
1595 | rxs->comp_ring_len = rxq->vxrxq_comp_ring.vxcr_ndesc; |
1596 | rxs->driver_data = vtophys(rxq); |
1597 | rxs->driver_data_len = sizeof(struct vmxnet3_rxqueue); |
1598 | } |
1599 | } |
1600 | |
1601 | void |
1602 | vmxnet3_reinit_rss_shared_data(struct vmxnet3_softc *sc) |
1603 | { |
1604 | /* |
1605 | * Use the same key as the Linux driver until FreeBSD can do |
1606 | * RSS (presumably Toeplitz) in software. |
1607 | */ |
1608 | static const uint8_t rss_key[UPT1_RSS_MAX_KEY_SIZE] = { |
1609 | 0x3b, 0x56, 0xd1, 0x56, 0x13, 0x4a, 0xe7, 0xac, |
1610 | 0xe8, 0x79, 0x09, 0x75, 0xe8, 0x65, 0x79, 0x28, |
1611 | 0x35, 0x12, 0xb9, 0x56, 0x7c, 0x76, 0x4b, 0x70, |
1612 | 0xd8, 0x56, 0xa3, 0x18, 0x9b, 0x0a, 0xee, 0xf3, |
1613 | 0x96, 0xa6, 0x9f, 0x8f, 0x9e, 0x8c, 0x90, 0xc9, |
1614 | }; |
1615 | |
1616 | struct vmxnet3_rss_shared *rss; |
1617 | int i; |
1618 | |
1619 | rss = sc->vmx_rss; |
1620 | |
1621 | rss->hash_type = |
1622 | UPT1_RSS_HASH_TYPE_IPV4 | UPT1_RSS_HASH_TYPE_TCP_IPV4 | |
1623 | UPT1_RSS_HASH_TYPE_IPV6 | UPT1_RSS_HASH_TYPE_TCP_IPV6; |
1624 | rss->hash_func = UPT1_RSS_HASH_FUNC_TOEPLITZ; |
1625 | rss->hash_key_size = UPT1_RSS_MAX_KEY_SIZE; |
1626 | rss->ind_table_size = UPT1_RSS_MAX_IND_TABLE_SIZE; |
1627 | memcpy(rss->hash_key, rss_key, UPT1_RSS_MAX_KEY_SIZE); |
1628 | |
1629 | for (i = 0; i < UPT1_RSS_MAX_IND_TABLE_SIZE; i++) |
1630 | rss->ind_table[i] = i % sc->vmx_nrxqueues; |
1631 | } |
1632 | |
1633 | void |
1634 | vmxnet3_reinit_shared_data(struct vmxnet3_softc *sc) |
1635 | { |
1636 | struct ifnet *ifp; |
1637 | struct vmxnet3_driver_shared *ds; |
1638 | |
1639 | ifp = &sc->vmx_ethercom.ec_if; |
1640 | ds = sc->vmx_ds; |
1641 | |
1642 | ds->mtu = ifp->if_mtu; |
1643 | ds->ntxqueue = sc->vmx_ntxqueues; |
1644 | ds->nrxqueue = sc->vmx_nrxqueues; |
1645 | |
1646 | ds->upt_features = 0; |
1647 | if (ifp->if_capenable & |
1648 | (IFCAP_CSUM_IPv4_Rx | IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx | |
1649 | IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx)) |
1650 | ds->upt_features |= UPT1_F_CSUM; |
1651 | if (sc->vmx_ethercom.ec_capenable & ETHERCAP_VLAN_HWTAGGING) |
1652 | ds->upt_features |= UPT1_F_VLAN; |
1653 | |
1654 | if (sc->vmx_flags & VMXNET3_FLAG_RSS) { |
1655 | ds->upt_features |= UPT1_F_RSS; |
1656 | vmxnet3_reinit_rss_shared_data(sc); |
1657 | } |
1658 | |
1659 | vmxnet3_write_bar1(sc, VMXNET3_BAR1_DSL, sc->vmx_ds_dma.dma_paddr); |
1660 | vmxnet3_write_bar1(sc, VMXNET3_BAR1_DSH, |
1661 | (uint64_t) sc->vmx_ds_dma.dma_paddr >> 32); |
1662 | } |
1663 | |
1664 | int |
1665 | vmxnet3_alloc_data(struct vmxnet3_softc *sc) |
1666 | { |
1667 | int error; |
1668 | |
1669 | error = vmxnet3_alloc_shared_data(sc); |
1670 | if (error) |
1671 | return (error); |
1672 | |
1673 | error = vmxnet3_alloc_queue_data(sc); |
1674 | if (error) |
1675 | return (error); |
1676 | |
1677 | error = vmxnet3_alloc_mcast_table(sc); |
1678 | if (error) |
1679 | return (error); |
1680 | |
1681 | vmxnet3_init_shared_data(sc); |
1682 | |
1683 | return (0); |
1684 | } |
1685 | |
1686 | void |
1687 | vmxnet3_free_data(struct vmxnet3_softc *sc) |
1688 | { |
1689 | |
1690 | vmxnet3_free_mcast_table(sc); |
1691 | vmxnet3_free_queue_data(sc); |
1692 | vmxnet3_free_shared_data(sc); |
1693 | } |
1694 | |
1695 | int |
1696 | vmxnet3_setup_interface(struct vmxnet3_softc *sc) |
1697 | { |
1698 | struct ifnet *ifp = &sc->vmx_ethercom.ec_if; |
1699 | |
1700 | vmxnet3_get_lladdr(sc); |
1701 | aprint_normal_dev(sc->vmx_dev, "Ethernet address %s\n", |
1702 | ether_sprintf(sc->vmx_lladdr)); |
1703 | vmxnet3_set_lladdr(sc); |
1704 | |
1705 | strlcpy(ifp->if_xname, device_xname(sc->vmx_dev), IFNAMSIZ); |
1706 | ifp->if_softc = sc; |
1707 | ifp->if_flags = IFF_BROADCAST | IFF_MULTICAST | IFF_SIMPLEX; |
1708 | ifp->if_ioctl = vmxnet3_ioctl; |
1709 | ifp->if_start = vmxnet3_start; |
1710 | ifp->if_watchdog = NULL; |
1711 | ifp->if_init = vmxnet3_init; |
1712 | ifp->if_stop = vmxnet3_stop; |
1713 | sc->vmx_ethercom.ec_if.if_capabilities |= IFCAP_CSUM_IPv4_Rx | |
1714 | IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx | |
1715 | IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx | |
1716 | IFCAP_CSUM_TCPv6_Tx | IFCAP_CSUM_TCPv6_Rx | |
1717 | IFCAP_CSUM_UDPv6_Tx | IFCAP_CSUM_UDPv6_Rx; |
1718 | |
1719 | ifp->if_capenable = ifp->if_capabilities; |
1720 | |
1721 | sc->vmx_ethercom.ec_if.if_capabilities |= IFCAP_TSOv4 | IFCAP_TSOv6; |
1722 | |
1723 | sc->vmx_ethercom.ec_capabilities |= |
1724 | ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING; |
1725 | sc->vmx_ethercom.ec_capenable |= ETHERCAP_VLAN_HWTAGGING; |
1726 | |
1727 | IFQ_SET_MAXLEN(&ifp->if_snd, sc->vmx_ntxdescs); |
1728 | IFQ_SET_READY(&ifp->if_snd); |
1729 | |
1730 | /* Initialize ifmedia structures. */ |
1731 | sc->vmx_ethercom.ec_ifmedia = &sc->vmx_media; |
1732 | ifmedia_init(&sc->vmx_media, IFM_IMASK, vmxnet3_media_change, |
1733 | vmxnet3_media_status); |
1734 | ifmedia_add(&sc->vmx_media, IFM_ETHER | IFM_AUTO, 0, NULL); |
1735 | ifmedia_add(&sc->vmx_media, IFM_ETHER | IFM_10G_T | IFM_FDX, 0, NULL); |
1736 | ifmedia_add(&sc->vmx_media, IFM_ETHER | IFM_10G_T, 0, NULL); |
1737 | ifmedia_add(&sc->vmx_media, IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL); |
1738 | ifmedia_add(&sc->vmx_media, IFM_ETHER | IFM_1000_T, 0, NULL); |
1739 | ifmedia_set(&sc->vmx_media, IFM_ETHER | IFM_AUTO); |
1740 | |
1741 | if_attach(ifp); |
1742 | if_deferred_start_init(ifp, NULL); |
1743 | ether_ifattach(ifp, sc->vmx_lladdr); |
1744 | ether_set_ifflags_cb(&sc->vmx_ethercom, vmxnet3_ifflags_cb); |
1745 | vmxnet3_link_status(sc); |
1746 | |
1747 | return (0); |
1748 | } |
1749 | |
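/*
 * Event interrupt handler: acknowledge the pending events, update the
 * link state on a link event, and reinitialize the device if a queue
 * reported an error.
 */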
1750 | void |
1751 | vmxnet3_evintr(struct vmxnet3_softc *sc) |
1752 | { |
1753 | device_t dev; |
1754 | struct vmxnet3_txq_shared *ts; |
1755 | struct vmxnet3_rxq_shared *rs; |
1756 | uint32_t event; |
1757 | int reset; |
1758 | |
1759 | dev = sc->vmx_dev; |
1760 | reset = 0; |
1761 | |
1762 | VMXNET3_CORE_LOCK(sc); |
1763 | |
1764 | /* Clear events. */ |
1765 | event = sc->vmx_ds->event; |
1766 | vmxnet3_write_bar1(sc, VMXNET3_BAR1_EVENT, event); |
1767 | |
1768 | if (event & VMXNET3_EVENT_LINK) { |
1769 | vmxnet3_link_status(sc); |
1770 | if (sc->vmx_link_active != 0) |
1771 | if_schedule_deferred_start(&sc->vmx_ethercom.ec_if); |
1772 | } |
1773 | |
1774 | if (event & (VMXNET3_EVENT_TQERROR | VMXNET3_EVENT_RQERROR)) { |
1775 | reset = 1; |
1776 | vmxnet3_read_cmd(sc, VMXNET3_CMD_GET_STATUS); |
1777 | ts = sc->vmx_txq[0].vxtxq_ts; |
1778 | if (ts->stopped != 0) |
1779 | device_printf(dev, "Tx queue error %#x\n" , ts->error); |
1780 | rs = sc->vmx_rxq[0].vxrxq_rs; |
1781 | if (rs->stopped != 0) |
1782 | device_printf(dev, "Rx queue error %#x\n" , rs->error); |
1783 | device_printf(dev, "Rx/Tx queue error event ... resetting\n" ); |
1784 | } |
1785 | |
1786 | if (event & VMXNET3_EVENT_DIC) |
1787 | device_printf(dev, "device implementation change event\n" ); |
1788 | if (event & VMXNET3_EVENT_DEBUG) |
1789 | device_printf(dev, "debug event\n" ); |
1790 | |
1791 | if (reset != 0) |
1792 | vmxnet3_init_locked(sc); |
1793 | |
1794 | VMXNET3_CORE_UNLOCK(sc); |
1795 | } |
1796 | |
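/*
 * Reclaim completed Tx descriptors: for each entry on the completion
 * ring, unload and free the transmitted mbuf, update the Tx statistics
 * and advance the command ring. The watchdog is cleared once the ring
 * is fully drained.
 */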
1797 | void |
1798 | vmxnet3_txq_eof(struct vmxnet3_txqueue *txq) |
1799 | { |
1800 | struct vmxnet3_softc *sc; |
1801 | struct vmxnet3_txring *txr; |
1802 | struct vmxnet3_comp_ring *txc; |
1803 | struct vmxnet3_txcompdesc *txcd; |
1804 | struct vmxnet3_txbuf *txb; |
1805 | struct mbuf *m; |
1806 | u_int sop; |
1807 | |
1808 | sc = txq->vxtxq_sc; |
1809 | txr = &txq->vxtxq_cmd_ring; |
1810 | txc = &txq->vxtxq_comp_ring; |
1811 | |
1812 | VMXNET3_TXQ_LOCK_ASSERT(txq); |
1813 | |
1814 | for (;;) { |
1815 | txcd = &txc->vxcr_u.txcd[txc->vxcr_next]; |
1816 | if (txcd->gen != txc->vxcr_gen) |
1817 | break; |
1818 | vmxnet3_barrier(sc, VMXNET3_BARRIER_RD); |
1819 | |
1820 | if (++txc->vxcr_next == txc->vxcr_ndesc) { |
1821 | txc->vxcr_next = 0; |
1822 | txc->vxcr_gen ^= 1; |
1823 | } |
1824 | |
1825 | sop = txr->vxtxr_next; |
1826 | txb = &txr->vxtxr_txbuf[sop]; |
1827 | |
1828 | if ((m = txb->vtxb_m) != NULL) { |
1829 | bus_dmamap_sync(sc->vmx_dmat, txb->vtxb_dmamap, |
1830 | 0, txb->vtxb_dmamap->dm_mapsize, |
1831 | BUS_DMASYNC_POSTWRITE); |
1832 | bus_dmamap_unload(sc->vmx_dmat, txb->vtxb_dmamap); |
1833 | |
1834 | txq->vxtxq_stats.vmtxs_opackets++; |
1835 | txq->vxtxq_stats.vmtxs_obytes += m->m_pkthdr.len; |
1836 | if (m->m_flags & M_MCAST) |
1837 | txq->vxtxq_stats.vmtxs_omcasts++; |
1838 | |
1839 | m_freem(m); |
1840 | txb->vtxb_m = NULL; |
1841 | } |
1842 | |
1843 | txr->vxtxr_next = (txcd->eop_idx + 1) % txr->vxtxr_ndesc; |
1844 | } |
1845 | |
1846 | if (txr->vxtxr_head == txr->vxtxr_next) |
1847 | txq->vxtxq_watchdog = 0; |
1848 | } |
1849 | |
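/*
 * Provide a fresh mbuf cluster for the current fill slot of an Rx
 * command ring, loading it through the ring's spare DMA map (which is
 * then swapped with the slot's map) and publishing the descriptor to
 * the device. Only the head-buffer ring (ring 0) is filled.
 */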
1850 | int |
1851 | vmxnet3_newbuf(struct vmxnet3_softc *sc, struct vmxnet3_rxring *rxr) |
1852 | { |
1853 | struct mbuf *m; |
1854 | struct vmxnet3_rxdesc *rxd; |
1855 | struct vmxnet3_rxbuf *rxb; |
1856 | bus_dma_tag_t tag; |
1857 | bus_dmamap_t dmap; |
1858 | int idx, btype, error; |
1859 | |
1860 | tag = sc->vmx_dmat; |
1861 | dmap = rxr->vxrxr_spare_dmap; |
1862 | idx = rxr->vxrxr_fill; |
1863 | rxd = &rxr->vxrxr_rxd[idx]; |
1864 | rxb = &rxr->vxrxr_rxbuf[idx]; |
1865 | |
1866 | /* Don't allocate buffers for ring 2 for now. */ |
1867 | if (rxr->vxrxr_rid != 0) |
1868 | return -1; |
1869 | btype = VMXNET3_BTYPE_HEAD; |
1870 | |
1871 | MGETHDR(m, M_DONTWAIT, MT_DATA); |
1872 | if (m == NULL) |
1873 | return (ENOBUFS); |
1874 | |
1875 | MCLGET(m, M_DONTWAIT); |
1876 | if ((m->m_flags & M_EXT) == 0) { |
1877 | sc->vmx_stats.vmst_mgetcl_failed++; |
1878 | m_freem(m); |
1879 | return (ENOBUFS); |
1880 | } |
1881 | |
1882 | m->m_pkthdr.len = m->m_len = JUMBO_LEN; |
1883 | m_adj(m, ETHER_ALIGN); |
1884 | |
1885 | error = bus_dmamap_load_mbuf(sc->vmx_dmat, dmap, m, BUS_DMA_NOWAIT); |
1886 | if (error) { |
1887 | m_freem(m); |
1888 | sc->vmx_stats.vmst_mbuf_load_failed++; |
1889 | return (error); |
1890 | } |
1891 | |
1892 | if (rxb->vrxb_m != NULL) { |
1893 | bus_dmamap_sync(tag, rxb->vrxb_dmamap, |
1894 | 0, rxb->vrxb_dmamap->dm_mapsize, |
1895 | BUS_DMASYNC_POSTREAD); |
1896 | bus_dmamap_unload(tag, rxb->vrxb_dmamap); |
1897 | } |
1898 | |
1899 | rxr->vxrxr_spare_dmap = rxb->vrxb_dmamap; |
1900 | rxb->vrxb_dmamap = dmap; |
1901 | rxb->vrxb_m = m; |
1902 | |
1903 | rxd->addr = DMAADDR(dmap); |
1904 | rxd->len = m->m_pkthdr.len; |
1905 | rxd->btype = btype; |
1906 | rxd->gen = rxr->vxrxr_gen; |
1907 | |
1908 | vmxnet3_rxr_increment_fill(rxr); |
1909 | return (0); |
1910 | } |
1911 | |
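/*
 * Rx discard helpers: hand a single descriptor back to the device
 * without passing its buffer up the stack, or drop all remaining
 * descriptors of a partially received frame.
 */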
1914 | void |
1915 | vmxnet3_rxq_eof_discard(struct vmxnet3_rxqueue *rxq, |
1916 | struct vmxnet3_rxring *rxr, int idx) |
1917 | { |
1918 | struct vmxnet3_rxdesc *rxd; |
1919 | |
1920 | rxd = &rxr->vxrxr_rxd[idx]; |
1921 | rxd->gen = rxr->vxrxr_gen; |
1922 | vmxnet3_rxr_increment_fill(rxr); |
1923 | } |
1924 | |
1925 | void |
1926 | vmxnet3_rxq_discard_chain(struct vmxnet3_rxqueue *rxq) |
1927 | { |
1928 | struct vmxnet3_softc *sc; |
1929 | struct vmxnet3_rxring *rxr; |
1930 | struct vmxnet3_comp_ring *rxc; |
1931 | struct vmxnet3_rxcompdesc *rxcd; |
1932 | int idx, eof; |
1933 | |
1934 | sc = rxq->vxrxq_sc; |
1935 | rxc = &rxq->vxrxq_comp_ring; |
1936 | |
1937 | do { |
1938 | rxcd = &rxc->vxcr_u.rxcd[rxc->vxcr_next]; |
1939 | if (rxcd->gen != rxc->vxcr_gen) |
1940 | break; /* Not expected. */ |
1941 | vmxnet3_barrier(sc, VMXNET3_BARRIER_RD); |
1942 | |
1943 | if (++rxc->vxcr_next == rxc->vxcr_ndesc) { |
1944 | rxc->vxcr_next = 0; |
1945 | rxc->vxcr_gen ^= 1; |
1946 | } |
1947 | |
1948 | idx = rxcd->rxd_idx; |
1949 | eof = rxcd->eop; |
1950 | if (rxcd->qid < sc->vmx_nrxqueues) |
1951 | rxr = &rxq->vxrxq_cmd_ring[0]; |
1952 | else |
1953 | rxr = &rxq->vxrxq_cmd_ring[1]; |
1954 | vmxnet3_rxq_eof_discard(rxq, rxr, idx); |
1955 | } while (!eof); |
1956 | } |
1957 | |
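/*
 * Convert the checksum bits of an Rx completion descriptor into mbuf
 * csum_flags, flagging bad IPv4/TCP/UDP checksums where the device
 * reported a failure.
 */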
1958 | void |
1959 | vmxnet3_rx_csum(struct vmxnet3_rxcompdesc *rxcd, struct mbuf *m) |
1960 | { |
1961 | if (rxcd->no_csum) |
1962 | return; |
1963 | |
1964 | if (rxcd->ipv4) { |
1965 | m->m_pkthdr.csum_flags |= M_CSUM_IPv4; |
1966 | if (rxcd->ipcsum_ok == 0) |
1967 | m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD; |
1968 | } |
1969 | |
1970 | if (rxcd->fragment) |
1971 | return; |
1972 | |
1973 | if (rxcd->tcp) { |
1974 | m->m_pkthdr.csum_flags |= |
1975 | rxcd->ipv4 ? M_CSUM_TCPv4 : M_CSUM_TCPv6; |
1976 | if ((rxcd->csum_ok) == 0) |
1977 | m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD; |
1978 | } |
1979 | |
1980 | if (rxcd->udp) { |
1981 | m->m_pkthdr.csum_flags |= |
		    rxcd->ipv4 ? M_CSUM_UDPv4 : M_CSUM_UDPv6;
1983 | if ((rxcd->csum_ok) == 0) |
1984 | m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD; |
1985 | } |
1986 | } |
1987 | |
1988 | void |
1989 | vmxnet3_rxq_input(struct vmxnet3_rxqueue *rxq, |
1990 | struct vmxnet3_rxcompdesc *rxcd, struct mbuf *m) |
1991 | { |
1992 | struct vmxnet3_softc *sc; |
1993 | struct ifnet *ifp; |
1994 | |
1995 | sc = rxq->vxrxq_sc; |
1996 | ifp = &sc->vmx_ethercom.ec_if; |
1997 | |
1998 | if (rxcd->error) { |
1999 | rxq->vxrxq_stats.vmrxs_ierrors++; |
2000 | m_freem(m); |
2001 | return; |
2002 | } |
2003 | |
2004 | if (!rxcd->no_csum) |
2005 | vmxnet3_rx_csum(rxcd, m); |
2006 | if (rxcd->vlan) |
2007 | vlan_set_tag(m, rxcd->vtag); |
2008 | |
2009 | rxq->vxrxq_stats.vmrxs_ipackets++; |
2010 | rxq->vxrxq_stats.vmrxs_ibytes += m->m_pkthdr.len; |
2011 | |
2012 | if_percpuq_enqueue(ifp->if_percpuq, m); |
2013 | } |
2014 | |
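/*
 * Process the Rx completion ring: reassemble frames that span several
 * descriptors into an mbuf chain, replenish the command rings as
 * buffers are consumed, and hand completed frames to
 * vmxnet3_rxq_input(). A partially assembled frame is carried over to
 * the next invocation.
 */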
2015 | void |
2016 | vmxnet3_rxq_eof(struct vmxnet3_rxqueue *rxq) |
2017 | { |
2018 | struct vmxnet3_softc *sc; |
2019 | struct ifnet *ifp; |
2020 | struct vmxnet3_rxring *rxr; |
2021 | struct vmxnet3_comp_ring *rxc; |
2022 | struct vmxnet3_rxdesc *rxd __diagused; |
2023 | struct vmxnet3_rxcompdesc *rxcd; |
2024 | struct mbuf *m, *m_head, *m_tail; |
2025 | int idx, length; |
2026 | |
2027 | sc = rxq->vxrxq_sc; |
2028 | ifp = &sc->vmx_ethercom.ec_if; |
2029 | rxc = &rxq->vxrxq_comp_ring; |
2030 | |
2031 | VMXNET3_RXQ_LOCK_ASSERT(rxq); |
2032 | |
2033 | if ((ifp->if_flags & IFF_RUNNING) == 0) |
2034 | return; |
2035 | |
2036 | m_head = rxq->vxrxq_mhead; |
2037 | rxq->vxrxq_mhead = NULL; |
2038 | m_tail = rxq->vxrxq_mtail; |
2039 | rxq->vxrxq_mtail = NULL; |
2040 | KASSERT(m_head == NULL || m_tail != NULL); |
2041 | |
2042 | for (;;) { |
2043 | rxcd = &rxc->vxcr_u.rxcd[rxc->vxcr_next]; |
2044 | if (rxcd->gen != rxc->vxcr_gen) { |
2045 | rxq->vxrxq_mhead = m_head; |
2046 | rxq->vxrxq_mtail = m_tail; |
2047 | break; |
2048 | } |
2049 | vmxnet3_barrier(sc, VMXNET3_BARRIER_RD); |
2050 | |
2051 | if (++rxc->vxcr_next == rxc->vxcr_ndesc) { |
2052 | rxc->vxcr_next = 0; |
2053 | rxc->vxcr_gen ^= 1; |
2054 | } |
2055 | |
2056 | idx = rxcd->rxd_idx; |
2057 | length = rxcd->len; |
2058 | if (rxcd->qid < sc->vmx_nrxqueues) |
2059 | rxr = &rxq->vxrxq_cmd_ring[0]; |
2060 | else |
2061 | rxr = &rxq->vxrxq_cmd_ring[1]; |
2062 | rxd = &rxr->vxrxr_rxd[idx]; |
2063 | |
2064 | m = rxr->vxrxr_rxbuf[idx].vrxb_m; |
2065 | KASSERT(m != NULL); |
2066 | |
2067 | /* |
2068 | * The host may skip descriptors. We detect this when this |
2069 | * descriptor does not match the previous fill index. Catch |
2070 | * up with the host now. |
2071 | */ |
2072 | if (__predict_false(rxr->vxrxr_fill != idx)) { |
2073 | while (rxr->vxrxr_fill != idx) { |
2074 | rxr->vxrxr_rxd[rxr->vxrxr_fill].gen = |
2075 | rxr->vxrxr_gen; |
2076 | vmxnet3_rxr_increment_fill(rxr); |
2077 | } |
2078 | } |
2079 | |
2080 | if (rxcd->sop) { |
2081 | /* start of frame w/o head buffer */ |
2082 | KASSERT(rxd->btype == VMXNET3_BTYPE_HEAD); |
2083 | /* start of frame not in ring 0 */ |
2084 | KASSERT(rxr == &rxq->vxrxq_cmd_ring[0]); |
2085 | /* duplicate start of frame? */ |
2086 | KASSERT(m_head == NULL); |
2087 | |
2088 | if (length == 0) { |
2089 | /* Just ignore this descriptor. */ |
2090 | vmxnet3_rxq_eof_discard(rxq, rxr, idx); |
2091 | goto nextp; |
2092 | } |
2093 | |
2094 | if (vmxnet3_newbuf(sc, rxr) != 0) { |
2095 | rxq->vxrxq_stats.vmrxs_iqdrops++; |
2096 | vmxnet3_rxq_eof_discard(rxq, rxr, idx); |
2097 | if (!rxcd->eop) |
2098 | vmxnet3_rxq_discard_chain(rxq); |
2099 | goto nextp; |
2100 | } |
2101 | |
2102 | m_set_rcvif(m, ifp); |
2103 | m->m_pkthdr.len = m->m_len = length; |
2104 | m->m_pkthdr.csum_flags = 0; |
2105 | m_head = m_tail = m; |
2106 | |
2107 | } else { |
2108 | /* non start of frame w/o body buffer */ |
2109 | KASSERT(rxd->btype == VMXNET3_BTYPE_BODY); |
2110 | /* frame not started? */ |
2111 | KASSERT(m_head != NULL); |
2112 | |
2113 | if (vmxnet3_newbuf(sc, rxr) != 0) { |
2114 | rxq->vxrxq_stats.vmrxs_iqdrops++; |
2115 | vmxnet3_rxq_eof_discard(rxq, rxr, idx); |
2116 | if (!rxcd->eop) |
2117 | vmxnet3_rxq_discard_chain(rxq); |
2118 | m_freem(m_head); |
2119 | m_head = m_tail = NULL; |
2120 | goto nextp; |
2121 | } |
2122 | |
2123 | m->m_len = length; |
2124 | m_head->m_pkthdr.len += length; |
2125 | m_tail->m_next = m; |
2126 | m_tail = m; |
2127 | } |
2128 | |
2129 | if (rxcd->eop) { |
2130 | vmxnet3_rxq_input(rxq, rxcd, m_head); |
2131 | m_head = m_tail = NULL; |
2132 | |
2133 | /* Must recheck after dropping the Rx lock. */ |
2134 | if ((ifp->if_flags & IFF_RUNNING) == 0) |
2135 | break; |
2136 | } |
2137 | |
2138 | nextp: |
2139 | if (__predict_false(rxq->vxrxq_rs->update_rxhead)) { |
2140 | int qid = rxcd->qid; |
2141 | bus_size_t r; |
2142 | |
2143 | idx = (idx + 1) % rxr->vxrxr_ndesc; |
2144 | if (qid >= sc->vmx_nrxqueues) { |
2145 | qid -= sc->vmx_nrxqueues; |
2146 | r = VMXNET3_BAR0_RXH2(qid); |
2147 | } else |
2148 | r = VMXNET3_BAR0_RXH1(qid); |
2149 | vmxnet3_write_bar0(sc, r, idx); |
2150 | } |
2151 | } |
2152 | } |
2153 | |
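/*
 * Interrupt handler used when a single vector services the whole
 * device: handle pending events, then the first Rx and Tx queues.
 */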
2154 | int |
2155 | vmxnet3_legacy_intr(void *xsc) |
2156 | { |
2157 | struct vmxnet3_softc *sc; |
2158 | struct vmxnet3_rxqueue *rxq; |
2159 | struct vmxnet3_txqueue *txq; |
2160 | |
2161 | sc = xsc; |
2162 | rxq = &sc->vmx_rxq[0]; |
2163 | txq = &sc->vmx_txq[0]; |
2164 | |
2165 | if (sc->vmx_intr_type == VMXNET3_IT_LEGACY) { |
2166 | if (vmxnet3_read_bar1(sc, VMXNET3_BAR1_INTR) == 0) |
2167 | return (0); |
2168 | } |
2169 | if (sc->vmx_intr_mask_mode == VMXNET3_IMM_ACTIVE) |
2170 | vmxnet3_disable_all_intrs(sc); |
2171 | |
2172 | if (sc->vmx_ds->event != 0) |
2173 | vmxnet3_evintr(sc); |
2174 | |
2175 | VMXNET3_RXQ_LOCK(rxq); |
2176 | vmxnet3_rxq_eof(rxq); |
2177 | VMXNET3_RXQ_UNLOCK(rxq); |
2178 | |
2179 | VMXNET3_TXQ_LOCK(txq); |
2180 | vmxnet3_txq_eof(txq); |
2181 | if_schedule_deferred_start(&sc->vmx_ethercom.ec_if); |
2182 | VMXNET3_TXQ_UNLOCK(txq); |
2183 | |
2184 | vmxnet3_enable_all_intrs(sc); |
2185 | |
2186 | return (1); |
2187 | } |
2188 | |
2189 | int |
2190 | vmxnet3_txq_intr(void *xtxq) |
2191 | { |
2192 | struct vmxnet3_softc *sc; |
2193 | struct vmxnet3_txqueue *txq; |
2194 | |
2195 | txq = xtxq; |
2196 | sc = txq->vxtxq_sc; |
2197 | |
2198 | if (sc->vmx_intr_mask_mode == VMXNET3_IMM_ACTIVE) |
2199 | vmxnet3_disable_intr(sc, txq->vxtxq_intr_idx); |
2200 | |
2201 | VMXNET3_TXQ_LOCK(txq); |
2202 | vmxnet3_txq_eof(txq); |
2203 | if_schedule_deferred_start(&sc->vmx_ethercom.ec_if); |
2204 | VMXNET3_TXQ_UNLOCK(txq); |
2205 | |
2206 | vmxnet3_enable_intr(sc, txq->vxtxq_intr_idx); |
2207 | |
2208 | return (1); |
2209 | } |
2210 | |
2211 | int |
2212 | vmxnet3_rxq_intr(void *xrxq) |
2213 | { |
2214 | struct vmxnet3_softc *sc; |
2215 | struct vmxnet3_rxqueue *rxq; |
2216 | |
2217 | rxq = xrxq; |
2218 | sc = rxq->vxrxq_sc; |
2219 | |
2220 | if (sc->vmx_intr_mask_mode == VMXNET3_IMM_ACTIVE) |
2221 | vmxnet3_disable_intr(sc, rxq->vxrxq_intr_idx); |
2222 | |
2223 | VMXNET3_RXQ_LOCK(rxq); |
2224 | vmxnet3_rxq_eof(rxq); |
2225 | VMXNET3_RXQ_UNLOCK(rxq); |
2226 | |
2227 | vmxnet3_enable_intr(sc, rxq->vxrxq_intr_idx); |
2228 | |
2229 | return (1); |
2230 | } |
2231 | |
2232 | int |
2233 | vmxnet3_event_intr(void *xsc) |
2234 | { |
2235 | struct vmxnet3_softc *sc; |
2236 | |
2237 | sc = xsc; |
2238 | |
2239 | if (sc->vmx_intr_mask_mode == VMXNET3_IMM_ACTIVE) |
2240 | vmxnet3_disable_intr(sc, sc->vmx_event_intr_idx); |
2241 | |
2242 | if (sc->vmx_ds->event != 0) |
2243 | vmxnet3_evintr(sc); |
2244 | |
2245 | vmxnet3_enable_intr(sc, sc->vmx_event_intr_idx); |
2246 | |
2247 | return (1); |
2248 | } |
2249 | |
2250 | void |
2251 | vmxnet3_txstop(struct vmxnet3_softc *sc, struct vmxnet3_txqueue *txq) |
2252 | { |
2253 | struct vmxnet3_txring *txr; |
2254 | struct vmxnet3_txbuf *txb; |
2255 | int i; |
2256 | |
2257 | txr = &txq->vxtxq_cmd_ring; |
2258 | |
2259 | for (i = 0; i < txr->vxtxr_ndesc; i++) { |
2260 | txb = &txr->vxtxr_txbuf[i]; |
2261 | |
2262 | if (txb->vtxb_m == NULL) |
2263 | continue; |
2264 | |
2265 | bus_dmamap_sync(sc->vmx_dmat, txb->vtxb_dmamap, |
2266 | 0, txb->vtxb_dmamap->dm_mapsize, |
2267 | BUS_DMASYNC_POSTWRITE); |
2268 | bus_dmamap_unload(sc->vmx_dmat, txb->vtxb_dmamap); |
2269 | m_freem(txb->vtxb_m); |
2270 | txb->vtxb_m = NULL; |
2271 | } |
2272 | } |
2273 | |
2274 | void |
2275 | vmxnet3_rxstop(struct vmxnet3_softc *sc, struct vmxnet3_rxqueue *rxq) |
2276 | { |
2277 | struct vmxnet3_rxring *rxr; |
2278 | struct vmxnet3_rxbuf *rxb; |
2279 | int i, j; |
2280 | |
2281 | if (rxq->vxrxq_mhead != NULL) { |
2282 | m_freem(rxq->vxrxq_mhead); |
2283 | rxq->vxrxq_mhead = NULL; |
2284 | rxq->vxrxq_mtail = NULL; |
2285 | } |
2286 | |
2287 | for (i = 0; i < VMXNET3_RXRINGS_PERQ; i++) { |
2288 | rxr = &rxq->vxrxq_cmd_ring[i]; |
2289 | |
2290 | for (j = 0; j < rxr->vxrxr_ndesc; j++) { |
2291 | rxb = &rxr->vxrxr_rxbuf[j]; |
2292 | |
2293 | if (rxb->vrxb_m == NULL) |
2294 | continue; |
2295 | |
2296 | bus_dmamap_sync(sc->vmx_dmat, rxb->vrxb_dmamap, |
2297 | 0, rxb->vrxb_dmamap->dm_mapsize, |
2298 | BUS_DMASYNC_POSTREAD); |
2299 | bus_dmamap_unload(sc->vmx_dmat, rxb->vrxb_dmamap); |
2300 | m_freem(rxb->vrxb_m); |
2301 | rxb->vrxb_m = NULL; |
2302 | } |
2303 | } |
2304 | } |
2305 | |
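/*
 * Acquire and release every queue lock so that any queue processing
 * still running on another CPU completes before the queues are torn
 * down.
 */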
2306 | void |
2307 | vmxnet3_stop_rendezvous(struct vmxnet3_softc *sc) |
2308 | { |
2309 | struct vmxnet3_rxqueue *rxq; |
2310 | struct vmxnet3_txqueue *txq; |
2311 | int i; |
2312 | |
2313 | for (i = 0; i < sc->vmx_nrxqueues; i++) { |
2314 | rxq = &sc->vmx_rxq[i]; |
2315 | VMXNET3_RXQ_LOCK(rxq); |
2316 | VMXNET3_RXQ_UNLOCK(rxq); |
2317 | } |
2318 | |
2319 | for (i = 0; i < sc->vmx_ntxqueues; i++) { |
2320 | txq = &sc->vmx_txq[i]; |
2321 | VMXNET3_TXQ_LOCK(txq); |
2322 | VMXNET3_TXQ_UNLOCK(txq); |
2323 | } |
2324 | } |
2325 | |
2326 | void |
2327 | vmxnet3_stop_locked(struct vmxnet3_softc *sc) |
2328 | { |
2329 | struct ifnet *ifp; |
2330 | int q; |
2331 | |
2332 | ifp = &sc->vmx_ethercom.ec_if; |
2333 | VMXNET3_CORE_LOCK_ASSERT(sc); |
2334 | |
2335 | ifp->if_flags &= ~IFF_RUNNING; |
2336 | sc->vmx_link_active = 0; |
2337 | callout_stop(&sc->vmx_tick); |
2338 | |
2339 | /* Disable interrupts. */ |
2340 | vmxnet3_disable_all_intrs(sc); |
2341 | vmxnet3_write_cmd(sc, VMXNET3_CMD_DISABLE); |
2342 | |
2343 | vmxnet3_stop_rendezvous(sc); |
2344 | |
2345 | for (q = 0; q < sc->vmx_ntxqueues; q++) |
2346 | vmxnet3_txstop(sc, &sc->vmx_txq[q]); |
2347 | for (q = 0; q < sc->vmx_nrxqueues; q++) |
2348 | vmxnet3_rxstop(sc, &sc->vmx_rxq[q]); |
2349 | |
2350 | vmxnet3_write_cmd(sc, VMXNET3_CMD_RESET); |
2351 | } |
2352 | |
2353 | void |
2354 | vmxnet3_stop(struct ifnet *ifp, int disable) |
2355 | { |
2356 | struct vmxnet3_softc *sc = ifp->if_softc; |
2357 | |
2358 | VMXNET3_CORE_LOCK(sc); |
2359 | vmxnet3_stop_locked(sc); |
2360 | VMXNET3_CORE_UNLOCK(sc); |
2361 | } |
2362 | |
2363 | void |
2364 | vmxnet3_txinit(struct vmxnet3_softc *sc, struct vmxnet3_txqueue *txq) |
2365 | { |
2366 | struct vmxnet3_txring *txr; |
2367 | struct vmxnet3_comp_ring *txc; |
2368 | |
2369 | txr = &txq->vxtxq_cmd_ring; |
2370 | txr->vxtxr_head = 0; |
2371 | txr->vxtxr_next = 0; |
2372 | txr->vxtxr_gen = VMXNET3_INIT_GEN; |
2373 | memset(txr->vxtxr_txd, 0, |
2374 | txr->vxtxr_ndesc * sizeof(struct vmxnet3_txdesc)); |
2375 | |
2376 | txc = &txq->vxtxq_comp_ring; |
2377 | txc->vxcr_next = 0; |
2378 | txc->vxcr_gen = VMXNET3_INIT_GEN; |
2379 | memset(txc->vxcr_u.txcd, 0, |
2380 | txc->vxcr_ndesc * sizeof(struct vmxnet3_txcompdesc)); |
2381 | } |
2382 | |
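/*
 * Reset an Rx queue: fill the first command ring with fresh buffers,
 * clear the unused second ring, and reset the completion ring index
 * and generation bit.
 */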
2383 | int |
2384 | vmxnet3_rxinit(struct vmxnet3_softc *sc, struct vmxnet3_rxqueue *rxq) |
2385 | { |
2386 | struct vmxnet3_rxring *rxr; |
2387 | struct vmxnet3_comp_ring *rxc; |
2388 | int i, populate, idx, error; |
2389 | |
	/* LRO and jumbo frames are not supported yet */
2391 | populate = 1; |
2392 | |
2393 | for (i = 0; i < populate; i++) { |
2394 | rxr = &rxq->vxrxq_cmd_ring[i]; |
2395 | rxr->vxrxr_fill = 0; |
2396 | rxr->vxrxr_gen = VMXNET3_INIT_GEN; |
2397 | memset(rxr->vxrxr_rxd, 0, |
2398 | rxr->vxrxr_ndesc * sizeof(struct vmxnet3_rxdesc)); |
2399 | |
2400 | for (idx = 0; idx < rxr->vxrxr_ndesc; idx++) { |
2401 | error = vmxnet3_newbuf(sc, rxr); |
2402 | if (error) |
2403 | return (error); |
2404 | } |
2405 | } |
2406 | |
2407 | for (/**/; i < VMXNET3_RXRINGS_PERQ; i++) { |
2408 | rxr = &rxq->vxrxq_cmd_ring[i]; |
2409 | rxr->vxrxr_fill = 0; |
2410 | rxr->vxrxr_gen = 0; |
2411 | memset(rxr->vxrxr_rxd, 0, |
2412 | rxr->vxrxr_ndesc * sizeof(struct vmxnet3_rxdesc)); |
2413 | } |
2414 | |
2415 | rxc = &rxq->vxrxq_comp_ring; |
2416 | rxc->vxcr_next = 0; |
2417 | rxc->vxcr_gen = VMXNET3_INIT_GEN; |
2418 | memset(rxc->vxcr_u.rxcd, 0, |
2419 | rxc->vxcr_ndesc * sizeof(struct vmxnet3_rxcompdesc)); |
2420 | |
2421 | return (0); |
2422 | } |
2423 | |
2424 | int |
2425 | vmxnet3_reinit_queues(struct vmxnet3_softc *sc) |
2426 | { |
2427 | device_t dev; |
2428 | int q, error; |
2429 | dev = sc->vmx_dev; |
2430 | |
2431 | for (q = 0; q < sc->vmx_ntxqueues; q++) |
2432 | vmxnet3_txinit(sc, &sc->vmx_txq[q]); |
2433 | |
2434 | for (q = 0; q < sc->vmx_nrxqueues; q++) { |
2435 | error = vmxnet3_rxinit(sc, &sc->vmx_rxq[q]); |
2436 | if (error) { |
2437 | device_printf(dev, "cannot populate Rx queue %d\n" , q); |
2438 | return (error); |
2439 | } |
2440 | } |
2441 | |
2442 | return (0); |
2443 | } |
2444 | |
2445 | int |
2446 | vmxnet3_enable_device(struct vmxnet3_softc *sc) |
2447 | { |
2448 | int q; |
2449 | |
2450 | if (vmxnet3_read_cmd(sc, VMXNET3_CMD_ENABLE) != 0) { |
2451 | device_printf(sc->vmx_dev, "device enable command failed!\n" ); |
2452 | return (1); |
2453 | } |
2454 | |
2455 | /* Reset the Rx queue heads. */ |
2456 | for (q = 0; q < sc->vmx_nrxqueues; q++) { |
2457 | vmxnet3_write_bar0(sc, VMXNET3_BAR0_RXH1(q), 0); |
2458 | vmxnet3_write_bar0(sc, VMXNET3_BAR0_RXH2(q), 0); |
2459 | } |
2460 | |
2461 | return (0); |
2462 | } |
2463 | |
2464 | void |
2465 | vmxnet3_reinit_rxfilters(struct vmxnet3_softc *sc) |
2466 | { |
2467 | |
2468 | vmxnet3_set_rxfilter(sc); |
2469 | |
2470 | memset(sc->vmx_ds->vlan_filter, 0, sizeof(sc->vmx_ds->vlan_filter)); |
2471 | vmxnet3_write_cmd(sc, VMXNET3_CMD_VLAN_FILTER); |
2472 | } |
2473 | |
2474 | int |
2475 | vmxnet3_reinit(struct vmxnet3_softc *sc) |
2476 | { |
2477 | |
2478 | vmxnet3_set_lladdr(sc); |
2479 | vmxnet3_reinit_shared_data(sc); |
2480 | |
2481 | if (vmxnet3_reinit_queues(sc) != 0) |
2482 | return (ENXIO); |
2483 | |
2484 | if (vmxnet3_enable_device(sc) != 0) |
2485 | return (ENXIO); |
2486 | |
2487 | vmxnet3_reinit_rxfilters(sc); |
2488 | |
2489 | return (0); |
2490 | } |
2491 | |
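/*
 * Bring the interface up: stop any current activity, reinitialize the
 * shared data and the queues, enable the device and its interrupts,
 * and start the tick callout.
 */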
2496 | int |
2497 | vmxnet3_init_locked(struct vmxnet3_softc *sc) |
2498 | { |
2499 | struct ifnet *ifp = &sc->vmx_ethercom.ec_if; |
2500 | int error; |
2501 | |
2502 | vmxnet3_stop_locked(sc); |
2503 | |
2504 | error = vmxnet3_reinit(sc); |
2505 | if (error) { |
2506 | vmxnet3_stop_locked(sc); |
2507 | return (error); |
2508 | } |
2509 | |
2510 | ifp->if_flags |= IFF_RUNNING; |
2511 | vmxnet3_link_status(sc); |
2512 | |
2513 | vmxnet3_enable_all_intrs(sc); |
2514 | callout_reset(&sc->vmx_tick, hz, vmxnet3_tick, sc); |
2515 | |
2516 | return (0); |
2517 | } |
2518 | |
2519 | int |
2520 | vmxnet3_init(struct ifnet *ifp) |
2521 | { |
2522 | struct vmxnet3_softc *sc = ifp->if_softc; |
2523 | int error; |
2524 | |
2525 | VMXNET3_CORE_LOCK(sc); |
2526 | error = vmxnet3_init_locked(sc); |
2527 | VMXNET3_CORE_UNLOCK(sc); |
2528 | |
2529 | return (error); |
2530 | } |
2531 | |
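/*
 * Compute the Tx offload context for a packet: *start receives the
 * combined L2 and L3 header length and *csum_start the offset of the
 * L4 checksum field. For TSO the TCP pseudo-header checksum is written
 * into the packet and the TCP header length is added to *start.
 */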
2532 | int |
2533 | vmxnet3_txq_offload_ctx(struct vmxnet3_txqueue *txq, struct mbuf *m, |
2534 | int *start, int *csum_start) |
2535 | { |
2536 | struct ether_header *eh; |
2537 | struct mbuf *mp; |
2538 | int offset, csum_off, iphl, offp; |
2539 | bool v4; |
2540 | |
2541 | eh = mtod(m, struct ether_header *); |
	switch (ntohs(eh->ether_type)) {
2543 | case ETHERTYPE_IP: |
2544 | case ETHERTYPE_IPV6: |
2545 | offset = ETHER_HDR_LEN; |
2546 | break; |
2547 | case ETHERTYPE_VLAN: |
2548 | offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; |
2549 | break; |
2550 | default: |
2551 | m_freem(m); |
2552 | return (EINVAL); |
2553 | } |
2554 | |
2555 | if ((m->m_pkthdr.csum_flags & |
2556 | (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4)) != 0) { |
2557 | iphl = M_CSUM_DATA_IPv4_IPHL(m->m_pkthdr.csum_data); |
2558 | v4 = true; |
2559 | } else { |
2560 | iphl = M_CSUM_DATA_IPv6_IPHL(m->m_pkthdr.csum_data); |
2561 | v4 = false; |
2562 | } |
2563 | *start = offset + iphl; |
2564 | |
2565 | if (m->m_pkthdr.csum_flags & |
2566 | (M_CSUM_TCPv4 | M_CSUM_TCPv6 | M_CSUM_TSOv4 | M_CSUM_TSOv6)) { |
2567 | csum_off = offsetof(struct tcphdr, th_sum); |
2568 | } else { |
2569 | csum_off = offsetof(struct udphdr, uh_sum); |
2570 | } |
2571 | |
2572 | *csum_start = *start + csum_off; |
2573 | mp = m_pulldown(m, 0, *csum_start + 2, &offp); |
2574 | if (!mp) { |
2575 | /* m is already freed */ |
2576 | return ENOBUFS; |
2577 | } |
2578 | |
2579 | if (m->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) { |
2580 | struct tcphdr *tcp; |
2581 | |
2582 | txq->vxtxq_stats.vmtxs_tso++; |
2583 | tcp = (void *)(mtod(mp, char *) + offp + *start); |
2584 | |
2585 | if (v4) { |
2586 | struct ip *ip; |
2587 | |
2588 | ip = (void *)(mtod(mp, char *) + offp + offset); |
2589 | tcp->th_sum = in_cksum_phdr(ip->ip_src.s_addr, |
2590 | ip->ip_dst.s_addr, htons(IPPROTO_TCP)); |
2591 | } else { |
2592 | struct ip6_hdr *ip6; |
2593 | |
2594 | ip6 = (void *)(mtod(mp, char *) + offp + offset); |
2595 | tcp->th_sum = in6_cksum_phdr(&ip6->ip6_src, |
2596 | &ip6->ip6_dst, 0, htonl(IPPROTO_TCP)); |
2597 | } |
2598 | |
2599 | /* |
2600 | * For TSO, the size of the protocol header is also |
2601 | * included in the descriptor header size. |
2602 | */ |
2603 | *start += (tcp->th_off << 2); |
2604 | } else |
2605 | txq->vxtxq_stats.vmtxs_csum++; |
2606 | |
2607 | return (0); |
2608 | } |
2609 | |
2610 | int |
2611 | vmxnet3_txq_load_mbuf(struct vmxnet3_txqueue *txq, struct mbuf **m0, |
2612 | bus_dmamap_t dmap) |
2613 | { |
2614 | struct mbuf *m; |
2615 | bus_dma_tag_t tag; |
2616 | int error; |
2617 | |
2618 | m = *m0; |
2619 | tag = txq->vxtxq_sc->vmx_dmat; |
2620 | |
2621 | error = bus_dmamap_load_mbuf(tag, dmap, m, BUS_DMA_NOWAIT); |
2622 | if (error == 0 || error != EFBIG) |
2623 | return (error); |
2624 | |
2625 | m = m_defrag(m, M_NOWAIT); |
2626 | if (m != NULL) { |
2627 | *m0 = m; |
2628 | error = bus_dmamap_load_mbuf(tag, dmap, m, BUS_DMA_NOWAIT); |
2629 | } else |
2630 | error = ENOBUFS; |
2631 | |
2632 | if (error) { |
2633 | m_freem(*m0); |
2634 | *m0 = NULL; |
2635 | txq->vxtxq_sc->vmx_stats.vmst_defrag_failed++; |
2636 | } else |
2637 | txq->vxtxq_sc->vmx_stats.vmst_defragged++; |
2638 | |
2639 | return (error); |
2640 | } |
2641 | |
2642 | void |
2643 | vmxnet3_txq_unload_mbuf(struct vmxnet3_txqueue *txq, bus_dmamap_t dmap) |
2644 | { |
2645 | |
2646 | bus_dmamap_unload(txq->vxtxq_sc->vmx_dmat, dmap); |
2647 | } |
2648 | |
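/*
 * Enqueue a packet on the Tx command ring: load it for DMA, set up any
 * offload context, fill one descriptor per DMA segment, and finally
 * flip the generation bit of the first descriptor to hand the packet
 * to the device. The doorbell register is written once enough
 * descriptors are pending.
 */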
2649 | int |
2650 | vmxnet3_txq_encap(struct vmxnet3_txqueue *txq, struct mbuf **m0) |
2651 | { |
2652 | struct vmxnet3_softc *sc; |
2653 | struct vmxnet3_txring *txr; |
2654 | struct vmxnet3_txdesc *txd, *sop; |
2655 | struct mbuf *m; |
2656 | bus_dmamap_t dmap; |
2657 | bus_dma_segment_t *segs; |
2658 | int i, gen, start, csum_start, nsegs, error; |
2659 | |
2660 | sc = txq->vxtxq_sc; |
2661 | start = 0; |
2662 | txd = NULL; |
2663 | txr = &txq->vxtxq_cmd_ring; |
2664 | dmap = txr->vxtxr_txbuf[txr->vxtxr_head].vtxb_dmamap; |
2665 | |
2666 | error = vmxnet3_txq_load_mbuf(txq, m0, dmap); |
2667 | if (error) |
2668 | return (error); |
2669 | |
2670 | nsegs = dmap->dm_nsegs; |
2671 | segs = dmap->dm_segs; |
2672 | |
2673 | m = *m0; |
2674 | KASSERT(m->m_flags & M_PKTHDR); |
2675 | KASSERT(nsegs <= VMXNET3_TX_MAXSEGS); |
2676 | |
2677 | if (vmxnet3_txring_avail(txr) < nsegs) { |
2678 | txq->vxtxq_stats.vmtxs_full++; |
2679 | vmxnet3_txq_unload_mbuf(txq, dmap); |
2680 | return (ENOSPC); |
2681 | } else if (m->m_pkthdr.csum_flags & VMXNET3_CSUM_ALL_OFFLOAD) { |
2682 | error = vmxnet3_txq_offload_ctx(txq, m, &start, &csum_start); |
2683 | if (error) { |
2684 | /* m is already freed */ |
2685 | txq->vxtxq_stats.vmtxs_offload_failed++; |
2686 | vmxnet3_txq_unload_mbuf(txq, dmap); |
2687 | *m0 = NULL; |
2688 | return (error); |
2689 | } |
2690 | } |
2691 | |
2692 | txr->vxtxr_txbuf[txr->vxtxr_head].vtxb_m = m; |
2693 | sop = &txr->vxtxr_txd[txr->vxtxr_head]; |
	gen = txr->vxtxr_gen ^ 1;	/* Still owned by the CPU for now. */
2695 | |
2696 | for (i = 0; i < nsegs; i++) { |
2697 | txd = &txr->vxtxr_txd[txr->vxtxr_head]; |
2698 | |
2699 | txd->addr = segs[i].ds_addr; |
2700 | txd->len = segs[i].ds_len; |
2701 | txd->gen = gen; |
2702 | txd->dtype = 0; |
2703 | txd->offload_mode = VMXNET3_OM_NONE; |
2704 | txd->offload_pos = 0; |
2705 | txd->hlen = 0; |
2706 | txd->eop = 0; |
2707 | txd->compreq = 0; |
2708 | txd->vtag_mode = 0; |
2709 | txd->vtag = 0; |
2710 | |
2711 | if (++txr->vxtxr_head == txr->vxtxr_ndesc) { |
2712 | txr->vxtxr_head = 0; |
2713 | txr->vxtxr_gen ^= 1; |
2714 | } |
2715 | gen = txr->vxtxr_gen; |
2716 | } |
2717 | txd->eop = 1; |
2718 | txd->compreq = 1; |
2719 | |
2720 | if (vlan_has_tag(m)) { |
2721 | sop->vtag_mode = 1; |
2722 | sop->vtag = vlan_get_tag(m); |
2723 | } |
2724 | |
2725 | if (m->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) { |
2726 | sop->offload_mode = VMXNET3_OM_TSO; |
2727 | sop->hlen = start; |
2728 | sop->offload_pos = m->m_pkthdr.segsz; |
2729 | } else if (m->m_pkthdr.csum_flags & (VMXNET3_CSUM_OFFLOAD | |
2730 | VMXNET3_CSUM_OFFLOAD_IPV6)) { |
2731 | sop->offload_mode = VMXNET3_OM_CSUM; |
2732 | sop->hlen = start; |
2733 | sop->offload_pos = csum_start; |
2734 | } |
2735 | |
2736 | /* Finally, change the ownership. */ |
2737 | vmxnet3_barrier(sc, VMXNET3_BARRIER_WR); |
2738 | sop->gen ^= 1; |
2739 | |
2740 | txq->vxtxq_ts->npending += nsegs; |
2741 | if (txq->vxtxq_ts->npending >= txq->vxtxq_ts->intr_threshold) { |
2742 | txq->vxtxq_ts->npending = 0; |
2743 | vmxnet3_write_bar0(sc, VMXNET3_BAR0_TXH(txq->vxtxq_id), |
2744 | txr->vxtxr_head); |
2745 | } |
2746 | |
2747 | return (0); |
2748 | } |
2749 | |
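/*
 * Drain the interface send queue onto Tx queue 0 while descriptors are
 * available, tapping BPF for each packet and arming the Tx watchdog if
 * anything was queued.
 */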
2750 | void |
2751 | vmxnet3_start_locked(struct ifnet *ifp) |
2752 | { |
2753 | struct vmxnet3_softc *sc; |
2754 | struct vmxnet3_txqueue *txq; |
2755 | struct vmxnet3_txring *txr; |
2756 | struct mbuf *m_head; |
2757 | int tx; |
2758 | |
2759 | sc = ifp->if_softc; |
2760 | txq = &sc->vmx_txq[0]; |
2761 | txr = &txq->vxtxq_cmd_ring; |
2762 | tx = 0; |
2763 | |
2764 | VMXNET3_TXQ_LOCK_ASSERT(txq); |
2765 | |
2766 | if ((ifp->if_flags & IFF_RUNNING) == 0 || |
2767 | sc->vmx_link_active == 0) |
2768 | return; |
2769 | |
2770 | for (;;) { |
2771 | IFQ_POLL(&ifp->if_snd, m_head); |
2772 | if (m_head == NULL) |
2773 | break; |
2774 | |
2775 | if (vmxnet3_txring_avail(txr) < VMXNET3_TX_MAXSEGS) |
2776 | break; |
2777 | |
2778 | IFQ_DEQUEUE(&ifp->if_snd, m_head); |
2779 | if (m_head == NULL) |
2780 | break; |
2781 | |
2782 | if (vmxnet3_txq_encap(txq, &m_head) != 0) { |
2783 | if (m_head != NULL) |
2784 | m_freem(m_head); |
2785 | break; |
2786 | } |
2787 | |
2788 | tx++; |
2789 | bpf_mtap(ifp, m_head, BPF_D_OUT); |
2790 | } |
2791 | |
2792 | if (tx > 0) |
2793 | txq->vxtxq_watchdog = VMXNET3_WATCHDOG_TIMEOUT; |
2794 | } |
2795 | |
2796 | |
2797 | void |
2798 | vmxnet3_start(struct ifnet *ifp) |
2799 | { |
2800 | struct vmxnet3_softc *sc; |
2801 | struct vmxnet3_txqueue *txq; |
2802 | |
2803 | sc = ifp->if_softc; |
2804 | txq = &sc->vmx_txq[0]; |
2805 | |
2806 | VMXNET3_TXQ_LOCK(txq); |
2807 | vmxnet3_start_locked(ifp); |
2808 | VMXNET3_TXQ_UNLOCK(txq); |
2809 | } |
2810 | |
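/*
 * Program the device Rx filter. Unicast and broadcast frames are
 * always accepted; multicast addresses are copied into the filter
 * table unless a range is requested, the table would overflow, or the
 * interface is promiscuous, in which case all-multicast (and possibly
 * promiscuous) mode is used instead.
 */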
2811 | void |
2812 | vmxnet3_set_rxfilter(struct vmxnet3_softc *sc) |
2813 | { |
2814 | struct ifnet *ifp = &sc->vmx_ethercom.ec_if; |
2815 | struct ethercom *ec = &sc->vmx_ethercom; |
2816 | struct vmxnet3_driver_shared *ds = sc->vmx_ds; |
2817 | struct ether_multi *enm; |
2818 | struct ether_multistep step; |
2819 | u_int mode; |
2820 | uint8_t *p; |
2821 | |
2822 | ds->mcast_tablelen = 0; |
2823 | CLR(ifp->if_flags, IFF_ALLMULTI); |
2824 | |
2825 | /* |
2826 | * Always accept broadcast frames. |
2827 | * Always accept frames destined to our station address. |
2828 | */ |
2829 | mode = VMXNET3_RXMODE_BCAST | VMXNET3_RXMODE_UCAST; |
2830 | |
2831 | if (ISSET(ifp->if_flags, IFF_PROMISC) || |
2832 | ec->ec_multicnt > VMXNET3_MULTICAST_MAX) |
2833 | goto allmulti; |
2834 | |
2835 | p = sc->vmx_mcast; |
2836 | ETHER_LOCK(ec); |
2837 | ETHER_FIRST_MULTI(step, ec, enm); |
2838 | while (enm != NULL) { |
2839 | if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) { |
2840 | ETHER_UNLOCK(ec); |
2841 | /* |
2842 | * We must listen to a range of multicast addresses. |
2843 | * For now, just accept all multicasts, rather than |
2844 | * trying to set only those filter bits needed to match |
2845 | * the range. (At this time, the only use of address |
2846 | * ranges is for IP multicast routing, for which the |
2847 | * range is big enough to require all bits set.) |
2848 | */ |
2849 | goto allmulti; |
2850 | } |
2851 | memcpy(p, enm->enm_addrlo, ETHER_ADDR_LEN); |
2852 | |
2853 | p += ETHER_ADDR_LEN; |
2854 | |
2855 | ETHER_NEXT_MULTI(step, enm); |
2856 | } |
2857 | ETHER_UNLOCK(ec); |
2858 | |
2859 | if (ec->ec_multicnt > 0) { |
2860 | SET(mode, VMXNET3_RXMODE_MCAST); |
2861 | ds->mcast_tablelen = p - sc->vmx_mcast; |
2862 | } |
2863 | |
2864 | goto setit; |
2865 | |
2866 | allmulti: |
2867 | SET(ifp->if_flags, IFF_ALLMULTI); |
2868 | SET(mode, (VMXNET3_RXMODE_ALLMULTI | VMXNET3_RXMODE_MCAST)); |
2869 | if (ifp->if_flags & IFF_PROMISC) |
2870 | SET(mode, VMXNET3_RXMODE_PROMISC); |
2871 | |
2872 | setit: |
2873 | vmxnet3_write_cmd(sc, VMXNET3_CMD_SET_FILTER); |
2874 | ds->rxmode = mode; |
2875 | vmxnet3_write_cmd(sc, VMXNET3_CMD_SET_RXMODE); |
2876 | } |
2877 | |
2878 | int |
2879 | vmxnet3_ioctl(struct ifnet *ifp, u_long cmd, void *data) |
2880 | { |
2881 | struct vmxnet3_softc *sc = ifp->if_softc; |
2882 | struct ifreq *ifr = (struct ifreq *)data; |
2883 | int s, error = 0; |
2884 | |
2885 | switch (cmd) { |
2886 | case SIOCSIFMTU: { |
2887 | int nmtu = ifr->ifr_mtu; |
2888 | |
2889 | if (nmtu < VMXNET3_MIN_MTU || nmtu > VMXNET3_MAX_MTU) { |
2890 | error = EINVAL; |
2891 | break; |
2892 | } |
2893 | if (ifp->if_mtu != nmtu) { |
2894 | error = ether_ioctl(ifp, cmd, data); |
2895 | if (error == ENETRESET) |
2896 | error = vmxnet3_init(ifp); |
2897 | } |
2898 | break; |
2899 | } |
2900 | case SIOCGIFDATA: |
2901 | case SIOCZIFDATA: |
2902 | ifp->if_ipackets = 0; |
2903 | ifp->if_ibytes = 0; |
2904 | ifp->if_iqdrops = 0; |
2905 | ifp->if_ierrors = 0; |
2906 | for (int i = 0; i < sc->vmx_nrxqueues; i++) { |
2907 | struct vmxnet3_rxqueue *rxq = &sc->vmx_rxq[i]; |
2908 | |
2909 | VMXNET3_RXQ_LOCK(rxq); |
2910 | ifp->if_ipackets += rxq->vxrxq_stats.vmrxs_ipackets; |
2911 | ifp->if_ibytes += rxq->vxrxq_stats.vmrxs_ibytes; |
2912 | ifp->if_iqdrops += rxq->vxrxq_stats.vmrxs_iqdrops; |
2913 | ifp->if_ierrors += rxq->vxrxq_stats.vmrxs_ierrors; |
2914 | if (cmd == SIOCZIFDATA) { |
2915 | memset(&rxq->vxrxq_stats, 0, |
2916 | sizeof(rxq->vxrxq_stats)); |
2917 | } |
2918 | VMXNET3_RXQ_UNLOCK(rxq); |
2919 | } |
2920 | ifp->if_opackets = 0; |
2921 | ifp->if_obytes = 0; |
2922 | ifp->if_omcasts = 0; |
2923 | for (int i = 0; i < sc->vmx_ntxqueues; i++) { |
2924 | struct vmxnet3_txqueue *txq = &sc->vmx_txq[i]; |
2925 | |
2926 | VMXNET3_TXQ_LOCK(txq); |
2927 | ifp->if_opackets += txq->vxtxq_stats.vmtxs_opackets; |
2928 | ifp->if_obytes += txq->vxtxq_stats.vmtxs_obytes; |
2929 | ifp->if_omcasts += txq->vxtxq_stats.vmtxs_omcasts; |
2930 | if (cmd == SIOCZIFDATA) { |
2931 | memset(&txq->vxtxq_stats, 0, |
2932 | sizeof(txq->vxtxq_stats)); |
2933 | } |
2934 | VMXNET3_TXQ_UNLOCK(txq); |
2935 | } |
2936 | /* FALLTHROUGH */ |
2937 | default: |
2938 | s = splnet(); |
2939 | error = ether_ioctl(ifp, cmd, data); |
2940 | splx(s); |
2941 | } |
2942 | |
2943 | if (error == ENETRESET) { |
2944 | VMXNET3_CORE_LOCK(sc); |
2945 | if (ifp->if_flags & IFF_RUNNING) |
2946 | vmxnet3_set_rxfilter(sc); |
2947 | VMXNET3_CORE_UNLOCK(sc); |
2948 | error = 0; |
2949 | } |
2950 | |
2951 | return error; |
2952 | } |
2953 | |
2954 | int |
2955 | vmxnet3_ifflags_cb(struct ethercom *ec) |
2956 | { |
2957 | struct vmxnet3_softc *sc; |
2958 | |
2959 | sc = ec->ec_if.if_softc; |
2960 | |
2961 | VMXNET3_CORE_LOCK(sc); |
2962 | vmxnet3_set_rxfilter(sc); |
2963 | VMXNET3_CORE_UNLOCK(sc); |
2964 | |
2965 | return 0; |
2966 | } |
2967 | |
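/*
 * Per-queue Tx watchdog: returns nonzero once the timer armed by the
 * start routine expires without the Tx ring draining.
 */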
2968 | int |
2969 | vmxnet3_watchdog(struct vmxnet3_txqueue *txq) |
2970 | { |
2971 | struct vmxnet3_softc *sc; |
2972 | |
2973 | sc = txq->vxtxq_sc; |
2974 | |
2975 | VMXNET3_TXQ_LOCK(txq); |
2976 | if (txq->vxtxq_watchdog == 0 || --txq->vxtxq_watchdog) { |
2977 | VMXNET3_TXQ_UNLOCK(txq); |
2978 | return (0); |
2979 | } |
2980 | VMXNET3_TXQ_UNLOCK(txq); |
2981 | |
2982 | device_printf(sc->vmx_dev, "watchdog timeout on queue %d\n" , |
2983 | txq->vxtxq_id); |
2984 | return (1); |
2985 | } |
2986 | |
2987 | void |
2988 | vmxnet3_refresh_host_stats(struct vmxnet3_softc *sc) |
2989 | { |
2990 | |
2991 | vmxnet3_write_cmd(sc, VMXNET3_CMD_GET_STATS); |
2992 | } |
2993 | |
2994 | void |
2995 | vmxnet3_tick(void *xsc) |
2996 | { |
2997 | struct vmxnet3_softc *sc; |
2998 | int i, timedout; |
2999 | |
3000 | sc = xsc; |
3001 | timedout = 0; |
3002 | |
3003 | VMXNET3_CORE_LOCK(sc); |
3004 | |
3005 | vmxnet3_refresh_host_stats(sc); |
3006 | |
3007 | for (i = 0; i < sc->vmx_ntxqueues; i++) |
3008 | timedout |= vmxnet3_watchdog(&sc->vmx_txq[i]); |
3009 | |
3010 | if (timedout != 0) |
3011 | vmxnet3_init_locked(sc); |
3012 | else |
3013 | callout_reset(&sc->vmx_tick, hz, vmxnet3_tick, sc); |
3014 | |
3015 | VMXNET3_CORE_UNLOCK(sc); |
3016 | } |
3017 | |
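/*
 * Query the device for the current link state and speed and report any
 * change to the network stack.
 */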
3018 | void |
3019 | vmxnet3_link_status(struct vmxnet3_softc *sc) |
3020 | { |
3021 | struct ifnet *ifp = &sc->vmx_ethercom.ec_if; |
3022 | u_int x, link, speed; |
3023 | |
3024 | x = vmxnet3_read_cmd(sc, VMXNET3_CMD_GET_LINK); |
3025 | speed = x >> 16; |
3026 | if (x & 1) { |
3027 | sc->vmx_link_active = 1; |
3028 | ifp->if_baudrate = IF_Mbps(speed); |
3029 | link = LINK_STATE_UP; |
3030 | } else { |
3031 | sc->vmx_link_active = 0; |
3032 | link = LINK_STATE_DOWN; |
3033 | } |
3034 | |
3035 | if_link_state_change(ifp, link); |
3036 | } |
3037 | |
3038 | void |
3039 | vmxnet3_media_status(struct ifnet *ifp, struct ifmediareq *ifmr) |
3040 | { |
3041 | struct vmxnet3_softc *sc = ifp->if_softc; |
3042 | |
3043 | vmxnet3_link_status(sc); |
3044 | |
3045 | ifmr->ifm_status = IFM_AVALID; |
3046 | ifmr->ifm_active = IFM_ETHER; |
3047 | |
3048 | VMXNET3_CORE_LOCK(sc); |
3049 | if (ifp->if_link_state != LINK_STATE_UP) { |
3050 | VMXNET3_CORE_UNLOCK(sc); |
3051 | return; |
3052 | } |
3053 | |
3054 | ifmr->ifm_status |= IFM_ACTIVE; |
3055 | |
3056 | if (ifp->if_baudrate >= IF_Gbps(10ULL)) |
3057 | ifmr->ifm_active |= IFM_10G_T; |
3058 | VMXNET3_CORE_UNLOCK(sc); |
3059 | } |
3060 | |
3061 | int |
3062 | vmxnet3_media_change(struct ifnet *ifp) |
3063 | { |
3064 | return 0; |
3065 | } |
3066 | |
3067 | void |
3068 | vmxnet3_set_lladdr(struct vmxnet3_softc *sc) |
3069 | { |
3070 | uint32_t ml, mh; |
3071 | |
3072 | ml = sc->vmx_lladdr[0]; |
3073 | ml |= sc->vmx_lladdr[1] << 8; |
3074 | ml |= sc->vmx_lladdr[2] << 16; |
3075 | ml |= sc->vmx_lladdr[3] << 24; |
3076 | vmxnet3_write_bar1(sc, VMXNET3_BAR1_MACL, ml); |
3077 | |
3078 | mh = sc->vmx_lladdr[4]; |
3079 | mh |= sc->vmx_lladdr[5] << 8; |
3080 | vmxnet3_write_bar1(sc, VMXNET3_BAR1_MACH, mh); |
3081 | } |
3082 | |
3083 | void |
3084 | vmxnet3_get_lladdr(struct vmxnet3_softc *sc) |
3085 | { |
3086 | uint32_t ml, mh; |
3087 | |
3088 | ml = vmxnet3_read_cmd(sc, VMXNET3_CMD_GET_MACL); |
3089 | mh = vmxnet3_read_cmd(sc, VMXNET3_CMD_GET_MACH); |
3090 | |
3091 | sc->vmx_lladdr[0] = ml; |
3092 | sc->vmx_lladdr[1] = ml >> 8; |
3093 | sc->vmx_lladdr[2] = ml >> 16; |
3094 | sc->vmx_lladdr[3] = ml >> 24; |
3095 | sc->vmx_lladdr[4] = mh; |
3096 | sc->vmx_lladdr[5] = mh >> 8; |
3097 | } |
3098 | |
3099 | void |
3100 | vmxnet3_enable_all_intrs(struct vmxnet3_softc *sc) |
3101 | { |
3102 | int i; |
3103 | |
3104 | sc->vmx_ds->ictrl &= ~VMXNET3_ICTRL_DISABLE_ALL; |
3105 | for (i = 0; i < sc->vmx_nintrs; i++) |
3106 | vmxnet3_enable_intr(sc, i); |
3107 | } |
3108 | |
3109 | void |
3110 | vmxnet3_disable_all_intrs(struct vmxnet3_softc *sc) |
3111 | { |
3112 | int i; |
3113 | |
3114 | sc->vmx_ds->ictrl |= VMXNET3_ICTRL_DISABLE_ALL; |
3115 | for (i = 0; i < sc->vmx_nintrs; i++) |
3116 | vmxnet3_disable_intr(sc, i); |
3117 | } |
3118 | |
3119 | |
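/*
 * Allocate, map and load a single physically contiguous DMA area and
 * zero it; on failure, release whatever was set up so far.
 */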
3120 | int |
3121 | vmxnet3_dma_malloc(struct vmxnet3_softc *sc, bus_size_t size, bus_size_t align, |
3122 | struct vmxnet3_dma_alloc *dma) |
3123 | { |
3124 | bus_dma_tag_t t = sc->vmx_dmat; |
3125 | bus_dma_segment_t *segs = dma->dma_segs; |
3126 | int n, error; |
3127 | |
3128 | memset(dma, 0, sizeof(*dma)); |
3129 | |
3130 | error = bus_dmamem_alloc(t, size, align, 0, segs, 1, &n, BUS_DMA_NOWAIT); |
3131 | if (error) { |
3132 | aprint_error_dev(sc->vmx_dev, "bus_dmamem_alloc failed: %d\n" , error); |
3133 | goto fail1; |
3134 | } |
3135 | KASSERT(n == 1); |
3136 | |
3137 | error = bus_dmamem_map(t, segs, 1, size, &dma->dma_vaddr, BUS_DMA_NOWAIT); |
3138 | if (error) { |
3139 | aprint_error_dev(sc->vmx_dev, "bus_dmamem_map failed: %d\n" , error); |
3140 | goto fail2; |
3141 | } |
3142 | |
3143 | error = bus_dmamap_create(t, size, 1, size, 0, BUS_DMA_NOWAIT, &dma->dma_map); |
3144 | if (error) { |
3145 | aprint_error_dev(sc->vmx_dev, "bus_dmamap_create failed: %d\n" , error); |
3146 | goto fail3; |
3147 | } |
3148 | |
3149 | error = bus_dmamap_load(t, dma->dma_map, dma->dma_vaddr, size, NULL, |
3150 | BUS_DMA_NOWAIT); |
3151 | if (error) { |
3152 | aprint_error_dev(sc->vmx_dev, "bus_dmamap_load failed: %d\n" , error); |
3153 | goto fail4; |
3154 | } |
3155 | |
3156 | memset(dma->dma_vaddr, 0, size); |
3157 | dma->dma_paddr = DMAADDR(dma->dma_map); |
3158 | dma->dma_size = size; |
3159 | |
3160 | return (0); |
3161 | fail4: |
3162 | bus_dmamap_destroy(t, dma->dma_map); |
3163 | fail3: |
3164 | bus_dmamem_unmap(t, dma->dma_vaddr, size); |
3165 | fail2: |
3166 | bus_dmamem_free(t, segs, 1); |
3167 | fail1: |
3168 | return (error); |
3169 | } |
3170 | |
3171 | void |
3172 | vmxnet3_dma_free(struct vmxnet3_softc *sc, struct vmxnet3_dma_alloc *dma) |
3173 | { |
3174 | bus_dma_tag_t t = sc->vmx_dmat; |
3175 | |
3176 | bus_dmamap_unload(t, dma->dma_map); |
3177 | bus_dmamap_destroy(t, dma->dma_map); |
3178 | bus_dmamem_unmap(t, dma->dma_vaddr, dma->dma_size); |
3179 | bus_dmamem_free(t, dma->dma_segs, 1); |
3180 | |
3181 | memset(dma, 0, sizeof(*dma)); |
3182 | } |
3183 | |