1 | /* $NetBSD: cpufunc.h,v 1.34 2019/07/05 17:08:55 maxv Exp $ */ |
2 | |
3 | /* |
4 | * Copyright (c) 1998, 2007, 2019 The NetBSD Foundation, Inc. |
5 | * All rights reserved. |
6 | * |
7 | * This code is derived from software contributed to The NetBSD Foundation |
8 | * by Charles M. Hannum, and by Andrew Doran. |
9 | * |
10 | * Redistribution and use in source and binary forms, with or without |
11 | * modification, are permitted provided that the following conditions |
12 | * are met: |
13 | * 1. Redistributions of source code must retain the above copyright |
14 | * notice, this list of conditions and the following disclaimer. |
15 | * 2. Redistributions in binary form must reproduce the above copyright |
16 | * notice, this list of conditions and the following disclaimer in the |
17 | * documentation and/or other materials provided with the distribution. |
18 | * |
19 | * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS |
20 | * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED |
21 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR |
22 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS |
23 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
24 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
25 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
26 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN |
27 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
28 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE |
29 | * POSSIBILITY OF SUCH DAMAGE. |
30 | */ |
31 | |
32 | #ifndef _X86_CPUFUNC_H_ |
33 | #define _X86_CPUFUNC_H_ |
34 | |
35 | /* |
36 | * Functions to provide access to x86-specific instructions. |
37 | */ |
38 | |
39 | #include <sys/cdefs.h> |
40 | #include <sys/types.h> |
41 | |
42 | #include <machine/segments.h> |
43 | #include <machine/specialreg.h> |
44 | |
45 | #ifdef _KERNEL |
46 | #if defined(_KERNEL_OPT) |
47 | #include "opt_xen.h" |
48 | #endif |
49 | |
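/*
 * PAUSE hints to the CPU that this is a spin-wait loop, improving
 * responsiveness and reducing power use while busy-waiting.
 */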
50 | static inline void |
51 | x86_pause(void) |
52 | { |
53 | __asm volatile ("pause" ); |
54 | } |
55 | |
56 | void x86_lfence(void); |
57 | void x86_sfence(void); |
58 | void x86_mfence(void); |
59 | void x86_flush(void); |
60 | void x86_hlt(void); |
61 | void x86_stihlt(void); |
62 | void tlbflush(void); |
63 | void tlbflushg(void); |
64 | void invlpg(vaddr_t); |
65 | void wbinvd(void); |
66 | void breakpoint(void); |
67 | |
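/*
 * INVPCID invalidation types (Intel SDM): individual address, single
 * PCID ("context"), all contexts including global translations, and
 * all contexts except global translations.
 */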
68 | #define INVPCID_ADDRESS 0 |
69 | #define INVPCID_CONTEXT 1 |
70 | #define INVPCID_ALL 2 |
71 | #define INVPCID_ALL_NONGLOBAL 3 |
72 | |
73 | static inline void |
74 | invpcid(register_t op, uint64_t pcid, vaddr_t va) |
75 | { |
76 | struct { |
77 | uint64_t pcid; |
78 | uint64_t addr; |
79 | } desc = { |
80 | .pcid = pcid, |
81 | .addr = va |
82 | }; |
83 | |
84 | __asm volatile ( |
85 | "invpcid %[desc],%[op]" |
86 | : |
87 | : [desc] "m" (desc), [op] "r" (op) |
88 | : "memory" |
89 | ); |
90 | } |
91 | |
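/*
 * RDTSC returns the 64-bit Time Stamp Counter in %edx:%eax.  Note that
 * the instruction is not serializing by itself.
 */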
92 | static inline uint64_t |
93 | rdtsc(void) |
94 | { |
95 | uint32_t low, high; |
96 | |
97 | __asm volatile ( |
98 | "rdtsc" |
99 | : "=a" (low), "=d" (high) |
100 | : |
101 | ); |
102 | |
103 | return (low | ((uint64_t)high << 32)); |
104 | } |
105 | |
106 | #ifndef XEN |
107 | void x86_hotpatch(uint32_t, const uint8_t *, size_t); |
108 | void x86_patch_window_open(u_long *, u_long *); |
109 | void x86_patch_window_close(u_long, u_long); |
110 | void x86_patch(bool); |
111 | #endif |
112 | |
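/*
 * MONITOR arms address-range monitoring on a cache line; MWAIT waits
 * until a store to the monitored range or another wakeup event such as
 * an interrupt, and is typically used for idle entry.
 */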
113 | void x86_monitor(const void *, uint32_t, uint32_t); |
114 | void x86_mwait(uint32_t, uint32_t); |
115 | |
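/*
 * Execute CPUID with the given leaf (%eax) and subleaf (%ecx), storing
 * %eax/%ebx/%ecx/%edx into regs[0..3].  E.g.:
 *
 *	uint32_t descs[4];
 *	x86_cpuid(0x00000001, descs);
 */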
116 | static inline void |
117 | x86_cpuid2(uint32_t eax, uint32_t ecx, uint32_t *regs) |
118 | { |
119 | uint32_t ebx, edx; |
120 | |
121 | __asm volatile ( |
122 | "cpuid" |
123 | : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx) |
124 | : "a" (eax), "c" (ecx) |
125 | ); |
126 | |
127 | regs[0] = eax; |
128 | regs[1] = ebx; |
129 | regs[2] = ecx; |
130 | regs[3] = edx; |
131 | } |
132 | #define x86_cpuid(a,b) x86_cpuid2((a), 0, (b)) |
133 | |
134 | /* -------------------------------------------------------------------------- */ |
135 | |
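/*
 * Descriptor table and segment register access: lidt/lldt/ltr load the
 * IDT, LDT and task registers; the inlines below read %ss and load the
 * data segment registers.
 */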
136 | void lidt(struct region_descriptor *); |
137 | void lldt(u_short); |
138 | void ltr(u_short); |
139 | |
140 | static inline uint16_t |
141 | x86_getss(void) |
142 | { |
143 | uint16_t val; |
144 | |
145 | __asm volatile ( |
146 | "mov %%ss,%[val]" |
147 | : [val] "=r" (val) |
148 | : |
149 | ); |
150 | return val; |
151 | } |
152 | |
153 | static inline void |
154 | setds(uint16_t val) |
155 | { |
156 | __asm volatile ( |
157 | "mov %[val],%%ds" |
158 | : |
159 | : [val] "r" (val) |
160 | ); |
161 | } |
162 | |
163 | static inline void |
164 | setes(uint16_t val) |
165 | { |
166 | __asm volatile ( |
167 | "mov %[val],%%es" |
168 | : |
169 | : [val] "r" (val) |
170 | ); |
171 | } |
172 | |
173 | static inline void |
174 | setfs(uint16_t val) |
175 | { |
176 | __asm volatile ( |
177 | "mov %[val],%%fs" |
178 | : |
179 | : [val] "r" (val) |
180 | ); |
181 | } |
182 | |
183 | void setusergs(int); |
184 | |
185 | /* -------------------------------------------------------------------------- */ |
186 | |
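/*
 * Control register accessors.  FUNC_CR(n) expands to inline lcrN()/rcrN()
 * wrappers around "mov %crN".  Under XENPV the guest cannot touch
 * %cr0/%cr2/%cr3 directly, so only prototypes are emitted here and the
 * implementations live in the Xen PV code.
 */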
187 | #define FUNC_CR(crnum) \ |
188 | static inline void lcr##crnum(register_t val) \ |
189 | { \ |
190 | __asm volatile ( \ |
191 | "mov %[val],%%cr" #crnum \ |
192 | : \ |
193 | : [val] "r" (val) \ |
194 | : "memory" \ |
195 | ); \ |
196 | } \ |
197 | static inline register_t rcr##crnum(void) \ |
198 | { \ |
199 | register_t val; \ |
200 | __asm volatile ( \ |
201 | "mov %%cr" #crnum ",%[val]" \ |
202 | : [val] "=r" (val) \ |
203 | : \ |
204 | ); \ |
205 | return val; \ |
206 | } |
207 | |
208 | #define PROTO_CR(crnum) \ |
209 | void lcr##crnum(register_t); \ |
210 | register_t rcr##crnum(void); |
211 | |
212 | #ifndef XENPV |
213 | FUNC_CR(0) |
214 | FUNC_CR(2) |
215 | FUNC_CR(3) |
216 | #else |
217 | PROTO_CR(0) |
218 | PROTO_CR(2) |
219 | PROTO_CR(3) |
220 | #endif |
221 | |
222 | FUNC_CR(4) |
223 | FUNC_CR(8) |
224 | |
225 | /* -------------------------------------------------------------------------- */ |
226 | |
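/*
 * Debug register accessors, same pattern as the control registers above:
 * inline ldrN()/rdrN() natively, prototypes only under XENPV.
 */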
227 | #define FUNC_DR(drnum) \ |
228 | static inline void ldr##drnum(register_t val) \ |
229 | { \ |
230 | __asm volatile ( \ |
231 | "mov %[val],%%dr" #drnum \ |
232 | : \ |
233 | : [val] "r" (val) \ |
234 | ); \ |
235 | } \ |
236 | static inline register_t rdr##drnum(void) \ |
237 | { \ |
238 | register_t val; \ |
239 | __asm volatile ( \ |
240 | "mov %%dr" #drnum ",%[val]" \ |
241 | : [val] "=r" (val) \ |
242 | : \ |
243 | ); \ |
244 | return val; \ |
245 | } |
246 | |
247 | #define PROTO_DR(drnum) \ |
248 | register_t rdr##drnum(void); \ |
249 | void ldr##drnum(register_t); |
250 | |
251 | #ifndef XENPV |
252 | FUNC_DR(0) |
253 | FUNC_DR(1) |
254 | FUNC_DR(2) |
255 | FUNC_DR(3) |
256 | FUNC_DR(6) |
257 | FUNC_DR(7) |
258 | #else |
259 | PROTO_DR(0) |
260 | PROTO_DR(1) |
261 | PROTO_DR(2) |
262 | PROTO_DR(3) |
263 | PROTO_DR(6) |
264 | PROTO_DR(7) |
265 | #endif |
266 | |
267 | /* -------------------------------------------------------------------------- */ |
268 | |
269 | union savefpu; |
270 | |
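/*
 * x87/SSE helpers: fninit reinitializes the FPU, fnclex clears the x87
 * exception flags, fnstcw/fnstsw store the control and status words,
 * clts clears CR0_TS (stts, out of line, sets it), and stmxcsr/ldmxcsr
 * access the SSE control/status register.
 */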
271 | static inline void |
272 | fninit(void) |
273 | { |
274 | __asm volatile ("fninit" ); |
275 | } |
276 | |
277 | static inline void |
278 | fnclex(void) |
279 | { |
280 | __asm volatile ("fnclex" ); |
281 | } |
282 | |
283 | static inline void |
284 | fnstcw(uint16_t *val) |
285 | { |
286 | __asm volatile ( |
287 | "fnstcw %[val]" |
288 | : [val] "=m" (*val) |
289 | : |
290 | ); |
291 | } |
292 | |
293 | static inline void |
294 | fnstsw(uint16_t *val) |
295 | { |
296 | __asm volatile ( |
297 | "fnstsw %[val]" |
298 | : [val] "=m" (*val) |
299 | : |
300 | ); |
301 | } |
302 | |
303 | static inline void |
304 | clts(void) |
305 | { |
306 | __asm volatile ("clts" ); |
307 | } |
308 | |
309 | void stts(void); |
310 | |
311 | static inline void |
312 | x86_stmxcsr(uint32_t *val) |
313 | { |
314 | __asm volatile ( |
315 | "stmxcsr %[val]" |
316 | : [val] "=m" (*val) |
317 | : |
318 | ); |
319 | } |
320 | |
321 | static inline void |
322 | x86_ldmxcsr(uint32_t *val) |
323 | { |
324 | __asm volatile ( |
325 | "ldmxcsr %[val]" |
326 | : |
327 | : [val] "m" (*val) |
328 | ); |
329 | } |
330 | |
331 | void fldummy(void); |
332 | |
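/*
 * XGETBV/XSETBV read and write an extended control register such as
 * XCR0 (the XSAVE feature mask).  %ecx selects the XCR; the value is
 * passed in %edx:%eax.
 */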
333 | static inline uint64_t |
334 | rdxcr(uint32_t xcr) |
335 | { |
336 | uint32_t low, high; |
337 | |
338 | __asm volatile ( |
339 | "xgetbv" |
340 | : "=a" (low), "=d" (high) |
341 | : "c" (xcr) |
342 | ); |
343 | |
344 | return (low | ((uint64_t)high << 32)); |
345 | } |
346 | |
347 | static inline void |
348 | wrxcr(uint32_t xcr, uint64_t val) |
349 | { |
350 | uint32_t low, high; |
351 | |
352 | low = val; |
353 | high = val >> 32; |
354 | __asm volatile ( |
355 | "xsetbv" |
356 | : |
357 | : "a" (low), "d" (high), "c" (xcr) |
358 | ); |
359 | } |
360 | |
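/*
 * Out-of-line FPU state save/restore in the legacy (FNSAVE/FRSTOR),
 * FXSAVE and XSAVE formats.  The uint64_t argument to the xsave
 * variants is the requested-feature bitmap, passed in %edx:%eax.
 */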
361 | void fnsave(union savefpu *); |
362 | void frstor(const union savefpu *); |
363 | |
364 | void fxsave(union savefpu *); |
365 | void fxrstor(const union savefpu *); |
366 | |
367 | void xsave(union savefpu *, uint64_t); |
368 | void xsaveopt(union savefpu *, uint64_t); |
369 | void xrstor(const union savefpu *, uint64_t); |
370 | |
371 | /* -------------------------------------------------------------------------- */ |
372 | |
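/*
 * Interrupt enable/disable.  Native kernels use cli/sti directly; XENPV
 * guests cannot, so out-of-line versions manipulate the virtual CPU's
 * event mask instead.
 */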
373 | #ifdef XENPV |
374 | void x86_disable_intr(void); |
375 | void x86_enable_intr(void); |
376 | #else |
377 | static inline void |
378 | x86_disable_intr(void) |
379 | { |
380 | __asm volatile ("cli" ::: "memory" ); |
381 | } |
382 | |
383 | static inline void |
384 | x86_enable_intr(void) |
385 | { |
386 | __asm volatile ("sti" ::: "memory" ); |
387 | } |
388 | #endif /* XENPV */ |
389 | |
/* Use x86_read_psl/x86_write_psl when saving and restoring interrupt state. */
391 | u_long x86_read_psl(void); |
392 | void x86_write_psl(u_long); |
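
/*
 * A typical save/disable/restore sketch:
 *
 *	u_long psl = x86_read_psl();
 *	x86_disable_intr();
 *	... critical section ...
 *	x86_write_psl(psl);
 */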
393 | |
/* Use x86_read_flags/x86_write_flags to adjust other members of %eflags. */
395 | u_long x86_read_flags(void); |
396 | void x86_write_flags(u_long); |
397 | |
398 | void x86_reset(void); |
399 | |
400 | /* -------------------------------------------------------------------------- */ |
401 | |
402 | /* |
403 | * Some of the undocumented AMD64 MSRs need a 'passcode' to access. |
404 | * See LinuxBIOSv2: src/cpu/amd/model_fxx/model_fxx_init.c |
405 | */ |
406 | #define OPTERON_MSR_PASSCODE 0x9c5a203aU |
407 | |
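/*
 * Model-specific register access.  RDMSR/WRMSR select the MSR in %ecx
 * and pass the 64-bit value in %edx:%eax.  rdmsr_safe() tolerates a
 * fault on an unimplemented MSR, and the *_locked variants go with the
 * passcode-protected AMD MSRs described above.
 */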
408 | static inline uint64_t |
409 | rdmsr(u_int msr) |
410 | { |
411 | uint32_t low, high; |
412 | |
413 | __asm volatile ( |
414 | "rdmsr" |
415 | : "=a" (low), "=d" (high) |
416 | : "c" (msr) |
417 | ); |
418 | |
419 | return (low | ((uint64_t)high << 32)); |
420 | } |
421 | |
422 | uint64_t rdmsr_locked(u_int); |
423 | int rdmsr_safe(u_int, uint64_t *); |
424 | |
425 | static inline void |
426 | wrmsr(u_int msr, uint64_t val) |
427 | { |
428 | uint32_t low, high; |
429 | |
430 | low = val; |
431 | high = val >> 32; |
432 | __asm volatile ( |
433 | "wrmsr" |
434 | : |
435 | : "a" (low), "d" (high), "c" (msr) |
436 | ); |
437 | } |
438 | |
439 | void wrmsr_locked(u_int, uint64_t); |
440 | |
441 | #endif /* _KERNEL */ |
442 | |
443 | #endif /* !_X86_CPUFUNC_H_ */ |
444 | |