/*	$NetBSD: cpufunc.h,v 1.34 2019/07/05 17:08:55 maxv Exp $	*/

/*
 * Copyright (c) 1998, 2007, 2019 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _X86_CPUFUNC_H_
#define	_X86_CPUFUNC_H_

/*
 * Functions to provide access to x86-specific instructions.
 */

#include <sys/cdefs.h>
#include <sys/types.h>

#include <machine/segments.h>
#include <machine/specialreg.h>

#ifdef _KERNEL
#if defined(_KERNEL_OPT)
#include "opt_xen.h"
#endif

static inline void
x86_pause(void)
{
	__asm volatile ("pause");
}

void	x86_lfence(void);
void	x86_sfence(void);
void	x86_mfence(void);
void	x86_flush(void);
void	x86_hlt(void);
void	x86_stihlt(void);
void	tlbflush(void);
void	tlbflushg(void);
void	invlpg(vaddr_t);
void	wbinvd(void);
void	breakpoint(void);

#define	INVPCID_ADDRESS		0
#define	INVPCID_CONTEXT		1
#define	INVPCID_ALL		2
#define	INVPCID_ALL_NONGLOBAL	3

static inline void
invpcid(register_t op, uint64_t pcid, vaddr_t va)
{
	struct {
		uint64_t pcid;
		uint64_t addr;
	} desc = {
		.pcid = pcid,
		.addr = va
	};

	__asm volatile (
		"invpcid %[desc],%[op]"
		:
		: [desc] "m" (desc), [op] "r" (op)
		: "memory"
	);
}
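
/*
 * Usage sketch (illustrative only, not part of this header): with a CPU
 * that advertises INVPCID, a single translation tagged with a PCID can be
 * dropped with
 *
 *	invpcid(INVPCID_ADDRESS, pcid, va);
 *
 * and every non-global entry, across all PCIDs, with
 *
 *	invpcid(INVPCID_ALL_NONGLOBAL, 0, 0);
 */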

static inline uint64_t
rdtsc(void)
{
	uint32_t low, high;

	__asm volatile (
		"rdtsc"
		: "=a" (low), "=d" (high)
		:
	);

	return (low | ((uint64_t)high << 32));
}

#ifndef XEN
void	x86_hotpatch(uint32_t, const uint8_t *, size_t);
void	x86_patch_window_open(u_long *, u_long *);
void	x86_patch_window_close(u_long, u_long);
void	x86_patch(bool);
#endif

void	x86_monitor(const void *, uint32_t, uint32_t);
void	x86_mwait(uint32_t, uint32_t);

static inline void
x86_cpuid2(uint32_t eax, uint32_t ecx, uint32_t *regs)
{
	uint32_t ebx, edx;

	__asm volatile (
		"cpuid"
		: "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx)
		: "a" (eax), "c" (ecx)
	);

	regs[0] = eax;
	regs[1] = ebx;
	regs[2] = ecx;
	regs[3] = edx;
}
#define	x86_cpuid(a,b)	x86_cpuid2((a), 0, (b))
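
/*
 * Usage sketch (illustrative only): the caller passes a 4-word buffer that
 * receives %eax, %ebx, %ecx and %edx for the requested leaf/subleaf, e.g.
 *
 *	uint32_t regs[4];
 *
 *	x86_cpuid(0x0, regs);		(basic leaf; vendor string lands in
 *					 regs[1], regs[3], regs[2])
 *	x86_cpuid2(0x7, 0, regs);	(structured extended features,
 *					 subleaf 0)
 */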

/* -------------------------------------------------------------------------- */

void	lidt(struct region_descriptor *);
void	lldt(u_short);
void	ltr(u_short);

static inline uint16_t
x86_getss(void)
{
	uint16_t val;

	__asm volatile (
		"mov %%ss,%[val]"
		: [val] "=r" (val)
		:
	);
	return val;
}

static inline void
setds(uint16_t val)
{
	__asm volatile (
		"mov %[val],%%ds"
		:
		: [val] "r" (val)
	);
}

static inline void
setes(uint16_t val)
{
	__asm volatile (
		"mov %[val],%%es"
		:
		: [val] "r" (val)
	);
}

static inline void
setfs(uint16_t val)
{
	__asm volatile (
		"mov %[val],%%fs"
		:
		: [val] "r" (val)
	);
}

void	setusergs(int);

/* -------------------------------------------------------------------------- */

#define FUNC_CR(crnum) \
	static inline void lcr##crnum(register_t val) \
	{ \
		__asm volatile ( \
			"mov %[val],%%cr" #crnum \
			: \
			: [val] "r" (val) \
			: "memory" \
		); \
	} \
	static inline register_t rcr##crnum(void) \
	{ \
		register_t val; \
		__asm volatile ( \
			"mov %%cr" #crnum ",%[val]" \
			: [val] "=r" (val) \
			: \
		); \
		return val; \
	}

#define PROTO_CR(crnum) \
	void lcr##crnum(register_t); \
	register_t rcr##crnum(void);

#ifndef XENPV
FUNC_CR(0)
FUNC_CR(2)
FUNC_CR(3)
#else
PROTO_CR(0)
PROTO_CR(2)
PROTO_CR(3)
#endif

FUNC_CR(4)
FUNC_CR(8)

/* -------------------------------------------------------------------------- */

#define FUNC_DR(drnum) \
	static inline void ldr##drnum(register_t val) \
	{ \
		__asm volatile ( \
			"mov %[val],%%dr" #drnum \
			: \
			: [val] "r" (val) \
		); \
	} \
	static inline register_t rdr##drnum(void) \
	{ \
		register_t val; \
		__asm volatile ( \
			"mov %%dr" #drnum ",%[val]" \
			: [val] "=r" (val) \
			: \
		); \
		return val; \
	}

#define PROTO_DR(drnum) \
	register_t rdr##drnum(void); \
	void ldr##drnum(register_t);

#ifndef XENPV
FUNC_DR(0)
FUNC_DR(1)
FUNC_DR(2)
FUNC_DR(3)
FUNC_DR(6)
FUNC_DR(7)
#else
PROTO_DR(0)
PROTO_DR(1)
PROTO_DR(2)
PROTO_DR(3)
PROTO_DR(6)
PROTO_DR(7)
#endif

/* -------------------------------------------------------------------------- */

union savefpu;

static inline void
fninit(void)
{
	__asm volatile ("fninit");
}

static inline void
fnclex(void)
{
	__asm volatile ("fnclex");
}

static inline void
fnstcw(uint16_t *val)
{
	__asm volatile (
		"fnstcw %[val]"
		: [val] "=m" (*val)
		:
	);
}

static inline void
fnstsw(uint16_t *val)
{
	__asm volatile (
		"fnstsw %[val]"
		: [val] "=m" (*val)
		:
	);
}

static inline void
clts(void)
{
	__asm volatile ("clts");
}

void	stts(void);

static inline void
x86_stmxcsr(uint32_t *val)
{
	__asm volatile (
		"stmxcsr %[val]"
		: [val] "=m" (*val)
		:
	);
}

static inline void
x86_ldmxcsr(uint32_t *val)
{
	__asm volatile (
		"ldmxcsr %[val]"
		:
		: [val] "m" (*val)
	);
}

void	fldummy(void);

static inline uint64_t
rdxcr(uint32_t xcr)
{
	uint32_t low, high;

	__asm volatile (
		"xgetbv"
		: "=a" (low), "=d" (high)
		: "c" (xcr)
	);

	return (low | ((uint64_t)high << 32));
}

static inline void
wrxcr(uint32_t xcr, uint64_t val)
{
	uint32_t low, high;

	low = val;
	high = val >> 32;
	__asm volatile (
		"xsetbv"
		:
		: "a" (low), "d" (high), "c" (xcr)
	);
}
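
/*
 * Usage sketch (illustrative only): XCR0 selects which XSAVE state
 * components are enabled.  Reading it and enabling, say, the x87/SSE/AVX
 * components could look like
 *
 *	uint64_t xcr0 = rdxcr(0);
 *	wrxcr(0, xcr0 | XCR0_X87 | XCR0_SSE | XCR0_YMM_Hi128);
 *
 * where the XCR0_* bit names are assumed to be provided by
 * <machine/specialreg.h>.
 */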

void	fnsave(union savefpu *);
void	frstor(const union savefpu *);

void	fxsave(union savefpu *);
void	fxrstor(const union savefpu *);

void	xsave(union savefpu *, uint64_t);
void	xsaveopt(union savefpu *, uint64_t);
void	xrstor(const union savefpu *, uint64_t);

/* -------------------------------------------------------------------------- */

#ifdef XENPV
void	x86_disable_intr(void);
void	x86_enable_intr(void);
#else
static inline void
x86_disable_intr(void)
{
	__asm volatile ("cli" ::: "memory");
}

static inline void
x86_enable_intr(void)
{
	__asm volatile ("sti" ::: "memory");
}
#endif /* XENPV */

/* Use read_psl, write_psl when saving and restoring interrupt state. */
u_long	x86_read_psl(void);
void	x86_write_psl(u_long);

/* Use read_flags, write_flags to adjust other members of %eflags. */
u_long	x86_read_flags(void);
void	x86_write_flags(u_long);
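
/*
 * Typical pattern (illustrative only) for a short section that must run
 * with interrupts disabled and then restore the previous state:
 *
 *	u_long psl;
 *
 *	psl = x86_read_psl();
 *	x86_disable_intr();
 *	...critical section...
 *	x86_write_psl(psl);
 */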

void	x86_reset(void);

/* -------------------------------------------------------------------------- */

/*
 * Some of the undocumented AMD64 MSRs need a 'passcode' to access.
 * See LinuxBIOSv2: src/cpu/amd/model_fxx/model_fxx_init.c
 */
#define	OPTERON_MSR_PASSCODE	0x9c5a203aU

static inline uint64_t
rdmsr(u_int msr)
{
	uint32_t low, high;

	__asm volatile (
		"rdmsr"
		: "=a" (low), "=d" (high)
		: "c" (msr)
	);

	return (low | ((uint64_t)high << 32));
}

uint64_t	rdmsr_locked(u_int);
int		rdmsr_safe(u_int, uint64_t *);

static inline void
wrmsr(u_int msr, uint64_t val)
{
	uint32_t low, high;

	low = val;
	high = val >> 32;
	__asm volatile (
		"wrmsr"
		:
		: "a" (low), "d" (high), "c" (msr)
	);
}

void	wrmsr_locked(u_int, uint64_t);
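
/*
 * Usage sketch (illustrative only): the *_locked variants are intended for
 * the passcode-protected MSRs described above; the passcode is assumed to
 * be supplied by the implementation, so callers only pass the MSR number:
 *
 *	uint64_t val;
 *
 *	val = rdmsr_locked(msr);
 *	wrmsr_locked(msr, val);
 */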

#endif /* _KERNEL */

#endif /* !_X86_CPUFUNC_H_ */