| 1 | /* $NetBSD: cpu.h,v 1.107 2019/06/26 12:29:00 mgorny Exp $ */ |
| 2 | |
| 3 | /* |
| 4 | * Copyright (c) 1990 The Regents of the University of California. |
| 5 | * All rights reserved. |
| 6 | * |
| 7 | * This code is derived from software contributed to Berkeley by |
| 8 | * William Jolitz. |
| 9 | * |
| 10 | * Redistribution and use in source and binary forms, with or without |
| 11 | * modification, are permitted provided that the following conditions |
| 12 | * are met: |
| 13 | * 1. Redistributions of source code must retain the above copyright |
| 14 | * notice, this list of conditions and the following disclaimer. |
| 15 | * 2. Redistributions in binary form must reproduce the above copyright |
| 16 | * notice, this list of conditions and the following disclaimer in the |
| 17 | * documentation and/or other materials provided with the distribution. |
| 18 | * 3. Neither the name of the University nor the names of its contributors |
| 19 | * may be used to endorse or promote products derived from this software |
| 20 | * without specific prior written permission. |
| 21 | * |
| 22 | * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND |
| 23 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
| 24 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
| 25 | * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE |
| 26 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL |
| 27 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS |
| 28 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) |
| 29 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT |
| 30 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY |
| 31 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF |
| 32 | * SUCH DAMAGE. |
| 33 | * |
| 34 | * @(#)cpu.h 5.4 (Berkeley) 5/9/91 |
| 35 | */ |
| 36 | |
| 37 | #ifndef _X86_CPU_H_ |
| 38 | #define _X86_CPU_H_ |
| 39 | |
| 40 | #if defined(_KERNEL) || defined(_STANDALONE) |
| 41 | #include <sys/types.h> |
| 42 | #else |
| 43 | #include <stdint.h> |
| 44 | #include <stdbool.h> |
| 45 | #endif /* _KERNEL || _STANDALONE */ |
| 46 | |
| 47 | #if defined(_KERNEL) || defined(_KMEMUSER) |
| 48 | #if defined(_KERNEL_OPT) |
| 49 | #include "opt_xen.h" |
| 50 | #include "opt_svs.h" |
| 51 | #ifdef i386 |
| 52 | #include "opt_user_ldt.h" |
| 53 | #endif |
| 54 | #endif |
| 55 | |
| 56 | /* |
| 57 | * Definitions unique to x86 cpu support. |
| 58 | */ |
| 59 | #include <machine/frame.h> |
| 60 | #include <machine/pte.h> |
| 61 | #include <machine/segments.h> |
| 62 | #include <machine/tss.h> |
| 63 | #include <machine/intrdefs.h> |
| 64 | |
| 65 | #include <x86/cacheinfo.h> |
| 66 | |
| 67 | #include <sys/cpu_data.h> |
| 68 | #include <sys/evcnt.h> |
| 69 | #include <sys/device_if.h> /* for device_t */ |
| 70 | |
| 71 | #ifdef XEN |
| 72 | #include <xen/include/public/xen.h> |
| 73 | #include <xen/include/public/event_channel.h> |
| 74 | #include <sys/mutex.h> |
| 75 | #endif /* XEN */ |
| 76 | |
| 77 | struct intrsource; |
| 78 | struct pmap; |
| 79 | |
| 80 | #ifdef __x86_64__ |
| 81 | #define i386tss x86_64_tss |
| 82 | #endif |
| 83 | |
| 84 | #define NIOPORTS 1024 /* # of ports we allow to be mapped */ |
| 85 | #define IOMAPSIZE (NIOPORTS / 8) /* I/O bitmap size in bytes */ |
| 86 | |
/*
 * Per-CPU TSS block.  The I/O permission bitmap must immediately follow
 * the main TSS with no padding (the CPU finds it via an offset stored in
 * the TSS), hence __packed.  i386 additionally keeps separate task-gate
 * TSSes for the double-fault and DDB-IPI handlers.
 */
struct cpu_tss {
#ifdef i386
	struct i386tss dblflt_tss;	/* TSS for the double-fault task */
	struct i386tss ddbipi_tss;	/* TSS for the DDB IPI task */
#endif
	struct i386tss tss;		/* main TSS for this CPU */
	uint8_t iomap[IOMAPSIZE];	/* I/O permission bitmap (NIOPORTS bits) */
} __packed;
| 95 | |
| 96 | /* |
| 97 | * Arguments to hardclock, softclock and statclock |
| 98 | * encapsulate the previous machine state in an opaque |
| 99 | * clockframe; for now, use generic intrframe. |
| 100 | */ |
struct clockframe {
	struct intrframe cf_if;		/* the underlying interrupt frame */
};
| 104 | |
| 105 | /* |
| 106 | * a bunch of this belongs in cpuvar.h; move it later.. |
| 107 | */ |
| 108 | |
struct cpu_info {
	struct cpu_data ci_data;	/* MI per-cpu data */
	device_t ci_dev;		/* pointer to our device */
	struct cpu_info *ci_self;	/* self-pointer */
#ifdef XEN
	volatile struct vcpu_info *ci_vcpu; /* for XEN */
#endif

	/*
	 * Will be accessed by other CPUs.
	 */
	struct cpu_info *ci_next;	/* next cpu */
	struct lwp *ci_curlwp;		/* current owner of the processor */
	struct lwp *ci_fpcurlwp;	/* current owner of the FPU */
	cpuid_t ci_cpuid;		/* our CPU ID */
	uint32_t ci_acpiid;		/* our ACPI/MADT ID */
	uint32_t ci_initapicid;		/* our initial APIC ID */

	/*
	 * Private members.
	 */
	struct pmap *ci_pmap;		/* current pmap */
	int ci_want_pmapload;		/* pmap_load() is needed */
	volatile int ci_tlbstate;	/* one of TLBSTATE_ states. see below */
#define	TLBSTATE_VALID	0	/* all user tlbs are valid */
#define	TLBSTATE_LAZY	1	/* tlbs are valid but won't be kept uptodate */
#define	TLBSTATE_STALE	2	/* we might have stale user tlbs */
	int ci_curldt;		/* current LDT descriptor */
	int ci_nintrhand;	/* number of H/W interrupt handlers */
	uint64_t ci_scratch;	/* scratch word -- NOTE(review): presumably
				 * used from assembly; confirm users */
	uintptr_t ci_pmap_data[128 / sizeof(uintptr_t)];
				/* 128 bytes of opaque storage reserved for
				 * the pmap module's per-cpu state */

#ifndef XENPV
	struct intrsource *ci_isources[MAX_INTR_SOURCES];
				/* interrupt sources, native interrupt path */
#endif
#if defined(XEN)
	/* Xen event-channel interrupt bookkeeping, one slot per IPL. */
	struct intrsource *ci_xsources[NIPL];
	uint32_t ci_xmask[NIPL];
	uint32_t ci_xunmask[NIPL];
	uint32_t ci_xpending;	/* XEN doesn't use the cmpxchg8 path */
#endif

	volatile int ci_mtx_count;	/* Negative count of spin mutexes */
	volatile int ci_mtx_oldspl;	/* Old SPL at this ci_idepth */

	/* The following must be aligned for cmpxchg8b. */
	struct {
		uint32_t ipending;	/* pending interrupts, one bit each */
		int ilevel;		/* current interrupt priority level */
	} ci_istate __aligned(8);
#define ci_ipending	ci_istate.ipending
#define ci_ilevel	ci_istate.ilevel
	int ci_idepth;		/* interrupt nesting depth (see ci_mtx_oldspl) */
	void * ci_intrstack;	/* per-cpu interrupt stack -- TODO confirm */
	uint32_t ci_imask[NIPL];	/* per-IPL interrupt masks */
	uint32_t ci_iunmask[NIPL];	/* per-IPL interrupt unmasks */

	uint32_t ci_flags;		/* flags; see below */
	uint32_t ci_ipis;		/* interprocessor interrupts pending */

	uint32_t ci_signature;		/* X86 cpuid type (cpuid.1.%eax) */
	uint32_t ci_vendor[4];		/* vendor string */
	uint32_t ci_max_cpuid;		/* cpuid.0:%eax */
	uint32_t ci_max_ext_cpuid;	/* cpuid.80000000:%eax */
	volatile uint32_t ci_lapic_counter;
				/* NOTE(review): local APIC counter snapshot,
				 * presumably for TSC/timer sync -- confirm */

	uint32_t ci_feat_val[8];	/* X86 CPUID feature bits */
			/* [0] basic features cpuid.1:%edx
			 * [1] basic features cpuid.1:%ecx (CPUID2_xxx bits)
			 * [2] extended features cpuid:80000001:%edx
			 * [3] extended features cpuid:80000001:%ecx
			 * [4] VIA padlock features
			 * [5] structured extended features cpuid.7:%ebx
			 * [6] structured extended features cpuid.7:%ecx
			 * [7] structured extended features cpuid.7:%edx
			 */

	const struct cpu_functions *ci_func;	/* start/stop functions */
	struct trapframe *ci_ddb_regs;	/* register frame for DDB -- confirm */

	u_int ci_cflush_lsize;	/* CLFLUSH insn line size */
	struct x86_cache_info ci_cinfo[CAI_COUNT];	/* cache descriptors */

	device_t ci_frequency;	/* Frequency scaling technology */
	device_t ci_padlock;	/* VIA PadLock private storage */
	device_t ci_temperature; /* Intel coretemp(4) or equivalent */
	device_t ci_vm;		/* Virtual machine guest driver */

	/*
	 * Segmentation-related data.
	 */
	union descriptor *ci_gdt;	/* this cpu's GDT */
	struct cpu_tss *ci_tss;	/* Per-cpu TSSes; shared among LWPs */
	int ci_tss_sel;		/* TSS selector of this cpu */

	/*
	 * The following two are actually region_descriptors,
	 * but that would pollute the namespace.
	 */
	uintptr_t	ci_suspend_gdt;
	uint16_t	ci_suspend_gdt_padding;
	uintptr_t	ci_suspend_idt;
	uint16_t	ci_suspend_idt_padding;

	/*
	 * CPU register state saved before suspend and restored on
	 * resume (presumably by the ACPI suspend path -- confirm).
	 */
	uint16_t	ci_suspend_tr;
	uint16_t	ci_suspend_ldt;
	uintptr_t	ci_suspend_fs;
	uintptr_t	ci_suspend_gs;
	uintptr_t	ci_suspend_kgs;
	uintptr_t	ci_suspend_efer;
	uintptr_t	ci_suspend_reg[12];
	uintptr_t	ci_suspend_cr0;
	uintptr_t	ci_suspend_cr2;
	uintptr_t	ci_suspend_cr3;
	uintptr_t	ci_suspend_cr4;
	uintptr_t	ci_suspend_cr8;

	/* The following must be in a single cache line. */
	int		ci_want_resched __aligned(64);
	int		ci_padout __aligned(64);

#ifndef __HAVE_DIRECT_MAP
	/*
	 * Without a direct map, each CPU keeps a small set of reserved
	 * kernel VAs (and pointers to their PTEs) for temporary page
	 * mappings: copy source/destination, zeroing, and PTP access.
	 */
#define VPAGE_SRC 0
#define VPAGE_DST 1
#define VPAGE_ZER 2
#define VPAGE_PTP 3
#define VPAGE_MAX 4
	vaddr_t		vpage[VPAGE_MAX];
	pt_entry_t	*vpage_pte[VPAGE_MAX];
#endif

#ifdef PAE
	uint32_t	ci_pae_l3_pdirpa; /* PA of L3 PD */
	pd_entry_t *	ci_pae_l3_pdir; /* VA pointer to L3 PD */
#endif

#ifdef SVS
	/* Separate Virtual Space (kernel page-table isolation) state. */
	pd_entry_t *	ci_svs_updir;	/* user page directory (VA) */
	paddr_t		ci_svs_updirpa;	/* user page directory (PA) */
	paddr_t		ci_unused;	/* spare slot, keeps layout stable */
	kmutex_t	ci_svs_mtx;
	pd_entry_t *	ci_svs_rsp0_pte;
	vaddr_t		ci_svs_rsp0;
	vaddr_t		ci_svs_ursp0;
	vaddr_t		ci_svs_krsp0;
	vaddr_t		ci_svs_utls;
#endif

#ifdef XEN
	u_long ci_evtmask[NR_EVENT_CHANNELS]; /* events allowed on this CPU */
	struct evcnt ci_ipi_events[XEN_NIPIS];
	evtchn_port_t ci_ipi_evtchn;	/* event channel used for IPIs */
#if defined(XENPV)
#if defined(PAE) || defined(__x86_64__)
	/* Currently active user PGD (can't use rcr3() with Xen) */
	pd_entry_t *	ci_kpm_pdir;	/* per-cpu PMD (va) */
	paddr_t		ci_kpm_pdirpa;	/* per-cpu PMD (pa) */
	kmutex_t	ci_kpm_mtx;
#endif /* defined(PAE) || defined(__x86_64__) */

#if defined(__x86_64__)
	/* per-cpu version of normal_pdes */
	pd_entry_t *	ci_normal_pdes[3]; /* Ok to hardcode. only for x86_64 && XENPV */
	paddr_t		ci_xen_current_user_pgd;
#endif	/* defined(__x86_64__) */

	size_t		ci_xpq_idx;	/* index into the Xen mmu-op queue
					 * -- presumably; confirm in x86/xen */
#endif /* XENPV */

	/* Xen raw system time at which we last ran hardclock.  */
	uint64_t	ci_xen_hardclock_systime_ns;

	/*
	 * Last TSC-adjusted local Xen system time we observed.  Used
	 * to detect whether the Xen clock has gone backwards.
	 */
	uint64_t	ci_xen_last_systime_ns;

	/*
	 * Distance in nanoseconds from the local view of system time
	 * to the global view of system time, if the local time is
	 * behind the global time.
	 */
	uint64_t	ci_xen_systime_ns_skew;

	/* Xen periodic timer interrupt handle.  */
	struct intrhand	*ci_xen_timer_intrhand;

	/*
	 * Clockframe for timer interrupt handler.
	 * Saved at entry via event callback.
	 */
	vaddr_t		ci_xen_clockf_pc; /* RIP at last event interrupt */
	bool		ci_xen_clockf_usermode; /* Was the guest in usermode ? */

	/* Event counters for various pathologies that might happen.  */
	struct evcnt	ci_xen_cpu_tsc_backwards_evcnt;
	struct evcnt	ci_xen_tsc_delta_negative_evcnt;
	struct evcnt	ci_xen_raw_systime_wraparound_evcnt;
	struct evcnt	ci_xen_raw_systime_backwards_evcnt;
	struct evcnt	ci_xen_systime_backwards_hardclock_evcnt;
	struct evcnt	ci_xen_missed_hardclock_evcnt;
#else  /* XEN */
	struct evcnt ci_ipi_events[X86_NIPI];	/* per-IPI-type counters */
#endif /* XEN */

};
| 316 | |
| 317 | /* |
| 318 | * Macros to handle (some) trapframe registers for common x86 code. |
| 319 | */ |
/*
 * The parameter is fully parenthesized (CERT PRE01-C) so the macros
 * stay correct even if a caller ever passes an expression rather than
 * a plain pointer.  Each expansion remains an lvalue, so the macros
 * may still appear on the left-hand side of an assignment.
 */
#ifdef __x86_64__
#define X86_TF_RAX(tf)		((tf)->tf_rax)
#define X86_TF_RDX(tf)		((tf)->tf_rdx)
#define X86_TF_RSP(tf)		((tf)->tf_rsp)
#define X86_TF_RIP(tf)		((tf)->tf_rip)
#define X86_TF_RFLAGS(tf)	((tf)->tf_rflags)
#else
#define X86_TF_RAX(tf)		((tf)->tf_eax)
#define X86_TF_RDX(tf)		((tf)->tf_edx)
#define X86_TF_RSP(tf)		((tf)->tf_esp)
#define X86_TF_RIP(tf)		((tf)->tf_eip)
#define X86_TF_RFLAGS(tf)	((tf)->tf_eflags)
#endif
| 333 | |
/*
 * Processor flag notes: The "primary" CPU has certain MI-defined
 * roles (mostly relating to hardclock handling); we distinguish
 * between the processor which booted us, and the processor currently
 * holding the "primary" role just to give us the flexibility later to
 * change primaries should we be sufficiently twisted.
 *
 * These bits live in cpu_info::ci_flags.  Bits 0x0010-0x0400 are
 * currently unassigned.
 */

#define CPUF_BSP	0x0001		/* CPU is the original BSP */
#define CPUF_AP		0x0002		/* CPU is an AP */
#define CPUF_SP		0x0004		/* CPU is only processor */
#define CPUF_PRIMARY	0x0008		/* CPU is active primary processor */

#define CPUF_SYNCTSC	0x0800		/* Synchronize TSC */
#define CPUF_PRESENT	0x1000		/* CPU is present */
#define CPUF_RUNNING	0x2000		/* CPU is running */
#define CPUF_PAUSE	0x4000		/* CPU is paused in DDB */
#define CPUF_GO		0x8000		/* CPU should start running */
| 352 | |
#endif /* _KERNEL || _KMEMUSER */
| 354 | |
| 355 | #ifdef _KERNEL |
| 356 | /* |
| 357 | * We statically allocate the CPU info for the primary CPU (or, |
| 358 | * the only CPU on uniprocessors), and the primary CPU is the |
| 359 | * first CPU on the CPU info list. |
| 360 | */ |
| 361 | extern struct cpu_info cpu_info_primary; |
| 362 | extern struct cpu_info *cpu_info_list; |
| 363 | |
/*
 * MI CPU iteration interface: walk the singly linked cpu_info_list,
 * threaded through ci_next.  The expansion is the three clauses of a
 * for(;;) statement; the iterator variable itself is unused on x86.
 */
#define	CPU_INFO_ITERATOR		int __unused
#define	CPU_INFO_FOREACH(cii, ci)	ci = cpu_info_list; \
					ci != NULL; ci = ci->ci_next

/* Dispatch through this CPU's start/stop function table (ci_func). */
#define CPU_STARTUP(_ci, _target)	((_ci)->ci_func->start(_ci, _target))
#define CPU_STOP(_ci)	        	((_ci)->ci_func->stop(_ci))
#define CPU_START_CLEANUP(_ci)		((_ci)->ci_func->cleanup(_ci))
| 371 | |
| 372 | #if !defined(__GNUC__) || defined(_MODULE) |
| 373 | /* For non-GCC and modules */ |
| 374 | struct cpu_info *x86_curcpu(void); |
| 375 | void cpu_set_curpri(int); |
| 376 | # ifdef __GNUC__ |
| 377 | lwp_t *x86_curlwp(void) __attribute__ ((const)); |
| 378 | # else |
| 379 | lwp_t *x86_curlwp(void); |
| 380 | # endif |
| 381 | #endif |
| 382 | |
/* MI index of the CPU we are currently running on. */
#define cpu_number()		(cpu_index(curcpu()))

#define CPU_IS_PRIMARY(ci)	((ci)->ci_flags & CPUF_PRIMARY)

/* Reasons for an AST, ORed into l_md.md_astpending. */
#define X86_AST_GENERIC		0x01
#define X86_AST_PREEMPT		0x02

/* Request an AST for LWP l; clear the preemption request respectively. */
#define aston(l, why)		((l)->l_md.md_astpending |= (why))
#define	cpu_did_resched(l)	((l)->l_md.md_astpending &= ~X86_AST_PREEMPT)
| 392 | |
| 393 | void cpu_boot_secondary_processors(void); |
| 394 | void cpu_init_idle_lwps(void); |
| 395 | void cpu_init_msrs(struct cpu_info *, bool); |
| 396 | void cpu_load_pmap(struct pmap *, struct pmap *); |
| 397 | void cpu_broadcast_halt(void); |
| 398 | void cpu_kick(struct cpu_info *); |
| 399 | |
| 400 | void cpu_pcpuarea_init(struct cpu_info *); |
| 401 | void cpu_svs_init(struct cpu_info *); |
| 402 | void cpu_speculation_init(struct cpu_info *); |
| 403 | |
| 404 | #define curcpu() x86_curcpu() |
| 405 | #define curlwp x86_curlwp() |
| 406 | #define curpcb ((struct pcb *)lwp_getpcb(curlwp)) |
| 407 | |
| 408 | /* |
| 409 | * Give a profiling tick to the current process when the user profiling |
| 410 | * buffer pages are invalid. On the i386, request an ast to send us |
| 411 | * through trap(), marking the proc as needing a profiling tick. |
| 412 | */ |
| 413 | extern void cpu_need_proftick(struct lwp *l); |
| 414 | |
| 415 | /* |
| 416 | * Notify the LWP l that it has a signal pending, process as soon as |
| 417 | * possible. |
| 418 | */ |
| 419 | extern void cpu_signotify(struct lwp *); |
| 420 | |
| 421 | /* |
| 422 | * We need a machine-independent name for this. |
| 423 | */ |
| 424 | extern void (*delay_func)(unsigned int); |
| 425 | struct timeval; |
| 426 | |
| 427 | #ifndef __HIDE_DELAY |
| 428 | #define DELAY(x) (*delay_func)(x) |
| 429 | #define delay(x) (*delay_func)(x) |
| 430 | #endif |
| 431 | |
| 432 | extern int biosbasemem; |
| 433 | extern int biosextmem; |
| 434 | extern int cputype; |
| 435 | extern int cpuid_level; |
| 436 | extern int cpu_class; |
| 437 | extern char cpu_brand_string[]; |
| 438 | extern int use_pae; |
| 439 | |
| 440 | #ifdef __i386__ |
| 441 | #define i386_fpu_present 1 |
| 442 | int npx586bug1(int, int); |
| 443 | extern int i386_fpu_fdivbug; |
| 444 | extern int i386_use_fxsave; |
| 445 | extern int i386_has_sse; |
| 446 | extern int i386_has_sse2; |
| 447 | #else |
| 448 | #define i386_fpu_present 1 |
| 449 | #define i386_fpu_fdivbug 0 |
| 450 | #define i386_use_fxsave 1 |
| 451 | #define i386_has_sse 1 |
| 452 | #define i386_has_sse2 1 |
| 453 | #endif |
| 454 | |
| 455 | extern int x86_fpu_save; |
| 456 | #define FPU_SAVE_FSAVE 0 |
| 457 | #define FPU_SAVE_FXSAVE 1 |
| 458 | #define FPU_SAVE_XSAVE 2 |
| 459 | #define FPU_SAVE_XSAVEOPT 3 |
| 460 | extern unsigned int x86_fpu_save_size; |
| 461 | extern uint64_t x86_xsave_features; |
| 462 | extern size_t x86_xsave_offsets[]; |
| 463 | extern size_t x86_xsave_sizes[]; |
| 464 | extern uint32_t x86_fpu_mxcsr_mask; |
| 465 | extern bool x86_fpu_eager; |
| 466 | |
| 467 | extern void (*x86_cpu_idle)(void); |
| 468 | #define cpu_idle() (*x86_cpu_idle)() |
| 469 | |
| 470 | /* machdep.c */ |
| 471 | #ifdef i386 |
| 472 | void cpu_set_tss_gates(struct cpu_info *); |
| 473 | #endif |
| 474 | void cpu_reset(void); |
| 475 | |
| 476 | /* longrun.c */ |
| 477 | u_int tmx86_get_longrun_mode(void); |
| 478 | void tmx86_get_longrun_status(u_int *, u_int *, u_int *); |
| 479 | void tmx86_init_longrun(void); |
| 480 | |
| 481 | /* identcpu.c */ |
| 482 | void cpu_probe(struct cpu_info *); |
| 483 | void cpu_identify(struct cpu_info *); |
| 484 | void identify_hypervisor(void); |
| 485 | |
/*
 * Hypervisor type as detected by identify_hypervisor().
 */
typedef enum vm_guest {
	VM_GUEST_NO = 0,	/* bare metal: no hypervisor detected */
	VM_GUEST_VM,		/* some hypervisor, type unknown */
	VM_GUEST_XEN,		/* Xen */
	VM_GUEST_XENPVHVM,	/* Xen PV-on-HVM */
	VM_GUEST_HV,		/* Hyper-V */
	VM_GUEST_VMWARE,	/* VMware */
	VM_GUEST_KVM,		/* KVM */
	VM_LAST
} vm_guest_t;
extern vm_guest_t vm_guest;	/* set at boot by identify_hypervisor() */
| 497 | |
| 498 | /* cpu_topology.c */ |
| 499 | void x86_cpu_topology(struct cpu_info *); |
| 500 | |
| 501 | /* locore.s */ |
| 502 | struct region_descriptor; |
| 503 | void lgdt(struct region_descriptor *); |
| 504 | #ifdef XENPV |
| 505 | void lgdt_finish(void); |
| 506 | #endif |
| 507 | |
| 508 | struct pcb; |
| 509 | void savectx(struct pcb *); |
| 510 | void lwp_trampoline(void); |
| 511 | #ifdef XEN |
| 512 | void xen_startrtclock(void); |
| 513 | void xen_delay(unsigned int); |
| 514 | void xen_initclocks(void); |
| 515 | void xen_suspendclocks(struct cpu_info *); |
| 516 | void xen_resumeclocks(struct cpu_info *); |
| 517 | #endif /* XEN */ |
| 518 | /* clock.c */ |
| 519 | void initrtclock(u_long); |
| 520 | void startrtclock(void); |
| 521 | void i8254_delay(unsigned int); |
| 522 | void i8254_microtime(struct timeval *); |
| 523 | void i8254_initclocks(void); |
| 524 | unsigned int gettick(void); |
| 525 | extern void (*x86_delay)(unsigned int); |
| 526 | |
| 527 | /* cpu.c */ |
| 528 | void cpu_probe_features(struct cpu_info *); |
| 529 | |
| 530 | /* vm_machdep.c */ |
| 531 | void cpu_proc_fork(struct proc *, struct proc *); |
| 532 | paddr_t kvtop(void *); |
| 533 | |
| 534 | #ifdef USER_LDT |
| 535 | /* sys_machdep.h */ |
| 536 | int x86_get_ldt(struct lwp *, void *, register_t *); |
| 537 | int x86_set_ldt(struct lwp *, void *, register_t *); |
| 538 | #endif |
| 539 | |
| 540 | /* isa_machdep.c */ |
| 541 | void isa_defaultirq(void); |
| 542 | int isa_nmi(void); |
| 543 | |
| 544 | /* consinit.c */ |
| 545 | void kgdb_port_init(void); |
| 546 | |
| 547 | /* bus_machdep.c */ |
| 548 | void x86_bus_space_init(void); |
| 549 | void x86_bus_space_mallocok(void); |
| 550 | |
| 551 | #endif /* _KERNEL */ |
| 552 | |
| 553 | #if defined(_KERNEL) || defined(_KMEMUSER) |
| 554 | #include <machine/psl.h> /* Must be after struct cpu_info declaration */ |
#endif /* _KERNEL || _KMEMUSER */
| 556 | |
| 557 | /* |
| 558 | * CTL_MACHDEP definitions. |
| 559 | */ |
| 560 | #define CPU_CONSDEV 1 /* dev_t: console terminal device */ |
| 561 | #define CPU_BIOSBASEMEM 2 /* int: bios-reported base mem (K) */ |
| 562 | #define CPU_BIOSEXTMEM 3 /* int: bios-reported ext. mem (K) */ |
| 563 | /* CPU_NKPDE 4 obsolete: int: number of kernel PDEs */ |
| 564 | #define CPU_BOOTED_KERNEL 5 /* string: booted kernel name */ |
| 565 | #define CPU_DISKINFO 6 /* struct disklist *: |
| 566 | * disk geometry information */ |
| 567 | #define CPU_FPU_PRESENT 7 /* int: FPU is present */ |
| 568 | #define CPU_OSFXSR 8 /* int: OS uses FXSAVE/FXRSTOR */ |
| 569 | #define CPU_SSE 9 /* int: OS/CPU supports SSE */ |
| 570 | #define CPU_SSE2 10 /* int: OS/CPU supports SSE2 */ |
| 571 | #define CPU_TMLR_MODE 11 /* int: longrun mode |
| 572 | * 0: minimum frequency |
| 573 | * 1: economy |
| 574 | * 2: performance |
| 575 | * 3: maximum frequency |
| 576 | */ |
| 577 | #define CPU_TMLR_FREQUENCY 12 /* int: current frequency */ |
| 578 | #define CPU_TMLR_VOLTAGE 13 /* int: current voltage */ |
| 579 | #define CPU_TMLR_PERCENTAGE 14 /* int: current clock percentage */ |
| 580 | #define CPU_FPU_SAVE 15 /* int: FPU Instructions layout |
| 581 | * to use this, CPU_OSFXSR must be true |
| 582 | * 0: FSAVE |
| 583 | * 1: FXSAVE |
| 584 | * 2: XSAVE |
| 585 | * 3: XSAVEOPT |
| 586 | */ |
| 587 | #define CPU_FPU_SAVE_SIZE 16 /* int: FPU Instruction layout size */ |
| 588 | #define CPU_XSAVE_FEATURES 17 /* quad: XSAVE features */ |
| 589 | |
| 590 | /* |
| 591 | * Structure for CPU_DISKINFO sysctl call. |
| 592 | * XXX this should be somewhere else. |
| 593 | */ |
| 594 | #define MAX_BIOSDISKS 16 |
| 595 | |
struct disklist {
	int dl_nbiosdisks;			   /* number of bios disks */
	int dl_unused;				   /* explicit padding */
	struct biosdisk_info {
		int bi_dev;			   /* BIOS device # (0x80 ..) */
		int bi_cyl;			   /* cylinders on disk */
		int bi_head;			   /* heads per track */
		int bi_sec;			   /* sectors per track */
		uint64_t bi_lbasecs;		   /* total sec. (iff ext13) */
#define BIFLAG_INVALID		0x01
#define BIFLAG_EXTINT13		0x02
		int bi_flags;			   /* BIFLAG_* above */
		int bi_unused;			   /* explicit padding */
	} dl_biosdisks[MAX_BIOSDISKS];

	int dl_nnativedisks;			   /* number of native disks */
	struct nativedisk_info {
		char ni_devname[16];		   /* native device name */
		int ni_nmatches; 		   /* # of matches w/ BIOS */
		int ni_biosmatches[MAX_BIOSDISKS]; /* indices in dl_biosdisks */
	} dl_nativedisks[1];			   /* actually longer */
};
| 618 | #endif /* !_X86_CPU_H_ */ |
| 619 | |