1 | /* $NetBSD: lwp.h,v 1.186 2019/06/19 21:39:53 kamil Exp $ */ |
2 | |
3 | /* |
4 | * Copyright (c) 2001, 2006, 2007, 2008, 2009, 2010 |
5 | * The NetBSD Foundation, Inc. |
6 | * All rights reserved. |
7 | * |
8 | * This code is derived from software contributed to The NetBSD Foundation |
9 | * by Nathan J. Williams and Andrew Doran. |
10 | * |
11 | * Redistribution and use in source and binary forms, with or without |
12 | * modification, are permitted provided that the following conditions |
13 | * are met: |
14 | * 1. Redistributions of source code must retain the above copyright |
15 | * notice, this list of conditions and the following disclaimer. |
16 | * 2. Redistributions in binary form must reproduce the above copyright |
17 | * notice, this list of conditions and the following disclaimer in the |
18 | * documentation and/or other materials provided with the distribution. |
19 | * |
20 | * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS |
21 | * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED |
22 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR |
23 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS |
24 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
25 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
26 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
27 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN |
28 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
29 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE |
30 | * POSSIBILITY OF SUCH DAMAGE. |
31 | */ |
32 | |
33 | #ifndef _SYS_LWP_H_ |
34 | #define _SYS_LWP_H_ |
35 | |
36 | #if defined(_KERNEL) || defined(_KMEMUSER) |
37 | |
38 | #include <sys/param.h> |
39 | #include <sys/time.h> |
40 | #include <sys/queue.h> |
41 | #include <sys/callout.h> |
42 | #include <sys/kcpuset.h> |
43 | #include <sys/mutex.h> |
44 | #include <sys/condvar.h> |
45 | #include <sys/signalvar.h> |
46 | #include <sys/sched.h> |
47 | #include <sys/specificdata.h> |
48 | #include <sys/syncobj.h> |
49 | #include <sys/resource.h> |
50 | |
51 | #if defined(_KERNEL) |
52 | struct lwp; |
53 | /* forward declare this for <machine/cpu.h> so it can get l_cpu. */ |
54 | static __inline struct cpu_info *lwp_getcpu(struct lwp *); |
55 | #include <machine/cpu.h> /* curcpu() and cpu_info */ |
56 | #endif |
57 | |
58 | #include <machine/proc.h> /* Machine-dependent proc substruct. */ |
59 | |
60 | /* |
61 | * Lightweight process. Field markings and the corresponding locks: |
62 | * |
63 | * a: proc_lock |
64 | * c: condition variable interlock, passed to cv_wait() |
65 | * l: *l_mutex |
66 | * p: l_proc->p_lock |
67 | * s: spc_mutex, which may or may not be referenced by l_mutex |
68 | * S: l_selcluster->sc_lock |
69 | * (: unlocked, stable |
70 | * !: unlocked, may only be reliably accessed by the LWP itself |
71 | * |
72 | * Fields are clustered together by usage (to increase the likelihood |
73 | * of cache hits) and by size (to reduce dead space in the structure). |
74 | */ |
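/*
 * For example, a field marked `l:' such as l_stat may only be
 * inspected or modified with the LWP's lock held (a sketch; see
 * lwp_lock()/lwp_unlock() below):
 *
 *	lwp_lock(l);
 *	... examine or update l->l_stat ...
 *	lwp_unlock(l);
 */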
75 | |
76 | #include <sys/pcu.h> |
77 | |
78 | struct lockdebug; |
79 | struct sysent; |
80 | |
81 | struct lwp { |
82 | /* Scheduling and overall state. */ |
83 | TAILQ_ENTRY(lwp) l_runq; /* s: run queue */ |
84 | union { |
85 | void * info; /* s: scheduler-specific structure */ |
86 | u_int timeslice; /* l: time-quantum for SCHED_M2 */ |
87 | } l_sched; |
88 | struct cpu_info *volatile l_cpu;/* s: CPU we're on if LSONPROC */ |
89 | kmutex_t * volatile l_mutex; /* l: ptr to mutex on sched state */ |
90 | int l_ctxswtch; /* l: performing a context switch */ |
91 | void *l_addr; /* l: PCB address; use lwp_getpcb() */ |
92 | struct mdlwp l_md; /* l: machine-dependent fields. */ |
93 | int l_flag; /* l: misc flag values */ |
94 | int l_stat; /* l: overall LWP status */ |
95 | struct bintime l_rtime; /* l: real time */ |
96 | struct bintime l_stime; /* l: start time (while ONPROC) */ |
97 | u_int l_swtime; /* l: time swapped in or out */ |
98 | u_int l_rticks; /* l: Saved start time of run */ |
99 | u_int l_rticksum; /* l: Sum of ticks spent running */ |
100 | u_int l_slpticks; /* l: Saved start time of sleep */ |
101 | u_int l_slpticksum; /* l: Sum of ticks spent sleeping */ |
102 | int l_biglocks; /* l: biglock count before sleep */ |
103 | int l_class; /* l: scheduling class */ |
104 | int l_kpriority; /* !: has kernel priority boost */ |
105 | pri_t l_kpribase; /* !: kernel priority base level */ |
106 | pri_t l_priority; /* l: scheduler priority */ |
107 | pri_t l_inheritedprio;/* l: inherited priority */ |
108 | pri_t l_protectprio; /* l: for PTHREAD_PRIO_PROTECT */ |
109 | pri_t l_auxprio; /* l: max(inherit,protect) priority */ |
110 | int l_protectdepth; /* l: for PTHREAD_PRIO_PROTECT */ |
111 | SLIST_HEAD(, turnstile) l_pi_lenders; /* l: ts lending us priority */ |
112 | uint64_t l_ncsw; /* l: total context switches */ |
113 | uint64_t l_nivcsw; /* l: involuntary context switches */ |
114 | u_int l_cpticks; /* (: Ticks of CPU time */ |
115 | fixpt_t l_pctcpu; /* p: %cpu during l_swtime */ |
116 | fixpt_t l_estcpu; /* l: cpu time for SCHED_4BSD */ |
117 | psetid_t l_psid; /* l: assigned processor-set ID */ |
118 | struct cpu_info *l_target_cpu; /* l: target CPU to migrate */ |
119 | struct lwpctl *l_lwpctl; /* p: lwpctl block kernel address */ |
120 | struct lcpage *l_lcpage; /* p: lwpctl containing page */ |
121 | kcpuset_t *l_affinity; /* l: CPU set for affinity */ |
122 | |
123 | /* Synchronisation. */ |
124 | struct turnstile *l_ts; /* l: current turnstile */ |
125 | struct syncobj *l_syncobj; /* l: sync object operations set */ |
126 | TAILQ_ENTRY(lwp) l_sleepchain; /* l: sleep queue */ |
127 | wchan_t l_wchan; /* l: sleep address */ |
128 | const char *l_wmesg; /* l: reason for sleep */ |
129 | struct sleepq *l_sleepq; /* l: current sleep queue */ |
130 | int l_sleeperr; /* !: error before unblock */ |
131 | u_int l_slptime; /* l: time since last blocked */ |
132 | callout_t l_timeout_ch; /* !: callout for tsleep */ |
133 | u_int l_emap_gen; /* !: emap generation number */ |
134 | kcondvar_t l_waitcv; /* a: vfork() wait */ |
135 | bool l_vforkwaiting; /* a: vfork() waiting */ |
136 | |
137 | #if PCU_UNIT_COUNT > 0 |
138 | struct cpu_info * volatile l_pcu_cpu[PCU_UNIT_COUNT]; |
139 | uint32_t l_pcu_valid; |
140 | #endif |
141 | |
142 | /* Process level and global state, misc. */ |
143 | LIST_ENTRY(lwp) l_list; /* a: entry on list of all LWPs */ |
144 | void *l_ctxlink; /* p: uc_link {get,set}context */ |
145 | struct proc *l_proc; /* p: parent process */ |
146 | LIST_ENTRY(lwp) l_sibling; /* p: entry on proc's list of LWPs */ |
147 | lwpid_t l_waiter; /* p: first LWP waiting on us */ |
148 | lwpid_t l_waitingfor; /* p: specific LWP we are waiting on */ |
149 | int l_prflag; /* p: process level flags */ |
150 | u_int l_refcnt; /* p: reference count on this LWP */ |
151 | lwpid_t l_lid; /* (: LWP identifier; local to proc */ |
152 | char *l_name; /* (: name, optional */ |
153 | |
154 | /* State of select() or poll(). */ |
155 | int l_selflag; /* S: polling state flags */ |
156 | SLIST_HEAD(,selinfo) l_selwait; /* S: descriptors waited on */ |
157 | int l_selret; /* S: return value of select/poll */ |
158 | uintptr_t l_selrec; /* !: argument for selrecord() */ |
159 | struct selcluster *l_selcluster;/* !: associated cluster data */ |
160 | void * l_selbits; /* (: select() bit-field */ |
161 | size_t l_selni; /* (: size of a single bit-field */ |
162 | |
163 | /* Signals. */ |
164 | int l_sigrestore; /* p: need to restore old sig mask */ |
165 | sigset_t l_sigwaitset; /* p: signals being waited for */ |
166 | kcondvar_t l_sigcv; /* p: for sigsuspend() */ |
167 | struct ksiginfo *l_sigwaited; /* p: delivered signals from set */ |
168 | sigpend_t *l_sigpendset; /* p: XXX issignal()/postsig() baton */ |
169 | LIST_ENTRY(lwp) l_sigwaiter; /* p: chain on list of waiting LWPs */ |
170 | stack_t l_sigstk; /* p: sp & on stack state variable */ |
171 | sigset_t l_sigmask; /* p: signal mask */ |
172 | sigpend_t l_sigpend; /* p: signals to this LWP */ |
173 | sigset_t l_sigoldmask; /* p: mask for sigpause */ |
174 | |
175 | /* Private data. */ |
176 | specificdata_reference |
177 | l_specdataref; /* !: subsystem lwp-specific data */ |
178 | struct timespec l_ktrcsw; /* !: for ktrace CSW trace XXX */ |
179 | void *l_private; /* !: svr4-style lwp-private data */ |
180 | struct lwp *l_switchto; /* !: mi_switch: switch to this LWP */ |
181 | struct kauth_cred *l_cred; /* !: cached credentials */ |
182 | struct filedesc *l_fd; /* !: cached copy of proc::p_fd */ |
183 | void *l_emuldata; /* !: kernel lwp-private data */ |
184 | struct fstrans_lwp_info *l_fstrans; /* (: fstrans private data */ |
185 | u_int l_cv_signalled; /* c: restarted by cv_signal() */ |
186 | u_short l_shlocks; /* !: lockdebug: shared locks held */ |
187 | u_short l_exlocks; /* !: lockdebug: excl. locks held */ |
188 | u_short l_psrefs; /* !: count of psref held */ |
189 | u_short l_blcnt; /* !: count of kernel_lock held */ |
190 | int l_nopreempt; /* !: don't preempt me! */ |
191 | u_int l_dopreempt; /* s: kernel preemption pending */ |
192 | int l_pflag; /* !: LWP private flags */ |
193 | int l_dupfd; /* !: side return from cloning devs XXX */ |
194 | const struct sysent * volatile l_sysent;/* !: currently active syscall */ |
195 | struct rusage l_ru; /* !: accounting information */ |
196 | uint64_t l_pfailtime; /* !: for kernel preemption */ |
197 | uintptr_t l_pfailaddr; /* !: for kernel preemption */ |
198 | uintptr_t l_pfaillock; /* !: for kernel preemption */ |
199 | _TAILQ_HEAD(,struct lockdebug,volatile) l_ld_locks;/* !: locks held by LWP */ |
200 | int l_tcgen; /* !: for timecounter removal */ |
201 | |
202 | /* These are only used by 'options SYSCALL_TIMES'. */ |
203 | uint32_t l_syscall_time; /* !: time epoch for current syscall */ |
204 | uint64_t *l_syscall_counter; /* !: counter for current process */ |
205 | |
206 | struct kdtrace_thread *l_dtrace; /* (: DTrace-specific data. */ |
207 | }; |
208 | |
209 | /* |
 * UAREA_PCB_OFFSET: the offset of the PCB within the uarea.  MD code may
 * define it in <machine/proc.h> to indicate a different uarea layout.
212 | */ |
213 | #ifndef UAREA_PCB_OFFSET |
214 | #define UAREA_PCB_OFFSET 0 |
215 | #endif |
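/*
 * Illustratively (a sketch of the usual arrangement, not a definition
 * made here): the PCB address cached in l_addr is derived from the
 * uarea as base + UAREA_PCB_OFFSET when the uarea is attached, so MD
 * code that places the PCB elsewhere in the uarea overrides the offset.
 */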
216 | |
217 | LIST_HEAD(lwplist, lwp); /* A list of LWPs. */ |
218 | |
219 | #ifdef _KERNEL |
220 | extern struct lwplist alllwp; /* List of all LWPs. */ |
221 | extern lwp_t lwp0; /* LWP for proc0. */ |
222 | extern int maxlwp __read_mostly; /* max number of lwps */ |
223 | #ifndef MAXLWP |
224 | #define MAXLWP 2048 |
225 | #endif |
226 | #ifndef __HAVE_CPU_MAXLWP |
227 | #define cpu_maxlwp() MAXLWP |
228 | #endif |
229 | #endif |
230 | |
231 | #endif /* _KERNEL || _KMEMUSER */ |
232 | |
233 | /* These flags are kept in l_flag. */ |
234 | #define LW_IDLE 0x00000001 /* Idle lwp. */ |
235 | #define LW_LWPCTL 0x00000002 /* Adjust lwpctl in userret */ |
236 | #define LW_CVLOCKDEBUG 0x00000004 /* Waker does lockdebug */ |
237 | #define LW_SINTR 0x00000080 /* Sleep is interruptible. */ |
238 | #define LW_SYSTEM 0x00000200 /* Kernel thread */ |
239 | #define LW_WSUSPEND 0x00020000 /* Suspend before return to user */ |
240 | #define LW_BATCH 0x00040000 /* LWP tends to hog CPU */ |
241 | #define LW_WCORE 0x00080000 /* Stop for core dump on return to user */ |
242 | #define LW_WEXIT 0x00100000 /* Exit before return to user */ |
243 | #define LW_PENDSIG 0x01000000 /* Pending signal for us */ |
244 | #define LW_CANCELLED 0x02000000 /* tsleep should not sleep */ |
245 | #define LW_WREBOOT 0x08000000 /* System is rebooting, please suspend */ |
246 | #define LW_UNPARKED 0x10000000 /* Unpark op pending */ |
247 | #define LW_RUMP_CLEAR 0x40000000 /* Clear curlwp in RUMP scheduler */ |
248 | #define LW_RUMP_QEXIT 0x80000000 /* LWP should exit ASAP */ |
249 | |
250 | /* The second set of flags is kept in l_pflag. */ |
251 | #define LP_KTRACTIVE 0x00000001 /* Executing ktrace operation */ |
252 | #define LP_KTRCSW 0x00000002 /* ktrace context switch marker */ |
253 | #define LP_KTRCSWUSER 0x00000004 /* ktrace context switch marker */ |
254 | #define LP_PIDLID 0x00000008 /* free LID from PID space on exit */ |
255 | #define LP_OWEUPC 0x00000010 /* Owe user profiling tick */ |
256 | #define LP_MPSAFE 0x00000020 /* Starts life without kernel_lock */ |
257 | #define LP_INTR 0x00000040 /* Soft interrupt handler */ |
258 | #define LP_SYSCTLWRITE 0x00000080 /* sysctl write lock held */ |
259 | #define LP_MUSTJOIN 0x00000100 /* Must join kthread on exit */ |
260 | #define LP_SINGLESTEP 0x00000400 /* Single step thread in ptrace(2) */ |
261 | #define LP_TIMEINTR 0x00010000 /* Time this soft interrupt */ |
262 | #define LP_PREEMPTING 0x00020000 /* mi_switch called involuntarily */ |
263 | #define LP_RUNNING 0x20000000 /* Active on a CPU */ |
264 | #define LP_BOUND 0x80000000 /* Bound to a CPU */ |
265 | |
266 | /* The third set is kept in l_prflag. */ |
267 | #define LPR_DETACHED 0x00800000 /* Won't be waited for. */ |
268 | #define LPR_CRMOD 0x00000100 /* Credentials modified */ |
269 | |
270 | /* |
271 | * Mask indicating that there is "exceptional" work to be done on return to |
272 | * user. |
273 | */ |
274 | #define LW_USERRET \ |
275 | (LW_WEXIT | LW_PENDSIG | LW_WREBOOT | LW_WSUSPEND | LW_WCORE | LW_LWPCTL) |
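
/*
 * A sketch of the usual consumer (the exact MI/MD return paths vary):
 *
 *	if (__predict_false(l->l_flag & LW_USERRET))
 *		lwp_userret(l);
 */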
276 | |
277 | /* |
278 | * Status values. |
279 | * |
 * A note about LSRUN and LSONPROC: LSRUN indicates that an LWP is
 * runnable but *not* yet running, i.e. it is sitting on a run queue.
 * LSONPROC indicates that the LWP is actually executing on a CPU,
 * i.e. it is no longer on a run queue.
284 | * |
285 | * These values are set in stone and must not be reused with future changes. |
286 | */ |
#define	LSIDL		1	/* LWP being created. */
288 | #define LSRUN 2 /* Currently runnable. */ |
289 | #define LSSLEEP 3 /* Sleeping on an address. */ |
#define	LSSTOP		4	/* Stopped for debugging or suspension. */
291 | #define LSZOMB 5 /* Awaiting collection by parent. */ |
/* define LSDEAD	6	   LWP is almost a zombie. (removed in 5.0) */
#define	LSONPROC	7	/* LWP is currently on a CPU. */
294 | #define LSSUSPENDED 8 /* Not running, not signalable. */ |
295 | |
296 | #if defined(_KERNEL) || defined(_KMEMUSER) |
297 | static __inline void * |
298 | lwp_getpcb(struct lwp *l) |
299 | { |
300 | |
301 | return l->l_addr; |
302 | } |
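
/*
 * Illustrative use in machine-dependent code (a sketch):
 *
 *	struct pcb *pcb = lwp_getpcb(l);
 */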
303 | #endif /* _KERNEL || _KMEMUSER */ |
304 | |
305 | #ifdef _KERNEL |
306 | #define LWP_CACHE_CREDS(l, p) \ |
307 | do { \ |
308 | (void)p; \ |
309 | if (__predict_false((l)->l_prflag & LPR_CRMOD)) \ |
310 | lwp_update_creds(l); \ |
311 | } while (/* CONSTCOND */ 0) |
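
/*
 * Illustrative use (a sketch): refresh the cached credentials on the
 * way into the kernel, for example early in the syscall path:
 *
 *	struct lwp *l = curlwp;
 *
 *	LWP_CACHE_CREDS(l, l->l_proc);
 */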
312 | |
313 | void lwpinit(void); |
314 | void lwp0_init(void); |
315 | void lwp_sys_init(void); |
316 | |
317 | void lwp_startup(lwp_t *, lwp_t *); |
318 | void startlwp(void *); |
319 | |
320 | int lwp_locked(lwp_t *, kmutex_t *); |
321 | void lwp_setlock(lwp_t *, kmutex_t *); |
322 | void lwp_unlock_to(lwp_t *, kmutex_t *); |
323 | int lwp_trylock(lwp_t *); |
324 | void lwp_addref(lwp_t *); |
325 | void lwp_delref(lwp_t *); |
326 | void lwp_delref2(lwp_t *); |
327 | void lwp_drainrefs(lwp_t *); |
328 | bool lwp_alive(lwp_t *); |
329 | lwp_t *lwp_find_first(proc_t *); |
330 | |
331 | int lwp_wait(lwp_t *, lwpid_t, lwpid_t *, bool); |
332 | void lwp_continue(lwp_t *); |
333 | void lwp_unsleep(lwp_t *, bool); |
334 | void lwp_unstop(lwp_t *); |
335 | void lwp_exit(lwp_t *); |
336 | void lwp_exit_switchaway(lwp_t *) __dead; |
337 | int lwp_suspend(lwp_t *, lwp_t *); |
338 | int lwp_create1(lwp_t *, const void *, size_t, u_long, lwpid_t *); |
339 | void lwp_update_creds(lwp_t *); |
340 | void lwp_migrate(lwp_t *, struct cpu_info *); |
341 | lwp_t * lwp_find2(pid_t, lwpid_t); |
342 | lwp_t * lwp_find(proc_t *, int); |
343 | void lwp_userret(lwp_t *); |
344 | void lwp_need_userret(lwp_t *); |
345 | void lwp_free(lwp_t *, bool, bool); |
346 | uint64_t lwp_pctr(void); |
347 | int lwp_setprivate(lwp_t *, void *); |
348 | int do_lwp_create(lwp_t *, void *, u_long, lwpid_t *, const sigset_t *, |
349 | const stack_t *); |
350 | |
351 | void lwpinit_specificdata(void); |
352 | int lwp_specific_key_create(specificdata_key_t *, specificdata_dtor_t); |
353 | void lwp_specific_key_delete(specificdata_key_t); |
354 | void lwp_initspecific(lwp_t *); |
355 | void lwp_finispecific(lwp_t *); |
356 | void *lwp_getspecific(specificdata_key_t); |
357 | #if defined(_LWP_API_PRIVATE) |
358 | void *_lwp_getspecific_by_lwp(lwp_t *, specificdata_key_t); |
359 | #endif |
360 | void lwp_setspecific(specificdata_key_t, void *); |
361 | void lwp_setspecific_by_lwp(lwp_t *, specificdata_key_t, void *); |
362 | |
363 | /* Syscalls. */ |
364 | int lwp_park(clockid_t, int, struct timespec *, const void *); |
365 | int lwp_unpark(lwpid_t, const void *); |
366 | |
367 | /* DDB. */ |
368 | void lwp_whatis(uintptr_t, void (*)(const char *, ...) __printflike(1, 2)); |
369 | |
370 | /* |
371 | * Lock an LWP. XXX _MODULE |
372 | */ |
373 | static __inline void |
374 | lwp_lock(lwp_t *l) |
375 | { |
376 | kmutex_t *old = l->l_mutex; |
377 | |
	/*
	 * The lock covering an LWP is not fixed: l_mutex is re-pointed
	 * (via lwp_setlock() and lwp_unlock_to()) as the LWP moves
	 * between run queues, sleep queues and turnstiles, so it may
	 * change while we wait to acquire it.  mutex_spin_enter() will
	 * have posted a read barrier, so re-test l->l_mutex after
	 * acquiring it; if it has changed, try again.
	 */
382 | mutex_spin_enter(old); |
383 | while (__predict_false(l->l_mutex != old)) { |
384 | mutex_spin_exit(old); |
385 | old = l->l_mutex; |
386 | mutex_spin_enter(old); |
387 | } |
388 | } |
389 | |
390 | /* |
391 | * Unlock an LWP. XXX _MODULE |
392 | */ |
393 | static __inline void |
394 | lwp_unlock(lwp_t *l) |
395 | { |
396 | mutex_spin_exit(l->l_mutex); |
397 | } |
398 | |
399 | static __inline void |
400 | lwp_changepri(lwp_t *l, pri_t pri) |
401 | { |
402 | KASSERT(mutex_owned(l->l_mutex)); |
403 | |
404 | if (l->l_priority == pri) |
405 | return; |
406 | |
407 | (*l->l_syncobj->sobj_changepri)(l, pri); |
408 | KASSERT(l->l_priority == pri); |
409 | } |
410 | |
411 | static __inline void |
412 | lwp_lendpri(lwp_t *l, pri_t pri) |
413 | { |
414 | KASSERT(mutex_owned(l->l_mutex)); |
415 | |
416 | (*l->l_syncobj->sobj_lendpri)(l, pri); |
417 | KASSERT(l->l_inheritedprio == pri); |
418 | } |
419 | |
420 | static __inline pri_t |
421 | lwp_eprio(lwp_t *l) |
422 | { |
423 | pri_t pri; |
424 | |
425 | pri = l->l_priority; |
426 | if ((l->l_flag & LW_SYSTEM) == 0 && l->l_kpriority && pri < PRI_KERNEL) |
427 | pri = (pri >> 1) + l->l_kpribase; |
428 | return MAX(l->l_auxprio, pri); |
429 | } |
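
/*
 * Worked example with illustrative numbers: for a non-LW_SYSTEM LWP
 * with l_priority 40, l_kpriority set and l_kpribase of PRI_KERNEL,
 * the effective priority is (40 >> 1) + PRI_KERNEL, placing it in the
 * kernel range while the boost lasts, unless l_auxprio (inherited or
 * ceiling priority) is higher still.
 */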
430 | |
431 | int lwp_create(lwp_t *, struct proc *, vaddr_t, int, void *, size_t, |
432 | void (*)(void *), void *, lwp_t **, int, const sigset_t *, const stack_t *); |
433 | |
434 | /* |
435 | * XXX _MODULE |
436 | * We should provide real stubs for the below that modules can use. |
437 | */ |
438 | |
439 | static __inline void |
440 | spc_lock(struct cpu_info *ci) |
441 | { |
442 | mutex_spin_enter(ci->ci_schedstate.spc_mutex); |
443 | } |
444 | |
445 | static __inline void |
446 | spc_unlock(struct cpu_info *ci) |
447 | { |
448 | mutex_spin_exit(ci->ci_schedstate.spc_mutex); |
449 | } |
450 | |
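/*
 * Lock the scheduler state of two CPUs.  The locks are always taken
 * in ascending address order so that two racing callers locking the
 * same pair cannot deadlock against each other.
 */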
451 | static __inline void |
452 | spc_dlock(struct cpu_info *ci1, struct cpu_info *ci2) |
453 | { |
454 | struct schedstate_percpu *spc1 = &ci1->ci_schedstate; |
455 | struct schedstate_percpu *spc2 = &ci2->ci_schedstate; |
456 | |
457 | KASSERT(ci1 != ci2); |
458 | if (ci1 < ci2) { |
459 | mutex_spin_enter(spc1->spc_mutex); |
460 | mutex_spin_enter(spc2->spc_mutex); |
461 | } else { |
462 | mutex_spin_enter(spc2->spc_mutex); |
463 | mutex_spin_enter(spc1->spc_mutex); |
464 | } |
465 | } |
466 | |
467 | /* |
468 | * Allow machine-dependent code to override curlwp in <machine/cpu.h> for |
469 | * its own convenience. Otherwise, we declare it as appropriate. |
470 | */ |
471 | #if !defined(curlwp) |
472 | #if defined(MULTIPROCESSOR) |
473 | #define curlwp curcpu()->ci_curlwp /* Current running LWP */ |
474 | #else |
475 | extern struct lwp *curlwp; /* Current running LWP */ |
476 | #endif /* MULTIPROCESSOR */ |
477 | #endif /* ! curlwp */ |
478 | #define curproc (curlwp->l_proc) |
479 | |
480 | /* |
 * This provides a way for <machine/cpu.h> to get l_cpu for curlwp before
 * struct lwp is defined.
483 | */ |
484 | static __inline struct cpu_info * |
485 | lwp_getcpu(struct lwp *l) |
486 | { |
487 | return l->l_cpu; |
488 | } |
489 | |
490 | static __inline bool |
491 | CURCPU_IDLE_P(void) |
492 | { |
493 | struct cpu_info *ci = curcpu(); |
494 | return ci->ci_data.cpu_onproc == ci->ci_data.cpu_idlelwp; |
495 | } |
496 | |
497 | /* |
498 | * Disable and re-enable preemption. Only for low-level kernel |
499 | * use. Device drivers and anything that could potentially be |
500 | * compiled as a module should use kpreempt_disable() and |
501 | * kpreempt_enable(). |
502 | */ |
503 | static __inline void |
504 | KPREEMPT_DISABLE(lwp_t *l) |
505 | { |
506 | |
507 | KASSERT(l == curlwp); |
508 | l->l_nopreempt++; |
509 | __insn_barrier(); |
510 | } |
511 | |
512 | static __inline void |
513 | KPREEMPT_ENABLE(lwp_t *l) |
514 | { |
515 | |
516 | KASSERT(l == curlwp); |
517 | KASSERT(l->l_nopreempt > 0); |
518 | __insn_barrier(); |
519 | if (--l->l_nopreempt != 0) |
520 | return; |
521 | __insn_barrier(); |
522 | if (__predict_false(l->l_dopreempt)) |
523 | kpreempt(0); |
524 | __insn_barrier(); |
525 | } |
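
/*
 * Typical pairing (an illustrative sketch):
 *
 *	lwp_t *l = curlwp;
 *
 *	KPREEMPT_DISABLE(l);
 *	... access per-CPU state without risk of migration ...
 *	KPREEMPT_ENABLE(l);
 */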
526 | |
527 | /* For lwp::l_dopreempt */ |
528 | #define DOPREEMPT_ACTIVE 0x01 |
529 | #define DOPREEMPT_COUNTED 0x02 |
530 | |
531 | /* |
 * Prevent curlwp from migrating between CPUs from curlwp_bind() until
 * the matching curlwp_bindx().  One use case is psref(9), whose
 * contract forbids migration.
535 | */ |
536 | static __inline int |
537 | curlwp_bind(void) |
538 | { |
539 | int bound; |
540 | |
541 | bound = curlwp->l_pflag & LP_BOUND; |
542 | curlwp->l_pflag |= LP_BOUND; |
543 | __insn_barrier(); |
544 | |
545 | return bound; |
546 | } |
547 | |
548 | static __inline void |
549 | curlwp_bindx(int bound) |
550 | { |
551 | |
552 | KASSERT(curlwp->l_pflag & LP_BOUND); |
553 | __insn_barrier(); |
554 | curlwp->l_pflag ^= bound ^ LP_BOUND; |
555 | } |
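
/*
 * Typical pairing (an illustrative sketch), for example around
 * psref(9) acquire/release:
 *
 *	int bound = curlwp_bind();
 *
 *	... acquire and use a passive reference ...
 *	curlwp_bindx(bound);
 */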
556 | |
557 | #endif /* _KERNEL */ |
558 | |
559 | /* Flags for _lwp_create(), as per Solaris. */ |
560 | #define LWP_DETACHED 0x00000040 |
561 | #define LWP_SUSPENDED 0x00000080 |
562 | |
563 | /* Kernel-internal flags for LWP creation. */ |
564 | #define LWP_PIDLID 0x40000000 |
565 | #define LWP_VFORK 0x80000000 |
566 | |
567 | #endif /* !_SYS_LWP_H_ */ |
568 | |