/* $NetBSD: pthread_int.h,v 1.95 2019/03/05 01:35:52 christos Exp $ */

/*-
 * Copyright (c) 2001, 2002, 2003, 2006, 2007, 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nathan J. Williams and Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _LIB_PTHREAD_INT_H
#define _LIB_PTHREAD_INT_H

#include <sys/tls.h>

/* #define PTHREAD__DEBUG */
#define ERRORCHECK

#include "pthread_types.h"
#include "pthread_queue.h"
#include "pthread_md.h"

/* Need to use libc-private names for atomic operations. */
#include "../../common/lib/libc/atomic/atomic_op_namespace.h"

#include <sys/atomic.h>
#include <sys/rbtree.h>

#include <limits.h>
#include <lwp.h>
#include <signal.h>
#include <stdbool.h>

#ifdef __GNUC__
#define PTHREAD_HIDE __attribute__ ((visibility("hidden")))
#else
#define PTHREAD_HIDE /* nothing */
#endif

#define PTHREAD__UNPARK_MAX 32

/*
 * The size of this structure needs to be no larger than struct
 * __pthread_cleanup_store, defined in pthread.h.
 */
struct pt_clean_t {
        PTQ_ENTRY(pt_clean_t) ptc_next;
        void (*ptc_cleanup)(void *);
        void *ptc_arg;
};
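
/*
 * Illustrative sketch, not compiled: one plausible way pthread_cleanup_push()
 * and pthread_cleanup_pop() could drive the pt_cleanup_stack kept in
 * struct __pthread_st below, using the PTQ_* macros from pthread_queue.h.
 * The helper names and exact flow are assumptions, not the actual libpthread
 * implementation.
 */
#if 0
static void
example_cleanup_push(pthread_t self, struct pt_clean_t *store,
    void (*fn)(void *), void *arg)
{

        /* Record the handler and its argument in caller-provided storage. */
        store->ptc_cleanup = fn;
        store->ptc_arg = arg;
        /* LIFO: the newest handler sits at the head of the stack. */
        PTQ_INSERT_HEAD(&self->pt_cleanup_stack, store, ptc_next);
}

static void
example_cleanup_pop(pthread_t self, int execute)
{
        struct pt_clean_t *entry;

        /* Pop the most recently pushed handler and optionally run it. */
        entry = PTQ_FIRST(&self->pt_cleanup_stack);
        PTQ_REMOVE(&self->pt_cleanup_stack, entry, ptc_next);
        if (execute)
                (*entry->ptc_cleanup)(entry->ptc_arg);
}
#endif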

/* Private data for pthread_attr_t */
struct pthread_attr_private {
        char ptap_name[PTHREAD_MAX_NAMELEN_NP];
        void *ptap_namearg;
        void *ptap_stackaddr;
        size_t ptap_stacksize;
        size_t ptap_guardsize;
        struct sched_param ptap_sp;
        int ptap_policy;
};

struct pthread_lock_ops {
        void (*plo_init)(__cpu_simple_lock_t *);
        int (*plo_try)(__cpu_simple_lock_t *);
        void (*plo_unlock)(__cpu_simple_lock_t *);
        void (*plo_lock)(__cpu_simple_lock_t *);
};
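
/*
 * Illustrative sketch, not compiled: a plausible pthread_lock_ops vector
 * backed directly by the machine-dependent __cpu_simple_lock primitives.
 * The real tables are selected at run time by the lock initialization code
 * (pthread__lockprim_init()) and may differ; the names here are hypothetical.
 */
#if 0
static void
example_plo_init(__cpu_simple_lock_t *alp)
{

        __cpu_simple_lock_init(alp);
}

static int
example_plo_try(__cpu_simple_lock_t *alp)
{

        return __cpu_simple_lock_try(alp);
}

static void
example_plo_unlock(__cpu_simple_lock_t *alp)
{

        __cpu_simple_unlock(alp);
}

static void
example_plo_lock(__cpu_simple_lock_t *alp)
{

        __cpu_simple_lock(alp);
}

/* Members in declaration order: plo_init, plo_try, plo_unlock, plo_lock. */
static const struct pthread_lock_ops example_lock_ops = {
        example_plo_init,
        example_plo_try,
        example_plo_unlock,
        example_plo_lock,
};
#endif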

struct __pthread_st {
        pthread_t pt_self;              /* Must be first. */
#if defined(__HAVE_TLS_VARIANT_I) || defined(__HAVE_TLS_VARIANT_II)
        struct tls_tcb *pt_tls;         /* Thread Local Storage area */
#endif
        unsigned int pt_magic;          /* Magic number */
        int pt_state;                   /* running, blocked, etc. */
        pthread_mutex_t pt_lock;        /* lock on state */
        int pt_flags;                   /* see PT_FLAG_* below */
        int pt_cancel;                  /* Deferred cancellation */
        int pt_errno;                   /* Thread-specific errno. */
        stack_t pt_stack;               /* Our stack */
        bool pt_stack_allocated;
        size_t pt_guardsize;
        void *pt_exitval;               /* Read by pthread_join() */
        char *pt_name;                  /* Thread's name, set by the app. */
        int pt_willpark;                /* About to park */
        lwpid_t pt_unpark;              /* Unpark this when parking */
        struct pthread_lock_ops pt_lockops; /* Cached to avoid PIC overhead */
        pthread_mutex_t *pt_droplock;   /* Drop this lock if cancelled */
        pthread_cond_t pt_joiners;      /* Threads waiting to join. */
        void *(*pt_func)(void *);       /* Function to call at start. */
        void *pt_arg;                   /* Argument to pass at start. */

        /* Threads to defer waking, usually until pthread_mutex_unlock(). */
        lwpid_t pt_waiters[PTHREAD__UNPARK_MAX];
        size_t pt_nwaiters;

        /* Stack of cancellation cleanup handlers and their arguments */
        PTQ_HEAD(, pt_clean_t) pt_cleanup_stack;

        /* LWP ID and entry on the list of all threads. */
        lwpid_t pt_lid;
        rb_node_t pt_alltree;
        PTQ_ENTRY(__pthread_st) pt_allq;
        PTQ_ENTRY(__pthread_st) pt_deadq;

        /*
         * General synchronization data. We try to align, as threads
         * on other CPUs will access this data frequently.
         */
        int pt_dummy1 __aligned(128);
        struct lwpctl *pt_lwpctl;       /* Kernel/user comms area */
        volatile int pt_blocking;       /* Blocking in userspace */
        volatile int pt_rwlocked;       /* Handed rwlock successfully */
        volatile int pt_signalled;      /* Received pthread_cond_signal() */
        volatile int pt_mutexwait;      /* Waiting to acquire mutex */
        void * volatile pt_mutexnext;   /* Next thread in chain */
        void * volatile pt_sleepobj;    /* Object slept on */
        PTQ_ENTRY(__pthread_st) pt_sleep;
        void (*pt_early)(void *);
        int pt_dummy2 __aligned(128);

        /* Thread-specific data. Large so it sits close to the end. */
        int pt_havespecific;
        struct pt_specific {
                void *pts_value;
                PTQ_ENTRY(pt_specific) pts_next;
        } pt_specific[];
};
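
/*
 * Illustrative sketch, not compiled: how the deferred wakeups collected in
 * pt_waiters[]/pt_nwaiters above could be flushed with a single
 * _lwp_unpark_all(2) call.  The real flush points in libpthread (for example
 * around pthread_mutex_unlock()) may differ; the helper name is hypothetical.
 */
#if 0
static void
example_flush_deferred_wakeups(pthread_t self)
{

        if (self->pt_nwaiters == 0)
                return;
        /* Wake every deferred waiter in one system call, then reset. */
        (void)_lwp_unpark_all(self->pt_waiters, self->pt_nwaiters, NULL);
        self->pt_nwaiters = 0;
}
#endif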

/* Thread states */
#define PT_STATE_RUNNING 1
#define PT_STATE_ZOMBIE 5
#define PT_STATE_DEAD 6

/* Flag values */

#define PT_FLAG_DETACHED 0x0001
#define PT_FLAG_CS_DISABLED 0x0004      /* Cancellation disabled */
#define PT_FLAG_CS_ASYNC 0x0008         /* Cancellation is async */
#define PT_FLAG_CS_PENDING 0x0010
#define PT_FLAG_SCOPE_SYSTEM 0x0040
#define PT_FLAG_EXPLICIT_SCHED 0x0080
#define PT_FLAG_SUSPENDED 0x0100        /* In the suspended queue */

#define PT_MAGIC 0x11110001
#define PT_DEAD 0xDEAD0001

#define PT_ATTR_MAGIC 0x22220002
#define PT_ATTR_DEAD 0xDEAD0002

extern size_t pthread__stacksize;
extern size_t pthread__guardsize;
extern size_t pthread__pagesize;
extern int pthread__nspins;
extern int pthread__concurrency;
extern int pthread__osrev;
extern int pthread__unpark_max;
extern int pthread_keys_max;

extern int __uselibcstub;

/* Flag to be used in a ucontext_t's uc_flags indicating that
 * the saved register state is "user" state only, not full
 * trap state.
 */
#define _UC_USER_BIT 30
#define _UC_USER (1LU << _UC_USER_BIT)

/* Utility functions */
void pthread__unpark_all(pthread_queue_t *, pthread_t, pthread_mutex_t *)
        PTHREAD_HIDE;
void pthread__unpark(pthread_queue_t *, pthread_t, pthread_mutex_t *)
        PTHREAD_HIDE;
int pthread__park(pthread_t, pthread_mutex_t *, pthread_queue_t *,
        const struct timespec *, int, const void *)
        PTHREAD_HIDE;
pthread_mutex_t *pthread__hashlock(volatile const void *) PTHREAD_HIDE;

/* Internal locking primitives */
void pthread__lockprim_init(void) PTHREAD_HIDE;
void pthread_lockinit(pthread_spin_t *) PTHREAD_HIDE;

static inline void pthread__spinlock(pthread_t, pthread_spin_t *)
        __attribute__((__always_inline__));
static inline void
pthread__spinlock(pthread_t self, pthread_spin_t *lock)
{
        if (__predict_true((*self->pt_lockops.plo_try)(lock)))
                return;
        (*self->pt_lockops.plo_lock)(lock);
}

static inline int pthread__spintrylock(pthread_t, pthread_spin_t *)
        __attribute__((__always_inline__));
static inline int
pthread__spintrylock(pthread_t self, pthread_spin_t *lock)
{
        return (*self->pt_lockops.plo_try)(lock);
}

static inline void pthread__spinunlock(pthread_t, pthread_spin_t *)
        __attribute__((__always_inline__));
static inline void
pthread__spinunlock(pthread_t self, pthread_spin_t *lock)
{
        (*self->pt_lockops.plo_unlock)(lock);
}
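
/*
 * Illustrative sketch, not compiled: the intended calling pattern for the
 * spin lock wrappers above.  The lock, the counter, and the function are
 * hypothetical; __SIMPLELOCK_UNLOCKED is assumed to come from the
 * machine-dependent lock header pulled in via pthread_md.h.
 */
#if 0
static pthread_spin_t example_lock = __SIMPLELOCK_UNLOCKED;
static int example_counter;

static void
example_increment(pthread_t self)
{

        pthread__spinlock(self, &example_lock);   /* try once, then spin */
        example_counter++;
        pthread__spinunlock(self, &example_lock);
}
#endif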

extern const struct pthread_lock_ops *pthread__lock_ops;

int pthread__simple_locked_p(__cpu_simple_lock_t *) PTHREAD_HIDE;
#define pthread__simple_lock_init(alp) (*pthread__lock_ops->plo_init)(alp)
#define pthread__simple_lock_try(alp) (*pthread__lock_ops->plo_try)(alp)
#define pthread__simple_unlock(alp) (*pthread__lock_ops->plo_unlock)(alp)

void pthread__testcancel(pthread_t) PTHREAD_HIDE;
int pthread__find(pthread_t) PTHREAD_HIDE;

#ifndef PTHREAD_MD_INIT
#define PTHREAD_MD_INIT
#endif

#ifndef _INITCONTEXT_U_MD
#define _INITCONTEXT_U_MD(ucp)
#endif

#define _INITCONTEXT_U(ucp) do { \
        (ucp)->uc_flags = _UC_CPU | _UC_STACK; \
        _INITCONTEXT_U_MD(ucp) \
        } while (/*CONSTCOND*/0)


#if !defined(__HAVE_TLS_VARIANT_I) && !defined(__HAVE_TLS_VARIANT_II)
#error Either __HAVE_TLS_VARIANT_I or __HAVE_TLS_VARIANT_II must be defined
#endif

#ifdef _PTHREAD_GETTCB_EXT
struct tls_tcb *_PTHREAD_GETTCB_EXT(void);
#endif

static inline pthread_t __constfunc
pthread__self(void)
{
#if defined(_PTHREAD_GETTCB_EXT)
        struct tls_tcb * const tcb = _PTHREAD_GETTCB_EXT();
#elif defined(__HAVE___LWP_GETTCB_FAST)
        struct tls_tcb * const tcb = __lwp_gettcb_fast();
#else
        struct tls_tcb * const tcb = __lwp_getprivate_fast();
#endif
        return (pthread_t)tcb->tcb_pthread;
}

#define pthread__abort() \
        pthread__assertfunc(__FILE__, __LINE__, __func__, "unreachable")

#define pthread__assert(e) do { \
        if (__predict_false(!(e))) \
                pthread__assertfunc(__FILE__, __LINE__, __func__, #e); \
        } while (/*CONSTCOND*/0)

#define pthread__error(err, msg, e) do { \
        if (__predict_false(!(e))) { \
                pthread__errorfunc(__FILE__, __LINE__, __func__, msg); \
                return (err); \
        } \
        } while (/*CONSTCOND*/0)
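
/*
 * Illustrative sketch, not compiled: intended use of the diagnostic macros
 * above in an internal routine.  pthread__error() reports recoverable API
 * misuse and returns an errno value; pthread__assert() aborts on a violated
 * internal invariant.  The function is hypothetical and assumes <errno.h>
 * for EINVAL.
 */
#if 0
static int
example_check_thread(pthread_t thread)
{

        /* Caller error: log via pthread__errorfunc() and return EINVAL. */
        pthread__error(EINVAL, "Invalid thread", thread->pt_magic == PT_MAGIC);
        /* Internal invariant: abort via pthread__assertfunc() if false. */
        pthread__assert(thread->pt_state != PT_STATE_DEAD);
        return 0;
}
#endif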

void *pthread_tsd_init(size_t *) PTHREAD_HIDE;
void pthread__destroy_tsd(pthread_t) PTHREAD_HIDE;
void pthread__copy_tsd(pthread_t) PTHREAD_HIDE;

__dead void pthread__assertfunc(const char *, int, const char *, const char *)
        PTHREAD_HIDE;
void pthread__errorfunc(const char *, int, const char *, const char *)
        PTHREAD_HIDE;
char *pthread__getenv(const char *) PTHREAD_HIDE;
__dead void pthread__cancelled(void) PTHREAD_HIDE;
void pthread__mutex_deferwake(pthread_t, pthread_mutex_t *) PTHREAD_HIDE;
int pthread__checkpri(int) PTHREAD_HIDE;
int pthread__add_specific(pthread_t, pthread_key_t, const void *) PTHREAD_HIDE;

#ifndef pthread__smt_pause
#define pthread__smt_pause() /* nothing */
#endif
#ifndef pthread__smt_wake
#define pthread__smt_wake() /* nothing */
#endif

/*
 * Bits in the owner field of the lock that indicate lock state. If the
 * WRITE_LOCKED bit is clear, then the owner field is actually a count of
 * the number of readers.
 */
#define RW_HAS_WAITERS 0x01     /* lock has waiters */
#define RW_WRITE_WANTED 0x02    /* >= 1 waiter is a writer */
#define RW_WRITE_LOCKED 0x04    /* lock is currently write locked */
#define RW_UNUSED 0x08          /* currently unused */

#define RW_FLAGMASK 0x0f

#define RW_READ_COUNT_SHIFT 4
#define RW_READ_INCR (1 << RW_READ_COUNT_SHIFT)
#define RW_THREAD ((uintptr_t)-RW_READ_INCR)
#define RW_OWNER(rw) ((rw)->rw_owner & RW_THREAD)
#define RW_COUNT(rw) ((rw)->rw_owner & RW_THREAD)
#define RW_FLAGS(rw) ((rw)->rw_owner & ~RW_THREAD)
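
/*
 * Illustrative sketch, not compiled: decoding an owner word according to the
 * bit layout above.  The RW_OWNER()/RW_COUNT()/RW_FLAGS() macros apply the
 * same masks to a lock's owner field.  For example, with RW_WRITE_LOCKED
 * clear, an owner word of (3 << RW_READ_COUNT_SHIFT) | RW_HAS_WAITERS means
 * three readers hold the lock and at least one thread is waiting.  The
 * function is hypothetical and assumes <stdio.h>.
 */
#if 0
static void
example_describe_owner(uintptr_t owner)
{

        if (owner & RW_WRITE_LOCKED) {
                /* Write locked: the masked value is the owning thread. */
                printf("writer %p, flags %#lx\n",
                    (void *)(owner & RW_THREAD),
                    (unsigned long)(owner & RW_FLAGMASK));
        } else {
                /* Not write locked: the masked value is a reader count. */
                printf("%lu readers, flags %#lx\n",
                    (unsigned long)((owner & RW_THREAD) >> RW_READ_COUNT_SHIFT),
                    (unsigned long)(owner & RW_FLAGMASK));
        }
}
#endif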

#endif /* _LIB_PTHREAD_INT_H */