/* NOTE(review): this chunk is a garbled extraction of Ruby's Win32 thread
 * implementation (thread_win32.c): interior lines are elided and the original
 * file's own line numbers have been fused into the text (the leading
 * integers below).  Comments describe only what the visible tokens show. */
12 #ifdef THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION
/* Scheduler time slice in microseconds (10 ms). */
16 #define TIME_QUANTUM_USEC (10 * 1000)
17 #define RB_CONDATTR_CLOCK_MONOTONIC 1
/* Sleep(0) relinquishes the rest of the current quantum on Windows. */
21 #define native_thread_yield() Sleep(0)
/* No-op on Win32: no signal-thread list exists on this platform. */
22 #define remove_signal_thread_list(th)
/* TLS slot holding the current thread's rb_thread_t*; TLS_OUT_OF_INDEXES
 * means "not yet allocated" (allocated via TlsAlloc() during init). */
24 static volatile DWORD ruby_native_thread_key = TLS_OUT_OF_INDEXES;
/* w32_error(func): fatal-error helper — formats the calling thread's last
 * Win32 error (GetLastError(), implied; not visible in this fragment) into
 * a human-readable message and aborts via rb_bug().
 * NOTE(review): fragment — declarations (lpMsgBuf), braces and several
 * argument lines are elided by the extraction. */
31 w32_error(
const char *
func)
/* First attempt: request the message text in US English. */
35 if (FormatMessage(FORMAT_MESSAGE_ALLOCATE_BUFFER |
36 FORMAT_MESSAGE_FROM_SYSTEM |
37 FORMAT_MESSAGE_IGNORE_INSERTS,
40 MAKELANGID(LANG_ENGLISH, SUBLANG_ENGLISH_US),
41 (LPTSTR) & lpMsgBuf, 0,
NULL) == 0)
/* Fallback: retry with the system's neutral/default language if the
 * English lookup produced nothing. */
42 FormatMessage(FORMAT_MESSAGE_ALLOCATE_BUFFER |
43 FORMAT_MESSAGE_FROM_SYSTEM |
44 FORMAT_MESSAGE_IGNORE_INSERTS,
47 MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT),
48 (LPTSTR) & lpMsgBuf, 0,
NULL);
/* Abort the process, reporting the failing function and message text. */
49 rb_bug(
"%s: %s", func, (
char*)lpMsgBuf);
/* w32_mutex_lock(lock): block until the Win32 mutex HANDLE is acquired,
 * waiting via w32_wait_events() so (presumably) interrupts can be serviced.
 * NOTE(review): fragment — the enclosing switch/loop and the WAIT_OBJECT_0
 * success case are elided; only the error cases are visible. */
53 w32_mutex_lock(HANDLE lock)
58 result = w32_wait_events(&lock, 1, INFINITE, 0);
64 case WAIT_OBJECT_0 + 1:
/* WAIT_ABANDONED: previous owner died holding the mutex — unrecoverable. */
73 rb_bug(
"win32_mutex_lock: WAIT_ABANDONED");
/* Any other wait result is unexpected — abort. */
76 rb_bug(
"win32_mutex_lock: unknown result (%ld)", result);
84 w32_mutex_create(
void)
88 w32_error(
"native_mutex_initialize");
99 if (GVL_DEBUG) fprintf(stderr,
"gvl acquire (%p): acquire\n", th);
112 native_thread_yield();
120 rb_bug(
"gvl_atfork() is called on win32");
126 if (GVL_DEBUG) fprintf(stderr,
"gvl init\n");
127 vm->
gvl.
lock = w32_mutex_create();
133 if (GVL_DEBUG) fprintf(stderr,
"gvl destroy\n");
138 ruby_thread_from_native(
void)
140 return TlsGetValue(ruby_native_thread_key);
146 return TlsSetValue(ruby_native_thread_key, th);
154 ruby_native_thread_key = TlsAlloc();
155 ruby_thread_set_native(th);
156 DuplicateHandle(GetCurrentProcess(),
163 thread_debug(
"initial thread (th: %p, thid: %p, event: %p)\n",
/* w32_set_event(handle): signal a Win32 event; abort via w32_error() on
 * failure (SetEvent returns 0 on error). */
169 w32_set_event(HANDLE handle)
171 if (SetEvent(handle) == 0) {
172 w32_error(
"w32_set_event");
/* w32_reset_event(handle): return a Win32 event to non-signaled state;
 * abort via w32_error() on failure. */
177 w32_reset_event(HANDLE handle)
179 if (ResetEvent(handle) == 0) {
180 w32_error(
"w32_reset_event");
/* Core wait primitive: wait on `events` (count handles) with `timeout`,
 * and — when a Ruby thread `th` is given — additionally wait on the
 * thread's interrupt event so the wait is unblockable.
 * NOTE(review): fragment — the function header, GVL_UNLOCK region, and the
 * interrupt-event setup around line 196 are elided by the extraction. */
187 HANDLE *targets = events;
191 thread_debug(
" w32_wait_events events:%p, count:%d, timeout:%ld, th:%p\n",
192 events, count, timeout, th);
194 gvl_acquire(th->
vm, th);
/* Clear any stale interrupt signal before arming the wait. */
196 w32_reset_event(intr);
/* Copy caller handles into a stack buffer with one extra slot ... */
201 targets =
ALLOCA_N(HANDLE, count + 1);
202 memcpy(targets, events,
sizeof(HANDLE) * count);
/* ... and append the interrupt event as the last handle. */
204 targets[count++] = intr;
205 thread_debug(
" * handle: %p (count: %d, intr)\n", intr, count);
210 thread_debug(
" WaitForMultipleObjects start (count: %d)\n", count);
/* FALSE = wake when ANY handle is signaled, not all. */
211 ret = WaitForMultipleObjects(count, targets,
FALSE, timeout);
212 thread_debug(
" WaitForMultipleObjects end (ret: %lu)\n", ret);
/* Last slot (count-1) is the interrupt event — i.e. we were unblocked. */
214 if (ret == (
DWORD)(WAIT_OBJECT_0 + count - 1) && th) {
/* Debug sweep: report validity of each waited handle. */
220 for (i = 0; i <
count; i++) {
222 GetHandleInformation(targets[i], &dmy) ?
"OK" :
"NG");
228 static void ubf_handle(
void *
ptr);
229 #define ubf_select ubf_handle
234 return w32_wait_events(events, num, timeout, ruby_thread_from_native());
243 ubf_handle, ruby_thread_from_native(),
FALSE);
/* w32_close_handle(handle): CloseHandle wrapper that aborts via
 * w32_error() on failure (CloseHandle returns 0 on error). */
248 w32_close_handle(HANDLE handle)
250 if (CloseHandle(handle) == 0) {
251 w32_error(
"w32_close_handle");
/* w32_resume_thread(handle): resume a suspended thread; ResumeThread
 * returns (DWORD)-1 on failure, in which case abort via w32_error(). */
256 w32_resume_thread(HANDLE handle)
258 if (ResumeThread(handle) == (
DWORD)-1) {
259 w32_error(
"w32_resume_thread");
/* Select the thread-creation primitive: prefer the CRT's _beginthreadex
 * (keeps per-thread CRT state correct) and fall back to raw CreateThread.
 * NOTE(review): the #if/#else/#endif lines of this conditional are elided
 * by the extraction — only the bodies of both arms are visible. */
264 #define HAVE__BEGINTHREADEX 1
266 #undef HAVE__BEGINTHREADEX
269 #ifdef HAVE__BEGINTHREADEX
270 #define start_thread (HANDLE)_beginthreadex
/* _beginthreadex reports errors through errno. */
271 #define thread_errno errno
272 typedef unsigned long (__stdcall *w32_thread_start_func)(
void*);
274 #define start_thread CreateThread
/* CreateThread reports errors through GetLastError(); map to errno. */
275 #define thread_errno rb_w32_map_errno(GetLastError())
276 typedef LPTHREAD_START_ROUTINE w32_thread_start_func;
/* w32_create_thread: create a thread suspended (caller resumes it later
 * via w32_resume_thread) with the given stack size and start routine. */
280 w32_create_thread(
DWORD stack_size, w32_thread_start_func func,
void *
val)
282 return start_thread(0, stack_size, func, val, CREATE_SUSPENDED, 0);
288 return w32_wait_events(0, 0, msec, ruby_thread_from_native());
297 ubf_handle, ruby_thread_from_native(),
FALSE);
304 const volatile DWORD msec = (tv) ?
321 ret = w32_wait_events(0, 0, msec, th);
337 w32_mutex_lock(lock->mutex);
339 EnterCriticalSection(&lock->crit);
349 return ReleaseMutex(lock->mutex);
351 LeaveCriticalSection(&lock->crit);
361 thread_debug(
"native_mutex_trylock: %p\n", lock->mutex);
362 result = w32_wait_events(&lock->mutex, 1, 1, 0);
363 thread_debug(
"native_mutex_trylock result: %d\n", result);
380 lock->mutex = w32_mutex_create();
383 InitializeCriticalSection(&lock->crit);
391 w32_close_handle(lock->mutex);
393 DeleteCriticalSection(&lock->crit);
/* Node in the doubly-linked circular list of waiters used to emulate a
 * condition variable on Win32; the condvar head itself doubles as the
 * list sentinel.  NOTE(review): fragment — the per-waiter event HANDLE
 * field and closing brace are elided by the extraction. */
397 struct cond_event_entry {
398 struct cond_event_entry* next;
399 struct cond_event_entry* prev;
407 struct cond_event_entry *
e = cond->
next;
408 struct cond_event_entry *
head = (
struct cond_event_entry*)cond;
411 struct cond_event_entry *next = e->next;
412 struct cond_event_entry *prev = e->prev;
416 e->next = e->prev =
e;
426 struct cond_event_entry *e = cond->
next;
427 struct cond_event_entry *head = (
struct cond_event_entry*)cond;
430 struct cond_event_entry *next = e->next;
431 struct cond_event_entry *prev = e->prev;
437 e->next = e->prev =
e;
/* Condvar wait with millisecond timeout: enqueue a stack-allocated entry
 * (with a per-wait event, created in an elided line) on the condvar's
 * waiter list, drop the mutex, block on the event, then re-acquire.
 * Returns 0 on signal, ETIMEDOUT on timeout.
 * NOTE(review): fragment — entry.event creation, list unlinking, and the
 * function header are elided by the extraction. */
448 struct cond_event_entry
entry;
449 struct cond_event_entry *head = (
struct cond_event_entry*)cond;
/* Append `entry` at the tail of the circular waiter list. */
455 entry.prev = head->prev;
456 head->prev->next = &
entry;
/* Release the user mutex before blocking (classic condvar protocol). */
459 native_mutex_unlock(mutex);
461 r = WaitForSingleObject(
entry.event, msec);
/* Only signal or timeout are legitimate outcomes here. */
462 if ((r != WAIT_OBJECT_0) && (r != WAIT_TIMEOUT)) {
463 rb_bug(
"native_cond_wait: WaitForSingleObject returns %lu", r);
/* Re-acquire the user mutex before returning, per condvar contract. */
466 native_mutex_lock(mutex);
/* The per-wait event is single-use; close it now. */
471 w32_close_handle(
entry.event);
472 return (r == WAIT_OBJECT_0) ? 0 :
ETIMEDOUT;
478 return native_cond_timedwait_ms(cond, mutex, INFINITE);
482 abs_timespec_to_timeout_ms(
struct timespec *ts)
500 unsigned long timeout_ms;
502 timeout_ms = abs_timespec_to_timeout_ms(ts);
506 return native_cond_timedwait_ms(cond, mutex, timeout_ms);
521 now.tv_nsec = tv.
tv_usec * 1000;
523 timeout.tv_sec = now.tv_sec;
524 timeout.tv_nsec = now.tv_nsec;
525 timeout.tv_sec += timeout_rel.tv_sec;
526 timeout.tv_nsec += timeout_rel.tv_nsec;
528 if (timeout.tv_nsec >= 1000*1000*1000) {
530 timeout.tv_nsec -= 1000*1000*1000;
533 if (timeout.tv_sec < now.tv_sec)
534 timeout.tv_sec = TIMET_MAX;
542 cond->
next = (
struct cond_event_entry *)cond;
543 cond->
prev = (
struct cond_event_entry *)cond;
/* CHECK_ERR(expr): evaluate a Win32 API call and abort via rb_bug()
 * (reporting GetLastError() and the stringized expression) when it
 * returns false/0.
 *
 * Wrapped in do { } while (0) so the macro expands to exactly one
 * statement: the original bare-brace form `{ if ... }` makes
 * `if (x) CHECK_ERR(e); else ...` a syntax error (the semicolon after
 * the braces terminates the `if`, orphaning the `else`) — CERT PRE10-C. */
#define CHECK_ERR(expr) \
    do {if (!(expr)) {rb_bug("err: %lu - %s", GetLastError(), #expr);}} while (0)
563 MEMORY_BASIC_INFORMATION mi;
567 CHECK_ERR(VirtualQuery(&mi, &mi,
sizeof(mi)));
568 base = mi.AllocationBase;
569 end = mi.BaseAddress;
570 end += mi.RegionSize;
573 if (space > 1024*1024) space = 1024*1024;
578 #ifndef InterlockedExchangePointer
579 #define InterlockedExchangePointer(t, v) \
580 (void *)InterlockedExchange((long *)(t), (long)(v))
587 w32_close_handle(intr);
590 static unsigned long __stdcall
591 thread_start_func_1(
void *th_ptr)
594 volatile HANDLE thread_id = th->
thread_id;
596 native_thread_init_stack(th);
600 thread_debug(
"thread created (th: %p, thid: %p, event: %p)\n", th,
605 w32_close_handle(thread_id);
613 size_t stack_size = 4 * 1024;
614 th->
thread_id = w32_create_thread(stack_size, thread_start_func_1, th);
632 native_thread_join(HANDLE th)
634 w32_wait_events(&th, 1, INFINITE, 0);
637 #if USE_NATIVE_THREAD_PRIORITY
644 priority = THREAD_PRIORITY_ABOVE_NORMAL;
647 priority = THREAD_PRIORITY_BELOW_NORMAL;
650 priority = THREAD_PRIORITY_NORMAL;
653 SetThreadPriority(th->
thread_id, priority);
683 return w32_wait_events(0, 0, 0, th);
687 ubf_handle(
void *
ptr)
/* Timer-thread machinery: a background thread that wakes every scheduler
 * quantum to drive preemption, plus an event used to tell it to stop.
 * NOTE(review): fragment — the timer loop body and several braces are
 * elided by the extraction. */
695 static HANDLE timer_thread_id = 0;
/* Manual-reset event (see CreateEvent below); signaling it stops the loop. */
696 static HANDLE timer_thread_lock;
698 static unsigned long __stdcall
699 timer_thread_func(
void *
dummy)
/* Loop until the stop event is signaled; the wait doubles as the
 * quantum-length sleep (TIME_QUANTUM_USEC is in usec, /1000 -> msec). */
702 while (WaitForSingleObject(timer_thread_lock, TIME_QUANTUM_USEC/1000) ==
/* Create the timer thread on first use (idempotent). */
717 rb_thread_create_timer_thread(
void)
719 if (timer_thread_id == 0) {
720 if (!timer_thread_lock) {
/* Manual-reset (TRUE), initially non-signaled (FALSE). */
721 timer_thread_lock = CreateEvent(0,
TRUE,
FALSE, 0);
/* Tiny stack; slightly larger when debug printing is compiled in. */
723 timer_thread_id = w32_create_thread(1024 + (
THREAD_DEBUG ? BUFSIZ : 0),
724 timer_thread_func, 0);
/* Threads are created suspended (CREATE_SUSPENDED) — start it now. */
725 w32_resume_thread(timer_thread_id);
/* Stop the timer thread: signal its stop event, join it, then release
 * the event handle.  NOTE(review): fragment — the surrounding condition
 * (use of close_anyway) and braces are elided by the extraction. */
730 native_stop_timer_thread(
int close_anyway)
734 SetEvent(timer_thread_lock);
/* Wait for timer_thread_func to observe the event and exit. */
735 native_thread_join(timer_thread_id);
736 CloseHandle(timer_thread_lock);
737 timer_thread_lock = 0;
/* Release the (already-stopped) timer thread's handle, if any. */
743 native_reset_timer_thread(
void)
745 if (timer_thread_id) {
746 CloseHandle(timer_thread_id);
752 ruby_stack_overflowed_p(
const rb_thread_t *th,
const void *addr)
757 #if defined(__MINGW32__)
759 rb_w32_stack_overflow_handler(
struct _EXCEPTION_POINTERS *exception)
761 if (exception->ExceptionRecord->ExceptionCode == EXCEPTION_STACK_OVERFLOW) {
765 return EXCEPTION_CONTINUE_SEARCH;
769 #ifdef RUBY_ALLOCA_CHKSTK
771 ruby_alloca_chkstk(
size_t len,
void *sp)
791 return GetCurrentThread();
RUBY_SYMBOL_EXPORT_BEGIN rb_nativethread_id_t rb_nativethread_self()
void rb_bug(const char *fmt,...)
int gettimeofday(struct timeval *, struct timezone *)
int rb_w32_wait_events_blocking(HANDLE *events, int num, DWORD timeout)
int rb_w32_wait_events(HANDLE *events, int num, DWORD timeout)
size_t ruby_stack_length(VALUE **)
SSL_METHOD *(* func)(void)
rb_unblock_function_t * func
pthread_mutex_t rb_nativethread_lock_t
static volatile int system_working
WINBASEAPI BOOL WINAPI TryEnterCriticalSection(IN OUT LPCRITICAL_SECTION lpCriticalSection)
void rb_exc_raise(VALUE mesg)
void rb_thread_wakeup_timer_thread(void)
int WINAPI rb_w32_Sleep(unsigned long msec)
struct rb_thread_struct::@192 machine
int rb_w32_sleep(unsigned long msec)
struct cond_event_entry * next
int rb_w32_select_with_thread(int nfds, fd_set *rd, fd_set *wr, fd_set *ex, struct timeval *timeout, void *th)
#define ALLOCA_N(type, n)
#define GVL_UNLOCK_BEGIN()
int rb_w32_time_subtract(struct timeval *rest, const struct timeval *wait)
pthread_t rb_nativethread_id_t
struct cond_event_entry * prev
#define rb_thread_raised_set(th, f)
void ruby_init_stack(volatile VALUE *)
static void timer_thread_function(void *)
int rb_reserved_fd_p(int fd)
void rb_sys_fail(const char *mesg)
#define rb_fd_resize(n, f)
#define thread_start_func_2(th, st, rst)
struct rb_unblock_callback unblock
rb_nativethread_id_t thread_id
native_thread_data_t native_thread_data
#define BLOCKING_REGION(func, arg)
#define rb_thread_raised_p(th, f)
#define RUBY_VM_INTERRUPTED(th)
rb_nativethread_lock_t interrupt_lock
void Init_native_thread(void)
static rb_thread_t * GET_THREAD(void)
int rb_w32_check_interrupt(void *)