1 /**************************************************************************/
2 /* */
3 /* OCaml */
4 /* */
5 /* Xavier Leroy and Damien Doligez, INRIA Rocquencourt */
6 /* */
7 /* Copyright 1995 Institut National de Recherche en Informatique et */
8 /* en Automatique. */
9 /* */
10 /* All rights reserved. This file is distributed under the terms of */
11 /* the GNU Lesser General Public License version 2.1, with the */
12 /* special exception on linking described in the file LICENSE. */
13 /* */
14 /**************************************************************************/
15
16 #define CAML_INTERNALS
17
18 #include "caml/alloc.h"
19 #include "caml/backtrace.h"
20 #include "caml/callback.h"
21 #include "caml/custom.h"
22 #include "caml/domain.h"
23 #include "caml/fail.h"
24 #include "caml/io.h"
25 #include "caml/memory.h"
26 #include "caml/misc.h"
27 #include "caml/mlvalues.h"
28 #include "caml/printexc.h"
29 #include "caml/roots.h"
30 #include "caml/signals.h"
31 #ifdef NATIVE_CODE
32 #include "caml/stack.h"
33 #else
34 #include "caml/stacks.h"
35 #endif
36 #include "caml/sys.h"
37 #include "caml/memprof.h"
38 #include "threads.h"
39
40 #if defined(NATIVE_CODE) && defined(WITH_SPACETIME)
41 #include "caml/spacetime.h"
42 #endif
43
44 #ifndef NATIVE_CODE
45 /* Initial size of bytecode stack when a thread is created (4 Ko) */
46 #define Thread_stack_size (Stack_size / 4)
47 #endif
48
49 /* Max computation time before rescheduling, in milliseconds */
50 #define Thread_timeout 50
51
52 /* OS-specific code */
53 #ifdef _WIN32
54 #include "st_win32.h"
55 #else
56 #include "st_posix.h"
57 #endif
58
/* The ML value describing a thread (heap-allocated) */

struct caml_thread_descr {
  value ident;                  /* Unique integer ID */
  value start_closure;          /* The closure to start this thread */
  value terminated;             /* Triggered event for thread termination */
};

/* Field accessors for a thread descriptor value.  The descriptor is an
   ordinary 3-field heap block (see caml_thread_new_descriptor), so any
   mutation of a pointer field from C must go through caml_modify, as
   done in caml_thread_start. */
#define Ident(v) (((struct caml_thread_descr *)(v))->ident)
#define Start_closure(v) (((struct caml_thread_descr *)(v))->start_closure)
#define Terminated(v) (((struct caml_thread_descr *)(v))->terminated)
70
/* The infos on threads (allocated via caml_stat_alloc()).
   One block per thread, linked in a circular doubly-linked list.
   The fields mirror the pieces of global runtime state that must be
   saved and restored when switching which thread runs the runtime
   (see caml_thread_save_runtime_state / caml_thread_restore_runtime_state). */

struct caml_thread_struct {
  value descr;                  /* The heap-allocated descriptor (root) */
  struct caml_thread_struct * next;  /* Double linking of running threads */
  struct caml_thread_struct * prev;
#ifdef NATIVE_CODE
  char * top_of_stack;          /* Top of stack for this thread (approx.) */
  char * bottom_of_stack;       /* Saved value of Caml_state->bottom_of_stack */
  uintnat last_retaddr;         /* Saved value of Caml_state->last_return_address */
  value * gc_regs;              /* Saved value of Caml_state->gc_regs */
  char * exception_pointer;     /* Saved value of Caml_state->exception_pointer */
  struct caml__roots_block * local_roots; /* Saved value of local_roots */
  struct longjmp_buffer * exit_buf; /* For thread exit */
#if defined(NATIVE_CODE) && defined(WITH_SPACETIME)
  value internal_spacetime_trie_root;
  value internal_spacetime_finaliser_trie_root;
  value* spacetime_trie_node_ptr;
  value* spacetime_finaliser_trie_root;
#endif
#else
  value * stack_low;            /* The execution stack for this thread */
  value * stack_high;
  value * stack_threshold;
  value * sp;                   /* Saved value of Caml_state->extern_sp for this thread */
  value * trapsp;               /* Saved value of Caml_state->trapsp for this thread */
  /* Saved value of Caml_state->local_roots */
  struct caml__roots_block * local_roots;
  struct longjmp_buffer * external_raise; /* Saved Caml_state->external_raise */
#endif
  int backtrace_pos;            /* Saved Caml_state->backtrace_pos */
  backtrace_slot * backtrace_buffer; /* Saved Caml_state->backtrace_buffer */
  value backtrace_last_exn;     /* Saved Caml_state->backtrace_last_exn (root) */
  int memprof_suspended;        /* Saved caml_memprof_suspended */
};

typedef struct caml_thread_struct * caml_thread_t;
108
/* The "head" of the circular list of thread descriptors.
   NOTE(review): accesses to these globals appear to be serialized by
   caml_master_lock — confirm before adding new access paths. */
static caml_thread_t all_threads = NULL;

/* The descriptor for the currently executing thread */
static caml_thread_t curr_thread = NULL;

/* The master lock protecting the OCaml runtime system */
static st_masterlock caml_master_lock;

/* Whether the "tick" thread is already running */
static int caml_tick_thread_running = 0;

/* The thread identifier of the "tick" thread */
static st_thread_id caml_tick_thread_id;

/* The key used for storing the thread descriptor in the specific data
   of the corresponding system thread. */
static st_tlskey thread_descriptor_key;

/* The key used for unlocking I/O channels on exceptions */
static st_tlskey last_channel_locked_key;

/* Identifier for next thread creation */
static intnat thread_next_ident = 0;

/* Forward declarations */
static value caml_threadstatus_new (void);
static void caml_threadstatus_terminate (value);
static st_retcode caml_threadstatus_wait (value);

/* Imports from the native-code runtime system */
#ifdef NATIVE_CODE
extern struct longjmp_buffer caml_termination_jmpbuf;
extern void (*caml_termination_hook)(void);
#endif

/* Hook for scanning the stacks of the other threads.
   Saved previous value of caml_scan_roots_hook, chained to at the end
   of caml_thread_scan_roots. */

static void (*prev_scan_roots_hook) (scanning_action);
148
/* GC root scanner installed as caml_scan_roots_hook.
   Walks the circular list of thread info blocks and applies [action]
   to the heap values held in each block (the thread descriptor and the
   saved last backtrace exception), plus the saved stack and local
   roots of every thread other than the current one. */
static void caml_thread_scan_roots(scanning_action action)
{
  caml_thread_t th;

  th = curr_thread;
  do {
    (*action)(th->descr, &th->descr);
    (*action)(th->backtrace_last_exn, &th->backtrace_last_exn);
    /* Don't rescan the stack of the current thread, it was done already */
    if (th != curr_thread) {
#ifdef NATIVE_CODE
      if (th->bottom_of_stack != NULL)
        caml_do_local_roots(action, th->bottom_of_stack, th->last_retaddr,
                            th->gc_regs, th->local_roots);
#else
      caml_do_local_roots(action, th->sp, th->stack_high, th->local_roots);
#endif
    }
    th = th->next;
  } while (th != curr_thread);
  /* Chain to the previously installed root-scanning hook, if any */
  if (prev_scan_roots_hook != NULL) (*prev_scan_roots_hook)(action);
}
172
173 /* Saving and restoring runtime state in curr_thread */
174
/* Copy the global runtime state (stack pointers, exception handlers,
   backtrace and memprof state) from Caml_state and friends into the
   current thread's info block, so another thread can safely install
   its own state afterwards. */
static inline void caml_thread_save_runtime_state(void)
{
#ifdef NATIVE_CODE
  curr_thread->top_of_stack = Caml_state->top_of_stack;
  curr_thread->bottom_of_stack = Caml_state->bottom_of_stack;
  curr_thread->last_retaddr = Caml_state->last_return_address;
  curr_thread->gc_regs = Caml_state->gc_regs;
  curr_thread->exception_pointer = Caml_state->exception_pointer;
#ifdef WITH_SPACETIME
  curr_thread->spacetime_trie_node_ptr
    = caml_spacetime_trie_node_ptr;
  curr_thread->spacetime_finaliser_trie_root
    = caml_spacetime_finaliser_trie_root;
#endif
#else
  curr_thread->stack_low = Caml_state->stack_low;
  curr_thread->stack_high = Caml_state->stack_high;
  curr_thread->stack_threshold = Caml_state->stack_threshold;
  curr_thread->sp = Caml_state->extern_sp;
  curr_thread->trapsp = Caml_state->trapsp;
  curr_thread->external_raise = Caml_state->external_raise;
#endif
  curr_thread->local_roots = Caml_state->local_roots;
  curr_thread->backtrace_pos = Caml_state->backtrace_pos;
  curr_thread->backtrace_buffer = Caml_state->backtrace_buffer;
  curr_thread->backtrace_last_exn = Caml_state->backtrace_last_exn;
  curr_thread->memprof_suspended = caml_memprof_suspended;
}
203
/* Inverse of caml_thread_save_runtime_state: install the state saved
   in curr_thread's info block back into the global runtime state.
   Called after curr_thread has been updated to the thread that now
   owns the runtime. */
static inline void caml_thread_restore_runtime_state(void)
{
#ifdef NATIVE_CODE
  Caml_state->top_of_stack = curr_thread->top_of_stack;
  Caml_state->bottom_of_stack= curr_thread->bottom_of_stack;
  Caml_state->last_return_address = curr_thread->last_retaddr;
  Caml_state->gc_regs = curr_thread->gc_regs;
  Caml_state->exception_pointer = curr_thread->exception_pointer;
#ifdef WITH_SPACETIME
  caml_spacetime_trie_node_ptr
    = curr_thread->spacetime_trie_node_ptr;
  caml_spacetime_finaliser_trie_root
    = curr_thread->spacetime_finaliser_trie_root;
#endif
#else
  Caml_state->stack_low = curr_thread->stack_low;
  Caml_state->stack_high = curr_thread->stack_high;
  Caml_state->stack_threshold = curr_thread->stack_threshold;
  Caml_state->extern_sp = curr_thread->sp;
  Caml_state->trapsp = curr_thread->trapsp;
  Caml_state->external_raise = curr_thread->external_raise;
#endif
  Caml_state->local_roots = curr_thread->local_roots;
  Caml_state->backtrace_pos = curr_thread->backtrace_pos;
  Caml_state->backtrace_buffer = curr_thread->backtrace_buffer;
  Caml_state->backtrace_last_exn = curr_thread->backtrace_last_exn;
  caml_memprof_suspended = curr_thread->memprof_suspended;
}
232
233 /* Hooks for caml_enter_blocking_section and caml_leave_blocking_section */
234
235
/* caml_enter_blocking_section hook: park the runtime state in the
   current thread's info block, then release the master lock so that
   other threads may run OCaml code. */
static void caml_thread_enter_blocking_section(void)
{
  /* Save the current runtime state in the thread descriptor
     of the current thread */
  caml_thread_save_runtime_state();
  /* Tell other threads that the runtime is free */
  st_masterlock_release(&caml_master_lock);
}
244
/* caml_leave_blocking_section hook: reacquire the master lock, then
   reinstall this thread's saved runtime state.  curr_thread is
   refreshed from TLS because another thread may have run (and switched
   it) while we were blocked. */
static void caml_thread_leave_blocking_section(void)
{
  /* Wait until the runtime is free */
  st_masterlock_acquire(&caml_master_lock);
  /* Update curr_thread to point to the thread descriptor corresponding
     to the thread currently executing */
  curr_thread = st_tls_get(thread_descriptor_key);
  /* Restore the runtime state from the curr_thread descriptor */
  caml_thread_restore_runtime_state();
}
255
/* caml_try_leave_blocking_section hook.  Always reports failure
   (PR#3659): refusing to process signals immediately forces them to be
   recorded and handled at the next leave_blocking_section or polling
   point instead. */
static int caml_thread_try_leave_blocking_section(void)
{
  return 0;
}
264
265 /* Hooks for I/O locking */
266
267 static void caml_io_mutex_free(struct channel *chan)
268 {
269 st_mutex mutex = chan->mutex;
270 if (mutex != NULL) {
271 st_mutex_destroy(mutex);
272 chan->mutex = NULL;
273 }
274 }
275
/* caml_channel_mutex_lock hook: lock [chan]'s mutex, creating it
   lazily on first use.  Records the channel in thread-local storage
   so that caml_io_mutex_unlock_exn can release it if an exception
   escapes while the lock is held. */
static void caml_io_mutex_lock(struct channel *chan)
{
  st_mutex mutex = chan->mutex;

  if (mutex == NULL) {
    st_check_error(st_mutex_create(&mutex), "channel locking"); /*PR#7038*/
    chan->mutex = mutex;
  }
  /* PR#4351: first try to acquire mutex without releasing the master lock */
  if (st_mutex_trylock(mutex) == PREVIOUSLY_UNLOCKED) {
    st_tls_set(last_channel_locked_key, (void *) chan);
    return;
  }
  /* If unsuccessful, block on mutex */
  caml_enter_blocking_section();
  st_mutex_lock(mutex);
  /* Problem: if a signal occurs at this point,
     and the signal handler raises an exception, we will not
     unlock the mutex. The alternative (doing the setspecific
     before locking the mutex is also incorrect, since we could
     then unlock a mutex that is unlocked or locked by someone else. */
  st_tls_set(last_channel_locked_key, (void *) chan);
  caml_leave_blocking_section();
}
300
301 static void caml_io_mutex_unlock(struct channel *chan)
302 {
303 st_mutex_unlock(chan->mutex);
304 st_tls_set(last_channel_locked_key, NULL);
305 }
306
307 static void caml_io_mutex_unlock_exn(void)
308 {
309 struct channel * chan = st_tls_get(last_channel_locked_key);
310 if (chan != NULL) caml_io_mutex_unlock(chan);
311 }
312
313 /* Hook for estimating stack usage */
314
315 static uintnat (*prev_stack_usage_hook)(void);
316
317 static uintnat caml_thread_stack_usage(void)
318 {
319 uintnat sz;
320 caml_thread_t th;
321
322 /* Don't add stack for current thread, this is done elsewhere */
323 for (sz = 0, th = curr_thread->next;
324 th != curr_thread;
325 th = th->next) {
326 #ifdef NATIVE_CODE
327 if(th->top_of_stack != NULL && th->bottom_of_stack != NULL &&
328 th->top_of_stack > th->bottom_of_stack)
329 sz += (value *) th->top_of_stack - (value *) th->bottom_of_stack;
330 #else
331 sz += th->stack_high - th->sp;
332 #endif
333 }
334 if (prev_stack_usage_hook != NULL)
335 sz += prev_stack_usage_hook();
336 return sz;
337 }
338
/* Create and setup a new thread info block.
   This block has no associated thread descriptor and
   is not inserted in the list of threads.
   Returns NULL if the block itself cannot be allocated.  (In bytecode,
   stack allocation goes through caml_stat_alloc, which does not return
   on failure.) */

static caml_thread_t caml_thread_new_info(void)
{
  caml_thread_t th;
  th = (caml_thread_t) caml_stat_alloc_noexc(sizeof(struct caml_thread_struct));
  if (th == NULL) return NULL;
  th->descr = Val_unit;         /* filled later */
#ifdef NATIVE_CODE
  th->bottom_of_stack = NULL;
  th->top_of_stack = NULL;
  th->last_retaddr = 1;
  th->exception_pointer = NULL;
  th->local_roots = NULL;
  th->exit_buf = NULL;
#ifdef WITH_SPACETIME
  /* CR-someday mshinwell: The commented-out changes here are for multicore,
     where we think we should have one trie per domain. */
  th->internal_spacetime_trie_root = Val_unit;
  th->spacetime_trie_node_ptr =
    &caml_spacetime_trie_root; /* &th->internal_spacetime_trie_root; */
  th->internal_spacetime_finaliser_trie_root = Val_unit;
  th->spacetime_finaliser_trie_root
    = caml_spacetime_finaliser_trie_root;
  /* &th->internal_spacetime_finaliser_trie_root; */
  caml_spacetime_register_thread(
    th->spacetime_trie_node_ptr,
    th->spacetime_finaliser_trie_root);
#endif
#else
  /* Allocate the stacks; sp/trapsp start at the high end and grow down */
  th->stack_low = (value *) caml_stat_alloc(Thread_stack_size);
  th->stack_high = th->stack_low + Thread_stack_size / sizeof(value);
  th->stack_threshold = th->stack_low + Stack_threshold / sizeof(value);
  th->sp = th->stack_high;
  th->trapsp = th->stack_high;
  th->local_roots = NULL;
  th->external_raise = NULL;
#endif
  th->backtrace_pos = 0;
  th->backtrace_buffer = NULL;
  th->backtrace_last_exn = Val_unit;
  th->memprof_suspended = 0;
  return th;
}
386
/* Allocate a thread descriptor block on the OCaml heap: a 3-field
   block holding a fresh integer id, the start closure [clos], and a
   newly created termination event.  [clos] and the event are rooted
   across the allocations. */

static value caml_thread_new_descriptor(value clos)
{
  value mu = Val_unit;
  value descr;
  Begin_roots2 (clos, mu)
    /* Create and initialize the termination semaphore */
    mu = caml_threadstatus_new();
    /* Create a descriptor for the new thread */
    descr = caml_alloc_small(3, 0);
    Ident(descr) = Val_long(thread_next_ident);
    Start_closure(descr) = clos;
    Terminated(descr) = mu;
    thread_next_ident++;
  End_roots();
  return descr;
}
405
/* Remove a thread info block from the list of threads.
   Free it and its stack resources. */

static void caml_thread_remove_info(caml_thread_t th)
{
  if (th->next == th)
    all_threads = NULL;         /* last OCaml thread exiting */
  else if (all_threads == th)
    all_threads = th->next;     /* PR#5295 */
  /* Unlink [th] from the circular doubly-linked list */
  th->next->prev = th->prev;
  th->prev->next = th->next;
#ifndef NATIVE_CODE
  caml_stat_free(th->stack_low);
#endif
  if (th->backtrace_buffer != NULL) caml_stat_free(th->backtrace_buffer);
#ifndef WITH_SPACETIME
  caml_stat_free(th);
  /* CR-soon mshinwell: consider what to do about the Spacetime trace. Could
     perhaps have a hook to save a snapshot on thread termination.
     For the moment we can't even free [th], since it contains the trie
     roots. */
#endif
}
429
430 /* Reinitialize the thread machinery after a fork() (PR#4577) */
431
432 static void caml_thread_reinitialize(void)
433 {
434 caml_thread_t thr, next;
435 struct channel * chan;
436
437 /* Remove all other threads (now nonexistent)
438 from the doubly-linked list of threads */
439 thr = curr_thread->next;
440 while (thr != curr_thread) {
441 next = thr->next;
442 caml_stat_free(thr);
443 thr = next;
444 }
445 curr_thread->next = curr_thread;
446 curr_thread->prev = curr_thread;
447 all_threads = curr_thread;
448 /* Reinitialize the master lock machinery,
449 just in case the fork happened while other threads were doing
450 caml_leave_blocking_section */
451 st_masterlock_init(&caml_master_lock);
452 /* Tick thread is not currently running in child process, will be
453 re-created at next Thread.create */
454 caml_tick_thread_running = 0;
455 /* Destroy all IO mutexes; will be reinitialized on demand */
456 for (chan = caml_all_opened_channels;
457 chan != NULL;
458 chan = chan->next) {
459 if (chan->mutex != NULL) {
460 st_mutex_destroy(chan->mutex);
461 chan->mutex = NULL;
462 }
463 }
464 }
465
/* Initialize the thread machinery: OS layer, master lock, TLS keys,
   the info block for the calling thread, and all the runtime hooks
   (root scanning, blocking sections, channel locking, stack usage,
   fork handling).  Idempotent: subsequent calls return immediately.
   NOTE(review): appears intended to be called from the main thread
   before any Thread.create — confirm against the OCaml-side caller. */

CAMLprim value caml_thread_initialize(value unit)   /* ML */
{
  /* Protect against repeated initialization (PR#3532) */
  if (curr_thread != NULL) return Val_unit;
  /* OS-specific initialization */
  st_initialize();
  /* Initialize and acquire the master lock */
  st_masterlock_init(&caml_master_lock);
  /* Initialize the keys */
  st_tls_newkey(&thread_descriptor_key);
  st_tls_newkey(&last_channel_locked_key);
  /* Set up a thread info block for the current thread */
  curr_thread =
    (caml_thread_t) caml_stat_alloc(sizeof(struct caml_thread_struct));
  curr_thread->descr = caml_thread_new_descriptor(Val_unit);
  curr_thread->next = curr_thread;
  curr_thread->prev = curr_thread;
  all_threads = curr_thread;
  curr_thread->backtrace_last_exn = Val_unit;
#ifdef NATIVE_CODE
  curr_thread->exit_buf = &caml_termination_jmpbuf;
#endif
  /* The stack-related fields will be filled in at the next
     caml_enter_blocking_section */
  /* Associate the thread descriptor with the thread */
  st_tls_set(thread_descriptor_key, (void *) curr_thread);
  /* Set up the hooks */
  prev_scan_roots_hook = caml_scan_roots_hook;
  caml_scan_roots_hook = caml_thread_scan_roots;
  caml_enter_blocking_section_hook = caml_thread_enter_blocking_section;
  caml_leave_blocking_section_hook = caml_thread_leave_blocking_section;
  caml_try_leave_blocking_section_hook = caml_thread_try_leave_blocking_section;
#ifdef NATIVE_CODE
  caml_termination_hook = st_thread_exit;
#endif
  caml_channel_mutex_free = caml_io_mutex_free;
  caml_channel_mutex_lock = caml_io_mutex_lock;
  caml_channel_mutex_unlock = caml_io_mutex_unlock;
  caml_channel_mutex_unlock_exn = caml_io_mutex_unlock_exn;
  prev_stack_usage_hook = caml_stack_usage_hook;
  caml_stack_usage_hook = caml_thread_stack_usage;
  /* Set up fork() to reinitialize the thread machinery in the child
     (PR#4577) */
  st_atfork(caml_thread_reinitialize);
  return Val_unit;
}
514
515 /* Cleanup the thread machinery when the runtime is shut down. Joining the tick
516 thread take 25ms on average / 50ms in the worst case, so we don't do it on
517 program exit. */
518
519 CAMLprim value caml_thread_cleanup(value unit) /* ML */
520 {
521 if (caml_tick_thread_running){
522 caml_tick_thread_stop = 1;
523 st_thread_join(caml_tick_thread_id);
524 caml_tick_thread_stop = 0;
525 caml_tick_thread_running = 0;
526 }
527 return Val_unit;
528 }
529
/* Thread cleanup at termination: signal the termination event so
   joiners wake up, unlink and free this thread's info block, perform
   OS-specific cleanup, and hand the runtime to another thread. */

static void caml_thread_stop(void)
{
  /* PR#5188, PR#7220: some of the global runtime state may have
     changed as the thread was running, so we save it in the
     curr_thread data to make sure that the cleanup logic
     below uses accurate information. */
  caml_thread_save_runtime_state();
  /* Signal that the thread has terminated */
  caml_threadstatus_terminate(Terminated(curr_thread->descr));
  /* Remove th from the doubly-linked list of threads and free its info block */
  caml_thread_remove_info(curr_thread);
  /* OS-specific cleanups */
  st_thread_cleanup();
  /* Release the runtime system */
  st_masterlock_release(&caml_master_lock);
}
548
/* Create a thread */

/* Entry point of every thread created by Thread.create.  [arg] is the
   thread's info block.  Acquires the runtime, runs the start closure,
   then cleans up via caml_thread_stop.  In native code, a sigsetjmp
   here provides the landing point that caml_thread_exit longjmps to. */
static ST_THREAD_FUNCTION caml_thread_start(void * arg)
{
  caml_thread_t th = (caml_thread_t) arg;
  value clos;
#ifdef NATIVE_CODE
  struct longjmp_buffer termination_buf;
  char tos;
  /* Record top of stack (approximative) */
  th->top_of_stack = &tos;
#endif

  /* Associate the thread descriptor with the thread */
  st_tls_set(thread_descriptor_key, (void *) th);
  /* Acquire the global mutex */
  caml_leave_blocking_section();
  caml_setup_stack_overflow_detection();
#ifdef NATIVE_CODE
  /* Setup termination handler (for caml_thread_exit) */
  if (sigsetjmp(termination_buf.buf, 0) == 0) {
    th->exit_buf = &termination_buf;
#endif
    /* Callback the closure; clear it in the descriptor first so the
       closure does not stay reachable for the thread's lifetime */
    clos = Start_closure(th->descr);
    caml_modify(&(Start_closure(th->descr)), Val_unit);
    caml_callback_exn(clos, Val_unit);
    caml_thread_stop();
#ifdef NATIVE_CODE
  }
#endif
  /* The thread now stops running */
  return 0;
}
583
/* Thread.create: build the info block and descriptor for a new thread,
   insert it in the list after the current thread, then spawn the
   system thread (and the tick thread, on first use).  Raises
   Out_of_memory or the error reported by st_check_error on failure. */
CAMLprim value caml_thread_new(value clos)          /* ML */
{
  caml_thread_t th;
  st_retcode err;

  /* Create a thread info block */
  th = caml_thread_new_info();
  if (th == NULL) caml_raise_out_of_memory();
  /* Equip it with a thread descriptor */
  th->descr = caml_thread_new_descriptor(clos);
  /* Add thread info block to the list of threads */
  th->next = curr_thread->next;
  th->prev = curr_thread;
  curr_thread->next->prev = th;
  curr_thread->next = th;
  /* Create the new thread */
  err = st_thread_create(NULL, caml_thread_start, (void *) th);
  if (err != 0) {
    /* Creation failed, remove thread info block from list of threads */
    caml_thread_remove_info(th);
    st_check_error(err, "Thread.create");
  }
  /* Create the tick thread if not already done.
     Because of PR#4666, we start the tick thread late, only when we create
     the first additional thread in the current process*/
  if (! caml_tick_thread_running) {
    err = st_thread_create(&caml_tick_thread_id, caml_thread_tick, NULL);
    st_check_error(err, "Thread.create");
    caml_tick_thread_running = 1;
  }
  return th->descr;
}
616
/* Register a thread already created from C.
   Returns 1 on success, 0 if the thread was already registered or the
   info block could not be allocated.  On exit the thread is OUTSIDE
   the runtime (in a blocking section), mirroring the state of any C
   thread about to call into OCaml. */

CAMLexport int caml_c_thread_register(void)
{
  caml_thread_t th;
  st_retcode err;

  /* Already registered? */
  if (st_tls_get(thread_descriptor_key) != NULL) return 0;
  /* Create a thread info block */
  th = caml_thread_new_info();
  if (th == NULL) return 0;
#ifdef NATIVE_CODE
  /* Use the address of a local as an approximation of the stack top */
  th->top_of_stack = (char *) &err;
#endif
  /* Take master lock to protect access to the chaining of threads */
  st_masterlock_acquire(&caml_master_lock);
  /* Add thread info block to the list of threads */
  if (all_threads == NULL) {
    th->next = th;
    th->prev = th;
    all_threads = th;
  } else {
    th->next = all_threads->next;
    th->prev = all_threads;
    all_threads->next->prev = th;
    all_threads->next = th;
  }
  /* Associate the thread descriptor with the thread */
  st_tls_set(thread_descriptor_key, (void *) th);
  /* Release the master lock */
  st_masterlock_release(&caml_master_lock);
  /* Now we can re-enter the run-time system and heap-allocate the descriptor */
  caml_leave_blocking_section();
  th->descr = caml_thread_new_descriptor(Val_unit);  /* no closure */
  /* Create the tick thread if not already done. */
  if (! caml_tick_thread_running) {
    err = st_thread_create(&caml_tick_thread_id, caml_thread_tick, NULL);
    if (err == 0) caml_tick_thread_running = 1;
  }
  /* Exit the run-time system */
  caml_enter_blocking_section();
  return 1;
}
661
662 /* Unregister a thread that was created from C and registered with
663 the function above */
664
665 CAMLexport int caml_c_thread_unregister(void)
666 {
667 caml_thread_t th = st_tls_get(thread_descriptor_key);
668 /* Not registered? */
669 if (th == NULL) return 0;
670 /* Wait until the runtime is available */
671 st_masterlock_acquire(&caml_master_lock);
672 /* Forget the thread descriptor */
673 st_tls_set(thread_descriptor_key, NULL);
674 /* Remove thread info block from list of threads, and free it */
675 caml_thread_remove_info(th);
676 /* Release the runtime */
677 st_masterlock_release(&caml_master_lock);
678 return 1;
679 }
680
/* Return the current thread */

/* Thread.self: return the descriptor of the calling thread.
   Fails if caml_thread_initialize was never called. */
CAMLprim value caml_thread_self(value unit)         /* ML */
{
  if (curr_thread == NULL)
    caml_invalid_argument("Thread.self: not initialized");
  return curr_thread->descr;
}
689
/* Return the identifier of a thread */

/* Thread.id: extract the unique integer id (already a tagged OCaml
   int) from a thread descriptor. */
CAMLprim value caml_thread_id(value th)          /* ML */
{
  return Ident(th);
}
696
697 /* Print uncaught exception and backtrace */
698
699 CAMLprim value caml_thread_uncaught_exception(value exn) /* ML */
700 {
701 char * msg = caml_format_exception(exn);
702 fprintf(stderr, "Thread %d killed on uncaught exception %s\n",
703 Int_val(Ident(curr_thread->descr)), msg);
704 caml_stat_free(msg);
705 if (Caml_state->backtrace_active) caml_print_exception_backtrace();
706 fflush(stderr);
707 return Val_unit;
708 }
709
/* Terminate current thread */

/* Thread.exit: run the termination logic (caml_thread_stop), then
   leave the thread.  Never returns normally. */
CAMLprim value caml_thread_exit(value unit)   /* ML */
{
  struct longjmp_buffer * exit_buf = NULL;

  if (curr_thread == NULL)
    caml_invalid_argument("Thread.exit: not initialized");

  /* In native code, we cannot call pthread_exit here because on some
     systems this raises a C++ exception, and ocamlopt-generated stack
     frames cannot be unwound.  Instead, we longjmp to the thread
     creation point (in caml_thread_start) or to the point in
     caml_main where caml_termination_hook will be called.
     Note that threads created in C then registered do not have
     a creation point (exit_buf == NULL).
 */
#ifdef NATIVE_CODE
  exit_buf = curr_thread->exit_buf;
#endif
  caml_thread_stop();
  if (exit_buf != NULL) {
    /* Native-code and (main thread or thread created by OCaml) */
    siglongjmp(exit_buf->buf, 1);
  } else {
    /* Bytecode, or thread created from C */
    st_thread_exit();
  }
  return Val_unit;  /* not reached */
}
740
/* Allow re-scheduling */

/* Thread.yield: give the runtime to another waiting thread, if any.
   Does the save/restore of a blocking section by hand around
   st_thread_yield, and processes pending signals on both sides. */
CAMLprim value caml_thread_yield(value unit)        /* ML */
{
  /* Fast path: nobody is waiting for the master lock, nothing to do */
  if (st_masterlock_waiters(&caml_master_lock) == 0) return Val_unit;

  /* Do all the parts of a blocking section enter/leave except lock
     manipulation, which we'll do more efficiently in st_thread_yield. (Since
     our blocking section doesn't contain anything interesting, don't bother
     with saving errno.)
  */
  caml_raise_if_exception(caml_process_pending_signals_exn());
  caml_thread_save_runtime_state();
  st_thread_yield(&caml_master_lock);
  /* Another thread may have run; refresh curr_thread from TLS */
  curr_thread = st_tls_get(thread_descriptor_key);
  caml_thread_restore_runtime_state();
  caml_raise_if_exception(caml_process_pending_signals_exn());

  return Val_unit;
}
761
762 /* Suspend the current thread until another thread terminates */
763
764 CAMLprim value caml_thread_join(value th) /* ML */
765 {
766 st_retcode rc = caml_threadstatus_wait(Terminated(th));
767 st_check_error(rc, "Thread.join");
768 return Val_unit;
769 }
770
771 /* Mutex operations */
772
773 #define Mutex_val(v) (* ((st_mutex *) Data_custom_val(v)))
774
775 static void caml_mutex_finalize(value wrapper)
776 {
777 st_mutex_destroy(Mutex_val(wrapper));
778 }
779
780 static int caml_mutex_compare(value wrapper1, value wrapper2)
781 {
782 st_mutex mut1 = Mutex_val(wrapper1);
783 st_mutex mut2 = Mutex_val(wrapper2);
784 return mut1 == mut2 ? 0 : mut1 < mut2 ? -1 : 1;
785 }
786
787 static intnat caml_mutex_hash(value wrapper)
788 {
789 return (intnat) (Mutex_val(wrapper));
790 }
791
/* Custom-block operations for Mutex.t: finalized, compared and hashed
   by the address of the system mutex; serialization uses the defaults
   (i.e. it is not supported). */
static struct custom_operations caml_mutex_ops = {
  "_mutex",
  caml_mutex_finalize,
  caml_mutex_compare,
  caml_mutex_hash,
  custom_serialize_default,
  custom_deserialize_default,
  custom_compare_ext_default,
  custom_fixed_length_default
};
802
/* Mutex.create: allocate a system mutex and wrap it in a custom
   block.  The (0, 1) used/max arguments mean the wrapper adds no
   out-of-heap pressure to the GC pacing. */
CAMLprim value caml_mutex_new(value unit)        /* ML */
{
  st_mutex mut = NULL;          /* suppress warning */
  value wrapper;
  st_check_error(st_mutex_create(&mut), "Mutex.create");
  wrapper = caml_alloc_custom(&caml_mutex_ops, sizeof(st_mutex *),
                              0, 1);
  Mutex_val(wrapper) = mut;
  return wrapper;
}
813
/* Mutex.lock: acquire the mutex, releasing the runtime lock only if
   we actually have to block.  The wrapper is rooted while blocked so
   the finalizer cannot destroy the mutex under us. */
CAMLprim value caml_mutex_lock(value wrapper)     /* ML */
{
  st_mutex mut = Mutex_val(wrapper);
  st_retcode retcode;

  /* PR#4351: first try to acquire mutex without releasing the master lock */
  if (st_mutex_trylock(mut) == PREVIOUSLY_UNLOCKED) return Val_unit;
  /* If unsuccessful, block on mutex */
  Begin_root(wrapper)           /* prevent the deallocation of mutex */
    caml_enter_blocking_section();
    retcode = st_mutex_lock(mut);
    caml_leave_blocking_section();
  End_roots();
  st_check_error(retcode, "Mutex.lock");
  return Val_unit;
}
830
831 CAMLprim value caml_mutex_unlock(value wrapper) /* ML */
832 {
833 st_mutex mut = Mutex_val(wrapper);
834 st_retcode retcode;
835 /* PR#4351: no need to release and reacquire master lock */
836 retcode = st_mutex_unlock(mut);
837 st_check_error(retcode, "Mutex.unlock");
838 return Val_unit;
839 }
840
841 CAMLprim value caml_mutex_try_lock(value wrapper) /* ML */
842 {
843 st_mutex mut = Mutex_val(wrapper);
844 st_retcode retcode;
845 retcode = st_mutex_trylock(mut);
846 if (retcode == ALREADY_LOCKED) return Val_false;
847 st_check_error(retcode, "Mutex.try_lock");
848 return Val_true;
849 }
850
851 /* Conditions operations */
852
853 #define Condition_val(v) (* (st_condvar *) Data_custom_val(v))
854
855 static void caml_condition_finalize(value wrapper)
856 {
857 st_condvar_destroy(Condition_val(wrapper));
858 }
859
860 static int caml_condition_compare(value wrapper1, value wrapper2)
861 {
862 st_condvar cond1 = Condition_val(wrapper1);
863 st_condvar cond2 = Condition_val(wrapper2);
864 return cond1 == cond2 ? 0 : cond1 < cond2 ? -1 : 1;
865 }
866
867 static intnat caml_condition_hash(value wrapper)
868 {
869 return (intnat) (Condition_val(wrapper));
870 }
871
/* Custom-block operations for Condition.t: finalized, compared and
   hashed by the address of the system condition variable; serialization
   uses the defaults (i.e. it is not supported). */
static struct custom_operations caml_condition_ops = {
  "_condition",
  caml_condition_finalize,
  caml_condition_compare,
  caml_condition_hash,
  custom_serialize_default,
  custom_deserialize_default,
  custom_compare_ext_default,
  custom_fixed_length_default
};
882
/* Condition.create: allocate a system condition variable and wrap it
   in a custom block (no GC pressure, same scheme as Mutex.create). */
CAMLprim value caml_condition_new(value unit)        /* ML */
{
  st_condvar cond = NULL;       /* suppress warning */
  value wrapper;
  st_check_error(st_condvar_create(&cond), "Condition.create");
  wrapper = caml_alloc_custom(&caml_condition_ops, sizeof(st_condvar *),
                              0, 1);
  Condition_val(wrapper) = cond;
  return wrapper;
}
893
/* Condition.wait: atomically release [wmut] and wait on [wcond],
   inside a blocking section so other OCaml threads can run.  Both
   wrappers are rooted so their finalizers cannot run while blocked. */
CAMLprim value caml_condition_wait(value wcond, value wmut)           /* ML */
{
  st_condvar cond = Condition_val(wcond);
  st_mutex mut = Mutex_val(wmut);
  st_retcode retcode;

  Begin_roots2(wcond, wmut)     /* prevent deallocation of cond and mutex */
    caml_enter_blocking_section();
    retcode = st_condvar_wait(cond, mut);
    caml_leave_blocking_section();
  End_roots();
  st_check_error(retcode, "Condition.wait");
  return Val_unit;
}
908
909 CAMLprim value caml_condition_signal(value wrapper) /* ML */
910 {
911 st_check_error(st_condvar_signal(Condition_val(wrapper)),
912 "Condition.signal");
913 return Val_unit;
914 }
915
916 CAMLprim value caml_condition_broadcast(value wrapper) /* ML */
917 {
918 st_check_error(st_condvar_broadcast(Condition_val(wrapper)),
919 "Condition.broadcast");
920 return Val_unit;
921 }
922
923 /* Thread status blocks */
924
925 #define Threadstatus_val(v) (* ((st_event *) Data_custom_val(v)))
926
927 static void caml_threadstatus_finalize(value wrapper)
928 {
929 st_event_destroy(Threadstatus_val(wrapper));
930 }
931
932 static int caml_threadstatus_compare(value wrapper1, value wrapper2)
933 {
934 st_event ts1 = Threadstatus_val(wrapper1);
935 st_event ts2 = Threadstatus_val(wrapper2);
936 return ts1 == ts2 ? 0 : ts1 < ts2 ? -1 : 1;
937 }
938
/* Custom-block operations for thread-status values: finalized and
   compared by event address; hashing and serialization use the
   defaults. */
static struct custom_operations caml_threadstatus_ops = {
  "_threadstatus",
  caml_threadstatus_finalize,
  caml_threadstatus_compare,
  custom_hash_default,
  custom_serialize_default,
  custom_deserialize_default,
  custom_compare_ext_default,
  custom_fixed_length_default
};
949
/* Allocate a fresh thread-status value: a system event wrapped in a
   custom block, stored in the Terminated field of a thread descriptor
   and triggered when the thread ends. */
static value caml_threadstatus_new (void)
{
  st_event ts = NULL;           /* suppress warning */
  value wrapper;
  st_check_error(st_event_create(&ts), "Thread.create");
  wrapper = caml_alloc_custom(&caml_threadstatus_ops, sizeof(st_event *),
                              0, 1);
  Threadstatus_val(wrapper) = ts;
  return wrapper;
}
960
961 static void caml_threadstatus_terminate (value wrapper)
962 {
963 st_event_trigger(Threadstatus_val(wrapper));
964 }
965
/* Block until the termination event of [wrapper] is triggered, inside
   a blocking section so other threads can run.  The wrapper is rooted
   so its finalizer cannot destroy the event while we wait.  Returns
   the st_event_wait status for the caller to check. */
static st_retcode caml_threadstatus_wait (value wrapper)
{
  st_event ts = Threadstatus_val(wrapper);
  st_retcode retcode;

  Begin_roots1(wrapper)         /* prevent deallocation of ts */
    caml_enter_blocking_section();
    retcode = st_event_wait(ts);
    caml_leave_blocking_section();
  End_roots();
  return retcode;
}
978