Author: Armin Rigo <[email protected]>
Branch: stmgc-c7
Changeset: r73767:d0263e2a9370
Date: 2014-10-04 17:42 +0200
http://bitbucket.org/pypy/pypy/changeset/d0263e2a9370/
Log: import stmgc/8f88cdb1d916
diff --git a/rpython/translator/stm/src_stm/revision b/rpython/translator/stm/src_stm/revision
--- a/rpython/translator/stm/src_stm/revision
+++ b/rpython/translator/stm/src_stm/revision
@@ -1,1 +1,1 @@
-83e4c655d31b
+8f88cdb1d916
diff --git a/rpython/translator/stm/src_stm/stm/contention.c b/rpython/translator/stm/src_stm/stm/contention.c
--- a/rpython/translator/stm/src_stm/stm/contention.c
+++ b/rpython/translator/stm/src_stm/stm/contention.c
@@ -4,34 +4,50 @@
#endif
-enum contention_kind_e {
+/* Here are the possible kinds of contention:
- /* A write-write contention occurs when we running our transaction
- and detect that we are about to write to an object that another
- thread is also writing to. This kind of contention must be
- resolved before continuing. This *must* abort one of the two
- threads: the caller's thread is not at a safe-point, so cannot
- wait! */
- WRITE_WRITE_CONTENTION,
+ STM_CONTENTION_WRITE_WRITE
- /* A write-read contention occurs when we are trying to commit: it
+ A write-write contention occurs when we are running our
+ transaction and detect that we are about to write to an object
+ that another thread is also writing to. This kind of
+ contention must be resolved before continuing. This *must*
+ abort one of the two threads: the caller's thread is not at a
+ safe-point, so cannot wait!
+
+ It is reported as a timing event with the following two markers:
+ the current thread (i.e. where the second-in-time write occurs);
+ and the other thread (from its 'modified_old_objects_markers',
+ where the first-in-time write occurred).
+
+ STM_CONTENTION_WRITE_READ
+
+ A write-read contention occurs when we are trying to commit: it
means that an object we wrote to was also read by another
transaction. Even though it would seem obvious that we should
just abort the other thread and proceed in our commit, a more
subtle answer would be in some cases to wait for the other thread
to commit first. It would commit having read the old value, and
- then we can commit our change to it. */
- WRITE_READ_CONTENTION,
+ then we can commit our change to it.
- /* An inevitable contention occurs when we're trying to become
+ It is reported as a timing event with only one marker: the
+ older location of the write that was done by the current thread.
+
+ STM_CONTENTION_INEVITABLE
+
+ An inevitable contention occurs when we're trying to become
inevitable but another thread already is. We can never abort the
other thread in this case, but we still have the choice to abort
- ourselves or pause until the other thread commits. */
- INEVITABLE_CONTENTION,
-};
+ ourselves or pause until the other thread commits.
+
+ It is reported with two markers, one for the current thread and
+ one for the other thread. Each marker gives the location that
+ attempts to make the transaction inevitable.
+*/
+
struct contmgr_s {
- enum contention_kind_e kind;
+ enum stm_event_e kind;
struct stm_priv_segment_info_s *other_pseg;
bool abort_other;
bool try_sleep; // XXX add a way to timeout, but should handle repeated
@@ -100,7 +116,7 @@
static bool contention_management(uint8_t other_segment_num,
- enum contention_kind_e kind,
+ enum stm_event_e kind,
object_t *obj)
{
assert(_has_mutex());
@@ -110,6 +126,9 @@
if (must_abort())
abort_with_mutex();
+ /* Report the contention */
+ timing_contention(kind, other_segment_num, obj);
+
/* Who should abort here: this thread, or the other thread? */
struct contmgr_s contmgr;
contmgr.kind = kind;
@@ -139,20 +158,9 @@
contmgr.abort_other = false;
}
-
- int wait_category =
- kind == WRITE_READ_CONTENTION ? STM_TIME_WAIT_WRITE_READ :
- kind == INEVITABLE_CONTENTION ? STM_TIME_WAIT_INEVITABLE :
- STM_TIME_WAIT_OTHER;
-
- int abort_category =
- kind == WRITE_WRITE_CONTENTION ? STM_TIME_RUN_ABORTED_WRITE_WRITE :
- kind == WRITE_READ_CONTENTION ? STM_TIME_RUN_ABORTED_WRITE_READ :
- kind == INEVITABLE_CONTENTION ? STM_TIME_RUN_ABORTED_INEVITABLE :
- STM_TIME_RUN_ABORTED_OTHER;
-
-
- if (contmgr.try_sleep && kind != WRITE_WRITE_CONTENTION &&
+ /* Do one of three things here...
+ */
+ if (contmgr.try_sleep && kind != STM_CONTENTION_WRITE_WRITE &&
contmgr.other_pseg->safe_point != SP_WAIT_FOR_C_TRANSACTION_DONE) {
others_may_have_run = true;
/* Sleep.
@@ -165,14 +173,12 @@
itself already paused here.
*/
contmgr.other_pseg->signal_when_done = true;
- marker_contention(kind, false, other_segment_num, obj);
-
- change_timing_state(wait_category);
/* tell the other to commit ASAP */
signal_other_to_commit_soon(contmgr.other_pseg);
dprintf(("pausing...\n"));
+
cond_signal(C_AT_SAFE_POINT);
STM_PSEGMENT->safe_point = SP_WAIT_FOR_C_TRANSACTION_DONE;
cond_wait(C_TRANSACTION_DONE);
@@ -181,14 +187,6 @@
if (must_abort())
abort_with_mutex();
-
- struct stm_priv_segment_info_s *pseg =
- get_priv_segment(STM_SEGMENT->segment_num);
- double elapsed =
- change_timing_state_tl(pseg->pub.running_thread,
- STM_TIME_RUN_CURRENT);
- marker_copy(pseg->pub.running_thread, pseg,
- wait_category, elapsed);
}
else if (!contmgr.abort_other) {
@@ -196,16 +194,13 @@
signal_other_to_commit_soon(contmgr.other_pseg);
dprintf(("abort in contention: kind %d\n", kind));
- STM_SEGMENT->nursery_end = abort_category;
- marker_contention(kind, false, other_segment_num, obj);
abort_with_mutex();
}
else {
/* We have to signal the other thread to abort, and wait until
it does. */
- contmgr.other_pseg->pub.nursery_end = abort_category;
- marker_contention(kind, true, other_segment_num, obj);
+ contmgr.other_pseg->pub.nursery_end = NSE_SIGABORT;
int sp = contmgr.other_pseg->safe_point;
switch (sp) {
@@ -297,7 +292,8 @@
assert(get_priv_segment(other_segment_num)->write_lock_num ==
prev_owner);
- contention_management(other_segment_num, WRITE_WRITE_CONTENTION, obj);
+ contention_management(other_segment_num,
+ STM_CONTENTION_WRITE_WRITE, obj);
/* now we return into _stm_write_slowpath() and will try again
to acquire the write lock on our object. */
@@ -309,10 +305,12 @@
static bool write_read_contention_management(uint8_t other_segment_num,
object_t *obj)
{
- return contention_management(other_segment_num, WRITE_READ_CONTENTION,
- obj);
+ return contention_management(other_segment_num,
+ STM_CONTENTION_WRITE_READ, obj);
}
static void inevitable_contention_management(uint8_t other_segment_num)
{
- contention_management(other_segment_num, INEVITABLE_CONTENTION, NULL);
+ contention_management(other_segment_num,
+ STM_CONTENTION_INEVITABLE, NULL);
}
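The comment block added above explains how each contention kind is reported through the new timing-event mechanism, with markers describing the two threads involved. As an illustration of the consumer side, here is a minimal sketch of a callback that logs write-write contentions; it relies only on the stmcb_timing_event type and the stm_loc_marker_t fields introduced later in this changeset, and the logging itself is purely illustrative:

    #include <stdio.h>
    #include <stdint.h>
    #include "stmgc.h"

    /* Sketch: log both markers of a write-write contention.  markers[0]
       describes the current thread (second-in-time write); markers[1] comes
       from the other thread's 'modified_old_objects_markers' (first-in-time
       write).  A marker with odd_number == 0 means "no marker recorded". */
    static void log_ww_contention(stm_thread_local_t *tl,
                                  enum stm_event_e event,
                                  stm_loc_marker_t *markers)
    {
        (void)tl;
        if (event != STM_CONTENTION_WRITE_WRITE || markers == NULL)
            return;
        fprintf(stderr, "write-write contention: here %lu/%#lx, other %lu/%#lx\n",
                (unsigned long)markers[0].odd_number,
                (unsigned long)(uintptr_t)markers[0].object,
                (unsigned long)markers[1].odd_number,
                (unsigned long)(uintptr_t)markers[1].object);
    }
    /* installed once at startup with:  stmcb_timing_event = &log_ww_contention;  */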
diff --git a/rpython/translator/stm/src_stm/stm/core.c b/rpython/translator/stm/src_stm/stm/core.c
--- a/rpython/translator/stm/src_stm/stm/core.c
+++ b/rpython/translator/stm/src_stm/stm/core.c
@@ -125,17 +125,13 @@
dprintf_test(("write_slowpath %p -> mod_old\n", obj));
- /* First change to this old object from this transaction.
+ /* Add the current marker, recording where we wrote to this object */
+ timing_record_write();
+
+ /* Change to this old object from this transaction.
Add it to the list 'modified_old_objects'. */
LIST_APPEND(STM_PSEGMENT->modified_old_objects, obj);
- /* Add the current marker, recording where we wrote to this object */
- uintptr_t marker[2];
- marker_fetch(STM_SEGMENT->running_thread, marker);
- STM_PSEGMENT->modified_old_objects_markers =
- list_append2(STM_PSEGMENT->modified_old_objects_markers,
- marker[0], marker[1]);
-
release_marker_lock(STM_SEGMENT->segment_base);
/* We need to privatize the pages containing the object, if they
@@ -329,29 +325,24 @@
STM_SEGMENT->transaction_read_version = 1;
}
-static void _stm_start_transaction(stm_thread_local_t *tl, bool inevitable)
+static uint64_t _global_start_time = 0;
+
+static void _stm_start_transaction(stm_thread_local_t *tl)
{
assert(!_stm_in_transaction(tl));
- retry:
- if (inevitable) {
- wait_for_end_of_inevitable_transaction(tl);
- }
-
- if (!acquire_thread_segment(tl))
- goto retry;
+ while (!acquire_thread_segment(tl))
+ ;
/* GS invalid before this point! */
assert(STM_PSEGMENT->safe_point == SP_NO_TRANSACTION);
assert(STM_PSEGMENT->transaction_state == TS_NONE);
- change_timing_state(STM_TIME_RUN_CURRENT);
- STM_PSEGMENT->start_time = tl->_timing_cur_start;
+ timing_event(tl, STM_TRANSACTION_START);
+ STM_PSEGMENT->start_time = _global_start_time++;
STM_PSEGMENT->signalled_to_commit_soon = false;
STM_PSEGMENT->safe_point = SP_RUNNING;
- STM_PSEGMENT->marker_inev[1] = 0;
- if (inevitable)
- marker_fetch_inev();
- STM_PSEGMENT->transaction_state = (inevitable ? TS_INEVITABLE :
- TS_REGULAR);
+ STM_PSEGMENT->marker_inev.object = NULL;
+ STM_PSEGMENT->transaction_state = TS_REGULAR;
#ifndef NDEBUG
STM_PSEGMENT->running_pthread = pthread_self();
#endif
@@ -400,14 +391,16 @@
#else
long repeat_count = stm_rewind_jmp_setjmp(tl);
#endif
- _stm_start_transaction(tl, false);
+ _stm_start_transaction(tl);
return repeat_count;
}
void stm_start_inevitable_transaction(stm_thread_local_t *tl)
{
- s_mutex_lock();
- _stm_start_transaction(tl, true);
+ /* used to be more efficient, starting directly an inevitable transaction,
+ but there is no real point any more, I believe */
+ stm_start_transaction(tl);
+ stm_become_inevitable(tl, "start_inevitable_transaction");
}
@@ -450,7 +443,10 @@
return true;
}
/* we aborted the other transaction without waiting, so
- we can just continue */
+ we can just break out of this loop on
+ modified_old_objects and continue with the next
+ segment */
+ break;
}
}));
}
@@ -784,13 +780,13 @@
list_clear(STM_PSEGMENT->modified_old_objects_markers);
}
-static void _finish_transaction(int attribute_to)
+static void _finish_transaction(enum stm_event_e event)
{
STM_PSEGMENT->safe_point = SP_NO_TRANSACTION;
STM_PSEGMENT->transaction_state = TS_NONE;
/* marker_inev is not needed anymore */
- STM_PSEGMENT->marker_inev[1] = 0;
+ STM_PSEGMENT->marker_inev.object = NULL;
/* reset these lists to NULL for the next transaction */
_verify_cards_cleared_in_all_lists(get_priv_segment(STM_SEGMENT->segment_num));
@@ -798,9 +794,9 @@
list_clear(STM_PSEGMENT->old_objects_with_cards);
LIST_FREE(STM_PSEGMENT->large_overflow_objects);
- timing_end_transaction(attribute_to);
+ stm_thread_local_t *tl = STM_SEGMENT->running_thread;
+ timing_event(tl, event);
- stm_thread_local_t *tl = STM_SEGMENT->running_thread;
release_thread_segment(tl);
/* cannot access STM_SEGMENT or STM_PSEGMENT from here ! */
}
@@ -813,9 +809,6 @@
minor_collection(/*commit=*/ true);
- /* the call to minor_collection() above leaves us with
- STM_TIME_BOOKKEEPING */
-
/* synchronize overflow objects living in privatized pages */
push_overflow_objects_from_privatized_pages();
@@ -839,9 +832,9 @@
/* if a major collection is required, do it here */
if (is_major_collection_requested()) {
- int oldstate = change_timing_state(STM_TIME_MAJOR_GC);
+ timing_event(STM_SEGMENT->running_thread, STM_GC_MAJOR_START);
major_collection_now_at_safe_point();
- change_timing_state(oldstate);
+ timing_event(STM_SEGMENT->running_thread, STM_GC_MAJOR_DONE);
}
/* synchronize modified old objects to other threads */
@@ -868,7 +861,7 @@
}
/* done */
- _finish_transaction(STM_TIME_RUN_COMMITTED);
+ _finish_transaction(STM_TRANSACTION_COMMIT);
/* cannot access STM_SEGMENT or STM_PSEGMENT from here ! */
s_mutex_unlock();
@@ -961,10 +954,6 @@
(int)pseg->transaction_state);
}
- /* if we don't have marker information already, look up and preserve
- the marker information from the shadowstack as a string */
- marker_default_for_abort(pseg);
-
/* throw away the content of the nursery */
long bytes_in_nursery = throw_away_nursery(pseg);
@@ -1053,16 +1042,13 @@
/* invoke the callbacks */
invoke_and_clear_user_callbacks(1); /* for abort */
- int attribute_to = STM_TIME_RUN_ABORTED_OTHER;
-
if (is_abort(STM_SEGMENT->nursery_end)) {
/* done aborting */
- attribute_to = STM_SEGMENT->nursery_end;
STM_SEGMENT->nursery_end = pause_signalled ? NSE_SIGPAUSE
: NURSERY_END;
}
- _finish_transaction(attribute_to);
+ _finish_transaction(STM_TRANSACTION_ABORT);
/* cannot access STM_SEGMENT or STM_PSEGMENT from here ! */
/* Broadcast C_ABORTED to wake up contention.c */
@@ -1104,8 +1090,8 @@
if (STM_PSEGMENT->transaction_state == TS_REGULAR) {
dprintf(("become_inevitable: %s\n", msg));
- marker_fetch_inev();
- wait_for_end_of_inevitable_transaction(NULL);
+ timing_fetch_inev();
+ wait_for_end_of_inevitable_transaction();
STM_PSEGMENT->transaction_state = TS_INEVITABLE;
stm_rewind_jmp_forget(STM_SEGMENT->running_thread);
invoke_and_clear_user_callbacks(0); /* for commit */
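With this change core.c no longer accumulates timing categories itself: it only emits paired events (STM_TRANSACTION_START later followed by STM_TRANSACTION_COMMIT or STM_TRANSACTION_ABORT, and STM_GC_MAJOR_START/STM_GC_MAJOR_DONE), leaving any time measurement to the registered callback. A sketch of one way to time transactions from such a callback, under the assumption (suggested by the call sites above) that the START/COMMIT/ABORT events for a thread are raised from that thread itself; the thread-local bookkeeping is illustrative only:

    #include <stdio.h>
    #include <time.h>
    #include "stmgc.h"

    static __thread struct timespec tx_started;   /* illustrative per-thread state */

    static double seconds_since(const struct timespec *t0)
    {
        struct timespec t1;
        clock_gettime(CLOCK_MONOTONIC, &t1);
        return (t1.tv_sec - t0->tv_sec) + 1e-9 * (t1.tv_nsec - t0->tv_nsec);
    }

    static void time_transactions(stm_thread_local_t *tl, enum stm_event_e event,
                                  stm_loc_marker_t *markers)
    {
        (void)tl; (void)markers;
        switch (event) {
        case STM_TRANSACTION_START:
            clock_gettime(CLOCK_MONOTONIC, &tx_started);
            break;
        case STM_TRANSACTION_COMMIT:
        case STM_TRANSACTION_ABORT:
            fprintf(stderr, "%s after %.6f s\n",
                    event == STM_TRANSACTION_COMMIT ? "commit" : "abort",
                    seconds_since(&tx_started));
            break;
        default:
            break;
        }
    }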
diff --git a/rpython/translator/stm/src_stm/stm/core.h b/rpython/translator/stm/src_stm/stm/core.h
--- a/rpython/translator/stm/src_stm/stm/core.h
+++ b/rpython/translator/stm/src_stm/stm/core.h
@@ -139,7 +139,7 @@
/* Start time: to know approximately for how long a transaction has
been running, in contention management */
- double start_time;
+ uint64_t start_time;
/* This is the number stored in the overflowed objects (a multiple of
GCFLAG_OVERFLOW_NUMBER_bit0). It is incremented when the
@@ -197,10 +197,8 @@
pthread_t running_pthread;
#endif
- /* Temporarily stores the marker information */
- char marker_self[_STM_MARKER_LEN];
- char marker_other[_STM_MARKER_LEN];
- uintptr_t marker_inev[2]; /* marker where this thread became inevitable */
+ /* marker where this thread became inevitable */
+ stm_loc_marker_t marker_inev;
};
enum /* safe_point */ {
diff --git a/rpython/translator/stm/src_stm/stm/forksupport.c b/rpython/translator/stm/src_stm/stm/forksupport.c
--- a/rpython/translator/stm/src_stm/stm/forksupport.c
+++ b/rpython/translator/stm/src_stm/stm/forksupport.c
@@ -56,14 +56,12 @@
s_mutex_unlock();
bool was_in_transaction = _stm_in_transaction(this_tl);
- if (was_in_transaction) {
- stm_become_inevitable(this_tl, "fork");
- /* Note that the line above can still fail and abort, which should
- be fine */
- }
- else {
- stm_start_inevitable_transaction(this_tl);
- }
+ if (!was_in_transaction)
+ stm_start_transaction(this_tl);
+
+ stm_become_inevitable(this_tl, "fork");
+ /* Note that the line above can still fail and abort, which should
+ be fine */
s_mutex_lock();
synchronize_all_threads(STOP_OTHERS_UNTIL_MUTEX_UNLOCK);
@@ -188,7 +186,6 @@
#ifndef NDEBUG
pr->running_pthread = pthread_self();
#endif
- strcpy(pr->marker_self, "fork");
tl->shadowstack = NULL;
pr->shadowstack_at_start_of_transaction = NULL;
stm_rewind_jmp_forget(tl);
@@ -205,6 +202,9 @@
just release these locks early */
s_mutex_unlock();
+ /* Open a new profiling file, if any */
+ forksupport_open_new_profiling_file();
+
/* Move the copy of the mmap over the old one, overwriting it
and thus freeing the old mapping in this process
*/
diff --git a/rpython/translator/stm/src_stm/stm/gcpage.c b/rpython/translator/stm/src_stm/stm/gcpage.c
--- a/rpython/translator/stm/src_stm/stm/gcpage.c
+++ b/rpython/translator/stm/src_stm/stm/gcpage.c
@@ -142,7 +142,7 @@
if (is_major_collection_requested()) { /* if still true */
- int oldstate = change_timing_state(STM_TIME_MAJOR_GC);
+ timing_event(STM_SEGMENT->running_thread, STM_GC_MAJOR_START);
synchronize_all_threads(STOP_OTHERS_UNTIL_MUTEX_UNLOCK);
@@ -150,7 +150,7 @@
major_collection_now_at_safe_point();
}
- change_timing_state(oldstate);
+ timing_event(STM_SEGMENT->running_thread, STM_GC_MAJOR_DONE);
}
s_mutex_unlock();
@@ -447,9 +447,9 @@
for (i = list_count(lst); i > 0; i -= 2) {
mark_visit_object((object_t *)list_item(lst, i - 1), base);
}
- if (get_priv_segment(j)->marker_inev[1]) {
- uintptr_t marker_inev_obj = get_priv_segment(j)->marker_inev[1];
- mark_visit_object((object_t *)marker_inev_obj, base);
+ if (get_priv_segment(j)->marker_inev.segment_base) {
+ object_t *marker_inev_obj = get_priv_segment(j)->marker_inev.object;
+ mark_visit_object(marker_inev_obj, base);
}
}
}
diff --git a/rpython/translator/stm/src_stm/stm/marker.c b/rpython/translator/stm/src_stm/stm/marker.c
--- a/rpython/translator/stm/src_stm/stm/marker.c
+++ b/rpython/translator/stm/src_stm/stm/marker.c
@@ -4,18 +4,11 @@
#endif
-void (*stmcb_expand_marker)(char *segment_base, uintptr_t odd_number,
- object_t *following_object,
- char *outputbuf, size_t outputbufsize);
-
-void (*stmcb_debug_print)(const char *cause, double time,
- const char *marker);
-
-
-static void marker_fetch(stm_thread_local_t *tl, uintptr_t marker[2])
+static void marker_fetch(stm_loc_marker_t *out_marker)
{
- /* fetch the current marker from the tl's shadow stack,
- and return it in 'marker[2]'. */
+ /* Fetch the current marker from the 'out_marker->tl's shadow stack,
+ and return it in 'out_marker->odd_number' and 'out_marker->object'. */
+ stm_thread_local_t *tl = out_marker->tl;
struct stm_shadowentry_s *current = tl->shadowstack - 1;
struct stm_shadowentry_s *base = tl->shadowstack_base;
@@ -29,85 +22,31 @@
}
if (current != base) {
/* found the odd marker */
- marker[0] = (uintptr_t)current[0].ss;
- marker[1] = (uintptr_t)current[1].ss;
+ out_marker->odd_number = (uintptr_t)current[0].ss;
+ out_marker->object = current[1].ss;
}
else {
/* no marker found */
- marker[0] = 0;
- marker[1] = 0;
+ out_marker->odd_number = 0;
+ out_marker->object = NULL;
}
}
-static void marker_expand(uintptr_t marker[2], char *segment_base,
- char *outmarker)
+static void _timing_fetch_inev(void)
{
- /* Expand the marker given by 'marker[2]' into a full string. This
- works assuming that the marker was produced inside the segment
- given by 'segment_base'. If that's from a different thread, you
- must first acquire the corresponding 'marker_lock'. */
- assert(_has_mutex());
- outmarker[0] = 0;
- if (marker[0] == 0)
- return; /* no marker entry found */
- if (stmcb_expand_marker != NULL) {
- stmcb_expand_marker(segment_base, marker[0], (object_t *)marker[1],
- outmarker, _STM_MARKER_LEN);
- }
+ stm_loc_marker_t marker;
+ marker.tl = STM_SEGMENT->running_thread;
+ marker_fetch(&marker);
+ STM_PSEGMENT->marker_inev.odd_number = marker.odd_number;
+ STM_PSEGMENT->marker_inev.object = marker.object;
}
-static void marker_default_for_abort(struct stm_priv_segment_info_s *pseg)
+static void marker_fetch_obj_write(object_t *obj, stm_loc_marker_t *out_marker)
{
- if (pseg->marker_self[0] != 0)
- return; /* already collected an entry */
-
- uintptr_t marker[2];
- marker_fetch(pseg->pub.running_thread, marker);
- marker_expand(marker, pseg->pub.segment_base, pseg->marker_self);
- pseg->marker_other[0] = 0;
-}
-
-char *_stm_expand_marker(void)
-{
- /* for tests only! */
- static char _result[_STM_MARKER_LEN];
- uintptr_t marker[2];
- _result[0] = 0;
- s_mutex_lock();
- marker_fetch(STM_SEGMENT->running_thread, marker);
- marker_expand(marker, STM_SEGMENT->segment_base, _result);
- s_mutex_unlock();
- return _result;
-}
-
-static void marker_copy(stm_thread_local_t *tl,
- struct stm_priv_segment_info_s *pseg,
- enum stm_time_e attribute_to, double time)
-{
- /* Copies the marker information from pseg to tl. This is called
- indirectly from abort_with_mutex(), but only if the lost time is
- greater than that of the previous recorded marker. By contrast,
- pseg->marker_self has been filled already in all cases. The
- reason for the two steps is that we must fill pseg->marker_self
- earlier than now (some objects may be GCed), but we only know
- here the total time it gets attributed.
+ /* From 'out_marker->tl', fill in 'out_marker->segment_base' and
+ 'out_marker->odd_number' and 'out_marker->object' from the
+ marker associated with writing the 'obj'.
*/
- if (stmcb_debug_print) {
- stmcb_debug_print(timer_names[attribute_to], time, pseg->marker_self);
- }
- if (time * 0.99 > tl->longest_marker_time) {
- tl->longest_marker_state = attribute_to;
- tl->longest_marker_time = time;
- memcpy(tl->longest_marker_self, pseg->marker_self, _STM_MARKER_LEN);
- memcpy(tl->longest_marker_other, pseg->marker_other, _STM_MARKER_LEN);
- }
- pseg->marker_self[0] = 0;
- pseg->marker_other[0] = 0;
-}
-
-static void marker_fetch_obj_write(uint8_t in_segment_num, object_t *obj,
- uintptr_t marker[2])
-{
assert(_has_mutex());
/* here, we acquired the other thread's marker_lock, which means that:
@@ -119,80 +58,86 @@
the global mutex_lock at this point too).
*/
long i;
+ int in_segment_num = out_marker->tl->associated_segment_num;
struct stm_priv_segment_info_s *pseg = get_priv_segment(in_segment_num);
struct list_s *mlst = pseg->modified_old_objects;
struct list_s *mlstm = pseg->modified_old_objects_markers;
- for (i = list_count(mlst); --i >= 0; ) {
+ assert(list_count(mlstm) <= 2 * list_count(mlst));
+ for (i = list_count(mlstm) / 2; --i >= 0; ) {
if (list_item(mlst, i) == (uintptr_t)obj) {
- assert(list_count(mlstm) == 2 * list_count(mlst));
- marker[0] = list_item(mlstm, i * 2 + 0);
- marker[1] = list_item(mlstm, i * 2 + 1);
+ out_marker->odd_number = list_item(mlstm, i * 2 + 0);
+ out_marker->object = (object_t *)list_item(mlstm, i * 2 + 1);
return;
}
}
- marker[0] = 0;
- marker[1] = 0;
+ out_marker->odd_number = 0;
+ out_marker->object = NULL;
}
-static void marker_contention(int kind, bool abort_other,
- uint8_t other_segment_num, object_t *obj)
+static void _timing_record_write(void)
{
- uintptr_t self_marker[2];
- uintptr_t other_marker[2];
- struct stm_priv_segment_info_s *my_pseg, *other_pseg;
+ stm_loc_marker_t marker;
+ marker.tl = STM_SEGMENT->running_thread;
+ marker_fetch(&marker);
- my_pseg = get_priv_segment(STM_SEGMENT->segment_num);
+ long base_count = list_count(STM_PSEGMENT->modified_old_objects);
+ struct list_s *mlstm = STM_PSEGMENT->modified_old_objects_markers;
+ while (list_count(mlstm) < 2 * base_count) {
+ mlstm = list_append2(mlstm, 0, 0);
+ }
+ mlstm = list_append2(mlstm, marker.odd_number, (uintptr_t)marker.object);
+ STM_PSEGMENT->modified_old_objects_markers = mlstm;
+}
+
+static void _timing_contention(enum stm_event_e kind,
+ uint8_t other_segment_num, object_t *obj)
+{
+ struct stm_priv_segment_info_s *other_pseg;
other_pseg = get_priv_segment(other_segment_num);
- char *my_segment_base = STM_SEGMENT->segment_base;
- char *other_segment_base = get_segment_base(other_segment_num);
+ char *other_segment_base = other_pseg->pub.segment_base;
+ acquire_marker_lock(other_segment_base);
- acquire_marker_lock(other_segment_base);
+ stm_loc_marker_t markers[2];
/* Collect the location for myself. It's usually the current
location, except in a write-read abort, in which case it's the
older location of the write. */
- if (kind == WRITE_READ_CONTENTION)
- marker_fetch_obj_write(my_pseg->pub.segment_num, obj, self_marker);
+ markers[0].tl = STM_SEGMENT->running_thread;
+ markers[0].segment_base = STM_SEGMENT->segment_base;
+
+ if (kind == STM_CONTENTION_WRITE_READ)
+ marker_fetch_obj_write(obj, &markers[0]);
else
- marker_fetch(my_pseg->pub.running_thread, self_marker);
-
- /* Expand this location into either my_pseg->marker_self or
- other_pseg->marker_other, depending on who aborts. */
- marker_expand(self_marker, my_segment_base,
- abort_other ? other_pseg->marker_other
- : my_pseg->marker_self);
+ marker_fetch(&markers[0]);
/* For some categories, we can also collect the relevant information
for the other segment. */
- char *outmarker = abort_other ? other_pseg->marker_self
- : my_pseg->marker_other;
+ markers[1].tl = other_pseg->pub.running_thread;
+ markers[1].segment_base = other_pseg->pub.segment_base;
+
switch (kind) {
- case WRITE_WRITE_CONTENTION:
- marker_fetch_obj_write(other_segment_num, obj, other_marker);
- marker_expand(other_marker, other_segment_base, outmarker);
+ case STM_CONTENTION_WRITE_WRITE:
+ marker_fetch_obj_write(obj, &markers[1]);
break;
- case INEVITABLE_CONTENTION:
- assert(abort_other == false);
- other_marker[0] = other_pseg->marker_inev[0];
- other_marker[1] = other_pseg->marker_inev[1];
- marker_expand(other_marker, other_segment_base, outmarker);
- break;
- case WRITE_READ_CONTENTION:
- strcpy(outmarker, "<read at unknown location>");
+ case STM_CONTENTION_INEVITABLE:
+ markers[1].odd_number = other_pseg->marker_inev.odd_number;
+ markers[1].object = other_pseg->marker_inev.object;
break;
default:
- outmarker[0] = 0;
+ markers[1].odd_number = 0;
+ markers[1].object = NULL;
break;
}
+ stmcb_timing_event(markers[0].tl, kind, markers);
+
+ /* only release the lock after stmcb_timing_event(), otherwise it could
+ run into race conditions trying to interpret 'markers[1].object' */
release_marker_lock(other_segment_base);
}
-static void marker_fetch_inev(void)
-{
- uintptr_t marker[2];
- marker_fetch(STM_SEGMENT->running_thread, marker);
- STM_PSEGMENT->marker_inev[0] = marker[0];
- STM_PSEGMENT->marker_inev[1] = marker[1];
-}
+
+void (*stmcb_timing_event)(stm_thread_local_t *tl, /* the local thread */
+ enum stm_event_e event,
+ stm_loc_marker_t *markers);
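marker_fetch() above walks the shadowstack from the top looking for the most recent odd-number/object pair; those pairs are pushed by the host runtime with the marker macros from stmgc.h. A sketch of the producer side, assuming the STM_PUSH_MARKER macro shown at the end of this changeset and a matching STM_POP_MARKER (the pop macro is not visible in this diff, so it is an assumption here); the frame/position names are made up:

    #include "stmgc.h"

    /* Illustrative only: publish "where am I" on the shadowstack so that
       marker_fetch() -- and therefore contention and profiling reports --
       can pick it up. */
    static void run_one_frame(stm_thread_local_t *tl, object_t *code_object,
                              uintptr_t bytecode_position)
    {
        /* the odd number encodes the position, the object gives it context */
        STM_PUSH_MARKER(tl, 2 * bytecode_position + 1, code_object);

        /* ... interpret the frame; a write slowpath or a contention occurring
           here records this marker via timing_record_write() or
           _timing_contention() ... */

        STM_POP_MARKER(tl);   /* assumed counterpart of STM_PUSH_MARKER */
    }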
diff --git a/rpython/translator/stm/src_stm/stm/marker.h b/rpython/translator/stm/src_stm/stm/marker.h
--- a/rpython/translator/stm/src_stm/stm/marker.h
+++ b/rpython/translator/stm/src_stm/stm/marker.h
@@ -1,13 +1,20 @@
/* Imported by rpython/translator/stm/import_stmgc.py */
-static void marker_fetch(stm_thread_local_t *tl, uintptr_t marker[2]);
-static void marker_fetch_inev(void);
-static void marker_expand(uintptr_t marker[2], char *segment_base,
- char *outmarker);
-static void marker_default_for_abort(struct stm_priv_segment_info_s *pseg);
-static void marker_copy(stm_thread_local_t *tl,
- struct stm_priv_segment_info_s *pseg,
- enum stm_time_e attribute_to, double time);
+static void _timing_record_write(void);
+static void _timing_fetch_inev(void);
+static void _timing_contention(enum stm_event_e kind,
+ uint8_t other_segment_num, object_t *obj);
-static void marker_contention(int kind, bool abort_other,
- uint8_t other_segment_num, object_t *obj);
+
+#define timing_event(tl, event) \
+ (stmcb_timing_event != NULL ? stmcb_timing_event(tl, event, NULL) : (void)0)
+
+#define timing_record_write() \
+ (stmcb_timing_event != NULL ? _timing_record_write() : (void)0)
+
+#define timing_fetch_inev() \
+ (stmcb_timing_event != NULL ? _timing_fetch_inev() : (void)0)
+
+#define timing_contention(kind, other_segnum, obj) \
+ (stmcb_timing_event != NULL ? \
+ _timing_contention(kind, other_segnum, obj) : (void)0)
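All four timing_* macros above test the single global stmcb_timing_event pointer, so when no profiler is registered the whole mechanism reduces to one well-predicted branch per call site. A minimal sketch of driving it directly through that pointer (stm_set_timing_log(), further down in stmgc.h, is the higher-level interface that does the same installation and also writes a binary log); the counting callback is illustrative and not thread-safe:

    #include "stmgc.h"

    static unsigned long event_counts[_STM_EVENT_N];   /* not atomic; sketch only */

    static void count_events(stm_thread_local_t *tl, enum stm_event_e event,
                             stm_loc_marker_t *markers)
    {
        (void)tl; (void)markers;
        event_counts[event]++;
    }

    void profiling_on(void)  { stmcb_timing_event = &count_events; }
    void profiling_off(void) { stmcb_timing_event = NULL; }   /* macros become no-ops */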
diff --git a/rpython/translator/stm/src_stm/stm/nursery.c b/rpython/translator/stm/src_stm/stm/nursery.c
--- a/rpython/translator/stm/src_stm/stm/nursery.c
+++ b/rpython/translator/stm/src_stm/stm/nursery.c
@@ -426,11 +426,13 @@
for (i = num_old + 1; i < total; i += 2) {
minor_trace_if_young((object_t **)list_ptr_to_item(mlst, i));
}
- if (STM_PSEGMENT->marker_inev[1]) {
- uintptr_t *pmarker_inev_obj = (uintptr_t *)
+ if (STM_PSEGMENT->marker_inev.segment_base) {
+ assert(STM_PSEGMENT->marker_inev.segment_base ==
+ STM_SEGMENT->segment_base);
+ object_t **pmarker_inev_obj = (object_t **)
REAL_ADDRESS(STM_SEGMENT->segment_base,
- &STM_PSEGMENT->marker_inev[1]);
- minor_trace_if_young((object_t **)pmarker_inev_obj);
+ &STM_PSEGMENT->marker_inev.object);
+ minor_trace_if_young(pmarker_inev_obj);
}
}
@@ -573,11 +575,11 @@
stm_safe_point();
- change_timing_state(STM_TIME_MINOR_GC);
+ timing_event(STM_SEGMENT->running_thread, STM_GC_MINOR_START);
_do_minor_collection(commit);
- change_timing_state(commit ? STM_TIME_BOOKKEEPING : STM_TIME_RUN_CURRENT);
+ timing_event(STM_SEGMENT->running_thread, STM_GC_MINOR_DONE);
}
void stm_collect(long level)
diff --git a/rpython/translator/stm/src_stm/stm/nursery.h b/rpython/translator/stm/src_stm/stm/nursery.h
--- a/rpython/translator/stm/src_stm/stm/nursery.h
+++ b/rpython/translator/stm/src_stm/stm/nursery.h
@@ -1,8 +1,14 @@
/* Imported by rpython/translator/stm/import_stmgc.py */
-/* '_stm_nursery_section_end' is either NURSERY_END or NSE_SIGxxx */
-#define NSE_SIGPAUSE STM_TIME_WAIT_OTHER
-#define NSE_SIGCOMMITSOON STM_TIME_SYNC_COMMIT_SOON
+/* 'nursery_end' is either NURSERY_END or one of NSE_SIGxxx */
+#define NSE_SIGABORT 1
+#define NSE_SIGPAUSE 2
+#define NSE_SIGCOMMITSOON 3
+#define _NSE_NUM_SIGNALS 4
+
+#if _NSE_NUM_SIGNALS >= _STM_NSE_SIGNAL_MAX
+# error "increase _STM_NSE_SIGNAL_MAX"
+#endif
static uint32_t highest_overflow_number;
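The 'nursery_end' word is overloaded: it normally holds NURSERY_END, but other threads store one of the small NSE_SIGxxx codes into it to request an abort, a pause or an early commit, which the owner notices at its next safe point; the #error check keeps every code below _STM_NSE_SIGNAL_MAX. A hypothetical dispatch, only to illustrate the encoding (the real tests live in nursery.c and sync.c, e.g. is_abort(), which this diff does not show), assuming the defines from nursery.h are in scope:

    #include <stdio.h>
    #include <stdint.h>

    /* Hypothetical illustration of the nursery_end encoding; not library code. */
    static void describe_nursery_end(uintptr_t nursery_end)
    {
        if (nursery_end == NURSERY_END)
            puts("no signal: nursery_end is the real end of the nursery");
        else if (nursery_end == NSE_SIGABORT)
            puts("another thread asked us to abort");
        else if (nursery_end == NSE_SIGPAUSE)
            puts("asked to pause at the next safe point");
        else if (nursery_end == NSE_SIGCOMMITSOON)
            puts("asked to commit as soon as possible");
    }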
diff --git a/rpython/translator/stm/src_stm/stm/setup.c b/rpython/translator/stm/src_stm/stm/setup.c
--- a/rpython/translator/stm/src_stm/stm/setup.c
+++ b/rpython/translator/stm/src_stm/stm/setup.c
@@ -23,8 +23,8 @@
static char *setup_mmap(char *reason, int *map_fd)
{
char name[128];
- sprintf(name, "/stmgc-c7-bigmem-%ld-%.18e",
- (long)getpid(), get_stm_time());
+ sprintf(name, "/stmgc-c7-bigmem-%ld",
+ (long)getpid());
/* Create the big shared memory object, and immediately unlink it.
There is a small window where if this process is killed the
@@ -226,6 +226,8 @@
return (pthread_t *)(tl->creating_pthread);
}
+static int thread_local_counters = 0;
+
void stm_register_thread_local(stm_thread_local_t *tl)
{
int num;
@@ -242,14 +244,13 @@
num = tl->prev->associated_segment_num;
}
tl->thread_local_obj = NULL;
- tl->_timing_cur_state = STM_TIME_OUTSIDE_TRANSACTION;
- tl->_timing_cur_start = get_stm_time();
/* assign numbers consecutively, but that's for tests; we could also
assign the same number to all of them and they would get their own
numbers automatically. */
num = (num % NB_SEGMENTS) + 1;
tl->associated_segment_num = num;
+ tl->thread_local_counter = ++thread_local_counters;
*_get_cpth(tl) = pthread_self();
_init_shadow_stack(tl);
set_gs_register(get_segment_base(num));
diff --git a/rpython/translator/stm/src_stm/stm/sync.c b/rpython/translator/stm/src_stm/stm/sync.c
--- a/rpython/translator/stm/src_stm/stm/sync.c
+++ b/rpython/translator/stm/src_stm/stm/sync.c
@@ -124,32 +124,19 @@
/************************************************************/
-static void wait_for_end_of_inevitable_transaction(
- stm_thread_local_t *tl_or_null_if_can_abort)
+static void wait_for_end_of_inevitable_transaction(void)
{
long i;
restart:
for (i = 1; i <= NB_SEGMENTS; i++) {
struct stm_priv_segment_info_s *other_pseg = get_priv_segment(i);
if (other_pseg->transaction_state == TS_INEVITABLE) {
- if (tl_or_null_if_can_abort == NULL) {
- /* handle this case like a contention: it will either
- abort us (not the other thread, which is inevitable),
- or wait for a while. If we go past this call, then we
- waited; in this case we have to re-check if no other
- thread is inevitable. */
- inevitable_contention_management(i);
- }
- else {
- /* wait for stm_commit_transaction() to finish this
- inevitable transaction */
- signal_other_to_commit_soon(other_pseg);
- change_timing_state_tl(tl_or_null_if_can_abort,
- STM_TIME_WAIT_INEVITABLE);
- cond_wait(C_INEVITABLE);
- /* don't bother changing the timing state again: the caller
- will very soon go to STM_TIME_RUN_CURRENT */
- }
+ /* handle this case like a contention: it will either
+ abort us (not the other thread, which is inevitable),
+ or wait for a while. If we go past this call, then we
+ waited; in this case we have to re-check if no other
+ thread is inevitable. */
+ inevitable_contention_management(i);
goto restart;
}
}
@@ -189,8 +176,9 @@
}
/* No segment available. Wait until release_thread_segment()
signals that one segment has been freed. */
- change_timing_state_tl(tl, STM_TIME_WAIT_FREE_SEGMENT);
+ timing_event(tl, STM_WAIT_FREE_SEGMENT);
cond_wait(C_SEGMENT_FREE);
+ timing_event(tl, STM_WAIT_DONE);
/* Return false to the caller, which will call us again */
return false;
@@ -332,7 +320,6 @@
if (STM_SEGMENT->nursery_end == NURSERY_END)
return; /* fast path: no safe point requested */
- int previous_state = -1;
assert(_seems_to_be_running_transaction());
assert(_has_mutex());
while (1) {
@@ -343,10 +330,6 @@
break; /* no safe point requested */
if (STM_SEGMENT->nursery_end == NSE_SIGCOMMITSOON) {
- if (previous_state == -1) {
- previous_state =
change_timing_state(STM_TIME_SYNC_COMMIT_SOON);
- }
-
STM_PSEGMENT->signalled_to_commit_soon = true;
stmcb_commit_soon();
if (!pause_signalled) {
@@ -363,17 +346,12 @@
#ifdef STM_TESTS
abort_with_mutex();
#endif
- if (previous_state == -1) {
- previous_state = change_timing_state(STM_TIME_SYNC_PAUSE);
- }
+ timing_event(STM_SEGMENT->running_thread, STM_WAIT_SYNC_PAUSE);
cond_signal(C_AT_SAFE_POINT);
STM_PSEGMENT->safe_point = SP_WAIT_FOR_C_REQUEST_REMOVED;
cond_wait(C_REQUEST_REMOVED);
STM_PSEGMENT->safe_point = SP_RUNNING;
- }
-
- if (previous_state != -1) {
- change_timing_state(previous_state);
+ timing_event(STM_SEGMENT->running_thread, STM_WAIT_DONE);
}
}
diff --git a/rpython/translator/stm/src_stm/stm/sync.h b/rpython/translator/stm/src_stm/stm/sync.h
--- a/rpython/translator/stm/src_stm/stm/sync.h
+++ b/rpython/translator/stm/src_stm/stm/sync.h
@@ -29,7 +29,7 @@
static bool acquire_thread_segment(stm_thread_local_t *tl);
static void release_thread_segment(stm_thread_local_t *tl);
-static void wait_for_end_of_inevitable_transaction(stm_thread_local_t *);
+static void wait_for_end_of_inevitable_transaction(void);
enum sync_type_e {
STOP_OTHERS_UNTIL_MUTEX_UNLOCK,
diff --git a/rpython/translator/stm/src_stm/stmgc.c b/rpython/translator/stm/src_stm/stmgc.c
--- a/rpython/translator/stm/src_stm/stmgc.c
+++ b/rpython/translator/stm/src_stm/stmgc.c
@@ -15,8 +15,8 @@
#include "stm/extra.h"
#include "stm/fprintcolor.h"
#include "stm/weakref.h"
-#include "stm/timing.h"
#include "stm/marker.h"
+#include "stm/prof.h"
#include "stm/misc.c"
#include "stm/list.c"
@@ -35,6 +35,6 @@
#include "stm/extra.c"
#include "stm/fprintcolor.c"
#include "stm/weakref.c"
-#include "stm/timing.c"
#include "stm/marker.c"
+#include "stm/prof.c"
#include "stm/rewind_setjmp.c"
diff --git a/rpython/translator/stm/src_stm/stmgc.h b/rpython/translator/stm/src_stm/stmgc.h
--- a/rpython/translator/stm/src_stm/stmgc.h
+++ b/rpython/translator/stm/src_stm/stmgc.h
@@ -55,28 +55,6 @@
object_t *ss;
};
-enum stm_time_e {
- STM_TIME_OUTSIDE_TRANSACTION,
- STM_TIME_RUN_CURRENT,
- STM_TIME_RUN_COMMITTED,
- STM_TIME_RUN_ABORTED_WRITE_WRITE,
- STM_TIME_RUN_ABORTED_WRITE_READ,
- STM_TIME_RUN_ABORTED_INEVITABLE,
- STM_TIME_RUN_ABORTED_OTHER,
- STM_TIME_WAIT_FREE_SEGMENT,
- STM_TIME_WAIT_WRITE_READ,
- STM_TIME_WAIT_INEVITABLE,
- STM_TIME_WAIT_OTHER,
- STM_TIME_SYNC_COMMIT_SOON,
- STM_TIME_BOOKKEEPING,
- STM_TIME_MINOR_GC,
- STM_TIME_MAJOR_GC,
- STM_TIME_SYNC_PAUSE,
- _STM_TIME_N
-};
-
-#define _STM_MARKER_LEN 80
-
typedef struct stm_thread_local_s {
/* every thread should handle the shadow stack itself */
struct stm_shadowentry_s *shadowstack, *shadowstack_base;
@@ -89,20 +67,11 @@
char *mem_clear_on_abort;
size_t mem_bytes_to_clear_on_abort;
/* after an abort, some details about the abort are stored there.
- (these fields are not modified on a successful commit) */
+ (this field is not modified on a successful commit) */
long last_abort__bytes_in_nursery;
- /* timing information, accumulated */
- uint32_t events[_STM_TIME_N];
- float timing[_STM_TIME_N];
- double _timing_cur_start;
- enum stm_time_e _timing_cur_state;
- /* the marker with the longest associated time so far */
- enum stm_time_e longest_marker_state;
- double longest_marker_time;
- char longest_marker_self[_STM_MARKER_LEN];
- char longest_marker_other[_STM_MARKER_LEN];
/* the next fields are handled internally by the library */
int associated_segment_num;
+ int thread_local_counter;
struct stm_thread_local_s *prev, *next;
void *creating_pthread[2];
} stm_thread_local_t;
@@ -156,7 +125,7 @@
#define _STM_CARD_SIZE 32 /* must be >= 32 */
#define _STM_MIN_CARD_COUNT 17
#define _STM_MIN_CARD_OBJ_SIZE (_STM_CARD_SIZE * _STM_MIN_CARD_COUNT)
-#define _STM_NSE_SIGNAL_MAX _STM_TIME_N
+#define _STM_NSE_SIGNAL_MAX 7
#define _STM_FAST_ALLOC (66*1024)
@@ -439,20 +408,79 @@
const char *msg);
-/* Temporary? */
-void stm_flush_timing(stm_thread_local_t *tl, int verbose);
+/* Profiling events. In the comments: content of the markers, if any */
+enum stm_event_e {
+ /* always STM_TRANSACTION_START followed later by one of COMMIT or ABORT */
+ STM_TRANSACTION_START,
+ STM_TRANSACTION_COMMIT,
+ STM_TRANSACTION_ABORT,
+ /* contention; see details at the start of contention.c */
+ STM_CONTENTION_WRITE_WRITE, /* markers: self loc / other written loc */
+ STM_CONTENTION_WRITE_READ, /* markers: self written loc / other missing */
+ STM_CONTENTION_INEVITABLE, /* markers: self loc / other inev loc */
+
+ /* following a contention, we get from the same thread one of:
+ STM_ABORTING_OTHER_CONTENTION, STM_TRANSACTION_ABORT (self-abort),
+ or STM_WAIT_CONTENTION (self-wait). */
+ STM_ABORTING_OTHER_CONTENTION,
+
+ /* always one STM_WAIT_xxx followed later by STM_WAIT_DONE */
+ STM_WAIT_FREE_SEGMENT,
+ STM_WAIT_SYNC_PAUSE,
+ STM_WAIT_CONTENTION,
+ STM_WAIT_DONE,
+
+ /* start and end of GC cycles */
+ STM_GC_MINOR_START,
+ STM_GC_MINOR_DONE,
+ STM_GC_MAJOR_START,
+ STM_GC_MAJOR_DONE,
+
+ _STM_EVENT_N
+};
+
+#define STM_EVENT_NAMES \
+ "transaction start", \
+ "transaction commit", \
+ "transaction abort", \
+ "contention write write", \
+ "contention write read", \
+ "contention inevitable", \
+ "aborting other contention", \
+ "wait free segment", \
+ "wait sync pause", \
+ "wait contention", \
+ "wait done", \
+ "gc minor start", \
+ "gc minor done", \
+ "gc major start", \
+ "gc major done"
/* The markers pushed in the shadowstack are an odd number followed by a
- regular pointer. When needed, this library invokes this callback to
- turn this pair into a human-readable explanation. */
-extern void (*stmcb_expand_marker)(char *segment_base, uintptr_t odd_number,
- object_t *following_object,
- char *outputbuf, size_t outputbufsize);
-extern void (*stmcb_debug_print)(const char *cause, double time,
- const char *marker);
+ regular pointer. */
+typedef struct {
+ stm_thread_local_t *tl;
+ char *segment_base; /* base to interpret the 'object' below */
+ uintptr_t odd_number; /* marker odd number, or 0 if marker is missing */
+ object_t *object; /* marker object, or NULL if marker is missing */
+} stm_loc_marker_t;
+extern void (*stmcb_timing_event)(stm_thread_local_t *tl, /* the local thread */
+ enum stm_event_e event,
+ stm_loc_marker_t *markers);
-/* Conventience macros to push the markers into the shadowstack */
+/* Calling this sets up a stmcb_timing_event callback that will produce
+ a binary file called 'profiling_file_name'. After a fork(), it is
+ written to 'profiling_file_name.fork<PID>'. Call it with NULL to
+ stop profiling. Returns -1 in case of error (see errno then).
+ The optional 'expand_marker' function pointer is called to expand
+ the marker's odd_number and object into data, starting at the given
+ position and with the given maximum length. */
+int stm_set_timing_log(const char *profiling_file_name,
+ int expand_marker(stm_loc_marker_t *, char *, int));
+
+
+/* Convenience macros to push the markers into the shadowstack */
#define STM_PUSH_MARKER(tl, odd_num, p) do { \
uintptr_t _odd_num = (odd_num); \
assert(_odd_num & 1); \
@@ -477,8 +505,6 @@
_ss->ss = (object_t *)_odd_num; \
} while (0)
-char *_stm_expand_marker(void);
-
/* ==================== END ==================== */
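Putting the new public API together: stm_set_timing_log() installs the library's own binary-logging callback, and the optional expand_marker hook converts each marker's odd_number/object pair into the payload written to the log. A usage sketch, assuming that expand_marker returns the number of bytes it wrote into the output buffer (the prototype above does not state this explicitly) and that pycode_name_of() is a made-up helper of the embedding runtime:

    #include <string.h>
    #include <stdint.h>
    #include "stmgc.h"

    /* Made-up helper of the host runtime: map a marker to a short label. */
    extern const char *pycode_name_of(object_t *code_obj, uintptr_t odd_number);

    /* Fill 'output' (at most 'length' bytes) with data describing the marker.
       Assumption: the return value is the number of bytes actually written. */
    static int expand_marker(stm_loc_marker_t *marker, char *output, int length)
    {
        if (marker->odd_number == 0)
            return 0;                       /* no marker was recorded */
        const char *name = pycode_name_of(marker->object, marker->odd_number);
        int n = (int)strlen(name);
        if (n > length)
            n = length;
        memcpy(output, name, n);
        return n;
    }

    int start_profiling(void)
    {
        /* after fork(), the child reopens "stm.log.fork<PID>" by itself;
           see forksupport_open_new_profiling_file() above */
        return stm_set_timing_log("stm.log", &expand_marker);
    }

    int stop_profiling(void)
    {
        return stm_set_timing_log(NULL, NULL);   /* NULL file name stops logging */
    }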