On Wednesday 05 December 2007 18:10, Christoph Bartoschek wrote:
> Am Mittwoch, 5. Dezember 2007 schrieb Julian Seward:
> > On Wednesday 05 December 2007 16:32, Christoph Bartoschek wrote:
> > > The read of COND in the parent thread happens in a segment that is
> > > after the accesses that established the shared-modified state in the
> > > happens-before graph.
> > >
> > > Given that COND should not trigger an error.
> >
> > But why should we assume that ownership of COND should change from
> > shared-modified to exclusively-owned-by-parent at the point of the
> > signal/wait pair?
Ok. Try the attached patch; it implements this change. Once patched,
you need the flag --happens-before=cvhack, else Helgrind behaves exactly
as before. The transition it does, at the event
_VG_USERREQ__HG_PTHREAD_COND_WAIT_POST, is:
signalling thread gives up ownership of
shared locations, giving them to the waiting thread
instead. If a location was previously accessed only by
the waiting thread and the signalling thread, then the
waiting thread acquires exclusive ownership.
It makes Helgrind shut up with Konstantin's original test case cv.cc.  I did
not test anything else. Note this is an insanely inefficient
implementation -- this patch is just to find out if the idea is useful.
J
Index: helgrind/hg_main.c
===================================================================
--- helgrind/hg_main.c (revision 7282)
+++ helgrind/hg_main.c (working copy)
@@ -90,7 +90,7 @@
// shadow_mem_make_NoAccess: 29156 SMs, 1728 scanned
// happens_before_wrk: 1000
// ev__post_thread_join: 3360 SMs, 29 scanned, 252 re-Excls
-#define SHOW_EXPENSIVE_STUFF 0
+#define SHOW_EXPENSIVE_STUFF 1
// 0 for silent, 1 for some stuff, 2 for lots of stuff
#define SHOW_EVENTS 0
@@ -132,6 +132,7 @@
// 1 = segments at thread create/join
// 2 = as 1 + segments at condition variable signal/broadcast/wait
// + segments at sem_wait/sem_post
+// 3 = as 2 + mess with mem ownership at cv wait completion
static Int clo_happens_before = 2; /* default setting */
/* Generate .vcg output of the happens-before graph?
@@ -2974,6 +2975,7 @@
case 'r': how = "rd"; break;
case 'w': how = "wr"; break;
case 'p': how = "pa"; break;
+ case 'm': how = "MA"; break;
default: tl_assert(0);
}
show_shadow_w32_for_user(txt_old, sizeof(txt_old), sv_old);
@@ -5589,70 +5591,28 @@
all__sanity_check("evh__pre_thread_ll_exit-post");
}
+/* Examine all ShM/ShR states in the system. For each one, look at
+ its thread set. If the thread set contains "change_this", remove
+ it and insert "to_this" instead. If the resulting thread set is a
+ singleton, change the shadow word to be an Excl state belonging to
+ the current segment for "to_this". */
static
-void evh__HG_PTHREAD_JOIN_POST ( ThreadId stay_tid, Thread* quit_thr )
+void evh__substitute_in_threadsets ( HChar* who,
+ Thread* change_this,
+ Thread* to_this )
{
Int stats_SMs, stats_SMs_scanned, stats_reExcls;
Addr ga;
SecMap* sm;
- Thread* thr_s;
- Thread* thr_q;
- if (SHOW_EVENTS >= 1)
- VG_(printf)("evh__post_thread_join(stayer=%d, quitter=%p)\n",
- (Int)stay_tid, quit_thr );
+ Thread* thr_q = change_this;
+ Thread* thr_s = to_this;
- tl_assert(is_sane_ThreadId(stay_tid));
+ tl_assert( is_sane_Thread(thr_q) );
+ tl_assert( is_sane_Thread(thr_s) );
+ tl_assert(thr_q != thr_s);
- thr_s = map_threads_maybe_lookup( stay_tid );
- thr_q = quit_thr;
- tl_assert(thr_s != NULL);
- tl_assert(thr_q != NULL);
- tl_assert(thr_s != thr_q);
-
- if (clo_happens_before >= 1) {
- /* Start a new segment for the stayer */
- SegmentID new_segid = 0; /* bogus */
- Segment* new_seg = NULL;
- evhH__start_new_segment_for_thread( &new_segid, &new_seg, thr_s );
- tl_assert(is_sane_SegmentID(new_segid));
- tl_assert(is_sane_Segment(new_seg));
- /* and make it depend on the quitter's last segment */
- tl_assert(new_seg->other == NULL);
- new_seg->other = map_segments_lookup( thr_q->csegid );
- new_seg->other_hint = 'j';
- tl_assert(new_seg->thr == thr_s);
- new_seg->vts = tickL_and_joinR_VTS( thr_s, new_seg->prev->vts,
- new_seg->other->vts );
- }
-
- // FIXME: error-if: exiting thread holds any locks
- // or should evh__pre_thread_ll_exit do that?
-
- /* Delete thread from ShM/ShR thread sets and restore Excl states
- where appropriate */
-
- /* When Thread(t) joins to Thread(u):
-
- scan all shadow memory. For each ShM/ShR thread set, replace
- 't' in each set with 'u'. If this results in a singleton 'u',
- change the state to Excl(u->csegid).
-
- Optimisation: tag each SecMap with a superset of the union of
- the thread sets in the SecMap. Then if the tag set does not
- include 't' then the SecMap can be skipped, because there is no
- 't' to change to anything else.
-
- Problem is that the tag set needs to be updated often, after
- every ShR/ShM store. (that increases the thread set of the
- shadow value.)
-
- --> Compromise. Tag each SecMap with a .mbHasShared bit which
- must be set true if any ShR/ShM on the page. Set this for
- any transitions into ShR/ShM on the page. Then skip page if
- not set.
-
- .mbHasShared bits are (effectively) cached in cache_shmem.
+ /* .mbHasShared bits are (effectively) cached in cache_shmem.
Hence that must be flushed before we can safely consult them.
Since we're modifying the backing store, we also need to
@@ -5717,16 +5677,92 @@
wnew = isM ? mk_SHVAL_ShM(tset_new, lset_old)
: mk_SHVAL_ShR(tset_new, lset_old);
}
+ if (clo_trace_level > 0
+ && *w32p != wnew
+ && ga <= clo_trace_addr
+ && clo_trace_addr < ga+N_SECMAP_ARANGE) {
+ msm__show_state_change( thr_s, ga, 0, 'm', *w32p, wnew );
+ }
*w32p = wnew;
}
}
HG_(doneIterFM)( map_shmem );
if (SHOW_EXPENSIVE_STUFF)
- VG_(printf)("evh__post_thread_join: %d SMs, "
- "%d scanned, %d re-Excls\n",
- stats_SMs, stats_SMs_scanned, stats_reExcls);
+ VG_(printf)("%s: %d SMs, %d scanned, %d re-Excls\n",
+ who, stats_SMs, stats_SMs_scanned, stats_reExcls);
+}
+static
+void evh__HG_PTHREAD_JOIN_POST ( ThreadId stay_tid, Thread* quit_thr )
+{
+ Thread* thr_s;
+ Thread* thr_q;
+
+ if (SHOW_EVENTS >= 1)
+ VG_(printf)("evh__post_thread_join(stayer=%d, quitter=%p)\n",
+ (Int)stay_tid, quit_thr );
+
+ tl_assert(is_sane_ThreadId(stay_tid));
+
+ thr_s = map_threads_maybe_lookup( stay_tid );
+ thr_q = quit_thr;
+ tl_assert(thr_s != NULL);
+ tl_assert(thr_q != NULL);
+ tl_assert(thr_s != thr_q);
+
+ if (clo_happens_before >= 1) {
+ /* Start a new segment for the stayer */
+ SegmentID new_segid = 0; /* bogus */
+ Segment* new_seg = NULL;
+ evhH__start_new_segment_for_thread( &new_segid, &new_seg, thr_s );
+ tl_assert(is_sane_SegmentID(new_segid));
+ tl_assert(is_sane_Segment(new_seg));
+ /* and make it depend on the quitter's last segment */
+ tl_assert(new_seg->other == NULL);
+ new_seg->other = map_segments_lookup( thr_q->csegid );
+ new_seg->other_hint = 'j';
+ tl_assert(new_seg->thr == thr_s);
+ new_seg->vts = tickL_and_joinR_VTS( thr_s, new_seg->prev->vts,
+ new_seg->other->vts );
+ }
+
+ // FIXME: error-if: exiting thread holds any locks
+ // or should evh__pre_thread_ll_exit do that?
+
+ /* Delete thread from ShM/ShR thread sets and restore Excl states
+ where appropriate */
+
+ /* When Thread(t) joins to Thread(u):
+
+ scan all shadow memory. For each ShM/ShR thread set, replace
+ 't' in each set with 'u'. If this results in a singleton 'u',
+ change the state to Excl(u->csegid).
+
+ Optimisation: tag each SecMap with a superset of the union of
+ the thread sets in the SecMap. Then if the tag set does not
+ include 't' then the SecMap can be skipped, because there is no
+ 't' to change to anything else.
+
+ Problem is that the tag set needs to be updated often, after
+ every ShR/ShM store. (that increases the thread set of the
+ shadow value.)
+
+ --> Compromise. Tag each SecMap with a .mbHasShared bit which
+ must be set true if any ShR/ShM on the page. Set this for
+ any transitions into ShR/ShM on the page. Then skip page if
+ not set.
+
+ .mbHasShared bits are (effectively) cached in cache_shmem.
+ Hence that must be flushed before we can safely consult them.
+
+ Since we're modifying the backing store, we also need to
+ invalidate cache_shmem, so that subsequent memory references get
+ up to date shadow values.
+ */
+ evh__substitute_in_threadsets( "evh__HG_PTHREAD_JOIN_POST",
+ thr_q, thr_s );
+
/* This holds because, at least when using NPTL as the thread
library, we should be notified the low level thread exit before
we hear of any join event on it. The low level exit
@@ -6173,6 +6209,15 @@
new_seg->thr,
new_seg->prev->vts,
new_seg->other->vts );
+ /* Do nasty hack: signalling thread gives up ownership of
+ shared locations, giving them to the waiting thread
+ instead. If a location was previously accessed only by
+ the waiting thread and the signalling thread, then the
+ waiting thread acquires exclusive ownership. */
+ if (clo_happens_before >= 3)
+ evh__substitute_in_threadsets(
+ "evh__HG_PTHREAD_COND_WAIT_POST",
+ signalling_seg->thr, new_seg->thr );
} else {
/* Hmm. How can a wait on 'cond' succeed if nobody signalled
it? If this happened it would surely be a bug in the
@@ -8531,6 +8576,8 @@
clo_happens_before = 1;
else if (VG_CLO_STREQ(arg, "--happens-before=all"))
clo_happens_before = 2;
+ else if (VG_CLO_STREQ(arg, "--happens-before=cvhack"))
+ clo_happens_before = 3;
else if (VG_CLO_STREQ(arg, "--gen-vcg=no"))
clo_gen_vcg = 0;
-------------------------------------------------------------------------
SF.Net email is sponsored by:
Check out the new SourceForge.net Marketplace.
It's the best place to buy or sell services for
just about anything Open Source.
http://sourceforge.net/services/buy/index.php
_______________________________________________
Valgrind-developers mailing list
[email protected]
https://lists.sourceforge.net/lists/listinfo/valgrind-developers