Hi Mathieu,

On Sun, Aug 27, 2017 at 01:50:35PM -0700, Mathieu Desnoyers wrote:
> Add a new MEMBARRIER_CMD_REGISTER_SYNC_CORE command to the membarrier
> system call. It allows processes to register their intent to have their
> threads issue core serializing barriers in addition to memory barriers
> whenever a membarrier command is performed.
> 
> It is relevant for reclaim of JIT code, which requires issuing core
> serializing barriers on all threads running on behalf of a process
> after ensuring the old code is not visible anymore, before re-using
> memory for new code.
> 
> When a process returns from a MEMBARRIER_CMD_REGISTER_SYNC_CORE
> command, it is guaranteed that all following MEMBARRIER_CMD_SHARED and
> MEMBARRIER_CMD_PRIVATE_EXPEDITED commands issue core serializing
> barriers, in addition to the memory barriers, on all of its running
> threads.
> 
> * Scheduler Overhead Benchmarks
> 
> Intel(R) Xeon(R) CPU E5-2630 v3 @ 2.40GHz
> taskset 01 ./perf bench sched pipe -T
> Linux v4.13-rc6
> 
>                             Avg. usecs/op   Std.Dev. usecs/op
> Before this change:                  2.75                0.12
> Non-registered processes:            2.73                0.08
> Registered processes:                3.07                0.02
> 
> Changes since v1:
> - Add missing MEMBARRIER_CMD_REGISTER_SYNC_CORE header documentation,
> - Add benchmarks to commit message.
> 
> Signed-off-by: Mathieu Desnoyers <mathieu.desnoy...@efficios.com>
> CC: Peter Zijlstra <pet...@infradead.org>
> CC: Paul E. McKenney <paul...@linux.vnet.ibm.com>
> CC: Boqun Feng <boqun.f...@gmail.com>
> CC: Andrew Hunter <a...@google.com>
> CC: Maged Michael <maged.mich...@gmail.com>
> CC: gro...@google.com
> CC: Avi Kivity <a...@scylladb.com>
> CC: Benjamin Herrenschmidt <b...@kernel.crashing.org>
> CC: Paul Mackerras <pau...@samba.org>
> CC: Michael Ellerman <m...@ellerman.id.au>
> CC: Dave Watson <davejwat...@fb.com>
> CC: Andy Lutomirski <l...@kernel.org>
> CC: Will Deacon <will.dea...@arm.com>
> CC: Hans Boehm <hbo...@google.com>
> ---
>  fs/exec.c                       |  1 +
>  include/linux/sched.h           | 52 +++++++++++++++++++++++++++++++++++++++++
>  include/uapi/linux/membarrier.h |  8 +++++++
>  kernel/fork.c                   |  2 ++
>  kernel/sched/core.c             |  3 +++
>  kernel/sched/membarrier.c       | 37 ++++++++++++++++++++++++++---
>  6 files changed, 100 insertions(+), 3 deletions(-)
> 
> diff --git a/fs/exec.c b/fs/exec.c
> index 62175cbcc801..a4ab3253bac7 100644
> --- a/fs/exec.c
> +++ b/fs/exec.c
> @@ -1794,6 +1794,7 @@ static int do_execveat_common(int fd, struct filename *filename,
>          /* execve succeeded */
>          current->fs->in_exec = 0;
>          current->in_execve = 0;
> +        membarrier_execve(current);
>          acct_update_integrals(current);
>          task_numa_free(current);
>          free_bprm(bprm);
> diff --git a/include/linux/sched.h b/include/linux/sched.h
> index 8337e2db0bb2..b1ecdc4e8b84 100644
> --- a/include/linux/sched.h
> +++ b/include/linux/sched.h
> @@ -1086,6 +1086,9 @@ struct task_struct {
>          /* Used by LSM modules for access restriction: */
>          void *security;
>  #endif
> +#ifdef CONFIG_MEMBARRIER
> +        int membarrier_sync_core;

Why put this in task_struct rather than mm_struct? If the reclaim of
JIT code is one of the target users, don't you want to make this an
attribute of an address space? Am I missing something subtle here?
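To spell out my reading of the intended usage: the JIT reclaim sequence
from the commit message would look something like this from userspace.
A minimal sketch only, assuming the uapi constants from this patch;
membarrier(2) has no glibc wrapper, hence the raw syscall:

#define _GNU_SOURCE
#include <linux/membarrier.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdlib.h>

/* No glibc wrapper for membarrier(2); invoke the syscall directly. */
static int membarrier(int cmd, int flags)
{
        return syscall(__NR_membarrier, cmd, flags);
}

int main(void)
{
        /* Once, at JIT startup: register intent for core serialization. */
        if (membarrier(MEMBARRIER_CMD_REGISTER_SYNC_CORE, 0))
                exit(EXIT_FAILURE);

        /*
         * Later, after unpublishing old JIT code (no thread can branch
         * to it anymore): every running thread of this process issues
         * a core serializing barrier in addition to the memory barrier,
         * after which the old code's memory can be reused for new code.
         */
        if (membarrier(MEMBARRIER_CMD_PRIVATE_EXPEDITED, 0))
                exit(EXIT_FAILURE);
        return 0;
}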
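Since that whole sequence operates on a single address space, the
registration could arguably live in mm_struct. A rough sketch of what I
mean, with hypothetical field placement, not a real patch:

/*
 * With the flag in mm_struct, one store covers every thread sharing
 * the address space, including CLONE_VM children created later, so
 * neither the siglock-protected thread walk nor the task_struct fork
 * hook seems necessary.
 */
struct mm_struct {
        /* ... existing fields ... */
#ifdef CONFIG_MEMBARRIER
        int membarrier_sync_core;       /* shared by all users of this mm */
#endif
};

static void membarrier_register_sync_core(void)
{
        WRITE_ONCE(current->mm->membarrier_sync_core, 1);
        /* As in the patch: make future scheduler execution observe it. */
        synchronize_sched();
}

static inline void membarrier_sched_in(struct task_struct *t)
{
        /* Kernel threads have no mm; skip them. */
        if (t->mm && unlikely(READ_ONCE(t->mm->membarrier_sync_core)))
                sync_core();
}

membarrier_sched_out() would do the same mm-based check, and execve
installs a fresh mm anyway, so the explicit reset hook also seems
unnecessary.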
Regards,
Boqun

> +#endif
>  
>          /*
>           * New fields for task_struct should be added above here, so that
> @@ -1623,4 +1626,53 @@ extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
>  #define TASK_SIZE_OF(tsk)        TASK_SIZE
>  #endif
>  
> +#ifdef CONFIG_MEMBARRIER
> +static inline void membarrier_fork(struct task_struct *t,
> +                unsigned long clone_flags)
> +{
> +        /*
> +         * Coherence of membarrier_sync_core against thread fork is
> +         * protected by siglock. membarrier_fork is called with siglock
> +         * held.
> +         */
> +        t->membarrier_sync_core = current->membarrier_sync_core;
> +}
> +static inline void membarrier_execve(struct task_struct *t)
> +{
> +        t->membarrier_sync_core = 0;
> +}
> +static inline void membarrier_sched_out(struct task_struct *t)
> +{
> +        /*
> +         * Core serialization is performed before the memory barrier
> +         * preceding the store to rq->curr.
> +         */
> +        if (unlikely(READ_ONCE(t->membarrier_sync_core)))
> +                sync_core();
> +}
> +static inline void membarrier_sched_in(struct task_struct *t)
> +{
> +        /*
> +         * Core serialization is performed after the memory barrier
> +         * following the store to rq->curr.
> +         */
> +        if (unlikely(READ_ONCE(t->membarrier_sync_core)))
> +                sync_core();
> +}
> +#else
> +static inline void membarrier_fork(struct task_struct *t,
> +                unsigned long clone_flags)
> +{
> +}
> +static inline void membarrier_execve(struct task_struct *t)
> +{
> +}
> +static inline void membarrier_sched_out(struct task_struct *t)
> +{
> +}
> +static inline void membarrier_sched_in(struct task_struct *t)
> +{
> +}
> +#endif
> +
>  #endif
> diff --git a/include/uapi/linux/membarrier.h b/include/uapi/linux/membarrier.h
> index 6d47b3249d8a..933c35ebcc10 100644
> --- a/include/uapi/linux/membarrier.h
> +++ b/include/uapi/linux/membarrier.h
> @@ -56,6 +56,13 @@
>   *                          complete faster than the non-expedited ones,
>   *                          they never block, but have the downside of
>   *                          causing extra overhead.
> + * @MEMBARRIER_CMD_REGISTER_SYNC_CORE:
> + *                          Register the caller process so all of its
> + *                          threads will issue core serialization
> + *                          barriers in addition to memory barriers upon
> + *                          SHARED and PRIVATE barriers targeting
> + *                          its threads issued after this registration
> + *                          returns.
>   *
>   * Command to be passed to the membarrier system call. The commands need to
>   * be a single bit each, except for MEMBARRIER_CMD_QUERY which is assigned to
> @@ -67,6 +74,7 @@ enum membarrier_cmd {
>          /* reserved for MEMBARRIER_CMD_SHARED_EXPEDITED (1 << 1) */
>          /* reserved for MEMBARRIER_CMD_PRIVATE (1 << 2) */
>          MEMBARRIER_CMD_PRIVATE_EXPEDITED = (1 << 3),
> +        MEMBARRIER_CMD_REGISTER_SYNC_CORE = (1 << 4),
>  };
>  
>  #endif /* _UAPI_LINUX_MEMBARRIER_H */
> diff --git a/kernel/fork.c b/kernel/fork.c
> index 17921b0390b4..713b3c932671 100644
> --- a/kernel/fork.c
> +++ b/kernel/fork.c
> @@ -1840,6 +1840,8 @@ static __latent_entropy struct task_struct *copy_process(
>           */
>          copy_seccomp(p);
>  
> +        membarrier_fork(p, clone_flags);
> +
>          /*
>           * Process group and session signals need to be delivered to just the
>           * parent before the fork or both the parent and the child after the
> diff --git a/kernel/sched/core.c b/kernel/sched/core.c
> index 0e36d9960d91..3ca27d066950 100644
> --- a/kernel/sched/core.c
> +++ b/kernel/sched/core.c
> @@ -3292,6 +3292,8 @@ static void __sched notrace __schedule(bool preempt)
>          local_irq_disable();
>          rcu_note_context_switch(preempt);
>  
> +        membarrier_sched_out(prev);
> +
>          /*
>           * Make sure that signal_pending_state()->signal_pending() below
>           * can't be reordered with __set_current_state(TASK_INTERRUPTIBLE)
> @@ -3364,6 +3366,7 @@ static void __sched notrace __schedule(bool preempt)
>  
>                  /* Also unlocks the rq: */
>                  rq = context_switch(rq, prev, next, &rf);
> +                membarrier_sched_in(next);
>          } else {
>                  rq->clock_update_flags &= ~(RQCF_ACT_SKIP|RQCF_REQ_SKIP);
>                  rq_unlock_irq(rq, &rf);
> diff --git a/kernel/sched/membarrier.c b/kernel/sched/membarrier.c
> index 7eec6914d2d2..f128caf64b49 100644
> --- a/kernel/sched/membarrier.c
> +++ b/kernel/sched/membarrier.c
> @@ -25,12 +25,16 @@
>   * Bitmask made from a "or" of all commands within enum membarrier_cmd,
>   * except MEMBARRIER_CMD_QUERY.
>   */
> -#define MEMBARRIER_CMD_BITMASK        \
> -        (MEMBARRIER_CMD_SHARED | MEMBARRIER_CMD_PRIVATE_EXPEDITED)
> +#define MEMBARRIER_CMD_BITMASK                \
> +        (MEMBARRIER_CMD_SHARED                \
> +        | MEMBARRIER_CMD_PRIVATE_EXPEDITED    \
> +        | MEMBARRIER_CMD_REGISTER_SYNC_CORE)
>  
>  static void ipi_mb(void *info)
>  {
> -        smp_mb();        /* IPIs should be serializing but paranoid. */
> +        /* IPIs should be serializing but paranoid. */
> +        smp_mb();
> +        sync_core();
>  }
>  
>  static void membarrier_private_expedited(void)
> @@ -96,6 +100,30 @@ static void membarrier_private_expedited(void)
>          smp_mb();        /* exit from system call is not a mb */
>  }
>  
> +static void membarrier_register_sync_core(void)
> +{
> +        struct task_struct *p = current, *t;
> +
> +        if (get_nr_threads(p) == 1) {
> +                p->membarrier_sync_core = 1;
> +                return;
> +        }
> +
> +        /*
> +         * Coherence of membarrier_sync_core against thread fork is
> +         * protected by siglock.
> +         */
> +        spin_lock(&p->sighand->siglock);
> +        for_each_thread(p, t)
> +                WRITE_ONCE(t->membarrier_sync_core, 1);
> +        spin_unlock(&p->sighand->siglock);
> +        /*
> +         * Ensure all future scheduler execution will observe the new
> +         * membarrier_sync_core state for this process.
> +         */
> +        synchronize_sched();
> +}
> +
>  /**
>   * sys_membarrier - issue memory barriers on a set of threads
>   * @cmd:   Takes command values defined in enum membarrier_cmd.
> @@ -146,6 +174,9 @@ SYSCALL_DEFINE2(membarrier, int, cmd, int, flags)
>          case MEMBARRIER_CMD_PRIVATE_EXPEDITED:
>                  membarrier_private_expedited();
>                  return 0;
> +        case MEMBARRIER_CMD_REGISTER_SYNC_CORE:
> +                membarrier_register_sync_core();
> +                return 0;
>          default:
>                  return -EINVAL;
>          }
> -- 
> 2.11.0
> 