On 04/13/2015 11:09 AM, Peter Zijlstra wrote:
On Thu, Apr 09, 2015 at 05:41:44PM -0400, Waiman Long wrote:
+__visible void __pv_queue_spin_unlock(struct qspinlock *lock)
+{
+ struct __qspinlock *l = (void *)lock;
+ struct pv_node *node;
+
+ if (likely(cmpxchg(&l->locked, _Q_LOCKED_VAL, 0) == _Q_LOCKED_VAL))

On 04/13/2015 11:08 AM, Peter Zijlstra wrote:
On Thu, Apr 09, 2015 at 05:41:44PM -0400, Waiman Long wrote:
+static void pv_wait_head(struct qspinlock *lock, struct mcs_spinlock *node)
+{
+ struct __qspinlock *l = (void *)lock;
+ struct qspinlock **lp = NULL;
+ struct pv_node *pn = (struct pv_node *)node;

On 04/13/2015 10:47 AM, Peter Zijlstra wrote:
On Thu, Apr 09, 2015 at 05:41:44PM -0400, Waiman Long wrote:
+void __init __pv_init_lock_hash(void)
+{
+ int pv_hash_size = 4 * num_possible_cpus();
+
+ if (pv_hash_size < (1U << LFSR_MIN_BITS))
+ pv_hash_size = (1U << LFSR_MIN_BITS);

On Thu, Apr 09, 2015 at 05:41:44PM -0400, Waiman Long wrote:
> >>+static void pv_wait_head(struct qspinlock *lock, struct mcs_spinlock *node)
> >>+{
> >>+ struct __qspinlock *l = (void *)lock;
> >>+ struct qspinlock **lp = NULL;
> >>+ struct pv_node *pn = (struct pv_node *)node;
> >>+ int

On Thu, Apr 09, 2015 at 05:41:44PM -0400, Waiman Long wrote:
> >>+__visible void __pv_queue_spin_unlock(struct qspinlock *lock)
> >>+{
> >>+ struct __qspinlock *l = (void *)lock;
> >>+ struct pv_node *node;
> >>+
> >>+ if (likely(cmpxchg(&l->locked, _Q_LOCKED_VAL, 0) == _Q_LOCKED_VAL))
> >>+ return;
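The cmpxchg above is the unlock fast path: if the locked byte still holds
_Q_LOCKED_VAL, no waiter has hashed the lock and halted, so clearing the
byte is the whole job. The slow path that follows the early return is not
shown in these excerpts; a hedged sketch, assuming pv_unhash()/pv_kick()
helpers and a cpu field in struct pv_node:

	/* A waiter replaced _Q_LOCKED_VAL with a slow-path marker and
	 * hashed the lock before halting; look its node up first ...
	 */
	node = pv_unhash(lock);

	/* ... then actually release the lock ... */
	smp_store_release(&l->locked, 0);

	/* ... and wake the halted vcpu so it can take the lock. */
	pv_kick(node->cpu);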

On Thu, Apr 09, 2015 at 05:41:44PM -0400, Waiman Long wrote:
> >>+void __init __pv_init_lock_hash(void)
> >>+{
> >>+ int pv_hash_size = 4 * num_possible_cpus();
> >>+
> >>+ if (pv_hash_size < (1U << LFSR_MIN_BITS))
> >>+ pv_hash_size = (1U << LFSR_MIN_BITS);
> >>+ /*
> >>+ * Allo
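To make the sizing concrete (the value of LFSR_MIN_BITS is not shown in
these excerpts, so take 8 purely as an example): with 64 possible cpus,
pv_hash_size = 4 * 64 = 256 = 1 << 8 and the clamp changes nothing; with
2 possible cpus, 4 * 2 = 8 is below 1 << 8 = 256, so the clamp rounds the
table up to 256 entries.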

On 04/09/2015 02:23 PM, Peter Zijlstra wrote:
On Thu, Apr 09, 2015 at 08:13:27PM +0200, Peter Zijlstra wrote:
On Mon, Apr 06, 2015 at 10:55:44PM -0400, Waiman Long wrote:
+#define PV_HB_PER_LINE (SMP_CACHE_BYTES / sizeof(struct pv_hash_bucket))
+static struct qspinlock **pv_hash(struct qspinlock *lock, struct pv_node *node)

On Thu, Apr 09, 2015 at 08:13:27PM +0200, Peter Zijlstra wrote:
> On Mon, Apr 06, 2015 at 10:55:44PM -0400, Waiman Long wrote:
> > +#define PV_HB_PER_LINE (SMP_CACHE_BYTES / sizeof(struct pv_hash_bucket))
> > +static struct qspinlock **pv_hash(struct qspinlock *lock, struct pv_node *node)
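For a feel of PV_HB_PER_LINE, a sketch with assumed sizes (the bucket
layout is not shown in these excerpts; two pointers is a guess):

	/* Assumed layout, for illustration only. */
	struct pv_hash_bucket {
		struct qspinlock *lock;	/* which lock is hashed here */
		struct pv_node *node;	/* the halted waiter's node */
	};				/* 16 bytes on a 64-bit kernel */

	#define SMP_CACHE_BYTES 64	/* typical x86 cache line */
	#define PV_HB_PER_LINE (SMP_CACHE_BYTES / sizeof(struct pv_hash_bucket))
					/* 64 / 16 == 4 buckets per line */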

On Mon, Apr 06, 2015 at 10:55:44PM -0400, Waiman Long wrote:
> +++ b/kernel/locking/qspinlock_paravirt.h
> @@ -0,0 +1,321 @@
> +#ifndef _GEN_PV_LOCK_SLOWPATH
> +#error "do not include this file"
> +#endif
> +
> +/*
> + * Implement paravirt qspinlocks; the general idea is to halt the vcpus
> + * instead of spinning them.
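To see the halt-instead-of-spin idea in isolation, a minimal userspace
sketch (not the kernel code; SPIN_THRESHOLD, pv_wait() and pv_kick() here
are stand-ins for the patch's bounded spinning and the hypervisor
halt/wakeup hypercalls):

	#include <stdatomic.h>
	#include <stdio.h>

	#define SPIN_THRESHOLD	(1 << 15)

	static void pv_wait(atomic_int *ptr, int val)
	{
		/* Real version: hypercall that halts this vcpu until it
		 * is kicked, but only if *ptr still holds val (guards
		 * against a wakeup racing with going to sleep).
		 */
		if (atomic_load(ptr) == val)
			printf("halting vcpu\n");
	}

	static void pv_kick(int cpu)
	{
		/* Real version: hypercall waking the vcpu halted in pv_wait(). */
		printf("kicking vcpu %d\n", cpu);
	}

	/* Wait for the lock byte to clear: spin a bounded number of
	 * times first (locks are usually short-held), then hand the
	 * physical cpu back instead of burning it.
	 */
	static void wait_for_unlock(atomic_int *locked)
	{
		for (;;) {
			for (int loop = SPIN_THRESHOLD; loop; loop--)
				if (!atomic_load(locked))
					return;
			pv_wait(locked, 1);
		}
	}

	int main(void)
	{
		atomic_int locked = 0;	/* already released in this demo */

		wait_for_unlock(&locked);
		pv_kick(0);
		return 0;
	}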

Provide a separate (second) version of the spin_lock_slowpath for
paravirt along with a special unlock path.
The second slowpath is generated by adding a few pv hooks to the
normal slowpath, but where those will compile away for the native
case, they expand into special wait/wake code for the pv version.
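The "compile away for native, expand for pv" trick is worth a sketch. The
_GEN_PV_LOCK_SLOWPATH guard quoted earlier is what makes it work: the
slowpath source is compiled twice, the second time with pv hooks defined.
A self-contained illustration with made-up names (the patch itself does
this with qspinlock.c and qspinlock_paravirt.h):

	/* demo.c -- one slowpath body, two compiled variants */
	#ifndef _GEN_PV_SLOWPATH

	#include <stdio.h>

	/* Native pass: the wait hook is an empty inline that the
	 * compiler optimizes away entirely.
	 */
	static inline void pv_wait_hook(void) { }
	#define slowpath native_slowpath

	#else	/* pv pass: real hook body, different function name */

	static inline void pv_wait_hook_pv(void) { puts("pv: halt vcpu"); }
	#define pv_wait_hook pv_wait_hook_pv
	#undef slowpath
	#define slowpath pv_slowpath

	#endif

	/* The single slowpath body, compiled once per pass with hook
	 * and function names substituted by the preprocessor.
	 */
	static void slowpath(void)
	{
		pv_wait_hook();	/* no-op natively; halts the vcpu under pv */
		puts("lock acquired");
	}

	#ifndef _GEN_PV_SLOWPATH
	#define _GEN_PV_SLOWPATH
	#include "demo.c"	/* second pass generates pv_slowpath() */

	int main(void)
	{
		native_slowpath();	/* hook compiled away */
		pv_slowpath();		/* hook expanded to pv wait code */
		return 0;
	}
	#endif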