http://git-wip-us.apache.org/repos/asf/trafficserver/blob/f098175e/lib/ck/include/ck_brlock.h
----------------------------------------------------------------------
diff --git a/lib/ck/include/ck_brlock.h b/lib/ck/include/ck_brlock.h
new file mode 100644
index 0000000..d59f0e9
--- /dev/null
+++ b/lib/ck/include/ck_brlock.h
@@ -0,0 +1,280 @@
+/*
+ * Copyright 2011-2014 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _CK_BRLOCK_H
+#define _CK_BRLOCK_H
+
+/*
+ * Big reader spinlocks provide cache-local contention-free read
+ * lock acquisition in the absence of writers. This comes at the
+ * cost of O(n) write lock acquisition. They were first implemented
+ * in the Linux kernel by Ingo Molnar and David S. Miller around the
+ * year 2000.
+ *
+ * This implementation is thread-agnostic, which comes at the cost
+ * of larger reader objects due to necessary linkage overhead. In
+ * order to cut down on TLB pressure, it is recommended to allocate
+ * these objects on the same page.
+ */
+
+#include <ck_pr.h>
+#include <stdbool.h>
+#include <stddef.h>
+
+struct ck_brlock_reader {
+       unsigned int n_readers;
+       struct ck_brlock_reader *previous;
+       struct ck_brlock_reader *next;
+};
+typedef struct ck_brlock_reader ck_brlock_reader_t;
+
+#define CK_BRLOCK_READER_INITIALIZER {0}
+
+struct ck_brlock {
+       struct ck_brlock_reader *readers;
+       unsigned int writer;
+};
+typedef struct ck_brlock ck_brlock_t;
+
+#define CK_BRLOCK_INITIALIZER {NULL, false}
+
+CK_CC_INLINE static void
+ck_brlock_init(struct ck_brlock *br)
+{
+
+       br->readers = NULL;
+       br->writer = false;
+       ck_pr_barrier();
+       return;
+}
+
+CK_CC_INLINE static void
+ck_brlock_write_lock(struct ck_brlock *br)
+{
+       struct ck_brlock_reader *cursor;
+
+       /*
+        * As the frequency of write acquisitions should be low,
+        * there is no point in more advanced contention avoidance.
+        */
+       while (ck_pr_fas_uint(&br->writer, true) == true)
+               ck_pr_stall();
+
+       ck_pr_fence_atomic_load();
+
+       /* The reader list is protected by the writer lock. */
+       for (cursor = br->readers; cursor != NULL; cursor = cursor->next) {
+               while (ck_pr_load_uint(&cursor->n_readers) != 0)
+                       ck_pr_stall();
+       }
+
+       /* Already acquired with respect to other writers. */
+       return;
+}
+
+CK_CC_INLINE static void
+ck_brlock_write_unlock(struct ck_brlock *br)
+{
+
+       ck_pr_fence_release();
+       ck_pr_store_uint(&br->writer, false);
+       return;
+}
+
+CK_CC_INLINE static bool
+ck_brlock_write_trylock(struct ck_brlock *br, unsigned int factor)
+{
+       struct ck_brlock_reader *cursor;
+       unsigned int steps = 0;
+
+       while (ck_pr_fas_uint(&br->writer, true) == true) {
+               if (++steps >= factor)
+                       return false;
+
+               ck_pr_stall();
+       }
+
+       /*
+        * We do not require a strict fence here as atomic RMW operations
+        * are serializing.
+        */
+       ck_pr_fence_atomic_load();
+
+       for (cursor = br->readers; cursor != NULL; cursor = cursor->next) {
+               while (ck_pr_load_uint(&cursor->n_readers) != 0) {
+                       if (++steps >= factor) {
+                               ck_brlock_write_unlock(br);
+                               return false;
+                       }
+
+                       ck_pr_stall();
+               }
+       }
+
+       /* Already acquired with respect to other writers. */
+       return true;
+}
+
+CK_CC_INLINE static void
+ck_brlock_read_register(struct ck_brlock *br, struct ck_brlock_reader *reader)
+{
+
+       reader->n_readers = 0;
+       reader->previous = NULL;
+
+       /* Implicit compiler barrier. */
+       ck_brlock_write_lock(br);
+
+       reader->next = ck_pr_load_ptr(&br->readers);
+       if (reader->next != NULL)
+               reader->next->previous = reader;
+       ck_pr_store_ptr(&br->readers, reader);
+
+       ck_brlock_write_unlock(br);
+       return;
+}
+
+CK_CC_INLINE static void
+ck_brlock_read_unregister(struct ck_brlock *br, struct ck_brlock_reader *reader)
+{
+
+       ck_brlock_write_lock(br);
+
+       if (reader->next != NULL)
+               reader->next->previous = reader->previous;
+
+       if (reader->previous != NULL)
+               reader->previous->next = reader->next;
+       else
+               br->readers = reader->next;
+
+       ck_brlock_write_unlock(br);
+       return;
+}
+
+CK_CC_INLINE static void
+ck_brlock_read_lock(struct ck_brlock *br, struct ck_brlock_reader *reader)
+{
+
+       if (reader->n_readers >= 1) {
+               ck_pr_store_uint(&reader->n_readers, reader->n_readers + 1);
+               return;
+       }
+
+       for (;;) {
+               while (ck_pr_load_uint(&br->writer) == true)
+                       ck_pr_stall();
+
+#if defined(__x86__) || defined(__x86_64__)
+               ck_pr_fas_uint(&reader->n_readers, 1);
+
+               /*
+                * Serialize reader counter update with respect to load of
+                * writer.
+                */
+               ck_pr_fence_atomic_load();
+#else
+               ck_pr_store_uint(&reader->n_readers, 1);
+
+               /*
+                * Serialize reader counter update with respect to load of
+                * writer.
+                */
+               ck_pr_fence_store_load();
+#endif
+
+               if (ck_pr_load_uint(&br->writer) == false)
+                       break;
+
+               ck_pr_store_uint(&reader->n_readers, 0);
+       }
+
+       ck_pr_fence_load();
+       return;
+}
+
+CK_CC_INLINE static bool
+ck_brlock_read_trylock(struct ck_brlock *br,
+                      struct ck_brlock_reader *reader,
+                      unsigned int factor)
+{
+       unsigned int steps = 0;
+
+       if (reader->n_readers >= 1) {
+               ck_pr_store_uint(&reader->n_readers, reader->n_readers + 1);
+               return true;
+       }
+
+       for (;;) {
+               while (ck_pr_load_uint(&br->writer) == true) {
+                       if (++steps >= factor)
+                               return false;
+
+                       ck_pr_stall();
+               }
+
+#if defined(__x86__) || defined(__x86_64__)
+               ck_pr_fas_uint(&reader->n_readers, 1);
+
+               /*
+                * Serialize reader counter update with respect to load of
+                * writer.
+                */
+               ck_pr_fence_atomic_load();
+#else
+               ck_pr_store_uint(&reader->n_readers, 1);
+
+               /*
+                * Serialize reader counter update with respect to load of
+                * writer.
+                */
+               ck_pr_fence_store_load();
+#endif
+
+               if (ck_pr_load_uint(&br->writer) == false)
+                       break;
+
+               ck_pr_store_uint(&reader->n_readers, 0);
+
+               if (++steps >= factor)
+                       return false;
+       }
+
+       ck_pr_fence_load();
+       return true;
+}
+
+CK_CC_INLINE static void
+ck_brlock_read_unlock(struct ck_brlock_reader *reader)
+{
+
+       ck_pr_fence_load_store();
+       ck_pr_store_uint(&reader->n_readers, reader->n_readers - 1);
+       return;
+}
+
+#endif /* _CK_BRLOCK_H */
+

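A minimal sketch of how the brlock above is driven: each thread registers one
ck_brlock_reader before its first read acquisition and reuses it thereafter.
The __thread storage class is an assumption; any per-thread allocation scheme
works.

    #include <ck_brlock.h>

    static ck_brlock_t lock = CK_BRLOCK_INITIALIZER;
    static __thread ck_brlock_reader_t reader;  /* assumes compiler TLS */

    static void
    thread_init(void)
    {

        /* Register once per thread, before the first read acquisition. */
        ck_brlock_read_register(&lock, &reader);
        return;
    }

    static void
    read_side(void)
    {

        ck_brlock_read_lock(&lock, &reader);
        /* ... read shared state ... */
        ck_brlock_read_unlock(&reader);
        return;
    }

    static void
    write_side(void)
    {

        /* O(n) in the number of registered readers. */
        ck_brlock_write_lock(&lock);
        /* ... mutate shared state ... */
        ck_brlock_write_unlock(&lock);
        return;
    }
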
http://git-wip-us.apache.org/repos/asf/trafficserver/blob/f098175e/lib/ck/include/ck_bytelock.h
----------------------------------------------------------------------
diff --git a/lib/ck/include/ck_bytelock.h b/lib/ck/include/ck_bytelock.h
new file mode 100644
index 0000000..cd855cf
--- /dev/null
+++ b/lib/ck/include/ck_bytelock.h
@@ -0,0 +1,187 @@
+/*
+ * Copyright 2010-2014 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _CK_BYTELOCK_H
+#define _CK_BYTELOCK_H
+
+/*
+ * The implementations here are derived from the work described in:
+ *   Dice, D. and Shavit, N. 2010. TLRW: return of the read-write lock.
+ *   In Proceedings of the 22nd ACM Symposium on Parallelism in Algorithms
+ *   and Architectures (Thira, Santorini, Greece, June 13 - 15, 2010).
+ *   SPAA '10. ACM, New York, NY, 284-293.
+ */
+
+#include <ck_cc.h>
+#include <ck_md.h>
+#include <ck_pr.h>
+#include <stdbool.h>
+#include <stddef.h>
+#include <ck_limits.h>
+
+struct ck_bytelock {
+       unsigned int owner;
+       unsigned int n_readers;
+       uint8_t readers[CK_MD_CACHELINE - sizeof(unsigned int) * 2] CK_CC_ALIGN(8);
+};
+typedef struct ck_bytelock ck_bytelock_t;
+
+#define CK_BYTELOCK_INITIALIZER { 0, 0, {0} }
+#define CK_BYTELOCK_UNSLOTTED   UINT_MAX
+
+CK_CC_INLINE static void
+ck_bytelock_init(struct ck_bytelock *bytelock)
+{
+       unsigned int i;
+
+       bytelock->owner = 0;
+       bytelock->n_readers = 0;
+       for (i = 0; i < sizeof bytelock->readers; i++)
+               bytelock->readers[i] = false;
+
+       ck_pr_barrier();
+       return;
+}
+
+#ifdef CK_F_PR_LOAD_64
+#define CK_BYTELOCK_LENGTH sizeof(uint64_t)
+#define CK_BYTELOCK_LOAD ck_pr_load_64
+#define CK_BYTELOCK_TYPE uint64_t
+#elif defined(CK_F_PR_LOAD_32)
+#define CK_BYTELOCK_LENGTH sizeof(uint32_t)
+#define CK_BYTELOCK_LOAD ck_pr_load_32
+#define CK_BYTELOCK_TYPE uint32_t
+#else
+#error Unsupported platform.
+#endif
+
+CK_CC_INLINE static void
+ck_bytelock_write_lock(struct ck_bytelock *bytelock, unsigned int slot)
+{
+       CK_BYTELOCK_TYPE *readers = (void *)bytelock->readers;
+       unsigned int i;
+
+       /* Announce upcoming writer acquisition. */
+       while (ck_pr_cas_uint(&bytelock->owner, 0, slot) == false)
+               ck_pr_stall();
+
+       /* If we are slotted, we might be upgrading from a read lock. */
+       if (slot <= sizeof bytelock->readers)
+               ck_pr_store_8(&bytelock->readers[slot - 1], false);
+
+       /* Wait for slotted readers to drain out. */
+       ck_pr_fence_store_load();
+       for (i = 0; i < sizeof(bytelock->readers) / CK_BYTELOCK_LENGTH; i++) {
+               while (CK_BYTELOCK_LOAD(&readers[i]) != false)
+                       ck_pr_stall();
+       }
+
+       /* Wait for unslotted readers to drain out. */
+       while (ck_pr_load_uint(&bytelock->n_readers) != 0)
+               ck_pr_stall();
+
+       return;
+}
+
+#undef CK_BYTELOCK_LENGTH
+#undef CK_BYTELOCK_LOAD
+#undef CK_BYTELOCK_TYPE
+
+CK_CC_INLINE static void
+ck_bytelock_write_unlock(struct ck_bytelock *bytelock)
+{
+
+       ck_pr_fence_release();
+       ck_pr_store_uint(&bytelock->owner, 0);
+       return;
+}
+
+CK_CC_INLINE static void
+ck_bytelock_read_lock(struct ck_bytelock *bytelock, unsigned int slot)
+{
+
+       if (ck_pr_load_uint(&bytelock->owner) == slot) {
+               ck_pr_store_8(&bytelock->readers[slot - 1], true);
+               ck_pr_fence_strict_store();
+               ck_pr_store_uint(&bytelock->owner, 0);
+               return;
+       }
+
+       /* Unslotted threads will have to use the readers counter. */
+       if (slot > sizeof bytelock->readers) {
+               for (;;) {
+                       ck_pr_inc_uint(&bytelock->n_readers);
+                       ck_pr_fence_atomic_load();
+                       if (ck_pr_load_uint(&bytelock->owner) == 0)
+                               break;
+                       ck_pr_dec_uint(&bytelock->n_readers);
+
+                       while (ck_pr_load_uint(&bytelock->owner) != 0)
+                               ck_pr_stall();
+               }
+
+               ck_pr_fence_load();
+               return;
+       }
+
+       slot -= 1;
+       for (;;) {
+               ck_pr_store_8(&bytelock->readers[slot], true);
+               ck_pr_fence_store_load();
+
+               /*
+                * If there is no owner at this point, our slot has
+                * already been published and it is guaranteed no
+                * write acquisition will succeed until we drain out.
+                */
+               if (ck_pr_load_uint(&bytelock->owner) == 0)
+                       break;
+
+               ck_pr_store_8(&bytelock->readers[slot], false);
+               while (ck_pr_load_uint(&bytelock->owner) != 0)
+                       ck_pr_stall();
+       }
+
+       ck_pr_fence_load();
+       return;
+}
+
+CK_CC_INLINE static void
+ck_bytelock_read_unlock(struct ck_bytelock *bytelock, unsigned int slot)
+{
+
+       ck_pr_fence_release();
+
+       if (slot > sizeof bytelock->readers)
+               ck_pr_dec_uint(&bytelock->n_readers);
+       else
+               ck_pr_store_8(&bytelock->readers[slot - 1], false);
+
+       return;
+}
+
+#endif /* _CK_BYTELOCK_H */
+

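A sketch of the bytelock's two reader paths: slotted threads (1-based slot
indices that fit in the readers array) get a private byte, while all other
threads pass CK_BYTELOCK_UNSLOTTED and share the n_readers counter. Slot
assignment itself is left to the caller.

    #include <ck_bytelock.h>

    static ck_bytelock_t lock = CK_BYTELOCK_INITIALIZER;

    static void
    slotted_reader(unsigned int slot)   /* 1 <= slot <= sizeof lock.readers */
    {

        ck_bytelock_read_lock(&lock, slot);
        /* ... read shared state ... */
        ck_bytelock_read_unlock(&lock, slot);
        return;
    }

    static void
    unslotted_reader(void)
    {

        /* Falls back to the shared n_readers counter. */
        ck_bytelock_read_lock(&lock, CK_BYTELOCK_UNSLOTTED);
        /* ... read shared state ... */
        ck_bytelock_read_unlock(&lock, CK_BYTELOCK_UNSLOTTED);
        return;
    }

    static void
    writer(unsigned int slot)
    {

        ck_bytelock_write_lock(&lock, slot);
        /* ... mutate shared state ... */
        ck_bytelock_write_unlock(&lock);
        return;
    }
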
http://git-wip-us.apache.org/repos/asf/trafficserver/blob/f098175e/lib/ck/include/ck_cc.h
----------------------------------------------------------------------
diff --git a/lib/ck/include/ck_cc.h b/lib/ck/include/ck_cc.h
new file mode 100644
index 0000000..a4baaa5
--- /dev/null
+++ b/lib/ck/include/ck_cc.h
@@ -0,0 +1,155 @@
+/*
+ * Copyright 2009-2014 Samy Al Bahra.
+ * Copyright 2014 Paul Khuong.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _CK_CC_H
+#define _CK_CC_H
+
+#if defined(__GNUC__) || defined(__SUNPRO_C)
+#include "gcc/ck_cc.h"
+#endif
+
+/*
+ * Container function.
+ * This relies on (compiler) implementation-defined behavior.
+ */
+#define CK_CC_CONTAINER(F, T, M, N)                                            \
+       CK_CC_INLINE static T *                                                 \
+       N(F *p)                                                                 \
+       {                                                                       \
+               const F *n = p;                                                 \
+               return (T *)(void *)(((char *)n) - ((size_t)&((T *)0)->M));     \
+       }
+
+#define CK_CC_PAD(x) union { char pad[x]; }
+
+#ifndef CK_CC_ALIASED
+#define CK_CC_ALIASED
+#endif
+
+#ifndef CK_CC_UNUSED
+#define CK_CC_UNUSED
+#endif
+
+#ifndef CK_CC_USED
+#define CK_CC_USED
+#endif
+
+#ifndef CK_CC_IMM
+#define CK_CC_IMM
+#endif
+
+#ifndef CK_CC_PACKED
+#define CK_CC_PACKED
+#endif
+
+#ifndef CK_CC_WEAKREF
+#define CK_CC_WEAKREF
+#endif
+
+#ifndef CK_CC_ALIGN
+#define CK_CC_ALIGN(X)
+#endif
+
+#ifndef CK_CC_CACHELINE
+#define CK_CC_CACHELINE
+#endif
+
+#ifndef CK_CC_LIKELY
+#define CK_CC_LIKELY(x) x
+#endif
+
+#ifndef CK_CC_UNLIKELY
+#define CK_CC_UNLIKELY(x) x
+#endif
+
+#ifndef CK_F_CC_FFS
+#define CK_F_CC_FFS
+CK_CC_INLINE static int
+ck_cc_ffs(unsigned int x)
+{
+       unsigned int i;
+
+       if (x == 0)
+               return 0;
+
+       for (i = 1; (x & 1) == 0; i++, x >>= 1);
+
+       return i;
+}
+#endif
+
+#ifndef CK_F_CC_CLZ
+#define CK_F_CC_CLZ
+#include <ck_limits.h>
+
+CK_CC_INLINE static int
+ck_cc_clz(unsigned int x)
+{
+       unsigned int count, i;
+
+       for (count = 0, i = sizeof(unsigned int) * CHAR_BIT; i > 0; count++) {
+               unsigned int bit = 1U << --i;
+
+               if (x & bit)
+                       break;
+       }
+
+       return count;
+}
+#endif
+
+#ifndef CK_F_CC_CTZ
+#define CK_F_CC_CTZ
+CK_CC_INLINE static int
+ck_cc_ctz(unsigned int x)
+{
+       unsigned int i;
+
+       if (x == 0)
+               return 0;
+
+       for (i = 0; (x & 1) == 0; i++, x >>= 1);
+
+       return i;
+}
+#endif
+
+#ifndef CK_F_CC_POPCOUNT
+#define CK_F_CC_POPCOUNT
+CK_CC_INLINE static int
+ck_cc_popcount(unsigned int x)
+{
+       unsigned int acc;
+
+       for (acc = 0; x != 0; x >>= 1)
+               acc += x & 1;
+
+       return acc;
+}
+#endif
+
+#endif /* _CK_CC_H */

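CK_CC_CONTAINER above generates an inline accessor that maps a pointer to an
embedded member back to its enclosing object. A small sketch with hypothetical
types:

    #include <ck_cc.h>

    struct link {
        struct link *next;
    };

    struct connection {                 /* hypothetical container type */
        int fd;
        struct link node;
    };

    /* Generates: static struct connection *connection_container(struct link *) */
    CK_CC_CONTAINER(struct link, struct connection, node, connection_container)

    static int
    fd_of(struct link *l)
    {

        return connection_container(l)->fd;
    }
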
http://git-wip-us.apache.org/repos/asf/trafficserver/blob/f098175e/lib/ck/include/ck_cohort.h
----------------------------------------------------------------------
diff --git a/lib/ck/include/ck_cohort.h b/lib/ck/include/ck_cohort.h
new file mode 100644
index 0000000..5bc8baa
--- /dev/null
+++ b/lib/ck/include/ck_cohort.h
@@ -0,0 +1,162 @@
+/*
+ * Copyright 2013-2014 Samy Al Bahra.
+ * Copyright 2013 Brendon Scheinman.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _CK_COHORT_H
+#define _CK_COHORT_H
+
+/*
+ * This is an implementation of lock cohorts as described in:
+ *     Dice, D.; Marathe, V.; and Shavit, N. 2012.
+ *     Lock Cohorting: A General Technique for Designing NUMA Locks
+ */
+
+#include <ck_cc.h>
+#include <ck_pr.h>
+#include <stddef.h>
+
+enum ck_cohort_state {
+       CK_COHORT_STATE_GLOBAL = 0,
+       CK_COHORT_STATE_LOCAL = 1
+};
+
+#define CK_COHORT_DEFAULT_LOCAL_PASS_LIMIT 10
+
+#define CK_COHORT_NAME(N) ck_cohort_##N
+#define CK_COHORT_INSTANCE(N) struct CK_COHORT_NAME(N)
+#define CK_COHORT_INIT(N, C, GL, LL, P) ck_cohort_##N##_init(C, GL, LL, P)
+#define CK_COHORT_LOCK(N, C, GC, LC) ck_cohort_##N##_lock(C, GC, LC)
+#define CK_COHORT_UNLOCK(N, C, GC, LC) ck_cohort_##N##_unlock(C, GC, LC)
+#define CK_COHORT_TRYLOCK(N, C, GLC, LLC, LUC) ck_cohort_##N##_trylock(C, GLC, LLC, LUC)
+#define CK_COHORT_LOCKED(N, C, GC, LC) ck_cohort_##N##_locked(C, GC, LC)
+
+#define CK_COHORT_PROTOTYPE(N, GL, GU, GI, LL, LU, LI)                         \
+       CK_COHORT_INSTANCE(N) {                                                 \
+               void *global_lock;                                              \
+               void *local_lock;                                               \
+               enum ck_cohort_state release_state;                             \
+               unsigned int waiting_threads;                                   \
+               unsigned int acquire_count;                                     \
+               unsigned int local_pass_limit;                                  \
+       };                                                                      \
+                                                                               \
+       CK_CC_INLINE static void                                                \
+       ck_cohort_##N##_init(struct ck_cohort_##N *cohort,                      \
+           void *global_lock, void *local_lock, unsigned int pass_limit)       \
+       {                                                                       \
+               cohort->global_lock = global_lock;                              \
+               cohort->local_lock = local_lock;                                \
+               cohort->release_state = CK_COHORT_STATE_GLOBAL;                 \
+               cohort->waiting_threads = 0;                                    \
+               cohort->acquire_count = 0;                                      \
+               cohort->local_pass_limit = pass_limit;                          \
+               ck_pr_barrier();                                                \
+               return;                                                         \
+       }                                                                       \
+                                                                               \
+       CK_CC_INLINE static void                                                \
+       ck_cohort_##N##_lock(CK_COHORT_INSTANCE(N) *cohort,                     \
+           void *global_context, void *local_context)                          \
+       {                                                                       \
+                                                                               \
+               ck_pr_inc_uint(&cohort->waiting_threads);                       \
+               LL(cohort->local_lock, local_context);                          \
+               ck_pr_dec_uint(&cohort->waiting_threads);                       \
+                                                                               \
+               if (cohort->release_state == CK_COHORT_STATE_GLOBAL) {          \
+                       GL(cohort->global_lock, global_context);                \
+               }                                                               \
+                                                                               \
+               ++cohort->acquire_count;                                        \
+               return;                                                         \
+       }                                                                       \
+                                                                               \
+       CK_CC_INLINE static void                                                \
+       ck_cohort_##N##_unlock(CK_COHORT_INSTANCE(N) *cohort,                   \
+           void *global_context, void *local_context)                          \
+       {                                                                       \
+                                                                               \
+               if (ck_pr_load_uint(&cohort->waiting_threads) > 0               \
+                   && cohort->acquire_count < cohort->local_pass_limit) {      \
+                       cohort->release_state = CK_COHORT_STATE_LOCAL;          \
+               } else {                                                        \
+                       GU(cohort->global_lock, global_context);                \
+                       cohort->release_state = CK_COHORT_STATE_GLOBAL;         \
+                       cohort->acquire_count = 0;                              \
+               }                                                               \
+                                                                               \
+               ck_pr_fence_release();                                          \
+               LU(cohort->local_lock, local_context);                          \
+                                                                               \
+               return;                                                         \
+       }                                                                       \
+                                                                               \
+       CK_CC_INLINE static bool                                                \
+       ck_cohort_##N##_locked(CK_COHORT_INSTANCE(N) *cohort,                   \
+           void *global_context, void *local_context)                          \
+       {                                                                       \
+               return GI(cohort->global_lock, global_context) ||               \
+                   LI(cohort->local_lock, local_context);                      \
+       }
+
+#define CK_COHORT_TRYLOCK_PROTOTYPE(N, GL, GU, GI, GTL, LL, LU, LI, LTL)       \
+       CK_COHORT_PROTOTYPE(N, GL, GU, GI, LL, LU, LI)                          \
+       CK_CC_INLINE static bool                                                \
+       ck_cohort_##N##_trylock(CK_COHORT_INSTANCE(N) *cohort,                  \
+           void *global_context, void *local_context,                          \
+           void *local_unlock_context)                                         \
+       {                                                                       \
+                                                                               \
+               bool trylock_result;                                            \
+                                                                               \
+               ck_pr_inc_uint(&cohort->waiting_threads);                       \
+               trylock_result = LTL(cohort->local_lock, local_context);        \
+               ck_pr_dec_uint(&cohort->waiting_threads);                       \
+               if (trylock_result == false) {                                  \
+                       return false;                                           \
+               }                                                               \
+                                                                               \
+               if (cohort->release_state == CK_COHORT_STATE_GLOBAL &&          \
+                   GTL(cohort->global_lock, global_context) == false) {        \
+                       LU(cohort->local_lock, local_unlock_context);           \
+                       return false;                                           \
+               }                                                               \
+                                                                               \
+               ++cohort->acquire_count;                                        \
+               return true;                                                    \
+       }
+
+#define CK_COHORT_INITIALIZER {                                                \
+       .global_lock = NULL,                                                    \
+       .local_lock = NULL,                                                     \
+       .release_state = CK_COHORT_STATE_GLOBAL,                                \
+       .waiting_threads = 0,                                                   \
+       .acquire_count = 0,                                                     \
+       .local_pass_limit = CK_COHORT_DEFAULT_LOCAL_PASS_LIMIT                  \
+}
+
+#endif /* _CK_COHORT_H */
+

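Because the cohort macros take lock operations as parameters, instantiating
them requires adapters with a (lock, context) signature. A sketch using the
FAS spinlock for both tiers; the ck_spinlock_fas_* calls are assumed from
ck_spinlock.h, and the context arguments go unused here:

    #include <ck_cohort.h>
    #include <ck_spinlock.h>

    static void
    fas_lock(void *lock, void *context)
    {

        (void)context;
        ck_spinlock_fas_lock(lock);
        return;
    }

    static void
    fas_unlock(void *lock, void *context)
    {

        (void)context;
        ck_spinlock_fas_unlock(lock);
        return;
    }

    static bool
    fas_locked(void *lock, void *context)
    {

        (void)context;
        return ck_spinlock_fas_locked(lock);
    }

    CK_COHORT_PROTOTYPE(fas_fas, fas_lock, fas_unlock, fas_locked,
        fas_lock, fas_unlock, fas_locked)

    static ck_spinlock_fas_t global_lock = CK_SPINLOCK_FAS_INITIALIZER;
    static ck_spinlock_fas_t local_lock = CK_SPINLOCK_FAS_INITIALIZER;
    static CK_COHORT_INSTANCE(fas_fas) cohort;

    static void
    example(void)
    {

        /* In practice there is one cohort and local lock per NUMA node. */
        CK_COHORT_INIT(fas_fas, &cohort, &global_lock, &local_lock,
            CK_COHORT_DEFAULT_LOCAL_PASS_LIMIT);

        CK_COHORT_LOCK(fas_fas, &cohort, NULL, NULL);
        /* ... critical section ... */
        CK_COHORT_UNLOCK(fas_fas, &cohort, NULL, NULL);
        return;
    }
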
http://git-wip-us.apache.org/repos/asf/trafficserver/blob/f098175e/lib/ck/include/ck_elide.h
----------------------------------------------------------------------
diff --git a/lib/ck/include/ck_elide.h b/lib/ck/include/ck_elide.h
new file mode 100644
index 0000000..8ffff40
--- /dev/null
+++ b/lib/ck/include/ck_elide.h
@@ -0,0 +1,322 @@
+/*
+ * Copyright 2013-2014 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _CK_ELIDE_H
+#define _CK_ELIDE_H
+
+/*
+ * As RTM is currently only supported on TSO x86 architectures,
+ * fences have been omitted. They will be necessary for other
+ * non-TSO architectures with TM support.
+ */
+
+#include <ck_cc.h>
+#include <ck_limits.h> /* USHRT_MAX */
+#include <ck_pr.h>
+#include <string.h>
+
+/*
+ * skip_-prefixed counters represent the number of consecutive
+ * elisions to forfeit. retry_-prefixed counters represent the
+ * number of elision retries to attempt before forfeit.
+ *
+ *     _busy: Lock was busy
+ *    _other: Unknown explicit abort
+ * _conflict: Data conflict in elision section
+ */
+struct ck_elide_config {
+       unsigned short skip_busy;
+       short retry_busy;
+       unsigned short skip_other;
+       short retry_other;
+       unsigned short skip_conflict;
+       short retry_conflict;
+};
+
+#define CK_ELIDE_CONFIG_DEFAULT_INITIALIZER {  \
+       .skip_busy = 5,                         \
+       .retry_busy = 256,                      \
+       .skip_other = 3,                        \
+       .retry_other = 3,                       \
+       .skip_conflict = 2,                     \
+       .retry_conflict = 5                     \
+}
+
+struct ck_elide_stat {
+       unsigned int n_fallback;
+       unsigned int n_elide;
+       unsigned short skip;
+};
+typedef struct ck_elide_stat ck_elide_stat_t;
+
+#define CK_ELIDE_STAT_INITIALIZER { 0, 0, 0 }
+
+static inline void
+ck_elide_stat_init(ck_elide_stat_t *st)
+{
+
+       memset(st, 0, sizeof(*st));
+       return;
+}
+
+#ifdef CK_F_PR_RTM
+enum _ck_elide_hint {
+       CK_ELIDE_HINT_RETRY = 0,
+       CK_ELIDE_HINT_SPIN,
+       CK_ELIDE_HINT_STOP
+};
+
+#define _CK_ELIDE_LOCK_BUSY 0xFF
+
+static enum _ck_elide_hint
+_ck_elide_fallback(int *retry,
+    struct ck_elide_stat *st,
+    struct ck_elide_config *c,
+    unsigned int status)
+{
+
+       st->n_fallback++;
+       if (*retry > 0)
+               return CK_ELIDE_HINT_RETRY;
+
+       if (st->skip != 0)
+               return CK_ELIDE_HINT_STOP;
+
+       if (status & CK_PR_RTM_EXPLICIT) {
+               if (CK_PR_RTM_CODE(status) == _CK_ELIDE_LOCK_BUSY) {
+                       st->skip = c->skip_busy;
+                       *retry = c->retry_busy;
+                       return CK_ELIDE_HINT_SPIN;
+               }
+
+               st->skip = c->skip_other;
+               return CK_ELIDE_HINT_STOP;
+       }
+
+       if ((status & CK_PR_RTM_RETRY) &&
+           (status & CK_PR_RTM_CONFLICT)) {
+               st->skip = c->skip_conflict;
+               *retry = c->retry_conflict;
+               return CK_ELIDE_HINT_RETRY;
+       }
+
+       /*
+        * Capacity, debug and nesting aborts are likely to be
+        * invariant conditions for the acquisition, so execute the
+        * regular path instead. If the retry bit is not set, then
+        * take the hint.
+        */
+       st->skip = USHRT_MAX;
+       return CK_ELIDE_HINT_STOP;
+}
+
+/*
+ * Defines an elision implementation according to the following variables:
+ *     N - Namespace of elision implementation.
+ *     T - Typename of mutex.
+ *   L_P - Lock predicate, returns false if the resource is available.
+ *     L - Function to call if the resource is unavailable or the transaction aborts.
+ *   U_P - Unlock predicate, returns false if the section was successfully elided.
+ *     U - Function to call if the lock was actually acquired (elision failed).
+ */
+#define CK_ELIDE_PROTOTYPE(N, T, L_P, L, U_P, U)                               \
+       CK_CC_INLINE static void                                                \
+       ck_elide_##N##_lock_adaptive(T *lock,                                   \
+           struct ck_elide_stat *st,                                           \
+           struct ck_elide_config *c)                                          \
+       {                                                                       \
+               enum _ck_elide_hint hint;                                       \
+               int retry;                                                      \
+                                                                               \
+               if (CK_CC_UNLIKELY(st->skip != 0)) {                            \
+                       st->skip--;                                             \
+                       goto acquire;                                           \
+               }                                                               \
+                                                                               \
+               retry = c->retry_conflict;                                      \
+               do {                                                            \
+                       unsigned int status = ck_pr_rtm_begin();                \
+                       if (status == CK_PR_RTM_STARTED) {                      \
+                               if (L_P(lock) == true)                          \
+                                       ck_pr_rtm_abort(_CK_ELIDE_LOCK_BUSY);   \
+                                                                               \
+                               return;                                         \
+                       }                                                       \
+                                                                               \
+                       hint = _ck_elide_fallback(&retry, st, c, status);       \
+                       if (hint == CK_ELIDE_HINT_RETRY)                        \
+                               continue;                                       \
+                                                                               \
+                       if (hint == CK_ELIDE_HINT_SPIN) {                       \
+                               while (--retry != 0) {                          \
+                                       if (L_P(lock) == false)                 \
+                                               break;                          \
+                                                                               \
+                                       ck_pr_stall();                          \
+                               }                                               \
+                                                                               \
+                               continue;                                       \
+                       }                                                       \
+                                                                               \
+                       if (hint == CK_ELIDE_HINT_STOP)                         \
+                               break;                                          \
+               } while (CK_CC_LIKELY(--retry > 0));                            \
+                                                                               \
+       acquire:                                                                \
+               L(lock);                                                        \
+               return;                                                         \
+       }                                                                       \
+       CK_CC_INLINE static void                                                \
+       ck_elide_##N##_unlock_adaptive(struct ck_elide_stat *st, T *lock)       \
+       {                                                                       \
+                                                                               \
+               if (U_P(lock) == false) {                                       \
+                       ck_pr_rtm_end();                                        \
+                       st->skip = 0;                                           \
+                       st->n_elide++;                                          \
+               } else {                                                        \
+                       U(lock);                                                \
+               }                                                               \
+                                                                               \
+               return;                                                         \
+       }                                                                       \
+       CK_CC_INLINE static void                                                \
+       ck_elide_##N##_lock(T *lock)                                            \
+       {                                                                       \
+                                                                               \
+               if (ck_pr_rtm_begin() != CK_PR_RTM_STARTED) {                   \
+                       L(lock);                                                \
+                       return;                                                 \
+               }                                                               \
+                                                                               \
+               if (L_P(lock) == true)                                          \
+                       ck_pr_rtm_abort(_CK_ELIDE_LOCK_BUSY);                   \
+                                                                               \
+               return;                                                         \
+       }                                                                       \
+       CK_CC_INLINE static void                                                \
+       ck_elide_##N##_unlock(T *lock)                                          \
+       {                                                                       \
+                                                                               \
+               if (U_P(lock) == false) {                                       \
+                       ck_pr_rtm_end();                                        \
+               } else {                                                        \
+                       U(lock);                                                \
+               }                                                               \
+                                                                               \
+               return;                                                         \
+       }
+
+#define CK_ELIDE_TRYLOCK_PROTOTYPE(N, T, TL_P, TL)                     \
+       CK_CC_INLINE static bool                                        \
+       ck_elide_##N##_trylock(T *lock)                                 \
+       {                                                               \
+                                                                       \
+               if (ck_pr_rtm_begin() != CK_PR_RTM_STARTED)             \
+                       return false;                                   \
+                                                                       \
+               if (TL_P(lock) == true)                                 \
+                       ck_pr_rtm_abort(_CK_ELIDE_LOCK_BUSY);           \
+                                                                       \
+               return true;                                            \
+       }
+#else
+/*
+ * If RTM is not enabled on the target platform (CK_F_PR_RTM), then these
+ * elision wrappers call directly into the user-specified lock operations.
+ * Unfortunately, the storage cost of both ck_elide_config and ck_elide_stat
+ * is still paid (typically a function of the number of lock objects and
+ * threads).
+ */
+#define CK_ELIDE_PROTOTYPE(N, T, L_P, L, U_P, U)                       \
+       CK_CC_INLINE static void                                        \
+       ck_elide_##N##_lock_adaptive(T *lock,                           \
+           struct ck_elide_stat *st,                                   \
+           struct ck_elide_config *c)                                  \
+       {                                                               \
+                                                                       \
+               (void)st;                                               \
+               (void)c;                                                \
+               L(lock);                                                \
+               return;                                                 \
+       }                                                               \
+       CK_CC_INLINE static void                                        \
+       ck_elide_##N##_unlock_adaptive(struct ck_elide_stat *st,        \
+           T *lock)                                                    \
+       {                                                               \
+                                                                       \
+               (void)st;                                               \
+               U(lock);                                                \
+               return;                                                 \
+       }                                                               \
+       CK_CC_INLINE static void                                        \
+       ck_elide_##N##_lock(T *lock)                                    \
+       {                                                               \
+                                                                       \
+               L(lock);                                                \
+               return;                                                 \
+       }                                                               \
+       CK_CC_INLINE static void                                        \
+       ck_elide_##N##_unlock(T *lock)                                  \
+       {                                                               \
+                                                                       \
+               U(lock);                                                \
+               return;                                                 \
+       }
+
+#define CK_ELIDE_TRYLOCK_PROTOTYPE(N, T, TL_P, TL)                     \
+       CK_CC_INLINE static bool                                        \
+       ck_elide_##N##_trylock(T *lock)                                 \
+       {                                                               \
+                                                                       \
+               return TL_P(lock);                                      \
+       }
+#endif /* !CK_F_PR_RTM */
+
+/*
+ * Best-effort elision lock operations. First argument is name (N)
+ * associated with implementation and the second is a pointer to
+ * the type specified above (T).
+ *
+ * Unlike the adaptive variant, this interface does not have any retry
+ * semantics. In environments where jitter is low, this may yield a tighter
+ * fast path.
+ */
+#define CK_ELIDE_LOCK(NAME, LOCK)      ck_elide_##NAME##_lock(LOCK)    
+#define CK_ELIDE_UNLOCK(NAME, LOCK)    ck_elide_##NAME##_unlock(LOCK)
+#define CK_ELIDE_TRYLOCK(NAME, LOCK)   ck_elide_##NAME##_trylock(LOCK)
+
+/*
+ * Adaptive elision lock operations. In addition to name and pointer
+ * to the lock, you must pass in a pointer to an initialized
+ * ck_elide_config structure along with a per-thread stat structure.
+ */
+#define CK_ELIDE_LOCK_ADAPTIVE(NAME, STAT, CONFIG, LOCK) \
+       ck_elide_##NAME##_lock_adaptive(LOCK, STAT, CONFIG)
+
+#define CK_ELIDE_UNLOCK_ADAPTIVE(NAME, STAT, LOCK) \
+       ck_elide_##NAME##_unlock_adaptive(STAT, LOCK)
+
+#endif /* _CK_ELIDE_H */
+

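A sketch of instantiating the elision wrappers over a FAS spinlock, mirroring
the prototypes ck ships for its own lock types. The ck_spinlock_fas_* names
are assumed from ck_spinlock.h; the "is locked" predicate serves as both L_P
and U_P, since observing the lock held means elision did not take place.

    #include <ck_elide.h>
    #include <ck_spinlock.h>

    CK_ELIDE_PROTOTYPE(fas, ck_spinlock_fas_t,
        ck_spinlock_fas_locked, ck_spinlock_fas_lock,
        ck_spinlock_fas_locked, ck_spinlock_fas_unlock)

    static ck_spinlock_fas_t lock = CK_SPINLOCK_FAS_INITIALIZER;
    static struct ck_elide_config config = CK_ELIDE_CONFIG_DEFAULT_INITIALIZER;
    static __thread ck_elide_stat_t stat = CK_ELIDE_STAT_INITIALIZER;

    static void
    critical_section(void)
    {

        /* Elided transactionally when RTM is available, locked otherwise. */
        CK_ELIDE_LOCK_ADAPTIVE(fas, &stat, &config, &lock);
        /* ... critical section ... */
        CK_ELIDE_UNLOCK_ADAPTIVE(fas, &stat, &lock);
        return;
    }
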
http://git-wip-us.apache.org/repos/asf/trafficserver/blob/f098175e/lib/ck/include/ck_epoch.h
----------------------------------------------------------------------
diff --git a/lib/ck/include/ck_epoch.h b/lib/ck/include/ck_epoch.h
new file mode 100644
index 0000000..2e71599
--- /dev/null
+++ b/lib/ck/include/ck_epoch.h
@@ -0,0 +1,163 @@
+/*
+ * Copyright 2011-2014 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _CK_EPOCH_H
+#define _CK_EPOCH_H
+
+/*
+ * The implementation here is inspired from the work described in:
+ *   Fraser, K. 2004. Practical Lock-Freedom. PhD Thesis, University
+ *   of Cambridge Computing Laboratory.
+ */
+
+#include <ck_cc.h>
+#include <ck_pr.h>
+#include <ck_stack.h>
+#include <stdbool.h>
+
+#ifndef CK_EPOCH_LENGTH
+#define CK_EPOCH_LENGTH 4
+#endif
+
+struct ck_epoch_entry;
+typedef struct ck_epoch_entry ck_epoch_entry_t;
+typedef void ck_epoch_cb_t(ck_epoch_entry_t *);
+
+/*
+ * This should be embedded into objects you wish to be the target of
+ * ck_epoch_cb_t functions (with ck_epoch_call).
+ */
+struct ck_epoch_entry {
+       ck_epoch_cb_t *function;
+       ck_stack_entry_t stack_entry;
+};
+
+/*
+ * Return pointer to ck_epoch_entry container object.
+ */
+#define CK_EPOCH_CONTAINER(T, M, N) CK_CC_CONTAINER(struct ck_epoch_entry, T, M, N)
+
+struct ck_epoch_record {
+       unsigned int state;
+       unsigned int epoch;
+       unsigned int active;
+       unsigned int n_pending;
+       unsigned int n_peak;
+       unsigned long n_dispatch;
+       ck_stack_t pending[CK_EPOCH_LENGTH];
+       ck_stack_entry_t record_next;
+} CK_CC_CACHELINE;
+typedef struct ck_epoch_record ck_epoch_record_t;
+
+struct ck_epoch {
+       unsigned int epoch;
+       char pad[CK_MD_CACHELINE - sizeof(unsigned int)];
+       ck_stack_t records;
+       unsigned int n_free;
+};
+typedef struct ck_epoch ck_epoch_t;
+
+/*
+ * Marks the beginning of an epoch-protected section.
+ */
+CK_CC_INLINE static void
+ck_epoch_begin(ck_epoch_t *epoch, ck_epoch_record_t *record)
+{
+
+       /*
+        * Only observe new epoch if thread is not recursing into a read
+        * section.
+        */
+       if (record->active == 0) {
+               unsigned int g_epoch = ck_pr_load_uint(&epoch->epoch);
+
+               /*
+                * It is possible for loads to be re-ordered before the store
+                * is committed into the caller's epoch and active fields.
+                * For this reason, store to load serialization is necessary.
+                */
+               ck_pr_store_uint(&record->epoch, g_epoch);
+
+#if defined(__x86__) || defined(__x86_64__)
+               ck_pr_fas_uint(&record->active, 1);
+               ck_pr_fence_atomic_load();
+#else
+               ck_pr_store_uint(&record->active, 1);
+               ck_pr_fence_store_load();
+#endif
+
+               return;
+       }
+
+       ck_pr_store_uint(&record->active, record->active + 1);
+       return;
+}
+
+/*
+ * Marks the end of an epoch-protected section.
+ */
+CK_CC_INLINE static void
+ck_epoch_end(ck_epoch_t *global, ck_epoch_record_t *record)
+{
+
+       (void)global;
+
+       ck_pr_fence_release();
+       ck_pr_store_uint(&record->active, record->active - 1);
+       return;
+}
+
+/*
+ * Defers the execution of the function pointed to by the "function"
+ * argument until the epoch counter has cycled past all active read
+ * sections. This allows for a non-blocking deferral.
+ */
+CK_CC_INLINE static void
+ck_epoch_call(ck_epoch_t *epoch,
+             ck_epoch_record_t *record,
+             ck_epoch_entry_t *entry,
+             ck_epoch_cb_t *function)
+{
+       unsigned int e = ck_pr_load_uint(&epoch->epoch);
+       unsigned int offset = e & (CK_EPOCH_LENGTH - 1);
+
+       record->n_pending++;
+       entry->function = function;
+       ck_stack_push_spnc(&record->pending[offset], &entry->stack_entry);
+       return;
+}
+
+void ck_epoch_init(ck_epoch_t *);
+ck_epoch_record_t *ck_epoch_recycle(ck_epoch_t *);
+void ck_epoch_register(ck_epoch_t *, ck_epoch_record_t *);
+void ck_epoch_unregister(ck_epoch_t *, ck_epoch_record_t *);
+bool ck_epoch_poll(ck_epoch_t *, ck_epoch_record_t *);
+void ck_epoch_synchronize(ck_epoch_t *, ck_epoch_record_t *);
+void ck_epoch_barrier(ck_epoch_t *, ck_epoch_record_t *);
+void ck_epoch_reclaim(ck_epoch_record_t *);
+
+#endif /* _CK_EPOCH_H */
+

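A sketch of the epoch lifecycle against a hypothetical node type: readers
bracket traversals with begin/end, and writers retire objects through
ck_epoch_call so the destructor runs only once no record can still observe
the object. Per-thread record storage via __thread is an assumption.

    #include <ck_epoch.h>
    #include <stdlib.h>

    struct node {                       /* hypothetical object */
        int value;
        ck_epoch_entry_t epoch_entry;
    };

    CK_EPOCH_CONTAINER(struct node, epoch_entry, node_container)

    static ck_epoch_t epoch;            /* ck_epoch_init(&epoch) at startup */
    static __thread ck_epoch_record_t *record;

    static void
    thread_init(void)
    {

        /* Reuse an abandoned record if one exists, else register anew. */
        record = ck_epoch_recycle(&epoch);
        if (record == NULL) {
            record = malloc(sizeof *record);
            ck_epoch_register(&epoch, record);
        }

        return;
    }

    static void
    node_destroy(ck_epoch_entry_t *e)
    {

        free(node_container(e));
        return;
    }

    static void
    reader(void)
    {

        ck_epoch_begin(&epoch, record);
        /* ... dereference shared nodes safely ... */
        ck_epoch_end(&epoch, record);
        return;
    }

    static void
    retire(struct node *n)
    {

        /* Non-blocking: the callback is dispatched by a later poll. */
        ck_epoch_call(&epoch, record, &n->epoch_entry, node_destroy);
        ck_epoch_poll(&epoch, record);
        return;
    }
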
http://git-wip-us.apache.org/repos/asf/trafficserver/blob/f098175e/lib/ck/include/ck_fifo.h
----------------------------------------------------------------------
diff --git a/lib/ck/include/ck_fifo.h b/lib/ck/include/ck_fifo.h
new file mode 100644
index 0000000..a65bcfc
--- /dev/null
+++ b/lib/ck/include/ck_fifo.h
@@ -0,0 +1,475 @@
+/*
+ * Copyright 2010-2014 Samy Al Bahra.
+ * Copyright 2011 David Joseph.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _CK_FIFO_H
+#define _CK_FIFO_H
+
+#include <ck_cc.h>
+#include <ck_md.h>
+#include <ck_pr.h>
+#include <ck_spinlock.h>
+#include <stddef.h>
+
+#ifndef CK_F_FIFO_SPSC
+#define CK_F_FIFO_SPSC
+struct ck_fifo_spsc_entry {
+       void *value;
+       struct ck_fifo_spsc_entry *next;
+};
+typedef struct ck_fifo_spsc_entry ck_fifo_spsc_entry_t;
+
+struct ck_fifo_spsc {
+       ck_spinlock_t m_head;
+       struct ck_fifo_spsc_entry *head;
+       char pad[CK_MD_CACHELINE - sizeof(struct ck_fifo_spsc_entry *) - sizeof(ck_spinlock_t)];
+       ck_spinlock_t m_tail;
+       struct ck_fifo_spsc_entry *tail;
+       struct ck_fifo_spsc_entry *head_snapshot;
+       struct ck_fifo_spsc_entry *garbage;
+};
+typedef struct ck_fifo_spsc ck_fifo_spsc_t;
+
+CK_CC_INLINE static bool
+ck_fifo_spsc_enqueue_trylock(struct ck_fifo_spsc *fifo)
+{
+
+       return ck_spinlock_trylock(&fifo->m_tail);
+}
+
+CK_CC_INLINE static void
+ck_fifo_spsc_enqueue_lock(struct ck_fifo_spsc *fifo)
+{
+
+       ck_spinlock_lock(&fifo->m_tail);
+       return;
+}
+
+CK_CC_INLINE static void
+ck_fifo_spsc_enqueue_unlock(struct ck_fifo_spsc *fifo)
+{
+
+       ck_spinlock_unlock(&fifo->m_tail);
+       return;
+}
+
+CK_CC_INLINE static bool
+ck_fifo_spsc_dequeue_trylock(struct ck_fifo_spsc *fifo)
+{
+
+       return ck_spinlock_trylock(&fifo->m_head);
+}
+
+CK_CC_INLINE static void
+ck_fifo_spsc_dequeue_lock(struct ck_fifo_spsc *fifo)
+{
+
+       ck_spinlock_lock(&fifo->m_head);
+       return;
+}
+
+CK_CC_INLINE static void
+ck_fifo_spsc_dequeue_unlock(struct ck_fifo_spsc *fifo)
+{
+
+       ck_spinlock_unlock(&fifo->m_head);
+       return;
+}
+
+CK_CC_INLINE static void
+ck_fifo_spsc_init(struct ck_fifo_spsc *fifo, struct ck_fifo_spsc_entry *stub)
+{
+
+       ck_spinlock_init(&fifo->m_head);
+       ck_spinlock_init(&fifo->m_tail);
+
+       stub->next = NULL;
+       fifo->head = fifo->tail = fifo->head_snapshot = fifo->garbage = stub;
+       return;
+}
+
+CK_CC_INLINE static void
+ck_fifo_spsc_deinit(struct ck_fifo_spsc *fifo, struct ck_fifo_spsc_entry **garbage)
+{
+
+       *garbage = fifo->head;
+       fifo->head = fifo->tail = NULL;
+       return;
+}
+
+CK_CC_INLINE static void
+ck_fifo_spsc_enqueue(struct ck_fifo_spsc *fifo,
+                    struct ck_fifo_spsc_entry *entry,
+                    void *value)
+{
+
+       entry->value = value;
+       entry->next = NULL;
+
+       /* If stub->next is visible, guarantee that entry is consistent. */
+       ck_pr_fence_store();
+       ck_pr_store_ptr(&fifo->tail->next, entry);
+       fifo->tail = entry;
+       return;
+}
+
+CK_CC_INLINE static bool
+ck_fifo_spsc_dequeue(struct ck_fifo_spsc *fifo, void *value)
+{
+       struct ck_fifo_spsc_entry *entry;
+
+       /*
+        * The head pointer is guaranteed to always point to a stub entry.
+        * If the stub entry does not point to an entry, then the queue is
+        * empty.
+        */
+       entry = ck_pr_load_ptr(&fifo->head->next);
+       if (entry == NULL)
+               return false;
+
+       /* If entry is visible, guarantee store to value is visible. */
+       ck_pr_store_ptr(value, entry->value);
+       ck_pr_fence_store();
+       ck_pr_store_ptr(&fifo->head, entry);
+       return true;
+}
+
+/*
+ * Recycle a node. This technique for recycling nodes is based on
+ * Dmitriy Vyukov's work.
+ */
+CK_CC_INLINE static struct ck_fifo_spsc_entry *
+ck_fifo_spsc_recycle(struct ck_fifo_spsc *fifo)
+{
+       struct ck_fifo_spsc_entry *garbage;
+
+       if (fifo->head_snapshot == fifo->garbage) {
+               fifo->head_snapshot = ck_pr_load_ptr(&fifo->head);
+               if (fifo->head_snapshot == fifo->garbage)
+                       return NULL;
+       }
+
+       garbage = fifo->garbage;
+       fifo->garbage = garbage->next;
+       return garbage;
+}
+
+CK_CC_INLINE static bool
+ck_fifo_spsc_isempty(struct ck_fifo_spsc *fifo)
+{
+       struct ck_fifo_spsc_entry *head = ck_pr_load_ptr(&fifo->head);
+       return ck_pr_load_ptr(&head->next) == NULL;
+}
+
+#define CK_FIFO_SPSC_ISEMPTY(f)        ((f)->head->next == NULL)
+#define CK_FIFO_SPSC_FIRST(f)  ((f)->head->next)
+#define CK_FIFO_SPSC_NEXT(m)   ((m)->next)
+#define CK_FIFO_SPSC_SPARE(f)  ((f)->head)
+#define CK_FIFO_SPSC_FOREACH(fifo, entry)                      \
+       for ((entry) = CK_FIFO_SPSC_FIRST(fifo);                \
+            (entry) != NULL;                                   \
+            (entry) = CK_FIFO_SPSC_NEXT(entry))
+#define CK_FIFO_SPSC_FOREACH_SAFE(fifo, entry, T)              \
+       for ((entry) = CK_FIFO_SPSC_FIRST(fifo);                \
+            (entry) != NULL && ((T) = (entry)->next, 1);       \
+            (entry) = (T))
+
+#endif /* CK_F_FIFO_SPSC */
+
+#ifdef CK_F_PR_CAS_PTR_2
+#ifndef CK_F_FIFO_MPMC
+#define CK_F_FIFO_MPMC
+struct ck_fifo_mpmc_entry;
+struct ck_fifo_mpmc_pointer {
+       struct ck_fifo_mpmc_entry *pointer;
+       char *generation CK_CC_PACKED;
+} CK_CC_ALIGN(16);
+
+struct ck_fifo_mpmc_entry {
+       void *value;
+       struct ck_fifo_mpmc_pointer next;
+};
+typedef struct ck_fifo_mpmc_entry ck_fifo_mpmc_entry_t;
+
+struct ck_fifo_mpmc {
+       struct ck_fifo_mpmc_pointer head;
+       char pad[CK_MD_CACHELINE - sizeof(struct ck_fifo_mpmc_pointer)];
+       struct ck_fifo_mpmc_pointer tail;
+};
+typedef struct ck_fifo_mpmc ck_fifo_mpmc_t;
+
+CK_CC_INLINE static void
+ck_fifo_mpmc_init(struct ck_fifo_mpmc *fifo, struct ck_fifo_mpmc_entry *stub)
+{
+
+       stub->next.pointer = NULL;
+       stub->next.generation = NULL;
+       fifo->head.pointer = fifo->tail.pointer = stub;
+       fifo->head.generation = fifo->tail.generation = NULL;
+       return;
+}
+
+CK_CC_INLINE static void
+ck_fifo_mpmc_deinit(struct ck_fifo_mpmc *fifo, struct ck_fifo_mpmc_entry **garbage)
+{
+
+       *garbage = fifo->head.pointer;
+       fifo->head.pointer = fifo->tail.pointer = NULL;
+       return;
+}
+
+CK_CC_INLINE static void
+ck_fifo_mpmc_enqueue(struct ck_fifo_mpmc *fifo,
+                    struct ck_fifo_mpmc_entry *entry,
+                    void *value)
+{
+       struct ck_fifo_mpmc_pointer tail, next, update;
+
+       /*
+        * Prepare the upcoming node and make sure to commit the updates
+        * before publishing.
+        */
+       entry->value = value;
+       entry->next.pointer = NULL;
+       entry->next.generation = 0;
+       ck_pr_fence_store_atomic();
+
+       for (;;) {
+               tail.generation = ck_pr_load_ptr(&fifo->tail.generation);
+               ck_pr_fence_load();
+               tail.pointer = ck_pr_load_ptr(&fifo->tail.pointer);
+               next.generation = ck_pr_load_ptr(&tail.pointer->next.generation);
+               next.pointer = ck_pr_load_ptr(&tail.pointer->next.pointer);
+
+               if (ck_pr_load_ptr(&fifo->tail.generation) != tail.generation)
+                       continue;
+
+               if (next.pointer != NULL) {
+                       /*
+                        * If the tail pointer has an entry following it then
+                        * it needs to be forwarded to the next entry. This
+                        * helps us guarantee we are always operating on the
+                        * last entry.
+                        */
+                       update.pointer = next.pointer;
+                       update.generation = tail.generation + 1;
+                       ck_pr_cas_ptr_2(&fifo->tail, &tail, &update);
+               } else {
+                       /*
+                        * Attempt to commit new entry to the end of the
+                        * current tail.
+                        */
+                       update.pointer = entry;
+                       update.generation = next.generation + 1;
+                       if (ck_pr_cas_ptr_2(&tail.pointer->next, &next, &update) == true)
+                               break;
+               }
+       }
+
+       ck_pr_fence_atomic();
+
+       /* After a successful insert, forward the tail to the new entry. */
+       update.generation = tail.generation + 1;
+       ck_pr_cas_ptr_2(&fifo->tail, &tail, &update);
+       return;
+}
+
+CK_CC_INLINE static bool
+ck_fifo_mpmc_tryenqueue(struct ck_fifo_mpmc *fifo,
+                       struct ck_fifo_mpmc_entry *entry,
+                       void *value)
+{
+       struct ck_fifo_mpmc_pointer tail, next, update;
+
+       entry->value = value;
+       entry->next.pointer = NULL;
+       entry->next.generation = 0;
+
+       ck_pr_fence_store_atomic();
+
+       tail.generation = ck_pr_load_ptr(&fifo->tail.generation);
+       ck_pr_fence_load();
+       tail.pointer = ck_pr_load_ptr(&fifo->tail.pointer);
+       next.generation = ck_pr_load_ptr(&tail.pointer->next.generation);
+       next.pointer = ck_pr_load_ptr(&tail.pointer->next.pointer);
+
+       if (ck_pr_load_ptr(&fifo->tail.generation) != tail.generation)
+               return false;
+
+       if (next.pointer != NULL) {
+               /*
+                * If the tail pointer has an entry following it then
+                * it needs to be forwarded to the next entry. This
+                * helps us guarantee we are always operating on the
+                * last entry.
+                */
+               update.pointer = next.pointer;
+               update.generation = tail.generation + 1;
+               ck_pr_cas_ptr_2(&fifo->tail, &tail, &update);
+               return false;
+       } else {
+               /*
+                * Attempt to commit new entry to the end of the
+                * current tail.
+                */
+               update.pointer = entry;
+               update.generation = next.generation + 1;
+               if (ck_pr_cas_ptr_2(&tail.pointer->next, &next, &update) == false)
+                       return false;
+       }
+
+       ck_pr_fence_atomic();
+
+       /* After a successful insert, forward the tail to the new entry. */
+       update.generation = tail.generation + 1;
+       ck_pr_cas_ptr_2(&fifo->tail, &tail, &update);
+       return true;
+}
+
+CK_CC_INLINE static bool
+ck_fifo_mpmc_dequeue(struct ck_fifo_mpmc *fifo,
+                    void *value,
+                    struct ck_fifo_mpmc_entry **garbage)
+{
+       struct ck_fifo_mpmc_pointer head, tail, next, update;
+
+       for (;;) {
+               head.generation = ck_pr_load_ptr(&fifo->head.generation);
+               ck_pr_fence_load();
+               head.pointer = ck_pr_load_ptr(&fifo->head.pointer);
+               tail.generation = ck_pr_load_ptr(&fifo->tail.generation);
+               ck_pr_fence_load();
+               tail.pointer = ck_pr_load_ptr(&fifo->tail.pointer);
+
+               next.generation = ck_pr_load_ptr(&head.pointer->next.generation);
+               next.pointer = ck_pr_load_ptr(&head.pointer->next.pointer);
+
+               update.pointer = next.pointer;
+               if (head.pointer == tail.pointer) {
+                       /*
+                        * The head is guaranteed to always point at a stub
+                        * entry. If the stub entry has no references then the
+                        * queue is empty.
+                        */
+                       if (next.pointer == NULL)
+                               return false;
+
+                       /* Forward the tail pointer if necessary. */
+                       update.generation = tail.generation + 1;
+                       ck_pr_cas_ptr_2(&fifo->tail, &tail, &update);
+               } else {
+                       /*
+                        * It is possible for the head snapshot to have been
+                        * re-used. Avoid dereferencing during enqueue
+                        * re-use.
+                        */
+                       if (next.pointer == NULL)
+                               continue;
+
+                       /* Save value before commit. */
+                       *(void **)value = ck_pr_load_ptr(&next.pointer->value);
+
+                       /* Forward the head pointer to the next entry. */
+                       update.generation = head.generation + 1;
+                       if (ck_pr_cas_ptr_2(&fifo->head, &head, &update) == true)
+                               break;
+               }
+       }
+
+       *garbage = head.pointer;
+       return true;
+}
+
+CK_CC_INLINE static bool
+ck_fifo_mpmc_trydequeue(struct ck_fifo_mpmc *fifo,
+                       void *value,
+                       struct ck_fifo_mpmc_entry **garbage)
+{
+       struct ck_fifo_mpmc_pointer head, tail, next, update;
+
+       head.generation = ck_pr_load_ptr(&fifo->head.generation);
+       ck_pr_fence_load();
+       head.pointer = ck_pr_load_ptr(&fifo->head.pointer);
+
+       tail.generation = ck_pr_load_ptr(&fifo->tail.generation);
+       ck_pr_fence_load();
+       tail.pointer = ck_pr_load_ptr(&fifo->tail.pointer);
+
+       next.generation = ck_pr_load_ptr(&head.pointer->next.generation);
+       next.pointer = ck_pr_load_ptr(&head.pointer->next.pointer);
+
+       update.pointer = next.pointer;
+       if (head.pointer == tail.pointer) {
+               /*
+                * The head is guaranteed to always point at a stub
+                * entry. If the stub entry has no references then the
+                * queue is empty.
+                */
+               if (next.pointer == NULL)
+                       return false;
+
+               /* Forward the tail pointer if necessary. */
+               update.generation = tail.generation + 1;
+               ck_pr_cas_ptr_2(&fifo->tail, &tail, &update);
+               return false;
+       } else {
+               /*
+                * It is possible for the head snapshot to have been
+                * re-used. Avoid dereferencing during enqueue.
+                */
+               if (next.pointer == NULL)
+                       return false;
+
+               /* Save value before commit. */
+               *(void **)value = ck_pr_load_ptr(&next.pointer->value);
+
+               /* Forward the head pointer to the next entry. */
+               update.generation = head.generation + 1;
+               if (ck_pr_cas_ptr_2(&fifo->head, &head, &update) == false)
+                       return false;
+       }
+
+       *garbage = head.pointer;
+       return true;
+}
+
+#define CK_FIFO_MPMC_ISEMPTY(f)        ((f)->head.pointer->next.pointer == NULL)
+#define CK_FIFO_MPMC_FIRST(f)  ((f)->head.pointer->next.pointer)
+#define CK_FIFO_MPMC_NEXT(m)   ((m)->next.pointer)
+#define CK_FIFO_MPMC_FOREACH(fifo, entry)                              \
+       for ((entry) = CK_FIFO_MPMC_FIRST(fifo);                        \
+            (entry) != NULL;                                           \
+            (entry) = CK_FIFO_MPMC_NEXT(entry))
+#define CK_FIFO_MPMC_FOREACH_SAFE(fifo, entry, T)                      \
+       for ((entry) = CK_FIFO_MPMC_FIRST(fifo);                        \
+            (entry) != NULL && ((T) = (entry)->next.pointer, 1);       \
+            (entry) = (T))
+
+#endif /* CK_F_FIFO_MPMC */
+#endif /* CK_F_PR_CAS_PTR_2 */
+
+#endif /* _CK_FIFO_H */
+
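
For reference, a sketch of the single-producer/single-consumer interface above, including the Vyukov-style node recycling on the producer side. The wrapper names are illustrative, not part of the patch.

#include <ck_fifo.h>
#include <stdbool.h>
#include <stdlib.h>

static ck_fifo_spsc_t fifo;

static void
fifo_setup(void)
{

        /* The queue permanently owns one stub entry. */
        ck_fifo_spsc_init(&fifo, malloc(sizeof(ck_fifo_spsc_entry_t)));
        return;
}

/* Producer side: reuse a consumed node when one is available,
 * otherwise fall back to the allocator. */
static void
producer_enqueue(void *value)
{
        ck_fifo_spsc_entry_t *entry = ck_fifo_spsc_recycle(&fifo);

        if (entry == NULL)
                entry = malloc(sizeof *entry);

        ck_fifo_spsc_enqueue(&fifo, entry, value);
        return;
}

/* Consumer side: the value is copied out; the node itself remains on
 * the queue's garbage list until the producer recycles it. */
static bool
consumer_dequeue(void **value)
{

        return ck_fifo_spsc_dequeue(&fifo, value);
}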

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/f098175e/lib/ck/include/ck_hp.h
----------------------------------------------------------------------
diff --git a/lib/ck/include/ck_hp.h b/lib/ck/include/ck_hp.h
new file mode 100644
index 0000000..505de5b
--- /dev/null
+++ b/lib/ck/include/ck_hp.h
@@ -0,0 +1,107 @@
+/*
+ * Copyright 2010-2014 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _CK_HP_H
+#define _CK_HP_H
+
+#include <ck_cc.h>
+#include <ck_pr.h>
+#include <ck_stack.h>
+
+#ifndef CK_HP_CACHE
+#define CK_HP_CACHE 512
+#endif
+
+struct ck_hp_hazard;
+typedef void (*ck_hp_destructor_t)(void *);
+
+struct ck_hp {
+       ck_stack_t subscribers;
+       unsigned int n_subscribers;
+       unsigned int n_free;
+       unsigned int threshold;
+       unsigned int degree;
+       ck_hp_destructor_t destroy;
+};
+typedef struct ck_hp ck_hp_t;
+
+struct ck_hp_hazard {
+       void *pointer;
+       void *data;
+       ck_stack_entry_t pending_entry;
+};
+typedef struct ck_hp_hazard ck_hp_hazard_t;
+
+enum {
+       CK_HP_USED = 0,
+       CK_HP_FREE = 1
+};
+
+struct ck_hp_record {
+       int state;
+       void **pointers;
+       void *cache[CK_HP_CACHE];
+       struct ck_hp *global;
+       ck_stack_t pending;
+       unsigned int n_pending;
+       ck_stack_entry_t global_entry;
+       unsigned int n_peak;
+       uint64_t n_reclamations;
+} CK_CC_CACHELINE;
+typedef struct ck_hp_record ck_hp_record_t;
+
+CK_CC_INLINE static void
+ck_hp_set(struct ck_hp_record *record, unsigned int i, void *pointer)
+{
+
+       ck_pr_store_ptr(&record->pointers[i], pointer);
+       return;
+}
+
+CK_CC_INLINE static void
+ck_hp_clear(struct ck_hp_record *record)
+{
+       void **pointers = record->pointers;
+       unsigned int i;
+
+       for (i = 0; i < record->global->degree; i++)
+               *pointers++ = NULL;
+
+       return;
+}
+
+void ck_hp_init(ck_hp_t *, unsigned int, unsigned int, ck_hp_destructor_t);
+void ck_hp_set_threshold(ck_hp_t *, unsigned int);
+void ck_hp_register(ck_hp_t *, ck_hp_record_t *, void **);
+void ck_hp_unregister(ck_hp_record_t *);
+ck_hp_record_t *ck_hp_recycle(ck_hp_t *);
+void ck_hp_reclaim(ck_hp_record_t *);
+void ck_hp_free(ck_hp_record_t *, ck_hp_hazard_t *, void *, void *);
+void ck_hp_retire(ck_hp_record_t *, ck_hp_hazard_t *, void *, void *);
+void ck_hp_purge(ck_hp_record_t *);
+
+#endif /* _CK_HP_H */
+
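
For reference, a sketch of subscribing to the global hazard-pointer state declared above. The degree of two slots mirrors CK_HP_FIFO_SLOTS_COUNT; the threshold value and all names outside the header are illustrative.

#include <ck_hp.h>
#include <stdlib.h>

static ck_hp_t hp;
static ck_hp_record_t record;

int
main(void)
{
        void **slots;
        int object = 0;

        /* Two hazard slots per record; scan for reclaimable memory once
         * 128 retired pointers are pending; release with free(3). */
        ck_hp_init(&hp, 2, 128, free);

        /* Each participating thread registers a record backed by
         * storage for `degree` hazard slots. */
        slots = malloc(sizeof(void *) * 2);
        ck_hp_register(&hp, &record, slots);

        /* Publish a protection on a pointer, then release all slots. */
        ck_hp_set(&record, 0, &object);
        ck_hp_clear(&record);
        return 0;
}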

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/f098175e/lib/ck/include/ck_hp_fifo.h
----------------------------------------------------------------------
diff --git a/lib/ck/include/ck_hp_fifo.h b/lib/ck/include/ck_hp_fifo.h
new file mode 100644
index 0000000..59bf0e4
--- /dev/null
+++ b/lib/ck/include/ck_hp_fifo.h
@@ -0,0 +1,222 @@
+/*
+ * Copyright 2010-2014 Samy Al Bahra.
+ * Copyright 2011 David Joseph.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _CK_HP_FIFO_H
+#define _CK_HP_FIFO_H
+
+#include <ck_cc.h>
+#include <ck_hp.h>
+#include <ck_pr.h>
+#include <stddef.h>
+
+#define CK_HP_FIFO_SLOTS_COUNT (2)
+#define CK_HP_FIFO_SLOTS_SIZE  (sizeof(void *) * CK_HP_FIFO_SLOTS_COUNT)
+
+/*
+ * Though it is possible to embed the hazard pointer state into the data
+ * structure itself, the cost of doing so needs to be measured first. Every
+ * deferred reclamation would then also include a cache invalidation when
+ * linking into the hazard pointer pending queue, which may lead to
+ * terrible cache line bouncing.
+ */
+struct ck_hp_fifo_entry {
+       void *value;
+       ck_hp_hazard_t hazard;
+       struct ck_hp_fifo_entry *next;
+};
+typedef struct ck_hp_fifo_entry ck_hp_fifo_entry_t;
+
+struct ck_hp_fifo {
+       struct ck_hp_fifo_entry *head;
+       struct ck_hp_fifo_entry *tail;
+};
+typedef struct ck_hp_fifo ck_hp_fifo_t;
+
+CK_CC_INLINE static void
+ck_hp_fifo_init(struct ck_hp_fifo *fifo, struct ck_hp_fifo_entry *stub)
+{
+
+       fifo->head = fifo->tail = stub;
+       stub->next = NULL;
+       return;
+}
+
+CK_CC_INLINE static void
+ck_hp_fifo_deinit(struct ck_hp_fifo *fifo, struct ck_hp_fifo_entry **stub)
+{
+
+       *stub = fifo->head;
+       fifo->head = fifo->tail = NULL;
+       return;
+}
+
+CK_CC_INLINE static void
+ck_hp_fifo_enqueue_mpmc(ck_hp_record_t *record,
+                       struct ck_hp_fifo *fifo,
+                       struct ck_hp_fifo_entry *entry,
+                       void *value)
+{
+       struct ck_hp_fifo_entry *tail, *next;
+
+       entry->value = value;
+       entry->next = NULL;
+       ck_pr_fence_store_atomic();
+
+       for (;;) {
+               tail = ck_pr_load_ptr(&fifo->tail);
+               ck_hp_set(record, 0, tail);
+               ck_pr_fence_store_load();
+               if (tail != ck_pr_load_ptr(&fifo->tail))
+                       continue;
+
+               next = ck_pr_load_ptr(&tail->next);
+               if (next != NULL) {
+                       ck_pr_cas_ptr(&fifo->tail, tail, next);
+                       continue;
+               } else if (ck_pr_cas_ptr(&fifo->tail->next, next, entry) == true)
+                       break;
+       }
+
+       ck_pr_fence_atomic();
+       ck_pr_cas_ptr(&fifo->tail, tail, entry);
+       return;
+}
+
+CK_CC_INLINE static bool
+ck_hp_fifo_tryenqueue_mpmc(ck_hp_record_t *record,
+                          struct ck_hp_fifo *fifo,
+                          struct ck_hp_fifo_entry *entry,
+                          void *value)
+{
+       struct ck_hp_fifo_entry *tail, *next;
+
+       entry->value = value;
+       entry->next = NULL;
+       ck_pr_fence_store_atomic();
+
+       tail = ck_pr_load_ptr(&fifo->tail);
+       ck_hp_set(record, 0, tail);
+       ck_pr_fence_store_load();
+       if (tail != ck_pr_load_ptr(&fifo->tail))
+               return false;
+
+       next = ck_pr_load_ptr(&tail->next);
+       if (next != NULL) {
+               ck_pr_cas_ptr(&fifo->tail, tail, next);
+               return false;
+       } else if (ck_pr_cas_ptr(&fifo->tail->next, next, entry) == false)
+               return false;
+
+       ck_pr_fence_atomic();
+       ck_pr_cas_ptr(&fifo->tail, tail, entry);
+       return true;
+}
+
+CK_CC_INLINE static struct ck_hp_fifo_entry *
+ck_hp_fifo_dequeue_mpmc(ck_hp_record_t *record,
+                       struct ck_hp_fifo *fifo,
+                       void *value)
+{
+       struct ck_hp_fifo_entry *head, *tail, *next;
+
+       for (;;) {
+               head = ck_pr_load_ptr(&fifo->head);
+               ck_pr_fence_load();
+               tail = ck_pr_load_ptr(&fifo->tail);
+               ck_hp_set(record, 0, head);
+               ck_pr_fence_store_load();
+               if (head != ck_pr_load_ptr(&fifo->head))
+                       continue;
+
+               next = ck_pr_load_ptr(&head->next);
+               ck_hp_set(record, 1, next);
+               ck_pr_fence_store_load();
+               if (head != ck_pr_load_ptr(&fifo->head))
+                       continue;
+
+               if (head == tail) {
+                       if (next == NULL)
+                               return NULL;
+
+                       ck_pr_cas_ptr(&fifo->tail, tail, next);
+                       continue;
+               } else if (ck_pr_cas_ptr(&fifo->head, head, next) == true)
+                       break;
+       }
+
+       ck_pr_store_ptr(value, next->value);
+       return head;
+}
+
+CK_CC_INLINE static struct ck_hp_fifo_entry *
+ck_hp_fifo_trydequeue_mpmc(ck_hp_record_t *record,
+                          struct ck_hp_fifo *fifo,
+                          void *value)
+{
+       struct ck_hp_fifo_entry *head, *tail, *next;
+
+       head = ck_pr_load_ptr(&fifo->head);
+       ck_pr_fence_load();
+       tail = ck_pr_load_ptr(&fifo->tail);
+       ck_hp_set(record, 0, head);
+       ck_pr_fence_store_load();
+       if (head != ck_pr_load_ptr(&fifo->head))
+               return NULL;
+
+       next = ck_pr_load_ptr(&head->next);
+       ck_hp_set(record, 1, next);
+       ck_pr_fence_store_load();
+       if (head != ck_pr_load_ptr(&fifo->head))
+               return NULL;
+
+       if (head == tail) {
+               if (next == NULL)
+                       return NULL;
+
+               ck_pr_cas_ptr(&fifo->tail, tail, next);
+               return NULL;
+       } else if (ck_pr_cas_ptr(&fifo->head, head, next) == false)
+               return NULL;
+
+       ck_pr_store_ptr(value, next->value);
+       return head;
+}
+
+#define CK_HP_FIFO_ISEMPTY(f) ((f)->head->next == NULL)
+#define CK_HP_FIFO_FIRST(f)   ((f)->head->next)
+#define CK_HP_FIFO_NEXT(m)    ((m)->next)
+#define CK_HP_FIFO_FOREACH(fifo, entry)                        \
+        for ((entry) = CK_HP_FIFO_FIRST(fifo);                 \
+             (entry) != NULL;                                   \
+             (entry) = CK_HP_FIFO_NEXT(entry))
+#define CK_HP_FIFO_FOREACH_SAFE(fifo, entry, T)                        \
+        for ((entry) = CK_HP_FIFO_FIRST(fifo);                 \
+             (entry) != NULL && ((T) = (entry)->next, 1);      \
+             (entry) = (T))
+
+#endif /* _CK_HP_FIFO_H */
+
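
For reference, a sketch of a dequeue path that retires the consumed node through the hazard-pointer machinery rather than freeing it directly. The record is assumed to have been registered with CK_HP_FIFO_SLOTS_COUNT slots, and hp_fifo_pop is an illustrative wrapper.

#include <ck_hp.h>
#include <ck_hp_fifo.h>
#include <stddef.h>

static void *
hp_fifo_pop(ck_hp_record_t *record, ck_hp_fifo_t *fifo)
{
        ck_hp_fifo_entry_t *garbage;
        void *value;

        garbage = ck_hp_fifo_dequeue_mpmc(record, fifo, &value);
        if (garbage == NULL)
                return NULL;

        /* The returned entry is the retired stub: defer its destruction
         * until no other thread holds a hazardous reference to it. */
        ck_hp_free(record, &garbage->hazard, garbage, garbage);

        /* Release this thread's hazard slots now that the value is out. */
        ck_hp_clear(record);
        return value;
}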

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/f098175e/lib/ck/include/ck_hp_stack.h
----------------------------------------------------------------------
diff --git a/lib/ck/include/ck_hp_stack.h b/lib/ck/include/ck_hp_stack.h
new file mode 100644
index 0000000..3b67fc4
--- /dev/null
+++ b/lib/ck/include/ck_hp_stack.h
@@ -0,0 +1,114 @@
+/*
+ * Copyright 2010-2014 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _CK_HP_STACK_H
+#define _CK_HP_STACK_H
+
+#include <ck_cc.h>
+#include <ck_hp.h>
+#include <ck_pr.h>
+#include <ck_stack.h>
+#include <stddef.h>
+
+#define CK_HP_STACK_SLOTS_COUNT 1
+#define CK_HP_STACK_SLOTS_SIZE  sizeof(void *)
+
+CK_CC_INLINE static void
+ck_hp_stack_push_mpmc(struct ck_stack *target, struct ck_stack_entry *entry)
+{
+
+       ck_stack_push_upmc(target, entry);
+       return;
+}
+
+CK_CC_INLINE static bool
+ck_hp_stack_trypush_mpmc(struct ck_stack *target, struct ck_stack_entry *entry)
+{
+
+       return ck_stack_trypush_upmc(target, entry);
+}
+
+CK_CC_INLINE static struct ck_stack_entry *
+ck_hp_stack_pop_mpmc(ck_hp_record_t *record, struct ck_stack *target)
+{
+       struct ck_stack_entry *entry, *update;
+
+       do {
+               entry = ck_pr_load_ptr(&target->head);
+               if (entry == NULL)
+                       return NULL;
+
+               ck_hp_set(record, 0, entry);
+               ck_pr_fence_store_load();
+       } while (entry != ck_pr_load_ptr(&target->head));
+
+       while (ck_pr_cas_ptr_value(&target->head, entry, entry->next, &entry) == false) {
+               if (entry == NULL)
+                       return NULL;
+
+               ck_hp_set(record, 0, entry);
+               ck_pr_fence_store_load();
+               update = ck_pr_load_ptr(&target->head);
+               while (entry != update) {
+                       ck_hp_set(record, 0, update);
+                       ck_pr_fence_store_load();
+                       entry = update;
+                       update = ck_pr_load_ptr(&target->head);
+                       if (update == NULL)
+                               return NULL;
+               }
+       }
+
+       return entry;
+}
+
+CK_CC_INLINE static bool
+ck_hp_stack_trypop_mpmc(ck_hp_record_t *record, struct ck_stack *target, struct ck_stack_entry **r)
+{
+       struct ck_stack_entry *entry;
+
+       entry = ck_pr_load_ptr(&target->head);
+       if (entry == NULL)
+               return false;
+
+       ck_hp_set(record, 0, entry);
+       ck_pr_fence_store_load();
+       if (entry != ck_pr_load_ptr(&target->head))
+               goto leave;
+
+       if (ck_pr_cas_ptr_value(&target->head, entry, entry->next, &entry) == false)
+               goto leave;
+
+       *r = entry;
+       return true;
+
+leave:
+       ck_hp_set(record, 0, NULL);
+       return false;
+}
+
+#endif /* _CK_HP_STACK_H */
+
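
For reference, a sketch pairing the pop above with hazard-pointer reclamation. The node layout is an illustrative assumption; only one hazard slot is required, per CK_HP_STACK_SLOTS_COUNT.

#include <ck_hp.h>
#include <ck_hp_stack.h>
#include <stddef.h>

struct stack_node {
        void *value;
        ck_hp_hazard_t hazard;
        ck_stack_entry_t entry;
};

static void *
stack_pop(ck_hp_record_t *record, ck_stack_t *stack)
{
        ck_stack_entry_t *e;
        struct stack_node *n;
        void *value;

        e = ck_hp_stack_pop_mpmc(record, stack);
        if (e == NULL)
                return NULL;

        /* Recover the containing node from its embedded linkage. */
        n = (struct stack_node *)((char *)e -
            offsetof(struct stack_node, entry));
        value = n->value;

        /* Defer the free until no thread still hazards the node. */
        ck_hp_free(record, &n->hazard, n, n);
        return value;
}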

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/f098175e/lib/ck/include/ck_hs.h
----------------------------------------------------------------------
diff --git a/lib/ck/include/ck_hs.h b/lib/ck/include/ck_hs.h
new file mode 100644
index 0000000..91f990b
--- /dev/null
+++ b/lib/ck/include/ck_hs.h
@@ -0,0 +1,133 @@
+/*
+ * Copyright 2012-2014 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _CK_HS_H
+#define _CK_HS_H
+
+#include <ck_cc.h>
+#include <ck_malloc.h>
+#include <ck_md.h>
+#include <ck_pr.h>
+#include <ck_stdint.h>
+#include <stdbool.h>
+#include <stddef.h>
+
+/*
+ * Indicates a single-writer many-reader workload. Mutually
+ * exclusive with CK_HS_MODE_MPMC.
+ */
+#define CK_HS_MODE_SPMC                1
+
+/*
+ * Indicates that values to be stored are not pointers but
+ * values. Allows for full precision. Mutually exclusive
+ * with CK_HS_MODE_OBJECT.
+ */
+#define CK_HS_MODE_DIRECT      2
+
+/* 
+ * Indicates that the values to be stored are pointers.
+ * Allows for space optimizations in the presence of pointer
+ * packing. Mutually exclusive with CK_HS_MODE_DIRECT.
+ */
+#define CK_HS_MODE_OBJECT      8
+
+/*
+ * Indicates a delete-heavy workload. This will reduce the
+ * need for garbage collection at the cost of approximately
+ * 12% to 20% increased memory usage.
+ */
+#define CK_HS_MODE_DELETE      16
+
+/* Currently unsupported. */
+#define CK_HS_MODE_MPMC    (void)
+
+/*
+ * Hash callback function.
+ */
+typedef unsigned long ck_hs_hash_cb_t(const void *, unsigned long);
+
+/*
+ * Returns pointer to object if objects are equivalent.
+ */
+typedef bool ck_hs_compare_cb_t(const void *, const void *);
+
+#if defined(CK_MD_POINTER_PACK_ENABLE) && defined(CK_MD_VMA_BITS)
+#define CK_HS_PP
+#define CK_HS_KEY_MASK ((1U << ((sizeof(void *) * 8) - CK_MD_VMA_BITS)) - 1)
+#endif
+
+struct ck_hs_map;
+struct ck_hs {
+       struct ck_malloc *m;
+       struct ck_hs_map *map;
+       unsigned int mode;
+       unsigned long seed;
+       ck_hs_hash_cb_t *hf;
+       ck_hs_compare_cb_t *compare;
+};
+typedef struct ck_hs ck_hs_t;
+
+struct ck_hs_stat {
+       unsigned long tombstones;
+       unsigned long n_entries;
+       unsigned int probe_maximum;
+};
+
+struct ck_hs_iterator {
+       void **cursor;
+       unsigned long offset;
+};
+typedef struct ck_hs_iterator ck_hs_iterator_t;
+
+#define CK_HS_ITERATOR_INITIALIZER { NULL, 0 }
+
+/* Convenience wrapper to table hash function. */
+#define CK_HS_HASH(T, F, K) F((K), (T)->seed)
+
+void ck_hs_iterator_init(ck_hs_iterator_t *);
+bool ck_hs_next(ck_hs_t *, ck_hs_iterator_t *, void **);
+bool ck_hs_move(ck_hs_t *, ck_hs_t *, ck_hs_hash_cb_t *,
+    ck_hs_compare_cb_t *, struct ck_malloc *);
+bool ck_hs_init(ck_hs_t *, unsigned int, ck_hs_hash_cb_t *,
+    ck_hs_compare_cb_t *, struct ck_malloc *, unsigned long, unsigned long);
+void ck_hs_destroy(ck_hs_t *);
+void *ck_hs_get(ck_hs_t *, unsigned long, const void *);
+bool ck_hs_put(ck_hs_t *, unsigned long, const void *);
+bool ck_hs_put_unique(ck_hs_t *, unsigned long, const void *);
+bool ck_hs_set(ck_hs_t *, unsigned long, const void *, void **);
+bool ck_hs_fas(ck_hs_t *, unsigned long, const void *, void **);
+void *ck_hs_remove(ck_hs_t *, unsigned long, const void *);
+bool ck_hs_grow(ck_hs_t *, unsigned long);
+bool ck_hs_rebuild(ck_hs_t *);
+bool ck_hs_gc(ck_hs_t *, unsigned long, unsigned long);
+unsigned long ck_hs_count(ck_hs_t *);
+bool ck_hs_reset(ck_hs_t *);
+bool ck_hs_reset_size(ck_hs_t *, unsigned long);
+void ck_hs_stat(ck_hs_t *, struct ck_hs_stat *);
+
+#endif /* _CK_HS_H */
+
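
For reference, a sketch of initializing and exercising the set. The djb2-style hash, the string comparator, the malloc(3)-backed struct ck_malloc and the seed value are all illustrative choices, not part of the patch.

#include <ck_hs.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>

static unsigned long
hs_hash(const void *object, unsigned long seed)
{
        const unsigned char *s = object;
        unsigned long h = seed;

        while (*s != '\0')
                h = (h * 33) + *s++;

        return h;
}

static bool
hs_compare(const void *a, const void *b)
{

        return strcmp(a, b) == 0;
}

static void
hs_free(void *p, size_t size, bool defer)
{

        (void)size;
        (void)defer;
        free(p);
        return;
}

static struct ck_malloc allocator = {
        .malloc = malloc,
        .free = hs_free
};

int
main(void)
{
        static ck_hs_t hs;
        const char *key = "example";
        unsigned long h;

        if (ck_hs_init(&hs, CK_HS_MODE_OBJECT | CK_HS_MODE_SPMC,
            hs_hash, hs_compare, &allocator, 1024, 42) == false)
                return 1;

        h = CK_HS_HASH(&hs, hs_hash, key);
        ck_hs_put(&hs, h, key);
        return ck_hs_get(&hs, h, key) == NULL;
}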

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/f098175e/lib/ck/include/ck_ht.h
----------------------------------------------------------------------
diff --git a/lib/ck/include/ck_ht.h b/lib/ck/include/ck_ht.h
new file mode 100644
index 0000000..86fb7f0
--- /dev/null
+++ b/lib/ck/include/ck_ht.h
@@ -0,0 +1,262 @@
+/*
+ * Copyright 2012-2014 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _CK_HT_H
+#define _CK_HT_H
+
+#include <ck_pr.h>
+
+#if defined(CK_F_PR_LOAD_64) && defined(CK_F_PR_STORE_64)
+#define CK_F_HT
+
+#include <ck_cc.h>
+#include <ck_malloc.h>
+#include <ck_md.h>
+#include <ck_stdint.h>
+#include <stdbool.h>
+#include <stddef.h>
+
+struct ck_ht_hash {
+       uint64_t value;
+};
+typedef struct ck_ht_hash ck_ht_hash_t;
+
+#define CK_HT_MODE_DIRECT      1U
+#define CK_HT_MODE_BYTESTRING  2U
+#define CK_HT_WORKLOAD_DELETE  4U
+
+#if defined(CK_MD_POINTER_PACK_ENABLE) && defined(CK_MD_VMA_BITS)
+#define CK_HT_PP
+#define CK_HT_KEY_LENGTH ((sizeof(void *) * 8) - CK_MD_VMA_BITS)
+#define CK_HT_KEY_MASK   ((1U << CK_HT_KEY_LENGTH) - 1)
+#else
+#define CK_HT_KEY_LENGTH 65535U
+#endif
+
+struct ck_ht_entry {
+#ifdef CK_HT_PP
+       uintptr_t key;
+       uintptr_t value CK_CC_PACKED;
+} CK_CC_ALIGN(16);
+#else
+       uintptr_t key;
+       uintptr_t value;
+       uint64_t key_length;
+       uint64_t hash;
+} CK_CC_ALIGN(32);
+#endif
+typedef struct ck_ht_entry ck_ht_entry_t;
+
+/*
+ * The user is free to define their own stub values.
+ */
+#ifndef CK_HT_KEY_EMPTY
+#define CK_HT_KEY_EMPTY                ((uintptr_t)0)
+#endif
+
+#ifndef CK_HT_KEY_TOMBSTONE
+#define CK_HT_KEY_TOMBSTONE    (~CK_HT_KEY_EMPTY)
+#endif
+
+/*
+ * Hash callback function. First argument is updated to contain a hash value,
+ * second argument is the key, third argument is key length and final argument
+ * is the hash table seed value.
+ */
+typedef void ck_ht_hash_cb_t(ck_ht_hash_t *, const void *, size_t, uint64_t);
+
+struct ck_ht_map;
+struct ck_ht {
+       struct ck_malloc *m;
+       struct ck_ht_map *map;
+       unsigned int mode;
+       uint64_t seed;
+       ck_ht_hash_cb_t *h;
+};
+typedef struct ck_ht ck_ht_t;
+
+struct ck_ht_stat {
+       uint64_t probe_maximum;
+       uint64_t n_entries;
+};
+
+struct ck_ht_iterator {
+       struct ck_ht_entry *current;
+       uint64_t offset;
+};
+typedef struct ck_ht_iterator ck_ht_iterator_t;
+
+#define CK_HT_ITERATOR_INITIALIZER { NULL, 0 }
+
+CK_CC_INLINE static void
+ck_ht_iterator_init(struct ck_ht_iterator *iterator)
+{
+
+       iterator->current = NULL;
+       iterator->offset = 0;
+       return;
+}
+
+CK_CC_INLINE static bool
+ck_ht_entry_empty(ck_ht_entry_t *entry)
+{
+
+       return entry->key == CK_HT_KEY_EMPTY;
+}
+
+CK_CC_INLINE static void
+ck_ht_entry_key_set_direct(ck_ht_entry_t *entry, uintptr_t key)
+{
+
+       entry->key = key;
+       return;
+}
+
+CK_CC_INLINE static void
+ck_ht_entry_key_set(ck_ht_entry_t *entry, const void *key, uint16_t key_length)
+{
+
+#ifdef CK_HT_PP
+       entry->key = (uintptr_t)key | ((uintptr_t)key_length << CK_MD_VMA_BITS);
+#else
+       entry->key = (uintptr_t)key;
+       entry->key_length = key_length;
+#endif
+
+       return;
+}
+
+CK_CC_INLINE static void *
+ck_ht_entry_key(ck_ht_entry_t *entry)
+{
+
+#ifdef CK_HT_PP
+       return (void *)(entry->key & (((uintptr_t)1 << CK_MD_VMA_BITS) - 1));
+#else
+       return (void *)entry->key;
+#endif
+}
+
+CK_CC_INLINE static uint16_t
+ck_ht_entry_key_length(ck_ht_entry_t *entry)
+{
+
+#ifdef CK_HT_PP
+       return entry->key >> CK_MD_VMA_BITS;
+#else
+       return entry->key_length;
+#endif
+}
+
+CK_CC_INLINE static void *
+ck_ht_entry_value(ck_ht_entry_t *entry)
+{
+
+#ifdef CK_HT_PP
+       return (void *)(entry->value & (((uintptr_t)1 << CK_MD_VMA_BITS) - 1));
+#else
+       return (void *)entry->value;
+#endif
+}
+
+CK_CC_INLINE static void
+ck_ht_entry_set(struct ck_ht_entry *entry,
+               ck_ht_hash_t h,
+               const void *key,
+               uint16_t key_length,
+               const void *value)
+{
+
+#ifdef CK_HT_PP
+       entry->key = (uintptr_t)key | ((uintptr_t)key_length << CK_MD_VMA_BITS);
+       entry->value = (uintptr_t)value | ((uintptr_t)(h.value >> 32) << CK_MD_VMA_BITS);
+#else
+       entry->key = (uintptr_t)key;
+       entry->value = (uintptr_t)value;
+       entry->key_length = key_length;
+       entry->hash = h.value;
+#endif
+
+       return;
+}
+
+CK_CC_INLINE static void
+ck_ht_entry_set_direct(struct ck_ht_entry *entry,
+                      ck_ht_hash_t h,
+                      uintptr_t key,
+                      uintptr_t value)
+{
+
+       entry->key = key;
+       entry->value = value;
+
+#ifndef CK_HT_PP
+       entry->hash = h.value;
+#else
+       (void)h;
+#endif
+       return;
+}
+
+CK_CC_INLINE static uintptr_t
+ck_ht_entry_key_direct(ck_ht_entry_t *entry)
+{
+
+       return entry->key;
+}
+
+CK_CC_INLINE static uintptr_t
+ck_ht_entry_value_direct(ck_ht_entry_t *entry)
+{
+
+       return entry->value;
+}
+
+/*
+ * Iteration must occur without any concurrent mutations on
+ * the hash table.
+ */
+bool ck_ht_next(ck_ht_t *, ck_ht_iterator_t *, ck_ht_entry_t **entry);
+
+void ck_ht_stat(ck_ht_t *, struct ck_ht_stat *);
+void ck_ht_hash(ck_ht_hash_t *, ck_ht_t *, const void *, uint16_t);
+void ck_ht_hash_direct(ck_ht_hash_t *, ck_ht_t *, uintptr_t);
+bool ck_ht_init(ck_ht_t *, unsigned int, ck_ht_hash_cb_t *,
+    struct ck_malloc *, uint64_t, uint64_t);
+void ck_ht_destroy(ck_ht_t *);
+bool ck_ht_set_spmc(ck_ht_t *, ck_ht_hash_t, ck_ht_entry_t *);
+bool ck_ht_put_spmc(ck_ht_t *, ck_ht_hash_t, ck_ht_entry_t *);
+bool ck_ht_get_spmc(ck_ht_t *, ck_ht_hash_t, ck_ht_entry_t *);
+bool ck_ht_gc(struct ck_ht *, unsigned long, unsigned long);
+bool ck_ht_grow_spmc(ck_ht_t *, uint64_t);
+bool ck_ht_remove_spmc(ck_ht_t *, ck_ht_hash_t, ck_ht_entry_t *);
+bool ck_ht_reset_spmc(ck_ht_t *);
+bool ck_ht_reset_size_spmc(ck_ht_t *, uint64_t);
+uint64_t ck_ht_count(ck_ht_t *);
+
+#endif /* CK_F_PR_LOAD_64 && CK_F_PR_STORE_64 */
+#endif /* _CK_HT_H */
+
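
For reference, a sketch of a byte-string round trip through the table. The malloc(3)-backed allocator and the seed are illustrative, and passing a NULL hash callback is assumed here to select the library's default hash function.

#include <ck_ht.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>

static void
ht_free(void *p, size_t size, bool defer)
{

        (void)size;
        (void)defer;
        free(p);
        return;
}

static struct ck_malloc allocator = {
        .malloc = malloc,
        .free = ht_free
};

int
main(void)
{
        static ck_ht_t ht;
        ck_ht_entry_t entry;
        ck_ht_hash_t h;
        const char *key = "example";
        char payload[] = "payload";

        if (ck_ht_init(&ht, CK_HT_MODE_BYTESTRING, NULL, &allocator,
            1024, 42) == false)
                return 1;

        ck_ht_hash(&h, &ht, key, (uint16_t)strlen(key));
        ck_ht_entry_set(&entry, h, key, (uint16_t)strlen(key), payload);
        ck_ht_put_spmc(&ht, h, &entry);

        /* A successful get fills the entry with the stored pair. */
        ck_ht_entry_key_set(&entry, key, (uint16_t)strlen(key));
        return ck_ht_get_spmc(&ht, h, &entry) == false;
}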

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/f098175e/lib/ck/include/ck_limits.h
----------------------------------------------------------------------
diff --git a/lib/ck/include/ck_limits.h b/lib/ck/include/ck_limits.h
new file mode 100644
index 0000000..c597763
--- /dev/null
+++ b/lib/ck/include/ck_limits.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2010-2014 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#if defined(__linux__) && defined(__KERNEL__)
+#include <linux/kernel.h>
+#else
+#include <limits.h>
+#endif /* __linux__ && __KERNEL__ */
+

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/f098175e/lib/ck/include/ck_malloc.h
----------------------------------------------------------------------
diff --git a/lib/ck/include/ck_malloc.h b/lib/ck/include/ck_malloc.h
new file mode 100644
index 0000000..40f1898
--- /dev/null
+++ b/lib/ck/include/ck_malloc.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2012-2014 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _CK_MALLOC_H
+#define _CK_MALLOC_H
+
+#include <stdbool.h>
+#include <sys/types.h>
+
+struct ck_malloc {
+       void *(*malloc)(size_t);
+       void *(*realloc)(void *, size_t, size_t, bool);
+       void (*free)(void *, size_t, bool);
+};
+
+#endif /* _CK_MALLOC_H */
+
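
For reference, a sketch of an adapter over the standard allocator. The extra size and defer arguments let callers that track allocation sizes, or that must defer frees for safe memory reclamation, take advantage of that information; a plain wrapper can simply ignore them.

#include <ck_malloc.h>
#include <stdlib.h>

static void *
wrap_realloc(void *p, size_t old_size, size_t new_size, bool defer)
{

        (void)old_size;
        (void)defer;
        return realloc(p, new_size);
}

static void
wrap_free(void *p, size_t size, bool defer)
{

        (void)size;
        (void)defer;
        free(p);
        return;
}

static struct ck_malloc allocator = {
        .malloc = malloc,
        .realloc = wrap_realloc,
        .free = wrap_free
};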

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/f098175e/lib/ck/include/ck_md.h.in
----------------------------------------------------------------------
diff --git a/lib/ck/include/ck_md.h.in b/lib/ck/include/ck_md.h.in
new file mode 100644
index 0000000..4449cf8
--- /dev/null
+++ b/lib/ck/include/ck_md.h.in
@@ -0,0 +1,54 @@
+/*
+ * Copyright 2011-2012 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _CK_MD_H
+#define _CK_MD_H
+
+#ifndef CK_MD_CACHELINE
+#define CK_MD_CACHELINE (64)
+#endif
+
+#ifndef CK_MD_PAGESIZE
+#define CK_MD_PAGESIZE (4096)
+#endif
+
+#ifndef @RTM_ENABLE@
+#define @RTM_ENABLE@
+#endif /* @RTM_ENABLE@ */
+
+#ifndef @POINTER_PACK_ENABLE@
+#define @POINTER_PACK_ENABLE@
+#endif /* @POINTER_PACK_ENABLE@ */
+
+#ifndef @VMA_BITS@ 
+#define @VMA_BITS@ @VMA_BITS_VALUE@
+#endif /* @VMA_BITS@ */
+
+#ifndef @MM@
+#define @MM@
+#endif /* @MM@ */
+
+#endif /* _CK_MD_H */

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/f098175e/lib/ck/include/ck_pflock.h
----------------------------------------------------------------------
diff --git a/lib/ck/include/ck_pflock.h b/lib/ck/include/ck_pflock.h
new file mode 100644
index 0000000..58963e8
--- /dev/null
+++ b/lib/ck/include/ck_pflock.h
@@ -0,0 +1,143 @@
+/*
+ * Copyright 2013 John Wittrock.
+ * Copyright 2013-2014 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _CK_PFLOCK_H
+#define _CK_PFLOCK_H
+
+/*
+ * This is an implementation of phase-fair locks derived from the work
+ * described in:
+ *    Brandenburg, B. and Anderson, J. 2010. Spin-Based
+ *    Reader-Writer Synchronization for Multiprocessor Real-Time Systems
+ */
+
+#include <ck_cc.h>
+#include <ck_pr.h>
+
+struct ck_pflock {
+       uint32_t rin;
+       uint32_t rout;
+       uint32_t win;
+       uint32_t wout;
+};
+typedef struct ck_pflock ck_pflock_t;
+
+#define CK_PFLOCK_LSB   0xFFFFFFF0
+#define CK_PFLOCK_RINC  0x100          /* Reader increment value. */
+#define CK_PFLOCK_WBITS 0x3            /* Writer bits in reader. */
+#define CK_PFLOCK_PRES  0x2            /* Writer present bit. */
+#define CK_PFLOCK_PHID  0x1            /* Phase ID bit. */
+
+#define CK_PFLOCK_INITIALIZER {0, 0, 0, 0}
+
+CK_CC_INLINE static void
+ck_pflock_init(struct ck_pflock *pf)
+{
+
+       pf->rin = 0;
+       pf->rout = 0;
+       pf->win = 0;
+       pf->wout = 0;
+       ck_pr_barrier();
+
+       return;
+}
+
+CK_CC_INLINE static void
+ck_pflock_write_unlock(ck_pflock_t *pf)
+{
+
+       ck_pr_fence_release();
+
+       /* Migrate from write phase to read phase. */
+       ck_pr_and_32(&pf->rin, CK_PFLOCK_LSB);
+
+       /* Allow other writers to continue. */
+       ck_pr_faa_32(&pf->wout, 1);
+       return;
+}
+
+CK_CC_INLINE static void
+ck_pflock_write_lock(ck_pflock_t *pf)
+{
+       uint32_t ticket;
+
+       /* Acquire ownership of write-phase. */
+       ticket = ck_pr_faa_32(&pf->win, 1);
+       while (ck_pr_load_32(&pf->wout) != ticket)
+               ck_pr_stall();
+
+       /*
+        * Acquire a ticket on the read-side in order to allow readers
+        * to flush. This indicates to any incoming reader that a
+        * write-phase is pending.
+        */
+       ticket = ck_pr_faa_32(&pf->rin,
+           (ticket & CK_PFLOCK_PHID) | CK_PFLOCK_PRES);
+
+       /* Wait for any pending readers to flush. */
+       while (ck_pr_load_32(&pf->rout) != ticket)
+               ck_pr_stall();
+
+       ck_pr_fence_acquire();
+       return;
+}
+
+CK_CC_INLINE static void
+ck_pflock_read_unlock(ck_pflock_t *pf)
+{
+
+       ck_pr_fence_release();
+       ck_pr_faa_32(&pf->rout, CK_PFLOCK_RINC);
+       return;
+}
+
+CK_CC_INLINE static void
+ck_pflock_read_lock(ck_pflock_t *pf)
+{
+       uint32_t w;
+
+       /*
+        * If no writer is present, then the operation has completed
+        * successfully.
+        */
+       w = ck_pr_faa_32(&pf->rin, CK_PFLOCK_RINC) & CK_PFLOCK_WBITS;
+       if (w == 0)
+               goto leave;
+
+       /* Wait for current write phase to complete. */
+       while ((ck_pr_load_32(&pf->rin) & CK_PFLOCK_WBITS) == w)
+               ck_pr_stall();
+
+leave:
+       /* Acquire semantics with respect to readers. */
+       ck_pr_fence_acquire();
+       return;
+}
+
+#endif /* _CK_PFLOCK_H */
+
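
For reference, a sketch of guarding a shared counter with the phase-fair lock above; the wrapper functions are illustrative. Readers may run concurrently with one another, while the phase-fair discipline bounds the wait of each side to roughly one phase of the other.

#include <ck_pflock.h>

static ck_pflock_t lock = CK_PFLOCK_INITIALIZER;
static int shared;

static int
read_shared(void)
{
        int snapshot;

        ck_pflock_read_lock(&lock);
        snapshot = shared;
        ck_pflock_read_unlock(&lock);
        return snapshot;
}

static void
write_shared(int value)
{

        ck_pflock_write_lock(&lock);
        shared = value;
        ck_pflock_write_unlock(&lock);
        return;
}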
