http://git-wip-us.apache.org/repos/asf/trafficserver/blob/f098175e/lib/ck/include/gcc/x86/ck_f_pr.h
----------------------------------------------------------------------
diff --git a/lib/ck/include/gcc/x86/ck_f_pr.h b/lib/ck/include/gcc/x86/ck_f_pr.h
new file mode 100644
index 0000000..f82c66b
--- /dev/null
+++ b/lib/ck/include/gcc/x86/ck_f_pr.h
@@ -0,0 +1,152 @@
+/* DO NOT EDIT. This is auto-generated from feature.sh */
+#define CK_F_PR_ADD_16
+#define CK_F_PR_ADD_32
+#define CK_F_PR_ADD_8
+#define CK_F_PR_ADD_CHAR
+#define CK_F_PR_ADD_INT
+#define CK_F_PR_ADD_PTR
+#define CK_F_PR_ADD_UINT
+#define CK_F_PR_AND_16
+#define CK_F_PR_AND_32
+#define CK_F_PR_AND_8
+#define CK_F_PR_AND_CHAR
+#define CK_F_PR_AND_INT
+#define CK_F_PR_AND_PTR
+#define CK_F_PR_AND_UINT
+#define CK_F_PR_BTC_16
+#define CK_F_PR_BTC_32
+#define CK_F_PR_BTC_INT
+#define CK_F_PR_BTC_PTR
+#define CK_F_PR_BTC_UINT
+#define CK_F_PR_BTR_16
+#define CK_F_PR_BTR_32
+#define CK_F_PR_BTR_INT
+#define CK_F_PR_BTR_PTR
+#define CK_F_PR_BTR_UINT
+#define CK_F_PR_BTS_16
+#define CK_F_PR_BTS_32
+#define CK_F_PR_BTS_INT
+#define CK_F_PR_BTS_PTR
+#define CK_F_PR_BTS_UINT
+#define CK_F_PR_CAS_16
+#define CK_F_PR_CAS_16_VALUE
+#define CK_F_PR_CAS_32
+#define CK_F_PR_CAS_32_VALUE
+#define CK_F_PR_CAS_8
+#define CK_F_PR_CAS_8_VALUE
+#define CK_F_PR_CAS_CHAR
+#define CK_F_PR_CAS_CHAR_VALUE
+#define CK_F_PR_CAS_INT
+#define CK_F_PR_CAS_INT_VALUE
+#define CK_F_PR_CAS_PTR
+#define CK_F_PR_CAS_PTR_VALUE
+#define CK_F_PR_CAS_UINT
+#define CK_F_PR_CAS_UINT_VALUE
+#define CK_F_PR_DEC_16
+#define CK_F_PR_DEC_16_ZERO
+#define CK_F_PR_DEC_32
+#define CK_F_PR_DEC_32_ZERO
+#define CK_F_PR_DEC_8
+#define CK_F_PR_DEC_8_ZERO
+#define CK_F_PR_DEC_CHAR
+#define CK_F_PR_DEC_CHAR_ZERO
+#define CK_F_PR_DEC_INT
+#define CK_F_PR_DEC_INT_ZERO
+#define CK_F_PR_DEC_PTR
+#define CK_F_PR_DEC_PTR_ZERO
+#define CK_F_PR_DEC_UINT
+#define CK_F_PR_DEC_UINT_ZERO
+#define CK_F_PR_FAA_16
+#define CK_F_PR_FAA_32
+#define CK_F_PR_FAA_8
+#define CK_F_PR_FAA_CHAR
+#define CK_F_PR_FAA_INT
+#define CK_F_PR_FAA_PTR
+#define CK_F_PR_FAA_UINT
+#define CK_F_PR_FAS_16
+#define CK_F_PR_FAS_32
+#define CK_F_PR_FAS_8
+#define CK_F_PR_FAS_CHAR
+#define CK_F_PR_FAS_INT
+#define CK_F_PR_FAS_PTR
+#define CK_F_PR_FAS_UINT
+#define CK_F_PR_FENCE_LOAD
+#define CK_F_PR_FENCE_LOAD_DEPENDS
+#define CK_F_PR_FENCE_MEMORY
+#define CK_F_PR_FENCE_STORE
+#define CK_F_PR_FENCE_STRICT_LOAD
+#define CK_F_PR_FENCE_STRICT_LOAD_DEPENDS
+#define CK_F_PR_FENCE_STRICT_MEMORY
+#define CK_F_PR_FENCE_STRICT_STORE
+#define CK_F_PR_INC_16
+#define CK_F_PR_INC_16_ZERO
+#define CK_F_PR_INC_32
+#define CK_F_PR_INC_32_ZERO
+#define CK_F_PR_INC_8
+#define CK_F_PR_INC_8_ZERO
+#define CK_F_PR_INC_CHAR
+#define CK_F_PR_INC_CHAR_ZERO
+#define CK_F_PR_INC_INT
+#define CK_F_PR_INC_INT_ZERO
+#define CK_F_PR_INC_PTR
+#define CK_F_PR_INC_PTR_ZERO
+#define CK_F_PR_INC_UINT
+#define CK_F_PR_INC_UINT_ZERO
+#define CK_F_PR_LOAD_16
+#define CK_F_PR_LOAD_32
+#define CK_F_PR_LOAD_8
+#define CK_F_PR_LOAD_CHAR
+#define CK_F_PR_LOAD_INT
+#define CK_F_PR_LOAD_PTR
+#define CK_F_PR_LOAD_UINT
+#define CK_F_PR_NEG_16
+#define CK_F_PR_NEG_16_ZERO
+#define CK_F_PR_NEG_32
+#define CK_F_PR_NEG_32_ZERO
+#define CK_F_PR_NEG_8
+#define CK_F_PR_NEG_8_ZERO
+#define CK_F_PR_NEG_CHAR
+#define CK_F_PR_NEG_CHAR_ZERO
+#define CK_F_PR_NEG_INT
+#define CK_F_PR_NEG_INT_ZERO
+#define CK_F_PR_NEG_PTR
+#define CK_F_PR_NEG_PTR_ZERO
+#define CK_F_PR_NEG_UINT
+#define CK_F_PR_NEG_UINT_ZERO
+#define CK_F_PR_NOT_16
+#define CK_F_PR_NOT_32
+#define CK_F_PR_NOT_8
+#define CK_F_PR_NOT_CHAR
+#define CK_F_PR_NOT_INT
+#define CK_F_PR_NOT_PTR
+#define CK_F_PR_NOT_UINT
+#define CK_F_PR_OR_16
+#define CK_F_PR_OR_32
+#define CK_F_PR_OR_8
+#define CK_F_PR_OR_CHAR
+#define CK_F_PR_OR_INT
+#define CK_F_PR_OR_PTR
+#define CK_F_PR_OR_UINT
+#define CK_F_PR_STALL
+#define CK_F_PR_STORE_16
+#define CK_F_PR_STORE_32
+#define CK_F_PR_STORE_8
+#define CK_F_PR_STORE_CHAR
+#define CK_F_PR_STORE_INT
+#define CK_F_PR_STORE_PTR
+#define CK_F_PR_STORE_UINT
+#define CK_F_PR_SUB_16
+#define CK_F_PR_SUB_32
+#define CK_F_PR_SUB_8
+#define CK_F_PR_SUB_CHAR
+#define CK_F_PR_SUB_INT
+#define CK_F_PR_SUB_PTR
+#define CK_F_PR_SUB_UINT
+#define CK_F_PR_XOR_16
+#define CK_F_PR_XOR_32
+#define CK_F_PR_XOR_8
+#define CK_F_PR_XOR_CHAR
+#define CK_F_PR_XOR_INT
+#define CK_F_PR_XOR_PTR
+#define CK_F_PR_XOR_UINT
+
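
The CK_F_PR_* macros above are compile-time capability flags; consumers test them before calling the corresponding ck_pr_* primitive. A minimal sketch of that pattern (the CAS fallback branch is illustrative, not part of this commit):

    #include <ck_pr.h>

    static unsigned int
    counter_next(unsigned int *counter)
    {
    #ifdef CK_F_PR_FAA_UINT
            /* Native fetch-and-add: returns the previous value. */
            return ck_pr_faa_uint(counter, 1);
    #else
            /* Hypothetical fallback built on compare-and-swap. */
            unsigned int old;

            do {
                    old = ck_pr_load_uint(counter);
            } while (ck_pr_cas_uint(counter, old, old + 1) == false);
            return old;
    #endif
    }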

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/f098175e/lib/ck/include/gcc/x86/ck_pr.h
----------------------------------------------------------------------
diff --git a/lib/ck/include/gcc/x86/ck_pr.h b/lib/ck/include/gcc/x86/ck_pr.h
new file mode 100644
index 0000000..2ed3e38
--- /dev/null
+++ b/lib/ck/include/gcc/x86/ck_pr.h
@@ -0,0 +1,388 @@
+/*
+ * Copyright 2009-2014 Samy Al Bahra.
+ * Copyright 2011 Devon H. O'Dell <devon.od...@gmail.com>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _CK_PR_X86_H
+#define _CK_PR_X86_H
+
+#ifndef _CK_PR_H
+#error Do not include this file directly, use ck_pr.h
+#endif
+
+#include <ck_cc.h>
+#include <ck_md.h>
+#include <ck_stdint.h>
+#include <stdbool.h>
+
+/*
+ * The following represent supported atomic operations.
+ * These operations may be emulated.
+ */
+#include "ck_f_pr.h"
+
+/* Minimum requirements for the CK_PR interface are met. */
+#define CK_F_PR
+
+#ifdef CK_MD_UMP
+#define CK_PR_LOCK_PREFIX
+#else
+#define CK_PR_LOCK_PREFIX "lock "
+#endif
+
+/*
+ * Prevent speculative execution in busy-wait loops (P4 <=)
+ * or "predefined delay".
+ */
+CK_CC_INLINE static void
+ck_pr_stall(void)
+{
+       __asm__ __volatile__("pause" ::: "memory");
+       return;
+}
+
+#define CK_PR_FENCE(T, I)                              \
+       CK_CC_INLINE static void                        \
+       ck_pr_fence_strict_##T(void)                    \
+       {                                               \
+               __asm__ __volatile__(I ::: "memory");   \
+       }
+
+CK_PR_FENCE(atomic, "sfence")
+CK_PR_FENCE(atomic_store, "sfence")
+CK_PR_FENCE(atomic_load, "mfence")
+CK_PR_FENCE(store_atomic, "sfence")
+CK_PR_FENCE(load_atomic, "mfence")
+CK_PR_FENCE(load, "lfence")
+CK_PR_FENCE(load_store, "mfence")
+CK_PR_FENCE(store, "sfence")
+CK_PR_FENCE(store_load, "mfence")
+CK_PR_FENCE(memory, "mfence")
+CK_PR_FENCE(release, "mfence")
+CK_PR_FENCE(acquire, "mfence")
+
+#undef CK_PR_FENCE
+
+/*
+ * Atomic fetch-and-store operations.
+ */
+#define CK_PR_FAS(S, M, T, C, I)                               \
+       CK_CC_INLINE static T                                   \
+       ck_pr_fas_##S(M *target, T v)                           \
+       {                                                       \
+               __asm__ __volatile__(I " %0, %1"                \
+                                       : "+m" (*(C *)target),  \
+                                         "+q" (v)              \
+                                       :                       \
+                                       : "memory");            \
+               return v;                                       \
+       }
+
+CK_PR_FAS(ptr, void, void *, char, "xchgl")
+
+#define CK_PR_FAS_S(S, T, I) CK_PR_FAS(S, T, T, T, I)
+
+CK_PR_FAS_S(char, char, "xchgb")
+CK_PR_FAS_S(uint, unsigned int, "xchgl")
+CK_PR_FAS_S(int, int, "xchgl")
+CK_PR_FAS_S(32, uint32_t, "xchgl")
+CK_PR_FAS_S(16, uint16_t, "xchgw")
+CK_PR_FAS_S(8,  uint8_t,  "xchgb")
+
+#undef CK_PR_FAS_S
+#undef CK_PR_FAS
+
+#define CK_PR_LOAD(S, M, T, C, I)                              \
+       CK_CC_INLINE static T                                   \
+       ck_pr_load_##S(const M *target)                         \
+       {                                                       \
+               T r;                                            \
+               __asm__ __volatile__(I " %1, %0"                \
+                                       : "=q" (r)              \
+                                       : "m"  (*(C *)target)   \
+                                       : "memory");            \
+               return (r);                                     \
+       }
+
+CK_PR_LOAD(ptr, void, void *, char, "movl")
+
+#define CK_PR_LOAD_S(S, T, I) CK_PR_LOAD(S, T, T, T, I)
+
+CK_PR_LOAD_S(char, char, "movb")
+CK_PR_LOAD_S(uint, unsigned int, "movl")
+CK_PR_LOAD_S(int, int, "movl")
+CK_PR_LOAD_S(32, uint32_t, "movl")
+CK_PR_LOAD_S(16, uint16_t, "movw")
+CK_PR_LOAD_S(8,  uint8_t,  "movb")
+
+#undef CK_PR_LOAD_S
+#undef CK_PR_LOAD
+
+#define CK_PR_STORE(S, M, T, C, I)                             \
+       CK_CC_INLINE static void                                \
+       ck_pr_store_##S(M *target, T v)                         \
+       {                                                       \
+               __asm__ __volatile__(I " %1, %0"                \
+                                       : "=m" (*(C *)target)   \
+                                       : CK_CC_IMM "q" (v)     \
+                                       : "memory");            \
+               return;                                         \
+       }
+
+CK_PR_STORE(ptr, void, const void *, char, "movl")
+
+#define CK_PR_STORE_S(S, T, I) CK_PR_STORE(S, T, T, T, I)
+
+CK_PR_STORE_S(char, char, "movb")
+CK_PR_STORE_S(uint, unsigned int, "movl")
+CK_PR_STORE_S(int, int, "movl")
+CK_PR_STORE_S(32, uint32_t, "movl")
+CK_PR_STORE_S(16, uint16_t, "movw")
+CK_PR_STORE_S(8,  uint8_t, "movb")
+
+#undef CK_PR_STORE_S
+#undef CK_PR_STORE
+
+/*
+ * Atomic fetch-and-add operations.
+ */
+#define CK_PR_FAA(S, M, T, C, I)                                       \
+       CK_CC_INLINE static T                                           \
+       ck_pr_faa_##S(M *target, T d)                                   \
+       {                                                               \
+               __asm__ __volatile__(CK_PR_LOCK_PREFIX I " %1, %0"      \
+                                       : "+m" (*(C *)target),          \
+                                         "+q" (d)                      \
+                                       :                               \
+                                       : "memory", "cc");              \
+               return (d);                                             \
+       }
+
+CK_PR_FAA(ptr, void, uintptr_t, char, "xaddl")
+
+#define CK_PR_FAA_S(S, T, I) CK_PR_FAA(S, T, T, T, I)
+
+CK_PR_FAA_S(char, char, "xaddb")
+CK_PR_FAA_S(uint, unsigned int, "xaddl")
+CK_PR_FAA_S(int, int, "xaddl")
+CK_PR_FAA_S(32, uint32_t, "xaddl")
+CK_PR_FAA_S(16, uint16_t, "xaddw")
+CK_PR_FAA_S(8,  uint8_t,  "xaddb")
+
+#undef CK_PR_FAA_S
+#undef CK_PR_FAA
+
+/*
+ * Atomic store-only unary operations.
+ */
+#define CK_PR_UNARY(K, S, T, C, I)                             \
+       CK_PR_UNARY_R(K, S, T, C, I)                            \
+       CK_PR_UNARY_V(K, S, T, C, I)
+
+#define CK_PR_UNARY_R(K, S, T, C, I)                           \
+       CK_CC_INLINE static void                                \
+       ck_pr_##K##_##S(T *target)                              \
+       {                                                       \
+               __asm__ __volatile__(CK_PR_LOCK_PREFIX I " %0"  \
+                                       : "+m" (*(C *)target)   \
+                                       :                       \
+                                       : "memory", "cc");      \
+               return;                                         \
+       }
+
+#define CK_PR_UNARY_V(K, S, T, C, I)                                   \
+       CK_CC_INLINE static void                                        \
+       ck_pr_##K##_##S##_zero(T *target, bool *r)                      \
+       {                                                               \
+               __asm__ __volatile__(CK_PR_LOCK_PREFIX I " %0; setz %1" \
+                                       : "+m" (*(C *)target),          \
+                                         "=m" (*r)                     \
+                                       :                               \
+                                       : "memory", "cc");              \
+               return;                                                 \
+       }
+
+
+#define CK_PR_UNARY_S(K, S, T, I) CK_PR_UNARY(K, S, T, T, I)
+
+#define CK_PR_GENERATE(K)                              \
+       CK_PR_UNARY(K, ptr, void, char, #K "l")         \
+       CK_PR_UNARY_S(K, char, char, #K "b")            \
+       CK_PR_UNARY_S(K, int, int, #K "l")              \
+       CK_PR_UNARY_S(K, uint, unsigned int, #K "l")    \
+       CK_PR_UNARY_S(K, 32, uint32_t, #K "l")          \
+       CK_PR_UNARY_S(K, 16, uint16_t, #K "w")          \
+       CK_PR_UNARY_S(K, 8, uint8_t, #K "b")
+
+CK_PR_GENERATE(inc)
+CK_PR_GENERATE(dec)
+CK_PR_GENERATE(neg)
+
+/* not does not affect condition flags. */
+#undef CK_PR_UNARY_V
+#define CK_PR_UNARY_V(a, b, c, d, e)
+CK_PR_GENERATE(not)
+
+#undef CK_PR_GENERATE
+#undef CK_PR_UNARY_S
+#undef CK_PR_UNARY_V
+#undef CK_PR_UNARY_R
+#undef CK_PR_UNARY
+
+/*
+ * Atomic store-only binary operations.
+ */
+#define CK_PR_BINARY(K, S, M, T, C, I)                                 \
+       CK_CC_INLINE static void                                        \
+       ck_pr_##K##_##S(M *target, T d)                                 \
+       {                                                               \
+               __asm__ __volatile__(CK_PR_LOCK_PREFIX I " %1, %0"      \
+                                       : "+m" (*(C *)target)           \
+                                       : CK_CC_IMM "q" (d)             \
+                                       : "memory", "cc");              \
+               return;                                                 \
+       }
+
+#define CK_PR_BINARY_S(K, S, T, I) CK_PR_BINARY(K, S, T, T, T, I)
+
+#define CK_PR_GENERATE(K)                                      \
+       CK_PR_BINARY(K, ptr, void, uintptr_t, char, #K "l")     \
+       CK_PR_BINARY_S(K, char, char, #K "b")                   \
+       CK_PR_BINARY_S(K, int, int, #K "l")                     \
+       CK_PR_BINARY_S(K, uint, unsigned int, #K "l")           \
+       CK_PR_BINARY_S(K, 32, uint32_t, #K "l")                 \
+       CK_PR_BINARY_S(K, 16, uint16_t, #K "w")                 \
+       CK_PR_BINARY_S(K, 8, uint8_t, #K "b")
+
+CK_PR_GENERATE(add)
+CK_PR_GENERATE(sub)
+CK_PR_GENERATE(and)
+CK_PR_GENERATE(or)
+CK_PR_GENERATE(xor)
+
+#undef CK_PR_GENERATE
+#undef CK_PR_BINARY_S
+#undef CK_PR_BINARY
+
+/*
+ * Atomic compare and swap.
+ */
+#define CK_PR_CAS(S, M, T, C, I)                                       \
+       CK_CC_INLINE static bool                                        \
+       ck_pr_cas_##S(M *target, T compare, T set)                      \
+       {                                                               \
+               bool z;                                                 \
+               __asm__ __volatile__(CK_PR_LOCK_PREFIX I " %2, %0; setz %1" \
+                                       : "+m"  (*(C *)target),         \
+                                         "=a"  (z)                     \
+                                       : "q"   (set),                  \
+                                         "a"   (compare)               \
+                                       : "memory", "cc");              \
+               return z;                                               \
+       }
+
+CK_PR_CAS(ptr, void, void *, char, "cmpxchgl")
+
+#define CK_PR_CAS_S(S, T, I) CK_PR_CAS(S, T, T, T, I)
+
+CK_PR_CAS_S(char, char, "cmpxchgb")
+CK_PR_CAS_S(int, int, "cmpxchgl")
+CK_PR_CAS_S(uint, unsigned int, "cmpxchgl")
+CK_PR_CAS_S(32, uint32_t, "cmpxchgl")
+CK_PR_CAS_S(16, uint16_t, "cmpxchgw")
+CK_PR_CAS_S(8,  uint8_t,  "cmpxchgb")
+
+#undef CK_PR_CAS_S
+#undef CK_PR_CAS
+
+/*
+ * Compare and swap, set *v to old value of target.
+ */
+#define CK_PR_CAS_O(S, M, T, C, I, R)                                  \
+       CK_CC_INLINE static bool                                        \
+       ck_pr_cas_##S##_value(M *target, T compare, T set, M *v)        \
+       {                                                               \
+               bool z;                                                 \
+               __asm__ __volatile__(CK_PR_LOCK_PREFIX "cmpxchg" I " %3, %0;" \
+                                    "mov %%" R ", %2;"                 \
+                                    "setz %1;"                         \
+                                       : "+m"  (*(C *)target),         \
+                                         "=a"  (z),                    \
+                                         "=m"  (*(C *)v)               \
+                                       : "q"   (set),                  \
+                                         "a"   (compare)               \
+                                       : "memory", "cc");              \
+               return (bool)z;                                         \
+       }
+
+CK_PR_CAS_O(ptr, void, void *, char, "l", "eax")
+
+#define CK_PR_CAS_O_S(S, T, I, R)      \
+       CK_PR_CAS_O(S, T, T, T, I, R)
+
+CK_PR_CAS_O_S(char, char, "b", "al")
+CK_PR_CAS_O_S(int, int, "l", "eax")
+CK_PR_CAS_O_S(uint, unsigned int, "l", "eax")
+CK_PR_CAS_O_S(32, uint32_t, "l", "eax")
+CK_PR_CAS_O_S(16, uint16_t, "w", "ax")
+CK_PR_CAS_O_S(8,  uint8_t,  "b", "al")
+
+#undef CK_PR_CAS_O_S
+#undef CK_PR_CAS_O
+
+/*
+ * Atomic bit test operations.
+ */
+#define CK_PR_BT(K, S, T, P, C, I)                                     \
+       CK_CC_INLINE static bool                                        \
+       ck_pr_##K##_##S(T *target, unsigned int b)                      \
+       {                                                               \
+               bool c;                                                 \
+               __asm__ __volatile__(CK_PR_LOCK_PREFIX I "; setc %1"    \
+                                       : "+m" (*(C *)target),          \
+                                         "=q" (c)                      \
+                                       : "q"  ((P)b)                   \
+                                       : "memory", "cc");              \
+               return (bool)c;                                         \
+       }
+
+#define CK_PR_BT_S(K, S, T, I) CK_PR_BT(K, S, T, T, T, I)
+
+#define CK_PR_GENERATE(K)                                      \
+       CK_PR_BT(K, ptr, void, uint32_t, char, #K "l %2, %0")   \
+       CK_PR_BT_S(K, uint, unsigned int, #K "l %2, %0")        \
+       CK_PR_BT_S(K, int, int, #K "l %2, %0")                  \
+       CK_PR_BT_S(K, 32, uint32_t, #K "l %2, %0")              \
+       CK_PR_BT_S(K, 16, uint16_t, #K "w %w2, %0")
+
+CK_PR_GENERATE(btc)
+CK_PR_GENERATE(bts)
+CK_PR_GENERATE(btr)
+
+#undef CK_PR_GENERATE
+#undef CK_PR_BT
+
+#endif /* _CK_PR_X86_H */
+
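
The fetch-and-store and fence primitives defined above compose into the classic test-and-set spinlock; a hedged sketch (the lock layout is illustrative, and ck_pr_fence_acquire/ck_pr_fence_release come from the generic ck_pr.h layer built on the strict fences defined here):

    #include <ck_pr.h>

    static unsigned int lock_word;

    static void
    lock_acquire(void)
    {
            /* xchg-based acquire; ck_pr_stall emits pause while spinning. */
            while (ck_pr_fas_uint(&lock_word, 1) == 1)
                    ck_pr_stall();
            ck_pr_fence_acquire();
    }

    static void
    lock_release(void)
    {
            ck_pr_fence_release();
            ck_pr_store_uint(&lock_word, 0);
    }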

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/f098175e/lib/ck/include/gcc/x86_64/ck_f_pr.h
----------------------------------------------------------------------
diff --git a/lib/ck/include/gcc/x86_64/ck_f_pr.h b/lib/ck/include/gcc/x86_64/ck_f_pr.h
new file mode 100644
index 0000000..545f5fd
--- /dev/null
+++ b/lib/ck/include/gcc/x86_64/ck_f_pr.h
@@ -0,0 +1,202 @@
+/* DO NOT EDIT. This is auto-generated from feature.sh */
+#define CK_F_PR_ADD_16
+#define CK_F_PR_ADD_32
+#define CK_F_PR_ADD_64
+#define CK_F_PR_ADD_8
+#define CK_F_PR_ADD_CHAR
+#define CK_F_PR_ADD_INT
+#define CK_F_PR_ADD_PTR
+#define CK_F_PR_ADD_UINT
+#define CK_F_PR_AND_16
+#define CK_F_PR_AND_32
+#define CK_F_PR_AND_64
+#define CK_F_PR_AND_8
+#define CK_F_PR_AND_CHAR
+#define CK_F_PR_AND_INT
+#define CK_F_PR_AND_PTR
+#define CK_F_PR_AND_UINT
+#define CK_F_PR_BTC_16
+#define CK_F_PR_BTC_32
+#define CK_F_PR_BTC_64
+#define CK_F_PR_BTC_INT
+#define CK_F_PR_BTC_PTR
+#define CK_F_PR_BTC_UINT
+#define CK_F_PR_BTR_16
+#define CK_F_PR_BTR_32
+#define CK_F_PR_BTR_64
+#define CK_F_PR_BTR_INT
+#define CK_F_PR_BTR_PTR
+#define CK_F_PR_BTR_UINT
+#define CK_F_PR_BTS_16
+#define CK_F_PR_BTS_32
+#define CK_F_PR_BTS_64
+#define CK_F_PR_BTS_INT
+#define CK_F_PR_BTS_PTR
+#define CK_F_PR_BTS_UINT
+#define CK_F_PR_CAS_16
+#define CK_F_PR_CAS_16_8
+#define CK_F_PR_CAS_16_8_VALUE
+#define CK_F_PR_CAS_16_VALUE
+#define CK_F_PR_CAS_32
+#define CK_F_PR_CAS_32_4
+#define CK_F_PR_CAS_32_4_VALUE
+#define CK_F_PR_CAS_32_VALUE
+#define CK_F_PR_CAS_64
+#define CK_F_PR_CAS_64_2
+#define CK_F_PR_CAS_64_2_VALUE
+#define CK_F_PR_CAS_64_VALUE
+#define CK_F_PR_CAS_8
+#define CK_F_PR_CAS_8_16
+#define CK_F_PR_CAS_8_16_VALUE
+#define CK_F_PR_CAS_8_VALUE
+#define CK_F_PR_CAS_CHAR
+#define CK_F_PR_CAS_CHAR_16
+#define CK_F_PR_CAS_CHAR_16_VALUE
+#define CK_F_PR_CAS_CHAR_VALUE
+#define CK_F_PR_CAS_INT
+#define CK_F_PR_CAS_INT_4
+#define CK_F_PR_CAS_INT_4_VALUE
+#define CK_F_PR_CAS_INT_VALUE
+#define CK_F_PR_CAS_PTR
+#define CK_F_PR_CAS_PTR_2
+#define CK_F_PR_CAS_PTR_2_VALUE
+#define CK_F_PR_CAS_PTR_VALUE
+#define CK_F_PR_CAS_DOUBLE
+#define CK_F_PR_CAS_DOUBLE_2
+#define CK_F_PR_CAS_DOUBLE_VALUE
+#define CK_F_PR_CAS_UINT
+#define CK_F_PR_CAS_UINT_4
+#define CK_F_PR_CAS_UINT_4_VALUE
+#define CK_F_PR_CAS_UINT_VALUE
+#define CK_F_PR_DEC_16
+#define CK_F_PR_DEC_16_ZERO
+#define CK_F_PR_DEC_32
+#define CK_F_PR_DEC_32_ZERO
+#define CK_F_PR_DEC_64
+#define CK_F_PR_DEC_64_ZERO
+#define CK_F_PR_DEC_8
+#define CK_F_PR_DEC_8_ZERO
+#define CK_F_PR_DEC_CHAR
+#define CK_F_PR_DEC_CHAR_ZERO
+#define CK_F_PR_DEC_INT
+#define CK_F_PR_DEC_INT_ZERO
+#define CK_F_PR_DEC_PTR
+#define CK_F_PR_DEC_PTR_ZERO
+#define CK_F_PR_DEC_UINT
+#define CK_F_PR_DEC_UINT_ZERO
+#define CK_F_PR_FAA_16
+#define CK_F_PR_FAA_32
+#define CK_F_PR_FAA_64
+#define CK_F_PR_FAA_8
+#define CK_F_PR_FAA_CHAR
+#define CK_F_PR_FAA_INT
+#define CK_F_PR_FAA_PTR
+#define CK_F_PR_FAA_UINT
+#define CK_F_PR_FAS_16
+#define CK_F_PR_FAS_32
+#define CK_F_PR_FAS_64
+#define CK_F_PR_FAS_8
+#define CK_F_PR_FAS_CHAR
+#define CK_F_PR_FAS_INT
+#define CK_F_PR_FAS_PTR
+#define CK_F_PR_FAS_UINT
+#define CK_F_PR_FAS_DOUBLE
+#define CK_F_PR_FENCE_LOAD
+#define CK_F_PR_FENCE_LOAD_DEPENDS
+#define CK_F_PR_FENCE_MEMORY
+#define CK_F_PR_FENCE_STORE
+#define CK_F_PR_FENCE_STRICT_LOAD
+#define CK_F_PR_FENCE_STRICT_LOAD_DEPENDS
+#define CK_F_PR_FENCE_STRICT_MEMORY
+#define CK_F_PR_FENCE_STRICT_STORE
+#define CK_F_PR_INC_16
+#define CK_F_PR_INC_16_ZERO
+#define CK_F_PR_INC_32
+#define CK_F_PR_INC_32_ZERO
+#define CK_F_PR_INC_64
+#define CK_F_PR_INC_64_ZERO
+#define CK_F_PR_INC_8
+#define CK_F_PR_INC_8_ZERO
+#define CK_F_PR_INC_CHAR
+#define CK_F_PR_INC_CHAR_ZERO
+#define CK_F_PR_INC_INT
+#define CK_F_PR_INC_INT_ZERO
+#define CK_F_PR_INC_PTR
+#define CK_F_PR_INC_PTR_ZERO
+#define CK_F_PR_INC_UINT
+#define CK_F_PR_INC_UINT_ZERO
+#define CK_F_PR_LOAD_16
+#define CK_F_PR_LOAD_16_8
+#define CK_F_PR_LOAD_32
+#define CK_F_PR_LOAD_32_4
+#define CK_F_PR_LOAD_64
+#define CK_F_PR_LOAD_64_2
+#define CK_F_PR_LOAD_8
+#define CK_F_PR_LOAD_8_16
+#define CK_F_PR_LOAD_CHAR
+#define CK_F_PR_LOAD_CHAR_16
+#define CK_F_PR_LOAD_INT
+#define CK_F_PR_LOAD_INT_4
+#define CK_F_PR_LOAD_PTR
+#define CK_F_PR_LOAD_PTR_2
+#define CK_F_PR_LOAD_DOUBLE
+#define CK_F_PR_LOAD_UINT
+#define CK_F_PR_LOAD_UINT_4
+#define CK_F_PR_NEG_16
+#define CK_F_PR_NEG_16_ZERO
+#define CK_F_PR_NEG_32
+#define CK_F_PR_NEG_32_ZERO
+#define CK_F_PR_NEG_64
+#define CK_F_PR_NEG_64_ZERO
+#define CK_F_PR_NEG_8
+#define CK_F_PR_NEG_8_ZERO
+#define CK_F_PR_NEG_CHAR
+#define CK_F_PR_NEG_CHAR_ZERO
+#define CK_F_PR_NEG_INT
+#define CK_F_PR_NEG_INT_ZERO
+#define CK_F_PR_NEG_PTR
+#define CK_F_PR_NEG_PTR_ZERO
+#define CK_F_PR_NEG_UINT
+#define CK_F_PR_NEG_UINT_ZERO
+#define CK_F_PR_NOT_16
+#define CK_F_PR_NOT_32
+#define CK_F_PR_NOT_64
+#define CK_F_PR_NOT_8
+#define CK_F_PR_NOT_CHAR
+#define CK_F_PR_NOT_INT
+#define CK_F_PR_NOT_PTR
+#define CK_F_PR_NOT_UINT
+#define CK_F_PR_OR_16
+#define CK_F_PR_OR_32
+#define CK_F_PR_OR_64
+#define CK_F_PR_OR_8
+#define CK_F_PR_OR_CHAR
+#define CK_F_PR_OR_INT
+#define CK_F_PR_OR_PTR
+#define CK_F_PR_OR_UINT
+#define CK_F_PR_STORE_16
+#define CK_F_PR_STORE_32
+#define CK_F_PR_STORE_64
+#define CK_F_PR_STORE_8
+#define CK_F_PR_STORE_CHAR
+#define CK_F_PR_STORE_INT
+#define CK_F_PR_STORE_DOUBLE
+#define CK_F_PR_STORE_PTR
+#define CK_F_PR_STORE_UINT
+#define CK_F_PR_SUB_16
+#define CK_F_PR_SUB_32
+#define CK_F_PR_SUB_64
+#define CK_F_PR_SUB_8
+#define CK_F_PR_SUB_CHAR
+#define CK_F_PR_SUB_INT
+#define CK_F_PR_SUB_PTR
+#define CK_F_PR_SUB_UINT
+#define CK_F_PR_XOR_16
+#define CK_F_PR_XOR_32
+#define CK_F_PR_XOR_64
+#define CK_F_PR_XOR_8
+#define CK_F_PR_XOR_CHAR
+#define CK_F_PR_XOR_INT
+#define CK_F_PR_XOR_PTR
+#define CK_F_PR_XOR_UINT
+
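
Relative to the x86 port, this file additionally advertises 64-bit and double-width (cmpxchg16b-backed) operations such as CK_F_PR_CAS_64_2. A sketch of gating on one of them (the ABA tagged-pointer scheme is illustrative):

    #include <ck_pr.h>
    #include <stdbool.h>
    #include <stdint.h>

    #ifdef CK_F_PR_CAS_64_2
    /* Swap an ABA-tagged pair atomically; pair must be 16-byte aligned. */
    static bool
    tagged_swap(uint64_t pair[2], uint64_t old_ptr, uint64_t old_tag,
        uint64_t new_ptr)
    {
            uint64_t compare[2] = { old_ptr, old_tag };
            uint64_t set[2] = { new_ptr, old_tag + 1 };

            return ck_pr_cas_64_2(pair, compare, set);
    }
    #endif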

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/f098175e/lib/ck/include/gcc/x86_64/ck_pr.h
----------------------------------------------------------------------
diff --git a/lib/ck/include/gcc/x86_64/ck_pr.h b/lib/ck/include/gcc/x86_64/ck_pr.h
new file mode 100644
index 0000000..4d57e80
--- /dev/null
+++ b/lib/ck/include/gcc/x86_64/ck_pr.h
@@ -0,0 +1,545 @@
+/*
+ * Copyright 2009-2014 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _CK_PR_X86_64_H
+#define _CK_PR_X86_64_H
+
+#ifndef _CK_PR_H
+#error Do not include this file directly, use ck_pr.h
+#endif
+
+#include <ck_cc.h>
+#include <ck_md.h>
+#include <ck_stdint.h>
+#include <stdbool.h>
+
+/*
+ * The following represent supported atomic operations.
+ * These operations may be emulated.
+ */
+#include "ck_f_pr.h"
+
+/*
+ * Support for TSX extensions.
+ */
+#ifdef CK_MD_RTM_ENABLE
+#include "ck_pr_rtm.h"
+#endif
+
+/* Minimum requirements for the CK_PR interface are met. */
+#define CK_F_PR
+
+#ifdef CK_MD_UMP
+#define CK_PR_LOCK_PREFIX
+#else
+#define CK_PR_LOCK_PREFIX "lock "
+#endif
+
+/*
+ * Prevent speculative execution in busy-wait loops (P4 <=)
+ * or "predefined delay".
+ */
+CK_CC_INLINE static void
+ck_pr_stall(void)
+{
+       __asm__ __volatile__("pause" ::: "memory");
+       return;
+}
+
+#define CK_PR_FENCE(T, I)                              \
+       CK_CC_INLINE static void                        \
+       ck_pr_fence_strict_##T(void)                    \
+       {                                               \
+               __asm__ __volatile__(I ::: "memory");   \
+       }
+
+CK_PR_FENCE(atomic, "sfence")
+CK_PR_FENCE(atomic_store, "sfence")
+CK_PR_FENCE(atomic_load, "mfence")
+CK_PR_FENCE(store_atomic, "sfence")
+CK_PR_FENCE(load_atomic, "mfence")
+CK_PR_FENCE(load, "lfence")
+CK_PR_FENCE(load_store, "mfence")
+CK_PR_FENCE(store, "sfence")
+CK_PR_FENCE(store_load, "mfence")
+CK_PR_FENCE(memory, "mfence")
+CK_PR_FENCE(release, "mfence")
+CK_PR_FENCE(acquire, "mfence")
+
+#undef CK_PR_FENCE
+
+/*
+ * Atomic fetch-and-store operations.
+ */
+#define CK_PR_FAS(S, M, T, C, I)                               \
+       CK_CC_INLINE static T                                   \
+       ck_pr_fas_##S(M *target, T v)                           \
+       {                                                       \
+               __asm__ __volatile__(I " %0, %1"                \
+                                       : "+m" (*(C *)target),  \
+                                         "+q" (v)              \
+                                       :                       \
+                                       : "memory");            \
+               return v;                                       \
+       }
+
+CK_PR_FAS(ptr, void, void *, char, "xchgq")
+
+#define CK_PR_FAS_S(S, T, I) CK_PR_FAS(S, T, T, T, I)
+
+CK_PR_FAS_S(double, double, "xchgq")
+CK_PR_FAS_S(char, char, "xchgb")
+CK_PR_FAS_S(uint, unsigned int, "xchgl")
+CK_PR_FAS_S(int, int, "xchgl")
+CK_PR_FAS_S(64, uint64_t, "xchgq")
+CK_PR_FAS_S(32, uint32_t, "xchgl")
+CK_PR_FAS_S(16, uint16_t, "xchgw")
+CK_PR_FAS_S(8,  uint8_t,  "xchgb")
+
+#undef CK_PR_FAS_S
+#undef CK_PR_FAS
+
+/*
+ * Atomic load-from-memory operations.
+ */
+#define CK_PR_LOAD(S, M, T, C, I)                              \
+       CK_CC_INLINE static T                                   \
+       ck_pr_load_##S(const M *target)                         \
+       {                                                       \
+               T r;                                            \
+               __asm__ __volatile__(I " %1, %0"                \
+                                       : "=q" (r)              \
+                                       : "m"  (*(C *)target)   \
+                                       : "memory");            \
+               return (r);                                     \
+       }
+
+CK_PR_LOAD(ptr, void, void *, char, "movq")
+
+#define CK_PR_LOAD_S(S, T, I) CK_PR_LOAD(S, T, T, T, I)
+
+CK_PR_LOAD_S(char, char, "movb")
+CK_PR_LOAD_S(uint, unsigned int, "movl")
+CK_PR_LOAD_S(int, int, "movl")
+CK_PR_LOAD_S(double, double, "movq")
+CK_PR_LOAD_S(64, uint64_t, "movq")
+CK_PR_LOAD_S(32, uint32_t, "movl")
+CK_PR_LOAD_S(16, uint16_t, "movw")
+CK_PR_LOAD_S(8,  uint8_t,  "movb")
+
+#undef CK_PR_LOAD_S
+#undef CK_PR_LOAD
+
+CK_CC_INLINE static void
+ck_pr_load_64_2(const uint64_t target[2], uint64_t v[2])
+{
+       __asm__ __volatile__("movq %%rdx, %%rcx;"
+                            "movq %%rax, %%rbx;"
+                            CK_PR_LOCK_PREFIX "cmpxchg16b %2;"
+                               : "=a" (v[0]),
+                                 "=d" (v[1])
+                               : "m" (*(const uint64_t *)target)
+                               : "rbx", "rcx", "memory", "cc");
+       return;
+}
+
+CK_CC_INLINE static void
+ck_pr_load_ptr_2(void *t, void *v)
+{
+       ck_pr_load_64_2(t, v);
+       return;
+}
+
+#define CK_PR_LOAD_2(S, W, T)                                          \
+       CK_CC_INLINE static void                                        \
+       ck_pr_load_##S##_##W(const T t[2], T v[2])                      \
+       {                                                               \
+               ck_pr_load_64_2((const uint64_t *)(const void *)t,      \
+                               (uint64_t *)(void *)v);                 \
+               return;                                                 \
+       }
+
+CK_PR_LOAD_2(char, 16, char)
+CK_PR_LOAD_2(int, 4, int)
+CK_PR_LOAD_2(uint, 4, unsigned int)
+CK_PR_LOAD_2(32, 4, uint32_t)
+CK_PR_LOAD_2(16, 8, uint16_t)
+CK_PR_LOAD_2(8, 16, uint8_t)
+
+#undef CK_PR_LOAD_2
+
+/*
+ * Atomic store-to-memory operations.
+ */
+#define CK_PR_STORE_IMM(S, M, T, C, I, K)                              \
+       CK_CC_INLINE static void                                        \
+       ck_pr_store_##S(M *target, T v)                                 \
+       {                                                               \
+               __asm__ __volatile__(I " %1, %0"                        \
+                                       : "=m" (*(C *)target)           \
+                                       : K "q" (v)                     \
+                                       : "memory");                    \
+               return;                                                 \
+       }
+
+#define CK_PR_STORE(S, M, T, C, I)                             \
+       CK_CC_INLINE static void                                \
+       ck_pr_store_##S(M *target, T v)                         \
+       {                                                       \
+               __asm__ __volatile__(I " %1, %0"                \
+                                       : "=m" (*(C *)target)   \
+                                       : "q" (v)               \
+                                       : "memory");            \
+               return;                                         \
+       }
+
+CK_PR_STORE_IMM(ptr, void, const void *, char, "movq", CK_CC_IMM_U32)
+CK_PR_STORE(double, double, double, double, "movq")
+
+#define CK_PR_STORE_S(S, T, I, K) CK_PR_STORE_IMM(S, T, T, T, I, K)
+
+CK_PR_STORE_S(char, char, "movb", CK_CC_IMM_S32)
+CK_PR_STORE_S(int, int, "movl", CK_CC_IMM_S32)
+CK_PR_STORE_S(uint, unsigned int, "movl", CK_CC_IMM_U32)
+CK_PR_STORE_S(64, uint64_t, "movq", CK_CC_IMM_U32)
+CK_PR_STORE_S(32, uint32_t, "movl", CK_CC_IMM_U32)
+CK_PR_STORE_S(16, uint16_t, "movw", CK_CC_IMM_U32)
+CK_PR_STORE_S(8,  uint8_t, "movb", CK_CC_IMM_U32)
+
+#undef CK_PR_STORE_S
+#undef CK_PR_STORE_IMM
+#undef CK_PR_STORE
+
+/*
+ * Atomic fetch-and-add operations.
+ */
+#define CK_PR_FAA(S, M, T, C, I)                                       \
+       CK_CC_INLINE static T                                           \
+       ck_pr_faa_##S(M *target, T d)                                   \
+       {                                                               \
+               __asm__ __volatile__(CK_PR_LOCK_PREFIX I " %1, %0"      \
+                                       : "+m" (*(C *)target),          \
+                                         "+q" (d)                      \
+                                       :                               \
+                                       : "memory", "cc");              \
+               return (d);                                             \
+       }
+
+CK_PR_FAA(ptr, void, uintptr_t, char, "xaddq")
+
+#define CK_PR_FAA_S(S, T, I) CK_PR_FAA(S, T, T, T, I)
+
+CK_PR_FAA_S(char, char, "xaddb")
+CK_PR_FAA_S(uint, unsigned int, "xaddl")
+CK_PR_FAA_S(int, int, "xaddl")
+CK_PR_FAA_S(64, uint64_t, "xaddq")
+CK_PR_FAA_S(32, uint32_t, "xaddl")
+CK_PR_FAA_S(16, uint16_t, "xaddw")
+CK_PR_FAA_S(8,  uint8_t,  "xaddb")
+
+#undef CK_PR_FAA_S
+#undef CK_PR_FAA
+
+/*
+ * Atomic store-only unary operations.
+ */
+#define CK_PR_UNARY(K, S, T, C, I)                             \
+       CK_PR_UNARY_R(K, S, T, C, I)                            \
+       CK_PR_UNARY_V(K, S, T, C, I)
+
+#define CK_PR_UNARY_R(K, S, T, C, I)                           \
+       CK_CC_INLINE static void                                \
+       ck_pr_##K##_##S(T *target)                              \
+       {                                                       \
+               __asm__ __volatile__(CK_PR_LOCK_PREFIX I " %0"  \
+                                       : "+m" (*(C *)target)   \
+                                       :                       \
+                                       : "memory", "cc");      \
+               return;                                         \
+       }
+
+#define CK_PR_UNARY_V(K, S, T, C, I)                                   \
+       CK_CC_INLINE static void                                        \
+       ck_pr_##K##_##S##_zero(T *target, bool *r)                      \
+       {                                                               \
+               __asm__ __volatile__(CK_PR_LOCK_PREFIX I " %0; setz %1" \
+                                       : "+m" (*(C *)target),          \
+                                         "=m" (*r)                     \
+                                       :                               \
+                                       : "memory", "cc");              \
+               return;                                                 \
+       }
+
+
+#define CK_PR_UNARY_S(K, S, T, I) CK_PR_UNARY(K, S, T, T, I)
+
+#define CK_PR_GENERATE(K)                              \
+       CK_PR_UNARY(K, ptr, void, char, #K "q")         \
+       CK_PR_UNARY_S(K, char, char, #K "b")            \
+       CK_PR_UNARY_S(K, int, int, #K "l")              \
+       CK_PR_UNARY_S(K, uint, unsigned int, #K "l")    \
+       CK_PR_UNARY_S(K, 64, uint64_t, #K "q")          \
+       CK_PR_UNARY_S(K, 32, uint32_t, #K "l")          \
+       CK_PR_UNARY_S(K, 16, uint16_t, #K "w")          \
+       CK_PR_UNARY_S(K, 8, uint8_t, #K "b")
+
+CK_PR_GENERATE(inc)
+CK_PR_GENERATE(dec)
+CK_PR_GENERATE(neg)
+
+/* not does not affect condition flags. */
+#undef CK_PR_UNARY_V
+#define CK_PR_UNARY_V(a, b, c, d, e)
+CK_PR_GENERATE(not)
+
+#undef CK_PR_GENERATE
+#undef CK_PR_UNARY_S
+#undef CK_PR_UNARY_V
+#undef CK_PR_UNARY_R
+#undef CK_PR_UNARY
+
+/*
+ * Atomic store-only binary operations.
+ */
+#define CK_PR_BINARY(K, S, M, T, C, I, O)                              \
+       CK_CC_INLINE static void                                        \
+       ck_pr_##K##_##S(M *target, T d)                                 \
+       {                                                               \
+               __asm__ __volatile__(CK_PR_LOCK_PREFIX I " %1, %0"      \
+                                       : "+m" (*(C *)target)           \
+                                       : O "q" (d)                     \
+                                       : "memory", "cc");              \
+               return;                                                 \
+       }
+
+#define CK_PR_BINARY_S(K, S, T, I, O) CK_PR_BINARY(K, S, T, T, T, I, O)
+
+#define CK_PR_GENERATE(K)                                                 \
+       CK_PR_BINARY(K, ptr, void, uintptr_t, char, #K "q", CK_CC_IMM_U32) \
+       CK_PR_BINARY_S(K, char, char, #K "b", CK_CC_IMM_S32)              \
+       CK_PR_BINARY_S(K, int, int, #K "l", CK_CC_IMM_S32)                \
+       CK_PR_BINARY_S(K, uint, unsigned int, #K "l", CK_CC_IMM_U32)      \
+       CK_PR_BINARY_S(K, 64, uint64_t, #K "q", CK_CC_IMM_U32)            \
+       CK_PR_BINARY_S(K, 32, uint32_t, #K "l", CK_CC_IMM_U32)            \
+       CK_PR_BINARY_S(K, 16, uint16_t, #K "w", CK_CC_IMM_U32)            \
+       CK_PR_BINARY_S(K, 8, uint8_t, #K "b", CK_CC_IMM_U32)
+
+CK_PR_GENERATE(add)
+CK_PR_GENERATE(sub)
+CK_PR_GENERATE(and)
+CK_PR_GENERATE(or)
+CK_PR_GENERATE(xor)
+
+#undef CK_PR_GENERATE
+#undef CK_PR_BINARY_S
+#undef CK_PR_BINARY
+
+/*
+ * Atomic compare and swap.
+ */
+#define CK_PR_CAS(S, M, T, C, I)                                       \
+       CK_CC_INLINE static bool                                        \
+       ck_pr_cas_##S(M *target, T compare, T set)                      \
+       {                                                               \
+               bool z;                                                 \
+               __asm__ __volatile__(CK_PR_LOCK_PREFIX I " %2, %0; setz %1" \
+                                       : "+m"  (*(C *)target),         \
+                                         "=a"  (z)                     \
+                                       : "q"   (set),                  \
+                                         "a"   (compare)               \
+                                       : "memory", "cc");              \
+               return z;                                               \
+       }
+
+CK_PR_CAS(ptr, void, void *, char, "cmpxchgq")
+
+#define CK_PR_CAS_S(S, T, I) CK_PR_CAS(S, T, T, T, I)
+
+CK_PR_CAS_S(char, char, "cmpxchgb")
+CK_PR_CAS_S(int, int, "cmpxchgl")
+CK_PR_CAS_S(uint, unsigned int, "cmpxchgl")
+CK_PR_CAS_S(double, double, "cmpxchgq")
+CK_PR_CAS_S(64, uint64_t, "cmpxchgq")
+CK_PR_CAS_S(32, uint32_t, "cmpxchgl")
+CK_PR_CAS_S(16, uint16_t, "cmpxchgw")
+CK_PR_CAS_S(8,  uint8_t,  "cmpxchgb")
+
+#undef CK_PR_CAS_S
+#undef CK_PR_CAS
+
+/*
+ * Compare and swap, set *v to old value of target.
+ */
+#define CK_PR_CAS_O(S, M, T, C, I, R)                                  \
+       CK_CC_INLINE static bool                                        \
+       ck_pr_cas_##S##_value(M *target, T compare, T set, M *v)        \
+       {                                                               \
+               bool z;                                                 \
+               __asm__ __volatile__(CK_PR_LOCK_PREFIX "cmpxchg" I " %3, %0;" \
+                                    "mov %%" R ", %2;"                 \
+                                    "setz %1;"                         \
+                                       : "+m"  (*(C *)target),         \
+                                         "=a"  (z),                    \
+                                         "=m"  (*(C *)v)               \
+                                       : "q"   (set),                  \
+                                         "a"   (compare)               \
+                                       : "memory", "cc");              \
+               return z;                                               \
+       }
+
+CK_PR_CAS_O(ptr, void, void *, char, "q", "rax")
+
+#define CK_PR_CAS_O_S(S, T, I, R)      \
+       CK_PR_CAS_O(S, T, T, T, I, R)
+
+CK_PR_CAS_O_S(char, char, "b", "al")
+CK_PR_CAS_O_S(int, int, "l", "eax")
+CK_PR_CAS_O_S(uint, unsigned int, "l", "eax")
+CK_PR_CAS_O_S(double, double, "q", "rax")
+CK_PR_CAS_O_S(64, uint64_t, "q", "rax")
+CK_PR_CAS_O_S(32, uint32_t, "l", "eax")
+CK_PR_CAS_O_S(16, uint16_t, "w", "ax")
+CK_PR_CAS_O_S(8,  uint8_t,  "b", "al")
+
+#undef CK_PR_CAS_O_S
+#undef CK_PR_CAS_O
+
+/*
+ * Contrary to C-interface, alignment requirements are that of uint64_t[2].
+ */
+CK_CC_INLINE static bool
+ck_pr_cas_64_2(uint64_t target[2], uint64_t compare[2], uint64_t set[2])
+{
+       bool z;
+
+       __asm__ __volatile__("movq 0(%4), %%rax;"
+                            "movq 8(%4), %%rdx;"
+                            CK_PR_LOCK_PREFIX "cmpxchg16b %0; setz %1"
+                               : "+m" (*target),
+                                 "=q" (z)
+                               : "b"  (set[0]),
+                                 "c"  (set[1]),
+                                 "q"  (compare)
+                               : "memory", "cc", "%rax", "%rdx");
+       return z;
+}
+
+CK_CC_INLINE static bool
+ck_pr_cas_ptr_2(void *t, void *c, void *s)
+{
+       return ck_pr_cas_64_2(t, c, s);
+}
+
+CK_CC_INLINE static bool
+ck_pr_cas_64_2_value(uint64_t target[2],
+                    uint64_t compare[2],
+                    uint64_t set[2],
+                    uint64_t v[2])
+{
+       bool z;
+
+       __asm__ __volatile__(CK_PR_LOCK_PREFIX "cmpxchg16b %0;"
+                            "setz %3"
+                               : "+m" (*target),
+                                 "=a" (v[0]),
+                                 "=d" (v[1]),
+                                 "=q" (z)
+                               : "a" (compare[0]),
+                                 "d" (compare[1]),
+                                 "b" (set[0]),
+                                 "c" (set[1])
+                               : "memory", "cc");
+       return z;
+}
+
+CK_CC_INLINE static bool
+ck_pr_cas_ptr_2_value(void *t, void *c, void *s, void *v)
+{
+       return ck_pr_cas_64_2_value(t, c, s, v);
+}
+
+#define CK_PR_CAS_V(S, W, T)                                   \
+CK_CC_INLINE static bool                                       \
+ck_pr_cas_##S##_##W(T t[W], T c[W], T s[W])                    \
+{                                                              \
+       return ck_pr_cas_64_2((uint64_t *)(void *)t,            \
+                             (uint64_t *)(void *)c,            \
+                             (uint64_t *)(void *)s);           \
+}                                                              \
+CK_CC_INLINE static bool                                       \
+ck_pr_cas_##S##_##W##_value(T *t, T c[W], T s[W], T *v)                \
+{                                                              \
+       return ck_pr_cas_64_2_value((uint64_t *)(void *)t,      \
+                                   (uint64_t *)(void *)c,      \
+                                   (uint64_t *)(void *)s,      \
+                                   (uint64_t *)(void *)v);     \
+}
+
+CK_PR_CAS_V(double, 2, double)
+CK_PR_CAS_V(char, 16, char)
+CK_PR_CAS_V(int, 4, int)
+CK_PR_CAS_V(uint, 4, unsigned int)
+CK_PR_CAS_V(32, 4, uint32_t)
+CK_PR_CAS_V(16, 8, uint16_t)
+CK_PR_CAS_V(8, 16, uint8_t)
+
+#undef CK_PR_CAS_V
+
+/*
+ * Atomic bit test operations.
+ */
+#define CK_PR_BT(K, S, T, P, C, I)                                     \
+       CK_CC_INLINE static bool                                        \
+       ck_pr_##K##_##S(T *target, unsigned int b)                      \
+       {                                                               \
+               bool c;                                                 \
+               __asm__ __volatile__(CK_PR_LOCK_PREFIX I "; setc %1"    \
+                                       : "+m" (*(C *)target),          \
+                                         "=q" (c)                      \
+                                       : "q"  ((P)b)                   \
+                                       : "memory", "cc");              \
+               return c;                                               \
+       }
+
+#define CK_PR_BT_S(K, S, T, I) CK_PR_BT(K, S, T, T, T, I)
+
+#define CK_PR_GENERATE(K)                                      \
+       CK_PR_BT(K, ptr, void, uint64_t, char, #K "q %2, %0")   \
+       CK_PR_BT_S(K, uint, unsigned int, #K "l %2, %0")        \
+       CK_PR_BT_S(K, int, int, #K "l %2, %0")                  \
+       CK_PR_BT_S(K, 64, uint64_t, #K "q %2, %0")              \
+       CK_PR_BT_S(K, 32, uint32_t, #K "l %2, %0")              \
+       CK_PR_BT_S(K, 16, uint16_t, #K "w %w2, %0")
+
+CK_PR_GENERATE(btc)
+CK_PR_GENERATE(bts)
+CK_PR_GENERATE(btr)
+
+#undef CK_PR_GENERATE
+#undef CK_PR_BT
+
+#endif /* _CK_PR_X86_64_H */
+
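
The *_value CAS variants above return the witnessed value through *v, which keeps retry loops to a single initial load; a short illustrative sketch:

    #include <ck_pr.h>
    #include <stdint.h>

    /* Add delta without xadd, demonstrating ck_pr_cas_64_value. */
    static uint64_t
    add_via_cas(uint64_t *target, uint64_t delta)
    {
            uint64_t snap = ck_pr_load_64(target);

            /* On failure, snap is refreshed with the observed value. */
            while (ck_pr_cas_64_value(target, snap, snap + delta,
                &snap) == false)
                    ck_pr_stall();

            return snap;
    }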

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/f098175e/lib/ck/include/gcc/x86_64/ck_pr_rtm.h
----------------------------------------------------------------------
diff --git a/lib/ck/include/gcc/x86_64/ck_pr_rtm.h b/lib/ck/include/gcc/x86_64/ck_pr_rtm.h
new file mode 100644
index 0000000..eafa325
--- /dev/null
+++ b/lib/ck/include/gcc/x86_64/ck_pr_rtm.h
@@ -0,0 +1,109 @@
+/*
+ * Copyright 2013-2014 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * Copyright (c) 2012,2013 Intel Corporation
+ * Author: Andi Kleen
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that: (1) source code distributions
+ * retain the above copyright notice and this paragraph in its entirety, (2)
+ * distributions including binary code include the above copyright notice and
+ * this paragraph in its entirety in the documentation or other materials
+ * provided with the distribution
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef _CK_PR_X86_64_RTM_H
+#define _CK_PR_X86_64_RTM_H
+
+#ifndef _CK_PR_X86_64_H
+#error Do not include this file directly, use ck_pr.h
+#endif
+
+#define CK_F_PR_RTM
+
+#include <ck_cc.h>
+#include <stdbool.h>
+
+#define CK_PR_RTM_STARTED      (~0U)
+#define CK_PR_RTM_EXPLICIT     (1 << 0)
+#define CK_PR_RTM_RETRY                (1 << 1)
+#define CK_PR_RTM_CONFLICT     (1 << 2)
+#define CK_PR_RTM_CAPACITY     (1 << 3)
+#define CK_PR_RTM_DEBUG                (1 << 4)
+#define CK_PR_RTM_NESTED       (1 << 5)
+#define CK_PR_RTM_CODE(x)      (((x) >> 24) & 0xFF)
+
+CK_CC_INLINE static unsigned int
+ck_pr_rtm_begin(void)
+{
+       unsigned int r = CK_PR_RTM_STARTED;
+
+       __asm__ __volatile__(".byte 0xc7,0xf8;"
+                            ".long 0;"
+                               : "+a" (r)
+                               :
+                               : "memory");
+
+       return r;
+}
+
+CK_CC_INLINE static void
+ck_pr_rtm_end(void)
+{
+
+       __asm__ __volatile__(".byte 0x0f,0x01,0xd5" ::: "memory");
+       return;
+}
+
+CK_CC_INLINE static void
+ck_pr_rtm_abort(const unsigned int status)
+{
+
+       __asm__ __volatile__(".byte 0xc6,0xf8,%P0" :: "i" (status) : "memory");
+       return;
+}
+
+CK_CC_INLINE static bool
+ck_pr_rtm_test(void)
+{
+       bool r;
+
+       __asm__ __volatile__(".byte 0x0f,0x01,0xd6;"
+                            "setnz %0"
+                               : "=r" (r)
+                               :
+                               : "memory");
+
+       return r;
+}
+
+#endif /* _CK_PR_X86_64_RTM_H */
+
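
When CK_MD_RTM_ENABLE is set, the begin/end/abort primitives above support the usual lock-elision pattern; a hedged sketch (the fallback lock word and driver are illustrative, not part of this commit):

    #include <ck_pr.h>

    static unsigned int fallback;   /* illustrative fallback lock word */

    static void
    critical(void (*body)(void))
    {
            if (ck_pr_rtm_begin() == CK_PR_RTM_STARTED) {
                    /*
                     * Reading the lock adds it to the transaction's
                     * read-set; abort if it is already held.
                     */
                    if (ck_pr_load_uint(&fallback) != 0)
                            ck_pr_rtm_abort(0);
                    body();
                    ck_pr_rtm_end();
                    return;
            }

            /* Transaction aborted: run under the real lock. */
            while (ck_pr_fas_uint(&fallback, 1) == 1)
                    ck_pr_stall();
            body();
            ck_pr_fence_release();
            ck_pr_store_uint(&fallback, 0);
    }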

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/f098175e/lib/ck/include/spinlock/anderson.h
----------------------------------------------------------------------
diff --git a/lib/ck/include/spinlock/anderson.h b/lib/ck/include/spinlock/anderson.h
new file mode 100644
index 0000000..7800b2d
--- /dev/null
+++ b/lib/ck/include/spinlock/anderson.h
@@ -0,0 +1,165 @@
+/*
+ * Copyright 2010-2014 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _CK_SPINLOCK_ANDERSON_H
+#define _CK_SPINLOCK_ANDERSON_H
+
+#include <ck_cc.h>
+#include <ck_limits.h>
+#include <ck_md.h>
+#include <ck_pr.h>
+#include <stdbool.h>
+
+#ifndef CK_F_SPINLOCK_ANDERSON
+#define CK_F_SPINLOCK_ANDERSON
+/*
+ * This is an implementation of Anderson's array-based queuing lock.
+ */
+struct ck_spinlock_anderson_thread {
+       unsigned int locked;
+       unsigned int position;
+};
+typedef struct ck_spinlock_anderson_thread ck_spinlock_anderson_thread_t;
+
+struct ck_spinlock_anderson {
+       struct ck_spinlock_anderson_thread *slots;
+       unsigned int count;
+       unsigned int wrap;
+       unsigned int mask;
+       char pad[CK_MD_CACHELINE - sizeof(unsigned int) * 3 - sizeof(void *)];
+       unsigned int next;
+};
+typedef struct ck_spinlock_anderson ck_spinlock_anderson_t;
+
+CK_CC_INLINE static void
+ck_spinlock_anderson_init(struct ck_spinlock_anderson *lock,
+    struct ck_spinlock_anderson_thread *slots,
+    unsigned int count)
+{
+       unsigned int i;
+
+       slots[0].locked = false;
+       slots[0].position = 0;
+       for (i = 1; i < count; i++) {
+               slots[i].locked = true;
+               slots[i].position = i;
+       }
+
+       lock->slots = slots;
+       lock->count = count;
+       lock->mask = count - 1;
+       lock->next = 0;
+
+       /*
+        * If the number of threads is not a power of two then compute
+        * appropriate wrap-around value in the case of next slot counter
+        * overflow.
+        */
+       if (count & (count - 1))
+               lock->wrap = (UINT_MAX % count) + 1;
+       else
+               lock->wrap = 0;
+
+       ck_pr_barrier();
+       return;
+}
+
+CK_CC_INLINE static bool
+ck_spinlock_anderson_locked(struct ck_spinlock_anderson *lock)
+{
+       unsigned int position;
+
+       ck_pr_fence_load();
+       position = ck_pr_load_uint(&lock->next) & lock->mask;
+       ck_pr_fence_load();
+
+       return ck_pr_load_uint(&lock->slots[position].locked);
+}
+
+CK_CC_INLINE static void
+ck_spinlock_anderson_lock(struct ck_spinlock_anderson *lock,
+    struct ck_spinlock_anderson_thread **slot)
+{
+       unsigned int position, next;
+       unsigned int count = lock->count;
+
+       /*
+        * If count is not a power of 2, then it is possible for an overflow
+        * to reallocate beginning slots to more than one thread. To avoid this
+        * use a compare-and-swap.
+        */
+       if (lock->wrap != 0) {
+               position = ck_pr_load_uint(&lock->next);
+
+               do {
+                       if (position == UINT_MAX)
+                               next = lock->wrap;
+                       else
+                               next = position + 1;
+               } while (ck_pr_cas_uint_value(&lock->next, position,
+                                             next, &position) == false);
+
+               position %= count;
+       } else {
+               position = ck_pr_faa_uint(&lock->next, 1);
+               position &= lock->mask;
+       }
+
+       /* Serialize with respect to previous thread's store. */
+       ck_pr_fence_load();
+
+       /* Spin until slot is marked as unlocked. First slot is initialized to false. */
+       while (ck_pr_load_uint(&lock->slots[position].locked) == true)
+               ck_pr_stall();
+
+       /* Prepare slot for potential re-use by another thread. */
+       ck_pr_store_uint(&lock->slots[position].locked, true);
+       ck_pr_fence_acquire();
+
+       *slot = lock->slots + position;
+       return;
+}
+
+CK_CC_INLINE static void
+ck_spinlock_anderson_unlock(struct ck_spinlock_anderson *lock,
+    struct ck_spinlock_anderson_thread *slot)
+{
+       unsigned int position;
+
+       ck_pr_fence_release();
+
+       /* Mark next slot as available. */
+       if (lock->wrap == 0)
+               position = (slot->position + 1) & lock->mask;
+       else
+               position = (slot->position + 1) % lock->count;
+
+       ck_pr_store_uint(&lock->slots[position].locked, false);
+       return;
+}
+#endif /* CK_F_SPINLOCK_ANDERSON */
+#endif /* _CK_SPINLOCK_ANDERSON_H */
+
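A minimal usage sketch for the Anderson lock above, assuming the ck_spinlock.h umbrella header (which in ck pulls in spinlock/anderson.h) and a hypothetical fixed thread count N_THREADS. Each acquirer receives a slot pointer from the lock operation and hands it back on unlock:

    #include <ck_spinlock.h>

    #define N_THREADS 4 /* hypothetical: one slot per competing thread */

    static ck_spinlock_anderson_t lock;
    static ck_spinlock_anderson_thread_t slots[N_THREADS];

    static void
    worker(void)
    {
            ck_spinlock_anderson_thread_t *slot;

            ck_spinlock_anderson_lock(&lock, &slot);
            /* critical section */
            ck_spinlock_anderson_unlock(&lock, slot);
    }

    int
    main(void)
    {
            ck_spinlock_anderson_init(&lock, slots, N_THREADS);
            worker(); /* in practice, run from N_THREADS concurrent threads */
            return 0;
    }

As a worked instance of the wrap computation: with count == 3, UINT_MAX % 3 == 0, so wrap == 1. Ticket UINT_MAX maps to slot 0 and the counter then restarts at 1, so slot assignment continues 0, 1, 2, 0, ... without handing the same slot to two waiters.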

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/f098175e/lib/ck/include/spinlock/cas.h
----------------------------------------------------------------------
diff --git a/lib/ck/include/spinlock/cas.h b/lib/ck/include/spinlock/cas.h
new file mode 100644
index 0000000..acad3d5
--- /dev/null
+++ b/lib/ck/include/spinlock/cas.h
@@ -0,0 +1,121 @@
+/*
+ * Copyright 2010-2014 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _CK_SPINLOCK_CAS_H
+#define _CK_SPINLOCK_CAS_H
+
+#include <ck_backoff.h>
+#include <ck_cc.h>
+#include <ck_elide.h>
+#include <ck_pr.h>
+#include <stdbool.h>
+
+#ifndef CK_F_SPINLOCK_CAS
+#define CK_F_SPINLOCK_CAS
+/*
+ * This is a simple CACAS (TATAS) spinlock implementation.
+ */
+struct ck_spinlock_cas {
+       unsigned int value;
+};
+typedef struct ck_spinlock_cas ck_spinlock_cas_t;
+
+#define CK_SPINLOCK_CAS_INITIALIZER {false}
+
+CK_CC_INLINE static void
+ck_spinlock_cas_init(struct ck_spinlock_cas *lock)
+{
+
+       lock->value = false;
+       ck_pr_barrier();
+       return;
+}
+
+CK_CC_INLINE static bool
+ck_spinlock_cas_trylock(struct ck_spinlock_cas *lock)
+{
+       unsigned int value;
+
+       value = ck_pr_fas_uint(&lock->value, true);
+       if (value == false)
+               ck_pr_fence_acquire();
+
+       return !value;
+}
+
+CK_CC_INLINE static bool
+ck_spinlock_cas_locked(struct ck_spinlock_cas *lock)
+{
+
+       ck_pr_fence_load();
+       return ck_pr_load_uint(&lock->value);
+}
+
+CK_CC_INLINE static void
+ck_spinlock_cas_lock(struct ck_spinlock_cas *lock)
+{
+
+       while (ck_pr_cas_uint(&lock->value, false, true) == false) {
+               while (ck_pr_load_uint(&lock->value) == true)
+                       ck_pr_stall();
+       }
+
+       ck_pr_fence_acquire();
+       return;
+}
+
+CK_CC_INLINE static void
+ck_spinlock_cas_lock_eb(struct ck_spinlock_cas *lock)
+{
+       ck_backoff_t backoff = CK_BACKOFF_INITIALIZER;
+
+       while (ck_pr_cas_uint(&lock->value, false, true) == false)
+               ck_backoff_eb(&backoff);
+
+       ck_pr_fence_acquire();
+       return;
+}
+
+CK_CC_INLINE static void
+ck_spinlock_cas_unlock(struct ck_spinlock_cas *lock)
+{
+
+       /* Set lock state to unlocked. */
+       ck_pr_fence_release();
+       ck_pr_store_uint(&lock->value, false);
+       return;
+}
+
+CK_ELIDE_PROTOTYPE(ck_spinlock_cas, ck_spinlock_cas_t,
+    ck_spinlock_cas_locked, ck_spinlock_cas_lock,
+    ck_spinlock_cas_locked, ck_spinlock_cas_unlock)
+
+CK_ELIDE_TRYLOCK_PROTOTYPE(ck_spinlock_cas, ck_spinlock_cas_t,
+    ck_spinlock_cas_locked, ck_spinlock_cas_trylock)
+
+#endif /* CK_F_SPINLOCK_CAS */
+#endif /* _CK_SPINLOCK_CAS_H */
+
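A usage sketch for the TATAS lock above, under the same umbrella-header assumption; the shared counter is hypothetical, and the _eb variant can be substituted under contention:

    #include <ck_spinlock.h>

    static ck_spinlock_cas_t lock = CK_SPINLOCK_CAS_INITIALIZER;
    static unsigned long counter; /* hypothetical shared state */

    void
    increment(void)
    {
            ck_spinlock_cas_lock(&lock); /* or ck_spinlock_cas_lock_eb(&lock) */
            counter++;
            ck_spinlock_cas_unlock(&lock);
    }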

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/f098175e/lib/ck/include/spinlock/clh.h
----------------------------------------------------------------------
diff --git a/lib/ck/include/spinlock/clh.h b/lib/ck/include/spinlock/clh.h
new file mode 100644
index 0000000..eb98b1d
--- /dev/null
+++ b/lib/ck/include/spinlock/clh.h
@@ -0,0 +1,117 @@
+/*
+ * Copyright 2010-2014 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _CK_SPINLOCK_CLH_H
+#define _CK_SPINLOCK_CLH_H
+
+#include <ck_cc.h>
+#include <ck_limits.h>
+#include <ck_pr.h>
+#include <stdbool.h>
+#include <stddef.h>
+
+#ifndef CK_F_SPINLOCK_CLH
+#define CK_F_SPINLOCK_CLH
+
+struct ck_spinlock_clh {
+       unsigned int wait;
+       struct ck_spinlock_clh *previous;
+};
+typedef struct ck_spinlock_clh ck_spinlock_clh_t;
+
+CK_CC_INLINE static void
+ck_spinlock_clh_init(struct ck_spinlock_clh **lock, struct ck_spinlock_clh *unowned)
+{
+
+       unowned->previous = NULL;
+       unowned->wait = false;
+       *lock = unowned;
+       ck_pr_barrier();
+       return;
+}
+
+CK_CC_INLINE static bool
+ck_spinlock_clh_locked(struct ck_spinlock_clh **queue)
+{
+       struct ck_spinlock_clh *head;
+
+       ck_pr_fence_load();
+       head = ck_pr_load_ptr(queue);
+       ck_pr_fence_load();
+       return ck_pr_load_uint(&head->wait);
+}
+
+CK_CC_INLINE static void
+ck_spinlock_clh_lock(struct ck_spinlock_clh **queue, struct ck_spinlock_clh *thread)
+{
+       struct ck_spinlock_clh *previous;
+
+       /* Indicate to the next thread on queue that they will have to block. */
+       thread->wait = true;
+       ck_pr_fence_store_atomic();
+
+       /* Mark current request as last request. Save reference to previous request. */
+       previous = ck_pr_fas_ptr(queue, thread);
+       thread->previous = previous;
+
+       /* Wait until previous thread is done with lock. */
+       ck_pr_fence_load();
+       while (ck_pr_load_uint(&previous->wait) == true)
+               ck_pr_stall();
+
+       ck_pr_fence_load();
+       return;
+}
+
+CK_CC_INLINE static void
+ck_spinlock_clh_unlock(struct ck_spinlock_clh **thread)
+{
+       struct ck_spinlock_clh *previous;
+
+       /*
+        * If there are waiters, they are spinning on the current node wait
+        * flag. The flag is cleared so that the successor may complete an
+        * acquisition. If the caller is pre-empted then the predecessor field
+        * may be updated by a successor's lock operation. In order to avoid
+        * this, save a copy of the predecessor before setting the flag.
+        */
+       previous = thread[0]->previous;
+
+       /* We have to pay this cost anyway, so use it as a compiler barrier too. */
+       ck_pr_fence_release();
+       ck_pr_store_uint(&(*thread)->wait, false);
+
+       /*
+        * Predecessor is guaranteed not to be spinning on previous request,
+        * so update caller to use previous structure. This allows successor
+        * all the time in the world to successfully read updated wait flag.
+        */
+       *thread = previous;
+       return;
+}
+#endif /* CK_F_SPINLOCK_CLH */
+#endif /* _CK_SPINLOCK_CLH_H */
+
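A CLH usage sketch, assuming one persistent queue node per thread (allocation is left to the caller). Note that unlock rewrites the caller's node pointer to the predecessor's structure, so the same pointer variable must be carried into the next acquisition:

    #include <ck_spinlock.h>

    static ck_spinlock_clh_t *lock;
    static ck_spinlock_clh_t unowned;

    void
    setup(void)
    {
            ck_spinlock_clh_init(&lock, &unowned);
    }

    /* node points at this thread's current queue node and persists across calls */
    void
    critical(ck_spinlock_clh_t **node)
    {
            ck_spinlock_clh_lock(&lock, *node);
            /* critical section */
            ck_spinlock_clh_unlock(node); /* *node now names a recycled predecessor node */
    }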

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/f098175e/lib/ck/include/spinlock/dec.h
----------------------------------------------------------------------
diff --git a/lib/ck/include/spinlock/dec.h b/lib/ck/include/spinlock/dec.h
new file mode 100644
index 0000000..8c1e000
--- /dev/null
+++ b/lib/ck/include/spinlock/dec.h
@@ -0,0 +1,143 @@
+/*
+ * Copyright 2010-2014 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _CK_SPINLOCK_DEC_H
+#define _CK_SPINLOCK_DEC_H
+
+#include <ck_backoff.h>
+#include <ck_cc.h>
+#include <ck_elide.h>
+#include <ck_pr.h>
+#include <stdbool.h>
+
+#ifndef CK_F_SPINLOCK_DEC
+#define CK_F_SPINLOCK_DEC
+/*
+ * This is similar to the CACAS lock but makes use of an atomic decrement
+ * operation to check if the lock value was decremented to 0 from 1. The
+ * idea is that a decrement operation is cheaper than a compare-and-swap.
+ */
+struct ck_spinlock_dec {
+       unsigned int value;
+};
+typedef struct ck_spinlock_dec ck_spinlock_dec_t;
+
+#define CK_SPINLOCK_DEC_INITIALIZER    {1}
+
+CK_CC_INLINE static void
+ck_spinlock_dec_init(struct ck_spinlock_dec *lock)
+{
+
+       lock->value = 1;
+       ck_pr_barrier();
+       return;
+}
+
+CK_CC_INLINE static bool
+ck_spinlock_dec_trylock(struct ck_spinlock_dec *lock)
+{
+       unsigned int value;
+
+       value = ck_pr_fas_uint(&lock->value, 0);
+       if (value == 1) {
+               ck_pr_fence_acquire();
+               return true;
+       }
+
+       return false;
+}
+
+CK_CC_INLINE static bool
+ck_spinlock_dec_locked(struct ck_spinlock_dec *lock)
+{
+
+       ck_pr_fence_load();
+       return ck_pr_load_uint(&lock->value) != 1;
+}
+
+CK_CC_INLINE static void
+ck_spinlock_dec_lock(struct ck_spinlock_dec *lock)
+{
+       bool r;
+
+       for (;;) {
+               /*
+                * Only one thread is guaranteed to decrement lock to 0.
+                * Overflow must be protected against. No more than
+                * UINT_MAX lock requests can happen while the lock is held.
+                */
+               ck_pr_dec_uint_zero(&lock->value, &r);
+               if (r == true)
+                       break;
+
+               /* Load value without generating write cycles. */
+               while (ck_pr_load_uint(&lock->value) != 1)
+                       ck_pr_stall();
+       }
+
+       ck_pr_fence_acquire();
+       return;
+}
+
+CK_CC_INLINE static void
+ck_spinlock_dec_lock_eb(struct ck_spinlock_dec *lock)
+{
+       ck_backoff_t backoff = CK_BACKOFF_INITIALIZER;
+       bool r;
+
+       for (;;) {
+               ck_pr_dec_uint_zero(&lock->value, &r);
+               if (r == true)
+                       break;
+
+               ck_backoff_eb(&backoff);
+       }
+
+       ck_pr_fence_acquire();
+       return;
+}
+
+CK_CC_INLINE static void
+ck_spinlock_dec_unlock(struct ck_spinlock_dec *lock)
+{
+
+       ck_pr_fence_release();
+
+       /* Unconditionally set lock value to 1 so someone can decrement lock to 0. */
+       ck_pr_store_uint(&lock->value, 1);
+       return;
+}
+
+CK_ELIDE_PROTOTYPE(ck_spinlock_dec, ck_spinlock_dec_t,
+    ck_spinlock_dec_locked, ck_spinlock_dec_lock,
+    ck_spinlock_dec_locked, ck_spinlock_dec_unlock)
+
+CK_ELIDE_TRYLOCK_PROTOTYPE(ck_spinlock_dec, ck_spinlock_dec_t,
+    ck_spinlock_dec_locked, ck_spinlock_dec_trylock)
+
+#endif /* CK_F_SPINLOCK_DEC */
+#endif /* _CK_SPINLOCK_DEC_H */
+
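Usage mirrors the CAS lock; a brief sketch with a hypothetical shared counter. A value of 1 means unlocked, and as the comment in ck_spinlock_dec_lock notes, correctness assumes fewer than UINT_MAX lock requests arrive while the lock is held:

    #include <ck_spinlock.h>

    static ck_spinlock_dec_t lock = CK_SPINLOCK_DEC_INITIALIZER; /* value == 1: unlocked */
    static unsigned long counter; /* hypothetical shared state */

    void
    increment(void)
    {
            ck_spinlock_dec_lock(&lock);   /* the winner decrements 1 -> 0 */
            counter++;
            ck_spinlock_dec_unlock(&lock); /* store 1, releasing the lock */
    }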

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/f098175e/lib/ck/include/spinlock/fas.h
----------------------------------------------------------------------
diff --git a/lib/ck/include/spinlock/fas.h b/lib/ck/include/spinlock/fas.h
new file mode 100644
index 0000000..946a9ad
--- /dev/null
+++ b/lib/ck/include/spinlock/fas.h
@@ -0,0 +1,118 @@
+/*
+ * Copyright 2010-2014 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _CK_SPINLOCK_FAS_H
+#define _CK_SPINLOCK_FAS_H
+
+#include <ck_backoff.h>
+#include <ck_cc.h>
+#include <ck_elide.h>
+#include <ck_pr.h>
+#include <stdbool.h>
+
+#ifndef CK_F_SPINLOCK_FAS
+#define CK_F_SPINLOCK_FAS
+
+struct ck_spinlock_fas {
+       unsigned int value;
+};
+typedef struct ck_spinlock_fas ck_spinlock_fas_t;
+
+#define CK_SPINLOCK_FAS_INITIALIZER {false}
+
+CK_CC_INLINE static void
+ck_spinlock_fas_init(struct ck_spinlock_fas *lock)
+{
+
+       lock->value = false;
+       ck_pr_barrier();
+       return;
+}
+
+CK_CC_INLINE static bool
+ck_spinlock_fas_trylock(struct ck_spinlock_fas *lock)
+{
+       bool value;
+
+       value = ck_pr_fas_uint(&lock->value, true);
+       if (value == false)
+               ck_pr_fence_acquire();
+
+       return !value;
+}
+
+CK_CC_INLINE static bool
+ck_spinlock_fas_locked(struct ck_spinlock_fas *lock)
+{
+
+       ck_pr_fence_load();
+       return ck_pr_load_uint(&lock->value);
+}
+
+CK_CC_INLINE static void
+ck_spinlock_fas_lock(struct ck_spinlock_fas *lock)
+{
+
+       while (ck_pr_fas_uint(&lock->value, true) == true) {
+               while (ck_pr_load_uint(&lock->value) == true)
+                       ck_pr_stall();
+       }
+
+       ck_pr_fence_acquire();
+       return;
+}
+
+CK_CC_INLINE static void
+ck_spinlock_fas_lock_eb(struct ck_spinlock_fas *lock)
+{
+       ck_backoff_t backoff = CK_BACKOFF_INITIALIZER;
+
+       while (ck_pr_fas_uint(&lock->value, true) == true)
+               ck_backoff_eb(&backoff);
+
+       ck_pr_fence_acquire();
+       return;
+}
+
+CK_CC_INLINE static void
+ck_spinlock_fas_unlock(struct ck_spinlock_fas *lock)
+{
+
+       ck_pr_fence_release();
+       ck_pr_store_uint(&lock->value, false);
+       return;
+}
+
+CK_ELIDE_PROTOTYPE(ck_spinlock_fas, ck_spinlock_fas_t,
+    ck_spinlock_fas_locked, ck_spinlock_fas_lock,
+    ck_spinlock_fas_locked, ck_spinlock_fas_unlock)
+
+CK_ELIDE_TRYLOCK_PROTOTYPE(ck_spinlock_fas, ck_spinlock_fas_t,
+    ck_spinlock_fas_locked, ck_spinlock_fas_trylock)
+
+#endif /* CK_F_SPINLOCK_FAS */
+#endif /* _CK_SPINLOCK_FAS_H */
+
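The fetch-and-store lock shares the CAS lock's interface; this sketch exercises the trylock path instead (try_update is a hypothetical caller):

    #include <ck_spinlock.h>
    #include <stdbool.h>

    static ck_spinlock_fas_t lock = CK_SPINLOCK_FAS_INITIALIZER;

    bool
    try_update(void)
    {
            if (ck_spinlock_fas_trylock(&lock) == false)
                    return false; /* lock is held; caller may retry or back off */

            /* critical section */
            ck_spinlock_fas_unlock(&lock);
            return true;
    }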

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/f098175e/lib/ck/include/spinlock/hclh.h
----------------------------------------------------------------------
diff --git a/lib/ck/include/spinlock/hclh.h b/lib/ck/include/spinlock/hclh.h
new file mode 100644
index 0000000..edaeaca
--- /dev/null
+++ b/lib/ck/include/spinlock/hclh.h
@@ -0,0 +1,145 @@
+/*
+ * Copyright 2013-2014 Olivier Houchard
+ * Copyright 2010-2014 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _CK_SPINLOCK_HCLH_H
+#define _CK_SPINLOCK_HCLH_H
+
+#include <ck_cc.h>
+#include <ck_pr.h>
+#include <stdbool.h>
+#include <stddef.h>
+
+#ifndef CK_F_SPINLOCK_HCLH
+#define CK_F_SPINLOCK_HCLH
+struct ck_spinlock_hclh {
+       unsigned int wait;
+       unsigned int splice;
+       int cluster_id;
+       struct ck_spinlock_hclh *previous;
+};
+typedef struct ck_spinlock_hclh ck_spinlock_hclh_t;
+
+CK_CC_INLINE static void
+ck_spinlock_hclh_init(struct ck_spinlock_hclh **lock,
+    struct ck_spinlock_hclh *unowned,
+    int cluster_id)
+{
+
+       unowned->previous = NULL;
+       unowned->wait = false;
+       unowned->splice = false;
+       unowned->cluster_id = cluster_id;
+       *lock = unowned;
+       ck_pr_barrier();
+       return;
+}
+
+CK_CC_INLINE static bool
+ck_spinlock_hclh_locked(struct ck_spinlock_hclh **queue)
+{
+       struct ck_spinlock_hclh *head;
+
+       ck_pr_fence_load();
+       head = ck_pr_load_ptr(queue);
+       ck_pr_fence_load();
+       return ck_pr_load_uint(&head->wait);
+}
+
+CK_CC_INLINE static void
+ck_spinlock_hclh_lock(struct ck_spinlock_hclh **glob_queue,
+    struct ck_spinlock_hclh **local_queue,
+    struct ck_spinlock_hclh *thread)
+{
+       struct ck_spinlock_hclh *previous, *local_tail;
+
+       /* Indicate to the next thread on queue that they will have to block. */
+       thread->wait = true;
+       thread->splice = false;
+       thread->cluster_id = (*local_queue)->cluster_id;
+
+       /* Serialize with respect to update of local queue. */
+       ck_pr_fence_store_atomic();
+
+       /* Mark current request as last request. Save reference to previous request. */
+       previous = ck_pr_fas_ptr(local_queue, thread);
+       thread->previous = previous;
+
+       /* Wait until previous thread from the local queue is done with lock. */
+       ck_pr_fence_load();
+       if (previous->previous != NULL &&
+           previous->cluster_id == thread->cluster_id) {
+               while (ck_pr_load_uint(&previous->wait) == true)
+                       ck_pr_stall();
+
+               /* We're head of the global queue, we're done */
+               if (ck_pr_load_uint(&previous->splice) == false)
+                       return;
+       } 
+
+       /* Now we need to splice the local queue into the global queue. */
+       local_tail = ck_pr_load_ptr(local_queue);
+       previous = ck_pr_fas_ptr(glob_queue, local_tail);
+
+       ck_pr_store_uint(&local_tail->splice, true);
+
+       /* Wait until previous thread from the global queue is done with lock. */
+       while (ck_pr_load_uint(&previous->wait) == true)
+               ck_pr_stall();
+
+       ck_pr_fence_load();
+       return;
+}
+
+CK_CC_INLINE static void
+ck_spinlock_hclh_unlock(struct ck_spinlock_hclh **thread)
+{
+       struct ck_spinlock_hclh *previous;
+
+       /*
+        * If there are waiters, they are spinning on the current node wait
+        * flag. The flag is cleared so that the successor may complete an
+        * acquisition. If the caller is pre-empted then the predecessor field
+        * may be updated by a successor's lock operation. In order to avoid
+        * this, save a copy of the predecessor before setting the flag.
+        */
+       previous = thread[0]->previous;
+
+       /* We have to pay this cost anyway, so use it as a compiler barrier too. */
+       ck_pr_fence_release();
+       ck_pr_store_uint(&(*thread)->wait, false);
+
+       /*
+        * Predecessor is guaranteed not to be spinning on previous request,
+        * so update caller to use previous structure. This allows successor
+        * all the time in the world to successfully read updated wait flag.
+        */
+       *thread = previous;
+       return;
+}
+#endif /* CK_F_SPINLOCK_HCLH */
+#endif /* _CK_SPINLOCK_HCLH_H */
+
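A hierarchical CLH setup sketch, assuming a hypothetical two-cluster topology; the sentinel cluster id of -1 for the global queue's unowned node is also an assumption. Each cluster owns a local queue that lock operations splice into the global queue, and per-thread nodes persist across acquisitions exactly as in the CLH example:

    #include <ck_spinlock.h>

    #define N_CLUSTERS 2 /* hypothetical NUMA topology */

    static ck_spinlock_hclh_t *glob_queue;
    static ck_spinlock_hclh_t *local_queue[N_CLUSTERS];
    static ck_spinlock_hclh_t glob_unowned, local_unowned[N_CLUSTERS];

    void
    setup(void)
    {
            int i;

            ck_spinlock_hclh_init(&glob_queue, &glob_unowned, -1);
            for (i = 0; i < N_CLUSTERS; i++)
                    ck_spinlock_hclh_init(&local_queue[i], &local_unowned[i], i);
    }

    /* c identifies the caller's cluster; node persists across acquisitions */
    void
    critical(int c, ck_spinlock_hclh_t **node)
    {
            ck_spinlock_hclh_lock(&glob_queue, &local_queue[c], *node);
            /* critical section */
            ck_spinlock_hclh_unlock(node);
    }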

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/f098175e/lib/ck/include/spinlock/mcs.h
----------------------------------------------------------------------
diff --git a/lib/ck/include/spinlock/mcs.h b/lib/ck/include/spinlock/mcs.h
new file mode 100644
index 0000000..1a61e82
--- /dev/null
+++ b/lib/ck/include/spinlock/mcs.h
@@ -0,0 +1,149 @@
+/*
+ * Copyright 2010-2014 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _CK_SPINLOCK_MCS_H
+#define _CK_SPINLOCK_MCS_H
+
+#include <ck_cc.h>
+#include <ck_pr.h>
+#include <stdbool.h>
+#include <stddef.h>
+
+#ifndef CK_F_SPINLOCK_MCS
+#define CK_F_SPINLOCK_MCS
+
+struct ck_spinlock_mcs {
+       unsigned int locked;
+       struct ck_spinlock_mcs *next;
+};
+typedef struct ck_spinlock_mcs * ck_spinlock_mcs_t;
+typedef struct ck_spinlock_mcs ck_spinlock_mcs_context_t;
+
+#define CK_SPINLOCK_MCS_INITIALIZER        (NULL)
+
+CK_CC_INLINE static void
+ck_spinlock_mcs_init(struct ck_spinlock_mcs **queue)
+{
+
+       *queue = NULL;
+       ck_pr_barrier();
+       return;
+}
+
+CK_CC_INLINE static bool
+ck_spinlock_mcs_trylock(struct ck_spinlock_mcs **queue, struct ck_spinlock_mcs *node)
+{
+
+       node->locked = true;
+       node->next = NULL;
+       ck_pr_fence_store_atomic();
+
+       if (ck_pr_cas_ptr(queue, NULL, node) == true) {
+               ck_pr_fence_acquire();
+               return true;
+       }
+
+       return false;
+}
+
+CK_CC_INLINE static bool
+ck_spinlock_mcs_locked(struct ck_spinlock_mcs **queue)
+{
+
+       ck_pr_fence_load();
+       return ck_pr_load_ptr(queue) != NULL;
+}
+
+CK_CC_INLINE static void
+ck_spinlock_mcs_lock(struct ck_spinlock_mcs **queue, struct ck_spinlock_mcs *node)
+{
+       struct ck_spinlock_mcs *previous;
+
+       /*
+        * In the case that there is a successor, let them know they must wait
+        * for us to unlock.
+        */
+       node->locked = true;
+       node->next = NULL;
+       ck_pr_fence_store_atomic();
+
+       /*
+        * Swap current tail with current lock request. If the swap operation
+        * returns NULL, it means the queue was empty. If the queue was empty,
+        * then the operation is complete.
+        */
+       previous = ck_pr_fas_ptr(queue, node);
+       if (previous != NULL) {
+               /* Let the previous lock holder know that we are waiting on them. */
+               ck_pr_store_ptr(&previous->next, node);
+               while (ck_pr_load_uint(&node->locked) == true)
+                       ck_pr_stall();
+       }
+
+       ck_pr_fence_load();
+       return;
+}
+
+CK_CC_INLINE static void
+ck_spinlock_mcs_unlock(struct ck_spinlock_mcs **queue, struct ck_spinlock_mcs *node)
+{
+       struct ck_spinlock_mcs *next;
+
+       ck_pr_fence_release();
+
+       next = ck_pr_load_ptr(&node->next);
+       if (next == NULL) {
+               /*
+                * If there is no request following us then it is a possibility
+                * that we are the current tail. In this case, we may just
+                * mark the spinlock queue as empty.
+                */
+               if (ck_pr_load_ptr(queue) == node &&
+                   ck_pr_cas_ptr(queue, node, NULL) == true) {
+                       return;
+               }
+
+               /*
+                * If the node is not the current tail then a lock operation is
+                * in-progress. In this case, busy-wait until the queue is in
+                * a consistent state to wake up the incoming lock request.
+                */
+               for (;;) {
+                       next = ck_pr_load_ptr(&node->next);
+                       if (next != NULL)
+                               break;
+
+                       ck_pr_stall();
+               }
+       }
+
+       /* Allow the next lock operation to complete. */
+       ck_pr_store_uint(&next->locked, false);
+       return;
+}
+#endif /* CK_F_SPINLOCK_MCS */
+#endif /* _CK_SPINLOCK_MCS_H */
+
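An MCS sketch; the per-acquisition context may live on the caller's stack, since ck_spinlock_mcs_unlock does not return until the queue no longer references the node:

    #include <ck_spinlock.h>

    static ck_spinlock_mcs_t queue = CK_SPINLOCK_MCS_INITIALIZER;

    void
    critical(void)
    {
            ck_spinlock_mcs_context_t node; /* queue node for this acquisition only */

            ck_spinlock_mcs_lock(&queue, &node);
            /* critical section */
            ck_spinlock_mcs_unlock(&queue, &node);
    }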

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/f098175e/lib/ck/include/spinlock/ticket.h
----------------------------------------------------------------------
diff --git a/lib/ck/include/spinlock/ticket.h b/lib/ck/include/spinlock/ticket.h
new file mode 100644
index 0000000..1fc641a
--- /dev/null
+++ b/lib/ck/include/spinlock/ticket.h
@@ -0,0 +1,298 @@
+/*
+ * Copyright 2010-2014 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _CK_SPINLOCK_TICKET_H
+#define _CK_SPINLOCK_TICKET_H
+
+#include <ck_backoff.h>
+#include <ck_cc.h>
+#include <ck_elide.h>
+#include <ck_md.h>
+#include <ck_pr.h>
+#include <stdbool.h>
+
+#ifndef CK_F_SPINLOCK_TICKET
+#define CK_F_SPINLOCK_TICKET
+/*
+ * If 16-bit or 32-bit increment is supported, implement trylock support
+ * on availability of 32-bit or 64-bit fetch-and-add and compare-and-swap.
+ * This code path is only applied to x86*.
+ */
+#if defined(CK_MD_TSO) && (defined(__x86__) || defined(__x86_64__))
+#if defined(CK_F_PR_FAA_32) && defined(CK_F_PR_INC_16) && defined(CK_F_PR_CAS_32)
+#define CK_SPINLOCK_TICKET_TYPE                uint32_t
+#define CK_SPINLOCK_TICKET_TYPE_BASE   uint16_t
+#define CK_SPINLOCK_TICKET_INC(x)      ck_pr_inc_16(x)
+#define CK_SPINLOCK_TICKET_CAS(x, y, z) ck_pr_cas_32(x, y, z)
+#define CK_SPINLOCK_TICKET_FAA(x, y)   ck_pr_faa_32(x, y)
+#define CK_SPINLOCK_TICKET_LOAD(x)     ck_pr_load_32(x)
+#define CK_SPINLOCK_TICKET_INCREMENT   (0x00010000UL)
+#define CK_SPINLOCK_TICKET_MASK                (0xFFFFUL)
+#define CK_SPINLOCK_TICKET_SHIFT       (16)
+#elif defined(CK_F_PR_FAA_64) && defined(CK_F_PR_INC_32) && defined(CK_F_PR_CAS_64)
+#define CK_SPINLOCK_TICKET_TYPE                uint64_t
+#define CK_SPINLOCK_TICKET_TYPE_BASE   uint32_t
+#define CK_SPINLOCK_TICKET_INC(x)      ck_pr_inc_32(x)
+#define CK_SPINLOCK_TICKET_CAS(x, y, z) ck_pr_cas_64(x, y, z)
+#define CK_SPINLOCK_TICKET_FAA(x, y)   ck_pr_faa_64(x, y)
+#define CK_SPINLOCK_TICKET_LOAD(x)     ck_pr_load_64(x)
+#define CK_SPINLOCK_TICKET_INCREMENT   (0x0000000100000000ULL)
+#define CK_SPINLOCK_TICKET_MASK                (0xFFFFFFFFULL)
+#define CK_SPINLOCK_TICKET_SHIFT       (32)
+#endif
+#endif /* CK_MD_TSO */
+
+#if defined(CK_SPINLOCK_TICKET_TYPE)
+#define CK_F_SPINLOCK_TICKET_TRYLOCK
+
+struct ck_spinlock_ticket {
+       CK_SPINLOCK_TICKET_TYPE value;
+};
+typedef struct ck_spinlock_ticket ck_spinlock_ticket_t;
+#define CK_SPINLOCK_TICKET_INITIALIZER { .value = 0 }
+
+CK_CC_INLINE static void
+ck_spinlock_ticket_init(struct ck_spinlock_ticket *ticket)
+{
+
+       ticket->value = 0;
+       ck_pr_barrier();
+       return;
+}
+
+CK_CC_INLINE static bool
+ck_spinlock_ticket_locked(struct ck_spinlock_ticket *ticket)
+{
+       CK_SPINLOCK_TICKET_TYPE request, position;
+
+       ck_pr_fence_load();
+
+       request = CK_SPINLOCK_TICKET_LOAD(&ticket->value);
+       position = request & CK_SPINLOCK_TICKET_MASK;
+       request >>= CK_SPINLOCK_TICKET_SHIFT;
+
+       return request != position;
+}
+
+CK_CC_INLINE static void
+ck_spinlock_ticket_lock(struct ck_spinlock_ticket *ticket)
+{
+       CK_SPINLOCK_TICKET_TYPE request, position;
+
+       /* Get our ticket number and set next ticket number. */
+       request = CK_SPINLOCK_TICKET_FAA(&ticket->value,
+           CK_SPINLOCK_TICKET_INCREMENT);
+
+       position = request & CK_SPINLOCK_TICKET_MASK;
+       request >>= CK_SPINLOCK_TICKET_SHIFT;
+
+       while (request != position) {
+               ck_pr_stall();
+               position = CK_SPINLOCK_TICKET_LOAD(&ticket->value) &
+                   CK_SPINLOCK_TICKET_MASK;
+       }
+
+       ck_pr_fence_acquire();
+       return;
+}
+
+CK_CC_INLINE static void
+ck_spinlock_ticket_lock_pb(struct ck_spinlock_ticket *ticket, unsigned int c)
+{
+       CK_SPINLOCK_TICKET_TYPE request, position;
+       ck_backoff_t backoff;
+
+       /* Get our ticket number and set next ticket number. */
+       request = CK_SPINLOCK_TICKET_FAA(&ticket->value,
+           CK_SPINLOCK_TICKET_INCREMENT);
+
+       position = request & CK_SPINLOCK_TICKET_MASK;
+       request >>= CK_SPINLOCK_TICKET_SHIFT;
+
+       while (request != position) {
+               ck_pr_stall();
+               position = CK_SPINLOCK_TICKET_LOAD(&ticket->value) &
+                   CK_SPINLOCK_TICKET_MASK;
+
+               backoff = (request - position) & CK_SPINLOCK_TICKET_MASK;
+               backoff <<= c;
+               ck_backoff_eb(&backoff);
+       }
+
+       ck_pr_fence_acquire();
+       return;
+}
+
+CK_CC_INLINE static bool
+ck_spinlock_ticket_trylock(struct ck_spinlock_ticket *ticket)
+{
+       CK_SPINLOCK_TICKET_TYPE snapshot, request, position;
+
+       snapshot = CK_SPINLOCK_TICKET_LOAD(&ticket->value);
+       position = snapshot & CK_SPINLOCK_TICKET_MASK;
+       request = snapshot >> CK_SPINLOCK_TICKET_SHIFT;
+
+       if (position != request)
+               return false;
+
+       if (CK_SPINLOCK_TICKET_CAS(&ticket->value,
+           snapshot, snapshot + CK_SPINLOCK_TICKET_INCREMENT) == false) {
+               return false;
+       }
+
+       ck_pr_fence_acquire();
+       return true;
+}
+
+CK_CC_INLINE static void
+ck_spinlock_ticket_unlock(struct ck_spinlock_ticket *ticket)
+{
+
+       ck_pr_fence_release();
+       CK_SPINLOCK_TICKET_INC((CK_SPINLOCK_TICKET_TYPE_BASE *)(void *)&ticket->value);
+       return;
+}
+
+#undef CK_SPINLOCK_TICKET_TYPE
+#undef CK_SPINLOCK_TICKET_TYPE_BASE
+#undef CK_SPINLOCK_TICKET_INC
+#undef CK_SPINLOCK_TICKET_FAA
+#undef CK_SPINLOCK_TICKET_LOAD
+#undef CK_SPINLOCK_TICKET_INCREMENT
+#undef CK_SPINLOCK_TICKET_MASK
+#undef CK_SPINLOCK_TICKET_SHIFT
+#else
+/*
+ * MESI benefits from cacheline padding between next and current. This avoids
+ * invalidation of current from the cache due to incoming lock requests.
+ */
+struct ck_spinlock_ticket {
+       unsigned int next;
+       unsigned int position;
+};
+typedef struct ck_spinlock_ticket ck_spinlock_ticket_t;
+
+#define CK_SPINLOCK_TICKET_INITIALIZER {.next = 0, .position = 0}
+
+CK_CC_INLINE static void
+ck_spinlock_ticket_init(struct ck_spinlock_ticket *ticket)
+{
+
+       ticket->next = 0;
+       ticket->position = 0;
+       ck_pr_barrier();
+
+       return;
+}
+
+CK_CC_INLINE static bool
+ck_spinlock_ticket_locked(struct ck_spinlock_ticket *ticket)
+{
+       unsigned int request;
+
+       ck_pr_fence_load();
+       request = ck_pr_load_uint(&ticket->next);
+       ck_pr_fence_load();
+       return ck_pr_load_uint(&ticket->position) != request;
+}
+
+CK_CC_INLINE static void
+ck_spinlock_ticket_lock(struct ck_spinlock_ticket *ticket)
+{
+       unsigned int request;
+
+       /* Get our ticket number and set next ticket number. */
+       request = ck_pr_faa_uint(&ticket->next, 1);
+
+       /*
+        * Busy-wait until our ticket number is current.
+        * We can get away without a fence here assuming
+        * our position counter does not overflow.
+        */
+       while (ck_pr_load_uint(&ticket->position) != request)
+               ck_pr_stall();
+
+       ck_pr_fence_acquire();
+       return;
+}
+
+CK_CC_INLINE static void
+ck_spinlock_ticket_lock_pb(struct ck_spinlock_ticket *ticket, unsigned int c)
+{
+       ck_backoff_t backoff;
+       unsigned int request, position;
+
+       request = ck_pr_faa_uint(&ticket->next, 1);
+
+       for (;;) {
+               position = ck_pr_load_uint(&ticket->position);
+               if (position == request)
+                       break;
+
+               backoff = request - position;
+               backoff <<= c;
+
+               /*
+                * Ideally, back-off from generating cache traffic for at least
+                * the amount of time necessary for the number of pending lock
+                * acquisition and relinquish operations (assuming an empty
+                * critical section).
+                */
+               ck_backoff_eb(&backoff);
+       }
+
+       ck_pr_fence_acquire();
+       return;
+}
+
+CK_CC_INLINE static void
+ck_spinlock_ticket_unlock(struct ck_spinlock_ticket *ticket)
+{
+       unsigned int update;
+
+       ck_pr_fence_release();
+
+       /*
+        * Update current ticket value so next lock request can proceed.
+        * Overflow behavior is assumed to be roll-over, in which case,
+        * it is only an issue if there are 2^32 pending lock requests.
+        */
+       update = ck_pr_load_uint(&ticket->position);
+       ck_pr_store_uint(&ticket->position, update + 1);
+       return;
+}
+#endif /* !CK_F_SPINLOCK_TICKET_TRYLOCK */
+
+CK_ELIDE_PROTOTYPE(ck_spinlock_ticket, ck_spinlock_ticket_t,
+    ck_spinlock_ticket_locked, ck_spinlock_ticket_lock,
+    ck_spinlock_ticket_locked, ck_spinlock_ticket_unlock)
+
+CK_ELIDE_TRYLOCK_PROTOTYPE(ck_spinlock_ticket, ck_spinlock_ticket_t,
+    ck_spinlock_ticket_locked, ck_spinlock_ticket_trylock)
+
+#endif /* CK_F_SPINLOCK_TICKET */
+#endif /* _CK_SPINLOCK_TICKET_H */
+
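A ticket lock sketch; the back-off exponent passed to ck_spinlock_ticket_lock_pb (1 below) is an arbitrary illustration that scales the distance-to-turn before backing off:

    #include <ck_spinlock.h>

    static ck_spinlock_ticket_t lock = CK_SPINLOCK_TICKET_INITIALIZER;

    void
    critical(void)
    {
            ck_spinlock_ticket_lock(&lock); /* FIFO: acquisitions occur in ticket order */
            /* critical section */
            ck_spinlock_ticket_unlock(&lock);
    }

    void
    critical_pb(void)
    {
            ck_spinlock_ticket_lock_pb(&lock, 1); /* proportional back-off variant */
            /* critical section */
            ck_spinlock_ticket_unlock(&lock);
    }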

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/f098175e/lib/ck/regressions/Makefile
----------------------------------------------------------------------
diff --git a/lib/ck/regressions/Makefile b/lib/ck/regressions/Makefile
new file mode 100644
index 0000000..3195e52
--- /dev/null
+++ b/lib/ck/regressions/Makefile
@@ -0,0 +1,128 @@
+DIR=array      \
+    backoff    \
+    barrier    \
+    bitmap     \
+    brlock     \
+    bytelock   \
+    cohort     \
+    epoch      \
+    fifo       \
+    hp         \
+    hs         \
+    rhs                \
+    ht         \
+    pflock     \
+    pr         \
+    queue      \
+    ring       \
+    rwlock     \
+    swlock     \
+    sequence   \
+    spinlock   \
+    stack      \
+    tflock
+
+.PHONY: all clean check
+
+all:
+       $(MAKE) -C ./ck_array/validate all
+       $(MAKE) -C ./ck_cohort/validate all
+       $(MAKE) -C ./ck_cohort/benchmark all
+       $(MAKE) -C ./ck_bitmap/validate all
+       $(MAKE) -C ./ck_backoff/validate all
+       $(MAKE) -C ./ck_queue/validate all
+       $(MAKE) -C ./ck_brlock/validate all
+       $(MAKE) -C ./ck_ht/validate all
+       $(MAKE) -C ./ck_ht/benchmark all
+       $(MAKE) -C ./ck_brlock/benchmark all
+       $(MAKE) -C ./ck_spinlock/validate all
+       $(MAKE) -C ./ck_spinlock/benchmark all
+       $(MAKE) -C ./ck_fifo/validate all
+       $(MAKE) -C ./ck_fifo/benchmark all
+       $(MAKE) -C ./ck_pr/validate all
+       $(MAKE) -C ./ck_pr/benchmark all
+       $(MAKE) -C ./ck_hs/benchmark all
+       $(MAKE) -C ./ck_hs/validate all
+       $(MAKE) -C ./ck_rhs/benchmark all
+       $(MAKE) -C ./ck_rhs/validate all
+       $(MAKE) -C ./ck_barrier/validate all
+       $(MAKE) -C ./ck_barrier/benchmark all
+       $(MAKE) -C ./ck_bytelock/validate all
+       $(MAKE) -C ./ck_bytelock/benchmark all
+       $(MAKE) -C ./ck_epoch/validate all
+       $(MAKE) -C ./ck_rwcohort/validate all
+       $(MAKE) -C ./ck_rwcohort/benchmark all
+       $(MAKE) -C ./ck_sequence/validate all
+       $(MAKE) -C ./ck_sequence/benchmark all
+       $(MAKE) -C ./ck_stack/validate all
+       $(MAKE) -C ./ck_stack/benchmark all
+       $(MAKE) -C ./ck_ring/validate all
+       $(MAKE) -C ./ck_ring/benchmark all
+       $(MAKE) -C ./ck_rwlock/validate all
+       $(MAKE) -C ./ck_rwlock/benchmark all
+       $(MAKE) -C ./ck_tflock/validate all
+       $(MAKE) -C ./ck_tflock/benchmark all
+       $(MAKE) -C ./ck_swlock/validate all
+       $(MAKE) -C ./ck_swlock/benchmark all
+       $(MAKE) -C ./ck_pflock/validate all
+       $(MAKE) -C ./ck_pflock/benchmark all
+       $(MAKE) -C ./ck_hp/validate all
+       $(MAKE) -C ./ck_hp/benchmark all
+
+clean:
+       $(MAKE) -C ./ck_array/validate clean
+       $(MAKE) -C ./ck_pflock/validate clean
+       $(MAKE) -C ./ck_pflock/benchmark clean
+       $(MAKE) -C ./ck_tflock/validate clean
+       $(MAKE) -C ./ck_tflock/benchmark clean
+       $(MAKE) -C ./ck_rwcohort/validate clean
+       $(MAKE) -C ./ck_rwcohort/benchmark clean
+       $(MAKE) -C ./ck_backoff/validate clean
+       $(MAKE) -C ./ck_bitmap/validate clean
+       $(MAKE) -C ./ck_queue/validate clean
+       $(MAKE) -C ./ck_cohort/validate clean
+       $(MAKE) -C ./ck_cohort/benchmark clean
+       $(MAKE) -C ./ck_brlock/validate clean
+       $(MAKE) -C ./ck_ht/validate clean
+       $(MAKE) -C ./ck_ht/benchmark clean
+       $(MAKE) -C ./ck_hs/validate clean
+       $(MAKE) -C ./ck_hs/benchmark clean
+       $(MAKE) -C ./ck_rhs/validate clean
+       $(MAKE) -C ./ck_rhs/benchmark clean
+       $(MAKE) -C ./ck_brlock/benchmark clean
+       $(MAKE) -C ./ck_spinlock/validate clean
+       $(MAKE) -C ./ck_spinlock/benchmark clean
+       $(MAKE) -C ./ck_fifo/validate clean
+       $(MAKE) -C ./ck_fifo/benchmark clean
+       $(MAKE) -C ./ck_pr/validate clean
+       $(MAKE) -C ./ck_pr/benchmark clean
+       $(MAKE) -C ./ck_barrier/validate clean
+       $(MAKE) -C ./ck_barrier/benchmark clean
+       $(MAKE) -C ./ck_bytelock/validate clean
+       $(MAKE) -C ./ck_bytelock/benchmark clean
+       $(MAKE) -C ./ck_epoch/validate clean
+       $(MAKE) -C ./ck_sequence/validate clean
+       $(MAKE) -C ./ck_sequence/benchmark clean
+       $(MAKE) -C ./ck_stack/validate clean
+       $(MAKE) -C ./ck_stack/benchmark clean
+       $(MAKE) -C ./ck_ring/validate clean
+       $(MAKE) -C ./ck_ring/benchmark clean
+       $(MAKE) -C ./ck_rwlock/validate clean
+       $(MAKE) -C ./ck_rwlock/benchmark clean
+       $(MAKE) -C ./ck_swlock/validate clean
+       $(MAKE) -C ./ck_swlock/benchmark clean
+       $(MAKE) -C ./ck_pflock/validate clean
+       $(MAKE) -C ./ck_pflock/benchmark clean
+       $(MAKE) -C ./ck_hp/validate clean
+       $(MAKE) -C ./ck_hp/benchmark clean
+
+check: all
+       rc=0;                                                   \
+       for d in $(DIR) ; do                                    \
+               echo "----[ Testing $$d....";                   \
+               $(MAKE) -C ./ck_$$d/validate check || rc=1;     \
+               echo;                                           \
+       done;                                                   \
+       exit $$rc
+
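For reference, the check target iterates over every directory in DIR, continues past failures, and reports a combined exit status; a typical in-source invocation might look like:

    cd lib/ck/regressions
    make all    # build every validate/ and benchmark/ subdirectory
    make check  # run each ck_*/validate suite; exits 1 if any suite failed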

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/f098175e/lib/ck/regressions/Makefile.unsupported
----------------------------------------------------------------------
diff --git a/lib/ck/regressions/Makefile.unsupported b/lib/ck/regressions/Makefile.unsupported
new file mode 100644
index 0000000..90aa877
--- /dev/null
+++ b/lib/ck/regressions/Makefile.unsupported
@@ -0,0 +1,9 @@
+.PHONY: all clean check
+
+all:
+       @echo Regressions are currently unsupported for out-of-source builds
+
+clean: all
+
+check: all
+

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/f098175e/lib/ck/regressions/ck_array/validate/Makefile
----------------------------------------------------------------------
diff --git a/lib/ck/regressions/ck_array/validate/Makefile b/lib/ck/regressions/ck_array/validate/Makefile
new file mode 100644
index 0000000..3c48167
--- /dev/null
+++ b/lib/ck/regressions/ck_array/validate/Makefile
@@ -0,0 +1,17 @@
+.PHONY: check clean distribution
+
+OBJECTS=serial
+
+all: $(OBJECTS)
+
+serial: serial.c ../../../include/ck_array.h ../../../src/ck_array.c
+       $(CC) $(CFLAGS) -o serial serial.c ../../../src/ck_array.c
+
+check: all
+       ./serial
+
+clean:
+       rm -rf *~ *.o $(OBJECTS) *.dSYM *.exe
+
+include ../../../build/regressions.build
+CFLAGS+=-D_GNU_SOURCE -ggdb
