Repository: trafficserver
Updated Branches:
  refs/heads/master f5a3d5a2f -> f098175e9


http://git-wip-us.apache.org/repos/asf/trafficserver/blob/f098175e/lib/ck/src/ck_ht.c
----------------------------------------------------------------------
diff --git a/lib/ck/src/ck_ht.c b/lib/ck/src/ck_ht.c
new file mode 100644
index 0000000..ea9de1e
--- /dev/null
+++ b/lib/ck/src/ck_ht.c
@@ -0,0 +1,1030 @@
+/*
+ * Copyright 2012-2014 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <ck_ht.h>
+
+#ifdef CK_F_HT
+/*
+ * This implementation borrows several techniques from Josh Dybnis's
+ * nbds library which can be found at http://code.google.com/p/nbds
+ *
+ * This release currently only includes support for 64-bit platforms.
+ * We can address 32-bit platforms in a future release.
+ */
+#include <ck_cc.h>
+#include <ck_md.h>
+#include <ck_pr.h>
+#include <ck_stdint.h>
+#include <stdbool.h>
+#include <string.h>
+
+#include "ck_ht_hash.h"
+#include "ck_internal.h"
+
+#ifndef CK_HT_BUCKET_LENGTH
+
+#ifdef CK_HT_PP
+#define CK_HT_BUCKET_SHIFT 2ULL
+#else
+#define CK_HT_BUCKET_SHIFT 1ULL
+#endif
+
+#define CK_HT_BUCKET_LENGTH (1U << CK_HT_BUCKET_SHIFT)
+#define CK_HT_BUCKET_MASK (CK_HT_BUCKET_LENGTH - 1)
+#endif
+
+#ifndef CK_HT_PROBE_DEFAULT
+#define CK_HT_PROBE_DEFAULT 64ULL
+#endif
+
+#if defined(CK_F_PR_LOAD_8) && defined(CK_F_PR_STORE_8)
+#define CK_HT_WORD         uint8_t
+#define CK_HT_WORD_MAX     UINT8_MAX
+#define CK_HT_STORE(x, y)   ck_pr_store_8(x, y)
+#define CK_HT_LOAD(x)      ck_pr_load_8(x)
+#elif defined(CK_F_PR_LOAD_16) && defined(CK_F_PR_STORE_16)
+#define CK_HT_WORD         uint16_t
+#define CK_HT_WORD_MAX     UINT16_MAX
+#define CK_HT_STORE(x, y)   ck_pr_store_16(x, y)
+#define CK_HT_LOAD(x)      ck_pr_load_16(x)
+#elif defined(CK_F_PR_LOAD_32) && defined(CK_F_PR_STORE_32)
+#define CK_HT_WORD         uint32_t
+#define CK_HT_WORD_MAX     UINT32_MAX
+#define CK_HT_STORE(x, y)   ck_pr_store_32(x, y)
+#define CK_HT_LOAD(x)      ck_pr_load_32(x)
+#else
+#error "ck_ht is not supported on your platform."
+#endif
+
+struct ck_ht_map {
+       unsigned int mode;
+       uint64_t deletions;
+       uint64_t probe_maximum;
+       uint64_t probe_length;
+       uint64_t probe_limit;
+       uint64_t size;
+       uint64_t n_entries;
+       uint64_t mask;
+       uint64_t capacity;
+       uint64_t step;
+       CK_HT_WORD *probe_bound;
+       struct ck_ht_entry *entries;
+};
+
+void
+ck_ht_stat(struct ck_ht *table,
+    struct ck_ht_stat *st)
+{
+       struct ck_ht_map *map = table->map;
+
+       st->n_entries = map->n_entries;
+       st->probe_maximum = map->probe_maximum;
+       return;
+}
+
+void
+ck_ht_hash(struct ck_ht_hash *h,
+    struct ck_ht *table,
+    const void *key,
+    uint16_t key_length)
+{
+
+       h->value = MurmurHash64A(key, key_length, table->seed);
+       return;
+}
+
+void
+ck_ht_hash_direct(struct ck_ht_hash *h,
+    struct ck_ht *table,
+    uintptr_t key)
+{
+
+       ck_ht_hash(h, table, &key, sizeof(key));
+       return;
+}
+
+static void
+ck_ht_hash_wrapper(struct ck_ht_hash *h,
+    const void *key,
+    size_t length,
+    uint64_t seed)
+{
+
+       h->value = MurmurHash64A(key, length, seed);
+       return;
+}
+
+static struct ck_ht_map *
+ck_ht_map_create(struct ck_ht *table, uint64_t entries)
+{
+       struct ck_ht_map *map;
+       uint64_t size, n_entries, prefix;
+
+       n_entries = ck_internal_power_2(entries);
+       if (n_entries < CK_HT_BUCKET_LENGTH)
+               return NULL;
+
+       size = sizeof(struct ck_ht_map) +
+                  (sizeof(struct ck_ht_entry) * n_entries + CK_MD_CACHELINE - 1);
+
+       if (table->mode & CK_HT_WORKLOAD_DELETE) {
+               prefix = sizeof(CK_HT_WORD) * n_entries;
+               size += prefix;
+       } else {
+               prefix = 0;
+       }
+
+       map = table->m->malloc(size);
+       if (map == NULL)
+               return NULL;
+
+       map->mode = table->mode;
+       map->size = size;
+       map->probe_limit = ck_internal_max_64(n_entries >>
+           (CK_HT_BUCKET_SHIFT + 2), CK_HT_PROBE_DEFAULT);
+
+       map->deletions = 0;
+       map->probe_maximum = 0;
+       map->capacity = n_entries;
+       map->step = ck_internal_bsf_64(map->capacity);
+       map->mask = map->capacity - 1;
+       map->n_entries = 0;
+       map->entries = (struct ck_ht_entry *)(((uintptr_t)&map[1] + prefix +
+           CK_MD_CACHELINE - 1) & ~(CK_MD_CACHELINE - 1));
+
+       if (table->mode & CK_HT_WORKLOAD_DELETE) {
+               map->probe_bound = (CK_HT_WORD *)&map[1];
+               memset(map->probe_bound, 0, prefix);
+       } else {
+               map->probe_bound = NULL;
+       }
+
+       memset(map->entries, 0, sizeof(struct ck_ht_entry) * n_entries);
+       ck_pr_fence_store();
+       return map;
+}
+
+static inline void
+ck_ht_map_bound_set(struct ck_ht_map *m,
+    struct ck_ht_hash h,
+    uint64_t n_probes)
+{
+       uint64_t offset = h.value & m->mask;
+
+       if (n_probes > m->probe_maximum)
+               ck_pr_store_64(&m->probe_maximum, n_probes);
+
+       if (m->probe_bound != NULL && m->probe_bound[offset] < n_probes) {
+               if (n_probes >= CK_HT_WORD_MAX)
+                       n_probes = CK_HT_WORD_MAX;
+
+               CK_HT_STORE(&m->probe_bound[offset], n_probes);
+               ck_pr_fence_store();
+       }
+
+       return;
+}
+
+static inline uint64_t
+ck_ht_map_bound_get(struct ck_ht_map *m, struct ck_ht_hash h)
+{
+       uint64_t offset = h.value & m->mask;
+       uint64_t r = CK_HT_WORD_MAX;
+
+       if (m->probe_bound != NULL) {
+               r = CK_HT_LOAD(&m->probe_bound[offset]);
+               if (r == CK_HT_WORD_MAX)
+                       r = ck_pr_load_64(&m->probe_maximum);
+       } else {
+               r = ck_pr_load_64(&m->probe_maximum);
+       }
+
+       return r;
+}
+
+static void
+ck_ht_map_destroy(struct ck_malloc *m, struct ck_ht_map *map, bool defer)
+{
+
+       m->free(map, map->size, defer);
+       return;
+}
+
+static inline size_t
+ck_ht_map_probe_next(struct ck_ht_map *map, size_t offset, ck_ht_hash_t h, size_t probes)
+{
+       ck_ht_hash_t r;
+       size_t stride;
+       unsigned long level = (unsigned long)probes >> CK_HT_BUCKET_SHIFT;
+
+       r.value = (h.value >> map->step) >> level;
+       stride = (r.value & ~CK_HT_BUCKET_MASK) << 1
+                    | (r.value & CK_HT_BUCKET_MASK);
+
+       return (offset + level +
+           (stride | CK_HT_BUCKET_LENGTH)) & map->mask;
+}
+
+bool
+ck_ht_init(struct ck_ht *table,
+    unsigned int mode,
+    ck_ht_hash_cb_t *h,
+    struct ck_malloc *m,
+    uint64_t entries,
+    uint64_t seed)
+{
+
+       if (m == NULL || m->malloc == NULL || m->free == NULL)
+               return false;
+
+       table->m = m;
+       table->mode = mode;
+       table->seed = seed;
+
+       if (h == NULL) {
+               table->h = ck_ht_hash_wrapper;
+       } else {
+               table->h = h;
+       }
+
+       table->map = ck_ht_map_create(table, entries);
+       return table->map != NULL;
+}
+
+static struct ck_ht_entry *
+ck_ht_map_probe_wr(struct ck_ht_map *map,
+    ck_ht_hash_t h,
+    ck_ht_entry_t *snapshot,
+    ck_ht_entry_t **available,
+    const void *key,
+    uint16_t key_length,
+    uint64_t *probe_limit,
+    uint64_t *probe_wr)
+{
+       struct ck_ht_entry *bucket, *cursor;
+       struct ck_ht_entry *first = NULL;
+       size_t offset, i, j;
+       uint64_t probes = 0;
+       uint64_t limit;
+
+       if (probe_limit == NULL) {
+               limit = ck_ht_map_bound_get(map, h);
+       } else {
+               limit = UINT64_MAX;
+       }
+
+       offset = h.value & map->mask;
+       for (i = 0; i < map->probe_limit; i++) {
+               /*
+                * Probe on a complete cache line first. Scan forward and wrap around to
+                * the beginning of the cache line. Only when the complete cache line has
+                * been scanned do we move on to the next row.
+                */
+               bucket = (void *)((uintptr_t)(map->entries + offset) &
+                            ~(CK_MD_CACHELINE - 1));
+
+               for (j = 0; j < CK_HT_BUCKET_LENGTH; j++) {
+                       uint16_t k;
+
+                       if (probes++ > limit)
+                               break;
+
+                       cursor = bucket + ((j + offset) & (CK_HT_BUCKET_LENGTH - 1));
+
+                       /*
+                        * It is probably worth it to encapsulate probe state
+                        * in order to prevent a complete reprobe sequence in
+                        * the case of intermittent writers.
+                        */
+                       if (cursor->key == CK_HT_KEY_TOMBSTONE) {
+                               if (first == NULL) {
+                                       first = cursor;
+                                       *probe_wr = probes;
+                               }
+
+                               continue;
+                       }
+
+                       if (cursor->key == CK_HT_KEY_EMPTY)
+                               goto leave;
+
+                       if (cursor->key == (uintptr_t)key)
+                               goto leave;
+
+                       if (map->mode & CK_HT_MODE_BYTESTRING) {
+                               void *pointer;
+
+                               /*
+                                * Check memoized portion of hash value before
+                                * expensive full-length comparison.
+                                */
+                               k = ck_ht_entry_key_length(cursor);
+                               if (k != key_length)
+                                       continue;
+
+#ifdef CK_HT_PP
+                               if ((cursor->value >> CK_MD_VMA_BITS) != ((h.value >> 32) & CK_HT_KEY_MASK))
+                                       continue;
+#else
+                               if (cursor->hash != h.value)
+                                       continue;
+#endif
+
+                               pointer = ck_ht_entry_key(cursor);
+                               if (memcmp(pointer, key, key_length) == 0)
+                                       goto leave;
+                       }
+               }
+
+               offset = ck_ht_map_probe_next(map, offset, h, probes);
+       }
+
+       cursor = NULL;
+
+leave:
+       if (probe_limit != NULL) {
+               *probe_limit = probes;
+       } else if (first == NULL) {
+               *probe_wr = probes;
+       }
+
+       *available = first;
+
+       if (cursor != NULL) {
+               *snapshot = *cursor;
+       }
+
+       return cursor;
+}
+
+bool
+ck_ht_gc(struct ck_ht *ht, unsigned long cycles, unsigned long seed)
+{
+       CK_HT_WORD *bounds = NULL;
+       struct ck_ht_map *map = ht->map;
+       uint64_t maximum, i;
+       uint64_t size = 0;
+
+       if (map->n_entries == 0) {
+               ck_pr_store_64(&map->probe_maximum, 0);
+               if (map->probe_bound != NULL)
+                       memset(map->probe_bound, 0, sizeof(CK_HT_WORD) * map->capacity);
+
+               return true;
+       }
+       
+       if (cycles == 0) {
+               maximum = 0;
+
+               if (map->probe_bound != NULL) {
+                       size = sizeof(CK_HT_WORD) * map->capacity;
+                       bounds = ht->m->malloc(size);
+                       if (bounds == NULL)
+                               return false;
+
+                       memset(bounds, 0, size);
+               }
+       } else {
+               maximum = map->probe_maximum;
+       }
+
+       for (i = 0; i < map->capacity; i++) {
+               struct ck_ht_entry *entry, *priority, snapshot;
+               struct ck_ht_hash h;
+               uint64_t probes_wr;
+               uint64_t offset;
+
+               entry = &map->entries[(i + seed) & map->mask];
+               if (entry->key == CK_HT_KEY_EMPTY ||
+                   entry->key == CK_HT_KEY_TOMBSTONE) {
+                       continue;
+               }
+
+               if (ht->mode & CK_HT_MODE_BYTESTRING) {
+#ifndef CK_HT_PP
+                       h.value = entry->hash;
+#else
+                       ht->h(&h, ck_ht_entry_key(entry), ck_ht_entry_key_length(entry),
+                           ht->seed);
+#endif
+                       entry = ck_ht_map_probe_wr(map, h, &snapshot, &priority,
+                           ck_ht_entry_key(entry),
+                           ck_ht_entry_key_length(entry),
+                           NULL, &probes_wr);
+               } else {
+#ifndef CK_HT_PP
+                       h.value = entry->hash;
+#else
+                       ht->h(&h, &entry->key, sizeof(entry->key), ht->seed);
+#endif
+                       entry = ck_ht_map_probe_wr(map, h, &snapshot, &priority,
+                           (void *)entry->key,
+                           sizeof(entry->key),
+                           NULL, &probes_wr);
+               }
+
+               offset = h.value & map->mask;
+
+               if (priority != NULL) {
+#ifndef CK_HT_PP
+                       ck_pr_store_64(&priority->key_length, entry->key_length);
+                       ck_pr_store_64(&priority->hash, entry->hash);
+#endif
+                       ck_pr_store_ptr(&priority->value, (void *)entry->value);
+                       ck_pr_fence_store();
+                       ck_pr_store_ptr(&priority->key, (void *)entry->key);
+                       ck_pr_fence_store();
+                       ck_pr_store_64(&map->deletions, map->deletions + 1);
+                       ck_pr_fence_store();
+                       ck_pr_store_ptr(&entry->key, (void *)CK_HT_KEY_TOMBSTONE);
+               }
+
+               if (cycles == 0) {
+                       if (probes_wr > maximum)
+                               maximum = probes_wr;
+
+                       if (probes_wr >= CK_HT_WORD_MAX)
+                               probes_wr = CK_HT_WORD_MAX;
+
+                       if (bounds != NULL && probes_wr > bounds[offset])
+                               bounds[offset] = probes_wr;
+               } else if (--cycles == 0)
+                       break;
+       }
+
+       if (maximum != map->probe_maximum)
+               ck_pr_store_64(&map->probe_maximum, maximum);
+
+       if (bounds != NULL) {
+               for (i = 0; i < map->capacity; i++)
+                       CK_HT_STORE(&map->probe_bound[i], bounds[i]);
+
+               ht->m->free(bounds, size, false);
+       }
+
+       return true;
+}
+
+static struct ck_ht_entry *
+ck_ht_map_probe_rd(struct ck_ht_map *map,
+    ck_ht_hash_t h,
+    ck_ht_entry_t *snapshot,
+    const void *key,
+    uint16_t key_length)
+{
+       struct ck_ht_entry *bucket, *cursor;
+       size_t offset, i, j;
+       uint64_t probes = 0;
+       uint64_t probe_maximum;
+
+#ifndef CK_HT_PP
+       uint64_t d = 0;
+       uint64_t d_prime = 0;
+retry:
+#endif
+
+       probe_maximum = ck_ht_map_bound_get(map, h);
+       offset = h.value & map->mask;
+
+       for (i = 0; i < map->probe_limit; i++) {
+               /*
+                * Probe on a complete cache line first. Scan forward and wrap around to
+                * the beginning of the cache line. Only when the complete cache line has
+                * been scanned do we move on to the next row.
+                */
+               bucket = (void *)((uintptr_t)(map->entries + offset) &
+                            ~(CK_MD_CACHELINE - 1));
+
+               for (j = 0; j < CK_HT_BUCKET_LENGTH; j++) {
+                       uint16_t k;
+
+                       if (probes++ > probe_maximum)
+                               return NULL;
+
+                       cursor = bucket + ((j + offset) & (CK_HT_BUCKET_LENGTH - 1));
+
+#ifdef CK_HT_PP
+                       snapshot->key = (uintptr_t)ck_pr_load_ptr(&cursor->key);
+                       ck_pr_fence_load();
+                       snapshot->value = (uintptr_t)ck_pr_load_ptr(&cursor->value);
+#else
+                       d = ck_pr_load_64(&map->deletions);
+                       snapshot->key = (uintptr_t)ck_pr_load_ptr(&cursor->key);
+                       ck_pr_fence_load();
+                       snapshot->key_length = ck_pr_load_64(&cursor->key_length);
+                       snapshot->hash = ck_pr_load_64(&cursor->hash);
+                       snapshot->value = (uintptr_t)ck_pr_load_ptr(&cursor->value);
+#endif
+
+                       /*
+                        * It is probably worth it to encapsulate probe state
+                        * in order to prevent a complete reprobe sequence in
+                        * the case of intermittent writers.
+                        */
+                       if (snapshot->key == CK_HT_KEY_TOMBSTONE)
+                               continue;
+
+                       if (snapshot->key == CK_HT_KEY_EMPTY)
+                               goto leave;
+
+                       if (snapshot->key == (uintptr_t)key)
+                               goto leave;
+
+                       if (map->mode & CK_HT_MODE_BYTESTRING) {
+                               void *pointer;
+
+                               /*
+                                * Check memoized portion of hash value before
+                                * expensive full-length comparison.
+                                */
+                               k = ck_ht_entry_key_length(snapshot);
+                               if (k != key_length)
+                                       continue;
+#ifdef CK_HT_PP
+                               if ((snapshot->value >> CK_MD_VMA_BITS) != ((h.value >> 32) & CK_HT_KEY_MASK))
+                                       continue;
+#else
+                               if (snapshot->hash != h.value)
+                                       continue;
+
+                               d_prime = ck_pr_load_64(&map->deletions);
+
+                               /*
+                                * It is possible that the slot was
+                                * replaced, initiate a re-probe.
+                                */
+                               if (d != d_prime)
+                                       goto retry;
+#endif
+
+                               pointer = ck_ht_entry_key(snapshot);
+                               if (memcmp(pointer, key, key_length) == 0)
+                                       goto leave;
+                       }
+               }
+
+               offset = ck_ht_map_probe_next(map, offset, h, probes);
+       }
+
+       return NULL;
+
+leave:
+       return cursor;
+}
+
+uint64_t
+ck_ht_count(struct ck_ht *table)
+{
+       struct ck_ht_map *map = ck_pr_load_ptr(&table->map);
+
+       return ck_pr_load_64(&map->n_entries);
+}
+
+bool
+ck_ht_next(struct ck_ht *table,
+    struct ck_ht_iterator *i,
+    struct ck_ht_entry **entry)
+{
+       struct ck_ht_map *map = table->map;
+       uintptr_t key;
+
+       if (i->offset >= map->capacity)
+               return false;
+
+       do {
+               key = map->entries[i->offset].key;
+               if (key != CK_HT_KEY_EMPTY && key != CK_HT_KEY_TOMBSTONE)
+                       break;
+       } while (++i->offset < map->capacity);
+
+       if (i->offset >= map->capacity)
+               return false;
+
+       *entry = map->entries + i->offset++;
+       return true;
+}
+
+bool
+ck_ht_reset_size_spmc(struct ck_ht *table, uint64_t size)
+{
+       struct ck_ht_map *map, *update;
+
+       map = table->map;
+       update = ck_ht_map_create(table, size);
+       if (update == NULL)
+               return false;
+
+       ck_pr_store_ptr(&table->map, update);
+       ck_ht_map_destroy(table->m, map, true);
+       return true;
+}
+
+bool
+ck_ht_reset_spmc(struct ck_ht *table)
+{
+       struct ck_ht_map *map = table->map;
+
+       return ck_ht_reset_size_spmc(table, map->capacity);
+}
+
+bool
+ck_ht_grow_spmc(struct ck_ht *table, uint64_t capacity)
+{
+       struct ck_ht_map *map, *update;
+       struct ck_ht_entry *bucket, *previous;
+       struct ck_ht_hash h;
+       size_t k, i, j, offset;
+       uint64_t probes;
+
+restart:
+       map = table->map;
+
+       if (map->capacity >= capacity)
+               return false;
+
+       update = ck_ht_map_create(table, capacity);
+       if (update == NULL)
+               return false;
+
+       for (k = 0; k < map->capacity; k++) {
+               previous = &map->entries[k];
+
+               if (previous->key == CK_HT_KEY_EMPTY || previous->key == CK_HT_KEY_TOMBSTONE)
+                       continue;
+
+               if (table->mode & CK_HT_MODE_BYTESTRING) {
+#ifdef CK_HT_PP
+                       void *key;
+                       uint16_t key_length;
+
+                       key = ck_ht_entry_key(previous);
+                       key_length = ck_ht_entry_key_length(previous);
+#endif
+
+#ifndef CK_HT_PP
+                       h.value = previous->hash;
+#else
+                       table->h(&h, key, key_length, table->seed);
+#endif
+               } else {
+#ifndef CK_HT_PP
+                       h.value = previous->hash;
+#else
+                       table->h(&h, &previous->key, sizeof(previous->key), table->seed);
+#endif
+               }
+
+               offset = h.value & update->mask;
+               probes = 0;
+
+               for (i = 0; i < update->probe_limit; i++) {
+                       bucket = (void *)((uintptr_t)(update->entries + offset) & ~(CK_MD_CACHELINE - 1));
+
+                       for (j = 0; j < CK_HT_BUCKET_LENGTH; j++) {
+                               struct ck_ht_entry *cursor = bucket + ((j + offset) & (CK_HT_BUCKET_LENGTH - 1));
+
+                               probes++;
+                               if (CK_CC_LIKELY(cursor->key == CK_HT_KEY_EMPTY)) {
+                                       *cursor = *previous;
+                                       update->n_entries++;
+                                       ck_ht_map_bound_set(update, h, probes);
+                                       break;
+                               }
+                       }
+
+                       if (j < CK_HT_BUCKET_LENGTH)
+                               break;
+
+                       offset = ck_ht_map_probe_next(update, offset, h, probes);
+               }
+
+               if (i == update->probe_limit) {
+                       /*
+                        * We have hit the probe limit, the map needs to be even
+                        * larger.
+                        */
+                       ck_ht_map_destroy(table->m, update, false);
+                       capacity <<= 1;
+                       goto restart;
+               }
+       }
+
+       ck_pr_fence_store();
+       ck_pr_store_ptr(&table->map, update);
+       ck_ht_map_destroy(table->m, map, true);
+       return true;
+}
+
+bool
+ck_ht_remove_spmc(struct ck_ht *table,
+    ck_ht_hash_t h,
+    ck_ht_entry_t *entry)
+{
+       struct ck_ht_map *map;
+       struct ck_ht_entry *candidate, snapshot;
+
+       map = table->map;
+
+       if (table->mode & CK_HT_MODE_BYTESTRING) {
+               candidate = ck_ht_map_probe_rd(map, h, &snapshot,
+                   ck_ht_entry_key(entry),
+                   ck_ht_entry_key_length(entry));
+       } else {
+               candidate = ck_ht_map_probe_rd(map, h, &snapshot,
+                   (void *)entry->key,
+                   sizeof(entry->key));
+       }
+
+       /* No matching entry was found. */
+       if (candidate == NULL || snapshot.key == CK_HT_KEY_EMPTY)
+               return false;
+
+       *entry = snapshot;
+       ck_pr_store_ptr(&candidate->key, (void *)CK_HT_KEY_TOMBSTONE);
+
+       /*
+        * It is possible that the key is read before transition into
+        * the tombstone state. Assuming the keys do match, a reader
+        * may have already acquired a snapshot of the value at the time.
+        * However, assume the reader is preempted as a deletion occurs
+        * followed by a replacement. In this case, it is possible that
+        * the reader acquires some value V' instead of V. Let us assume
+        * however that any transition from V into V' (essentially, update
+        * of a value without the reader knowing of a K -> K' transition),
+        * is preceded by an update to the deletions counter. This guarantees
+        * any replacement of a T key also implies a D -> D' transition.
+        * If D has not transitioned, the value has yet to be replaced so it
+        * is a valid association with K and is safe to return. If D has
+        * transitioned after a reader has acquired a snapshot then it is
+        * possible that we are in the invalid state of (K, V'). The reader
+        * is then able to attempt a reprobe at which point the only visible
+        * states should be (T, V') or (K', V'). The latter is guaranteed
+        * through memory fencing.
+        */
+       ck_pr_store_64(&map->deletions, map->deletions + 1);
+       ck_pr_fence_store();
+       ck_pr_store_64(&map->n_entries, map->n_entries - 1);
+       return true;
+}
+
+bool
+ck_ht_get_spmc(struct ck_ht *table,
+    ck_ht_hash_t h,
+    ck_ht_entry_t *entry)
+{
+       struct ck_ht_entry *candidate, snapshot;
+       struct ck_ht_map *map;
+       uint64_t d, d_prime;
+
+restart:
+       map = ck_pr_load_ptr(&table->map);
+
+       /*
+        * Platforms that cannot read key and key_length atomically must reprobe
+        * on the scan of any single entry.
+        */
+       d = ck_pr_load_64(&map->deletions);
+
+       if (table->mode & CK_HT_MODE_BYTESTRING) {
+               candidate = ck_ht_map_probe_rd(map, h, &snapshot,
+                   ck_ht_entry_key(entry), ck_ht_entry_key_length(entry));
+       } else {
+               candidate = ck_ht_map_probe_rd(map, h, &snapshot,
+                   (void *)entry->key, sizeof(entry->key));
+       }
+
+       d_prime = ck_pr_load_64(&map->deletions);
+       if (d != d_prime) {
+               /*
+                * It is possible we have read (K, V'). Only valid states are
+                * (K, V), (K', V') and (T, V). Restart load operation in face
+                * of concurrent deletions or replacements.
+                */
+               goto restart;
+       }
+
+       if (candidate == NULL || snapshot.key == CK_HT_KEY_EMPTY)
+               return false;
+
+       *entry = snapshot;
+       return true;
+}
+
+bool
+ck_ht_set_spmc(struct ck_ht *table,
+    ck_ht_hash_t h,
+    ck_ht_entry_t *entry)
+{
+       struct ck_ht_entry snapshot, *candidate, *priority;
+       struct ck_ht_map *map;
+       uint64_t probes, probes_wr;
+       bool empty = false;
+
+       for (;;) {
+               map = table->map;
+
+               if (table->mode & CK_HT_MODE_BYTESTRING) {
+                       candidate = ck_ht_map_probe_wr(map, h, &snapshot, &priority,
+                           ck_ht_entry_key(entry),
+                           ck_ht_entry_key_length(entry),
+                           &probes, &probes_wr);
+               } else {
+                       candidate = ck_ht_map_probe_wr(map, h, &snapshot, &priority,
+                           (void *)entry->key,
+                           sizeof(entry->key),
+                           &probes, &probes_wr);
+               }
+
+               if (priority != NULL) {
+                       probes = probes_wr;
+                       break;
+               }
+
+               if (candidate != NULL)
+                       break;
+
+               if (ck_ht_grow_spmc(table, map->capacity << 1) == false)
+                       return false;
+       }
+
+       if (candidate == NULL) {
+               candidate = priority;
+               empty = true;
+       }
+
+       if (candidate->key != CK_HT_KEY_EMPTY &&
+           priority != NULL && candidate != priority) {
+               /*
+                * If we are replacing an existing entry and an earlier
+                * tombstone was found in the probe sequence then replace
+                * the existing entry in a manner that doesn't affect linearizability
+                * of concurrent get operations. We avoid a state of (K, B)
+                * (where [K, B] -> [K', B]) by guaranteeing a forced reprobe
+                * before transitioning from K to T. (K, B) implies (K, B, D')
+                * so we will reprobe successfully from this transient state.
+                */
+               probes = probes_wr;
+
+#ifndef CK_HT_PP
+               ck_pr_store_64(&priority->key_length, entry->key_length);
+               ck_pr_store_64(&priority->hash, entry->hash);
+#endif
+               ck_pr_store_ptr(&priority->value, (void *)entry->value);
+               ck_pr_fence_store();
+               ck_pr_store_ptr(&priority->key, (void *)entry->key);
+               ck_pr_fence_store();
+               ck_pr_store_64(&map->deletions, map->deletions + 1);
+               ck_pr_fence_store();
+               ck_pr_store_ptr(&candidate->key, (void *)CK_HT_KEY_TOMBSTONE);
+       } else {
+               /*
+                * In this case we are inserting a new entry or replacing
+                * an existing entry. There is no need to force a re-probe
+                * on tombstone replacement due to the fact that previous
+                * deletion counter update would have been published with
+                * respect to any concurrent probes.
+                */
+               bool replace = candidate->key != CK_HT_KEY_EMPTY &&
+                   candidate->key != CK_HT_KEY_TOMBSTONE;
+
+               if (priority != NULL) {
+                       candidate = priority;
+                       probes = probes_wr;
+               }
+
+#ifdef CK_HT_PP
+               ck_pr_store_ptr(&candidate->value, (void *)entry->value);
+               ck_pr_fence_store();
+               ck_pr_store_ptr(&candidate->key, (void *)entry->key);
+#else
+               ck_pr_store_64(&candidate->key_length, entry->key_length);
+               ck_pr_store_64(&candidate->hash, entry->hash);
+               ck_pr_store_ptr(&candidate->value, (void *)entry->value);
+               ck_pr_fence_store();
+               ck_pr_store_ptr(&candidate->key, (void *)entry->key);
+#endif
+
+               /*
+                * If we are inserting a new entry then increment the number
+                * of entries associated with the map.
+                */
+               if (replace == false)
+                       ck_pr_store_64(&map->n_entries, map->n_entries + 1);
+       }
+
+       ck_ht_map_bound_set(map, h, probes);
+
+       /* Enforce a load factor of 0.5. */
+       if (map->n_entries * 2 > map->capacity)
+               ck_ht_grow_spmc(table, map->capacity << 1);
+
+       if (empty == true) {
+               entry->key = CK_HT_KEY_EMPTY;
+       } else {
+               *entry = snapshot;
+       }
+
+       return true;
+}
+
+bool
+ck_ht_put_spmc(struct ck_ht *table,
+    ck_ht_hash_t h,
+    ck_ht_entry_t *entry)
+{
+       struct ck_ht_entry snapshot, *candidate, *priority;
+       struct ck_ht_map *map;
+       uint64_t probes, probes_wr;
+
+       for (;;) {
+               map = table->map;
+
+               if (table->mode & CK_HT_MODE_BYTESTRING) {
+                       candidate = ck_ht_map_probe_wr(map, h, &snapshot, &priority,
+                           ck_ht_entry_key(entry),
+                           ck_ht_entry_key_length(entry),
+                           &probes, &probes_wr);
+               } else {
+                       candidate = ck_ht_map_probe_wr(map, h, &snapshot, &priority,
+                           (void *)entry->key,
+                           sizeof(entry->key),
+                           &probes, &probes_wr);
+               }
+
+               if (candidate != NULL || priority != NULL)
+                       break;
+
+               if (ck_ht_grow_spmc(table, map->capacity << 1) == false)
+                       return false;
+       }
+
+       if (priority != NULL) {
+               /* Re-use tombstone if one was found. */
+               candidate = priority;
+               probes = probes_wr;
+       } else if (candidate->key != CK_HT_KEY_EMPTY &&
+           candidate->key != CK_HT_KEY_TOMBSTONE) {
+               /*
+                * If the snapshot key is non-empty and the value field is not
+                * a tombstone then an identical key was found. As store does
+                * not implement replacement, we will fail.
+                */
+               return false;
+       }
+
+       ck_ht_map_bound_set(map, h, probes);
+
+#ifdef CK_HT_PP
+       ck_pr_store_ptr(&candidate->value, (void *)entry->value);
+       ck_pr_fence_store();
+       ck_pr_store_ptr(&candidate->key, (void *)entry->key);
+#else
+       ck_pr_store_64(&candidate->key_length, entry->key_length);
+       ck_pr_store_64(&candidate->hash, entry->hash);
+       ck_pr_store_ptr(&candidate->value, (void *)entry->value);
+       ck_pr_fence_store();
+       ck_pr_store_ptr(&candidate->key, (void *)entry->key);
+#endif
+
+       ck_pr_store_64(&map->n_entries, map->n_entries + 1);
+
+       /* Enforce a load factor of 0.5. */
+       if (map->n_entries * 2 > map->capacity)
+               ck_ht_grow_spmc(table, map->capacity << 1);
+
+       return true;
+}
+
+void
+ck_ht_destroy(struct ck_ht *table)
+{
+
+       ck_ht_map_destroy(table->m, table->map, false);
+       return;
+}
+
+#endif /* CK_F_HT */
+
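
For reference, a minimal sketch of how the ck_ht table added above can be
wired up. The allocator shim, the seed value, and the key are illustrative
only; CK_HT_MODE_BYTESTRING and struct ck_malloc come from ck_ht.h, which is
assumed to be on the include path.

    #include <ck_ht.h>
    #include <stdbool.h>
    #include <stdlib.h>

    static void *
    ht_malloc(size_t size)
    {
            return malloc(size);
    }

    static void
    ht_free(void *p, size_t size, bool defer)
    {
            /* ck_ht passes the allocation size and a defer hint; plain free ignores both. */
            (void)size;
            (void)defer;
            free(p);
            return;
    }

    static struct ck_malloc allocator = {
            .malloc = ht_malloc,
            .free = ht_free
    };

    int
    main(void)
    {
            struct ck_ht ht;
            ck_ht_hash_t h;

            /* 8 initial slots; a NULL hash callback selects the MurmurHash64A wrapper above. */
            if (ck_ht_init(&ht, CK_HT_MODE_BYTESTRING, NULL, &allocator, 8, 6602834) == false)
                    return 1;

            /* Writers hash the key with the table seed before calling the set/put/get routines. */
            ck_ht_hash(&h, &ht, "example", sizeof("example") - 1);

            ck_ht_destroy(&ht);
            return 0;
    }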

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/f098175e/lib/ck/src/ck_ht_hash.h
----------------------------------------------------------------------
diff --git a/lib/ck/src/ck_ht_hash.h b/lib/ck/src/ck_ht_hash.h
new file mode 100644
index 0000000..254e3b8
--- /dev/null
+++ b/lib/ck/src/ck_ht_hash.h
@@ -0,0 +1,269 @@
+/*
+ * Copyright 2012-2014 Samy Al Bahra
+ * Copyright 2011-2014 AppNexus, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _CK_HT_HASH_H
+#define _CK_HT_HASH_H
+
+/*
+ * This is the Murmur hash written by Austin Appleby.
+ */
+
+#include <ck_stdint.h>
+#include <string.h>
+
+//-----------------------------------------------------------------------------
+// MurmurHash3 was written by Austin Appleby, and is placed in the public
+// domain. The author hereby disclaims copyright to this source code.
+
+// Note - The x86 and x64 versions do _not_ produce the same results, as the
+// algorithms are optimized for their respective platforms. You can still
+// compile and run any of them on any platform, but your performance with the
+// non-native version will be less than optimal.
+
+//-----------------------------------------------------------------------------
+// Platform-specific functions and macros
+
+// Microsoft Visual Studio
+
+#if defined(_MSC_VER)
+
+#define FORCE_INLINE    __forceinline
+
+#include <stdlib.h>
+
+#define ROTL32(x,y)     _rotl(x,y)
+#define ROTL64(x,y)     _rotl64(x,y)
+
+#define BIG_CONSTANT(x) (x)
+
+// Other compilers
+
+#else   // defined(_MSC_VER)
+
+#define FORCE_INLINE inline __attribute__((always_inline))
+
+static inline uint32_t rotl32 ( uint32_t x, int8_t r )
+{
+  return (x << r) | (x >> (32 - r));
+}
+
+static inline uint64_t rotl64 ( uint64_t x, int8_t r )
+{
+  return (x << r) | (x >> (64 - r));
+}
+
+#define ROTL32(x,y)     rotl32(x,y)
+#define ROTL64(x,y)     rotl64(x,y)
+
+#define BIG_CONSTANT(x) (x##LLU)
+
+#endif // !defined(_MSC_VER)
+
+//-----------------------------------------------------------------------------
+// Block read - if your platform needs to do endian-swapping or can only
+// handle aligned reads, do the conversion here
+
+FORCE_INLINE static uint32_t getblock ( const uint32_t * p, int i )
+{
+  return p[i];
+}
+
+//-----------------------------------------------------------------------------
+// Finalization mix - force all bits of a hash block to avalanche
+
+FORCE_INLINE static uint32_t fmix ( uint32_t h )
+{
+  h ^= h >> 16;
+  h *= 0x85ebca6b;
+  h ^= h >> 13;
+  h *= 0xc2b2ae35;
+  h ^= h >> 16;
+
+  return h;
+}
+
+//-----------------------------------------------------------------------------
+
+static inline void MurmurHash3_x86_32 ( const void * key, int len,
+                          uint32_t seed, uint32_t * out )
+{
+  const uint8_t * data = (const uint8_t*)key;
+  const int nblocks = len / 4;
+  int i;
+
+  uint32_t h1 = seed;
+
+  uint32_t c1 = 0xcc9e2d51;
+  uint32_t c2 = 0x1b873593;
+
+  //----------
+  // body
+
+  const uint32_t * blocks = (const uint32_t *)(void *)(data + nblocks*4);
+
+  for(i = -nblocks; i; i++)
+  {
+    uint32_t k1 = getblock(blocks,i);
+
+    k1 *= c1;
+    k1 = ROTL32(k1,15);
+    k1 *= c2;
+
+    h1 ^= k1;
+    h1 = ROTL32(h1,13);
+    h1 = h1*5+0xe6546b64;
+  }
+
+  //----------
+  // tail
+
+  const uint8_t * tail = (const uint8_t*)(data + nblocks*4);
+
+  uint32_t k1 = 0;
+
+  switch(len & 3)
+  {
+  case 3: k1 ^= tail[2] << 16;
+  case 2: k1 ^= tail[1] << 8;
+  case 1: k1 ^= tail[0];
+          k1 *= c1; k1 = ROTL32(k1,15); k1 *= c2; h1 ^= k1;
+  };
+
+  //----------
+  // finalization
+
+  h1 ^= len;
+
+  h1 = fmix(h1);
+
+  *(uint32_t *)out = h1;
+}
+
+static inline uint64_t MurmurHash64A ( const void * key, int len, uint64_t seed )
+{
+  const uint64_t m = BIG_CONSTANT(0xc6a4a7935bd1e995);
+  const int r = 47;
+
+  uint64_t h = seed ^ (len * m);
+
+  const uint64_t * data = (const uint64_t *)key;
+  const uint64_t * end = data + (len/8);
+
+  while(data != end)
+  {
+    uint64_t k;
+ 
+    if (!((uintptr_t)data & 0x7))
+           k = *data++;
+    else {
+           memcpy(&k, (void *)data, sizeof(k));
+           data++;
+    }
+
+    k *= m;
+    k ^= k >> r;
+    k *= m;
+
+    h ^= k;
+    h *= m;
+  }
+
+  const unsigned char * data2 = (const unsigned char*)data;
+
+  switch(len & 7)
+  {
+  case 7: h ^= (uint64_t)(data2[6]) << 48;
+  case 6: h ^= (uint64_t)(data2[5]) << 40;
+  case 5: h ^= (uint64_t)(data2[4]) << 32;
+  case 4: h ^= (uint64_t)(data2[3]) << 24;
+  case 3: h ^= (uint64_t)(data2[2]) << 16;
+  case 2: h ^= (uint64_t)(data2[1]) << 8;
+  case 1: h ^= (uint64_t)(data2[0]);
+          h *= m;
+  };
+
+  h ^= h >> r;
+  h *= m;
+  h ^= h >> r;
+
+  return h;
+}
+
+
+// 64-bit hash for 32-bit platforms
+
+static inline uint64_t MurmurHash64B ( const void * key, int len, uint64_t seed )
+{
+  const uint32_t m = 0x5bd1e995;
+  const int r = 24;
+
+  uint32_t h1 = (uint32_t)(seed) ^ len;
+  uint32_t h2 = (uint32_t)(seed >> 32);
+
+  const uint32_t * data = (const uint32_t *)key;
+
+  while(len >= 8)
+  {
+    uint32_t k1 = *data++;
+    k1 *= m; k1 ^= k1 >> r; k1 *= m;
+    h1 *= m; h1 ^= k1;
+    len -= 4;
+
+    uint32_t k2 = *data++;
+    k2 *= m; k2 ^= k2 >> r; k2 *= m;
+    h2 *= m; h2 ^= k2;
+    len -= 4;
+  }
+
+  if(len >= 4)
+  {
+    uint32_t k1 = *data++;
+    k1 *= m; k1 ^= k1 >> r; k1 *= m;
+    h1 *= m; h1 ^= k1;
+    len -= 4;
+  }
+
+  switch(len)
+  {
+  case 3: h2 ^= ((unsigned char*)data)[2] << 16;
+  case 2: h2 ^= ((unsigned char*)data)[1] << 8;
+  case 1: h2 ^= ((unsigned char*)data)[0];
+      h2 *= m;
+  };
+
+  h1 ^= h2 >> 18; h1 *= m;
+  h2 ^= h1 >> 22; h2 *= m;
+  h1 ^= h2 >> 17; h1 *= m;
+  h2 ^= h1 >> 19; h2 *= m;
+
+  uint64_t h = h1;
+
+  h = (h << 32) | h2;
+
+  return h;
+}
+
+#endif /* _CK_HT_HASH_H */
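
A small standalone check of the MurmurHash64A routine defined above; the key
string and seed are arbitrary, and only determinism for a fixed seed is
assumed.

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #include "ck_ht_hash.h"

    int
    main(void)
    {
            const char *key = "traffic server";
            uint64_t a = MurmurHash64A(key, (int)strlen(key), 0x9747b28c);
            uint64_t b = MurmurHash64A(key, (int)strlen(key), 0x9747b28c);

            /* The same key and seed always produce the same 64-bit value. */
            assert(a == b);

            /* ck_ht feeds this value into ck_ht_hash_t for probing. */
            printf("%016llx\n", (unsigned long long)a);
            return 0;
    }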

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/f098175e/lib/ck/src/ck_internal.h
----------------------------------------------------------------------
diff --git a/lib/ck/src/ck_internal.h b/lib/ck/src/ck_internal.h
new file mode 100644
index 0000000..ea51a6b
--- /dev/null
+++ b/lib/ck/src/ck_internal.h
@@ -0,0 +1,119 @@
+/*
+ * Copyright 2011-2014 Samy Al Bahra.
+ * Copyright 2011 David Joseph.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * Several of these are from: http://graphics.stanford.edu/~seander/bithacks.html
+ */
+
+#define CK_INTERNAL_LOG_0 (0xAAAAAAAA)
+#define CK_INTERNAL_LOG_1 (0xCCCCCCCC)
+#define CK_INTERNAL_LOG_2 (0xF0F0F0F0)
+#define CK_INTERNAL_LOG_3 (0xFF00FF00)
+#define CK_INTERNAL_LOG_4 (0xFFFF0000)
+
+CK_CC_INLINE static uint32_t
+ck_internal_log(uint32_t v)
+{
+        uint32_t r = (v & CK_INTERNAL_LOG_0) != 0;
+
+       r |= ((v & CK_INTERNAL_LOG_4) != 0) << 4;
+       r |= ((v & CK_INTERNAL_LOG_3) != 0) << 3;
+       r |= ((v & CK_INTERNAL_LOG_2) != 0) << 2;
+       r |= ((v & CK_INTERNAL_LOG_1) != 0) << 1;
+        return (r);
+}
+
+CK_CC_INLINE static uint32_t
+ck_internal_power_2(uint32_t v)
+{
+
+        --v;
+        v |= v >> 1;
+        v |= v >> 2;
+        v |= v >> 4;
+        v |= v >> 8;
+        v |= v >> 16;
+        return (++v);
+}
+
+CK_CC_INLINE static unsigned long
+ck_internal_max(unsigned long x, unsigned long y)
+{
+
+       return x ^ ((x ^ y) & -(x < y));
+}
+
+CK_CC_INLINE static uint64_t
+ck_internal_max_64(uint64_t x, uint64_t y)
+{
+
+       return x ^ ((x ^ y) & -(x < y));
+}
+
+CK_CC_INLINE static uint32_t
+ck_internal_max_32(uint32_t x, uint32_t y)
+{
+
+       return x ^ ((x ^ y) & -(x < y));
+}
+
+CK_CC_INLINE static unsigned long
+ck_internal_bsf(unsigned long v)
+{
+#if defined(__GNUC__)
+       return __builtin_ffs(v);
+#else
+       unsigned int i;
+       const unsigned int s = sizeof(unsigned long) * 8 - 1;
+
+       for (i = 0; i < s; i++) {
+               if (v & (1UL << (s - i)))
+                       return sizeof(unsigned long) * 8 - i;
+       }
+
+       return 1;
+#endif /* !__GNUC__ */
+}
+
+CK_CC_INLINE static uint64_t
+ck_internal_bsf_64(uint64_t v)
+{
+#if defined(__GNUC__)
+       return __builtin_ffs(v);
+#else
+       unsigned int i;
+       const unsigned int s = sizeof(unsigned long) * 8 - 1;
+
+       for (i = 0; i < s; i++) {
+               if (v & (1ULL << (63U - i)))
+                       return i;
+       }
+#endif /* !__GNUC__ */
+
+       return 1;
+}
+
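
The table sizing in ck_ht_map_create and ck_rhs_map_create relies on
ck_internal_power_2 rounding a requested entry count up to the next power of
two by smearing the highest set bit rightward. A standalone illustration of
that trick (the function name here is ad hoc, not part of ck_internal.h):

    #include <assert.h>
    #include <stdint.h>

    static uint32_t
    round_up_pow2(uint32_t v)
    {
            /* Propagate the top set bit into every lower position, then add one. */
            v--;
            v |= v >> 1;
            v |= v >> 2;
            v |= v >> 4;
            v |= v >> 8;
            v |= v >> 16;
            return v + 1;
    }

    int
    main(void)
    {
            assert(round_up_pow2(1) == 1);
            assert(round_up_pow2(3) == 4);
            assert(round_up_pow2(1000) == 1024);
            assert(round_up_pow2(1024) == 1024);
            return 0;
    }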

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/f098175e/lib/ck/src/ck_rhs.c
----------------------------------------------------------------------
diff --git a/lib/ck/src/ck_rhs.c b/lib/ck/src/ck_rhs.c
new file mode 100644
index 0000000..553d718
--- /dev/null
+++ b/lib/ck/src/ck_rhs.c
@@ -0,0 +1,1335 @@
+/*
+ * Copyright 2014 Olivier Houchard
+ * Copyright 2012-2014 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <ck_cc.h>
+#include <ck_rhs.h>
+#include <ck_limits.h>
+#include <ck_md.h>
+#include <ck_pr.h>
+#include <ck_stdint.h>
+#include <stdbool.h>
+#include <string.h>
+
+#include "ck_internal.h"
+
+#ifndef CK_RHS_PROBE_L1_SHIFT
+#define CK_RHS_PROBE_L1_SHIFT 3ULL
+#endif /* CK_RHS_PROBE_L1_SHIFT */
+
+#define CK_RHS_PROBE_L1 (1 << CK_RHS_PROBE_L1_SHIFT)
+#define CK_RHS_PROBE_L1_MASK (CK_RHS_PROBE_L1 - 1)
+
+#ifndef CK_RHS_PROBE_L1_DEFAULT
+#define CK_RHS_PROBE_L1_DEFAULT CK_MD_CACHELINE
+#endif
+
+#define CK_RHS_VMA_MASK ((uintptr_t)((1ULL << CK_MD_VMA_BITS) - 1))
+#define CK_RHS_VMA(x)  \
+       ((void *)((uintptr_t)(x) & CK_RHS_VMA_MASK))
+
+#define CK_RHS_EMPTY     NULL
+#define CK_RHS_G               (1024)
+#define CK_RHS_G_MASK  (CK_RHS_G - 1)
+
+#if defined(CK_F_PR_LOAD_8) && defined(CK_F_PR_STORE_8)
+#define CK_RHS_WORD          uint8_t
+#define CK_RHS_WORD_MAX            UINT8_MAX
+#define CK_RHS_STORE(x, y)   ck_pr_store_8(x, y)
+#define CK_RHS_LOAD(x)       ck_pr_load_8(x)
+#elif defined(CK_F_PR_LOAD_16) && defined(CK_F_PR_STORE_16)
+#define CK_RHS_WORD          uint16_t
+#define CK_RHS_WORD_MAX            UINT16_MAX
+#define CK_RHS_STORE(x, y)   ck_pr_store_16(x, y)
+#define CK_RHS_LOAD(x)       ck_pr_load_16(x)
+#elif defined(CK_F_PR_LOAD_32) && defined(CK_F_PR_STORE_32)
+#define CK_RHS_WORD          uint32_t
+#define CK_RHS_WORD_MAX            UINT32_MAX
+#define CK_RHS_STORE(x, y)   ck_pr_store_32(x, y)
+#define CK_RHS_LOAD(x)       ck_pr_load_32(x)
+#else
+#error "ck_rhs is not supported on your platform."
+#endif
+
+#define CK_RHS_MAX_WANTED      0xffff
+
+enum ck_rhs_probe_behavior {
+       CK_RHS_PROBE = 0,       /* Default behavior. */
+       CK_RHS_PROBE_RH,                /* Short-circuit if RH slot found. */
+       CK_RHS_PROBE_INSERT,    /* Short-circuit on probe bound if tombstone found. */
+
+       CK_RHS_PROBE_ROBIN_HOOD,        /* Look for the first slot available for the entry we are about to replace, only used to internally implement Robin Hood */
+       CK_RHS_PROBE_NO_RH,     /* Don't do the RH dance */
+};
+struct ck_rhs_entry_desc {
+       unsigned int probes;
+       unsigned short wanted;
+       CK_RHS_WORD probe_bound;
+       bool in_rh;
+       void *entry;
+} CK_CC_ALIGN(16);
+
+struct ck_rhs_no_entry_desc {
+       unsigned int probes;
+       unsigned short wanted;
+       CK_RHS_WORD probe_bound;
+       bool in_rh;
+} CK_CC_ALIGN(8);
+
+typedef long ck_rhs_probe_cb_t(struct ck_rhs *hs,
+    struct ck_rhs_map *map,
+    unsigned long *n_probes,
+    long *priority,
+    unsigned long h,
+    const void *key,
+    void **object,
+    unsigned long probe_limit,
+    enum ck_rhs_probe_behavior behavior);
+
+struct ck_rhs_map {
+       unsigned int generation[CK_RHS_G];
+       unsigned int probe_maximum;
+       unsigned long mask;
+       unsigned long step;
+       unsigned int probe_limit;
+       unsigned long n_entries;
+       unsigned long capacity;
+       unsigned long size;
+       char offset_mask;
+       union {
+               struct ck_rhs_entry_desc *descs;
+               struct ck_rhs_no_entry {
+                       void **entries;
+                       struct ck_rhs_no_entry_desc *descs;
+               } no_entries;
+       } entries;
+       bool read_mostly;
+       ck_rhs_probe_cb_t *probe_func;
+};
+
+static CK_CC_INLINE void *
+ck_rhs_entry(struct ck_rhs_map *map, long offset)
+{
+
+       if (map->read_mostly)
+               return (map->entries.no_entries.entries[offset]);
+       else
+               return (map->entries.descs[offset].entry);
+}
+
+static CK_CC_INLINE void **
+ck_rhs_entry_addr(struct ck_rhs_map *map, long offset)
+{
+
+       if (map->read_mostly)
+               return (&map->entries.no_entries.entries[offset]);
+       else
+               return (&map->entries.descs[offset].entry);
+}
+
+static CK_CC_INLINE struct ck_rhs_entry_desc *
+ck_rhs_desc(struct ck_rhs_map *map, long offset)
+{
+
+       if (CK_CC_UNLIKELY(map->read_mostly))
+               return ((struct ck_rhs_entry_desc *)(void *)&map->entries.no_entries.descs[offset]);
+       else
+               return (&map->entries.descs[offset]);
+}
+
+static CK_CC_INLINE void
+ck_rhs_wanted_inc(struct ck_rhs_map *map, long offset)
+{
+
+       if (CK_CC_UNLIKELY(map->read_mostly))
+               map->entries.no_entries.descs[offset].wanted++;
+       else
+               map->entries.descs[offset].wanted++;
+}
+
+static CK_CC_INLINE unsigned int
+ck_rhs_probes(struct ck_rhs_map *map, long offset)
+{
+
+       if (CK_CC_UNLIKELY(map->read_mostly))
+               return (map->entries.no_entries.descs[offset].probes);
+       else
+               return (map->entries.descs[offset].probes);
+}
+
+static CK_CC_INLINE void
+ck_rhs_set_probes(struct ck_rhs_map *map, long offset, unsigned int value)
+{
+
+       if (CK_CC_UNLIKELY(map->read_mostly))
+               map->entries.no_entries.descs[offset].probes = value;
+       else
+               map->entries.descs[offset].probes = value;
+}
+
+static CK_CC_INLINE CK_RHS_WORD
+ck_rhs_probe_bound(struct ck_rhs_map *map, long offset)
+{
+
+       if (CK_CC_UNLIKELY(map->read_mostly))
+               return (map->entries.no_entries.descs[offset].probe_bound);
+       else
+               return (map->entries.descs[offset].probe_bound);
+}
+
+static CK_CC_INLINE CK_RHS_WORD *
+ck_rhs_probe_bound_addr(struct ck_rhs_map *map, long offset)
+{
+
+       if (CK_CC_UNLIKELY(map->read_mostly))
+               return (&map->entries.no_entries.descs[offset].probe_bound);
+       else
+               return (&map->entries.descs[offset].probe_bound);
+}
+
+
+static CK_CC_INLINE bool
+ck_rhs_in_rh(struct ck_rhs_map *map, long offset)
+{
+
+       if (CK_CC_UNLIKELY(map->read_mostly))
+               return (map->entries.no_entries.descs[offset].in_rh);
+       else
+               return (map->entries.descs[offset].in_rh);
+}
+
+static CK_CC_INLINE void
+ck_rhs_set_rh(struct ck_rhs_map *map, long offset)
+{
+
+       if (CK_CC_UNLIKELY(map->read_mostly))
+               map->entries.no_entries.descs[offset].in_rh = true;
+       else
+               map->entries.descs[offset].in_rh = true;
+}
+
+static CK_CC_INLINE void
+ck_rhs_unset_rh(struct ck_rhs_map *map, long offset)
+{
+
+       if (CK_CC_UNLIKELY(map->read_mostly))
+               map->entries.no_entries.descs[offset].in_rh = false;
+       else
+               map->entries.descs[offset].in_rh = false;
+}
+
+
+#define CK_RHS_LOAD_FACTOR     50
+
+static ck_rhs_probe_cb_t ck_rhs_map_probe;
+static ck_rhs_probe_cb_t ck_rhs_map_probe_rm;
+
+void
+ck_rhs_iterator_init(struct ck_rhs_iterator *iterator)
+{
+
+       iterator->cursor = NULL;
+       iterator->offset = 0;
+       return;
+}
+
+bool
+ck_rhs_next(struct ck_rhs *hs, struct ck_rhs_iterator *i, void **key)
+{
+       struct ck_rhs_map *map = hs->map;
+       void *value;
+
+       if (i->offset >= map->capacity)
+               return false;
+
+       do {
+               value = ck_rhs_entry(map, i->offset);
+               if (value != CK_RHS_EMPTY) {
+#ifdef CK_RHS_PP
+                       if (hs->mode & CK_RHS_MODE_OBJECT)
+                               value = CK_RHS_VMA(value);
+#endif
+                       i->offset++;
+                       *key = value;
+                       return true;
+               }
+       } while (++i->offset < map->capacity);
+
+       return false;
+}
+
+void
+ck_rhs_stat(struct ck_rhs *hs, struct ck_rhs_stat *st)
+{
+       struct ck_rhs_map *map = hs->map;
+
+       st->n_entries = map->n_entries;
+       st->probe_maximum = map->probe_maximum;
+       return;
+}
+
+unsigned long
+ck_rhs_count(struct ck_rhs *hs)
+{
+
+       return hs->map->n_entries;
+}
+
+static void
+ck_rhs_map_destroy(struct ck_malloc *m, struct ck_rhs_map *map, bool defer)
+{
+
+       m->free(map, map->size, defer);
+       return;
+}
+
+void
+ck_rhs_destroy(struct ck_rhs *hs)
+{
+
+       ck_rhs_map_destroy(hs->m, hs->map, false);
+       return;
+}
+
+static struct ck_rhs_map *
+ck_rhs_map_create(struct ck_rhs *hs, unsigned long entries)
+{
+       struct ck_rhs_map *map;
+       unsigned long size, n_entries, limit;
+
+       n_entries = ck_internal_power_2(entries);
+       if (n_entries < CK_RHS_PROBE_L1)
+               return NULL;
+
+       if (hs->mode & CK_RHS_MODE_READ_MOSTLY)
+               size = sizeof(struct ck_rhs_map) +
+                   (sizeof(void *) * n_entries +
+                    sizeof(struct ck_rhs_no_entry_desc) * n_entries + 
+                    2 * CK_MD_CACHELINE - 1);
+       else
+               size = sizeof(struct ck_rhs_map) +
+                   (sizeof(struct ck_rhs_entry_desc) * n_entries + 
+                    CK_MD_CACHELINE - 1);
+       map = hs->m->malloc(size);
+       if (map == NULL)
+               return NULL;
+       map->read_mostly = !!(hs->mode & CK_RHS_MODE_READ_MOSTLY);
+
+       map->size = size;
+       /* We should probably use a more intelligent heuristic for default probe length. */
+       limit = ck_internal_max(n_entries >> (CK_RHS_PROBE_L1_SHIFT + 2), CK_RHS_PROBE_L1_DEFAULT);
+       if (limit > UINT_MAX)
+               limit = UINT_MAX;
+
+       map->probe_limit = (unsigned int)limit;
+       map->probe_maximum = 0;
+       map->capacity = n_entries;
+       map->step = ck_internal_bsf(n_entries);
+       map->mask = n_entries - 1;
+       map->n_entries = 0;
+
+       /* Align map allocation to cache line. */
+       if (map->read_mostly) {
+               map->entries.no_entries.entries = (void *)(((uintptr_t)&map[1] +
+                   CK_MD_CACHELINE - 1) & ~(CK_MD_CACHELINE - 1));
+               map->entries.no_entries.descs = (void *)(((uintptr_t)map->entries.no_entries.entries + (sizeof(void *) * n_entries) + CK_MD_CACHELINE - 1) &~ (CK_MD_CACHELINE - 1));
+               memset(map->entries.no_entries.entries, 0,
+                   sizeof(void *) * n_entries);
+               memset(map->entries.no_entries.descs, 0,
+                   sizeof(struct ck_rhs_no_entry_desc) * n_entries);
+               map->offset_mask = (CK_MD_CACHELINE / sizeof(void *)) - 1;
+               map->probe_func = ck_rhs_map_probe_rm;
+
+       } else {
+               map->entries.descs = (void *)(((uintptr_t)&map[1] +
+                   CK_MD_CACHELINE - 1) & ~(CK_MD_CACHELINE - 1));
+               memset(map->entries.descs, 0, sizeof(struct ck_rhs_entry_desc) * n_entries);
+               map->offset_mask = (CK_MD_CACHELINE / sizeof(struct ck_rhs_entry_desc)) - 1;
+               map->probe_func = ck_rhs_map_probe;
+       }
+       memset(map->generation, 0, sizeof map->generation);
+
+       /* Commit entries purge with respect to map publication. */
+       ck_pr_fence_store();
+       return map;
+}
+
+bool
+ck_rhs_reset_size(struct ck_rhs *hs, unsigned long capacity)
+{
+       struct ck_rhs_map *map, *previous;
+
+       previous = hs->map;
+       map = ck_rhs_map_create(hs, capacity);
+       if (map == NULL)
+               return false;
+
+       ck_pr_store_ptr(&hs->map, map);
+       ck_rhs_map_destroy(hs->m, previous, true);
+       return true;
+}
+
+bool
+ck_rhs_reset(struct ck_rhs *hs)
+{
+       struct ck_rhs_map *previous;
+
+       previous = hs->map;
+       return ck_rhs_reset_size(hs, previous->capacity);
+}
+
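+/*
+ * Successive probes first step through the slots of the current
+ * cache-line-sized group (offset_mask covers one cache line of entries)
+ * and only jump further once the group is exhausted, keeping early
+ * probes cache-local.
+ */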
+static inline unsigned long
+ck_rhs_map_probe_next(struct ck_rhs_map *map,
+    unsigned long offset,
+    unsigned long probes)
+{
+
+       if (probes & map->offset_mask) {
+               offset = (offset &~ map->offset_mask) + 
+                   ((offset + 1) & map->offset_mask);
+               return offset;
+       } else
+               return (offset + probes) & map->mask;
+}
+
+static inline unsigned long
+ck_rhs_map_probe_prev(struct ck_rhs_map *map, unsigned long offset,
+    unsigned long probes)
+{
+
+       if (probes & map->offset_mask) {
+               offset = (offset &~ map->offset_mask) + ((offset - 1) & 
+                   map->offset_mask);
+               return offset;
+       } else 
+               return ((offset - probes) & map->mask);
+}
+
+
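+/*
+ * Track the longest probe sequence globally (probe_maximum) and, unless the
+ * map is read-mostly, per home bucket (probe_bound), so that lookups can
+ * stop probing early.
+ */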
+static inline void
+ck_rhs_map_bound_set(struct ck_rhs_map *m,
+    unsigned long h,
+    unsigned long n_probes)
+{
+       unsigned long offset = h & m->mask;
+       struct ck_rhs_entry_desc *desc;
+
+       if (n_probes > m->probe_maximum)
+               ck_pr_store_uint(&m->probe_maximum, n_probes);
+       if (!(m->read_mostly)) {
+               desc = &m->entries.descs[offset];
+
+               if (desc->probe_bound < n_probes) {
+                       if (n_probes > CK_RHS_WORD_MAX)
+                               n_probes = CK_RHS_WORD_MAX;
+
+                       CK_RHS_STORE(&desc->probe_bound, n_probes);
+                       ck_pr_fence_store();
+               }
+       }
+
+       return;
+}
+
+static inline unsigned int
+ck_rhs_map_bound_get(struct ck_rhs_map *m, unsigned long h)
+{
+       unsigned long offset = h & m->mask;
+       unsigned int r = CK_RHS_WORD_MAX;
+
+       if (m->read_mostly)
+               r = ck_pr_load_uint(&m->probe_maximum);
+       else {
+               r = CK_RHS_LOAD(&m->entries.descs[offset].probe_bound);
+               if (r == CK_RHS_WORD_MAX)
+                       r = ck_pr_load_uint(&m->probe_maximum);
+       }
+       return r;
+}
+
+bool
+ck_rhs_grow(struct ck_rhs *hs,
+    unsigned long capacity)
+{
+       struct ck_rhs_map *map, *update;
+       void *previous, *prev_saved;
+       unsigned long k, offset, probes;
+
+restart:
+       map = hs->map;
+       if (map->capacity > capacity)
+               return false;
+
+       update = ck_rhs_map_create(hs, capacity);
+       if (update == NULL)
+               return false;
+
+       for (k = 0; k < map->capacity; k++) {
+               unsigned long h;
+
+               prev_saved = previous = ck_rhs_entry(map, k);
+               if (previous == CK_RHS_EMPTY)
+                       continue;
+
+#ifdef CK_RHS_PP
+               if (hs->mode & CK_RHS_MODE_OBJECT)
+                       previous = CK_RHS_VMA(previous);
+#endif
+
+               h = hs->hf(previous, hs->seed);
+               offset = h & update->mask;
+               probes = 0;
+
+               for (;;) {
+                       void **cursor = ck_rhs_entry_addr(update, offset);
+
+                       if (probes++ == update->probe_limit) {
+                               /*
+                                * We have hit the probe limit, map needs to be even larger.
+                                */
+                               ck_rhs_map_destroy(hs->m, update, false);
+                               capacity <<= 1;
+                               goto restart;
+                       }
+
+                       if (CK_CC_LIKELY(*cursor == CK_RHS_EMPTY)) {
+                               *cursor = prev_saved;
+                               update->n_entries++;
+                               ck_rhs_set_probes(update, offset, probes);
+                               ck_rhs_map_bound_set(update, h, probes);
+                               break;
+                       } else if (ck_rhs_probes(update, offset) < probes) {
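+                               /*
+                                * Robin Hood displacement: the resident entry
+                                * is closer to its home bucket than the entry
+                                * being inserted, so the new entry takes this
+                                * slot and the evicted one continues probing
+                                * from its own probe count.
+                                */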
+                               void *tmp = prev_saved;
+                               unsigned int old_probes;
+                               prev_saved = previous = *cursor;
+#ifdef CK_RHS_PP
+                               if (hs->mode & CK_RHS_MODE_OBJECT)
+                                       previous = CK_RHS_VMA(previous);
+#endif
+                               *cursor = tmp;
+                               ck_rhs_map_bound_set(update, h, probes);
+                               h = hs->hf(previous, hs->seed);
+                               old_probes = ck_rhs_probes(update, offset);
+                               ck_rhs_set_probes(update, offset, probes);
+                               probes = old_probes - 1;
+                               continue;
+                       }
+                       ck_rhs_wanted_inc(update, offset);
+                       offset = ck_rhs_map_probe_next(update, offset,  probes);
+               }
+
+       }
+
+       ck_pr_fence_store();
+       ck_pr_store_ptr(&hs->map, update);
+       ck_rhs_map_destroy(hs->m, map, true);
+       return true;
+}
+
+bool
+ck_rhs_rebuild(struct ck_rhs *hs)
+{
+
+       return ck_rhs_grow(hs, hs->map->capacity);
+}
+
+static long
+ck_rhs_map_probe_rm(struct ck_rhs *hs,
+    struct ck_rhs_map *map,
+    unsigned long *n_probes,
+    long *priority,
+    unsigned long h,
+    const void *key,
+    void **object,
+    unsigned long probe_limit,
+    enum ck_rhs_probe_behavior behavior)
+{
+       void *k;
+       const void *compare;
+       long pr = -1;
+       unsigned long offset, probes, opl;
+
+#ifdef CK_RHS_PP
+       /* If we are storing object pointers, then we may leverage pointer packing. */
+       unsigned long hv = 0;
+
+       if (hs->mode & CK_RHS_MODE_OBJECT) {
+               hv = (h >> 25) & CK_RHS_KEY_MASK;
+               compare = CK_RHS_VMA(key);
+       } else {
+               compare = key;
+       }
+#else
+       compare = key;
+#endif
+       *object = NULL;
+       if (behavior != CK_RHS_PROBE_ROBIN_HOOD) {
+               probes = 0;
+               offset = h & map->mask;
+       } else {
+               /* Restart from the bucket we were previously in */
+               probes = *n_probes;
+               offset = ck_rhs_map_probe_next(map, *priority,
+                   probes);
+       }
+       opl = probe_limit;
+
+       for (;;) {
+               if (probes++ == probe_limit) {
+                       if (probe_limit == opl || pr != -1) {
+                               k = CK_RHS_EMPTY;
+                               goto leave;
+                       }
+                       /*
+                        * If no eligible slot has been found yet, continue probe
+                        * sequence with original probe limit.
+                        */
+                       probe_limit = opl;
+               }
+               k = ck_pr_load_ptr(&map->entries.no_entries.entries[offset]);
+               if (k == CK_RHS_EMPTY)
+                       goto leave;
+
+               if (behavior != CK_RHS_PROBE_NO_RH) {
+                       struct ck_rhs_entry_desc *desc = (void *)&map->entries.no_entries.descs[offset];
+
+                       if (pr == -1 && 
+                           desc->in_rh == false && desc->probes < probes) {
+                               pr = offset;
+                               *n_probes = probes;
+
+                               if (behavior == CK_RHS_PROBE_RH ||
+                                   behavior == CK_RHS_PROBE_ROBIN_HOOD) {
+                                       k = CK_RHS_EMPTY;
+                                       goto leave;
+                               }
+                       }
+               }
+
+               if (behavior != CK_RHS_PROBE_ROBIN_HOOD) {
+#ifdef CK_RHS_PP
+                       if (hs->mode & CK_RHS_MODE_OBJECT) {
+                               if (((uintptr_t)k >> CK_MD_VMA_BITS) != hv) {
+                                       offset = ck_rhs_map_probe_next(map, offset, probes);
+                                       continue;
+                               }
+
+                               k = CK_RHS_VMA(k);
+                       }
+#endif
+
+                       if (k == compare)
+                               goto leave;
+
+                       if (hs->compare == NULL) {
+                               offset = ck_rhs_map_probe_next(map, offset, probes);
+                               continue;
+                       }
+
+                       if (hs->compare(k, key) == true)
+                               goto leave;
+               }
+               offset = ck_rhs_map_probe_next(map, offset, probes);
+       }
+leave:
+       if (probes > probe_limit) {
+               offset = -1;
+       } else {
+               *object = k;
+       }
+
+       if (pr == -1)
+               *n_probes = probes;
+
+       *priority = pr;
+       return offset;
+}
+
+static long
+ck_rhs_map_probe(struct ck_rhs *hs,
+    struct ck_rhs_map *map,
+    unsigned long *n_probes,
+    long *priority,
+    unsigned long h,
+    const void *key,
+    void **object,
+    unsigned long probe_limit,
+    enum ck_rhs_probe_behavior behavior)
+{
+       void *k;
+       const void *compare;
+       long pr = -1;
+       unsigned long offset, probes, opl;
+
+#ifdef CK_RHS_PP
+       /* If we are storing object pointers, then we may leverage pointer packing. */
+       unsigned long hv = 0;
+
+       if (hs->mode & CK_RHS_MODE_OBJECT) {
+               hv = (h >> 25) & CK_RHS_KEY_MASK;
+               compare = CK_RHS_VMA(key);
+       } else {
+               compare = key;
+       }
+#else
+       compare = key;
+#endif
+
+       *object = NULL;
+       if (behavior != CK_RHS_PROBE_ROBIN_HOOD) {
+               probes = 0;
+               offset = h & map->mask;
+       } else {
+               /* Restart from the bucket we were previously in */
+               probes = *n_probes;
+               offset = ck_rhs_map_probe_next(map, *priority,
+                   probes);
+       }
+       opl = probe_limit;
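+       /*
+        * For insertions, the probe is first bounded by the cached probe bound
+        * for this home bucket; if neither an existing key nor an eligible slot
+        * is found within it, the loop below falls back to the original probe
+        * limit (opl).
+        */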
+       if (behavior == CK_RHS_PROBE_INSERT)
+               probe_limit = ck_rhs_map_bound_get(map, h);
+
+       for (;;) {
+               if (probes++ == probe_limit) {
+                       if (probe_limit == opl || pr != -1) {
+                               k = CK_RHS_EMPTY;
+                               goto leave;
+                       }
+                       /*
+                        * If no eligible slot has been found yet, continue probe
+                        * sequence with original probe limit.
+                        */
+                       probe_limit = opl;
+               }
+               k = ck_pr_load_ptr(&map->entries.descs[offset].entry);
+               if (k == CK_RHS_EMPTY)
+                       goto leave;
+               if ((behavior != CK_RHS_PROBE_NO_RH)) {
+                       struct ck_rhs_entry_desc *desc = &map->entries.descs[offset];
+
+                       if (pr == -1 && 
+                           desc->in_rh == false && desc->probes < probes) {
+                               pr = offset;
+                               *n_probes = probes;
+
+                               if (behavior == CK_RHS_PROBE_RH ||
+                                   behavior == CK_RHS_PROBE_ROBIN_HOOD) {
+                                       k = CK_RHS_EMPTY;
+                                       goto leave;
+                               }
+                       }
+               }
+
+               if (behavior != CK_RHS_PROBE_ROBIN_HOOD) {
+#ifdef CK_RHS_PP
+                       if (hs->mode & CK_RHS_MODE_OBJECT) {
+                               if (((uintptr_t)k >> CK_MD_VMA_BITS) != hv) {
+                                       offset = ck_rhs_map_probe_next(map, offset, probes);
+                                       continue;
+                               }
+
+                               k = CK_RHS_VMA(k);
+                       }
+#endif
+
+                       if (k == compare)
+                               goto leave;
+
+                       if (hs->compare == NULL) {
+                               offset = ck_rhs_map_probe_next(map, offset, probes);
+                               continue;
+                       }
+
+                       if (hs->compare(k, key) == true)
+                               goto leave;
+               }
+               offset = ck_rhs_map_probe_next(map, offset, probes);
+       }
+leave:
+       if (probes > probe_limit) {
+               offset = -1;
+       } else {
+               *object = k;
+       }
+
+       if (pr == -1)
+               *n_probes = probes;
+
+       *priority = pr;
+       return offset;
+}
+
+static inline void *
+ck_rhs_marshal(unsigned int mode, const void *key, unsigned long h)
+{
+       void *insert;
+
+#ifdef CK_RHS_PP
+       if (mode & CK_RHS_MODE_OBJECT) {
+               insert = (void *)((uintptr_t)CK_RHS_VMA(key) | ((h >> 25) << CK_MD_VMA_BITS));
+       } else {
+               insert = (void *)key;
+       }
+#else
+       (void)mode;
+       (void)h;
+       insert = (void *)key;
+#endif
+
+       return insert;
+}
+
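+/*
+ * Recompute the global probe maximum from live entries; after many removals
+ * this tightens the bound used by readers.
+ */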
+bool
+ck_rhs_gc(struct ck_rhs *hs)
+{
+       unsigned long i;
+       struct ck_rhs_map *map = hs->map;
+
+       unsigned int max_probes = 0;
+       for (i = 0; i < map->capacity; i++) {
+               if (ck_rhs_probes(map, i) > max_probes)
+                       max_probes = ck_rhs_probes(map, i);
+       }
+       map->probe_maximum = max_probes;
+       return true;
+}
+
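+/*
+ * Each slot carries a small saturating "wanted" counter: the number of live
+ * entries whose probe sequence passed over it on the way to a later slot.
+ * Deletion consults it to decide whether a successor can be shifted back.
+ */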
+static void
+ck_rhs_add_wanted(struct ck_rhs *hs, long end_offset, long old_slot, 
+       unsigned long h)
+{
+       struct ck_rhs_map *map = hs->map;
+       long offset;
+       unsigned int probes = 1;
+       bool found_slot = false;
+       struct ck_rhs_entry_desc *desc;
+
+       offset = h & map->mask;
+
+       if (old_slot == -1)
+               found_slot = true;
+       while (offset != end_offset) {
+               if (offset == old_slot)
+                       found_slot = true;
+               if (found_slot) {
+                       desc = ck_rhs_desc(map, offset);
+                       if (desc->wanted < CK_RHS_MAX_WANTED)
+                               desc->wanted++;
+               }
+               offset = ck_rhs_map_probe_next(map, offset, probes);
+               probes++;
+       }
+}
+
+static unsigned long
+ck_rhs_remove_wanted(struct ck_rhs *hs, long offset, long limit)
+{
+       struct ck_rhs_map *map = hs->map;
+       int probes = ck_rhs_probes(map, offset);
+       bool do_remove = true;
+       struct ck_rhs_entry_desc *desc;
+
+       while (probes > 1) {
+               probes--;
+               offset = ck_rhs_map_probe_prev(map, offset, probes);
+               if (offset == limit)
+                       do_remove = false;
+               if (do_remove) {
+                       desc = ck_rhs_desc(map, offset);
+                       if (desc->wanted != CK_RHS_MAX_WANTED)
+                               desc->wanted--;
+               }
+       }
+       return offset;
+}
+
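+/*
+ * Walk a probe sequence backwards to recover the home bucket of the entry
+ * stored at the given offset with the given probe count.
+ */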
+static long
+ck_rhs_get_first_offset(struct ck_rhs_map *map, unsigned long offset, unsigned int probes)
+{
+       while (probes > (unsigned long)map->offset_mask + 1) {
+               offset -= ((probes - 1) &~ map->offset_mask);
+               offset &= map->mask;
+               offset = (offset &~ map->offset_mask) + 
+                   ((offset - map->offset_mask) & map->offset_mask);
+               probes -= map->offset_mask + 1;
+       }
+       return ((offset &~ map->offset_mask) + ((offset - (probes - 1)) & map->offset_mask));
+}
+
+#define CK_RHS_MAX_RH  512
+
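+/*
+ * Displace the entry at orig_slot further down its probe sequence, possibly
+ * cascading through up to CK_RHS_MAX_RH entries.  Returns 0 on success, 1 if
+ * the map was grown (the caller must retry) and -1 on allocation failure.
+ */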
+static int
+ck_rhs_put_robin_hood(struct ck_rhs *hs,
+    long orig_slot, struct ck_rhs_entry_desc *desc)
+{
+       long slot, first;
+       void *object, *insert;
+       unsigned long n_probes;
+       struct ck_rhs_map *map;
+       unsigned long h = 0;
+       long prev;
+       void *key;
+       long prevs[512];
+       unsigned int prevs_nb = 0;
+
+       map = hs->map;
+       first = orig_slot;
+       n_probes = desc->probes;
+restart:
+       key = ck_rhs_entry(map, first);
+       insert = key;
+#ifdef CK_RHS_PP
+       if (hs->mode & CK_RHS_MODE_OBJECT)
+           key = CK_RHS_VMA(key);
+#endif
+       orig_slot = first;
+       ck_rhs_set_rh(map, orig_slot);
+
+       slot = map->probe_func(hs, map, &n_probes, &first, h, key, &object,
+           map->probe_limit, prevs_nb == CK_RHS_MAX_RH ?
+           CK_RHS_PROBE_NO_RH : CK_RHS_PROBE_ROBIN_HOOD);
+
+       if (slot == -1 && first == -1) {
+               if (ck_rhs_grow(hs, map->capacity << 1) == false) {
+                       desc->in_rh = false;
+                       for (unsigned int i = 0; i < prevs_nb; i++)
+                               ck_rhs_unset_rh(map, prevs[i]);
+                       return -1;
+               }
+               return 1;
+       }
+
+       if (first != -1) {
+               desc = ck_rhs_desc(map, first);
+               int old_probes = desc->probes;
+
+               desc->probes = n_probes;
+               h = ck_rhs_get_first_offset(map, first, n_probes);
+               ck_rhs_map_bound_set(map, h, n_probes);
+               prev = orig_slot;
+               prevs[prevs_nb++] = prev;
+               n_probes = old_probes;
+               goto restart;
+       } else {
+               /* An empty slot was found. */
+               h =  ck_rhs_get_first_offset(map, slot, n_probes);
+               ck_rhs_map_bound_set(map, h, n_probes);
+               ck_pr_store_ptr(ck_rhs_entry_addr(map, slot), insert);
+               ck_pr_inc_uint(&map->generation[h & CK_RHS_G_MASK]);
+               ck_pr_fence_atomic_store();
+               ck_rhs_set_probes(map, slot, n_probes);
+               desc->in_rh = 0;
+               ck_rhs_add_wanted(hs, slot, orig_slot, h);
+       }
+       while (prevs_nb > 0) {
+               prev = prevs[--prevs_nb];
+               ck_pr_store_ptr(ck_rhs_entry_addr(map, orig_slot),
+                   ck_rhs_entry(map, prev));
+               h = ck_rhs_get_first_offset(map, orig_slot, 
+                   desc->probes);
+               ck_rhs_add_wanted(hs, orig_slot, prev, h);
+               ck_pr_inc_uint(&map->generation[h & CK_RHS_G_MASK]);
+               ck_pr_fence_atomic_store();
+               orig_slot = prev;
+               desc->in_rh = false;
+               desc = ck_rhs_desc(map, orig_slot);
+       }
+       return 0;
+}
+
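+/*
+ * Tombstone-free deletion: repeatedly pull a successor whose probe sequence
+ * passed over the freed slot back into it (guided by the wanted counters),
+ * then clear the last vacated slot and lower its home bucket's probe bound.
+ */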
+static void
+ck_rhs_do_backward_shift_delete(struct ck_rhs *hs, long slot)
+{
+       struct ck_rhs_map *map = hs->map;
+       struct ck_rhs_entry_desc *desc, *new_desc = NULL;
+       unsigned long h;
+       
+       desc = ck_rhs_desc(map, slot);
+       h = ck_rhs_remove_wanted(hs, slot, -1);
+       while (desc->wanted > 0) {
+               unsigned long offset = 0, tmp_offset;
+               unsigned long wanted_probes = 1;
+               unsigned int probe = 0;
+               unsigned int max_probes;
+
+               /* Find a successor */
+               while (wanted_probes < map->probe_maximum) {
+                       probe = wanted_probes;
+                       offset = ck_rhs_map_probe_next(map, slot, probe);
+                       while (probe < map->probe_maximum) {
+                               new_desc = ck_rhs_desc(map, offset);
+                               if (new_desc->probes == probe + 1)
+                                       break;
+                               probe++;
+                               offset = ck_rhs_map_probe_next(map, offset,
+                                   probe);
+                       }
+                       if (probe < map->probe_maximum)
+                               break;
+                       wanted_probes++;
+               }
+               if (wanted_probes == map->probe_maximum) {
+                       desc->wanted = 0;
+                       break;
+               }
+               desc->probes = wanted_probes;
+               h = ck_rhs_remove_wanted(hs, offset, slot);
+               ck_pr_store_ptr(ck_rhs_entry_addr(map, slot),
+                   ck_rhs_entry(map, offset));
+               ck_pr_inc_uint(&map->generation[h & CK_RHS_G_MASK]);
+               ck_pr_fence_atomic_store();
+               if (wanted_probes < CK_RHS_WORD_MAX) {
+                       struct ck_rhs_entry_desc *hdesc = ck_rhs_desc(map, h);
+                       if (hdesc->wanted == 1)
+                               CK_RHS_STORE(&hdesc->probe_bound,
+                                   wanted_probes);
+                       else if (hdesc->probe_bound == CK_RHS_WORD_MAX ||
+                           hdesc->probe_bound == new_desc->probes) {
+                               probe++;
+                               if (hdesc->probe_bound == CK_RHS_WORD_MAX)
+                                       max_probes = map->probe_maximum;
+                               else {
+                                       max_probes = hdesc->probe_bound;
+                                       max_probes--;
+                               }
+                               tmp_offset = ck_rhs_map_probe_next(map, offset,
+                                   probe);
+                               while (probe < max_probes) {
+                                       if (h == (unsigned long)ck_rhs_get_first_offset(map, tmp_offset, probe))
+                                               break;
+                                       probe++;
+                                       tmp_offset = ck_rhs_map_probe_next(map, tmp_offset, probe);
+                               }
+                               if (probe == max_probes)
+                                       CK_RHS_STORE(&hdesc->probe_bound,
+                                           wanted_probes);
+                       }
+               }
+               if (desc->wanted < CK_RHS_MAX_WANTED)
+                       desc->wanted--;
+               slot = offset;
+               desc = new_desc;
+       }
+       ck_pr_store_ptr(ck_rhs_entry_addr(map, slot), CK_RHS_EMPTY);
+       if ((desc->probes - 1) < CK_RHS_WORD_MAX)
+               CK_RHS_STORE(ck_rhs_probe_bound_addr(map, h),
+                   desc->probes - 1);
+       desc->probes = 0;
+}
+
+bool
+ck_rhs_fas(struct ck_rhs *hs,
+    unsigned long h,
+    const void *key,
+    void **previous)
+{
+       long slot, first;
+       void *object, *insert;
+       unsigned long n_probes;
+       struct ck_rhs_map *map = hs->map;
+       struct ck_rhs_entry_desc *desc, *desc2;
+
+       *previous = NULL;
+restart:
+       slot = map->probe_func(hs, map, &n_probes, &first, h, key, &object,
+           ck_rhs_map_bound_get(map, h), CK_RHS_PROBE);
+
+       /* Replacement semantics presume existence. */
+       if (object == NULL)
+               return false;
+
+       insert = ck_rhs_marshal(hs->mode, key, h);
+
+       if (first != -1) {
+               int ret;
+
+               desc = ck_rhs_desc(map, slot);
+               desc2 = ck_rhs_desc(map, first);
+               desc->in_rh = true;
+               ret = ck_rhs_put_robin_hood(hs, first, desc2);
+               desc->in_rh = false;
+               if (CK_CC_UNLIKELY(ret == 1))
+                       goto restart;
+               else if (CK_CC_UNLIKELY(ret != 0))
+                       return false;
+               ck_pr_store_ptr(ck_rhs_entry_addr(map, first), insert);
+               ck_pr_inc_uint(&map->generation[h & CK_RHS_G_MASK]);
+               ck_pr_fence_atomic_store();
+               desc2->probes = n_probes;
+               ck_rhs_add_wanted(hs, first, -1, h);
+               ck_rhs_do_backward_shift_delete(hs, slot);
+       } else {
+               ck_pr_store_ptr(ck_rhs_entry_addr(map, slot), insert);
+               ck_rhs_set_probes(map, slot, n_probes);
+       }
+       *previous = object;
+       return true;
+}
+
+bool
+ck_rhs_set(struct ck_rhs *hs,
+    unsigned long h,
+    const void *key,
+    void **previous)
+{
+       long slot, first;
+       void *object, *insert;
+       unsigned long n_probes;
+       struct ck_rhs_map *map;
+
+       *previous = NULL;
+
+restart:
+       map = hs->map;
+
+       slot = map->probe_func(hs, map, &n_probes, &first, h, key, &object, map->probe_limit, CK_RHS_PROBE_INSERT);
+       if (slot == -1 && first == -1) {
+               if (ck_rhs_grow(hs, map->capacity << 1) == false)
+                       return false;
+
+               goto restart;
+       }
+       ck_rhs_map_bound_set(map, h, n_probes);
+       insert = ck_rhs_marshal(hs->mode, key, h);
+
+       if (first != -1) {
+               struct ck_rhs_entry_desc *desc = NULL, *desc2;
+               if (slot != -1) {
+                       desc = ck_rhs_desc(map, slot);
+                       desc->in_rh = true;
+               }
+               desc2 = ck_rhs_desc(map, first);
+               int ret = ck_rhs_put_robin_hood(hs, first, desc2);
+               if (slot != -1)
+                       desc->in_rh = false;
+
+               if (CK_CC_UNLIKELY(ret == 1))
+                       goto restart;
+               if (CK_CC_UNLIKELY(ret == -1))
+                       return false;
+               /* If an earlier bucket was found, then store entry there. */
+               ck_pr_store_ptr(ck_rhs_entry_addr(map, first), insert);
+               desc2->probes = n_probes;
+               /*
+                * If a duplicate key was found, then delete it after
+                * signaling concurrent probes to restart. Optionally,
+                * it is possible to install tombstone after grace
+                * period if we can guarantee earlier position of
+                * duplicate key.
+                */
+               ck_rhs_add_wanted(hs, first, -1, h); 
+               if (object != NULL) {
+                       ck_pr_inc_uint(&map->generation[h & CK_RHS_G_MASK]);
+                       ck_pr_fence_atomic_store();
+                       ck_rhs_do_backward_shift_delete(hs, slot);
+               }
+
+       } else {
+               /*
+                * If we are storing into same slot, then atomic store is sufficient
+                * for replacement.
+                */
+               ck_pr_store_ptr(ck_rhs_entry_addr(map, slot), insert);
+               ck_rhs_set_probes(map, slot, n_probes);
+               if (object == NULL)
+                       ck_rhs_add_wanted(hs, slot, -1, h); 
+       }
+
+       if (object == NULL) {
+               map->n_entries++;
+               if ((map->n_entries ) > ((map->capacity * CK_RHS_LOAD_FACTOR) / 100))
+                       ck_rhs_grow(hs, map->capacity << 1);
+       }
+
+       *previous = object;
+       return true;
+}
+
+static bool
+ck_rhs_put_internal(struct ck_rhs *hs,
+    unsigned long h,
+    const void *key,
+    enum ck_rhs_probe_behavior behavior)
+{
+       long slot, first;
+       void *object, *insert;
+       unsigned long n_probes;
+       struct ck_rhs_map *map;
+
+restart:
+       map = hs->map;
+
+       slot = map->probe_func(hs, map, &n_probes, &first, h, key, &object,
+           map->probe_limit, behavior);
+
+       if (slot == -1 && first == -1) {
+               if (ck_rhs_grow(hs, map->capacity << 1) == false)
+                       return false;
+
+               goto restart;
+       }
+
+       /* Fail operation if a match was found. */
+       if (object != NULL)
+               return false;
+
+       ck_rhs_map_bound_set(map, h, n_probes);
+       insert = ck_rhs_marshal(hs->mode, key, h);
+
+       if (first != -1) {
+               struct ck_rhs_entry_desc *desc = ck_rhs_desc(map, first);
+               int ret = ck_rhs_put_robin_hood(hs, first, desc);
+               if (CK_CC_UNLIKELY(ret == 1))
+                       return ck_rhs_put_internal(hs, h, key, behavior);
+               else if (CK_CC_UNLIKELY(ret == -1))
+                       return false;
+               /* Insert key into first bucket in probe sequence. */
+               ck_pr_store_ptr(ck_rhs_entry_addr(map, first), insert);
+               desc->probes = n_probes;
+               ck_rhs_add_wanted(hs, first, -1, h); 
+       } else {
+               /* An empty slot was found. */
+               ck_pr_store_ptr(ck_rhs_entry_addr(map, slot), insert);
+               ck_rhs_set_probes(map, slot, n_probes);
+               ck_rhs_add_wanted(hs, slot, -1, h); 
+       }
+
+       map->n_entries++;
+       if ((map->n_entries ) > ((map->capacity * CK_RHS_LOAD_FACTOR) / 100))
+               ck_rhs_grow(hs, map->capacity << 1);
+       return true;
+}
+
+bool
+ck_rhs_put(struct ck_rhs *hs,
+    unsigned long h,
+    const void *key)
+{
+
+       return ck_rhs_put_internal(hs, h, key, CK_RHS_PROBE_INSERT);
+}
+
+bool
+ck_rhs_put_unique(struct ck_rhs *hs,
+    unsigned long h,
+    const void *key)
+{
+
+       return ck_rhs_put_internal(hs, h, key, CK_RHS_PROBE_RH);
+}
+
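+/*
+ * Lock-free read: bound the probe by the cached maximum for this home bucket,
+ * snapshot the bucket's generation counter, and retry the probe if a
+ * concurrent writer bumped the counter in the meantime.
+ */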
+void *
+ck_rhs_get(struct ck_rhs *hs,
+    unsigned long h,
+    const void *key)
+{
+       long first;
+       void *object;
+       struct ck_rhs_map *map;
+       unsigned long n_probes;
+       unsigned int g, g_p, probe;
+       unsigned int *generation;
+
+       do { 
+               map = ck_pr_load_ptr(&hs->map);
+               generation = &map->generation[h & CK_RHS_G_MASK];
+               g = ck_pr_load_uint(generation);
+               probe  = ck_rhs_map_bound_get(map, h);
+               ck_pr_fence_load();
+
+               first = -1;
+               map->probe_func(hs, map, &n_probes, &first, h, key, &object, probe, CK_RHS_PROBE_NO_RH);
+
+               ck_pr_fence_load();
+               g_p = ck_pr_load_uint(generation);
+       } while (g != g_p);
+
+       return object;
+}
+
+void *
+ck_rhs_remove(struct ck_rhs *hs,
+    unsigned long h,
+    const void *key)
+{
+       long slot, first;
+       void *object;
+       struct ck_rhs_map *map = hs->map;
+       unsigned long n_probes;
+
+       slot = map->probe_func(hs, map, &n_probes, &first, h, key, &object,
+           ck_rhs_map_bound_get(map, h), CK_RHS_PROBE_NO_RH);
+       if (object == NULL)
+               return NULL;
+
+       map->n_entries--;
+       ck_rhs_do_backward_shift_delete(hs, slot);
+       return object;
+}
+
+bool
+ck_rhs_move(struct ck_rhs *hs,
+    struct ck_rhs *source,
+    ck_rhs_hash_cb_t *hf,
+    ck_rhs_compare_cb_t *compare,
+    struct ck_malloc *m)
+{
+
+       if (m == NULL || m->malloc == NULL || m->free == NULL || hf == NULL)
+               return false;
+
+       hs->mode = source->mode;
+       hs->seed = source->seed;
+       hs->map = source->map;
+       hs->m = m;
+       hs->hf = hf;
+       hs->compare = compare;
+       return true;
+}
+
+bool
+ck_rhs_init(struct ck_rhs *hs,
+    unsigned int mode,
+    ck_rhs_hash_cb_t *hf,
+    ck_rhs_compare_cb_t *compare,
+    struct ck_malloc *m,
+    unsigned long n_entries,
+    unsigned long seed)
+{
+
+       if (m == NULL || m->malloc == NULL || m->free == NULL || hf == NULL)
+               return false;
+
+       hs->m = m;
+       hs->mode = mode;
+       hs->seed = seed;
+       hs->hf = hf;
+       hs->compare = compare;
+
+       hs->map = ck_rhs_map_create(hs, n_entries);
+       return hs->map != NULL;
+}
+
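
For reference, a minimal usage sketch of the ck_rhs API added above. The
allocator, hash, and comparison callbacks here are illustrative placeholders
supplied by the embedding application, not part of this commit; the mode flags
are the usual object/SPMC configuration from ck_rhs.h.

    #include <ck_rhs.h>
    #include <stdbool.h>
    #include <stdlib.h>
    #include <string.h>

    /* Placeholder allocator callbacks wrapping the C library allocator. */
    static void *hs_malloc(size_t n) { return malloc(n); }
    static void hs_free(void *p, size_t n, bool defer) { (void)n; (void)defer; free(p); }
    static struct ck_malloc allocator = { .malloc = hs_malloc, .free = hs_free };

    /* Toy string hash; a real application would plug in MurmurHash or similar. */
    static unsigned long
    str_hash(const void *k, unsigned long seed)
    {
            const char *s = k;
            unsigned long h = seed;

            while (*s != '\0')
                    h = h * 131 + (unsigned char)*s++;
            return h;
    }

    static bool
    str_cmp(const void *a, const void *b)
    {
            return strcmp(a, b) == 0;
    }

    int
    main(void)
    {
            struct ck_rhs hs;
            unsigned long seed = 0x5eed;

            if (ck_rhs_init(&hs, CK_RHS_MODE_OBJECT | CK_RHS_MODE_SPMC,
                str_hash, str_cmp, &allocator, 64, seed) == false)
                    return 1;

            /* Callers pass the precomputed hash alongside the key. */
            ck_rhs_put(&hs, str_hash("example", seed), "example");
            void *found = ck_rhs_get(&hs, str_hash("example", seed), "example");
            (void)found;

            ck_rhs_destroy(&hs);
            return 0;
    }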

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/f098175e/lib/ck/tools/feature.sh
----------------------------------------------------------------------
diff --git a/lib/ck/tools/feature.sh b/lib/ck/tools/feature.sh
new file mode 100755
index 0000000..f6c8934
--- /dev/null
+++ b/lib/ck/tools/feature.sh
@@ -0,0 +1,5 @@
+#!/bin/sh
+# This will generate the list of feature flags for implemented symbols.
+
+echo '/* DO NOT EDIT. This is auto-generated from feature.sh */'
+nm ../regressions/ck_pr/validate/ck_pr_cas|cut -d ' ' -f 3|sed s/ck_pr/ck_f_pr/|awk '/^ck_f_pr/ {print "#define " toupper($1);}'|sort
