This experimental algorithm uses only XCHG, at the cost of having a consumer 
wait for a node's next pointer to be set. However, it allows real work to be 
done on a node before any waiting is performed, so that "real" work should 
provide a "natural backoff" that minimizes the actual waiting. The algorithm 
can be improved upon quite a bit; I have some "interesting" ideas for it. 
Anyway, here is the code in the form of a Relacy unit test:

Notice the "hand off" comments and the 0xDEADBEEF sentinel; this can be made 
to work much better...
______________
// Simple Atomic Stack
// For Academic and Experimental things... 
// Beginner, Moderate?
// In Good Ol' Relacy! Nice as always.
//_______________________________________________


//#define RL_DEBUGBREAK_ON_ASSERT
//#define RL_MSVC_OUTPUT
//#define RL_FORCE_SEQ_CST
//#define RL_GC


#include <relacy/relacy_std.hpp>
#include <iostream>


// Simple macro-based redirection of the verbose std membars.
#define CT_MB_ACQ std::memory_order_acquire
#define CT_MB_REL std::memory_order_release
#define CT_MB_RLX std::memory_order_relaxed
#define CT_MB_ACQ_REL std::memory_order_acq_rel
#define CT_MB_SEQ_CST std::memory_order_seq_cst
#define CT_MB(mp_0) std::atomic_thread_fence(mp_0)


// Some global vars directing the show...
#define THREADS 3
#define ITERS 2


// Experimental Stack
struct ct_ecstack
{
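    // Sentinel: the node has been pushed, but its next pointer
    // has not been back-filled yet.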
    #define WAITNEXT ((node*)0xDEADBEEF)

    struct node
    {
        std::atomic<node*> m_next;
        VAR_T(unsigned int) m_data;

        node(unsigned int data) : m_next(nullptr), m_data(data) {}

        void process()
        {
            // simulate some work?
            rl::yield(1 + rl::rand(2), $); // ;^)
        }

        node* next_wait()
        {
            node* next = nullptr;

            while ((next = m_next.load(CT_MB_RLX)) == WAITNEXT)
            {
                // Humm, we can actually do other work right here...
                // Hand off...
                rl::yield(1, $);
            }

            return next;
        }
    };


    std::atomic<node*> m_head;

    ct_ecstack() : m_head(nullptr) {}


    void push(node* n)
    {
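        // Mark the next pointer as "pending" before the node becomes visible.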
        n->m_next.store(WAITNEXT, CT_MB_RLX);
        node* head = m_head.exchange(n, CT_MB_REL); // release
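        // Back-fill the next pointer; a consumer that has already flushed
        // this node will spin in next_wait() until it sees this store.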
        n->m_next.store(head, CT_MB_RLX);
    }


    node* flush_try()
    {
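        // A single XCHG grabs the whole list; the acquire pairs with the
        // release XCHG in push().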
        return m_head.exchange(nullptr, CT_MB_ACQ); // acquire
    }
};




// Relacy Stack Test...
struct ct_ecstack_test
    : rl::test_suite<ct_ecstack_test, THREADS>
{
    ct_ecstack g_ecstack;

    void process()
    {
        ct_ecstack::node* n = g_ecstack.flush_try(); // flush all

        while (n)
        {
            // Process n first; it acts like a backoff before the next wait.
            // Hand off some other nodes? Future note...
            n->process();

            // Wait for the next pointer, or hand off?
            ct_ecstack::node* next = n->next_wait();

            // Destroy
            delete n;

            // Loop on...
            n = next;
        }
    }

    void thread(unsigned int tidx)
    {
        for (unsigned int i = 0; i < ITERS; ++i)
        {
            g_ecstack.push(new ct_ecstack::node(tidx));
            g_ecstack.push(new ct_ecstack::node(tidx));
            g_ecstack.push(new ct_ecstack::node(tidx));

            process();

            g_ecstack.push(new ct_ecstack::node(tidx));
        }

        process();
    }
};



// Test away... Or fly? Humm...
int main()
{
    {
        rl::test_params p;

        p.iteration_count = 10000000;
        //p.execution_depth_limit = 33333;
        //p.search_type = rl::sched_bound;
        //p.search_type = rl::fair_full_search_scheduler_type;
        //p.search_type = rl::fair_context_bound_scheduler_type;

        rl::simulate<ct_ecstack_test>(p);
    }

    return 0;
}
______________
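
For reference, here is roughly the same thing stripped of the Relacy 
instrumentation, in plain C++11. This is just a minimal sketch derived 
directly from the test above, not tested as-is; the names and memory 
orderings mirror the Relacy version, and the consumer drain loop is left to 
the caller (it would look just like process() in the test):
______________
#include <atomic>


// Same stack, minus the Relacy instrumentation.
struct ct_ecstack
{
    struct node
    {
        std::atomic<node*> m_next;
        unsigned int m_data;

        node(unsigned int data) : m_next(nullptr), m_data(data) {}
    };

    // Sentinel: next pointer not back-filled yet.
    static node* wait_next() { return (node*)0xDEADBEEF; }

    std::atomic<node*> m_head;

    ct_ecstack() : m_head(nullptr) {}

    void push(node* n)
    {
        // Mark the next pointer as pending, publish with one XCHG,
        // then back-fill the next pointer.
        n->m_next.store(wait_next(), std::memory_order_relaxed);
        node* head = m_head.exchange(n, std::memory_order_release);
        n->m_next.store(head, std::memory_order_relaxed);
    }

    node* flush_try()
    {
        // Grab the whole list; acquire pairs with the release in push().
        return m_head.exchange(nullptr, std::memory_order_acquire);
    }

    // Consumer side: wait for a node's next pointer to be back-filled.
    static node* next_wait(node* n)
    {
        node* next;
        while ((next = n->m_next.load(std::memory_order_relaxed)) == wait_next())
        {
            // Real work, or a hand off, would go here instead of pure spinning.
        }
        return next;
    }
};
______________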


Any thoughts? This should be fairly basic in nature.
