On Sun, 3 Nov 2013 21:20:37 +0100, Oleg Nesterov wrote:
> On 11/01, Oleg Nesterov wrote:
>>
>> Ah, please ignore... handler_chain() is not self-serialized, so
>> tu->buffer needs locking/waiting too.
>
> Still I have to admit that I strongly dislike this yet another
> (and imho strange) memory pool. However, I am not going to argue
> because I can't suggest something better right now.

Okay.

>
> But. Perhaps it makes sense to at least add a couple of trivial
> helpers in 10/13? Something like arg_buf_get/put/init, just to
> simplify the potential changes.

Good idea.  How about something like below?


/* one page of argument buffer per cpu, protected by ->mutex */
struct uprobe_cpu_buffer {
        struct mutex mutex;
        void *buf;
};
static struct uprobe_cpu_buffer __percpu *uprobe_cpu_buffer;

/* protects uprobe_buffer_refcnt and buffer allocation/free */
static DEFINE_MUTEX(uprobe_buffer_mutex);
static int uprobe_buffer_refcnt;

static int uprobe_buffer_init(void)
{
        int cpu, err_cpu;

        uprobe_cpu_buffer = alloc_percpu(struct uprobe_cpu_buffer);
        if (uprobe_cpu_buffer == NULL)
                return -ENOMEM;

        /* allocate each cpu's buffer page on that cpu's local node */
        for_each_possible_cpu(cpu) {
                struct page *p = alloc_pages_node(cpu_to_node(cpu),
                                                  GFP_KERNEL, 0);
                if (p == NULL) {
                        err_cpu = cpu;
                        goto err;
                }
                per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf = page_address(p);
                mutex_init(&per_cpu_ptr(uprobe_cpu_buffer, cpu)->mutex);
        }

        return 0;

err:
        for_each_possible_cpu(cpu) {
                if (cpu == err_cpu)
                        break;
                free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer,
                                                     cpu)->buf);
        }

        free_percpu(uprobe_cpu_buffer);
        return -ENOMEM;
}

static int uprobe_buffer_enable(void)
{
        int ret = 0;

        mutex_lock(&uprobe_buffer_mutex);
        if (uprobe_buffer_refcnt++ == 0) {
                ret = uprobe_buffer_init();
                if (ret < 0)
                        uprobe_buffer_refcnt--;
        }
        mutex_unlock(&uprobe_buffer_mutex);

        return ret;
}

static void uprobe_buffer_disable(void)
{
        int cpu;

        mutex_lock(&uprobe_buffer_mutex);
        if (--uprobe_buffer_refcnt == 0) {
                /* free each cpu's page, then the per-cpu data itself */
                for_each_possible_cpu(cpu)
                        free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer,
                                                             cpu)->buf);

                free_percpu(uprobe_cpu_buffer);
                uprobe_cpu_buffer = NULL;
        }
        mutex_unlock(&uprobe_buffer_mutex);
}

static struct uprobe_cpu_buffer *uprobe_buffer_get(void)
{
        struct uprobe_cpu_buffer *ucb;
        int cpu;

        cpu = raw_smp_processor_id();
        ucb = per_cpu_ptr(uprobe_cpu_buffer, cpu);

        /*
         * Use the current cpu's buffer for fast access.  The task can
         * migrate or be preempted right after raw_smp_processor_id(),
         * so the mutex guarantees we have exclusive access to the
         * buffer.
         */
        mutex_lock(&ucb->mutex);

        return ucb;
}

static void uprobe_buffer_put(struct uprobe_cpu_buffer *ucb)
{
        mutex_unlock(&ucb->mutex);
}
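
A handler would then bracket its use of the buffer with the get/put
pair, something along these lines (just a sketch - uprobe_trace_func()
and store_args() below are placeholders for whatever the real callers
end up doing):

static void uprobe_trace_func(struct trace_uprobe *tu, struct pt_regs *regs)
{
        struct uprobe_cpu_buffer *ucb;

        ucb = uprobe_buffer_get();

        /* ucb->buf (one page) is exclusively ours until the _put() */
        store_args(tu, regs, ucb->buf);

        uprobe_buffer_put(ucb);
}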


Thanks,
Namhyung