This patch renames confusing identifiers in work.c: 1. before: wi->nr_workers, after: wi->nr_queued_work 2. before: struct worker_info, after: struct wq_info
Also fixes obsolete comments. Signed-off-by: Hitoshi Mitake <mitake.hito...@lab.ntt.co.jp> --- lib/work.c | 52 ++++++++++++++++++++++++++-------------------- 1 file changed, 26 insertions(+), 26 deletions(-) diff --git a/lib/work.c b/lib/work.c index c3b568b..6754dc9 100644 --- a/lib/work.c +++ b/lib/work.c @@ -40,18 +40,18 @@ */ #define WQ_PROTECTION_PERIOD 1000 /* ms */ -struct worker_info { +struct wq_info { const char *name; struct list_head finished_list; - struct list_node worker_info_siblings; + struct list_node wq_info_siblings; pthread_mutex_t finished_lock; pthread_mutex_t startup_lock; - /* wokers sleep on this and signaled by tgtd */ + /* workers sleep on this and signaled by work producer */ pthread_cond_t pending_cond; - /* locked by tgtd and workers */ + /* locked by work producers and workers */ pthread_mutex_t pending_lock; /* protected by pending_lock */ struct work_queue q; @@ -61,7 +61,7 @@ struct worker_info { size_t nr_threads; /* protected by uatomic primitives */ - size_t nr_workers; + size_t nr_queued_work; /* we cannot shrink work queue till this time */ uint64_t tm_end_of_protection; @@ -69,7 +69,7 @@ struct worker_info { }; static int efd; -static LIST_HEAD(worker_info_list); +static LIST_HEAD(wq_info_list); static size_t nr_nodes = 1; static size_t (*wq_get_nr_nodes)(void); @@ -88,10 +88,10 @@ static int ack_efd; void suspend_worker_threads(void) { - struct worker_info *wi; + struct wq_info *wi; int tid; - list_for_each_entry(wi, &worker_info_list, worker_info_siblings) { + list_for_each_entry(wi, &wq_info_list, wq_info_siblings) { pthread_mutex_lock(&wi->pending_lock); } @@ -112,7 +112,7 @@ void suspend_worker_threads(void) void resume_worker_threads(void) { - struct worker_info *wi; + struct wq_info *wi; int nr_threads = 0, tid; FOR_EACH_BIT(tid, tid_map, tid_max) { @@ -123,7 +123,7 @@ void resume_worker_threads(void) for (int i = 0; i < nr_threads; i++) eventfd_xread(ack_efd); - list_for_each_entry(wi, &worker_info_list, 
worker_info_siblings) { + list_for_each_entry(wi, &wq_info_list, wq_info_siblings) { pthread_mutex_unlock(&wi->pending_lock); } } @@ -198,7 +198,7 @@ static uint64_t get_msec_time(void) return tv.tv_sec * 1000 + tv.tv_usec / 1000; } -static inline uint64_t wq_get_roof(struct worker_info *wi) +static inline uint64_t wq_get_roof(struct wq_info *wi) { uint64_t nr = 1; @@ -218,9 +218,9 @@ static inline uint64_t wq_get_roof(struct worker_info *wi) return nr; } -static bool wq_need_grow(struct worker_info *wi) +static bool wq_need_grow(struct wq_info *wi) { - if (wi->nr_threads < uatomic_read(&wi->nr_workers) && + if (wi->nr_threads < uatomic_read(&wi->nr_queued_work) && wi->nr_threads * 2 <= wq_get_roof(wi)) { wi->tm_end_of_protection = get_msec_time() + WQ_PROTECTION_PERIOD; @@ -234,9 +234,9 @@ static bool wq_need_grow(struct worker_info *wi) * Return true if more than half of threads are not used more than * WQ_PROTECTION_PERIOD seconds */ -static bool wq_need_shrink(struct worker_info *wi) +static bool wq_need_shrink(struct wq_info *wi) { - if (uatomic_read(&wi->nr_workers) < wi->nr_threads / 2) + if (uatomic_read(&wi->nr_queued_work) < wi->nr_threads / 2) /* we cannot shrink work queue during protection period. 
*/ return wi->tm_end_of_protection <= get_msec_time(); @@ -245,7 +245,7 @@ static bool wq_need_shrink(struct worker_info *wi) return false; } -static int create_worker_threads(struct worker_info *wi, size_t nr_threads) +static int create_worker_threads(struct wq_info *wi, size_t nr_threads) { pthread_t thread; int ret; @@ -268,9 +268,9 @@ static int create_worker_threads(struct worker_info *wi, size_t nr_threads) void queue_work(struct work_queue *q, struct work *work) { - struct worker_info *wi = container_of(q, struct worker_info, q); + struct wq_info *wi = container_of(q, struct wq_info, q); - uatomic_inc(&wi->nr_workers); + uatomic_inc(&wi->nr_queued_work); if (wq_need_grow(wi)) /* double the thread pool size */ @@ -285,7 +285,7 @@ void queue_work(struct work_queue *q, struct work *work) static void worker_thread_request_done(int fd, int events, void *data) { - struct worker_info *wi; + struct wq_info *wi; struct work *work; LIST_HEAD(list); @@ -294,7 +294,7 @@ static void worker_thread_request_done(int fd, int events, void *data) eventfd_xread(fd); - list_for_each_entry(wi, &worker_info_list, worker_info_siblings) { + list_for_each_entry(wi, &wq_info_list, wq_info_siblings) { pthread_mutex_lock(&wi->finished_lock); list_splice_init(&wi->finished_list, &list); pthread_mutex_unlock(&wi->finished_lock); @@ -304,14 +304,14 @@ static void worker_thread_request_done(int fd, int events, void *data) list_del(&work->w_list); work->done(work); - uatomic_dec(&wi->nr_workers); + uatomic_dec(&wi->nr_queued_work); } } } static void *worker_routine(void *arg) { - struct worker_info *wi = arg; + struct wq_info *wi = arg; struct work *work; int tid = gettid(); @@ -407,7 +407,7 @@ struct work_queue *create_work_queue(const char *name, enum wq_thread_control tc) { int ret; - struct worker_info *wi; + struct wq_info *wi; wi = xzalloc(sizeof(*wi)); wi->name = name; @@ -427,7 +427,7 @@ struct work_queue *create_work_queue(const char *name, if (ret < 0) goto destroy_threads; - 
list_add(&wi->worker_info_siblings, &worker_info_list); + list_add(&wi->wq_info_siblings, &wq_info_list); return &wi->q; destroy_threads: @@ -448,7 +448,7 @@ struct work_queue *create_ordered_work_queue(const char *name) bool work_queue_empty(struct work_queue *q) { - struct worker_info *wi = container_of(q, struct worker_info, q); + struct wq_info *wi = container_of(q, struct wq_info, q); - return uatomic_read(&wi->nr_workers) == 0; + return uatomic_read(&wi->nr_queued_work) == 0; } -- 1.7.10.4 -- sheepdog mailing list sheepdog@lists.wpkg.org http://lists.wpkg.org/mailman/listinfo/sheepdog