Re: svn commit: r357771 - in head/sys: kern sys

2020-02-11 Thread O. Hartmann
-BEGIN PGP SIGNED MESSAGE-
Hash: SHA256

Am Tue, 11 Feb 2020 18:48:08 + (UTC)
Gleb Smirnoff  schrieb:

> Author: glebius
> Date: Tue Feb 11 18:48:07 2020
> New Revision: 357771
> URL: https://svnweb.freebsd.org/changeset/base/357771
> 
> Log:
>   Add flag to struct task to mark the task as requiring network epoch.
>   
>   When processing a taskqueue and a task has associated epoch, then
>   enter for duration of the task.  If consecutive tasks belong to the
>   same epoch, batch them.  Now we are talking about the network epoch
>   only.
>   
>   Shrink the ta_priority size to 8-bits.  No current consumers use
>   a priority that won't fit into 8 bits.  Also complexity of
>   taskqueue_enqueue() is a square of maximum value of priority, so
>   we unlikely ever want to go over UCHAR_MAX here.
>   
> >   Reviewed by:	hselasky
>   Differential Revision:  https://reviews.freebsd.org/D23518
> 
> Modified:
>   head/sys/kern/subr_gtaskqueue.c
>   head/sys/kern/subr_taskqueue.c
>   head/sys/sys/_task.h
>   head/sys/sys/epoch.h
>   head/sys/sys/gtaskqueue.h
>   head/sys/sys/taskqueue.h
> 
> Modified: head/sys/kern/subr_gtaskqueue.c
> ==
> --- head/sys/kern/subr_gtaskqueue.c   Tue Feb 11 18:19:56 2020
> (r357770)
> +++ head/sys/kern/subr_gtaskqueue.c   Tue Feb 11 18:48:07 2020
> (r357771)
> @@ -41,6 +41,7 @@ __FBSDID("$FreeBSD$");
>  #include 
>  #include 
>  #include 
> > +#include <sys/epoch.h>
>  #include 
>  #include 
>  #include 
> @@ -342,13 +343,16 @@ gtaskqueue_unblock(struct gtaskqueue *queue)
>  static void
>  gtaskqueue_run_locked(struct gtaskqueue *queue)
>  {
> + struct epoch_tracker et;
>   struct gtaskqueue_busy tb;
>   struct gtask *gtask;
> + bool in_net_epoch;
>  
>   KASSERT(queue != NULL, ("tq is NULL"));
>   TQ_ASSERT_LOCKED(queue);
>   tb.tb_running = NULL;
>   LIST_INSERT_HEAD(&queue->tq_active, &tb, tb_link);
> + in_net_epoch = false;
>  
>   while ((gtask = STAILQ_FIRST(&queue->tq_queue)) != NULL) {
>   STAILQ_REMOVE_HEAD(&queue->tq_queue, ta_link);
> @@ -358,11 +362,20 @@ gtaskqueue_run_locked(struct gtaskqueue *queue)
>   TQ_UNLOCK(queue);
>  
>   KASSERT(gtask->ta_func != NULL, ("task->ta_func is NULL"));
> + if (!in_net_epoch && TASK_IS_NET(gtask)) {
> + in_net_epoch = true;
> + NET_EPOCH_ENTER(et);
> + } else if (in_net_epoch && !TASK_IS_NET(gtask)) {
> + NET_EPOCH_EXIT(et);
> + in_net_epoch = false;
> + }
>   gtask->ta_func(gtask->ta_context);
>  
>   TQ_LOCK(queue);
>   wakeup(gtask);
>   }
> + if (in_net_epoch)
> + NET_EPOCH_EXIT(et);
>   LIST_REMOVE(&tb, tb_link);
>  }
>  
> 
> Modified: head/sys/kern/subr_taskqueue.c
> ==
> --- head/sys/kern/subr_taskqueue.cTue Feb 11 18:19:56 2020
> (r357770)
> +++ head/sys/kern/subr_taskqueue.cTue Feb 11 18:48:07 2020
> (r357771)
> @@ -42,6 +42,7 @@ __FBSDID("$FreeBSD$");
>  #include 
>  #include 
>  #include 
> > +#include <sys/epoch.h>
>  #include 
>  #include 
>  #include 
> @@ -371,7 +372,7 @@ taskqueue_drain_tq_queue(struct taskqueue *queue)
>* anyway) so just insert it at tail while we have the
>* queue lock.
>*/
> - TASK_INIT(&t_barrier, USHRT_MAX, taskqueue_task_nop_fn, &t_barrier);
> + TASK_INIT(&t_barrier, UCHAR_MAX, taskqueue_task_nop_fn, &t_barrier);
>   STAILQ_INSERT_TAIL(&queue->tq_queue, &t_barrier, ta_link);
>   queue->tq_hint = &t_barrier;
>   t_barrier.ta_pending = 1;
> @@ -442,14 +443,17 @@ taskqueue_unblock(struct taskqueue *queue)
>  static void
>  taskqueue_run_locked(struct taskqueue *queue)
>  {
> + struct epoch_tracker et;
>   struct taskqueue_busy tb;
>   struct task *task;
> + bool in_net_epoch;
>   int pending;
>  
>   KASSERT(queue != NULL, ("tq is NULL"));
>   TQ_ASSERT_LOCKED(queue);
>   tb.tb_running = NULL;
>   LIST_INSERT_HEAD(&queue->tq_active, &tb, tb_link);
> + in_net_epoch = false;
>  
>   while ((task = STAILQ_FIRST(&queue->tq_queue)) != NULL) {
>   STAILQ_REMOVE_HEAD(&queue->tq_queue, ta_link);
> @@ -462,11 +466,20 @@ taskqueue_run_locked(struct taskqueue *queue)
>   TQ_UNLOCK(queue);
>  
>   KASSERT(task->ta_func != NULL, ("task->ta_func is NULL"));
> + if (!in_net_epoch && TASK_IS_NET(task)) {
> + in_net_epoch = true;
> + NET_EPOCH_ENTER(et);
> + } else if (in_net_epoch && !TASK_IS_NET(task)) {
> + NET_EPOCH_EXIT(et);
> + in_net_epoch = false;
> + }
>   task->ta_func(task->ta_context, pending);
>  
>   TQ_LOCK(queue);

svn commit: r357771 - in head/sys: kern sys

2020-02-11 Thread Gleb Smirnoff
Author: glebius
Date: Tue Feb 11 18:48:07 2020
New Revision: 357771
URL: https://svnweb.freebsd.org/changeset/base/357771

Log:
  Add flag to struct task to mark the task as requiring network epoch.
  
  When processing a taskqueue and a task has associated epoch, then
  enter for duration of the task.  If consecutive tasks belong to the
  same epoch, batch them.  Now we are talking about the network epoch
  only.
  
  Shrink the ta_priority size to 8-bits.  No current consumers use
  a priority that won't fit into 8 bits.  Also complexity of
  taskqueue_enqueue() is a square of maximum value of priority, so
  we unlikely ever want to go over UCHAR_MAX here.
  
  Reviewed by:  hselasky
  Differential Revision:https://reviews.freebsd.org/D23518

Modified:
  head/sys/kern/subr_gtaskqueue.c
  head/sys/kern/subr_taskqueue.c
  head/sys/sys/_task.h
  head/sys/sys/epoch.h
  head/sys/sys/gtaskqueue.h
  head/sys/sys/taskqueue.h

Modified: head/sys/kern/subr_gtaskqueue.c
==
--- head/sys/kern/subr_gtaskqueue.c Tue Feb 11 18:19:56 2020
(r357770)
+++ head/sys/kern/subr_gtaskqueue.c Tue Feb 11 18:48:07 2020
(r357771)
@@ -41,6 +41,7 @@ __FBSDID("$FreeBSD$");
 #include 
 #include 
 #include 
+#include <sys/epoch.h>
 #include 
 #include 
 #include 
@@ -342,13 +343,16 @@ gtaskqueue_unblock(struct gtaskqueue *queue)
 static void
 gtaskqueue_run_locked(struct gtaskqueue *queue)
 {
+   struct epoch_tracker et;
struct gtaskqueue_busy tb;
struct gtask *gtask;
+   bool in_net_epoch;
 
KASSERT(queue != NULL, ("tq is NULL"));
TQ_ASSERT_LOCKED(queue);
tb.tb_running = NULL;
LIST_INSERT_HEAD(&queue->tq_active, &tb, tb_link);
+   in_net_epoch = false;
 
while ((gtask = STAILQ_FIRST(&queue->tq_queue)) != NULL) {
STAILQ_REMOVE_HEAD(&queue->tq_queue, ta_link);
@@ -358,11 +362,20 @@ gtaskqueue_run_locked(struct gtaskqueue *queue)
TQ_UNLOCK(queue);
 
KASSERT(gtask->ta_func != NULL, ("task->ta_func is NULL"));
+   if (!in_net_epoch && TASK_IS_NET(gtask)) {
+   in_net_epoch = true;
+   NET_EPOCH_ENTER(et);
+   } else if (in_net_epoch && !TASK_IS_NET(gtask)) {
+   NET_EPOCH_EXIT(et);
+   in_net_epoch = false;
+   }
gtask->ta_func(gtask->ta_context);
 
TQ_LOCK(queue);
wakeup(gtask);
}
+   if (in_net_epoch)
+   NET_EPOCH_EXIT(et);
LIST_REMOVE(&tb, tb_link);
 }
 

Modified: head/sys/kern/subr_taskqueue.c
==
--- head/sys/kern/subr_taskqueue.c  Tue Feb 11 18:19:56 2020
(r357770)
+++ head/sys/kern/subr_taskqueue.c  Tue Feb 11 18:48:07 2020
(r357771)
@@ -42,6 +42,7 @@ __FBSDID("$FreeBSD$");
 #include 
 #include 
 #include 
+#include <sys/epoch.h>
 #include 
 #include 
 #include 
@@ -371,7 +372,7 @@ taskqueue_drain_tq_queue(struct taskqueue *queue)
 * anyway) so just insert it at tail while we have the
 * queue lock.
 */
-   TASK_INIT(&t_barrier, USHRT_MAX, taskqueue_task_nop_fn, &t_barrier);
+   TASK_INIT(&t_barrier, UCHAR_MAX, taskqueue_task_nop_fn, &t_barrier);
STAILQ_INSERT_TAIL(&queue->tq_queue, &t_barrier, ta_link);
queue->tq_hint = &t_barrier;
t_barrier.ta_pending = 1;
@@ -442,14 +443,17 @@ taskqueue_unblock(struct taskqueue *queue)
 static void
 taskqueue_run_locked(struct taskqueue *queue)
 {
+   struct epoch_tracker et;
struct taskqueue_busy tb;
struct task *task;
+   bool in_net_epoch;
int pending;
 
KASSERT(queue != NULL, ("tq is NULL"));
TQ_ASSERT_LOCKED(queue);
tb.tb_running = NULL;
LIST_INSERT_HEAD(&queue->tq_active, &tb, tb_link);
+   in_net_epoch = false;
 
while ((task = STAILQ_FIRST(&queue->tq_queue)) != NULL) {
STAILQ_REMOVE_HEAD(&queue->tq_queue, ta_link);
@@ -462,11 +466,20 @@ taskqueue_run_locked(struct taskqueue *queue)
TQ_UNLOCK(queue);
 
KASSERT(task->ta_func != NULL, ("task->ta_func is NULL"));
+   if (!in_net_epoch && TASK_IS_NET(task)) {
+   in_net_epoch = true;
+   NET_EPOCH_ENTER(et);
+   } else if (in_net_epoch && !TASK_IS_NET(task)) {
+   NET_EPOCH_EXIT(et);
+   in_net_epoch = false;
+   }
task->ta_func(task->ta_context, pending);
 
TQ_LOCK(queue);
wakeup(task);
}
+   if (in_net_epoch)
+   NET_EPOCH_EXIT(et);
LIST_REMOVE(&tb, tb_link);
 }
 

Modified: head/sys/sys/_task.h
==
--- head/sys/sys