Re: [PATCH v3 2/2] virtiofs: use GFP_NOFS when enqueuing request through kworker

2024-05-10 Thread Hou Tao
Hi,

On 5/10/2024 7:19 PM, Miklos Szeredi wrote:
> On Fri, 26 Apr 2024 at 16:38, Hou Tao  wrote:
>> From: Hou Tao 
>>
>> When invoking virtio_fs_enqueue_req() through kworker, both the
>> allocation of the sg array and the bounce buffer still use GFP_ATOMIC.
>> Considering that the size of the sg array may be greater than PAGE_SIZE,
>> use GFP_NOFS instead of GFP_ATOMIC to lower the possibility of memory
>> allocation failure and to avoid unnecessarily depleting the atomic
>> reserves. GFP_NOFS is not passed to virtio_fs_enqueue_req() directly;
>> instead, GFP_KERNEL and the memalloc_nofs_{save|restore} helpers are used.
> Makes sense.
>
> However, I don't understand why the GFP_NOFS behavior is optional. It
> should work when queuing the request for the first time as well, no?

No. fuse_request_queue_background() may call queue_request_and_unlock()
with fc->bg_lock held, and bg_lock is a spin-lock, so for now it is not
safe to call kmalloc(GFP_NOFS) while holding a spin-lock. The
acquisition of fc->bg_lock in fuse_request_queue_background() could be
optimized, but I will leave that for future work.
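
To illustrate the constraint (a simplified, stand-alone sketch rather than
the actual fuse code; both helper functions below are made up for
illustration):

/*
 * Simplified sketch only: why the initial-queuing path must stay with
 * GFP_ATOMIC while the kworker path can use a NOFS scope plus GFP_KERNEL.
 */
#include <linux/gfp.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

/* Path 1: the allocation happens with a spin-lock held, so it must not
 * sleep; GFP_ATOMIC is the only safe choice here.
 */
static void *alloc_under_spinlock(spinlock_t *lock, size_t len)
{
	void *buf;

	spin_lock(lock);
	buf = kmalloc(len, GFP_ATOMIC);
	spin_unlock(lock);
	return buf;
}

/* Path 2: kworker context, no spin-lock held.  GFP_KERNEL may sleep and
 * reclaim, and the memalloc_nofs scope strips __GFP_FS from every
 * allocation inside the section, so it behaves like GFP_NOFS without
 * passing the flag through each callee.
 */
static void *alloc_in_nofs_scope(size_t len)
{
	unsigned int flags;
	void *buf;

	flags = memalloc_nofs_save();
	buf = kmalloc(len, GFP_KERNEL);
	memalloc_nofs_restore(flags);
	return buf;
}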
> Thanks,
> Miklos




Re: [PATCH v3 2/2] virtiofs: use GFP_NOFS when enqueuing request through kworker

2024-05-10 Thread Miklos Szeredi
On Fri, 26 Apr 2024 at 16:38, Hou Tao  wrote:
>
> From: Hou Tao 
>
> When invoking virtio_fs_enqueue_req() through kworker, both the
> allocation of the sg array and the bounce buffer still use GFP_ATOMIC.
> Considering that the size of the sg array may be greater than PAGE_SIZE,
> use GFP_NOFS instead of GFP_ATOMIC to lower the possibility of memory
> allocation failure and to avoid unnecessarily depleting the atomic
> reserves. GFP_NOFS is not passed to virtio_fs_enqueue_req() directly;
> instead, GFP_KERNEL and the memalloc_nofs_{save|restore} helpers are used.

Makes sense.

However, I don't understand why the GFP_NOFS behavior is optional. It
should work when queuing the request for the first time as well, no?

Thanks,
Miklos



[PATCH v3 2/2] virtiofs: use GFP_NOFS when enqueuing request through kworker

2024-04-26 Thread Hou Tao
From: Hou Tao 

When invoking virtio_fs_enqueue_req() through kworker, both the
allocation of the sg array and the bounce buffer still use GFP_ATOMIC.
Considering that the size of the sg array may be greater than PAGE_SIZE,
use GFP_NOFS instead of GFP_ATOMIC to lower the possibility of memory
allocation failure and to avoid unnecessarily depleting the atomic
reserves. GFP_NOFS is not passed to virtio_fs_enqueue_req() directly;
instead, GFP_KERNEL and the memalloc_nofs_{save|restore} helpers are used.

Signed-off-by: Hou Tao 
---
 fs/fuse/virtio_fs.c | 24 +++++++++++++++---------
 1 file changed, 15 insertions(+), 9 deletions(-)

diff --git a/fs/fuse/virtio_fs.c b/fs/fuse/virtio_fs.c
index 36984c0e23d14..096b589ed2fcc 100644
--- a/fs/fuse/virtio_fs.c
+++ b/fs/fuse/virtio_fs.c
@@ -91,7 +91,8 @@ struct virtio_fs_req_work {
 };
 
 static int virtio_fs_enqueue_req(struct virtio_fs_vq *fsvq,
-struct fuse_req *req, bool in_flight);
+struct fuse_req *req, bool in_flight,
+gfp_t gfp);
 
 static const struct constant_table dax_param_enums[] = {
{"always",  FUSE_DAX_ALWAYS },
@@ -430,6 +431,8 @@ static void virtio_fs_request_dispatch_work(struct work_struct *work)
 
/* Dispatch pending requests */
while (1) {
+   unsigned int flags;
+
	spin_lock(&fsvq->lock);
	req = list_first_entry_or_null(&fsvq->queued_reqs,
   struct fuse_req, list);
@@ -440,7 +443,9 @@ static void virtio_fs_request_dispatch_work(struct work_struct *work)
	list_del_init(&req->list);
	spin_unlock(&fsvq->lock);
 
-   ret = virtio_fs_enqueue_req(fsvq, req, true);
+   flags = memalloc_nofs_save();
+   ret = virtio_fs_enqueue_req(fsvq, req, true, GFP_KERNEL);
+   memalloc_nofs_restore(flags);
if (ret < 0) {
if (ret == -ENOMEM || ret == -ENOSPC) {
	spin_lock(&fsvq->lock);
@@ -545,7 +550,7 @@ static void virtio_fs_hiprio_dispatch_work(struct work_struct *work)
 }
 
 /* Allocate and copy args into req->argbuf */
-static int copy_args_to_argbuf(struct fuse_req *req)
+static int copy_args_to_argbuf(struct fuse_req *req, gfp_t gfp)
 {
struct fuse_args *args = req->args;
unsigned int offset = 0;
@@ -559,7 +564,7 @@ static int copy_args_to_argbuf(struct fuse_req *req)
len = fuse_len_args(num_in, (struct fuse_arg *) args->in_args) +
  fuse_len_args(num_out, args->out_args);
 
-   req->argbuf = kmalloc(len, GFP_ATOMIC);
+   req->argbuf = kmalloc(len, gfp);
if (!req->argbuf)
return -ENOMEM;
 
@@ -1183,7 +1188,8 @@ static unsigned int sg_init_fuse_args(struct scatterlist *sg,
 
 /* Add a request to a virtqueue and kick the device */
 static int virtio_fs_enqueue_req(struct virtio_fs_vq *fsvq,
-struct fuse_req *req, bool in_flight)
+struct fuse_req *req, bool in_flight,
+gfp_t gfp)
 {
/* requests need at least 4 elements */
struct scatterlist *stack_sgs[6];
@@ -1204,8 +1210,8 @@ static int virtio_fs_enqueue_req(struct virtio_fs_vq *fsvq,
/* Does the sglist fit on the stack? */
total_sgs = sg_count_fuse_req(req);
if (total_sgs > ARRAY_SIZE(stack_sgs)) {
-   sgs = kmalloc_array(total_sgs, sizeof(sgs[0]), GFP_ATOMIC);
-   sg = kmalloc_array(total_sgs, sizeof(sg[0]), GFP_ATOMIC);
+   sgs = kmalloc_array(total_sgs, sizeof(sgs[0]), gfp);
+   sg = kmalloc_array(total_sgs, sizeof(sg[0]), gfp);
if (!sgs || !sg) {
ret = -ENOMEM;
goto out;
@@ -1213,7 +1219,7 @@ static int virtio_fs_enqueue_req(struct virtio_fs_vq *fsvq,
}
 
/* Use a bounce buffer since stack args cannot be mapped */
-   ret = copy_args_to_argbuf(req);
+   ret = copy_args_to_argbuf(req, gfp);
if (ret < 0)
goto out;
 
@@ -1309,7 +1315,7 @@ __releases(fiq->lock)
 fuse_len_args(req->args->out_numargs, req->args->out_args));
 
	fsvq = &fs->vqs[queue_id];
-   ret = virtio_fs_enqueue_req(fsvq, req, false);
+   ret = virtio_fs_enqueue_req(fsvq, req, false, GFP_ATOMIC);
if (ret < 0) {
if (ret == -ENOMEM || ret == -ENOSPC) {
/*
-- 
2.29.2
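
For readers unfamiliar with the scope API used above: roughly speaking,
memalloc_nofs_save() sets PF_MEMALLOC_NOFS on the current task and the
page allocator masks __GFP_FS out of any allocation made inside the scope,
which is why passing plain GFP_KERNEL to virtio_fs_enqueue_req() from the
kworker is equivalent to GFP_NOFS. A rough sketch of the idea follows (the
*_sketch names are illustrative only; the real helpers live in
include/linux/sched/mm.h):

/* Rough sketch of the scoped-NOFS mechanism, not the real implementation. */
static inline unsigned int memalloc_nofs_save_sketch(void)
{
	unsigned int old = current->flags & PF_MEMALLOC_NOFS;

	current->flags |= PF_MEMALLOC_NOFS;	/* enter NOFS scope */
	return old;
}

static inline void memalloc_nofs_restore_sketch(unsigned int old)
{
	current->flags = (current->flags & ~PF_MEMALLOC_NOFS) | old;
}

/* The page allocator consults the task flags before allocating. */
static inline gfp_t current_gfp_context_sketch(gfp_t flags)
{
	if (current->flags & PF_MEMALLOC_NOFS)
		flags &= ~__GFP_FS;	/* GFP_KERNEL now acts like GFP_NOFS */
	return flags;
}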