> On Feb 12, 2026, at 04:56, Stefan Hajnoczi <[email protected]> wrote:
> 
> On Sat, Feb 07, 2026 at 08:08:56PM +0800, Brian Song wrote:
>> +typedef struct FuseUringEnt {
>> +    /* back pointer */
>> +    FuseUringQueue *rq;
>> +
>> +    /* commit id of a fuse request */
>> +    uint64_t req_commit_id;

[...]

>> +
>> +/**
>> + * Distribute uring queues across FUSE queues in a round-robin manner.
>> + * This ensures even distribution of kernel uring queues across
>> + * user-specified FUSE queues.
>> + *
>> + * num_uring_queues > num_fuse_queues: Each IOThread manages multiple uring
>> + * queues (multi-queue mapping).
>> + * num_uring_queues < num_fuse_queues: Excess IOThreads remain idle with no
>> + * assigned uring queues.
>> + */
>> +static void fuse_uring_setup_queues(FuseExport *exp, size_t bufsize)
>> +{
>> +    int num_uring_queues = get_nprocs_conf();
>> +
>> +    exp->num_uring_queues = num_uring_queues;
>> +    exp->uring_queues = g_new(FuseUringQueue, num_uring_queues);
>> +
>> +    for (int i = 0; i < num_uring_queues; i++) {
>> +        FuseUringQueue *rq = &exp->uring_queues[i];
>> +        rq->rqid = i;
>> +        rq->ent = g_new(FuseUringEnt, exp->uring_queue_depth);
>> +
>> +        for (int j = 0; j < exp->uring_queue_depth; j++) {
>> +            FuseUringEnt *ent = &rq->ent[j];
>> +            ent->rq = rq;
>> +            ent->req_payload_sz = bufsize - FUSE_BUFFER_HEADER_SIZE;
>> +            ent->req_payload = g_malloc0(ent->req_payload_sz);
> 
> I don't see a corresponding g_free() in this patch? Exports can be
> deleted at runtime, so this memory must be freed.
> 

ent->req_payload is freed in fuse_export_delete_uring(), which is introduced
in a later patch. Should we merge patch 5 into this one?

>> +
>> +            ent->iov[0] = (struct iovec) {
>> +                &ent->req_header,
>> +                sizeof(struct fuse_uring_req_header)
>> +            };
>> +            ent->iov[1] = (struct iovec) {
>> +                ent->req_payload,
>> +                ent->req_payload_sz
>> +            };
>> +
>> +            ent->fuse_cqe_handler.cb = fuse_uring_cqe_handler;
>> +        }
>> +
>> +        /* Distribute uring queues across FUSE queues */
>> +        rq->q = &exp->queues[i % exp->num_fuse_queues];
>> +        QLIST_INSERT_HEAD(&(rq->q->uring_queue_list), rq, next);
>> +    }
>> +}
>> +
>> +static void
>> +fuse_schedule_ring_queue_registrations(FuseExport *exp)
>> +{
>> +    for (int i = 0; i < exp->num_fuse_queues; i++) {
>> +        FuseQueue *q = &exp->queues[i];
>> +        FuseUringQueue *rq;
>> +
>> +        QLIST_FOREACH(rq, &q->uring_queue_list, next) {
>> +            aio_bh_schedule_oneshot(q->ctx, fuse_uring_submit_register, rq);
>> +        }
>> +    }
>> +}
>> +
>> +static void fuse_uring_start(FuseExport *exp, struct fuse_init_out *out)
>> +{
>> +    assert(!exp->uring_started);
>> +    exp->uring_started = true;
>> +
>> +    /*
>> +     * Since we dont't enable the FUSE_MAX_PAGES feature, the value of
> 
> s/dont't/don't/


Reply via email to