> On Feb 12, 2026, at 04:56, Stefan Hajnoczi <[email protected]> wrote:
> 
> On Sat, Feb 07, 2026 at 08:08:56PM +0800, Brian Song wrote:
>> +typedef struct FuseUringEnt {
>> +    /* back pointer */
>> +    FuseUringQueue *rq;
>> +
>> +    /* commit id of a fuse request */
>> +    uint64_t req_commit_id;
> 
> This field is only read by fuse_uring_resubmit() in this patch, never
> assigned. Maybe some code later in the patch series should be squashed
> into this patch so that this field is initialized somewhere?
> 

Yes, req_commit_id is assigned in fuse_uring_co_process_request(), so it is left 
unassigned in this patch.
For the same reason, I will probably keep fuse_uring_resubmit() empty for now 
and introduce req_commit_id later.

> Also, is it possible to drop req_commit_id and read from the existing
> req_header.ring_ent_in_out.commit_id field instead?

Of course. I just did the same as in libfuse.

Both members, req_commit_id and req_payload_sz, of struct fuse_ring_ent are already 
present in req_header, but we would need to dereference multiple times to access 
them. That is probably why these duplicated members exist here.

I think I will remove req_commit_id and req_payload_sz from struct FuseUringEnt 
and add a comment indicating where they can be found.

> 
>> +static void fuse_uring_cqe_handler(CqeHandler *cqe_handler)
>> +{
>> +    Coroutine *co;
>> +    FuseUringEnt *ent =
>> +        container_of(cqe_handler, FuseUringEnt, fuse_cqe_handler);
>> +    FuseExport *exp = ent->rq->q->exp;
>> +
>> +    if (unlikely(exp->halted)) {
>> +        return;
> 
> Missing blk_exp_unref()?

Yes, I missed one in this patch.

> 
>> +    }
>> +
>> +    int err = cqe_handler->cqe.res;
>> +
>> +    if (unlikely(err != 0)) {
>> +        switch (err) {
>> +        case -EAGAIN:
>> +        case -EINTR:
>> +            aio_add_sqe(fuse_uring_resubmit, ent, &ent->fuse_cqe_handler);
>> +            break;
>> +        case -ENOTCONN:
>> +            /* Connection already gone */
>> +            break;
>> +        default:
>> +            fuse_export_halt(exp);
>> +            break;
>> +        }
>> +
>> +        /* A uring entry returned */
>> +        blk_exp_unref(&exp->common);
>> +    } else {
>> +        co = qemu_coroutine_create(co_fuse_uring_queue_handle_cqe, ent);
>> +        /* Account this request as in-flight */
>> +        fuse_inc_in_flight(exp);
>> +        qemu_coroutine_enter(co);
>> +    }
>> +}
>> +
>> +static void
>> +fuse_uring_sqe_set_req_data(struct fuse_uring_cmd_req *req,
>> +                            const unsigned int rqid,
>> +                            const unsigned int commit_id)
>> +{
>> +    req->qid = rqid;
>> +    req->commit_id = commit_id;
>> +    req->flags = 0;
>> +}
>> +
>> +static void
>> +fuse_uring_sqe_prepare(struct io_uring_sqe *sqe, FuseQueue *q, __u32 cmd_op)
>> +{
>> +    sqe->opcode = IORING_OP_URING_CMD;
>> +
>> +    sqe->fd = q->fuse_fd;
>> +    sqe->rw_flags = 0;
>> +    sqe->ioprio = 0;
>> +    sqe->off = 0;
>> +
>> +    sqe->cmd_op = cmd_op;
>> +    sqe->__pad1 = 0;
>> +}
>> +
>> +static void fuse_uring_prep_sqe_register(struct io_uring_sqe *sqe, void 
>> *opaque)
>> +{
>> +    FuseUringEnt *ent = opaque;
>> +    struct fuse_uring_cmd_req *req = (void *)&sqe->cmd[0];
>> +
>> +    ent->last_cmd = FUSE_IO_URING_CMD_REGISTER;
>> +    fuse_uring_sqe_prepare(sqe, ent->rq->q, ent->last_cmd);
>> +
>> +    sqe->addr = (uint64_t)(ent->iov);
>> +    sqe->len = 2;
>> +
>> +    fuse_uring_sqe_set_req_data(req, ent->rq->rqid, 0);
>> +}
>> +
>> +static void fuse_uring_resubmit(struct io_uring_sqe *sqe, void *opaque)
>> +{
>> +    FuseUringEnt *ent = opaque;
>> +    struct fuse_uring_cmd_req *req = (void *)&sqe->cmd[0];
>> +
>> +    fuse_uring_sqe_prepare(sqe, ent->rq->q, ent->last_cmd);
>> +
>> +    switch (ent->last_cmd) {
>> +    case FUSE_IO_URING_CMD_REGISTER:
>> +        sqe->addr = (uint64_t)(ent->iov);
>> +        sqe->len = 2;
>> +        fuse_uring_sqe_set_req_data(req, ent->rq->rqid, 0);
>> +        break;
>> +    case FUSE_IO_URING_CMD_COMMIT_AND_FETCH:
>> +        fuse_uring_sqe_set_req_data(req, ent->rq->rqid, ent->req_commit_id);
>> +        break;
>> +    default:
>> +        error_report("Unknown command type: %d", ent->last_cmd);
>> +        break;
>> +    }
>> +}
>> +
>> +static void fuse_uring_submit_register(void *opaque)
>> +{
>> +    FuseUringQueue *rq = opaque;
>> +    FuseExport *exp = rq->q->exp;
>> +
>> +    for (int j = 0; j < exp->uring_queue_depth; j++) {
>> +        /* Register a uring entry */
>> +        blk_exp_ref(&exp->common);
>> +
>> +        aio_add_sqe(fuse_uring_prep_sqe_register, &rq->ent[j],
>> +                    &rq->ent[j].fuse_cqe_handler);
>> +    }
>> +}
>> +
>> +/**
>> + * Distribute uring queues across FUSE queues in the round-robin manner.
>> + * This ensures even distribution of kernel uring queues across 
>> user-specified
>> + * FUSE queues.
>> + *
>> + * num_uring_queues > num_fuse_queues: Each IOThread manages multiple uring
>> + * queues (multi-queue mapping).
>> + * num_uring_queues < num_fuse_queues: Excess IOThreads remain idle with no
>> + * assigned uring queues.
>> + */
>> +static void fuse_uring_setup_queues(FuseExport *exp, size_t bufsize)
>> +{
>> +    int num_uring_queues = get_nprocs_conf();
>> +
>> +    exp->num_uring_queues = num_uring_queues;
>> +    exp->uring_queues = g_new(FuseUringQueue, num_uring_queues);
>> +
>> +    for (int i = 0; i < num_uring_queues; i++) {
>> +        FuseUringQueue *rq = &exp->uring_queues[i];
>> +        rq->rqid = i;
>> +        rq->ent = g_new(FuseUringEnt, exp->uring_queue_depth);
>> +
>> +        for (int j = 0; j < exp->uring_queue_depth; j++) {
>> +            FuseUringEnt *ent = &rq->ent[j];
>> +            ent->rq = rq;
>> +            ent->req_payload_sz = bufsize - FUSE_BUFFER_HEADER_SIZE;
>> +            ent->req_payload = g_malloc0(ent->req_payload_sz);
> 
> I don't see a corresponding g_free() in this patch? Exports can be
> deleted at runtime, so this memory must be freed.
> 
>> +
>> +            ent->iov[0] = (struct iovec) {
>> +                &ent->req_header,
>> +                sizeof(struct fuse_uring_req_header)
>> +            };
>> +            ent->iov[1] = (struct iovec) {
>> +                ent->req_payload,
>> +                ent->req_payload_sz
>> +            };
>> +
>> +            ent->fuse_cqe_handler.cb = fuse_uring_cqe_handler;
>> +        }
>> +
>> +        /* Distribute uring queues across FUSE queues */
>> +        rq->q = &exp->queues[i % exp->num_fuse_queues];
>> +        QLIST_INSERT_HEAD(&(rq->q->uring_queue_list), rq, next);
>> +    }
>> +}
>> +
>> +static void
>> +fuse_schedule_ring_queue_registrations(FuseExport *exp)
>> +{
>> +    for (int i = 0; i < exp->num_fuse_queues; i++) {
>> +        FuseQueue *q = &exp->queues[i];
>> +        FuseUringQueue *rq;
>> +
>> +        QLIST_FOREACH(rq, &q->uring_queue_list, next) {
>> +            aio_bh_schedule_oneshot(q->ctx, fuse_uring_submit_register, rq);
>> +        }
>> +    }
>> +}
>> +
>> +static void fuse_uring_start(FuseExport *exp, struct fuse_init_out *out)
>> +{
>> +    assert(!exp->uring_started);
>> +    exp->uring_started = true;
>> +
>> +    /*
>> +     * Since we dont't enable the FUSE_MAX_PAGES feature, the value of
> 
> s/dont't/don't/


Reply via email to