Implement the symmetric algorithm control queue handler, covering both plain cipher and algorithm-chaining requests.
Currently the control queue is used to create and close sessions
for symmetric algorithms.

Signed-off-by: Gonglei <arei.gong...@huawei.com>
---
 hw/virtio/virtio-crypto.c | 234 +++++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 233 insertions(+), 1 deletion(-)

diff --git a/hw/virtio/virtio-crypto.c b/hw/virtio/virtio-crypto.c
index e74a15f..ee1dcdc 100644
--- a/hw/virtio/virtio-crypto.c
+++ b/hw/virtio/virtio-crypto.c
@@ -26,9 +26,241 @@ static void virtio_crypto_process(VirtIOCrypto *vcrypto)
 {
 }
 
-static void virtio_crypto_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
+static inline int virtio_crypto_vq2q(int queue_index)
+{
+    return queue_index;
+}
+
+static void
+virtio_crypto_cipher_session_helper(VirtIODevice *vdev,
+           QCryptoCryptoDevBackendSymSessionInfo *info,
+           struct virtio_crypto_cipher_session_para *cipher_para,
+           struct virtio_crypto_cipher_session_output *cipher_out)
+{
+    hwaddr key_gpa;
+    void *key_hva;
+    hwaddr len;
+
+    info->cipher_alg = cipher_para->algo;
+    info->key_len = cipher_para->keylen;
+    info->direction = cipher_para->op;
+    len = info->key_len;
+    /* get cipher key */
+    if (len > 0) {
+        DPRINTF("keylen=%" PRIu32 "\n", info->key_len);
+        key_gpa = cipher_out->key_addr;
+
+        key_hva = cpu_physical_memory_map(key_gpa, &len, 0);
+
+        info->cipher_key = g_malloc(info->key_len);
+        memcpy(info->cipher_key, key_hva, info->key_len);
+        cpu_physical_memory_unmap(key_hva, len, 0, len);
+    }
+}
+
+static int64_t
+virtio_crypto_create_sym_session(VirtIOCrypto *vcrypto,
+               struct virtio_crypto_sym_create_session_req *sess_req,
+               uint32_t queue_id,
+               uint32_t opcode,
+               VirtQueueElement *elem)
+{
+    VirtIODevice *vdev = VIRTIO_DEVICE(vcrypto);
+    QCryptoCryptoDevBackendSymSessionInfo info;
+    int64_t session_id;
+    int queue_index;
+    uint32_t op_type;
+    hwaddr auth_key_gpa;
+    void *auth_key_hva;
+    struct virtio_crypto_session_input *input;
+    hwaddr len;
+    size_t input_offset;
+    Error *local_err = NULL;
+    struct iovec *iov = elem->in_sg;
+
+    memset(&info, 0, sizeof(info));
+    op_type = sess_req->op_type;
+    info.op_type = op_type;
+    info.op_code = opcode;
+
+    if (op_type == VIRTIO_CRYPTO_SYM_OP_CIPHER) {
+        virtio_crypto_cipher_session_helper(vdev, &info,
+                           &sess_req->u.cipher.para,
+                           &sess_req->u.cipher.out);
+        /* calculate the offset of input data */
+        input_offset = offsetof(struct virtio_crypto_op_ctrl_req,
+                                u.sym_create_session.u.cipher.input);
+        input = (void *)iov[0].iov_base + input_offset;
+    } else if (op_type == VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING) {
+        /* cipher part */
+        virtio_crypto_cipher_session_helper(vdev, &info,
+                           &sess_req->u.chain.para.cipher_param,
+                           &sess_req->u.chain.out.cipher);
+        /* calculate the offset of input data */
+        input_offset = offsetof(struct virtio_crypto_op_ctrl_req,
+                                u.sym_create_session.u.chain.input);
+        input = (void *)iov[0].iov_base + input_offset;
+        /* hash part */
+        info.alg_chain_order = sess_req->u.chain.para.alg_chain_order;
+        info.add_len = sess_req->u.chain.para.aad_len;
+        info.hash_mode = sess_req->u.chain.para.hash_mode;
+        if (info.hash_mode == VIRTIO_CRYPTO_SYM_HASH_MODE_AUTH) {
+            info.hash_alg = sess_req->u.chain.para.u.mac_param.algo;
+            len = info.auth_key_len =
+                     sess_req->u.chain.para.u.mac_param.auth_key_len;
+            info.hash_result_len =
+                     sess_req->u.chain.para.u.mac_param.hash_result_len;
+            /* get auth key */
+            if (len > 0) {
+                DPRINTF("keylen=%" PRIu32 "\n", info.auth_key_len);
+                auth_key_gpa = sess_req->u.chain.out.mac.auth_key_addr;
+                auth_key_hva = cpu_physical_memory_map(auth_key_gpa,
+                                                       &len, false);
+                info.auth_key = g_malloc(len);
+                memcpy(info.auth_key, auth_key_hva, len);
+                cpu_physical_memory_unmap(auth_key_hva, len, false, len);
+            }
+        } else if (info.hash_mode == VIRTIO_CRYPTO_SYM_HASH_MODE_PLAIN) {
+            info.hash_alg = sess_req->u.chain.para.u.hash_param.algo;
+            info.hash_result_len =
+                     sess_req->u.chain.para.u.hash_param.hash_result_len;
+        } else {
+            /* VIRTIO_CRYPTO_SYM_HASH_MODE_NESTED */
+            error_report("unsupported hash mode");
+            goto err;
+        }
+    } else {
+        /* calculate the offset of input data */
+        input_offset = offsetof(struct virtio_crypto_op_ctrl_req,
+                                u.sym_create_session.u.cipher.input);
+        input = (void *)iov[0].iov_base + input_offset;
+        /* VIRTIO_CRYPTO_SYM_OP_NONE */
+        error_report("unsupported cipher type");
+        goto err;
+    }
+
+    queue_index = virtio_crypto_vq2q(queue_id);
+    session_id = qcrypto_cryptodev_backend_sym_create_session(
+                                     vcrypto->cryptodev,
+                                     &info, queue_index, &local_err);
+    if (session_id >= 0) {
+        DPRINTF("create session_id=%" PRIu64 "\n", session_id);
+        /* Set the result, notify the frontend driver soon */
+        input->status = VIRTIO_CRYPTO_OK;
+        input->session_id = session_id;
+
+        g_free(info.cipher_key);
+        g_free(info.auth_key);
+        return 0;
+    } else {
+        if (local_err) {
+            error_report_err(local_err);
+        }
+    }
+
+err:
+    g_free(info.cipher_key);
+    g_free(info.auth_key);
+    input->status = VIRTIO_CRYPTO_ERR;
+    return -1;
+}
+
+static void
+virtio_crypto_handle_close_session(VirtIOCrypto *vcrypto,
+         struct virtio_crypto_destroy_session_req *close_sess_req,
+         uint32_t queue_id,
+         VirtQueueElement *elem)
 {
+    int ret;
+    uint64_t session_id;
+    uint32_t status;
+    struct iovec *iov = elem->in_sg;
+    size_t status_offset;
+    void *in_status_ptr;
+    Error *local_err = NULL;
+
+    session_id = close_sess_req->session_id;
+    DPRINTF("close session, id=%" PRIu64 "\n", session_id);
+
+    ret = qcrypto_cryptodev_backend_sym_close_session(
+              vcrypto->cryptodev, session_id, queue_id, &local_err);
+    if (ret == 0) {
+        status = VIRTIO_CRYPTO_OK;
+    } else {
+        if (local_err) {
+            error_report_err(local_err);
+        } else {
+            error_report("destroy session failed");
+        }
+        status = VIRTIO_CRYPTO_ERR;
+    }
+    /* Calculate the offset of status bits */
+    status_offset = offsetof(struct virtio_crypto_op_ctrl_req,
+                             u.destroy_session.status);
+    in_status_ptr = (void *)iov[0].iov_base + status_offset;
+    /* Set the result, notify the frontend driver soon */
+    memcpy(in_status_ptr, &status, sizeof(status));
+}
+
+static void virtio_crypto_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
+{
+    VirtIOCrypto *vcrypto = VIRTIO_CRYPTO(vdev);
+    struct virtio_crypto_op_ctrl_req ctrl;
+    VirtQueueElement *elem;
+    size_t s;
+    struct iovec *iov;
+    unsigned int iov_cnt;
+    uint32_t queue_id;
+    uint32_t opcode;
+
+    for (;;) {
+        elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
+        if (!elem) {
+            break;
+        }
+        if (elem->in_num < 1 ||
+            iov_size(elem->in_sg, elem->in_num) < sizeof(ctrl)) {
+            error_report("virtio-crypto ctrl missing headers");
+            exit(1);
+        }
+
+        iov_cnt = elem->in_num;
+        iov = elem->in_sg;
+        s = iov_to_buf(iov, iov_cnt, 0, &ctrl, sizeof(ctrl));
+        assert(s == sizeof(ctrl));
+        opcode = ctrl.header.opcode;
+        queue_id = ctrl.header.queue_id;
+
+        switch (opcode) {
+        case VIRTIO_CRYPTO_CIPHER_CREATE_SESSION:
+            virtio_crypto_create_sym_session(vcrypto,
+                             &ctrl.u.sym_create_session,
+                             queue_id, opcode,
+                             elem);
+
+            break;
+        case VIRTIO_CRYPTO_CIPHER_DESTROY_SESSION:
+        case VIRTIO_CRYPTO_HASH_DESTROY_SESSION:
+        case VIRTIO_CRYPTO_MAC_DESTROY_SESSION:
+        case VIRTIO_CRYPTO_AEAD_DESTROY_SESSION:
+            virtio_crypto_handle_close_session(vcrypto,
+                   &ctrl.u.destroy_session, queue_id,
+                   elem);
+            break;
+        case VIRTIO_CRYPTO_HASH_CREATE_SESSION:
+        case VIRTIO_CRYPTO_MAC_CREATE_SESSION:
+        case VIRTIO_CRYPTO_AEAD_CREATE_SESSION:
+        default:
+            error_report("virtio-crypto unsupported ctrl opcode: %u",
+                         opcode);
+            exit(1);
+        }
+
+        virtqueue_push(vq, elem, sizeof(ctrl));
+        virtio_notify(vdev, vq);
+        g_free(elem);
+    }
 }
 
 static void virtio_crypto_handle_dataq(VirtIODevice *vdev, VirtQueue *vq)
-- 
1.7.12.4
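
For reference, below is a small standalone sketch of the offsetof() pattern the
handlers above rely on: compute the offset of the device-writable result field
inside the control request, then write the status/session id back at that
offset in the guest's request buffer. The structures demo_ctrl_req and
demo_session_input are simplified stand-ins invented for illustration only;
they are not the real virtio_crypto_op_ctrl_req/virtio_crypto_session_input
layouts from the virtio headers.

/*
 * Standalone sketch of the offsetof()-based write-back used by
 * virtio_crypto_create_sym_session() and virtio_crypto_handle_close_session().
 * demo_ctrl_req / demo_session_input are simplified stand-ins, not the real
 * virtio-crypto structures.
 */
#include <inttypes.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* Stand-in for the device-writable part (cf. virtio_crypto_session_input). */
struct demo_session_input {
    uint64_t session_id;
    uint32_t status;
    uint32_t padding;
};

/* Stand-in for the control request (cf. virtio_crypto_op_ctrl_req). */
struct demo_ctrl_req {
    struct {
        uint32_t opcode;
        uint32_t queue_id;
    } header;
    union {
        struct {
            uint8_t para[16];                /* session parameters */
            uint8_t out[16];                 /* key addresses, lengths */
            struct demo_session_input input; /* result written by the device */
        } sym_create_session;
    } u;
};

int main(void)
{
    /* Backing storage with the right alignment; treat it as the raw buffer
     * the handler sees as iov[0].iov_base of the guest's control request. */
    struct demo_ctrl_req req_storage;
    uint8_t *guest_buf = (uint8_t *)&req_storage;
    memset(guest_buf, 0, sizeof(req_storage));

    /* Locate the writable 'input' field by its offset inside the request... */
    size_t input_offset = offsetof(struct demo_ctrl_req,
                                   u.sym_create_session.input);
    struct demo_session_input *input =
        (struct demo_session_input *)(guest_buf + input_offset);

    /* ...then store the result in place, as the handler does. */
    input->session_id = 1;  /* id returned by the backend */
    input->status = 0;      /* VIRTIO_CRYPTO_OK in the real code */

    printf("input offset = %zu, session_id = %" PRIu64 ", status = %" PRIu32 "\n",
           input_offset, input->session_id, input->status);
    return 0;
}

The real handlers apply the same offsets to iov[0].iov_base of the request's
in_sg entry, so this pattern assumes the whole control request, including its
writable tail, lands in the first in-buffer of the virtqueue element.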