Add the files that implement the data transfer part of the communication protocol with the VEx. RDMA of Ethernet packets is implemented here.
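The host publishes its receive pool to the EIOC as an array of buff_pool_entry descriptors, and each side RDMA-writes Ethernet frames directly into the other side's pool entries. A frame is packed at the end of its entry, immediately in front of a struct viport_trailer whose valid bit signals arrival. The stand-alone sketch below mirrors the BUFFER_SIZE and data_offset arithmetic from this patch, minus the trailer's data_alignment_offset adjustment; the 32-byte trailer size and the ROUNDUPP2 definition are assumptions here, since the real helper lives elsewhere in the driver:

#include <stdio.h>

#define VIPORT_TRAILER_ALIGNMENT 32
#define TRAILER_SZ 32	/* assumed sizeof(struct viport_trailer) */

/* round v up to a multiple of the power-of-two a (assumed to match
 * the driver's ROUNDUPP2 helper) */
#define ROUNDUPP2(v, a) (((v) + (a) - 1) & ~((unsigned int)(a) - 1))

/* pool-entry size needed for a frame of 'len' bytes */
#define BUFFER_SIZE(len) \
	(TRAILER_SZ + ROUNDUPP2((len), VIPORT_TRAILER_ALIGNMENT))

/* offset of the payload inside a pool entry: the packet is packed
 * against the trailer at the end of the buffer */
static unsigned int payload_offset(unsigned int buffer_sz, unsigned int len)
{
	return buffer_sz - TRAILER_SZ -
	       ROUNDUPP2(len, VIPORT_TRAILER_ALIGNMENT);
}

int main(void)
{
	unsigned int buffer_sz = BUFFER_SIZE(1500);	/* 1536 bytes */

	printf("1500-byte frame at offset %u\n",
	       payload_offset(buffer_sz, 1500));	/* 0 */
	printf("60-byte frame at offset %u\n",
	       payload_offset(buffer_sz, 60));		/* 1440 */
	return 0;
}

Working backwards from the end of the buffer lets a single RDMA write deliver payload and trailer together, so the receiver only has to poll the trailer's valid bit.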
Signed-off-by: Ramachandra K <[EMAIL PROTECTED]> --- drivers/infiniband/ulp/vnic/vnic_data.c | 1065 ++++++++++++++++++++++++++++ drivers/infiniband/ulp/vnic/vnic_data.h | 179 +++++ drivers/infiniband/ulp/vnic/vnic_trailer.h | 63 ++ 3 files changed, 1307 insertions(+), 0 deletions(-) diff --git a/drivers/infiniband/ulp/vnic/vnic_data.c b/drivers/infiniband/ulp/vnic/vnic_data.c new file mode 100644 index 0000000..e3b9739 --- /dev/null +++ b/drivers/infiniband/ulp/vnic/vnic_data.c @@ -0,0 +1,1065 @@ +/* + * Copyright (c) 2006 SilverStorm Technologies Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */
+
+#include <net/inet_sock.h>
+#include <linux/ip.h>
+
+#include "vnic_util.h"
+#include "vnic_viport.h"
+#include "vnic_config.h"
+#include "vnic_data.h"
+#include "vnic_trailer.h"
+
+static void data_received_kick(struct io *io);
+static void data_xmit_complete(struct io *io);
+
+#define LOCAL_IO(x) PTR64((x))
+
+#define INBOUND_COPY
+
+#ifdef INBOUND_COPY
+u32 min_rcv_skb = 60;
+module_param(min_rcv_skb, uint, 0444);
+#endif
+
+u32 min_xmt_skb = 60;
+module_param(min_xmt_skb, uint, 0444);
+
+#ifdef CONFIG_INFINIBAND_VNIC_STATS
+cycles_t recv_ref;
+#endif /* CONFIG_INFINIBAND_VNIC_STATS */
+
+BOOLEAN data_init(struct data *data, struct viport *viport,
+		  struct data_config *config, struct ib_pd *pd, u64 guid)
+{
+	DATA_FUNCTION("data_init()\n");
+
+	data->parent = viport;
+	data->config = config;
+	data->ib_conn.viport = viport;
+	data->ib_conn.ib_config = &config->ib_config;
+	data->ib_conn.state = IB_CONN_UNINITTED;
+
+	if ((min_xmt_skb < 60) || (min_xmt_skb > 9000)) {
+		DATA_ERROR("min_xmt_skb (%d) must be between 60 and 9000\n",
+			   min_xmt_skb);
+		goto failure;
+	}
+	if (!vnic_ib_conn_init(&data->ib_conn, viport, pd, guid,
+			       &config->ib_config)) {
+		goto failure;
+	}
+	data->mr = ib_get_dma_mr(pd,
+				 IB_ACCESS_LOCAL_WRITE |
+				 IB_ACCESS_REMOTE_READ |
+				 IB_ACCESS_REMOTE_WRITE);
+	if (IS_ERR(data->mr)) {
+		DATA_ERROR("failed to register memory for data connection\n");
+		goto destroy_conn;
+	}
+
+	data->ib_conn.cm_id = ib_create_cm_id(viport->config->ibdev,
+					      vnic_ib_cm_handler,
+					      &data->ib_conn);
+
+	if (IS_ERR(data->ib_conn.cm_id)) {
+		DATA_ERROR("creating data CM ID failed\n");
+		goto dereg_mr;
+	}
+
+	return TRUE;
+
+dereg_mr:
+	ib_dereg_mr(data->mr);
+destroy_conn:
+	ib_destroy_qp(data->ib_conn.qp);
+	ib_destroy_cq(data->ib_conn.cq);
+failure:
+	return FALSE;
+}
+
+static void data_post_recvs(struct data *data)
+{
+	unsigned long flags;
+
+	DATA_FUNCTION("data_post_recvs()\n");
+	spin_lock_irqsave(&data->recv_ios_lock, flags);
+	while (!list_empty(&data->recv_ios)) {
+		struct io *io = list_entry(data->recv_ios.next,
+					   struct io, list_ptrs);
+		struct recv_io *recv_io = (struct recv_io *)io;
+
+		list_del(&recv_io->io.list_ptrs);
+		spin_unlock_irqrestore(&data->recv_ios_lock, flags);
+		if (!vnic_ib_post_recv(&data->ib_conn, &recv_io->io)) {
+			viport_failure(data->parent);
+			return;
+		}
+		spin_lock_irqsave(&data->recv_ios_lock, flags);
+	}
+	spin_unlock_irqrestore(&data->recv_ios_lock, flags);
+}
+
+BOOLEAN data_connect(struct data *data)
+{
+	struct xmit_pool *xmit_pool = &data->xmit_pool;
+	struct recv_pool *recv_pool = &data->recv_pool;
+	struct recv_io *recv_io;
+	struct send_io *send_io;
+	struct rdma_io *rdma_io;
+	struct rdma_dest *rdma_dest;
+	u8 *region_data = NULL;
+	int sz;
+	unsigned int i;
+	dma_addr_t region_data_dma;
+	dma_addr_t xmit_dma;
+	u8 *xmit_data;
+	struct viport *viport = data->parent;
+
+	DATA_FUNCTION("data_connect()\n");
+
+	recv_pool->pool_sz = data->config->host_recv_pool_entries;
+	recv_pool->eioc_pool_sz = data->host_pool_parms.num_recv_pool_entries;
+	if (recv_pool->pool_sz > recv_pool->eioc_pool_sz)
+		recv_pool->pool_sz = recv_pool->eioc_pool_sz;
+
+	xmit_pool->pool_sz = data->eioc_pool_parms.num_recv_pool_entries;
+
+	recv_pool->buffer_sz = data->host_pool_parms.size_recv_pool_entry;
+	xmit_pool->buffer_sz = data->eioc_pool_parms.size_recv_pool_entry;
+
+	xmit_pool->notify_count = 0;
+	xmit_pool->notify_bundle = data->config->notify_bundle;
+	xmit_pool->next_xmit_pool = 0;
+#ifdef LIMIT_OUTSTANDING_SENDS
+	xmit_pool->num_xmit_bufs = xmit_pool->notify_bundle * 2;
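+	/* two notify bundles' worth of buffers bounds the number of
+	 * outstanding sends before a signaled completion reclaims them */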
+#else /* !LIMIT_OUTSTANDING_SENDS */ + xmit_pool->num_xmit_bufs = xmit_pool->pool_sz; +#endif /* LIMIT_OUTSTANDING_SENDS */ + xmit_pool->next_xmit_buf = 0; + xmit_pool->last_comp_buf = xmit_pool->num_xmit_bufs - 1; + + recv_pool->sz_free_bundle = + data->host_pool_parms.free_recv_pool_entries_per_update; + recv_pool->num_free_bufs = 0; + recv_pool->num_posted_bufs = 0; + xmit_pool->kick_count = 0; + xmit_pool->kick_byte_count = 0; + + xmit_pool->send_kicks = + data->eioc_pool_parms.num_recv_pool_entries_before_kick + || data->eioc_pool_parms.num_recv_pool_bytes_before_kick; + xmit_pool->kick_bundle = + data->eioc_pool_parms.num_recv_pool_entries_before_kick; + xmit_pool->kick_byte_bundle = + data->eioc_pool_parms.num_recv_pool_bytes_before_kick; + recv_pool->next_full_buf = 0; + recv_pool->next_free_buf = 0; + recv_pool->kick_on_free = FALSE; + + xmit_pool->need_buffers = TRUE; + + sz = sizeof(struct rdma_dest) * recv_pool->pool_sz; + sz += sizeof(struct recv_io) * data->config->num_recvs; + sz += sizeof(struct rdma_io) * xmit_pool->num_xmit_bufs; + + xmit_pool->xmitdata_len = + BUFFER_SIZE(min_xmt_skb) * xmit_pool->num_xmit_bufs; + if ((data->local_storage = vmalloc(sz)) == NULL) { + DATA_ERROR("failed allocating %d bytes local storage\n", sz); + goto failure; + } + + memset(data->local_storage, '\0', sz); + + recv_pool->recv_bufs = (struct rdma_dest *)data->local_storage; + sz = sizeof(struct rdma_dest) * recv_pool->pool_sz; + recv_io = (struct recv_io *)(data->local_storage + sz); + sz += sizeof(struct recv_io) * data->config->num_recvs; + xmit_pool->xmit_bufs = (struct rdma_io *)(data->local_storage + sz); + sz += sizeof(struct rdma_io) * xmit_pool->num_xmit_bufs; + + if ((region_data = kzalloc(4, GFP_KERNEL)) == NULL) { + DATA_ERROR("failed to alloc memory for region data\n"); + goto failure; + } + + data->region_data = region_data; + + recv_pool->buf_pool_len = + sizeof(struct buff_pool_entry) * recv_pool->eioc_pool_sz; + if ((recv_pool->buf_pool = + kzalloc(recv_pool->buf_pool_len, GFP_KERNEL)) == NULL) { + DATA_ERROR("failed allocating %d bytes" + " for recv pool bufpool\n", + recv_pool->buf_pool_len); + goto failure; + } + + recv_pool->buf_pool_dma = + dma_map_single(viport->config->ibdev->dma_device, + recv_pool->buf_pool, recv_pool->buf_pool_len, + DMA_TO_DEVICE); + + if (dma_mapping_error(recv_pool->buf_pool_dma)) { + DATA_ERROR("xmit buf_pool dma map error\n"); + goto failure; + } + + xmit_pool->buf_pool_len = + sizeof(struct buff_pool_entry) * xmit_pool->pool_sz; + if ((xmit_pool->buf_pool = + kzalloc(xmit_pool->buf_pool_len, GFP_KERNEL)) == NULL) { + DATA_ERROR("failed allocating %d bytes" + " for xmit pool bufpool\n", + xmit_pool->buf_pool_len); + goto failure; + } + xmit_pool->buf_pool_dma = + dma_map_single(viport->config->ibdev->dma_device, + xmit_pool->buf_pool, xmit_pool->buf_pool_len, + DMA_FROM_DEVICE); + + if (dma_mapping_error(xmit_pool->buf_pool_dma)) { + DATA_ERROR("xmit buf_pool dma map error\n"); + goto failure; + } + + if ((xmit_pool->xmit_data = + kzalloc(xmit_pool->xmitdata_len, GFP_KERNEL)) == NULL) { + DATA_ERROR("failed allocating %d bytes for xmit data\n", + xmit_pool->xmitdata_len); + goto failure; + } + + xmit_pool->xmitdata_dma = + dma_map_single(viport->config->ibdev->dma_device, + xmit_pool->xmit_data, xmit_pool->xmitdata_len, + DMA_TO_DEVICE); + + if (dma_mapping_error(xmit_pool->xmitdata_dma)) { + DATA_ERROR("xmit data dma map error\n"); + goto failure; + } + + rdma_io = &data->free_bufs_io; + rdma_io->io.viport = data->parent; + 
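+	/* free-buffer pool updates need no completion processing */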
rdma_io->io.routine = NULL; + + rdma_io->list[0].lkey = data->mr->lkey; + + rdma_io->io.swr.wr_id = (unsigned long)rdma_io; + rdma_io->io.swr.sg_list = rdma_io->list; + rdma_io->io.swr.num_sge = 1; + rdma_io->io.swr.opcode = IB_WR_RDMA_WRITE; + rdma_io->io.swr.send_flags = IB_SEND_SIGNALED; + rdma_io->io.type = RDMA; + + send_io = &data->kick_io; + send_io->io.viport = data->parent; + send_io->io.routine = NULL; + + region_data_dma = dma_map_single(viport->config->ibdev->dma_device, + region_data, 4, DMA_BIDIRECTIONAL); + + if (dma_mapping_error(region_data_dma)) { + DATA_ERROR("region data dma map error\n"); + goto failure; + } + + data->regiondata_dma = region_data_dma; + + send_io->list.addr = region_data_dma; + send_io->list.length = 0; + send_io->list.lkey = data->mr->lkey; + + send_io->io.swr.wr_id = (unsigned long)send_io; + send_io->io.swr.sg_list = &send_io->list; + send_io->io.swr.num_sge = 1; + send_io->io.swr.opcode = IB_WR_SEND; + send_io->io.swr.send_flags = IB_SEND_SIGNALED; + send_io->io.type = SEND; + + INIT_LIST_HEAD(&data->recv_ios); + spin_lock_init(&data->recv_ios_lock); + spin_lock_init(&data->xmit_buf_lock); + for (i = 0; i < data->config->num_recvs; i++) { + recv_io[i].io.viport = data->parent; + recv_io[i].io.routine = data_received_kick; + recv_io[i].list.addr = region_data_dma; + recv_io[i].list.length = 4; + recv_io[i].list.lkey = data->mr->lkey; + + recv_io[i].io.rwr.wr_id = PTR64(&recv_io[i].io); + recv_io[i].io.rwr.sg_list = &recv_io[i].list; + recv_io[i].io.rwr.num_sge = 1; + + list_add(&recv_io[i].io.list_ptrs, &data->recv_ios); + } + INIT_LIST_HEAD(&recv_pool->avail_recv_bufs); + for (i = 0; i < recv_pool->pool_sz; i++) { + rdma_dest = &recv_pool->recv_bufs[i]; + list_add(&rdma_dest->list_ptrs, &recv_pool->avail_recv_bufs); + } + + xmit_dma = xmit_pool->xmitdata_dma; + xmit_data = xmit_pool->xmit_data; + + for (i = 0; i < xmit_pool->num_xmit_bufs; i++) { + rdma_io = &xmit_pool->xmit_bufs[i]; + rdma_io->index = i; + rdma_io->io.viport = data->parent; + rdma_io->io.routine = data_xmit_complete; + + rdma_io->list[0].lkey = data->mr->lkey; + rdma_io->list[1].lkey = data->mr->lkey; + rdma_io->io.swr.wr_id = PTR64(rdma_io); + rdma_io->io.swr.sg_list = rdma_io->list; + rdma_io->io.swr.num_sge = 2; + rdma_io->io.swr.opcode = IB_WR_RDMA_WRITE; + rdma_io->io.swr.send_flags = IB_SEND_SIGNALED; + rdma_io->io.type = RDMA; + + rdma_io->data = xmit_data; + rdma_io->data_dma = xmit_dma; + + xmit_data += ROUNDUPP2(min_xmt_skb, VIPORT_TRAILER_ALIGNMENT); + xmit_dma += ROUNDUPP2(min_xmt_skb, VIPORT_TRAILER_ALIGNMENT); + rdma_io->trailer = (struct viport_trailer *)xmit_data; + rdma_io->trailer_dma = xmit_dma; + xmit_data += sizeof(struct viport_trailer); + xmit_dma += sizeof(struct viport_trailer); + } + + xmit_pool->rdma_rkey = data->mr->rkey; + xmit_pool->rdma_addr = xmit_pool->buf_pool_dma; + + data_post_recvs(data); + + if (vnic_ib_cm_connect(&data->ib_conn)) + return TRUE; +failure: + if (data->local_storage) { + vfree(data->local_storage); + } + + if (region_data) + kfree(region_data); + + if (recv_pool->buf_pool) + kfree(recv_pool->buf_pool); + + if (xmit_pool->buf_pool) + kfree(xmit_pool->buf_pool); + + if (xmit_pool->xmit_data) + kfree(xmit_pool->xmit_data); + + return FALSE; +} + +static void data_add_free_buffer(struct data *data, int index, + struct rdma_dest *rdma_dest) +{ + struct recv_pool *pool = &data->recv_pool; + struct buff_pool_entry *bpe; + + DATA_FUNCTION("data_add_free_buffer()\n"); + rdma_dest->trailer->connection_hash_and_valid = 0; + 
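+	/* a cleared valid bit keeps a recycled buffer from being mistaken
+	 * for a freshly received packet before the EIOC writes into it */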
dma_sync_single_for_cpu(data->parent->config->ibdev->dma_device, + pool->buf_pool_dma, pool->buf_pool_len, + DMA_TO_DEVICE); + + bpe = &pool->buf_pool[index]; + bpe->r_key = hton32(data->mr->rkey); + + bpe->remote_addr = hton64(PTR64(virt_to_phys(rdma_dest->data))); + bpe->valid = (u32) (rdma_dest - &pool->recv_bufs[0]) + 1; + ++pool->num_free_bufs; + + dma_sync_single_for_device(data->parent->config->ibdev->dma_device, + pool->buf_pool_dma, pool->buf_pool_len, + DMA_TO_DEVICE); + return; +} + +/* NOTE: this routine is not reentrant */ +static void data_alloc_buffers(struct data *data, BOOLEAN initial_allocation) +{ + struct recv_pool *pool = &data->recv_pool; + struct rdma_dest *rdma_dest; + struct sk_buff *skb; + int index; + + DATA_FUNCTION("data_alloc_buffers()\n"); + index = + ADD(pool->next_free_buf, pool->num_free_bufs, pool->eioc_pool_sz); + DATA_INFO("next_free_buf %x\n", pool->next_free_buf); + while (!list_empty(&pool->avail_recv_bufs)) { + rdma_dest = + list_entry(pool->avail_recv_bufs.next, struct rdma_dest, + list_ptrs); + if (!rdma_dest->skb) { + if (initial_allocation) + skb = + alloc_skb(pool->buffer_sz + 2, GFP_KERNEL); + else + skb = dev_alloc_skb(pool->buffer_sz + 2); + if (skb == NULL) { + DATA_ERROR("failed to alloc skb\n"); + break; + } + skb_reserve(skb, 2); + skb_put(skb, pool->buffer_sz); + rdma_dest->skb = skb; + rdma_dest->data = skb->data; + rdma_dest->trailer = + (struct viport_trailer *)(rdma_dest->data + + pool->buffer_sz - + sizeof(struct + viport_trailer)); + } + rdma_dest->trailer->connection_hash_and_valid = 0; + + list_del_init(&rdma_dest->list_ptrs); + + data_add_free_buffer(data, index, rdma_dest); + index = NEXT(index, pool->eioc_pool_sz); + } + return; +} + +static void data_send_kick_message(struct data *data) +{ + struct xmit_pool *pool = &data->xmit_pool; + DATA_FUNCTION("data_send_kick_message()\n"); + /* stop timer for bundle_timeout */ + if (data->kick_timer_on == TRUE) { + del_timer(&data->kick_timer); + data->kick_timer_on = FALSE; + } + pool->kick_count = 0; + pool->kick_byte_count = 0; + + /* TBD: keep track of when kick is outstanding, and + * don't reuse until complete + */ + if (!vnic_ib_post_send(&data->ib_conn, &data->free_bufs_io.io)) { + DATA_ERROR("failed to post send\n"); + viport_failure(data->parent); + return; + } + return; +} + +static void data_send_free_recv_buffers(struct data *data) +{ + struct recv_pool *pool = &data->recv_pool; + struct ib_send_wr *swr = &data->free_bufs_io.io.swr; + + BOOLEAN bufs_sent = FALSE; + u64 rdma_addr; + u32 offset; + u32 sz; + unsigned int num_to_send, next_increment; + + DATA_FUNCTION("data_send_free_recv_buffers()\n"); + + DATA_INFO("num_free_bufs %x sz_free_bundle %x\n", + pool->num_free_bufs, pool->sz_free_bundle); + + for (num_to_send = pool->sz_free_bundle; + num_to_send <= pool->num_free_bufs; + num_to_send += pool->sz_free_bundle) { + /* handle multiple bundles as one when possible. 
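+	 * a single RDMA write can publish several contiguous bundles, as
+	 * long as the run does not wrap past the end of the ring.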
*/ + next_increment = num_to_send + pool->sz_free_bundle; + if ((next_increment <= pool->num_free_bufs) + && (pool->next_free_buf + next_increment <= + pool->eioc_pool_sz)) { + continue; + } + offset = pool->next_free_buf * sizeof(struct buff_pool_entry); + sz = num_to_send * sizeof(struct buff_pool_entry); + rdma_addr = pool->eioc_rdma_addr + offset; + swr->sg_list->length = sz; + swr->sg_list->addr = pool->buf_pool_dma + offset; + swr->wr.rdma.remote_addr = rdma_addr; + + if (!vnic_ib_post_send(&data->ib_conn, + &data->free_bufs_io.io)) { + DATA_ERROR("failed to post send\n"); + viport_failure(data->parent); + break; + } + INC(pool->next_free_buf, num_to_send, pool->eioc_pool_sz); + pool->num_free_bufs -= num_to_send; + pool->num_posted_bufs += num_to_send; + bufs_sent = TRUE; + } + + if (bufs_sent) { + if (pool->kick_on_free) { + data_send_kick_message(data); + } + } + if (pool->num_posted_bufs == 0) { + DATA_ERROR("%s: unable to allocate receive buffers\n", + config_viport_name(data->parent->config)); + viport_failure(data->parent); + } + return; +} + +void data_connected(struct data *data) +{ + DATA_FUNCTION("data_connected()\n"); + data->free_bufs_io.io.swr.wr.rdma.rkey = data->recv_pool.eioc_rdma_rkey; + data_alloc_buffers(data, TRUE); + data_send_free_recv_buffers(data); + data->connected = TRUE; + return; +} + +void data_disconnect(struct data *data) +{ + struct xmit_pool *xmit_pool = &data->xmit_pool; + struct recv_pool *recv_pool = &data->recv_pool; + u8 *region_data = data->region_data; + unsigned int i; + + DATA_FUNCTION("data_disconnect()\n"); + + data->connected = FALSE; + if (data->kick_timer_on) { + del_timer_sync(&data->kick_timer); + data->kick_timer_on = FALSE; + } + + for (i = 0; i < xmit_pool->num_xmit_bufs; i++) { + if (xmit_pool->xmit_bufs[i].skb) + dev_kfree_skb(xmit_pool->xmit_bufs[i].skb); + xmit_pool->xmit_bufs[i].skb = NULL; + + } + for (i = 0; i < recv_pool->pool_sz; i++) { + if (data->recv_pool.recv_bufs[i].skb) + dev_kfree_skb(recv_pool->recv_bufs[i].skb); + recv_pool->recv_bufs[i].skb = NULL; + } + vfree(data->local_storage); + if (region_data) { + dma_unmap_single(data->parent->config->ibdev->dma_device, + data->regiondata_dma, 4, DMA_BIDIRECTIONAL); + kfree(region_data); + } + + if (recv_pool->buf_pool) { + dma_unmap_single(data->parent->config->ibdev->dma_device, + recv_pool->buf_pool_dma, + recv_pool->buf_pool_len, DMA_TO_DEVICE); + kfree(recv_pool->buf_pool); + } + + if (xmit_pool->buf_pool) { + dma_unmap_single(data->parent->config->ibdev->dma_device, + xmit_pool->buf_pool_dma, + xmit_pool->buf_pool_len, DMA_FROM_DEVICE); + kfree(xmit_pool->buf_pool); + } + + if (xmit_pool->xmit_data) { + dma_unmap_single(data->parent->config->ibdev->dma_device, + xmit_pool->xmitdata_dma, + xmit_pool->xmitdata_len, DMA_TO_DEVICE); + kfree(xmit_pool->xmit_data); + } + + return; +} + +void data_cleanup(struct data *data) +{ + init_completion(&data->ib_conn.done); + if (ib_send_cm_dreq(data->ib_conn.cm_id, NULL, 0)) { + printk(KERN_DEBUG "data CM DREQ sending failed\n"); + } else + wait_for_completion(&data->ib_conn.done); + + ib_destroy_cm_id(data->ib_conn.cm_id); + + ib_destroy_qp(data->ib_conn.qp); + + ib_destroy_cq(data->ib_conn.cq); + ib_dereg_mr(data->mr); + +} + +static BOOLEAN data_alloc_xmit_buffer(struct data *data, struct sk_buff *skb, + struct buff_pool_entry **pp_bpe, + struct rdma_io **pp_rdma_io, + BOOLEAN * last) +{ + struct xmit_pool *pool = &data->xmit_pool; + unsigned long flags; + + DATA_FUNCTION("data_alloc_xmit_buffer()\n"); + + 
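+	/* next_xmit_buf indexes the local rdma_io array, next_xmit_pool the
+	 * buff_pool ring shared with the EIOC; both advance in lockstep
+	 * under xmit_buf_lock */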
spin_lock_irqsave(&data->xmit_buf_lock, flags); + dma_sync_single_for_cpu(data->parent->config->ibdev->dma_device, + pool->buf_pool_dma, pool->buf_pool_len, + DMA_TO_DEVICE); + + *last = FALSE; + *pp_rdma_io = &pool->xmit_bufs[pool->next_xmit_buf]; + *pp_bpe = &pool->buf_pool[pool->next_xmit_pool]; + + if ((*pp_bpe)->valid && pool->next_xmit_buf != pool->last_comp_buf) { + INC(pool->next_xmit_buf, 1, pool->num_xmit_bufs); + INC(pool->next_xmit_pool, 1, pool->pool_sz); + if (!pool->buf_pool[pool->next_xmit_pool].valid) { + DATA_INFO("just used the last EIOU receive buffer\n"); + *last = TRUE; + pool->need_buffers = TRUE; + viport_stop_xmit(data->parent); +#ifdef CONFIG_INFINIBAND_VNIC_STATS + data->statistics.kick_reqs++; +#endif /* CONFIG_INFINIBAND_VNIC_STATS */ + } else if (pool->next_xmit_buf == pool->last_comp_buf) { + DATA_INFO("just used our last xmit buffer\n"); + pool->need_buffers = TRUE; + viport_stop_xmit(data->parent); + } + (*pp_rdma_io)->skb = skb; + (*pp_bpe)->valid = 0; + spin_unlock_irqrestore(&data->xmit_buf_lock, flags); + return TRUE; + } else { +#ifdef CONFIG_INFINIBAND_VNIC_STATS + data->statistics.no_xmit_bufs++; +#endif /* CONFIG_INFINIBAND_VNIC_STATS */ + DATA_ERROR("Out of xmit buffers\n"); + viport_stop_xmit(data->parent); + dma_sync_single_for_device(data->parent->config->ibdev-> + dma_device, pool->buf_pool_dma, + pool->buf_pool_len, DMA_TO_DEVICE); + + spin_unlock_irqrestore(&data->xmit_buf_lock, flags); + return FALSE; + } +} + +static void data_rdma_packet(struct data *data, struct buff_pool_entry *bpe, + struct rdma_io *rdma_io) +{ + struct ib_send_wr *swr; + struct sk_buff *skb; + u8 *d; + dma_addr_t trailer_data_dma; + dma_addr_t skb_data_dma; + int len; + int fill_len; + struct xmit_pool *xmit_pool = &data->xmit_pool; + struct viport *viport = data->parent; + + DATA_FUNCTION("data_rdma_packet()\n"); + swr = &rdma_io->io.swr; + skb = rdma_io->skb; + len = ROUNDUPP2(rdma_io->len, VIPORT_TRAILER_ALIGNMENT); + fill_len = len - skb->len; + + dma_sync_single_for_cpu(data->parent->config->ibdev->dma_device, + xmit_pool->xmitdata_dma, + xmit_pool->xmitdata_len, DMA_TO_DEVICE); + + d = (u8 *) rdma_io->trailer - fill_len; + trailer_data_dma = rdma_io->trailer_dma - fill_len; + memset(d, '\0', fill_len); + + swr->sg_list[0].length = skb->len; + if (skb->len <= min_xmt_skb) { + memcpy(rdma_io->data, skb->data, skb->len); + swr->sg_list[0].lkey = data->mr->lkey; + swr->sg_list[0].addr = rdma_io->data_dma; + dev_kfree_skb_any(skb); + rdma_io->skb = NULL; + } else { + swr->sg_list[0].lkey = data->mr->lkey; + + skb_data_dma = dma_map_single(viport->config->ibdev->dma_device, + skb->data, skb->len, + DMA_TO_DEVICE); + + if (dma_mapping_error(skb_data_dma)) { + DATA_ERROR("skb data dma map error\n"); + return; + } + + rdma_io->skb_data_dma = skb_data_dma; + + swr->sg_list[0].addr = skb_data_dma; + skb_orphan(skb); + } + dma_sync_single_for_cpu(data->parent->config->ibdev->dma_device, + xmit_pool->buf_pool_dma, + xmit_pool->buf_pool_len, DMA_TO_DEVICE); + + swr->sg_list[1].addr = trailer_data_dma; + swr->sg_list[1].length = fill_len + sizeof(struct viport_trailer); + swr->sg_list[0].lkey = data->mr->lkey; + swr->wr.rdma.remote_addr = ntoh64(bpe->remote_addr); + swr->wr.rdma.remote_addr += data->xmit_pool.buffer_sz; + swr->wr.rdma.remote_addr -= (sizeof(struct viport_trailer) + len); + swr->wr.rdma.rkey = ntoh32(bpe->r_key); + + dma_sync_single_for_device(data->parent->config->ibdev->dma_device, + xmit_pool->buf_pool_dma, + xmit_pool->buf_pool_len, DMA_TO_DEVICE); + + 
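+	/* request a completion only once per notify_bundle writes; the
+	 * unsignaled sends in between keep CQ overhead off the fast path */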
data->xmit_pool.notify_count++; + if (data->xmit_pool.notify_count >= data->xmit_pool.notify_bundle) { + data->xmit_pool.notify_count = 0; + swr->send_flags = IB_SEND_SIGNALED; + } else { + swr->send_flags = 0; + } + dma_sync_single_for_device(data->parent->config->ibdev->dma_device, + xmit_pool->xmitdata_dma, + xmit_pool->xmitdata_len, DMA_TO_DEVICE); + if (!vnic_ib_post_send(&data->ib_conn, &rdma_io->io)) { + DATA_ERROR("failed to post send for data RDMA write\n"); + viport_failure(data->parent); + return; + } +#ifdef CONFIG_INFINIBAND_VNIC_STATS + data->statistics.xmit_num++; +#endif /* CONFIG_INFINIBAND_VNIC_STATS */ + return; +} + +static void data_kick_timeout_handler(unsigned long arg) +{ + struct data *data = (struct data *)arg; + + DATA_FUNCTION("data_kick_timeout_handler()\n"); + data->kick_timer_on = FALSE; + data_send_kick_message(data); + return; +} + +BOOLEAN data_xmit_packet(struct data *data, struct sk_buff *skb) +{ + struct xmit_pool *pool = &data->xmit_pool; + struct rdma_io *rdma_io; + struct buff_pool_entry *bpe; + struct viport_trailer *trailer; + BOOLEAN last; + unsigned int sz = skb->len; + + DATA_FUNCTION("data_xmit_packet()\n"); + if (sz > pool->buffer_sz) { + DATA_ERROR("outbound packet too large, size = %d\n", sz); + return FALSE; + } + + if (!data_alloc_xmit_buffer(data, skb, &bpe, &rdma_io, &last)) { + DATA_ERROR("error in allocating data xmit buffer\n"); + return FALSE; + } + + dma_sync_single_for_cpu(data->parent->config->ibdev->dma_device, + pool->xmitdata_dma, pool->xmitdata_len, + DMA_TO_DEVICE); + + trailer = rdma_io->trailer; + + memset(trailer, '\0', sizeof(struct viport_trailer)); + memcpy(trailer->dest_mac_addr, skb->data, ETH_ALEN); + if (skb->sk) + trailer->connection_hash_and_valid = + 0x40 | ((get_sksport(skb->sk) + get_skdport(skb->sk)) & + 0x3f); + trailer->connection_hash_and_valid |= hton8(CHV_VALID); + if ((sz > 16) && (*(u16 *) (skb->data + 12) == hton16(0x8100))) { + trailer->vlan = *(u16 *) (skb->data + 14); + memmove(skb->data + 4, skb->data, 12); + skb_pull(skb, 4); + trailer->pkt_flags |= PF_VLAN_INSERT; + } + if (last) + trailer->pkt_flags |= PF_KICK; + if (sz < 60) { + /* EIOU requires all packets to be + * of ethernet minimum packet size. 
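+	 * Short frames are padded out to 60 bytes, and the padded
+	 * length is what the trailer reports.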
+ */ + trailer->data_length = hton16(60); + rdma_io->len = 60; + } else { + trailer->data_length = hton16(sz); + rdma_io->len = sz; + } + + if (skb->ip_summed == CHECKSUM_PARTIAL) { + trailer->tx_chksum_flags = TX_CHKSUM_FLAGS_CHECKSUM_V4 + | TX_CHKSUM_FLAGS_IP_CHECKSUM + | TX_CHKSUM_FLAGS_TCP_CHECKSUM + | TX_CHKSUM_FLAGS_UDP_CHECKSUM; + } + + dma_sync_single_for_device(data->parent->config->ibdev->dma_device, + pool->xmitdata_dma, pool->xmitdata_len, + DMA_TO_DEVICE); + + data_rdma_packet(data, bpe, rdma_io); + + if (pool->send_kicks) { + /* EIOC needs kicks to inform it of sent packets */ + pool->kick_count++; + pool->kick_byte_count += sz; + if ((pool->kick_count >= pool->kick_bundle) + || (pool->kick_byte_count >= pool->kick_byte_bundle)) { + data_send_kick_message(data); + } else if (pool->kick_count == 1) { + init_timer(&data->kick_timer); + /* timeout_before_kick is in u_sec */ + data->kick_timer.expires = + (data->eioc_pool_parms.timeout_before_kick * HZ / + 1000000) + jiffies; + data->kick_timer.data = (unsigned long)data; + data->kick_timer.function = data_kick_timeout_handler; + add_timer(&data->kick_timer); + data->kick_timer_on = TRUE; + } + } + return TRUE; +} + +static void data_check_xmit_buffers(struct data *data) +{ + struct xmit_pool *pool = &data->xmit_pool; + unsigned long flags; + + DATA_FUNCTION("data_check_xmit_buffers()\n"); + spin_lock_irqsave(&data->xmit_buf_lock, flags); + dma_sync_single_for_cpu(data->parent->config->ibdev->dma_device, + pool->buf_pool_dma, pool->buf_pool_len, + DMA_TO_DEVICE); + + if (data->xmit_pool.need_buffers + && pool->buf_pool[pool->next_xmit_pool].valid + && pool->next_xmit_buf != pool->last_comp_buf) { + data->xmit_pool.need_buffers = FALSE; + viport_restart_xmit(data->parent); + DATA_INFO("there are free xmit buffers\n"); + } + dma_sync_single_for_device(data->parent->config->ibdev->dma_device, + pool->buf_pool_dma, pool->buf_pool_len, + DMA_TO_DEVICE); + + spin_unlock_irqrestore(&data->xmit_buf_lock, flags); + return; +} + +static struct sk_buff *data_recv_to_skbuff(struct data *data, + struct rdma_dest *rdma_dest) +{ + struct viport_trailer *trailer; + struct sk_buff *skb; + int start; + unsigned int len; + u8 rx_chksum_flags; + + DATA_FUNCTION("data_recv_to_skbuff()\n"); + trailer = rdma_dest->trailer; + start = data_offset(data, trailer); + len = data_len(data, trailer); +#ifdef INBOUND_COPY + if (len <= min_rcv_skb) { + /* leave room for VLAN header */ + skb = dev_alloc_skb(len + 6); + if (!skb) + goto no_copy; + skb_reserve(skb, 6); + memcpy(skb->data, rdma_dest->data + start, len); + skb_put(skb, len); + } else +#endif + { +no_copy: + skb = rdma_dest->skb; + rdma_dest->skb = NULL; + rdma_dest->trailer = NULL; + rdma_dest->data = NULL; + skb_pull(skb, start); + skb_trim(skb, len); + } + + rx_chksum_flags = trailer->rx_chksum_flags; + DATA_INFO + ("rx_chksum_flags = %d, LOOP = %c, IP = %c, TCP = %c, UDP = %c\n", + rx_chksum_flags, + (rx_chksum_flags & RX_CHKSUM_FLAGS_LOOPBACK) ? 'Y' : 'N', + (rx_chksum_flags & RX_CHKSUM_FLAGS_IP_CHECKSUM_SUCCEEDED) ? 'Y' + : (rx_chksum_flags & RX_CHKSUM_FLAGS_IP_CHECKSUM_FAILED) ? 'N' : + '-', + (rx_chksum_flags & RX_CHKSUM_FLAGS_TCP_CHECKSUM_SUCCEEDED) ? 'Y' + : (rx_chksum_flags & RX_CHKSUM_FLAGS_TCP_CHECKSUM_FAILED) ? 'N' : + '-', + (rx_chksum_flags & RX_CHKSUM_FLAGS_UDP_CHECKSUM_SUCCEEDED) ? 'Y' + : (rx_chksum_flags & RX_CHKSUM_FLAGS_UDP_CHECKSUM_FAILED) ? 
'N' : + '-'); + + if ((rx_chksum_flags & RX_CHKSUM_FLAGS_LOOPBACK) + || ((rx_chksum_flags & RX_CHKSUM_FLAGS_IP_CHECKSUM_SUCCEEDED) + && ((rx_chksum_flags & RX_CHKSUM_FLAGS_TCP_CHECKSUM_SUCCEEDED) + || (rx_chksum_flags & + RX_CHKSUM_FLAGS_UDP_CHECKSUM_SUCCEEDED)))) + skb->ip_summed = CHECKSUM_UNNECESSARY; + else + skb->ip_summed = CHECKSUM_NONE; + if (trailer->pkt_flags & PF_VLAN_INSERT) { + u8 *rv; + + rv = skb_push(skb, 4); + memmove(rv, rv + 4, 12); + *(u16 *) (rv + 12) = hton16(0x8100); + if (trailer->pkt_flags & PF_PVID_OVERRIDDEN) { + *(u16 *) (rv + 14) = trailer->vlan & hton16(0xF000); + } else { + *(u16 *) (rv + 14) = trailer->vlan; + } + } + + return skb; +} + +static BOOLEAN data_incoming_recv(struct data *data) +{ + struct recv_pool *pool = &data->recv_pool; + struct rdma_dest *rdma_dest; + struct viport_trailer *trailer; + struct buff_pool_entry *bpe; + struct sk_buff *skb; + + DATA_FUNCTION("data_incoming_recv()\n"); + if (pool->next_full_buf == pool->next_free_buf) + return FALSE; + bpe = &pool->buf_pool[pool->next_full_buf]; + rdma_dest = &pool->recv_bufs[bpe->valid - 1]; + trailer = rdma_dest->trailer; + if ((trailer != NULL) + && (trailer->connection_hash_and_valid & CHV_VALID)) { + /* received a packet */ + if (trailer->pkt_flags & PF_KICK) { + pool->kick_on_free = TRUE; + } + if ((skb = data_recv_to_skbuff(data, rdma_dest)) != NULL) { + viport_recv_packet(data->parent, skb); + list_add(&rdma_dest->list_ptrs, &pool->avail_recv_bufs); + } + dma_sync_single_for_cpu(data->parent->config->ibdev->dma_device, + pool->buf_pool_dma, pool->buf_pool_len, + DMA_TO_DEVICE); + + bpe->valid = 0; + dma_sync_single_for_device(data->parent->config->ibdev-> + dma_device, pool->buf_pool_dma, + pool->buf_pool_len, DMA_TO_DEVICE); + + INC(pool->next_full_buf, 1, pool->eioc_pool_sz); + pool->num_posted_bufs--; + +#ifdef CONFIG_INFINIBAND_VNIC_STATS + data->statistics.recv_num++; +#endif /* CONFIG_INFINIBAND_VNIC_STATS */ + return TRUE; + } else { + return FALSE; + } +} + +static void data_received_kick(struct io *io) +{ + struct data *data = &io->viport->data; + unsigned long flags; + + DATA_FUNCTION("data_received_kick()\n"); +#ifdef CONFIG_INFINIBAND_VNIC_STATS + recv_ref = get_cycles(); +#endif /* CONFIG_INFINIBAND_VNIC_STATS */ + + spin_lock_irqsave(&data->recv_ios_lock, flags); + list_add(&io->list_ptrs, &data->recv_ios); + spin_unlock_irqrestore(&data->recv_ios_lock, flags); + data_post_recvs(data); + +#ifdef CONFIG_INFINIBAND_VNIC_STATS + data->statistics.kick_recvs++; +#endif /* CONFIG_INFINIBAND_VNIC_STATS */ + + data_check_xmit_buffers(data); + + while (data_incoming_recv(data)) ; + if (data->connected == TRUE) { + data_alloc_buffers(data, FALSE); + data_send_free_recv_buffers(data); + } + return; +} + +static void data_xmit_complete(struct io *io) +{ + struct rdma_io *rdma_io = (struct rdma_io *)io; + struct data *data = &io->viport->data; + struct xmit_pool *pool = &data->xmit_pool; + struct sk_buff *skb; + + DATA_FUNCTION("data_xmit_complete()\n"); + + if (rdma_io->skb) { + dma_unmap_single(data->parent->config->ibdev->dma_device, + rdma_io->skb_data_dma, rdma_io->skb->len, + DMA_TO_DEVICE); + } + + while (pool->last_comp_buf != rdma_io->index) { + INC(pool->last_comp_buf, 1, pool->num_xmit_bufs); + skb = pool->xmit_bufs[pool->last_comp_buf].skb; + if (skb != NULL) { + dev_kfree_skb_any(skb); + } + pool->xmit_bufs[pool->last_comp_buf].skb = NULL; + } +#ifdef LIMIT_OUTSTANDING_SENDS + data_check_xmit_buffers(data); +#endif /* LIMIT_OUTSTANDING_SENDS */ + + return; +} diff --git 
a/drivers/infiniband/ulp/vnic/vnic_data.h b/drivers/infiniband/ulp/vnic/vnic_data.h new file mode 100644 index 0000000..0588b09 --- /dev/null +++ b/drivers/infiniband/ulp/vnic/vnic_data.h @@ -0,0 +1,179 @@ +/* + * Copyright (c) 2006 SilverStorm Technologies Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef VNIC_DATA_H_INCLUDED +#define VNIC_DATA_H_INCLUDED + +#ifdef CONFIG_INFINIBAND_VNIC_STATS +#include <linux/timex.h> +#endif /* CONFIG_INFINIBAND_VNIC_STATS */ + +#include "vnic_ib.h" +#include "vnic_control_pkt.h" +#include "vnic_trailer.h" + +struct rdma_dest { + struct list_head list_ptrs; + struct sk_buff *skb; + u8 *data; + struct viport_trailer *trailer; +}; + +struct buff_pool_entry { + u64 remote_addr; + u32 r_key; + u32 valid; +}; + +struct recv_pool { + u32 buffer_sz; + u32 pool_sz; + u32 eioc_pool_sz; + uint32_t eioc_rdma_rkey; + u64 eioc_rdma_addr; + u32 next_full_buf; + u32 next_free_buf; + u32 num_free_bufs; + u32 num_posted_bufs; + u32 sz_free_bundle; + BOOLEAN kick_on_free; + struct buff_pool_entry *buf_pool; + dma_addr_t buf_pool_dma; + int buf_pool_len; + struct rdma_dest *recv_bufs; + struct list_head avail_recv_bufs; +}; + +struct xmit_pool { + u32 buffer_sz; + u32 pool_sz; + u32 notify_count; + u32 notify_bundle; + u32 next_xmit_buf; + u32 last_comp_buf; + u32 num_xmit_bufs; + u32 next_xmit_pool; + u32 kick_count; + u32 kick_byte_count; + u32 kick_bundle; + u32 kick_byte_bundle; + BOOLEAN need_buffers; + BOOLEAN send_kicks; + uint32_t rdma_rkey; + u64 rdma_addr; + struct buff_pool_entry *buf_pool; + dma_addr_t buf_pool_dma; + int buf_pool_len; + struct rdma_io *xmit_bufs; + u8 *xmit_data; + dma_addr_t xmitdata_dma; + int xmitdata_len; +}; + +struct data { + struct viport *parent; + struct data_config *config; + struct ib_mr *mr; + struct vnic_ib_conn ib_conn; + u8 *local_storage; + struct vnic_recv_pool_config host_pool_parms; + struct vnic_recv_pool_config eioc_pool_parms; + struct recv_pool recv_pool; + struct xmit_pool xmit_pool; + u8 *region_data; + dma_addr_t regiondata_dma; + struct rdma_io free_bufs_io; + struct send_io kick_io; + struct list_head recv_ios; + spinlock_t recv_ios_lock; + spinlock_t xmit_buf_lock; + BOOLEAN kick_timer_on; + BOOLEAN connected; + 
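+	/* batches transmit kicks; armed by the first unkicked send */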
struct timer_list kick_timer; + struct completion done; +#ifdef CONFIG_INFINIBAND_VNIC_STATS + struct { + u32 xmit_num; + u32 recv_num; + u32 free_buf_sends; + u32 free_buf_num; + u32 free_buf_min; + u32 kick_recvs; + u32 kick_reqs; + u32 no_xmit_bufs; + cycles_t no_xmit_buf_time; + } statistics; +#endif /* CONFIG_INFINIBAND_VNIC_STATS */ +}; + +BOOLEAN data_init(struct data *data, struct viport *viport, + struct data_config *config, struct ib_pd *pd, u64 guid); + +BOOLEAN data_connect(struct data *data); +void data_connected(struct data *data); +void data_disconnect(struct data *data); + +BOOLEAN data_xmit_packet(struct data *data, struct sk_buff *skb); + +void data_cleanup(struct data *data); + +#define data_is_connected(data) (ib_conn_connected(&((data)->ib_conn))) +#define data_path_id(data) (data)->config->path_id +#define data_eioc_pool(data) &(data)->eioc_pool_parms +#define data_host_pool(data) &(data)->host_pool_parms +#define data_eioc_pool_min(data) &(data)->config->eioc_min +#define data_host_pool_min(data) &(data)->config->host_min +#define data_eioc_pool_max(data) &(data)->config->eioc_max +#define data_host_pool_max(data) &(data)->config->host_max +#define data_local_pool_addr(data) (data)->xmit_pool.rdma_addr +#define data_local_pool_rkey(data) (data)->xmit_pool.rdma_rkey +#define data_remote_pool_addr(data) &(data)->recv_pool.eioc_rdma_addr +#define data_remote_pool_rkey(data) &(data)->recv_pool.eioc_rdma_rkey + +#define data_max_mtu(data) \ + MAX_PAYLOAD(min((data)->recv_pool.buffer_sz, \ + (data)->xmit_pool.buffer_sz)) - ETH_VLAN_HLEN + +#define data_len(data, trailer) ntoh16(trailer->data_length) +#define data_offset(data, trailer) \ + data->recv_pool.buffer_sz - sizeof(struct viport_trailer) \ + - ROUNDUPP2(data_len(data, trailer), VIPORT_TRAILER_ALIGNMENT) \ + + ntoh8(trailer->data_alignment_offset) + +/* the following macros manipulate ring buffer indexes. + * the ring buffer size must be a power of 2. 
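+ * masking with (size - 1) implements the wraparound.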
+ */
+#define ADD(index, increment, size) (((index) + (increment)) & ((size) - 1))
+#define NEXT(index, size) ADD(index, 1, size)
+#define INC(index, increment, size) (index) = ADD(index, increment, size)
+
+#endif /* VNIC_DATA_H_INCLUDED */
diff --git a/drivers/infiniband/ulp/vnic/vnic_trailer.h b/drivers/infiniband/ulp/vnic/vnic_trailer.h
new file mode 100644
index 0000000..d6bd6c7
--- /dev/null
+++ b/drivers/infiniband/ulp/vnic/vnic_trailer.h
@@ -0,0 +1,63 @@
+#ifndef VNIC_TRAILER_H_INCLUDED
+#define VNIC_TRAILER_H_INCLUDED
+
+/* pkt_flags values */
+#define PF_CHASH_VALID		0x01
+#define PF_IPSEC_VALID		0x02
+#define PF_TCP_SEGMENT		0x04
+#define PF_KICK			0x08
+#define PF_VLAN_INSERT		0x10
+#define PF_PVID_OVERRIDDEN	0x20
+#define PF_FCS_INCLUDED		0x40
+#define PF_FORCE_ROUTE		0x80
+
+/* tx_chksum_flags values */
+#define TX_CHKSUM_FLAGS_CHECKSUM_V4		0x01
+#define TX_CHKSUM_FLAGS_CHECKSUM_V6		0x02
+#define TX_CHKSUM_FLAGS_TCP_CHECKSUM		0x04
+#define TX_CHKSUM_FLAGS_UDP_CHECKSUM		0x08
+#define TX_CHKSUM_FLAGS_IP_CHECKSUM		0x10
+
+/* rx_chksum_flags values */
+#define RX_CHKSUM_FLAGS_TCP_CHECKSUM_FAILED	0x01
+#define RX_CHKSUM_FLAGS_UDP_CHECKSUM_FAILED	0x02
+#define RX_CHKSUM_FLAGS_IP_CHECKSUM_FAILED	0x04
+#define RX_CHKSUM_FLAGS_TCP_CHECKSUM_SUCCEEDED	0x08
+#define RX_CHKSUM_FLAGS_UDP_CHECKSUM_SUCCEEDED	0x10
+#define RX_CHKSUM_FLAGS_IP_CHECKSUM_SUCCEEDED	0x20
+#define RX_CHKSUM_FLAGS_LOOPBACK		0x40
+#define RX_CHKSUM_FLAGS_RESERVED		0x80
+
+/* connection_hash_and_valid values */
+#define CHV_VALID	0x80
+#define CHV_HASH_MASK	0x7f
+
+struct viport_trailer {
+	s8 data_alignment_offset;
+	u8 rndis_header_length;	/* reserved for use by edp */
+	u16 data_length;
+	u8 pkt_flags;
+	u8 tx_chksum_flags;
+	u8 rx_chksum_flags;
+	u8 ip_sec_flags;
+	u32 tcp_seq_no;
+	u32 ip_sec_offload_handle;
+	u32 ip_sec_next_offload_handle;
+	u8 dest_mac_addr[6];
+	u16 vlan;
+	u16 time_stamp;
+	u8 origin;
+	u8 connection_hash_and_valid;
+};
+
+#define VIPORT_TRAILER_ALIGNMENT 32
+
+#define BUFFER_SIZE(len) \
+	(sizeof(struct viport_trailer) + ROUNDUPP2((len), \
+						   VIPORT_TRAILER_ALIGNMENT))
+
+#define MAX_PAYLOAD(len) \
+	ROUNDDOWNP2((len) - sizeof(struct viport_trailer), \
+		    VIPORT_TRAILER_ALIGNMENT)
+
+#endif /* VNIC_TRAILER_H_INCLUDED */