From: Igor Yarovinsky <[email protected]>
Subject: mlx4: Add Raw Ethertype QP support
This implementation supports one Raw Ethertype QP per port.

Signed-off-by: Igor Yarovinsky <[email protected]>
Signed-off-by: Jack Morgenstein <[email protected]>
---

This is a repost of
http://lists.openfabrics.org/pipermail/general/2008-August/053643.html

Raw Ethertype is implemented similarly to MADs. When posting sends, the
LRH and RWH headers are added as a single 16-byte inline segment.
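As a sanity check on the sizes used below, here is a small self-contained
sketch (not part of the patch) of the inline-header arithmetic. The 8-byte
LRH, 4-byte raw header (RWH) and 4-byte mlx4_wqe_inline_seg header are
assumptions based on the IBA spec and the mlx4 WQE layout; they show where
MLX4_IB_MAX_RAW_ETY_HDR_SIZE (12) and the 16-byte inline segment come from:

/* Sketch only: the sizes below are assumptions, not taken from the patch. */
#include <stdio.h>

#define ALIGN(x, a)          (((x) + (a) - 1) & ~((a) - 1))

#define LRH_BYTES            8  /* Local Route Header */
#define RWH_BYTES            4  /* Raw Header: 16-bit EtherType + reserved */
#define INLINE_SEG_HDR_BYTES 4  /* mlx4_wqe_inline_seg byte_count word */

int main(void)
{
	int hdr = LRH_BYTES + RWH_BYTES;                  /* 12 bytes */
	int seg = ALIGN(INLINE_SEG_HDR_BYTES + hdr, 16);  /* 16 bytes */

	printf("raw ety header: %d, inline segment: %d\n", hdr, seg);
	return 0;
}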
Index: infiniband/drivers/infiniband/hw/mlx4/qp.c
===================================================================
--- infiniband.orig/drivers/infiniband/hw/mlx4/qp.c	2008-08-12 16:28:56.000000000 +0300
+++ infiniband/drivers/infiniband/hw/mlx4/qp.c	2008-08-12 16:30:22.000000000 +0300
@@ -54,7 +54,8 @@ enum {
 	/*
 	 * Largest possible UD header: send with GRH and immediate data.
 	 */
-	MLX4_IB_UD_HEADER_SIZE = 72
+	MLX4_IB_UD_HEADER_SIZE = 72,
+	MLX4_IB_MAX_RAW_ETY_HDR_SIZE = 12
 };
 
 struct mlx4_ib_sqp {
@@ -280,6 +281,12 @@ static int send_wqe_overhead(enum ib_qp_
 			ALIGN(4 +
 			      sizeof (struct mlx4_wqe_inline_seg),
 			      sizeof (struct mlx4_wqe_data_seg));
+	case IB_QPT_RAW_ETY:
+		return sizeof(struct mlx4_wqe_ctrl_seg) +
+			ALIGN(MLX4_IB_MAX_RAW_ETY_HDR_SIZE +
+			      sizeof(struct mlx4_wqe_inline_seg),
+			      sizeof(struct mlx4_wqe_data_seg));
+
 	default:
 		return sizeof (struct mlx4_wqe_ctrl_seg);
 	}
@@ -335,6 +342,10 @@ static int set_kernel_sq_size(struct mlx
 	    cap->max_send_sge + 2 > dev->dev->caps.max_sq_sg)
 		return -EINVAL;
 
+	if (type == IB_QPT_RAW_ETY &&
+	    cap->max_send_sge + 1 > dev->dev->caps.max_sq_sg)
+		return -EINVAL;
+
 	s = max(cap->max_send_sge * sizeof (struct mlx4_wqe_data_seg),
 		cap->max_inline_data + sizeof (struct mlx4_wqe_inline_seg)) +
 		send_wqe_overhead(type, qp->flags);
@@ -375,7 +386,7 @@ static int set_kernel_sq_size(struct mlx
 	 */
 	if (dev->dev->caps.fw_ver >= MLX4_FW_VER_WQE_CTRL_NEC &&
 	    qp->sq_signal_bits && BITS_PER_LONG == 64 &&
-	    type != IB_QPT_SMI && type != IB_QPT_GSI)
+	    type != IB_QPT_SMI && type != IB_QPT_GSI && type != IB_QPT_RAW_ETY)
 		qp->sq.wqe_shift = ilog2(64);
 	else
 		qp->sq.wqe_shift = ilog2(roundup_pow_of_two(s));
@@ -711,6 +722,9 @@ struct ib_qp *mlx4_ib_create_qp(struct i
 
 		break;
 	}
+	case IB_QPT_RAW_ETY:
+		if (!(dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_RAW_ETY))
+			return ERR_PTR(-ENOSYS);
 	case IB_QPT_SMI:
 	case IB_QPT_GSI:
 	{
@@ -726,7 +740,8 @@ struct ib_qp *mlx4_ib_create_qp(struct i
 
 		err = create_qp_common(dev, pd, init_attr, udata,
 				       dev->dev->caps.sqp_start +
-				       (init_attr->qp_type == IB_QPT_SMI ? 0 : 2) +
+				       (init_attr->qp_type == IB_QPT_RAW_ETY ? 4 :
+					(init_attr->qp_type == IB_QPT_SMI ? 0 : 2)) +
 				       init_attr->port_num - 1,
 				       qp);
 		if (err) {
@@ -740,7 +755,6 @@ struct ib_qp *mlx4_ib_create_qp(struct i
 		break;
 	}
 	default:
-		/* Don't support raw QPs */
 		return ERR_PTR(-EINVAL);
 	}
 
@@ -771,6 +785,7 @@ static int to_mlx4_st(enum ib_qp_type ty
 	case IB_QPT_RC:		return MLX4_QP_ST_RC;
 	case IB_QPT_UC:		return MLX4_QP_ST_UC;
 	case IB_QPT_UD:		return MLX4_QP_ST_UD;
+	case IB_QPT_RAW_ETY:
 	case IB_QPT_SMI:
 	case IB_QPT_GSI:	return MLX4_QP_ST_MLX;
 	default:		return -1;
@@ -895,7 +910,8 @@ static int __mlx4_ib_modify_qp(struct ib
 		}
 	}
 
-	if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI)
+	if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI ||
+	    ibqp->qp_type == IB_QPT_RAW_ETY)
 		context->mtu_msgmax = (IB_MTU_4096 << 5) | 11;
 	else if (ibqp->qp_type == IB_QPT_UD) {
 		if (qp->flags & MLX4_IB_QP_LSO)
@@ -1044,7 +1060,7 @@ static int __mlx4_ib_modify_qp(struct ib
 
 	if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR &&
 	    (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI ||
-	     ibqp->qp_type == IB_QPT_UD)) {
+	     ibqp->qp_type == IB_QPT_UD || ibqp->qp_type == IB_QPT_RAW_ETY)) {
 		context->pri_path.sched_queue = (qp->port - 1) << 6;
 		if (is_qp0(dev, qp))
 			context->pri_path.sched_queue |= MLX4_IB_DEFAULT_QP0_SCHED_QUEUE;
@@ -1186,6 +1202,49 @@ out:
 	return err;
 }
 
+static int build_raw_ety_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
+				void *wqe, unsigned *mlx_seg_len)
+{
+	int payload = 0;
+	int header_size, packet_length;
+	struct mlx4_wqe_mlx_seg *mlx = wqe;
+	struct mlx4_wqe_inline_seg *inl = wqe + sizeof *mlx;
+	u32 *lrh = wqe + sizeof *mlx + sizeof *inl;
+	int i;
+
+	/* Only IB_WR_SEND is supported */
+	if (wr->opcode != IB_WR_SEND)
+		return -EINVAL;
+
+	for (i = 0; i < wr->num_sge; ++i)
+		payload += wr->sg_list[i].length;
+
+	header_size = IB_LRH_BYTES + 4; /* LRH + RAW_HEADER (32 bits) */
+
+	/* headers + payload and round up */
+	packet_length = (header_size + payload + 3) / 4;
+
+	mlx->flags &= cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE);
+
+	mlx->flags |= cpu_to_be32(MLX4_WQE_MLX_ICRC |
+				  (wr->wr.raw_ety.lrh->service_level << 8));
+
+	mlx->rlid = wr->wr.raw_ety.lrh->destination_lid;
+
+	wr->wr.raw_ety.lrh->packet_length = cpu_to_be16(packet_length);
+
+	ib_lrh_header_pack(wr->wr.raw_ety.lrh, lrh);
+	lrh += IB_LRH_BYTES / 4; /* LRH size is a dword multiple */
+	*lrh = cpu_to_be32(wr->wr.raw_ety.eth_type);
+
+	inl->byte_count = cpu_to_be32(1 << 31 | header_size);
+
+	*mlx_seg_len =
+		ALIGN(sizeof(struct mlx4_wqe_inline_seg) + header_size, 16);
+
+	return 0;
+}
+
 static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
 			    void *wqe, unsigned *mlx_seg_len)
 {
@@ -1601,6 +1660,17 @@ int mlx4_ib_post_send(struct ib_qp *ibqp
 			size += seglen / 16;
 			break;
 
+		case IB_QPT_RAW_ETY:
+			err = build_raw_ety_header(to_msqp(qp), wr, ctrl,
+						   &seglen);
+			if (unlikely(err)) {
+				*bad_wr = wr;
+				goto out;
+			}
+			wqe  += seglen;
+			size += seglen / 16;
+			break;
+
 		default:
 			break;
 		}
Index: infiniband/drivers/net/mlx4/qp.c
===================================================================
--- infiniband.orig/drivers/net/mlx4/qp.c	2008-08-12 16:28:56.000000000 +0300
+++ infiniband/drivers/net/mlx4/qp.c	2008-08-12 16:30:22.000000000 +0300
@@ -247,8 +247,9 @@ EXPORT_SYMBOL_GPL(mlx4_qp_free);
 
 static int mlx4_CONF_SPECIAL_QP(struct mlx4_dev *dev, u32 base_qpn)
 {
-	return mlx4_cmd(dev, 0, base_qpn, 0, MLX4_CMD_CONF_SPECIAL_QP,
-			MLX4_CMD_TIME_CLASS_B);
+	return mlx4_cmd(dev, 0, base_qpn,
+			(dev->caps.flags & MLX4_DEV_CAP_FLAG_RAW_ETY) ? 4 : 0,
+			MLX4_CMD_CONF_SPECIAL_QP, MLX4_CMD_TIME_CLASS_B);
 }
 
 int mlx4_init_qp_table(struct mlx4_dev *dev)
Index: infiniband/include/linux/mlx4/device.h
===================================================================
--- infiniband.orig/include/linux/mlx4/device.h	2008-08-12 16:28:56.000000000 +0300
+++ infiniband/include/linux/mlx4/device.h	2008-08-12 16:30:22.000000000 +0300
@@ -60,6 +60,7 @@ enum {
 	MLX4_DEV_CAP_FLAG_IPOIB_CSUM	= 1 << 7,
 	MLX4_DEV_CAP_FLAG_BAD_PKEY_CNTR	= 1 << 8,
 	MLX4_DEV_CAP_FLAG_BAD_QKEY_CNTR	= 1 << 9,
+	MLX4_DEV_CAP_FLAG_RAW_ETY	= 1 << 13,
 	MLX4_DEV_CAP_FLAG_MEM_WINDOW	= 1 << 16,
 	MLX4_DEV_CAP_FLAG_APM		= 1 << 17,
 	MLX4_DEV_CAP_FLAG_ATOMIC	= 1 << 18,
Index: infiniband/include/linux/mlx4/qp.h
===================================================================
--- infiniband.orig/include/linux/mlx4/qp.h	2008-08-12 16:28:56.000000000 +0300
+++ infiniband/include/linux/mlx4/qp.h	2008-08-12 16:30:22.000000000 +0300
@@ -191,7 +191,8 @@ struct mlx4_wqe_ctrl_seg {
 
 enum {
 	MLX4_WQE_MLX_VL15	= 1 << 17,
-	MLX4_WQE_MLX_SLR	= 1 << 16
+	MLX4_WQE_MLX_SLR	= 1 << 16,
+	MLX4_WQE_MLX_ICRC	= 1 << 4
 };
 
 struct mlx4_wqe_mlx_seg {
Index: infiniband/drivers/infiniband/hw/mlx4/main.c
===================================================================
--- infiniband.orig/drivers/infiniband/hw/mlx4/main.c	2008-08-12 16:28:56.000000000 +0300
+++ infiniband/drivers/infiniband/hw/mlx4/main.c	2008-08-12 16:30:22.000000000 +0300
@@ -111,6 +111,8 @@ static int mlx4_ib_query_device(struct i
 	    (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_REMOTE_INV) &&
 	    (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_FAST_REG_WR))
 		props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
+	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_RAW_ETY)
+		props->max_raw_ethy_qp = dev->ib_dev.phys_port_cnt;
 
 	props->vendor_id	   = be32_to_cpup((__be32 *) (out_mad->data + 36)) &
 		0xffffff;
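For anyone who wants to try the new QP type from a kernel consumer, below is a
rough sketch (not part of the patch) of how a send might be posted. It assumes
the companion core patch referenced above, which adds IB_QPT_RAW_ETY and the
wr.raw_ety union member consumed by build_raw_ety_header(); struct
ib_unpacked_lrh comes from <rdma/ib_pack.h>, while the LNH value and the helper
name post_raw_ety_send() are illustrative assumptions only. The QP itself would
be created with ib_create_qp() and init_attr.qp_type = IB_QPT_RAW_ETY.

#include <rdma/ib_verbs.h>
#include <rdma/ib_pack.h>

/* Hypothetical helper: the fields under wr.raw_ety follow the usage in
 * build_raw_ety_header() above and depend on the companion core patch. */
static int post_raw_ety_send(struct ib_qp *qp, struct ib_sge *sge,
			     u16 dlid, u16 slid, u16 eth_type)
{
	struct ib_unpacked_lrh lrh = {
		.service_level	  = 0,
		.link_next_header = 0,			/* raw packet with RWH (assumed) */
		.destination_lid  = cpu_to_be16(dlid),
		.source_lid	  = cpu_to_be16(slid),
		/* packet_length is filled in by build_raw_ety_header() */
	};
	struct ib_send_wr *bad_wr;
	struct ib_send_wr wr = {
		.opcode		= IB_WR_SEND,		/* only opcode accepted */
		.sg_list	= sge,
		.num_sge	= 1,
		.send_flags	= IB_SEND_SIGNALED,
	};

	wr.wr.raw_ety.lrh      = &lrh;			/* companion-patch field (assumed) */
	wr.wr.raw_ety.eth_type = eth_type;		/* companion-patch field (assumed) */

	return ib_post_send(qp, &wr, &bad_wr);
}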
