Re: [PATCH 1/4] iw_cxgb4: Detect Ing. Padding Boundary at run-time

2014-07-13 Thread Yann Droneaud
Hi,

Le vendredi 11 juillet 2014 à 20:44 +0530, Hariprasad Shenai a écrit :
 Updates iw_cxgb4 to determine the Ingress Padding Boundary from
 cxgb4_lld_info, and take subsequent actions.
 
 Signed-off-by: Steve Wise sw...@opengridcomputing.com
 Signed-off-by: Hariprasad Shenai haripra...@chelsio.com
 ---
  drivers/infiniband/hw/cxgb4/cq.c|4 ++--
  drivers/infiniband/hw/cxgb4/device.c|   21 +
  drivers/infiniband/hw/cxgb4/iw_cxgb4.h  |   12 
  drivers/infiniband/hw/cxgb4/provider.c  |4 ++--
  drivers/infiniband/hw/cxgb4/qp.c|   10 ++
  drivers/infiniband/hw/cxgb4/t4.h|8 
  drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c |2 ++
  drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h  |2 ++
  8 files changed, 47 insertions(+), 16 deletions(-)
 

[...]

 diff --git a/drivers/infiniband/hw/cxgb4/device.c 
 b/drivers/infiniband/hw/cxgb4/device.c
 index dd93aad..95e6c6c 100644
 --- a/drivers/infiniband/hw/cxgb4/device.c
 +++ b/drivers/infiniband/hw/cxgb4/device.c
 @@ -768,6 +768,27 @@ static struct c4iw_dev *c4iw_alloc(const struct 
 cxgb4_lld_info *infop)
   }
   devp->rdev.lldi = *infop;
  
 + /* init various hw-queue params based on lld info */
 + pr_info("%s: ing. padding boundary is %d, egrsstatuspagesize = %d\n",
 + __func__, devp->rdev.lldi.sge_ingpadboundary,
 + devp->rdev.lldi.sge_egrstatuspagesize);

Is this really needed ?

Use dev_info() or perhaps dev_dbg() instead of pr_info().

Regards.

-- 
Yann Droneaud
OPTEYA


--
To unsubscribe from this list: send the line "unsubscribe linux-rdma" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH 1/4] iw_cxgb4: Detect Ing. Padding Boundary at run-time

2014-07-11 Thread Hariprasad Shenai
Updates iw_cxgb4 to determine the Ingress Padding Boundary from
cxgb4_lld_info, and take subsequent actions.

Signed-off-by: Steve Wise sw...@opengridcomputing.com
Signed-off-by: Hariprasad Shenai haripra...@chelsio.com
---
 drivers/infiniband/hw/cxgb4/cq.c|4 ++--
 drivers/infiniband/hw/cxgb4/device.c|   21 +
 drivers/infiniband/hw/cxgb4/iw_cxgb4.h  |   12 
 drivers/infiniband/hw/cxgb4/provider.c  |4 ++--
 drivers/infiniband/hw/cxgb4/qp.c|   10 ++
 drivers/infiniband/hw/cxgb4/t4.h|8 
 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c |2 ++
 drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h  |2 ++
 8 files changed, 47 insertions(+), 16 deletions(-)

diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index c04292c..f04a838 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -895,7 +895,7 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int 
entries,
/*
 * Make actual HW queue 2x to avoid cdix_inc overflows.
 */
-   hwentries = min(entries * 2, T4_MAX_IQ_SIZE);
+   hwentries = min(entries * 2, rhp->rdev.hw_queue.t4_max_iq_size);
 
/*
 * Make HW queue at least 64 entries so GTS updates aren't too
@@ -912,7 +912,7 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int 
entries,
if (ucontext) {
memsize = roundup(memsize, PAGE_SIZE);
hwentries = memsize / sizeof *chp->cq.queue;
-   while (hwentries > T4_MAX_IQ_SIZE) {
+   while (hwentries > rhp->rdev.hw_queue.t4_max_iq_size) {
memsize -= PAGE_SIZE;
hwentries = memsize / sizeof *chp->cq.queue;
}
diff --git a/drivers/infiniband/hw/cxgb4/device.c 
b/drivers/infiniband/hw/cxgb4/device.c
index dd93aad..95e6c6c 100644
--- a/drivers/infiniband/hw/cxgb4/device.c
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -768,6 +768,27 @@ static struct c4iw_dev *c4iw_alloc(const struct 
cxgb4_lld_info *infop)
}
devp->rdev.lldi = *infop;
 
+   /* init various hw-queue params based on lld info */
+   pr_info("%s: ing. padding boundary is %d, egrsstatuspagesize = %d\n",
+   __func__, devp->rdev.lldi.sge_ingpadboundary,
+   devp->rdev.lldi.sge_egrstatuspagesize);
+
+   devp->rdev.hw_queue.t4_eq_status_entries =
+   devp->rdev.lldi.sge_ingpadboundary > 64 ? 2 : 1;
+   devp->rdev.hw_queue.t4_max_eq_size =
+   65520 - devp->rdev.hw_queue.t4_eq_status_entries;
+   devp->rdev.hw_queue.t4_max_iq_size = 65520 - 1;
+   devp->rdev.hw_queue.t4_max_rq_size =
+   8192 - devp->rdev.hw_queue.t4_eq_status_entries;
+   devp->rdev.hw_queue.t4_max_sq_size =
+   devp->rdev.hw_queue.t4_max_eq_size - 1;
+   devp->rdev.hw_queue.t4_max_qp_depth =
+   devp->rdev.hw_queue.t4_max_rq_size - 1;
+   devp->rdev.hw_queue.t4_max_cq_depth =
+   devp->rdev.hw_queue.t4_max_iq_size - 1;
+   devp->rdev.hw_queue.t4_stat_len =
+   devp->rdev.lldi.sge_egrstatuspagesize;
+
/*
 * For T5 devices, we map all of BAR2 with WC.
 * For T4 devices with onchip qp mem, we map only that part
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h 
b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index 125bc5d..9b9754c 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -139,6 +139,17 @@ struct c4iw_stats {
u64  pas_ofld_conn_fails;
 };
 
+struct c4iw_hw_queue {
+   int t4_eq_status_entries;
+   int t4_max_eq_size;
+   int t4_max_iq_size;
+   int t4_max_rq_size;
+   int t4_max_sq_size;
+   int t4_max_qp_depth;
+   int t4_max_cq_depth;
+   int t4_stat_len;
+};
+
 struct c4iw_rdev {
struct c4iw_resource resource;
unsigned long qpshift;
@@ -156,6 +167,7 @@ struct c4iw_rdev {
unsigned long oc_mw_pa;
void __iomem *oc_mw_kva;
struct c4iw_stats stats;
+   struct c4iw_hw_queue hw_queue;
struct t4_dev_status_page *status_page;
 };
 
diff --git a/drivers/infiniband/hw/cxgb4/provider.c 
b/drivers/infiniband/hw/cxgb4/provider.c
index b1d3053..1d41b92 100644
--- a/drivers/infiniband/hw/cxgb4/provider.c
+++ b/drivers/infiniband/hw/cxgb4/provider.c
@@ -319,13 +319,13 @@ static int c4iw_query_device(struct ib_device *ibdev,
props->vendor_part_id = (u32)dev->rdev.lldi.pdev->device;
props->max_mr_size = T4_MAX_MR_SIZE;
props->max_qp = T4_MAX_NUM_QP;
-   props->max_qp_wr = T4_MAX_QP_DEPTH;
+   props->max_qp_wr = dev->rdev.hw_queue.t4_max_qp_depth;
props->max_sge = T4_MAX_RECV_SGE;
props->max_sge_rd = 1;
props->max_qp_rd_atom = c4iw_max_read_depth;
props->max_qp_init_rd_atom = c4iw_max_read_depth;
props->max_cq = T4_MAX_NUM_CQ;
-