[PATCHv2 net-next 0/4] Misc. fixes for iw_cxgb4

2014-07-14 Thread Hariprasad Shenai
This patch series adds support to determine ingress padding boundary at runtime.
Advertise a larger max read queue depth for qps, and gather the resource limits
from fw and use them to avoid exhausting all the resources and display TPTE on
errors and add support for work request logging feature.

The patches series is created against 'net-next' tree.
And includes patches on cxgb4 and iw_cxgb4 driver.

Since this patch-series contains changes which are dependent on commit id
fc5ab02 (cxgb4: Replaced the backdoor mechanism to access the HW memory with
PCIe Window method) we would like to request this patch series to get merged
via David Miller's 'net-next' tree.

We have included all the maintainers of respective drivers. Kindly review the
change and let us know in case of any review comments. 

V2:
 Optimized alloc_ird function, and several other changes related to debug prints
 based on review comments given by Yann Droneaud.

Hariprasad Shenai (4):
  iw_cxgb4: Detect Ing. Padding Boundary at run-time
  cxgb4/iw_cxgb4: use firmware ord/ird resource limits
  cxgb4/iw_cxgb4: display TPTE on errors
  cxgb4/iw_cxgb4: work request logging feature

 drivers/infiniband/hw/cxgb4/cm.c|   80 +++---
 drivers/infiniband/hw/cxgb4/cq.c|8 +-
 drivers/infiniband/hw/cxgb4/device.c|  188 ++-
 drivers/infiniband/hw/cxgb4/ev.c|   55 ++-
 drivers/infiniband/hw/cxgb4/iw_cxgb4.h  |   38 +-
 drivers/infiniband/hw/cxgb4/provider.c  |   10 +-
 drivers/infiniband/hw/cxgb4/qp.c|   76 --
 drivers/infiniband/hw/cxgb4/t4.h|   16 +--
 drivers/net/ethernet/chelsio/cxgb4/cxgb4.h  |3 +
 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c |  100 
 drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h  |7 +
 drivers/net/ethernet/chelsio/cxgb4/t4_regs.h|6 +
 drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h   |2 +
 13 files changed, 528 insertions(+), 61 deletions(-)

--
To unsubscribe from this list: send the line unsubscribe linux-rdma in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCHv2 net-next 3/4] cxgb4/iw_cxgb4: display TPTE on errors

2014-07-14 Thread Hariprasad Shenai
With ingress WRITE or READ RESPONSE errors, HW provides the offending
stag from the packet.  This patch adds logic to log the parsed TPTE
in this case. cxgb4 now exports a function to read a TPTE entry
from adapter memory.

Signed-off-by: Steve Wise sw...@opengridcomputing.com
Signed-off-by: Hariprasad Shenai haripra...@chelsio.com
---
 drivers/infiniband/hw/cxgb4/device.c|   28 --
 drivers/infiniband/hw/cxgb4/ev.c|   55 +--
 drivers/infiniband/hw/cxgb4/t4.h|4 +-
 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c |   66 +++
 drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h  |1 +
 5 files changed, 143 insertions(+), 11 deletions(-)

diff --git a/drivers/infiniband/hw/cxgb4/device.c 
b/drivers/infiniband/hw/cxgb4/device.c
index e76358e..8386678 100644
--- a/drivers/infiniband/hw/cxgb4/device.c
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -241,12 +241,32 @@ static int dump_stag(int id, void *p, void *data)
struct c4iw_debugfs_data *stagd = data;
int space;
int cc;
+   struct fw_ri_tpte tpte;
+   int ret;
 
space = stagd->bufsize - stagd->pos - 1;
if (space == 0)
return 1;
 
-   cc = snprintf(stagd->buf + stagd->pos, space, "0x%x\n", id<<8);
+   ret = cxgb4_read_tpte(stagd->devp->rdev.lldi.ports[0], (u32)id<<8,
+ (__be32 *)&tpte);
+   if (ret) {
+   dev_err(&stagd->devp->rdev.lldi.pdev->dev,
+   "%s cxgb4_read_tpte err %d\n", __func__, ret);
+   return ret;
+   }
+   cc = snprintf(stagd->buf + stagd->pos, space,
+ "stag: idx 0x%x valid %d key 0x%x state %d pdid %d "
+ "perm 0x%x ps %d len 0x%llx va 0x%llx\n",
+ (u32)id<<8,
+ G_FW_RI_TPTE_VALID(ntohl(tpte.valid_to_pdid)),
+ G_FW_RI_TPTE_STAGKEY(ntohl(tpte.valid_to_pdid)),
+ G_FW_RI_TPTE_STAGSTATE(ntohl(tpte.valid_to_pdid)),
+ G_FW_RI_TPTE_PDID(ntohl(tpte.valid_to_pdid)),
+ G_FW_RI_TPTE_PERM(ntohl(tpte.locread_to_qpid)),
+ G_FW_RI_TPTE_PS(ntohl(tpte.locread_to_qpid)),
+ ((u64)ntohl(tpte.len_hi) << 32) | ntohl(tpte.len_lo),
+ ((u64)ntohl(tpte.va_hi) << 32) | ntohl(tpte.va_lo_fbo));
if (cc  space)
stagd-pos += cc;
return 0;
@@ -259,7 +279,7 @@ static int stag_release(struct inode *inode, struct file 
*file)
printk(KERN_INFO "%s null stagd?\n", __func__);
return 0;
}
-   kfree(stagd-buf);
+   vfree(stagd-buf);
kfree(stagd);
return 0;
 }
@@ -282,8 +302,8 @@ static int stag_open(struct inode *inode, struct file *file)
idr_for_each(&stagd->devp->mmidr, count_idrs, &count);
spin_unlock_irq(&stagd->devp->lock);
 
-   stagd->bufsize = count * sizeof("0x12345678\n");
-   stagd->buf = kmalloc(stagd->bufsize, GFP_KERNEL);
+   stagd->bufsize = count * 256;
+   stagd->buf = vmalloc(stagd->bufsize);
if (!stagd-buf) {
ret = -ENOMEM;
goto err1;
diff --git a/drivers/infiniband/hw/cxgb4/ev.c b/drivers/infiniband/hw/cxgb4/ev.c
index d61d0a1..fbe6051 100644
--- a/drivers/infiniband/hw/cxgb4/ev.c
+++ b/drivers/infiniband/hw/cxgb4/ev.c
@@ -35,6 +35,55 @@
 
#include "iw_cxgb4.h"
 
+static void print_tpte(struct c4iw_dev *dev, u32 stag)
+{
+   int ret;
+   struct fw_ri_tpte tpte;
+
+   ret = cxgb4_read_tpte(dev->rdev.lldi.ports[0], stag,
+ (__be32 *)&tpte);
+   if (ret) {
+   dev_err(&dev->rdev.lldi.pdev->dev,
+   "%s cxgb4_read_tpte err %d\n", __func__, ret);
+   return;
+   }
+   PDBG("stag idx 0x%x valid %d key 0x%x state %d pdid %d "
+  "perm 0x%x ps %d len 0x%llx va 0x%llx\n",
+  stag & 0xffffff00,
+  G_FW_RI_TPTE_VALID(ntohl(tpte.valid_to_pdid)),
+  G_FW_RI_TPTE_STAGKEY(ntohl(tpte.valid_to_pdid)),
+  G_FW_RI_TPTE_STAGSTATE(ntohl(tpte.valid_to_pdid)),
+  G_FW_RI_TPTE_PDID(ntohl(tpte.valid_to_pdid)),
+  G_FW_RI_TPTE_PERM(ntohl(tpte.locread_to_qpid)),
+  G_FW_RI_TPTE_PS(ntohl(tpte.locread_to_qpid)),
+  ((u64)ntohl(tpte.len_hi) << 32) | ntohl(tpte.len_lo),
+  ((u64)ntohl(tpte.va_hi) << 32) | ntohl(tpte.va_lo_fbo));
+}
+
+static void dump_err_cqe(struct c4iw_dev *dev, struct t4_cqe *err_cqe)
+{
+   __be64 *p = (void *)err_cqe;
+
+   dev_err(&dev->rdev.lldi.pdev->dev,
+   "AE qpid %d opcode %d status 0x%x "
+   "type %d len 0x%x wrid.hi 0x%x wrid.lo 0x%x\n",
+   CQE_QPID(err_cqe), CQE_OPCODE(err_cqe),
+   CQE_STATUS(err_cqe), CQE_TYPE(err_cqe), ntohl(err_cqe->len),
+   CQE_WRID_HI(err_cqe), CQE_WRID_LOW(err_cqe));
+
+   PDBG(%016llx %016llx %016llx 

[PATCHv2 net-next 2/4] cxgb4/iw_cxgb4: use firmware ord/ird resource limits

2014-07-14 Thread Hariprasad Shenai
Advertise a larger max read queue depth for qps, and gather the resource limits
from fw and use them to avoid exhausting all the resources.

Design:

cxgb4:

Obtain the max_ordird_qp and max_ird_adapter device params from FW
at init time and pass them up to the ULDs when they attach.  If these
parameters are not available, due to older firmware, then hard-code
the values based on the known values for older firmware.
iw_cxgb4:

Fix the c4iw_query_device() to report these correct values based on
adapter parameters.  ibv_query_device() will always return:

max_qp_rd_atom = max_qp_init_rd_atom = min(module_max, max_ordird_qp)
max_res_rd_atom = max_ird_adapter

Bump up the per qp max module option to 32, allowing it to be increased
by the user up to the device max of max_ordird_qp.  32 seems to be
sufficient to maximize throughput for streaming read benchmarks.

Fail connection setup if the negotiated IRD exhausts the available
adapter ird resources.  So the driver will track the amount of ird
resource in use and not send an RI_WR/INIT to FW that would reduce the
available ird resources below zero.

Signed-off-by: Steve Wise sw...@opengridcomputing.com
Signed-off-by: Hariprasad Shenai haripra...@chelsio.com
---
 drivers/infiniband/hw/cxgb4/cm.c|   80 ---
 drivers/infiniband/hw/cxgb4/device.c|2 +
 drivers/infiniband/hw/cxgb4/iw_cxgb4.h  |9 ++-
 drivers/infiniband/hw/cxgb4/provider.c  |6 +-
 drivers/infiniband/hw/cxgb4/qp.c|   54 +--
 drivers/net/ethernet/chelsio/cxgb4/cxgb4.h  |3 +
 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c |   18 +
 drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h  |2 +
 drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h   |2 +
 9 files changed, 142 insertions(+), 34 deletions(-)

diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index d62a0f9..df5bd3d 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -79,9 +79,10 @@ static int dack_mode = 1;
 module_param(dack_mode, int, 0644);
 MODULE_PARM_DESC(dack_mode, "Delayed ack mode (default=1)");
 
-int c4iw_max_read_depth = 8;
+uint c4iw_max_read_depth = 32;
 module_param(c4iw_max_read_depth, int, 0644);
-MODULE_PARM_DESC(c4iw_max_read_depth, "Per-connection max ORD/IRD (default=8)");
+MODULE_PARM_DESC(c4iw_max_read_depth,
+ "Per-connection max ORD/IRD (default=32)");
 
 static int enable_tcp_timestamps;
 module_param(enable_tcp_timestamps, int, 0644);
@@ -813,6 +814,8 @@ static void send_mpa_req(struct c4iw_ep *ep, struct sk_buff 
*skb,
if (mpa_rev_to_use == 2) {
mpa->private_data_size = htons(ntohs(mpa->private_data_size) +
   sizeof (struct mpa_v2_conn_params));
+   PDBG("%s initiator ird %u ord %u\n", __func__, ep->ird,
+ep->ord);
mpa_v2_params.ird = htons((u16)ep->ird);
mpa_v2_params.ord = htons((u16)ep->ord);
 
@@ -1182,8 +1185,8 @@ static int connect_request_upcall(struct c4iw_ep *ep)
sizeof(struct mpa_v2_conn_params);
} else {
/* this means MPA_v1 is used. Send max supported */
-   event.ord = c4iw_max_read_depth;
-   event.ird = c4iw_max_read_depth;
+   event.ord = cur_max_read_depth(ep->com.dev);
+   event.ird = cur_max_read_depth(ep->com.dev);
event.private_data_len = ep->plen;
event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
}
@@ -1247,6 +1250,8 @@ static int update_rx_credits(struct c4iw_ep *ep, u32 
credits)
return credits;
 }
 
+#define RELAXED_IRD_NEGOTIATION 1
+
 static int process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
 {
struct mpa_message *mpa;
@@ -1358,17 +1363,33 @@ static int process_mpa_reply(struct c4iw_ep *ep, struct 
sk_buff *skb)
MPA_V2_IRD_ORD_MASK;
resp_ord = ntohs(mpa_v2_params->ord) &
MPA_V2_IRD_ORD_MASK;
+   PDBG("%s responder ird %u ord %u ep ird %u ord %u\n",
+__func__, resp_ird, resp_ord, ep->ird, ep->ord);
 
/*
 * This is a double-check. Ideally, below checks are
 * not required since ird/ord stuff has been taken
 * care of in c4iw_accept_cr
 */
-   if ((ep->ird < resp_ord) || (ep->ord > resp_ird)) {
+   if (ep->ird < resp_ord) {
+   if (RELAXED_IRD_NEGOTIATION && resp_ord <=
+   ep->com.dev->rdev.lldi.max_ordird_qp)
+   ep->ird = resp_ord;
+   else
+   insuff_ird = 1;
+   

[PATCHv2 net-next 1/4] iw_cxgb4: Detect Ing. Padding Boundary at run-time

2014-07-14 Thread Hariprasad Shenai
Updates iw_cxgb4 to determine the Ingress Padding Boundary from
cxgb4_lld_info, and take subsequent actions.

Signed-off-by: Steve Wise sw...@opengridcomputing.com
Signed-off-by: Hariprasad Shenai haripra...@chelsio.com
---
 drivers/infiniband/hw/cxgb4/cq.c|4 ++--
 drivers/infiniband/hw/cxgb4/device.c|   21 +
 drivers/infiniband/hw/cxgb4/iw_cxgb4.h  |   12 
 drivers/infiniband/hw/cxgb4/provider.c  |4 ++--
 drivers/infiniband/hw/cxgb4/qp.c|   10 ++
 drivers/infiniband/hw/cxgb4/t4.h|8 
 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c |2 ++
 drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h  |2 ++
 8 files changed, 47 insertions(+), 16 deletions(-)

diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index c04292c..f04a838 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -895,7 +895,7 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int 
entries,
/*
 * Make actual HW queue 2x to avoid cdix_inc overflows.
 */
-   hwentries = min(entries * 2, T4_MAX_IQ_SIZE);
+   hwentries = min(entries * 2, rhp->rdev.hw_queue.t4_max_iq_size);
 
/*
 * Make HW queue at least 64 entries so GTS updates aren't too
@@ -912,7 +912,7 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int 
entries,
if (ucontext) {
memsize = roundup(memsize, PAGE_SIZE);
hwentries = memsize / sizeof *chp->cq.queue;
-   while (hwentries > T4_MAX_IQ_SIZE) {
+   while (hwentries > rhp->rdev.hw_queue.t4_max_iq_size) {
memsize -= PAGE_SIZE;
hwentries = memsize / sizeof *chp->cq.queue;
}
diff --git a/drivers/infiniband/hw/cxgb4/device.c 
b/drivers/infiniband/hw/cxgb4/device.c
index dd93aad..88291ef 100644
--- a/drivers/infiniband/hw/cxgb4/device.c
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -768,6 +768,27 @@ static struct c4iw_dev *c4iw_alloc(const struct 
cxgb4_lld_info *infop)
}
devp->rdev.lldi = *infop;
 
+   /* init various hw-queue params based on lld info */
+   PDBG("%s: Ing. padding boundary is %d, egrsstatuspagesize = %d\n",
+__func__, devp->rdev.lldi.sge_ingpadboundary,
+devp->rdev.lldi.sge_egrstatuspagesize);
+
+   devp->rdev.hw_queue.t4_eq_status_entries =
+   devp->rdev.lldi.sge_ingpadboundary > 64 ? 2 : 1;
+   devp->rdev.hw_queue.t4_max_eq_size =
+   65520 - devp->rdev.hw_queue.t4_eq_status_entries;
+   devp->rdev.hw_queue.t4_max_iq_size = 65520 - 1;
+   devp->rdev.hw_queue.t4_max_rq_size =
+   8192 - devp->rdev.hw_queue.t4_eq_status_entries;
+   devp->rdev.hw_queue.t4_max_sq_size =
+   devp->rdev.hw_queue.t4_max_eq_size - 1;
+   devp->rdev.hw_queue.t4_max_qp_depth =
+   devp->rdev.hw_queue.t4_max_rq_size - 1;
+   devp->rdev.hw_queue.t4_max_cq_depth =
+   devp->rdev.hw_queue.t4_max_iq_size - 1;
+   devp->rdev.hw_queue.t4_stat_len =
+   devp->rdev.lldi.sge_egrstatuspagesize;
+
/*
 * For T5 devices, we map all of BAR2 with WC.
 * For T4 devices with onchip qp mem, we map only that part
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h 
b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index 125bc5d..9b9754c 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -139,6 +139,17 @@ struct c4iw_stats {
u64  pas_ofld_conn_fails;
 };
 
+struct c4iw_hw_queue {
+   int t4_eq_status_entries;
+   int t4_max_eq_size;
+   int t4_max_iq_size;
+   int t4_max_rq_size;
+   int t4_max_sq_size;
+   int t4_max_qp_depth;
+   int t4_max_cq_depth;
+   int t4_stat_len;
+};
+
 struct c4iw_rdev {
struct c4iw_resource resource;
unsigned long qpshift;
@@ -156,6 +167,7 @@ struct c4iw_rdev {
unsigned long oc_mw_pa;
void __iomem *oc_mw_kva;
struct c4iw_stats stats;
+   struct c4iw_hw_queue hw_queue;
struct t4_dev_status_page *status_page;
 };
 
diff --git a/drivers/infiniband/hw/cxgb4/provider.c 
b/drivers/infiniband/hw/cxgb4/provider.c
index b1d3053..1d41b92 100644
--- a/drivers/infiniband/hw/cxgb4/provider.c
+++ b/drivers/infiniband/hw/cxgb4/provider.c
@@ -319,13 +319,13 @@ static int c4iw_query_device(struct ib_device *ibdev,
props->vendor_part_id = (u32)dev->rdev.lldi.pdev->device;
props->max_mr_size = T4_MAX_MR_SIZE;
props->max_qp = T4_MAX_NUM_QP;
-   props->max_qp_wr = T4_MAX_QP_DEPTH;
+   props->max_qp_wr = dev->rdev.hw_queue.t4_max_qp_depth;
props->max_sge = T4_MAX_RECV_SGE;
props->max_sge_rd = 1;
props->max_qp_rd_atom = c4iw_max_read_depth;
props->max_qp_init_rd_atom = c4iw_max_read_depth;
props->max_cq = T4_MAX_NUM_CQ;
-