Add RQ PB (packet buffer) and WQE caching configuration
options.
Signed-off-by: Rahul Bhansali <[email protected]>
---
drivers/common/cnxk/roc_nix.h | 14 ++++++++++++++
drivers/common/cnxk/roc_nix_inl.c | 2 ++
drivers/common/cnxk/roc_nix_queue.c | 16 ++++++++--------
3 files changed, 24 insertions(+), 8 deletions(-)
diff --git a/drivers/common/cnxk/roc_nix.h b/drivers/common/cnxk/roc_nix.h
index ffa1a706f9..7bc3e1f5c6 100644
--- a/drivers/common/cnxk/roc_nix.h
+++ b/drivers/common/cnxk/roc_nix.h
@@ -34,6 +34,16 @@
#define ROC_NIX_LSO_FORMAT_IDX_TSOV6 1
#define ROC_NIX_LSO_FORMAT_IDX_IPV4 2
+#define ROC_NIX_RQ_MAX_PB_CACHING_VAL 3
+
+/* First aligned cache block is allocated into the LLC.
+ * All remaining cache blocks are not allocated.
+ */
+#define ROC_NIX_RQ_DEFAULT_PB_CACHING 2
+
+/* Writes of WQE data are allocated into LLC. */
+#define ROC_NIX_RQ_DEFAULT_WQE_CACHING 1
+
enum roc_nix_rss_reta_sz {
ROC_NIX_RSS_RETA_SZ_64 = 64,
ROC_NIX_RSS_RETA_SZ_128 = 128,
@@ -448,6 +458,10 @@ struct roc_nix_rq {
bool spb_drop_ena;
/* XQE drop enable */
bool xqe_drop_ena;
+ /* RQ PB caching */
+ uint8_t pb_caching;
+ /* RQ WQE caching */
+ uint8_t wqe_caching;
/* End of Input parameters */
struct roc_nix *roc_nix;
uint64_t meta_aura_handle;
diff --git a/drivers/common/cnxk/roc_nix_inl.c b/drivers/common/cnxk/roc_nix_inl.c
index a21c40acf1..911c349604 100644
--- a/drivers/common/cnxk/roc_nix_inl.c
+++ b/drivers/common/cnxk/roc_nix_inl.c
@@ -1838,6 +1838,8 @@ roc_nix_inl_dev_rq_get(struct roc_nix_rq *rq, bool enable)
inl_rq->spb_ena = rq->spb_ena;
inl_rq->spb_aura_handle = rq->spb_aura_handle;
inl_rq->spb_size = rq->spb_size;
+ inl_rq->pb_caching = rq->pb_caching;
+ inl_rq->wqe_caching = rq->wqe_caching;
if (roc_errata_nix_no_meta_aura()) {
uint64_t aura_limit =
diff --git a/drivers/common/cnxk/roc_nix_queue.c b/drivers/common/cnxk/roc_nix_queue.c
index ab3a71ec60..ef9b651022 100644
--- a/drivers/common/cnxk/roc_nix_queue.c
+++ b/drivers/common/cnxk/roc_nix_queue.c
@@ -499,7 +499,7 @@ nix_rq_cn9k_cfg(struct dev *dev, struct roc_nix_rq *rq, uint16_t qints,
aq->rq.sso_grp = rq->hwgrp;
aq->rq.ena_wqwd = 1;
aq->rq.wqe_skip = rq->wqe_skip;
- aq->rq.wqe_caching = 1;
+ aq->rq.wqe_caching = rq->wqe_caching;
aq->rq.good_utag = rq->tag_mask >> 24;
aq->rq.bad_utag = rq->tag_mask >> 24;
@@ -530,7 +530,7 @@ nix_rq_cn9k_cfg(struct dev *dev, struct roc_nix_rq *rq, uint16_t qints,
aq->rq.lpb_sizem1 = rq->lpb_size / 8;
aq->rq.lpb_sizem1 -= 1; /* Expressed in size minus one */
aq->rq.ena = ena;
- aq->rq.pb_caching = 0x2; /* First cache aligned block to LLC */
+ aq->rq.pb_caching = rq->pb_caching;
aq->rq.xqe_imm_size = 0; /* No pkt data copy to CQE */
aq->rq.rq_int_ena = 0;
/* Many to one reduction */
@@ -616,7 +616,7 @@ nix_rq_cn10k_cfg(struct dev *dev, struct roc_nix_rq *rq, uint16_t qints, bool cf
aq->rq.sso_grp = rq->hwgrp;
aq->rq.ena_wqwd = 1;
aq->rq.wqe_skip = rq->wqe_skip;
- aq->rq.wqe_caching = 1;
+ aq->rq.wqe_caching = rq->wqe_caching;
aq->rq.xqe_drop_ena = 0;
aq->rq.good_utag = rq->tag_mask >> 24;
@@ -647,7 +647,7 @@ nix_rq_cn10k_cfg(struct dev *dev, struct roc_nix_rq *rq, uint16_t qints, bool cf
aq->rq.ipsecd_drop_en = 1;
aq->rq.ena_wqwd = 1;
aq->rq.wqe_skip = rq->wqe_skip;
- aq->rq.wqe_caching = 1;
+ aq->rq.wqe_caching = rq->wqe_caching;
}
aq->rq.lpb_aura = roc_npa_aura_handle_to_aura(rq->aura_handle);
@@ -683,7 +683,7 @@ nix_rq_cn10k_cfg(struct dev *dev, struct roc_nix_rq *rq, uint16_t qints, bool cf
aq->rq.spb_ena = 0;
}
- aq->rq.pb_caching = 0x2; /* First cache aligned block to LLC */
+ aq->rq.pb_caching = rq->pb_caching;
aq->rq.xqe_imm_size = 0; /* No pkt data copy to CQE */
aq->rq.rq_int_ena = 0;
/* Many to one reduction */
@@ -797,7 +797,7 @@ nix_rq_cfg(struct dev *dev, struct roc_nix_rq *rq, uint16_t qints, bool cfg, boo
aq->rq.sso_grp = rq->hwgrp;
aq->rq.ena_wqwd = 1;
aq->rq.wqe_skip = rq->wqe_skip;
- aq->rq.wqe_caching = 1;
+ aq->rq.wqe_caching = rq->wqe_caching;
aq->rq.good_utag = rq->tag_mask >> 24;
aq->rq.bad_utag = rq->tag_mask >> 24;
@@ -816,7 +816,7 @@ nix_rq_cfg(struct dev *dev, struct roc_nix_rq *rq, uint16_t qints, bool cfg, boo
aq->rq.ipsecd_drop_en = 1;
aq->rq.ena_wqwd = 1;
aq->rq.wqe_skip = rq->wqe_skip;
- aq->rq.wqe_caching = 1;
+ aq->rq.wqe_caching = rq->wqe_caching;
}
aq->rq.lpb_aura = roc_npa_aura_handle_to_aura(rq->aura_handle);
@@ -852,7 +852,7 @@ nix_rq_cfg(struct dev *dev, struct roc_nix_rq *rq, uint16_t qints, bool cfg, boo
aq->rq.spb_ena = 0;
}
- aq->rq.pb_caching = 0x2; /* First cache aligned block to LLC */
+ aq->rq.pb_caching = rq->pb_caching;
aq->rq.xqe_imm_size = 0; /* No pkt data copy to CQE */
aq->rq.rq_int_ena = 0;
/* Many to one reduction */
--
2.34.1