Since the next_hop field for IPv4 has been extended to 24 bits, the
maximum number of tbl8s is now 2^24. A new rte_lpm_config structure is
used so that the LPM library allocates exactly the amount of memory
needed to hold the application's rules.

Changed structures in the LPM library: rte_lpm_tbl24_entry and
rte_lpm_tbl8_entry are merged into a single structure,
rte_lpm_tbl_entry.
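
Example of the new create flow and the widened next_hop type (a minimal
sketch based on the updated unit tests below; the table name, IP and
field values are illustrative only):

    struct rte_lpm_config config;
    struct rte_lpm *lpm;
    uint32_t ip = IPv4(10, 0, 0, 0);
    uint32_t next_hop;              /* next hops are now passed as uint32_t */

    config.max_rules = 256;         /* memory is sized for exactly this many rules */
    config.number_tbl8s = 256;      /* number of tbl8 groups to allocate */
    config.flags = 0;               /* currently unused; the tests pass 0 */

    /* max_rules and flags move from create() arguments into the config */
    lpm = rte_lpm_create("example_lpm", SOCKET_ID_ANY, &config);
    if (lpm == NULL)
        return -1;

    if (rte_lpm_add(lpm, ip, 24, 100) == 0)
        (void)rte_lpm_lookup(lpm, ip, &next_hop);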

Signed-off-by: Michal Kobylinski <michalx.kobylinski@intel.com>
---
 app/test/test_func_reentrancy.c      |    9 +-
 app/test/test_lpm.c                  |  274 +++++---
 app/test/test_mp_secondary.c         |    7 +-
 app/test/test_table_combined.c       |    1 +
 app/test/test_table_tables.c         |    2 +
 doc/guides/rel_notes/release_2_3.rst |    4 +
 lib/librte_lpm/Makefile              |    2 +-
 lib/librte_lpm/rte_lpm.c             | 1157 ++++++++++++++++++++++++++++++----
 lib/librte_lpm/rte_lpm.h             |  232 +++++--
 lib/librte_lpm/rte_lpm_version.map   |   49 +-
 lib/librte_table/rte_table.h         |    1 +
 lib/librte_table/rte_table_lpm.c     |   30 +-
 lib/librte_table/rte_table_lpm.h     |    4 +
 13 files changed, 1454 insertions(+), 318 deletions(-)

diff --git a/app/test/test_func_reentrancy.c b/app/test/test_func_reentrancy.c
index dbecc52..592e5d0 100644
--- a/app/test/test_func_reentrancy.c
+++ b/app/test/test_func_reentrancy.c
@@ -359,6 +359,11 @@ lpm_create_free(__attribute__((unused)) void *arg)
 {
        unsigned lcore_self = rte_lcore_id();
        struct rte_lpm *lpm;
+       struct rte_lpm_config config;
+
+       config.max_rules = 4;
+       config.number_tbl8s = 256;
+       config.flags = 0;
        char lpm_name[MAX_STRING_SIZE];
        int i;

@@ -366,7 +371,7 @@ lpm_create_free(__attribute__((unused)) void *arg)

        /* create the same lpm simultaneously on all threads */
        for (i = 0; i < MAX_ITER_TIMES; i++) {
-               lpm = rte_lpm_create("fr_test_once",  SOCKET_ID_ANY, 4, 0);
+               lpm = rte_lpm_create("fr_test_once", 0, &config);
                if ((NULL == lpm) && (rte_lpm_find_existing("fr_test_once") == NULL))
                        return -1;
        }
@@ -374,7 +379,7 @@ lpm_create_free(__attribute__((unused)) void *arg)
        /* create mutiple fbk tables simultaneously */
        for (i = 0; i < MAX_LPM_ITER_TIMES; i++) {
                snprintf(lpm_name, sizeof(lpm_name), "fr_test_%d_%d", lcore_self, i);
-               lpm = rte_lpm_create(lpm_name, SOCKET_ID_ANY, 4, 0);
+               lpm = rte_lpm_create(lpm_name, SOCKET_ID_ANY, &config);
                if (NULL == lpm)
                        return -1;

diff --git a/app/test/test_lpm.c b/app/test/test_lpm.c
index 8b4ded9..69026a4 100644
--- a/app/test/test_lpm.c
+++ b/app/test/test_lpm.c
@@ -57,7 +57,7 @@
        }                                                                     \
 } while(0)

-typedef int32_t (* rte_lpm_test)(void);
+typedef int32_t (*rte_lpm_test)(void);

 static int32_t test0(void);
 static int32_t test1(void);
@@ -105,6 +105,7 @@ rte_lpm_test tests[] = {
 #define NUM_LPM_TESTS (sizeof(tests)/sizeof(tests[0]))
 #define MAX_DEPTH 32
 #define MAX_RULES 256
+#define NUMBER_TBL8S 256
 #define PASS 0

 /*
@@ -115,18 +116,25 @@ int32_t
 test0(void)
 {
        struct rte_lpm *lpm = NULL;
+       struct rte_lpm_config config;
+
+       config.max_rules = MAX_RULES;
+       config.number_tbl8s = NUMBER_TBL8S;
+       config.flags = 0;

        /* rte_lpm_create: lpm name == NULL */
-       lpm = rte_lpm_create(NULL, SOCKET_ID_ANY, MAX_RULES, 0);
+       lpm = rte_lpm_create(NULL, SOCKET_ID_ANY, &config);
        TEST_LPM_ASSERT(lpm == NULL);

        /* rte_lpm_create: max_rules = 0 */
        /* Note: __func__ inserts the function name, in this case "test0". */
-       lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, 0, 0);
+       config.max_rules = 0;
+       lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
        TEST_LPM_ASSERT(lpm == NULL);

        /* socket_id < -1 is invalid */
-       lpm = rte_lpm_create(__func__, -2, MAX_RULES, 0);
+       config.max_rules = MAX_RULES;
+       lpm = rte_lpm_create(__func__, -2, &config);
        TEST_LPM_ASSERT(lpm == NULL);

        return PASS;
@@ -140,11 +148,17 @@ int32_t
 test1(void)
 {
        struct rte_lpm *lpm = NULL;
+       struct rte_lpm_config config;
+
+       config.number_tbl8s = NUMBER_TBL8S;
+       config.flags = 0;
+
        int32_t i;

        /* rte_lpm_free: Free NULL */
        for (i = 0; i < 100; i++) {
-               lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES - i, 0);
+               config.max_rules = MAX_RULES - i;
+               lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
                TEST_LPM_ASSERT(lpm != NULL);

                rte_lpm_free(lpm);
@@ -164,8 +178,13 @@ int32_t
 test2(void)
 {
        struct rte_lpm *lpm = NULL;
+       struct rte_lpm_config config;

-       lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0);
+       config.max_rules = MAX_RULES;
+       config.number_tbl8s = NUMBER_TBL8S;
+       config.flags = 0;
+
+       lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
        TEST_LPM_ASSERT(lpm != NULL);

        rte_lpm_free(lpm);
@@ -180,8 +199,13 @@ int32_t
 test3(void)
 {
        struct rte_lpm *lpm = NULL;
-       uint32_t ip = IPv4(0, 0, 0, 0);
-       uint8_t depth = 24, next_hop = 100;
+       struct rte_lpm_config config;
+
+       config.max_rules = MAX_RULES;
+       config.number_tbl8s = NUMBER_TBL8S;
+       config.flags = 0;
+       uint32_t ip = IPv4(0, 0, 0, 0), next_hop = 100;
+       uint8_t depth = 24;
        int32_t status = 0;

        /* rte_lpm_add: lpm == NULL */
@@ -189,7 +213,7 @@ test3(void)
        TEST_LPM_ASSERT(status < 0);

        /*Create vaild lpm to use in rest of test. */
-       lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0);
+       lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
        TEST_LPM_ASSERT(lpm != NULL);

        /* rte_lpm_add: depth < 1 */
@@ -213,6 +237,11 @@ int32_t
 test4(void)
 {
        struct rte_lpm *lpm = NULL;
+       struct rte_lpm_config config;
+
+       config.max_rules = MAX_RULES;
+       config.number_tbl8s = NUMBER_TBL8S;
+       config.flags = 0;
        uint32_t ip = IPv4(0, 0, 0, 0);
        uint8_t depth = 24;
        int32_t status = 0;
@@ -222,7 +251,7 @@ test4(void)
        TEST_LPM_ASSERT(status < 0);

        /*Create vaild lpm to use in rest of test. */
-       lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0);
+       lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
        TEST_LPM_ASSERT(lpm != NULL);

        /* rte_lpm_delete: depth < 1 */
@@ -247,8 +276,12 @@ test5(void)
 {
 #if defined(RTE_LIBRTE_LPM_DEBUG)
        struct rte_lpm *lpm = NULL;
-       uint32_t ip = IPv4(0, 0, 0, 0);
-       uint8_t next_hop_return = 0;
+       struct rte_lpm_config config;
+
+       config.max_rules = MAX_RULES;
+       config.number_tbl8s = NUMBER_TBL8S;
+       config.flags = 0;
+       uint32_t ip = IPv4(0, 0, 0, 0), next_hop_return = 0;
        int32_t status = 0;

        /* rte_lpm_lookup: lpm == NULL */
@@ -256,7 +289,7 @@ test5(void)
        TEST_LPM_ASSERT(status < 0);

        /*Create vaild lpm to use in rest of test. */
-       lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0);
+       lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
        TEST_LPM_ASSERT(lpm != NULL);

        /* rte_lpm_lookup: depth < 1 */
@@ -277,11 +310,16 @@ int32_t
 test6(void)
 {
        struct rte_lpm *lpm = NULL;
-       uint32_t ip = IPv4(0, 0, 0, 0);
-       uint8_t depth = 24, next_hop_add = 100, next_hop_return = 0;
+       struct rte_lpm_config config;
+
+       config.max_rules = MAX_RULES;
+       config.number_tbl8s = NUMBER_TBL8S;
+       config.flags = 0;
+       uint32_t ip = IPv4(0, 0, 0, 0), next_hop_add = 100, next_hop_return = 0;
+       uint8_t depth = 24;
        int32_t status = 0;

-       lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0);
+       lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
        TEST_LPM_ASSERT(lpm != NULL);

        status = rte_lpm_add(lpm, ip, depth, next_hop_add);
@@ -309,13 +347,18 @@ int32_t
 test7(void)
 {
        __m128i ipx4;
-       uint16_t hop[4];
+       uint32_t hop[4];
        struct rte_lpm *lpm = NULL;
-       uint32_t ip = IPv4(0, 0, 0, 0);
-       uint8_t depth = 32, next_hop_add = 100, next_hop_return = 0;
+       struct rte_lpm_config config;
+
+       config.max_rules = MAX_RULES;
+       config.number_tbl8s = NUMBER_TBL8S;
+       config.flags = 0;
+       uint32_t ip = IPv4(0, 0, 0, 0), next_hop_add = 100, next_hop_return = 0;
+       uint8_t depth = 32;
        int32_t status = 0;

-       lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0);
+       lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
        TEST_LPM_ASSERT(lpm != NULL);

        status = rte_lpm_add(lpm, ip, depth, next_hop_add);
@@ -325,10 +368,10 @@ test7(void)
        TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));

        ipx4 = _mm_set_epi32(ip, ip + 0x100, ip - 0x100, ip);
-       rte_lpm_lookupx4(lpm, ipx4, hop, UINT16_MAX);
+       rte_lpm_lookupx4(lpm, ipx4, hop, UINT32_MAX);
        TEST_LPM_ASSERT(hop[0] == next_hop_add);
-       TEST_LPM_ASSERT(hop[1] == UINT16_MAX);
-       TEST_LPM_ASSERT(hop[2] == UINT16_MAX);
+       TEST_LPM_ASSERT(hop[1] == UINT32_MAX);
+       TEST_LPM_ASSERT(hop[2] == UINT32_MAX);
        TEST_LPM_ASSERT(hop[3] == next_hop_add);

        status = rte_lpm_delete(lpm, ip, depth);
@@ -355,13 +398,19 @@ int32_t
 test8(void)
 {
        __m128i ipx4;
-       uint16_t hop[4];
+       uint32_t hop[4];
        struct rte_lpm *lpm = NULL;
+       struct rte_lpm_config config;
+
+       config.max_rules = MAX_RULES;
+       config.number_tbl8s = NUMBER_TBL8S;
+       config.flags = 0;
        uint32_t ip1 = IPv4(127, 255, 255, 255), ip2 = IPv4(128, 0, 0, 0);
-       uint8_t depth, next_hop_add, next_hop_return;
+       uint32_t next_hop_add, next_hop_return;
+       uint8_t depth;
        int32_t status = 0;

-       lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0);
+       lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
        TEST_LPM_ASSERT(lpm != NULL);

        /* Loop with rte_lpm_add. */
@@ -381,10 +430,10 @@ test8(void)
                        (next_hop_return == next_hop_add));

                ipx4 = _mm_set_epi32(ip2, ip1, ip2, ip1);
-               rte_lpm_lookupx4(lpm, ipx4, hop, UINT16_MAX);
-               TEST_LPM_ASSERT(hop[0] == UINT16_MAX);
+               rte_lpm_lookupx4(lpm, ipx4, hop, UINT32_MAX);
+               TEST_LPM_ASSERT(hop[0] == UINT32_MAX);
                TEST_LPM_ASSERT(hop[1] == next_hop_add);
-               TEST_LPM_ASSERT(hop[2] == UINT16_MAX);
+               TEST_LPM_ASSERT(hop[2] == UINT32_MAX);
                TEST_LPM_ASSERT(hop[3] == next_hop_add);
        }

@@ -400,8 +449,7 @@ test8(void)
                if (depth != 1) {
                        TEST_LPM_ASSERT((status == 0) &&
                                (next_hop_return == next_hop_add));
-               }
-               else {
+               } else {
                        TEST_LPM_ASSERT(status == -ENOENT);
                }

@@ -409,16 +457,16 @@ test8(void)
                TEST_LPM_ASSERT(status == -ENOENT);

                ipx4 = _mm_set_epi32(ip1, ip1, ip2, ip2);
-               rte_lpm_lookupx4(lpm, ipx4, hop, UINT16_MAX);
+               rte_lpm_lookupx4(lpm, ipx4, hop, UINT32_MAX);
                if (depth != 1) {
                        TEST_LPM_ASSERT(hop[0] == next_hop_add);
                        TEST_LPM_ASSERT(hop[1] == next_hop_add);
                } else {
-                       TEST_LPM_ASSERT(hop[0] == UINT16_MAX);
-                       TEST_LPM_ASSERT(hop[1] == UINT16_MAX);
+                       TEST_LPM_ASSERT(hop[0] == UINT32_MAX);
+                       TEST_LPM_ASSERT(hop[1] == UINT32_MAX);
                }
-               TEST_LPM_ASSERT(hop[2] == UINT16_MAX);
-               TEST_LPM_ASSERT(hop[3] == UINT16_MAX);
+               TEST_LPM_ASSERT(hop[2] == UINT32_MAX);
+               TEST_LPM_ASSERT(hop[3] == UINT32_MAX);
        }

        rte_lpm_free(lpm);
@@ -437,9 +485,14 @@ int32_t
 test9(void)
 {
        struct rte_lpm *lpm = NULL;
-       uint32_t ip, ip_1, ip_2;
-       uint8_t depth, depth_1, depth_2, next_hop_add, next_hop_add_1,
-               next_hop_add_2, next_hop_return;
+       struct rte_lpm_config config;
+
+       config.max_rules = MAX_RULES;
+       config.number_tbl8s = NUMBER_TBL8S;
+       config.flags = 0;
+       uint32_t ip, ip_1, ip_2, next_hop_add, next_hop_add_1,
+       next_hop_add_2, next_hop_return;
+       uint8_t depth, depth_1, depth_2;
        int32_t status = 0;

        /* Add & lookup to hit invalid TBL24 entry */
@@ -447,7 +500,7 @@ test9(void)
        depth = 24;
        next_hop_add = 100;

-       lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0);
+       lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
        TEST_LPM_ASSERT(lpm != NULL);

        status = rte_lpm_add(lpm, ip, depth, next_hop_add);
@@ -599,15 +652,19 @@ test9(void)
 int32_t
 test10(void)
 {
-
        struct rte_lpm *lpm = NULL;
-       uint32_t ip;
-       uint8_t depth, next_hop_add, next_hop_return;
+       struct rte_lpm_config config;
+
+       config.max_rules = MAX_RULES;
+       config.number_tbl8s = NUMBER_TBL8S;
+       config.flags = 0;
+       uint32_t ip, next_hop_add, next_hop_return;
+       uint8_t depth;
        int32_t status = 0;

        /* Add rule that covers a TBL24 range previously invalid & lookup
         * (& delete & lookup) */
-       lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0);
+       lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
        TEST_LPM_ASSERT(lpm != NULL);

        ip = IPv4(128, 0, 0, 0);
@@ -785,13 +842,17 @@ test10(void)
 int32_t
 test11(void)
 {
-
        struct rte_lpm *lpm = NULL;
-       uint32_t ip;
-       uint8_t depth, next_hop_add, next_hop_return;
+       struct rte_lpm_config config;
+
+       config.max_rules = MAX_RULES;
+       config.number_tbl8s = NUMBER_TBL8S;
+       config.flags = 0;
+       uint32_t ip, next_hop_add, next_hop_return;
+       uint8_t depth;
        int32_t status = 0;

-       lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0);
+       lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
        TEST_LPM_ASSERT(lpm != NULL);

        ip = IPv4(128, 0, 0, 0);
@@ -851,13 +912,18 @@ int32_t
 test12(void)
 {
        __m128i ipx4;
-       uint16_t hop[4];
+       uint32_t hop[4];
        struct rte_lpm *lpm = NULL;
-       uint32_t ip, i;
-       uint8_t depth, next_hop_add, next_hop_return;
+       struct rte_lpm_config config;
+
+       config.max_rules = MAX_RULES;
+       config.number_tbl8s = NUMBER_TBL8S;
+       config.flags = 0;
+       uint32_t ip, i, next_hop_add, next_hop_return;
+       uint8_t depth;
        int32_t status = 0;

-       lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0);
+       lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
        TEST_LPM_ASSERT(lpm != NULL);

        ip = IPv4(128, 0, 0, 0);
@@ -873,10 +939,10 @@ test12(void)
                                (next_hop_return == next_hop_add));

                ipx4 = _mm_set_epi32(ip, ip + 1, ip, ip - 1);
-               rte_lpm_lookupx4(lpm, ipx4, hop, UINT16_MAX);
-               TEST_LPM_ASSERT(hop[0] == UINT16_MAX);
+               rte_lpm_lookupx4(lpm, ipx4, hop, UINT32_MAX);
+               TEST_LPM_ASSERT(hop[0] == UINT32_MAX);
                TEST_LPM_ASSERT(hop[1] == next_hop_add);
-               TEST_LPM_ASSERT(hop[2] == UINT16_MAX);
+               TEST_LPM_ASSERT(hop[2] == UINT32_MAX);
                TEST_LPM_ASSERT(hop[3] == next_hop_add);

                status = rte_lpm_delete(lpm, ip, depth);
@@ -903,11 +969,16 @@ int32_t
 test13(void)
 {
        struct rte_lpm *lpm = NULL;
-       uint32_t ip, i;
-       uint8_t depth, next_hop_add_1, next_hop_add_2, next_hop_return;
+       struct rte_lpm_config config;
+
+       config.max_rules = MAX_RULES;
+       config.number_tbl8s = NUMBER_TBL8S;
+       config.flags = 0;
+       uint32_t ip, i, next_hop_add_1, next_hop_add_2, next_hop_return;
+       uint8_t depth;
        int32_t status = 0;

-       lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0);
+       lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
        TEST_LPM_ASSERT(lpm != NULL);

        ip = IPv4(128, 0, 0, 0);
@@ -960,17 +1031,21 @@ test13(void)
 int32_t
 test14(void)
 {
-
        /* We only use depth = 32 in the loop below so we must make sure
         * that we have enough storage for all rules at that depth*/

        struct rte_lpm *lpm = NULL;
-       uint32_t ip;
-       uint8_t depth, next_hop_add, next_hop_return;
+       struct rte_lpm_config config;
+
+       config.max_rules = 256 * 32;
+       config.number_tbl8s = NUMBER_TBL8S;
+       config.flags = 0;
+       uint32_t ip, next_hop_add, next_hop_return;
+       uint8_t depth;
        int32_t status = 0;

        /* Add enough space for 256 rules for every depth */
-       lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, 256 * 32, 0);
+       lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
        TEST_LPM_ASSERT(lpm != NULL);

        depth = 32;
@@ -1012,9 +1087,14 @@ int32_t
 test15(void)
 {
        struct rte_lpm *lpm = NULL, *result = NULL;
+       struct rte_lpm_config config;
+
+       config.max_rules = 256 * 32;
+       config.number_tbl8s = NUMBER_TBL8S;
+       config.flags = 0;

        /* Create lpm  */
-       lpm = rte_lpm_create("lpm_find_existing", SOCKET_ID_ANY, 256 * 32, 0);
+       lpm = rte_lpm_create("lpm_find_existing", SOCKET_ID_ANY, &config);
        TEST_LPM_ASSERT(lpm != NULL);

        /* Try to find existing lpm */
@@ -1040,21 +1120,25 @@ int32_t
 test16(void)
 {
        uint32_t ip;
-       struct rte_lpm *lpm = rte_lpm_create(__func__, SOCKET_ID_ANY,
-                       256 * 32, 0);
+       struct rte_lpm_config config;
+
+       config.max_rules = 256 * 32;
+       config.number_tbl8s = NUMBER_TBL8S;
+       config.flags = 0;
+       struct rte_lpm *lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);

        /* ip loops through all possibilities for top 24 bits of address */
-       for (ip = 0; ip < 0xFFFFFF; ip++){
+       for (ip = 0; ip < 0xFFFFFF; ip++) {
                /* add an entry within a different tbl8 each time, since
                 * depth >24 and the top 24 bits are different */
                if (rte_lpm_add(lpm, (ip << 8) + 0xF0, 30, 0) < 0)
                        break;
        }

-       if (ip != RTE_LPM_TBL8_NUM_GROUPS) {
+       if (ip != NUMBER_TBL8S) {
                printf("Error, unexpected failure with filling tbl8 groups\n");
                printf("Failed after %u additions, expected after %u\n",
-                               (unsigned)ip, (unsigned)RTE_LPM_TBL8_NUM_GROUPS);
+                               (unsigned)ip, (unsigned)NUMBER_TBL8S);
        }

        rte_lpm_free(lpm);
@@ -1072,19 +1156,24 @@ int32_t
 test17(void)
 {
        struct rte_lpm *lpm = NULL;
+       struct rte_lpm_config config;
+
+       config.max_rules = MAX_RULES;
+       config.number_tbl8s = NUMBER_TBL8S;
+       config.flags = 0;
        const uint32_t ip_10_32 = IPv4(10, 10, 10, 2);
        const uint32_t ip_10_24 = IPv4(10, 10, 10, 0);
        const uint32_t ip_20_25 = IPv4(10, 10, 20, 2);
        const uint8_t d_ip_10_32 = 32,
                        d_ip_10_24 = 24,
                        d_ip_20_25 = 25;
-       const uint8_t next_hop_ip_10_32 = 100,
+       const uint32_t next_hop_ip_10_32 = 100,
                        next_hop_ip_10_24 = 105,
                        next_hop_ip_20_25 = 111;
-       uint8_t next_hop_return = 0;
+       uint32_t next_hop_return = 0;
        int32_t status = 0;

-       lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0);
+       lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
        TEST_LPM_ASSERT(lpm != NULL);

        if ((status = rte_lpm_add(lpm, ip_10_32, d_ip_10_32,
@@ -1092,7 +1181,7 @@ test17(void)
                return -1;

        status = rte_lpm_lookup(lpm, ip_10_32, &next_hop_return);
-       uint8_t test_hop_10_32 = next_hop_return;
+       uint32_t test_hop_10_32 = next_hop_return;
        TEST_LPM_ASSERT(status == 0);
        TEST_LPM_ASSERT(next_hop_return == next_hop_ip_10_32);

@@ -1101,7 +1190,7 @@ test17(void)
                        return -1;

        status = rte_lpm_lookup(lpm, ip_10_24, &next_hop_return);
-       uint8_t test_hop_10_24 = next_hop_return;
+       uint32_t test_hop_10_24 = next_hop_return;
        TEST_LPM_ASSERT(status == 0);
        TEST_LPM_ASSERT(next_hop_return == next_hop_ip_10_24);

@@ -1110,7 +1199,7 @@ test17(void)
                return -1;

        status = rte_lpm_lookup(lpm, ip_20_25, &next_hop_return);
-       uint8_t test_hop_20_25 = next_hop_return;
+       uint32_t test_hop_20_25 = next_hop_return;
        TEST_LPM_ASSERT(status == 0);
        TEST_LPM_ASSERT(next_hop_return == next_hop_ip_20_25);

@@ -1119,7 +1208,7 @@ test17(void)
                return -1;
        }

-       if (test_hop_10_24 == test_hop_20_25){
+       if (test_hop_10_24 == test_hop_20_25) {
                printf("Next hop return equal\n");
                return -1;
        }
@@ -1155,7 +1244,7 @@ print_route_distribution(const struct route_rule *table, uint32_t n)
        printf("--------------------------- \n");

        /* Count depths. */
-       for(i = 1; i <= 32; i++) {
+       for (i = 1; i <= 32; i++) {
                unsigned depth_counter = 0;
                double percent_hits;

@@ -1173,9 +1262,14 @@ int32_t
 perf_test(void)
 {
        struct rte_lpm *lpm = NULL;
+       struct rte_lpm_config config;
+
+       config.max_rules = 1000000;
+       config.number_tbl8s = NUMBER_TBL8S;
+       config.flags = 0;
        uint64_t begin, total_time, lpm_used_entries = 0;
        unsigned i, j;
-       uint8_t next_hop_add = 0xAA, next_hop_return = 0;
+       uint32_t next_hop_add = 0xAA, next_hop_return = 0;
        int status = 0;
        uint64_t cache_line_counter = 0;
        int64_t count = 0;
@@ -1186,7 +1280,7 @@ perf_test(void)

        print_route_distribution(large_route_table, (uint32_t) NUM_ROUTE_ENTRIES);

-       lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, 1000000, 0);
+       lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
        TEST_LPM_ASSERT(lpm != NULL);

        /* Measue add. */
@@ -1206,7 +1300,7 @@ perf_test(void)
                if (lpm->tbl24[i].valid)
                        lpm_used_entries++;

-               if (i % 32 == 0){
+               if (i % 32 == 0) {
                        if ((uint64_t)count < lpm_used_entries) {
                                cache_line_counter++;
                                count = lpm_used_entries;
@@ -1220,22 +1314,23 @@ perf_test(void)
        printf("64 byte Cache entries used = %u (%u bytes)\n",
                        (unsigned) cache_line_counter, (unsigned) cache_line_counter * 64);

-       printf("Average LPM Add: %g cycles\n", (double)total_time / NUM_ROUTE_ENTRIES);
+       printf("Average LPM Add: %g cycles\n",
+                       (double)total_time / NUM_ROUTE_ENTRIES);

        /* Measure single Lookup */
        total_time = 0;
        count = 0;

-       for (i = 0; i < ITERATIONS; i ++) {
+       for (i = 0; i < ITERATIONS; i++) {
                static uint32_t ip_batch[BATCH_SIZE];

-               for (j = 0; j < BATCH_SIZE; j ++)
+               for (j = 0; j < BATCH_SIZE; j++)
                        ip_batch[j] = rte_rand();

                /* Lookup per batch */
                begin = rte_rdtsc();

-               for (j = 0; j < BATCH_SIZE; j ++) {
+               for (j = 0; j < BATCH_SIZE; j++) {
                        if (rte_lpm_lookup(lpm, ip_batch[j], &next_hop_return) != 0)
                                count++;
                }
@@ -1250,12 +1345,12 @@ perf_test(void)
        /* Measure bulk Lookup */
        total_time = 0;
        count = 0;
-       for (i = 0; i < ITERATIONS; i ++) {
+       for (i = 0; i < ITERATIONS; i++) {
                static uint32_t ip_batch[BATCH_SIZE];
-               uint16_t next_hops[BULK_SIZE];
+               uint32_t next_hops[BULK_SIZE];

                /* Create array of random IP addresses */
-               for (j = 0; j < BATCH_SIZE; j ++)
+               for (j = 0; j < BATCH_SIZE; j++)
                        ip_batch[j] = rte_rand();

                /* Lookup per batch */
@@ -1279,7 +1374,7 @@ perf_test(void)
        count = 0;
        for (i = 0; i < ITERATIONS; i++) {
                static uint32_t ip_batch[BATCH_SIZE];
-               uint16_t next_hops[4];
+               uint32_t next_hops[4];

                /* Create array of random IP addresses */
                for (j = 0; j < BATCH_SIZE; j++)
@@ -1293,9 +1388,9 @@ perf_test(void)

                        ipx4 = _mm_loadu_si128((__m128i *)(ip_batch + j));
                        ipx4 = *(__m128i *)(ip_batch + j);
-                       rte_lpm_lookupx4(lpm, ipx4, next_hops, UINT16_MAX);
+                       rte_lpm_lookupx4(lpm, ipx4, next_hops, UINT32_MAX);
                        for (k = 0; k < RTE_DIM(next_hops); k++)
-                               if (unlikely(next_hops[k] == UINT16_MAX))
+                               if (unlikely(next_hops[k] == UINT32_MAX))
                                        count++;
                }

@@ -1338,6 +1433,7 @@ test_lpm(void)

        for (i = 0; i < NUM_LPM_TESTS; i++) {
                status = tests[i]();
+               printf("LPM Test\n");
                if (status < 0) {
                        printf("ERROR: LPM Test %s: FAIL\n", RTE_STR(tests[i]));
                        global_status = status;
diff --git a/app/test/test_mp_secondary.c b/app/test/test_mp_secondary.c
index 2f941b5..75a889c 100644
--- a/app/test/test_mp_secondary.c
+++ b/app/test/test_mp_secondary.c
@@ -232,7 +232,12 @@ run_object_creation_tests(void)

 #ifdef RTE_LIBRTE_LPM
        rte_errno=0;
-       if ((rte_lpm_create("test_lpm", size, rte_socket_id(), 0) != NULL) &&
+       struct rte_lpm_config config;
+
+       config.max_rules = rte_socket_id();
+       config.number_tbl8s = 256;
+       config.flags = 0;
+       if ((rte_lpm_create("test_lpm", size, &config) != NULL) &&
            (rte_lpm_find_existing("test_lpm") == NULL)){
                printf("Error: unexpected return value from 
rte_lpm_create()\n");
                return -1;
diff --git a/app/test/test_table_combined.c b/app/test/test_table_combined.c
index 8bf4aeb..dcaf7ed 100644
--- a/app/test/test_table_combined.c
+++ b/app/test/test_table_combined.c
@@ -295,6 +295,7 @@ test_table_lpm_combined(void)
        struct rte_table_lpm_params lpm_params = {
                .name = "LPM",
                .n_rules = 1 << 16,
+               .number_tbl8s = 1 << 8,
                .entry_unique_size = 8,
                .offset = APP_METADATA_OFFSET(0),
        };
diff --git a/app/test/test_table_tables.c b/app/test/test_table_tables.c
index b6364c4..cbbbfc1 100644
--- a/app/test/test_table_tables.c
+++ b/app/test/test_table_tables.c
@@ -326,6 +326,8 @@ test_table_lpm(void)
        struct rte_table_lpm_params lpm_params = {
                .name = "LPM",
                .n_rules = 1 << 24,
+               .number_tbl8s = 1 << 8,
+               .flags = 0,
                .entry_unique_size = entry_size,
                .offset = APP_METADATA_OFFSET(1)
        };
diff --git a/doc/guides/rel_notes/release_2_3.rst b/doc/guides/rel_notes/release_2_3.rst
index 99de186..eef310c 100644
--- a/doc/guides/rel_notes/release_2_3.rst
+++ b/doc/guides/rel_notes/release_2_3.rst
@@ -18,7 +18,11 @@ Drivers

 Libraries
 ~~~~~~~~~
+* **librte_lpm: Increased number of next hops for IPv4 to 2^24.**
 
+  Extended the next_hop field from 8 bits to 24 bits for IPv4. Merged the
+  rte_lpm_tbl24_entry and rte_lpm_tbl8_entry structures into a single
+  rte_lpm_tbl_entry structure, and added a new rte_lpm_config structure.

 Examples
 ~~~~~~~~
diff --git a/lib/librte_lpm/Makefile b/lib/librte_lpm/Makefile
index 688cfc9..7a342f8 100644
--- a/lib/librte_lpm/Makefile
+++ b/lib/librte_lpm/Makefile
@@ -39,7 +39,7 @@ CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR)

 EXPORT_MAP := rte_lpm_version.map

-LIBABIVER := 2
+LIBABIVER := 3

 # all source are stored in SRCS-y
 SRCS-$(CONFIG_RTE_LIBRTE_LPM) := rte_lpm.c rte_lpm6.c
diff --git a/lib/librte_lpm/rte_lpm.c b/lib/librte_lpm/rte_lpm.c
index 3981452..4d8ad61 100644
--- a/lib/librte_lpm/rte_lpm.c
+++ b/lib/librte_lpm/rte_lpm.c
@@ -57,9 +57,7 @@

 TAILQ_HEAD(rte_lpm_list, rte_tailq_entry);

-static struct rte_tailq_elem rte_lpm_tailq = {
-       .name = "RTE_LPM",
-};
+static struct rte_tailq_elem rte_lpm_tailq = { .name = "RTE_LPM", };
 EAL_REGISTER_TAILQ(rte_lpm_tailq)

 #define MAX_DEPTH_TBL24 24
@@ -95,7 +93,7 @@ depth_to_mask(uint8_t depth)
        /* To calculate a mask start with a 1 on the left hand side and right
         * shift while populating the left hand side with 1's
         */
-       return (int)0x80000000 >> (depth - 1);
+       return (int) 0x80000000 >> (depth - 1);
 }

 /*
@@ -119,8 +117,34 @@ depth_to_range(uint8_t depth)
 /*
  * Find an existing lpm table and return a pointer to it.
  */
+struct rte_lpm_v20 *
+rte_lpm_find_existing_v20(const char *name)
+{
+       struct rte_lpm_v20 *l = NULL;
+       struct rte_tailq_entry *te;
+       struct rte_lpm_list *lpm_list;
+
+       lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);
+
+       rte_rwlock_read_lock(RTE_EAL_TAILQ_RWLOCK);
+       TAILQ_FOREACH(te, lpm_list, next) {
+               l = (struct rte_lpm_v20 *) te->data;
+               if (strncmp(name, l->name, RTE_LPM_NAMESIZE) == 0)
+                       break;
+       }
+       rte_rwlock_read_unlock(RTE_EAL_TAILQ_RWLOCK);
+
+       if (te == NULL) {
+               rte_errno = ENOENT;
+               return NULL;
+       }
+
+       return l;
+}
+VERSION_SYMBOL(rte_lpm_find_existing, _v20, 2.0);
+
 struct rte_lpm *
-rte_lpm_find_existing(const char *name)
+rte_lpm_find_existing_v23(const char *name)
 {
        struct rte_lpm *l = NULL;
        struct rte_tailq_entry *te;
@@ -143,16 +167,19 @@ rte_lpm_find_existing(const char *name)

        return l;
 }
+BIND_DEFAULT_SYMBOL(rte_lpm_find_existing, _v23, 2.3);
+MAP_STATIC_SYMBOL(struct rte_lpm *rte_lpm_find_existing(const char *name),
+               rte_lpm_find_existing_v23);

 /*
  * Allocates memory for LPM object
  */
-struct rte_lpm *
-rte_lpm_create(const char *name, int socket_id, int max_rules,
-               __rte_unused int flags)
+struct rte_lpm_v20 *
+rte_lpm_create_v20(const char *name, int socket_id, int max_rules,
+__rte_unused int flags)
 {
        char mem_name[RTE_LPM_NAMESIZE];
-       struct rte_lpm *lpm = NULL;
+       struct rte_lpm_v20 *lpm = NULL;
        struct rte_tailq_entry *te;
        uint32_t mem_size;
        struct rte_lpm_list *lpm_list;
@@ -163,7 +190,7 @@ rte_lpm_create(const char *name, int socket_id, int max_rules,
        RTE_BUILD_BUG_ON(sizeof(struct rte_lpm_tbl8_entry) != 2);

        /* Check user arguments. */
-       if ((name == NULL) || (socket_id < -1) || (max_rules == 0)){
+       if ((name == NULL) || (socket_id < -1) || (max_rules == 0)) {
                rte_errno = EINVAL;
                return NULL;
        }
@@ -177,7 +204,7 @@ rte_lpm_create(const char *name, int socket_id, int max_rules,

        /* guarantee there's no existing */
        TAILQ_FOREACH(te, lpm_list, next) {
-               lpm = (struct rte_lpm *) te->data;
+               lpm = (struct rte_lpm_v20 *) te->data;
                if (strncmp(name, lpm->name, RTE_LPM_NAMESIZE) == 0)
                        break;
        }
@@ -192,8 +219,8 @@ rte_lpm_create(const char *name, int socket_id, int max_rules,
        }

        /* Allocate memory to store the LPM data structures. */
-       lpm = (struct rte_lpm *)rte_zmalloc_socket(mem_name, mem_size,
-                       RTE_CACHE_LINE_SIZE, socket_id);
+       lpm = (struct rte_lpm_v20 *) rte_zmalloc_socket(mem_name, mem_size,
+       RTE_CACHE_LINE_SIZE, socket_id);
        if (lpm == NULL) {
                RTE_LOG(ERR, LPM, "LPM memory allocation failed\n");
                rte_free(te);
@@ -213,12 +240,144 @@ exit:

        return lpm;
 }
+VERSION_SYMBOL(rte_lpm_create, _v20, 2.0);
+
+struct rte_lpm *
+rte_lpm_create_v23(const char *name, int socket_id,
+               const struct rte_lpm_config *config)
+{
+
+       char mem_name[RTE_LPM_NAMESIZE];
+       struct rte_lpm *lpm = NULL;
+       struct rte_tailq_entry *te;
+       uint32_t mem_size;
+       uint32_t rules_size, tbl8s_size;
+       struct rte_lpm_list *lpm_list;
+
+       lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);
+
+       RTE_BUILD_BUG_ON(sizeof(struct rte_lpm_tbl_entry) != 4);
+
+       /* Check user arguments. */
+       if ((name == NULL) || (socket_id < -1) || (config->max_rules == 0)
+                       || config->number_tbl8s > RTE_LPM_MAX_TBL8_NUM_GROUPS) {
+               rte_errno = EINVAL;
+               return NULL;
+       }
+
+       snprintf(mem_name, sizeof(mem_name), "LPM_%s", name);
+
+       /* Determine the amount of memory to allocate. */
+       mem_size = sizeof(*lpm);
+       rules_size = sizeof(struct rte_lpm_rule) * config->max_rules;
+       tbl8s_size = (sizeof(struct rte_lpm_tbl_entry) *
+                       RTE_LPM_TBL8_GROUP_NUM_ENTRIES * config->number_tbl8s);
+
+       rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
+
+       /* guarantee there's no existing */
+       TAILQ_FOREACH(te, lpm_list, next) {
+               lpm = (struct rte_lpm *) te->data;
+               if (strncmp(name, lpm->name, RTE_LPM_NAMESIZE) == 0)
+                       break;
+       }
+       if (te != NULL)
+               goto exit;
+
+       /* allocate tailq entry */
+       te = rte_zmalloc("LPM_TAILQ_ENTRY", sizeof(*te), 0);
+       if (te == NULL) {
+               RTE_LOG(ERR, LPM, "Failed to allocate tailq entry\n");
+               goto exit;
+       }
+
+       /* Allocate memory to store the LPM data structures. */
+       lpm = (struct rte_lpm *) rte_zmalloc_socket(mem_name, (size_t) mem_size,
+       RTE_CACHE_LINE_SIZE, socket_id);
+       if (lpm == NULL) {
+               RTE_LOG(ERR, LPM, "LPM memory allocation failed\n");
+               rte_free(te);
+               goto exit;
+       }
+
+       lpm->rules_tbl = (struct rte_lpm_rule *)rte_zmalloc_socket(NULL,
+                       (size_t)rules_size, RTE_CACHE_LINE_SIZE, socket_id);
+
+       if (lpm->rules_tbl == NULL) {
+               RTE_LOG(ERR, LPM, "LPM memory allocation failed\n");
+               rte_free(lpm);
+               rte_free(te);
+               goto exit;
+       }
+
+       lpm->tbl8 = (struct rte_lpm_tbl_entry *)rte_zmalloc_socket(NULL,
+                       (size_t)tbl8s_size, RTE_CACHE_LINE_SIZE, socket_id);
+
+       if (lpm->tbl8 == NULL) {
+               RTE_LOG(ERR, LPM, "LPM memory allocation failed\n");
+               rte_free(lpm);
+               rte_free(te);
+               goto exit;
+       }
+
+       /* Save user arguments. */
+       lpm->max_rules = config->max_rules;
+       lpm->number_tbl8s = config->number_tbl8s;
+       snprintf(lpm->name, sizeof(lpm->name), "%s", name);
+
+       te->data = (void *) lpm;
+
+       TAILQ_INSERT_TAIL(lpm_list, te, next);
+
+exit:
+       rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
+
+       return lpm;
+
+}
+BIND_DEFAULT_SYMBOL(rte_lpm_create, _v23, 2.3);
+MAP_STATIC_SYMBOL(
+       struct rte_lpm *rte_lpm_create(const char *name, int socket_id,
+                       const struct rte_lpm_config *config), rte_lpm_create_v23);

 /*
  * Deallocates memory for given LPM table.
  */
 void
-rte_lpm_free(struct rte_lpm *lpm)
+rte_lpm_free_v20(struct rte_lpm_v20 *lpm)
+{
+       struct rte_lpm_list *lpm_list;
+       struct rte_tailq_entry *te;
+
+       /* Check user arguments. */
+       if (lpm == NULL)
+               return;
+
+       lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);
+
+       rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
+
+       /* find our tailq entry */
+       TAILQ_FOREACH(te, lpm_list, next) {
+               if (te->data == (void *) lpm)
+                       break;
+       }
+       if (te == NULL) {
+               rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
+               return;
+       }
+
+       TAILQ_REMOVE(lpm_list, te, next);
+
+       rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
+
+       rte_free(lpm);
+       rte_free(te);
+}
+VERSION_SYMBOL(rte_lpm_free, _v20, 2.0);
+
+void
+rte_lpm_free_v23(struct rte_lpm *lpm)
 {
        struct rte_lpm_list *lpm_list;
        struct rte_tailq_entry *te;
@@ -248,6 +407,9 @@ rte_lpm_free(struct rte_lpm *lpm)
        rte_free(lpm);
        rte_free(te);
 }
+BIND_DEFAULT_SYMBOL(rte_lpm_free, _v23, 2.3);
+MAP_STATIC_SYMBOL(void rte_lpm_free(struct rte_lpm *lpm),
+               rte_lpm_free_v23);

 /*
  * Adds a rule to the rule table.
@@ -260,8 +422,8 @@ rte_lpm_free(struct rte_lpm *lpm)
  * NOTE: Valid range for depth parameter is 1 .. 32 inclusive.
  */
 static inline int32_t
-rule_add(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
-       uint8_t next_hop)
+rule_add_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked,
+       uint8_t depth, uint8_t next_hop)
 {
        uint32_t rule_gindex, rule_index, last_rule;
        int i;
@@ -296,7 +458,8 @@ rule_add(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,

                for (i = depth - 1; i > 0; i--) {
                        if (lpm->rule_info[i - 1].used_rules > 0) {
-                               rule_index = lpm->rule_info[i - 1].first_rule + lpm->rule_info[i - 1].used_rules;
+                               rule_index = lpm->rule_info[i - 1].first_rule
+                               + lpm->rule_info[i - 1].used_rules;
                                break;
                        }
                }
@@ -308,12 +471,86 @@ rule_add(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,

        /* Make room for the new rule in the array. */
        for (i = RTE_LPM_MAX_DEPTH; i > depth; i--) {
-               if (lpm->rule_info[i - 1].first_rule + lpm->rule_info[i - 1].used_rules == lpm->max_rules)
+               if (lpm->rule_info[i - 1].first_rule + lpm->rule_info[i - 
1].used_rules
+                               == lpm->max_rules)
                        return -ENOSPC;

                if (lpm->rule_info[i - 1].used_rules > 0) {
-                       lpm->rules_tbl[lpm->rule_info[i - 1].first_rule + lpm->rule_info[i - 1].used_rules]
-                                       = lpm->rules_tbl[lpm->rule_info[i - 1].first_rule];
+                       lpm->rules_tbl[lpm->rule_info[i - 1].first_rule
+                       + lpm->rule_info[i - 1].used_rules] =
+                       lpm->rules_tbl[lpm->rule_info[i - 1].first_rule];
+                       lpm->rule_info[i - 1].first_rule++;
+               }
+       }
+
+       /* Add the new rule. */
+       lpm->rules_tbl[rule_index].ip = ip_masked;
+       lpm->rules_tbl[rule_index].next_hop = next_hop;
+
+       /* Increment the used rules counter for this rule group. */
+       lpm->rule_info[depth - 1].used_rules++;
+
+       return rule_index;
+}
+
+static inline int32_t
+rule_add_v23(struct rte_lpm *lpm, uint32_t ip_masked,
+               uint8_t depth, uint32_t next_hop)
+{
+       uint32_t rule_gindex, rule_index, last_rule;
+       int i;
+
+       VERIFY_DEPTH(depth);
+
+       /* Scan through rule group to see if rule already exists. */
+       if (lpm->rule_info[depth - 1].used_rules > 0) {
+
+               /* rule_gindex stands for rule group index. */
+               rule_gindex = lpm->rule_info[depth - 1].first_rule;
+               /* Initialise rule_index to point to start of rule group. */
+               rule_index = rule_gindex;
+               /* Last rule = Last used rule in this rule group. */
+               last_rule = rule_gindex + lpm->rule_info[depth - 1].used_rules;
+
+               for (; rule_index < last_rule; rule_index++) {
+
+                       /* If rule already exists update its next_hop and return. */
+                       if (lpm->rules_tbl[rule_index].ip == ip_masked) {
+                               lpm->rules_tbl[rule_index].next_hop = next_hop;
+
+                               return rule_index;
+                       }
+               }
+
+               if (rule_index == lpm->max_rules)
+                       return -ENOSPC;
+       } else {
+               /* Calculate the position in which the rule will be stored. */
+               rule_index = 0;
+
+               for (i = depth - 1; i > 0; i--) {
+                       if (lpm->rule_info[i - 1].used_rules > 0) {
+                               rule_index = lpm->rule_info[i - 1].first_rule
+                                       + lpm->rule_info[i - 1].used_rules;
+                               break;
+                       }
+               }
+               if (rule_index == lpm->max_rules)
+                       return -ENOSPC;
+
+               lpm->rule_info[depth - 1].first_rule = rule_index;
+       }
+
+       /* Make room for the new rule in the array. */
+       for (i = RTE_LPM_MAX_DEPTH; i > depth; i--) {
+               if (lpm->rule_info[i - 1].first_rule + lpm->rule_info[i - 1].used_rules
+                               == lpm->max_rules)
+                       return -ENOSPC;
+
+               if (lpm->rule_info[i - 1].used_rules > 0) {
+                       lpm->rules_tbl[lpm->rule_info[i - 1].first_rule
+                               + lpm->rule_info[i - 1].used_rules] =
+                               lpm->rules_tbl[lpm->rule_info[i - 1].first_rule];
                        lpm->rule_info[i - 1].first_rule++;
                }
        }
@@ -333,19 +570,46 @@ rule_add(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
  * NOTE: Valid range for depth parameter is 1 .. 32 inclusive.
  */
 static inline void
-rule_delete(struct rte_lpm *lpm, int32_t rule_index, uint8_t depth)
+rule_delete_v20(struct rte_lpm_v20 *lpm, int32_t rule_index,
+       uint8_t depth)
+{
+       int i;
+
+       VERIFY_DEPTH(depth);
+
+       lpm->rules_tbl[rule_index] =
+                       lpm->rules_tbl[lpm->rule_info[depth - 1].first_rule
+                               + lpm->rule_info[depth - 1].used_rules - 1];
+
+       for (i = depth; i < RTE_LPM_MAX_DEPTH; i++) {
+               if (lpm->rule_info[i].used_rules > 0) {
+                       lpm->rules_tbl[lpm->rule_info[i].first_rule - 1] =
+                               lpm->rules_tbl[lpm->rule_info[i].first_rule
+                                       + lpm->rule_info[i].used_rules - 1];
+                       lpm->rule_info[i].first_rule--;
+               }
+       }
+
+       lpm->rule_info[depth - 1].used_rules--;
+}
+
+static inline void
+rule_delete_v23(struct rte_lpm *lpm, int32_t rule_index,
+       uint8_t depth)
 {
        int i;

        VERIFY_DEPTH(depth);

-       lpm->rules_tbl[rule_index] = lpm->rules_tbl[lpm->rule_info[depth - 1].first_rule
-                       + lpm->rule_info[depth - 1].used_rules - 1];
+       lpm->rules_tbl[rule_index] =
+                       lpm->rules_tbl[lpm->rule_info[depth - 1].first_rule
+                               + lpm->rule_info[depth - 1].used_rules - 1];

        for (i = depth; i < RTE_LPM_MAX_DEPTH; i++) {
                if (lpm->rule_info[i].used_rules > 0) {
                        lpm->rules_tbl[lpm->rule_info[i].first_rule - 1] =
-                                       lpm->rules_tbl[lpm->rule_info[i].first_rule + lpm->rule_info[i].used_rules - 1];
+                               lpm->rules_tbl[lpm->rule_info[i].first_rule
+                                       + lpm->rule_info[i].used_rules - 1];
                        lpm->rule_info[i].first_rule--;
                }
        }
@@ -358,7 +622,28 @@ rule_delete(struct rte_lpm *lpm, int32_t rule_index, uint8_t depth)
  * NOTE: Valid range for depth parameter is 1 .. 32 inclusive.
  */
 static inline int32_t
-rule_find(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth)
+rule_find_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked, uint8_t depth)
+{
+       uint32_t rule_gindex, last_rule, rule_index;
+
+       VERIFY_DEPTH(depth);
+
+       rule_gindex = lpm->rule_info[depth - 1].first_rule;
+       last_rule = rule_gindex + lpm->rule_info[depth - 1].used_rules;
+
+       /* Scan used rules at given depth to find rule. */
+       for (rule_index = rule_gindex; rule_index < last_rule; rule_index++) {
+               /* If rule is found return the rule index. */
+               if (lpm->rules_tbl[rule_index].ip == ip_masked)
+                       return rule_index;
+       }
+
+       /* If rule is not found return -EINVAL. */
+       return -EINVAL;
+}
+
+static inline int32_t
+rule_find_v23(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth)
 {
        uint32_t rule_gindex, last_rule, rule_index;

@@ -382,7 +667,7 @@ rule_find(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth)
  * Find, clean and allocate a tbl8.
  */
 static inline int32_t
-tbl8_alloc(struct rte_lpm_tbl8_entry *tbl8)
+tbl8_alloc_v20(struct rte_lpm_tbl8_entry *tbl8)
 {
        uint32_t tbl8_gindex; /* tbl8 group index. */
        struct rte_lpm_tbl8_entry *tbl8_entry;
@@ -390,8 +675,33 @@ tbl8_alloc(struct rte_lpm_tbl8_entry *tbl8)
        /* Scan through tbl8 to find a free (i.e. INVALID) tbl8 group. */
        for (tbl8_gindex = 0; tbl8_gindex < RTE_LPM_TBL8_NUM_GROUPS;
                        tbl8_gindex++) {
-               tbl8_entry = &tbl8[tbl8_gindex *
-                                  RTE_LPM_TBL8_GROUP_NUM_ENTRIES];
+               tbl8_entry = &tbl8[tbl8_gindex * RTE_LPM_TBL8_GROUP_NUM_ENTRIES];
+               /* If a free tbl8 group is found clean it and set as VALID. */
+               if (!tbl8_entry->valid_group) {
+                       memset(&tbl8_entry[0], 0,
+                       RTE_LPM_TBL8_GROUP_NUM_ENTRIES * sizeof(tbl8_entry[0]));
+
+                       tbl8_entry->valid_group = VALID;
+
+                       /* Return group index for allocated tbl8 group. */
+                       return tbl8_gindex;
+               }
+       }
+
+       /* If there are no tbl8 groups free then return error. */
+       return -ENOSPC;
+}
+
+static inline int32_t
+tbl8_alloc_v23(struct rte_lpm_tbl_entry *tbl8, uint32_t number_tbl8s)
+{
+#define valid_group ext_entry
+       uint32_t tbl8_gindex; /* tbl8 group index. */
+       struct rte_lpm_tbl_entry *tbl8_entry;
+
+       /* Scan through tbl8 to find a free (i.e. INVALID) tbl8 group. */
+       for (tbl8_gindex = 0; tbl8_gindex < number_tbl8s; tbl8_gindex++) {
+               tbl8_entry = &tbl8[tbl8_gindex * RTE_LPM_TBL8_GROUP_NUM_ENTRIES];
                /* If a free tbl8 group is found clean it and set as VALID. */
                if (!tbl8_entry->valid_group) {
                        memset(&tbl8_entry[0], 0,
@@ -404,21 +714,30 @@ tbl8_alloc(struct rte_lpm_tbl8_entry *tbl8)
                        return tbl8_gindex;
                }
        }
-
+#undef valid_group
        /* If there are no tbl8 groups free then return error. */
        return -ENOSPC;
 }

 static inline void
-tbl8_free(struct rte_lpm_tbl8_entry *tbl8, uint32_t tbl8_group_start)
+tbl8_free_v20(struct rte_lpm_tbl8_entry *tbl8, uint32_t tbl8_group_start)
+{
+       /* Set tbl8 group invalid*/
+       tbl8[tbl8_group_start].valid_group = INVALID;
+}
+
+static inline void
+tbl8_free_v23(struct rte_lpm_tbl_entry *tbl8, uint32_t tbl8_group_start)
 {
+#define valid_group ext_entry
        /* Set tbl8 group invalid*/
        tbl8[tbl8_group_start].valid_group = INVALID;
+#undef valid_group
 }

 static inline int32_t
-add_depth_small(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
-               uint8_t next_hop)
+add_depth_small_v20(struct rte_lpm_v20 *lpm, uint32_t ip,
+               uint8_t depth, uint8_t next_hop)
 {
        uint32_t tbl24_index, tbl24_range, tbl8_index, tbl8_group_end, i, j;

@@ -431,11 +750,80 @@ add_depth_small(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
                 * For invalid OR valid and non-extended tbl 24 entries set
                 * entry.
                 */
-               if (!lpm->tbl24[i].valid || (lpm->tbl24[i].ext_entry == 0 &&
-                               lpm->tbl24[i].depth <= depth)) {
+               if (!lpm->tbl24[i].valid
+                       || (lpm->tbl24[i].ext_entry == 0 && lpm->tbl24[i].depth <= depth)) {

                        struct rte_lpm_tbl24_entry new_tbl24_entry = {
-                               { .next_hop = next_hop, },
+                                       { .next_hop = next_hop, },
+                                       .valid = VALID,
+                                       .ext_entry = 0,
+                                       .depth = depth,
+                       };
+
+                       /* Setting tbl24 entry in one go to avoid race
+                        * conditions
+                        */
+                       lpm->tbl24[i] = new_tbl24_entry;
+
+                       continue;
+               }
+
+               if (lpm->tbl24[i].ext_entry == 1) {
+                       /* If tbl24 entry is valid and extended calculate the
+                        *  index into tbl8.
+                        */
+                       tbl8_index = lpm->tbl24[i].tbl8_gindex *
+                       RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
+                       tbl8_group_end = tbl8_index +
+                       RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
+
+                       for (j = tbl8_index; j < tbl8_group_end; j++) {
+                               if (!lpm->tbl8[j].valid || lpm->tbl8[j].depth <= depth) {
+                                       struct rte_lpm_tbl8_entry new_tbl8_entry = {
+                                                       .valid = VALID,
+                                                       .valid_group = VALID,
+                                                       .depth = depth,
+                                                       .next_hop = next_hop,
+                                       };
+
+                                       /*
+                                        * Setting tbl8 entry in one go to avoid
+                                        * race conditions
+                                        */
+                                       lpm->tbl8[j] = new_tbl8_entry;
+
+                                       continue;
+                               }
+                       }
+               }
+       }
+
+       return 0;
+}
+
+static inline int32_t
+add_depth_small_v23(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
+               uint32_t next_hop)
+{
+#define tbl8_gindex next_hop
+#define valid_group ext_entry
+
+       uint32_t tbl24_index, tbl24_range, tbl8_index, tbl8_group_end, i, j;
+
+       /* Calculate the index into Table24. */
+       tbl24_index = ip >> 8;
+       tbl24_range = depth_to_range(depth);
+
+       for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
+               /*
+                * For invalid OR valid and non-extended tbl 24 entries set
+                * entry.
+                */
+               if (!lpm->tbl24[i].valid || (lpm->tbl24[i].ext_entry == 0 &&
+                                               lpm->tbl24[i].depth <= depth)) {
+
+                       struct rte_lpm_tbl_entry new_tbl24_entry = {
+                               .next_hop = next_hop,
                                .valid = VALID,
                                .ext_entry = 0,
                                .depth = depth,
@@ -454,15 +842,15 @@ add_depth_small(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
                         *  index into tbl8.
                         */
                        tbl8_index = lpm->tbl24[i].tbl8_gindex *
-                                       RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
+                       RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
                        tbl8_group_end = tbl8_index +
-                                       RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
+                       RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

                        for (j = tbl8_index; j < tbl8_group_end; j++) {
                                if (!lpm->tbl8[j].valid ||
                                                lpm->tbl8[j].depth <= depth) {
-                                       struct rte_lpm_tbl8_entry
-                                               new_tbl8_entry = {
+                                       struct rte_lpm_tbl_entry
+                                       new_tbl8_entry = {
                                                .valid = VALID,
                                                .valid_group = VALID,
                                                .depth = depth,
@@ -480,34 +868,34 @@ add_depth_small(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
                        }
                }
        }
+#undef tbl8_gindex
+#undef valid_group

        return 0;
 }

 static inline int32_t
-add_depth_big(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
-               uint8_t next_hop)
+add_depth_big_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked,
+               uint8_t depth, uint8_t next_hop)
 {
        uint32_t tbl24_index;
        int32_t tbl8_group_index, tbl8_group_start, tbl8_group_end, tbl8_index,
-               tbl8_range, i;
+                       tbl8_range, i;

        tbl24_index = (ip_masked >> 8);
        tbl8_range = depth_to_range(depth);

        if (!lpm->tbl24[tbl24_index].valid) {
                /* Search for a free tbl8 group. */
-               tbl8_group_index = tbl8_alloc(lpm->tbl8);
+               tbl8_group_index = tbl8_alloc_v20(lpm->tbl8);

                /* Check tbl8 allocation was successful. */
-               if (tbl8_group_index < 0) {
+               if (tbl8_group_index < 0)
                        return tbl8_group_index;
-               }

                /* Find index into tbl8 and range. */
                tbl8_index = (tbl8_group_index *
-                               RTE_LPM_TBL8_GROUP_NUM_ENTRIES) +
-                               (ip_masked & 0xFF);
+               RTE_LPM_TBL8_GROUP_NUM_ENTRIES) + (ip_masked & 0xFF);

                /* Set tbl8 entry. */
                for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
@@ -523,42 +911,40 @@ add_depth_big(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
                 */

                struct rte_lpm_tbl24_entry new_tbl24_entry = {
-                       { .tbl8_gindex = (uint8_t)tbl8_group_index, },
-                       .valid = VALID,
-                       .ext_entry = 1,
-                       .depth = 0,
+                               { .tbl8_gindex = (uint8_t) tbl8_group_index, },
+                               .valid = VALID,
+                               .ext_entry = 1,
+                               .depth = 0,
                };

                lpm->tbl24[tbl24_index] = new_tbl24_entry;

-       }/* If valid entry but not extended calculate the index into Table8. */
-       else if (lpm->tbl24[tbl24_index].ext_entry == 0) {
+       } else if (lpm->tbl24[tbl24_index].ext_entry == 0) {
+               /* If valid entry but not extended calculate the
+                * index into Table8. */
                /* Search for free tbl8 group. */
-               tbl8_group_index = tbl8_alloc(lpm->tbl8);
+               tbl8_group_index = tbl8_alloc_v20(lpm->tbl8);

-               if (tbl8_group_index < 0) {
+               if (tbl8_group_index < 0)
                        return tbl8_group_index;
-               }

                tbl8_group_start = tbl8_group_index *
-                               RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
+               RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
                tbl8_group_end = tbl8_group_start +
-                               RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
+               RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

                /* Populate new tbl8 with tbl24 value. */
                for (i = tbl8_group_start; i < tbl8_group_end; i++) {
                        lpm->tbl8[i].valid = VALID;
                        lpm->tbl8[i].depth = lpm->tbl24[tbl24_index].depth;
-                       lpm->tbl8[i].next_hop =
-                                       lpm->tbl24[tbl24_index].next_hop;
+                       lpm->tbl8[i].next_hop = lpm->tbl24[tbl24_index].next_hop;
                }

                tbl8_index = tbl8_group_start + (ip_masked & 0xFF);

                /* Insert new rule into the tbl8 entry. */
                for (i = tbl8_index; i < tbl8_index + tbl8_range; i++) {
-                       if (!lpm->tbl8[i].valid ||
-                                       lpm->tbl8[i].depth <= depth) {
+                       if (!lpm->tbl8[i].valid || lpm->tbl8[i].depth <= depth) {
                                lpm->tbl8[i].valid = VALID;
                                lpm->tbl8[i].depth = depth;
                                lpm->tbl8[i].next_hop = next_hop;
@@ -574,7 +960,7 @@ add_depth_big(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
                 */

                struct rte_lpm_tbl24_entry new_tbl24_entry = {
-                               { .tbl8_gindex = (uint8_t)tbl8_group_index, },
+                               { .tbl8_gindex = (uint8_t) tbl8_group_index, },
                                .valid = VALID,
                                .ext_entry = 1,
                                .depth = 0,
@@ -582,20 +968,150 @@ add_depth_big(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,

                lpm->tbl24[tbl24_index] = new_tbl24_entry;

+       } else { /*
+        * If it is valid, extended entry calculate the index into tbl8.
+        */
+               tbl8_group_index = lpm->tbl24[tbl24_index].tbl8_gindex;
+               tbl8_group_start = tbl8_group_index *
+                               RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
+               tbl8_index = tbl8_group_start + (ip_masked & 0xFF);
+
+               for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
+
+                       if (!lpm->tbl8[i].valid || lpm->tbl8[i].depth <= depth) {
+                               struct rte_lpm_tbl8_entry new_tbl8_entry = {
+                                               .valid = VALID,
+                                               .depth = depth,
+                                               .next_hop = next_hop,
+                                               .valid_group = lpm->tbl8[i].valid_group,
+                               };
+
+                               /*
+                                * Setting tbl8 entry in one go to avoid race
+                                * condition
+                                */
+                               lpm->tbl8[i] = new_tbl8_entry;
+
+                               continue;
+                       }
+               }
        }
-       else { /*
-               * If it is valid, extended entry calculate the index into tbl8.
+
+       return 0;
+}
+
+static inline int32_t
+add_depth_big_v23(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
+               uint32_t next_hop)
+{
+#define tbl8_gindex next_hop
+#define valid_group ext_entry
+
+       uint32_t tbl24_index;
+       int32_t tbl8_group_index, tbl8_group_start, tbl8_group_end, tbl8_index,
+                       tbl8_range, i;
+
+       tbl24_index = (ip_masked >> 8);
+       tbl8_range = depth_to_range(depth);
+
+       if (!lpm->tbl24[tbl24_index].valid) {
+               /* Search for a free tbl8 group. */
+               tbl8_group_index = tbl8_alloc_v23(lpm->tbl8, lpm->number_tbl8s);
+
+               /* Check tbl8 allocation was successful. */
+               if (tbl8_group_index < 0)
+                       return tbl8_group_index;
+
+               /* Find index into tbl8 and range. */
+               tbl8_index = (tbl8_group_index *
+                               RTE_LPM_TBL8_GROUP_NUM_ENTRIES) +
+                               (ip_masked & 0xFF);
+
+               /* Set tbl8 entry. */
+               for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
+                       lpm->tbl8[i].depth = depth;
+                       lpm->tbl8[i].next_hop = next_hop;
+                       lpm->tbl8[i].valid = VALID;
+               }
+
+               /*
+                * Update tbl24 entry to point to new tbl8 entry. Note: The
+                * ext_flag and tbl8_index need to be updated simultaneously,
+                * so assign whole structure in one go
+                */
+
+               struct rte_lpm_tbl_entry new_tbl24_entry = {
+                       .tbl8_gindex = (uint32_t)tbl8_group_index,
+                       .valid = VALID,
+                       .ext_entry = 1,
+                       .depth = 0,
+               };
+
+               lpm->tbl24[tbl24_index] = new_tbl24_entry;
+
+       } else if (lpm->tbl24[tbl24_index].ext_entry == 0) { /*
+               * If valid entry but not extended calculate the index into Table8.
                */
+               /* Search for free tbl8 group. */
+               tbl8_group_index = tbl8_alloc_v23(lpm->tbl8, lpm->number_tbl8s);
+
+               if (tbl8_group_index < 0)
+                       return tbl8_group_index;
+
+               tbl8_group_start = tbl8_group_index *
+                               RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
+               tbl8_group_end = tbl8_group_start +
+                               RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
+
+               /* Populate new tbl8 with tbl24 value. */
+               for (i = tbl8_group_start; i < tbl8_group_end; i++) {
+                       lpm->tbl8[i].valid = VALID;
+                       lpm->tbl8[i].depth = lpm->tbl24[tbl24_index].depth;
+                       lpm->tbl8[i].next_hop =
+                                       lpm->tbl24[tbl24_index].next_hop;
+               }
+
+               tbl8_index = tbl8_group_start + (ip_masked & 0xFF);
+
+               /* Insert new rule into the tbl8 entry. */
+               for (i = tbl8_index; i < tbl8_index + tbl8_range; i++) {
+                       if (!lpm->tbl8[i].valid || lpm->tbl8[i].depth <= depth) {
+                               lpm->tbl8[i].valid = VALID;
+                               lpm->tbl8[i].depth = depth;
+                               lpm->tbl8[i].next_hop = next_hop;
+
+                               continue;
+                       }
+               }
+
+               /*
+                * Update tbl24 entry to point to new tbl8 entry. Note: The
+                * ext_flag and tbl8_index need to be updated simultaneously,
+                * so assign whole structure in one go.
+                */
+
+               struct rte_lpm_tbl_entry new_tbl24_entry = {
+                       .tbl8_gindex = (uint32_t)tbl8_group_index,
+                       .valid = VALID,
+                       .ext_entry = 1,
+                       .depth = 0,
+               };
+
+               lpm->tbl24[tbl24_index] = new_tbl24_entry;
+
+       } else {
+               /*
+                * If it is valid, extended entry calculate the index into tbl8.
+                */
                tbl8_group_index = lpm->tbl24[tbl24_index].tbl8_gindex;
                tbl8_group_start = tbl8_group_index *
-                               RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
+               RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
                tbl8_index = tbl8_group_start + (ip_masked & 0xFF);

                for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {

-                       if (!lpm->tbl8[i].valid ||
-                                       lpm->tbl8[i].depth <= depth) {
-                               struct rte_lpm_tbl8_entry new_tbl8_entry = {
+                       if (!lpm->tbl8[i].valid || lpm->tbl8[i].depth <= depth) {
+                               struct rte_lpm_tbl_entry new_tbl8_entry = {
                                        .valid = VALID,
                                        .depth = depth,
                                        .next_hop = next_hop,
@@ -612,6 +1128,8 @@ add_depth_big(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
                        }
                }
        }
+#undef tbl8_gindex
+#undef valid_group

        return 0;
 }
@@ -620,7 +1138,7 @@ add_depth_big(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
  * Add a route
  */
 int
-rte_lpm_add(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
+rte_lpm_add_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth,
                uint8_t next_hop)
 {
        int32_t rule_index, status = 0;
@@ -633,25 +1151,63 @@ rte_lpm_add(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
        ip_masked = ip & depth_to_mask(depth);

        /* Add the rule to the rule table. */
-       rule_index = rule_add(lpm, ip_masked, depth, next_hop);
+       rule_index = rule_add_v20(lpm, ip_masked, depth, next_hop);

        /* If the is no space available for new rule return error. */
-       if (rule_index < 0) {
+       if (rule_index < 0)
                return rule_index;
-       }

        if (depth <= MAX_DEPTH_TBL24) {
-               status = add_depth_small(lpm, ip_masked, depth, next_hop);
+               status = add_depth_small_v20(lpm, ip_masked, depth, next_hop);
+       } else { /* If depth > RTE_LPM_MAX_DEPTH_TBL24 */
+               status = add_depth_big_v20(lpm, ip_masked, depth, next_hop);
+
+               /*
+                * If add fails due to exhaustion of tbl8 extensions delete
+                * rule that was added to rule table.
+                */
+               if (status < 0) {
+                       rule_delete_v20(lpm, rule_index, depth);
+
+                       return status;
+               }
        }
-       else { /* If depth > RTE_LPM_MAX_DEPTH_TBL24 */
-               status = add_depth_big(lpm, ip_masked, depth, next_hop);
+
+       return 0;
+}
+VERSION_SYMBOL(rte_lpm_add, _v20, 2.0);
+
+int
+rte_lpm_add_v23(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
+               uint32_t next_hop)
+{
+       int32_t rule_index, status = 0;
+       uint32_t ip_masked;
+
+       /* Check user arguments. */
+       if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM_MAX_DEPTH))
+               return -EINVAL;
+
+       ip_masked = ip & depth_to_mask(depth);
+
+       /* Add the rule to the rule table. */
+       rule_index = rule_add_v23(lpm, ip_masked, depth, next_hop);
+
+       /* If there is no space available for the new rule return an error. */
+       if (rule_index < 0)
+               return rule_index;
+
+       if (depth <= MAX_DEPTH_TBL24) {
+               status = add_depth_small_v23(lpm, ip_masked, depth, next_hop);
+       } else { /* If depth > RTE_LPM_MAX_DEPTH_TBL24 */
+               status = add_depth_big_v23(lpm, ip_masked, depth, next_hop);

                /*
                 * If add fails due to exhaustion of tbl8 extensions delete
                 * rule that was added to rule table.
                 */
                if (status < 0) {
-                       rule_delete(lpm, rule_index, depth);
+                       rule_delete_v23(lpm, rule_index, depth);

                        return status;
                }
@@ -659,26 +1215,55 @@ rte_lpm_add(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,

        return 0;
 }
+BIND_DEFAULT_SYMBOL(rte_lpm_add, _v23, 2.3);
+MAP_STATIC_SYMBOL(
+               int rte_lpm_add(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
+                               uint32_t next_hop), rte_lpm_add_v23);

 /*
  * Look for a rule in the high-level rules table
  */
 int
-rte_lpm_is_rule_present(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
-uint8_t *next_hop)
+rte_lpm_is_rule_present_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth,
+               uint8_t *next_hop)
+{
+       uint32_t ip_masked;
+       int32_t rule_index;
+
+       /* Check user arguments. */
+       if ((lpm == NULL) || (next_hop == NULL) || (depth < 1)
+                       || (depth > RTE_LPM_MAX_DEPTH))
+               return -EINVAL;
+
+       /* Look for the rule using rule_find. */
+       ip_masked = ip & depth_to_mask(depth);
+       rule_index = rule_find_v20(lpm, ip_masked, depth);
+
+       if (rule_index >= 0) {
+               *next_hop = lpm->rules_tbl[rule_index].next_hop;
+               return 1;
+       }
+
+       /* If rule is not found return 0. */
+       return 0;
+}
+VERSION_SYMBOL(rte_lpm_is_rule_present, _v20, 2.0);
+
+int
+rte_lpm_is_rule_present_v23(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
+               uint32_t *next_hop)
 {
        uint32_t ip_masked;
        int32_t rule_index;

        /* Check user arguments. */
-       if ((lpm == NULL) ||
-               (next_hop == NULL) ||
-               (depth < 1) || (depth > RTE_LPM_MAX_DEPTH))
+       if ((lpm == NULL) || (next_hop == NULL) || (depth < 1)
+                       || (depth > RTE_LPM_MAX_DEPTH))
                return -EINVAL;

        /* Look for the rule using rule_find. */
        ip_masked = ip & depth_to_mask(depth);
-       rule_index = rule_find(lpm, ip_masked, depth);
+       rule_index = rule_find_v23(lpm, ip_masked, depth);

        if (rule_index >= 0) {
                *next_hop = lpm->rules_tbl[rule_index].next_hop;
@@ -688,18 +1273,22 @@ uint8_t *next_hop)
        /* If rule is not found return 0. */
        return 0;
 }
+BIND_DEFAULT_SYMBOL(rte_lpm_is_rule_present, _v23, 2.3);
+MAP_STATIC_SYMBOL(int rte_lpm_is_rule_present(struct rte_lpm *lpm, uint32_t ip,
+                       uint8_t depth, uint32_t *next_hop), rte_lpm_is_rule_present_v23);

 static inline int32_t
-find_previous_rule(struct rte_lpm *lpm, uint32_t ip, uint8_t depth, uint8_t *sub_rule_depth)
+find_previous_rule_v20(struct rte_lpm_v20 *lpm, uint32_t ip,
+               uint8_t depth, uint8_t *sub_rule_depth)
 {
        int32_t rule_index;
        uint32_t ip_masked;
        uint8_t prev_depth;

-       for (prev_depth = (uint8_t)(depth - 1); prev_depth > 0; prev_depth--) {
+       for (prev_depth = (uint8_t) (depth - 1); prev_depth > 0; prev_depth--) {
                ip_masked = ip & depth_to_mask(prev_depth);

-               rule_index = rule_find(lpm, ip_masked, prev_depth);
+               rule_index = rule_find_v20(lpm, ip_masked, prev_depth);

                if (rule_index >= 0) {
                        *sub_rule_depth = prev_depth;
@@ -711,8 +1300,30 @@ find_previous_rule(struct rte_lpm *lpm, uint32_t ip, uint8_t depth, uint8_t *sub
 }

 static inline int32_t
-delete_depth_small(struct rte_lpm *lpm, uint32_t ip_masked,
-       uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth)
+find_previous_rule_v23(struct rte_lpm *lpm, uint32_t ip,
+               uint8_t depth, uint8_t *sub_rule_depth)
+{
+       int32_t rule_index;
+       uint32_t ip_masked;
+       uint8_t prev_depth;
+
+       for (prev_depth = (uint8_t) (depth - 1); prev_depth > 0; prev_depth--) {
+               ip_masked = ip & depth_to_mask(prev_depth);
+
+               rule_index = rule_find_v23(lpm, ip_masked, prev_depth);
+
+               if (rule_index >= 0) {
+                       *sub_rule_depth = prev_depth;
+                       return rule_index;
+               }
+       }
+
+       return -1;
+}
+
+static inline int32_t
+delete_depth_small_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked, uint8_t depth,
+               int32_t sub_rule_index, uint8_t sub_rule_depth)
 {
        uint32_t tbl24_range, tbl24_index, tbl8_group_index, tbl8_index, i, j;

@@ -731,8 +1342,7 @@ delete_depth_small(struct rte_lpm *lpm, uint32_t ip_masked,
                 */
                for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {

-                       if (lpm->tbl24[i].ext_entry == 0 &&
-                                       lpm->tbl24[i].depth <= depth ) {
+                       if (lpm->tbl24[i].ext_entry == 0 && lpm->tbl24[i].depth <= depth) {
                                lpm->tbl24[i].valid = INVALID;
                        } else if (lpm->tbl24[i].ext_entry == 1) {
                                /*
@@ -743,31 +1353,121 @@ delete_depth_small(struct rte_lpm *lpm, uint32_t ip_masked,

                                tbl8_group_index = lpm->tbl24[i].tbl8_gindex;
                                tbl8_index = tbl8_group_index *
-                                               RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
+                               RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

                                for (j = tbl8_index; j < (tbl8_index +
-                                       RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {
+                               RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {

                                        if (lpm->tbl8[j].depth <= depth)
                                                lpm->tbl8[j].valid = INVALID;
                                }
                        }
                }
-       }
-       else {
+       } else {
                /*
                 * If a replacement rule exists then modify entries
                 * associated with this rule.
                 */

                struct rte_lpm_tbl24_entry new_tbl24_entry = {
-                       {.next_hop = lpm->rules_tbl[sub_rule_index].next_hop,},
+                               { .next_hop = lpm->rules_tbl[sub_rule_index].next_hop, },
+                               .valid = VALID,
+                               .ext_entry = 0,
+                               .depth = sub_rule_depth, };
+
+               struct rte_lpm_tbl8_entry new_tbl8_entry = {
+                               .valid = VALID,
+                               .valid_group = VALID,
+                               .depth = sub_rule_depth,
+                               .next_hop = lpm->rules_tbl[sub_rule_index].next_hop, };
+
+               for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
+
+                       if (lpm->tbl24[i].ext_entry == 0 && lpm->tbl24[i].depth <= depth) {
+                               lpm->tbl24[i] = new_tbl24_entry;
+                       } else if (lpm->tbl24[i].ext_entry == 1) {
+                               /*
+                                * If TBL24 entry is extended, then there has
+                                * to be a rule with depth >= 25 in the
+                                * associated TBL8 group.
+                                */
+
+                               tbl8_group_index = lpm->tbl24[i].tbl8_gindex;
+                               tbl8_index = tbl8_group_index *
+                                               RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
+
+                               for (j = tbl8_index; j < (tbl8_index +
+                                               RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {
+
+                                       if (lpm->tbl8[j].depth <= depth)
+                                               lpm->tbl8[j] = new_tbl8_entry;
+                               }
+                       }
+               }
+       }
+
+       return 0;
+}
+
+static inline int32_t
+delete_depth_small_v23(struct rte_lpm *lpm, uint32_t ip_masked,
+               uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth)
+{
+#define tbl8_gindex next_hop
+#define valid_group ext_entry
+
+       uint32_t tbl24_range, tbl24_index, tbl8_group_index, tbl8_index, i, j;
+
+       /* Calculate the range and index into Table24. */
+       tbl24_range = depth_to_range(depth);
+       tbl24_index = (ip_masked >> 8);
+
+       /*
+        * Firstly check the sub_rule_index. A -1 indicates no replacement rule
+        * and a positive number indicates a sub_rule_index.
+        */
+       if (sub_rule_index < 0) {
+               /*
+                * If no replacement rule exists then invalidate entries
+                * associated with this rule.
+                */
+               for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
+
+                       if (lpm->tbl24[i].ext_entry == 0 && lpm->tbl24[i].depth <= depth) {
+                               lpm->tbl24[i].valid = INVALID;
+                       } else if (lpm->tbl24[i].ext_entry == 1) {
+                               /*
+                                * If TBL24 entry is extended, then there has
+                                * to be a rule with depth >= 25 in the
+                                * associated TBL8 group.
+                                */
+
+                               tbl8_group_index = lpm->tbl24[i].tbl8_gindex;
+                               tbl8_index = tbl8_group_index *
+                                               RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
+
+                               for (j = tbl8_index; j < (tbl8_index +
+                                                               
RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {
+
+                                       if (lpm->tbl8[j].depth <= depth)
+                                               lpm->tbl8[j].valid = INVALID;
+                               }
+                       }
+               }
+       } else {
+               /*
+                * If a replacement rule exists then modify entries
+                * associated with this rule.
+                */
+
+               struct rte_lpm_tbl_entry new_tbl24_entry = {
+                       .next_hop = lpm->rules_tbl[sub_rule_index].next_hop,
                        .valid = VALID,
                        .ext_entry = 0,
                        .depth = sub_rule_depth,
                };

-               struct rte_lpm_tbl8_entry new_tbl8_entry = {
+               struct rte_lpm_tbl_entry new_tbl8_entry = {
                        .valid = VALID,
                        .valid_group = VALID,
                        .depth = sub_rule_depth,
@@ -777,10 +1477,9 @@ delete_depth_small(struct rte_lpm *lpm, uint32_t ip_masked,

                for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {

-                       if (lpm->tbl24[i].ext_entry == 0 &&
-                                       lpm->tbl24[i].depth <= depth ) {
+                       if (lpm->tbl24[i].ext_entry == 0 && lpm->tbl24[i].depth <= depth) {
                                lpm->tbl24[i] = new_tbl24_entry;
-                       } else  if (lpm->tbl24[i].ext_entry == 1) {
+                       } else if (lpm->tbl24[i].ext_entry == 1) {
                                /*
                                 * If TBL24 entry is extended, then there has
                                 * to be a rule with depth >= 25 in the
@@ -789,10 +1488,10 @@ delete_depth_small(struct rte_lpm *lpm, uint32_t ip_masked,

                                tbl8_group_index = lpm->tbl24[i].tbl8_gindex;
                                tbl8_index = tbl8_group_index *
-                                               RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
+                               RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

                                for (j = tbl8_index; j < (tbl8_index +
-                                       RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {
+                                                               RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {

                                        if (lpm->tbl8[j].depth <= depth)
                                                lpm->tbl8[j] = new_tbl8_entry;
@@ -800,6 +1499,8 @@ delete_depth_small(struct rte_lpm *lpm, uint32_t ip_masked,
                        }
                }
        }
+#undef tbl8_gindex
+#undef valid_group

        return 0;
 }
@@ -813,9 +1514,10 @@ delete_depth_small(struct rte_lpm *lpm, uint32_t ip_masked,
  * thus can be recycled
  */
 static inline int32_t
-tbl8_recycle_check(struct rte_lpm_tbl8_entry *tbl8, uint32_t tbl8_group_start)
+tbl8_recycle_check_v20(struct rte_lpm_tbl8_entry *tbl8, uint32_t tbl8_group_start)
 {
        uint32_t tbl8_group_end, i;
+
        tbl8_group_end = tbl8_group_start + RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

        /*
@@ -831,14 +1533,52 @@ tbl8_recycle_check(struct rte_lpm_tbl8_entry *tbl8, uint32_t tbl8_group_start)
                 * are all of this depth.
                 */
                if (tbl8[tbl8_group_start].depth < MAX_DEPTH_TBL24) {
-                       for (i = (tbl8_group_start + 1); i < tbl8_group_end;
-                                       i++) {
+                       for (i = (tbl8_group_start + 1); i < tbl8_group_end; i++) {
+
+                               if (tbl8[i].depth != tbl8[tbl8_group_start].depth)
+                                       return -EEXIST;
+                       }
+                       /* If all entries are the same return the tbl8 index */
+                       return tbl8_group_start;
+               }
+
+               return -EEXIST;
+       }
+       /*
+        * If the first entry is invalid check if the rest of the entries in
+        * the tbl8 are invalid.
+        */
+       for (i = (tbl8_group_start + 1); i < tbl8_group_end; i++) {
+               if (tbl8[i].valid)
+                       return -EEXIST;
+       }
+       /* If no valid entries are found then return -EINVAL. */
+       return -EINVAL;
+}
+
+static inline int32_t
+tbl8_recycle_check_v23(struct rte_lpm_tbl_entry *tbl8, uint32_t tbl8_group_start)
+{
+       uint32_t tbl8_group_end, i;

-                               if (tbl8[i].depth !=
-                                               tbl8[tbl8_group_start].depth) {
+       tbl8_group_end = tbl8_group_start + RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

+       /*
+        * Check the first entry of the given tbl8. If it is invalid we know
+        * this tbl8 does not contain any rule with a depth < RTE_LPM_MAX_DEPTH
+        * (as they would affect all entries in a tbl8) and thus this table
+        * cannot be recycled.
+        */
+       if (tbl8[tbl8_group_start].valid) {
+               /*
+                * If first entry is valid check if the depth is less than 24
+                * and if so check the rest of the entries to verify that they
+                * are all of this depth.
+                */
+               if (tbl8[tbl8_group_start].depth < MAX_DEPTH_TBL24) {
+                       for (i = (tbl8_group_start + 1); i < tbl8_group_end; i++) {
+                               if (tbl8[i].depth != tbl8[tbl8_group_start].depth)
                                        return -EEXIST;
-                               }
                        }
                        /* If all entries are the same return the tb8 index */
                        return tbl8_group_start;
@@ -859,8 +1599,8 @@ tbl8_recycle_check(struct rte_lpm_tbl8_entry *tbl8, uint32_t tbl8_group_start)
 }

 static inline int32_t
-delete_depth_big(struct rte_lpm *lpm, uint32_t ip_masked,
-       uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth)
+delete_depth_big_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked, uint8_t depth,
+               int32_t sub_rule_index, uint8_t sub_rule_depth)
 {
        uint32_t tbl24_index, tbl8_group_index, tbl8_group_start, tbl8_index,
                        tbl8_range, i;
@@ -887,10 +1627,88 @@ delete_depth_big(struct rte_lpm *lpm, uint32_t ip_masked,
                        if (lpm->tbl8[i].depth <= depth)
                                lpm->tbl8[i].valid = INVALID;
                }
-       }
-       else {
+       } else {
                /* Set new tbl8 entry. */
                struct rte_lpm_tbl8_entry new_tbl8_entry = {
+                               .valid = VALID,
+                               .depth = sub_rule_depth,
+                               .valid_group = lpm->tbl8[tbl8_group_start].valid_group,
+                               .next_hop = lpm->rules_tbl[sub_rule_index].next_hop,
+               };
+
+               /*
+                * Loop through the range of entries on tbl8 for which the
+                * rule_to_delete must be modified.
+                */
+               for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
+                       if (lpm->tbl8[i].depth <= depth)
+                               lpm->tbl8[i] = new_tbl8_entry;
+               }
+       }
+
+       /*
+        * Check if there are any valid entries in this tbl8 group. If all
+        * tbl8 entries are invalid we can free the tbl8 and invalidate the
+        * associated tbl24 entry.
+        */
+
+       tbl8_recycle_index = tbl8_recycle_check_v20(lpm->tbl8, tbl8_group_start);
+
+       if (tbl8_recycle_index == -EINVAL) {
+               /* Set tbl24 before freeing tbl8 to avoid race condition. */
+               lpm->tbl24[tbl24_index].valid = 0;
+               tbl8_free_v20(lpm->tbl8, tbl8_group_start);
+       } else if (tbl8_recycle_index > -1) {
+               /* Update tbl24 entry. */
+               struct rte_lpm_tbl24_entry new_tbl24_entry = {
+                               { .next_hop = lpm->tbl8[tbl8_recycle_index].next_hop, },
+                               .valid = VALID,
+                               .ext_entry = 0,
+                               .depth = lpm->tbl8[tbl8_recycle_index].depth, };
+
+               /* Set tbl24 before freeing tbl8 to avoid race condition. */
+               lpm->tbl24[tbl24_index] = new_tbl24_entry;
+               tbl8_free_v20(lpm->tbl8, tbl8_group_start);
+       }
+
+       return 0;
+}
+
+static inline int32_t
+delete_depth_big_v23(struct rte_lpm *lpm, uint32_t ip_masked,
+               uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth)
+{
+#define tbl8_gindex next_hop
+#define valid_group ext_entry
+
+       uint32_t tbl24_index, tbl8_group_index, tbl8_group_start, tbl8_index,
+                       tbl8_range, i;
+       int32_t tbl8_recycle_index;
+
+       /*
+        * Calculate the index into tbl24 and range. Note: All depths larger
+        * than MAX_DEPTH_TBL24 are associated with only one tbl24 entry.
+        */
+       tbl24_index = ip_masked >> 8;
+
+       /* Calculate the index into tbl8 and range. */
+       tbl8_group_index = lpm->tbl24[tbl24_index].tbl8_gindex;
+       tbl8_group_start = tbl8_group_index * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
+       tbl8_index = tbl8_group_start + (ip_masked & 0xFF);
+       tbl8_range = depth_to_range(depth);
+
+       if (sub_rule_index < 0) {
+               /*
+                * Loop through the range of entries on tbl8 for which the
+                * rule_to_delete must be removed or modified.
+                */
+               for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
+                       if (lpm->tbl8[i].depth <= depth)
+                               lpm->tbl8[i].valid = INVALID;
+               }
+       } else {
+               /* Set new tbl8 entry. */
+               struct rte_lpm_tbl_entry new_tbl8_entry = {
                        .valid = VALID,
                        .depth = sub_rule_depth,
                        .valid_group = lpm->tbl8[tbl8_group_start].valid_group,
@@ -913,17 +1731,16 @@ delete_depth_big(struct rte_lpm *lpm, uint32_t ip_masked,
         * associated tbl24 entry.
         */

-       tbl8_recycle_index = tbl8_recycle_check(lpm->tbl8, tbl8_group_start);
+       tbl8_recycle_index = tbl8_recycle_check_v23(lpm->tbl8, tbl8_group_start);

-       if (tbl8_recycle_index == -EINVAL){
+       if (tbl8_recycle_index == -EINVAL) {
                /* Set tbl24 before freeing tbl8 to avoid race condition. */
                lpm->tbl24[tbl24_index].valid = 0;
-               tbl8_free(lpm->tbl8, tbl8_group_start);
-       }
-       else if (tbl8_recycle_index > -1) {
+               tbl8_free_v23(lpm->tbl8, tbl8_group_start);
+       } else if (tbl8_recycle_index > -1) {
                /* Update tbl24 entry. */
-               struct rte_lpm_tbl24_entry new_tbl24_entry = {
-                       { .next_hop = lpm->tbl8[tbl8_recycle_index].next_hop, },
+               struct rte_lpm_tbl_entry new_tbl24_entry = {
+                       .next_hop = lpm->tbl8[tbl8_recycle_index].next_hop,
                        .valid = VALID,
                        .ext_entry = 0,
                        .depth = lpm->tbl8[tbl8_recycle_index].depth,
@@ -931,8 +1748,10 @@ delete_depth_big(struct rte_lpm *lpm, uint32_t ip_masked,

                /* Set tbl24 before freeing tbl8 to avoid race condition. */
                lpm->tbl24[tbl24_index] = new_tbl24_entry;
-               tbl8_free(lpm->tbl8, tbl8_group_start);
+               tbl8_free_v23(lpm->tbl8, tbl8_group_start);
        }
+#undef tbl8_gindex
+#undef valid_group

        return 0;
 }
@@ -941,7 +1760,7 @@ delete_depth_big(struct rte_lpm *lpm, uint32_t ip_masked,
  * Deletes a rule
  */
 int
-rte_lpm_delete(struct rte_lpm *lpm, uint32_t ip, uint8_t depth)
+rte_lpm_delete_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth)
 {
        int32_t rule_to_delete_index, sub_rule_index;
        uint32_t ip_masked;
@@ -950,9 +1769,8 @@ rte_lpm_delete(struct rte_lpm *lpm, uint32_t ip, uint8_t depth)
         * Check input arguments. Note: IP must be a positive integer of 32
         * bits in length therefore it need not be checked.
         */
-       if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM_MAX_DEPTH)) {
+       if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM_MAX_DEPTH))
                return -EINVAL;
-       }

        ip_masked = ip & depth_to_mask(depth);

@@ -960,7 +1778,7 @@ rte_lpm_delete(struct rte_lpm *lpm, uint32_t ip, uint8_t depth)
         * Find the index of the input rule, that needs to be deleted, in the
         * rule table.
         */
-       rule_to_delete_index = rule_find(lpm, ip_masked, depth);
+       rule_to_delete_index = rule_find_v20(lpm, ip_masked, depth);

        /*
         * Check if rule_to_delete_index was found. If no rule was found the
@@ -970,7 +1788,7 @@ rte_lpm_delete(struct rte_lpm *lpm, uint32_t ip, uint8_t depth)
                return -EINVAL;

        /* Delete the rule from the rule table. */
-       rule_delete(lpm, rule_to_delete_index, depth);
+       rule_delete_v20(lpm, rule_to_delete_index, depth);

        /*
         * Find rule to replace the rule_to_delete. If there is no rule to
@@ -978,26 +1796,99 @@ rte_lpm_delete(struct rte_lpm *lpm, uint32_t ip, uint8_t depth)
         * entries associated with this rule.
         */
        sub_rule_depth = 0;
-       sub_rule_index = find_previous_rule(lpm, ip, depth, &sub_rule_depth);
+       sub_rule_index = find_previous_rule_v20(lpm, ip, depth, &sub_rule_depth);

        /*
         * If the input depth value is less than 25 use function
         * delete_depth_small otherwise use delete_depth_big.
         */
        if (depth <= MAX_DEPTH_TBL24) {
-               return delete_depth_small(lpm, ip_masked, depth,
-                               sub_rule_index, sub_rule_depth);
+               return delete_depth_small_v20(lpm, ip_masked, depth, sub_rule_index,
+                               sub_rule_depth);
+       } else { /* If depth > MAX_DEPTH_TBL24 */
+               return delete_depth_big_v20(lpm, ip_masked, depth, sub_rule_index,
+                               sub_rule_depth);
        }
-       else { /* If depth > MAX_DEPTH_TBL24 */
-               return delete_depth_big(lpm, ip_masked, depth, sub_rule_index, sub_rule_depth);
+}
+VERSION_SYMBOL(rte_lpm_delete, _v20, 2.0);
+
+int
+rte_lpm_delete_v23(struct rte_lpm *lpm, uint32_t ip, uint8_t depth)
+{
+       int32_t rule_to_delete_index, sub_rule_index;
+       uint32_t ip_masked;
+       uint8_t sub_rule_depth;
+       /*
+        * Check input arguments. Note: IP must be a positive integer of 32
+        * bits in length therefore it need not be checked.
+        */
+       if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM_MAX_DEPTH))
+               return -EINVAL;
+
+       ip_masked = ip & depth_to_mask(depth);
+
+       /*
+        * Find the index of the input rule, that needs to be deleted, in the
+        * rule table.
+        */
+       rule_to_delete_index = rule_find_v23(lpm, ip_masked, depth);
+
+       /*
+        * Check if rule_to_delete_index was found. If no rule was found the
+        * function rule_find returns -EINVAL.
+        */
+       if (rule_to_delete_index < 0)
+               return -EINVAL;
+
+       /* Delete the rule from the rule table. */
+       rule_delete_v23(lpm, rule_to_delete_index, depth);
+
+       /*
+        * Find rule to replace the rule_to_delete. If there is no rule to
+        * replace the rule_to_delete we return -1 and invalidate the table
+        * entries associated with this rule.
+        */
+       sub_rule_depth = 0;
+       sub_rule_index = find_previous_rule_v23(lpm, ip, depth, &sub_rule_depth);
+
+       /*
+        * If the input depth value is less than 25 use function
+        * delete_depth_small otherwise use delete_depth_big.
+        */
+       if (depth <= MAX_DEPTH_TBL24) {
+               return delete_depth_small_v23(lpm, ip_masked, depth, sub_rule_index,
+                               sub_rule_depth);
+       } else { /* If depth > MAX_DEPTH_TBL24 */
+               return delete_depth_big_v23(lpm, ip_masked, depth, sub_rule_index,
+                               sub_rule_depth);
        }
 }
+BIND_DEFAULT_SYMBOL(rte_lpm_delete, _v23, 2.3);
+MAP_STATIC_SYMBOL(int rte_lpm_delete(struct rte_lpm *lpm, uint32_t ip,
+               uint8_t depth), rte_lpm_delete_v23);

 /*
  * Delete all rules from the LPM table.
  */
 void
-rte_lpm_delete_all(struct rte_lpm *lpm)
+rte_lpm_delete_all_v20(struct rte_lpm_v20 *lpm)
+{
+       /* Zero rule information. */
+       memset(lpm->rule_info, 0, sizeof(lpm->rule_info));
+
+       /* Zero tbl24. */
+       memset(lpm->tbl24, 0, sizeof(lpm->tbl24));
+
+       /* Zero tbl8. */
+       memset(lpm->tbl8, 0, sizeof(lpm->tbl8[0]) * RTE_LPM_TBL8_NUM_ENTRIES);
+
+       /* Delete all rules from the rules table. */
+       memset(lpm->rules_tbl, 0, sizeof(lpm->rules_tbl[0]) * lpm->max_rules);
+}
+VERSION_SYMBOL(rte_lpm_delete_all, _v20, 2.0);
+
+void
+rte_lpm_delete_all_v23(struct rte_lpm *lpm)
 {
        /* Zero rule information. */
        memset(lpm->rule_info, 0, sizeof(lpm->rule_info));
@@ -1006,8 +1897,12 @@ rte_lpm_delete_all(struct rte_lpm *lpm)
        memset(lpm->tbl24, 0, sizeof(lpm->tbl24));

        /* Zero tbl8. */
-       memset(lpm->tbl8, 0, sizeof(lpm->tbl8));
+       memset(lpm->tbl8, 0, sizeof(lpm->tbl8[0])
+                       * RTE_LPM_TBL8_GROUP_NUM_ENTRIES * lpm->number_tbl8s);

        /* Delete all rules form the rules table. */
        memset(lpm->rules_tbl, 0, sizeof(lpm->rules_tbl[0]) * lpm->max_rules);
 }
+BIND_DEFAULT_SYMBOL(rte_lpm_delete_all, _v23, 2.3);
+MAP_STATIC_SYMBOL(void rte_lpm_delete_all(struct rte_lpm *lpm),
+               rte_lpm_delete_all_v23);
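
For reference, a minimal sketch of how an application drives the reworked
control-plane API after this patch; the table name, sizes and next-hop value
below are arbitrary examples, not part of the patch:

        #include <stdio.h>
        #include <rte_lpm.h>

        static int
        lpm_usage_sketch(void)
        {
                struct rte_lpm *lpm;
                uint32_t next_hop;
                struct rte_lpm_config config = {
                        .max_rules = 1024,
                        .number_tbl8s = 1 << 16, /* anything up to 2^24 groups */
                        .flags = 0,              /* currently unused */
                };

                /* Memory for rules and tbl8s is now sized from the config. */
                lpm = rte_lpm_create("sketch", SOCKET_ID_ANY, &config);
                if (lpm == NULL)
                        return -1;

                /* next_hop is 24 bits wide, so values above 255 are valid. */
                if (rte_lpm_add(lpm, 0x0a000000 /* 10.0.0.0 */, 24, 100000) < 0) {
                        rte_lpm_free(lpm);
                        return -1;
                }

                if (rte_lpm_lookup(lpm, 0x0a000001 /* 10.0.0.1 */, &next_hop) == 0)
                        printf("next hop: %u\n", next_hop); /* prints 100000 */

                rte_lpm_free(lpm);
                return 0;
        }
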
diff --git a/lib/librte_lpm/rte_lpm.h b/lib/librte_lpm/rte_lpm.h
index c299ce2..d45113c 100644
--- a/lib/librte_lpm/rte_lpm.h
+++ b/lib/librte_lpm/rte_lpm.h
@@ -48,6 +48,7 @@
 #include <rte_memory.h>
 #include <rte_common.h>
 #include <rte_vect.h>
+#include <rte_compat.h>

 #ifdef __cplusplus
 extern "C" {
@@ -65,6 +66,9 @@ extern "C" {
 /** @internal Number of entries in a tbl8 group. */
 #define RTE_LPM_TBL8_GROUP_NUM_ENTRIES  256

+/** @internal Max number of tbl8 groups in the tbl8. */
+#define RTE_LPM_MAX_TBL8_NUM_GROUPS         (1 << 24)
+
 /** @internal Total number of tbl8 groups in the tbl8. */
 #define RTE_LPM_TBL8_NUM_GROUPS         256

@@ -75,18 +79,12 @@ extern "C" {
 /** @internal Macro to enable/disable run-time checks. */
 #if defined(RTE_LIBRTE_LPM_DEBUG)
 #define RTE_LPM_RETURN_IF_TRUE(cond, retval) do { \
-       if (cond) return (retval);                \
+       if (cond) return (retval);                    \
 } while (0)
 #else
 #define RTE_LPM_RETURN_IF_TRUE(cond, retval)
 #endif

-/** @internal bitmask with valid and ext_entry/valid_group fields set */
-#define RTE_LPM_VALID_EXT_ENTRY_BITMASK 0x0300
-
-/** Bitmask used to indicate successful lookup */
-#define RTE_LPM_LOOKUP_SUCCESS          0x0100
-
 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
 /** @internal Tbl24 entry structure. */
 struct rte_lpm_tbl24_entry {
@@ -109,6 +107,16 @@ struct rte_lpm_tbl8_entry {
        uint8_t valid_group :1; /**< Group validation flag. */
        uint8_t depth       :6; /**< Rule depth. */
 };
+
+/** @internal Tbl24 and Tbl8 entry structure. */
+struct rte_lpm_tbl_entry {
+       /* Stores next hop or group index (i.e. gindex) into tbl8. */
+       uint32_t next_hop  :24;
+       /* The top byte packs the valid, ext_entry and depth fields. */
+       uint32_t valid     :1; /**< Validation flag. */
+       uint32_t ext_entry :1; /**< External entry or valid_group. */
+       uint32_t depth     :6; /**< Rule depth. */
+};
 #else
 struct rte_lpm_tbl24_entry {
        uint8_t depth       :6;
@@ -126,13 +134,14 @@ struct rte_lpm_tbl8_entry {
        uint8_t valid       :1;
        uint8_t next_hop;
 };
-#endif

-/** @internal Rule structure. */
-struct rte_lpm_rule {
-       uint32_t ip; /**< Rule IP address. */
-       uint8_t  next_hop; /**< Rule next hop. */
+struct rte_lpm_tbl_entry {
+       uint32_t depth       :6;
+       uint32_t ext_entry   :1;
+       uint32_t valid       :1;
+       uint32_t next_hop    :24;
 };
+#endif

 /** @internal Contains metadata about the rules table. */
 struct rte_lpm_rule_info {
@@ -140,22 +149,63 @@ struct rte_lpm_rule_info {
        uint32_t first_rule; /**< Indexes the first rule of a given depth. */
 };

+/** LPM configuration structure. */
+struct rte_lpm_config {
+       uint32_t max_rules;      /**< Max number of rules. */
+       uint32_t number_tbl8s;   /**< Number of tbl8s to allocate. */
+       int flags;               /**< This field is currently unused. */
+};
+
+/** @internal Rule structure. */
+struct rte_lpm_rule_v20 {
+       uint32_t ip; /**< Rule IP address. */
+       uint8_t  next_hop; /**< Rule next hop. */
+};
+
+/** @internal Rule structure. */
+struct rte_lpm_rule {
+       uint32_t ip; /**< Rule IP address. */
+       uint32_t next_hop; /**< Rule next hop. */
+};
+
 /** @internal LPM structure. */
-struct rte_lpm {
+struct rte_lpm_v20 {
        /* LPM metadata. */
        char name[RTE_LPM_NAMESIZE];        /**< Name of the lpm. */
        uint32_t max_rules; /**< Max. balanced rules per lpm. */
        struct rte_lpm_rule_info rule_info[RTE_LPM_MAX_DEPTH]; /**< Rule info table. */

        /* LPM Tables. */
-       struct rte_lpm_tbl24_entry tbl24[RTE_LPM_TBL24_NUM_ENTRIES] \
+       struct rte_lpm_tbl24_entry tbl24[RTE_LPM_TBL24_NUM_ENTRIES]
                        __rte_cache_aligned; /**< LPM tbl24 table. */
-       struct rte_lpm_tbl8_entry tbl8[RTE_LPM_TBL8_NUM_ENTRIES] \
+       struct rte_lpm_tbl8_entry tbl8[RTE_LPM_TBL8_NUM_ENTRIES]
                        __rte_cache_aligned; /**< LPM tbl8 table. */
-       struct rte_lpm_rule rules_tbl[0] \
+       struct rte_lpm_rule_v20 rules_tbl[0]
                        __rte_cache_aligned; /**< LPM rules. */
 };

+/** @internal LPM structure. */
+struct rte_lpm {
+       /* LPM metadata. */
+       char name[RTE_LPM_NAMESIZE];        /**< Name of the lpm. */
+       uint32_t max_rules; /**< Max. balanced rules per lpm. */
+       uint32_t number_tbl8s; /**< Number of tbl8s. */
+       struct rte_lpm_rule_info rule_info[RTE_LPM_MAX_DEPTH]; /**< Rule info table. */
+
+       /* LPM Tables. */
+       struct rte_lpm_tbl_entry tbl24[RTE_LPM_TBL24_NUM_ENTRIES]
+                       __rte_cache_aligned; /**< LPM tbl24 table. */
+       struct rte_lpm_tbl_entry *tbl8; /**< LPM tbl8 table. */
+       struct rte_lpm_rule *rules_tbl; /**< LPM rules. */
+};
+
+/** @internal bitmask with valid and ext_entry/valid_group fields set */
+#define RTE_LPM_VALID_EXT_ENTRY_BITMASK 0x03000000
+
+/** Bitmask used to indicate successful lookup */
+#define RTE_LPM_LOOKUP_SUCCESS          0x01000000
+
 /**
  * Create an LPM object.
  *
@@ -178,7 +228,14 @@ struct rte_lpm {
  *    - ENOMEM - no appropriate memory area found in which to create memzone
  */
 struct rte_lpm *
-rte_lpm_create(const char *name, int socket_id, int max_rules, int flags);
+rte_lpm_create(const char *name, int socket_id,
+               const struct rte_lpm_config *config);
+struct rte_lpm_v20 *
+rte_lpm_create_v20(const char *name, int socket_id, int max_rules,
+               __rte_unused int flags);
+struct rte_lpm *
+rte_lpm_create_v23(const char *name, int socket_id,
+               const struct rte_lpm_config *config);

 /**
  * Find an existing LPM object and return a pointer to it.
@@ -192,6 +249,10 @@ rte_lpm_create(const char *name, int socket_id, int max_rules, int flags);
  */
 struct rte_lpm *
 rte_lpm_find_existing(const char *name);
+struct rte_lpm_v20 *
+rte_lpm_find_existing_v20(const char *name);
+struct rte_lpm *
+rte_lpm_find_existing_v23(const char *name);

 /**
  * Free an LPM object.
@@ -203,6 +264,10 @@ rte_lpm_find_existing(const char *name);
  */
 void
 rte_lpm_free(struct rte_lpm *lpm);
+void
+rte_lpm_free_v20(struct rte_lpm_v20 *lpm);
+void
+rte_lpm_free_v23(struct rte_lpm *lpm);

 /**
  * Add a rule to the LPM table.
@@ -219,7 +284,14 @@ rte_lpm_free(struct rte_lpm *lpm);
  *   0 on success, negative value otherwise
  */
 int
-rte_lpm_add(struct rte_lpm *lpm, uint32_t ip, uint8_t depth, uint8_t next_hop);
+rte_lpm_add(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
+               uint32_t next_hop);
+int
+rte_lpm_add_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth,
+               uint8_t next_hop);
+int
+rte_lpm_add_v23(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
+               uint32_t next_hop);

 /**
  * Check if a rule is present in the LPM table,
@@ -238,7 +310,13 @@ rte_lpm_add(struct rte_lpm *lpm, uint32_t ip, uint8_t depth, uint8_t next_hop);
  */
 int
 rte_lpm_is_rule_present(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
+uint32_t *next_hop);
+int
+rte_lpm_is_rule_present_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth,
 uint8_t *next_hop);
+int
+rte_lpm_is_rule_present_v23(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
+uint32_t *next_hop);

 /**
  * Delete a rule from the LPM table.
@@ -254,6 +332,10 @@ uint8_t *next_hop);
  */
 int
 rte_lpm_delete(struct rte_lpm *lpm, uint32_t ip, uint8_t depth);
+int
+rte_lpm_delete_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth);
+int
+rte_lpm_delete_v23(struct rte_lpm *lpm, uint32_t ip, uint8_t depth);

 /**
  * Delete all rules from the LPM table.
@@ -263,6 +345,10 @@ rte_lpm_delete(struct rte_lpm *lpm, uint32_t ip, uint8_t depth);
  */
 void
 rte_lpm_delete_all(struct rte_lpm *lpm);
+void
+rte_lpm_delete_all_v20(struct rte_lpm_v20 *lpm);
+void
+rte_lpm_delete_all_v23(struct rte_lpm *lpm);

 /**
  * Lookup an IP into the LPM table.
@@ -277,28 +363,32 @@ rte_lpm_delete_all(struct rte_lpm *lpm);
  *   -EINVAL for incorrect arguments, -ENOENT on lookup miss, 0 on lookup hit
  */
 static inline int
-rte_lpm_lookup(struct rte_lpm *lpm, uint32_t ip, uint8_t *next_hop)
+rte_lpm_lookup(struct rte_lpm *lpm, uint32_t ip, uint32_t *next_hop)
 {
        unsigned tbl24_index = (ip >> 8);
-       uint16_t tbl_entry;
+       uint32_t tbl_entry;
+       const uint32_t *ptbl;

        /* DEBUG: Check user input arguments. */
        RTE_LPM_RETURN_IF_TRUE(((lpm == NULL) || (next_hop == NULL)), -EINVAL);

        /* Copy tbl24 entry */
-       tbl_entry = *(const uint16_t *)&lpm->tbl24[tbl24_index];
+       ptbl = (const uint32_t *)(&lpm->tbl24[tbl24_index]);
+       tbl_entry = *ptbl;

        /* Copy tbl8 entry (only if needed) */
        if (unlikely((tbl_entry & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
                        RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {

                unsigned tbl8_index = (uint8_t)ip +
-                               ((uint8_t)tbl_entry * RTE_LPM_TBL8_GROUP_NUM_ENTRIES);
+                               (((uint32_t)tbl_entry & 0x00FFFFFF) *
+                                               RTE_LPM_TBL8_GROUP_NUM_ENTRIES);

-               tbl_entry = *(const uint16_t *)&lpm->tbl8[tbl8_index];
+               ptbl = (const uint32_t *)&lpm->tbl8[tbl8_index];
+               tbl_entry = *ptbl;
        }

-       *next_hop = (uint8_t)tbl_entry;
+       *next_hop = ((uint32_t)tbl_entry & 0x00FFFFFF);
        return (tbl_entry & RTE_LPM_LOOKUP_SUCCESS) ? 0 : -ENOENT;
 }

@@ -326,40 +416,42 @@ rte_lpm_lookup(struct rte_lpm *lpm, uint32_t ip, uint8_t *next_hop)
                rte_lpm_lookup_bulk_func(lpm, ips, next_hops, n)

 static inline int
-rte_lpm_lookup_bulk_func(const struct rte_lpm *lpm, const uint32_t * ips,
-               uint16_t * next_hops, const unsigned n)
+rte_lpm_lookup_bulk_func(const struct rte_lpm *lpm, const uint32_t *ips,
+               uint32_t *next_hops, const unsigned n)
 {
        unsigned i;
        unsigned tbl24_indexes[n];
+       const uint32_t *ptbl;

        /* DEBUG: Check user input arguments. */
        RTE_LPM_RETURN_IF_TRUE(((lpm == NULL) || (ips == NULL) ||
                        (next_hops == NULL)), -EINVAL);

-       for (i = 0; i < n; i++) {
+       for (i = 0; i < n; i++)
                tbl24_indexes[i] = ips[i] >> 8;
-       }

        for (i = 0; i < n; i++) {
                /* Simply copy tbl24 entry to output */
-               next_hops[i] = *(const uint16_t *)&lpm->tbl24[tbl24_indexes[i]];
+               ptbl = (const uint32_t *)&lpm->tbl24[tbl24_indexes[i]];
+               next_hops[i] = *ptbl;

                /* Overwrite output with tbl8 entry if needed */
                if (unlikely((next_hops[i] & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
                                RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {

                        unsigned tbl8_index = (uint8_t)ips[i] +
-                                       ((uint8_t)next_hops[i] *
+                                       (((uint32_t)next_hops[i] & 0x00FFFFFF) *
                                         RTE_LPM_TBL8_GROUP_NUM_ENTRIES);

-                       next_hops[i] = *(const uint16_t *)&lpm->tbl8[tbl8_index];
+                       ptbl = (const uint32_t *)&lpm->tbl8[tbl8_index];
+                       next_hops[i] = *ptbl;
                }
        }
        return 0;
 }

 /* Mask four results. */
-#define         RTE_LPM_MASKX4_RES     UINT64_C(0x00ff00ff00ff00ff)
+#define         RTE_LPM_MASKX4_RES     UINT64_C(0x00ffffff00ffffff)

 /**
  * Lookup four IP addresses in an LPM table.
@@ -381,36 +473,33 @@ rte_lpm_lookup_bulk_func(const struct rte_lpm *lpm, const uint32_t * ips,
  *   if lookup would fail.
  */
 static inline void
-rte_lpm_lookupx4(const struct rte_lpm *lpm, __m128i ip, uint16_t hop[4],
-       uint16_t defv)
+rte_lpm_lookupx4(const struct rte_lpm *lpm, __m128i ip, uint32_t hop[4],
+       uint32_t defv)
 {
        __m128i i24;
        rte_xmm_t i8;
-       uint16_t tbl[4];
-       uint64_t idx, pt;
+       uint32_t tbl[4];
+       uint64_t idx, pt, pt2;
+       const uint32_t *ptbl;

        const __m128i mask8 =
                _mm_set_epi32(UINT8_MAX, UINT8_MAX, UINT8_MAX, UINT8_MAX);

        /*
-        * RTE_LPM_VALID_EXT_ENTRY_BITMASK for 4 LPM entries
-        * as one 64-bit value (0x0300030003000300).
+        * RTE_LPM_VALID_EXT_ENTRY_BITMASK for 2 LPM entries
+        * as one 64-bit value (0x0300000003000000).
         */
        const uint64_t mask_xv =
                ((uint64_t)RTE_LPM_VALID_EXT_ENTRY_BITMASK |
-               (uint64_t)RTE_LPM_VALID_EXT_ENTRY_BITMASK << 16 |
-               (uint64_t)RTE_LPM_VALID_EXT_ENTRY_BITMASK << 32 |
-               (uint64_t)RTE_LPM_VALID_EXT_ENTRY_BITMASK << 48);
+               (uint64_t)RTE_LPM_VALID_EXT_ENTRY_BITMASK << 32);

        /*
-        * RTE_LPM_LOOKUP_SUCCESS for 4 LPM entries
-        * as one 64-bit value (0x0100010001000100).
+        * RTE_LPM_LOOKUP_SUCCESS for 2 LPM entries
+        * as one 64-bit value (0x0100000001000000).
         */
        const uint64_t mask_v =
                ((uint64_t)RTE_LPM_LOOKUP_SUCCESS |
-               (uint64_t)RTE_LPM_LOOKUP_SUCCESS << 16 |
-               (uint64_t)RTE_LPM_LOOKUP_SUCCESS << 32 |
-               (uint64_t)RTE_LPM_LOOKUP_SUCCESS << 48);
+               (uint64_t)RTE_LPM_LOOKUP_SUCCESS << 32);

        /* get 4 indexes for tbl24[]. */
        i24 = _mm_srli_epi32(ip, CHAR_BIT);
@@ -419,26 +508,31 @@ rte_lpm_lookupx4(const struct rte_lpm *lpm, __m128i ip, uint16_t hop[4],
        idx = _mm_cvtsi128_si64(i24);
        i24 = _mm_srli_si128(i24, sizeof(uint64_t));

-       tbl[0] = *(const uint16_t *)&lpm->tbl24[(uint32_t)idx];
-       tbl[1] = *(const uint16_t *)&lpm->tbl24[idx >> 32];
+       ptbl = (const uint32_t *)&lpm->tbl24[(uint32_t)idx];
+       tbl[0] = *ptbl;
+       ptbl = (const uint32_t *)&lpm->tbl24[idx >> 32];
+       tbl[1] = *ptbl;

        idx = _mm_cvtsi128_si64(i24);

-       tbl[2] = *(const uint16_t *)&lpm->tbl24[(uint32_t)idx];
-       tbl[3] = *(const uint16_t *)&lpm->tbl24[idx >> 32];
+       ptbl = (const uint32_t *)&lpm->tbl24[(uint32_t)idx];
+       tbl[2] = *ptbl;
+       ptbl = (const uint32_t *)&lpm->tbl24[idx >> 32];
+       tbl[3] = *ptbl;

        /* get 4 indexes for tbl8[]. */
        i8.x = _mm_and_si128(ip, mask8);

        pt = (uint64_t)tbl[0] |
-               (uint64_t)tbl[1] << 16 |
-               (uint64_t)tbl[2] << 32 |
-               (uint64_t)tbl[3] << 48;
+               (uint64_t)tbl[1] << 32;
+       pt2 = (uint64_t)tbl[2] |
+               (uint64_t)tbl[3] << 32;

        /* search successfully finished for all 4 IP addresses. */
-       if (likely((pt & mask_xv) == mask_v)) {
-               uintptr_t ph = (uintptr_t)hop;
-               *(uint64_t *)ph = pt & RTE_LPM_MASKX4_RES;
+       if (likely((pt & mask_xv) == mask_v) &&
+                       likely((pt2 & mask_xv) == mask_v)) {
+               *(uint64_t *)hop = pt & RTE_LPM_MASKX4_RES;
+               *(uint64_t *)(hop + 2) = pt2 & RTE_LPM_MASKX4_RES;
                return;
        }

@@ -446,31 +540,35 @@ rte_lpm_lookupx4(const struct rte_lpm *lpm, __m128i ip, uint16_t hop[4],
                        RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {
                i8.u32[0] = i8.u32[0] +
                        (uint8_t)tbl[0] * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
-               tbl[0] = *(const uint16_t *)&lpm->tbl8[i8.u32[0]];
+               ptbl = (const uint32_t *)&lpm->tbl8[i8.u32[0]];
+               tbl[0] = *ptbl;
        }
-       if (unlikely((pt >> 16 & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
+       if (unlikely((pt >> 32 & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
                        RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {
                i8.u32[1] = i8.u32[1] +
                        (uint8_t)tbl[1] * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
-               tbl[1] = *(const uint16_t *)&lpm->tbl8[i8.u32[1]];
+               ptbl = (const uint32_t *)&lpm->tbl8[i8.u32[1]];
+               tbl[1] = *ptbl;
        }
-       if (unlikely((pt >> 32 & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
+       if (unlikely((pt2 & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
                        RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {
                i8.u32[2] = i8.u32[2] +
                        (uint8_t)tbl[2] * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
-               tbl[2] = *(const uint16_t *)&lpm->tbl8[i8.u32[2]];
+               ptbl = (const uint32_t *)&lpm->tbl8[i8.u32[2]];
+               tbl[2] = *ptbl;
        }
-       if (unlikely((pt >> 48 & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
+       if (unlikely((pt2 >> 32 & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
                        RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {
                i8.u32[3] = i8.u32[3] +
                        (uint8_t)tbl[3] * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
-               tbl[3] = *(const uint16_t *)&lpm->tbl8[i8.u32[3]];
+               ptbl = (const uint32_t *)&lpm->tbl8[i8.u32[3]];
+               tbl[3] = *ptbl;
        }

-       hop[0] = (tbl[0] & RTE_LPM_LOOKUP_SUCCESS) ? (uint8_t)tbl[0] : defv;
-       hop[1] = (tbl[1] & RTE_LPM_LOOKUP_SUCCESS) ? (uint8_t)tbl[1] : defv;
-       hop[2] = (tbl[2] & RTE_LPM_LOOKUP_SUCCESS) ? (uint8_t)tbl[2] : defv;
-       hop[3] = (tbl[3] & RTE_LPM_LOOKUP_SUCCESS) ? (uint8_t)tbl[3] : defv;
+       hop[0] = (tbl[0] & RTE_LPM_LOOKUP_SUCCESS) ? tbl[0] & 0x00FFFFFF : defv;
+       hop[1] = (tbl[1] & RTE_LPM_LOOKUP_SUCCESS) ? tbl[1] & 0x00FFFFFF : defv;
+       hop[2] = (tbl[2] & RTE_LPM_LOOKUP_SUCCESS) ? tbl[2] & 0x00FFFFFF : defv;
+       hop[3] = (tbl[3] & RTE_LPM_LOOKUP_SUCCESS) ? tbl[3] & 0x00FFFFFF : defv;
 }

 #ifdef __cplusplus
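
To make the fast-path changes above easier to review: with the little-endian
layout of rte_lpm_tbl_entry, bits 0-23 of the 32-bit word are next_hop (or the
tbl8 group index), bit 24 is valid and bit 25 is ext_entry/valid_group, which
is why RTE_LPM_VALID_EXT_ENTRY_BITMASK and RTE_LPM_LOOKUP_SUCCESS move from
0x0300/0x0100 to 0x03000000/0x01000000. A standalone sketch (not part of the
patch; assumes a little-endian build, where bit-fields fill from the least
significant bit) that checks this layout:

        #include <assert.h>
        #include <stdint.h>
        #include <string.h>

        struct tbl_entry {              /* mirrors rte_lpm_tbl_entry */
                uint32_t next_hop  :24;
                uint32_t valid     :1;
                uint32_t ext_entry :1;
                uint32_t depth     :6;
        };

        int main(void)
        {
                struct tbl_entry e = {
                        .next_hop = 100000, .valid = 1, .ext_entry = 1,
                };
                uint32_t raw;

                memcpy(&raw, &e, sizeof(raw));
                /* valid and ext_entry occupy bits 24 and 25 of the word... */
                assert((raw & 0x03000000) == 0x03000000);
                /* ...and the low 24 bits carry next_hop/gindex. */
                assert((raw & 0x00FFFFFF) == 100000);
                return 0;
        }
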
diff --git a/lib/librte_lpm/rte_lpm_version.map b/lib/librte_lpm/rte_lpm_version.map
index 70e1c05..c2d5207 100644
--- a/lib/librte_lpm/rte_lpm_version.map
+++ b/lib/librte_lpm/rte_lpm_version.map
@@ -1,23 +1,34 @@
 DPDK_2.0 {
        global:
+               rte_lpm_add;
+               rte_lpm_create;
+               rte_lpm_delete;
+               rte_lpm_delete_all;
+               rte_lpm_find_existing;
+               rte_lpm_free;
+               rte_lpm_is_rule_present;
+               rte_lpm6_add;
+               rte_lpm6_create;
+               rte_lpm6_delete;
+               rte_lpm6_delete_all;
+               rte_lpm6_delete_bulk_func;
+               rte_lpm6_find_existing;
+               rte_lpm6_free;
+               rte_lpm6_is_rule_present;
+               rte_lpm6_lookup;
+               rte_lpm6_lookup_bulk_func;

-       rte_lpm_add;
-       rte_lpm_create;
-       rte_lpm_delete;
-       rte_lpm_delete_all;
-       rte_lpm_find_existing;
-       rte_lpm_free;
-       rte_lpm_is_rule_present;
-       rte_lpm6_add;
-       rte_lpm6_create;
-       rte_lpm6_delete;
-       rte_lpm6_delete_all;
-       rte_lpm6_delete_bulk_func;
-       rte_lpm6_find_existing;
-       rte_lpm6_free;
-       rte_lpm6_is_rule_present;
-       rte_lpm6_lookup;
-       rte_lpm6_lookup_bulk_func;
-
-       local: *;
+       local:
+               *;
 };
+
+DPDK_2.3 {
+       global:
+               rte_lpm_add;
+               rte_lpm_create;
+               rte_lpm_delete;
+               rte_lpm_delete_all;
+               rte_lpm_find_existing;
+               rte_lpm_free;
+               rte_lpm_is_rule_present;
+} DPDK_2.0;
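
The new DPDK_2.3 node inherits from DPDK_2.0, so binaries linked against the
2.0 node keep resolving their symbols while newly built applications bind to
the re-listed 2.3 defaults. A minimal sketch of how both nodes can be served
side by side with the rte_compat.h versioning macros; the symbol "answer" is
hypothetical and only illustrates the mechanism, it is not part of this
patch:

    #include <rte_compat.h>

    int answer_v20(void);
    int answer_v20(void) { return 20; }      /* 2.0-era implementation  */
    VERSION_SYMBOL(answer, _v20, 2.0);       /* exports answer@DPDK_2.0 */

    int answer_v23(void);
    int answer_v23(void) { return 23; }      /* reworked implementation  */
    BIND_DEFAULT_SYMBOL(answer, _v23, 2.3);  /* exports answer@@DPDK_2.3 */
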
diff --git a/lib/librte_table/rte_table.h b/lib/librte_table/rte_table.h
index 720514e..6a00f7b 100644
--- a/lib/librte_table/rte_table.h
+++ b/lib/librte_table/rte_table.h
@@ -294,6 +294,7 @@ struct rte_table_ops {
        rte_table_op_stats_read f_stats;              /**< Stats */
 };

+
 #ifdef __cplusplus
 }
 #endif
diff --git a/lib/librte_table/rte_table_lpm.c b/lib/librte_table/rte_table_lpm.c
index 673f401..1c4769f 100644
--- a/lib/librte_table/rte_table_lpm.c
+++ b/lib/librte_table/rte_table_lpm.c
@@ -74,7 +74,8 @@ struct rte_table_lpm {

        /* Next Hop Table (NHT) */
        uint32_t nht_users[RTE_TABLE_LPM_MAX_NEXT_HOPS];
-       uint8_t nht[0] __rte_cache_aligned;
+
+       uint32_t nht[0] __rte_cache_aligned;
 };

 static void *
@@ -82,6 +83,8 @@ rte_table_lpm_create(void *params, int socket_id, uint32_t entry_size)
 {
        struct rte_table_lpm_params *p = (struct rte_table_lpm_params *) params;
        struct rte_table_lpm *lpm;
+       struct rte_lpm_config lpm_config;
+
        uint32_t total_size, nht_size;

        /* Check input parameters */
@@ -93,6 +96,10 @@ rte_table_lpm_create(void *params, int socket_id, uint32_t entry_size)
                RTE_LOG(ERR, TABLE, "%s: Invalid n_rules\n", __func__);
                return NULL;
        }
+       if (p->number_tbl8s == 0) {
+               RTE_LOG(ERR, TABLE, "%s: Invalid number_tbl8s\n", __func__);
+               return NULL;
+       }
        if (p->entry_unique_size == 0) {
                RTE_LOG(ERR, TABLE, "%s: Invalid entry_unique_size\n",
                        __func__);
@@ -123,7 +130,11 @@ rte_table_lpm_create(void *params, int socket_id, uint32_t entry_size)
        }

        /* LPM low-level table creation */
-       lpm->lpm = rte_lpm_create(p->name, socket_id, p->n_rules, 0);
+       lpm_config.max_rules = p->n_rules;
+       lpm_config.number_tbl8s = p->number_tbl8s;
+       lpm_config.flags = p->flags;
+       lpm->lpm = rte_lpm_create(p->name, socket_id, &lpm_config);
+
        if (lpm->lpm == NULL) {
                rte_free(lpm);
                RTE_LOG(ERR, TABLE, "Unable to create low-level LPM table\n");
@@ -178,7 +189,7 @@ nht_find_existing(struct rte_table_lpm *lpm, void *entry, uint32_t *pos)
        uint32_t i;

        for (i = 0; i < RTE_TABLE_LPM_MAX_NEXT_HOPS; i++) {
-               uint8_t *nht_entry = &lpm->nht[i * lpm->entry_size];
+               uint32_t *nht_entry = &lpm->nht[i * lpm->entry_size];

                if ((lpm->nht_users[i] > 0) && (memcmp(nht_entry, entry,
                        lpm->entry_unique_size) == 0)) {
@@ -202,7 +213,7 @@ rte_table_lpm_entry_add(
        struct rte_table_lpm_key *ip_prefix = (struct rte_table_lpm_key *) key;
        uint32_t nht_pos, nht_pos0_valid;
        int status;
-       uint8_t nht_pos0 = 0;
+       uint32_t nht_pos0 = 0;

        /* Check input parameters */
        if (lpm == NULL) {
@@ -232,7 +243,8 @@ rte_table_lpm_entry_add(

        /* Find existing or free NHT entry */
        if (nht_find_existing(lpm, entry, &nht_pos) == 0) {
-               uint8_t *nht_entry;
+
+               uint32_t *nht_entry;

                if (nht_find_free(lpm, &nht_pos) == 0) {
                        RTE_LOG(ERR, TABLE, "%s: NHT full\n", __func__);
@@ -245,7 +257,7 @@ rte_table_lpm_entry_add(

        /* Add rule to low level LPM table */
        if (rte_lpm_add(lpm->lpm, ip_prefix->ip, ip_prefix->depth,
-               (uint8_t) nht_pos) < 0) {
+               nht_pos) < 0) {
                RTE_LOG(ERR, TABLE, "%s: LPM rule add failed\n", __func__);
                return -1;
        }
@@ -268,7 +280,8 @@ rte_table_lpm_entry_delete(
 {
        struct rte_table_lpm *lpm = (struct rte_table_lpm *) table;
        struct rte_table_lpm_key *ip_prefix = (struct rte_table_lpm_key *) key;
-       uint8_t nht_pos;
+
+       uint32_t nht_pos;
        int status;

        /* Check input parameters */
@@ -330,6 +343,7 @@ rte_table_lpm_lookup(
        uint32_t i;

        __rte_unused uint32_t n_pkts_in = __builtin_popcountll(pkts_mask);
+
        RTE_TABLE_LPM_STATS_PKTS_IN_ADD(lpm, n_pkts_in);

        pkts_out_mask = 0;
@@ -342,7 +356,7 @@ rte_table_lpm_lookup(
                        uint32_t ip = rte_bswap32(
                                RTE_MBUF_METADATA_UINT32(pkt, lpm->offset));
                        int status;
-                       uint8_t nht_pos;
+                       uint32_t nht_pos;

                        status = rte_lpm_lookup(lpm->lpm, ip, &nht_pos);
                        if (status == 0) {
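
The NHT now holds 32-bit entries and next-hop positions travel as uint32_t
end to end. A minimal caller sketch, assuming the widened lookup prototype
int rte_lpm_lookup(struct rte_lpm *, uint32_t, uint32_t *):

    #include <stdint.h>
    #include <rte_lpm.h>

    static uint32_t
    resolve_next_hop(struct rte_lpm *plpm, uint32_t ip_host_order)
    {
            uint32_t next_hop;      /* widened from uint8_t by this change */

            if (rte_lpm_lookup(plpm, ip_host_order, &next_hop) == 0)
                    return next_hop;        /* up to 24 significant bits */
            return UINT32_MAX;              /* illustrative miss marker  */
    }
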
diff --git a/lib/librte_table/rte_table_lpm.h b/lib/librte_table/rte_table_lpm.h
index 06e8410..022c2f3 100644
--- a/lib/librte_table/rte_table_lpm.h
+++ b/lib/librte_table/rte_table_lpm.h
@@ -83,6 +83,10 @@ struct rte_table_lpm_params {
        /** Maximum number of LPM rules (i.e. IP routes) */
        uint32_t n_rules;

+       uint32_t number_tbl8s;  /**< Number of tbl8s in the low-level LPM table */
+
+       int flags;              /**< Option flags for the low-level LPM table */
+
        /** Number of bytes at the start of the table entry that uniquely
        identify the entry. Cannot be bigger than table entry size. */
        uint32_t entry_unique_size;
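
For completeness, a sketch of populating the extended parameter structure;
every value below, including the name and the metadata offset, is
illustrative only:

    #include <stdint.h>
    #include <rte_table_lpm.h>

    static struct rte_table_lpm_params params = {
            .name = "lpm_table",                    /* placeholder name      */
            .n_rules = 1 << 16,
            .number_tbl8s = 1 << 8,                 /* new: tbl8s to allocate */
            .flags = 0,                             /* new: rte_lpm_config flags */
            .entry_unique_size = sizeof(uint32_t),
            .offset = 128,          /* metadata offset of the dst IPv4 address */
    };
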
-- 
1.9.1
