[dpdk-dev] [PATCH v4] test/hash: improve hash unit tests

2015-07-10 Thread Thomas Monjalon
2015-07-10 10:11, Bruce Richardson:
> On Thu, Jul 09, 2015 at 05:54:30PM +0100, Pablo de Lara wrote:
> > Add new unit test for calculating the average table utilization,
> > using random keys, based on the number of entries that can be added
> > until we encounter one that cannot be added (bucket is full).
> > 
> > Also, replace current hash_perf unit test to see performance more clear.
> s/clear/clearly/
> 
> > The current hash_perf unit test takes too long and adds keys that
> > may or may not fit in the table, and looks up/deletes keys that may
> > not be in the table. This new unit test gets a set of keys that we
> > know fit in the table, and then measures the time to add/look up/delete
> > them.
> > 
> > Mind that performance numbers include time to take a random key
> s/Mind/Note/
> 
> > from a pre-made array of keys, plus a quick check of the return value.
> > Also, as stated above, expect higher numbers, as all operations
> > in the new unit tests will be successful, which means that
> > it will take more time than mixing both successful and unsuccessful
> > operations.
> > 
> > Signed-off-by: Pablo de Lara 
> 
> Looks good, Pablo. 
> Thomas, perhaps you could make the above minor changes to
> the commit log when applying the patch.
> 
> Acked-by: Bruce Richardson 

Applied with above comments, thanks



[dpdk-dev] [PATCH v4] test/hash: improve hash unit tests

2015-07-10 Thread Bruce Richardson
On Thu, Jul 09, 2015 at 05:54:30PM +0100, Pablo de Lara wrote:
> Add new unit test for calculating the average table utilization,
> using random keys, based on the number of entries that can be added
> until we encounter one that cannot be added (bucket is full).
> 
> Also, replace current hash_perf unit test to see performance more clear.
s/clear/clearly/

> The current hash_perf unit test takes too long and adds keys that
> may or may not fit in the table, and looks up/deletes keys that may
> not be in the table. This new unit test gets a set of keys that we
> know fit in the table, and then measures the time to add/look up/delete
> them.
> 
> Mind that performance numbers include time to take a random key
s/Mind/Note/

> from a pre-made array of keys, plus a quick check of the return value.
> Also, as stated above, expect higher numbers, as all operations
> in the new unit tests will be successful, which means that
> it will take more time than mixing both successful and unsuccessful
> operations.
> 
> Signed-off-by: Pablo de Lara 

Looks good, Pablo. 
Thomas, perhaps you could make the above minor changes to
the commit log when applying the patch.

Acked-by: Bruce Richardson 

> ---
>  app/test/test_hash.c  |  66 +++-
>  app/test/test_hash_perf.c | 923 --
>  2 files changed, 458 insertions(+), 531 deletions(-)
> 
> diff --git a/app/test/test_hash.c b/app/test/test_hash.c
> index 4300de9..7c71ed6 100644
> --- a/app/test/test_hash.c
> +++ b/app/test/test_hash.c
> @@ -190,7 +190,7 @@ test_crc32_hash_alg_equiv(void)
>   unsigned i, j;
>   size_t data_len;
>  
> - printf("# CRC32 implementations equivalence test\n");
> + printf("\n# CRC32 implementations equivalence test\n");
>   for (i = 0; i < CRC32_ITERATIONS; i++) {
>   /* Randomizing data_len of data set */
>   data_len = (size_t) ((rte_rand() % sizeof(data64)) + 1);
> @@ -785,7 +785,7 @@ fbk_hash_unit_test(void)
>  
>   /* Try creating hashes with invalid parameters */
>   printf("# Testing hash creation with invalid parameters "
> - "- expert error msgs\n");
> + "- expect error msgs\n");
>   handle = rte_fbk_hash_create(&invalid_params_1);
>   RETURN_IF_ERROR_FBK(handle != NULL, "fbk hash creation should have failed");
>  
> @@ -1087,6 +1087,7 @@ static int test_hash_creation_with_bad_parameters(void)
>   }
>  
>   rte_hash_free(handle);
> + printf("# Test successful. No more errors expected\n");
>  
>   return 0;
>  }
> @@ -1147,6 +1148,65 @@ test_hash_creation_with_good_parameters(void)
>   return 0;
>  }
>  
> +#define ITERATIONS 50
> +/*
> + * Test to see the average table utilization (entries added/max entries)
> + * before hitting a random entry that cannot be added
> + */
> +static int test_average_table_utilization(void)
> +{
> + struct rte_hash *handle;
> + uint8_t simple_key[RTE_HASH_KEY_LENGTH_MAX];
> + unsigned i, j;
> + unsigned added_keys, average_keys_added = 0;
> + int ret;
> +
> + printf("\n# Running test to determine average utilization"
> +"\n  before adding elements begins to fail\n");
> + printf("Measuring performance, please wait");
> + fflush(stdout);
> + ut_params.entries = 1 << 20;
> + ut_params.name = "test_average_utilization";
> + ut_params.hash_func = rte_jhash;
> + handle = rte_hash_create(&ut_params);
> + RETURN_IF_ERROR(handle == NULL, "hash creation failed");
> +
> + for (j = 0; j < ITERATIONS; j++) {
> + ret = 0;
> + /* Add random entries until key cannot be added */
> + for (added_keys = 0; ret >= 0; added_keys++) {
> + for (i = 0; i < ut_params.key_len; i++)
> + simple_key[i] = rte_rand() % 255;
> + ret = rte_hash_add_key(handle, simple_key);
> + }
> + if (ret != -ENOSPC) {
> + printf("Unexpected error when adding keys\n");
> + rte_hash_free(handle);
> + return -1;
> + }
> +
> + average_keys_added += added_keys;
> +
> + /* Reset the table */
> + rte_hash_free(handle);
> + handle = rte_hash_create(&ut_params);
> + RETURN_IF_ERROR(handle == NULL, "hash creation failed");
> +
> + /* Print a dot to show progress on operations */
> + printf(".");
> + fflush(stdout);
> + }
> +
> + average_keys_added /= ITERATIONS;
> +
> + printf("\nAverage table utilization = %.2f%% (%u/%u)\n",
> + ((double) average_keys_added / ut_params.entries * 100),
> + average_keys_added, ut_params.entries);
> + rte_hash_free(handle);
> +
> + return 0;
> +}
> +
>  static uint8_t key[16] = {0x00, 0x01, 0x02, 0x03,
>   0x04, 0x05, 0x06, 0x07,
>   0x08, 0x09, 0x0a, 

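The rewritten test_hash_perf.c is not quoted in full in this thread, but the commit message's idea of "a set of keys that we know fit in the table" can be sketched roughly as follows. This is a minimal, hypothetical illustration, not the patch's own code; KEY_LEN, NUM_KEYS and setup_keys() are made-up names for the example. Each key is inserted as soon as it is generated and re-rolled if the insert fails, so the final array contains only keys the table has accepted:

#include <stdint.h>
#include <rte_hash.h>
#include <rte_random.h>

/* Illustrative sizes only (not taken from the patch) */
#define KEY_LEN   16
#define NUM_KEYS  (1 << 16)        /* well below the table capacity */

static uint8_t keys[NUM_KEYS][KEY_LEN];

/*
 * Fill keys[] with random keys that are known to fit in hash table 'h'
 * (assumed empty and sized well above NUM_KEYS): each key is added
 * right away and regenerated if the add fails.
 */
static int
setup_keys(struct rte_hash *h)
{
	unsigned i, j;
	int ret;

	for (i = 0; i < NUM_KEYS; i++) {
		do {
			for (j = 0; j < KEY_LEN; j++)
				keys[i][j] = (uint8_t)rte_rand();
			ret = rte_hash_add_key(h, keys[i]);
		} while (ret < 0);	/* re-roll keys that do not fit */
	}
	return 0;
}

Preparing the key set this way is what lets the timed add/look up/delete passes succeed on every operation, which is the behaviour the commit message describes.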
[dpdk-dev] [PATCH v4] test/hash: improve hash unit tests

2015-07-09 Thread Pablo de Lara
Add new unit test for calculating the average table utilization,
using random keys, based on the number of entries that can be added
until we encounter one that cannot be added (bucket is full).

Also, replace current hash_perf unit test to see performance more clear.
The current hash_perf unit test takes too long and adds keys that
may or may not fit in the table, and looks up/deletes keys that may
not be in the table. This new unit test gets a set of keys that we
know fit in the table, and then measures the time to add/look up/delete
them.

Mind that performance numbers include time to take a random key
from a pre-made array of keys, plus a quick check of the return value.
Also, as stated above, expect higher numbers, as all operations
in the new unit tests will be successful, which means that
it will take more time than mixing both successful and unsuccessful
operations.

Signed-off-by: Pablo de Lara 
---
 app/test/test_hash.c  |  66 +++-
 app/test/test_hash_perf.c | 923 --
 2 files changed, 458 insertions(+), 531 deletions(-)

diff --git a/app/test/test_hash.c b/app/test/test_hash.c
index 4300de9..7c71ed6 100644
--- a/app/test/test_hash.c
+++ b/app/test/test_hash.c
@@ -190,7 +190,7 @@ test_crc32_hash_alg_equiv(void)
unsigned i, j;
size_t data_len;

-   printf("# CRC32 implementations equivalence test\n");
+   printf("\n# CRC32 implementations equivalence test\n");
for (i = 0; i < CRC32_ITERATIONS; i++) {
/* Randomizing data_len of data set */
data_len = (size_t) ((rte_rand() % sizeof(data64)) + 1);
@@ -785,7 +785,7 @@ fbk_hash_unit_test(void)

/* Try creating hashes with invalid parameters */
printf("# Testing hash creation with invalid parameters "
-   "- expert error msgs\n");
+   "- expect error msgs\n");
handle = rte_fbk_hash_create(&invalid_params_1);
RETURN_IF_ERROR_FBK(handle != NULL, "fbk hash creation should have failed");

@@ -1087,6 +1087,7 @@ static int test_hash_creation_with_bad_parameters(void)
}

rte_hash_free(handle);
+   printf("# Test successful. No more errors expected\n");

return 0;
 }
@@ -1147,6 +1148,65 @@ test_hash_creation_with_good_parameters(void)
return 0;
 }

+#define ITERATIONS 50
+/*
+ * Test to see the average table utilization (entries added/max entries)
+ * before hitting a random entry that cannot be added
+ */
+static int test_average_table_utilization(void)
+{
+   struct rte_hash *handle;
+   uint8_t simple_key[RTE_HASH_KEY_LENGTH_MAX];
+   unsigned i, j;
+   unsigned added_keys, average_keys_added = 0;
+   int ret;
+
+   printf("\n# Running test to determine average utilization"
+  "\n  before adding elements begins to fail\n");
+   printf("Measuring performance, please wait");
+   fflush(stdout);
+   ut_params.entries = 1 << 20;
+   ut_params.name = "test_average_utilization";
+   ut_params.hash_func = rte_jhash;
+   handle = rte_hash_create(&ut_params);
+   RETURN_IF_ERROR(handle == NULL, "hash creation failed");
+
+   for (j = 0; j < ITERATIONS; j++) {
+   ret = 0;
+   /* Add random entries until key cannot be added */
+   for (added_keys = 0; ret >= 0; added_keys++) {
+   for (i = 0; i < ut_params.key_len; i++)
+   simple_key[i] = rte_rand() % 255;
+   ret = rte_hash_add_key(handle, simple_key);
+   }
+   if (ret != -ENOSPC) {
+   printf("Unexpected error when adding keys\n");
+   rte_hash_free(handle);
+   return -1;
+   }
+
+   average_keys_added += added_keys;
+
+   /* Reset the table */
+   rte_hash_free(handle);
+   handle = rte_hash_create(&ut_params);
+   RETURN_IF_ERROR(handle == NULL, "hash creation failed");
+
+   /* Print a dot to show progress on operations */
+   printf(".");
+   fflush(stdout);
+   }
+
+   average_keys_added /= ITERATIONS;
+
+   printf("\nAverage table utilization = %.2f%% (%u/%u)\n",
+   ((double) average_keys_added / ut_params.entries * 100),
+   average_keys_added, ut_params.entries);
+   rte_hash_free(handle);
+
+   return 0;
+}
+
 static uint8_t key[16] = {0x00, 0x01, 0x02, 0x03,
0x04, 0x05, 0x06, 0x07,
0x08, 0x09, 0x0a, 0x0b,
@@ -1405,6 +1465,8 @@ test_hash(void)
return -1;
if (test_hash_creation_with_good_parameters() < 0)
return -1;
+   if (test_average_table_utilization() < 0)
+   return -1;

run_hash_func_tests();

diff --git a/app/test/test_hash_perf.c b/app/test/test_hash_perf.c
index d0e5ce0..a3876c1 100644
---
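The test_hash_perf.c diff is truncated above. As a rough, hypothetical sketch of the measurement style the commit message describes (each timed figure covers fetching a key from the pre-made array, the hash operation itself and a quick return-value check), the loops could look something like the following. The table parameters, MAX_KEYS and the function name time_hash_ops() are assumptions for illustration, not the actual contents of the patch:

/*
 * Sketch only: time add/lookup/delete over a pre-generated key set.
 * Assumes the EAL has been initialised.
 */
#include <stdio.h>
#include <stdint.h>
#include <rte_hash.h>
#include <rte_jhash.h>
#include <rte_cycles.h>
#include <rte_random.h>
#include <rte_lcore.h>

#define KEY_LEN   16
#define MAX_KEYS  (1 << 16)   /* well below the table capacity */

static uint8_t keys[MAX_KEYS][KEY_LEN];

static int
time_hash_ops(void)
{
	struct rte_hash_parameters params = {
		.name = "perf_sketch",
		.entries = 1 << 20,
		.key_len = KEY_LEN,
		.hash_func = rte_jhash,
		.hash_func_init_val = 0,
		.socket_id = rte_socket_id(),
	};
	struct rte_hash *h = rte_hash_create(&params);
	uint64_t start, end;
	unsigned i, j;
	int ret;

	if (h == NULL)
		return -1;

	/* Pre-generate random keys (the real test also ensures they fit). */
	for (i = 0; i < MAX_KEYS; i++)
		for (j = 0; j < KEY_LEN; j++)
			keys[i][j] = (uint8_t)rte_rand();

	/* Timed add: includes fetching keys[i] and checking the result. */
	start = rte_rdtsc();
	for (i = 0; i < MAX_KEYS; i++) {
		ret = rte_hash_add_key(h, keys[i]);
		if (ret < 0)
			goto err;
	}
	end = rte_rdtsc();
	printf("add:    %.2f cycles/op\n", (double)(end - start) / MAX_KEYS);

	/* Timed lookup. */
	start = rte_rdtsc();
	for (i = 0; i < MAX_KEYS; i++) {
		ret = rte_hash_lookup(h, keys[i]);
		if (ret < 0)
			goto err;
	}
	end = rte_rdtsc();
	printf("lookup: %.2f cycles/op\n", (double)(end - start) / MAX_KEYS);

	/* Timed delete. */
	start = rte_rdtsc();
	for (i = 0; i < MAX_KEYS; i++) {
		ret = rte_hash_del_key(h, keys[i]);
		if (ret < 0)
			goto err;
	}
	end = rte_rdtsc();
	printf("delete: %.2f cycles/op\n", (double)(end - start) / MAX_KEYS);

	rte_hash_free(h);
	return 0;
err:
	rte_hash_free(h);
	return -1;
}

If wall-clock figures are preferred over raw cycles, the counts can be divided by rte_get_tsc_hz(); and, as the commit message notes, numbers gathered this way are expected to be higher than figures that mix successful and unsuccessful operations, since every operation here does the full amount of work.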