The various hugetlb tests don't check whether they are trying to allocate
more than SHMMAX bytes, nor do they compensate for multiple processes --
each process tries to grab half of the available huge memory, so eventually
a request fails.
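For reference, this is the sizing each of these tests does today (it is the
line the hunks below remove); nothing caps the result at SHMMAX or splits it
among parallel copies of the test:

    /* current per-test sizing: half of all huge memory, in bytes */
    huge_pages_shm_to_be_allocated =
            ( get_no_of_hugepages() * hugepages_size() * 1024 ) / 2;

So two copies together already ask for all of the huge memory, and any
additional copy (or a small SHMMAX) makes the shmget() call fail.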
I suggest adding a function to lib/system_specific_hugepages_info.c and
include/system_specific_hugepages_info.h, and replacing the existing
calculation with a call to the new function in the following tests
(a usage sketch follows the list):
testcases/kernel/mem/hugetlb/hugeshmat/hugeshmat01.c
testcases/kernel/mem/hugetlb/hugeshmat/hugeshmat02.c
testcases/kernel/mem/hugetlb/hugeshmat/hugeshmat03.c
testcases/kernel/mem/hugetlb/hugeshmctl/hugeshmctl01.c
testcases/kernel/mem/hugetlb/hugeshmctl/hugeshmctl02.c
testcases/kernel/mem/hugetlb/hugeshmctl/hugeshmctl03.c
testcases/kernel/mem/hugetlb/hugeshmdt/hugeshmdt01.c
testcases/kernel/mem/hugetlb/hugeshmget/hugeshmget01.c
testcases/kernel/mem/hugetlb/hugeshmget/hugeshmget02.c
testcases/kernel/mem/hugetlb/hugeshmget/hugeshmget03.c
testcases/kernel/mem/hugetlb/hugeshmget/hugeshmget05.c
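The idea is that each test asks the new helper for its share instead of
computing the size itself. A call-site sketch (the identifiers are meant to
mirror the existing tests, and the numbers in the comments are only an
example: 512 huge pages of 2048 kB each, with 4 copies running in parallel):

    /* ask for this copy's share of half the huge memory */
    huge_pages_shm_to_be_allocated = calculate_huge_pages_shm_to_be_allocated(0.5);

    /* with the example numbers: 0.5 * 512 * 2048 kB = 512 MB to draw from,
       divided by STD_COPIES (4) = 128 MB per process, then clamped to
       SHMMAX, so every copy's segment can be created */
    shm_id_1 = shmget(shmkey, huge_pages_shm_to_be_allocated,
                      SHM_HUGETLB | SHM_RW | IPC_CREAT | IPC_EXCL);

Because every copy requests a slice that fits under SHMMAX and under its
share of the huge pool, the requests no longer collide with each other.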
With this change you can run all the tests with multiple forked processes,
except hugeshmget02 and hugeshmget03.
hugeshmget02 creates a large shared memory segment, then loops, trying to
shmget() it in various invalid ways. Once it has finished looping, it
deletes the segment. If you run it with multiple parallel processes, once
one process finishes its loop and deletes the segment, the others fail.
hugeshmget03 allocates all of the available huge memory, then loops, trying
to allocate one more chunk and expecting an error. It finally deallocates
all the memory. However, running this test in multiple parallel processes
can cause false errors as follows:
- Together, the processes allocate all of the huge memory.
- Process 1 tries to allocate more. It fails, finishes its loops, and then
releases all the memory it allocated.
- Process 2, which hasn't finished all of its loops, then tries to allocate
more and succeeds, which is reported as a failure.
The patches:
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
diff -r 90778db5a87c include/system_specific_hugepages_info.h
--- a/include/system_specific_hugepages_info.h Mon Sep 13 11:19:49 2010 -0400
+++ b/include/system_specific_hugepages_info.h Mon Sep 13 14:51:14 2010 -0400
@@ -24,5 +24,13 @@
int get_no_of_hugepages(void);
/*Returns Hugepages Size from /proc/meminfo*/
int hugepages_size(void);
+
+/* Returns number of bytes to allocate, based on total huge memory,
+ a fraction, SHMMAX, and the number of parallel processes that will be
+ allocating memory. fraction is the fraction of total memory to use.
+ E.g., a fraction of 0.5 will allocate from 1/2 the total, 0.333 would use 1/3.
+ A fraction of 1 would allocate from all the huge memory.
+*/
+size_t calculate_huge_pages_shm_to_be_allocated( double fraction );
#endif
diff -r 90778db5a87c lib/system_specific_hugepages_info.c
--- a/lib/system_specific_hugepages_info.c Mon Sep 13 11:19:49 2010 -0400
+++ b/lib/system_specific_hugepages_info.c Mon Sep 13 14:51:14 2010 -0400
@@ -26,8 +26,10 @@
*/
#include <fcntl.h>
+#include <linux/shm.h>
#include <sys/types.h>
#include <test.h>
+#include "usctest.h"
#define BUFSIZE 512
@@ -76,3 +78,29 @@
#endif
}
+/* Calculate how much memory to allocate based on the available huge memory and
+ how many parallel processes are going to allocate it.
+*/
+size_t calculate_huge_pages_shm_to_be_allocated( double fraction )
+{
+#ifdef __linux__
+ size_t bytes_to_allocate;
+
+ bytes_to_allocate = (size_t)(fraction * (double)get_no_of_hugepages() * (double)hugepages_size() * 1024.0);
+
+ /* Now divide that up amongst the processes */
+ if( STD_COPIES > 1 )
+ bytes_to_allocate /= (size_t)STD_COPIES;
+ /* and limit to SHMMAX */
+ if( bytes_to_allocate > SHMMAX )
+ bytes_to_allocate = (size_t)SHMMAX;
+
+ #ifdef DEBUG
+ printf( "calculate_huge_pages_shm_to_be_allocated(%lf) returns %lu
\n", divider, (unsigned long)bytes_to_allocate );
+ #endif
+
+ return bytes_to_allocate;
+#else
+ return (size_t)-1;
+#endif
+}
diff -r 90778db5a87c testcases/kernel/mem/hugetlb/hugeshmat/hugeshmat01.c
--- a/testcases/kernel/mem/hugetlb/hugeshmat/hugeshmat01.c Mon Sep 13 11:19:49 2010 -0400
+++ b/testcases/kernel/mem/hugetlb/hugeshmat/hugeshmat01.c Mon Sep 13 14:51:14 2010 -0400
@@ -106,8 +106,8 @@
if ( get_no_of_hugepages() <= 0 || hugepages_size() <= 0 )
tst_brkm(TCONF, tst_exit, "Not enough available Hugepages");
- else
- huge_pages_shm_to_be_allocated = ( get_no_of_hugepages() * hugepages_size() * 1024) / 2 ;
+ else
+ huge_pages_shm_to_be_allocated = calculate_huge_pages_shm_to_be_allocated(0.5);
setup(); /* global setup */
diff -r 90778db5a87c testcases/kernel/mem/hugetlb/hugeshmat/hugeshmat02.c
--- a/testcases/kernel/mem/hugetlb/hugeshmat/hugeshmat02.c Mon Sep 13 11:19:49 2010 -0400
+++ b/testcases/kernel/mem/hugetlb/hugeshmat/hugeshmat02.c Mon Sep 13 14:51:14 2010 -0400
@@ -103,8 +103,8 @@
if ( get_no_of_hugepages() <= 0 || hugepages_size() <= 0 )
tst_brkm(TCONF, tst_exit, "Not enough available Hugepages");
- else
- huge_pages_shm_to_be_allocated = ( get_no_of_hugepages() * hugepages_size() * 1024) / 2 ;
+ else
+ huge_pages_shm_to_be_allocated = calculate_huge_pages_shm_to_be_allocated(0.5);
setup(); /* global setup */
diff -r 90778db5a87c testcases/kernel/mem/hugetlb/hugeshmat/hugeshmat03.c
--- a/testcases/kernel/mem/hugetlb/hugeshmat/hugeshmat03.c Mon Sep 13 11:19:49 2010 -0400
+++ b/testcases/kernel/mem/hugetlb/hugeshmat/hugeshmat03.c Mon Sep 13 14:51:14 2010 -0400
@@ -87,8 +87,8 @@
if ( get_no_of_hugepages() <= 0 || hugepages_size() <= 0 )
tst_brkm(TCONF, cleanup, "Not enough available Hugepages");
- else
- huge_pages_shm_to_be_allocated = ( get_no_of_hugepages() * hugepages_size() * 1024) / 2 ;
+ else
+ huge_pages_shm_to_be_allocated = calculate_huge_pages_shm_to_be_allocated(0.5);
setup(); /* global setup */
diff -r 90778db5a87c testcases/kernel/mem/hugetlb/hugeshmctl/hugeshmctl01.c
--- a/testcases/kernel/mem/hugetlb/hugeshmctl/hugeshmctl01.c Mon Sep 13 11:19:49 2010 -0400
+++ b/testcases/kernel/mem/hugetlb/hugeshmctl/hugeshmctl01.c Mon Sep 13 14:51:14 2010 -0400
@@ -131,8 +131,8 @@
if ( get_no_of_hugepages() <= 0 || hugepages_size() <= 0 )
tst_brkm(TCONF, tst_exit, "Not enough available Hugepages");
- else
- huge_pages_shm_to_be_allocated = ( get_no_of_hugepages() * hugepages_size() * 1024) / 2 ;
+ else
+ huge_pages_shm_to_be_allocated = calculate_huge_pages_shm_to_be_allocated(0.5);
setup(); /* global setup */
diff -r 90778db5a87c testcases/kernel/mem/hugetlb/hugeshmctl/hugeshmctl02.c
--- a/testcases/kernel/mem/hugetlb/hugeshmctl/hugeshmctl02.c Mon Sep 13 11:19:49 2010 -0400
+++ b/testcases/kernel/mem/hugetlb/hugeshmctl/hugeshmctl02.c Mon Sep 13 14:51:14 2010 -0400
@@ -103,8 +103,8 @@
if ( get_no_of_hugepages() <= 0 || hugepages_size() <= 0 )
tst_brkm(TCONF, tst_exit, "Not enough available Hugepages");
- else
- huge_pages_shm_to_be_allocated = ( get_no_of_hugepages() * hugepages_size() * 1024) / 2 ;
+ else
+ huge_pages_shm_to_be_allocated = calculate_huge_pages_shm_to_be_allocated(0.5);
setup(); /* global setup */
diff -r 90778db5a87c testcases/kernel/mem/hugetlb/hugeshmctl/hugeshmctl03.c
--- a/testcases/kernel/mem/hugetlb/hugeshmctl/hugeshmctl03.c Mon Sep 13 11:19:49 2010 -0400
+++ b/testcases/kernel/mem/hugetlb/hugeshmctl/hugeshmctl03.c Mon Sep 13 14:51:14 2010 -0400
@@ -106,8 +106,8 @@
if ( get_no_of_hugepages() <= 0 || hugepages_size() <= 0 )
tst_brkm(TCONF, cleanup, "Not enough available Hugepages");
- else
- huge_pages_shm_to_be_allocated = ( get_no_of_hugepages() * hugepages_size() * 1024) / 2 ;
+ else
+ huge_pages_shm_to_be_allocated = calculate_huge_pages_shm_to_be_allocated(0.5);
setup(); /* global setup */
diff -r 90778db5a87c testcases/kernel/mem/hugetlb/hugeshmdt/hugeshmdt01.c
--- a/testcases/kernel/mem/hugetlb/hugeshmdt/hugeshmdt01.c Mon Sep 13 11:19:49 2010 -0400
+++ b/testcases/kernel/mem/hugetlb/hugeshmdt/hugeshmdt01.c Mon Sep 13 14:51:14 2010 -0400
@@ -88,8 +88,8 @@
if ( get_no_of_hugepages() <= 0 || hugepages_size() <= 0 )
tst_brkm(TCONF, tst_exit, "Not enough available Hugepages");
- else
- huge_pages_shm_to_be_allocated = ( get_no_of_hugepages() * hugepages_size() * 1024) / 2 ;
+ else
+ huge_pages_shm_to_be_allocated = calculate_huge_pages_shm_to_be_allocated(0.5);
setup(); /* global setup */
diff -r 90778db5a87c testcases/kernel/mem/hugetlb/hugeshmget/hugeshmget01.c
--- a/testcases/kernel/mem/hugetlb/hugeshmget/hugeshmget01.c Mon Sep 13 11:19:49 2010 -0400
+++ b/testcases/kernel/mem/hugetlb/hugeshmget/hugeshmget01.c Mon Sep 13 14:51:14 2010 -0400
@@ -81,9 +81,9 @@
/* The following loop checks looping state if -i option given */
if ( get_no_of_hugepages() <= 0 || hugepages_size() <= 0 )
tst_brkm(TCONF, tst_exit, "Not enough available Hugepages");
- else
- huge_pages_shm_to_be_allocated = ( get_no_of_hugepages() * hugepages_size() * 1024) / 2 ;
-
+ else
+ huge_pages_shm_to_be_allocated = calculate_huge_pages_shm_to_be_allocated(0.5);
+
setup(); /* global setup */
for (lc = 0; TEST_LOOPING(lc); lc++) {
diff -r 90778db5a87c testcases/kernel/mem/hugetlb/hugeshmget/hugeshmget02.c
--- a/testcases/kernel/mem/hugetlb/hugeshmget/hugeshmget02.c Mon Sep 13 11:19:49 2010 -0400
+++ b/testcases/kernel/mem/hugetlb/hugeshmget/hugeshmget02.c Mon Sep 13 14:51:14 2010 -0400
@@ -85,9 +85,9 @@
if ( get_no_of_hugepages() <= 0 || hugepages_size() <= 0 )
tst_brkm(TCONF, tst_exit, "Not enough available Hugepages");
- else
- huge_pages_shm_to_be_allocated = ( get_no_of_hugepages() * hugepages_size() * 1024) / 2 ;
-
+ else
+ huge_pages_shm_to_be_allocated = calculate_huge_pages_shm_to_be_allocated(0.5);
+
struct test_case_t TC[] = {
/* EINVAL - size is 0 */
{&shmkey2, 0, SHM_HUGETLB | IPC_CREAT | IPC_EXCL | SHM_RW,
EINVAL},
diff -r 90778db5a87c testcases/kernel/mem/hugetlb/hugeshmget/hugeshmget03.c
--- a/testcases/kernel/mem/hugetlb/hugeshmget/hugeshmget03.c Mon Sep 13 11:19:49 2010 -0400
+++ b/testcases/kernel/mem/hugetlb/hugeshmget/hugeshmget03.c Mon Sep 13 14:51:14 2010 -0400
@@ -86,8 +86,8 @@
/* The following loop checks looping state if -i option given */
if ( get_no_of_hugepages() <= 0 || hugepages_size() <= 0 )
tst_brkm(TCONF, tst_exit, "Not enough available Hugepages");
- else
- huge_pages_shm_to_be_allocated = ( get_no_of_hugepages() * hugepages_size() * 1024) / 2 ;
+ else
+ huge_pages_shm_to_be_allocated = calculate_huge_pages_shm_to_be_allocated(0.5);
setup2(huge_pages_shm_to_be_allocated); /* local setup */
diff -r 90778db5a87c testcases/kernel/mem/hugetlb/hugeshmget/hugeshmget05.c
--- a/testcases/kernel/mem/hugetlb/hugeshmget/hugeshmget05.c Mon Sep 13 11:19:49 2010 -0400
+++ b/testcases/kernel/mem/hugetlb/hugeshmget/hugeshmget05.c Mon Sep 13 14:51:14 2010 -0400
@@ -87,9 +87,9 @@
if ( get_no_of_hugepages() <= 0 || hugepages_size() <= 0 )
tst_brkm(TCONF, cleanup, "Not enough available Hugepages");
- else
- huge_pages_shm_to_be_allocated = ( get_no_of_hugepages() * hugepages_size() * 1024) / 2 ;
-
+ else
+ huge_pages_shm_to_be_allocated = calculate_huge_pages_shm_to_be_allocated(0.5);
+
setup(); /* global setup */
if ((pid = fork()) == -1) {
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
--
Scott Romanowski