A new flag is needed so that a shared memory block created by one process can be mapped by a second process without re-creating it.
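
A sketch of the intended usage, with an assumed block name and illustrative
size/alignment values (not part of this patch):

	/* Process one: create the block and expose it under /dev/shm */
	odp_shm_t shm = odp_shm_reserve("shared_block", 1024,
					ODP_CACHE_LINE_SIZE, ODP_SHM_PROC);

	/* Process two: map the already existing block, do not create it */
	odp_shm_t shm = odp_shm_reserve("shared_block", 1024,
					ODP_CACHE_LINE_SIZE,
					ODP_SHM_PROC_NOCREAT);
	void *addr = odp_shm_addr(shm);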

Signed-off-by: Maxim Uvarov <maxim.uva...@linaro.org>
---
 include/odp/api/shared_memory.h            | 1 +
 platform/linux-generic/odp_shared_memory.c | 9 +++++++--
 2 files changed, 8 insertions(+), 2 deletions(-)

diff --git a/include/odp/api/shared_memory.h b/include/odp/api/shared_memory.h
index 5d851ce..63b04d0 100644
--- a/include/odp/api/shared_memory.h
+++ b/include/odp/api/shared_memory.h
@@ -49,6 +49,7 @@ extern "C" {
 /* Share level */
 #define ODP_SHM_SW_ONLY 0x1 /**< Application SW only, no HW access */
 #define ODP_SHM_PROC    0x2 /**< Share with external processes */
+#define ODP_SHM_PROC_NOCREAT 0x4 /**< Share with external processes, do not create the block */
 
 /**
  * Shared memory block info
diff --git a/platform/linux-generic/odp_shared_memory.c b/platform/linux-generic/odp_shared_memory.c
index ab48dda..549f497 100644
--- a/platform/linux-generic/odp_shared_memory.c
+++ b/platform/linux-generic/odp_shared_memory.c
@@ -189,7 +189,7 @@ odp_shm_t odp_shm_reserve(const char *name, uint64_t size, uint64_t align,
        int fd = -1;
        int map_flag = MAP_SHARED;
        /* If already exists: O_EXCL: error, O_TRUNC: truncate to zero */
-       int oflag = O_RDWR | O_CREAT | O_TRUNC;
+       int oflag = O_RDWR;
        uint64_t alloc_size;
        uint64_t page_sz, huge_sz;
 #ifdef MAP_HUGETLB
@@ -207,7 +207,12 @@ odp_shm_t odp_shm_reserve(const char *name, uint64_t size, uint64_t align,
        alloc_hp_size = (size + align + (huge_sz - 1)) & (-huge_sz);
 #endif
 
-       if (flags & ODP_SHM_PROC) {
+       if (flags & ODP_SHM_PROC)
+               oflag |= O_CREAT | O_TRUNC;
+
+       if (flags & (ODP_SHM_PROC | ODP_SHM_PROC_NOCREAT)) {
+               need_huge_page = 0;
+
                /* Creates a file to /dev/shm */
                fd = shm_open(name, oflag,
                              S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);
-- 
1.9.1
