[From: openssl-users] On Fri, Apr 11, 2014, Salz, Rich wrote:
> This patch is a variant of what we've been using to help protect
> customer keys for a decade.

Would you please elaborate on how it differs from what you've been using in production?

> OpenSSL is important to us, and this is the first of what we hope will
> be several significant contributions in the near future.

I applaud Akamai's initiative and hope it serves as an example to other organizations.

Message to all: If you use and benefit from OpenSSL and have developed significant in-house improvements, consider reciprocating by making your enhancements available to the community. No need to wait until the next disaster.

--mancha

PS I cleaned the patch up a bit (i.e. swapped in the correct cmm_init) and include it here. It applies cleanly to 1.0.1g, which then builds fine on Linux with -pthread. Regression tests pass. Now on to real testing...

Also, -dev is probably a better place for this than -users, isn't it?
From 8320f4697305785971a6a3cc5a5bed7b30cc46cd Mon Sep 17 00:00:00 2001 From: mancha <mancha1 AT zoho DOT com> Date: Sat, 12 Apr 2014 Subject: Akamai secure memory allocator Akamai Technologies patch that adds a "secure arena" used to store RSA private keys. The arena is mmap'd with guard pages, before and after, so pointer over- and under-runs won't wander in. It is also locked into memory so it doesn't appear on disk and, when possible, kept out of core files. This is a variant of what Akamai has been using to help protect customer keys for a decade. Ref: http://marc.info/?t=139723712400005&r=1&w=2 --- crypto/Makefile | 8 + crypto/asn1/tasn_dec.c | 32 +++ crypto/buddy_allocator.c | 411 +++++++++++++++++++++++++++++++++++++++ crypto/crypto.h | 24 +- crypto/secure_malloc.c | 223 +++++++++++++++++++++ crypto/secure_malloc.h | 45 ++++ 6 files changed, 726 insertions(+), 17 deletions(-) --- a/crypto/Makefile +++ b/crypto/Makefile @@ -35,14 +35,16 @@ GENERAL=Makefile README crypto-lib.com i LIB= $(TOP)/libcrypto.a SHARED_LIB= libcrypto$(SHLIB_EXT) LIBSRC= cryptlib.c mem.c mem_clr.c mem_dbg.c cversion.c ex_data.c cpt_err.c \ - ebcdic.c uid.c o_time.c o_str.c o_dir.c o_fips.c o_init.c fips_ers.c + ebcdic.c uid.c o_time.c o_str.c o_dir.c o_fips.c o_init.c fips_ers.c \ + secure_malloc.c buddy_allocator.c LIBOBJ= cryptlib.o mem.o mem_dbg.o cversion.o ex_data.o cpt_err.o ebcdic.o \ - uid.o o_time.o o_str.o o_dir.o o_fips.o o_init.o fips_ers.o $(CPUID_OBJ) + uid.o o_time.o o_str.o o_dir.o o_fips.o o_init.o fips_ers.o $(CPUID_OBJ) \ + secure_malloc.o buddy_allocator.o SRC= $(LIBSRC) EXHEADER= crypto.h opensslv.h opensslconf.h ebcdic.h symhacks.h \ - ossl_typ.h + ossl_typ.h secure_malloc.h HEADER= cryptlib.h buildinf.h md32_common.h o_time.h o_str.h o_dir.h $(EXHEADER) ALL= $(GENERAL) $(SRC) $(HEADER) --- a/crypto/crypto.h +++ b/crypto/crypto.h @@ -365,20 +365,16 @@ int CRYPTO_is_mem_check_on(void); #define MemCheck_off() CRYPTO_mem_ctrl(CRYPTO_MEM_CHECK_DISABLE) #define 
is_MemCheck_on() CRYPTO_is_mem_check_on() -#define OPENSSL_malloc(num) CRYPTO_malloc((int)num,__FILE__,__LINE__) -#define OPENSSL_strdup(str) CRYPTO_strdup((str),__FILE__,__LINE__) -#define OPENSSL_realloc(addr,num) \ - CRYPTO_realloc((char *)addr,(int)num,__FILE__,__LINE__) -#define OPENSSL_realloc_clean(addr,old_num,num) \ - CRYPTO_realloc_clean(addr,old_num,num,__FILE__,__LINE__) -#define OPENSSL_remalloc(addr,num) \ - CRYPTO_remalloc((char **)addr,(int)num,__FILE__,__LINE__) -#define OPENSSL_freeFunc CRYPTO_free -#define OPENSSL_free(addr) CRYPTO_free(addr) - -#define OPENSSL_malloc_locked(num) \ - CRYPTO_malloc_locked((int)num,__FILE__,__LINE__) -#define OPENSSL_free_locked(addr) CRYPTO_free_locked(addr) +#include <openssl/secure_malloc.h> +#define OPENSSL_malloc(s) secure_malloc(s) +#define OPENSSL_strdup(str) secure_strdup(str) +#define OPENSSL_free(a) secure_free(a) +#define OPENSSL_realloc(a,s) secure_realloc(a,s) +#define OPENSSL_realloc_clean(a,o,s) secure_realloc_clean(a,o,s) +#define OPENSSL_remalloc(a,s) (OPENSSL_free(a), OPENSSL_malloc(s)) +#define OPENSSL_freeFunc secure_free +#define OPENSSL_malloc_locked(s) OPENSSL_malloc(s) +#define OPENSSL_free_locked(a) OPENSSL_free(a) const char *SSLeay_version(int type); --- a/crypto/asn1/tasn_dec.c +++ b/crypto/asn1/tasn_dec.c @@ -169,6 +169,11 @@ int ASN1_item_ex_d2i(ASN1_VALUE **pval, int otag; int ret = 0; ASN1_VALUE **pchptr, *ptmpval; + + int ak_is_rsa_key = 0; /* Are we parsing an RSA key? */ + int ak_is_secure_field = 0; /* should this field be allocated from the secure arena? */ + int ak_is_arena_active = 0; /* was the secure arena already activated? */ + if (!pval) return 0; if (aux && aux->asn1_cb) @@ -407,6 +412,11 @@ int ASN1_item_ex_d2i(ASN1_VALUE **pval, if (asn1_cb && !asn1_cb(ASN1_OP_D2I_PRE, pval, it, NULL)) goto auxerr; + /* Watch out for this when OpenSSL is upgraded! 
*/ + /* We have to be sure that it->sname will still be "RSA" */ + if (it->sname[0] == 'R' && it->sname[1] == 'S' && it->sname[2] == 'A' && it->sname[3] == 0) + ak_is_rsa_key = 1; + /* Get each field entry */ for (i = 0, tt = it->templates; i < it->tcount; i++, tt++) { @@ -445,8 +455,30 @@ int ASN1_item_ex_d2i(ASN1_VALUE **pval, /* attempt to read in field, allowing each to be * OPTIONAL */ + + /* Watch out for this when OpenSSL is upgraded! */ + /* We have to be sure that seqtt->field_name will still be */ + /* "d", "p", and "q" */ + ak_is_secure_field = 0; + ak_is_arena_active = 0; + if (ak_is_rsa_key) + { + /* ak_is_rsa_key is set for public keys too */ + /* however those don't have these variables */ + const char *f = seqtt->field_name; + if ((f[0] == 'd' || f[0] == 'p' || f[0] == 'q') && f[1] == 0) + { + ak_is_secure_field = 1; + ak_is_arena_active = start_secure_allocation(); + } + } + ret = asn1_template_ex_d2i(pseqval, &p, len, seqtt, isopt, ctx); + + if (ak_is_secure_field && !ak_is_arena_active) + stop_secure_allocation(); + if (!ret) { errtt = seqtt; --- /dev/null +++ b/crypto/buddy_allocator.c @@ -0,0 +1,411 @@ +/* + * Memory allocator for secure heap for OpenSSL key storage. + * Copyright, 2001-2014, Akamai Technologies. All Rights Reserved. + * Distributed under the terms of the OpenSSL license. 
+ */ +#include <stdlib.h> +#include <assert.h> +#include <unistd.h> +#include <string.h> +#include <sys/mman.h> +#include <sys/param.h> + +static void *cmm_arena = NULL; +static void **cmm_free_list = NULL; +static int cmm_max_free_lists; +static int mem_arena_size = 0; +static int Mem_min_unit = 0; +static int Overrun_bytes = 0; + +typedef unsigned char u_int8; + +static u_int8 *cmm_bittable; +static u_int8 *cmm_bitmalloc; +/* size in bits */ +static int cmm_bittable_size; + +#define SETBIT(_a,_b) ((_a)[(_b)>>3] |= (1<<((_b)&7))) +#define CLEARBIT(_a,_b) ((_a)[(_b)>>3] &= (0xff&~(1<<((_b)&7)))) +#define TESTBIT(_a,_b) ((_a)[(_b)>>3] & (1<<((_b)&7))) + +static void cmm_add_to_list(void **list, void *lamb); +static void cmm_remove_from_list(void *lamb, void *list); +static void *mybuddy(void *lamb, int list); +static int getlist(void *lamb); +static int testbit(void *lamb, int list, u_int8 *table); +static void clearbit(void *lamb, int list, u_int8 *table); +static void set_bit(void *lamb, int list, u_int8 *table); + +void * +cmm_init(int size, int mem_min_unit, int overrun_bytes) +{ + int i; + size_t pgsize = (size_t)sysconf(_SC_PAGE_SIZE); + size_t aligned = (pgsize + size + (pgsize - 1)) & ~(pgsize - 1); + + mem_arena_size = size; + Mem_min_unit = mem_min_unit, + Overrun_bytes = overrun_bytes; + /* make sure mem_arena_size and Mem_min_unit are powers of 2 */ + assert(mem_arena_size > 0); + assert(mem_min_unit > 0); + assert(0 == ((mem_arena_size-1)&mem_arena_size)); + assert(0 == ((Mem_min_unit-1)&Mem_min_unit)); + + cmm_bittable_size = (mem_arena_size/Mem_min_unit) * 2; + + i = cmm_bittable_size; + cmm_max_free_lists = -1; + while(i) { + i>>=1; + cmm_max_free_lists++; + } + + cmm_free_list = malloc(cmm_max_free_lists * sizeof(void *)); + assert(cmm_free_list); + memset(cmm_free_list, 0, cmm_max_free_lists*sizeof(void *)); + + cmm_bittable = malloc(cmm_bittable_size>>3); + assert(cmm_bittable); + memset(cmm_bittable, 0, cmm_bittable_size>>3); + + cmm_bitmalloc = 
malloc(cmm_bittable_size>>3); + assert(cmm_bitmalloc); + memset(cmm_bitmalloc, 0, cmm_bittable_size>>3); + + cmm_arena = mmap(NULL, pgsize + mem_arena_size + pgsize, PROT_READ|PROT_WRITE, + MAP_ANON|MAP_PRIVATE, 0, 0); + assert(MAP_FAILED != cmm_arena); + mprotect(cmm_arena, pgsize, PROT_NONE); + mprotect(cmm_arena + aligned, pgsize, PROT_NONE); + set_bit(cmm_arena, 0, cmm_bittable); + cmm_add_to_list(&cmm_free_list[0], cmm_arena); + + /* first bit means that table is in use, multi-arena management */ + /* SETBIT(cmm_bittable, 0); */ + + return cmm_arena; +} + +void * +cmm_malloc(int size) +{ + int i, list, slist; + void *chunk = NULL, *temp; + + i = Mem_min_unit; list = cmm_max_free_lists-1; + while (i < size + Overrun_bytes) { + i<<=1; + list--; + } + if (list < 0) goto out; + + /* try to find a larger entry to split */ + slist = list; + while (slist >= 0) { + if (cmm_free_list[slist] != NULL) + break; + slist--; + } + if (slist < 0) goto out; + + /* split larger entry */ + while (slist != list) { + temp = cmm_free_list[slist]; + + /* remove from bigger list */ + assert(!testbit(temp, slist, cmm_bitmalloc)); + clearbit(temp, slist, cmm_bittable); + cmm_remove_from_list(temp, cmm_free_list[slist]); + assert(temp != cmm_free_list[slist]); + + /* done with bigger list */ + slist++; + + /* add to smaller list */ + assert(!testbit(temp, slist, cmm_bitmalloc)); + set_bit(temp, slist, cmm_bittable); + cmm_add_to_list(&cmm_free_list[slist], temp); + assert(cmm_free_list[slist] == temp); + + /* split in 2 */ + temp += mem_arena_size >> slist; + assert(!testbit(temp, slist, cmm_bitmalloc)); + set_bit(temp, slist, cmm_bittable); + cmm_add_to_list(&cmm_free_list[slist], temp); + assert(cmm_free_list[slist] == temp); + + assert(temp-(mem_arena_size>>slist) == mybuddy(temp, slist)); + } + + /* peel off memory to hand back */ + chunk = cmm_free_list[list]; + assert(testbit(chunk, list, cmm_bittable)); + set_bit(chunk, list, cmm_bitmalloc); + cmm_remove_from_list(chunk, 
cmm_free_list[list]); + + assert(chunk >= cmm_arena && chunk < cmm_arena+mem_arena_size); + +#ifdef CMM_DEBUG + for (i = 0; i < cmm_bittable_size; i++) { + if (TESTBIT(cmm_bitmalloc,i)) { + assert(TESTBIT(cmm_bittable,i)); + } + } +#endif + + out: + return chunk; +} + +static int cmm_free_calls = 0; + +int +cmm_free(void *lamb) +{ + int list; + void *buddy; +#ifdef CMM_DEBUG + int i; +#endif + cmm_free_calls++; + + assert(lamb >= cmm_arena && lamb < cmm_arena+mem_arena_size); + + list = getlist(lamb); + assert(testbit(lamb, list, cmm_bittable)); + clearbit(lamb, list, cmm_bitmalloc); + cmm_add_to_list(&cmm_free_list[list], lamb); + + while (NULL != (buddy = mybuddy(lamb, list))) { + assert(lamb == mybuddy(buddy, list)); + + assert(lamb); + assert(!testbit(lamb, list, cmm_bitmalloc)); + clearbit(lamb, list, cmm_bittable); + cmm_remove_from_list(lamb, cmm_free_list[list]); + assert(!testbit(lamb, list, cmm_bitmalloc)); + clearbit(buddy, list, cmm_bittable); + cmm_remove_from_list(buddy, cmm_free_list[list]); + + list--; + + if (lamb > buddy) lamb = buddy; + + assert(!testbit(lamb, list, cmm_bitmalloc)); + set_bit(lamb, list, cmm_bittable); + cmm_add_to_list(&cmm_free_list[list], lamb); + assert(cmm_free_list[list] == lamb); + } + +#ifdef CMM_DEBUG + for (i = 0; i < cmm_bittable_size; i++) { + if (TESTBIT(cmm_bitmalloc,i)) { + assert(TESTBIT(cmm_bittable,i)); + } + } +#endif + + return 0; +} + +int +cmm_usable_size(void *lamb) +{ + int list = getlist(lamb); + int size; + + assert(lamb >= cmm_arena && lamb < cmm_arena+mem_arena_size); + assert(testbit(lamb, list, cmm_bittable)); + + size = mem_arena_size/(1<<list); + + return size; +} + +void * +cmm_realloc(void *lamb, int size) +{ + void *temp; + int oldsize; + + oldsize = cmm_usable_size(lamb); + + if ((size > oldsize/2) && (size <= oldsize)) + return lamb; + + if ((size < Mem_min_unit) && (Mem_min_unit == oldsize)) + return lamb; + + temp = lamb; + lamb = cmm_malloc(size); + + if (NULL == lamb) + return NULL; + + 
size = MIN(size, oldsize); + memcpy(lamb, temp, size); + + cmm_free(temp); + + return lamb; +} + +typedef struct _cmm_list cmm_list; +struct _cmm_list { + cmm_list *next; + cmm_list **p_next; +}; + +static void +cmm_add_to_list(void **list, void *lamb) +{ + cmm_list *temp; +#ifdef CMM_DEBUG + cmm_list *temp2; +#endif + + assert(list >= cmm_free_list && + list < cmm_free_list + cmm_max_free_lists); + assert(lamb >= cmm_arena && + lamb < cmm_arena + mem_arena_size); + + temp = (cmm_list *)lamb; + temp->next = *(cmm_list **)list; + assert(temp->next == NULL || + ((void *)temp->next >= cmm_arena && + (void *)temp->next < cmm_arena+mem_arena_size)); + temp->p_next = (cmm_list **)list; + + if (NULL != temp->next) { + assert((void **)temp->next->p_next == list); + temp->next->p_next = &(temp->next); + } + + *list = lamb; + +#ifdef CMM_DEBUG + for (temp = *list; temp != NULL; temp = temp->next) { + if (NULL != temp->next) + assert(temp->next->p_next == &temp->next); + if (lamb == temp) temp2 = lamb; + } + assert (NULL != temp2); +#endif +} + +static void +cmm_remove_from_list(void *lamb, void *list) +{ + cmm_list *temp, *temp2; + +#ifdef CMM_DEBUG + temp2 = NULL; + + for (temp = list; temp != NULL; temp = temp->next) { + if (NULL != temp->next) + assert(temp->next->p_next == &temp->next); + if (lamb == temp) temp2 = lamb; + } + assert (NULL != temp2); +#endif + + temp = (cmm_list *)lamb; + if (NULL != temp->next) + temp->next->p_next = temp->p_next; + *temp->p_next = temp->next; + + if (NULL == temp->next) + return; + + temp2 = temp->next; + assert((((void **)temp2->p_next >= cmm_free_list) && + ((void **)temp2->p_next < cmm_free_list + cmm_max_free_lists)) + || + (((void *)temp2->p_next >= cmm_arena) && + ((void *)temp2->p_next < cmm_arena + mem_arena_size))); +} + +static void * +mybuddy(void *lamb, int list) +{ + int index; + void *chunk = NULL; + + index = (1<<list) + ((lamb-cmm_arena)/(mem_arena_size>>list)); + index ^= 1; + + if (TESTBIT(cmm_bittable,index) && + 
!TESTBIT(cmm_bitmalloc,index)) { + chunk = cmm_arena + ((index & ((1<<list)-1)) * (mem_arena_size>>list)); + } + + return chunk; +} + +static int +getlist(void *lamb) +{ + int index, list; + + list = cmm_max_free_lists-1; + index = (mem_arena_size + lamb - cmm_arena) / Mem_min_unit; + + while (index) { + if (TESTBIT(cmm_bittable,index)) { + break; + } + assert(!(index & 1)); + index >>= 1; + list--; + } + + return list; +} + +static int +testbit(void *lamb, int list, u_int8 *table) +{ + int index; + + assert(list < cmm_max_free_lists && list >= 0); + assert(!((lamb-cmm_arena)&((mem_arena_size>>list)-1))); + + index = (1<<list) + ((lamb - cmm_arena) / (mem_arena_size>>list)); + + assert(index > 0 && index < cmm_bittable_size); + + return TESTBIT(table,index); +} + +static void +clearbit(void *lamb, int list, u_int8 *table) +{ + int index; + + assert(list < cmm_max_free_lists && list >= 0); + assert(!((lamb-cmm_arena)&((mem_arena_size>>list)-1))); + + index = (1<<list) + ((lamb - cmm_arena) / (mem_arena_size>>list)); + + assert(index > 0 && index < cmm_bittable_size); + + assert(TESTBIT(table,index)); + CLEARBIT(table,index); +} + +static void +set_bit(void *lamb, int list, u_int8 *table) +{ + int index; + + assert(list < cmm_max_free_lists && list >= 0); + assert(!((lamb-cmm_arena)&((mem_arena_size>>list)-1))); + + index = (1<<list) + ((lamb - cmm_arena) / (mem_arena_size>>list)); + + assert(index > 0 && index < cmm_bittable_size); + + assert(!TESTBIT(table,index)); + SETBIT(table,index); +} --- /dev/null +++ b/crypto/secure_malloc.c @@ -0,0 +1,223 @@ +/* + * Memory allocator for secure heap for OpenSSL key storage. + * Copyright, 2001-2014, Akamai Technologies. All Rights Reserved. + * Distributed under the terms of the OpenSSL license. 
+ * + * Note that to improve performance and simplfy the code, this allocator + * works only in the same thread where we called the init function; + * trying to allocate/free blocks from different threads will + * just delegate the calls to the standard malloc library. + */ + +#include <pthread.h> +#include <string.h> +#include <sys/mman.h> +#include <stdlib.h> +#include <assert.h> + +#include "secure_malloc.h" + +extern void OPENSSL_cleanse(void *ptr, size_t len); + +/* + * Set to 1 when secure_malloc_init() is called successfully. Can + * never be set back to 0 + */ +int secure_allocation_support = 0; + +static pthread_mutex_t secure_allocation_lock = PTHREAD_MUTEX_INITIALIZER; + +#define LOCK() pthread_mutex_lock(&secure_allocation_lock) +#define UNLOCK() pthread_mutex_unlock(&secure_allocation_lock) + +static pthread_key_t secure_allocation_key; +static const int secure_yes = 1; +static const int secure_no = 0; + +static char *arena = NULL; +static size_t arena_size = 0; + +/* The low-level secure heap interface. */ +extern void *cmm_init(int size, int mem_min_unit, int overrun_bytes); +extern void *cmm_malloc(int size); +extern int cmm_free(void *lamb); +extern void *cmm_realloc(void *lamb, int size); + +static int secure_allocation_enabled() +{ + if (!secure_allocation_support) + { + return 0; + } + int* answer = (int*)pthread_getspecific(secure_allocation_key); + return answer == &secure_yes; +} + +static void secure_allocation_enable(int status) +{ + if (secure_allocation_support) + { + pthread_setspecific(secure_allocation_key, + status ? &secure_yes : &secure_no); + } +} + +/* + * Start/stop secure allocation. 
+ */ +int start_secure_allocation() +{ + int ret = secure_allocation_enabled(); + if (ret == 0) + { + secure_allocation_enable(1); + } + + return ret; +} + +int stop_secure_allocation() +{ + int ret = secure_allocation_enabled(); + if (ret == 1) + { + secure_allocation_enable(0); + } + + return ret; +} + +void flush_secure_arena() +{ + if (arena) + memset(arena, 0, arena_size); +} + +/* Module initialization, returns >0 upon success */ +int secure_malloc_init(size_t size, int mem_min_unit, int overrun_bytes) +{ + int ret = 0; + arena_size = size; + + LOCK(); + if (arena) + { + assert(0); + } + + else if ((arena = (char *) cmm_init(arena_size, mem_min_unit, overrun_bytes)) == NULL) + { + } + else if (mlock(arena, arena_size)) + { + } + else if (pthread_key_create(&secure_allocation_key, 0) != 0) + { + } + else + { + secure_allocation_support = 1; + ret = 1; + } + + /* MADV_DONTDUMP is supported from Kernel 3.4 and from glibc 2.16 */ +#ifdef MADV_DONTDUMP + if (madvise(arena, arena_size, MADV_DONTDUMP) == 0) + { + ret = 2; + } +#endif + + UNLOCK(); + return ret; +} + +/* Helper func to figure out whether a pointer was allocated from the + secure chunk. 
+*/ +static int is_secured_ptr(void *ptr) +{ + return secure_allocation_support + && (char*)ptr >= arena && (char*)ptr < arena + arena_size; +} + +void *secure_calloc(size_t nmemb, size_t size) +{ + void *ret; + int tot_size = nmemb*size; + + if (!secure_allocation_enabled()) + return calloc(nmemb,size); + LOCK(); + ret = cmm_malloc(tot_size); + if (ret) + { + memset(ret,0,tot_size); + } + UNLOCK(); + return ret; +} + +void *secure_malloc(size_t size) +{ + void *ret; + + if (!secure_allocation_enabled()) + return malloc(size); + LOCK(); + ret = cmm_malloc(size); + UNLOCK(); + return ret; +} + +void *secure_strdup(const char *str) +{ + return strcpy(secure_malloc(strlen(str) + 1), str); +} + +void secure_free(void *ptr) +{ + if (secure_allocation_support && is_secured_ptr(ptr)) + { + LOCK(); + cmm_free(ptr); + UNLOCK(); + } + else + { + free(ptr); + } + +} + +void *secure_realloc(void *ptr, size_t size) +{ + void *ret; + + if (secure_allocation_support && is_secured_ptr(ptr)) + { + LOCK(); + ret = cmm_realloc(ptr,size); + UNLOCK(); + } + else + { + ret = realloc(ptr,size); + } + + return ret; +} + +void *secure_realloc_clean(void *ptr, int old_len, size_t size) +{ + void *ret; + + ret = secure_malloc(size); + if (ret) + memcpy(ret, ptr, old_len); + + OPENSSL_cleanse(ptr, old_len); + secure_free(ptr); + + return ret; +} --- /dev/null +++ b/crypto/secure_malloc.h @@ -0,0 +1,45 @@ +/* + * Memory allocator for secure heap for OpenSSL key storage. + * Copyright, 2001-2014, Akamai Technologies. All Rights Reserved. + * Distributed under the terms of the OpenSSL license. 
+ */ + +#ifndef __openssl_secure_malloc_h +#define __openssl_secure_malloc_h + +#ifdef __cplusplus +extern "C" { +#endif + +/* Global flag to designate whether secure malloc support is turned on */ +extern int secure_allocation_support; + +/* Secure versions of the malloc interface functions */ +extern void *secure_calloc(size_t nmemb, size_t size); +extern void *secure_malloc(size_t size); +extern void *secure_strdup(const char *str); +extern void secure_free(void *ptr); +extern void *secure_realloc(void *ptr, size_t size); +extern void *secure_realloc_clean(void *ptr, int old_len, size_t size); + +/* Module initialization including setting secure_malloc_support. */ +extern int secure_malloc_init(size_t arena_size, int mem_min_unit, int overrun_bytes); + +/* + * Enabling/Disabling the secure allocation. Use like this to ensure + * proper nesting: + * int x = start_secure_allocation(); + * .... do some work, calling OPENSSL_malloc etc ... + * if (x) stop_secure_allocation(); + */ +extern int start_secure_allocation(); +extern int stop_secure_allocation(); + +/* Erasing the content of all allocated buffers */ +extern void flush_secure_arena(); + +#ifdef __cplusplus +} +#endif + +#endif
pgpVZ7TW4VfVq.pgp
Description: PGP signature
