diff --git a/src/backend/utils/mmgr/slab.c b/src/backend/utils/mmgr/slab.c
index c673bc3..6fbdb54 100644
--- a/src/backend/utils/mmgr/slab.c
+++ b/src/backend/utils/mmgr/slab.c
@@ -57,11 +57,11 @@
 #include "lib/ilist.h"
 
 
-#define SLAB_CHUNKHDRSZ MAXALIGN(sizeof(SlabChunk))
+#define SLAB_CHUNKHDRSZ (MAXALIGN(sizeof(SlabChunk)) + STANDARDCHUNKHEADERSIZE)
 
 /* Portion of SLAB_CHUNKHDRSZ excluding trailing padding. */
 #define SLAB_CHUNK_USED \
-	(offsetof(SlabChunk, header) + sizeof(StandardChunkHeader))
+	(MAXALIGN(sizeof(SlabChunk)) + sizeof(StandardChunkHeader))
 
 /*
  * SlabContext is a specialized implementation of MemoryContext.
@@ -103,10 +103,6 @@ typedef struct SlabChunk
 {
 	/* block owning this chunk */
 	void	   *block;
-
-	/* include StandardChunkHeader because mcxt.c expects that */
-	StandardChunkHeader header;
-
 } SlabChunk;
 
 
@@ -121,6 +117,8 @@ typedef struct SlabChunk
 	((char *) block + sizeof(SlabBlock))
 #define SlabChunkIndex(slab, block, chunk)	\
 	(((char *) chunk - SlabBlockStart(block)) / slab->fullChunkSize)
+#define SlabChunkStandardHeader(chunk) \
+	((StandardChunkHeader *) ((char *) (chunk) + MAXALIGN(sizeof(SlabChunk))))
 
 /*
  * These functions implement the MemoryContext API for Slab contexts.
@@ -164,10 +162,10 @@ static MemoryContextMethods SlabMethods = {
 #ifdef HAVE_ALLOCINFO
 #define SlabFreeInfo(_cxt, _chunk) \
 			fprintf(stderr, "SlabFree: %s: %p, %zu\n", \
-				(_cxt)->header.name, (_chunk), (_chunk)->header.size)
+				(_cxt)->header.name, (_chunk), SlabChunkStandardHeader(_chunk)->size)
 #define SlabAllocInfo(_cxt, _chunk) \
 			fprintf(stderr, "SlabAlloc: %s: %p, %zu\n", \
-				(_cxt)->header.name, (_chunk), (_chunk)->header.size)
+				(_cxt)->header.name, (_chunk), SlabChunkStandardHeader(_chunk)->size)
 #else
 #define SlabFreeInfo(_cxt, _chunk)
 #define SlabAllocInfo(_cxt, _chunk)
@@ -203,7 +201,8 @@ SlabContextCreate(MemoryContext parent,
 					 "MAXALIGN too small to fit int32");
 
 	/* chunk, including SLAB header (both addresses nicely aligned) */
-	fullChunkSize = MAXALIGN(sizeof(SlabChunk) + MAXALIGN(chunkSize));
+	fullChunkSize = MAXALIGN(sizeof(SlabChunk))
+		+ MAXALIGN(sizeof(StandardChunkHeader)) + MAXALIGN(chunkSize);
 
 	/* Make sure the block can store at least one chunk. */
 	if (blockSize - sizeof(SlabBlock) < fullChunkSize)
@@ -324,6 +323,7 @@ SlabAlloc(MemoryContext context, Size size)
 	SlabContext *slab = castNode(SlabContext, context);
 	SlabBlock  *block;
 	SlabChunk  *chunk;
+	StandardChunkHeader *header;
 	int			idx;
 
 	Assert(slab);
@@ -394,6 +394,7 @@ SlabAlloc(MemoryContext context, Size size)
 
 	/* compute the chunk location block start (after the block header) */
 	chunk = SlabBlockGetChunk(slab, block, idx);
+	header = SlabChunkStandardHeader(chunk);
 
 	/*
 	 * Update the block nfree count, and also the minFreeChunks as we've
@@ -449,15 +450,15 @@ SlabAlloc(MemoryContext context, Size size)
 
 	chunk->block = (void *) block;
 
-	chunk->header.context = (MemoryContext) slab;
-	chunk->header.size = MAXALIGN(size);
+	header->context = (MemoryContext) slab;
+	header->size = MAXALIGN(size);
 
 #ifdef MEMORY_CONTEXT_CHECKING
-	chunk->header.requested_size = size;
-	VALGRIND_MAKE_MEM_NOACCESS(&chunk->header.requested_size,
-							   sizeof(chunk->header.requested_size));
+	header->requested_size = size;
+	VALGRIND_MAKE_MEM_NOACCESS(&header->requested_size,
+							   sizeof(header->requested_size));
 	/* slab mark to catch clobber of "unused" space */
-	if (size < chunk->header.size)
+	if (size < header->size)
 		set_sentinel(SlabChunkGetPointer(chunk), size);
 #endif
 #ifdef RANDOMIZE_ALLOCATED_MEMORY
@@ -480,15 +481,16 @@ SlabFree(MemoryContext context, void *pointer)
 	SlabContext *slab = castNode(SlabContext, context);
 	SlabChunk  *chunk = SlabPointerGetChunk(pointer);
 	SlabBlock  *block = chunk->block;
+	StandardChunkHeader *header = SlabChunkStandardHeader(chunk);
 
 	SlabFreeInfo(slab, chunk);
 
 #ifdef MEMORY_CONTEXT_CHECKING
-	VALGRIND_MAKE_MEM_DEFINED(&chunk->header.requested_size,
-							  sizeof(chunk->header.requested_size));
+	VALGRIND_MAKE_MEM_DEFINED(&header->requested_size,
+							  sizeof(header->requested_size));
 	/* Test for someone scribbling on unused space in chunk */
-	if (chunk->header.requested_size < chunk->header.size)
-		if (!sentinel_ok(pointer, chunk->header.requested_size))
+	if (header->requested_size < header->size)
+		if (!sentinel_ok(pointer, header->requested_size))
 			elog(WARNING, "detected write past chunk end in %s %p",
 				 slab->header.name, chunk);
 #endif
@@ -507,12 +509,12 @@ SlabFree(MemoryContext context, void *pointer)
 #ifdef CLOBBER_FREED_MEMORY
 	/* XXX don't wipe the int32 index, used for block-level freelist */
 	wipe_mem((char *) pointer + sizeof(int32),
-			 chunk->header.size - sizeof(int32));
+			 header->size - sizeof(int32));
 #endif
 
 #ifdef MEMORY_CONTEXT_CHECKING
 	/* Reset requested_size to 0 in chunks that are on freelist */
-	chunk->header.requested_size = 0;
+	header->requested_size = 0;
 #endif
 
 	/* remove the block from a freelist */
@@ -591,8 +593,9 @@ static Size
 SlabGetChunkSpace(MemoryContext context, void *pointer)
 {
 	SlabChunk  *chunk = SlabPointerGetChunk(pointer);
+	StandardChunkHeader *header = SlabChunkStandardHeader(chunk);
 
-	return chunk->header.size + SLAB_CHUNKHDRSZ;
+	return header->size + SLAB_CHUNKHDRSZ;
 }
 
 /*
@@ -741,36 +744,37 @@ SlabCheck(MemoryContext context)
 				if (!freechunks[j])
 				{
 					SlabChunk  *chunk = SlabBlockGetChunk(slab, block, j);
+					StandardChunkHeader *header = SlabChunkStandardHeader(chunk);
 
-					VALGRIND_MAKE_MEM_DEFINED(&chunk->header.requested_size,
-									   sizeof(chunk->header.requested_size));
+					VALGRIND_MAKE_MEM_DEFINED(&header->requested_size,
+									   sizeof(header->requested_size));
 
 					/* we're in a no-freelist branch */
-					VALGRIND_MAKE_MEM_NOACCESS(&chunk->header.requested_size,
-									   sizeof(chunk->header.requested_size));
+					VALGRIND_MAKE_MEM_NOACCESS(&header->requested_size,
+									   sizeof(header->requested_size));
 
 					/* chunks have both block and slab pointers, so check both */
 					if (chunk->block != block)
 						elog(WARNING, "problem in slab %s: bogus block link in block %p, chunk %p",
 							 name, block, chunk);
 
-					if (chunk->header.context != (MemoryContext) slab)
+					if (header->context != (MemoryContext) slab)
 						elog(WARNING, "problem in slab %s: bogus slab link in block %p, chunk %p",
 							 name, block, chunk);
 
 					/* now make sure the chunk size is correct */
-					if (chunk->header.size != MAXALIGN(slab->chunkSize))
+					if (header->size != MAXALIGN(slab->chunkSize))
 						elog(WARNING, "problem in slab %s: bogus chunk size in block %p, chunk %p",
 							 name, block, chunk);
 
 					/* now make sure the chunk size is correct */
-					if (chunk->header.requested_size != slab->chunkSize)
+					if (header->requested_size != slab->chunkSize)
 						elog(WARNING, "problem in slab %s: bogus chunk requested size in block %p, chunk %p",
 							 name, block, chunk);
 
 					/* there might be sentinel (thanks to alignment) */
-					if (chunk->header.requested_size < chunk->header.size &&
-						!sentinel_ok(chunk, SLAB_CHUNKHDRSZ + chunk->header.requested_size))
+					if (header->requested_size < header->size &&
+						!sentinel_ok(chunk, SLAB_CHUNKHDRSZ + header->requested_size))
 						elog(WARNING, "problem in slab %s: detected write past chunk end in block %p, chunk %p",
 							 name, block, chunk);
 				}
