Index: contrib/build-mca-comps-outside-of-tree/btl_tcp2.c
===================================================================
--- contrib/build-mca-comps-outside-of-tree/btl_tcp2.c	(revision 28700)
+++ contrib/build-mca-comps-outside-of-tree/btl_tcp2.c	(working copy)
@@ -2,7 +2,7 @@
  * Copyright (c) 2004-2007 The Trustees of Indiana University and Indiana
  *                         University Research and Technology
  *                         Corporation.  All rights reserved.
- * Copyright (c) 2004-2008 The University of Tennessee and The University
+ * Copyright (c) 2004-2013 The University of Tennessee and The University
  *                         of Tennessee Research Foundation.  All rights
  *                         reserved.
  * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart, 
@@ -174,12 +174,11 @@
     uint32_t flags)
 {
     mca_btl_tcp2_frag_t* frag = NULL;
-    int rc;

     if(size <= btl->btl_eager_limit) { 
-        MCA_BTL_TCP_FRAG_ALLOC_EAGER(frag, rc); 
+        MCA_BTL_TCP_FRAG_ALLOC_EAGER(frag); 
     } else if (size <= btl->btl_max_send_size) { 
-        MCA_BTL_TCP_FRAG_ALLOC_MAX(frag, rc); 
+        MCA_BTL_TCP_FRAG_ALLOC_MAX(frag); 
     }
     if( OPAL_UNLIKELY(NULL == frag) ) {
         return NULL;
@@ -243,13 +242,13 @@
      * than the eager limit pack into a fragment from the eager pool
      */
     if (max_data+reserve <= btl->btl_eager_limit) {
-        MCA_BTL_TCP_FRAG_ALLOC_EAGER(frag, rc);
+        MCA_BTL_TCP_FRAG_ALLOC_EAGER(frag);
     } else {
         /* 
          * otherwise pack as much data as we can into a fragment
          * that is the max send size.
          */
-        MCA_BTL_TCP_FRAG_ALLOC_MAX(frag, rc);
+        MCA_BTL_TCP_FRAG_ALLOC_MAX(frag);
     }
     if( OPAL_UNLIKELY(NULL == frag) ) {
         return NULL;
@@ -326,12 +325,11 @@
     uint32_t flags)
 {
     mca_btl_tcp2_frag_t* frag;
-    int rc;

     if( OPAL_UNLIKELY((*size) > UINT32_MAX) ) {  /* limit the size to what we support */
         *size = (size_t)UINT32_MAX;
     }
-    MCA_BTL_TCP_FRAG_ALLOC_USER(frag, rc);
+    MCA_BTL_TCP_FRAG_ALLOC_USER(frag);
     if( OPAL_UNLIKELY(NULL == frag) ) {
         return NULL;
     }
Index: contrib/build-mca-comps-outside-of-tree/btl_tcp2_endpoint.c
===================================================================
--- contrib/build-mca-comps-outside-of-tree/btl_tcp2_endpoint.c	(revision 28700)
+++ contrib/build-mca-comps-outside-of-tree/btl_tcp2_endpoint.c	(working copy)
@@ -2,7 +2,7 @@
  * Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
  *                         University Research and Technology
  *                         Corporation.  All rights reserved.
- * Copyright (c) 2004-2008 The University of Tennessee and The University
+ * Copyright (c) 2004-2013 The University of Tennessee and The University
  *                         of Tennessee Research Foundation.  All rights
  *                         reserved.
  * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart, 
@@ -689,12 +689,11 @@

             frag = btl_endpoint->endpoint_recv_frag;
             if(NULL == frag) {
-                int rc;
                 if(mca_btl_tcp2_module.super.btl_max_send_size > 
                    mca_btl_tcp2_module.super.btl_eager_limit) { 
-                    MCA_BTL_TCP_FRAG_ALLOC_MAX(frag, rc);
+                    MCA_BTL_TCP_FRAG_ALLOC_MAX(frag);
                 } else { 
-                    MCA_BTL_TCP_FRAG_ALLOC_EAGER(frag, rc);
+                    MCA_BTL_TCP_FRAG_ALLOC_EAGER(frag);
                 }

                 if(NULL == frag) {
Index: contrib/build-mca-comps-outside-of-tree/btl_tcp2_frag.h
===================================================================
--- contrib/build-mca-comps-outside-of-tree/btl_tcp2_frag.h	(revision 28700)
+++ contrib/build-mca-comps-outside-of-tree/btl_tcp2_frag.h	(working copy)
@@ -2,7 +2,7 @@
  * Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
  *                         University Research and Technology
  *                         Corporation.  All rights reserved.
- * Copyright (c) 2004-2008 The University of Tennessee and The University
+ * Copyright (c) 2004-2013 The University of Tennessee and The University
  *                         of Tennessee Research Foundation.  All rights
  *                         reserved.
  * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart, 
@@ -79,25 +79,25 @@
  * free list(s).
  */

-#define MCA_BTL_TCP_FRAG_ALLOC_EAGER(frag, rc)                             \
+#define MCA_BTL_TCP_FRAG_ALLOC_EAGER(frag)                                 \
 {                                                                          \
     ompi_free_list_item_t *item;                                           \
-    OMPI_FREE_LIST_GET(&mca_btl_tcp2_component.tcp_frag_eager, item, rc);   \
-    frag = (mca_btl_tcp2_frag_t*) item;                                     \
+    OMPI_FREE_LIST_GET(&mca_btl_tcp2_component.tcp_frag_eager, item);      \
+    frag = (mca_btl_tcp2_frag_t*) item;                                    \
 }

-#define MCA_BTL_TCP_FRAG_ALLOC_MAX(frag, rc)                               \
+#define MCA_BTL_TCP_FRAG_ALLOC_MAX(frag)                                   \
 {                                                                          \
     ompi_free_list_item_t *item;                                           \
-    OMPI_FREE_LIST_GET(&mca_btl_tcp2_component.tcp_frag_max, item, rc);     \
-    frag = (mca_btl_tcp2_frag_t*) item;                                     \
+    OMPI_FREE_LIST_GET(&mca_btl_tcp2_component.tcp_frag_max, item);        \
+    frag = (mca_btl_tcp2_frag_t*) item;                                    \
 }

-#define MCA_BTL_TCP_FRAG_ALLOC_USER(frag, rc)                              \
+#define MCA_BTL_TCP_FRAG_ALLOC_USER(frag)                                  \
 {                                                                          \
     ompi_free_list_item_t *item;                                           \
-    OMPI_FREE_LIST_GET(&mca_btl_tcp2_component.tcp_frag_user, item, rc);    \
-    frag = (mca_btl_tcp2_frag_t*) item;                                     \
+    OMPI_FREE_LIST_GET(&mca_btl_tcp2_component.tcp_frag_user, item);       \
+    frag = (mca_btl_tcp2_frag_t*) item;                                    \
 }

 #define MCA_BTL_TCP_FRAG_RETURN(frag)                                      \
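
For reference, a minimal caller-side sketch of the reworked macros (the helper name and the single include are assumptions, not part of the patch); it mirrors the mca_btl_tcp2_alloc() change above: there is no rc to inspect any more, and a NULL frag is the only failure indication.

#include "btl_tcp2_frag.h"

/* Hypothetical helper: allocate an eager fragment from the TCP BTL
 * free list.  With the reworked macro, frag stays NULL when the
 * free list is exhausted and cannot grow. */
static inline mca_btl_tcp2_frag_t *btl_tcp2_get_eager_frag(void)
{
    mca_btl_tcp2_frag_t *frag = NULL;

    MCA_BTL_TCP_FRAG_ALLOC_EAGER(frag);
    if (OPAL_UNLIKELY(NULL == frag)) {
        return NULL;   /* callers map this to OMPI_ERR_OUT_OF_RESOURCE */
    }
    return frag;
}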
Index: ompi/class/ompi_free_list.h
===================================================================
--- ompi/class/ompi_free_list.h	(revision 28700)
+++ ompi/class/ompi_free_list.h	(working copy)
@@ -2,7 +2,7 @@
  * Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
  *                         University Research and Technology
  *                         Corporation.  All rights reserved.
- * Copyright (c) 2004-2006 The University of Tennessee and The University
+ * Copyright (c) 2004-2013 The University of Tennessee and The University
  *                         of Tennessee Research Foundation.  All rights
  *                         reserved.
  * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart, 
@@ -185,37 +185,33 @@
  *
  * @param fl (IN)        Free list.
  * @param item (OUT)     Allocated item.
- * @param rc (OUT)       OMPI_SUCCESS or error status on failure.
  *
  * If the requested item is not available the free list is grown to 
  * accomodate the request - unless the max number of allocations has 
- * been reached.  If this is the case - an out of resource error is 
- * returned to the caller.
+ * been reached.  In that case the item is set to NULL and the
+ * caller is expected to check for it.
  */
- 
-#define OMPI_FREE_LIST_GET(fl, item, rc) \
-{ \
-    rc = OMPI_SUCCESS; \
-    item = (ompi_free_list_item_t*) opal_atomic_lifo_pop(&((fl)->super)); \
-    if( OPAL_UNLIKELY(NULL == item) ) { \
-        if(opal_using_threads()) { \
-            opal_mutex_lock(&((fl)->fl_lock)); \
-            ompi_free_list_grow((fl), (fl)->fl_num_per_alloc); \
-            opal_mutex_unlock(&((fl)->fl_lock)); \
-        } else { \
-            ompi_free_list_grow((fl), (fl)->fl_num_per_alloc); \
-        } \
+
+#define OMPI_FREE_LIST_GET(fl, item)                                    \
+    {                                                                   \
         item = (ompi_free_list_item_t*) opal_atomic_lifo_pop(&((fl)->super)); \
-        if( OPAL_UNLIKELY(NULL == item) ) rc = OMPI_ERR_TEMP_OUT_OF_RESOURCE; \
-    }  \
-} 
+        if( OPAL_UNLIKELY(NULL == item) ) {                             \
+            if(opal_using_threads()) {                                  \
+                opal_mutex_lock(&((fl)->fl_lock));                      \
+                ompi_free_list_grow((fl), (fl)->fl_num_per_alloc);      \
+                opal_mutex_unlock(&((fl)->fl_lock));                    \
+            } else {                                                    \
+                ompi_free_list_grow((fl), (fl)->fl_num_per_alloc);      \
+            }                                                           \
+            item = (ompi_free_list_item_t*) opal_atomic_lifo_pop(&((fl)->super)); \
+        }                                                               \
+    } 

 /**
  * Blocking call to obtain an item from a free list.
  *
  * @param fl (IN)        Free list.
  * @param item (OUT)     Allocated item.
- * @param rc (OUT)       OMPI_SUCCESS or error status on failure.
  *
  * If the requested item is not available the free list is grown to 
  * accomodate the request - unless the max number of allocations has 
@@ -223,11 +219,11 @@
  * is returned to the list.
  */

-#define OMPI_FREE_LIST_WAIT(fl, item, rc)                                  \
-    rc = __ompi_free_list_wait( (fl), &(item) )
+#define OMPI_FREE_LIST_WAIT(fl, item)           \
+    __ompi_free_list_wait( (fl), &(item) )

-static inline int __ompi_free_list_wait( ompi_free_list_t* fl,
-                                         ompi_free_list_item_t** item )
+static inline void __ompi_free_list_wait( ompi_free_list_t* fl,
+                                          ompi_free_list_item_t** item )
 {
     *item = (ompi_free_list_item_t*)opal_atomic_lifo_pop(&((fl)->super));
     while( NULL == *item ) {
@@ -262,7 +258,6 @@
         OPAL_THREAD_UNLOCK(&((fl)->fl_lock));
         *item = (ompi_free_list_item_t*)opal_atomic_lifo_pop(&((fl)->super));
     }
-    return OMPI_SUCCESS;
 } 

 /**
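
As a usage reference for the new signatures, a minimal sketch of the caller-side pattern (the function name and list pointer fl are hypothetical; the includes and error constants are those used elsewhere in this patch): OMPI_FREE_LIST_GET can leave item NULL, while OMPI_FREE_LIST_WAIT blocks until an item is available and therefore needs no check.

#include "ompi/constants.h"
#include "ompi/class/ompi_free_list.h"

/* Sketch of the post-change calling convention. */
static int free_list_get_example(ompi_free_list_t *fl)
{
    ompi_free_list_item_t *item;

    /* Non-blocking: item is NULL once the max number of allocations
     * has been reached and the list cannot grow any further. */
    OMPI_FREE_LIST_GET(fl, item);
    if (NULL == item) {
        return OMPI_ERR_OUT_OF_RESOURCE;
    }
    OMPI_FREE_LIST_RETURN(fl, item);

    /* Blocking: waits until an item is released back to the list,
     * so the result does not need to be checked. */
    OMPI_FREE_LIST_WAIT(fl, item);
    OMPI_FREE_LIST_RETURN(fl, item);

    return OMPI_SUCCESS;
}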
Index: ompi/class/ompi_rb_tree.c
===================================================================
--- ompi/class/ompi_rb_tree.c	(revision 28700)
+++ ompi/class/ompi_rb_tree.c	(working copy)
@@ -2,7 +2,7 @@
  * Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
  *                         University Research and Technology
  *                         Corporation.  All rights reserved.
- * Copyright (c) 2004-2005 The University of Tennessee and The University
+ * Copyright (c) 2004-2013 The University of Tennessee and The University
  *                         of Tennessee Research Foundation.  All rights
  *                         reserved.
  * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart, 
@@ -79,19 +79,18 @@
 int ompi_rb_tree_init(ompi_rb_tree_t * tree,
                       ompi_rb_tree_comp_fn_t comp)
 {
-    int rc;
-
     ompi_free_list_item_t * node;
     /* we need to get memory for the root pointer from the free list */
-    OMPI_FREE_LIST_GET(&(tree->free_list), node, rc);
+    OMPI_FREE_LIST_GET(&(tree->free_list), node);
     tree->root_ptr = (ompi_rb_tree_node_t *) node;
-    if (OMPI_SUCCESS != rc) {
-        return rc;
+    if (NULL == node) {
+        return OMPI_ERR_OUT_OF_RESOURCE;
     }

-    OMPI_FREE_LIST_GET(&(tree->free_list), node, rc);
-    if (OMPI_SUCCESS != rc) {
-        return rc;
+    OMPI_FREE_LIST_GET(&(tree->free_list), node);
+    if (NULL == node) {
+        OMPI_FREE_LIST_RETURN(&(tree->free_list), (ompi_free_list_item_t*)tree->root_ptr);
+        return OMPI_ERR_OUT_OF_RESOURCE;
     }
     tree->nill = (ompi_rb_tree_node_t *) node;
     /* initialize tree->nill */
@@ -121,12 +120,11 @@
     ompi_rb_tree_node_t * y;
     ompi_rb_tree_node_t * node;
     ompi_free_list_item_t * item;
-    int rc;

     /* get the memory for a node */
-    OMPI_FREE_LIST_GET(&(tree->free_list), item, rc);
-    if (OMPI_SUCCESS != rc) {
-        return rc;
+    OMPI_FREE_LIST_GET(&(tree->free_list), item);
+    if (NULL == item) {
+        return OMPI_ERR_OUT_OF_RESOURCE;
     }
     node = (ompi_rb_tree_node_t *) item;
     /* insert the data into the node */
Index: ompi/mca/allocator/basic/allocator_basic.c
===================================================================
--- ompi/mca/allocator/basic/allocator_basic.c	(revision 28700)
+++ ompi/mca/allocator/basic/allocator_basic.c	(working copy)
@@ -207,9 +207,8 @@

     /* create a segment for any extra allocation */
     if(allocated_size > size) {
-        int rc;
-        OMPI_FREE_LIST_GET(&module->seg_descriptors, item, rc);
-        if(rc != OMPI_SUCCESS) {
+        OMPI_FREE_LIST_GET(&module->seg_descriptors, item);
+        if(NULL == item) {
             OPAL_THREAD_UNLOCK(&module->seg_lock);
             return NULL;
         }
@@ -278,7 +277,6 @@
     ompi_free_list_item_t *item;
     unsigned char* addr = (unsigned char*)ptr - sizeof(size_t);
     size_t size = *(size_t*)addr;
-    int rc;
     OPAL_THREAD_LOCK(&module->seg_lock);

     /* maintain the free list in sorted order by address */
@@ -311,8 +309,8 @@
             /* insert before larger entry */
             } else {
                 mca_allocator_basic_segment_t* new_seg;
-                OMPI_FREE_LIST_GET(&module->seg_descriptors, item, rc);
-                if(rc != OMPI_SUCCESS) {
+                OMPI_FREE_LIST_GET(&module->seg_descriptors, item);
+                if(NULL == item) {
                     OPAL_THREAD_UNLOCK(&module->seg_lock);
                     return;
                 }
@@ -327,8 +325,8 @@
     }

     /* append to the end of the list */
-    OMPI_FREE_LIST_GET(&module->seg_descriptors, item, rc);
-    if(rc != OMPI_SUCCESS) {
+    OMPI_FREE_LIST_GET(&module->seg_descriptors, item);
+    if(NULL == item) {
         OPAL_THREAD_UNLOCK(&module->seg_lock);
         return;
     }
Index: ompi/mca/bcol/iboffload/bcol_iboffload_barrier.c
===================================================================
--- ompi/mca/bcol/iboffload/bcol_iboffload_barrier.c	(revision 28700)
+++ ompi/mca/bcol/iboffload/bcol_iboffload_barrier.c	(working copy)
@@ -762,8 +762,6 @@
         collective_message_completion_callback_function cb_fn,
         struct mca_bcol_iboffload_collreq_t **coll_request)
 {
-    int rc;
-
     ompi_free_list_item_t *item;
     mca_bcol_iboffload_collfrag_t *coll_fragment;

@@ -771,10 +769,10 @@

     IBOFFLOAD_VERBOSE(10, ("Calling for mca_bcol_iboffload_barrier_init"));

-    OMPI_FREE_LIST_WAIT(&cm->collreqs_free, item, rc);
-    if (OPAL_UNLIKELY(OMPI_SUCCESS != rc)) {
+    OMPI_FREE_LIST_WAIT(&cm->collreqs_free, item);
+    if (OPAL_UNLIKELY(NULL == item)) {
         IBOFFLOAD_VERBOSE(10, ("Failing for coll request free list waiting.\n"));
-        return rc;
+        return OMPI_ERR_OUT_OF_RESOURCE;
     }

     (*coll_request) = (mca_bcol_iboffload_collreq_t *) item;
Index: ompi/mca/bcol/iboffload/bcol_iboffload_bcast.c
===================================================================
--- ompi/mca/bcol/iboffload/bcol_iboffload_bcast.c	(revision 28700)
+++ ompi/mca/bcol/iboffload/bcol_iboffload_bcast.c	(working copy)
@@ -34,17 +34,15 @@
                                bool if_bcol_last, int mq_credits,
                                collective_message_progress_function progress_fn)
 {
-    int rc;
-
     ompi_free_list_item_t *item;
     mca_bcol_iboffload_collfrag_t *coll_fragment;
     mca_bcol_iboffload_component_t *cm = &mca_bcol_iboffload_component;
     int my_group_index = iboffload_module->super.sbgp_partner_module->my_index;

-    OMPI_FREE_LIST_WAIT(&cm->collreqs_free, item, rc);
-    if (OPAL_UNLIKELY(OMPI_SUCCESS != rc)) {
+    OMPI_FREE_LIST_WAIT(&cm->collreqs_free, item);
+    if (OPAL_UNLIKELY(NULL == item)) {
         IBOFFLOAD_ERROR(("Wait for free list failed.\n"));
-        return rc;
+        return OMPI_ERR_OUT_OF_RESOURCE;
     }
     /* setup call request */
     (*coll_request) = (mca_bcol_iboffload_collreq_t *) item;
Index: ompi/mca/bcol/iboffload/bcol_iboffload_collfrag.h
===================================================================
--- ompi/mca/bcol/iboffload/bcol_iboffload_collfrag.h	(revision 28700)
+++ ompi/mca/bcol/iboffload/bcol_iboffload_collfrag.h	(working copy)
@@ -115,13 +115,12 @@
                 struct mca_bcol_iboffload_collfrag_t *
                        mca_bcol_iboffload_get_collfrag(void)
 {
-    int rc;
     ompi_free_list_item_t *item;
     mca_bcol_iboffload_collfrag_t *cf;
     mca_bcol_iboffload_component_t *cm = &mca_bcol_iboffload_component;

     /* blocking allocation for collectives fragment */
-    OMPI_FREE_LIST_GET(&cm->collfrags_free, item, rc);
+    OMPI_FREE_LIST_GET(&cm->collfrags_free, item);
     if (OPAL_UNLIKELY(NULL == item)) {
         IBOFFLOAD_ERROR(("Failed to allocated collfrag.\n"));
         return NULL;
Index: ompi/mca/bcol/iboffload/bcol_iboffload_fanin.c
===================================================================
--- ompi/mca/bcol/iboffload/bcol_iboffload_fanin.c	(revision 28700)
+++ ompi/mca/bcol/iboffload/bcol_iboffload_fanin.c	(working copy)
@@ -169,8 +169,6 @@
                 mca_bcol_iboffload_module_t *iboffload,
                 struct mca_bcol_iboffload_collreq_t **coll_request)
 {
-    int rc;
-
     ompi_free_list_item_t *item = NULL;
     mca_bcol_iboffload_collfrag_t *coll_fragment = NULL;

@@ -178,10 +176,10 @@

     IBOFFLOAD_VERBOSE(10, ("Calling for mca_bcol_iboffload_barrier_init"));

-    OMPI_FREE_LIST_WAIT(&cm->collreqs_free, item, rc);
-    if(OPAL_UNLIKELY(OMPI_SUCCESS != rc)) {
+    OMPI_FREE_LIST_WAIT(&cm->collreqs_free, item);
+    if(OPAL_UNLIKELY(NULL == item)) {
         IBOFFLOAD_VERBOSE(10, ("Failing for coll request free list waiting.\n"));
-        return rc;
+        return OMPI_ERR_OUT_OF_RESOURCE;
     }

     (*coll_request) = (mca_bcol_iboffload_collreq_t *) item;
Index: ompi/mca/bcol/iboffload/bcol_iboffload_fanout.c
===================================================================
--- ompi/mca/bcol/iboffload/bcol_iboffload_fanout.c	(revision 28700)
+++ ompi/mca/bcol/iboffload/bcol_iboffload_fanout.c	(working copy)
@@ -168,8 +168,6 @@
                 mca_bcol_iboffload_module_t *iboffload,
                 struct mca_bcol_iboffload_collreq_t **coll_request)
 {
-    int rc;
-
     ompi_free_list_item_t *item = NULL;
     mca_bcol_iboffload_collfrag_t *coll_fragment = NULL;

@@ -177,10 +175,10 @@

     IBOFFLOAD_VERBOSE(10, ("Calling for mca_bcol_iboffload_barrier_init"));

-    OMPI_FREE_LIST_WAIT(&cm->collreqs_free, item, rc);
-    if(OMPI_SUCCESS != rc) {
+    OMPI_FREE_LIST_WAIT(&cm->collreqs_free, item);
+    if(NULL == item) {
         IBOFFLOAD_VERBOSE(10, ("Failing for coll request free list waiting.\n"));
-        return rc;
+        return OMPI_ERR_OUT_OF_RESOURCE;
     }

     (*coll_request) = (mca_bcol_iboffload_collreq_t *) item;
Index: ompi/mca/bcol/iboffload/bcol_iboffload_frag.c
===================================================================
--- ompi/mca/bcol/iboffload/bcol_iboffload_frag.c	(revision 28700)
+++ ompi/mca/bcol/iboffload/bcol_iboffload_frag.c	(working copy)
@@ -103,7 +103,7 @@
     mca_bcol_iboffload_device_t *device = iboffload->device;

     /* Get frag from free list */
-    OMPI_FREE_LIST_GET(&device->frags_free[qp_index], item, rc);
+    OMPI_FREE_LIST_GET(&device->frags_free[qp_index], item);
     if (OPAL_UNLIKELY(NULL == item)) {
         return NULL;
     }
@@ -143,7 +143,7 @@
     IBOFFLOAD_VERBOSE(10, ("Start to pack frag.\n"));

     /* Get frag from free list */
-    OMPI_FREE_LIST_GET(&device->frags_free[qp_index], item, rc);
+    OMPI_FREE_LIST_GET(&device->frags_free[qp_index], item);
     if (OPAL_UNLIKELY(NULL == item)) {
         return NULL;
     }
@@ -202,12 +202,11 @@

         case MCA_BCOL_IBOFFLOAD_SEND_FRAG:
         {
-            int rc;
             ompi_free_list_item_t *item;
             IBOFFLOAD_VERBOSE(10, ("Getting MCA_BCOL_IBOFFLOAD_SEND_FRAG"));

             /* Get frag from free list */
-            OMPI_FREE_LIST_GET(&iboffload->device->frags_free[qp_index], item, rc);
+            OMPI_FREE_LIST_GET(&iboffload->device->frags_free[qp_index], item);

             frag = (mca_bcol_iboffload_frag_t *) item;
         }
Index: ompi/mca/bcol/iboffload/bcol_iboffload_frag.h
===================================================================
--- ompi/mca/bcol/iboffload/bcol_iboffload_frag.h	(revision 28700)
+++ ompi/mca/bcol/iboffload/bcol_iboffload_frag.h	(working copy)
@@ -103,16 +103,13 @@
                     mca_bcol_iboffload_module_t *iboffload,
                     int qp_index)
 {
-    /* local variables */
-    int rc;
-
     ompi_free_list_item_t *item;
     mca_bcol_iboffload_frag_t *frag;

     mca_bcol_iboffload_component_t *cm = &mca_bcol_iboffload_component;

     /* Get frag from free list */
-    OMPI_FREE_LIST_GET(&cm->ml_frags_free, item, rc);
+    OMPI_FREE_LIST_GET(&cm->ml_frags_free, item);
     if (OPAL_UNLIKELY(NULL == item)) {
         return NULL;
     }
Index: ompi/mca/bcol/iboffload/bcol_iboffload_module.c
===================================================================
--- ompi/mca/bcol/iboffload/bcol_iboffload_module.c	(revision 28700)
+++ ompi/mca/bcol/iboffload/bcol_iboffload_module.c	(working copy)
@@ -1372,10 +1372,10 @@

     mca_bcol_iboffload_component_t *cm = &mca_bcol_iboffload_component;

-    OMPI_FREE_LIST_WAIT(&cm->collreqs_free, item, rc);
-    if (OMPI_SUCCESS != rc) {
+    OMPI_FREE_LIST_WAIT(&cm->collreqs_free, item);
+    if (NULL == item) {
         IBOFFLOAD_ERROR(("Failing for coll request free list waiting.\n"));
-        return rc;
+        return OMPI_ERR_OUT_OF_RESOURCE;
     }

     coll_request = (mca_bcol_iboffload_collreq_t *) item;
Index: ompi/mca/bcol/iboffload/bcol_iboffload_qp_info.c
===================================================================
--- ompi/mca/bcol/iboffload/bcol_iboffload_qp_info.c	(revision 28700)
+++ ompi/mca/bcol/iboffload/bcol_iboffload_qp_info.c	(working copy)
@@ -150,7 +150,7 @@

     while (num_preposted < num_to_prepost) {
         /* put the item on list of preposted */
-        OMPI_FREE_LIST_GET(&device->frags_free[qp_index], item, ret);
+        OMPI_FREE_LIST_GET(&device->frags_free[qp_index], item);
         if (OPAL_UNLIKELY(NULL == item)) {
             break;
         }
Index: ompi/mca/bcol/iboffload/bcol_iboffload_task.h
===================================================================
--- ompi/mca/bcol/iboffload/bcol_iboffload_task.h	(revision 28700)
+++ ompi/mca/bcol/iboffload/bcol_iboffload_task.h	(working copy)
@@ -136,7 +136,6 @@
                                          mca_bcol_iboffload_frag_t *frags,
                                          int qp_index, struct ibv_qp *qp)
 {
-    int rc;
     ompi_free_list_item_t *item;
     mca_bcol_iboffload_task_t *task;

@@ -144,7 +143,7 @@
     mca_bcol_iboffload_endpoint_t *endpoint = iboffload->endpoints[source];

     /* blocking allocation for send fragment */
-    OMPI_FREE_LIST_GET(&cm->tasks_free, item, rc);
+    OMPI_FREE_LIST_GET(&cm->tasks_free, item);
     if (OPAL_UNLIKELY(NULL == item)) {
         mca_bcol_iboffload_return_recv_frags_toendpoint(frags, endpoint, qp_index);
         return NULL;
@@ -193,8 +192,6 @@
         int qp_index, ompi_free_list_t *task_list,
         mca_bcol_iboffload_collfrag_t *collfrag)
 {
-    int rc;
-
     ompi_free_list_item_t *item;
     mca_bcol_iboffload_task_t *task;

@@ -203,7 +200,7 @@
                             endpoint->iboffload_module->ibnet->super.group_list[endpoint->index]));

     /* get item from free list */
-    OMPI_FREE_LIST_GET(task_list, item, rc);
+    OMPI_FREE_LIST_GET(task_list, item);
     if (OPAL_UNLIKELY(NULL == item)) {
         return NULL;
     }
Index: ompi/mca/bcol/ptpcoll/bcol_ptpcoll_barrier.c
===================================================================
--- ompi/mca/bcol/ptpcoll/bcol_ptpcoll_barrier.c	(revision 28700)
+++ ompi/mca/bcol/ptpcoll/bcol_ptpcoll_barrier.c	(working copy)
@@ -53,10 +53,10 @@

     mca_bcol_ptpcoll_collreq_t *collreq;

-    OMPI_FREE_LIST_WAIT(&ptpcoll_module->collreqs_free, item, rc);
-    if (OPAL_UNLIKELY(OMPI_SUCCESS != rc)) {
+    OMPI_FREE_LIST_WAIT(&ptpcoll_module->collreqs_free, item);
+    if (OPAL_UNLIKELY(NULL == item)) {
         PTPCOLL_ERROR(("Free list waiting failed."));
-        return rc;
+        return OMPI_ERR_OUT_OF_RESOURCE;
     }

     collreq = (mca_bcol_ptpcoll_collreq_t *) item;
@@ -384,10 +384,10 @@

     mca_bcol_ptpcoll_collreq_t *collreq;

-    OMPI_FREE_LIST_WAIT(&ptpcoll_module->collreqs_free, item, rc);
-    if (OPAL_UNLIKELY(OMPI_SUCCESS != rc)) {
+    OMPI_FREE_LIST_WAIT(&ptpcoll_module->collreqs_free, item);
+    if (OPAL_UNLIKELY(NULL == item)) {
         PTPCOLL_ERROR(("Free list waiting failed."));
-        return rc;
+        return OMPI_ERR_OUT_OF_RESOURCE;
     }

     collreq = (mca_bcol_ptpcoll_collreq_t *) item;
@@ -466,10 +466,10 @@

     mca_bcol_ptpcoll_collreq_t *collreq;

-    OMPI_FREE_LIST_WAIT(&ptp_module->collreqs_free, item, rc);
-    if (OPAL_UNLIKELY(OMPI_SUCCESS != rc)) {
+    OMPI_FREE_LIST_WAIT(&ptp_module->collreqs_free, item);
+    if (OPAL_UNLIKELY(NULL == item)) {
         PTPCOLL_ERROR(("Free list waiting failed."));
-        return rc;
+        return OMPI_ERR_OUT_OF_RESOURCE;
     }

     collreq = (mca_bcol_ptpcoll_collreq_t *) item;
@@ -771,10 +771,10 @@
                          (mca_bcol_ptpcoll_module_t *) const_args->bcol_module;
     ompi_communicator_t *comm = ptp_module->super.sbgp_partner_module->group_comm;

-    OMPI_FREE_LIST_WAIT(&ptp_module->collreqs_free, item, rc);
-    if (OPAL_UNLIKELY(OMPI_SUCCESS != rc)) {
+    OMPI_FREE_LIST_WAIT(&ptp_module->collreqs_free, item);
+    if (OPAL_UNLIKELY(NULL == item)) {
         PTPCOLL_ERROR(("Free list waiting failed."));
-        return rc;
+        return OMPI_ERR_OUT_OF_RESOURCE;
     }

     collreq = (mca_bcol_ptpcoll_collreq_t *) item;
Index: ompi/mca/btl/mx/btl_mx.c
===================================================================
--- ompi/mca/btl/mx/btl_mx.c	(revision 28700)
+++ ompi/mca/btl/mx/btl_mx.c	(working copy)
@@ -2,7 +2,7 @@
  * Copyright (c) 2004-2010 The Trustees of Indiana University and Indiana
  *                         University Research and Technology
  *                         Corporation.  All rights reserved.
- * Copyright (c) 2004-2008 The University of Tennessee and The University
+ * Copyright (c) 2004-2013 The University of Tennessee and The University
  *                         of Tennessee Research Foundation.  All rights
  *                         reserved.
  * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart, 
@@ -123,11 +123,11 @@
         mca_btl_mx_frag_t* frag = NULL;
         mx_return_t mx_return;
         mx_segment_t mx_segment;
-        int i, rc;
+        int i;

         /* Post the receives if there is no unexpected handler */
         for( i = 0; i < mca_btl_mx_component.mx_max_posted_recv; i++ ) {
-            MCA_BTL_MX_FRAG_ALLOC_EAGER( mx_btl, frag, rc );
+            MCA_BTL_MX_FRAG_ALLOC_EAGER(mx_btl, frag);
             if( NULL == frag ) {
                 opal_output( 0, "mca_btl_mx_register: unable to allocate more eager fragments\n" );
                 if( 0 == i ) {
@@ -175,9 +175,8 @@
 {
     mca_btl_mx_module_t* mx_btl = (mca_btl_mx_module_t*) btl; 
     mca_btl_mx_frag_t* frag = NULL;
-    int rc;

-    MCA_BTL_MX_FRAG_ALLOC_EAGER(mx_btl, frag, rc);
+    MCA_BTL_MX_FRAG_ALLOC_EAGER(mx_btl, frag);
     if( OPAL_UNLIKELY(NULL == frag) ) {
         return NULL;
     }
@@ -229,7 +228,6 @@
     struct iovec iov;
     uint32_t iov_count = 1;
     size_t max_data;
-    int rc;

     max_data = btl->btl_eager_limit - reserve;
     if( (*size) < max_data ) {
@@ -245,21 +243,21 @@
          */
         iov.iov_base = NULL;
         if( 0 == reserve ) {
-            MCA_BTL_MX_FRAG_ALLOC_USER(btl, frag, rc);
+            MCA_BTL_MX_FRAG_ALLOC_USER(btl, frag);
             if( OPAL_UNLIKELY(NULL == frag) ) {
                 return NULL;
             }
             max_data = *size;
             frag->base.des_src_cnt = 1;
         } else {
-            MCA_BTL_MX_FRAG_ALLOC_EAGER( mx_btl, frag, rc );
+            MCA_BTL_MX_FRAG_ALLOC_EAGER(mx_btl, frag);
             if( OPAL_UNLIKELY(NULL == frag) ) {
                 return NULL;
             }
             frag->base.des_src_cnt = 2;
         }
     } else {
-        MCA_BTL_MX_FRAG_ALLOC_EAGER( mx_btl, frag, rc );
+        MCA_BTL_MX_FRAG_ALLOC_EAGER(mx_btl, frag);
         if( OPAL_UNLIKELY(NULL == frag) ) {
             return NULL;
         }
@@ -314,9 +312,8 @@
     mca_btl_mx_frag_t* frag = NULL;
     mx_return_t mx_return;
     mx_segment_t mx_segment;
-    int rc;

-    MCA_BTL_MX_FRAG_ALLOC_USER(btl, frag, rc);
+    MCA_BTL_MX_FRAG_ALLOC_USER(btl, frag);
     if( OPAL_UNLIKELY(NULL == frag) ) {
         return NULL;
     }
Index: ompi/mca/btl/mx/btl_mx_frag.h
===================================================================
--- ompi/mca/btl/mx/btl_mx_frag.h	(revision 28700)
+++ ompi/mca/btl/mx/btl_mx_frag.h	(working copy)
@@ -2,7 +2,7 @@
  * Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
  *                         University Research and Technology
  *                         Corporation.  All rights reserved.
- * Copyright (c) 2004-2008 The University of Tennessee and The University
+ * Copyright (c) 2004-2013 The University of Tennessee and The University
  *                         of Tennessee Research Foundation.  All rights
  *                         reserved.
  * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart, 
@@ -67,10 +67,10 @@
  * free list(s).
  */

-#define MCA_BTL_MX_FRAG_ALLOC_EAGER(btl, frag, rc)                            \
+#define MCA_BTL_MX_FRAG_ALLOC_EAGER(btl, frag)                                \
 do {                                                                          \
     ompi_free_list_item_t *item;                                              \
-    OMPI_FREE_LIST_GET( &mca_btl_mx_component.mx_send_eager_frags, item, rc); \
+    OMPI_FREE_LIST_GET( &mca_btl_mx_component.mx_send_eager_frags, item);     \
     if( OPAL_LIKELY(NULL != item) ) {                                         \
         frag = (mca_btl_mx_frag_t*) item;                                     \
         frag->mx_frag_list = &(mca_btl_mx_component.mx_send_eager_frags);     \
@@ -78,10 +78,10 @@
     }                                                                         \
 } while(0)

-#define MCA_BTL_MX_FRAG_ALLOC_USER(btl, frag, rc)                            \
+#define MCA_BTL_MX_FRAG_ALLOC_USER(btl, frag)                                \
 do {                                                                         \
     ompi_free_list_item_t *item;                                             \
-    OMPI_FREE_LIST_GET( &mca_btl_mx_component.mx_send_user_frags, item, rc); \
+    OMPI_FREE_LIST_GET( &mca_btl_mx_component.mx_send_user_frags, item);     \
     if( OPAL_LIKELY(NULL != item) ) {                                        \
         frag = (mca_btl_mx_frag_t*) item;                                    \
         frag->mx_frag_list = &(mca_btl_mx_component.mx_send_user_frags);     \
Index: ompi/mca/btl/openib/btl_openib.c
===================================================================
--- ompi/mca/btl/openib/btl_openib.c	(revision 28700)
+++ ompi/mca/btl/openib/btl_openib.c	(working copy)
@@ -965,12 +965,12 @@
 ib_frag_alloc(mca_btl_openib_module_t *btl, size_t size, uint8_t order,
         uint32_t flags)
 {
-    int qp, rc;
+    int qp;
     ompi_free_list_item_t* item = NULL;

     for(qp = 0; qp < mca_btl_openib_component.num_qps; qp++) {
          if(mca_btl_openib_component.qp_infos[qp].size >= size) {
-             OMPI_FREE_LIST_GET(&btl->device->qps[qp].send_free, item, rc);
+             OMPI_FREE_LIST_GET(&btl->device->qps[qp].send_free, item);
              if(item)
                  break;
          }
@@ -1550,8 +1550,7 @@
     mca_btl_openib_module_t *obtl = (mca_btl_openib_module_t*)btl;
     size_t size = payload_size + header_size;
     size_t eager_limit;
-    int rc,
-        qp = frag_size_to_order(obtl, size),
+    int qp = frag_size_to_order(obtl, size),
         prio = !(flags & MCA_BTL_DES_FLAGS_PRIORITY),
         ib_rc;
     int32_t cm_return;
@@ -1606,7 +1605,7 @@
     }

     /* Allocate fragment */
-    OMPI_FREE_LIST_GET(&obtl->device->qps[qp].send_free, item, rc);
+    OMPI_FREE_LIST_GET(&obtl->device->qps[qp].send_free, item);
     if(OPAL_UNLIKELY(NULL == item)) {
         /* we don't return NULL because maybe later we will try to coalesce */
         goto no_frags;
Index: ompi/mca/btl/openib/btl_openib_component.c
===================================================================
--- ompi/mca/btl/openib/btl_openib_component.c	(revision 28700)
+++ ompi/mca/btl/openib/btl_openib_component.c	(working copy)
@@ -3802,7 +3802,7 @@

     for(i = 0; i < num_post; i++) {
         ompi_free_list_item_t* item;
-        OMPI_FREE_LIST_WAIT(&openib_btl->device->qps[qp].recv_free, item, rc);
+        OMPI_FREE_LIST_WAIT(&openib_btl->device->qps[qp].recv_free, item);
         to_base_frag(item)->base.order = qp;
         to_com_frag(item)->endpoint = NULL;
         if(NULL == wr)
Index: ompi/mca/btl/openib/btl_openib_endpoint.h
===================================================================
--- ompi/mca/btl/openib/btl_openib_endpoint.h	(revision 28700)
+++ ompi/mca/btl/openib/btl_openib_endpoint.h	(working copy)
@@ -2,7 +2,7 @@
  * Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
  *                         University Research and Technology
  *                         Corporation.  All rights reserved.
- * Copyright (c) 2004-2006 The University of Tennessee and The University
+ * Copyright (c) 2004-2013 The University of Tennessee and The University
  *                         of Tennessee Research Foundation.  All rights
  *                         reserved.
  * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
@@ -359,7 +359,6 @@
     for(i = 0; i < num_post; i++) {
-        int rc;
         ompi_free_list_item_t* item;
-        OMPI_FREE_LIST_WAIT(&openib_btl->device->qps[qp].recv_free, item, rc);
+        OMPI_FREE_LIST_WAIT(&openib_btl->device->qps[qp].recv_free, item);
         to_base_frag(item)->base.order = qp;
         to_com_frag(item)->endpoint = ep;
         if(NULL == wr)
Index: ompi/mca/btl/openib/btl_openib_frag.h
===================================================================
--- ompi/mca/btl/openib/btl_openib_frag.h	(revision 28700)
+++ ompi/mca/btl/openib/btl_openib_frag.h	(working copy)
@@ -2,7 +2,7 @@
  * Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
  *                         University Research and Technology
  *                         Corporation.  All rights reserved.
- * Copyright (c) 2004-2006 The University of Tennessee and The University
+ * Copyright (c) 2004-2013 The University of Tennessee and The University
  *                         of Tennessee Research Foundation.  All rights
  *                         reserved.
  * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
@@ -384,10 +384,9 @@
 static inline mca_btl_openib_send_control_frag_t *
 alloc_control_frag(mca_btl_openib_module_t *btl)
 {
-    int rc;
     ompi_free_list_item_t *item;

-    OMPI_FREE_LIST_WAIT(&btl->device->send_free_control, item, rc);
+    OMPI_FREE_LIST_WAIT(&btl->device->send_free_control, item);

     return to_send_control_frag(item);
 }
@@ -405,30 +404,27 @@

 static inline mca_btl_openib_com_frag_t *alloc_send_user_frag(void)
 {
-    int rc;
     ompi_free_list_item_t *item;

-    OMPI_FREE_LIST_GET(&mca_btl_openib_component.send_user_free, item, rc);
+    OMPI_FREE_LIST_GET(&mca_btl_openib_component.send_user_free, item);

     return to_com_frag(item);
 }

 static inline mca_btl_openib_com_frag_t *alloc_recv_user_frag(void)
 {
-    int rc;
     ompi_free_list_item_t *item;

-    OMPI_FREE_LIST_GET(&mca_btl_openib_component.recv_user_free, item, rc);
+    OMPI_FREE_LIST_GET(&mca_btl_openib_component.recv_user_free, item);

     return to_com_frag(item);
 }

 static inline mca_btl_openib_coalesced_frag_t *alloc_coalesced_frag(void)
 {
-    int rc;
     ompi_free_list_item_t *item;

-    OMPI_FREE_LIST_GET(&mca_btl_openib_component.send_free_coalesced, item, rc);
+    OMPI_FREE_LIST_GET(&mca_btl_openib_component.send_free_coalesced, item);

     return to_coalesced_frag(item);
 }
Index: ompi/mca/btl/sctp/btl_sctp.c
===================================================================
--- ompi/mca/btl/sctp/btl_sctp.c	(revision 28700)
+++ ompi/mca/btl/sctp/btl_sctp.c	(working copy)
@@ -2,7 +2,7 @@
  * Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
  *                         University Research and Technology
  *                         Corporation.  All rights reserved.
- * Copyright (c) 2004-2009 The University of Tennessee and The University
+ * Copyright (c) 2004-2013 The University of Tennessee and The University
  *                         of Tennessee Research Foundation.  All rights
  *                         reserved.
  * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart, 
@@ -173,12 +173,11 @@
     uint32_t flags)
 {
     mca_btl_sctp_frag_t* frag = NULL;
-    int rc;

     if(size <= btl->btl_eager_limit) { 
-        MCA_BTL_SCTP_FRAG_ALLOC_EAGER(frag, rc); 
+        MCA_BTL_SCTP_FRAG_ALLOC_EAGER(frag); 
     } else if (size <= btl->btl_max_send_size) { 
-        MCA_BTL_SCTP_FRAG_ALLOC_MAX(frag, rc); 
+        MCA_BTL_SCTP_FRAG_ALLOC_MAX(frag); 
     }
     if( NULL == frag ) {
         return NULL;
@@ -240,7 +239,7 @@
     */

     if (max_data+reserve <= btl->btl_eager_limit) {
-        MCA_BTL_SCTP_FRAG_ALLOC_EAGER(frag, rc);
+        MCA_BTL_SCTP_FRAG_ALLOC_EAGER(frag);
     }

     /* 
@@ -248,7 +247,7 @@
      * that is the max send size.
      */
     else {
-        MCA_BTL_SCTP_FRAG_ALLOC_MAX(frag, rc);
+        MCA_BTL_SCTP_FRAG_ALLOC_MAX(frag);
     }
     if(NULL == frag) {
         return NULL;
@@ -331,9 +330,8 @@
 {
     mca_btl_sctp_frag_t* frag;
     ptrdiff_t lb;
-    int rc;

-    MCA_BTL_SCTP_FRAG_ALLOC_USER(frag, rc);
+    MCA_BTL_SCTP_FRAG_ALLOC_USER(frag);
     if(NULL == frag) {
         return NULL;
     }
Index: ompi/mca/btl/sctp/btl_sctp_endpoint.c
===================================================================
--- ompi/mca/btl/sctp/btl_sctp_endpoint.c	(revision 28700)
+++ ompi/mca/btl/sctp/btl_sctp_endpoint.c	(working copy)
@@ -2,7 +2,7 @@
  * Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
  *                         University Research and Technology
  *                         Corporation.  All rights reserved.
- * Copyright (c) 2004-2011 The University of Tennessee and The University
+ * Copyright (c) 2004-2013 The University of Tennessee and The University
  *                         of Tennessee Research Foundation.  All rights
  *                         reserved.
  * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart, 
@@ -1072,12 +1072,11 @@

             frag = btl_endpoint->endpoint_recv_frag;
             if(NULL == frag) {
-                int rc;
                 if(mca_btl_sctp_module.super.btl_max_send_size > 
                    mca_btl_sctp_module.super.btl_eager_limit) { 
-                    MCA_BTL_SCTP_FRAG_ALLOC_MAX(frag, rc);
+                    MCA_BTL_SCTP_FRAG_ALLOC_MAX(frag);
                 } else { 
-                    MCA_BTL_SCTP_FRAG_ALLOC_EAGER(frag, rc);
+                    MCA_BTL_SCTP_FRAG_ALLOC_EAGER(frag);
                 }

                 if(NULL == frag) {
Index: ompi/mca/btl/sctp/btl_sctp_frag.h
===================================================================
--- ompi/mca/btl/sctp/btl_sctp_frag.h	(revision 28700)
+++ ompi/mca/btl/sctp/btl_sctp_frag.h	(working copy)
@@ -2,7 +2,7 @@
  * Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
  *                         University Research and Technology
  *                         Corporation.  All rights reserved.
- * Copyright (c) 2004-2009 The University of Tennessee and The University
+ * Copyright (c) 2004-2013 The University of Tennessee and The University
  *                         of Tennessee Research Foundation.  All rights
  *                         reserved.
  * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart, 
@@ -75,26 +75,26 @@
  * free list(s).
  */

-#define MCA_BTL_SCTP_FRAG_ALLOC_EAGER(frag, rc)                            \
+#define MCA_BTL_SCTP_FRAG_ALLOC_EAGER(frag)                                \
 {                                                                          \
                                                                            \
     ompi_free_list_item_t *item;                                           \
-    OMPI_FREE_LIST_GET(&mca_btl_sctp_component.sctp_frag_eager, item, rc); \
+    OMPI_FREE_LIST_GET(&mca_btl_sctp_component.sctp_frag_eager, item);     \
     frag = (mca_btl_sctp_frag_t*) item;                                    \
 }

-#define MCA_BTL_SCTP_FRAG_ALLOC_MAX(frag, rc)                            \
+#define MCA_BTL_SCTP_FRAG_ALLOC_MAX(frag)                                \
 {                                                                        \
                                                                          \
     ompi_free_list_item_t *item;                                         \
-    OMPI_FREE_LIST_GET(&mca_btl_sctp_component.sctp_frag_max, item, rc); \
+    OMPI_FREE_LIST_GET(&mca_btl_sctp_component.sctp_frag_max, item);     \
     frag = (mca_btl_sctp_frag_t*) item;                                  \
 }

-#define MCA_BTL_SCTP_FRAG_ALLOC_USER(frag, rc)                            \
+#define MCA_BTL_SCTP_FRAG_ALLOC_USER(frag)                                \
 {                                                                         \
     ompi_free_list_item_t *item;                                          \
-    OMPI_FREE_LIST_GET(&mca_btl_sctp_component.sctp_frag_user, item, rc); \
+    OMPI_FREE_LIST_GET(&mca_btl_sctp_component.sctp_frag_user, item);     \
     frag = (mca_btl_sctp_frag_t*) item;                                   \
 }

Index: ompi/mca/btl/sctp/btl_sctp_recv_handler.c
===================================================================
--- ompi/mca/btl/sctp/btl_sctp_recv_handler.c	(revision 28700)
+++ ompi/mca/btl/sctp/btl_sctp_recv_handler.c	(working copy)
@@ -2,7 +2,7 @@
  * Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
  *                         University Research and Technology
  *                         Corporation.  All rights reserved.
- * Copyright (c) 2004-2006 The University of Tennessee and The University
+ * Copyright (c) 2004-2013 The University of Tennessee and The University
  *                         of Tennessee Research Foundation.  All rights
  *                         reserved.
  * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart, 
@@ -146,12 +146,11 @@

         frag = btl_endpoint->endpoint_recv_frag;
         if(NULL == frag) {
-            int rc;
             if(mca_btl_sctp_module.super.btl_max_send_size > 
                     mca_btl_sctp_module.super.btl_eager_limit) { 
-                MCA_BTL_SCTP_FRAG_ALLOC_MAX(frag, rc);
+                MCA_BTL_SCTP_FRAG_ALLOC_MAX(frag);
             } else { 
-                MCA_BTL_SCTP_FRAG_ALLOC_EAGER(frag, rc);
+                MCA_BTL_SCTP_FRAG_ALLOC_EAGER(frag);
             }

             if(NULL == frag) {
Index: ompi/mca/btl/self/btl_self.c
===================================================================
--- ompi/mca/btl/self/btl_self.c	(revision 28700)
+++ ompi/mca/btl/self/btl_self.c	(working copy)
@@ -2,7 +2,7 @@
  * Copyright (c) 2004-2007 The Trustees of Indiana University and Indiana
  *                         University Research and Technology
  *                         Corporation.  All rights reserved.
- * Copyright (c) 2004-2008 The University of Tennessee and The University
+ * Copyright (c) 2004-2013 The University of Tennessee and The University
  *                         of Tennessee Research Foundation.  All rights
  *                         reserved.
  * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart, 
@@ -126,12 +126,11 @@
         uint32_t flags)
 {
     mca_btl_self_frag_t* frag = NULL;
-    int rc;

     if(size <= mca_btl_self.btl_eager_limit) {
-        MCA_BTL_SELF_FRAG_ALLOC_EAGER(frag,rc);
+        MCA_BTL_SELF_FRAG_ALLOC_EAGER(frag);
     } else if (size <= btl->btl_max_send_size) {
-        MCA_BTL_SELF_FRAG_ALLOC_SEND(frag,rc);
+        MCA_BTL_SELF_FRAG_ALLOC_SEND(frag);
     }
     if( OPAL_UNLIKELY(NULL == frag) ) {
         return NULL; 
@@ -141,7 +140,6 @@
     frag->base.des_flags   = flags;
     frag->base.des_src     = &(frag->segment);
     frag->base.des_src_cnt = 1;
-    (void)rc;  /* unused but needed by a macro */
     return (mca_btl_base_descriptor_t*)frag;
 }

@@ -198,7 +196,7 @@
         max_data < mca_btl_self.btl_max_send_size ||
         reserve != 0 ) {

-        MCA_BTL_SELF_FRAG_ALLOC_SEND(frag, rc);
+        MCA_BTL_SELF_FRAG_ALLOC_SEND(frag);
         if(OPAL_UNLIKELY(NULL == frag)) {
             return NULL;
         }
@@ -218,7 +216,7 @@
         frag->segment.seg_len = reserve + max_data;
         *size = max_data;
     } else {
-        MCA_BTL_SELF_FRAG_ALLOC_RDMA(frag, rc);
+        MCA_BTL_SELF_FRAG_ALLOC_RDMA(frag);
         if(OPAL_UNLIKELY(NULL == frag)) {
             return NULL;
         }
@@ -258,9 +256,8 @@
     mca_btl_self_frag_t* frag;
     size_t max_data = *size;
     void *ptr;
-    int rc;

-    MCA_BTL_SELF_FRAG_ALLOC_RDMA(frag, rc);
+    MCA_BTL_SELF_FRAG_ALLOC_RDMA(frag);
     if(OPAL_UNLIKELY(NULL == frag)) {
         return NULL;
     }
@@ -273,7 +270,6 @@
     frag->base.des_dst = &frag->segment;
     frag->base.des_dst_cnt = 1;
     frag->base.des_flags = flags;
-    (void)rc;  /* unused but needed by a macro */
     return &frag->base;
 }

Index: ompi/mca/btl/self/btl_self_frag.h
===================================================================
--- ompi/mca/btl/self/btl_self_frag.h	(revision 28700)
+++ ompi/mca/btl/self/btl_self_frag.h	(working copy)
@@ -2,7 +2,7 @@
  * Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
  *                         University Research and Technology
  *                         Corporation.  All rights reserved.
- * Copyright (c) 2004-2008 The University of Tennessee and The University
+ * Copyright (c) 2004-2013 The University of Tennessee and The University
  *                         of Tennessee Research Foundation.  All rights
  *                         reserved.
  * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart, 
@@ -44,11 +44,11 @@
 OBJ_CLASS_DECLARATION(mca_btl_self_frag_send_t);
 OBJ_CLASS_DECLARATION(mca_btl_self_frag_rdma_t);

-#define MCA_BTL_SELF_FRAG_ALLOC_EAGER(frag, rc)                              \
-{                                                                            \
-    ompi_free_list_item_t* item;                                             \
-    OMPI_FREE_LIST_GET(&mca_btl_self_component.self_frags_eager, item, rc);  \
-    frag = (mca_btl_self_frag_t*)item;                                       \
+#define MCA_BTL_SELF_FRAG_ALLOC_EAGER(frag)                             \
+{                                                                       \
+    ompi_free_list_item_t* item;                                        \
+    OMPI_FREE_LIST_GET(&mca_btl_self_component.self_frags_eager, item); \
+    frag = (mca_btl_self_frag_t*)item;                                  \
 }

 #define MCA_BTL_SELF_FRAG_RETURN_EAGER(frag)                                 \
@@ -58,11 +58,11 @@
     frag->segment.seg_addr.pval = frag+1;                                    \
 }

-#define MCA_BTL_SELF_FRAG_ALLOC_SEND(frag, rc)                               \
-{                                                                            \
-    ompi_free_list_item_t* item;                                             \
-    OMPI_FREE_LIST_GET(&mca_btl_self_component.self_frags_send, item, rc);   \
-    frag = (mca_btl_self_frag_t*)item;                                       \
+#define MCA_BTL_SELF_FRAG_ALLOC_SEND(frag)                              \
+{                                                                       \
+    ompi_free_list_item_t* item;                                        \
+    OMPI_FREE_LIST_GET(&mca_btl_self_component.self_frags_send, item);  \
+    frag = (mca_btl_self_frag_t*)item;                                  \
 }

 #define MCA_BTL_SELF_FRAG_RETURN_SEND(frag)                                  \
@@ -72,11 +72,11 @@
     frag->segment.seg_addr.pval = frag+1;                                    \
 }

-#define MCA_BTL_SELF_FRAG_ALLOC_RDMA(frag, rc)                               \
-{                                                                            \
-    ompi_free_list_item_t* item;                                             \
-    OMPI_FREE_LIST_GET(&mca_btl_self_component.self_frags_rdma, item, rc);   \
-    frag = (mca_btl_self_frag_t*)item;                                       \
+#define MCA_BTL_SELF_FRAG_ALLOC_RDMA(frag)                              \
+{                                                                       \
+    ompi_free_list_item_t* item;                                        \
+    OMPI_FREE_LIST_GET(&mca_btl_self_component.self_frags_rdma, item);  \
+    frag = (mca_btl_self_frag_t*)item;                                  \
 }

 #define MCA_BTL_SELF_FRAG_RETURN_RDMA(frag)                                  \
Index: ompi/mca/btl/sm/btl_sm.c
===================================================================
--- ompi/mca/btl/sm/btl_sm.c	(revision 28700)
+++ ompi/mca/btl/sm/btl_sm.c	(working copy)
@@ -2,7 +2,7 @@
  * Copyright (c) 2004-2011 The Trustees of Indiana University and Indiana
  *                         University Research and Technology
  *                         Corporation.  All rights reserved.
- * Copyright (c) 2004-2012 The University of Tennessee and The University
+ * Copyright (c) 2004-2013 The University of Tennessee and The University
  *                         of Tennessee Research Foundation.  All rights
  *                         reserved.
  * Copyright (c) 2004-2007 High Performance Computing Center Stuttgart,
@@ -707,11 +707,10 @@
     uint32_t flags)
 {
     mca_btl_sm_frag_t* frag = NULL;
-    int rc;
     if(size <= mca_btl_sm_component.eager_limit) {
-        MCA_BTL_SM_FRAG_ALLOC_EAGER(frag,rc);
+        MCA_BTL_SM_FRAG_ALLOC_EAGER(frag);
     } else if (size <= mca_btl_sm_component.max_frag_size) {
-        MCA_BTL_SM_FRAG_ALLOC_MAX(frag,rc);
+        MCA_BTL_SM_FRAG_ALLOC_MAX(frag);
     }

     if (OPAL_LIKELY(frag != NULL)) {
@@ -766,9 +765,9 @@
                             && OPAL_UNLIKELY(!mca_btl_sm_component.use_cma)) ) {
 #endif /* OMPI_BTL_SM_HAVE_KNEM || OMPI_BTL_SM_HAVE_CMA */
         if ( reserve + max_data <= mca_btl_sm_component.eager_limit ) {
-            MCA_BTL_SM_FRAG_ALLOC_EAGER(frag,rc);
+            MCA_BTL_SM_FRAG_ALLOC_EAGER(frag);
         } else {
-            MCA_BTL_SM_FRAG_ALLOC_MAX(frag, rc);
+            MCA_BTL_SM_FRAG_ALLOC_MAX(frag);
         }
         if( OPAL_UNLIKELY(NULL == frag) ) {
             return NULL;
@@ -793,7 +792,7 @@
         struct knem_cmd_create_region knem_cr;
         struct knem_cmd_param_iovec knem_iov;
 #endif /* OMPI_BTL_SM_HAVE_KNEM */
-        MCA_BTL_SM_FRAG_ALLOC_USER(frag, rc);
+        MCA_BTL_SM_FRAG_ALLOC_USER(frag);
         if( OPAL_UNLIKELY(NULL == frag) ) {
             return NULL;
         }
@@ -903,10 +902,10 @@

         /* allocate a fragment, giving up if we can't get one */
-        /* note that frag==NULL is equivalent to rc returning an error code */
+        /* note that frag==NULL signals that the free list could not grow */
-        MCA_BTL_SM_FRAG_ALLOC_EAGER(frag, rc);
+        MCA_BTL_SM_FRAG_ALLOC_EAGER(frag);
         if( OPAL_UNLIKELY(NULL == frag) ) {
             *descriptor = NULL;
-            return rc;
+            return OMPI_ERR_OUT_OF_RESOURCE;
         }

         /* fill in fragment fields */
@@ -1013,11 +1012,10 @@
 		size_t* size,
 		uint32_t flags)
 {
-    int rc;
     void *ptr;
     mca_btl_sm_frag_t* frag;

-    MCA_BTL_SM_FRAG_ALLOC_USER(frag, rc);
+    MCA_BTL_SM_FRAG_ALLOC_USER(frag);
     if(OPAL_UNLIKELY(NULL == frag)) {
         return NULL;
     }
Index: ompi/mca/btl/sm/btl_sm_frag.h
===================================================================
--- ompi/mca/btl/sm/btl_sm_frag.h	(revision 28700)
+++ ompi/mca/btl/sm/btl_sm_frag.h	(working copy)
@@ -2,7 +2,7 @@
  * Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
  *                         University Research and Technology
  *                         Corporation.  All rights reserved.
- * Copyright (c) 2004-2009 The University of Tennessee and The University
+ * Copyright (c) 2004-2013 The University of Tennessee and The University
  *                         of Tennessee Research Foundation.  All rights
  *                         reserved.
  * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
@@ -76,24 +76,24 @@
 OBJ_CLASS_DECLARATION(mca_btl_sm_frag2_t);
 OBJ_CLASS_DECLARATION(mca_btl_sm_user_t);

-#define MCA_BTL_SM_FRAG_ALLOC_EAGER(frag, rc)                           \
+#define MCA_BTL_SM_FRAG_ALLOC_EAGER(frag)                               \
 {                                                                       \
     ompi_free_list_item_t* item;                                        \
-    OMPI_FREE_LIST_GET(&mca_btl_sm_component.sm_frags_eager, item, rc); \
+    OMPI_FREE_LIST_GET(&mca_btl_sm_component.sm_frags_eager, item);     \
     frag = (mca_btl_sm_frag_t*)item;                                    \
 }

-#define MCA_BTL_SM_FRAG_ALLOC_MAX(frag, rc)                             \
+#define MCA_BTL_SM_FRAG_ALLOC_MAX(frag)                                 \
 {                                                                       \
     ompi_free_list_item_t* item;                                        \
-    OMPI_FREE_LIST_GET(&mca_btl_sm_component.sm_frags_max, item, rc);   \
+    OMPI_FREE_LIST_GET(&mca_btl_sm_component.sm_frags_max, item);       \
     frag = (mca_btl_sm_frag_t*)item;                                    \
 }

-#define MCA_BTL_SM_FRAG_ALLOC_USER(frag, rc)                             \
+#define MCA_BTL_SM_FRAG_ALLOC_USER(frag)                                \
 {                                                                       \
 	ompi_free_list_item_t* item;                                        \
-	OMPI_FREE_LIST_GET(&mca_btl_sm_component.sm_frags_user, item, rc);   \
+	OMPI_FREE_LIST_GET(&mca_btl_sm_component.sm_frags_user, item);      \
 	frag = (mca_btl_sm_frag_t*)item;                                    \
 }

Index: ompi/mca/btl/smcuda/btl_smcuda.c
===================================================================
--- ompi/mca/btl/smcuda/btl_smcuda.c	(revision 28700)
+++ ompi/mca/btl/smcuda/btl_smcuda.c	(working copy)
@@ -2,7 +2,7 @@
  * Copyright (c) 2004-2011 The Trustees of Indiana University and Indiana
  *                         University Research and Technology
  *                         Corporation.  All rights reserved.
- * Copyright (c) 2004-2009 The University of Tennessee and The University
+ * Copyright (c) 2004-2013 The University of Tennessee and The University
  *                         of Tennessee Research Foundation.  All rights
  *                         reserved.
  * Copyright (c) 2004-2007 High Performance Computing Center Stuttgart,
@@ -733,11 +733,10 @@
     uint32_t flags)
 {
     mca_btl_smcuda_frag_t* frag = NULL;
-    int rc;
     if(size <= mca_btl_smcuda_component.eager_limit) {
-        MCA_BTL_SMCUDA_FRAG_ALLOC_EAGER(frag,rc);
+        MCA_BTL_SMCUDA_FRAG_ALLOC_EAGER(frag);
     } else if (size <= mca_btl_smcuda_component.max_frag_size) {
-        MCA_BTL_SMCUDA_FRAG_ALLOC_MAX(frag,rc);
+        MCA_BTL_SMCUDA_FRAG_ALLOC_MAX(frag);
     }

     if (OPAL_LIKELY(frag != NULL)) {
@@ -789,9 +788,9 @@
     if (0 != reserve) {
 #endif /* OMPI_CUDA_SUPPORT */
         if ( reserve + max_data <= mca_btl_smcuda_component.eager_limit ) {
-            MCA_BTL_SMCUDA_FRAG_ALLOC_EAGER(frag,rc);
+            MCA_BTL_SMCUDA_FRAG_ALLOC_EAGER(frag);
         } else {
-            MCA_BTL_SMCUDA_FRAG_ALLOC_MAX(frag, rc);
+            MCA_BTL_SMCUDA_FRAG_ALLOC_MAX(frag);
         }
         if( OPAL_UNLIKELY(NULL == frag) ) {
             return NULL;
@@ -821,7 +820,7 @@
             return NULL;
         }

-        MCA_BTL_SMCUDA_FRAG_ALLOC_USER(frag, rc);
+        MCA_BTL_SMCUDA_FRAG_ALLOC_USER(frag);
         if( OPAL_UNLIKELY(NULL == frag) ) {
             return NULL;
         }
@@ -914,10 +913,10 @@

         /* allocate a fragment, giving up if we can't get one */
-        /* note that frag==NULL is equivalent to rc returning an error code */
+        /* note that frag==NULL indicates that the fragment allocation failed */
-        MCA_BTL_SMCUDA_FRAG_ALLOC_EAGER(frag, rc);
+        MCA_BTL_SMCUDA_FRAG_ALLOC_EAGER(frag);
         if( OPAL_UNLIKELY(NULL == frag) ) {
             *descriptor = NULL;
-            return rc;
+            return OMPI_ERR_OUT_OF_RESOURCE;
         }

         /* fill in fragment fields */
@@ -1023,7 +1022,6 @@
         size_t* size,
         uint32_t flags)
 {
-    int rc;
     void *ptr;
     mca_btl_smcuda_frag_t* frag;

@@ -1032,7 +1030,7 @@
         return NULL;
     }

-    MCA_BTL_SMCUDA_FRAG_ALLOC_USER(frag, rc);
+    MCA_BTL_SMCUDA_FRAG_ALLOC_USER(frag);
     if(OPAL_UNLIKELY(NULL == frag)) {
         return NULL;
     }
Index: ompi/mca/btl/smcuda/btl_smcuda_frag.h
===================================================================
--- ompi/mca/btl/smcuda/btl_smcuda_frag.h	(revision 28700)
+++ ompi/mca/btl/smcuda/btl_smcuda_frag.h	(working copy)
@@ -2,7 +2,7 @@
  * Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
  *                         University Research and Technology
  *                         Corporation.  All rights reserved.
- * Copyright (c) 2004-2009 The University of Tennessee and The University
+ * Copyright (c) 2004-2013 The University of Tennessee and The University
  *                         of Tennessee Research Foundation.  All rights
  *                         reserved.
  * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
@@ -84,29 +84,29 @@
 OBJ_CLASS_DECLARATION(mca_btl_smcuda_frag2_t);
 OBJ_CLASS_DECLARATION(mca_btl_smcuda_user_t);

-#define MCA_BTL_SMCUDA_FRAG_ALLOC_EAGER(frag, rc)                           \
+#define MCA_BTL_SMCUDA_FRAG_ALLOC_EAGER(frag)                           \
 {                                                                       \
     ompi_free_list_item_t* item;                                        \
-    OMPI_FREE_LIST_GET(&mca_btl_smcuda_component.sm_frags_eager, item, rc); \
-    frag = (mca_btl_smcuda_frag_t*)item;                                    \
+    OMPI_FREE_LIST_GET(&mca_btl_smcuda_component.sm_frags_eager, item); \
+    frag = (mca_btl_smcuda_frag_t*)item;                                \
 }

-#define MCA_BTL_SMCUDA_FRAG_ALLOC_MAX(frag, rc)                             \
+#define MCA_BTL_SMCUDA_FRAG_ALLOC_MAX(frag)                             \
 {                                                                       \
     ompi_free_list_item_t* item;                                        \
-    OMPI_FREE_LIST_GET(&mca_btl_smcuda_component.sm_frags_max, item, rc);   \
-    frag = (mca_btl_smcuda_frag_t*)item;                                    \
+    OMPI_FREE_LIST_GET(&mca_btl_smcuda_component.sm_frags_max, item);   \
+    frag = (mca_btl_smcuda_frag_t*)item;                                \
 }

-#define MCA_BTL_SMCUDA_FRAG_ALLOC_USER(frag, rc)                             \
-{                                                                       \
+#define MCA_BTL_SMCUDA_FRAG_ALLOC_USER(frag)                            \
+    {                                                                   \
 	ompi_free_list_item_t* item;                                        \
-	OMPI_FREE_LIST_GET(&mca_btl_smcuda_component.sm_frags_user, item, rc);   \
-	frag = (mca_btl_smcuda_frag_t*)item;                                    \
+	OMPI_FREE_LIST_GET(&mca_btl_smcuda_component.sm_frags_user, item);  \
+	frag = (mca_btl_smcuda_frag_t*)item;                                \
 }


-#define MCA_BTL_SMCUDA_FRAG_RETURN(frag)                                      \
+#define MCA_BTL_SMCUDA_FRAG_RETURN(frag)                                  \
 {                                                                         \
     OMPI_FREE_LIST_RETURN(frag->my_list, (ompi_free_list_item_t*)(frag)); \
 }
Index: ompi/mca/btl/tcp/btl_tcp.c
===================================================================
--- ompi/mca/btl/tcp/btl_tcp.c	(revision 28700)
+++ ompi/mca/btl/tcp/btl_tcp.c	(working copy)
@@ -2,7 +2,7 @@
  * Copyright (c) 2004-2007 The Trustees of Indiana University and Indiana
  *                         University Research and Technology
  *                         Corporation.  All rights reserved.
- * Copyright (c) 2004-2008 The University of Tennessee and The University
+ * Copyright (c) 2004-2013 The University of Tennessee and The University
  *                         of Tennessee Research Foundation.  All rights
  *                         reserved.
  * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart, 
@@ -174,12 +174,11 @@
     uint32_t flags)
 {
     mca_btl_tcp_frag_t* frag = NULL;
-    int rc;

     if(size <= btl->btl_eager_limit) { 
-        MCA_BTL_TCP_FRAG_ALLOC_EAGER(frag, rc); 
+        MCA_BTL_TCP_FRAG_ALLOC_EAGER(frag); 
     } else if (size <= btl->btl_max_send_size) { 
-        MCA_BTL_TCP_FRAG_ALLOC_MAX(frag, rc); 
+        MCA_BTL_TCP_FRAG_ALLOC_MAX(frag); 
     }
     if( OPAL_UNLIKELY(NULL == frag) ) {
         return NULL;
@@ -243,13 +242,13 @@
      * than the eager limit pack into a fragment from the eager pool
      */
     if (max_data+reserve <= btl->btl_eager_limit) {
-        MCA_BTL_TCP_FRAG_ALLOC_EAGER(frag, rc);
+        MCA_BTL_TCP_FRAG_ALLOC_EAGER(frag);
     } else {
         /* 
          * otherwise pack as much data as we can into a fragment
          * that is the max send size.
          */
-        MCA_BTL_TCP_FRAG_ALLOC_MAX(frag, rc);
+        MCA_BTL_TCP_FRAG_ALLOC_MAX(frag);
     }
     if( OPAL_UNLIKELY(NULL == frag) ) {
         return NULL;
@@ -326,12 +325,11 @@
     uint32_t flags)
 {
     mca_btl_tcp_frag_t* frag;
-    int rc;

     if( OPAL_UNLIKELY((*size) > UINT32_MAX) ) {  /* limit the size to what we support */
         *size = (size_t)UINT32_MAX;
     }
-    MCA_BTL_TCP_FRAG_ALLOC_USER(frag, rc);
+    MCA_BTL_TCP_FRAG_ALLOC_USER(frag);
     if( OPAL_UNLIKELY(NULL == frag) ) {
         return NULL;
     }
Index: ompi/mca/btl/tcp/btl_tcp_endpoint.c
===================================================================
--- ompi/mca/btl/tcp/btl_tcp_endpoint.c	(revision 28700)
+++ ompi/mca/btl/tcp/btl_tcp_endpoint.c	(working copy)
@@ -729,12 +729,11 @@

             frag = btl_endpoint->endpoint_recv_frag;
             if(NULL == frag) {
-                int rc;
                 if(mca_btl_tcp_module.super.btl_max_send_size > 
                    mca_btl_tcp_module.super.btl_eager_limit) { 
-                    MCA_BTL_TCP_FRAG_ALLOC_MAX(frag, rc);
+                    MCA_BTL_TCP_FRAG_ALLOC_MAX(frag);
                 } else { 
-                    MCA_BTL_TCP_FRAG_ALLOC_EAGER(frag, rc);
+                    MCA_BTL_TCP_FRAG_ALLOC_EAGER(frag);
                 }

                 if(NULL == frag) {
Index: ompi/mca/btl/tcp/btl_tcp_frag.h
===================================================================
--- ompi/mca/btl/tcp/btl_tcp_frag.h	(revision 28700)
+++ ompi/mca/btl/tcp/btl_tcp_frag.h	(working copy)
@@ -2,7 +2,7 @@
  * Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
  *                         University Research and Technology
  *                         Corporation.  All rights reserved.
- * Copyright (c) 2004-2008 The University of Tennessee and The University
+ * Copyright (c) 2004-2013 The University of Tennessee and The University
  *                         of Tennessee Research Foundation.  All rights
  *                         reserved.
  * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart, 
@@ -77,24 +77,24 @@
  * free list(s).
  */

-#define MCA_BTL_TCP_FRAG_ALLOC_EAGER(frag, rc)                             \
+#define MCA_BTL_TCP_FRAG_ALLOC_EAGER(frag)                                 \
 {                                                                          \
     ompi_free_list_item_t *item;                                           \
-    OMPI_FREE_LIST_GET(&mca_btl_tcp_component.tcp_frag_eager, item, rc);   \
+    OMPI_FREE_LIST_GET(&mca_btl_tcp_component.tcp_frag_eager, item);       \
     frag = (mca_btl_tcp_frag_t*) item;                                     \
 }

-#define MCA_BTL_TCP_FRAG_ALLOC_MAX(frag, rc)                               \
+#define MCA_BTL_TCP_FRAG_ALLOC_MAX(frag)                                   \
 {                                                                          \
     ompi_free_list_item_t *item;                                           \
-    OMPI_FREE_LIST_GET(&mca_btl_tcp_component.tcp_frag_max, item, rc);     \
+    OMPI_FREE_LIST_GET(&mca_btl_tcp_component.tcp_frag_max, item);         \
     frag = (mca_btl_tcp_frag_t*) item;                                     \
 }

-#define MCA_BTL_TCP_FRAG_ALLOC_USER(frag, rc)                              \
+#define MCA_BTL_TCP_FRAG_ALLOC_USER(frag)                                  \
 {                                                                          \
     ompi_free_list_item_t *item;                                           \
-    OMPI_FREE_LIST_GET(&mca_btl_tcp_component.tcp_frag_user, item, rc);    \
+    OMPI_FREE_LIST_GET(&mca_btl_tcp_component.tcp_frag_user, item);        \
     frag = (mca_btl_tcp_frag_t*) item;                                     \
 }

Index: ompi/mca/btl/template/btl_template.c
===================================================================
--- ompi/mca/btl/template/btl_template.c	(revision 28700)
+++ ompi/mca/btl/template/btl_template.c	(working copy)
@@ -2,7 +2,7 @@
  * Copyright (c) 2004-2007 The Trustees of Indiana University and Indiana
  *                         University Research and Technology
  *                         Corporation.  All rights reserved.
- * Copyright (c) 2004-2008 The University of Tennessee and The University
+ * Copyright (c) 2004-2013 The University of Tennessee and The University
  *                         of Tennessee Research Foundation.  All rights
  *                         reserved.
  * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart, 
@@ -169,12 +169,11 @@
 {
     mca_btl_template_module_t* template_btl = (mca_btl_template_module_t*) btl; 
     mca_btl_template_frag_t* frag = NULL;
-    int rc;

     if(size <= btl->btl_eager_limit){ 
-        MCA_BTL_TEMPLATE_FRAG_ALLOC_EAGER(template_btl, frag, rc); 
+        MCA_BTL_TEMPLATE_FRAG_ALLOC_EAGER(template_btl, frag); 
     } else { 
-        MCA_BTL_TEMPLATE_FRAG_ALLOC_MAX(template_btl, frag, rc); 
+        MCA_BTL_TEMPLATE_FRAG_ALLOC_MAX(template_btl, frag); 
     }
     if( OPAL_UNLIKELY(NULL == frag) ) {
         return NULL;
@@ -241,7 +240,7 @@
     */
     if (max_data+reserve <= btl->btl_eager_limit) {

-        MCA_BTL_TEMPLATE_FRAG_ALLOC_EAGER(btl, frag, rc);
+        MCA_BTL_TEMPLATE_FRAG_ALLOC_EAGER(btl, frag);
         if(OPAL_UNLIKELY(NULL == frag)) {
             return NULL;
         }
@@ -264,7 +263,7 @@
      */
     else {

-        MCA_BTL_TEMPLATE_FRAG_ALLOC_MAX(btl, frag, rc);
+        MCA_BTL_TEMPLATE_FRAG_ALLOC_MAX(btl, frag);
         if(OPAL_UNLIKELY(NULL == frag)) {
             return NULL;
         }
@@ -318,9 +317,8 @@
     uint32_t flags)
 {
     mca_btl_template_frag_t* frag;
-    int rc;

-    MCA_BTL_TEMPLATE_FRAG_ALLOC_USER(btl, frag, rc);
+    MCA_BTL_TEMPLATE_FRAG_ALLOC_USER(btl, frag);
     if(OPAL_UNLIKELY(NULL == frag)) {
         return NULL;
     }
Index: ompi/mca/btl/template/btl_template_frag.h
===================================================================
--- ompi/mca/btl/template/btl_template_frag.h	(revision 28700)
+++ ompi/mca/btl/template/btl_template_frag.h	(working copy)
@@ -2,7 +2,7 @@
  * Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
  *                         University Research and Technology
  *                         Corporation.  All rights reserved.
- * Copyright (c) 2004-2008 The University of Tennessee and The University
+ * Copyright (c) 2004-2013 The University of Tennessee and The University
  *                         of Tennessee Research Foundation.  All rights
  *                         reserved.
  * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart, 
@@ -60,11 +60,11 @@
  * free list(s).
  */

-#define MCA_BTL_TEMPLATE_FRAG_ALLOC_EAGER(btl, frag, rc)           \
+#define MCA_BTL_TEMPLATE_FRAG_ALLOC_EAGER(btl, frag)               \
 {                                                                  \
                                                                    \
     ompi_free_list_item_t *item;                                        \
-    OMPI_FREE_LIST_GET(&((mca_btl_template_module_t*)btl)->template_frag_eager, item, rc); \
+    OMPI_FREE_LIST_GET(&((mca_btl_template_module_t*)btl)->template_frag_eager, item); \
     frag = (mca_btl_template_frag_t*) item;                        \
 }

@@ -74,11 +74,11 @@
         (ompi_free_list_item_t*)(frag));                                \
 }

-#define MCA_BTL_TEMPLATE_FRAG_ALLOC_MAX(btl, frag, rc)             \
+#define MCA_BTL_TEMPLATE_FRAG_ALLOC_MAX(btl, frag)                 \
 {                                                                  \
                                                                    \
     ompi_free_list_item_t *item;                                        \
-    OMPI_FREE_LIST_GET(&((mca_btl_template_module_t*)btl)->template_frag_max, item, rc); \
+    OMPI_FREE_LIST_GET(&((mca_btl_template_module_t*)btl)->template_frag_max, item); \
     frag = (mca_btl_template_frag_t*) item;                        \
 }

@@ -89,10 +89,10 @@
 }


-#define MCA_BTL_TEMPLATE_FRAG_ALLOC_USER(btl, frag, rc)            \
+#define MCA_BTL_TEMPLATE_FRAG_ALLOC_USER(btl, frag)                \
 {                                                                  \
     ompi_free_list_item_t *item;                                        \
-    OMPI_FREE_LIST_GET(&((mca_btl_template_module_t*)btl)->template_frag_user, item, rc); \
+    OMPI_FREE_LIST_GET(&((mca_btl_template_module_t*)btl)->template_frag_user, item); \
     frag = (mca_btl_template_frag_t*) item;                        \
 }

Index: ompi/mca/btl/udapl/btl_udapl.c
===================================================================
--- ompi/mca/btl/udapl/btl_udapl.c	(revision 28700)
+++ ompi/mca/btl/udapl/btl_udapl.c	(working copy)
@@ -3,7 +3,7 @@
  * Copyright (c) 2004-2007 The Trustees of Indiana University and Indiana
  *                         University Research and Technology
  *                         Corporation.  All rights reserved.
- * Copyright (c) 2004-2008 The University of Tennessee and The University
+ * Copyright (c) 2004-2013 The University of Tennessee and The University
  *                         of Tennessee Research Foundation.  All rights
  *                         reserved.
  * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart, 
@@ -937,7 +937,6 @@
 {
     mca_btl_udapl_module_t* udapl_btl = (mca_btl_udapl_module_t*) btl; 
     mca_btl_udapl_frag_t* frag;
-    int rc;
     int pad = 0;

     /* compute pad as needed */
@@ -945,9 +944,9 @@
         (size + sizeof(mca_btl_udapl_footer_t)));

     if((size + pad) <= btl->btl_eager_limit) { 
-        MCA_BTL_UDAPL_FRAG_ALLOC_EAGER(udapl_btl, frag, rc); 
+        MCA_BTL_UDAPL_FRAG_ALLOC_EAGER(udapl_btl, frag); 
     } else if(size <= btl->btl_max_send_size) {
-        MCA_BTL_UDAPL_FRAG_ALLOC_MAX(udapl_btl, frag, rc); 
+        MCA_BTL_UDAPL_FRAG_ALLOC_MAX(udapl_btl, frag); 
     } else {
         return NULL;
     }
@@ -1043,7 +1042,7 @@
     if(opal_convertor_need_buffers(convertor) == false && 0 == reserve) {
         if(registration != NULL || max_data > btl->btl_max_send_size) {

-            MCA_BTL_UDAPL_FRAG_ALLOC_USER(btl, frag, rc);
+            MCA_BTL_UDAPL_FRAG_ALLOC_USER(btl, frag);
             if(NULL == frag){
                 return NULL;
             }
@@ -1090,13 +1089,13 @@
     if(max_data + pad + reserve <= btl->btl_eager_limit) {
         /* the data is small enough to fit in the eager frag and
          * memory is not prepinned */
-        MCA_BTL_UDAPL_FRAG_ALLOC_EAGER(btl, frag, rc);
+        MCA_BTL_UDAPL_FRAG_ALLOC_EAGER(btl, frag);
     }

     if(NULL == frag) {
         /* the data doesn't fit into eager frag or eager frag is
          * not available */
-        MCA_BTL_UDAPL_FRAG_ALLOC_MAX(btl, frag, rc);
+        MCA_BTL_UDAPL_FRAG_ALLOC_MAX(btl, frag);
         if(NULL == frag) {
             return NULL;
         }
@@ -1161,7 +1160,7 @@
     mca_btl_udapl_frag_t* frag;
     int rc;

-    MCA_BTL_UDAPL_FRAG_ALLOC_USER(btl, frag, rc);
+    MCA_BTL_UDAPL_FRAG_ALLOC_USER(btl, frag);
     if(NULL == frag) {
         return NULL;
     }
Index: ompi/mca/btl/udapl/btl_udapl_endpoint.c
===================================================================
--- ompi/mca/btl/udapl/btl_udapl_endpoint.c	(revision 28700)
+++ ompi/mca/btl/udapl/btl_udapl_endpoint.c	(working copy)
@@ -3,7 +3,7 @@
  * Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
  *                         University Research and Technology
  *                         Corporation.  All rights reserved.
- * Copyright (c) 2004-2011 The University of Tennessee and The University
+ * Copyright (c) 2004-2013 The University of Tennessee and The University
  *                         of Tennessee Research Foundation.  All rights
  *                         reserved.
  * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart, 
@@ -1162,18 +1162,18 @@

     for(i = 0; i < mca_btl_udapl_component.udapl_num_recvs; i++) {
         if(size == mca_btl_udapl_component.udapl_eager_frag_size) {
-            MCA_BTL_UDAPL_FRAG_ALLOC_EAGER_RECV(endpoint->endpoint_btl, frag, rc);
+            MCA_BTL_UDAPL_FRAG_ALLOC_EAGER_RECV(endpoint->endpoint_btl, frag);
             ep = endpoint->endpoint_eager;
         } else {
             assert(size == mca_btl_udapl_component.udapl_max_frag_size);
-            MCA_BTL_UDAPL_FRAG_ALLOC_MAX_RECV(endpoint->endpoint_btl, frag, rc);
+            MCA_BTL_UDAPL_FRAG_ALLOC_MAX_RECV(endpoint->endpoint_btl, frag);
             ep = endpoint->endpoint_max;
         } 

         if (NULL == frag) {
             BTL_ERROR(("ERROR: %s posting recv, out of resources\n",
                 "MCA_BTL_UDAPL_ALLOC"));
-            return rc;
+            return OMPI_ERR_OUT_OF_RESOURCE;
         }

         assert(size == frag->size);
@@ -1333,7 +1333,6 @@
 {
     mca_btl_udapl_module_t* udapl_btl = (mca_btl_udapl_module_t*) btl; 
     mca_btl_udapl_frag_t* frag;
-    int rc;
     int pad = 0;

     /* compute pad as needed */
@@ -1343,7 +1342,7 @@
     /* control messages size should never be greater than eager message size */
     assert((size+pad) <= btl->btl_eager_limit);

-    MCA_BTL_UDAPL_FRAG_ALLOC_CONTROL(udapl_btl, frag, rc); 
+    MCA_BTL_UDAPL_FRAG_ALLOC_CONTROL(udapl_btl, frag); 

     /* Set up the LMR triplet from the frag segment */
     frag->segment.base.seg_len = (uint32_t)size;
Index: ompi/mca/btl/udapl/btl_udapl_frag.h
===================================================================
--- ompi/mca/btl/udapl/btl_udapl_frag.h	(revision 28700)
+++ ompi/mca/btl/udapl/btl_udapl_frag.h	(working copy)
@@ -2,7 +2,7 @@
  * Copyright (c) 2004-2006 The Trustees of Indiana University and Indiana
  *                         University Research and Technology
  *                         Corporation.  All rights reserved.
- * Copyright (c) 2004-2006 The University of Tennessee and The University
+ * Copyright (c) 2004-2013 The University of Tennessee and The University
  *                         of Tennessee Research Foundation.  All rights
  *                         reserved.
  * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart, 
@@ -125,10 +125,10 @@
  * free list(s).
  */

-#define MCA_BTL_UDAPL_FRAG_ALLOC_EAGER(btl, frag, rc)              \
+#define MCA_BTL_UDAPL_FRAG_ALLOC_EAGER(btl, frag)                  \
 {                                                                  \
     ompi_free_list_item_t *item;                                   \
-    OMPI_FREE_LIST_GET(&((mca_btl_udapl_module_t*)btl)->udapl_frag_eager, item, rc); \
+    OMPI_FREE_LIST_GET(&((mca_btl_udapl_module_t*)btl)->udapl_frag_eager, item); \
     frag = (mca_btl_udapl_frag_t*) item;                           \
 }

@@ -138,17 +138,17 @@
         (ompi_free_list_item_t*)(frag));                           \
 }

-#define MCA_BTL_UDAPL_FRAG_ALLOC_EAGER_RECV(btl, frag, rc)              \
+#define MCA_BTL_UDAPL_FRAG_ALLOC_EAGER_RECV(btl, frag)             \
 {                                                                  \
     ompi_free_list_item_t *item;                                   \
-    OMPI_FREE_LIST_GET(&((mca_btl_udapl_module_t*)btl)->udapl_frag_eager_recv, item, rc); \
+    OMPI_FREE_LIST_GET(&((mca_btl_udapl_module_t*)btl)->udapl_frag_eager_recv, item); \
     frag = (mca_btl_udapl_frag_t*) item;                           \
 }

-#define MCA_BTL_UDAPL_FRAG_ALLOC_MAX(btl, frag, rc)                \
+#define MCA_BTL_UDAPL_FRAG_ALLOC_MAX(btl, frag)                    \
 {                                                                  \
     ompi_free_list_item_t *item;                                   \
-    OMPI_FREE_LIST_GET(&((mca_btl_udapl_module_t*)btl)->udapl_frag_max, item, rc); \
+    OMPI_FREE_LIST_GET(&((mca_btl_udapl_module_t*)btl)->udapl_frag_max, item); \
     frag = (mca_btl_udapl_frag_t*) item;                           \
 }

@@ -158,17 +158,17 @@
         (ompi_free_list_item_t*)(frag));                           \
 }

-#define MCA_BTL_UDAPL_FRAG_ALLOC_MAX_RECV(btl, frag, rc)                \
+#define MCA_BTL_UDAPL_FRAG_ALLOC_MAX_RECV(btl, frag)                \
 {                                                                  \
     ompi_free_list_item_t *item;                                   \
-    OMPI_FREE_LIST_GET(&((mca_btl_udapl_module_t*)btl)->udapl_frag_max_recv, item, rc); \
+    OMPI_FREE_LIST_GET(&((mca_btl_udapl_module_t*)btl)->udapl_frag_max_recv, item); \
     frag = (mca_btl_udapl_frag_t*) item;                           \
 }

-#define MCA_BTL_UDAPL_FRAG_ALLOC_USER(btl, frag, rc)               \
+#define MCA_BTL_UDAPL_FRAG_ALLOC_USER(btl, frag)                   \
 {                                                                  \
     ompi_free_list_item_t *item;                                   \
-    OMPI_FREE_LIST_GET(&((mca_btl_udapl_module_t*)btl)->udapl_frag_user, item, rc); \
+    OMPI_FREE_LIST_GET(&((mca_btl_udapl_module_t*)btl)->udapl_frag_user, item); \
     frag = (mca_btl_udapl_frag_t*) item;                           \
 }

@@ -178,14 +178,14 @@
         (ompi_free_list_item_t*)(frag)); \
 }

-#define MCA_BTL_UDAPL_FRAG_ALLOC_CONTROL(btl, frag, rc)              \
+#define MCA_BTL_UDAPL_FRAG_ALLOC_CONTROL(btl, frag)                \
 {                                                                  \
     ompi_free_list_item_t *item;                                   \
-    OMPI_FREE_LIST_GET(&((mca_btl_udapl_module_t*)btl)->udapl_frag_control, item, rc); \
+    OMPI_FREE_LIST_GET(&((mca_btl_udapl_module_t*)btl)->udapl_frag_control, item); \
     frag = (mca_btl_udapl_frag_t*) item;                           \
 }

-#define MCA_BTL_UDAPL_FRAG_RETURN_CONTROL(btl, frag)                 \
+#define MCA_BTL_UDAPL_FRAG_RETURN_CONTROL(btl, frag)               \
 {                                                                  \
     OMPI_FREE_LIST_RETURN(&((mca_btl_udapl_module_t*)btl)->udapl_frag_control, \
         (ompi_free_list_item_t*)(frag));                           \
Index: ompi/mca/btl/ugni/btl_ugni_endpoint.c
===================================================================
--- ompi/mca/btl/ugni/btl_ugni_endpoint.c	(revision 28700)
+++ ompi/mca/btl/ugni/btl_ugni_endpoint.c	(working copy)
@@ -2,7 +2,7 @@
 /*
  * Copyright (c) 2011-2012 Los Alamos National Security, LLC. All rights
  *                         reserved.
- * Copyright (c) 2011      UT-Battelle, LLC. All rights reserved.
+ * Copyright (c) 2011-2013 UT-Battelle, LLC. All rights reserved.
  * $COPYRIGHT$
  *
  * Additional copyrights may follow
@@ -34,9 +34,8 @@
 static inline int mca_btl_ugni_ep_smsg_get_mbox (mca_btl_base_endpoint_t *ep) {
     mca_btl_ugni_module_t *ugni_module = ep->btl;
     ompi_free_list_item_t *mbox;
-    int rc;

-    OMPI_FREE_LIST_GET(&ugni_module->smsg_mboxes, mbox, rc);
+    OMPI_FREE_LIST_GET(&ugni_module->smsg_mboxes, mbox);
     if (OPAL_UNLIKELY(NULL == mbox)) {
         return OMPI_ERR_OUT_OF_RESOURCE;
     }
@@ -46,7 +45,7 @@
     /* per ugni spec we need to zero mailbox data before connecting */
     memset ((char *)ep->mailbox->smsg_attrib.msg_buffer + ep->mailbox->smsg_attrib.mbox_offset, 0,
             ep->mailbox->smsg_attrib.buff_size);
-    return rc;
+    return OMPI_SUCCESS;
 }

 int mca_btl_ugni_ep_disconnect (mca_btl_base_endpoint_t *ep, bool send_disconnect) {
Index: ompi/mca/btl/ugni/btl_ugni_frag.h
===================================================================
--- ompi/mca/btl/ugni/btl_ugni_frag.h	(revision 28700)
+++ ompi/mca/btl/ugni/btl_ugni_frag.h	(working copy)
@@ -99,16 +99,16 @@
                                            mca_btl_ugni_base_frag_t **frag)
 {
     ompi_free_list_item_t *item = NULL;
-    int rc;

-    OMPI_FREE_LIST_GET(list, item, rc);
+    OMPI_FREE_LIST_GET(list, item);
     *frag = (mca_btl_ugni_base_frag_t *) item;
     if (OPAL_LIKELY(NULL != item)) {
         (*frag)->my_list  = list;
         (*frag)->endpoint = ep;
+        return OMPI_SUCCESS;
     }

-    return rc;
+    return OMPI_ERR_OUT_OF_RESOURCE;
 }

 static inline int mca_btl_ugni_frag_return (mca_btl_ugni_base_frag_t *frag)
Index: ompi/mca/btl/vader/btl_vader_frag.h
===================================================================
--- ompi/mca/btl/vader/btl_vader_frag.h	(revision 28700)
+++ ompi/mca/btl/vader/btl_vader_frag.h	(working copy)
@@ -3,7 +3,7 @@
  * Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
  *                         University Research and Technology
  *                         Corporation.  All rights reserved.
- * Copyright (c) 2004-2009 The University of Tennessee and The University
+ * Copyright (c) 2004-2013 The University of Tennessee and The University
  *                         of Tennessee Research Foundation.  All rights
  *                         reserved.
  * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
@@ -58,9 +58,8 @@

 static inline int mca_btl_vader_frag_alloc (mca_btl_vader_frag_t **frag, ompi_free_list_t *list) {
     ompi_free_list_item_t *item;
-    int rc;

-    OMPI_FREE_LIST_GET(list, item, rc);
+    OMPI_FREE_LIST_GET(list, item);
     *frag = (mca_btl_vader_frag_t *) item;
     if (OPAL_LIKELY(NULL != item)) {
         if (NULL == (*frag)->hdr) {
@@ -75,7 +74,7 @@
         (*frag)->my_list = list;
     }

-    return rc;
+    return OMPI_SUCCESS;
 }

 void mca_btl_vader_frag_return (mca_btl_vader_frag_t *frag);
Index: ompi/mca/coll/hcoll/coll_hcoll_rte.c
===================================================================
--- ompi/mca/coll/hcoll/coll_hcoll_rte.c	(revision 28700)
+++ ompi/mca/coll/hcoll/coll_hcoll_rte.c	(working copy)
@@ -381,16 +381,15 @@

 static void* get_coll_handle(void)
 {
-    int rc;
     ompi_request_t *ompi_req;
     ompi_free_list_item_t *item;
-    OMPI_FREE_LIST_WAIT(&(mca_coll_hcoll_component.requests),item,rc);
-    ompi_req = (ompi_request_t *)item;
-    OMPI_REQUEST_INIT(ompi_req,false);
-    if (OPAL_UNLIKELY(OMPI_SUCCESS != rc)) {
+    OMPI_FREE_LIST_WAIT(&(mca_coll_hcoll_component.requests),item);
+    if (OPAL_UNLIKELY(NULL == item)) {
         HCOL_ERROR("Wait for free list failed.\n");
         return NULL;
     }
+    ompi_req = (ompi_request_t *)item;
+    OMPI_REQUEST_INIT(ompi_req,false);
     return (void *)ompi_req;
 }

Index: ompi/mca/coll/libnbc/coll_libnbc.h
===================================================================
--- ompi/mca/coll/libnbc/coll_libnbc.h	(revision 28700)
+++ ompi/mca/coll/libnbc/coll_libnbc.h	(working copy)
@@ -2,7 +2,7 @@
  * Copyright (c) 2004-2007 The Trustees of Indiana University and Indiana
  *                         University Research and Technology
  *                         Corporation.  All rights reserved.
- * Copyright (c) 2004-2006 The University of Tennessee and The University
+ * Copyright (c) 2004-2013 The University of Tennessee and The University
  *                         of Tennessee Research Foundation.  All rights
  *                         reserved.
  * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart, 
@@ -116,11 +116,10 @@
 typedef ompi_coll_libnbc_request_t NBC_Handle;


-#define OMPI_COLL_LIBNBC_REQUEST_ALLOC(comm, req, rc)                   \
+#define OMPI_COLL_LIBNBC_REQUEST_ALLOC(comm, req)                       \
     do {                                                                \
         ompi_free_list_item_t *item;                                    \
-        OMPI_FREE_LIST_WAIT(&mca_coll_libnbc_component.requests,        \
-                            item, rc);                                  \
+        OMPI_FREE_LIST_WAIT(&mca_coll_libnbc_component.requests, item); \
         req = (ompi_coll_libnbc_request_t*) item;                       \
         OMPI_REQUEST_INIT(&req->super, false);                          \
         req->super.req_mpi_object.comm = comm;                          \
Index: ompi/mca/coll/libnbc/nbc.c
===================================================================
--- ompi/mca/coll/libnbc/nbc.c	(revision 28700)
+++ ompi/mca/coll/libnbc/nbc.c	(working copy)
@@ -1,9 +1,12 @@
 /*
- * Copyright (c) 2006 The Trustees of Indiana University and Indiana
- *                    University Research and Technology
- *                    Corporation.  All rights reserved.
- * Copyright (c) 2006 The Technical University of Chemnitz. All 
- *                    rights reserved.
+ * Copyright (c) 2006      The Trustees of Indiana University and Indiana
+ *                         University Research and Technology
+ *                         Corporation.  All rights reserved.
+ * Copyright (c) 2013      The University of Tennessee and The University
+ *                         of Tennessee Research Foundation.  All rights
+ *                         reserved.
+ * Copyright (c) 2006      The Technical University of Chemnitz. All 
+ *                         rights reserved.
  *
  * Author(s): Torsten Hoefler <htor@cs.indiana.edu>
  *
@@ -521,12 +524,12 @@

 int NBC_Init_handle(struct ompi_communicator_t *comm, ompi_coll_libnbc_request_t **request, ompi_coll_libnbc_module_t *comminfo)
 {
-  int res, tmp_tag;
+  int tmp_tag;
   bool need_register = false;
   ompi_coll_libnbc_request_t *handle;

-  OMPI_COLL_LIBNBC_REQUEST_ALLOC(comm, handle, res);
-  if (OMPI_SUCCESS != res) return res;
+  OMPI_COLL_LIBNBC_REQUEST_ALLOC(comm, handle);
+  if (NULL == handle) return OMPI_ERR_OUT_OF_RESOURCE;
   *request = handle;

   handle->tmpbuf = NULL;
Index: ompi/mca/coll/ml/coll_ml_barrier.c
===================================================================
--- ompi/mca/coll/ml/coll_ml_barrier.c	(revision 28700)
+++ ompi/mca/coll/ml/coll_ml_barrier.c	(working copy)
@@ -32,8 +32,6 @@
 static int mca_coll_ml_barrier_launch(mca_coll_ml_module_t *ml_module,
                                      ompi_request_t **req)
 {
-    int rc;
-
     ompi_free_list_item_t *item;
     mca_coll_ml_collective_operation_progress_t *coll_op;
     ml_payload_buffer_desc_t *src_buffer_desc = NULL;
@@ -49,8 +47,7 @@

     /* Blocking call on fragment allocation (Maybe we want to make it non blocking ?) */
     OMPI_FREE_LIST_WAIT(&(ml_module->coll_ml_collective_descriptors),
-                          item,
-                          rc);
+                        item);

     coll_op = (mca_coll_ml_collective_operation_progress_t *) item;
     assert(NULL != coll_op);
Index: ompi/mca/coll/ml/coll_ml_inlines.h
===================================================================
--- ompi/mca/coll/ml/coll_ml_inlines.h	(revision 28700)
+++ ompi/mca/coll/ml/coll_ml_inlines.h	(working copy)
@@ -467,15 +467,13 @@
         size_t offset_into_user_buffer
         )
 {
-    int rc;
     ompi_free_list_item_t *item;
     mca_coll_ml_collective_operation_progress_t  *coll_op = NULL;
     ompi_request_t *req;

     /* Blocking call on fragment allocation (Maybe we want to make it non blocking ?) */
     OMPI_FREE_LIST_WAIT(&(ml_module->coll_ml_collective_descriptors),
-                          item,
-                          rc);
+                        item);

     coll_op = (mca_coll_ml_collective_operation_progress_t *) item;
     ML_VERBOSE(10, (">>> Allocating coll op %p", coll_op));
@@ -530,14 +528,12 @@
                                         size_t offset_into_user_buffer
                                         )
 {
-    int rc;
     ompi_free_list_item_t *item;
     mca_coll_ml_collective_operation_progress_t  *coll_op = NULL;

     /* Blocking call on fragment allocation (Maybe we want to make it non blocking ?) */
     OMPI_FREE_LIST_WAIT(&(ml_module->coll_ml_collective_descriptors),
-                          item,
-                          rc);
+                        item);

     coll_op = (mca_coll_ml_collective_operation_progress_t *) item;

Index: ompi/mca/crcp/bkmrk/crcp_bkmrk_pml.c
===================================================================
--- ompi/mca/crcp/bkmrk/crcp_bkmrk_pml.c	(revision 28700)
+++ ompi/mca/crcp/bkmrk/crcp_bkmrk_pml.c	(working copy)
@@ -1,7 +1,7 @@
 /*
  * Copyright (c) 2004-2011 The Trustees of Indiana University.
  *                         All rights reserved.
- * Copyright (c) 2010-2011 The University of Tennessee and The University
+ * Copyright (c) 2010-2013 The University of Tennessee and The University
  *                         of Tennessee Research Foundation.  All rights
  *                         reserved.
  * Copyright (c) 2010-2012 Oracle and/or its affiliates.  All rights reserved.
@@ -614,11 +614,11 @@
 /*
  * Free List Maintenance
  */
-#define HOKE_PEER_REF_ALLOC(peer_ref, rc)             \
+#define HOKE_PEER_REF_ALLOC(peer_ref)                 \
 do {                                                  \
   ompi_free_list_item_t* item;                        \
-  OMPI_FREE_LIST_WAIT(&peer_ref_free_list, item, rc); \
-  peer_ref = (ompi_crcp_bkmrk_pml_peer_ref_t*)item;    \
+  OMPI_FREE_LIST_WAIT(&peer_ref_free_list, item);     \
+  peer_ref = (ompi_crcp_bkmrk_pml_peer_ref_t*)item;   \
 } while(0); 

 #define HOKE_PEER_REF_RETURN(peer_ref)        \
@@ -628,12 +628,12 @@
 } while(0);


-#define HOKE_CONTENT_REF_ALLOC(content_ref, rc)                  \
-do {                                                             \
-  ompi_free_list_item_t* item;                                   \
-  OMPI_FREE_LIST_WAIT(&content_ref_free_list, item, rc);         \
+#define HOKE_CONTENT_REF_ALLOC(content_ref)                       \
+do {                                                              \
+  ompi_free_list_item_t* item;                                    \
+  OMPI_FREE_LIST_WAIT(&content_ref_free_list, item);              \
   content_ref = (ompi_crcp_bkmrk_pml_message_content_ref_t*)item; \
-  content_ref->msg_id = content_ref_seq_num;                     \
+  content_ref->msg_id = content_ref_seq_num;                      \
   content_ref_seq_num++;\
 } while(0); 

@@ -644,10 +644,10 @@
 } while(0);


-#define HOKE_TRAFFIC_MSG_REF_ALLOC(msg_ref, rc)              \
-do {                                                         \
-  ompi_free_list_item_t* item;                               \
-  OMPI_FREE_LIST_WAIT(&traffic_msg_ref_free_list, item, rc); \
+#define HOKE_TRAFFIC_MSG_REF_ALLOC(msg_ref)                   \
+do {                                                          \
+  ompi_free_list_item_t* item;                                \
+  OMPI_FREE_LIST_WAIT(&traffic_msg_ref_free_list, item);      \
   msg_ref = (ompi_crcp_bkmrk_pml_traffic_message_ref_t*)item; \
 } while(0); 

@@ -658,10 +658,10 @@
 } while(0);


-#define HOKE_DRAIN_MSG_REF_ALLOC(msg_ref, rc)              \
-do {                                                       \
-  ompi_free_list_item_t* item;                             \
-  OMPI_FREE_LIST_WAIT(&drain_msg_ref_free_list, item, rc); \
+#define HOKE_DRAIN_MSG_REF_ALLOC(msg_ref)                   \
+do {                                                        \
+  ompi_free_list_item_t* item;                              \
+  OMPI_FREE_LIST_WAIT(&drain_msg_ref_free_list, item);      \
   msg_ref = (ompi_crcp_bkmrk_pml_drain_message_ref_t*)item; \
 } while(0); 

@@ -672,10 +672,10 @@
 } while(0);


-#define HOKE_DRAIN_ACK_MSG_REF_ALLOC(msg_ref, rc)              \
-do {                                                           \
-  ompi_free_list_item_t* item;                                 \
-  OMPI_FREE_LIST_WAIT(&drain_ack_msg_ref_free_list, item, rc); \
+#define HOKE_DRAIN_ACK_MSG_REF_ALLOC(msg_ref)                   \
+do {                                                            \
+  ompi_free_list_item_t* item;                                  \
+  OMPI_FREE_LIST_WAIT(&drain_ack_msg_ref_free_list, item);      \
   msg_ref = (ompi_crcp_bkmrk_pml_drain_message_ack_ref_t*)item; \
 } while(0); 

@@ -965,10 +965,10 @@
 /************************************
  * Some Macro shortcuts
  ************************************/
-#define CRCP_COORD_STATE_ALLOC(state_ref, rc)            \
-do {                                                     \
-  ompi_free_list_item_t* item;                           \
-  OMPI_FREE_LIST_WAIT(&coord_state_free_list, item, rc); \
+#define CRCP_COORD_STATE_ALLOC(state_ref)                 \
+do {                                                      \
+  ompi_free_list_item_t* item;                            \
+  OMPI_FREE_LIST_WAIT(&coord_state_free_list, item);      \
   state_ref = (ompi_crcp_bkmrk_pml_state_t*)item;         \
 } while(0); 

@@ -980,7 +980,7 @@

 #define CREATE_COORD_STATE(coord_state, pml_state, v_peer_ref, v_msg_ref)         \
  {                                                                                \
-   CRCP_COORD_STATE_ALLOC(coord_state, ret);                                      \
+   CRCP_COORD_STATE_ALLOC(coord_state);                                           \
                                                                                   \
    coord_state->prev_ptr           = pml_state;                                   \
    coord_state->p_super.super      = pml_state->super;                            \
@@ -1004,7 +1004,7 @@

 #define CREATE_NEW_MSG(msg_ref, v_type, v_count, v_ddt_size, v_tag, v_rank, v_comm, p_jobid, p_vpid) \
  {                                                               \
-   HOKE_TRAFFIC_MSG_REF_ALLOC(msg_ref, ret);                     \
+   HOKE_TRAFFIC_MSG_REF_ALLOC(msg_ref);                          \
                                                                  \
    msg_ref->msg_id   = message_seq_num;                          \
    message_seq_num++;                                            \
@@ -1031,7 +1031,7 @@

 #define CREATE_NEW_DRAIN_MSG(msg_ref, v_type, v_count, v_ddt_size, v_tag, v_rank, v_comm, p_jobid, p_vpid) \
  {                                                               \
-   HOKE_DRAIN_MSG_REF_ALLOC(msg_ref, ret);                       \
+   HOKE_DRAIN_MSG_REF_ALLOC(msg_ref);                            \
                                                                  \
    msg_ref->msg_id   = message_seq_num;                          \
    message_seq_num++;                                            \
@@ -1426,7 +1426,6 @@
                                    size_t nprocs, 
                                    ompi_crcp_base_pml_state_t* pml_state )
 {
-    int ret;
     ompi_crcp_bkmrk_pml_peer_ref_t *new_peer_ref;
     size_t i;

@@ -1447,7 +1446,7 @@
      * Create a peer_ref for each peer added
      */
     for( i = 0; i < nprocs; ++i) {
-        HOKE_PEER_REF_ALLOC(new_peer_ref, ret);
+        HOKE_PEER_REF_ALLOC(new_peer_ref);

         new_peer_ref->proc_name.jobid  = procs[i]->proc_name.jobid;
         new_peer_ref->proc_name.vpid   = procs[i]->proc_name.vpid;
@@ -1560,7 +1559,7 @@
         /*
          * Update Message
          */
-        HOKE_CONTENT_REF_ALLOC(new_content, ret);
+        HOKE_CONTENT_REF_ALLOC(new_content);
         new_content->buffer  =  buf;
         new_content->request = *request;
         new_content->done    =  false;
@@ -1776,7 +1775,7 @@
         /*
          * Update Message
          */
-        HOKE_CONTENT_REF_ALLOC(new_content, ret);
+        HOKE_CONTENT_REF_ALLOC(new_content);
         new_content->buffer  =  NULL; /* No Tracked */
         new_content->request = *request;
         new_content->done    =  false;
@@ -2037,7 +2036,7 @@
         /*
          * Do the update
          */
-        HOKE_CONTENT_REF_ALLOC(new_content, ret);
+        HOKE_CONTENT_REF_ALLOC(new_content);
         new_content->buffer  =  buf;
         new_content->request = *request;
         new_content->done    =  false;
@@ -2487,7 +2486,7 @@
         /* 
          * Do the update
          */
-        HOKE_CONTENT_REF_ALLOC(new_content, ret);
+        HOKE_CONTENT_REF_ALLOC(new_content);
         new_content->buffer  =  NULL; /* No tracked */
         new_content->request = *request;
         new_content->done    =  false;
@@ -3351,7 +3350,7 @@
     }

     if( NULL != request ) {
-        HOKE_CONTENT_REF_ALLOC(new_content, ret);
+        HOKE_CONTENT_REF_ALLOC(new_content);
         new_content->buffer  =  NULL;
         new_content->request =  request;
         new_content->done    =  false;
@@ -3552,7 +3551,7 @@
         (*posted_msg_ref)->active_drain++;

         /* Create a new content for the drained message */
-        HOKE_CONTENT_REF_ALLOC(new_content, ret);
+        HOKE_CONTENT_REF_ALLOC(new_content);
         new_content->buffer  = NULL;
         if( NULL == prev_content ) {
             new_content->request  = NULL;
@@ -5508,7 +5507,7 @@
      * which is sent when they have finished receiving all of the 
      * inflight messages into a local buffer
      */
-    HOKE_DRAIN_ACK_MSG_REF_ALLOC(d_msg_ack, ret);
+    HOKE_DRAIN_ACK_MSG_REF_ALLOC(d_msg_ack);
     d_msg_ack->peer.jobid  = peer_ref->proc_name.jobid;
     d_msg_ack->peer.vpid   = peer_ref->proc_name.vpid;

Index: ompi/mca/mpool/base/mpool_base_tree.c
===================================================================
--- ompi/mca/mpool/base/mpool_base_tree.c	(revision 28700)
+++ ompi/mca/mpool/base/mpool_base_tree.c	(working copy)
@@ -2,7 +2,7 @@
  * Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
  *                         University Research and Technology
  *                         Corporation.  All rights reserved.
- * Copyright (c) 2004-2006 The University of Tennessee and The University
+ * Copyright (c) 2004-2013 The University of Tennessee and The University
  *                         of Tennessee Research Foundation.  All rights
  *                         reserved.
  * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart, 
@@ -143,11 +143,8 @@
  */
 mca_mpool_base_tree_item_t* mca_mpool_base_tree_item_get(void) { 
     ompi_free_list_item_t* item = NULL;
-    int rc;
-    OMPI_FREE_LIST_GET(&mca_mpool_base_tree_item_free_list, 
-                       item, 
-                       rc); 
-    if(OMPI_SUCCESS == rc) { 
+    OMPI_FREE_LIST_GET(&mca_mpool_base_tree_item_free_list, item);
+    if(NULL != item) { 
         return (mca_mpool_base_tree_item_t*) item; 
     } else { 
         return NULL;
Index: ompi/mca/mpool/gpusm/mpool_gpusm_module.c
===================================================================
--- ompi/mca/mpool/gpusm/mpool_gpusm_module.c	(revision 28700)
+++ ompi/mca/mpool/gpusm/mpool_gpusm_module.c	(working copy)
@@ -2,7 +2,7 @@
  * Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
  *                         University Research and Technology
  *                         Corporation.  All rights reserved.
- * Copyright (c) 2004-2007 The University of Tennessee and The University
+ * Copyright (c) 2004-2013 The University of Tennessee and The University
  *                         of Tennessee Research Foundation.  All rights
  *                         reserved.
  * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
@@ -135,9 +135,9 @@
     base = addr;
     bound = (unsigned char *)addr + size - 1;

-    OMPI_FREE_LIST_GET(&mpool_gpusm->reg_list, item, rc);
-    if(OMPI_SUCCESS != rc) {
-        return rc;
+    OMPI_FREE_LIST_GET(&mpool_gpusm->reg_list, item);
+    if(NULL == item) {
+        return OMPI_ERR_OUT_OF_RESOURCE;
     }
     gpusm_reg = (mca_mpool_base_registration_t*)item;

Index: ompi/mca/mpool/grdma/mpool_grdma_module.c
===================================================================
--- ompi/mca/mpool/grdma/mpool_grdma_module.c	(revision 28700)
+++ ompi/mca/mpool/grdma/mpool_grdma_module.c	(working copy)
@@ -3,7 +3,7 @@
  * Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
  *                         University Research and Technology
  *                         Corporation.  All rights reserved.
- * Copyright (c) 2004-2007 The University of Tennessee and The University
+ * Copyright (c) 2004-2013 The University of Tennessee and The University
  *                         of Tennessee Research Foundation.  All rights
  *                         reserved.
  * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
@@ -263,10 +263,10 @@
         }
     }

-    OMPI_FREE_LIST_GET(&mpool_grdma->reg_list, item, rc);
-    if(OMPI_SUCCESS != rc) {
+    OMPI_FREE_LIST_GET(&mpool_grdma->reg_list, item);
+    if(NULL == item) {
         OPAL_THREAD_UNLOCK(&mpool->rcache->lock);
-        return rc;
+        return OMPI_ERR_OUT_OF_RESOURCE;
     }
     grdma_reg = (mca_mpool_base_registration_t*)item;

Index: ompi/mca/mpool/rgpusm/mpool_rgpusm_module.c
===================================================================
--- ompi/mca/mpool/rgpusm/mpool_rgpusm_module.c	(revision 28700)
+++ ompi/mca/mpool/rgpusm/mpool_rgpusm_module.c	(working copy)
@@ -2,7 +2,7 @@
  * Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
  *                         University Research and Technology
  *                         Corporation.  All rights reserved.
- * Copyright (c) 2004-2007 The University of Tennessee and The University
+ * Copyright (c) 2004-2013 The University of Tennessee and The University
  *                         of Tennessee Research Foundation.  All rights
  *                         reserved.
  * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
@@ -210,9 +210,9 @@
      * are not leaving the registrations pinned, the number of
      * registrations is unlimited and there is no need for a cache. */
     if(!mca_mpool_rgpusm_component.leave_pinned && 0 == mca_mpool_rgpusm_component.rcache_size_limit) {
-        OMPI_FREE_LIST_GET(&mpool_rgpusm->reg_list, item, rc);
-        if(OMPI_SUCCESS != rc) {
-            return rc;
+        OMPI_FREE_LIST_GET(&mpool_rgpusm->reg_list, item);
+        if(NULL == item) {
+            return OMPI_ERR_OUT_OF_RESOURCE;
         }
         rgpusm_reg = (mca_mpool_common_cuda_reg_t*)item;
         rgpusm_reg->base.mpool = mpool;
@@ -323,10 +323,10 @@
                         "RGPUSM: New registration ep=%d, addr=%p, size=%d. Need to register and insert in cache",
                          mypeer, addr, (int)size);

-    OMPI_FREE_LIST_GET(&mpool_rgpusm->reg_list, item, rc);
-    if(OMPI_SUCCESS != rc) {
+    OMPI_FREE_LIST_GET(&mpool_rgpusm->reg_list, item);
+    if(NULL == item) {
         OPAL_THREAD_UNLOCK(&mpool->rcache->lock);
-        return rc;
+        return OMPI_ERR_OUT_OF_RESOURCE;
     }
     rgpusm_reg = (mca_mpool_common_cuda_reg_t*)item;

Index: ompi/mca/mtl/mxm/mtl_mxm_probe.c
===================================================================
--- ompi/mca/mtl/mxm/mtl_mxm_probe.c	(revision 28700)
+++ ompi/mca/mtl/mxm/mtl_mxm_probe.c	(working copy)
@@ -52,16 +52,15 @@
                          struct ompi_status_public_t *status)
 {
 #if MXM_API >= MXM_VERSION(1,5)
-    int rc;
     mxm_error_t err;
     mxm_recv_req_t req;

     ompi_free_list_item_t *item;
     ompi_mtl_mxm_message_t *msgp;

-    OMPI_FREE_LIST_WAIT(&mca_mtl_mxm_component.mxm_messages, item, rc);
-    if (OPAL_UNLIKELY(OMPI_SUCCESS != rc)) {
-        return rc;
+    OMPI_FREE_LIST_WAIT(&mca_mtl_mxm_component.mxm_messages, item);
+    if (OPAL_UNLIKELY(NULL == item)) {
+        return OMPI_ERR_OUT_OF_RESOURCE;
     }

     msgp = (ompi_mtl_mxm_message_t *) item;
Index: ompi/mca/osc/pt2pt/osc_pt2pt_sync.c
===================================================================
--- ompi/mca/osc/pt2pt/osc_pt2pt_sync.c	(revision 28700)
+++ ompi/mca/osc/pt2pt/osc_pt2pt_sync.c	(working copy)
@@ -525,7 +525,6 @@
                             int32_t lock_type)
 {
     bool send_ack = false;
-    int ret = OMPI_SUCCESS;
     ompi_proc_t *proc = ompi_comm_peer_lookup( module->p2p_comm, origin );
     ompi_osc_pt2pt_pending_lock_t *new_pending;

@@ -565,8 +564,6 @@
             new_pending->lock_type = lock_type;
             opal_list_append(&(module->p2p_locks_pending), &(new_pending->super));
         }
-    } else {
-        ret = OMPI_ERROR;
     }
     OPAL_THREAD_UNLOCK(&(module->p2p_lock));

Index: ompi/mca/pml/bfo/pml_bfo.h
===================================================================
--- ompi/mca/pml/bfo/pml_bfo.h	(revision 28700)
+++ ompi/mca/pml/bfo/pml_bfo.h	(working copy)
@@ -3,7 +3,7 @@
  * Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
  *                         University Research and Technology
  *                         Corporation.  All rights reserved.
- * Copyright (c) 2004-2007 The University of Tennessee and The University
+ * Copyright (c) 2004-2013 The University of Tennessee and The University
  *                         of Tennessee Research Foundation.  All rights
  *                         reserved.
  * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart, 
@@ -217,10 +217,10 @@
 typedef struct mca_pml_bfo_pckt_pending_t mca_pml_bfo_pckt_pending_t;
 OBJ_CLASS_DECLARATION(mca_pml_bfo_pckt_pending_t);

-#define MCA_PML_BFO_PCKT_PENDING_ALLOC(pckt,rc)                 \
+#define MCA_PML_BFO_PCKT_PENDING_ALLOC(pckt)                    \
 do {                                                            \
     ompi_free_list_item_t* item;                                \
-    OMPI_FREE_LIST_WAIT(&mca_pml_bfo.pending_pckts, item, rc);  \
+    OMPI_FREE_LIST_WAIT(&mca_pml_bfo.pending_pckts, item);      \
     pckt = (mca_pml_bfo_pckt_pending_t*)item;                   \
 } while (0)

@@ -234,9 +234,8 @@
 #define MCA_PML_BFO_ADD_FIN_TO_PENDING(P, D, B, O, S)               \
     do {                                                            \
         mca_pml_bfo_pckt_pending_t *_pckt;                          \
-        int _rc;                                                    \
                                                                     \
-        MCA_PML_BFO_PCKT_PENDING_ALLOC(_pckt,_rc);                  \
+        MCA_PML_BFO_PCKT_PENDING_ALLOC(_pckt);                      \
         _pckt->hdr.hdr_common.hdr_type = MCA_PML_BFO_HDR_TYPE_FIN;  \
         _pckt->hdr.hdr_fin.hdr_des = (D);                           \
         _pckt->hdr.hdr_fin.hdr_fail = (S);                          \
Index: ompi/mca/pml/bfo/pml_bfo_iprobe.c
===================================================================
--- ompi/mca/pml/bfo/pml_bfo_iprobe.c	(revision 28700)
+++ ompi/mca/pml/bfo/pml_bfo_iprobe.c	(working copy)
@@ -2,7 +2,7 @@
  * Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
  *                         University Research and Technology
  *                         Corporation.  All rights reserved.
- * Copyright (c) 2004-2005 The University of Tennessee and The University
+ * Copyright (c) 2004-2013 The University of Tennessee and The University
  *                         of Tennessee Research Foundation.  All rights
  *                         reserved.
  * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart, 
@@ -93,9 +93,9 @@
     *message = ompi_message_alloc();
     if (NULL == *message) return OMPI_ERR_TEMP_OUT_OF_RESOURCE;

-    MCA_PML_BFO_RECV_REQUEST_ALLOC(recvreq, rc);
+    MCA_PML_BFO_RECV_REQUEST_ALLOC(recvreq);
     if (NULL == recvreq)
-        return rc;
+        return OMPI_ERR_OUT_OF_RESOURCE;
     recvreq->req_recv.req_base.req_type = MCA_PML_REQUEST_IMPROBE;

     /* initialize the request enough to probe and get the status */
@@ -145,9 +145,9 @@
     *message = ompi_message_alloc();
     if (NULL == *message) return OMPI_ERR_TEMP_OUT_OF_RESOURCE;

-    MCA_PML_BFO_RECV_REQUEST_ALLOC(recvreq, rc);
+    MCA_PML_BFO_RECV_REQUEST_ALLOC(recvreq);
     if (NULL == recvreq)
-        return rc;
+        return OMPI_ERR_TEMP_OUT_OF_RESOURCE;
     recvreq->req_recv.req_base.req_type = MCA_PML_REQUEST_MPROBE;

     /* initialize the request enough to probe and get the status */
Index: ompi/mca/pml/bfo/pml_bfo_irecv.c
===================================================================
--- ompi/mca/pml/bfo/pml_bfo_irecv.c	(revision 28700)
+++ ompi/mca/pml/bfo/pml_bfo_irecv.c	(working copy)
@@ -2,7 +2,7 @@
  * Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
  *                         University Research and Technology
  *                         Corporation.  All rights reserved.
- * Copyright (c) 2004-2005 The University of Tennessee and The University
+ * Copyright (c) 2004-2013 The University of Tennessee and The University
  *                         of Tennessee Research Foundation.  All rights
  *                         reserved.
  * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart, 
@@ -34,11 +34,10 @@
                            struct ompi_communicator_t *comm,
                            struct ompi_request_t **request)
 {
-    int rc;
     mca_pml_bfo_recv_request_t *recvreq;
-    MCA_PML_BFO_RECV_REQUEST_ALLOC(recvreq, rc);
+    MCA_PML_BFO_RECV_REQUEST_ALLOC(recvreq);
     if (NULL == recvreq)
-        return rc;
+        return OMPI_ERR_OUT_OF_RESOURCE;

     MCA_PML_BFO_RECV_REQUEST_INIT(recvreq,
                                    addr,
@@ -60,12 +59,10 @@
                       struct ompi_communicator_t *comm,
                       struct ompi_request_t **request)
 {
-    int rc;
-
     mca_pml_bfo_recv_request_t *recvreq;
-    MCA_PML_BFO_RECV_REQUEST_ALLOC(recvreq, rc);
+    MCA_PML_BFO_RECV_REQUEST_ALLOC(recvreq);
     if (NULL == recvreq)
-        return rc;
+        return OMPI_ERR_OUT_OF_RESOURCE;

     MCA_PML_BFO_RECV_REQUEST_INIT(recvreq,
                                    addr,
@@ -91,9 +88,9 @@
 {
     int rc;
     mca_pml_bfo_recv_request_t *recvreq;
-    MCA_PML_BFO_RECV_REQUEST_ALLOC(recvreq, rc);
+    MCA_PML_BFO_RECV_REQUEST_ALLOC(recvreq);
     if (NULL == recvreq)
-        return rc;
+        return OMPI_ERR_OUT_OF_RESOURCE;

     MCA_PML_BFO_RECV_REQUEST_INIT(recvreq,
                                    addr,
Index: ompi/mca/pml/bfo/pml_bfo_isend.c
===================================================================
--- ompi/mca/pml/bfo/pml_bfo_isend.c	(revision 28700)
+++ ompi/mca/pml/bfo/pml_bfo_isend.c	(working copy)
@@ -2,7 +2,7 @@
  * Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
  *                         University Research and Technology
  *                         Corporation.  All rights reserved.
- * Copyright (c) 2004-2007 The University of Tennessee and The University
+ * Copyright (c) 2004-2013 The University of Tennessee and The University
  *                         of Tennessee Research Foundation.  All rights
  *                         reserved.
  * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart, 
@@ -35,12 +35,10 @@
                            ompi_communicator_t * comm,
                            ompi_request_t ** request)
 {
-    int rc;
-    
     mca_pml_bfo_send_request_t *sendreq = NULL;
-    MCA_PML_BFO_SEND_REQUEST_ALLOC(comm, dst, sendreq, rc);
-    if (rc != OMPI_SUCCESS)
-        return rc;
+    MCA_PML_BFO_SEND_REQUEST_ALLOC(comm, dst, sendreq);
+    if (NULL == sendreq)
+        return OMPI_ERR_OUT_OF_RESOURCE;

     MCA_PML_BFO_SEND_REQUEST_INIT(sendreq,
                                   buf,
@@ -70,9 +68,9 @@
     int rc;
     mca_pml_bfo_send_request_t *sendreq = NULL;

-    MCA_PML_BFO_SEND_REQUEST_ALLOC(comm, dst, sendreq, rc);
-    if (rc != OMPI_SUCCESS)
-        return rc;
+    MCA_PML_BFO_SEND_REQUEST_ALLOC(comm, dst, sendreq);
+    if (NULL == sendreq)
+        return OMPI_ERR_OUT_OF_RESOURCE;

     MCA_PML_BFO_SEND_REQUEST_INIT(sendreq,
                                   buf,
@@ -102,9 +100,9 @@
     int rc;
     mca_pml_bfo_send_request_t *sendreq;

-    MCA_PML_BFO_SEND_REQUEST_ALLOC(comm, dst, sendreq, rc);
-    if (rc != OMPI_SUCCESS)
-        return rc;
+    MCA_PML_BFO_SEND_REQUEST_ALLOC(comm, dst, sendreq);
+    if (NULL == sendreq)
+        return OMPI_ERR_OUT_OF_RESOURCE;

     MCA_PML_BFO_SEND_REQUEST_INIT(sendreq,
                                   buf,
Index: ompi/mca/pml/bfo/pml_bfo_rdmafrag.h
===================================================================
--- ompi/mca/pml/bfo/pml_bfo_rdmafrag.h	(revision 28700)
+++ ompi/mca/pml/bfo/pml_bfo_rdmafrag.h	(working copy)
@@ -2,7 +2,7 @@
  * Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
  *                         University Research and Technology
  *                         Corporation.  All rights reserved.
- * Copyright (c) 2004-2006 The University of Tennessee and The University
+ * Copyright (c) 2004-2013 The University of Tennessee and The University
  *                         of Tennessee Research Foundation.  All rights
  *                         reserved.
  * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart, 
@@ -54,10 +54,10 @@
 OBJ_CLASS_DECLARATION(mca_pml_bfo_rdma_frag_t);


-#define MCA_PML_BFO_RDMA_FRAG_ALLOC(frag,rc)                    \
+#define MCA_PML_BFO_RDMA_FRAG_ALLOC(frag)                       \
 do {                                                            \
     ompi_free_list_item_t* item;                                \
-    OMPI_FREE_LIST_WAIT(&mca_pml_bfo.rdma_frags, item, rc);     \
+    OMPI_FREE_LIST_WAIT(&mca_pml_bfo.rdma_frags, item);         \
     frag = (mca_pml_bfo_rdma_frag_t*)item;                      \
 } while(0)
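
With the rc output argument gone, the fragment-allocation macros report failure only
through the pointer they fill in.  A minimal caller-side sketch (illustrative only,
mirroring the pattern already used elsewhere in this patch; not itself part of the diff):

    mca_pml_bfo_rdma_frag_t *frag;

    MCA_PML_BFO_RDMA_FRAG_ALLOC(frag);              /* no rc argument any more */
    if (OPAL_UNLIKELY(NULL == frag)) {              /* failure surfaces as a NULL pointer */
        OMPI_ERROR_LOG(OMPI_ERR_OUT_OF_RESOURCE);   /* caller picks the error code itself */
        ompi_rte_abort(-1, NULL);
    }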

Index: ompi/mca/pml/bfo/pml_bfo_recvfrag.c
===================================================================
--- ompi/mca/pml/bfo/pml_bfo_recvfrag.c	(revision 28700)
+++ ompi/mca/pml/bfo/pml_bfo_recvfrag.c	(working copy)
@@ -2,7 +2,7 @@
  * Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
  *                         University Research and Technology
  *                         Corporation.  All rights reserved.
- * Copyright (c) 2004-2009 The University of Tennessee and The University
+ * Copyright (c) 2004-2013 The University of Tennessee and The University
  *                         of Tennessee Research Foundation.  All rights
  *                         reserved.
  * Copyright (c) 2004-2007 High Performance Computing Center Stuttgart, 
@@ -69,10 +69,8 @@
                     mca_pml_bfo_match_hdr_t *hdr, mca_btl_base_segment_t* segments,
                     size_t num_segments, mca_pml_bfo_recv_frag_t* frag)
 {
-    int rc;
-
     if(NULL == frag) {
-        MCA_PML_BFO_RECV_FRAG_ALLOC(frag, rc);
+        MCA_PML_BFO_RECV_FRAG_ALLOC(frag);
         MCA_PML_BFO_RECV_FRAG_INIT(frag, hdr, segments, num_segments, btl);
     }
     opal_list_append(queue, (opal_list_item_t*)frag);
@@ -490,7 +488,6 @@
 {
     mca_pml_bfo_recv_request_t *match;
     mca_pml_bfo_comm_t *comm = (mca_pml_bfo_comm_t *)comm_ptr->c_pml_comm;
-    int rc;

     do {
         match = match_incomming(hdr, comm, proc);
@@ -511,7 +508,7 @@
                    restarted later during mrecv */
                 mca_pml_bfo_recv_frag_t *tmp;
                 if(NULL == frag) {
-                    MCA_PML_BFO_RECV_FRAG_ALLOC(tmp, rc);
+                    MCA_PML_BFO_RECV_FRAG_ALLOC(tmp);
                     MCA_PML_BFO_RECV_FRAG_INIT(tmp, hdr, segments, num_segments, btl);
                 } else {
                     tmp = frag;
Index: ompi/mca/pml/bfo/pml_bfo_recvfrag.h
===================================================================
--- ompi/mca/pml/bfo/pml_bfo_recvfrag.h	(revision 28700)
+++ ompi/mca/pml/bfo/pml_bfo_recvfrag.h	(working copy)
@@ -2,7 +2,7 @@
  * Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
  *                         University Research and Technology
  *                         Corporation.  All rights reserved.
- * Copyright (c) 2004-2006 The University of Tennessee and The University
+ * Copyright (c) 2004-2013 The University of Tennessee and The University
  *                         of Tennessee Research Foundation.  All rights
  *                         reserved.
  * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart, 
@@ -50,10 +50,10 @@
 OBJ_CLASS_DECLARATION(mca_pml_bfo_recv_frag_t);


-#define MCA_PML_BFO_RECV_FRAG_ALLOC(frag,rc)                    \
+#define MCA_PML_BFO_RECV_FRAG_ALLOC(frag)                       \
 do {                                                            \
     ompi_free_list_item_t* item;                                \
-    OMPI_FREE_LIST_WAIT(&mca_pml_bfo.recv_frags, item, rc);     \
+    OMPI_FREE_LIST_WAIT(&mca_pml_bfo.recv_frags, item);         \
     frag = (mca_pml_bfo_recv_frag_t*)item;                      \
 } while(0)

Index: ompi/mca/pml/bfo/pml_bfo_recvreq.c
===================================================================
--- ompi/mca/pml/bfo/pml_bfo_recvreq.c	(revision 28700)
+++ ompi/mca/pml/bfo/pml_bfo_recvreq.c	(working copy)
@@ -3,7 +3,7 @@
  * Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
  *                         University Research and Technology
  *                         Corporation.  All rights reserved.
- * Copyright (c) 2004-2009 The University of Tennessee and The University
+ * Copyright (c) 2004-2013 The University of Tennessee and The University
  *                         of Tennessee Research Foundation.  All rights
  *                         reserved.
  * Copyright (c) 2004-2008 High Performance Computing Center Stuttgart, 
@@ -527,7 +527,6 @@
     mca_bml_base_endpoint_t* bml_endpoint = NULL;
     mca_pml_bfo_rdma_frag_t* frag;
     size_t i, size = 0;
-    int rc;

     recvreq->req_recv.req_bytes_packed = hdr->hdr_rndv.hdr_msg_length;

@@ -552,10 +551,10 @@
 #endif /* OMPI_CUDA_SUPPORT */
     }

-    MCA_PML_BFO_RDMA_FRAG_ALLOC(frag,rc);
+    MCA_PML_BFO_RDMA_FRAG_ALLOC(frag);
     if( OPAL_UNLIKELY(NULL == frag) ) {
         /* GLB - FIX */
-         OMPI_ERROR_LOG(rc);
+         OMPI_ERROR_LOG(OMPI_ERR_OUT_OF_RESOURCE);
          ompi_rte_abort(-1, NULL);
     }

Index: ompi/mca/pml/bfo/pml_bfo_recvreq.h
===================================================================
--- ompi/mca/pml/bfo/pml_bfo_recvreq.h	(revision 28700)
+++ ompi/mca/pml/bfo/pml_bfo_recvreq.h	(working copy)
@@ -2,7 +2,7 @@
  * Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
  *                         University Research and Technology
  *                         Corporation.  All rights reserved.
- * Copyright (c) 2004-2010 The University of Tennessee and The University
+ * Copyright (c) 2004-2013 The University of Tennessee and The University
  *                         of Tennessee Research Foundation.  All rights
  *                         reserved.
  * Copyright (c) 2004-2007 High Performance Computing Center Stuttgart, 
@@ -83,11 +83,9 @@
- *  @param rc (OUT)  OMPI_SUCCESS or error status on failure.
  *  @return          Receive request.
  */
-#define MCA_PML_BFO_RECV_REQUEST_ALLOC(recvreq, rc)                \
+#define MCA_PML_BFO_RECV_REQUEST_ALLOC(recvreq)                    \
 do {                                                               \
    ompi_free_list_item_t* item;                                    \
-   rc = OMPI_SUCCESS;                                              \
-   OMPI_FREE_LIST_GET(&mca_pml_base_recv_requests, item, rc);      \
+   OMPI_FREE_LIST_GET(&mca_pml_base_recv_requests, item);          \
    recvreq = (mca_pml_bfo_recv_request_t*)item;                    \
 } while(0)

@@ -394,9 +393,8 @@
 #define MCA_PML_BFO_ADD_ACK_TO_PENDING(P, S, D, O)                      \
     do {                                                                \
         mca_pml_bfo_pckt_pending_t *_pckt;                              \
-        int _rc;                                                        \
                                                                         \
-        MCA_PML_BFO_PCKT_PENDING_ALLOC(_pckt,_rc);                      \
+        MCA_PML_BFO_PCKT_PENDING_ALLOC(_pckt);                          \
         _pckt->hdr.hdr_common.hdr_type = MCA_PML_BFO_HDR_TYPE_ACK;      \
         _pckt->hdr.hdr_ack.hdr_src_req.lval = (S);                      \
         _pckt->hdr.hdr_ack.hdr_dst_req.pval = (D);                      \
Index: ompi/mca/pml/bfo/pml_bfo_sendreq.c
===================================================================
--- ompi/mca/pml/bfo/pml_bfo_sendreq.c	(revision 28700)
+++ ompi/mca/pml/bfo/pml_bfo_sendreq.c	(working copy)
@@ -3,7 +3,7 @@
  * Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
  *                         University Research and Technology
  *                         Corporation.  All rights reserved.
- * Copyright (c) 2004-2008 The University of Tennessee and The University
+ * Copyright (c) 2004-2013 The University of Tennessee and The University
  *                         of Tennessee Research Foundation.  All rights
  *                         reserved.
  * Copyright (c) 2004-2008 High Performance Computing Center Stuttgart, 
@@ -963,13 +963,13 @@
     ompi_free_list_item_t *i;
     mca_bml_base_endpoint_t* bml_endpoint = sendreq->req_endpoint;
     int num_btls = mca_bml_base_btl_array_get_size(&bml_endpoint->btl_send);
-    int rc, n;
+    int n;
     double weight_total = 0;

     if( OPAL_UNLIKELY(0 == send_length) )
         return;

-    OMPI_FREE_LIST_WAIT(&mca_pml_bfo.send_ranges, i, rc);
+    OMPI_FREE_LIST_WAIT(&mca_pml_bfo.send_ranges, i);

     sr = (mca_pml_bfo_send_range_t*)i;

@@ -1335,7 +1335,6 @@
 {
     mca_bml_base_endpoint_t *bml_endpoint = sendreq->req_endpoint;
     mca_pml_bfo_rdma_frag_t* frag;
-    int rc;
     size_t i, size = 0;

     if(hdr->hdr_common.hdr_flags & MCA_PML_BFO_HDR_TYPE_ACK) { 
@@ -1346,11 +1345,10 @@
     sendreq->req_recv = hdr->hdr_dst_req; /* only needed once, but it is OK */
 #endif /* PML_BFO */

-    MCA_PML_BFO_RDMA_FRAG_ALLOC(frag, rc); 
-
+    MCA_PML_BFO_RDMA_FRAG_ALLOC(frag); 
     if( OPAL_UNLIKELY(NULL == frag) ) {
         /* TSW - FIX */
-        OMPI_ERROR_LOG(rc);
+        OMPI_ERROR_LOG(OMPI_ERR_OUT_OF_RESOURCE);
         ompi_rte_abort(-1, NULL);
     }

Index: ompi/mca/pml/bfo/pml_bfo_sendreq.h
===================================================================
--- ompi/mca/pml/bfo/pml_bfo_sendreq.h	(revision 28700)
+++ ompi/mca/pml/bfo/pml_bfo_sendreq.h	(working copy)
@@ -2,7 +2,7 @@
  * Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
  *                         University Research and Technology
  *                         Corporation.  All rights reserved.
- * Copyright (c) 2004-2010 The University of Tennessee and The University
+ * Copyright (c) 2004-2013 The University of Tennessee and The University
  *                         of Tennessee Research Foundation.  All rights
  *                         reserved.
  * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart, 
@@ -122,16 +122,14 @@

 #define MCA_PML_BFO_SEND_REQUEST_ALLOC( comm,                           \
                                         dst,                            \
-                                        sendreq,                        \
-                                        rc)                             \
+                                        sendreq)                        \
     {                                                                   \
         ompi_proc_t *proc = ompi_comm_peer_lookup( comm, dst );         \
         ompi_free_list_item_t* item;                                    \
                                                                         \
-        rc = OMPI_ERR_OUT_OF_RESOURCE;                                  \
+        sendreq = NULL;                                                 \
         if( OPAL_LIKELY(NULL != proc) ) {                               \
-            rc = OMPI_SUCCESS;                                          \
-            OMPI_FREE_LIST_WAIT(&mca_pml_base_send_requests, item, rc); \
+            OMPI_FREE_LIST_WAIT(&mca_pml_base_send_requests, item);     \
             sendreq = (mca_pml_bfo_send_request_t*)item;                \
             sendreq->req_send.req_base.req_proc = proc;                 \
         }                                                               \
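
After this change MCA_PML_BFO_SEND_REQUEST_ALLOC pre-sets sendreq to NULL, so a failed
peer lookup and an allocation failure look the same at the call site; callers simply
test the pointer.  A hedged sketch of the resulting pattern (comm and dst stand for the
caller's own parameters):

    mca_pml_bfo_send_request_t *sendreq = NULL;

    MCA_PML_BFO_SEND_REQUEST_ALLOC(comm, dst, sendreq);
    if (NULL == sendreq)                     /* unknown peer or no free request */
        return OMPI_ERR_OUT_OF_RESOURCE;     /* error code chosen by the caller */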
Index: ompi/mca/pml/cm/pml_cm_recv.c
===================================================================
--- ompi/mca/pml/cm/pml_cm_recv.c	(revision 28700)
+++ ompi/mca/pml/cm/pml_cm_recv.c	(working copy)
@@ -32,12 +32,11 @@
                       struct ompi_communicator_t *comm,
                       struct ompi_request_t **request)
 {
-    int ret;
     mca_pml_cm_hvy_recv_request_t *recvreq;
     ompi_proc_t* ompi_proc;

-    MCA_PML_CM_HVY_RECV_REQUEST_ALLOC(recvreq, ret);
-    if( OPAL_UNLIKELY(OMPI_SUCCESS != ret) ) return ret;
+    MCA_PML_CM_HVY_RECV_REQUEST_ALLOC(recvreq);
+    if( OPAL_UNLIKELY(NULL == recvreq) ) return OMPI_ERR_OUT_OF_RESOURCE;

     MCA_PML_CM_HVY_RECV_REQUEST_INIT(recvreq, ompi_proc, comm, tag, src, 
                                      datatype, addr, count, true); 
@@ -61,8 +60,8 @@
     mca_pml_cm_thin_recv_request_t *recvreq;
     ompi_proc_t* ompi_proc;

-    MCA_PML_CM_THIN_RECV_REQUEST_ALLOC(recvreq, ret);
-    if( OPAL_UNLIKELY(OMPI_SUCCESS != ret) ) return ret;
+    MCA_PML_CM_THIN_RECV_REQUEST_ALLOC(recvreq);
+    if( OPAL_UNLIKELY(NULL == recvreq) ) return OMPI_ERR_OUT_OF_RESOURCE;

     MCA_PML_CM_THIN_RECV_REQUEST_INIT(recvreq,
                                       ompi_proc,
@@ -93,8 +92,8 @@
     mca_pml_cm_thin_recv_request_t *recvreq;
     ompi_proc_t* ompi_proc;

-    MCA_PML_CM_THIN_RECV_REQUEST_ALLOC(recvreq, ret);
-    if( OPAL_UNLIKELY(OMPI_SUCCESS != ret) ) return ret;
+    MCA_PML_CM_THIN_RECV_REQUEST_ALLOC(recvreq);
+    if( OPAL_UNLIKELY(NULL == recvreq) ) return OMPI_ERR_OUT_OF_RESOURCE;

     MCA_PML_CM_THIN_RECV_REQUEST_INIT(recvreq,
                                       ompi_proc,
@@ -137,8 +136,8 @@
     ompi_communicator_t *comm = (*message)->comm;
     int peer = (*message)->peer;

-    MCA_PML_CM_THIN_RECV_REQUEST_ALLOC(recvreq, ret);
-    if( OPAL_UNLIKELY(OMPI_SUCCESS != ret) ) return ret;
+    MCA_PML_CM_THIN_RECV_REQUEST_ALLOC(recvreq);
+    if( OPAL_UNLIKELY(NULL == recvreq) ) return OMPI_ERR_OUT_OF_RESOURCE;

     MCA_PML_CM_THIN_RECV_REQUEST_INIT(recvreq,
                                       ompi_proc,
@@ -169,8 +168,8 @@
     ompi_communicator_t *comm = (*message)->comm;
     int peer = (*message)->peer;

-    MCA_PML_CM_THIN_RECV_REQUEST_ALLOC(recvreq, ret);
-    if( OPAL_UNLIKELY(OMPI_SUCCESS != ret) ) return ret;
+    MCA_PML_CM_THIN_RECV_REQUEST_ALLOC(recvreq);
+    if( OPAL_UNLIKELY(NULL == recvreq) ) return OMPI_ERR_OUT_OF_RESOURCE;

     MCA_PML_CM_THIN_RECV_REQUEST_INIT(recvreq,
                                       ompi_proc,
Index: ompi/mca/pml/cm/pml_cm_recvreq.h
===================================================================
--- ompi/mca/pml/cm/pml_cm_recvreq.h	(revision 28700)
+++ ompi/mca/pml/cm/pml_cm_recvreq.h	(working copy)
@@ -2,7 +2,7 @@
  * Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
  *                         University Research and Technology
  *                         Corporation.  All rights reserved.
- * Copyright (c) 2004-2007 The University of Tennessee and The University
+ * Copyright (c) 2004-2013 The University of Tennessee and The University
  *                         of Tennessee Research Foundation.  All rights
  *                         reserved.
  * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart, 
@@ -52,20 +52,19 @@
- *  @param rc (OUT)  OMPI_SUCCESS or error status on failure.
  *  @return          Receive request.
  */
-#define MCA_PML_CM_THIN_RECV_REQUEST_ALLOC(recvreq, rc)                        \
+#define MCA_PML_CM_THIN_RECV_REQUEST_ALLOC(recvreq)                            \
     do {                                                                       \
     ompi_free_list_item_t*item;                                                \
-    OMPI_FREE_LIST_GET(&mca_pml_base_recv_requests, item, rc);                 \
+    OMPI_FREE_LIST_GET(&mca_pml_base_recv_requests, item);                     \
     recvreq = (mca_pml_cm_thin_recv_request_t*) item;                          \
     recvreq->req_base.req_pml_type = MCA_PML_CM_REQUEST_RECV_THIN;             \
     recvreq->req_mtl.ompi_req = (ompi_request_t*) recvreq;                     \
     recvreq->req_mtl.completion_callback = mca_pml_cm_recv_request_completion; \
  } while (0)

-#define MCA_PML_CM_HVY_RECV_REQUEST_ALLOC(recvreq, rc)                         \
+#define MCA_PML_CM_HVY_RECV_REQUEST_ALLOC(recvreq)                             \
 do {                                                                           \
     ompi_free_list_item_t*item;                                                \
-    OMPI_FREE_LIST_GET(&mca_pml_base_recv_requests, item, rc);                 \
+    OMPI_FREE_LIST_GET(&mca_pml_base_recv_requests, item);                     \
     recvreq = (mca_pml_cm_hvy_recv_request_t*) item;                           \
     recvreq->req_base.req_pml_type = MCA_PML_CM_REQUEST_RECV_HEAVY;            \
     recvreq->req_mtl.ompi_req = (ompi_request_t*) recvreq;                     \
Index: ompi/mca/pml/cm/pml_cm_send.c
===================================================================
--- ompi/mca/pml/cm/pml_cm_send.c	(revision 28700)
+++ ompi/mca/pml/cm/pml_cm_send.c	(working copy)
@@ -28,12 +28,11 @@
                         ompi_communicator_t* comm,
                         ompi_request_t** request)
 {
-    int ret;
     mca_pml_cm_hvy_send_request_t *sendreq;
     ompi_proc_t* ompi_proc;

-    MCA_PML_CM_HVY_SEND_REQUEST_ALLOC(sendreq, comm, dst, ompi_proc, ret);
-    if (OPAL_UNLIKELY(OMPI_SUCCESS != ret)) return ret;
+    MCA_PML_CM_HVY_SEND_REQUEST_ALLOC(sendreq, comm, dst, ompi_proc);
+    if (OPAL_UNLIKELY(NULL == sendreq)) return OMPI_ERR_OUT_OF_RESOURCE;

     MCA_PML_CM_HVY_SEND_REQUEST_INIT(sendreq, ompi_proc, comm, tag, dst, 
                                      datatype, sendmode, true, false, buf, count);
@@ -60,8 +59,8 @@
         mca_pml_cm_hvy_send_request_t* sendreq;
         ompi_proc_t* ompi_proc;

-        MCA_PML_CM_HVY_SEND_REQUEST_ALLOC(sendreq, comm, dst, ompi_proc, ret);
-        if (OPAL_UNLIKELY(OMPI_SUCCESS != ret)) return ret;
+        MCA_PML_CM_HVY_SEND_REQUEST_ALLOC(sendreq, comm, dst, ompi_proc);
+        if (OPAL_UNLIKELY(NULL == sendreq)) return OMPI_ERR_OUT_OF_RESOURCE;

         MCA_PML_CM_HVY_SEND_REQUEST_INIT(sendreq, 
                                          ompi_proc, 
@@ -82,8 +81,8 @@
     } else { 
         mca_pml_cm_thin_send_request_t* sendreq;
         ompi_proc_t* ompi_proc;
-        MCA_PML_CM_THIN_SEND_REQUEST_ALLOC(sendreq, comm, dst, ompi_proc, ret);
-        if (OPAL_UNLIKELY(OMPI_SUCCESS != ret)) return ret;
+        MCA_PML_CM_THIN_SEND_REQUEST_ALLOC(sendreq, comm, dst, ompi_proc);
+        if (OPAL_UNLIKELY(NULL == sendreq)) return OMPI_ERR_OUT_OF_RESOURCE;

         MCA_PML_CM_THIN_SEND_REQUEST_INIT(sendreq, 
                                           ompi_proc, 
@@ -126,8 +125,8 @@
     if(sendmode == MCA_PML_BASE_SEND_BUFFERED) { 
         mca_pml_cm_hvy_send_request_t *sendreq;
         ompi_proc_t * ompi_proc;
-        MCA_PML_CM_HVY_SEND_REQUEST_ALLOC(sendreq, comm, dst, ompi_proc, ret);
-        if (OPAL_UNLIKELY(OMPI_SUCCESS != ret)) return ret;
+        MCA_PML_CM_HVY_SEND_REQUEST_ALLOC(sendreq, comm, dst, ompi_proc);
+        if (OPAL_UNLIKELY(NULL == sendreq)) return OMPI_ERR_OUT_OF_RESOURCE;

         MCA_PML_CM_HVY_SEND_REQUEST_INIT(sendreq,
                                          ompi_proc,
@@ -150,8 +149,8 @@
     } else { 
         mca_pml_cm_thin_send_request_t *sendreq;
         ompi_proc_t * ompi_proc;
-        MCA_PML_CM_THIN_SEND_REQUEST_ALLOC(sendreq, comm, dst, ompi_proc, ret);
-        if (OPAL_UNLIKELY(OMPI_SUCCESS != ret)) return ret;
+        MCA_PML_CM_THIN_SEND_REQUEST_ALLOC(sendreq, comm, dst, ompi_proc);
+        if (OPAL_UNLIKELY(NULL == sendreq)) return OMPI_ERR_OUT_OF_RESOURCE;

         MCA_PML_CM_THIN_SEND_REQUEST_INIT(sendreq,
                                           ompi_proc,
Index: ompi/mca/pml/cm/pml_cm_sendreq.h
===================================================================
--- ompi/mca/pml/cm/pml_cm_sendreq.h	(revision 28700)
+++ ompi/mca/pml/cm/pml_cm_sendreq.h	(working copy)
@@ -2,7 +2,7 @@
  * Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
  *                         University Research and Technology
  *                         Corporation.  All rights reserved.
- * Copyright (c) 2004-2010 The University of Tennessee and The University
+ * Copyright (c) 2004-2013 The University of Tennessee and The University
  *                         of Tennessee Research Foundation.  All rights
  *                         reserved.
  * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart, 
@@ -57,18 +57,15 @@


 #define MCA_PML_CM_THIN_SEND_REQUEST_ALLOC(sendreq, comm, dst,          \
-                                           ompi_proc, rc)               \
+                                           ompi_proc)                   \
 do {                                                                    \
     ompi_free_list_item_t* item;                                        \
     ompi_proc = ompi_comm_peer_lookup( comm, dst );                     \
                                                                         \
     if(OPAL_UNLIKELY(NULL == ompi_proc)) {                              \
-        rc = OMPI_ERR_OUT_OF_RESOURCE;                                  \
         sendreq = NULL;                                                 \
     } else {                                                            \
-        rc = OMPI_SUCCESS;                                              \
-        OMPI_FREE_LIST_WAIT(&mca_pml_base_send_requests,                \
-                            item, rc);                                  \
+        OMPI_FREE_LIST_WAIT(&mca_pml_base_send_requests, item);         \
         sendreq = (mca_pml_cm_thin_send_request_t*)item;                \
         sendreq->req_send.req_base.req_pml_type = MCA_PML_CM_REQUEST_SEND_THIN; \
         sendreq->req_mtl.ompi_req = (ompi_request_t*) sendreq;          \
@@ -78,17 +75,14 @@


 #define MCA_PML_CM_HVY_SEND_REQUEST_ALLOC(sendreq, comm, dst,           \
-                                          ompi_proc, rc)                \
+                                          ompi_proc)                    \
 {                                                                       \
     ompi_free_list_item_t* item;                                        \
     ompi_proc = ompi_comm_peer_lookup( comm, dst );                     \
     if(OPAL_UNLIKELY(NULL == ompi_proc)) {                              \
-        rc = OMPI_ERR_OUT_OF_RESOURCE;                                  \
         sendreq = NULL;                                                 \
     } else {                                                            \
-        rc = OMPI_SUCCESS;                                              \
-        OMPI_FREE_LIST_WAIT(&mca_pml_base_send_requests,                \
-                            item, rc);                                  \
+        OMPI_FREE_LIST_WAIT(&mca_pml_base_send_requests, item);         \
         sendreq = (mca_pml_cm_hvy_send_request_t*)item;                 \
         sendreq->req_send.req_base.req_pml_type = MCA_PML_CM_REQUEST_SEND_HEAVY; \
         sendreq->req_mtl.ompi_req = (ompi_request_t*) sendreq;          \
Index: ompi/mca/pml/crcpw/pml_crcpw_module.c
===================================================================
--- ompi/mca/pml/crcpw/pml_crcpw_module.c	(revision 28700)
+++ ompi/mca/pml/crcpw/pml_crcpw_module.c	(working copy)
@@ -2,7 +2,7 @@
  * Copyright (c) 2004-2009 The Trustees of Indiana University and Indiana
  *                         University Research and Technology
  *                         Corporation.  All rights reserved.
- * Copyright (c) 2004-2006 The University of Tennessee and The University
+ * Copyright (c) 2004-2013 The University of Tennessee and The University
  *                         of Tennessee Research Foundation.  All rights
  *                         reserved.
  * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
@@ -58,11 +58,11 @@
     }
 };

-#define PML_CRCP_STATE_ALLOC(pml_state, rc)         \
+#define PML_CRCP_STATE_ALLOC(pml_state)             \
 do {                                                \
   if( !pml_crcpw_is_finalized ) {                   \
     ompi_free_list_item_t* item;                    \
-    OMPI_FREE_LIST_WAIT(&pml_state_list, item, rc); \
+    OMPI_FREE_LIST_WAIT(&pml_state_list, item);     \
     pml_state = (ompi_crcp_base_pml_state_t*)item;  \
   }                                                 \
 } while(0); 
@@ -84,7 +84,7 @@
         return mca_pml_crcpw_module.wrapped_pml_module.pml_enable(enable);
     }

-    PML_CRCP_STATE_ALLOC(pml_state, ret);
+    PML_CRCP_STATE_ALLOC(pml_state);

     pml_state->wrapped_pml_component = &(mca_pml_crcpw_module.wrapped_pml_component);
     pml_state->wrapped_pml_module    = &(mca_pml_crcpw_module.wrapped_pml_module);
@@ -126,7 +126,7 @@
         return mca_pml_crcpw_module.wrapped_pml_module.pml_add_comm(comm);
     }

-    PML_CRCP_STATE_ALLOC(pml_state, ret);
+    PML_CRCP_STATE_ALLOC(pml_state);

     pml_state->wrapped_pml_component = &(mca_pml_crcpw_module.wrapped_pml_component);
     pml_state->wrapped_pml_module    = &(mca_pml_crcpw_module.wrapped_pml_module);
@@ -168,7 +168,7 @@
         return mca_pml_crcpw_module.wrapped_pml_module.pml_del_comm(comm);
     }

-    PML_CRCP_STATE_ALLOC(pml_state, ret);
+    PML_CRCP_STATE_ALLOC(pml_state);
     if( NULL == pml_state ) {
         return mca_pml_crcpw_module.wrapped_pml_module.pml_del_comm(comm);
     }
@@ -209,7 +209,7 @@
     int ret;
     ompi_crcp_base_pml_state_t * pml_state = NULL;

-    PML_CRCP_STATE_ALLOC(pml_state, ret);
+    PML_CRCP_STATE_ALLOC(pml_state);

     pml_state->wrapped_pml_component = &(mca_pml_crcpw_module.wrapped_pml_component);
     pml_state->wrapped_pml_module    = &(mca_pml_crcpw_module.wrapped_pml_module);
@@ -247,7 +247,7 @@
     int ret;
     ompi_crcp_base_pml_state_t * pml_state = NULL;

-    PML_CRCP_STATE_ALLOC(pml_state, ret);
+    PML_CRCP_STATE_ALLOC(pml_state);

     pml_state->wrapped_pml_component = &(mca_pml_crcpw_module.wrapped_pml_component);
     pml_state->wrapped_pml_module    = &(mca_pml_crcpw_module.wrapped_pml_module);
@@ -285,7 +285,7 @@
     int ret;
     ompi_crcp_base_pml_state_t * pml_state = NULL;

-    PML_CRCP_STATE_ALLOC(pml_state, ret);
+    PML_CRCP_STATE_ALLOC(pml_state);

     pml_state->wrapped_pml_component = &(mca_pml_crcpw_module.wrapped_pml_component);
     pml_state->wrapped_pml_module    = &(mca_pml_crcpw_module.wrapped_pml_module);
@@ -328,7 +328,7 @@
     int ret;
     ompi_crcp_base_pml_state_t * pml_state = NULL;

-    PML_CRCP_STATE_ALLOC(pml_state, ret);
+    PML_CRCP_STATE_ALLOC(pml_state);

     pml_state->wrapped_pml_component = &(mca_pml_crcpw_module.wrapped_pml_component);
     pml_state->wrapped_pml_module    = &(mca_pml_crcpw_module.wrapped_pml_module);
@@ -372,7 +372,7 @@
     int ret;
     ompi_crcp_base_pml_state_t * pml_state = NULL;

-    PML_CRCP_STATE_ALLOC(pml_state, ret);
+    PML_CRCP_STATE_ALLOC(pml_state);

     pml_state->wrapped_pml_component = &(mca_pml_crcpw_module.wrapped_pml_component);
     pml_state->wrapped_pml_module    = &(mca_pml_crcpw_module.wrapped_pml_module);
@@ -411,7 +411,7 @@
     int ret;
     ompi_crcp_base_pml_state_t * pml_state = NULL;

-    PML_CRCP_STATE_ALLOC(pml_state, ret);
+    PML_CRCP_STATE_ALLOC(pml_state);

     pml_state->wrapped_pml_component = &(mca_pml_crcpw_module.wrapped_pml_component);
     pml_state->wrapped_pml_module    = &(mca_pml_crcpw_module.wrapped_pml_module);
@@ -453,7 +453,7 @@
     int ret;
     ompi_crcp_base_pml_state_t * pml_state = NULL;

-    PML_CRCP_STATE_ALLOC(pml_state, ret);
+    PML_CRCP_STATE_ALLOC(pml_state);

     pml_state->wrapped_pml_component = &(mca_pml_crcpw_module.wrapped_pml_component);
     pml_state->wrapped_pml_module    = &(mca_pml_crcpw_module.wrapped_pml_module);
@@ -495,7 +495,7 @@
     int ret;
     ompi_crcp_base_pml_state_t * pml_state = NULL;

-    PML_CRCP_STATE_ALLOC(pml_state, ret);
+    PML_CRCP_STATE_ALLOC(pml_state);

     pml_state->wrapped_pml_component = &(mca_pml_crcpw_module.wrapped_pml_component);
     pml_state->wrapped_pml_module    = &(mca_pml_crcpw_module.wrapped_pml_module);
@@ -534,7 +534,7 @@
     int ret;
     ompi_crcp_base_pml_state_t * pml_state = NULL;

-    PML_CRCP_STATE_ALLOC(pml_state, ret);
+    PML_CRCP_STATE_ALLOC(pml_state);

     pml_state->wrapped_pml_component = &(mca_pml_crcpw_module.wrapped_pml_component);
     pml_state->wrapped_pml_module    = &(mca_pml_crcpw_module.wrapped_pml_module);
@@ -579,7 +579,7 @@
     ompi_status_public_t* status = NULL;
     ompi_crcp_base_pml_state_t * pml_state = NULL;

-    PML_CRCP_STATE_ALLOC(pml_state, ret);
+    PML_CRCP_STATE_ALLOC(pml_state);

     pml_state->wrapped_pml_component = &(mca_pml_crcpw_module.wrapped_pml_component);
     pml_state->wrapped_pml_module    = &(mca_pml_crcpw_module.wrapped_pml_module);
@@ -636,7 +636,7 @@
     int ret;
     ompi_crcp_base_pml_state_t * pml_state = NULL;

-    PML_CRCP_STATE_ALLOC(pml_state, ret);
+    PML_CRCP_STATE_ALLOC(pml_state);

     pml_state->wrapped_pml_component = &(mca_pml_crcpw_module.wrapped_pml_component);
     pml_state->wrapped_pml_module    = &(mca_pml_crcpw_module.wrapped_pml_module);
@@ -678,7 +678,7 @@
         return mca_pml_crcpw_module.wrapped_pml_module.pml_progress();
     }

-    PML_CRCP_STATE_ALLOC(pml_state, ret);
+    PML_CRCP_STATE_ALLOC(pml_state);

     pml_state->wrapped_pml_component = &(mca_pml_crcpw_module.wrapped_pml_component);
     pml_state->wrapped_pml_module    = &(mca_pml_crcpw_module.wrapped_pml_module);
@@ -716,7 +716,7 @@
     int ret;
     ompi_crcp_base_pml_state_t * pml_state = NULL;

-    PML_CRCP_STATE_ALLOC(pml_state, ret);
+    PML_CRCP_STATE_ALLOC(pml_state);

     pml_state->wrapped_pml_component = &(mca_pml_crcpw_module.wrapped_pml_component);
     pml_state->wrapped_pml_module    = &(mca_pml_crcpw_module.wrapped_pml_module);
@@ -805,7 +805,7 @@
     int ret;
     ompi_crcp_base_pml_state_t * pml_state = NULL;

-    PML_CRCP_STATE_ALLOC(pml_state, ret);
+    PML_CRCP_STATE_ALLOC(pml_state);

     pml_state->wrapped_pml_component = &(mca_pml_crcpw_module.wrapped_pml_component);
     pml_state->wrapped_pml_module    = &(mca_pml_crcpw_module.wrapped_pml_module);
Index: ompi/mca/pml/ob1/pml_ob1.h
===================================================================
--- ompi/mca/pml/ob1/pml_ob1.h	(revision 28700)
+++ ompi/mca/pml/ob1/pml_ob1.h	(working copy)
@@ -3,7 +3,7 @@
  * Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
  *                         University Research and Technology
  *                         Corporation.  All rights reserved.
- * Copyright (c) 2004-2007 The University of Tennessee and The University
+ * Copyright (c) 2004-2013 The University of Tennessee and The University
  *                         of Tennessee Research Foundation.  All rights
  *                         reserved.
  * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart, 
@@ -220,10 +220,10 @@
 typedef struct mca_pml_ob1_pckt_pending_t mca_pml_ob1_pckt_pending_t;
 OBJ_CLASS_DECLARATION(mca_pml_ob1_pckt_pending_t);

-#define MCA_PML_OB1_PCKT_PENDING_ALLOC(pckt,rc)                 \
+#define MCA_PML_OB1_PCKT_PENDING_ALLOC(pckt)                    \
 do {                                                            \
     ompi_free_list_item_t* item;                                \
-    OMPI_FREE_LIST_WAIT(&mca_pml_ob1.pending_pckts, item, rc);  \
+    OMPI_FREE_LIST_WAIT(&mca_pml_ob1.pending_pckts, item);      \
     pckt = (mca_pml_ob1_pckt_pending_t*)item;                   \
 } while (0)

@@ -237,9 +237,8 @@
 #define MCA_PML_OB1_ADD_FIN_TO_PENDING(P, D, B, O, S)               \
     do {                                                            \
         mca_pml_ob1_pckt_pending_t *_pckt;                          \
-        int _rc;                                                    \
                                                                     \
-        MCA_PML_OB1_PCKT_PENDING_ALLOC(_pckt,_rc);                  \
+        MCA_PML_OB1_PCKT_PENDING_ALLOC(_pckt);                      \
         _pckt->hdr.hdr_common.hdr_type = MCA_PML_OB1_HDR_TYPE_FIN;  \
         _pckt->hdr.hdr_fin.hdr_des = (D);                           \
         _pckt->hdr.hdr_fin.hdr_fail = (S);                          \
Index: ompi/mca/pml/ob1/pml_ob1_iprobe.c
===================================================================
--- ompi/mca/pml/ob1/pml_ob1_iprobe.c	(revision 28700)
+++ ompi/mca/pml/ob1/pml_ob1_iprobe.c	(working copy)
@@ -94,10 +94,10 @@
     *message = ompi_message_alloc();
     if (NULL == *message) return OMPI_ERR_TEMP_OUT_OF_RESOURCE;

-    MCA_PML_OB1_RECV_REQUEST_ALLOC(recvreq, rc);
+    MCA_PML_OB1_RECV_REQUEST_ALLOC(recvreq);
     if (NULL == recvreq) {
         ompi_message_return(*message);
-        return rc;
+        return OMPI_ERR_TEMP_OUT_OF_RESOURCE;
     }
     recvreq->req_recv.req_base.req_type = MCA_PML_REQUEST_IMPROBE;

@@ -148,10 +148,10 @@
     *message = ompi_message_alloc();
     if (NULL == *message) return OMPI_ERR_TEMP_OUT_OF_RESOURCE;

-    MCA_PML_OB1_RECV_REQUEST_ALLOC(recvreq, rc);
+    MCA_PML_OB1_RECV_REQUEST_ALLOC(recvreq);
     if (NULL == recvreq) {
         ompi_message_return(*message);
-        return rc;
+        return OMPI_ERR_TEMP_OUT_OF_RESOURCE;
     }
     recvreq->req_recv.req_base.req_type = MCA_PML_REQUEST_MPROBE;

Index: ompi/mca/pml/ob1/pml_ob1_irecv.c
===================================================================
--- ompi/mca/pml/ob1/pml_ob1_irecv.c	(revision 28700)
+++ ompi/mca/pml/ob1/pml_ob1_irecv.c	(working copy)
@@ -35,11 +35,10 @@
                            struct ompi_communicator_t *comm,
                            struct ompi_request_t **request)
 {
-    int rc;
     mca_pml_ob1_recv_request_t *recvreq;
-    MCA_PML_OB1_RECV_REQUEST_ALLOC(recvreq, rc);
+    MCA_PML_OB1_RECV_REQUEST_ALLOC(recvreq);
     if (NULL == recvreq)
-        return rc;
+        return OMPI_ERR_TEMP_OUT_OF_RESOURCE;

     MCA_PML_OB1_RECV_REQUEST_INIT(recvreq,
                                    addr,
@@ -61,12 +60,10 @@
                       struct ompi_communicator_t *comm,
                       struct ompi_request_t **request)
 {
-    int rc;
-
     mca_pml_ob1_recv_request_t *recvreq;
-    MCA_PML_OB1_RECV_REQUEST_ALLOC(recvreq, rc);
+    MCA_PML_OB1_RECV_REQUEST_ALLOC(recvreq);
     if (NULL == recvreq)
-        return rc;
+        return OMPI_ERR_TEMP_OUT_OF_RESOURCE;

     MCA_PML_OB1_RECV_REQUEST_INIT(recvreq,
                                    addr,
@@ -92,9 +89,9 @@
 {
     int rc;
     mca_pml_ob1_recv_request_t *recvreq;
-    MCA_PML_OB1_RECV_REQUEST_ALLOC(recvreq, rc);
+    MCA_PML_OB1_RECV_REQUEST_ALLOC(recvreq);
     if (NULL == recvreq)
-        return rc;
+        return OMPI_ERR_TEMP_OUT_OF_RESOURCE;

     MCA_PML_OB1_RECV_REQUEST_INIT(recvreq,
                                    addr,
Index: ompi/mca/pml/ob1/pml_ob1_isend.c
===================================================================
--- ompi/mca/pml/ob1/pml_ob1_isend.c	(revision 28700)
+++ ompi/mca/pml/ob1/pml_ob1_isend.c	(working copy)
@@ -34,12 +34,10 @@
                            ompi_communicator_t * comm,
                            ompi_request_t ** request)
 {
-    int rc;
-    
     mca_pml_ob1_send_request_t *sendreq = NULL;
-    MCA_PML_OB1_SEND_REQUEST_ALLOC(comm, dst, sendreq, rc);
-    if (rc != OMPI_SUCCESS)
-        return rc;
+    MCA_PML_OB1_SEND_REQUEST_ALLOC(comm, dst, sendreq);
+    if (NULL == sendreq)
+        return OMPI_ERR_OUT_OF_RESOURCE;

     MCA_PML_OB1_SEND_REQUEST_INIT(sendreq,
                                   buf,
@@ -69,9 +67,9 @@
     int rc;
     mca_pml_ob1_send_request_t *sendreq = NULL;

-    MCA_PML_OB1_SEND_REQUEST_ALLOC(comm, dst, sendreq, rc);
-    if (rc != OMPI_SUCCESS)
-        return rc;
+    MCA_PML_OB1_SEND_REQUEST_ALLOC(comm, dst, sendreq);
+    if (NULL == sendreq)
+        return OMPI_ERR_OUT_OF_RESOURCE;

     MCA_PML_OB1_SEND_REQUEST_INIT(sendreq,
                                   buf,
@@ -101,9 +99,9 @@
     int rc;
     mca_pml_ob1_send_request_t *sendreq;

-    MCA_PML_OB1_SEND_REQUEST_ALLOC(comm, dst, sendreq, rc);
-    if (rc != OMPI_SUCCESS)
-        return rc;
+    MCA_PML_OB1_SEND_REQUEST_ALLOC(comm, dst, sendreq);
+    if (NULL == sendreq)
+        return OMPI_ERR_OUT_OF_RESOURCE;

     MCA_PML_OB1_SEND_REQUEST_INIT(sendreq,
                                   buf,
Index: ompi/mca/pml/ob1/pml_ob1_rdmafrag.h
===================================================================
--- ompi/mca/pml/ob1/pml_ob1_rdmafrag.h	(revision 28700)
+++ ompi/mca/pml/ob1/pml_ob1_rdmafrag.h	(working copy)
@@ -3,7 +3,7 @@
  * Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
  *                         University Research and Technology
  *                         Corporation.  All rights reserved.
- * Copyright (c) 2004-2006 The University of Tennessee and The University
+ * Copyright (c) 2004-2013 The University of Tennessee and The University
  *                         of Tennessee Research Foundation.  All rights
  *                         reserved.
  * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart, 
@@ -51,10 +51,10 @@
 OBJ_CLASS_DECLARATION(mca_pml_ob1_rdma_frag_t);


-#define MCA_PML_OB1_RDMA_FRAG_ALLOC(frag,rc)                    \
+#define MCA_PML_OB1_RDMA_FRAG_ALLOC(frag)                       \
 do {                                                            \
     ompi_free_list_item_t* item;                                \
-    OMPI_FREE_LIST_WAIT(&mca_pml_ob1.rdma_frags, item, rc);     \
+    OMPI_FREE_LIST_WAIT(&mca_pml_ob1.rdma_frags, item);         \
     frag = (mca_pml_ob1_rdma_frag_t*)item;                      \
 } while(0)

Index: ompi/mca/pml/ob1/pml_ob1_recvfrag.c
===================================================================
--- ompi/mca/pml/ob1/pml_ob1_recvfrag.c	(revision 28700)
+++ ompi/mca/pml/ob1/pml_ob1_recvfrag.c	(working copy)
@@ -3,7 +3,7 @@
  * Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
  *                         University Research and Technology
  *                         Corporation.  All rights reserved.
- * Copyright (c) 2004-2011 The University of Tennessee and The University
+ * Copyright (c) 2004-2013 The University of Tennessee and The University
  *                         of Tennessee Research Foundation.  All rights
  *                         reserved.
  * Copyright (c) 2004-2007 High Performance Computing Center Stuttgart, 
@@ -73,10 +73,8 @@
                     mca_pml_ob1_match_hdr_t *hdr, mca_btl_base_segment_t* segments,
                     size_t num_segments, mca_pml_ob1_recv_frag_t* frag)
 {
-    int rc;
-
     if(NULL == frag) {
-        MCA_PML_OB1_RECV_FRAG_ALLOC(frag, rc);
+        MCA_PML_OB1_RECV_FRAG_ALLOC(frag);
         MCA_PML_OB1_RECV_FRAG_INIT(frag, hdr, segments, num_segments, btl);
     }
     opal_list_append(queue, (opal_list_item_t*)frag);
@@ -509,7 +507,6 @@
 {
     mca_pml_ob1_recv_request_t *match;
     mca_pml_ob1_comm_t *comm = (mca_pml_ob1_comm_t *)comm_ptr->c_pml_comm;
-    int rc;

     do {
         match = match_incomming(hdr, comm, proc);
@@ -530,7 +527,7 @@
                    restarted later during mrecv */
                 mca_pml_ob1_recv_frag_t *tmp;
                 if(NULL == frag) {
-                    MCA_PML_OB1_RECV_FRAG_ALLOC(tmp, rc);
+                    MCA_PML_OB1_RECV_FRAG_ALLOC(tmp);
                     MCA_PML_OB1_RECV_FRAG_INIT(tmp, hdr, segments, num_segments, btl);
                 } else {
                     tmp = frag;
Index: ompi/mca/pml/ob1/pml_ob1_recvfrag.h
===================================================================
--- ompi/mca/pml/ob1/pml_ob1_recvfrag.h	(revision 28700)
+++ ompi/mca/pml/ob1/pml_ob1_recvfrag.h	(working copy)
@@ -3,7 +3,7 @@
  * Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
  *                         University Research and Technology
  *                         Corporation.  All rights reserved.
- * Copyright (c) 2004-2006 The University of Tennessee and The University
+ * Copyright (c) 2004-2013 The University of Tennessee and The University
  *                         of Tennessee Research Foundation.  All rights
  *                         reserved.
  * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart, 
@@ -53,10 +53,10 @@
 OBJ_CLASS_DECLARATION(mca_pml_ob1_recv_frag_t);


-#define MCA_PML_OB1_RECV_FRAG_ALLOC(frag,rc)                    \
+#define MCA_PML_OB1_RECV_FRAG_ALLOC(frag)                       \
 do {                                                            \
     ompi_free_list_item_t* item;                                \
-    OMPI_FREE_LIST_WAIT(&mca_pml_ob1.recv_frags, item, rc);     \
+    OMPI_FREE_LIST_WAIT(&mca_pml_ob1.recv_frags, item);         \
     frag = (mca_pml_ob1_recv_frag_t*)item;                      \
 } while(0)

Index: ompi/mca/pml/ob1/pml_ob1_recvreq.c
===================================================================
--- ompi/mca/pml/ob1/pml_ob1_recvreq.c	(revision 28700)
+++ ompi/mca/pml/ob1/pml_ob1_recvreq.c	(working copy)
@@ -3,7 +3,7 @@
  * Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
  *                         University Research and Technology
  *                         Corporation.  All rights reserved.
- * Copyright (c) 2004-2012 The University of Tennessee and The University
+ * Copyright (c) 2004-2013 The University of Tennessee and The University
  *                         of Tennessee Research Foundation.  All rights
  *                         reserved.
  * Copyright (c) 2004-2008 High Performance Computing Center Stuttgart, 
@@ -684,10 +684,10 @@
      * accumulates the number of bytes that were sent so far. */
     while (bytes_remaining > 0) {
         /* allocate/initialize a fragment */
-        MCA_PML_OB1_RDMA_FRAG_ALLOC(frag,rc);
+        MCA_PML_OB1_RDMA_FRAG_ALLOC(frag);
         if (OPAL_UNLIKELY(NULL == frag)) {
             /* GLB - FIX */
-             OMPI_ERROR_LOG(rc);
+             OMPI_ERROR_LOG(OMPI_ERR_OUT_OF_RESOURCE);
              ompi_rte_abort(-1, NULL);
         }

Index: ompi/mca/pml/ob1/pml_ob1_recvreq.h
===================================================================
--- ompi/mca/pml/ob1/pml_ob1_recvreq.h	(revision 28700)
+++ ompi/mca/pml/ob1/pml_ob1_recvreq.h	(working copy)
@@ -2,7 +2,7 @@
  * Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
  *                         University Research and Technology
  *                         Corporation.  All rights reserved.
- * Copyright (c) 2004-2010 The University of Tennessee and The University
+ * Copyright (c) 2004-2013 The University of Tennessee and The University
  *                         of Tennessee Research Foundation.  All rights
  *                         reserved.
  * Copyright (c) 2004-2007 High Performance Computing Center Stuttgart, 
@@ -72,11 +72,9 @@
- *  @param rc (OUT)  OMPI_SUCCESS or error status on failure.
  *  @return          Receive request.
  */
-#define MCA_PML_OB1_RECV_REQUEST_ALLOC(recvreq, rc)                \
+#define MCA_PML_OB1_RECV_REQUEST_ALLOC(recvreq)                    \
 do {                                                               \
    ompi_free_list_item_t* item;                                    \
-   rc = OMPI_SUCCESS;                                              \
-   OMPI_FREE_LIST_GET(&mca_pml_base_recv_requests, item, rc);      \
+   OMPI_FREE_LIST_GET(&mca_pml_base_recv_requests, item);          \
    recvreq = (mca_pml_ob1_recv_request_t*)item;                    \
 } while(0)

@@ -386,9 +385,8 @@
 #define MCA_PML_OB1_ADD_ACK_TO_PENDING(P, S, D, O)                      \
     do {                                                                \
         mca_pml_ob1_pckt_pending_t *_pckt;                              \
-        int _rc; (void)_rc;                                             \
                                                                         \
-        MCA_PML_OB1_PCKT_PENDING_ALLOC(_pckt,_rc);                      \
+        MCA_PML_OB1_PCKT_PENDING_ALLOC(_pckt);                          \
         _pckt->hdr.hdr_common.hdr_type = MCA_PML_OB1_HDR_TYPE_ACK;      \
         _pckt->hdr.hdr_ack.hdr_src_req.lval = (S);                      \
         _pckt->hdr.hdr_ack.hdr_dst_req.pval = (D);                      \
Index: ompi/mca/pml/ob1/pml_ob1_sendreq.c
===================================================================
--- ompi/mca/pml/ob1/pml_ob1_sendreq.c	(revision 28700)
+++ ompi/mca/pml/ob1/pml_ob1_sendreq.c	(working copy)
@@ -3,7 +3,7 @@
  * Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
  *                         University Research and Technology
  *                         Corporation.  All rights reserved.
- * Copyright (c) 2004-2008 The University of Tennessee and The University
+ * Copyright (c) 2004-2013 The University of Tennessee and The University
  *                         of Tennessee Research Foundation.  All rights
  *                         reserved.
  * Copyright (c) 2004-2008 High Performance Computing Center Stuttgart, 
@@ -868,13 +868,13 @@
     ompi_free_list_item_t *i;
     mca_bml_base_endpoint_t* bml_endpoint = sendreq->req_endpoint;
     int num_btls = mca_bml_base_btl_array_get_size(&bml_endpoint->btl_send);
-    int rc, n;
+    int n;
     double weight_total = 0;

     if( OPAL_UNLIKELY(0 == send_length) )
         return;

-    OMPI_FREE_LIST_WAIT(&mca_pml_ob1.send_ranges, i, rc);
+    OMPI_FREE_LIST_WAIT(&mca_pml_ob1.send_ranges, i);

     sr = (mca_pml_ob1_send_range_t*)i;

@@ -1241,7 +1241,6 @@
 {
     mca_bml_base_endpoint_t *bml_endpoint = sendreq->req_endpoint;
     mca_pml_ob1_rdma_frag_t* frag;
-    int rc;
     size_t i, size = 0;

     if(hdr->hdr_common.hdr_flags & MCA_PML_OB1_HDR_TYPE_ACK) { 
@@ -1250,11 +1249,11 @@

     sendreq->req_recv.pval = hdr->hdr_recv_req.pval;

-    MCA_PML_OB1_RDMA_FRAG_ALLOC(frag, rc); 
+    MCA_PML_OB1_RDMA_FRAG_ALLOC(frag); 

     if( OPAL_UNLIKELY(NULL == frag) ) {
         /* TSW - FIX */
-        OMPI_ERROR_LOG(rc);
+        OMPI_ERROR_LOG(OMPI_ERR_OUT_OF_RESOURCE);
         ompi_rte_abort(-1, NULL);
     }

Index: ompi/mca/pml/ob1/pml_ob1_sendreq.h
===================================================================
--- ompi/mca/pml/ob1/pml_ob1_sendreq.h	(revision 28700)
+++ ompi/mca/pml/ob1/pml_ob1_sendreq.h	(working copy)
@@ -3,7 +3,7 @@
  * Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
  *                         University Research and Technology
  *                         Corporation.  All rights reserved.
- * Copyright (c) 2004-2010 The University of Tennessee and The University
+ * Copyright (c) 2004-2013 The University of Tennessee and The University
  *                         of Tennessee Research Foundation.  All rights
  *                         reserved.
  * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart, 
@@ -119,16 +119,14 @@

 #define MCA_PML_OB1_SEND_REQUEST_ALLOC( comm,                           \
                                         dst,                            \
-                                        sendreq,                        \
-                                        rc)                             \
+                                        sendreq)                        \
     {                                                                   \
         ompi_proc_t *proc = ompi_comm_peer_lookup( comm, dst );         \
         ompi_free_list_item_t* item;                                    \
                                                                         \
-        rc = OMPI_ERR_OUT_OF_RESOURCE;                                  \
+        sendreq = NULL;                                                 \
         if( OPAL_LIKELY(NULL != proc) ) {                               \
-            rc = OMPI_SUCCESS;                                          \
-            OMPI_FREE_LIST_WAIT(&mca_pml_base_send_requests, item, rc); \
+            OMPI_FREE_LIST_WAIT(&mca_pml_base_send_requests, item);     \
             sendreq = (mca_pml_ob1_send_request_t*)item;                \
             sendreq->req_send.req_base.req_proc = proc;                 \
             sendreq->src_des = NULL;                                    \
Index: ompi/mca/rcache/rb/rcache_rb_tree.c
===================================================================
--- ompi/mca/rcache/rb/rcache_rb_tree.c	(revision 28700)
+++ ompi/mca/rcache/rb/rcache_rb_tree.c	(working copy)
@@ -2,7 +2,7 @@
   * Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
   *                         University Research and Technology
   *                         Corporation.  All rights reserved.
-  * Copyright (c) 2004-2006 The University of Tennessee and The University
+  * Copyright (c) 2004-2013 The University of Tennessee and The University
   *                         of Tennessee Research Foundation.  All rights
   *                         reserved.
   * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart, 
@@ -116,9 +116,9 @@
     int rc; 
     mca_rcache_rb_tree_item_t* rb_tree_item; 

-    OMPI_FREE_LIST_GET(&rb_module->rb_tree_item_list, item, rc);
-    if(OMPI_SUCCESS != rc) { 
-        return rc; 
+    OMPI_FREE_LIST_GET(&rb_module->rb_tree_item_list, item);
+    if(NULL == item) { 
+        return OMPI_ERR_OUT_OF_RESOURCE;
     }
     rb_tree_item = (mca_rcache_rb_tree_item_t*) item; 

Index: ompi/mca/rcache/vma/rcache_vma_tree.c
===================================================================
--- ompi/mca/rcache/vma/rcache_vma_tree.c	(revision 28700)
+++ ompi/mca/rcache/vma/rcache_vma_tree.c	(working copy)
@@ -108,7 +108,6 @@
 static inline mca_rcache_vma_t *mca_rcache_vma_new(
         mca_rcache_vma_module_t *vma_rcache, uintptr_t start, uintptr_t end)
 {
-    int rc;
     mca_rcache_vma_t *vma = OBJ_NEW(mca_rcache_vma_t);

     if(NULL == vma)
@@ -118,7 +117,7 @@
     vma->end = end;
     vma->rcache = vma_rcache;

-    rc = ompi_rb_tree_insert(&vma_rcache->rb_tree, vma, vma);
+    (void)ompi_rb_tree_insert(&vma_rcache->rb_tree, vma, vma);

     return vma;
 }
Index: ompi/mca/vprotocol/pessimist/vprotocol_pessimist_event.h
===================================================================
--- ompi/mca/vprotocol/pessimist/vprotocol_pessimist_event.h	(revision 28700)
+++ ompi/mca/vprotocol/pessimist/vprotocol_pessimist_event.h	(working copy)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2004-2007 The Trustees of the University of Tennessee.
+ * Copyright (c) 2004-2013 The Trustees of the University of Tennessee.
  *                         All rights reserved.
  * $COPYRIGHT$
  *
@@ -51,26 +51,26 @@
 OMPI_DECLSPEC OBJ_CLASS_DECLARATION(mca_vprotocol_pessimist_event_t);


-#define VPESSIMIST_MATCHING_EVENT_NEW(event) do {                             \
-  ompi_free_list_item_t *item;                                                \
-  int rc;                                                                     \
-  OMPI_FREE_LIST_WAIT(&mca_vprotocol_pessimist.events_pool, item, rc);        \
-  event = (mca_vprotocol_pessimist_event_t *) item;                           \
-  event->type = VPROTOCOL_PESSIMIST_EVENT_TYPE_MATCHING;                      \
-  event->u_event.e_matching.src = -1;                                         \
-} while(0)
+#define VPESSIMIST_MATCHING_EVENT_NEW(event)                            \
+    do {                                                                \
+        ompi_free_list_item_t *item;                                    \
+        OMPI_FREE_LIST_WAIT(&mca_vprotocol_pessimist.events_pool, item); \
+        event = (mca_vprotocol_pessimist_event_t *) item;               \
+        event->type = VPROTOCOL_PESSIMIST_EVENT_TYPE_MATCHING;          \
+        event->u_event.e_matching.src = -1;                             \
+    } while(0)

-#define VPESSIMIST_DELIVERY_EVENT_NEW(event) do {                             \
-  ompi_free_list_item_t *item;                                                \
-  int rc;                                                                     \
-  OMPI_FREE_LIST_WAIT(&mca_vprotocol_pessimist.events_pool, item, rc);        \
-  event = (mca_vprotocol_pessimist_event_t *) item;                           \
-  event->type = VPROTOCOL_PESSIMIST_EVENT_TYPE_DELIVERY;                      \
-} while(0)
+#define VPESSIMIST_DELIVERY_EVENT_NEW(event)                            \
+    do {                                                                \
+        ompi_free_list_item_t *item;                                    \
+        OMPI_FREE_LIST_WAIT(&mca_vprotocol_pessimist.events_pool, item); \
+        event = (mca_vprotocol_pessimist_event_t *) item;               \
+        event->type = VPROTOCOL_PESSIMIST_EVENT_TYPE_DELIVERY;          \
+    } while(0)

-#define VPESSIMIST_EVENT_RETURN(event)                                        \
-  OMPI_FREE_LIST_RETURN(&mca_vprotocol_pessimist.events_pool,                 \
-  (ompi_free_list_item_t *) event)
+#define VPESSIMIST_EVENT_RETURN(event)                                  \
+    OMPI_FREE_LIST_RETURN(&mca_vprotocol_pessimist.events_pool,         \
+                          (ompi_free_list_item_t *) event)

 END_C_DECLS
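
With the local rc gone from the event macros, a call site needs only the event pointer.
A brief illustrative sketch (not part of the patch):

    mca_vprotocol_pessimist_event_t *event;

    VPESSIMIST_MATCHING_EVENT_NEW(event);        /* no local rc needed any more */
    /* ... fill in event->u_event.e_matching as the protocol requires ... */
    VPESSIMIST_EVENT_RETURN(event);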

Index: test/class/ompi_rb_tree.c
===================================================================
--- test/class/ompi_rb_tree.c	(revision 28700)
+++ test/class/ompi_rb_tree.c	(working copy)
@@ -2,7 +2,7 @@
  * Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
  *                         University Research and Technology
  *                         Corporation.  All rights reserved.
- * Copyright (c) 2004-2005 The University of Tennessee and The University
+ * Copyright (c) 2004-2013 The University of Tennessee and The University
  *                         of Tennessee Research Foundation.  All rights
  *                         reserved.
  * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart, 
@@ -338,8 +338,8 @@
             test_failure("system out of memory");
             return;
         }   
-        OMPI_FREE_LIST_GET(&key_list, new_value, rc);
-        if(OMPI_SUCCESS != rc)
+        OMPI_FREE_LIST_GET(&key_list, new_value);
+        if(NULL == new_value)
         {
             test_failure("failed to get memory from free list");
         }
