In a couple of places Valgrind spotted a few bugs where memory was being
freed too early. The following patches correct these errors.

Simo.

-- 
Simo Sorce * Red Hat, Inc * New York
From f7968749cd062081421e72006581e0aacf547bab Mon Sep 17 00:00:00 2001
From: Simo Sorce <sso...@redhat.com>
Date: Fri, 11 Sep 2009 11:18:45 -0400
Subject: [PATCH 1/2] Fix memory mishandling.

By attaching the reply to a subreq, we ended up freeing the operations list
element before using it to skip to the next one.
Do not steal the reply onto the subreq anymore; let the unlocking code free
the old reply when it moves on to processing the next one.
Caught this one with valgrind.
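
To make the ownership issue easier to see, here is a minimal standalone sketch
of the talloc behaviour at play (illustrative names only, not the sssd code):
stealing the current reply onto the subreq means that freeing the subreq also
frees the list head, and advancing the list afterwards reads freed memory.

#include <talloc.h>

struct item {
    struct item *next;
};

int main(void)
{
    /* stand-ins for the sdap_op and the tevent subreq */
    TALLOC_CTX *op = talloc_new(NULL);
    TALLOC_CTX *subreq = talloc_new(NULL);

    struct item *reply = talloc_zero(op, struct item);
    reply->next = talloc_zero(op, struct item);

    /* what the old code did: hand the current reply to the subreq ... */
    talloc_steal(subreq, reply);

    /* ... so freeing the subreq also frees the reply ... */
    talloc_free(subreq);

    /* ... and reading reply->next here would be a use after free,
     * which is exactly what valgrind flagged in sdap_unlock_next_reply() */

    talloc_free(op);
    return 0;
}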
---
 server/providers/ldap/sdap_async.c |   17 +++++++----------
 1 files changed, 7 insertions(+), 10 deletions(-)

diff --git a/server/providers/ldap/sdap_async.c b/server/providers/ldap/sdap_async.c
index 15985ff..84e13b5 100644
--- a/server/providers/ldap/sdap_async.c
+++ b/server/providers/ldap/sdap_async.c
@@ -300,8 +300,14 @@ static void sdap_unlock_next_reply(struct sdap_op *op)
 {
     struct timeval no_timeout = {0, 0};
     struct tevent_timer *te;
+    struct sdap_msg *next_reply;
 
-    op->list = op->list->next;
+    if (op->list) {
+        next_reply = op->list->next;
+        /* get rid of the previous reply, it has been processed already */
+        talloc_zfree(op->list);
+        op->list = next_reply;
+    }
 
     /* if there are still replies to parse, queue a new operation */
     if (op->list) {
@@ -1392,9 +1398,6 @@ static void sdap_get_users_done(struct sdap_op *op,
             return;
         }
         tevent_req_set_callback(subreq, sdap_get_users_save_done, req);
-        /* attach reply to subreq,
-         * will not be needed anymore once subreq is done */
-        talloc_steal(subreq, reply);
 
         break;
 
@@ -1616,9 +1619,6 @@ static void sdap_get_groups_done(struct sdap_op *op,
             return;
         }
         tevent_req_set_callback(subreq, sdap_get_groups_save_done, req);
-        /* attach reply to subreq,
-         * will not be needed anymore once subreq is done */
-        talloc_steal(subreq, reply);
 
         break;
 
@@ -1945,9 +1945,6 @@ static void sdap_get_initgr_done(struct sdap_op *op,
             return;
         }
         tevent_req_set_callback(subreq, sdap_get_initgr_save_done, req);
-        /* attach reply to subreq,
-         * will not be needed anymore once subreq is done */
-        talloc_steal(subreq, reply);
 
         break;
 
-- 
1.6.2.5

From 3dd15301e4dfcf491cd8e8a4ee86e8aa71be2701 Mon Sep 17 00:00:00 2001
From: Simo Sorce <sso...@redhat.com>
Date: Fri, 11 Sep 2009 11:33:00 -0400
Subject: [PATCH 2/2] Fix ldap enumeration async task

The request was being freed directly, instead of marking it done and letting
the callback free it. This was causing us to access freed memory when trying
to set the next run.
Let the callback schedule new runs and free the request instead, as we would
normally do with any other tevent_req async call.
Courtesy of valgrind again.
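
For reference, this is the usual tevent_req shape this patch goes back to, as
a standalone sketch; my_ctx, my_enum_recv and my_enumerate_set_timer are
made-up stand-ins, not the real ldap_id symbols. The internal handler only
marks the request finished, and the callback installed with
tevent_req_set_callback() checks the result, frees the request and schedules
the next run.

#include <stdint.h>
#include <sys/time.h>
#include <talloc.h>
#include <tevent.h>

/* same idea as sssd's talloc_zfree(): free and clear the pointer */
#define talloc_zfree(ptr) do { talloc_free(ptr); (ptr) = NULL; } while (0)

struct my_ctx {
    struct timeval last_run;
};

/* trivial stand-ins for the real recv/scheduling helpers */
static int my_enum_recv(struct tevent_req *subreq) { return 0; }
static void my_enumerate_set_timer(struct my_ctx *ctx, struct timeval tv) { }

/* internal handler: only mark the request finished, never free it here */
static void my_enum_done(struct tevent_req *subreq)
{
    struct tevent_req *req = tevent_req_callback_data(subreq,
                                                      struct tevent_req);
    int ret;

    ret = my_enum_recv(subreq);
    talloc_zfree(subreq);
    if (ret != 0) {
        tevent_req_error(req, ret);
        return;
    }
    tevent_req_done(req);
}

/* caller's callback: decide the next run, then free the request */
static void my_enum_reschedule(struct tevent_req *req)
{
    struct my_ctx *ctx = tevent_req_callback_data(req, struct my_ctx);
    enum tevent_req_state tstate;
    uint64_t err;
    struct timeval tv;

    if (tevent_req_is_error(req, &tstate, &err)) {
        tv = tevent_timeval_current();  /* failed: retry from now */
    } else {
        tv = ctx->last_run;             /* ok: keep the normal cadence */
    }
    talloc_zfree(req);

    my_enumerate_set_timer(ctx, tv);
}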
---
 server/providers/ldap/ldap_id.c |   28 ++++++++++++++++------------
 1 files changed, 16 insertions(+), 12 deletions(-)

diff --git a/server/providers/ldap/ldap_id.c b/server/providers/ldap/ldap_id.c
index efd9e91..bebeea2 100644
--- a/server/providers/ldap/ldap_id.c
+++ b/server/providers/ldap/ldap_id.c
@@ -846,7 +846,6 @@ static void ldap_id_enumerate(struct tevent_context *ev,
         ldap_id_enumerate_set_timer(ctx, tevent_timeval_current());
         return;
     }
-
     tevent_req_set_callback(req, ldap_id_enumerate_reschedule, ctx);
 
     /* if enumeration takes so long, either we try to enumerate too
@@ -877,6 +876,18 @@ static void ldap_id_enumerate_reschedule(struct tevent_req *req)
 {
     struct sdap_id_ctx *ctx = tevent_req_callback_data(req,
                                                        struct sdap_id_ctx);
+    struct timeval tv;
+    enum tevent_req_state tstate;
+    uint64_t err;
+
+    if (tevent_req_is_error(req, &tstate, &err)) {
+        /* On error schedule starting from now, not the last run */
+        tv = tevent_timeval_current();
+    } else {
+        tv = ctx->last_run;
+    }
+    talloc_zfree(req);
+
-    ldap_id_enumerate_set_timer(ctx, ctx->last_run);
+    ldap_id_enumerate_set_timer(ctx, tv);
 }
 
@@ -965,10 +976,7 @@ fail:
     }
 
     DEBUG(1, ("Failed to enumerate users, retrying later!\n"));
-    /* schedule starting from now, not the last run */
-    ldap_id_enumerate_set_timer(state->ctx, tevent_timeval_current());
-
-    talloc_zfree(req);
+    tevent_req_done(req);
 }
 
 static void ldap_id_enum_groups_done(struct tevent_req *subreq)
@@ -983,10 +991,9 @@ static void ldap_id_enum_groups_done(struct tevent_req *subreq)
     if (tevent_req_is_error(subreq, &tstate, &err)) {
         goto fail;
     }
-    talloc_zfree(req);
-
-    ldap_id_enumerate_set_timer(state->ctx, state->ctx->last_run);
+    talloc_zfree(subreq);
 
+    tevent_req_done(req);
     return;
 
 fail:
@@ -995,10 +1002,7 @@ fail:
     }
 
     DEBUG(1, ("Failed to enumerate groups, retrying later!\n"));
-    /* schedule starting from now, not the last run */
-    ldap_id_enumerate_set_timer(state->ctx, tevent_timeval_current());
-
-    talloc_zfree(req);
+    tevent_req_done(req);
 }
 
 /* ==User-Enumeration===================================================== */
-- 
1.6.2.5
