> jerenkrantz    02/05/29 22:42:46
>
>   Modified:    modules/generators mod_cgid.c
>   Log:
>   Apply same patch to mod_cgid that was applied to mod_cgi so that it
>   bypasses the *_client_block calls in favor of using the input filters
>   and brigades directly.
>
>   Revision  Changes    Path
>   1.134     +70 -35    httpd-2.0/modules/generators/mod_cgid.c
>
>   Index: mod_cgid.c
>   ===================================================================
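
(For anyone who hasn't tracked the mod_cgi change: the pattern both modules now
use to pull the request body is roughly the following -- a trimmed sketch,
without the script-log buffering and child-write error handling that are in
the real patch:)

    apr_bucket_brigade *bb;      /* created earlier from the request pool */
    apr_bucket *bucket;
    apr_status_t rv;
    int seen_eos = 0;

    do {
        rv = ap_get_brigade(r->input_filters, bb, AP_MODE_READBYTES,
                            APR_BLOCK_READ, HUGE_STRING_LEN);
        if (rv != APR_SUCCESS) {
            return rv;
        }

        APR_BRIGADE_FOREACH(bucket, bb) {
            const char *data;
            apr_size_t len;

            if (APR_BUCKET_IS_EOS(bucket)) {
                seen_eos = 1;
                break;
            }
            apr_bucket_read(bucket, &data, &len, APR_BLOCK_READ);
            /* hand data/len to the child's stdin */
        }
        apr_brigade_cleanup(bb);
    } while (!seen_eos);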

<snip>

>   +    do {
>   +        apr_bucket *bucket;
>   +        apr_status_t rv;
>   +
>   +        rv = ap_get_brigade(r->input_filters, bb, AP_MODE_READBYTES,
>   +                            APR_BLOCK_READ, HUGE_STRING_LEN);
>   +
>   +        if (rv != APR_SUCCESS) {
>   +            return rv;
>   +        }
>   +
>   +        APR_BRIGADE_FOREACH(bucket, bb) {
>   +            const char *data;
>   +            apr_size_t len;
>   +
>   +            if (APR_BUCKET_IS_EOS(bucket)) {
>   +                seen_eos = 1;
>   +                break;
>   +            }
>   +
>   +            /* We can't do much with this. */
>   +            if (APR_BUCKET_IS_FLUSH(bucket)) {
>   +                continue;
>   +            }
>   +
>   +            /* If the child stopped, we still must read to EOS. */
>   +            if (child_stopped_reading) {
>   +                continue;
>   +            }

The above section of code looks like it could turn into an endless loop if the
brigade never contains an EOS bucket. Should the check for child_stopped_reading
occur after the apr_bucket_read below, so the bucket data is still consumed while
we soak up the rest of the body?
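
i.e., something along these lines (untested, just to illustrate the ordering
I have in mind):

            /* Read the bucket data in any case, so the input filters keep
             * draining the body and can eventually deliver EOS. */
            apr_bucket_read(bucket, &data, &len, APR_BLOCK_READ);

            /* If the child stopped reading, discard the data but keep
             * consuming until EOS. */
            if (child_stopped_reading) {
                continue;
            }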

Bill

>
>   -    if (ap_should_client_block(r)) {
>   -        int dbsize, len_read;
>   +            /* read */
>   +            apr_bucket_read(bucket, &data, &len, APR_BLOCK_READ);
>   +
>   +            if (conf->logname && dbpos < conf->bufbytes) {
>   +                int cursize;
>   +
>   +                if ((dbpos + len) > conf->bufbytes) {
>   +                    cursize = conf->bufbytes - dbpos;
>   +                }
>   +                else {
>   +                    cursize = len;
>   +                }
>   +                memcpy(dbuf + dbpos, data, cursize);
>   +                dbpos += cursize;
>   +            }
>   +
>   +            /* Keep writing data to the child until done or too much time
>   +             * elapses with no progress or an error occurs.
>   +             */
>   +            rv = apr_file_write_full(tempsock, data, len, NULL);
>   +
>   +            if (rv != APR_SUCCESS) {
>   +                /* silly script stopped reading, soak up remaining message */
>   +                child_stopped_reading = 1;
>   +            }
>   +        }
>   +        apr_brigade_cleanup(bb);
>   +    }
>   +    while (!seen_eos);
>   +
>   +    if (conf->logname) {
>   +        dbuf[dbpos] = '\0';
>   +    }
>
>   -        if (conf->logname) {
>   -            dbuf = apr_pcalloc(r->pool, conf->bufbytes + 1);
>   -            dbpos = 0;
>   -        }
>   -
>   -        while ((len_read =
>   -                ap_get_client_block(r, argsbuffer, HUGE_STRING_LEN)) > 0) {
>   -            if (conf->logname) {
>   -                if ((dbpos + len_read) > conf->bufbytes) {
>   -                    dbsize = conf->bufbytes - dbpos;
>   -                }
>   -                else {
>   -                    dbsize = len_read;
>   -                }
>   -                memcpy(dbuf + dbpos, argsbuffer, dbsize);
>   -                dbpos += dbsize;
>   -            }
>   -            nbytes = len_read;
>   -            apr_file_write(tempsock, argsbuffer, &nbytes);
>   -            if (nbytes < len_read) {
>   -                /* silly script stopped reading, soak up remaining message */
>   -                while (ap_get_client_block(r, argsbuffer, HUGE_STRING_LEN) > 0) {
>   -                    /* dump it */
>   -                }
>   -                break;
>   -            }
>   -        }
>   -    }
>        /* we're done writing, or maybe we didn't write at all;
>         * force EOF on child's stdin so that the cgi detects end (or
>         * absence) of data
>
