Running pycurl-7.19.0/setup.py -q bdist_egg --dist-dir /tmp/easy_install-2ZCa8v/pycurl-7.19.0/egg-dist-tmp-DyHFls
Using curl-config (libcurl 7.12.1)
src/pycurl.c:42:20: Python.h: No such file or directory
src/pycurl.c:43:22: pythread.h: No such file or directory
src/pycurl.c:58:4: #error "Need Python version 2.2 or greater to compile pycurl."
src/pycurl.c:61:4: #error "Need libcurl version 7.19.0 or greater to compile pycurl."
[... Error Clipped]
error: Setup script exited with error: command '/usr/bin/gcc' failed with exit status 1
I'm not familiar with Red Hat, but this looks like a sysadmin problem now: the compiler cannot find Python.h, which usually means the Python development headers are not installed (on Red Hat-style systems that is typically the python-devel package), and your libcurl (7.12.1) is also older than the 7.19.0 that pycurl requires. I was able to install pycurl under FreeBSD and Ubuntu, too.

I have some code that might be able to PUT through a proxy for you. Please look at the attached file.

You can set up a proxy this way:

import MozillaEmulator
MozillaEmulator.DEFAULT_PROXIES = {
    'http':'http://user:password@proxy.host.com:3128',
    'https':'http://user:password@proxy.host.com:3128',
}

And here is how you use it:

dl = MozillaEmulator.MozillaEmulator()
put_url = "http://some_url_to_put_to";
headers = {'Content-Type':'application/xml;charset=UTF-8'}
data = open("some_file","rb").read()
response =  dl.download(put_url,data,headers,put_method=True)

If you don't set the put_method flag, then it will POST instead. (In that case, the post data must be valid form data: URL-encoded field/value pairs.) If you don't give postdata at all, it will GET. The PUT method was tested without a proxy; the GET and POST methods were tested both with and without a proxy.
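
For example, a minimal POST might look like this (just a sketch; the URL and field names are made up):

import urllib
post_url = "http://some_url_to_post_to"
post_data = urllib.urlencode({'field1':'value1','field2':'value2'})
response = dl.download(post_url,post_data)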

I did try to do a PUT through a proxy myself, but it didn't work; that was a restriction on the proxy side. (As far as I can recall, the error message came from the proxy server, something about an unsupported request.) So it *might* work for you, with a proxy server that supports PUT requests.

Also be aware that this version uses urllib2. Although it can speak the https protocol, it does not check the server's certificate.

Good luck,

    Laszlo

#----------------------------------------------------------------------
#
# Author:      Laszlo Nagy
#
# Copyright:   (c) 2005 by Szoftver Messias Bt.
# Licence:     BSD style
#
# Setup urllib so it will use Mozilla user agent settings.
#
#
#----------------------------------------------------------------------
import os
import hashlib
import urllib
import urllib2
import mimetypes
#from gzip import GzipFile
import cStringIO
from cPickle import loads,dumps
import cookielib

DEFAULT_PROXIES = None

class MozillaCacher(object):
    """A dictionary like object, that can cache results on a storage device."""
    def __init__(self,cachedir='.cache'):
        self.cachedir = cachedir
        if not os.path.isdir(cachedir):
            os.mkdir(cachedir)
        
    def name2fname(self,name):
        return os.path.join(self.cachedir,name)
    def __getitem__(self,name):
        if not isinstance(name,str):
            raise TypeError()
        fname = self.name2fname(name)
        if os.path.isfile(fname):
            return file(fname,'rb').read()
        else:
            raise KeyError(name)  # dict-like objects should raise KeyError, not IndexError
    def __setitem__(self,name,value):
        if not isinstance(name,str):
            raise TypeError()
        fname = self.name2fname(name)
        if os.path.isfile(fname):
            os.unlink(fname)
        f = file(fname,'wb+')
        try:
            f.write(value)
        finally:
            f.close()
    def __delitem__(self,name):
        if not isinstance(name,str):
            raise TypeError()
        fname = self.name2fname(name)
        if os.path.isfile(fname):
            os.unlink(fname)
    def __iter__(self):
        raise NotImplementedError()
    def has_key(self,name):
        return os.path.isfile(self.name2fname(name))
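
# A usage sketch for MozillaCacher (illustrative, not from the original
# posting):
#
#   cacher = MozillaCacher('.cache')      # cache files go into ./.cache
#   dl = MozillaEmulator(cacher=cacher)   # plug the disk cache into the emulator
#   html = dl.download('http://www.example.com/')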

class MozillaEmulator(object):
    def __init__(self,cacher={},trycount=0,proxies=None):
        """Create a new MozillaEmulator object.

        @param cacher: A dictionary-like object that can cache results on a
            storage device. You can use a simple dictionary here, but it is
            not recommended (note also that the default empty dict is shared
            by all instances). You can also pass None here to disable
            caching completely.
        @param trycount: The download() method will retry the operation if it
            fails. You can specify -1 for infinite retrying. A value of 0
            means no retrying, a value of 1 means one retry, etc.
        @param proxies: It must be a dict like this:

            {
                'http': 'http://user:password@www.example.com:3128/',
                'https': 'http://user:password@www.example.com:3128/',
            }

            If you pass None then DEFAULT_PROXIES will be used. To force a
            direct connection, use an empty dict. This is a simplified
            version of urllib2 proxy handling; see the examples at
            http://docs.python.org/release/2.5.2/lib/urllib2-examples.html
        """
        global DEFAULT_PROXIES
        
        self.cacher = cacher
        self.cookies = cookielib.CookieJar()
        self.debug = False
        self.trycount = trycount
        if proxies is None:
            self.proxies = DEFAULT_PROXIES
        else:
            self.proxies = proxies
    
                
    def _hash(self,data):
        h = hashlib.md5()
        h.update(data)
        return h.hexdigest()

    def build_opener(self,url,postdata=None,extraheaders={},
            forbid_redirect=False,put_method=False):
        """Build an opener. Internal method, do not call directly."""
        txheaders = {
            #'Accept':'text/xml,application/xml,application/xhtml+xml,text/html;q=0.9,text/plain;q=0.8,image/png,*/*;q=0.5',
            'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
            'Accept-Language':'en,hu;q=0.8,en-us;q=0.5,hu-hu;q=0.3',
#            'Accept-Encoding': 'gzip, deflate',
            'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7',
#            'Keep-Alive': '300',
#            'Connection': 'keep-alive',
            'Connection': 'close',
#            'Cache-Control': 'max-age=0',
        }
        for key,value in extraheaders.iteritems():
            txheaders[key] = value
        req = urllib2.Request(url, postdata, txheaders)
        self.cookies.add_cookie_header(req)
        if forbid_redirect:
            redirector = HTTPNoRedirector()
        else:
            redirector = urllib2.HTTPRedirectHandler()

        http_handler = urllib2.HTTPHandler(debuglevel=self.debug)
        https_handler = urllib2.HTTPSHandler(debuglevel=self.debug)
        
        handlers = []
        if self.proxies:
            handlers.append(urllib2.ProxyHandler(self.proxies))
        handlers += [http_handler,https_handler,
            urllib2.HTTPCookieProcessor(self.cookies),redirector]
        u = urllib2.build_opener(*handlers)
        #u.addheaders = [('User-Agent','Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.11) Gecko/2009060309 Ubuntu/8.04 (hardy) Firefox/3.0.11')]
        u.addheaders = [('User-Agent','Mozilla/5.0 (X11; U; Linux x86_64; hu-HU; rv:1.9.2.16) Gecko/20110323 Ubuntu/10.10 (maverick) Firefox/3.6.16')]
        
        # postdata was already passed to the Request constructor above,
        # so there is no need to add it again here.
        if put_method:
            req.get_method = lambda: 'PUT'
        
        return (req,u)

    def download(self,url,postdata=None,extraheaders={},forbid_redirect=False,
            trycount=None,fd=None,onprogress=None,only_head=False,
            put_method=False):
        """Download an URL with GET POST or PUT methods.

        @param postdata: It can be a string that will be POST or PUT to the URL.
            When None is given, the method will be GET instead.
        @param extraheaders: You can add/modify HTTP headers with a dict here.
        @param forbid_redirect: Set this flag if you do not want to handle
            HTTP 301 and 302 redirects.
        @param trycount: Specify the maximum number of retries here.
            0 means no retry on error. Using -1 means infinite retring.
            None means the default value (that is self.trycount).
        @param fd: You can pass a file descriptor here. In this case,
            the data will be written into the file. Please note that
            when you save the raw data into a file then it won't be cached.
        @param onprogress: A function that has one parameter: two parameters:
            the size of the resource and the downloaded size. This will be
            called for each 1KB chunk. (If the HTTP header does not contain
            the content-length field, then the size parameter will be zero!)
        @param only_head: Create the openerdirector and return it. In other
            words, this will not retrieve any content except HTPP headers.
        @param put_method: Set this flag to use PUT instead of POST.
            In this case, postdata can contain raw data (e.g. no 
            field,value pairs.)

        @return: The raw HTML page data, unless fd was specified. When fd
            was given, the return value is undefined.
        """
        if trycount is None:
            trycount = self.trycount
        cnt = 0
        while True:
            try:
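                # Note: the cache key is derived from the URL only, so cached
                # entries ignore postdata and the HTTP method used.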
                key = self._hash(url)
                if (self.cacher is None) or (not self.cacher.has_key(key)):
                    req,u = self.build_opener(url,postdata,extraheaders,
                        forbid_redirect,put_method)
                    openerdirector = u.open(req)
                    if self.debug:
                        print req.get_method(),url
                        print openerdirector.code,openerdirector.msg
                        print openerdirector.headers
                    self.cookies.extract_cookies(openerdirector,req)
                    if only_head:
                        return openerdirector
                    if openerdirector.headers.has_key('content-length'):
                        length = long(openerdirector.headers['content-length'])
                    else:
                        length = 0
                    dlength = 0
                    if fd:
                        while True:
                            data = openerdirector.read(1024)
                            dlength += len(data)
                            fd.write(data)
                            if onprogress:
                                onprogress(length,dlength)
                            if not data:
                                break
                    else:
                        data = ''
                        while True:
                            newdata = openerdirector.read(1024)
                            dlength += len(newdata)
                            data += newdata
                            if onprogress:
                                onprogress(length,dlength)
                            if not newdata:
                                break
                        #data = openerdirector.read()
                        if not (self.cacher is None):
                            self.cacher[key] = data
                else:
                    data = self.cacher[key]
                #try:
                #    d2= GzipFile(fileobj=cStringIO.StringIO(data)).read()
                #    data = d2
                #except IOError:
                #    pass
                return data
            except urllib2.URLError:
                cnt += 1
                if (trycount > -1) and (trycount < cnt):
                    raise
                # Retry :-)
                if self.debug:
                    print "MozillaEmulator: urllib2.URLError, retryting ",cnt


    def post_multipart(self,url,fields, files, forbid_redirect=True):
        """Post fields and files to an http host as multipart/form-data.
        fields is a sequence of (name, value) elements for regular form fields.
        files is a sequence of (name, filename, value) elements for data to be uploaded as files
        Return the server's response page.
        """
        content_type, post_data = encode_multipart_formdata(fields, files)
        result = self.download(url,post_data,{
            'Content-Type': content_type,
            'Content-Length': str(len(post_data))
        },forbid_redirect=forbid_redirect
        )
        return result
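
    # An illustrative call (the URL, field and file names are made up):
    #
    #   dl.post_multipart('http://www.example.com/upload',
    #       fields=[('description','test upload')],
    #       files=[('file','data.xml',open('data.xml','rb').read())])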


class HTTPNoRedirector(urllib2.HTTPRedirectHandler):
    """This is a custom http redirect handler that FORBIDS redirection."""
    def http_error_301(self, req, fp, code, msg, headers):
        e = urllib2.HTTPError(req.get_full_url(), code, msg, headers, fp)
        if e.code in (301,302):
            if 'location' in headers:
                newurl = headers.getheaders('location')[0]
            elif 'uri' in headers:
                newurl = headers.getheaders('uri')[0]
            e.newurl = newurl
        raise e
    http_error_302 = http_error_301
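
# A sketch of how a caller can catch the forbidden redirect and still read
# its target (illustrative; 'dl' and 'url' are assumed to exist):
#
#   try:
#       dl.download(url, forbid_redirect=True)
#   except urllib2.HTTPError, e:
#       if e.code in (301, 302):
#           print "redirect target:", getattr(e, 'newurl', None)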

            
def encode_multipart_formdata(fields, files):
    """
    fields is a sequence of (name, value) elements for regular form fields.
    files is a sequence of (name, filename, value) elements for data to be uploaded as files
    Return (content_type, body) ready for httplib.HTTP instance
    """
    BOUNDARY = '----------ThIs_Is_tHe_bouNdaRY_$'
    CRLF = '\r\n'
    L = []
    for (key, value) in fields:
        L.append('--' + BOUNDARY)
        L.append('Content-Disposition: form-data; name="%s"' % key)
        L.append('')
        L.append(value)
    for (key, filename, value) in files:
        L.append('--' + BOUNDARY)
        L.append('Content-Disposition: form-data; name="%s"; filename="%s"' % (key, filename))
        L.append('Content-Type: %s' % get_content_type(filename))
        L.append('')
        L.append(value)
    L.append('--' + BOUNDARY + '--')
    L.append('')
    body = CRLF.join(L)
    content_type = 'multipart/form-data; boundary=%s' % BOUNDARY
    return content_type, body

def get_content_type(filename):
    return mimetypes.guess_type(filename)[0] or 'application/octet-stream'    
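
if __name__ == '__main__':
    # A minimal demo sketch (not part of the original posting): download a
    # page without caching and print progress for each 1KB chunk.
    def report_progress(total, done):
        print "downloaded %d of %d bytes" % (done, total)
    dl = MozillaEmulator(cacher=None)
    page = dl.download('http://www.example.com/', onprogress=report_progress)
    print "got %d bytes" % len(page)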