--- /home/Kevin/Tools/w3af.orig/plugins/discovery/webSpider.py	2008-08-30 04:33:07.000000000 +0200
+++ webSpider.py	2008-12-05 10:16:05.000000000 +0100
@@ -57,6 +57,7 @@
         self._followRegex = '.*'
         self._onlyForward = False
         self._compileRE()
+        self._urlParameter = None
         
     def discover(self, fuzzableRequest ):
         '''
@@ -69,6 +70,11 @@
         # Init some internal variables
         self.is404 = kb.kb.getData( 'error404page', '404' )
         
+        # Set the URL parameter if necessary
+        if self._urlParameter is not None:
+            fuzzableRequest.setURL(urlParser.setParameter(fuzzableRequest.getURL(), self._urlParameter))
+            fuzzableRequest.setURI(urlParser.setParameter(fuzzableRequest.getURI(), self._urlParameter))
+        
         # If its a form, then smartFill the Dc.
         originalDc = fuzzableRequest.getDc()
         if isinstance( fuzzableRequest, httpPostDataRequest.httpPostDataRequest ):
@@ -94,6 +100,10 @@
             # a image file, its useless and consumes cpu power.
             if response.is_text_or_html() or response.is_pdf():
                 originalURL = response.getRedirURI()
+
+                if self._urlParameter is not None:
+                    originalURL = urlParser.setParameter(originalURL, self._urlParameter)
+
                 try:
                     documentParser = dpCache.dpc.getDocumentParserFor( response )
                 except w3afException:
@@ -118,6 +128,8 @@
                     references = [ r for r in references if not self._compiledIgnoreRe.match( r )]
                     
                     for ref in references:
+                        if self._urlParameter is not None:
+                            ref = urlParser.setParameter(ref, self._urlParameter)
                         targs = (ref, fuzzableRequest, originalURL)
                         self._tm.startFunction( target=self._verifyReferences, args=targs, ownerObj=self )
             
@@ -154,6 +166,10 @@
                     
                     # Process the list.
                     for fuzzableRequest in fuzzableRequestList:
+                        if self._urlParameter is not None:
+                            fuzzableRequest.setURL(urlParser.setParameter(fuzzableRequest.getURL(), self._urlParameter))
+                            fuzzableRequest.setURI(urlParser.setParameter(fuzzableRequest.getURI(), self._urlParameter))
+
                         fuzzableRequest.setReferer( originalURL )
                         self._fuzzableRequests.append( fuzzableRequest )
     
@@ -197,10 +213,14 @@
         d3 = 'When spidering, DO NOT follow links that match this regular expression (has precedence over followRegex)'
         o3 = option('ignoreRegex', self._ignoreRegex, d3, 'string')
         
+        d4 = 'Append the given parameter to new URLs found by the spider. ex http://www.foobar.com/index.jsp;<parameter>?id=2'
+        o4 = option('urlParameter', self._urlParameter, d4, 'string')
+
         ol = optionList()
         ol.add(o1)
         ol.add(o2)
         ol.add(o3)
+        ol.add(o4)
         return ol
         
     def setOptions( self, optionsMap ):
@@ -214,6 +234,7 @@
         self._onlyForward = optionsMap['onlyForward'].getValue()
         self._ignoreRegex = optionsMap['ignoreRegex'].getValue()
         self._followRegex = optionsMap['followRegex'].getValue()
+        self._urlParameter = optionsMap['urlParameter'].getValue()
         
         self._compileRE()
     
@@ -252,6 +273,7 @@
             - onlyForward
             - ignoreRegex
             - followRegex
+            - urlParameter
             
         IgnoreRegex and followRegex are commonly used to configure the webSpider to spider
         all URLs except the "logout" or some other more exciting link like "Reboot Appliance"
