[PATCH] Bug 3150: do not start useless unlinkd.

2011-10-06 Thread Dmitry Kurochkin

Unlinkd is used only by UFS storage.  But before the change, Squid
always started unlinkd if it is built, even if it is not needed.
Moreover, unlinkd was started in non-daemon mode.

The patch adds an unlinks() method to SwapDir to determine if it uses
unlinkd.  Unlinkd is started iff:

* Squid is running in daemon mode
* a configured cache_dir uses unlinkd

After the change, unlinkdClose() may be called when unlinkd was never
started.  The patch removes a warning which was printed in this case
on Windows.
---
 src/SwapDir.h   |2 ++
 src/fs/ufs/store_dir_ufs.cc |6 ++
 src/fs/ufs/ufscommon.h  |1 +
 src/main.cc |3 ++-
 src/protos.h|1 +
 src/unlinkd.cc  |   21 +++--
 6 files changed, 31 insertions(+), 3 deletions(-)

diff --git src/SwapDir.h src/SwapDir.h
index 723443c..b5772d8 100644
--- src/SwapDir.h
+++ src/SwapDir.h
@@ -115,40 +115,42 @@ SQUIDCEXTERN void storeDirLRUDelete(StoreEntry *);
 SQUIDCEXTERN void storeDirLRUAdd(StoreEntry *);
 SQUIDCEXTERN int storeDirGetBlkSize(const char *path, int *blksize);
 SQUIDCEXTERN int storeDirGetUFSStats(const char *, int *, int *, int *, int *);
 
 /// manages a single cache_dir
 class SwapDir : public Store
 {
 
 public:
 typedef RefCount Pointer;
 
 SwapDir(char const *aType);
 virtual ~SwapDir();
 virtual void reconfigure() = 0;
 char const *type() const;
 
 virtual bool needsDiskStrand() const; ///< needs a dedicated kid process
 virtual bool active() const; ///< may be used in this strand
 /// whether stat should be reported by this SwapDir
 virtual bool doReportStat() const { return active(); }
+/// whether SwapDir unlinks files a lot and may benefit from unlinkd
+virtual bool unlinks() const { return false; }
 
 /* official Store interface functions */
 virtual void diskFull();
 
 virtual StoreEntry * get(const cache_key *);
 
 virtual void get(String const, STOREGETCLIENT, void * cbdata);
 
 virtual uint64_t maxSize() const { return max_size;}
 
 virtual uint64_t minSize() const;
 
 virtual int64_t maxObjectSize() const { return max_objsize; }
 
 virtual void stat (StoreEntry &anEntry) const;
 virtual StoreSearch *search(String const url, HttpRequest *) = 0;
 
 /* migrated from store_dir.cc */
 bool objectSizeIsAcceptable(int64_t objsize) const;
 
diff --git src/fs/ufs/store_dir_ufs.cc src/fs/ufs/store_dir_ufs.cc
index 129629c..439a33b 100644
--- src/fs/ufs/store_dir_ufs.cc
+++ src/fs/ufs/store_dir_ufs.cc
@@ -1284,40 +1284,46 @@ UFSSwapDir::unlinkFile(sfileno f)
std::hex << std::uppercase << std::setw(8) << f << " '" <<
fullPath(f,NULL) << "'");
 /* commonUfsDirMapBitReset(this, f); */
 IO->unlinkFile(fullPath(f,NULL));
 }
 
 void
 UFSSwapDir::unlink(StoreEntry & e)
 {
 debugs(79, 3, "storeUfsUnlink: dirno " << index  << ", fileno "<<
std::setfill('0') << std::hex << std::uppercase << std::setw(8) << e.swap_filen);
 if (e.swap_status == SWAPOUT_DONE && EBIT_TEST(e.flags, ENTRY_VALIDATED)) {
 cur_size -= fs.blksize * sizeInBlocks(e.swap_file_sz);
 --n_disk_objects;
 }
 replacementRemove(&e);
 mapBitReset(e.swap_filen);
 UFSSwapDir::unlinkFile(e.swap_filen);
 }
 
+bool
+UFSSwapDir::unlinks() const
+{
+return IamWorkerProcess();
+}
+
 /*
  * Add and remove the given StoreEntry from the replacement policy in
  * use.
  */
 
 void
 UFSSwapDir::replacementAdd(StoreEntry * e)
 {
 debugs(47, 4, "UFSSwapDir::replacementAdd: added node " << e << " to dir " << index);
 repl->Add(repl, e, &e->repl);
 }
 
 
 void
 UFSSwapDir::replacementRemove(StoreEntry * e)
 {
 StorePointer SD;
 
 if (e->swap_dirn < 0)
 return;
diff --git src/fs/ufs/ufscommon.h src/fs/ufs/ufscommon.h
index 7561e8b..ae169d4 100644
--- src/fs/ufs/ufscommon.h
+++ src/fs/ufs/ufscommon.h
@@ -43,40 +43,41 @@ class StoreSearch;
 
 #include "SwapDir.h"
 
 /// \ingroup UFS
 class UFSSwapDir : public SwapDir
 {
 
 public:
 static int IsUFSDir(SwapDir* sd);
 static int DirClean(int swap_index);
 static int FilenoBelongsHere(int fn, int F0, int F1, int F2);
 
 UFSSwapDir(char const *aType, const char *aModuleType);
 virtual void init();
 virtual void create();
 virtual void dump(StoreEntry &) const;
 ~UFSSwapDir();
 virtual StoreSearch *search(String const url, HttpRequest *);
 virtual bool doubleCheck(StoreEntry &);
 virtual void unlink(StoreEntry &);
+virtual bool unlinks() const;
 virtual void statfs(StoreEntry &)const;
 virtual void maintain();
 virtual bool canStore(const StoreEntry &e, int64_t diskSpaceNeeded, int &load) const;
 virtual void reference(StoreEntry &);
 virtual bool dereference(StoreEntry &);
 virtual StoreIOState::Pointer createStoreIO(StoreEntry &, StoreIOState::STFNCB *, StoreIOState::STIOCB *, void *);
 virtual StoreIOState

Jenkins build is back to normal : 3.HEAD-amd64-OpenBSD #265

2011-10-06 Thread noc
See 




Jenkins build is back to normal : 3.HEAD-i386-OpenBSD #1135

2011-10-06 Thread noc
See 




Build failed in Jenkins: 3.2-matrix » sheeva-debian-squeeze #178

2011-10-06 Thread noc
See 


--
Started by upstream project "3.2-matrix" build number 178
Building remotely on sheeva-debian-squeeze
java.io.IOException: Failed to mkdirs: 

at hudson.FilePath.mkdirs(FilePath.java:844)
at hudson.model.AbstractProject.checkout(AbstractProject.java:1191)
at 
hudson.model.AbstractBuild$AbstractRunner.checkout(AbstractBuild.java:566)
at hudson.model.AbstractBuild$AbstractRunner.run(AbstractBuild.java:454)
at hudson.model.Run.run(Run.java:1403)
at hudson.matrix.MatrixRun.run(MatrixRun.java:146)
at hudson.model.ResourceController.execute(ResourceController.java:88)
at hudson.model.Executor.run(Executor.java:230)
Retrying after 10 seconds
java.io.IOException: Failed to mkdirs: 

at hudson.FilePath.mkdirs(FilePath.java:844)
at hudson.model.AbstractProject.checkout(AbstractProject.java:1191)
at 
hudson.model.AbstractBuild$AbstractRunner.checkout(AbstractBuild.java:566)
at hudson.model.AbstractBuild$AbstractRunner.run(AbstractBuild.java:454)
at hudson.model.Run.run(Run.java:1403)
at hudson.matrix.MatrixRun.run(MatrixRun.java:146)
at hudson.model.ResourceController.execute(ResourceController.java:88)
at hudson.model.Executor.run(Executor.java:230)
Retrying after 10 seconds
java.io.IOException: Failed to mkdirs: 

at hudson.FilePath.mkdirs(FilePath.java:844)
at hudson.model.AbstractProject.checkout(AbstractProject.java:1191)
at 
hudson.model.AbstractBuild$AbstractRunner.checkout(AbstractBuild.java:566)
at hudson.model.AbstractBuild$AbstractRunner.run(AbstractBuild.java:454)
at hudson.model.Run.run(Run.java:1403)
at hudson.matrix.MatrixRun.run(MatrixRun.java:146)
at hudson.model.ResourceController.execute(ResourceController.java:88)
at hudson.model.Executor.run(Executor.java:230)



[PATCH] Account for max-swap-rate in swap-timeout handling for Rock.

2011-10-06 Thread Dmitry Kurochkin

Current swap-timeout code does not know about max-swap-rate.  It
simply finds the longest-waiting I/O in disker queues (incoming and
outgoing) and then assumes that the new I/O will wait at least that
long.  The assumption is likely to be wrong when the queue contains
lots of freshly queued requests to disker: Those requests have not
waited long yet, but a max-swap-rate limit will slow them down
shortly.

The patch changes the swap-timeout code to account for max-swap-rate
when dealing with the workers-to-disker queue: If there are N requests
pending, the new one will wait at least N/max-swap-rate seconds.  Also
expected wait time is adjusted based on the queue "balance" member, in
case we have been borrowing time against future I/O already.
---
 src/DiskIO/IpcIo/IpcIoFile.cc |   16 +++-
 src/ipc/Queue.cc  |   30 ++
 src/ipc/Queue.h   |   28 ++--
 3 files changed, 67 insertions(+), 7 deletions(-)

diff --git src/DiskIO/IpcIo/IpcIoFile.cc src/DiskIO/IpcIo/IpcIoFile.cc
index aec7b40..f70af46 100644
--- src/DiskIO/IpcIo/IpcIoFile.cc
+++ src/DiskIO/IpcIo/IpcIoFile.cc
@@ -343,41 +343,55 @@ IpcIoFile::push(IpcIoPendingRequest *const pending)
 pending->completeIo(NULL);
 delete pending;
 } catch (const TextException &e) {
 debugs(47, DBG_IMPORTANT, HERE << e.what());
 pending->completeIo(NULL);
 delete pending;
 }
 }
 
 /// whether we think there is enough time to complete the I/O
 bool
 IpcIoFile::canWait() const
 {
 if (!config.ioTimeout)
 return true; // no timeout specified
 
 IpcIoMsg oldestIo;
 if (!queue->peek(diskId, oldestIo) || oldestIo.start.tv_sec <= 0)
 return true; // we cannot estimate expected wait time; assume it is OK
 
-const int expectedWait = tvSubMsec(oldestIo.start, current_time);
+const int oldestWait = tvSubMsec(oldestIo.start, current_time);
+
+int rateWait = -1; // time in millisecons
+const Ipc::QueueReader::Rate::Value ioRate = queue->rateLimit(diskId);
+if (ioRate > 0) {
+// if there are N requests pending, the new one will wait at
+// least N/max-swap-rate seconds
+rateWait = 1e3 * queue->outSize(diskId) / ioRate;
+// adjust N/max-swap-rate value based on the queue "balance"
+// member, in case we have been borrowing time against future
+// I/O already
+rateWait += queue->balance(diskId);
+}
+
+const int expectedWait = max(oldestWait, rateWait);
 if (expectedWait < 0 ||
 static_cast(expectedWait) < config.ioTimeout)
 return true; // expected wait time is acceptible
 
 debugs(47,2, HERE << "cannot wait: " << expectedWait <<
" oldest: " << SipcIo(KidIdentifier, oldestIo, diskId));
 return false; // do not want to wait that long
 }
 
 /// called when coordinator responds to worker open request
 void
 IpcIoFile::HandleOpenResponse(const Ipc::StrandSearchResponse &response)
 {
 debugs(47, 7, HERE << "coordinator response to open request");
 for (IpcIoFileList::iterator i = WaitingForOpen.begin();
 i != WaitingForOpen.end(); ++i) {
 if (response.strand.tag == (*i)->dbName) {
 (*i)->openCompleted(&response);
 WaitingForOpen.erase(i);
 return;
diff --git src/ipc/Queue.cc src/ipc/Queue.cc
index 24e6706..76e266b 100644
--- src/ipc/Queue.cc
+++ src/ipc/Queue.cc
@@ -180,40 +180,56 @@ Ipc::FewToFewBiQueue::oneToOneQueueIndex(const Group fromGroup, const int fromPr
 index1 = toProcessId - metadata->theGroupAIdOffset;
 index2 = fromProcessId - metadata->theGroupBIdOffset;
 offset = metadata->theGroupASize * metadata->theGroupBSize;
 }
 const int index = offset + index1 * metadata->theGroupBSize + index2;
 return index;
 }
 
 Ipc::OneToOneUniQueue &
 Ipc::FewToFewBiQueue::oneToOneQueue(const Group fromGroup, const int fromProcessId, const Group toGroup, const int toProcessId)
 {
 return (*queues)[oneToOneQueueIndex(fromGroup, fromProcessId, toGroup, toProcessId)];
 }
 
 const Ipc::OneToOneUniQueue &
 Ipc::FewToFewBiQueue::oneToOneQueue(const Group fromGroup, const int fromProcessId, const Group toGroup, const int toProcessId) const
 {
 return (*queues)[oneToOneQueueIndex(fromGroup, fromProcessId, toGroup, toProcessId)];
 }
 
+/// incoming queue from a given remote process
+const Ipc::OneToOneUniQueue &
+Ipc::FewToFewBiQueue::inQueue(const int remoteProcessId) const
+{
+return oneToOneQueue(remoteGroup(), remoteProcessId,
+ theLocalGroup, theLocalProcessId);
+}
+
+/// outgoing queue to a given remote process
+const Ipc::OneToOneUniQueue &
+Ipc::FewToFewBiQueue::outQueue(const int remoteProcessId) const
+{
+return oneToOneQueue(theLocalGroup, theLocalProcessId,
+ remoteGroup(), remoteProcessId);
+}
+
 int
 Ipc::FewToFewBiQueue::readerIndex(const Group 

Jenkins build is back to normal : 3.HEAD-i386-FreeBSD-6.4 #1146

2011-10-06 Thread noc
See 




Re: [PATCH] Re: Adding a reset option to the session helper

2011-10-06 Thread Amos Jeffries

On 07/10/11 10:54, Andrew Beverley wrote:

On Thu, 2011-10-06 at 13:23 +1300, Amos Jeffries wrote:

Oh, I forgot to say: the DB_ENV functionality requires a *directory*
rather than a file to be specified for the database (it creates
several
files). So, although I don't see a problem with this in principle, it
does break backwards compatibility. Any thoughts? Just bite the
bullet?

Andy


  Um. Maybe check for dir/file type on the -b value and use it as base
  path?



Or even better check for dir/file type and then either use legacy mode
(just a database file, but with the modern DB API) or DB_ENV mode (a
directory with several files).

Or is that what you meant anyway?


Something along those lines. Just hoping we could do the single-file 
with the newer API I guess.


Amos
--
Please be using
  Current Stable Squid 2.7.STABLE9 or 3.1.15
  Beta testers wanted for 3.2.0.12


Jenkins build is back to normal : 3.2-matrix » rio.treenet #177

2011-10-06 Thread noc
See 




Re: [PATCH] Re: Adding a reset option to the session helper

2011-10-06 Thread Andrew Beverley
On Thu, 2011-10-06 at 13:23 +1300, Amos Jeffries wrote:
> > Oh, I forgot to say: the DB_ENV functionality requires a *directory*
> > rather than a file to be specified for the database (it creates 
> > several
> > files). So, although I don't see a problem with this in principle, it
> > does break backwards compatibility. Any thoughts? Just bite the 
> > bullet?
> >
> > Andy
> 
>  Um. Maybe check for dir/file type on the -b value and use it as base 
>  path?
> 

Or even better check for dir/file type and then either use legacy mode
(just a database file, but with the modern DB API) or DB_ENV mode (a
directory with several files).

Or is that what you meant anyway?

Andy




Re: [PATCH] Bug 3349: Bad support for adaptation service URIs with '='

2011-10-06 Thread Tsantilas Christos

Patch applied to trunk

On 10/06/2011 02:02 PM, Amos Jeffries wrote:

On 06/10/11 22:14, Tsantilas Christos wrote:

On 10/06/2011 12:51 AM, Amos Jeffries wrote:

On Wed, 05 Oct 2011 11:32:39 +0300, Tsantilas Christos wrote:

Currently using URIs which include "=" is not supported by
ecap_service/icap_service configuration parameters. Also the squid.conf
documentation about ecap_service is outdated.

This patch
- Fixes the [e|i]cap_service line parser to allow URIs with "="
- Changes the [e|i]cap_service configuration parameter to use the
following syntax:
ecap_service id vectoring_point uri [name=value ...]
- Check for duplicated options
- Fixes the related documentation
- Also the older [e|i]cap_service syntax forms are supported:
ecap_service id vectoring_point [1|0] uri
ecap_service id vectoring_point [name=value ...] uri
- The "uri" option is not documented but supported.

This is a Measurement Factory project.




+1 Okay. looks fine now.

Amos


Re: [PATCH] Fix CertificateDB locking scheme

2011-10-06 Thread Tsantilas Christos

On 10/06/2011 08:53 PM, Alex Rousskov wrote:

On 09/22/2011 05:11 AM, Tsantilas Christos wrote:

Fix CertificateDB locking scheme

Currently we are locking every file going to be accessed by
CertificateDB code even if it is not really needed, because of a more
general lock.

This patch:
- Replace the old FileLocker class with the pair Lock/Locker classes
- Remove most of the locks in CertificateDB with only two locks one
  for main database locking and one lock for the file contain the
  current serial number.

This is a Measurement Factory project


Hi Christos,

 Please commit these changes if you are comfortable with them. They
will make further ssl_crtd polishing and optimization easier.


OK patch applied to trunk.






Thank you,

Alex.





Re: [PATCH] Fix CertificateDB locking scheme

2011-10-06 Thread Alex Rousskov
On 09/22/2011 05:11 AM, Tsantilas Christos wrote:
> Fix CertificateDB locking scheme
> 
> Currently we are locking every file going to be accessed by
> CertificateDB code even if it is not really needed, because of a more
> general lock.
> 
> This patch:
>- Replace the old FileLocker class with the pair Lock/Locker classes
>- Remove most of the locks in CertificateDB with only two locks: one
>  for main database locking and one lock for the file containing the
>  current serial number.
> 
> This is a Measurement Factory project

Hi Christos,

Please commit these changes if you are comfortable with them. They
will make further ssl_crtd polishing and optimization easier.


Thank you,

Alex.


Re: [PATCH] max-swap-rate=swaps/sec option for Rock cache_dir

2011-10-06 Thread Alex Rousskov
On 10/03/2011 02:43 PM, Alex Rousskov wrote:
> Hello,
> 
> The new option introduced by this patch limits Squid I/O rate to
> smooth out OS disk commit activity and to avoid blocking Rock diskers
> (or even other processes!) on I/O. It should be used when swap demand
> exceeds disk performance limits but the underlying file system does not
> slow down incoming I/Os, allowing the situation to get out of control.
> 
> Lab and limited deployment tests show that without this option, Squid
> may write to the OS buffers much faster than the OS is able to write to
> disk. This imbalance eventually (and quite suddenly!) blocks any
> process trying to do I/O for a second or two, which results in very poor
> overall performance. Tuning file system parameters may help, but
> sometimes is not enough.
> 
> 
> The new max-swap-rate option is meant to be used together with the
> existing swap-timeout option. Otherwise, the problem simply shifts one
> layer up, with unrestricted workers overflowing restricted Rock I/O
> queues. The swap-timeout option allows a worker to skip Store operations
> when there are "too many" I/O pending already.
> 
> The code seems to work well, but can be further optimized to minimize
> read (hit) delays and to better enforce configured swap timeouts.


Merged from lp:3p2-rock as trunk r11767 before I got any reviews on
squid-dev because these earlier-in-the-branch changes were blocking more
urgent fixes posted by Dmitry, and bzr does not support cherry picking
merge.

The code has been tested in production. Reviews are still gladly
accepted, and I can revert that commit or disable the feature if needed.


Thank you,

Alex.


Re: [PATCH] Fix "variable set but not used" GCC 4.6 warnings when building without GSSAPI

2011-10-06 Thread Alex Rousskov
On 10/02/2011 05:17 AM, Amos Jeffries wrote:
> On 28/09/11 03:49, Dmitry Kurochkin wrote:
>> Hello.
>>
>> A simple patch that makes Squid build without GSSAPI warning-free on
>> GCC 4.6.
>>
>> Regards,
>>Dmitry
> 
> +1.

Merged from lp:3p2-rock as trunk r11768.

Cheers,

Alex.


Re: [PATCH 2/2] Remove entryLimitAllowed() TODO from RockSwapDirRr::run().

2011-10-06 Thread Alex Rousskov
On 10/04/2011 05:21 AM, Amos Jeffries wrote:
> On 03/10/11 18:19, Dmitry Kurochkin wrote:
>>
>> RockSwapDirRr::run() calls sd->entryLimitAllowed() when sd does not
>> have a map yet, which causes entryLimitAllowed() to use zero for the
>> lower limit.  But that OK so we can remove the TODO.
>> ---
>>   src/fs/rock/RockSwapDir.cc |1 -
>>   1 files changed, 0 insertions(+), 1 deletions(-)
>>
> 
> +1. Both these look okay to me. Please combine on merge though.

Merged from lp:3p2-rock as trunk r11768. From high-level logging point
of view, they are now combined (with other merged changes), but it is
still possible to separate them using bzr options. These last two
patches are not closely related, IMO, so I hope keeping them separate in
bzr history is OK.


Cheers,

Alex.


Re: [PATCH] Check IPC I/O pages limits in Rock store only when using a disker.

2011-10-06 Thread Alex Rousskov
On 10/02/2011 08:18 PM, Amos Jeffries wrote:
> On Sun,  2 Oct 2011 21:03:49 +0400, Dmitry Kurochkin wrote:
>> Shared memory pages are used only when Rock is running with diskers.
>> Otherwise, Blocking IO is used and there is no need to check for IPC
>> I/O pages limits.
>> ---
>>  src/fs/rock/RockSwapDir.cc |6 --
>>  1 files changed, 4 insertions(+), 2 deletions(-)
> 
> 
> +1.


Merged from lp:3p2-rock as trunk r11768.


Thank you,

Alex.


Re: [PATCH 0/9] Make Rock work w/o SMP

2011-10-06 Thread Alex Rousskov
On 10/02/2011 05:43 AM, Amos Jeffries wrote:
> On 29/09/11 12:05, Dmitry Kurochkin wrote:
>> This patch series implements a number of fixes and cleanups
>> related to Rock store in non-SMP mode.  After the changes, Rock
>> store module can build on systems without POSIX SHM and work in
>> non-SMP mode using Blocking DiskIO module.  Running with Rock in
>> SMP mode requires SHM support as before.

> +1. Other than the readability tweak which can be done on commit the set
> looks okay.

All nine patches merged from lp:3p2-rock as trunk r11768.


Thank you,

Alex.


Jenkins build is back to normal : 3.HEAD-amd64-CentOS-5.3 #1603

2011-10-06 Thread noc
See 




Build failed in Jenkins: 3.HEAD-amd64-CentOS-5.3 #1602

2011-10-06 Thread noc
See 

Changes:

[Christos Tsantilas] Bug 3190: Large HTTP POST stuck after early ICAP 400 error 
response

When an ICAP REQMOD service responds with an error to
(or the REQMOD transaction aborts while processing) a large HTTP
request, the HTTP request may get stuck because the request body
buffer gets full and nobody consumes the no-longer-needed content.

The ICAP code quits but leaves the body buffer intact in case the
client-side code wants to bypass the error. After that, nobody consumes
the request body because the buggy client side does not inform the body
pipe that there will be no other consumers, which would have triggered
a noteBodyConsumerAborted() callback and enable auto-consumption or closed
the client connection.

This is a Measurement Factory project

--
[...truncated 25999 lines...]
if g++ -DHAVE_CONFIG_H  -I../../../.. -I../../../../include -I../../../../lib 
-I../../../../src -I../../../include
-I../../../../helpers/digest_auth/eDirectory  -I/usr/include/libxml2
-I/usr/include/libxml2 -Wall -Wpointer-arith -Wwrite-strings -Wcomments -Werror 
-pipe -D_REENTRANT -D_FILE_OFFSET_BITS=64 -g -O2 -MT digest_pw_auth.o -MD -MP 
-MF ".deps/digest_pw_auth.Tpo" -c -o digest_pw_auth.o 
../../../../helpers/digest_auth/eDirectory/digest_pw_auth.cc; \
then mv -f ".deps/digest_pw_auth.Tpo" ".deps/digest_pw_auth.Po"; else 
rm -f ".deps/digest_pw_auth.Tpo"; exit 1; fi
if g++ -DHAVE_CONFIG_H  -I../../../.. -I../../../../include -I../../../../lib 
-I../../../../src -I../../../include
-I../../../../helpers/digest_auth/eDirectory  -I/usr/include/libxml2
-I/usr/include/libxml2 -Wall -Wpointer-arith -Wwrite-strings -Wcomments -Werror 
-pipe -D_REENTRANT -D_FILE_OFFSET_BITS=64 -g -O2 -MT ldap_backend.o -MD -MP -MF 
".deps/ldap_backend.Tpo" -c -o ldap_backend.o 
../../../../helpers/digest_auth/eDirectory/ldap_backend.cc; \
then mv -f ".deps/ldap_backend.Tpo" ".deps/ldap_backend.Po"; else rm -f 
".deps/ldap_backend.Tpo"; exit 1; fi
if g++ -DHAVE_CONFIG_H  -I../../../.. -I../../../../include -I../../../../lib 
-I../../../../src -I../../../include
-I../../../../helpers/digest_auth/eDirectory  -I/usr/include/libxml2
-I/usr/include/libxml2 -Wall -Wpointer-arith -Wwrite-strings -Wcomments -Werror 
-pipe -D_REENTRANT -D_FILE_OFFSET_BITS=64 -g -O2 -MT edir_ldapext.o -MD -MP -MF 
".deps/edir_ldapext.Tpo" -c -o edir_ldapext.o 
../../../../helpers/digest_auth/eDirectory/edir_ldapext.cc; \
then mv -f ".deps/edir_ldapext.Tpo" ".deps/edir_ldapext.Po"; else rm -f 
".deps/edir_ldapext.Tpo"; exit 1; fi
/bin/sh ../../../libtool --tag=CXX --mode=link g++ -I/usr/include/libxml2 -Wall 
-Wpointer-arith -Wwrite-strings -Wcomments -Werror -pipe -D_REENTRANT 
-D_FILE_OFFSET_BITS=64 -g -O2  -g -o digest_edirectory_auth  digest_pw_auth.o 
ldap_backend.o edir_ldapext.o ../../../lib/libmiscencoding.la -L../../../compat 
-lcompat-squid  -lldap -llber -lcrypt -lssl -lcrypto -ldl -lm -lnsl -lresolv 
-lrt -ldl -ldl 
libtool: link: g++ -I/usr/include/libxml2 -Wall -Wpointer-arith -Wwrite-strings 
-Wcomments -Werror -pipe -D_REENTRANT -D_FILE_OFFSET_BITS=64 -g -O2 -g -o 
digest_edirectory_auth digest_pw_auth.o ldap_backend.o edir_ldapext.o  
../../../lib/.libs/libmiscencoding.a 
-L
 -lcompat-squid -lldap -llber -lcrypt -lssl -lcrypto -lm -lnsl -lresolv -lrt 
-ldl
make[4]: Leaving directory 
`
Making all in file
make[4]: Entering directory 
`
if g++ -DHAVE_CONFIG_H  -I../../../.. -I../../../../include -I../../../../lib 
-I../../../../src -I../../../include-I../../../../helpers/digest_auth/file  
-I/usr/include/libxml2-I/usr/include/libxml2 -Wall -Wpointer-arith 
-Wwrite-strings -Wcomments -Werror -pipe -D_REENTRANT -D_FILE_OFFSET_BITS=64 -g 
-O2 -MT digest_file_auth.o -MD -MP -MF ".deps/digest_file_auth.Tpo" -c -o 
digest_file_auth.o ../../../../helpers/digest_auth/file/digest_file_auth.cc; \
then mv -f ".deps/digest_file_auth.Tpo" ".deps/digest_file_auth.Po"; 
else rm -f ".deps/digest_file_auth.Tpo"; exit 1; fi
if g++ -DHAVE_CONFIG_H  -I../../../.. -I../../../../include -I../../../../lib 
-I../../../../src -I../../../include-I../../../../helpers/digest_auth/file  
-I/usr/include/libxml2-I/usr/include/libxml2 -Wall -Wpointer-arith 
-Wwrite-strings -Wcomments -Werror -pipe -D_REENTRANT -D_FILE_OFFSET_BITS=64 -g 
-O2 -MT text_backend.o -MD -MP -MF ".deps/text_backend.Tpo" -c -o 
text_backend.o ../../../../helpers/digest_auth/file/text_backend.cc; \
then mv -f ".deps/text_backend.Tpo" ".deps/tex

Re: [PATCH] Bug 3349: Bad support for adaptation service URIs with '='

2011-10-06 Thread Amos Jeffries

On 06/10/11 22:14, Tsantilas Christos wrote:

On 10/06/2011 12:51 AM, Amos Jeffries wrote:

On Wed, 05 Oct 2011 11:32:39 +0300, Tsantilas Christos wrote:

Currently using URIs which include "=" is not supported by
ecap_service/icap_service configuration parameters. Also the squid.conf
documentation about ecap_service is outdated.

This patch
- Fixes the [e|i]cap_service line parser to allow URIs with "="
- Changes the [e|i]cap_service configuration parameter to use the
following syntax:
ecap_service id vectoring_point uri [name=value ...]
- Check for duplicated options
- Fixes the related documentation
- Also the older [e|i]cap_service syntax forms are supported:
ecap_service id vectoring_point [1|0] uri
ecap_service id vectoring_point [name=value ...] uri
- The "uri" option is not documented but supported.

This is a Measurement Factory project.




+1 Okay. looks fine now.

Amos
--
Please be using
  Current Stable Squid 2.7.STABLE9 or 3.1.15
  Beta testers wanted for 3.2.0.12


Re: [PATCH] Bug 3349: Bad support for adaptation service URIs with '='

2011-10-06 Thread Tsantilas Christos

On 10/06/2011 12:51 AM, Amos Jeffries wrote:

On Wed, 05 Oct 2011 11:32:39 +0300, Tsantilas Christos wrote:

Currently using URIs which include "=" is not supported by
ecap_service/icap_service configuration parameters. Also the squid.conf
documentation about ecap_service is outdated.

This patch
- Fixes the [e|i]cap_service line parser to allow URIs with "="
- Changes the [e|i]cap_service configuration parameter to use the
following syntax:
ecap_service id vectoring_point uri [name=value ...]
- Check for duplicated options
- Fixes the related documentation
- Also the older [e|i]cap_service syntax forms are supported:
ecap_service id vectoring_point [1|0] uri
ecap_service id vectoring_point [name=value ...] uri
- The "uri" option is not documented but supported.

This is a Measurement Factory project.



Since you alter the way the 0/1 backward compatibility is parsed please
take the opportunity to add a debugs() "UPGRADE: " message at level
"(opt_parse_cfg_only?0:1)" to the cases. Stating that option format has
changed and what it should be upgraded to.


Done



In the duplicate warning message:
- s/Douplicate/Duplicate/
- s/ataptation/adaptation/

done



Amos




Bug 3349: Bad support for adaptation service URIs with '=' 

Currently using URIs  which  include "=" is not supported by 
ecap_service/icap_service configuration parameters. Also the squid.conf
documentation about ecap_service is outdated.

This patch 
- Fixes the [e|i]cap_service line parser to allow URIs with "="
- Changes the [e|i]cap_service configuration parameter to use the following syntax:
   ecap_service id vectoring_point uri [name=value ...]
- Check for duplicated options
- Fixes the related documentation 
- Also the older [e|i]cap_service syntax forms are supported:
   ecap_service id vectoring_point [1|0] uri 
   ecap_service id vectoring_point [name=value ...] uri 
- The "uri" option is not documented but supported.

This is a Measurement Factory project.

=== modified file 'src/adaptation/ServiceConfig.cc'
--- src/adaptation/ServiceConfig.cc	2011-05-24 22:26:21 +
+++ src/adaptation/ServiceConfig.cc	2011-10-06 09:10:48 +
@@ -1,28 +1,29 @@
 /*
  * DEBUG: section 93Adaptation
  */
 
 #include "squid.h"
 #include "ConfigParser.h"
 #include "adaptation/ServiceConfig.h"
 #include "ip/tools.h"
+#include 
 
 Adaptation::ServiceConfig::ServiceConfig():
 port(-1), method(methodNone), point(pointNone),
 bypass(false), maxConn(-1), onOverload(srvWait),
 routing(false), ipv6(false)
 {}
 
 const char *
 Adaptation::ServiceConfig::methodStr() const
 {
 return Adaptation::methodStr(method);
 }
 
 const char *
 Adaptation::ServiceConfig::vectPointStr() const
 {
 return Adaptation::vectPointStr(point);
 }
 
 Adaptation::Method
@@ -52,98 +53,106 @@
 if (!strcasecmp(t, "postcache"))
 return Adaptation::pointPostCache;
 
 return Adaptation::pointNone;
 }
 
 bool
 Adaptation::ServiceConfig::parse()
 {
 String method_point;
 
 ConfigParser::ParseString(&key);
 ConfigParser::ParseString(&method_point);
 method = parseMethod(method_point.termedBuf());
 point = parseVectPoint(method_point.termedBuf());
 
 // reset optional parameters in case we are reconfiguring
 bypass = routing = false;
 
 // handle optional service name=value parameters
-const char *lastOption = NULL;
 bool grokkedUri = false;
 bool onOverloadSet = false;
+std::set options;
+
 while (char *option = strtok(NULL, w_space)) {
+const char *name = option;
+const char *value = "";
 if (strcmp(option, "0") == 0) { // backward compatibility
-bypass = false;
-continue;
-}
-if (strcmp(option, "1") == 0) { // backward compatibility
-bypass = true;
-continue;
+name = "bypass";
+value = "off";
+debugs(3, opt_parse_cfg_only?0:1, "UPGRADE: Please use 'bypass=off' option to disable service bypass");
+}  else if (strcmp(option, "1") == 0) { // backward compatibility
+name = "bypass";
+value = "on";
+debugs(3, opt_parse_cfg_only?0:1, "UPGRADE: Please use 'bypass=on' option to enable service bypass");
+} else {
+char *eq = strstr(option, "=");
+const char *sffx = strstr(option, "://");
+if (!eq || (sffx && sffx < eq)) { //no "=" or has the form "icap://host?arg=val"
+name = "uri";
+value = option;
+}  else { // a normal name=value option
+*eq = '\0'; // terminate option name
+value = eq + 1; // skip '='
+}
 }
 
-const char *name = option;
-char *value = strstr(option, "=");
-if (!value) {
-lastOption = option;
-break;
+// Check if option is set twice
+if (options.find(name) != options.end()) {
+debugs(3, DBG_