On Monday 25 August 2008 14:46, nextgens at freenetproject.org wrote:
> Author: nextgens
> Date: 2008-08-25 13:46:46 +0000 (Mon, 25 Aug 2008)
> New Revision: 22133
> 
> Modified:
>    trunk/freenet/src/freenet/node/NodeClientCore.java
>    trunk/freenet/src/freenet/support/io/ArrayBucket.java
>    trunk/freenet/src/freenet/support/io/ArrayBucketFactory.java
>    
trunk/freenet/src/freenet/support/io/PaddedEphemerallyEncryptedBucket.java
>    trunk/freenet/src/freenet/support/io/TempBucketFactory.java
>    trunk/freenet/test/freenet/clients/http/filter/ContentFilterTest.java
>    trunk/freenet/test/freenet/support/compress/GzipCompressorTest.java
> Log:
> TempBucketFactory: it is *very* unlikely that it won't break things! If it 
doesn't it should speed things up significantly and solve the OOM problems a 
few users have been reporting.
> 
> Why isn't PaddedEphemerallyEncryptedBucket not properly synchronized?
> 
trunk/freenet/src/freenet/support/io/PaddedEphemerallyEncryptedBucket.java
> ===================================================================
> --- 
trunk/freenet/src/freenet/support/io/PaddedEphemerallyEncryptedBucket.java      
2008-08-25 01:29:48 UTC (rev 22132)
> +++ 
trunk/freenet/src/freenet/support/io/PaddedEphemerallyEncryptedBucket.java      
2008-08-25 13:46:46 UTC (rev 22133)
> @@ -151,6 +151,7 @@
>                       }
>               }
>               
> +             @Override
>               public void write(byte[] buf, int offset, int length) throws 
> IOException 
{
>                       if(closed) throw new IOException("Already closed!");
>                       if(streamNumber != lastOutputStream)
> @@ -165,14 +166,7 @@
>                       }
>               }
>               
> -             // Override this or FOS will use write(int)
> -             public void write(byte[] buf) throws IOException {
> -                     if(closed) throw new IOException("Already closed!");
> -                     if(streamNumber != lastOutputStream)
> -                             throw new IllegalStateException("Writing to old 
> stream in "+getName());
> -                     write(buf, 0, buf.length);
> -             }
> -             

Please don't delete this method. It is necessary for acceptable performance. 
The default implementation will call write() repeatedly on single bytes, 
*not* write(buf, 0, length).

> +             @Override
>               public void close() throws IOException {
>                       if(closed) return;
>                       try {
> @@ -225,11 +219,13 @@
>                       return pcfb.decipher(x);
>               }
>               
> +             @Override
>               public final int available() {
>                       int x = (int)Math.min(dataLength - ptr, 
> Integer.MAX_VALUE);
>                       return (x < 0) ? 0 : x;
>               }
>               
> +             @Override
>               public int read(byte[] buf, int offset, int length) throws 
> IOException {
>                       // FIXME remove debugging
>                       if((length+offset > buf.length) || (offset < 0) || 
> (length < 0))
> @@ -244,10 +240,12 @@
>                       return readBytes;
>               }
>  
> +             @Override
>               public int read(byte[] buf) throws IOException {
>                       return read(buf, 0, buf.length);
>               }
>               
> +             @Override
>               public long skip(long bytes) throws IOException {
>                       byte[] buf = new byte[(int)Math.min(4096, bytes)];
>                       long skipped = 0;
> @@ -259,6 +257,7 @@
>                       return skipped;
>               }
>               
> +             @Override
>               public void close() throws IOException {
>                       in.close();
>               }
> @@ -308,6 +307,7 @@
>               return "Encrypted:"+bucket.getName();
>       }
>  
> +     @Override
>       public String toString() {
>               return super.toString()+ ':' +bucket.toString();
>       }
> 
> Modified: trunk/freenet/src/freenet/support/io/TempBucketFactory.java
> ===================================================================
> --- trunk/freenet/src/freenet/support/io/TempBucketFactory.java       
> 2008-08-25 
01:29:48 UTC (rev 22132)
> +++ trunk/freenet/src/freenet/support/io/TempBucketFactory.java       
> 2008-08-25 
13:46:46 UTC (rev 22133)
> @@ -4,6 +4,10 @@
>  package freenet.support.io;
>  
>  import freenet.crypt.RandomSource;
> +import freenet.support.Executor;
> +import freenet.support.Logger;
> +import freenet.support.SizeUtil;
> +import freenet.support.TimeUtil;
>  import java.io.IOException;
>  
>  import freenet.support.api.Bucket;
> @@ -11,101 +15,259 @@
>  
>  import java.io.InputStream;
>  import java.io.OutputStream;
> +import java.util.LinkedList;
> +import java.util.Queue;
>  import java.util.Random;
> +import java.util.concurrent.LinkedBlockingDeque;
>  
>  /**
>   * Temporary Bucket Factory
> + * 
> + * Buckets created by this factory can be either:
> + *   - ArrayBuckets
> + * OR
> + *   - FileBuckets
> + * 
> + * ArrayBuckets are used if and only if:
> + *   1) there is enough room remaining on the pool (@see maxRamUsed and @see 
bytesInUse)
> + *   2) the initial size is smaller than (@maxRAMBucketSize)
> + * 
> + * Depending on how they are used they might switch from one type to 
another transparently.
> + * 
> + * Currently they are two factors considered for a migration:
> + *   - if they are long-lived or not (@see RAMBUCKET_MAX_AGE)
> + *   - if their size is over RAMBUCKET_CONVERSION_FACTOR*maxRAMBucketSize
>   */
>  public class TempBucketFactory implements BucketFactory {
> +     public final static long defaultIncrement = 4096;
> +     public final static float DEFAULT_FACTOR = 1.25F;
> +     
> +     private final FilenameGenerator filenameGenerator;
> +     private long bytesInUse = 0;
> +     private final RandomSource strongPRNG;
> +     private final Random weakPRNG;
> +     private final Executor executor;
> +     private volatile boolean logMINOR;
> +     private volatile boolean reallyEncrypt;
> +     
> +     /** How big can the defaultSize be for us to consider using RAMBuckets? 
> */
> +     private long maxRAMBucketSize;
> +     /** How much memory do we dedicate to the RAMBucketPool? (in bytes) */
> +     private long maxRamUsed;
> +     
> +     /** How old is a long-lived RAMBucket? */
> +     private final int RAMBUCKET_MAX_AGE = 5*60*1000; // 5mins
> +     /** How many times the maxRAMBucketSize can a RAMBucket be before it 
> gets 
migrated? */
> +     private final int RAMBUCKET_CONVERSION_FACTOR = 4;

I'm not sure what the point of this is — why not just use maxRAMBucketSize directly?
> +     
>       public class TempBucket implements Bucket {
>               private Bucket currentBucket;
> +             private long currentSize;
> +             private volatile boolean shouldResetOS = false;
> +             private volatile boolean shouldResetIS = false;
> +             public final long creationTime;
>               
> -             public TempBucket(Bucket cur) {
> +             public TempBucket(long now, Bucket cur) {
> +                     if(cur == null)
> +                             throw new NullPointerException();
>                       this.currentBucket = cur;
> +                     this.creationTime = now;
>               }
>               
> -             public final void migrateToFileBucket() throws IOException {
> -                     RAMBucket ramBucket = null;
> -                     synchronized(this) {
> +             /** A blocking method to force-migrate from a RAMBucket to a 
> FileBucket 
*/
> +             private final void migrateToFileBucket() throws IOException {
> +                     Bucket toMigrate = null;
> +                     synchronized(currentBucket) {

You shouldn't synchronize on it if it is going to change. Can't you use 
synchronized(this)?

>                               if(!isRAMBucket())
> +                                     // Nothing to migrate! We don't want to 
> switch back to ram, do we?                                      
>                                       return;
>  
> -                             ramBucket = (RAMBucket) currentBucket;
> -                             TempFileBucket tempFB = new 
TempFileBucket(filenameGenerator.makeRandomFilename(), filenameGenerator);
> +                             toMigrate = currentBucket;
> +                             Bucket tempFB = _makeFileBucket();
>                               BucketTools.copy(currentBucket, tempFB);

If an OutputStream is open, you need to close it, open one to the new bucket, 
copy the old bucket to the new stream, and *keep the new stream because 
buckets don't support appending*. Discussed on IRC.

>                               currentBucket = tempFB;
> +                             // We need streams to be reset to point to the 
> new bucket
> +                             shouldResetOS = true;
> +                             shouldResetIS = true;
>                       }
> -                     ramBucket.free();
> +                     if(logMINOR)
> +                             Logger.minor(this, "We have migrated 
> "+toMigrate.hashCode());
> +                     
> +                     // Might have changed already so we can't rely on 
> currentSize!
> +                     _hasFreed(toMigrate.size());
> +                     // We can free it on-thread as it's a rambucket
> +                     toMigrate.free();

We should update the counter *after* freeing the RAM bucket.
>               }
>               
> -             public final synchronized boolean isRAMBucket() {
> -                     return (currentBucket instanceof RAMBucket);
> +             public final boolean isRAMBucket() {
> +                     synchronized(currentBucket) {
> +                             return (currentBucket instanceof ArrayBucket);
> +                     }
>               }
>  
> -             public synchronized OutputStream getOutputStream() throws 
> IOException {
> -                     return currentBucket.getOutputStream();
> +             public OutputStream getOutputStream() throws IOException {
> +                     synchronized(currentBucket) {
> +                             shouldResetOS = true;
> +                             return new TempBucketOutputStream();
> +                     }
>               }
>  
> -             public synchronized InputStream getInputStream() throws 
> IOException {
> -                     return currentBucket.getInputStream();
> +             private class TempBucketOutputStream extends OutputStream {
> +                     private OutputStream os;
> +                     
> +                     private void _maybeMigrateRamBucket(long futureSize) 
> throws IOException 
{
> +                             if(isRAMBucket()) {
> +                                     boolean shouldMigrate = false;
> +                                     boolean isOversized = false;
> +                                     
> +                                     if(futureSize > maxRAMBucketSize * 
> RAMBUCKET_CONVERSION_FACTOR) {
> +                                             isOversized = true;
> +                                             shouldMigrate = true;
> +                                     } else if (futureSize + currentSize > 
> maxRamUsed)

What is futureSize + currentSize supposed to mean? Don't you mean bytesInUse?

> +                                             shouldMigrate = true;
> +                                     
> +                                     if(shouldMigrate) {
> +                                             os.close();
> +                                             if(logMINOR) {
> +                                                     if(isOversized)
> +                                                             
> Logger.minor(this, "The bucket is 
over "+SizeUtil.formatSize(maxRAMBucketSize*RAMBUCKET_CONVERSION_FACTOR)+": 
we will force-migrate it to disk.");
> +                                                     else
> +                                                             
> Logger.minor(this, "The bucketpool is full: force-migrate before we 
go over the limit");
> +                                             }
> +                                             migrateToFileBucket();
> +                                     }
> +                             }
> +                     }
> +                     
> +                     private void _maybeResetOutputStream() throws 
> IOException {
> +                             if(shouldResetOS) {
> +                                     Closer.close(os);
> +                                     os = currentBucket.getOutputStream();
> +                                     shouldResetOS = false;
> +                             }
> +                     }
> +                     
> +                     @Override
> +                     public void write(int b) throws IOException {
> +                             synchronized(currentBucket) {
> +                                     long futurSize = currentSize + 1;
> +                                     _maybeMigrateRamBucket(futurSize);
> +                                     _maybeResetOutputStream();

If you migrate and reset the output stream, you *must* seek to where you were 
writing before. Nextgens says this has been fixed in a later commit.

> +                                     os.write(b);
> +                                     currentSize = futurSize;
> +                                     if(isRAMBucket()) // We need to 
> re-check because it might have 
changed!
> +                                             _hasTaken(1);
> +                             }
> +                     }
> +                     
> +                     @Override
> +                     public void write(byte b[], int off, int len) throws 
> IOException {
> +                             synchronized(currentBucket) {
> +                                     long futurSize = currentSize + len;
> +                                     _maybeMigrateRamBucket(futurSize);
> +                                     _maybeResetOutputStream();
> +                                     os.write(b, off, len);
> +                                     currentSize = futurSize;
> +                                     if(isRAMBucket()) // We need to 
> re-check because it might have 
changed!
> +                                             _hasTaken(len);
> +                             }
> +                     }
> +                     
> +                     @Override
> +                     public void flush() throws IOException {
> +                             synchronized(currentBucket) {
> +                                     _maybeMigrateRamBucket(currentSize);
> +                                     _maybeResetOutputStream();
> +                                     os.flush();
> +                             }
> +                     }

No point flushing *after* you've reset the OS, is there? Migrating will 
close() and therefore flush. Of course if you don't migrate, you do want to 
call flush() here.
> +                     
> +                     @Override
> +                     public void close() throws IOException {
> +                             synchronized(currentBucket) {
> +                                     _maybeMigrateRamBucket(currentSize);
> +                                     _maybeResetOutputStream();
> +                                     os.close();
> +                             }
> +                     }
>               }
>  
> -             public synchronized String getName() {
> -                     return currentBucket.getName();
> +             public synchronized InputStream getInputStream() throws 
> IOException {
> +                     shouldResetIS = true;
> +                     return new TempBucketInputStream();
>               }
> +             
> +             private class TempBucketInputStream extends InputStream {
> +                     private InputStream is;
> +                     
> +                     private void _maybeResetInputStream() throws 
> IOException {
> +                             if(shouldResetIS) {
> +                                     Closer.close(is);
> +                                     is = currentBucket.getInputStream();

You should seek here. You fixed it in 22136.

> +                                     shouldResetIS = false;
> +                             }
> +                     }

If the implementation changes, you need to seek to where the stream was before 
it changed. If the implementation stays the same and a new output stream is 
opened, the input stream should throw. The easiest way to do this is to 
increment a counter on every getOutputStream(), and have each input and 
output stream know the counter at the time of its creation, and throw if it 
changes in read/write.
> +                     
> +                     @Override
> +                     public int read() throws IOException {
> +                             synchronized(currentBucket) {
> +                                     _maybeResetInputStream();
> +                                     return is.read();
> +                             }
> +                     }

How come no bulk read? Always reading 1 byte at a time will be really slow.
> +                     
> +                     @Override
> +                     public void close() throws IOException {
> +                             synchronized(currentBucket) {
> +                                     _maybeResetInputStream();
> +                                     is.close();
> +                             }
> +                     }
> +             }
>  
> -             public synchronized long size() {
> -                     return currentBucket.size();
> +             public String getName() {
> +                     synchronized(currentBucket) {
> +                             return currentBucket.getName();
> +                     }
>               }
>  
> -             public synchronized boolean isReadOnly() {
> -                     return currentBucket.isReadOnly();
> +             public long size() {
> +                     synchronized(currentBucket) {
> +                             return currentBucket.size();
> +                     }
>               }
>  
> -             public synchronized void setReadOnly() {
> -                     currentBucket.setReadOnly();
> +             public boolean isReadOnly() {
> +                     synchronized(currentBucket) {
> +                             return currentBucket.isReadOnly();
> +                     }
>               }
>  
> -             public synchronized void free() {
> -                     currentBucket.free();
> +             public void setReadOnly() {
> +                     synchronized(currentBucket) {
> +                             currentBucket.setReadOnly();
> +                     }
>               }
> -     }
>  
> -     private class RAMBucket extends ArrayBucket {
> -             public RAMBucket(long size) {
> -                     super("RAMBucket", size);
> -                     _hasTaken(size);
> -             }
> -             
> -             @Override
>               public void free() {
> -                     super.free();
> -                     _hasFreed(size());
> +                     synchronized(currentBucket) {
> +                             if(isRAMBucket())
> +                                     _hasFreed(currentSize);
> +                             currentBucket.free();
> +                     }
>               }
>       }
>       
> -     private final FilenameGenerator filenameGenerator;
> -     private long bytesInUse = 0;
> -     
> -     public final static long defaultIncrement = 4096;
> -     
> -     public final static float DEFAULT_FACTOR = 1.25F;
> -     
> -     public long maxRAMBucketSize;
> -     public long maxRamUsed;
> -
> -     private final RandomSource strongPRNG;
> -     private final Random weakPRNG;
> -     private volatile boolean reallyEncrypt;
> -
>       // Storage accounting disabled by default.
> -     public TempBucketFactory(FilenameGenerator filenameGenerator, long 
maxBucketSizeKeptInRam, long maxRamUsed, RandomSource strongPRNG, Random 
weakPRNG, boolean reallyEncrypt) {
> +     public TempBucketFactory(Executor executor, FilenameGenerator 
filenameGenerator, long maxBucketSizeKeptInRam, long maxRamUsed, RandomSource 
strongPRNG, Random weakPRNG, boolean reallyEncrypt) {
>               this.filenameGenerator = filenameGenerator;
>               this.maxRamUsed = maxRamUsed;
>               this.maxRAMBucketSize = maxBucketSizeKeptInRam;
>               this.strongPRNG = strongPRNG;
>               this.weakPRNG = weakPRNG;
>               this.reallyEncrypt = reallyEncrypt;
> +             this.executor = executor;
> +             this.logMINOR = Logger.shouldLog(Logger.MINOR, this);
>       }
>  
>       public Bucket makeBucket(long size) throws IOException {
> @@ -129,22 +291,27 @@
>       }
>       
>       public synchronized void setMaxRamUsed(long size) {
> +             logMINOR = Logger.shouldLog(Logger.MINOR, this);
>               maxRamUsed = size;
>       }
>       
>       public synchronized long getMaxRamUsed() {
> +             logMINOR = Logger.shouldLog(Logger.MINOR, this);
>               return maxRamUsed;
>       }
>       
>       public synchronized void setMaxRAMBucketSize(long size) {
> +             logMINOR = Logger.shouldLog(Logger.MINOR, this);
>               maxRAMBucketSize = size;
>       }
>       
>       public synchronized long getMaxRAMBucketSize() {
> +             logMINOR = Logger.shouldLog(Logger.MINOR, this);
>               return maxRAMBucketSize;
>       }
>       
>       public void setEncryption(boolean value) {
> +             logMINOR = Logger.shouldLog(Logger.MINOR, this);
>               reallyEncrypt = value;
>       }
>       
> @@ -167,7 +334,10 @@
>       public TempBucket makeBucket(long size, float factor, long increment) 
throws IOException {
>               Bucket realBucket = null;
>               boolean useRAMBucket = false;
> +             long now = System.currentTimeMillis();
>               
> +             // We need to clean the queue in order to have "space" to host 
> new 
buckets
> +             cleanBucketQueue(now);
>               synchronized(this) {
>                       if((size > 0) && (size <= maxRAMBucketSize) && 
> (bytesInUse <= 
maxRamUsed)) {
>                               useRAMBucket = true;
> @@ -175,10 +345,59 @@
>               }
>               
>               // Do we want a RAMBucket or a FileBucket?
> -             realBucket = (useRAMBucket ? new RAMBucket(size) : new 
TempFileBucket(filenameGenerator.makeRandomFilename(), filenameGenerator));
> -             // Do we want it to be encrypted?
> -             realBucket = (!reallyEncrypt ? realBucket : new 
PaddedEphemerallyEncryptedBucket(realBucket, 1024, strongPRNG, weakPRNG));
> +             realBucket = (useRAMBucket ? new ArrayBucket() : 
> _makeFileBucket());
>               
> -             return new TempBucket(realBucket);
> +             TempBucket toReturn = new TempBucket(now, realBucket);
> +             if(useRAMBucket) { // No need to consider them for migration if 
> they 
can't be migrated
> +                     synchronized(ramBucketQueue) {
> +                             ramBucketQueue.add(toReturn);
> +                     }
> +             }
> +             return toReturn;
> +}
> +     
> +     /** Migrate all long-lived buckets from the queue */
> +     private void cleanBucketQueue(long now) {
> +             boolean shouldContinue = true;
> +             // create a new list to avoid race-conditions
> +             final Queue<TempBucket> toMigrate = new 
> LinkedList<TempBucket>();
> +             do {
> +                     synchronized(ramBucketQueue) {
> +                             final TempBucket tmpBucket = 
> ramBucketQueue.peek();
> +                             if((tmpBucket == null) || 
> (tmpBucket.creationTime + RAMBUCKET_MAX_AGE > 
now))
> +                                     shouldContinue = false;
> +                             else {
> +                                     if(logMINOR)
> +                                             Logger.minor(this, "The bucket 
> is "+TimeUtil.formatTime(now - 
tmpBucket.creationTime)+" old: we will force-migrate it to disk.");
> +                                     ramBucketQueue.remove(tmpBucket);
> +                                     toMigrate.add(tmpBucket);
> +                             }
> +                     }
> +             } while(shouldContinue);
> +
> +             if(toMigrate.size() > 0) {
> +                     executor.execute(new Runnable() {
> +
> +                             public void run() {
> +                                     if(logMINOR)
> +                                             Logger.minor(this, "We are 
> going to migrate " + toMigrate.size() + " 
RAMBuckets");
> +                                     for(TempBucket tmpBucket : toMigrate) {
> +                                             try {
> +                                                     
> tmpBucket.migrateToFileBucket();
> +                                             } catch(IOException e) {
> +                                                     Logger.error(tmpBucket, 
> "An IOE occured while migrating long-lived 
buckets:" + e.getMessage(), e);
> +                                             }
> +                                     }
> +                             }
> +                     }, "RAMBucket migrator ("+now+')');
> +             }
>       }

This migration always happens off-thread (via the executor); should it be done 
inline if we immediately need the space for a new bucket? Making such a bucket 
would require disk access in any case... Also, if it is happening inline, you 
can migrate the oldest bucket even if it's not that old, subject to some 
reasonable minimum age (if the oldest bucket is less than 1 minute old, don't 
migrate it, just create a disk-based bucket).
> +     
> +     private final Queue<TempBucket> ramBucketQueue = new 
LinkedBlockingDeque<TempBucket>();
> +     
> +     private Bucket _makeFileBucket() {
> +             Bucket fileBucket = new 
TempFileBucket(filenameGenerator.makeRandomFilename(), filenameGenerator);
> +             // Do we want it to be encrypted?
> +             return (reallyEncrypt ? new 
> PaddedEphemerallyEncryptedBucket(fileBucket, 
1024, strongPRNG, weakPRNG) : fileBucket);
> +     }
>  }
> 
> Modified: 
trunk/freenet/test/freenet/clients/http/filter/ContentFilterTest.java
> ===================================================================
> --- trunk/freenet/test/freenet/clients/http/filter/ContentFilterTest.java     
2008-08-25 01:29:48 UTC (rev 22132)
> +++ trunk/freenet/test/freenet/clients/http/filter/ContentFilterTest.java     
2008-08-25 13:46:46 UTC (rev 22133)
> @@ -64,6 +64,6 @@
>               URI baseURI = new URI(BASE_URI);
>               byte[] dataToFilter = data.getBytes("UTF-8");
>               
> -             return ContentFilter.filter(new ArrayBucket(dataToFilter, -1), 
> bf, 
typeName, baseURI, null).data.toString();
> +             return ContentFilter.filter(new ArrayBucket(dataToFilter), bf, 
> typeName, 
baseURI, null).data.toString();
>       }
>  }
> 
> Modified: 
trunk/freenet/test/freenet/support/compress/GzipCompressorTest.java
> ===================================================================
> --- trunk/freenet/test/freenet/support/compress/GzipCompressorTest.java       
2008-08-25 01:29:48 UTC (rev 22132)
> +++ trunk/freenet/test/freenet/support/compress/GzipCompressorTest.java       
2008-08-25 13:46:46 UTC (rev 22133)
> @@ -111,7 +111,7 @@
>       public void testCompressException() {
>               
>               byte[] uncompressedData = UNCOMPRESSED_DATA_1.getBytes();
> -             Bucket inBucket = new ArrayBucket(uncompressedData, 
uncompressedData.length);
> +             Bucket inBucket = new ArrayBucket(uncompressedData);
>               BucketFactory factory = new ArrayBucketFactory();
>  
>               try {
> @@ -133,7 +133,7 @@
>               
>               byte[] compressedData = doCompress(uncompressedData);
>               
> -             Bucket inBucket = new ArrayBucket(compressedData, 
uncompressedData.length);
> +             Bucket inBucket = new ArrayBucket(compressedData);
>               BucketFactory factory = new ArrayBucketFactory();
>  
>               try {
> @@ -147,7 +147,7 @@
>       
>       private byte[] doBucketDecompress(byte[] compressedData) {
>  
> -             Bucket inBucket = new ArrayBucket(compressedData, 
> compressedData.length);
> +             Bucket inBucket = new ArrayBucket(compressedData);
>               BucketFactory factory = new ArrayBucketFactory();
>               Bucket outBucket = null;
>  
> @@ -179,7 +179,7 @@
>       }
>  
>       private byte[] doCompress(byte[] uncompressedData) {
> -             Bucket inBucket = new ArrayBucket(uncompressedData, 
uncompressedData.length);
> +             Bucket inBucket = new ArrayBucket(uncompressedData);
>               BucketFactory factory = new ArrayBucketFactory();
>               Bucket outBucket = null;
>  
> 
> _______________________________________________
> cvs mailing list
> cvs at freenetproject.org
> http://emu.freenetproject.org/cgi-bin/mailman/listinfo/cvs
> 
> 
-------------- next part --------------
A non-text attachment was scrubbed...
Name: not available
Type: application/pgp-signature
Size: 189 bytes
Desc: not available
URL: 
<https://emu.freenetproject.org/pipermail/devl/attachments/20080825/235cf171/attachment.pgp>

Reply via email to