On Tuesday 21 October 2008 16:24, nextgens at freenetproject.org wrote:
> Author: nextgens
> Date: 2008-10-21 15:24:47 +0000 (Tue, 21 Oct 2008)
> New Revision: 23014
> 
> Modified:
>    trunk/freenet/src/freenet/client/ArchiveManager.java
>    trunk/freenet/src/freenet/client/ArchiveStoreContext.java
>    trunk/freenet/src/freenet/client/ClientMetadata.java
>    trunk/freenet/src/freenet/client/HighLevelSimpleClientImpl.java
>    trunk/freenet/src/freenet/client/Metadata.java
>    trunk/freenet/src/freenet/client/async/ClientPutter.java
>    trunk/freenet/src/freenet/client/async/SimpleManifestPutter.java
>    trunk/freenet/src/freenet/client/async/SingleFileFetcher.java
>    trunk/freenet/src/freenet/client/async/SingleFileInserter.java
>    trunk/freenet/src/freenet/client/async/SplitFileInserter.java
>    trunk/freenet/src/freenet/clients/http/WelcomeToadlet.java
>    trunk/freenet/src/freenet/frost/message/FrostMessage.java
>    trunk/freenet/src/freenet/node/NodeARKInserter.java
>    trunk/freenet/src/freenet/node/TextModeClientInterface.java
>    trunk/freenet/src/freenet/node/fcp/ClientPut.java
>    trunk/freenet/src/freenet/node/fcp/DirPutFile.java
>    trunk/freenet/src/freenet/node/simulator/BootstrapPushPullTest.java
> Log:
> more work on bug #71: *** IT NEEDS TESTING! ***
> It's still not backward compatible with stable but should be 
forward-compatible ;)

...

All good to this point.
> 
> Modified: trunk/freenet/src/freenet/client/ClientMetadata.java
> ===================================================================
> --- trunk/freenet/src/freenet/client/ClientMetadata.java      2008-10-21 
> 14:22:02 
UTC (rev 23013)
> +++ trunk/freenet/src/freenet/client/ClientMetadata.java      2008-10-21 
> 15:24:47 
UTC (rev 23014)
> @@ -3,6 +3,8 @@
>   * http://www.gnu.org/ for further details of the GPL. */
>  package freenet.client;
>  
> +import freenet.support.compress.Compressor.COMPRESSOR_TYPE;
> +
>  /**
>   * Stores the metadata that the client might actually be interested in.
>   */
> @@ -10,14 +12,16 @@
>       
>       /** The document MIME type */
>       private String mimeType;
> +     private COMPRESSOR_TYPE compressor;

This is BAD.

ClientMetadata simply contains the bits of the per-file metadata that are 
visible to clients. At the moment this is just its MIME type. In future 
clients might be able to add custom metadata. Compression type is a detail 
that should be kept within Metadata.
>  
> -     public ClientMetadata(String mime) {
> -             mimeType = (mime == null) ? null : mime.intern();
> +     public ClientMetadata(){
> +             mimeType = null;
> +             compressor = null;
>       }
>  
> -     /** Create an empty ClientMetadata instance */
> -     public ClientMetadata() {
> -             mimeType = null;
> +     public ClientMetadata(String mime, COMPRESSOR_TYPE comp) {
> +             mimeType = (mime == null) ? null : mime.intern();
> +             compressor = comp;
>       }
>       
>       /** Get the document MIME type. Will always be a valid MIME type, 
> unless 
there
> @@ -68,4 +72,12 @@
>               }
>               return s;
>       }
> +     
> +     public COMPRESSOR_TYPE getCompressorType() {
> +             return compressor;
>  }
> +     
> +     public void setCompressorType(COMPRESSOR_TYPE compressor) {
> +             this.compressor = compressor;
> +     }
> +}
> 
> Modified: trunk/freenet/src/freenet/client/Metadata.java
> ===================================================================
> --- trunk/freenet/src/freenet/client/Metadata.java    2008-10-21 14:22:02 UTC 
(rev 23013)
> +++ trunk/freenet/src/freenet/client/Metadata.java    2008-10-21 15:24:47 UTC 
(rev 23014)
> @@ -74,12 +74,12 @@
>       /** Container archive type 
>        * @see ARCHIVE_TYPE
>        */
> -     short archiveType;
> +     ARCHIVE_TYPE archiveType;
>       
>       /** Compressed splitfile codec 
>        * @see COMPRESSOR_TYPE
>        */
> -     short compressionCodec = -1;
> +     COMPRESSOR_TYPE compressionCodec;
>       
>       /** The length of the splitfile */
>       long dataLength;
> @@ -197,8 +197,8 @@
>               
>               if(documentType == ARCHIVE_MANIFEST) {
>                       if(logMINOR) Logger.minor(this, "Archive manifest");
> -                     archiveType = dis.readShort();
> -                     if(!ARCHIVE_TYPE.isValidMetadataID(archiveType))
> +                     archiveType = 
> ARCHIVE_TYPE.getArchiveType(dis.readShort());
> +                     if(archiveType == null)
>                               throw new MetadataParseException("Unrecognized 
> archive 
type "+archiveType);
>               }
>  
> @@ -215,8 +215,8 @@
>               }
>               
>               if(compressed) {
> -                     compressionCodec = dis.readShort();
> -                     if(!COMPRESSOR_TYPE.isValidMetadataID(compressionCodec))
> +                     compressionCodec = 
COMPRESSOR_TYPE.getCompressorByMetadataID(dis.readShort());
> +                     if(compressionCodec == null)
>                               throw new MetadataParseException("Unrecognized 
> splitfile compression 
codec "+compressionCodec);
>                       
>                       decompressedLength = dis.readLong();
> @@ -267,7 +267,7 @@
>                       extraMetadata = false; // can't parse, can't write
>               }
>               
> -             clientMetadata = new ClientMetadata(mimeType);
> +             clientMetadata = new ClientMetadata(mimeType, compressionCodec);
>               
>               if((!splitfile) && ((documentType == SIMPLE_REDIRECT) || 
> (documentType == 
ARCHIVE_MANIFEST))) {
>                       simpleRedirectKey = readKey(dis);
> @@ -388,7 +388,7 @@
>                       if(o instanceof String) {
>                               // External redirect
>                               FreenetURI uri = new FreenetURI((String)o);
> -                             target = new Metadata(SIMPLE_REDIRECT, (short) 
> -1, uri, null);
> +                             target = new Metadata(SIMPLE_REDIRECT, null, 
> null, uri, null);
>                       } else if(o instanceof HashMap) {
>                               target = new Metadata();
>                               target.addRedirectionManifest((HashMap)o);
> @@ -461,7 +461,7 @@
>               documentType = SIMPLE_MANIFEST;
>               noMIME = true;
>               mimeType = null;
> -             clientMetadata = new ClientMetadata(null);
> +             clientMetadata = new ClientMetadata(null,null);
>               manifestEntries = new HashMap();
>               int count = 0;
>               for(Iterator i = dir.keySet().iterator();i.hasNext();) {
> @@ -471,7 +471,8 @@
>                       Metadata target;
>                       if(o instanceof String) {
>                               // Zip internal redirect
> -                             target = new 
> Metadata(ARCHIVE_INTERNAL_REDIRECT, (short)-1, prefix+key, 
new ClientMetadata(DefaultMIMETypes.guessMIMEType(key, false)));
> +                             target = new 
> Metadata(ARCHIVE_INTERNAL_REDIRECT, null, null, 
prefix+key,
> +                                     new 
> ClientMetadata(DefaultMIMETypes.guessMIMEType(key, false),null));
>                       } else if(o instanceof HashMap) {
>                               target = new Metadata((HashMap)o, 
> prefix+key+"/");
>                       } else throw new IllegalArgumentException("Not String 
> nor HashMap: "+o);
> @@ -486,12 +487,13 @@
>        * @param arg The argument; in the case of ZIP_INTERNAL_REDIRECT, the 
filename in
>        * the archive to read from.
>        */
> -     public Metadata(byte docType, short archiveType, String arg, 
ClientMetadata cm) {
> +     public Metadata(byte docType, ARCHIVE_TYPE archiveType, COMPRESSOR_TYPE 
compressionCodec, String arg, ClientMetadata cm) {
>               if(docType == ARCHIVE_INTERNAL_REDIRECT) {
>                       documentType = docType;
>                       this.archiveType = archiveType;
>                       // Determine MIME type
>                       this.clientMetadata = cm;
> +                     this.compressionCodec = compressionCodec;
>                       if(cm != null)
>                               this.setMIMEType(cm.getMIMEType());
>                       nameInArchive = arg;
> @@ -505,10 +507,11 @@
>        * @param uri The URI pointed to.
>        * @param cm The client metadata, if any.
>        */
> -     public Metadata(byte docType, short archiveType, FreenetURI uri, 
ClientMetadata cm) {
> +     public Metadata(byte docType, ARCHIVE_TYPE archiveType, COMPRESSOR_TYPE 
compressionCodec, FreenetURI uri, ClientMetadata cm) {
>               if((docType == SIMPLE_REDIRECT) || (docType == 
> ARCHIVE_MANIFEST)) {
>                       documentType = docType;
>                       this.archiveType = archiveType;
> +                     this.compressionCodec = compressionCodec;
>                       clientMetadata = cm;
>                       if((cm != null) && !cm.isTrivial()) {
>                               setMIMEType(cm.getMIMEType());
> @@ -524,11 +527,11 @@
>       }
>  
>       public Metadata(short algo, ClientCHK[] dataURIs, ClientCHK[] 
> checkURIs, 
int segmentSize, int checkSegmentSize, 
> -                     ClientMetadata cm, long dataLength, short 
> compressionAlgo, long 
decompressedLength, boolean isMetadata, boolean insertAsArchiveManifest, 
short archiveType) {
> +                     ClientMetadata cm, long dataLength, ARCHIVE_TYPE 
> archiveType, 
COMPRESSOR_TYPE compressionCodec, long decompressedLength, boolean 
isMetadata) {
>               if(isMetadata)
>                       documentType = MULTI_LEVEL_METADATA;
>               else {
> -                     if(insertAsArchiveManifest) {
> +                     if(archiveType != null) {
>                               documentType = ARCHIVE_MANIFEST;
>                               this.archiveType = archiveType;
>                       } else documentType = SIMPLE_REDIRECT;
> @@ -536,12 +539,13 @@
>               splitfile = true;
>               splitfileAlgorithm = algo;
>               this.dataLength = dataLength;
> -             this.compressionCodec = compressionAlgo;
> +             this.compressionCodec = compressionCodec;
>               splitfileBlocks = dataURIs.length;
>               splitfileCheckBlocks = checkURIs.length;
>               splitfileDataKeys = dataURIs;
>               splitfileCheckKeys = checkURIs;
>               clientMetadata = cm;
> +             this.compressionCodec = compressionCodec;
>               this.decompressedLength = decompressedLength;
>               if(cm != null)
>                       setMIMEType(cm.getMIMEType());
> @@ -728,7 +732,7 @@
>       }
>  
>       /** What kind of archive is it? */
> -     public short getArchiveType() {
> +     public ARCHIVE_TYPE getArchiveType() {
>               return archiveType;
>       }
>  
> @@ -755,20 +759,21 @@
>                       if(compressedMIME) flags |= FLAGS_COMPRESSED_MIME;
>                       if(extraMetadata) flags |= FLAGS_EXTRA_METADATA;
>                       if(fullKeys) flags |= FLAGS_FULL_KEYS;
> -                     if(compressionCodec >= 0) flags |= FLAGS_COMPRESSED;
> +                     if(compressionCodec != null) flags |= FLAGS_COMPRESSED;
>                       dos.writeShort(flags);
>               }
>               
>               if(documentType == ARCHIVE_MANIFEST) {
> -                     dos.writeShort(archiveType);
> +                     short code = archiveType.metadataID;
> +                     dos.writeShort(code);
>               }
>               
>               if(splitfile) {
>                       dos.writeLong(dataLength);
>               }
>               
> -             if(compressionCodec >= 0) {
> -                     dos.writeShort(compressionCodec);
> +             if(compressionCodec != null) {
> +                     dos.writeShort(compressionCodec.metadataID);
>                       dos.writeLong(decompressedLength);
>               }
>               
> @@ -827,7 +832,7 @@
>                                       if(data.length > Short.MAX_VALUE) {
>                                               FreenetURI uri = 
> meta.resolvedURI;
>                                               if(uri != null) {
> -                                                     meta = new 
> Metadata(SIMPLE_REDIRECT, (short)-1,  uri, null);
> +                                                     meta = new 
> Metadata(SIMPLE_REDIRECT, null, null, uri, null);
>                                                       data = 
> meta.writeToByteArray();
>                                               } else {
>                                                       kill = true;
> @@ -878,10 +883,10 @@
>       }
>  
>       public boolean isCompressed() {
> -             return compressionCodec >= 0;
> +             return compressionCodec != null;
>       }
>  
> -     public short getCompressionCodec() {
> +     public COMPRESSOR_TYPE getCompressionCodec() {
>               return compressionCodec;
>       }
>  
> @@ -915,7 +920,9 @@
>       }
>  
>       public void setArchiveManifest() {
> -             archiveType = 
ARCHIVE_TYPE.getArchiveType(clientMetadata.getMIMEType()).metadataID;
> +             ARCHIVE_TYPE type = 
ARCHIVE_TYPE.getArchiveType(clientMetadata.getMIMEType());
> +             archiveType = type;
> +             compressionCodec = clientMetadata.getCompressorType();
>               clientMetadata.clear();
>               documentType = ARCHIVE_MANIFEST;
>       }
> 
> Modified: trunk/freenet/src/freenet/client/async/ClientPutter.java
> ===================================================================
> --- trunk/freenet/src/freenet/client/async/ClientPutter.java  2008-10-21 
14:22:02 UTC (rev 23013)
> +++ trunk/freenet/src/freenet/client/async/ClientPutter.java  2008-10-21 
15:24:47 UTC (rev 23014)
> @@ -96,7 +96,7 @@
>                                       if(!binaryBlob)
>                                               currentState =
>                                                       new 
> SingleFileInserter(this, this, new InsertBlock(data, cm, 
targetURI), isMetadata, ctx, 
> -                                                                     false, 
> getCHKOnly, false, null, false, false, targetFilename, 
earlyEncode);
> +                                                                     false, 
> getCHKOnly, false, null, null, false, targetFilename, 
earlyEncode);
>                                       else
>                                               currentState =
>                                                       new 
> BinaryBlobInserter(data, this, null, false, priorityClass, ctx);
> 
> Modified: trunk/freenet/src/freenet/client/async/SimpleManifestPutter.java
> ===================================================================
> --- trunk/freenet/src/freenet/client/async/SimpleManifestPutter.java  
2008-10-21 14:22:02 UTC (rev 23013)
> +++ trunk/freenet/src/freenet/client/async/SimpleManifestPutter.java  
2008-10-21 15:24:47 UTC (rev 23014)
> @@ -25,7 +25,6 @@
>  import freenet.support.Logger;
>  import freenet.support.api.Bucket;
>  import freenet.support.io.BucketTools;
> -import org.apache.tools.bzip2.CBZip2OutputStream;
>  import org.apache.tools.tar.TarEntry;
>  import org.apache.tools.tar.TarOutputStream;
>  
> @@ -41,7 +40,7 @@
>                       InsertBlock block = 
>                               new InsertBlock(data, cm, 
> FreenetURI.EMPTY_CHK_URI);
>                       this.origSFI =
> -                             new SingleFileInserter(this, this, block, 
> false, ctx, false, 
getCHKOnly, true, null, false, false, null, earlyEncode);
> +                             new SingleFileInserter(this, this, block, 
> false, ctx, false, 
getCHKOnly, true, null, null, false, null, earlyEncode);
>                       metadata = null;
>               }
>  
> @@ -49,7 +48,7 @@
>                       super(smp.getPriorityClass(), smp.chkScheduler, 
> smp.sskScheduler, 
smp.client);
>                       this.cm = cm;
>                       this.data = null;
> -                     Metadata m = new Metadata(Metadata.SIMPLE_REDIRECT, 
> (short)-1, target, 
cm);
> +                     Metadata m = new Metadata(Metadata.SIMPLE_REDIRECT, 
> null, null, target, 
cm);
>                       metadata = m;
>                       origSFI = null;
>               }
> @@ -59,7 +58,7 @@
>                       this.cm = cm;
>                       this.data = data;
>                       this.targetInArchive = targetInArchive;
> -                     Metadata m = new 
> Metadata(Metadata.ARCHIVE_INTERNAL_REDIRECT, (short)-1, 
targetInArchive, cm);
> +                     Metadata m = new 
> Metadata(Metadata.ARCHIVE_INTERNAL_REDIRECT, null, 
null, targetInArchive, cm);
>                       metadata = m;
>                       origSFI = null;
>               }
> @@ -118,7 +117,7 @@
>                               // The file was too small to have its own 
> metadata, we get this 
instead.
>                               // So we make the key into metadata.
>                               Metadata m =
> -                                     new Metadata(Metadata.SIMPLE_REDIRECT, 
> (short) -1, key.getURI(), cm);
> +                                     new Metadata(Metadata.SIMPLE_REDIRECT, 
> null, null, key.getURI(), cm);
>                               onMetadata(m, null);
>                       }
>               }
> @@ -311,7 +310,7 @@
>                               if(mimeType == null || 
mimeType.equals(DefaultMIMETypes.DEFAULT_MIME_TYPE))
>                                       cm = null;
>                               else
> -                                     cm = new ClientMetadata(mimeType);
> +                                     cm = new ClientMetadata(mimeType, null);
>                               PutHandler ph;
>                               Bucket data = element.data;
>                               if(element.targetURI != null) {
> @@ -418,6 +417,7 @@
>               InsertBlock block;
>               boolean isMetadata = true;
>               boolean insertAsArchiveManifest = false;
> +             ARCHIVE_TYPE archiveType = null;
>               if(!(elementsToPutInArchive.isEmpty())) {
>                       // There is an archive to insert.
>                       // We want to include the metadata.
> @@ -426,16 +426,19 @@
>                       try {                           
>                               Bucket outputBucket = 
> ctx.bf.makeBucket(baseMetadata.dataLength());
>                               // TODO: try both ? - maybe not worth it
> -                             String mimeType = (ARCHIVE_TYPE.getDefault() == 
> ARCHIVE_TYPE.TAR ?
> +                             archiveType = ARCHIVE_TYPE.getDefault();
> +                             String mimeType = (archiveType == 
> ARCHIVE_TYPE.TAR ?
>                                       createTarBucket(bucket, outputBucket) :
>                                       createZipBucket(bucket, outputBucket));
>                               
> +                             if(logMINOR) Logger.minor(this, "We are using 
> "+archiveType);
> +                             
>                               // Now we have to insert the Archive we have 
> generated.
>                               
>                               // Can we just insert it, and not bother with a 
> redirect to it?
>                               // Thereby exploiting implicit manifest 
> support, which will pick up 
on .metadata??
>                               // We ought to be able to !!
> -                             block = new InsertBlock(outputBucket, new 
> ClientMetadata(mimeType), 
targetURI);
> +                             block = new InsertBlock(outputBucket, new 
> ClientMetadata(mimeType, 
null), targetURI);
>                               isMetadata = false;
>                               insertAsArchiveManifest = true;
>                       } catch (IOException e) {
> @@ -446,7 +449,7 @@
>                       block = new InsertBlock(bucket, null, targetURI);
>               try {
>                       SingleFileInserter metadataInserter = 
> -                             new SingleFileInserter(this, this, block, 
> isMetadata, ctx, false, 
getCHKOnly, false, baseMetadata, insertAsArchiveManifest, true, null, 
earlyEncode);
> +                             new SingleFileInserter(this, this, block, 
> isMetadata, ctx, false, 
getCHKOnly, false, baseMetadata, archiveType, true, null, earlyEncode);
>                       if(logMINOR) Logger.minor(this, "Inserting main 
metadata: "+metadataInserter);
>                       this.metadataPuttersByMetadata.put(baseMetadata, 
> metadataInserter);
>                       metadataPuttersUnfetchable.put(baseMetadata, 
> metadataInserter);
> @@ -460,8 +463,7 @@
>               if(logMINOR) Logger.minor(this, "Create a TAR Bucket");
>               
>               OutputStream os = new 
BufferedOutputStream(outputBucket.getOutputStream());
> -             CBZip2OutputStream bz2OS = new CBZip2OutputStream(os);
> -             TarOutputStream tarOS = new TarOutputStream(bz2OS);
> +             TarOutputStream tarOS = new TarOutputStream(os);
>               TarEntry ze;
>  
>               for(PutHandler ph : elementsToPutInArchive) {
> @@ -537,7 +539,7 @@
>                               
>                               InsertBlock ib = new InsertBlock(b, null, 
> FreenetURI.EMPTY_CHK_URI);
>                               SingleFileInserter metadataInserter = 
> -                                     new SingleFileInserter(this, this, ib, 
> true, ctx, false, getCHKOnly, 
false, m, false, true, null, earlyEncode);
> +                                     new SingleFileInserter(this, this, ib, 
> true, ctx, false, getCHKOnly, 
false, m, null, true, null, earlyEncode);
>                               if(logMINOR) Logger.minor(this, "Inserting 
> subsidiary 
metadata: "+metadataInserter+" for "+m);
>                               synchronized(this) {
>                                       this.metadataPuttersByMetadata.put(m, 
> metadataInserter);
> 
> Modified: trunk/freenet/src/freenet/client/async/SingleFileInserter.java
> ===================================================================
> --- trunk/freenet/src/freenet/client/async/SingleFileInserter.java    
2008-10-21 14:22:02 UTC (rev 23013)
> +++ trunk/freenet/src/freenet/client/async/SingleFileInserter.java    
2008-10-21 15:24:47 UTC (rev 23014)
> @@ -44,7 +44,8 @@
>       final boolean metadata;
>       final PutCompletionCallback cb;
>       final boolean getCHKOnly;
> -     final boolean insertAsArchiveManifest;
> +     final ARCHIVE_TYPE archiveType;
> +     COMPRESSOR_TYPE compressorUsed;
>       /** If true, we are not the top level request, and should not
>        * update our parent to point to us as current put-stage. */
>       private final boolean reportMetadataOnly;
> @@ -70,7 +71,7 @@
>        */
>       SingleFileInserter(BaseClientPutter parent, PutCompletionCallback cb, 
InsertBlock block, 
>                       boolean metadata, InsertContext ctx, boolean 
> dontCompress, 
> -                     boolean getCHKOnly, boolean reportMetadataOnly, Object 
> token, boolean 
insertAsArchiveManifest, 
> +                     boolean getCHKOnly, boolean reportMetadataOnly, Object 
> token, 
ARCHIVE_TYPE archiveType, 
>                       boolean freeData, String targetFilename, boolean 
> earlyEncode) throws 
InsertException {
>               this.earlyEncode = earlyEncode;
>               this.reportMetadataOnly = reportMetadataOnly;
> @@ -81,7 +82,7 @@
>               this.metadata = metadata;
>               this.cb = cb;
>               this.getCHKOnly = getCHKOnly;
> -             this.insertAsArchiveManifest = insertAsArchiveManifest;
> +             this.archiveType = archiveType;
>               this.freeData = freeData;
>               this.targetFilename = targetFilename;
>               logMINOR = Logger.shouldLog(Logger.MINOR, this);
> @@ -165,9 +166,10 @@
>                       // Try to compress the data.
>                       // Try each algorithm, starting with the fastest and 
> weakest.
>                       // Stop when run out of algorithms, or the compressed 
> data fits in a 
single block.
> -                     try {
>                               for(COMPRESSOR_TYPE comp : 
> COMPRESSOR_TYPE.values()) {
> -                                     if(logMINOR) Logger.minor(this, 
> "Attempt to compress using "+comp);
> +                             try {
> +                                     if(logMINOR)
> +                                             Logger.minor(this, "Attempt to 
> compress using " + comp);
>                                       // Only produce if we are compressing 
> *the original data*
>                                       if(parent == cb)
>                                               
> ctx.eventProducer.produceEvent(new StartedCompressionEvent(comp));
> @@ -180,30 +182,32 @@
>                                               bestCompressedData = result;
>                                               break;
>                                       }
> -                                     if((bestCompressedData != null) && 
> (result.size() <  
bestCompressedData.size())) {
> +                                     if((bestCompressedData != null) && 
> (result.size() < 
bestCompressedData.size())) {
>                                               bestCompressedData.free();
>                                               bestCompressedData = result;
>                                               bestCodec = comp;
>                                       } else if((bestCompressedData == null) 
> && (result.size() < 
data.size())) {
>                                               bestCompressedData = result;
>                                               bestCodec = comp;
> -                                     } else {
> +                                     } else
>                                               result.free();
> -                                     }
> -                             }
> -                     } catch (IOException e) {
> +
> +                             } catch(IOException e) {
>                               throw new 
> InsertException(InsertException.BUCKET_ERROR, e, null);
> -                     } catch (CompressionOutputSizeException e) {
> +                             } catch(CompressionOutputSizeException e) {
>                               // Impossible
>                               throw new Error(e);
>                       }
>               }
> -             boolean freeData = false;
> +             }
> +             boolean shouldFreeData = false;
>               if(bestCompressedData != null) {
>                       long compressedSize = bestCompressedData.size();
>                       if(logMINOR) Logger.minor(this, "The best compression 
> algorithm 
is "+bestCodec+ " we have a "+origSize/compressedSize+" ratio! 
("+origSize+'/'+compressedSize+')');
>                       data = bestCompressedData;
> -                     freeData = true;
> +                     shouldFreeData = true;
> +                     block.clientMetadata.setCompressorType(bestCodec);

There is no need to store it here.

> +                     compressorUsed = bestCodec;
>               }
>               
>               if(parent == cb) {
> @@ -224,7 +228,7 @@
>                       throw new 
> InsertException(InsertException.INTERNAL_ERROR, "2GB+ should 
not encode to one block!", null);
>  
>               boolean noMetadata = ((block.clientMetadata == null) || 
block.clientMetadata.isTrivial()) && targetFilename == null;
> -             if(noMetadata && !insertAsArchiveManifest) {
> +             if(noMetadata && archiveType == null) {
>                       if(fitsInOneBlockAsIs) {
>                               // Just insert it
>                               ClientPutState bi =

If it fits in a single block and we don't need a MIME type, we pass the 
compression codec in to SingleBlockInserter, and the key will include the 
compression type in the URI.

> @@ -239,7 +243,7 @@
>                       // Insert single block, then insert pointer to it
>                       if(reportMetadataOnly) {
>                               SingleBlockInserter dataPutter = new 
> SingleBlockInserter(parent, data, 
codecNumber, FreenetURI.EMPTY_CHK_URI, ctx, cb, metadata, (int)origSize, -1, 
getCHKOnly, true, true, token);

If the payload fits in a single block, we pass it in to the 
SingleBlockInserter here.

> -                             Metadata meta = 
> makeMetadata(dataPutter.getURI());
> +                             Metadata meta = makeMetadata(archiveType, 
> bestCodec, 
dataPutter.getURI());
>                               cb.onMetadata(meta, this);
>                               cb.onTransition(this, dataPutter);
>                               dataPutter.schedule();
> @@ -248,7 +252,7 @@
>                               MultiPutCompletionCallback mcb = 
>                                       new MultiPutCompletionCallback(cb, 
> parent, token);
>                               SingleBlockInserter dataPutter = new 
> SingleBlockInserter(parent, data, 
codecNumber, FreenetURI.EMPTY_CHK_URI, ctx, mcb, metadata, (int)origSize, -1, 
getCHKOnly, true, false, token);

And again, as you can see, if it fits in one block, we pass it in to SBI 
constructor, and that passes it in to the relevant KeyBlock constructor. It's 
encoded into the key for a CHK, or into the block-level metadata for an SSK.

> -                             Metadata meta = 
> makeMetadata(dataPutter.getURI());
> +                             Metadata meta = makeMetadata(archiveType, 
> bestCodec, 
dataPutter.getURI());
>                               Bucket metadataBucket;
>                               try {
>                                       metadataBucket = 
> BucketTools.makeImmutableBucket(ctx.bf, 
meta.writeToByteArray());
> @@ -280,13 +284,13 @@
>               // insert it. Then when the splitinserter has finished, and the
>               // metadata insert has finished too, tell the master callback.
>               if(reportMetadataOnly) {
> -                     SplitFileInserter sfi = new SplitFileInserter(parent, 
> cb, data, 
bestCodec, origSize, block.clientMetadata, ctx, getCHKOnly, metadata, token, 
insertAsArchiveManifest, freeData);
> +                     SplitFileInserter sfi = new SplitFileInserter(parent, 
> cb, data, 
bestCodec, origSize, block.clientMetadata, ctx, getCHKOnly, metadata, token, 
archiveType, shouldFreeData);

And again, the compression type is passed in here.

So it should not be necessary to include it in the ClientMetadata. It is in 
any case not the sort of thing that should be on the ClientMetadata.

>                       cb.onTransition(this, sfi);
>                       sfi.start();
>                       if(earlyEncode) sfi.forceEncode();
>               } else {
>                       SplitHandler sh = new SplitHandler();
> -                     SplitFileInserter sfi = new SplitFileInserter(parent, 
> sh, data, 
bestCodec, origSize, block.clientMetadata, ctx, getCHKOnly, metadata, token, 
insertAsArchiveManifest, freeData);
> +                     SplitFileInserter sfi = new SplitFileInserter(parent, 
> sh, data, 
bestCodec, origSize, block.clientMetadata, ctx, getCHKOnly, metadata, token, 
archiveType, shouldFreeData);
>                       sh.sfi = sfi;
>                       cb.onTransition(this, sh);
>                       sfi.start();
> @@ -294,8 +298,12 @@
>               }
>       }
>       
> -     private Metadata makeMetadata(FreenetURI uri) {
> -             Metadata meta = new Metadata(insertAsArchiveManifest ? 
Metadata.ARCHIVE_MANIFEST : Metadata.SIMPLE_REDIRECT, 
ARCHIVE_TYPE.getDefault().metadataID, uri, block.clientMetadata);
> +     private Metadata makeMetadata(ARCHIVE_TYPE archiveType, COMPRESSOR_TYPE 
codec, FreenetURI uri) {
> +             Metadata meta = null;
> +             if(archiveType != null)
> +                     meta = new Metadata(Metadata.ARCHIVE_MANIFEST, 
> archiveType, codec, uri, 
block.clientMetadata);
> +             else  // redirect
> +                     meta = new Metadata(Metadata.SIMPLE_REDIRECT, 
> archiveType, codec, uri, 
block.clientMetadata);
>               if(targetFilename != null) {
>                       HashMap hm = new HashMap();
>                       hm.put(targetFilename, meta);

makeMetadata is only called if the data fits in a single block, in which case 
the compression codec is already part of the key, so it can safely be left 
out: from the point of view of ArchiveManager.extractToCache, it is a plain 
uncompressed tar archive.

> @@ -363,7 +371,7 @@
>                       if(sfiFS == null)
>                               throw new ResumeException("No 
> SplitFileInserter");
>                       ClientPutState newSFI, newMetaPutter = null;
> -                     newSFI = new SplitFileInserter(parent, this, 
> forceMetadata ? null : 
block.clientMetadata, ctx, getCHKOnly, meta, token, insertAsArchiveManifest, 
sfiFS);
> +                     newSFI = new SplitFileInserter(parent, this, 
> forceMetadata ? null : 
block.clientMetadata, ctx, getCHKOnly, meta, token, archiveType, 
compressorUsed, sfiFS);

Here we are starting an insert from a SimpleFieldSet i.e. from 
downloads.dat.gz. SplitFileInserter will pull the CompressionCodec from the 
SimpleFieldSet.

>                       if(logMINOR) Logger.minor(this, "Starting "+newSFI+" 
> for "+this);
>                       fs.removeSubset("SplitFileInserter");
>                       SimpleFieldSet metaFS = fs.subset("MetadataPutter");
> @@ -373,7 +381,7 @@
>                                       if(type.equals("SplitFileInserter")) {
>                                               // FIXME 
> insertAsArchiveManifest ?!?!?!
>                                               newMetaPutter = 
> -                                                     new 
> SplitFileInserter(parent, this, null, ctx, getCHKOnly, true, 
token, insertAsArchiveManifest, metaFS);
> +                                                     new 
> SplitFileInserter(parent, this, null, ctx, getCHKOnly, true, 
token, archiveType, compressorUsed, metaFS);

Likewise here. Just because the data itself is compressed with bzip2 doesn't 
mean the metadata (which is itself split because the file is big) is 
compressed with bzip2. It might be compressed with gzip, if gzip is smaller.

>                                       } else if(type.equals("SplitHandler")) {
>                                               newMetaPutter = new 
> SplitHandler();
>                                               
> ((SplitHandler)newMetaPutter).start(metaFS, true);
> @@ -522,7 +530,7 @@
>                       InsertBlock newBlock = new InsertBlock(metadataBucket, 
> null, 
block.desiredURI);
>                       try {
>                               synchronized(this) {
> -                                     metadataPutter = new 
> SingleFileInserter(parent, this, newBlock, true, 
ctx, false, getCHKOnly, false, token, false, true, metaPutterTargetFilename, 
earlyEncode);
> +                                     metadataPutter = new 
> SingleFileInserter(parent, this, newBlock, true, 
ctx, false, getCHKOnly, false, token, archiveType, true, 
metaPutterTargetFilename, earlyEncode);
>                                       // If EarlyEncode, then start the 
> metadata insert ASAP, to get the 
key.
>                                       // Otherwise, wait until the data is 
> fetchable (to improve 
persistence).
>                                       if(!(earlyEncode || 
> splitInsertSuccess)) return;
> 
> Modified: trunk/freenet/src/freenet/client/async/SplitFileInserter.java
> ===================================================================
> --- trunk/freenet/src/freenet/client/async/SplitFileInserter.java     
> 2008-10-21 
14:22:02 UTC (rev 23013)
> +++ trunk/freenet/src/freenet/client/async/SplitFileInserter.java     
> 2008-10-21 
15:24:47 UTC (rev 23014)
> @@ -28,7 +28,7 @@
>       final InsertContext ctx;
>       final PutCompletionCallback cb;
>       final long dataLength;
> -     final short compressionCodec;
> +     final COMPRESSOR_TYPE compressionCodec;
>       final short splitfileAlgorithm;
>       final int segmentSize;
>       final int checkSegmentSize;
> @@ -42,7 +42,7 @@
>       private volatile boolean finished;
>       private boolean fetchable;
>       public final Object token;
> -     final boolean insertAsArchiveManifest;
> +     final ARCHIVE_TYPE archiveType;
>       private boolean forceEncode;
>       private final long decompressedLength;
>  
> @@ -53,7 +53,7 @@
>               fs.putSingle("Type", "SplitFileInserter");
>               fs.put("DataLength", dataLength);
>               fs.put("DecompressedLength", decompressedLength);
> -             fs.put("CompressionCodec", compressionCodec);
> +             fs.putSingle("CompressionCodec", compressionCodec.toString());
>               fs.put("SplitfileCodec", splitfileAlgorithm);
>               fs.put("Finished", finished);
>               fs.put("SegmentSize", segmentSize);
> @@ -67,10 +67,11 @@
>               return fs;
>       }
>  
> -     public SplitFileInserter(BaseClientPutter put, PutCompletionCallback 
> cb, 
Bucket data, COMPRESSOR_TYPE bestCodec, long decompressedLength, 
ClientMetadata clientMetadata, InsertContext ctx, boolean getCHKOnly, boolean 
isMetadata, Object token, boolean insertAsArchiveManifest, boolean freeData) 
throws InsertException {
> +     public SplitFileInserter(BaseClientPutter put, PutCompletionCallback 
> cb, 
Bucket data, COMPRESSOR_TYPE bestCodec, long decompressedLength, 
ClientMetadata clientMetadata, InsertContext ctx, boolean getCHKOnly, boolean 
isMetadata, Object token, ARCHIVE_TYPE archiveType, boolean freeData) throws 
InsertException {
>               logMINOR = Logger.shouldLog(Logger.MINOR, this);
>               this.parent = put;
> -             this.insertAsArchiveManifest = insertAsArchiveManifest;
> +             this.archiveType = archiveType;
> +             this.compressionCodec = bestCodec;
>               this.token = token;
>               this.finished = false;
>               this.isMetadata = isMetadata;
> @@ -88,10 +89,6 @@
>               }
>               countDataBlocks = dataBuckets.length;
>               // Encoding is done by segments
> -             if(bestCodec == null)
> -                     compressionCodec = -1;
> -             else
> -                     compressionCodec = bestCodec.metadataID;
>               this.splitfileAlgorithm = ctx.splitfileAlgorithm;
>               segmentSize = ctx.splitfileSegmentDataBlocks;
>               checkSegmentSize = splitfileAlgorithm == 
Metadata.SPLITFILE_NONREDUNDANT ? 0 : ctx.splitfileSegmentCheckBlocks;
> @@ -106,10 +103,10 @@
>               parent.onMajorProgress();
>       }
>  
> -     public SplitFileInserter(BaseClientPutter parent, PutCompletionCallback 
cb, ClientMetadata clientMetadata, InsertContext ctx, boolean getCHKOnly, 
boolean metadata, Object token, boolean insertAsArchiveManifest, 
SimpleFieldSet fs) throws ResumeException {
> +     public SplitFileInserter(BaseClientPutter parent, PutCompletionCallback 
cb, ClientMetadata clientMetadata, InsertContext ctx, boolean getCHKOnly, 
boolean metadata, Object token, ARCHIVE_TYPE archiveType, COMPRESSOR_TYPE 
bestCodec, SimpleFieldSet fs) throws ResumeException {

As I have explained, the compression type etc. should be pulled from the 
SimpleFieldSet here.

>               this.parent = parent;
> -             this.insertAsArchiveManifest = insertAsArchiveManifest;
> +             this.archiveType = archiveType;
>               this.token = token;
>               this.finished = false;
>               this.isMetadata = metadata;
> @@ -149,12 +146,13 @@
>               } catch (NumberFormatException e) {
>                       throw new ResumeException("Corrupt CheckSegmentSize: 
> "+e+" : "+length);
>               }
> +             if(bestCodec != null) {
> +                     compressionCodec = bestCodec;
> +             } else {
>               String ccodec = fs.get("CompressionCodec");
> -             if(ccodec == null) throw new ResumeException("No compression 
> codec");
> -             try {
> -                     compressionCodec = Short.parseShort(ccodec);
> -             } catch (NumberFormatException e) {
> -                     throw new ResumeException("Corrupt CompressionCodec: 
> "+e+" : "+ccodec);
> +                     if(ccodec == null)
> +                             throw new ResumeException("No compression 
> codec");
> +                     compressionCodec = COMPRESSOR_TYPE.valueOf(ccodec);
>               }
>               String scodec = fs.get("SplitfileCodec");
>               if(scodec == null) throw new ResumeException("No splitfile 
> codec");
> @@ -291,7 +289,7 @@
>                       
>                       if(!missingURIs) {
>                               // Create Metadata
> -                             m = new Metadata(splitfileAlgorithm, dataURIs, 
> checkURIs, segmentSize, 
checkSegmentSize, cm, dataLength, compressionCodec, decompressedLength, 
isMetadata, insertAsArchiveManifest, ARCHIVE_TYPE.getDefault().metadataID);
> +                             m = new Metadata(splitfileAlgorithm, dataURIs, 
> checkURIs, segmentSize, 
checkSegmentSize, cm, dataLength, archiveType, compressionCodec, 
decompressedLength, isMetadata);
>                       }
>                       haveSentMetadata = true;
>               }
> 

Do we attempt to compress all files with bzip2 as well as gzip now? Shouldn't 
there be a max size configuration above which we don't try bzip2, perhaps 
unless asked to via FCP? bzip2'ing ISOs could take a really long time ...
-------------- next part --------------
A non-text attachment was scrubbed...
Name: not available
Type: application/pgp-signature
Size: 827 bytes
Desc: not available
URL: 
<https://emu.freenetproject.org/pipermail/devl/attachments/20081021/762b66f1/attachment.pgp>

Reply via email to