On Tuesday 14 April 2009 19:42:37 sa...@freenetproject.org wrote:
> Author: saces
> Date: 2009-04-14 18:42:36 +0000 (Tue, 14 Apr 2009)
> New Revision: 26788
>
> Added:
>    trunk/freenet/src/freenet/client/async/ContainerInserter.java
> Log:
> new class ContainerInserter - insert a ManifestElements as container
> TODOs:
>  fix resolve metadata
>   -how to format the redirects/submetadata?
>   -the current metadataMap thingy s***s, invent a working one ;)
>  persistence
>  add a MAX_SIZE for the final container
>
> Added: trunk/freenet/src/freenet/client/async/ContainerInserter.java
> ===================================================================
> --- trunk/freenet/src/freenet/client/async/ContainerInserter.java    (rev 0)
> +++ trunk/freenet/src/freenet/client/async/ContainerInserter.java    2009-04-14 18:42:36 UTC (rev 26788)
...
> +
> +    /**
> +     * @param metadata2
> +     * @param archiveType2
> +     * @param targetURI2
> +     * @param token2
> +     * @param getCHKOnly2
> +     * @param earlyEncode2
> +     * @param ctx2
> +     * @param reportMetadataOnly2
> +     *
> +     */
> +    public ContainerInserter(
> +            BaseClientPutter parent2,
> +            PutCompletionCallback cb2,
> +            HashMap<String, Object> metadata2,
> +            FreenetURI targetURI2,
> +            InsertContext ctx2,
> +            boolean dontCompress2,
> +            boolean getCHKOnly2,
> +            boolean reportMetadataOnly2,
> +            Object token2,
> +            ARCHIVE_TYPE archiveType2,
> +            boolean freeData,
> +            boolean earlyEncode2) {
> +        parent = parent2;
> +        cb = cb2;
> +        hashCode = super.hashCode();
> +        persistent = parent.persistent();
Pass in a boolean unless you are 100% sure that parent is active. If you are, document it.

> +        origMetadata = metadata2;
> +        archiveType = archiveType2;
> +        targetURI = targetURI2;
> +        token = token2;
> +        getCHKOnly = getCHKOnly2;
> +        earlyEncode = earlyEncode2;
> +        ctx = ctx2;
> +        dontCompress = dontCompress2;
> +        reportMetadataOnly = reportMetadataOnly2;
> +        containerItems = new ArrayList<ContainerElement>();
> +        metadataMap = new HashMap<Metadata, Entry<String, Object>>();
> +    }
> +
...
> +
> +    private void start(ObjectContainer container, ClientContext context) {
> +        if(logDEBUG) Logger.debug(this, "Atempt to start a container inserter", new Exception("debug"));
> +
> +        makeMetadata(context, container);
> +
> +        synchronized(this) {
> +            if(finished) return;
> +        }
> +
> +        InsertBlock block;
> +        try {
> +            Bucket outputBucket = context.getBucketFactory(persistent).makeBucket(-1);
> +            String mimeType = (archiveType == ARCHIVE_TYPE.TAR ?
> +                createTarBucket(outputBucket, container) :
> +                createZipBucket(outputBucket, container));
> +
> +            if(logMINOR) Logger.minor(this, "We are using "+archiveType);
> +
> +            // Now we have to insert the Archive we have generated.
> +
> +            // Can we just insert it, and not bother with a redirect to it?
> +            // Thereby exploiting implicit manifest support, which will pick up on .metadata??
> +            // We ought to be able to !!
> +            block = new InsertBlock(outputBucket, new ClientMetadata(mimeType), persistent ? targetURI.clone() : targetURI);
> +        } catch (IOException e) {
> +            fail(new InsertException(InsertException.BUCKET_ERROR, e, null), container, context);
> +            return;
> +        }
> +
> +        boolean dc = dontCompress;
> +        if (!dontCompress) {
> +            dc = (archiveType == ARCHIVE_TYPE.ZIP);
> +        }
> +
> +        SingleFileInserter sfi = new SingleFileInserter(parent, cb, block, false, ctx, dc, getCHKOnly, reportMetadataOnly, token, archiveType, true, null, earlyEncode);

The SFI is not reporting back to the ContainerInserter! Is this a problem? Does ContainerInserter need to know when the container insert has finished? (Given that some stuff will have to be inserted separately, e.g. because of size?)

> +        if(logMINOR)
> +            Logger.minor(this, "Inserting container: "+sfi+" for "+this);
> +        cb.onTransition(this, sfi, container);

Activate cb.

> +        try {
> +            sfi.schedule(container, context);
> +        } catch (InsertException e) {
> +            fail(new InsertException(InsertException.BUCKET_ERROR, e, null), container, context);
> +            return;
> +        }
> +        cb.onBlockSetFinished(this, container, context);
> +    }
> +
> +    private void makeMetadata(ClientContext context, ObjectContainer container) {
> +
> +        Bucket bucket = null;
> +        int x = 0;
> +
> +        while(true) {
> +            try {
> +                HashMap<String,Object> manifest = new HashMap<String,Object>();
> +                makeManifest(origMetadata, manifest, "/");
> +                Metadata md = Metadata.mkRedirectionManifestWithMetadata(manifest);
> +                bucket = context.tempBucketFactory.makeBucket(Metadata.MAX_SPLITFILE_PARAMS_LENGTH);

Seems a rather odd size limit for a bucket?
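MAX_SPLITFILE_PARAMS_LENGTH doesn't obviously have anything to do with how big a serialised manifest can get. Something along these lines would at least tie the bucket to the actual data - just a sketch, assuming makeBucket()'s argument is only a size hint, and ignoring the unresolved-metadata retry for the moment:

    // Serialise first, then size the bucket from the result.
    byte[] buf = md.writeToByteArray();
    Bucket bucket = context.tempBucketFactory.makeBucket(buf.length);
    OutputStream os = bucket.getOutputStream();
    try {
        os.write(buf);
    } finally {
        os.close();
    }
    containerItems.add(new ContainerElement(bucket, ".metadata"));
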
> +                byte[] buf = md.writeToByteArray();
> +                OutputStream os = bucket.getOutputStream();
> +                os.write(buf);
> +                os.close();
> +                containerItems.add(new ContainerElement(bucket, ".metadata"));
> +                return;
> +            } catch (IOException e) {
> +                fail(new InsertException(InsertException.INTERNAL_ERROR, e, null), container, context);
> +                return;
> +            } catch (MetadataUnresolvedException e) {
> +                try {
> +                    x = resolve(e, x, bucket, null, null, container, context);
> +                } catch (IOException e1) {
> +                    fail(new InsertException(InsertException.INTERNAL_ERROR, e, null), container, context);
> +                    return;
> +                }
> +            }
> +        }
> +
> +    }
> +
> +    private int resolve(MetadataUnresolvedException e, int x, Bucket bucket, FreenetURI key, String element2, ObjectContainer container, ClientContext context) throws IOException {
> +        Metadata[] m = e.mustResolve;
> +        for(int i=0;i<m.length;i++) {
> +            try {
> +                byte[] buf = m[i].writeToByteArray();
> +                OutputStream os = bucket.getOutputStream();
> +                os.write(buf);
> +                os.close();
> +                String nameInArchive = ".metadata-"+(x++);
> +                containerItems.add(new ContainerElement(bucket, nameInArchive));
> +                Entry<String, Object> entry = metadataMap.get(m[i]);
> +                entry.setValue(new Metadata(Metadata.ARCHIVE_INTERNAL_REDIRECT, null, null, nameInArchive, null));
> +            } catch (MetadataUnresolvedException e1) {
> +                x = resolve(e, x, bucket, key, element2, container, context);

bucket is reused, surely this will cause problems? (Rough sketch of what I'd expect below, after the quoted code.)

> +            }
> +        }
> +        return x;
> +    }
> +
...
> +
> +    private String createTarBucket(Bucket outputBucket, @SuppressWarnings("unused") ObjectContainer container) throws IOException {
> +        if(logMINOR) Logger.minor(this, "Create a TAR Bucket");
> +
> +        OutputStream os = new BufferedOutputStream(outputBucket.getOutputStream());
> +        TarOutputStream tarOS = new TarOutputStream(os);
> +        tarOS.setLongFileMode(TarOutputStream.LONGFILE_GNU);
> +        TarEntry ze;
> +
> +        for(ContainerElement ph : containerItems) {
> +            if(logMINOR)
> +                Logger.minor(this, "Putting into tar: "+ph+" data length "+ph.data.size()+" name "+ph.targetInArchive);
> +            ze = new TarEntry(ph.targetInArchive);
> +            ze.setModTime(0);
> +            long size = ph.data.size();
> +            ze.setSize(size);
> +            tarOS.putNextEntry(ze);
> +            BucketTools.copyTo(ph.data, tarOS, size);
> +            tarOS.closeEntry();
> +        }
> +
> +        tarOS.closeEntry();
> +        // Both finish() and close() are necessary.
> +        tarOS.finish();
> +        tarOS.flush();
> +        tarOS.close();
> +
> +        if(logMINOR)
> +            Logger.minor(this, "Archive size is "+outputBucket.size());
> +
> +        return ARCHIVE_TYPE.TAR.mimeTypes[0];
> +    }
> +
> +    private String createZipBucket(Bucket outputBucket, @SuppressWarnings("unused") ObjectContainer container) throws IOException {

Are you planning to support both?

> +        if(logMINOR) Logger.minor(this, "Create a ZIP Bucket");
> +
> +        OutputStream os = new BufferedOutputStream(outputBucket.getOutputStream());
> +        ZipOutputStream zos = new ZipOutputStream(os);
> +        ZipEntry ze;
> +
> +        for(ContainerElement ph: containerItems) {
> +            ze = new ZipEntry(ph.targetInArchive);
> +            ze.setTime(0);
> +            zos.putNextEntry(ze);
> +            BucketTools.copyTo(ph.data, zos, ph.data.size());
> +            zos.closeEntry();
> +        }
> +
> +        zos.closeEntry();
> +        // Both finish() and close() are necessary.
> +        zos.finish();
> +        zos.flush();
> +        zos.close();
> +
> +        return ARCHIVE_TYPE.ZIP.mimeTypes[0];
> +    }
> +
> +    private void makeManifest(HashMap<String, Object> manifestElements, HashMap<String,Object> manifest, String archivePrefix) {

Caller must ensure the whole subtree is activated.
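To be concrete about what "the whole subtree" means here: something like this for the persistent case, before makeManifest() is first called. Purely illustrative - activateManifestTree() is not in the tree, and the depths are a guess:

    // Illustration only: recursively activate the manifest map and its values.
    private void activateManifestTree(HashMap<String, Object> map, ObjectContainer container) {
        container.activate(map, 2);
        for(Object o : map.values()) {
            if(o instanceof HashMap)
                activateManifestTree((HashMap<String, Object>) o, container);
            else
                container.activate(o, 1);
        }
    }

Otherwise the nested HashMaps and the ManifestElement fields may come back as null on deactivated objects.
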
> +        for (Entry<String, Object>entry:manifestElements.entrySet()) {
> +            String name = entry.getKey();
> +            Object o = entry.getValue();
> +            if(o instanceof HashMap) {
> +                @SuppressWarnings("unchecked")
> +                HashMap<String,Object> hm = (HashMap<String, Object>) o;
> +                HashMap<String,Object> subMap = new HashMap<String,Object>();
> +                //System.out.println("Decompose: "+name+" (SubDir)");
> +                manifest.put(name, subMap);
> +                makeManifest(hm, subMap, archivePrefix+name+ '/');
> +                if(Logger.shouldLog(Logger.DEBUG, this))
> +                    Logger.debug(this, "Sub map for "+name+" : "+subMap.size()+" elements from "+hm.size());
> +            } else if (o instanceof Metadata) {
> +                //already Metadata, take as is
> +                //System.out.println("Decompose: "+name+" (Metadata)");
> +                manifest.put(name, o);
> +                metadataMap.put((Metadata) o, entry);
> +            } else {
> +                ManifestElement element = (ManifestElement) o;
> +                String mimeType = element.mimeOverride;
> +                ClientMetadata cm;
> +                if(mimeType == null || mimeType.equals(DefaultMIMETypes.DEFAULT_MIME_TYPE))
> +                    cm = null;
> +                else
> +                    cm = new ClientMetadata(mimeType);
> +                Metadata m;
> +                if(element.targetURI != null) {
> +                    //System.out.println("Decompose: "+name+" (ManifestElement, Redirect)");
> +                    m = new Metadata(Metadata.SIMPLE_REDIRECT, null, null, element.targetURI, cm);
> +                } else {
> +                    //System.out.println("Decompose: "+name+" (ManifestElement, Data)");
> +                    containerItems.add(new ContainerElement(element.data, archivePrefix+name));
> +                    m = new Metadata(Metadata.ARCHIVE_INTERNAL_REDIRECT, null, null, archivePrefix+element.fullName, cm);
> +                }

You should check size and make it external if it is too big? Or will that be done already by this point?

> +                manifest.put(name, m);
> +                metadataMap.put(m, entry);
> +            }
> +        }
> +    }
> +}
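On the size question just above: what I had in mind is roughly the check below when decomposing a ManifestElement. MAX_CONTAINER_ITEM_SIZE is a made-up constant and the external-insert branch is only a comment; the point is just that oversized data shouldn't be packed into the archive:

    // Sketch only: keep small elements in the container, push big ones out.
    if(element.targetURI == null) {
        if(element.data.size() > MAX_CONTAINER_ITEM_SIZE) { // placeholder limit
            // Too big for the container: this element would have to be
            // inserted separately, with a redirect to the resulting URI
            // in the manifest, rather than an ARCHIVE_INTERNAL_REDIRECT.
        } else {
            containerItems.add(new ContainerElement(element.data, archivePrefix+name));
            m = new Metadata(Metadata.ARCHIVE_INTERNAL_REDIRECT, null, null, archivePrefix+element.fullName, cm);
        }
    }

If oversized ManifestElements are already split out before they reach ContainerInserter, fine, but then that should be documented as a precondition.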
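And going back to resolve() above: reusing the same bucket for every unresolved Metadata looks broken, since every ContainerElement ends up pointing at the same bucket and each getOutputStream() call may clobber the previous contents, depending on the bucket implementation. I'd expect a fresh temp bucket per element, roughly like this - a sketch only, assuming resolve() drops the shared bucket parameter; note also that the current recursive call passes e rather than e1:

    for(Metadata sub : e.mustResolve) {
        try {
            byte[] buf = sub.writeToByteArray();
            Bucket b = context.tempBucketFactory.makeBucket(buf.length);
            OutputStream os = b.getOutputStream();
            os.write(buf);
            os.close();
            String nameInArchive = ".metadata-"+(x++);
            containerItems.add(new ContainerElement(b, nameInArchive));
            Entry<String, Object> entry = metadataMap.get(sub);
            entry.setValue(new Metadata(Metadata.ARCHIVE_INTERNAL_REDIRECT, null, null, nameInArchive, null));
        } catch (MetadataUnresolvedException e1) {
            // Recurse on e1 (not e!), still without sharing buckets.
            x = resolve(e1, x, container, context);
        }
    }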