kstaken 2003/02/05 20:49:44
Modified: java/src/org/apache/xindice/core/filer Paged.java
Log:
Adding documentation and a little code cleanup.
Revision Changes Path
1.13 +100 -25
xml-xindice/java/src/org/apache/xindice/core/filer/Paged.java
Index: Paged.java
===================================================================
RCS file:
/home/cvs/xml-xindice/java/src/org/apache/xindice/core/filer/Paged.java,v
retrieving revision 1.12
retrieving revision 1.13
diff -u -r1.12 -r1.13
--- Paged.java 26 Nov 2002 01:50:16 -0000 1.12
+++ Paged.java 6 Feb 2003 04:49:43 -0000 1.13
@@ -86,20 +86,37 @@
*/
public abstract class Paged {
+ // The maximum number of pages that will be held in the dirty cache.
private static final int MAX_DIRTY_SIZE = 128;
+
+ // The maximum number of open random access files we can have
private static final int MAX_DESCRIPTORS = 16;
protected static final byte UNUSED = 0;
protected static final byte OVERFLOW = 126;
protected static final byte DELETED = 127;
+ protected static final int NO_PAGE = -1;
+
+ // Cache of recently read pages.
private Map pages = new WeakHashMap();
+
+ // Cache of modified pages waiting to be written out.
private Map dirty = new HashMap();
- private File file;
+ // Random access file cache.
private Stack descriptors = new Stack();
+
+ // The number of random access file objects that exist. Either in the cache
+ // or in use.
private int descCount = 0;
+
+ // Whether the file is opened or not.
private boolean opened = false;
+
+ // The underlying file where the Paged object stores its pages.
+ private File file;
+
private FileHeader fileHeader;
public Paged() {
@@ -130,13 +147,18 @@
}
protected synchronized final RandomAccessFile getDescriptor() throws
IOException {
- if ( !descriptors.empty() )
+ // If there are descriptors in the cache return one.
+ if ( !descriptors.empty() ) {
return (RandomAccessFile)descriptors.pop();
+ }
+ // Otherwise we need to get one some other way.
else {
+ // First try to create a new one if there's room
if ( descCount < MAX_DESCRIPTORS ) {
descCount++;
return new RandomAccessFile(file, "rw");
}
+ // Otherwise we have to wait for one to be released by another thread.
else {
while ( true ) {
try {
@@ -151,6 +173,9 @@
}
}
+ /**
+ * Puts a RandomAccessFile/descriptor into the descriptor cache.
+ */
protected synchronized final void putDescriptor(RandomAccessFile raf) {
if ( raf != null ) {
descriptors.push(raf);
@@ -168,20 +193,29 @@
protected final Page getPage(Long lp) throws IOException {
Page p;
synchronized ( this ) {
- p = (Page)dirty.get(lp); // Check if it's in the dirty stash
- if ( p == null )
- p = (Page)pages.get(lp); // Check if it's in the volatile cache
+ // Check if it's in the dirty cache
+ p = (Page)dirty.get(lp);
+
+ // if not check if it's already loaded in the page cache
+ if ( p == null ) {
+ p = (Page)pages.get(lp);
+ }
+
+ // if still not found we need to create it and add it to the page cache.
if ( p == null ) {
p = new Page(lp.longValue());
pages.put(lp, p);
}
}
+
+ // Load the page from disk if necessary
synchronized ( p ) {
- if ( !p.isLoaded() ) {
+ if ( ! p.isLoaded() ) {
p.read();
p.setLoaded(true);
}
}
+
return p;
}
@@ -210,15 +244,23 @@
Page p = page;
PageHeader ph = null;
long nextPage;
+
+ // Loop until we've read all the pages into memory.
while ( true ) {
ph = p.getPageHeader();
+
+ // Add the contents of the page onto the stream
p.streamTo(bos);
+
+ // Continue following the list of pages until we get to the end.
nextPage = ph.getNextPage();
- if ( nextPage != -1 )
+ if ( nextPage != NO_PAGE )
p = getPage(nextPage);
else
break;
}
+
+ // Return a Value with the collected contents of all pages.
return new Value(bos.toByteArray());
}
@@ -249,34 +291,46 @@
InputStream is = value.getInputStream();
+ // Write as much as we can onto the primary page.
PageHeader hdr = page.getPageHeader();
hdr.setRecordLen(value.getLength());
page.streamFrom(is);
- // Write out the rest of the value
+ // Write out the rest of the value onto any needed overflow pages
while ( is.available() > 0 ) {
Page lpage = page;
PageHeader lhdr = hdr;
+ // Find an overflow page to use
long np = lhdr.getNextPage();
- if ( np != -1 )
+ if ( np != NO_PAGE ) {
+ // Use an existing page.
page = getPage(np);
+ }
else {
+ // Create a new overflow page
page = getFreePage();
lhdr.setNextPage(page.getPageNum());
}
+ // Mark the page as an overflow page.
hdr = page.getPageHeader();
hdr.setStatus(OVERFLOW);
+
+ // Write some more of the value to the overflow page.
page.streamFrom(is);
lpage.write();
}
+ // Cleanup any unused overflow pages. i.e. the value is smaller than the
+ // last time it was written.
long np = hdr.getNextPage();
- if ( np != -1 )
+ if ( np != NO_PAGE ) {
unlinkPages(np);
- hdr.setNextPage(-1);
+ }
+
+ hdr.setNextPage(NO_PAGE);
page.write();
}
@@ -299,31 +353,48 @@
* @throws IOException if an Exception occurs
*/
protected final void unlinkPages(Page page) throws IOException {
- // If the page is in primary space, just reset it's
- // status. If it's in overflow, add it to the unused list.
+ // Handle the page if it's in primary space by setting its status to
+ // DELETED and freeing any overflow pages linked to it.
if ( page.pageNum < fileHeader.pageCount ) {
long nextPage = page.header.nextPage;
page.header.setStatus(DELETED);
- page.header.setNextPage(-1);
+ page.header.setNextPage(NO_PAGE);
page.write();
- page = nextPage != -1 ? getPage(nextPage)
- : null;
+
+ // See if there are any chained pages from the page that was just removed
+ if ( nextPage == NO_PAGE ) {
+ page = null;
+ }
+ else {
+ page = getPage(nextPage);
+ }
}
+ // Add any overflow pages to the list of free pages.
if ( page != null ) {
- // Walk the chain and add it to the unused list
+ // Get the first page in the chain.
long firstPage = page.pageNum;
- while ( page.header.nextPage != -1 )
+
+ // Find the last page in the chain.
+ while ( page.header.nextPage != NO_PAGE ) {
page = getPage(page.header.nextPage);
+ }
long lastPage = page.pageNum;
- if ( fileHeader.lastFreePage != -1 ) {
+ // If there are already some free pages, add the start of the chain
+ // to the list of free pages.
+ if ( fileHeader.lastFreePage != NO_PAGE ) {
Page p = getPage(fileHeader.lastFreePage);
p.header.setNextPage(firstPage);
p.write();
}
- if ( fileHeader.firstFreePage == -1 )
+
+ // Otherwise set the chain as the list of free pages.
+ if ( fileHeader.firstFreePage == NO_PAGE ) {
fileHeader.setFirstFreePage(firstPage);
+ }
+
+ // Add a reference to the end of the chain.
fileHeader.setLastFreePage(lastPage);
}
}
@@ -349,12 +420,12 @@
protected final Page getFreePage() throws IOException {
Page p = null;
long pageNum = fileHeader.firstFreePage;
- if ( pageNum != -1 ) {
+ if ( pageNum != NO_PAGE ) {
// Steal a deleted page
p = getPage(pageNum);
fileHeader.setFirstFreePage(p.getPageHeader().nextPage);
- if ( fileHeader.firstFreePage == -1 )
- fileHeader.setLastFreePage(-1);
+ if ( fileHeader.firstFreePage == NO_PAGE )
+ fileHeader.setLastFreePage(NO_PAGE);
}
else {
// Grow the file
@@ -364,7 +435,7 @@
}
// Initialize The Page Header (Cleanly)
- p.header.setNextPage(-1);
+ p.header.setNextPage(NO_PAGE);
p.header.setStatus(UNUSED);
return p;
}
@@ -1099,7 +1170,11 @@
public synchronized void setKey(Key key) {
header.setKey(key);
+ // Insert the key into the data array.
key.copyTo(data, keyPos);
+
+ // Set the start of data to skip over the key.
+ dataPos = keyPos + header.keyLen;
}
public synchronized Key getKey() {